index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
8,600 | e9659555938211d067919ee5e0083efb29d42d7b | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-22 00:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.10.5): add an optional ``classrm`` FK from
    Anouncements back to Classroom (replacing the field removed in 0003)."""

    dependencies = [
        ('classroom', '0003_remove_anouncements_classroom'),
    ]

    operations = [
        migrations.AddField(
            model_name='anouncements',
            name='classrm',
            # Nullable/blank so existing rows migrate without a default;
            # deleting a Classroom cascades to its announcements.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='anouncements', to='classroom.Classroom'),
        ),
    ]
|
8,601 | 2432e2b4da8af284055e7edf6e0bd94b7b293f0b | from __future__ import annotations
from .base import * # noqa
# NOTE(review): hard-coded "django-insecure-" key — acceptable only because this
# is a local test settings module; never reuse this key in production.
SECRET_KEY = "django-insecure-usp0sg081f=9+_j95j@-k^sfp+9c*!qrwh-m17%=_9^xot#9fn"

# Local PostgreSQL test database (localhost, empty password).
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": "puka-test",
        "USER": "jeff",
        "PASSWORD": "",
        "HOST": "127.0.0.1",
        "PORT": "5432",
    },
}
|
8,602 | 7727896d4e1b2b415c398b206f9fb7e228e6f26d | # DO NOT EDIT THIS FILE!
#
# Python module managedElementManager generated by omniidl
import omniORB
omniORB.updateModule("managedElementManager")
# ** 1. Stub files contributing to this module
import managedElementManager_idl
# ** 2. Sub-modules
# ** 3. End
|
8,603 | 9447d0d0481df3d0ee4273256d02977bc8044e4e | # -*- coding: utf-8 -*-
"""
python3
description :Fingerprint image enhancement by using gabor
"""
import os
import cv2
import math
import scipy
import numpy as np
from scipy import signal
def normalise(img):
    """Return *img* shifted and scaled to zero mean and unit variance."""
    return (img - img.mean()) / img.std()
def ridge_segment(im, blksze, thresh):
    """Normalise ``im`` and segment the ridge region by blockwise std-dev.

    The image is padded to a multiple of ``blksze``, the standard deviation
    of each block is computed, and pixels in blocks with std-dev above
    ``thresh`` form the ridge mask.

    Returns:
        (normim, mask): image renormalised using ridge-region statistics,
        and the boolean ridge mask.
    """
    rows, cols = im.shape
    im = normalise(im)
    # np.int was removed in NumPy 1.24; the builtin int is the correct spelling.
    new_rows = int(blksze * np.ceil(rows / blksze))
    new_cols = int(blksze * np.ceil(cols / blksze))
    padded_img = np.zeros((new_rows, new_cols))
    stddevim = np.zeros((new_rows, new_cols))
    padded_img[0:rows][:, 0:cols] = im
    for i in range(0, new_rows, blksze):
        for j in range(0, new_cols, blksze):
            block = padded_img[i:i + blksze][:, j:j + blksze]
            # Broadcast the block's std-dev over the whole block.
            stddevim[i:i + blksze][:, j:j + blksze] = np.std(block) * np.ones(block.shape)
    stddevim = stddevim[0:rows][:, 0:cols]
    mask = stddevim > thresh
    # Renormalise using statistics of the ridge (high-variance) region only.
    mean_val = np.mean(im[mask])
    std_val = np.std(im[mask])
    normim = (im - mean_val) / (std_val)
    return (normim, mask)
def ridge_orient(im, gradientsigma, blocksigma, orientsmoothsigma):
    """Estimate the local ridge orientation (radians) at every pixel.

    Computes gradients of a Gaussian, smooths the gradient covariance
    blockwise, then optionally smooths the doubled-angle sine/cosine before
    recovering the orientation.
    """
    # Calculate image gradients.
    sze = np.fix(6 * gradientsigma)
    if np.remainder(sze, 2) == 0:
        sze = sze + 1
    # np.int was removed in NumPy 1.24; builtin int is equivalent here.
    gauss = cv2.getGaussianKernel(int(sze), gradientsigma)
    f = gauss * gauss.T
    fy, fx = np.gradient(f)  # Gradient of Gaussian
    Gx = signal.convolve2d(im, fx, mode='same')
    Gy = signal.convolve2d(im, fy, mode='same')
    Gxx = np.power(Gx, 2)
    Gyy = np.power(Gy, 2)
    Gxy = Gx * Gy
    # Now smooth the covariance data to perform a weighted summation of the data.
    sze = np.fix(6 * blocksigma)
    gauss = cv2.getGaussianKernel(int(sze), blocksigma)
    f = gauss * gauss.T
    Gxx = scipy.ndimage.convolve(Gxx, f)
    Gyy = scipy.ndimage.convolve(Gyy, f)
    Gxy = 2 * scipy.ndimage.convolve(Gxy, f)
    # Analytic solution of principal direction (eps avoids division by zero).
    denom = np.sqrt(np.power(Gxy, 2) + np.power((Gxx - Gyy), 2)) + np.finfo(float).eps
    sin2theta = Gxy / denom  # Sine and cosine of doubled angles
    cos2theta = (Gxx - Gyy) / denom
    if orientsmoothsigma:
        sze = np.fix(6 * orientsmoothsigma)
        if np.remainder(sze, 2) == 0:
            sze = sze + 1
        gauss = cv2.getGaussianKernel(int(sze), orientsmoothsigma)
        f = gauss * gauss.T
        # Smoothed sine and cosine of doubled angles.
        cos2theta = scipy.ndimage.convolve(cos2theta, f)
        sin2theta = scipy.ndimage.convolve(sin2theta, f)
    orientim = np.pi / 2 + np.arctan2(sin2theta, cos2theta) / 2
    return (orientim)
def frequest(im, orientim, windsze, minWaveLength, maxWaveLength):
    """Estimate the ridge frequency of a single image block.

    Rotates the block so ridges are vertical, projects down the columns,
    finds projection peaks, and derives the frequency from the mean peak
    spacing. Returns a block-sized array filled with the frequency, or
    zeros when no valid wavelength is found.
    """
    rows, cols = np.shape(im)
    # Find mean orientation within the block. This is done by averaging the
    # sines and cosines of the doubled angles before reconstructing the
    # angle again. This avoids wraparound problems at the origin.
    cosorient = np.mean(np.cos(2 * orientim))
    sinorient = np.mean(np.sin(2 * orientim))
    orient = math.atan2(sinorient, cosorient) / 2
    # Rotate the image block so that the ridges are vertical
    # ROT_mat = cv2.getRotationMatrix2D((cols/2,rows/2),orient/np.pi*180 + 90,1)
    # rotim = cv2.warpAffine(im,ROT_mat,(cols,rows))
    rotim = scipy.ndimage.rotate(
        im, orient / np.pi * 180 + 90, axes=(1, 0), reshape=False, order=3, mode='nearest')
    # Now crop the image so that the rotated image does not contain any
    # invalid regions. This prevents the projection down the columns
    # from being mucked up.
    cropsze = int(np.fix(rows / np.sqrt(2)))
    offset = int(np.fix((rows - cropsze) / 2))
    rotim = rotim[offset:offset + cropsze][:, offset:offset + cropsze]
    # Sum down the columns to get a projection of the grey values down
    # the ridges.
    proj = np.sum(rotim, axis=0)
    # Peaks are points where the grey dilation equals the projection
    # (within peak_thresh) and the projection is above its mean.
    dilation = scipy.ndimage.grey_dilation(
        proj, windsze, structure=np.ones(windsze))
    temp = np.abs(dilation - proj)
    peak_thresh = 2
    maxpts = (temp < peak_thresh) & (proj > np.mean(proj))
    maxind = np.where(maxpts)
    rows_maxind, cols_maxind = np.shape(maxind)
    # Determine the spatial frequency of the ridges by divinding the
    # distance between the 1st and last peaks by the (No of peaks-1). If no
    # peaks are detected, or the wavelength is outside the allowed bounds,
    # the frequency image is set to 0
    if cols_maxind < 2:
        freqim = np.zeros(im.shape)
    else:
        NoOfPeaks = cols_maxind
        waveLength = (maxind[0][cols_maxind - 1] - maxind[0][0]) / (NoOfPeaks - 1)
        if waveLength >= minWaveLength and waveLength <= maxWaveLength:
            freqim = 1 / np.double(waveLength) * np.ones(im.shape)
        else:
            freqim = np.zeros(im.shape)
    return freqim
def ridge_freq(im, mask, orient, blksze, windsze, minWaveLength, maxWaveLength):
    """Estimate the ridge frequency blockwise over the whole image.

    Calls :func:`frequest` on each ``blksze`` block, masks out non-ridge
    regions, and returns the per-pixel frequency image together with the
    mean of its positive entries.

    Returns:
        (freq, meanfreq)
    """
    rows, cols = im.shape
    freq = np.zeros((rows, cols))
    # NOTE: ranges stop at rows-blksze / cols-blksze, so a partial trailing
    # block is left at zero frequency (original behavior, kept).
    for r in range(0, rows - blksze, blksze):
        for c in range(0, cols - blksze, blksze):
            blkim = im[r:r + blksze][:, c:c + blksze]
            blkor = orient[r:r + blksze][:, c:c + blksze]
            freq[r:r + blksze][:, c:c + blksze] = frequest(
                blkim, blkor, windsze, minWaveLength, maxWaveLength)
    freq = freq * mask
    # Mean over strictly-positive entries only (the reshape/index dance of the
    # original reduced to exactly this); the unused median estimate was removed.
    non_zero_elems_in_freq = freq[freq > 0]
    meanfreq = np.mean(non_zero_elems_in_freq)
    return freq, meanfreq
def ridge_filter(im, orient, freq, kx, ky):
    """Enhance ridges by convolving each valid pixel with an oriented Gabor
    filter matched to the local ridge frequency and orientation.

    ``kx``/``ky`` scale the filter sigmas relative to the ridge wavelength.
    Returns the filtered image (zeros outside the valid region).
    """
    angleInc = 3
    im = np.double(im)
    rows, cols = im.shape
    new_im = np.zeros((rows, cols))
    freq_1d = np.reshape(freq, (1, rows * cols))
    ind = np.where(freq_1d > 0)
    ind = np.array(ind)
    ind = ind[1, :]
    # Round the array of frequencies to the nearest 0.01 to reduce the
    # number of distinct frequencies we have to deal with.
    non_zero_elems_in_freq = freq_1d[0][ind]
    non_zero_elems_in_freq = np.double(np.round((non_zero_elems_in_freq * 100))) / 100
    unfreq = np.unique(non_zero_elems_in_freq)
    # Generate filters corresponding to these distinct frequencies and
    # orientations in 'angleInc' increments.
    sigmax = 1 / unfreq[0] * kx
    sigmay = 1 / unfreq[0] * ky
    # FIX: make sze an integer here. np.round returns a float, and passing a
    # float sample count to np.linspace raises TypeError on modern NumPy.
    sze = int(np.round(3 * np.max([sigmax, sigmay])))
    x, y = np.meshgrid(np.linspace(-sze, sze, 2 * sze + 1),
                       np.linspace(-sze, sze, 2 * sze + 1))
    # The original (unrotated) Gabor filter.
    reffilter = np.exp(-((np.power(x, 2)) / (sigmax * sigmax) +
                         (np.power(y, 2)) / (sigmay * sigmay))) * np.cos(2 * np.pi * unfreq[0] * x)
    filt_rows, filt_cols = reffilter.shape
    gabor_filter = np.array(np.zeros((180 // angleInc, filt_rows, filt_cols)))
    for o in range(0, 180 // angleInc):
        # Generate rotated versions of the filter. Note orientation
        # image provides orientation *along* the ridges, hence +90
        # degrees, and imrotate requires angles +ve anticlockwise, hence
        # the minus sign.
        rot_filt = scipy.ndimage.rotate(reffilter, -(o * angleInc + 90), reshape=False)
        gabor_filter[o] = rot_filt
    # Find indices of matrix points greater than maxsze from the image
    # boundary (the filter window must fit entirely inside the image).
    maxsze = sze
    temp = freq > 0
    validr, validc = np.where(temp)
    temp1 = validr > maxsze
    temp2 = validr < rows - maxsze
    temp3 = validc > maxsze
    temp4 = validc < cols - maxsze
    final_temp = temp1 & temp2 & temp3 & temp4
    finalind = np.where(final_temp)
    # Convert orientation matrix values from radians to an index value
    # that corresponds to round(degrees/angleInc), wrapped into [1, 180/angleInc].
    maxorient_index = np.round(180 / angleInc)
    orient_index = np.round(orient / np.pi * 180 / angleInc)
    for i in range(0, rows):
        for j in range(0, cols):
            if orient_index[i][j] < 1:
                orient_index[i][j] = orient_index[i][j] + maxorient_index
            if orient_index[i][j] > maxorient_index:
                orient_index[i][j] = orient_index[i][j] - maxorient_index
    # Do the filtering: correlate each valid pixel's window with the filter
    # matching its local orientation.
    finalind_rows, finalind_cols = np.shape(finalind)
    for k in range(0, finalind_cols):
        r = validr[finalind[0][k]]
        c = validc[finalind[0][k]]
        img_block = im[r - sze:r + sze + 1][:, c - sze:c + sze + 1]
        new_im[r][c] = np.sum(img_block * gabor_filter[int(orient_index[r][c]) - 1])
    return new_im
def image_enhance(img):
    """Full Gabor-based fingerprint enhancement pipeline.

    Returns a boolean ridge map: True where the Gabor filter response is
    below -3.
    """
    blksze = 16
    thresh = 0.1
    # normalise the image and find a ROI
    normim, mask = ridge_segment(img, blksze, thresh)
    gradientsigma = 1
    blocksigma = 7
    orientsmoothsigma = 7
    # find orientation of every pixel
    orientim = ridge_orient(normim, gradientsigma,
                            blocksigma, orientsmoothsigma)
    blksze = 38
    windsze = 5
    min_wave_length = 5
    max_wave_length = 15
    # find the overall frequency of ridges
    freq, medfreq = ridge_freq(
        normim, mask, orientim, blksze, windsze, min_wave_length, max_wave_length)
    # NOTE(review): despite the name, ridge_freq returns the *mean* frequency;
    # the per-block frequency image is discarded in favour of this constant.
    freq = medfreq * mask
    kx = ky = 0.65
    # create gabor filter and do the actual filtering
    new_im = ridge_filter(normim, orientim, freq, kx, ky)
    # Threshold the filter response into a binary ridge image.
    return (new_im < -3)
def gabor_enhance(in_path, out_dir='./'):
    """Enhance the fingerprint image at ``in_path`` and save the result.

    The output is written to ``out_dir`` as ``<prefix>_enhanced.png``, where
    ``prefix`` is the input basename up to any ``_normal`` suffix.

    Returns:
        The path of the written image.

    Raises:
        FileNotFoundError: if ``in_path`` cannot be read as an image.
    """
    img = cv2.imread(in_path, 0)  # 0 -> grayscale
    if img is None:
        # cv2.imread returns None (no exception) on a missing/unreadable file.
        raise FileNotFoundError('could not read image: {}'.format(in_path))
    enhanced_img = image_enhance(img)
    enhanced_img = np.invert(enhanced_img)
    base_image_name = os.path.splitext(os.path.basename(in_path))[0]
    prefix = base_image_name.split('_normal')[0]
    # os.path.join works whether or not out_dir has a trailing separator
    # (the old string concatenation silently produced "outprefix..." without one).
    img_out = os.path.join(out_dir, prefix + '_enhanced.png')
    # Scale the boolean image to 0/255 for writing.
    cv2.imwrite(img_out, enhanced_img * 255)
    return img_out
|
8,604 | 3b7c30718838a164eaf3aa12cd7b6a68930346f8 | '''Mock classes that imitate idlelib modules or classes.
Attributes and methods will be added as needed for tests.
'''
from idlelib.idle_test.mock_tk import Text
class Editor:
    '''Minimally imitate EditorWindow.EditorWindow class.
    '''
    def __init__(self, flist=None, filename=None, key=None, root=None):
        # Parameters mirror EditorWindow's signature but are ignored by the mock.
        self.text = Text()
        self.undo = UndoDelegator()
    def get_selection_indices(self):
        # Mock "selection": the entire buffer, from start to end.
        first = self.text.index('1.0')
        last = self.text.index('end')
        return first, last
class UndoDelegator:
    '''Minimally imitate UndoDelegator.UndoDelegator class.
    '''
    # A real undo block is only needed for user interaction.
    # Both methods are deliberate no-ops; bare *args absorbs self and any
    # positional arguments the caller passes.
    def undo_block_start(*args):
        pass
    def undo_block_stop(*args):
        pass
|
8,605 | fc2748d766ebce8c9577f1eebc8435e2aa58ae25 |
import numpy as np
import random
import argparse
import networkx as nx
from gensim.models import Word2Vec
from utils import read_node_label, plot_embeddings
class node2vec_walk():
    """node2vec biased random-walk generator over a weighted NetworkX graph.

    ``p`` is the return parameter and ``q`` the in-out parameter of the
    node2vec transition bias; call :meth:`preprocess_transition_probs` before
    generating walks.
    """
    def __init__(self, nx_G, is_directed, p, q):
        self.G = nx_G
        self.is_directed = is_directed
        self.p = p
        self.q = q
    def node2vec_walk(self, walk_length, start_node):
        """Simulate one biased walk of ``walk_length`` from ``start_node``."""
        G = self.G
        alias_nodes = self.alias_nodes
        alias_edges = self.alias_edges
        walk = [start_node]
        while len(walk) < walk_length:
            curr = walk[-1]
            cur_nbrs = sorted(G.neighbors(curr))
            if len(cur_nbrs) > 0:
                if len(walk) == 1:
                    # First step: sample from the node-level alias table.
                    walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0], alias_nodes[curr][1])])
                else:
                    # Later steps: sample from the (prev, curr) edge alias table.
                    prev = walk[-2]
                    # NOTE(review): ``next`` shadows the builtin; left unchanged.
                    next = cur_nbrs[alias_draw(alias_edges[(prev, curr)][0], alias_edges[(prev, curr)][1])]
                    walk.append(next)
            else:
                # Dead end: terminate the walk early.
                break
        return walk
    def simulate_walks(self, num_walks, walk_length):
        """Generate ``num_walks`` walks starting from every node (shuffled each pass)."""
        G = self.G
        walks = []
        nodes = list(G.nodes())
        print("Walk iteration...")
        for walk_iter in range(num_walks):
            print(f"{walk_iter + 1}/{num_walks}")
            random.shuffle(nodes)
            for node in nodes:
                walks.append(self.node2vec_walk(walk_length, node))
        return walks
    def get_alias_edge(self, src, dst):
        """Build the alias table for transitions out of ``dst`` having come from ``src``."""
        G = self.G
        p = self.p
        q = self.q
        unnormalized_probs = []
        for dst_nbr in sorted(G.neighbors(dst)):
            if dst_nbr == src:
                # Returning to the previous node: weight / p.
                unnormalized_probs.append(G[dst][dst_nbr]["weight"] / p)
            elif G.has_edge(dst_nbr, src):
                # Neighbour also adjacent to src (distance 1): unchanged weight.
                unnormalized_probs.append(G[dst][dst_nbr]["weight"])
            else:
                # Moving outward (distance 2 from src): weight / q.
                unnormalized_probs.append(G[dst][dst_nbr]["weight"] / q)
        norm_cost = sum(unnormalized_probs)
        normalized_probs = [float(v) / norm_cost for v in unnormalized_probs]
        return alias_setup(normalized_probs)
    def preprocess_transition_probs(self):
        # Precompute node- and edge-level alias tables for O(1) sampling.
        G = self.G
        is_directed = self.is_directed
        alias_nodes = {}
        for node in G.nodes():
            unnormalized_probs = [G[node][nbr]["weight"] for nbr in sorted(G.neighbors(node))]
            norm_const = sum(unnormalized_probs)
            normalized_probs = [float(v) / norm_const for v in unnormalized_probs]
            alias_nodes[node] = alias_setup(normalized_probs)
        alias_edges = {}
        if is_directed:
            for edge in G.edges():
                alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
        else:
            # Undirected: store alias tables for both orientations of each edge.
            for edge in G.edges():
                alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
                alias_edges[(edge[1], edge[0])] = self.get_alias_edge(edge[1], edge[0])
        self.alias_nodes = alias_nodes
        self.alias_edges = alias_edges
def alias_setup(probs):
    """Build the alias tables for Walker's alias method.

    Args:
        probs: normalised probabilities (sum to 1).

    Returns:
        (J, q): the alias index table and the acceptance-probability table,
        consumed by :func:`alias_draw`.
    """
    K = len(probs)
    q = np.zeros(K)
    # np.int was removed in NumPy 1.24; the builtin int is the correct dtype.
    J = np.zeros(K, dtype=int)
    smaller = []
    larger = []
    for kk, prob in enumerate(probs):
        q[kk] = K * prob
        # Split indices below/above the uniform probability 1/K.
        if q[kk] > 1.0:
            larger.append(kk)
        else:
            smaller.append(kk)
    while len(smaller) > 0 and len(larger) > 0:
        small = smaller.pop()
        large = larger.pop()
        J[small] = large
        # Donate probability mass from `large` to top `small` up to 1.
        q[large] = q[small] + q[large] - 1
        # Re-queue `large` depending on its remaining mass.
        if q[large] < 1.0:
            smaller.append(large)
        else:
            larger.append(large)
    return J, q
def alias_draw(J, q):
    """Draw one sample from the discrete distribution encoded by (J, q).

    Picks a uniform column, then either accepts it (probability q[col]) or
    takes its alias J[col].
    """
    K = len(J)
    col = int(np.floor(np.random.rand() * K))
    return col if np.random.rand() < q[col] else J[col]
def parse_args():
    """Parse command-line options for the node2vec pipeline."""
    parser = argparse.ArgumentParser(description="Run node2vec.")
    parser.add_argument('--input', nargs='?', default='./data/Wiki_edgelist.txt', help='Input graph path')
    parser.add_argument('--output', nargs='?', default='emb/node2vec_wiki.emb', help='Embeddings path')
    parser.add_argument('--label_file', nargs='?', default='data/wiki_labels.txt', help='Labels path')
    parser.add_argument('--dimensions', type=int, default=128, help='Number of dimensions. Default is 128.')
    parser.add_argument('--walk-length', type=int, default=80, help='Length of walk per source. Default is 80.')
    # Fix: the help text previously said "Default is 10" while the default is 20.
    parser.add_argument('--num-walks', type=int, default=20, help='Number of walks per source. Default is 20.')
    parser.add_argument('--window-size', type=int, default=10, help='Context size for optimization. Default is 10.')
    parser.add_argument('--iter', default=2, type=int, help='Number of epochs in SGD')
    parser.add_argument('--workers', type=int, default=8, help='Number of parallel workers. Default is 8.')
    parser.add_argument('--p', type=float, default=1, help='Return hyperparameter. Default is 1.')
    parser.add_argument('--q', type=float, default=1, help='Inout hyperparameter. Default is 1.')
    parser.add_argument('--weighted', dest='weighted', action='store_true', help='Boolean specifying (un)weighted. Default is unweighted.')
    parser.add_argument('--unweighted', dest='unweighted', action='store_false')
    parser.set_defaults(weighted=False)
    parser.add_argument('--directed', dest='directed', action='store_true', help='Graph is (un)directed. Default is undirected.')
    parser.add_argument('--undirected', dest='undirected', action='store_false')
    parser.set_defaults(directed=False)
    return parser.parse_args()
def read_graph():
    """Load the edge list named by the module-level ``args`` into a graph.

    Weighted files must carry a weight column; unweighted edges get weight 1.
    The graph is converted to undirected unless ``--directed`` was given.
    """
    # NOTE(review): relies on the global ``args`` set in the __main__ block.
    if args.weighted:
        G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float), ), create_using=nx.DiGraph)
    else:
        G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.DiGraph())
        # Unweighted input: give every edge a unit weight so the walk code
        # can always read G[u][v]['weight'].
        for edge in G.edges():
            G[edge[0]][edge[1]]['weight'] = 1
    if not args.directed:
        G = G.to_undirected()
    return G
def learning_walks(walks):
    """Train a skip-gram Word2Vec model on the random walks and save it.

    Walk node ids are stringified because gensim expects token sequences.
    """
    walks = [list(map(str, walk)) for walk in walks]
    # NOTE(review): ``size``/``iter`` are gensim<4 parameter names (renamed to
    # vector_size/epochs in gensim 4) — confirm the pinned gensim version.
    model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers, iter=args.iter)
    model.wv.save_word2vec_format(args.output)
    return model
def main(args):
    """End-to-end pipeline: build graph, precompute, walk, embed, plot."""
    nx_G = read_graph()
    G = node2vec_walk(nx_G, args.directed, args.p, args.q)
    # Alias tables must exist before any walk is simulated.
    G.preprocess_transition_probs()
    walks = G.simulate_walks(args.num_walks, args.walk_length)
    model = learning_walks(walks)
    _embeddings = {}
    for v in nx_G.nodes():
        _embeddings[str(v)] = model.wv[str(v)]
    plot_embeddings(_embeddings, args.label_file)
if __name__ == "__main__":
    # ``args`` is intentionally module-global: read_graph/learning_walks read it.
    args = parse_args()
    main(args)
|
8,606 | 3c8352ff2fc92ada1b58603df2a1a402e57842be | # coding: utf-8
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Selenium 4 removed the find_element_by_* helpers; use find_element(By, ...).
from selenium.webdriver.common.by import By

# Open Baidu, type a query into the search box (#kw) and submit with ENTER,
# then dump the resulting page source.
driver = webdriver.Chrome()
driver.get("https://www.baidu.com")
elem = driver.find_element(By.XPATH, '//*[@id="kw"]')
elem.send_keys("python selenium", Keys.ENTER)
print(driver.page_source)
|
8,607 | 7f62af951b49c3d1796c2811527ceb30ca931632 | import pandas as pd
# Fix: ``timedelta`` is used below but was never imported (NameError at runtime).
from datetime import datetime, timedelta
from iFinDPy import *

# Log in to the iFinD terminal (credentials are placeholders).
thsLogin = THS_iFinDLogin("iFind账号", "iFind账号密码")

# For each index, find days that closed below -5% and record the index level
# 1 month / 3 months / 1 year afterwards.
index_list = ['000001.SH', '399001.SZ', '399006.SZ']
result = pd.DataFrame()
today = datetime.today().strftime('%Y-%m-%d')
for index in index_list:
    data_js = THS_DateSerial(index, 'ths_pre_close_index;ths_open_price_index;ths_close_price_index;ths_high_price_index', ';;;',
                             'Days:Tradedays,Fill:Previous,Interval:D,block:history', '2000-01-01', today, True)
    data_df = THS_Trans2DataFrame(data_js)
    data_df['close_chg'] = data_df['ths_close_price_index'] / data_df['ths_pre_close_index'] * 100 - 100
    result_pd = data_df[(data_df['close_chg'] < -5)]
    date_list = result_pd['time'].tolist()
    print('{}收盘在-5%的交易日有{}'.format(index, str(date_list)))
    for date in date_list:
        date_after_1month = THS_DateOffset('SSE', 'dateType:1,period:D,offset:30,dateFormat:0,output:singledate', date)['tables']['time'][0]
        date_after_3month = THS_DateOffset('SSE', 'dateType:1,period:D,offset:90,dateFormat:0,output:singledate', date)['tables']['time'][0]
        date_after_1year = THS_DateOffset('SSE', 'dateType:1,period:D,offset:365,dateFormat:0,output:singledate', date)['tables']['time'][0]
        # Skip drops within the last year: the 1-year-later close does not exist yet.
        if date > (datetime.today() + timedelta(days=-365)).strftime('%Y-%m-%d'):
            continue
        index_close_date = THS_BasicData(index, 'ths_close_price_index', date)['tables'][0]['table']['ths_close_price_index'][0]
        index_close_date_after_1month = THS_BasicData(index, 'ths_close_price_index', date_after_1month)['tables'][0]['table']['ths_close_price_index'][0]
        index_close_date_after_3month = THS_BasicData(index, 'ths_close_price_index', date_after_3month)['tables'][0]['table']['ths_close_price_index'][0]
        index_close_date_after_1year = THS_BasicData(index, 'ths_close_price_index', date_after_1year)['tables'][0]['table']['ths_close_price_index'][0]
        # DataFrame.append was removed in pandas 2.0; build a one-row frame and concat.
        row = pd.DataFrame([index, date, index_close_date, index_close_date_after_1month, index_close_date_after_3month, index_close_date_after_1year]).T
        result = pd.concat([result, row])
result.columns = ['指数代码', '大跌日', '大跌日点数', '一个月后点数', '三个月后点数', '一年后点数']
result = result.set_index('指数代码')
result['大跌一个月后涨跌幅'] = result['一个月后点数'] / result['大跌日点数'] * 100 - 100
result['大跌三个月后涨跌幅'] = result['三个月后点数'] / result['大跌日点数'] * 100 - 100
result['大跌一年后涨跌幅'] = result['一年后点数'] / result['大跌日点数'] * 100 - 100
result  # notebook-style final expression: display the summary table
8,608 | 465d5baae8d5be77fbf3d550d10667da420a8fbe | import sys
sys.path.append("../")
import numpy as np
import tensorflow as tf
from utils import eval_accuracy_main_cdan
from models import mnist2mnistm_shared_discrepancy, mnist2mnistm_predictor_discrepancy
import keras
import argparse
import pickle as pkl
parser = argparse.ArgumentParser(description='Training', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--USE_POISON', type=int, default=1, help='POISON used or not')
args = parser.parse_args()
USE_POISON = bool(args.USE_POISON)
METHOD = "mcd"
IMG_WIDTH = 28
IMG_HEIGHT = 28
NCH = 3
NUM_CLASSES_MAIN = 2
NUM_CLASSES_DC = 2
EPOCHS = 101
BATCH_SIZE = 64
PLOT_POINTS = 100
NUM_MODELS = 5
ce_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
shared = [mnist2mnistm_shared_discrepancy([50000, IMG_HEIGHT, IMG_WIDTH, NCH]) for i in range(NUM_MODELS)]
main_classifier_1 = [mnist2mnistm_predictor_discrepancy(shared[i], NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]#48*4*4, 500
main_classifier_2 = [mnist2mnistm_predictor_discrepancy(shared[i], NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]
optimizer_shared = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]
optimizer_main_classifier_1 = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]
optimizer_main_classifier_2 = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]
@tf.function
def train_discrepancy_1(main_data, main_labels, target_data):
    """Discrepancy step B (METHOD='mcd'): update the two classifier heads to
    stay accurate on source data while *maximising* their disagreement on
    target data. Shared extractors are not updated here.

    Reads the module-level ``shared``/``main_classifier_*`` model lists and
    their optimizers (one independent model per ensemble slot).
    """
    # persistent is set to True because the tape is used more than
    # once to calculate the gradients.
    with tf.GradientTape(persistent=True) as tape:
        shared_main = [shared[i](main_data, training=True) for i in range(NUM_MODELS)]
        main_logits_1 = [main_classifier_1[i](shared_main[i], training=True) for i in range(NUM_MODELS)]
        main_logits_2 = [main_classifier_2[i](shared_main[i], training=True) for i in range(NUM_MODELS)]
        main_loss = [ce_loss(main_labels, main_logits_1[i]) + ce_loss(main_labels, main_logits_2[i]) for i in range(NUM_MODELS)]
        shared_target = [shared[i](target_data, training=True) for i in range(NUM_MODELS)]
        target_logits_1 = [main_classifier_1[i](shared_target[i], training=True) for i in range(NUM_MODELS)]
        target_logits_2 = [main_classifier_2[i](shared_target[i], training=True) for i in range(NUM_MODELS)]
        # Mean L1 distance between the two heads' softmax outputs on target data.
        adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for i in range(NUM_MODELS)]
        # Minimise source loss, maximise target discrepancy (hence the minus).
        loss = [main_loss[i] - adv_loss[i] for i in range(NUM_MODELS)]
    gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1[i].trainable_variables) for i in range(NUM_MODELS)]
    gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2[i].trainable_variables) for i in range(NUM_MODELS)]
    [optimizer_main_classifier_1[i].apply_gradients(zip(gradients_main_classifier_1[i], main_classifier_1[i].trainable_variables)) for i in range(NUM_MODELS)]
    [optimizer_main_classifier_2[i].apply_gradients(zip(gradients_main_classifier_2[i], main_classifier_2[i].trainable_variables)) for i in range(NUM_MODELS)]
    return adv_loss
@tf.function
def train_discrepancy_2(target_data):
    """Discrepancy step C (METHOD='mcd'): update the shared feature
    extractors to *minimise* the heads' disagreement on target data
    (classifier heads are not updated here).
    """
    # persistent is set to True because the tape is used more than
    # once to calculate the gradients.
    with tf.GradientTape(persistent=True) as tape:
        shared_target = [shared[i](target_data, training=True) for i in range(NUM_MODELS)]
        target_logits_1 = [main_classifier_1[i](shared_target[i], training=True) for i in range(NUM_MODELS)]
        target_logits_2 = [main_classifier_2[i](shared_target[i], training=True) for i in range(NUM_MODELS)]
        # Mean L1 distance between the two heads' softmax outputs.
        adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) - tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]
    gradients_shared = [tape.gradient(adv_loss[i], shared[i].trainable_variables) for i in range(NUM_MODELS)]
    [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i].trainable_variables)) for i in range(NUM_MODELS)]
    return adv_loss
@tf.function
def train_step_erm(main_data, main_labels):
    """Plain supervised (ERM) step on source data: update shared extractors
    and both classifier heads of every ensemble model on cross-entropy.
    """
    # persistent is set to True because the tape is used more than
    # once to calculate the gradients.
    with tf.GradientTape(persistent=True) as tape:
        shared_main = [shared[i](main_data, training=True) for i in range(NUM_MODELS)]
        main_logits_1 = [main_classifier_1[i](shared_main[i], training=True) for i in range(NUM_MODELS)]
        main_logits_2 = [main_classifier_2[i](shared_main[i], training=True) for i in range(NUM_MODELS)]
        loss = [ce_loss(main_labels, main_logits_1[i]) + ce_loss(main_labels, main_logits_2[i]) for i in range(NUM_MODELS)]
    gradients_shared = [tape.gradient(loss[i], shared[i].trainable_variables) for i in range(NUM_MODELS)]
    gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1[i].trainable_variables) for i in range(NUM_MODELS)]
    gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2[i].trainable_variables) for i in range(NUM_MODELS)]
    [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i].trainable_variables)) for i in range(NUM_MODELS)]
    [optimizer_main_classifier_1[i].apply_gradients(zip(gradients_main_classifier_1[i], main_classifier_1[i].trainable_variables)) for i in range(NUM_MODELS)]
    [optimizer_main_classifier_2[i].apply_gradients(zip(gradients_main_classifier_2[i], main_classifier_2[i].trainable_variables)) for i in range(NUM_MODELS)]
    return loss
mnist = tf.keras.datasets.mnist
(x_train_mnist_all, y_train_mnist_all), (x_test_mnist_all, y_test_mnist_all) = mnist.load_data()
x_train_mnist_all = np.stack((x_train_mnist_all,)*3, axis=-1)/255.
x_test_mnist_all = np.stack((x_test_mnist_all,)*3, axis=-1)/255.
mnistm = pkl.load(open('../../../../MNIST_MNIST-m/mnistm_data.pkl', 'rb'))
x_train_mnistm_all = mnistm['train']/255.
x_test_mnistm_all = mnistm['test']/255.
picked_class = 3
picked_class_next = 8
train_points_class_0 = np.argwhere(y_train_mnist_all == picked_class).flatten()
train_points_class_1 = np.argwhere(y_train_mnist_all == picked_class_next).flatten()
test_points_class_0 = np.argwhere(y_test_mnist_all == picked_class).flatten()
test_points_class_1 = np.argwhere(y_test_mnist_all == picked_class_next).flatten()
x_train_mnist = x_train_mnist_all[np.concatenate([train_points_class_0, train_points_class_1])]
y_train_mnist = y_train_mnist_all[np.concatenate([train_points_class_0, train_points_class_1])]
x_test_mnist = x_test_mnist_all[np.concatenate([test_points_class_0, test_points_class_1])]
y_test_mnist = y_test_mnist_all[np.concatenate([test_points_class_0, test_points_class_1])]
x_train_mnistm = x_train_mnistm_all[np.concatenate([train_points_class_0, train_points_class_1])]
x_test_mnistm = x_test_mnistm_all[np.concatenate([test_points_class_0, test_points_class_1])]
zeros_train = np.argwhere(y_train_mnist == picked_class).flatten()
ones_train = np.argwhere(y_train_mnist == picked_class_next).flatten()
zeros_test = np.argwhere(y_test_mnist == picked_class).flatten()
ones_test = np.argwhere(y_test_mnist == picked_class_next).flatten()
y_train_mnist[zeros_train] = 0
y_train_mnist[ones_train] = 1
y_test_mnist[zeros_test] = 0
y_test_mnist[ones_test] = 1
y_train_mnist = keras.utils.to_categorical(y_train_mnist, NUM_CLASSES_MAIN)
y_test_mnist = keras.utils.to_categorical(y_test_mnist, NUM_CLASSES_MAIN)
x_target_test = np.load("data/" + METHOD + "_TARGET_DATA.npy")
y_target_test = np.load("data/" + METHOD + "_TARGET_LABEL.npy")
y_target_test_incorrect_label = np.zeros([1, NUM_CLASSES_MAIN])
target_correct_label = np.argmax(y_target_test,1).flatten()[0]
y_target_test_incorrect_label[0][(target_correct_label+1)%NUM_CLASSES_MAIN]=1
if USE_POISON:
x_poison = np.load("data/" + METHOD + "_GENERATED_POISON_DATA.npy")
y_poison = np.load("data/" + METHOD + "_GENERATED_POISON_LABELS.npy")
x_train_mnist = np.concatenate([x_train_mnist, x_poison])
y_train_mnist = np.concatenate([y_train_mnist, y_poison])
for epoch in range(EPOCHS):
nb_batches_train = int(len(x_train_mnist)/BATCH_SIZE)
if len(x_train_mnist) % BATCH_SIZE != 0:
nb_batches_train += 1
ind_shuf = np.arange(len(x_train_mnist))
np.random.shuffle(ind_shuf)
for batch in range(nb_batches_train):
ind_batch = range(BATCH_SIZE * batch, min(BATCH_SIZE * (1+batch), len(x_train_mnist)))
ind_source = ind_shuf[ind_batch]
ind_target = np.random.choice(len(x_train_mnistm), size=len(ind_source), replace=False)
x_source_batch = x_train_mnist[ind_source]
y_source_batch = y_train_mnist[ind_source]
x_target_batch = x_train_mnistm[ind_target]
train_step_erm(x_source_batch, y_source_batch)
train_discrepancy_1(x_source_batch, y_source_batch, x_target_batch)
train_discrepancy_2(x_target_batch)
if epoch % 20 == 0:
print("Full training Poisoning:", USE_POISON, "MNIST->MNIST_M:", epoch, "METHOD:", METHOD, "\n")
print([eval_accuracy_main_cdan(x_target_test, y_target_test_incorrect_label, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
print([eval_accuracy_main_cdan(x_target_test, y_target_test, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
print([eval_accuracy_main_cdan(x_test_mnistm, y_test_mnist, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
if USE_POISON:
print([eval_accuracy_main_cdan(x_poison, y_poison, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
print("\n")
|
8,609 | 09905d4b5ad2e59578d874db171aafb6c42db105 | # Given an unsorted integer array nums, find the smallest missing positive integer.
class Solution:
    def firstMissingPositive(self, nums: List[int]) -> int:
        """Return the smallest positive integer not present in ``nums``.

        The original scanned 1..max(nums)+1 with ``i not in nums`` (a list
        scan), i.e. O(n*m); a set makes each membership test O(1), giving
        O(n) time and O(n) extra space. Behavior is unchanged: an empty or
        all-non-positive input yields 1.
        """
        seen = set(nums)
        candidate = 1
        # The loop terminates: at most len(seen) consecutive ints can be present.
        while candidate in seen:
            candidate += 1
        return candidate
|
8,610 | 45d57f8392b89776f9349c32b4bb2fa71a4aaa83 | # -*- coding: utf-8 -*-
"""
A customised logger for this project for logging to the file and console
Created on 29/07/2022
@author: PNimbhore
"""
# imports
import os
import logging
class Logger:
    """
    A custom logger that writes DEBUG+ records to a file and ERROR+ records
    to the console, sharing one timestamped format.
    """
    def __init__(self, filepath):
        """
        Constructor.
        :param filepath: path of the log file (opened in append mode)
        """
        self.filepath = filepath
        self.logger = logging.getLogger('util')
        self.logger.setLevel(logging.DEBUG)
        self._formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        # The 'util' logger is process-global: guard against attaching
        # duplicate handlers (and duplicated log lines) when Logger is
        # instantiated more than once.
        if not self.logger.handlers:
            # file handler (the old os.path.join(self.filepath) with a single
            # argument was a no-op; pass the path directly)
            file_handler = logging.FileHandler(self.filepath, 'a')
            file_handler.setLevel(logging.DEBUG)
            file_handler.setFormatter(self._formatter)
            self.logger.addHandler(file_handler)
            # console handler
            con_handler = logging.StreamHandler()
            con_handler.setLevel(logging.ERROR)
            con_handler.setFormatter(self._formatter)
            self.logger.addHandler(con_handler)
log_file = "slb_config.log"
logger = Logger(log_file).logger
|
8,611 | 530ec3df27cc4c8f0798566f0c66cfbffe510786 | import os
import subprocess
import sys
import time
# print sys.argv
# Time the command given on the argv tail and report the wall-clock duration.
start = time.time()
# NOTE(review): shell=True combined with a list argument is platform-dependent —
# on POSIX only the first element reaches the shell; presumably this script
# targets Windows, confirm before changing.
subprocess.call(sys.argv[1:], shell=True)
stop = time.time()
# Python 2 print statement: this script requires Python 2.
print "\nTook %.1f seconds" % (stop - start)
|
class Formater():
    @staticmethod
    def clean_number(posible_number):
        """Parse a human-formatted number string into an int or float.

        Spaces are stripped and ',' is treated as a thousands separator.
        Returns an int (no decimal point), a float (one decimal point), or
        None when the input is ambiguous or not numeric.

        Fixes: the method took no ``self``/decorator (it would fail when
        called on an instance — now a staticmethod, which still supports
        ``Formater.clean_number(...)``), and the old ``return`` inside
        ``finally`` silently overrode the ``else: return None`` path. The
        observable results are unchanged.
        """
        sanitize_number = posible_number.replace(' ', '')
        number_of_dots = sanitize_number.count('.')
        if number_of_dots > 1:
            # More than one decimal point is always invalid.
            return None
        if number_of_dots == 1:
            # A comma after the decimal point (e.g. "1.2,3") is ambiguous.
            if ',' in sanitize_number[sanitize_number.index('.'):]:
                return None
            try:
                return float(sanitize_number.replace(',', ''))
            except ValueError:
                return None
        # No decimal point: strip thousands separators and parse as int.
        try:
            return int(sanitize_number.replace(',', ''))
        except ValueError:
            return None
8,613 | 2ccc3bb63445572610f6dbdfe5b1cbeef506c9a9 | from pygraphblas.matrix import Matrix
from pygraphblas.types import BOOL
from pyformlang.regular_expression import Regex
class Graph:
    """Labelled directed graph backed by one Boolean adjacency matrix per label."""

    def __init__(self):
        self.n_vertices = 0
        self.label_matrices = dict()  # label -> Matrix.sparse(BOOL, n, n)
        self.start_vertices = set()
        self.final_vertices = set()

    def from_trans(self, filename):
        """Load edges from a '<from> <label> <to>' transition file."""
        # Using a context manager guarantees the file is closed on error too.
        with open(filename) as input_file:
            edges = input_file.read().rstrip().split('\n')
        max_vertice_number = 0
        for edge in edges:
            fro, label, to = edge.split(' ')
            max_vertice_number = max(max_vertice_number, int(fro))
            max_vertice_number = max(max_vertice_number, int(to))
        self.n_vertices = max_vertice_number + 1
        for edge in edges:
            fro, label, to = edge.split(' ')
            self.get_by_label(label)[int(fro), int(to)] = True

    def from_regex(self, filename):
        """Build the graph from the minimal DFA of a regex read from a file."""
        with open(filename) as input_file:
            regex = Regex(input_file.read().rstrip())
        dfa = regex.to_epsilon_nfa().to_deterministic().minimize()
        self.n_vertices = len(dfa.states)
        # Renumber pyformlang states to dense integer vertex ids.
        state_renumeration = dict()
        i = 0
        for state in dfa.states:
            state_renumeration[state] = i
            i += 1
        # NOTE(review): _transition_function is a pyformlang private attribute —
        # confirm it is still available in the pinned version.
        for fro, label, to in dfa._transition_function.get_edges():
            self.get_by_label(str(label))[state_renumeration[fro], state_renumeration[to]] = True
        self.start_vertices.add(state_renumeration[dfa.start_state])
        for state in dfa.final_states:
            self.final_vertices.add(state_renumeration[state])

    def transitive_closure_1(self):
        """Transitive closure by repeated squaring of the combined adjacency matrix."""
        adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)
        for label_matrix in self.label_matrices.values():
            adj_matrix += label_matrix
        if adj_matrix.nvals != 0:
            while True:
                old = adj_matrix.nvals
                adj_matrix += adj_matrix @ adj_matrix
                # BUG FIX: the loop previously compared the int ``old`` to the
                # matrix object itself (always False), so this exit never fired.
                if old == adj_matrix.nvals:
                    break
        return adj_matrix

    def transitive_closure_2(self):
        """Alternative closure accumulating into a separate result matrix."""
        adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)
        result = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)
        for label_matrix in self.label_matrices.values():
            adj_matrix += label_matrix
        # NOTE(review): adj_matrix is never multiplied inside the loop, so this
        # converges after one addition and returns only 1-step reachability —
        # confirm whether `result += result @ adj_matrix` was intended.
        if adj_matrix.nvals != 0:
            while True:
                old = result.nvals
                result += adj_matrix
                if old == result.nvals:
                    break
        return result

    def labels(self):
        """Return the set of edge labels present in the graph."""
        return self.label_matrices.keys()

    def get_by_label(self, label):
        """Return (creating on demand) the Boolean matrix for ``label``."""
        if label not in self.label_matrices.keys():
            self.label_matrices[label] = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)
        return self.label_matrices[label]
|
8,614 | 55acae8129ddaba9a860d5d356e91f40607ac95a | def func(n):
return n*2
def my_map(f, seq):
    """Apply *f* to every element of *seq* and return the results as a list."""
    return list(map(f, seq))
def main():
    """Double [1, 2, 3, 4] twice — once via map(), once via a list
    comprehension — and print both (identical) results."""
    numbers = [1, 2, 3, 4]
    via_map = list(map(func, numbers))
    print(via_map)
    via_comprehension = [func(n) for n in numbers]
    print(via_comprehension)
if __name__ == '__main__':
main()
|
8,615 | acd2d84529e197d6f9d134e8d7e25a51a442f3ae | # MÁSTER EN BIG DATA Y BUSINESS ANALYTICS
# MOD 1 - FINAL EVALUATION - EX. 2: dado un archivo que contiene en cada línea
# una palabra o conjunto de palabras seguido de un valor numérico denominado
# “sentimiento” y un conjunto de tweets, se pide calcular el sentimiento de
# aquellas palabras o conjunto de palabras que no tienen un valor asociado en el
# archivo de “sentimientos”. Se pueden seguir distintas estrategias para asignar
# un valor. Por ejemplo, se podría asignar como valor el valor del “sentimiento”
# del tweet en que se encuentra la palabra o conjunto de palabras sin valor, o
# el valor medio del “sentimiento” del tweet.
import json
import pandas as pd
# ---- FUNCTIONS ---------------------------------------------------------------
def get_tweets(filename):
    """Read a JSON-lines file of tweets and return the non-null tweet texts.

    Uses ``pd.read_json(..., lines=True)`` to parse one JSON object per line.
    On any parse/IO failure a message is printed and an empty list is
    returned (the original fell through to an implicit ``None``, which made
    callers crash when iterating), so the result is always iterable.
    """
    try:
        # lines=True: parse the file as one JSON object per line.
        pd_tweets = pd.read_json(filename, lines=True)
        pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']
        return pd_tweets.to_list()
    except (ValueError, OSError, KeyError, AttributeError) as exc:
        # Narrowed from a bare `except:`; keep the best-effort contract but
        # report what actually failed.
        print("Something went wrong parsing the file " + filename + ": " + str(exc))
        return []
def get_sentiments(filename):
    """Parse a tab-separated sentiment lexicon.

    Each line holds ``term<TAB>value`` (value is an integer).  Returns a dict
    mapping each term — which may be a multi-word phrase — to its value.
    """
    valores = {}
    # `with` guarantees the handle is closed (the original leaked it).
    with open(filename, 'r') as lexicon:
        for linea in lexicon:
            termino, valor = linea.split('\t')
            valores[termino] = int(valor)  # int() tolerates the trailing newline
    return valores
# ---- MAIN PROGRAM -------------------------------------------------------------------------------------------------
# ---- Filenames (including path)
file_tweet = 'Tweets.txt'
file_sentimientos = 'Sentimientos.txt'
# -- PROCESS TWEETS FILE WITH PANDAS READ_JSON
list_of_tweets = get_tweets(file_tweet)
# -- PROCESS SENTIMENTS FILE INTO A DICTIONARY (term -> integer value)
valores = get_sentiments(file_sentimientos)
# -- PROCESS TWEETS SENTIMENT AND PRINT
for tweet in list_of_tweets:
    tweet_sentimiento = 0
    words_without_sent = []
    number_of_words = 0
    for word in tweet.split(" "):
        tweet_sentimiento += valores.get(word.lower(),0)
        number_of_words += 1
        # NOTE(review): a lexicon value of exactly 0 is indistinguishable from
        # a missing term here, so true-zero words are also treated as unknown.
        if valores.get(word.lower(),0)==0:
            words_without_sent.append(word)
    # assign each unknown word the mean sentiment of its tweet
    for item in words_without_sent:
        print(item + ': ' + str(tweet_sentimiento/number_of_words))
    print("\n")
print("--- THE END ---")
|
8,616 | c485466a736fa0a4f183092e561a27005c01316d | import pylab,numpy as np
from numpy import sin
from matplotlib.patches import FancyArrowPatch
fig=pylab.figure()
w=1
h=1
th=3.14159/25.
x=np.r_[0,0,w,w,0]
y=np.r_[0,h,h-w*sin(th),0-w*sin(th),0]
pylab.plot(x,y)
x=np.r_[0,0,w/2.0,w/2.0,0]
y=np.r_[0,h/6.0,h/6.0-w/2.0*sin(th),0-w/2.0*sin(th),0]
pylab.plot(x,y,'--')
pylab.text(w/4.0,h/12.0-w/4.0*sin(th)-h/30.,'$A_{a,subcool}$',ha='center',va='center')
h0=h-w/2.0*sin(th)-h/6.0
x=np.r_[w/2.0,w/2.0,w,w,w/2.0]
y=np.r_[0+h0,h/6.0+h0,h/6.0-w/2.0*sin(th)+h0,0-w/2.0*sin(th)+h0,0+h0]
pylab.plot(x,y,'--')
pylab.text(0.75*w,h-h/12.0-0.75*w*sin(th)-h/30.,'$A_{a,superheat}$',ha='center',va='center')
pylab.text(0.5*w,h/2.0-0.5*w*sin(th),'$A_{a,two-phase}$',ha='center',va='center')
##Add the circuits
for y0 in [h/12.,h/12.+h/6.,h/12.+2*h/6.,h/12.+3*h/6.,h/12.+4*h/6.,h/12.+5*h/6.]:
pylab.plot(np.r_[0,w],np.r_[y0,y0-w*sin(th)],'k',lw=4)
pylab.gca().add_patch(FancyArrowPatch((w+w/10.,h-h/12.0-(w+w/10.)*sin(th)),(w,h-h/12.0-w*sin(th)),arrowstyle='-|>',fc='k',ec='k',mutation_scale=20,lw=0.8))
pylab.gca().add_patch(FancyArrowPatch((0,h/12.0),(-w/10.,h/12.0-(-w/10.)*sin(th)),arrowstyle='-|>',fc='k',ec='k',mutation_scale=20,lw=0.8))
pylab.gca().axis('equal')
pylab.gca().axis('off')
pylab.show() |
8,617 | 4e6e4917aee2385fe118d6e58c359a4c9fc50943 | # -*- coding: utf-8 -*-
'''
File Name: bubustatus/utils.py
Author: JackeyGao
mail: junqi.gao@shuyun.com
Created Time: 一 9/14 12:51:37 2015
'''
from rest_framework.views import exception_handler
def custom_exception_handler(exc, context):
    """Wrap DRF's default exception handler and mirror the HTTP status code
    into the response body under the ``status_code`` key."""
    response = exception_handler(exc, context)
    if response is None:
        # Exception type DRF does not handle: let it propagate to a 500.
        return None
    response.data['status_code'] = response.status_code
    return response
|
8,618 | c65969bba72142f4a328f978d78e0235cd56e393 | from huobi import RequestClient
from huobi.constant.test import *
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
obj_list = request_client.get_cross_margin_loan_orders()
if len(obj_list):
for obj in obj_list:
obj.print_object()
print()
|
8,619 | 4e538251dedfe0b9ffb68de2de7dc50681320f1f | #
# @lc app=leetcode id=267 lang=python3
#
# [267] Palindrome Permutation II
#
# https://leetcode.com/problems/palindrome-permutation-ii/description/
#
# algorithms
# Medium (33.28%)
# Total Accepted: 24.8K
# Total Submissions: 74.4K
# Testcase Example: '"aabb"'
#
# Given a string s, return all the palindromic permutations (without
# duplicates) of it. Return an empty list if no palindromic permutation could
# be form.
#
# Example 1:
#
#
# Input: "aabb"
# Output: ["abba", "baab"]
#
# Example 2:
#
#
# Input: "abc"
# Output: []
#
#
class Solution:
def generatePalindromes(self, s: str) -> List[str]:
|
8,620 | 1b71789ba7c2191b433a405723fe6c985c926610 | # Generated by Django 2.2.6 on 2020-04-06 16:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('user_id', models.IntegerField(primary_key=True, serialize=False)),
('username', models.CharField(max_length=45)),
('userlogin', models.CharField(max_length=45)),
('avartar_url', models.CharField(blank=True, max_length=150, null=True)),
],
options={
'db_table': 'user',
},
),
migrations.CreateModel(
name='Repos',
fields=[
('repo_id', models.IntegerField(primary_key=True, serialize=False)),
('reponame', models.CharField(max_length=150)),
('owner', models.CharField(max_length=45)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='attendance.User')),
],
options={
'db_table': 'repos',
},
),
]
|
8,621 | dfe7f0e25f340601886334c61a50806491a4ae2b | """Tests for our `neo login` subcommand."""
import pytest
import os
from neo.libs import login
from neo.libs import utils
class TestAuth:
    """Ordered end-to-end tests for `neo login` session handling.

    pytest-ordering markers make login run first (order=0) and logout run
    last (order=-1); the unmarked tests execute in between, while the
    session created by test_do_login is still active.
    """
    @pytest.mark.run(order=0)
    def test_do_login(self, monkeypatch):
        # Credentials come from the env file loaded into os.environ.
        login.load_env_file()
        username = os.environ.get('OS_USERNAME')
        passwd = os.environ.get('OS_PASSWORD')
        # give value to input() prompt
        monkeypatch.setattr('builtins.input', lambda x: username)
        monkeypatch.setattr('getpass.getpass', lambda x: passwd)
        # do_login() returns True when the login succeeds
        output = login.do_login()
        assert output == True
    @pytest.mark.run(order=-1)
    def test_do_logout(self):
        login.do_logout()
        # the session is removed when logout succeeds
        output = login.check_session()
        assert output == False
    def test_env_file(self):
        # check_env() is True when the env file exists
        assert login.check_env() == True
    def test_create_env_file(self):
        home = os.path.expanduser("~")
        env_file = "{}/.neo.env".format(home)
        env_file_tmp = "{}/.neo.tmp".format(home)
        # move the already-existing file aside so a fresh one can be created
        os.rename(env_file, env_file_tmp)
        login.create_env_file("usertest", "passwd", "1")
        login.add_token("1abc")
        outs = utils.read_file(env_file)
        # clean up: drop the generated file and restore the original
        os.remove(env_file)
        os.rename(env_file_tmp, env_file)
        assert 'usertest' in outs
|
8,622 | a4eca0f5b7d5a03ca3600554ae3fe3b94c59fc68 | from os import environ
from process import process
from s3Service import put_object
environ['ACCESS_KEY'] = '1234567890'
environ['SECRET_KEY'] = '1234567890'
environ['ENDPOINT_URL'] = 'http://localhost:4566'
environ['REGION'] = 'us-east-1'
environ['BUCKET_GLOBAL'] = 'fl2-statement-global'
environ['BUCKET_GLOBAL_BACKUP'] = 'fl2-statement-global-bkp'
environ['BUCKET_TRANSFER'] = 'fl2-statement-transfer'
environ['BUCKET_PENDING_PROCESS'] = 'fl2-statement-pending-process'
BUCKET_GLOBAL = environ['BUCKET_GLOBAL']
# def test():
#
# file = open('EEVC.TXT', mode='rb')
# put_object(BUCKET_GLOBAL, 'EEVC.TXT', file) # OK
#
# file = open('EEVD.TXT', mode='rb')
# put_object(BUCKET_GLOBAL, 'EEVD.TXT', file) # OK
#
# file = open('EEFI.TXT', mode='rb')
# put_object(BUCKET_GLOBAL, 'EEFI.TXT', file) # OK
#
# file = open('EESA.TXT', mode='rb')
# put_object(BUCKET_GLOBAL, 'EESA.TXT', file) # OK
def execute(event, context):
    # Lambda-style entry point: currently a stub that just echoes the event
    # payload (the real processing calls are kept commented out below).
    print(event)
    return None
# payload = {'Bucket': BUCKET_GLOBAL, 'Key': 'EEVC.TXT'}
# process(bucket=payload['Bucket'], key=payload['Key'])
#
# payload = {'Bucket': BUCKET_GLOBAL, 'Key': 'EEVD.TXT'}
# process(bucket=payload['Bucket'], key=payload['Key'])
#
# payload = {'Bucket': BUCKET_GLOBAL, 'Key': 'EEFI.TXT'}
# process(bucket=payload['Bucket'], key=payload['Key'])
#
# payload = {'Bucket': BUCKET_GLOBAL, 'Key': 'EESA.TXT'}
# process(bucket=payload['Bucket'], key=payload['Key'])
# Press the green button in the gutter to run the script.
# if __name__ == '__main__':
# test()
# execute(None, None)
|
8,623 | 09b2c1e69203f440754e82506b42e7856c94639a | from robotcar import RobotCar
import pdb
class RobotCar_Stub(RobotCar):
    """Console stub of RobotCar: every motion command only prints its name,
    so RobotCar.move() sequences can be exercised without motor hardware."""
    def forward(self):
        print("Forward")
    def backward(self):
        print("Backward")
    def left(self):
        print("Left")
    def right(self):
        print("Right")
    def stop(self):
        print("Stop")
if __name__ == '__main__':
rc = RobotCar_Stub()
rc.move("fblrs")
|
8,624 | 27e66b2a03bc626d5babd804e736a4652ba030d5 | #!/usr/bin/python2
import unittest
import luna_utils as luna
import time
API_URL = "com.webos.service.videooutput/"
VERBOSE_LOG = True
SUPPORT_REGISTER = False
SINK_MAIN = "MAIN"
SINK_SUB = "SUB0"
#TODO(ekwang): If you connect SUB, HAL error occurs. Just test MAIN in the current state
#SINK_LIST = [SINK_MAIN, SINK_SUB]
SINK_LIST = [SINK_MAIN]
PID1 = "pipeline1"
PID2 = "pipeline2"
PID_LIST = [PID1, PID2]
INPUT_RECT = {'X':0, 'Y':0, 'W':1920, 'H':1080}
OUTPUT_RECT = {'X':400, 'Y':400, 'W':1920, 'H':1080}
#Choose source type VDEC or HDMI for test input
#SOURCE_NAME = SOURCE_NAME
#SOURCE_PORT = 0
SOURCE_NAME = "HDMI"
SOURCE_PORT = 3
SOURCE_WIDTH = 1920
SOURCE_HEIGHT = 1080
SLEEP_TIME = 1
class TestVideoMethods(luna.TestBase):
def vlog(self, message):
if VERBOSE_LOG:
print(message)
def setUp(self):
self.vlog("setUp")
if SUPPORT_REGISTER:
for pid in PID_LIST:
self.vlog("register " + pid)
luna.call(API_URL + "register", { "context": pid })
self.statusSub = luna.subscribe(API_URL + "getStatus", {"subscribe":True})
def tearDown(self):
self.vlog("tearDown")
for sink in SINK_LIST:
self.vlog("disconnect " + sink)
luna.call(API_URL + "disconnect", { "sink": sink })
if SUPPORT_REGISTER:
for pid in PID_LIST:
self.vlog("unregister " + pid)
luna.call(API_URL + "unregister", { "context": pid })
luna.cancelSubscribe(self.statusSub)
def connect(self, sink, source, port, pid):
self.vlog("connect " + sink)
self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + "connect",
{ "outputMode": "DISPLAY", "sink": sink, "source": source, "sourcePort": port },
self.statusSub,
{"video":[{"sink": sink, "connectedSource": source, "connectedSourcePort": port}]})
def mute(self, sink, blank):
self.vlog("- Mute" + sink)
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "blankVideo",
{"sink": sink, "blank": blank},
self.statusSub,
{"video":[{"sink": sink, "muted": blank}]})
def disconnect(self, sink, pid):
self.vlog("disconnect " + sink)
self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + "disconnect", { "sink": sink },
self.statusSub,
{"video": [{"sink": sink, "connectedSource": None}]})
def testConnectDisconnect(self):
print("[testConnectDisconnect]")
for source, ports in {"VDEC":[0,1], "HDMI":[0,1,2]}.iteritems():
for port in ports:
for sink in SINK_LIST:
for i in range(3):
self.connect(sink, source, port, "")
self.disconnect(sink, "")
def testDualConnect(self):
print("[testDualConnect]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
if len(SINK_LIST) > 1:
self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + "connect",
{"outputMode": "DISPLAY", "sink": SINK_SUB, "source": SOURCE_NAME, "sourcePort": SOURCE_PORT},
self.statusSub,
{"video": [{"sink": SINK_MAIN, "connectedSource": SOURCE_NAME, "connectedSourcePort": SOURCE_PORT},
{"sink": SINK_SUB, "connectedSource": SOURCE_NAME, "connectedSourcePort": SOURCE_PORT}]})
self.disconnect(SINK_MAIN, "")
if len(SINK_LIST) > 1:
self.disconnect(SINK_SUB, "")
def testMute(self):
print("[testMute]")
for sink in SINK_LIST:
self.connect(sink, SOURCE_NAME, SOURCE_PORT, "")
for blank in [False, True]:
self.mute(sink, blank)
#test different orders of display window and media data
def testSetDisplayWindowAndVideoData(self):
print("[testSetDisplayWindowAndVideoData]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_MAIN,
"fullScreen": False,
"sourceInput": {"x":INPUT_RECT['X'], "y":INPUT_RECT['Y'], "width":INPUT_RECT['W'], "height":INPUT_RECT['H']},
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}},
self.statusSub,
{"video":[{"sink": "MAIN",
"fullScreen": False,
"width":0,
"height":0,
"frameRate":0,
"sourceInput": {"x":0, "y":0, "width":0, "height":0}, # no media data yet so can't determine appliedsourceInput yet
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}
}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "setVideoData",
{"sink": SINK_MAIN,
"contentType": "media",
"frameRate":29.5,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"scanType":"progressive",
"adaptive": False},
self.statusSub,
{"video":[{"sink": "MAIN",
"fullScreen": False,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":0, "y":0, "width":SOURCE_WIDTH, "height":SOURCE_HEIGHT},
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}
}]})
self.mute(SINK_MAIN, False)
time.sleep(SLEEP_TIME)
def testSetVideoDataAndDisplayWindow(self):
print("[testSetVideoDataAndDisplayWindow]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "setVideoData",
{"sink": SINK_MAIN,
"contentType": "media",
"frameRate":29.5,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"scanType":"progressive",
"adaptive": False},
self.statusSub,
{"video":[{"sink": SINK_MAIN,
"fullScreen": False,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":0, "y":0, "width":0, "height":0},
"displayOutput": {"x":0, "y":0, "width":0, "height":0}
}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": "MAIN",
"fullScreen": False,
"sourceInput": {"x":INPUT_RECT['X'], "y":INPUT_RECT['Y'], "width":INPUT_RECT['W'], "height":INPUT_RECT['H']},
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}},
self.statusSub,
{"video":[{"sink": SINK_MAIN,
"fullScreen": False,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":INPUT_RECT['X'], "y":INPUT_RECT['Y'], "width":INPUT_RECT['W'], "height":INPUT_RECT['H']},
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}
}]})
self.mute(SINK_MAIN, False)
time.sleep(SLEEP_TIME)
def testSetFullscreen(self):
print("[testSetFullscreen]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "setVideoData",
{"sink": SINK_MAIN,
"contentType": "media",
"frameRate":29.5,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"scanType":"progressive",
"adaptive": False},
self.statusSub,
{"video":[{"sink": SINK_MAIN,
"fullScreen": False,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":0, "y":0, "width":0, "height":0},
"displayOutput": {"x":0, "y":0, "width":0, "height":0}
}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_MAIN,
"fullScreen": True,
"sourceInput": {"x":0, "y":0, "width":SOURCE_WIDTH, "height":SOURCE_HEIGHT}},
self.statusSub,
{"video":[{"sink": SINK_MAIN,
"fullScreen": True,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":0, "y":0, "width":SOURCE_WIDTH, "height":SOURCE_HEIGHT},
"displayOutput": {"x":0, "y":0, "width":3840, "height":2160}
}]})
self.mute(SINK_MAIN, False)
time.sleep(SLEEP_TIME)
def testSetCompositing(self):
print("[testSetCompositing]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
if len(SINK_LIST) > 1:
self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, "")
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setCompositing",
{"composeOrder": [{"sink":SINK_MAIN, "opacity":20, "zOrder":1},
{"sink":SINK_SUB, "opacity":31, "zOrder":0}]},
self.statusSub, {"video":[{"sink": "MAIN", "opacity":20, "zOrder":1}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_MAIN, "fullScreen":True, "opacity":130},
self.statusSub, {"video":[{"sink": SINK_MAIN, "opacity":130, "zOrder":1}]})
if len(SINK_LIST) > 1:
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_SUB, "fullScreen":True, "opacity":200},
self.statusSub, {"video":[{"sink": "SUB0", "opacity":200, "zOrder":0}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_SUB, "fullScreen":True, "opacity":230},
self.statusSub, {"video":[{"sink": "MAIN", "opacity":130, "zOrder":0}, {"sink": "SUB0", "opacity":230, "zOrder":1}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_SUB, "fullScreen":True, "opacity":30, "zOrder": 1},
self.statusSub, {"video":[{"sink": "MAIN", "opacity":130, "zOrder":0}, {"sink": "SUB0", "opacity":30, "zOrder":1}]})
if __name__ == '__main__':
luna.VERBOSE = False
unittest.main()
|
8,625 | 49b007b723b9c43fb79d5dffa2546c856faf4937 | # _*_ coding:utf-8 _*_
from __future__ import unicode_literals
from django.db import models
from django.core.urlresolvers import reverse
# Create your models here.
# This module uses __unicode__ instead of __str__ so that the Chinese labels
# render correctly in the (Python 2 / legacy Django) admin without errors.
# Background: http://blog.csdn.net/jiangnanandi/article/details/3574007
# Alternative approach: http://blog.sina.com.cn/s/blog_63cf1c510101an74.html
class FatherMenu(models.Model):
    """Top-level navigation menu item; may own SonMenu children."""
    title = models.CharField(u"菜单名", max_length=20)
    slug = models.CharField(u"链接", max_length=100, db_index=True)
    son = models.BooleanField("子菜单?", default=False)  # True when this entry has sub-menus
    class Meta:
        verbose_name = u"一级菜单"
        verbose_name_plural = u"一级菜单"
    def __unicode__(self):
        # __unicode__ (not __str__): Python 2 / legacy Django admin display.
        return self.title
class SonMenu(models.Model):
    """Second-level menu item linked to its FatherMenu parent."""
    title = models.CharField(u"菜单名", max_length=20)
    slug = models.CharField(u"链接", max_length=100, db_index=True)
    father = models.ForeignKey(
        'seclab.FatherMenu', blank=True, null=True, verbose_name=u"父菜单")
    class Meta:
        verbose_name = u"二级菜单"
        verbose_name_plural = u"二级菜单"
    def __unicode__(self):
        # __unicode__ (not __str__): Python 2 / legacy Django admin display.
        return self.title
class Img(models.Model):
    """Image record shown on the site, grouped by tag and ordered by tagId."""
    tag = models.CharField(u"类型", max_length=20)
    tagId = models.IntegerField(u"序号")
    intro = models.CharField(u"描述", max_length=100)
    title = models.CharField(u"标题", max_length=100)
    slug = models.CharField(u"链接", max_length=100, db_index=True)
    class Meta:
        verbose_name = u"图片"
        verbose_name_plural = u"图片"
    def __unicode__(self):
        # Images are identified by their link rather than their title.
        return self.slug
class Article(models.Model):
    """Site article; home_display marks pieces featured on the front page."""
    tag = models.CharField(u"类型", max_length=20)
    title = models.CharField(u"标题", max_length=100)
    content = models.TextField(u"内容", default=u'', blank=True)
    author = models.CharField(u"作者", max_length=100)
    pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)
    home_display = models.BooleanField(u"首页显示", default=False)
    class Meta:
        verbose_name = u"文章"
        verbose_name_plural = u"文章"
    def __unicode__(self):
        # __unicode__ (not __str__): Python 2 / legacy Django admin display.
        return self.title
|
8,626 | 45b2b611a80b93c9a7d8ec8a09e5838147e1ea76 | # Generated by Django 3.0.2 on 2020-08-27 16:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('info', '0010_auto_20200808_2117'),
]
operations = [
migrations.AddField(
model_name='profile',
name='annual_income',
field=models.CharField(blank=True, choices=[('100000', '<100000'), ('100000-300000', '100000-300000'), ('300000-600000', '300000-600000'), ('600000-1000000', '600000-1000000'), ('1000000-1500000', '1000000-1500000'), ('1500000-2000000', '1500000-2000000'), ('>2000000', '>2000000')], max_length=20, null=True),
),
migrations.AddField(
model_name='profile',
name='birthdate',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='profile',
name='birthplace',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='profile',
name='blood_group',
field=models.CharField(blank=True, choices=[('-A', '-A'), ('B', 'B'), ('AB', 'AB'), ('O', 'O')], max_length=10, null=True),
),
migrations.AddField(
model_name='profile',
name='body_type',
field=models.CharField(blank=True, choices=[('Fair', 'Fair'), ('Black', 'Black'), ('Brown', 'Brown')], max_length=20, null=True),
),
migrations.AddField(
model_name='profile',
name='caste',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AddField(
model_name='profile',
name='education',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='profile',
name='education_detail',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='profile',
name='height',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='profile',
name='maritial_status',
field=models.CharField(blank=True, choices=[('Single', 'Single'), ('Single', 'Single')], max_length=50, null=True),
),
migrations.AddField(
model_name='profile',
name='mother_tongue',
field=models.CharField(blank=True, choices=[('Assamese', 'Assamese'), ('Bengali', 'Bengali'), ('Bodo', 'Bodo'), ('Dogri', 'Dogri'), ('English', 'English'), ('Gujarati', 'Gujarati'), ('Hindi', 'Hindi'), ('Kannada', 'Kannada'), ('Kashmiri', 'Kashmiri'), ('Konkani', 'Konkani'), ('Maithili', 'Maithili'), ('Malayalam', 'Malayalam'), ('Marathi', 'Marathi'), ('Meitei (Manipuri)', 'Meitei (Manipuri)'), ('Nepali', 'Nepali'), ('Odia', 'Odia'), ('Punjabi', 'Punjabi'), ('Sanskrit', 'Sanskrit'), ('Santali', 'Santali')], max_length=30, null=True),
),
migrations.AddField(
model_name='profile',
name='navaras',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='profile',
name='occupation',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='profile',
name='religion',
field=models.CharField(blank=True, choices=[('Hinduism', 'Hinduism'), ('Islam', 'Islam'), ('Christianity', 'Christianity'), ('Sikhism', 'Sikhism'), ('Buddhism', 'Buddhism'), ('Jainism', 'Jainism'), ('Zoroastrianism', 'Zoroastrianism')], max_length=30, null=True),
),
migrations.AddField(
model_name='profile',
name='sub_caste',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AddField(
model_name='profile',
name='weight',
field=models.PositiveSmallIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='profile',
name='age',
field=models.PositiveSmallIntegerField(blank=True, null=True),
),
]
|
8,627 | 5fe4f2738285d2f4b8bbfee2c4c6d15665737ea4 | from django.urls import path
from .views import *
urlpatterns = [
path('', ListUser.as_view() , name = 'list'),
path('register/', UserRegister.as_view() , name = 'register'),
path('login/', UserLogin.as_view() , name = 'login'),
path('delete/' , UserDelete.as_view() , name ='delete'),
path('update/' , UserUpdate.as_view() , name = 'update'),
] |
8,628 | d3b6a105b14d9c3485a71058391a03c2f4aa5c10 | import pickle as pickle
import os
import pandas as pd
import torch
import numpy as np
import random
from sklearn.metrics import accuracy_score
from transformers import XLMRobertaTokenizer, XLMRobertaForSequenceClassification, Trainer, TrainingArguments, XLMRobertaConfig, ElectraForSequenceClassification, ElectraTokenizer
from load_data import *
import argparse
from importlib import import_module
from pathlib import Path
import glob
import re
# seed 고정
def seed_everything(seed):
    """Seed every RNG in use (Python, NumPy, PyTorch CPU/GPU) and force
    deterministic cuDNN behaviour so training runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers every device in multi-GPU setups
    # Trade cuDNN kernel autotuning for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
# 평가를 위한 metrics function.
def compute_metrics(pred):
    """Trainer metric hook: accuracy of argmax predictions against gold labels."""
    predicted = pred.predictions.argmax(-1)
    # accuracy_score (sklearn) == fraction of exact label matches
    return {'accuracy': accuracy_score(pred.label_ids, predicted)}
def increment_output_dir(output_path, exist_ok=False):
    """Return *output_path* if it is free (or *exist_ok*), otherwise the next
    numbered sibling: ``results/expr`` -> ``results/expr2`` -> ``results/expr3``.

    The number is one past the highest suffix found among existing paths that
    match ``<output_path>*``.
    """
    path = Path(output_path)
    # (exists and exist_ok) or (not exists)  ==  exist_ok or not exists
    if exist_ok or not path.exists():
        return str(path)
    siblings = glob.glob(f"{path}*")
    # BUG FIX: the original built the pattern with `rf"%s(\d+)" % path.stem`,
    # injecting the stem verbatim — a stem containing regex metacharacters
    # ("expr.v1", "run+a", ...) would mis-match or raise.  re.escape makes the
    # stem literal.
    pattern = re.compile(re.escape(path.stem) + r"(\d+)")
    taken = [int(m.group(1)) for m in (pattern.search(d) for d in siblings) if m]
    n = max(taken) + 1 if taken else 2
    return f"{path}{n}"
def train(args):
seed_everything(args.seed)
# load model and tokenizer
# MODEL_NAME = "xlm-roberta-large"
# tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_NAME)
MODEL_NAME = "monologg/koelectra-base-v3-discriminator"
tokenizer = ElectraTokenizer.from_pretrained(MODEL_NAME)
# load dataset
train_dataset = load_data("/opt/ml/input/data/train/train.tsv")
#dev_dataset = load_data("./dataset/train/train_dev.tsv")
train_label = train_dataset['label'].values
#dev_label = dev_dataset['label'].values
# tokenizing dataset
tokenized_train = ko_tokenized_dataset(train_dataset, tokenizer)
#tokenized_dev = tokenized_dataset(dev_dataset, tokenizer)
# make dataset for pytorch.
RE_train_dataset = RE_Dataset(tokenized_train, train_label)
#RE_dev_dataset = RE_Dataset(tokenized_dev, dev_label)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# setting model hyperparameter
# bert_config = XLMRobertaConfig.from_pretrained(MODEL_NAME)
# bert_config.num_labels = 42
# model = XLMRobertaForSequenceClassification.from_pretrained(MODEL_NAME, config=bert_config)
# model.resize_token_embeddings(len(tokenizer))
config_module = getattr(import_module("transformers"), "ElectraConfig")
model_config = config_module.from_pretrained(MODEL_NAME)
model_config.num_labels = 42
model = ElectraForSequenceClassification.from_pretrained(MODEL_NAME, config=model_config)
model.resize_token_embeddings(len(tokenizer))
model.parameters
model.to(device)
output_dir = increment_output_dir(args.output_dir)
# 사용한 option 외에도 다양한 option들이 있습니다.
# https://huggingface.co/transformers/main_classes/trainer.html#trainingarguments 참고해주세요.
training_args = TrainingArguments(
output_dir=output_dir, # output directory
save_total_limit=args.save_total_limit, # number of total save model.
num_train_epochs=args.epochs, # total number of training epochs
learning_rate=args.lr, # learning_rate
per_device_train_batch_size=args.batch_size, # batch size per device during training
warmup_steps=args.warmup_steps, # number of warmup steps for learning rate scheduler
weight_decay=args.weight_decay, # strength of weight decay
logging_dir='./logs', # directory for storing logs
logging_steps=100, # log saving step.
save_steps=100,
dataloader_num_workers=4,
label_smoothing_factor=args.label_smoothing_factor,
)
trainer = Trainer(
model=model, # the instantiated 🤗 Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=RE_train_dataset, # training dataset
compute_metrics = compute_metrics,
)
# train model
trainer.train()
def main(args):
train(args)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=142)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--lr', type=float, default=1e-5)
parser.add_argument('--weight_decay', type=float, default=0.01)
parser.add_argument('--warmup_steps', type=int, default=300) # number of warmup steps for learning rate scheduler
parser.add_argument('--output_dir', type=str, default='./results/expr')
parser.add_argument('--save_steps', type=int, default=100)
parser.add_argument('--save_total_limit', type=int, default=1)
parser.add_argument('--logging_steps', type=int, default=100)
parser.add_argument('--logging_dir', type=str, default='./logs') # directory for storing logs
parser.add_argument('--label_smoothing_factor', type=float, default=0.5) # directory for storing logs
args = parser.parse_args()
main(args) |
8,629 | 5f56838ad0717c4f7a2da6b53f586a88b0166113 | from django.urls import path
from . import apiviews
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path('contacts', apiviews.ContactsView.as_view(), name='contacts'),
path('contact/<int:pk>', apiviews.ContactView.as_view(), name='contact'),
path('signup', apiviews.create_user_with_token, name='signup'),
path('signin', apiviews.signin, name='signin'),
path('signout', apiviews.sign_out, name='signout'),
path('api-token-auth/', obtain_auth_token, name='api_token_auth')
]
|
8,630 | fa5cbbd03641d2937e4502ce459d64d20b5ee227 |
import matplotlib.pyplot as plt
import numpy as np
from tti_explorer.contacts import he_infection_profile
plt.style.use('default')
loc = 0
# taken from He et al
gamma_params = {
'a': 2.11,
'loc': loc,
'scale': 1/0.69
}
t = 10
days = np.arange(t)
mass = he_infection_profile(t, gamma_params)
fig, ax = plt.subplots(1, figsize=(9*0.8, 5*0.8))
xaxis = np.linspace(-2, t, 1000)
ax.bar(
np.arange(5)+0.1,
[1/5, 1/5, 1/5, 1/5, 1/5],
label="Kucharski profile",
align="edge",
color="C1",
zorder=1,
alpha=0.6
)
ax.bar(days, mass, label="Discretised", align="edge", zorder=1)
ax.legend(loc="upper right")
ax.set_axis_on()
ax.set_ylabel('Secondary attack profile')
ax.set_xlabel('Days since start of infectious period')
ax.set_xticks(days)
plt.show()
# fig.savefig('./charts/inf_profile.pdf')
|
8,631 | 9a62a57f6d9af7ef09c8ed6e78a100df7978da6e | ID = '113'
TITLE = 'Path Sum II'
DIFFICULTY = 'Medium'
URL = 'https://oj.leetcode.com/problems/path-sum-ii/'
BOOK = False
PROBLEM = r"""Given a binary tree and a sum, find all root-to-leaf paths where each path's
sum equals the given sum.
For example:
Given the below binary tree and `sum = 22`,
5
/ \
4 8
/ / \
11 13 4
/ \ / \
7 2 5 1
return
[
[5,4,11,2],
[5,8,4,5]
]
"""
|
8,632 | 492c416becc44deaafef519eae8c9a82ac00cc0e | #!/usr/bin/python
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
ledPin = 4
pinOn = False
GPIO.setup(ledPin, GPIO.OUT)
GPIO.output(ledPin, GPIO.LOW)
def print_pin_status(pin_number):
    """Print the current logic level (0/1) read from *pin_number*.

    NOTE(review): the pin is reconfigured as an input to read it, then
    switched back to an output.  While configured as an input the pin is
    no longer driven, so an attached LED may blink off briefly; RPi.GPIO
    can read an output pin directly with GPIO.input() — confirm whether
    the IN/OUT round-trip is intentional.
    """
    GPIO.setup(pin_number, GPIO.IN)
    value = GPIO.input(pin_number)
    print(f'Current Value of {pin_number} is {value}')
    GPIO.setup(pin_number, GPIO.OUT)
# Interactive loop: show the pin state, then read one command —
# '1' toggles the LED, 'q' quits, anything else is ignored.
while True:
    print_pin_status(ledPin)
    key = input("Action, press q to quit: ")
    print(key)
    if key == ' ':
        print("space pushed")
    if key == '1':
        # toggle, tracking the driven state in pinOn
        if pinOn:
            print("turning led off")
            GPIO.output(ledPin, GPIO.LOW)
            pinOn = False
        else:
            print("turning led on")
            GPIO.output(ledPin, GPIO.HIGH)
            pinOn = True
    if key == 'q':
        print("Quiting. . .")
        break
|
8,633 | 61232ec951cf378798220c00280ef2d351088d06 | import random
# possible choices
liste = ["rock", "paper", "scissors"]
# the player answers "y" to keep playing
answer = "y"
while answer == "y":
    # player's choice
    user_choice = input("rock,paper,scissors ?")
    # check that the player typed a valid option
    if user_choice in liste :
        # computer's choice
        prog = random.choice(liste)
        print("computer's choice :", prog)
        if prog == "rock" :
            if user_choice == "paper" :
                print("you won") # win
            elif user_choice == "scissors" :
                print("you lost") # loss
        if prog == "paper" :
            if user_choice == "scissors" :
                print("you won")
            elif user_choice == "rock" :
                print("you lost")
        if prog == "scissors" :
            if user_choice == "rock" :
                print("you won")
            elif user_choice == "paper" :
                print("you lost")
    else :
        print("not the correct answer")
    # ask whether the player wants another round
    answer = input("play again ? write y")
|
8,634 | bf7676dc2c47d9cd2f1ce2d436202ae2c5061265 | from .base import GnuRecipe
class CAresRecipe(GnuRecipe):
    """Build recipe for the c-ares asynchronous DNS resolver library."""

    def __init__(self, *args, **kwargs):
        super(CAresRecipe, self).__init__(*args, **kwargs)
        self.name = 'c-ares'
        self.version = '1.14.0'
        # $name/$version placeholders are substituted by the recipe machinery
        self.url = 'https://c-ares.haxx.se/download/$name-$version.tar.gz'
        self.sha256 = ('45d3c1fd29263ceec2afc8ff9cd06d5f'
                       '8f889636eb4e80ce3cc7f0eaf7aadc6e')
|
8,635 | f28222625e28939b34b1b5c21d28dbf9c49c6374 | import knn
datingDataMat,datingLabels = knn.file2matrix('datingTestSet2.txt')
normMat,ranges,minVals = knn.autoNorm(datingDataMat)
print normMat
print ranges
print minVals |
8,636 | e01b1f57a572571619d6c0981370030dc6105fd2 | import urllib.request
import urllib.parse
import json
content = input("请输入需要翻译的内容:")
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
# Form fields copied from a captured browser request to the Youdao web
# translator.  NOTE(review): salt/sign/ts are a hard-coded snapshot from
# one request — confirm the endpoint still accepts stale values.
data = {}
data['action'] = 'FY_BY_CLICKBUTTION'
data['bv'] = '1ca13a5465c2ab126e616ee8d6720cc3'
data['client'] = 'fanyideskweb'
data['doctype'] = 'json'
data['from'] = 'AUTO'
data['i'] = content  # the text to translate
data['keyfrom'] = 'fanyi.web'
data['salt'] = '15708737847078'
data['sign'] = '64037c1dd211ea7bd98321a3bd8ab45a'
data['smartresult'] = 'dict'
data['to'] = 'AUTO'
data['ts'] = '1570873784707'
data['version'] = '2.1'
# urlopen requires the POST body as bytes
data = urllib.parse.urlencode(data).encode('utf-8')
response = urllib.request.urlopen(url,data)
html = response.read().decode('utf-8')
target = json.loads(html)
# first segment of the first paragraph carries the translated text
print("翻译结果:%s" % (target['translateResult'][0][0]['tgt']))
|
8,637 | c34ff2bbb0ba743268ace77c110ce0b283a25eba | f=open('p102_triangles.txt')
def cross(a, b, c):
    """Return the 2-D cross product of vectors (b - a) and (c - a).

    Twice the signed area of triangle a-b-c; the sign encodes orientation.
    """
    ux, uy = b[0] - a[0], b[1] - a[1]
    vx, vy = c[0] - a[0], c[1] - a[1]
    return ux * vy - uy * vx
# Project Euler 102: count triangles (one per line, six comma-separated
# coordinates) whose interior contains the origin.
# NOTE: Python 2 source — uses file.xreadlines() and the print statement.
x=[0,0]
y=[0,0]
z=[0,0]
origin=(0,0)
ans=0
for i in f.xreadlines():
    x[0],x[1],y[0],y[1],z[0],z[1]=map(int,i.split(','))
    # twice the triangle's area, via the cross product
    area1=abs(cross(x,y,z))
    # sum of the three sub-triangles formed with the origin; equal to
    # area1 exactly when the origin lies inside (or on) the triangle
    area2=abs(cross(x,y,origin))+abs(cross(y,z,origin))+abs(cross(z,x,origin))
    if area1==area2:
        ans+=1
print ans
|
8,638 | 47587cce572807922344523d8c5fefb09552fe34 | import urllib, json
from PyQt4.QtCore import QRectF, Qt
from PyQt4.Qt import QPrinter, QPainter, QFont, QBrush, QColor, QPen, QImage
from PyQt4.QtGui import QApplication
# bkgimg = QImage()
# bkgimg.load("KosyMost.jpg", format = "jpg")
#
# print bkgimg
# exit()
def background(painter, bkgimg):
    """Paint one flashcard page background on *painter*.

    Fills the page red, stretches *bkgimg* over the top 90%, overlays a
    translucent white band between 20% and 60% of the height (where the
    card text is later drawn), and writes the site credit in the bottom
    10% strip.
    """
    maxx = painter.device().width()
    maxy = painter.device().height()
    rimg = QRectF(0,0,maxx,maxy*.9)
    #
    painter.fillRect(0,0,maxx, maxy, QBrush(Qt.red, Qt.SolidPattern))
    painter.drawImage(rimg, bkgimg)
    wwh = QColor(255,255,255,128)  # translucent white
    painter.fillRect(0,2*maxy/10,maxx, 4*maxy/10, QBrush(wwh, Qt.SolidPattern))
    u = QRectF(0,9*maxy/10,maxx,maxy/10)  # footer strip
    penHText = QPen(Qt.white);
    painter.setPen(penHText);
    painter.setFont(QFont("Arial", 16, italic=True));
    painter.drawText(u, Qt.AlignLeft | Qt.TextIncludeTrailingSpaces | Qt.AlignVCenter , " ekskursja.pl/flashcards")
    # painter.drawLine(0,0,maxx,maxy)
    # painter.drawLine(0,maxy,maxx,0)
# Fetch the flashcard set and render it as an A6 landscape PDF, two pages
# per card (front: question; back: question + first answer).
# NOTE: Python 2 source (print statements, urllib.urlopen).
# proxies = {'http': 'http://126.179.0.206:9090' }
headers = {'User-Agent':'MultiFlashcards/fcset.py 0.1'}  # NOTE(review): defined but never passed to urlopen
url = 'http://ekskursja.pl/wp-content/plugins/flashcards/flashcards.json.php?name=contigo&id=29072'
print url
# response = urllib.urlopen(url, proxies=proxies)
response = urllib.urlopen(url)
data = json.loads(response.read())
app = QApplication([])
# configure an A6 landscape PDF printer with no margins
printer = QPrinter(QPrinter.HighResolution);
printer.setOutputFormat(QPrinter.PdfFormat);
printer.setPageSize(QPrinter.A6);
printer.setOrientation(QPrinter.Landscape);
printer.setPageMargins (0,0,0,0, QPrinter.Millimeter);
printer.setFullPage(False);
bkgimg = QImage()
if not bkgimg.load("KosyMost.png", format = "png"):
    print "Not loaded"
printer.setOutputFileName("contigo.pdf");
painter = QPainter(printer)
maxx = painter.device().width()
maxy = painter.device().height()
print "Wymiary: %d,%d" % (maxx, maxy)
# question band (20-40% of height) and answer band (40-60% of height)
q = QRectF(0,2*maxy/10,maxx,2*maxy/10)
a = QRectF(0,4*maxy/10,maxx,2*maxy/10)
penHText = QPen(QColor("#c60b1e"));
for qa in data['flashcards']:
    print "%s -> %s" % (qa['q'], qa['a'][0])
    # painter.drawText(painter.device().width()/2, 500, qa['q'])
    # front page: question only
    background(painter, bkgimg)
    painter.setPen(penHText);
    painter.setFont(QFont("Arial", 24, QFont.Bold));
    painter.drawText(q, Qt.AlignCenter, qa['q'])
    printer.newPage()
    # back page: question plus first answer
    background(painter, bkgimg)
    painter.setPen(penHText);
    painter.setFont(QFont("Arial", 24, QFont.Bold));
    painter.drawText(q, Qt.AlignCenter | Qt.TextWordWrap, qa['q'])
    painter.drawText(a, Qt.AlignCenter | Qt.TextWordWrap, qa['a'][0])
    printer.newPage()
painter.end()
|
8,639 | 75833617996549167fa157ff78cc1a11f870784f | import os
import sys
import glob
import shutil
import json
import codecs
from collections import OrderedDict
def getRegionClass(image_path, data_id, imgName):
    """Return the label class for an already-sorted image, or None.

    Each human-sorted image lives under
    ``image_path/data_id/<region-folder>/imgName``.  The first folder that
    actually contains the file determines the class; the folder name is
    mapped to the shorter label used in the annotation JSON.

    Args:
        image_path: root directory of the sorted ("done") images.
        data_id: sub-directory for one source JSON file.
        imgName: image file name to look up.

    Returns:
        The matching short label string, or None if the image is not
        found in any region folder.
    """
    # Folder name -> JSON label, paired explicitly instead of the original
    # two parallel lists walked by index (error-prone if lists drift).
    class_map = [
        ('nosmoke_background', 'nosmoke_bg'),
        ('nosmoke_face', 'nosmoke_face'),
        ('nosmoke_suspect', 'nosmoke_susp'),
        ('nosmoke_cover', 'nosmoke_cover'),
        ('smoke_hand', 'smoke_hand'),
        ('smoke_nohand', 'smoke_nohand'),
        ('smoke_hard', 'smoke_hard'),
    ]
    for folder, label in class_map:
        if os.path.isfile(os.path.join(image_path, data_id, folder, imgName)):
            return label
    return None
def add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir):
    """Merge human smoke-sorting results back into annotation JSON files.

    For every ``*.json`` in *org_json_dir* (one JSON record per line), look
    up each record's image under *done_root_dir* and, when found, append a
    ``smoke_region`` common_box carrying the sorted class.  Augmented
    copies are written to *dst_json_dir*; per-class counts are printed at
    the end.

    NOTE(review): each source ``json_file`` handle is opened but never
    closed, and ``line[0]`` raises IndexError on an empty line — confirm
    inputs never contain blank lines.
    """
    if not os.path.exists(dst_json_dir):
        os.makedirs(dst_json_dir)
    # per-class counters for the summary printed at the end
    smoke_hand_num, smoke_nohand_num, smoke_hard_num = 0, 0, 0
    nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num = 0, 0, 0, 0
    for json_file_name in glob.glob(org_json_dir + '/*.json'):
        json_file = open(json_file_name, 'r')
        base_file_id = os.path.basename(json_file_name)[:-5]  # strip '.json'
        print(base_file_id + '.json')
        json_lines = json_file.read().splitlines()
        dst_json_lines = []
        # truncate the destination, then reopen for append
        new_json_file = codecs.open(dst_json_dir + '/' + base_file_id + '.json', "w", "utf-8")
        new_json_file.close()
        new_json_file = codecs.open(dst_json_dir + '/' + base_file_id + '.json', "a+", 'utf-8')
        for line in json_lines:
            # comment lines are copied through untouched
            if line[0] == '#':
                new_json_file.write(line + '\n')
                continue
            # OrderedDict preserves key order in the rewritten JSON
            js = json.loads(line, object_pairs_hook=OrderedDict)
            #new_js_line = json.dumps(js) + "\n"
            #new_json_file.write(new_js_line)
            #continue
            imgName = js["image_key"]
            select_class = getRegionClass(done_root_dir, base_file_id, imgName)
            # image not found in any sorted sub-folder: pass record through
            if select_class == None:
                new_json_file.write(line + '\n') #
                #print('Not Found: ', done_root_dir, base_file_id, imgName)
                continue
            #print select_class
            new_common_box = {}
            new_attrs = {}
            new_attrs['ignore'] = 'no'
            new_attrs['type'] = 'smoke_region'
            new_attrs['class'] = select_class
            new_common_box['attrs'] = new_attrs
            # hard examples are kept but marked ignored
            if select_class == 'smoke_hard':
                new_attrs['ignore'] = 'yes'
            # statistic
            if select_class == 'smoke_hand':
                smoke_hand_num += 1
            elif select_class == 'smoke_nohand':
                smoke_nohand_num += 1
            elif select_class == 'smoke_hard':
                smoke_hard_num += 1
            elif select_class == 'nosmoke_bg':
                nosmoke_bg_num += 1
            elif select_class == 'nosmoke_face':
                nosmoke_face_num += 1
            elif select_class == 'nosmoke_susp':
                nosmoke_susp_num += 1
            elif select_class == 'nosmoke_cover':
                nosmoke_cover_num += 1
            else:
                print('Invalid smoke class.', select_class)
            # common box, like phone, hand
            if 'common_box' in js:
                js['common_box'].append(new_common_box)
            else:
                js['common_box'] = [new_common_box]
            new_js_line = json.dumps(js) + "\n"
            new_json_file.write(new_js_line)
        new_json_file.close()
        print('write ' + base_file_id + '.json')
    print('add_common_box_smoke_region done.')
    print('smoke_hand:%d, smoke_nohand:%d, smoke_hard:%d'%(smoke_hand_num, smoke_nohand_num, smoke_hard_num))
    print('nosmoke_bg:%d, nosmoke_face:%d, nosmoke_susp:%d, nosmoke_cover:%d'%(nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num))
if __name__ == '__main__':
    # BUG FIX: the script reads sys.argv[1..3], but the original guard only
    # checked `len(sys.argv) < 2`, so running with one or two arguments
    # crashed with IndexError instead of printing usage.
    if len(sys.argv) < 4:
        print('usage: add_common_box_smoke_region.py org_json_dir dst_json_dir done_root_dir')
        sys.exit(1)
    org_json_dir = sys.argv[1]
    dst_json_dir = sys.argv[2]
    done_root_dir = sys.argv[3]
    add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir)
|
8,640 | 894d8d00fd05bf8648f1b95ecf30b70e7b4e841b | #Copyright [2017] [Mauro Riva <lemariva@mail.com> <lemariva.com>]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
import math as m
import utime
from machine import ADC
from ws2812 import WS2812
class vu_meter:
    """LED VU meter for MicroPython boards: samples an ADC pin, computes an
    RMS audio power estimate, and lights a proportional number of WS2812
    LEDs on a green-to-red gradient."""

    # class-level default; shadowed by the per-instance list set in __init__
    ledsColors = []
    def __init__(self, ledNumber=144, ledPower = 100, adcWindow = 1500, adcMax = 100, adcPin = 'P13', pinLEDs = 'P22'):
        self.ledPower = ledPower      # brightness percentage (0-100)
        self.ledNumber = ledNumber    # number of LEDs on the strip
        self.pinLeds = pinLEDs        # data pin driving the WS2812 chain
        self.adcPin = adcPin          # analog input pin name
        self.adcWindow = adcWindow    # samples per RMS window
        self.ledsColors = []
        self.adcIn = 0.0
        self.adcMax = adcMax          # normalisation ceiling for RMS power
        self.adcMaxDynamic = False    # when True, adcMax tracks the observed peak
        # initialize ADC
        self.init_adc()
        self.init_leds()
    def init_adc(self):
        # NOTE(review): ADC(0) selects ADC unit 0; the channel is then bound
        # to self.adcPin — confirm this matches the target board's API.
        self.adc = ADC(0)
        self.adcUnit = self.adc.channel(pin=self.adcPin)
        self.adcMean = 0  # DC offset, set by zero_calibration()
    def init_leds(self):
        """Precompute the gradient colors and light the full strip once."""
        self.ledsColors = []
        for x in range(0, self.ledNumber):
            color = self.color_vu_meter (x)
            self.ledsColors.append(color)
        self.ledChain = WS2812( ledNumber=self.ledNumber, brightness=self.ledPower, dataPin=self.pinLeds ) # dataPin is for LoPy board only
        self.ledChain.show( self.ledsColors )
    def test_leds(self):
        """Rotate the gradient once around the strip, then blank it."""
        testData = self.ledsColors
        for x in range(0, self.ledNumber):
            testData = testData[1:] + testData[0:1]
            self.ledChain.show( testData )
        self.ledChain.show([])
    def lighter(self, color, percent):
        """Scale an [r, g, b] color toward black by *percent* (0-100)."""
        percent = percent / 100
        if(percent == 1):
            return color
        if(percent == 0):
            return ([0, 0, 0])
        #if(percent < 0.65): # driver not working ok with percent under 0.65
        #    percent = 0.65
        rcolor = color[0] - color[0] * (1-percent)
        gcolor = color[1] - color[1] * (1-percent)
        bcolor = color[2] - color[2] * (1-percent)
        newcolor = ([(rcolor), (gcolor), (bcolor)])
        return newcolor
    def color_vu_meter(self, position):
        """Gradient color for LED *position*: green at 0, red at the top."""
        rcolor = (255 * position) / self.ledNumber
        gcolor = (255 * (self.ledNumber - position)) / self.ledNumber
        bcolor= 0
        newcolor = self.lighter([(rcolor), (gcolor), (bcolor)], self.ledPower)
        return newcolor
    def adc_max_dynamic(self, state = True, adcMax = 100):
        """Enable/disable peak-tracking normalisation and reset the ceiling."""
        self.adcMaxDynamic = state
        self.adcMax = adcMax
        return self.adcMaxDynamic
    def adc_max(self):
        # current normalisation ceiling
        return self.adcMax
    def zero_calibration(self):
        """Average adcWindow idle samples to measure the DC offset."""
        self.adcMean = 0
        for y in range(0, self.adcWindow):
            self.adcMean = self.adcMean + self.adcUnit.value()
        self.adcMean = self.adcMean / self.adcWindow
        return self.adcMean
    def update_rms(self):
        """Sample one window, compute RMS power and the normalised level.

        Returns [elapsed_ms, rms_power].
        """
        t1 = utime.ticks_ms()
        power = 0
        self.audioPower = 0
        for x in range(0, self.adcWindow):
            adc_value = self.adcUnit.value() - self.adcMean
            power = power + m.pow(adc_value, 2)
        power = (m.sqrt(power / self.adcWindow))
        self.audioPower = power
        t2 = utime.ticks_ms()
        time_elapsed = t2 - t1
        # with dynamic mode on, the ceiling follows the loudest peak seen
        if(self.adcMaxDynamic):
            if(self.adcMax < power):
                self.adcMax = power
        self.normalizedPower = power / self.adcMax
        #20 * log10(sqrt(sum / count))
        if(self.normalizedPower > 1):
            self.normalizedPower = 1
        return [time_elapsed, power]
    def update_leds(self):
        """Light a number of LEDs proportional to the last normalised power.

        NOTE(review): the slice starts at index 1, so LED 0 is never shown
        and the bar is one LED short — confirm [:leds_count] wasn't meant.
        """
        leds_count = m.floor(self.normalizedPower * self.ledNumber)
        self.ledChain.show( self.ledsColors[1:leds_count] )
|
8,641 | 8f17c1ed0cb273a88b986cd7fe7a45439211d536 | ### Global parameters ###
seconds_per_unit_time = 0.01
#########################
pars_spont = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"N": 50,
"w_max": 0.05,
"mu": 0.07,
"seed": None,
"tend": 50_000_000,
"r_in": 0.04,
"w_in": 0.05,
"init_W": "random",
"init_scale": 0.2,
}
pars_avg_dw = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"N": 50,
"w_max": 0.05,
"mu": 0.07,
"seed": None,
"tend": 50_000_000,
"init_W": None,
}
pars_learn = {
"tau_p": 3.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.065,
"rho": 0.0015,
"rho_ext": 0.0418,
"N": 81,
"w_max": 0.026,
"w_ext": 0.26,
"mu": 0.07,
"seed": None,
"assembly_size": 20,
"inputs": 1,
"t_ON": 18_000,
"t_OFF": 10_000_000,
"init_W": "random",
"init_scale": 0.1,
}
pars_drift = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.002,
"N": 72,
"w_max": 0.056,
"mu": 0.148,
"seed": None,
"T1": 50_000_000,
"T2": 50_000_000,
"init_W": "random",
"init_scale": 0.25,
}
pars_drift2 = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"rho_small": 0.0003,
"N": 120,
"w_max": 0.024,
"mu": 0.05,
"seed": None,
"t_switch": 30_000_000,
"p_switch": 0.03,
"init_W": "assemblies",
"num_assemblies": 6,
"assembly_size": 20,
}
pars_sizes = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"N": 150,
"mu": 0.04,
"seed": None,
"tend": 150_000_000,
"init_W": "random",
"init_scale": 0.2,
}
pars_intertwined = {
"seconds_per_unit_time": 0.01,
"tau_p": 2.6,
"tau_d": 6.5,
"amp_p": 0.08,
"amp_d": -0.042,
"rho": 0.0015,
"w_max": 0.018,
"N": 190,
"num_assemblies": 20,
"swaps": 0,
"mu": 0.017,
"seed": None,
"t_eq": 20_000_000,
"n_sims": 900,
"t_sim": 100_000,
"init_W": "intertwined",
}
# NOTE(review): duplicate definition — an identical `pars_avg_dw` dict is
# already assigned earlier in this module, so this second assignment only
# rebinds the name to equal values; one of the two can likely be removed.
pars_avg_dw = {
    "tau_p": 2.5,
    "tau_d": 5.0,
    "amp_p": 0.08,
    "amp_d": -0.0533,
    "rho": 0.0015,
    "N": 50,
    "w_max": 0.05,
    "mu": 0.07,
    "seed": None,
    "tend": 50_000_000,
    "init_W": None,
}
pars_overlap = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"rho_small": 0.0001,
"N": 60,
"w_max": 0.024,
"mu": 0.045,
"seed": None,
"t_end": 100_000_000,
"init_W": "assemblies",
"num_assemblies": 3,
"assembly_size": 20,
}
pars_sparse = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"N": 50,
"w_max": 0.05,
"mu": 0.07,
"seed": None,
"tend": 20_000_000,
"init_W": None,
"density": 0.8,
}
pars_input_strength = {
"tau_p": 3.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.066,
"rho": 0.0015,
"N": 50,
"N_target": 20,
"w_max": 0.026,
"mu": 0.01,
"seed": None,
"r_in": 0.04,
"w_in": 0.05,
"init_W": None,
}
|
8,642 | 7b5a16fdc536eb4ae3fdc08f827663613560187a | import subprocess
from whoosh.index import create_in
from whoosh.fields import *
import os
import codecs
from whoosh.qparser import QueryParser
import whoosh.index as index
import json
from autosub.autosub import autosub
from azure.storage.blob import AppendBlobService
vedio_formats = ['mp4','avi','wmv','mov'] # 1
audio_formats = ['wav','flac','mp3','aiff'] # 2
def file_upload(file_pwd, append_blob_service):
    """Transcribe the media file at *file_pwd* and index its transcript.

    Validates that the path contains a directory separator and an
    extension, runs autosub to produce a timed JSON transcript, feeds the
    transcript to whoosh_indexing() under the file's base name, and
    returns the transcript JSON string.

    NOTE(review): on an invalid path this raises the name `fileNameError`,
    which is not defined anywhere in this module, so callers actually see
    a NameError; left unchanged to avoid altering the exception contract.
    """
    # BUG FIX: `re` was used here but never imported anywhere in the
    # module, so every call failed with NameError before doing any work.
    import re
    regex = r"(.+)\/(.+)"
    if re.search(regex, file_pwd):
        match = re.search(regex, file_pwd)
        file_dir = match.group(1) + '/'
        file_name_and_type = match.group(2).lower()
    else:
        raise fileNameError('fileNameError')
    # the name must also carry an extension
    regex = r"(.+)\.(.+)"
    if re.search(regex, file_name_and_type):
        match = re.search(regex, file_name_and_type)
        file_name = match.group(1)
        file_type = match.group(2).lower()
    else:
        raise fileNameError('fileNameError')
    transcript = autosub(file_pwd, format="json")
    # single-argument parenthesized print behaves identically under the
    # Python 2 print statement used elsewhere in this file and Python 3
    print("Generated data structure: \n")
    print(file_name_and_type)
    whoosh_indexing(file_name_and_type, file_pwd, transcript, append_blob_service)
    return transcript
# def autosubing(file_pwd,transcripts_timed_pwd,file_type):
# if not os.path.isfile(transcripts_timed_pwd):
# if file_format(file_type) == 1:
# # command = "python ./autosub/autosub.py -F json -V %s" %(file_pwd)
# # command = "python ./autosub/autosub.py %s -F json" %(file_pwd)
# autosub(file_pwd, format="json")
# elif file_format(file_type) == 2:
# # command = "python ./autosub/autosub.py %s -F json" %(file_pwd)
# autosub(file_pwd, format="json")
# else:
# autosub(file_pwd, format="json")
# print "Autosubed"
# else:
# print 'file has already been autosubed'
def whoosh_indexing(file_name,file_pwd,transcript, append_blob_service):
    """Add one transcript to the on-disk Whoosh index in ./temp_index.

    Concatenates the timed transcript segments into one text body and
    upserts a document keyed by *file_name* with *file_pwd* as its path.
    *append_blob_service* is accepted but unused here (the Azure upload
    code below is commented out).

    NOTE(review): create_in() is commented out, so open_dir() fails if
    "temp_index" has just been created empty — confirm the index is
    initialised elsewhere before the first call.
    """
    transcripts_timed = json.loads(transcript)
    transcripts_content = ''
    # flatten the per-segment transcript into one searchable string
    for i in transcripts_timed:
        transcripts_content = transcripts_content + ' ' + i['content']
    # Whoosh the search engine
    schema = Schema(title=TEXT(stored=True), path=ID(stored=True), content=TEXT)
    if not os.path.exists("temp_index"):
        os.mkdir("temp_index")
    #ix = index.create_in("temp_index", schema)
    ix = index.open_dir("temp_index")
    writer = ix.writer()
    # update_document upserts on the unique fields (Python 2: .decode
    # converts byte strings to unicode as Whoosh requires)
    writer.update_document(title=file_name.decode('utf-8'), path=file_pwd.decode('utf-8'), content=transcripts_content.decode('utf-8'))
    writer.commit()
    # for filename in os.listdir('temp_index'):
    #     root, ext = os.path.splitext(filename)
    #     if root.startswith('MAIN_') and ext == '.seg':
    #         file = filename
    # print(os.path.join('temp_index', file))
    # append_blob_service.create_blob('search-file', file)
    # append_blob_service.append_blob_from_path(
    #     'search-file',
    #     file,
    #     os.path.join('temp_index', file)
    # )
    print("Written")
# throw formatError
def file_format(file_type):
    """Classify an extension string: 1 = video, 2 = audio, 3 = anything else."""
    for code, known_formats in ((1, vedio_formats), (2, audio_formats)):
        if file_type in known_formats:
            return code
    return 3
|
8,643 | b1b9840fabc96c901e5ed45e22ee63af2f3550cb |
from os import listdir
from os.path import isfile, join
import sys
cat_list = dict();
def onImport():
    """Populate the module-level cat_list from the processed Roget files.

    Reads every regular file in ../../data/roget_processed; each line has
    the form "word,category", and each word is mapped to the set of all
    categories seen for it.
    """
    mypath = "../../data/roget_processed"
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    for f_name in onlyfiles:
        # BUG FIX: the original opened every file and never closed it,
        # leaking one handle per data file; 'with' closes deterministically.
        with open(mypath + "/" + f_name) as f_temp:
            for line in f_temp:
                parts = line.strip().split(",")
                # setdefault replaces the explicit membership check/branch
                cat_list.setdefault(parts[0], set()).add(parts[1])
def getClass(in_str):
    """Return the list of categories recorded for *in_str* ([] if unknown)."""
    return list(cat_list.get(in_str, ()))
#print(cat_list);
#print("The categories of \"" + sys.argv[1] + "\" are: " + str(cat_list[sys.argv[1]]));
if __name__ != "__main__":
onImport();
|
8,644 | 85bc304c69dac8bb570f920f9f12f558f4844c49 | listtuple = [(1,2), (2,3), (3,4), (4,5)]
dictn = dict(listtuple)
print(dictn) |
8,645 | dce6ef64cf1a758ed25e11f626ce31206d18f960 | import os
from matplotlib import pyplot as plt
from matplotlib import colors
import numpy as np
class figure:
def __init__(self, dire, dpi, span, data, CIM,
learn_loss=None, eval_loss=None, different_dir_app=True, reference_steps=0, reveal_trend=1):
self.dire = self.new_num_directory(dire)
self.app_dire = [self.make_num_directory("app", i) for i in range(data.app_num)]
self.trend_dire = [self.make_num_directory("trend", i) for i in range(len(data.trend_rule.w))]
self.dpi = dpi
self.span = span
self.app = data.apps
self.trend_rule = data.trend_rule
self.prediction = CIM.prediction
self.prediction_e = CIM.prediction_est_rule
self.prediction_only_ci = CIM.prediction_only_ci
self.predfail_app_num = CIM.predfail_app_num
self.cap_rule_num = CIM.cap_rule_num
self.add_rule_num = CIM.add_rule_num
self.lost_rule_num = CIM.lost_rule_num
self.useless_rule_num = CIM.useless_rule_num
self.merge_rule_num = CIM.merge_rule_num
self.learn_loss = learn_loss
self.eval_loss = eval_loss
self.diff_dir = different_dir_app
self.reference_steps = reference_steps
self.reveal_trend = reveal_trend
def new_num_directory(self, path):
n = 1
while True:
if not os.path.exists(path + "_" + str(n)):
os.mkdir(path + "_" + str(n))
break
else:
n += 1
return path + "_" + str(n) + "/"
def make_num_directory(self, name, num):
os.mkdir(self.dire + "/" + name + "_" + str(num))
return self.dire + "/" + name + "_" + str(num) + "/"
def find_min_max(self, data_list, length, standarize_zero=True):
if standarize_zero:
min = 0
max = 0
else:
min = data_list[0][0]
max = data_list[0][0]
for data in data_list:
for j in range(length):
if j < len(data):
if data[j] < min:
min = data[j]
if data[j] > max:
max = data[j]
return min, max
def savefig_result(self, name):
x = list(range(self.span))
if self.diff_dir:
# トレンドルールごとの色(chosenRuleより)
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
for i, app in enumerate(self.app):
min, max = self.find_min_max([self.prediction[i], self.prediction_e[i]], self.span)
plt.figure(figsize=(len(x) / 10, 5.5))
# (chosenRuleより)
for j in range(len(self.trend_rule.w)):
plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1, max * 1.1 + 0.1],
[min * 1.1 - 0.1, min * 1.1 - 0.1],
facecolor=cycle_tr[j], alpha=0.2,
label="Chosenrule:" + str(j))
for j in range(self.span):
plt.fill_between([j - 0.5, j + 0.5], [max*1.1+0.1, max*1.1+0.1], [min*1.1-0.1, min*1.1-0.1],
facecolor=cycle_tr[self.app[i].trend_idx[j]], alpha=0.2)
plt.plot(x, app.trend, label="trend", linestyle="dotted", color="black")
plt.plot(x[self.reference_steps:], self.prediction[i],
label="LSTM pred", linestyle="dotted", color="blue")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.prediction_e[i],
label="CIM pred", color="orange")
if self.learn_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:], self.learn_loss[i], alpha=0.3,
label="learn loss")
if self.eval_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:], self.eval_loss[i], alpha=0.3, marker="X",
label="eval loss")
plt.xlabel('season')
plt.ylabel('trend value')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.app_dire[i] + name + ".png", dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x)/10, 5.5))
# アプリごとの色
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
for i, app in enumerate(self.app):
plt.plot(x, self.app[i].trend, color=cycle_app[i], label="trend (app:" + str(i) + ")", linestyle="dotted")
plt.plot(x[self.reference_steps:], self.prediction[i], color=cycle_app[i], label="pred (app:" + str(i) + ")")
if self.learn_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:], self.learn_loss[i], color=cycle_app[i], alpha=0.3,
label="learn loss (app:" + str(i) + ")")
if self.eval_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:], self.eval_loss[i], color=cycle_app[i], alpha=0.3, marker="X",
label="evalu loss (app:" + str(i) + ")")
plt.xlabel('season')
plt.ylabel('trend value')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def savefig_ruleweight(self, name):
x = list(range(self.span))
if self.diff_dir:
# 特徴ごとの色
if len(self.trend_rule.w[0]["value"]) <= 10:
cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w[0]["value"]) <= 20:
cycle_ft = plt.cm.get_cmap('tab20').colors
else:
cycle_ft = list(colors.XKCD_COLORS.items())[:100]
for i in range(len(self.trend_rule.w)):
plt.figure(figsize=(len(x) / 10, 5.5))
# 特徴毎に
for j in range(len(self.trend_rule.w[i]["value"])):
plt.plot(x, self.trend_rule.w[i]["value"][j][:-1], color=cycle_ft[j], label="feature:" + str(j))
plt.xlabel('season')
plt.ylabel('weight of trend rule')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.trend_dire[i] + name + ".png", dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x)/10, 5.5))
# トレンドルールごとの色
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
# 特徴ごとの色
if len(self.trend_rule.w[0]["value"]) <= 10:
cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w[0]["value"]) <= 20:
cycle_ft = plt.cm.get_cmap('tab20').colors
else:
cycle_ft = list(colors.XKCD_COLORS.items())[:100]
width = 0.8 / len(self.trend_rule.w[0]["value"])
#トレンドルール毎に
for i in range(len(self.trend_rule.w)):
bottom = np.array(- i * 2.0)
# 特徴毎に
for j in range(len(self.trend_rule.w[i]["value"])):
if i == 0:
plt.bar(x + np.array([width * float(j)] * len(x)), self.trend_rule.w[i][j][:-1],
color=cycle_ft[j], align='edge', bottom=bottom, width=width, label="feature:" + str(j))
else:
plt.bar(x + np.array([width * float(j)] * len(x)), self.trend_rule.w[i]["value"][j][:-1],
color=cycle_ft[j], align='edge', bottom=bottom, width=width)
plt.fill_between(list(range(self.span+1)), [- i * 2.0 + 1] * (len(x)+1), [- (i+1) * 2.0 + 1] * (len(x)+1),
facecolor=cycle_tr[i], alpha=0.2, label="trendrule:" + str(i))
plt.xlabel('season')
plt.ylabel('weight of trend rule')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def savefig_chosenrule(self, name):
x = list(range(self.span))
if self.diff_dir:
pass # savefig_resultに統合
else:
plt.figure(figsize=(len(x)/10, 5.5))
# アプリごとの色
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
# トレンドルールごとの色
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
# 凡例表示用
for i in range(len(self.trend_rule.w)):
plt.scatter(x, np.array([0] * len(x)), color=cycle_tr[i], s=1, marker="D",
label="trendrule:" + str(i))
for id in range(len(self.app)):
colorArr = []
for i in self.app[id].trend_idx:
colorArr.append(cycle_tr[i])
plt.scatter(x, np.array([- id] * len(x)), color=cycle_app[id], s=150, label="app:" + str(id))
plt.scatter(x, np.array([- id] * len(x)), color="w", s=70)
plt.scatter(x, np.array([- id] * len(x)), color=colorArr, s=15, marker="D", alpha=0.5)
plt.xlabel('シーズン')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def savefig_compare_prediction(self, name):
x = list(range(self.span))
if self.diff_dir:
for i in range(len(self.app)):
plt.figure(figsize=(len(x) / 10, 5.5))
# *************************(変更してください)
plt.plot(x[self.reference_steps + self.reveal_trend:],
np.abs(np.array(self.prediction_only_ci[i]) - np.array(self.app[i].trend[self.reference_steps + self.reveal_trend:])),
label="only CI loss", linestyle="dotted", color="green")
plt.plot(x[self.reference_steps:],
np.abs(np.array(self.prediction[i]) - np.array(self.app[i].trend[self.reference_steps:])),
label="LSTM loss", linestyle="dotted", color="blue")
plt.plot(x[self.reference_steps + self.reveal_trend:],
np.abs(np.array(self.prediction_e[i]) - np.array(self.app[i].trend[self.reference_steps + self.reveal_trend:])),
label="CIM loss", color="orange")
plt.xlabel('season')
plt.ylabel('prediction loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.app_dire[i] + name + ".png", dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x)/10, 5.5))
# アプリごとの色
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
for id in range(len(self.app)):
plt.plot(x[self.reference_steps:], np.abs(np.array(self.prediction[id]) - np.array(self.app[id].trend[self.reference_steps:])),
color=cycle_app[id], label="classify loss (app:" + str(id) + ")", linestyle="dotted")
plt.plot(x[self.reference_steps + self.reveal_trend:], np.abs(np.array(self.prediction_e[id]) - np.array(self.app[id].trend[self.reference_steps + self.reveal_trend:])),
color=cycle_app[id], label="analyse loss (app:" + str(id) + ")")
plt.xlabel('season')
plt.ylabel('prediction loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def savefig_compare_prediction_ave(self, name):
x = list(range(self.span))
if self.diff_dir:
prediction = []
prediction_e = []
prediction_ci = []
# 各アプリに対して平均を算出
for j in range(self.span - self.reference_steps):
sum = 0
sum_e = 0
sum_ci = 0
for i in range(len(self.app)):
sum += (self.prediction[i][j] - self.app[i].trend[j + self.reference_steps])**2
if j < self.span - self.reference_steps - self.reveal_trend:
sum_e += (self.prediction_e[i][j] - self.app[i].trend[j + self.reference_steps + self.reveal_trend])**2
sum_ci += (self.prediction_e[i][j] - self.app[i].trend[j + self.reference_steps + self.reveal_trend])**2
prediction.append(sum / len(self.app))
if j < self.span - self.reference_steps - self.reveal_trend:
prediction_e.append(sum_e / len(self.app))
prediction_ci.append(sum_ci / len(self.app))
plt.figure(figsize=(len(x) / 10, 5.5))
plt.xlabel('season')
plt.ylabel('prediction loss average')
# *************************(変更してください)
plt.plot(x[self.reference_steps + self.reveal_trend:], prediction_ci,
label="only CI loss", linestyle="dotted")
plt.plot(x[self.reference_steps:], prediction, label="LSTM loss", linestyle="dotted")
plt.plot(x[self.reference_steps + self.reveal_trend:], prediction_e, label="CIM loss")
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
def savefig_rule_num(self, name):
x = list(range(self.span))
plt.figure(figsize=(len(x)/10, 5.5))
chart_num = 6
width = 0.8 / chart_num
plt.plot(x[self.reference_steps + self.reveal_trend:], self.predfail_app_num, label="truth rule number")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.predfail_app_num, label="prediction fail app")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.cap_rule_num, label="captured rule")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.add_rule_num, label="add rule")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.lost_rule_num, label="lost rule")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.useless_rule_num, label="useless rule")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.merge_rule_num, label="merge rule")
plt.xlabel('season')
plt.ylabel('number')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def save_config(self, name, cfg):
    """Dump the experiment configuration *cfg* to ``<self.dire><name>.json``.

    Scalar settings are stored directly; each DATATYPE entry is reduced to
    its feature name plus the ``str(type(...))`` of its sample data.

    :param name: output file name without the ``.json`` extension
    :param cfg: config module/object exposing the settings as attributes
    """
    import json
    setting = dict(
        APP_NUM=cfg.APP_NUM,
        SPAN=cfg.SPAN,
        REVEAL_TREND=cfg.REVEAL_TREND,
        FIRST_RULE_NUM=cfg.FIRST_RULE_NUM,
        SHIFT_TREND_RULE=cfg.SHIFT_TREND_RULE,
        APPEAR_RATE=cfg.APPEAR_RATE,
        DISAPPEAR_RATE=cfg.DISAPPEAR_RATE,
        EVALUATE_THRESHOLD_PRED_FAIL=cfg.EVALUATE_THRESHOLD_PRED_FAIL,
        SAMPLING=cfg.SAMPLING,
        EVALUATE_THRESHOLD_DELETE_RULE=cfg.EVALUATE_THRESHOLD_DELETE_RULE,
        EVALUATE_THRESHOLD_ADD_RULE=cfg.EVALUATE_THRESHOLD_ADD_RULE,
        EVALUATE_THRESHOLD_MERGE_RULE=cfg.EVALUATE_THRESHOLD_MERGE_RULE,
        THRESHOLD_APPNUM=cfg.THRESHOLD_APPNUM,
        TRY_NEWRULE_NUM=cfg.TRY_NEWRULE_NUM,
        LSTM_REFERENCE_STEPS=cfg.LSTM_REFERENCE_STEPS,
        LSTM_EPOCHS=cfg.LSTM_EPOCHS,
        NN_EPOCHS=cfg.NN_EPOCHS,
        DATATYPE=[dict(
            name=feat["name"],
            type=str(type(feat["data"]))
        ) for feat in cfg.DATATYPE],
        FIRST_BIN=cfg.FIRST_BIN
    )
    # Fix: use a context manager so the handle is always flushed and closed
    # (the original opened the file and never closed it).
    with open(self.dire + name + '.json', 'w') as fw:
        json.dump(setting, fw, indent=4)
    return
8,646 | 4e31619efcaf6eeab3b32116b21e71de8202aee2 | from framework import *
from pebble_game import *
from constructive_pebble_game import *
from nose.tools import ok_
import numpy as np
# initialise the seed for reproducibility np.random.seed(102)
# --- small example frameworks and rigidity checks -------------------------
fw_2d = create_framework([0,1,2,3], [(0,1), (0,3), (1,2), (1,3), (2,3)], [(2,3), (4,4), (5,2), (1,1)])
# a 3d fw constricted to 2d
fw_3d = create_framework([0,1,2,3], [(0,1), (0,3), (1,2), (1,3), (2,3)], [(2,3, 0), (4,4, 0), (5,2, 0), (1,1, 0)])
R = create_rigidity_matrix(fw_3d, 3)
# Figure 39 example: a quadrilateral with one diagonal.
fig_39_nodes = [0,1,2,3]
fig_39_edges = [(0,1), (0,2), (0,3), (1,2), (2,3)]
fig_39_pos = [(0,0), (3,0), (3,2), (0,2)]
fig_39_fw = create_framework(fig_39_nodes, fig_39_edges, fig_39_pos)
R39 = create_rigidity_matrix(fig_39_fw, 2)
rigid_3d = create_framework([0,1,2,3,4],
                            [(0,1), (0,3), (1,2), (1,3), (2,3), (0,2), (0,4), (1,4), (2,4)],
                            [(2,3, 0), (4,4, 5), (5,2, 0), (1,1, 0), (10,10,10)])
fw_1d = create_framework([0,1,2],
                         [(0,1), (1,2), (0,2)],
                         [1,6,20])
# Sanity checks on infinitesimal rigidity (nose-style assertions).
ok_(is_inf_rigid(fw_2d, 2))
ok_(not is_inf_rigid(fw_3d, 3))
ok_(is_inf_rigid(fw_1d, 1))
# ok_(not is_inf_rigid(deformable_fw, 2))
# draw_framework(deformable_fw)
reduced_fw = create_reduced_fw(50,0.2, 1)
# p = pebble_game(reduced_fw, 2, 3)
# print(p[1])
# draw_framework(reduced_fw)
# draw_comps(reduced_fw, p[1])
# experimenting with reducing a framework gradually and tracking the number of components
rand_fw = create_random_fw(10,0.1, 1)
print(len(rand_fw.nodes))
draw_framework(rand_fw)
# num_comps = constructive_pebble_game(rand_fw, 2, 3)
# fig = plt.figure(figsize=(20,10))
# plotting the number of comps(reversed to show removal)
# plt.plot(num_comps)
# # fig.savefig("comp_numbers.pdf")
# plt.show()
# draw_framework(rand_fw, "before.pdf")
# num_comps = []
# counter = 0
# while len(rand_fw.edges) > 2*len(rand_fw.nodes):
# index = np.random.choice(len(rand_fw.edges))
# edge = list(rand_fw.edges)[index]
# if rand_fw.degree(edge[0]) > 2 and rand_fw.degree(edge[1]) > 2:
# counter += 1
# rand_fw.remove_edge(edge[0], edge[1])
# comps = pebble_game(rand_fw, 2, 3)[1]
# num_comps.append(len(comps))
# draw_comps(rand_fw, comps, filename="after"+str(counter)+".pdf", show=False)
# plt.close("all")
# draw_comps(rand_fw, comps, "after.pdf")
# Edges are not reported consistently so will always sort them before indexing
# of the edges will always be the same
# --- applying forces / stress visualisation -------------------------------
def_node = [0,1,2,3]
def_edge = [(0,1), (0,3), (1,2), (2,3)]
def_pos = [(0,0), (4,0), (4,2), (0,2)]
deformable_fw = create_framework(def_node, def_edge, def_pos)
R = create_rigidity_matrix(deformable_fw, 2)
# creating a force to apply
# as an example, move points 0 and 2 towards each other
# f is a d*n length vector
R = create_rigidity_matrix(rand_fw, 2)
f = [0] * len(R[0])
f[2] = -0.1
f[3] = 0.1
f[14] = -0.1
f[15] = 0.1
f = np.array(f)
print(R)
print(f)
print(R.dot(f))
draw_stresses(rand_fw, f)
# draw_framework(fw_2d)
# Square with one diagonal; push corners 0 and 2 apart along both axes.
sq_nodes = [0,1,2,3]
sq_edges = [(0,1), (0,3), (1,2), (2,3), (0,2)]
sq_pos = [(0,0), (4,0), (4,4), (0,4)]
sq_fw = create_framework(sq_nodes, sq_edges, sq_pos)
# print(sq_fw.edges)
# print(sorted(sq_fw.edges))
f = [0] * len(sq_nodes) * 2
f[0] = 1
f[1] = 1
f[4] = -1
f[5] = -1
draw_stresses(sq_fw, f)
|
8,647 | 24274dddbeb1be743cfcac331ee688d48c9a46dd | import requests
from bs4 import BeautifulSoup
'''
OCWから学院一覧を取得するスクリプト(6個くらいだから必要ない気もする)
gakuinListの各要素は次のような辞書に鳴っている
{
'name' : 学院名,
'url' : その学院の授業の一覧のurl,
}
'''
def getGakuinList():
    """Scrape the OCW top page and return the list of schools ("gakuin").

    :return: list of dicts ``{'name': <school name>, 'url': <lecture-list URL>}``
    """
    url = "http://www.ocw.titech.ac.jp/"
    response = requests.get(url)
    soup = BeautifulSoup(response.content,"lxml")
    topMainNav = soup.find("ul",id="top-mein-navi")
    gakubus = topMainNav.find_all(class_="gakubuBox")
    gakuinList = []
    for gakubu_div in gakubus:
        gakuin = gakubu_div.find(class_="gakubuHead").span.string
        # Keep only entries whose name ends with the school suffix "学院".
        if gakuin[-2::] != "学院":
            continue
        # The anchor wrapping the box holds a relative link; prepend the host.
        gakuin_url = url + gakubu_div.parent['href']
        gakuinList.append({'name':gakuin,'url':gakuin_url})
    return gakuinList
'''
学院名とurlを渡されたらその学院の授業一覧を持ってくる
'''
def getLectures(name, url):
    """Fetch and print the lecture list for one school (gakuin).

    Prints, per lecture: title, lecturers, detail URL and quarter.

    :param name: school name (kept for the caller's interface)
    :param url: URL of the school's lecture-list page
    """
    urlprefix = "http://www.ocw.titech.ac.jp"
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'lxml')
    table = soup.find('table', class_='ranking-list').tbody
    for item in table.find_all('tr'):
        code = item.find('td', class_='code').string
        # Fix: renamed to `title` -- the original clobbered the `name` parameter.
        title = item.find('td', class_='course_title').a.string
        lecture_url = urlprefix + item.find('td', class_='course_title').a['href']
        teachers = [te.string for te in item.find('td', class_='lecturer').find_all('a')]
        quaterColumn = item.find('td', class_='opening_department')  # TODO: not extracted correctly yet
        quater = quaterColumn.a.string if quaterColumn is not None else ''
        if not title or not code:  # skip rows with missing code/title
            continue
        # The guard above guarantees both are non-empty; strip directly.
        code = code.strip()
        title = title.strip()
        if quater:
            quater = quater.strip()
        print(title)
        print(teachers)
        print(lecture_url)
        print(quater)
if __name__=='__main__':
#print(getGakuinList())
getLectures('情報理工学院','http://www.ocw.titech.ac.jp/index.php?module=General&action=T0100&GakubuCD=4&lang=JA')
|
8,648 | 4293ad0b2a4a352d6bdc4b860448c4a3b14ca629 | import torch
from torchvision import transforms
from torch.autograd import Variable
class NormalizeImageDict(object):
    """Normalize the image tensors stored under ``image_keys`` in a sample dict.

    When ``normalizeRange`` is True each image is first divided by 255 (so
    pixel values fall in [0, 1]) before applying the ImageNet mean/std
    normalization used by torchvision pretrained models.
    """
    def __init__(self,image_keys, normalizeRange=True):
        self.image_keys = image_keys          # keys in the sample dict to normalize
        self.normalizeRange = normalizeRange
        self.normalize = transforms.Normalize(mean=[0.485,0.456,0.406],
                                              std=[0.229,0.224,0.225])
    def __call__(self,sample):
        for key in self.image_keys:
            if self.normalizeRange:
                sample[key] /= 255.0  # NOTE: mutates the tensor in the input sample in place
            sample[key] = self.normalize(sample[key])
        return sample
def simple_formatter(zipcode: str, address: str) -> str:
    """Return the Japanese sentence '<zipcode>は「<address>」です'."""
    return zipcode + 'は「' + address + '」です'
|
8,650 | 16dd73f2c85eff8d62cf0e605489d0db1616e36e | # Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
"""
Holds the AWS SNS email service that can be used to send emails.
"""
import boto3
import os
import cla
import uuid
import json
import datetime
from cla.models import email_service_interface
region = os.environ.get('REGION', '')
sender_email_address = os.environ.get('SES_SENDER_EMAIL_ADDRESS', '')
topic_arn = os.environ.get('SNS_EVENT_TOPIC_ARN', '')
class SNS(email_service_interface.EmailService):
    """
    AWS SNS email client model.

    Publishes email events as JSON messages to an SNS topic; a downstream
    consumer performs the actual delivery (MailChimp/Mandrill template).
    """
    def __init__(self):
        # Populated by initialize() from module-level environment values.
        self.region = None
        self.sender_email = None
        self.topic_arn = None
    def initialize(self, config):
        # NOTE(review): `config` is ignored here; values come from the
        # REGION / SES_SENDER_EMAIL_ADDRESS / SNS_EVENT_TOPIC_ARN env vars.
        self.region = region
        self.sender_email = sender_email_address
        self.topic_arn = topic_arn
    def send(self, subject, body, recipient, attachment=None):
        """Build the email event JSON and publish it; errors are logged, not raised."""
        msg = self.get_email_message(subject, body, self.sender_email, recipient, attachment)
        # Connect to SNS.
        connection = self._get_connection()
        # Send the email.
        try:
            self._send(connection, msg)
        except Exception as err:
            cla.log.error('Error while sending AWS SNS email to %s: %s', recipient, str(err))
    def _get_connection(self):
        """
        Mockable method to get a connection to the SNS service.
        """
        return boto3.client('sns', region_name=self.region)
    def _send(self, connection, msg): # pylint: disable=no-self-use
        """
        Mockable send method.
        """
        connection.publish(
            TopicArn=self.topic_arn,
            Message=msg,
        )
    def get_email_message(self, subject, body, sender, recipients, attachment=None): # pylint: disable=too-many-arguments
        """
        Helper method to get a prepared email message given the subject,
        body, and recipient provided.

        :param subject: The email subject
        :type subject: string
        :param body: The email body
        :type body: string
        :param sender: The sender email
        :type sender: string
        :param recipients: An array of recipient email addresses
        :type recipient: string
        :param attachment: The attachment dict (see EmailService.send() documentation).
        :type: attachment: dict
        :return: The json message
        :rtype: string
        """
        msg = {}
        source = {}
        data = {}
        data["body"] = body
        data["from"] = sender
        data["subject"] = subject
        data["type"] = "cla-email-event"
        # Accept either a single address or a list of addresses.
        if isinstance(recipients, str):
            data["recipients"] = [recipients]
        else:
            data["recipients"] = recipients
        # Added MailChip/Mandrill support by setting the template and adding
        # email body to the parameters list under the BODY attribute
        data["template_name"] = "EasyCLA System Email Template"
        data["parameters"] = {
            "BODY": body
        }
        msg["data"] = data
        source["client_id"] = "easycla-service"
        source["description"] = "EasyCLA Service"
        source["name"] = "EasyCLA Service"
        msg["source_id"] = source
        msg["id"] = str(uuid.uuid4())  # unique event id
        msg["type"] = "cla-email-event"
        msg["version"] = "0.1.0"
        json_string = json.dumps(msg)
        # cla.log.debug(f'Email JSON: {json_string}')
        return json_string
class MockSNS(SNS):
    """
    Mockable AWS SNS email client.

    Captures published messages instead of talking to AWS, for use in tests.
    """
    def __init__(self):
        super().__init__()
        # Every message passed to _send() is recorded here for inspection.
        self.emails_sent = []
    def _get_connection(self):
        # No real boto3 client is needed for the mock.
        return None
    def _send(self, connection, msg):
        self.emails_sent.append(msg)
|
8,651 | d508cb0a8d4291f1c8e76d9d720be352c05ef146 | """
Given a list of partitioned and sentiment-analyzed tweets, run several trials
to guess who won the election
"""
import json
import math
import sys
import pprint
import feature_vector
def positive_volume(f):
    """Share of total tweet volume that is positive for this candidate."""
    return f['positive_percent'] * f['relative_volume']
def inv_negative_volume(f):
    """One minus the candidate's negative-sentiment tweet volume."""
    negative = f['relative_volume'] * f['negative_percent']
    return 1.0 - negative
def normalized_sentiment(f):
    """Map average sentiment from [-1, 1] onto [0, 1]."""
    return 0.5 * (f['average_sentiment'] + 1)
def normalized_square_sentiment(f):
    """Map average squared sentiment from [-1, 1] onto [0, 1]."""
    return 0.5 * (f['avg_square_sentiment'] + 1)
def weighted_sentiment(f):
    """Volume-weighted average sentiment, mapped onto [0, 1]."""
    weighted = f['relative_volume'] * f['average_sentiment']
    return (weighted + 1) / 2
# We want a function that's close to 1 unless the relative tweet volume is low
def quadratic_diff_penalty(f, scale):
    """Penalty in (−inf, 1]: quadratic falloff as relative volume drops below 1."""
    gap = 1 - f['relative_volume']
    return 1 - scale * gap ** 2
# Experiment using x ** 3 as the penalty function
def cubic_diff_penalty(f, scale):
    """Penalty in (−inf, 1]: cubic falloff as relative volume drops below 1."""
    gap = 1 - f['relative_volume']
    return 1 - scale * gap ** 3
def linear_combination(f, a1, a2, a3, a4 = 0, a5 = 0):
    """Weighted blend of the five feature heuristics (helpers inlined)."""
    vol = f['relative_volume']
    return (a1 * (vol * f['positive_percent'])
            + a2 * (1.0 - vol * f['negative_percent'])
            + a3 * ((f['average_sentiment'] + 1) / 2)
            + a4 * ((f['avg_square_sentiment'] + 1) / 2)
            + a5 * ((vol * f['average_sentiment'] + 1) / 2))
def run_trial(function, feature_map):
    """Score every candidate with *function*, then normalize so scores sum to 1.

    :param function: scoring heuristic taking one candidate's feature dict
    :param feature_map: dict mapping candidate -> feature dict
    :return: dict mapping candidate -> normalized score
    """
    raw_scores = {cand: function(feats) for cand, feats in feature_map.items()}
    total = sum(raw_scores.values())
    return {cand: score / total for cand, score in raw_scores.items()}
def predict(tweet_dictionary, print_all):
    """Run every trial heuristic over the candidates' tweet features and
    print the predicted election outcome.

    :param tweet_dictionary: partitioned, sentiment-analyzed tweets per candidate
    :param print_all: when True, also print the feature vector and each trial's scores
    """
    features = feature_vector.gen_feature_vector(tweet_dictionary)
    # Ordered history of hand-tuned heuristics; the LAST entry is the model
    # actually used for the final prediction.
    trial_list = [
        #1
        lambda f: linear_combination(f, 1, 0, 0),
        lambda f: linear_combination(f, 0.5, 0, 0.5),
        lambda f: linear_combination(f, 0.33, 0.33, 0.33),
        lambda f: linear_combination(f, 0.25, 0.25, 0.5),
        lambda f: linear_combination(f, 0.5, 0.25, 0.25),
        lambda f: linear_combination(f, 0.2, 0.1, 0.0, 0.7),
        lambda f: linear_combination(f, 0.0, 0.0, 0.0, 1.0),
        lambda f: linear_combination(f, 0.5, 0.0, 0.0, 0.5),
        lambda f: linear_combination(f, 0.3, 0.15, 0.15, 0.3),
        lambda f: linear_combination(f, 0.5, 0.1, 0.1, 0.3),
        #11
        lambda f: linear_combination(f, 0.6, 0.0, 0.0, 0.4),
        lambda f: linear_combination(f, 0.55, 0.0, 0.2, 0.25),
        lambda f: linear_combination(f, 0.5, 0.1, 0.15, 0.25),
        lambda f: linear_combination(f, 0.5, 0.05, 0.1, 0.35),
        lambda f: linear_combination(f, 0.4, 0.05, 0.1, 0.35, 0.1),
        lambda f: linear_combination(f, 0.45, 0.05, 0.05, 0.35, 0.1),
        lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.35, 0.2),
        lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3),
        lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3) * quadratic_diff_penalty(f, 1),
        lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3) * quadratic_diff_penalty(f, 0.25),
        # 21
        lambda f: linear_combination(f, 0.25, 0.0, 0.15, 0.4, 0.2) * quadratic_diff_penalty(f, 0.25),
        lambda f: linear_combination(f, 0.25, 0.0, 0.2, 0.45, 0.1) * quadratic_diff_penalty(f, 0.3),
        lambda f: linear_combination(f, 0.25, 0.0, 0.2, 0.45, 0.1) * quadratic_diff_penalty(f, 0.4),
        lambda f: linear_combination(f, 0.2, 0.0, 0.2, 0.5, 0.1) * quadratic_diff_penalty(f, 0.4),
        lambda f: linear_combination(f, 0.2, 0.0, 0.2, 0.5, 0.1) * quadratic_diff_penalty(f, 0.45),
        lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * quadratic_diff_penalty(f, 0.45),
        lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * quadratic_diff_penalty(f, 0.5),
        lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.5),
        lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.6),
        lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.7),
        # 31
        lambda f: linear_combination(f, 0.1, 0.0, 0.25, 0.65, 0) * cubic_diff_penalty(f, 0.7),
        lambda f: linear_combination(f, 0.1, 0.0, 0.25, 0.65, 0) * cubic_diff_penalty(f, 0.75),
        lambda f: linear_combination(f, 0.05, 0.0, 0.25, 0.7, 0) * cubic_diff_penalty(f, 0.75),
    ]
    if print_all:
        print('Feature vector:')
        pprint.pprint(features)
        print('\nTrial Results:')
        for index, function in enumerate(trial_list):
            print('trial %d:' % (index + 1))
            print(run_trial(function, features))
            print()
        print()
    final_trial_result = run_trial(trial_list[-1], features)
    print('Predicted Outcome:')
    max_percent = 0
    winning_candidate = ''
    for candidate, percent in final_trial_result.items():
        # Fix: was `percent * 100008` (typo of 100000); this prints the
        # percentage truncated to three decimal places.
        print(candidate + ': ', int(percent * 100000) / 1000)
        if percent > max_percent:
            max_percent = percent
            winning_candidate = candidate
    print('\nProjected Winner:')
    print(winning_candidate)
if __name__ == '__main__':
    # Usage: python predict.py <tweet_json_file> [print_all]
    if len(sys.argv) != 2 and len(sys.argv) != 3:
        print('Usage: python predict.py filename [print_all = True]')
        exit()
    with open(sys.argv[1], 'r') as tweet_file:
        # print_all defaults to True when the optional argument is omitted.
        print_all = True if len(sys.argv) == 2 else (sys.argv[2].lower() == 'true')
        predict(json.loads(tweet_file.read()), print_all)
|
8,652 | 2b3a42fed98b43cdd78edd751b306ba25328061a | import PyPDF2
from pathlib import Path
def get_filenames():
    """
    Get PDF files not yet reordered in the current directory
    :return: list of PDF file names
    """
    return [path for path in Path('.').glob('*.pdf')
            if 'reordered' not in path.stem]
def appendix_and_index_pages():
    """
    Prompt user to input appendix pages (if one exists) and index pages
    :return: start and end pages of the appendix and index
    (appendix_start, appendix_end, index_start, index_end), 1-based
    """
    def index_pages():
        """
        Prompt user to input index pages
        :return: start and end pages of index
        """
        index_start = int(input('Enter the start page of your index: '))
        index_end = int(input('Enter the end page of your index: '))
        return index_start, index_end
    is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')
    if is_appendix == 'y':
        appendix_start = int(input('Enter the start page of your appendix: '))
        appendix_end = int(input('Enter the end page of your appendix: '))
        index_start, index_end = index_pages()
    else:
        # When there is no appendix, set appendix start and end pages such as the page ranges of the
        # appendix and the post-appendix (pre-index) will be blank, and the page range of the post-insert
        # will be from the insert point to the start of the index. See def reorder for more details.
        index_start, index_end = index_pages()
        appendix_start = index_start
        appendix_end = index_start - 1
    return appendix_start, appendix_end, index_start, index_end
def yes_or_no(prompt):
    """
    Prompt user to answer yes or no to a prompt, and keep asking if user did not input a correct yes/no input
    :param prompt: str prompting user to input their response
    :return: 'y' or 'n' once user has correctly input their response
    """
    response = input(prompt)
    # Loop until the answer is exactly 'y' or 'n' (case-sensitive).
    while response not in ['y', 'n']:
        print('Invalid input')
        response = input(prompt)
    return response
def write_pages(page_range, pdf_read_object, pdf_write_object):
    """
    Read pages within certain page range from the PDF read object and write those pages to the PDF write object
    :param page_range: iterable containing pages to be read and written
    :param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from
    :param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to
    :return: None, write object is modified in place.
    """
    for number in page_range:
        pdf_write_object.addPage(pdf_read_object.getPage(number))
def reorder(filename, insert_page, appendix_start, appendix_end, index_start, index_end):
    """
    Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name

    All page arguments are 1-based (human page numbers); they are converted
    to 0-based ranges below before slicing.

    :param filename: name of the PDF file to be reordered
    :param insert_page: page in the original PDF after which the appendix and index are to be inserted
    :param appendix_start: appendix start page in the original PDF
    :param appendix_end: appendix end page in the original PDF
    :param index_start: index start page in the original PDF
    :param index_end: index end page in the original PDF
    :return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF
    :raises ValueError: when any page number is out of range or inconsistent
    """
    with filename.open('rb') as read_object, open(filename.stem + '_reordered.pdf', 'wb') as write_object:
        pdf_read_object = PyPDF2.PdfFileReader(read_object)
        pdf_write_object = PyPDF2.PdfFileWriter()
        pdf_length = pdf_read_object.numPages
        # Check for invalid page numbers
        if insert_page < 1 or insert_page >= appendix_start:
            raise ValueError('Invalid insert page')
        # appendix_start == index_start encodes "no appendix" (see
        # appendix_and_index_pages), so skip the appendix checks then.
        if appendix_start != index_start and appendix_start > appendix_end:
            raise ValueError('Invalid appendix start page')
        if appendix_start != index_start and appendix_end >= index_start:
            raise ValueError('Invalid appendix end page')
        if index_start > index_end:
            raise ValueError('Invalid index start page')
        if index_end > pdf_length:
            raise ValueError('Invalid index end page')
        # Prepare page ranges to be ordered (0-based, end-exclusive)
        pre_insert = range(insert_page)
        post_insert = range(insert_page, appendix_start - 1)
        appendix = range(appendix_start - 1, appendix_end)
        post_appendix = range(appendix_end, index_start - 1)
        index = range(index_start - 1, index_end)
        post_index = range(index_end, pdf_length)
        # Copy pages from original PDF object to new PDF object with the new ordered page ranges
        for page_range in [pre_insert, index, appendix, post_insert, post_appendix, post_index]:
            write_pages(page_range, pdf_read_object, pdf_write_object)
        # Write ordered PDF object to PDF file
        pdf_write_object.write(write_object)
def main():
    """Interactive loop: list unordered PDFs, ask for page numbers and reorder."""
    while True:
        print('------')
        filenames = get_filenames()
        if filenames:
            print('Unordered PDF files in the current directory: ')
            # Menu is 1-based for the user.
            for index, filename in enumerate(filenames):
                print('{}: {}'.format(index + 1, filename))
            chosen_index = input('\nEnter the number of the file you want to reorder (type q to quit): ')
            if chosen_index == 'q':
                break
            insert_page = int(input('Enter the page you want your appendix and index to come after: '))
            appendix_start, appendix_end, index_start, index_end = appendix_and_index_pages()
            try:
                filename = filenames[int(chosen_index) - 1]
                reorder(filename, insert_page, appendix_start, appendix_end, index_start, index_end)
                print('\n{} reordered.'.format(filename))
            except Exception as error:
                # Bad menu choice, non-numeric input or reorder() validation error.
                print(error)
                print('Restarting program\n')
                continue
        else:
            print('No unordered PDF found in current directory')
        # Ask user to reorder additional PDFs
        is_continue = yes_or_no('\nDo you want to reorder another PDF (y/n)? ')
        if is_continue == 'n':
            break
|
8,653 | f0c082968e26d414b0dbb679d4e5077056e99979 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import xmlrunner
import os
import sys
import glob
import yaml
ASSETS_DIR = ""
class GenerateMachineConfig(unittest.TestCase):
    """Validates the generated dual-stack MachineConfig manifests."""
    def setUp(self):
        # Load every dual-stack MachineConfig YAML produced under ASSETS_DIR.
        self.machine_configs = []
        for machine_config_path in glob.glob(
            f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'
        ):
            with open(machine_config_path) as f:
                self.machine_configs.append(yaml.load(f, Loader=yaml.FullLoader))
    def test_kernel_args(self):
        """Assert there are machine configs configuring the kernel args for masters and workers"""
        for machine_config in self.machine_configs:
            kernel_args = machine_config["spec"]["kernelArguments"]
            self.assertIn("ip=dhcp,dhcp6", kernel_args)
if __name__ == '__main__':
    # The assets directory is passed as the LAST CLI argument; pop it so
    # unittest does not try to interpret it as a test name.
    ASSETS_DIR = sys.argv.pop()
    # Emit JUnit XML to $JUNIT_FILE when set, otherwise discard it.
    with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:
        unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output), failfast=False, buffer=False, catchbreak=False, verbosity=2)
|
8,654 | c5bdbcc8ba38b02e5e5cf8b53362e87ba761443d | from django.db import models
# Create your models here.
class Advertisement(models.Model):
    """A classified advertisement with price, view counter and status."""
    title = models.CharField(max_length=1500, db_index=True, verbose_name='Заголовок')
    description = models.TextField(blank=True)
    created_at = models.DateTimeField(auto_now_add=True)   # set once on insert
    update_at = models.DateTimeField(auto_now=True)        # refreshed on every save
    price = models.FloatField(verbose_name='цена', default=0)
    views_count = models.IntegerField(verbose_name='количество просмотров', default=0)
    # Optional status; deleting the status cascades to its advertisements.
    status = models.ForeignKey('AdvertisementStatus', default=None,
                               null=True, on_delete=models.CASCADE,
                               related_name='advertisements', verbose_name='Статус')
    def __str__(self):
        return self.title
    class Meta:
        db_table = 'advertisements'
        ordering = ['title']
class AdvertisementStatus(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Authors(models.Model):
    """An advertisement author with contact details."""
    name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')
    email = models.EmailField()
    phone = models.CharField(max_length=20, verbose_name='Телефон')
    def __str__(self):
        return self.name
|
8,655 | b84b3206e87176feee2c39fc0866ada994c9ac7a | from django.shortcuts import render
from PIL import Image
from django.views.decorators import csrf
import numpy as np
import re
import sys
import os
from .utils import *
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import base64
sys.path.append(os.path.abspath("./models"))
OUTPUT = os.path.join(os.path.dirname(__file__), 'output.png')
from PIL import Image
from io import BytesIO
def getI420FromBase64(codec):
    """Decode a data-URI base64 image string and save it to OUTPUT.

    :param codec: string like ``data:image/png;base64,<payload>``
    """
    base64_data = re.sub('^data:image/.+;base64,', '', codec)  # strip the data-URI header
    byte_data = base64.b64decode(base64_data)
    image_data = BytesIO(byte_data)
    img = Image.open(image_data)
    img.save(OUTPUT)
def convertImage(imgData):
    """Thin wrapper (kept for the view's historical interface): decode the
    base64 payload and write it to OUTPUT."""
    getI420FromBase64(imgData)
@csrf_exempt
def predict(request):
    """Decode the POSTed base64 image, preprocess it to 32x32 grayscale and
    return the model's predicted class index as JSON ``{"output": "<n>"}``."""
    imgData = request.POST.get('img')
    convertImage(imgData)
    x = Image.open(OUTPUT)
    x = x.convert('L')      # grayscale
    x = x.resize((32,32))
    x.save(OUTPUT)
    x = np.array(x)
    x = x.reshape(1,32,32,1)  # batch of one 32x32 single-channel image
    # NOTE(review): the model (and graph) are re-initialised on EVERY request;
    # consider loading once at module import for performance.
    model, graph = init()
    out = model.predict(x)
    response = np.array(np.argmax(out, axis=1))
    return JsonResponse({"output": str(response[0]) })
def index(request):
    """Render the drawing page with a sample character image."""
    return render(request, 'index.html', { "imagestr" : "static/hindi_characters/1.png"})
|
# Band Name Generator: combine hometown and pet name into a band name.
# 1. Create a greeting for your program.
print("Welcome to the Band Name Generator")
# 2. Ask the user for the city that they grew up in.
#    Fix: prompt grammar was "did you grew up"; also \n keeps the input
#    cursor on a new line.
city = input("Which city did you grow up in?\n")
# 3. Ask the user for the name of a pet.
pet = input("What is the name of the pet?\n")
# 4. Combine the name of their city and pet and show them their band name.
band_name = city + " " + pet
# Fix: the original printed a double space ("be ", name); also snake_case.
print("Your band name could be", band_name)
|
8,657 | 129df937d7d295bae2009cfb65b2f85228206698 | # !/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright (c) 2021 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Stochastic Gradient Descent
"""
from typing import Callable
import numpy as np
from tqdm import tqdm
from ..circuit import BasicCircuit
from .basic_optimizer import BasicOptimizer
class SGD(BasicOptimizer):
    r"""SGD Optimizer class
    """
    def __init__(self, iterations: int, circuit: BasicCircuit, learning_rate: float):
        r"""The constructor of the SGD class

        Args:
            iterations (int): Number of iterations
            circuit (BasicCircuit): Circuit whose parameters are to be optimized
            learning_rate (float): Learning rate
        """
        super().__init__(iterations, circuit)
        self._learning_rate = learning_rate
    def minimize(
            self, shots: int,
            loss_func: Callable[[np.ndarray, int], float],
            grad_func: Callable[[np.ndarray, int], np.ndarray]
    ) -> None:
        r"""Minimizes the given loss function

        Args:
            shots (int): Number of measurement shots
            loss_func (Callable[[np.ndarray, int], float]): Loss function to be minimized
            grad_func (Callable[[np.ndarray, int], np.ndarray]): Function for calculating gradients
        """
        self._loss_history = []
        for _ in tqdm(range(self._iterations)):
            curr_param = self._circuit.parameters
            gradient = grad_func(curr_param, shots)
            new_param = curr_param - self._learning_rate * gradient
            # NOTE(review): new_param is never written back to
            # self._circuit.parameters here; unless loss_func/grad_func update
            # the circuit as a side effect, every iteration starts from the
            # same parameters -- confirm against BasicOptimizer/BasicCircuit.
            loss = loss_func(new_param, shots)
            self._loss_history.append(loss)
|
8,658 | bb6d6061365fad809448d09a1c031b984423b5e0 | __author__ = 'liwenchang'
#-*- coding:utf-8 -*-
import os
import time
import win32api, win32pdhutil, win32con, win32com.client
import win32pdh, string
def check_exsit(process_name):
    """Return True if a process with the given image name is running (via WMI).

    :param process_name: Windows process image name, e.g. 'OUTLOOK.EXE'
    """
    WMI = win32com.client.GetObject('winmgmts:')
    processCodeCov = WMI.ExecQuery('select * from Win32_Process where Name="%s"' % process_name)
    # Fix: was `if len(...) > 0: return bool(True) else: return bool(False)`;
    # return the comparison directly.
    return len(processCodeCov) > 0
if __name__ == '__main__':
    # Fix: raw string -- '\O' in "...\Office15\OUTLOOK.EXE" is an invalid
    # escape sequence (DeprecationWarning); the path bytes are unchanged.
    OUTLOOK_PATH = r"C:\Program Files (x86)\Microsoft Office\Office15\OUTLOOK.EXE"
    process = 'OUTLOOK.EXE'
    # Kill a running Outlook (if any), then (re)start it.
    if check_exsit(process):
        os.system('taskkill /F /IM OUTLOOK.EXE')
    os.startfile(OUTLOOK_PATH)
    #os.system('taskkill /F /IM OUTLOOK.EXE')
    #os.startfile(OUTLOOK_PATH)
'''
# ***********************************************************************
# ***********************************************************************
def GetProcessID( name ):
object = "Process"
items, instances = win32pdh.EnumObjectItems(None,None,object, win32pdh.PERF_DETAIL_WIZARD)
val = None
if name in instances :
hq = win32pdh.OpenQuery()
hcs = []
item = "ID Process"
path = win32pdh.MakeCounterPath( (None,object,name, None, 0, item) )
hcs.append(win32pdh.AddCounter(hq, path))
win32pdh.CollectQueryData(hq)
time.sleep(0.01)
win32pdh.CollectQueryData(hq)
for hc in hcs:
type, val = win32pdh.GetFormattedCounterValue(hc, win32pdh.PDH_FMT_LONG)
win32pdh.RemoveCounter(hc)
win32pdh.CloseQuery(hq)
return val
# ***********************************************************************
# ***********************************************************************
# ***********************************************************************
def Kill_Process ( name ) :
pid = GetProcessID (name)
print pid
if pid:
print "exist"
Kill_Process_pid(pid)
else:
print "not this proccess"
# ***********************************************************************
'''
'''
#THIS IS SLOW !!
def Kill_Process ( process ) :
#get process id's for the given process name
pids = win32pdhutil.FindPerformanceAttributesByName ( process )
for p in pids:
handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, p) #get process handle
win32api.TerminateProcess(handle,0) #kill by handle
win32api.CloseHandle(handle) #close api
'''
# ***********************************************************************
# ***********************************************************************
'''
def Kill_Process ( process_name ) :
#get process id's for the given process name
pids = win32pdhutil.FindPerformanceAttributesByName ( 'OUTLOOK.EXE' )
print pids
for p in pids:
handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, p) #get process handle
# win32api.TerminateProcess(handle,0) #kill by handle
# win32api.CloseHandle(handle) #close api
'''
'''
import os
command = 'taskkill /F /IM QQ.exe'
os.system(command)
'''
'''
# ***********************************************************************
# ***********************************************************************
if __name__ == "__main__":
a = GetAllProcesses()
print a
process = 'alg'# process name
Kill_Process ( process )
os.startfile("C:\Program Files (x86)\Microsoft Office\Office15\OUTLOOK.EXE")
'''
|
8,659 | 2286aa1581ca7d6282b35847505a904980da275e | import cv2
import numpy as np
kernel = np.ones((3, 3), np.uint8)
def mask(image):
    """Build a binary skin mask for the hand inside the 300x300 ROI.

    Draws the green ROI rectangle on *image* as a side effect and returns the
    blurred mask (same size as the ROI).
    """
    # define region of interest
    green_frame = image[50:350, 50:350]
    cv2.rectangle(image, (50, 50), (350, 350), (0, 255, 0), 0)
    hsv = cv2.cvtColor(green_frame, cv2.COLOR_BGR2HSV)
    # define range of skin color in HSV
    lower_skin = np.array([0, 20, 70], dtype=np.uint8)
    upper_skin = np.array([20, 255, 255], dtype=np.uint8)
    # extract skin colour image
    mask = cv2.inRange(hsv, lower_skin, upper_skin)
    # extrapolate the hand to fill dark spots within
    mask = cv2.dilate(mask, kernel, iterations=4)
    mask = cv2.erode(mask, kernel, iterations=9)
    # blur the image
    mask = cv2.GaussianBlur(mask, (5, 5), 100)
    # NOTE(review): the flipped image is discarded (the function returns the
    # mask); either this line is dead code or a flipped image was meant to be
    # returned as well -- confirm with the caller.
    image = cv2.flip(image, 1)
    return mask
|
def test_number():
    """Placeholder so the test module is collected; no assertions yet."""
    pass
|
8,661 | e6d506dd45e72ee7f0162a884981ee1156153d3d | import json
import os
from lib.create import create_server, create_user
os.chdir(r'/home/niko/data/Marvin')
def edit_user_stats(server_id: str, user_id: str, stat: str, datas):
    """Set one stat value for a user in ``Server/<server_id>/user.json``.

    Silently does nothing when the user file does not exist.

    :param datas: new value stored under data[user_id][stat]
    """
    create_user(server_id, user_id)  # ensure the user entry exists
    if os.path.isfile("Server/{}/user.json".format(server_id)):
        with open("Server/{}/user.json".format(server_id), 'r') as fp:
            data = json.load(fp)
        data[user_id][stat] = datas
        # NOTE(review): the second format() argument (user_id) is unused --
        # the path template only has one placeholder.
        with open("Server/{}/user.json".format(server_id, user_id), 'w') as fp:
            json.dump(data, fp, indent=4)
def set_message(server_id: str, name: str, message_id: str):
    """Store the panel message id for ticket type *name* in ticket.json.

    :return: False when *name* is not a known ticket type, None on success
    """
    create_server(server_id)  # ensure the server files exist
    with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8') as fp:
        data = json.load(fp)
    if name in data:
        data[name]['message'] = message_id
        with open('Server/{}/ticket.json'.format(server_id), "w+") as fp:
            json.dump(data, fp, indent=4)
    else:
        return False
def set_log(server_id: str, name: str, channel_id: str):
    """Store the log channel id for ticket type *name* in ticket.json.

    :return: False when *name* is not a known ticket type, None on success
    """
    create_server(server_id)  # ensure the server files exist
    with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8') as fp:
        data = json.load(fp)
    if name in data:
        data[name]['log'] = channel_id
        with open('Server/{}/ticket.json'.format(server_id), "w+") as fp:
            json.dump(data, fp, indent=4)
    else:
        return False
def set_category(server_id: str, name: str, category_id: str):
    """Store the Discord category id for ticket type *name* in ticket.json.

    :return: False when *name* is not a known ticket type, None on success
    """
    create_server(server_id)  # ensure the server files exist
    with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8') as fp:
        data = json.load(fp)
    if name in data:
        data[name]['category'] = category_id
        with open('Server/{}/ticket.json'.format(server_id), "w+") as fp:
            json.dump(data, fp, indent=4)
    else:
        return False
def set_count(server_id: str, name: str):
    """Increment the running ticket counter for ticket type *name*.

    :return: False when *name* is not a known ticket type, None on success
    """
    create_server(server_id)  # ensure the server files exist
    with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8') as fp:
        data = json.load(fp)
    if name in data:
        count = data[name]['ticket']
        data[name]['ticket'] = count + 1
        with open('Server/{}/ticket.json'.format(server_id), "w+") as fp:
            json.dump(data, fp, indent=4)
    else:
        return False
def edit_setting(server_id: str, vari: str, new):
    """Overwrite setting `vari` in the server's settings file.

    Returns False when `vari` is not an existing setting key; otherwise
    rewrites the settings file and returns None.
    """
    create_server(server_id)
    path = 'Server/{}/settings.json'.format(server_id)
    with open(path, encoding='utf-8') as fp:
        data = json.load(fp)
    if vari not in data:
        return False
    data[vari] = new
    with open(path, "w+") as fp:
        json.dump(data, fp, indent=4)
|
8,662 | e44c4b2c3b60d34d4540ec2d3a782c777c52fbc0 | name = input("Введите ваше имя ")
# Greet the user by the name entered above (Russian: "Good day, <name>").
print("Добрый день,", name)
|
8,663 | ddb81e3ce0df44ee503c558b68b41c35935358a0 | #!/usr/bin/env python
"""Server that accepts and executes control-type commands on the bot."""
import sys
import os
from inspect import getmembers, ismethod
from simplejson.decoder import JSONDecodeError
import zmq
import signal
# This is required to make imports work
sys.path = [os.getcwd()] + sys.path
import bot.lib.lib as lib
import pub_server as pub_server_mod
import bot.lib.messages as msgs
from bot.driver.mec_driver import MecDriver
def is_api_method(obj, name):
    """Return True if `obj` has a bound method `name` flagged for API export.

    A method is considered exported when the @lib.api_call decorator has
    stamped a "__api_call" attribute onto it.

    :param obj: API-exported object to look the method up on.
    :param name: Attribute name to check.
    :returns: True when the attribute exists, is a method, and is flagged.
    """
    method = getattr(obj, name, None)
    if method is None:
        return False
    return ismethod(method) and hasattr(method, "__api_call")
class CtrlServer(object):
    """Exports bot control via ZMQ.
    Most functionally exported by CtrlServer is in the form of methods
    exposed by the API. @lib.api_call decorators can be added to bot
    systems, which tags them for export. They can then be called
    remotely via CtrlClient, which is typically owned by an interface
    like the CLI, which typically accepts commands from an agent like
    a human.
    Some control is exported directly by CtrlServer, not through the
    API. For example, CtrlServer responds directly to ping messages,
    list messages (which give the objects/methods exposed by the API),
    and exit messages.
    CtrlServer is the primary owner of bot resources, which we call
    systems. For example, it's CtrlServer that instantiates gunner
    and follower. Through those two, CtrlServer owns the gun, the
    IR hub, the turret and basically every other bot system.
    The messages that CtrlServer accepts and responds with are fully
    specified in lib.messages. Make any changes to messages there.
    CtrlServer can be instructed (via the API) to spawn a new thread
    for a PubServer. When that happens, CtrlServer passes its systems
    to PubServer, which can read their state and publish it over a
    ZMQ PUB socket.
    """
    def __init__(self, testing=None, config_file="bot/config.yaml"):
        """Build ZMQ REP socket and instantiate bot systems.
        :param testing: True if running on simulated HW, False if on bot.
        :type testing: boolean
        :param config_file: Name of file to read configuration from.
        :type config_file: string
        """
        # Register signal handler, shut down cleanly (think motors)
        signal.signal(signal.SIGINT, self.signal_handler)
        # Load configuration and logger
        self.config = lib.get_config(config_file)
        self.logger = lib.get_logger()
        # Testing flag will cause objects to run on simulated hardware.
        # Note the flag may arrive as the *string* "True" from the CLI
        # (see the __main__ block), hence the testing == "True" check.
        if testing is True or testing == "True":
            self.logger.info("CtrlServer running in test mode")
            lib.set_testing(True)
        elif testing is None:
            self.logger.info(
                "Defaulting to config testing flag: {}".format(
                    self.config["testing"]))
            lib.set_testing(self.config["testing"])
        else:
            self.logger.info("CtrlServer running in non-test mode")
            lib.set_testing(False)
        # Build socket to listen for requests
        self.context = zmq.Context()
        self.ctrl_sock = self.context.socket(zmq.REP)
        self.server_bind_addr = "{protocol}://{host}:{port}".format(
            protocol=self.config["server_protocol"],
            host=self.config["server_bind_host"],
            port=self.config["ctrl_server_port"])
        try:
            self.ctrl_sock.bind(self.server_bind_addr)
        except zmq.ZMQError:
            self.logger.error("ZMQ error. Is a server already running?")
            self.logger.warning("May be connected to an old server instance.")
            sys.exit(1)
        self.systems = self.assign_subsystems()
        self.logger.info("Control server initialized")
        # Don't spawn pub_server until told to
        self.pub_server = None
    def signal_handler(self, signal, frame):
        # Handle SIGINT by stopping motors and tearing down sockets before
        # exiting. NOTE: the `signal` parameter shadows the imported
        # `signal` module inside this handler (harmless here, as the
        # module is not referenced in the body).
        self.logger.info("Caught SIGINT (Ctrl+C), closing cleanly")
        self.clean_up()
        self.logger.info("Cleaned up bot, exiting...")
        sys.exit(0)
    def assign_subsystems(self):
        """Instantiates and stores references to bot subsystems.
        :returns: Dict of subsystems, maps system name to instantiated object.
        """
        self.driver = MecDriver()
        systems = {}
        # "ctrl" exposes CtrlServer's own @lib.api_call methods (echo,
        # exception, spawn_pub_server, stop_full) through the API.
        systems["ctrl"] = self
        systems["driver"] = self.driver
        self.logger.debug("Systems: {}".format(systems))
        return systems
    def listen(self):
        """Perpetually listen for messages, pass them to generic handler."""
        self.logger.info("Control server: {}".format(self.server_bind_addr))
        while True:
            try:
                # recv_json blocks until a client sends a request (REQ/REP).
                msg = self.ctrl_sock.recv_json()
                reply = self.handle_msg(msg)
                self.logger.debug("Sending: {}".format(reply))
                self.ctrl_sock.send_json(reply)
            except JSONDecodeError:
                err_msg = "Not a JSON message!"
                self.logger.warning(err_msg)
                self.ctrl_sock.send_json(msgs.error(err_msg))
            except KeyboardInterrupt:
                self.logger.info("Exiting control server. Bye!")
                self.clean_up()
                sys.exit(0)
    def handle_msg(self, msg):
        """Generic message handler. Hands-off based on type of message.
        :param msg: Message, received via ZMQ from client, to handle.
        :type msg: dict
        :returns: An appropriate message reply dict, from lib.messages.
        """
        self.logger.debug("Received: {}".format(msg))
        try:
            msg_type = msg["type"]
        except KeyError as e:
            # NOTE(review): passes the KeyError object itself (not a string)
            # to msgs.error — confirm msgs.error stringifies it as intended.
            return msgs.error(e)
        if msg_type == "ping_req":
            reply = msgs.ping_reply()
        elif msg_type == "list_req":
            reply = self.list_callables()
        elif msg_type == "call_req":
            try:
                obj_name = msg["obj_name"]
                method = msg["method"]
                params = msg["params"]
                reply = self.call_method(obj_name, method, params)
            except KeyError as e:
                return msgs.error(e)
        elif msg_type == "exit_req":
            self.logger.info("Received message to die. Bye!")
            reply = msgs.exit_reply()
            # Need to actually send reply here as we're about to exit
            self.logger.debug("Sending: {}".format(reply))
            self.ctrl_sock.send_json(reply)
            self.clean_up()
            sys.exit(0)
        else:
            err_msg = "Unrecognized message: {}".format(msg)
            self.logger.warning(err_msg)
            reply = msgs.error(err_msg)
        return reply
    def list_callables(self):
        """Build list of callable methods on each exported subsystem object.
        Uses introspection to create a list of callable methods for each
        registered subsystem object. Only methods which are flagged using the
        @lib.api_call decorator will be included.
        :returns: list_reply message with callable objects and their methods.
        """
        self.logger.debug("List of callable API objects requested")
        # Dict of subsystem object names to their callable methods.
        callables = {}
        for name, obj in self.systems.items():
            methods = []
            # Filter out methods which are not explicitly flagged for export
            for member in getmembers(obj):
                if is_api_method(obj, member[0]):
                    methods.append(member[0])
            callables[name] = methods
        return msgs.list_reply(callables)
    def call_method(self, name, method, params):
        """Call a previously registered subsystem method by name. Only
        methods tagged with the @api_call decorator can be called.
        :param name: Assigned name of the registered subsystem.
        :type name: string
        :param method: Subsystem method to be called.
        :type method: string
        :param params: Additional parameters for the called method.
        :type params: dict
        :returns: call_reply or error message dict to be sent to caller.
        """
        self.logger.debug("API call: {}.{}({})".format(name, method, params))
        if name in self.systems:
            obj = self.systems[name]
            if is_api_method(obj, method):
                try:
                    # Calls given obj.method, unpacking and passing params dict
                    call_return = getattr(obj, method)(**params)
                    msg = "Called {}.{}".format(name, method)
                    self.logger.debug(msg + ",returned:{}".format(call_return))
                    return msgs.call_reply(msg, call_return)
                except TypeError:
                    # Raised when we have a mismatch of the method's kwargs
                    # TODO: Return argspec here?
                    err_msg = "Invalid params for {}.{}".format(name, method)
                    self.logger.warning(err_msg)
                    return msgs.error(err_msg)
                except Exception as e:
                    # Catch exception raised by called method, notify client
                    err_msg = "Exception: '{}'".format(str(e))
                    self.logger.warning(err_msg)
                    return msgs.error(err_msg)
            else:
                err_msg = "Invalid method: '{}.{}'".format(name, method)
                self.logger.warning(err_msg)
                return msgs.error(err_msg)
        else:
            err_msg = "Invalid object: '{}'".format(name)
            self.logger.warning(err_msg)
            return msgs.error(err_msg)
    @lib.api_call
    def echo(self, msg=None):
        """Echo a message back to the caller.
        :param msg: Message to be echoed back to caller, default is None.
        :returns: Message given by param, defaults to None.
        """
        return msg
    @lib.api_call
    def exception(self):
        """Raise a test exception which will be returned to the caller."""
        raise Exception("Exception test")
    @lib.api_call
    def spawn_pub_server(self):
        """Spawn publisher thread."""
        if self.pub_server is None:
            self.pub_server = pub_server_mod.PubServer(self.systems)
            # Prevent pub_server thread from blocking the process from closing
            self.pub_server.setDaemon(True)
            self.pub_server.start()
            msg = "Spawned pub server"
            self.logger.info(msg)
            return msg
        else:
            err_msg = "PubServer is already running"
            self.logger.warning(err_msg)
            return err_msg
    @lib.api_call
    def stop_full(self):
        """Stop all drive and gun motors, set turret to safe state."""
        self.systems["driver"].move(0, 0)
    def clean_up(self):
        """Tear down ZMQ socket."""
        # Stop motors first so hardware is safe even if socket teardown fails.
        self.stop_full()
        self.ctrl_sock.close()
        self.context.term()
if __name__ == "__main__":
    # Optional single CLI argument is the `testing` flag forwarded to
    # CtrlServer (may arrive as the string "True"; CtrlServer.__init__
    # handles that). With no argument, the config file's flag is used.
    if len(sys.argv) == 2:
        server = CtrlServer(sys.argv[1])
    else:
        server = CtrlServer()
    server.listen()
|
8,664 | 3fed96e9bedb157a14cf9c441de5aae8b4f6edc8 | import sys
import os
# Module "sys"
#
# See docs for the sys module: https://docs.python.org/3.7/library/sys.html
# Exercise scaffolding below (left from the original worksheet):
# Print out the command line arguments in sys.argv, one per line:
# Print out the platform from sys:
# for arg in sys.argv:
#     print(arg)
# Print out the Python version from sys:print(sys.platform)
# print(sys, sep="\n", sys.path)
# Report platform name, max container size, and the argv list in one line.
print("platform: "+sys.platform + "\n" + "maxsize: "+str(sys.maxsize) + "\n" + "argv: "+str(sys.argv))
# # Module "os"
# #
# # See the docs for the OS module: https://docs.python.org/3.7/library/os.html
# # Print the current process ID
# Report process id, current working directory, and login name.
# NOTE(review): os.getlogin() can raise OSError when there is no
# controlling terminal (e.g. under cron/CI) — confirm that's acceptable.
print("Process ID: "+ str(os.getpid()) + "\n" + "cwd: " + os.getcwd() + "\n" + "login id: " + os.getlogin())
# # Print the current working directory (cwd):
# print()
# # Print your login name
# print()
|
8,665 | 0279057b3962e4b9839a86fc2e2683ac1da11b1a | from amqpstorm import management
if __name__ == '__main__':
    # If using a self-signed certificate, change verify=True to point at your CA bundle.
    # You can disable certificate verification for testing by passing in verify=False.
    API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest',
                                   'guest', verify=True)
    try:
        # Ask the broker's management API whether the '/' vhost is responsive.
        result = API.aliveness_test('/')
        if result['status'] == 'ok':
            print('RabbitMQ is alive!')
        else:
            print('RabbitMQ is not alive! :(')
    except management.ApiConnectionError as why:
        # Network-level failure reaching the management endpoint.
        print('Connection Error: %s' % why)
    except management.ApiError as why:
        # HTTP/API-level failure (auth, bad vhost, etc.).
        print('ApiError: %s' % why)
|
8,666 | 03a13037a9a102397c8be4d9f0f4c5e150965808 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mapGraph.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MapGraphTab(object):
    """Auto-generated (pyuic5) widget scaffold for the "Map Graph" tab.

    Do not hand-edit layout details here: per the file header, this module
    is regenerated from mapGraph.ui and changes will be lost.
    """
    def setupUi(self, MapGraphTab):
        # Build the widget tree and layout for the tab container widget.
        MapGraphTab.setObjectName("MapGraphTab")
        MapGraphTab.resize(1150, 831)
        MapGraphTab.setMinimumSize(QtCore.QSize(1150, 830))
        MapGraphTab.setStyleSheet("background-color: rgb(255, 96, 117);")
        self.gridLayout = QtWidgets.QGridLayout(MapGraphTab)
        self.gridLayout.setObjectName("gridLayout")
        # QtWebEngineWidgets is imported at the bottom of this module — a
        # quirk of pyuic5-generated code; the import has executed by the
        # time setupUi is called at runtime.
        self.mapView = QtWebEngineWidgets.QWebEngineView(MapGraphTab)
        self.mapView.setUrl(QtCore.QUrl("about:blank"))
        self.mapView.setObjectName("mapView")
        self.gridLayout.addWidget(self.mapView, 1, 0, 1, 2)
        self.label = QtWidgets.QLabel(MapGraphTab)
        self.label.setMinimumSize(QtCore.QSize(1050, 0))
        font = QtGui.QFont()
        font.setFamily("Book Antiqua")
        font.setPointSize(20)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 0, 1, 2)
        self.extractrMapBtn = QtWidgets.QPushButton(MapGraphTab)
        font = QtGui.QFont()
        font.setFamily("Book Antiqua")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.extractrMapBtn.setFont(font)
        self.extractrMapBtn.setStyleSheet("background-color: rgb(255, 255, 255);")
        self.extractrMapBtn.setObjectName("extractrMapBtn")
        self.gridLayout.addWidget(self.extractrMapBtn, 2, 0, 1, 1)
        self.retranslateUi(MapGraphTab)
        QtCore.QMetaObject.connectSlotsByName(MapGraphTab)
    def retranslateUi(self, MapGraphTab):
        # Install the user-visible (translatable) strings.
        _translate = QtCore.QCoreApplication.translate
        MapGraphTab.setWindowTitle(_translate("MapGraphTab", "Map Graph"))
        self.label.setText(_translate("MapGraphTab", "Map Graph"))
        self.extractrMapBtn.setText(_translate("MapGraphTab", "Extract Video"))
from PyQt5 import QtWebEngineWidgets
|
8,667 | 2611d7dd364f6a027da29c005754ac2465faa8be | from numpy import pi,sqrt,cross,dot,zeros,linalg
from defs import *
##from numba import njit, prange
##
##@njit(parallel=True)
def engparallelb2(MU,NU,b1,b2,x1,x2,y1,y2,eta,a):
    #For use in enginteract below
    #HL p.154 Eq.(6-45)
    #Interaction energy of two PARALLEL straight segments.
    #MU: shear modulus, NU: Poisson ratio, a: core-spreading radius.
    #b1, b2: Burgers vectors already rotated into the local frame by the
    #caller (enginteract); component [0] is along the line direction.
    #eta: separation of the two parallel lines in the in-plane normal
    #direction (e2a in enginteract).
    b1x=b1[0]
    b1y=b1[1]
    b1z=b1[2]
    b2x=b2[0]
    b2y=b2[1]
    b2z=b2[2]
    Rab=Rp(x2,y2,eta,a)-Rp(x2,y1,eta,a)-Rp(x1,y2,eta,a)+Rp(x1,y1,eta,a)
    # NOTE(review): Rab is computed but never used below — confirm whether
    # it is leftover debugging or a missing energy term.
    #[b1',b2',x1,x2,y1,y2,eta,a,Rab]
    #b1
    ap=sqrt(eta**2+a**2)
    # Double integrals Iab/Jab evaluated by corner differencing (++, +-, -+, --).
    Iab=Ia(x2,y2,1,ap)-Ia(x2,y1,1,ap)-Ia(x1,y2,1,ap)+Ia(x1,y1,1,ap)
    Jab=Ja(x2,y2,1,ap)-Ja(x2,y1,1,ap)-Ja(x1,y2,1,ap)+Ja(x1,y1,1,ap)
    return MU/4/pi*(b1x*b2x+(b1z*b2z+b1y*b2y)/(1-NU))*Iab \
        + MU/4/pi*(b1x*b2x)*(a**2/2)*Jab \
        - MU/4/pi/(1-NU)*b1z*b2z*eta*eta*Jab
def engnonplanarb2(MU,NU,b1,b2,xi1,xi2,e3,costheta,x1,x2,y1,y2,z,a):
    #For use in enginteract below
    #Interaction energy of two NON-PARALLEL straight segments, expressed in
    #the local frame built by enginteract: x along xi1, y along xi2,
    #z along e3 (their common normal); costheta = xi1 . xi2.
    #
    #               ^ y axis
    #              /
    #             -
    #          y /
    #           / theta
    # ---------------|----------------> x axis
    #                x
    #
    # x>0, y>0 HL p152, Eq.(6-33)
    ap=sqrt(z*z+a*a)
    # Corner-differenced double integrals over both segment extents.
    Iab = Ia(x2,y2,costheta,ap)-Ia(x2,y1,costheta,ap)-Ia(x1,y2,costheta,ap)+Ia(x1,y1,costheta,ap)
    Jab = Ja(x2,y2,costheta,ap)-Ja(x2,y1,costheta,ap)-Ja(x1,y2,costheta,ap)+Ja(x1,y1,costheta,ap)
    Tab = ( Tfa(b1,b2,xi1,xi2,e3,costheta,x2,y2,z,a)
            - Tfa(b1,b2,xi1,xi2,e3,costheta,x2,y1,z,a)
            - Tfa(b1,b2,xi1,xi2,e3,costheta,x1,y2,z,a)
            + Tfa(b1,b2,xi1,xi2,e3,costheta,x1,y1,z,a) )
    return ( MU/4/pi*(-2*dot(cross(b1,b2),cross(xi1,xi2))
                      + dot(b1,xi1)*dot(b2,xi2) )*(Iab+a**2/2*Jab)
             + MU/4/pi/(1-NU)*Tab )
    #When Iab incorporates Jab
    #W = ( MU/4/pi* (-2*dot(cross(b1,b2),cross(xi1,xi2)) + dot(b1,xi1)*dot(b2,xi2) )*(Iab)
    #     + MU/4/pi/(1-NU)* Tab )
def enginteract(MU,NU,b1,b2,r1,r2,r3,r4,a):
    #Computes interaction energy between two straight dislocation segments
    #r1-r2 (Burgers vector b1) and r3-r4 (Burgers vector b2)
    #MU is shear modulus, NU is Poisson ratio, a is core spread radius
    #Dispatches to engparallelb2 when the segments are (numerically)
    #parallel, otherwise to engnonplanarb2, after building a local frame.
    r21=r2-r1
    r43=r4-r3
    r31=r3-r1
    #Make sure that the segments are represented by column vectors
    #if r21.shape[0]==1:
    #r21=r21.T
    #if r43.shape[0]==1:
    #r43=r43.T
    #if r31.shape[0]==1:
    #r31=r31.T
    #Segment line sense unit vectors
    e1=r21/norm(r21)
    e2=r43/norm(r43)
    #Catagorise line segments according to whether they are parallel or not
    e3=cross(e1,e2)
    # Parallel-detection tolerance on |e1 x e2|.
    subzero=1e-10
    if norm(e3)<subzero:
        # Parallel case: build in-plane normal e2a (Gram-Schmidt of r31
        # against e1 — schmidt is a project helper from defs) and reduce
        # to the 2D geometry expected by engparallelb2.
        e2a=schmidt(r31,e1)
        e3=cross(e1,e2a)
        e3=e3/norm(e3)
        eta=(dot(r3-r1,e2a)+dot(r4-r1,e2a))/2
        x1=0
        x2=dot(r2-r1,e1)
        y1=dot(r3-r1,e1)
        y2=dot(r4-r1,e1)
        #engparallelb2 doesn't rotate b, it needs to be done here
        b1n=zeros([3,1])
        b2n=zeros([3,1])
        b1n[0],b2n[0]=dot(b1,e1),dot(b2,e1)
        b1n[1],b2n[1]=dot(b1,e2a),dot(b2,e2a)
        b1n[2],b2n[2]=dot(b1,e3),dot(b2,e3)
        return engparallelb2(MU,NU,b1n,b2n,x1,x2,y1,y2,eta,a)
    else:
        # Non-parallel case: find r0, the point on line 1 closest (in the
        # e1/e2a plane) to line 2, by solving a 2x2 linear system for the
        # line parameters; measure x along e1 and y along e2 from r0.
        costheta=dot(e1,e2)
        e3=e3/norm(e3)
        e2a=cross(e3,e1)
        z=dot(r31,e3)
        # Sign convention for the out-of-plane separation — follows
        # engnonplanarb2's frame; presumably matches HL Eq.(6-33). Verify.
        z=-z
        A=zeros([2,2])
        A[0,0],A[0,1]=dot(r21,e1),-dot(r43,e1)
        A[1,0],A[1,1]=dot(r21,e2a),-dot(r43,e2a)
        rhs=zeros([2,1])
        rhs[0],rhs[1]=dot(r31,e1),dot(r31,e2a)
        t=linalg.solve(A,rhs)
        r0=(1-t[0])*r1+t[0]*r2
        x1=dot(r1-r0,e1)
        x2=dot(r2-r0,e1)
        y1=dot(r3-r0,e2)
        y2=dot(r4-r0,e2)
        return engnonplanarb2(MU,NU,b1,b2,e1,e2,e3,costheta,x1,x2,y1,y2,z,a)
|
8,668 | 2350c2ab05499f1b40ba61f2101c51d9581d57f6 |
def addnumber(i, j):
    """Print and return the sum of i and j.

    :param i: first addend
    :param j: second addend
    :returns: i + j

    Previously this function only printed the sum and implicitly returned
    None, so assignments like ``z = addnumber(a, b)`` always yielded None.
    Returning the sum fixes that while keeping the printed output.
    """
    total = i + j
    print(total)
    return total
# Read two integers from the user and add them.
num1 = int(input("Enter 1st number"))
num2 = int(input("Enter 2nd number"))
# NOTE(review): z holds whatever addnumber returns — verify that addnumber
# actually returns the sum rather than just printing it, otherwise z is None.
z = addnumber(num1,num2)
|
8,669 | 5f8303ce91c5de779bbddbaafb3fb828596babe5 | # orm/relationships.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Heuristics related to join conditions as used in
:func:`_orm.relationship`.
Provides the :class:`.JoinCondition` object, which encapsulates
SQL annotation and aliasing behavior focused on the `primaryjoin`
and `secondaryjoin` aspects of :func:`_orm.relationship`.
"""
from __future__ import annotations
import collections
from collections import abc
import dataclasses
import inspect as _py_inspect
import re
import typing
from typing import Any
from typing import Callable
from typing import cast
from typing import Collection
from typing import Dict
from typing import Generic
from typing import Iterable
from typing import Iterator
from typing import List
from typing import NamedTuple
from typing import NoReturn
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
import weakref
from . import attributes
from . import strategy_options
from ._typing import insp_is_aliased_class
from ._typing import is_has_collection_adapter
from .base import _DeclarativeMapped
from .base import _is_mapped_class
from .base import class_mapper
from .base import DynamicMapped
from .base import LoaderCallableStatus
from .base import PassiveFlag
from .base import state_str
from .base import WriteOnlyMapped
from .interfaces import _AttributeOptions
from .interfaces import _IntrospectsAnnotations
from .interfaces import MANYTOMANY
from .interfaces import MANYTOONE
from .interfaces import ONETOMANY
from .interfaces import PropComparator
from .interfaces import RelationshipDirection
from .interfaces import StrategizedProperty
from .util import _orm_annotate
from .util import _orm_deannotate
from .util import CascadeOptions
from .. import exc as sa_exc
from .. import Exists
from .. import log
from .. import schema
from .. import sql
from .. import util
from ..inspection import inspect
from ..sql import coercions
from ..sql import expression
from ..sql import operators
from ..sql import roles
from ..sql import visitors
from ..sql._typing import _ColumnExpressionArgument
from ..sql._typing import _HasClauseElement
from ..sql.annotation import _safe_annotate
from ..sql.elements import ColumnClause
from ..sql.elements import ColumnElement
from ..sql.util import _deep_annotate
from ..sql.util import _deep_deannotate
from ..sql.util import _shallow_annotate
from ..sql.util import adapt_criterion_to_null
from ..sql.util import ClauseAdapter
from ..sql.util import join_condition
from ..sql.util import selectables_overlap
from ..sql.util import visit_binary_product
from ..util.typing import de_optionalize_union_types
from ..util.typing import Literal
from ..util.typing import resolve_name_to_real_class_name
if typing.TYPE_CHECKING:
from ._typing import _EntityType
from ._typing import _ExternalEntityType
from ._typing import _IdentityKeyType
from ._typing import _InstanceDict
from ._typing import _InternalEntityType
from ._typing import _O
from ._typing import _RegistryType
from .base import Mapped
from .clsregistry import _class_resolver
from .clsregistry import _ModNS
from .decl_base import _ClassScanMapperConfig
from .dependency import DependencyProcessor
from .mapper import Mapper
from .query import Query
from .session import Session
from .state import InstanceState
from .strategies import LazyLoader
from .util import AliasedClass
from .util import AliasedInsp
from ..sql._typing import _CoreAdapterProto
from ..sql._typing import _EquivalentColumnMap
from ..sql._typing import _InfoType
from ..sql.annotation import _AnnotationDict
from ..sql.annotation import SupportsAnnotations
from ..sql.elements import BinaryExpression
from ..sql.elements import BindParameter
from ..sql.elements import ClauseElement
from ..sql.schema import Table
from ..sql.selectable import FromClause
from ..util.typing import _AnnotationScanType
from ..util.typing import RODescriptorReference
# Module-private TypeVars and type aliases describing the many argument
# forms accepted by relationship() and related internals below.
_T = TypeVar("_T", bound=Any)
_T1 = TypeVar("_T1", bound=Any)
_T2 = TypeVar("_T2", bound=Any)
_PT = TypeVar("_PT", bound=Any)
_PT2 = TypeVar("_PT2", bound=Any)
# Target entity: class, string name, mapper/alias, or a callable returning one.
_RelationshipArgumentType = Union[
    str,
    Type[_T],
    Callable[[], Type[_T]],
    "Mapper[_T]",
    "AliasedClass[_T]",
    Callable[[], "Mapper[_T]"],
    Callable[[], "AliasedClass[_T]"],
]
# Accepted values for the relationship() `lazy` parameter.
_LazyLoadArgumentType = Literal[
    "select",
    "joined",
    "selectin",
    "subquery",
    "raise",
    "raise_on_sql",
    "noload",
    "immediate",
    "write_only",
    "dynamic",
    True,
    False,
    None,
]
_RelationshipJoinConditionArgument = Union[
    str, _ColumnExpressionArgument[bool]
]
_RelationshipSecondaryArgument = Union[
    "FromClause", str, Callable[[], "FromClause"]
]
# order_by accepts False (disabled), strings, column expressions, callables
# producing either, or iterables thereof.
_ORMOrderByArgument = Union[
    Literal[False],
    str,
    _ColumnExpressionArgument[Any],
    Callable[[], _ColumnExpressionArgument[Any]],
    Callable[[], Iterable[_ColumnExpressionArgument[Any]]],
    Iterable[Union[str, _ColumnExpressionArgument[Any]]],
]
ORMBackrefArgument = Union[str, Tuple[str, Dict[str, Any]]]
_ORMColCollectionElement = Union[
    ColumnClause[Any], _HasClauseElement, roles.DMLColumnRole, "Mapped[Any]"
]
_ORMColCollectionArgument = Union[
    str,
    Sequence[_ORMColCollectionElement],
    Callable[[], Sequence[_ORMColCollectionElement]],
    Callable[[], _ORMColCollectionElement],
    _ORMColCollectionElement,
]
_CEA = TypeVar("_CEA", bound=_ColumnExpressionArgument[Any])
_CE = TypeVar("_CE", bound="ColumnElement[Any]")
# Pairs of (local, remote) columns in various mutability flavors.
_ColumnPairIterable = Iterable[Tuple[ColumnElement[Any], ColumnElement[Any]]]
_ColumnPairs = Sequence[Tuple[ColumnElement[Any], ColumnElement[Any]]]
_MutableColumnPairs = List[Tuple[ColumnElement[Any], ColumnElement[Any]]]
def remote(expr: _CEA) -> _CEA:
    """Mark a portion of a primaryjoin expression with the 'remote'
    annotation.
    Usage is described in the section :ref:`relationship_custom_foreign`.
    .. seealso::
        :ref:`relationship_custom_foreign`
        :func:`.foreign`
    """
    coerced = coercions.expect(roles.ColumnArgumentRole, expr)
    return _annotate_columns(coerced, {"remote": True})  # type: ignore
def foreign(expr: _CEA) -> _CEA:
    """Mark a portion of a primaryjoin expression with the 'foreign'
    annotation.
    Usage is described in the section :ref:`relationship_custom_foreign`.
    .. seealso::
        :ref:`relationship_custom_foreign`
        :func:`.remote`
    """
    coerced = coercions.expect(roles.ColumnArgumentRole, expr)
    return _annotate_columns(coerced, {"foreign": True})  # type: ignore
@dataclasses.dataclass
class _RelationshipArg(Generic[_T1, _T2]):
    """stores a user-defined parameter value that must be resolved and
    parsed later at mapper configuration time.
    """
    __slots__ = "name", "argument", "resolved"
    # name: parameter name as given to relationship() (e.g. "secondary").
    name: str
    # argument: raw user-supplied value (string, callable, or object).
    argument: _T1
    # resolved: concrete value after registry resolution; None until then.
    resolved: Optional[_T2]
    def _is_populated(self) -> bool:
        # True when the user actually supplied a value for this parameter.
        return self.argument is not None
    def _resolve_against_registry(
        self, clsregistry_resolver: Callable[[str, bool], _class_resolver]
    ) -> None:
        # Turn the raw argument into a concrete object:
        # - strings are looked up in the declarative class registry (the
        #   second resolver flag marks the "secondary" parameter, which
        #   may name a Table rather than a mapped class);
        # - non-mapped callables are invoked lazily;
        # - anything else is taken as-is.
        attr_value = self.argument
        if isinstance(attr_value, str):
            self.resolved = clsregistry_resolver(
                attr_value, self.name == "secondary"
            )()
        elif callable(attr_value) and not _is_mapped_class(attr_value):
            self.resolved = attr_value()
        else:
            self.resolved = attr_value
class _RelationshipArgs(NamedTuple):
    """stores user-passed parameters that are resolved at mapper configuration
    time.
    """
    # secondary: association table for many-to-many, or its name/callable.
    secondary: _RelationshipArg[
        Optional[_RelationshipSecondaryArgument],
        Optional[FromClause],
    ]
    # primaryjoin: join condition between parent and target (or secondary).
    primaryjoin: _RelationshipArg[
        Optional[_RelationshipJoinConditionArgument],
        Optional[ColumnElement[Any]],
    ]
    # secondaryjoin: join condition between secondary and target.
    secondaryjoin: _RelationshipArg[
        Optional[_RelationshipJoinConditionArgument],
        Optional[ColumnElement[Any]],
    ]
    # order_by: resolved to a tuple of column elements, or None/False.
    order_by: _RelationshipArg[
        _ORMOrderByArgument,
        Union[Literal[None, False], Tuple[ColumnElement[Any], ...]],
    ]
    # foreign_keys: user-designated foreign-key columns.
    foreign_keys: _RelationshipArg[
        Optional[_ORMColCollectionArgument], Set[ColumnElement[Any]]
    ]
    # remote_side: user-designated remote-side columns.
    remote_side: _RelationshipArg[
        Optional[_ORMColCollectionArgument], Set[ColumnElement[Any]]
    ]
@log.class_logger
class RelationshipProperty(
_IntrospectsAnnotations, StrategizedProperty[_T], log.Identified
):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`_orm.relationship` function.
.. seealso::
:ref:`relationship_config_toplevel`
"""
strategy_wildcard_key = strategy_options._RELATIONSHIP_TOKEN
inherit_cache = True
""":meta private:"""
_links_to_entity = True
_is_relationship = True
_overlaps: Sequence[str]
_lazy_strategy: LazyLoader
_persistence_only = dict(
passive_deletes=False,
passive_updates=True,
enable_typechecks=True,
active_history=False,
cascade_backrefs=False,
)
_dependency_processor: Optional[DependencyProcessor] = None
primaryjoin: ColumnElement[bool]
secondaryjoin: Optional[ColumnElement[bool]]
secondary: Optional[FromClause]
_join_condition: JoinCondition
order_by: Union[Literal[False], Tuple[ColumnElement[Any], ...]]
_user_defined_foreign_keys: Set[ColumnElement[Any]]
_calculated_foreign_keys: Set[ColumnElement[Any]]
remote_side: Set[ColumnElement[Any]]
local_columns: Set[ColumnElement[Any]]
synchronize_pairs: _ColumnPairs
secondary_synchronize_pairs: Optional[_ColumnPairs]
local_remote_pairs: Optional[_ColumnPairs]
direction: RelationshipDirection
_init_args: _RelationshipArgs
def __init__(
self,
argument: Optional[_RelationshipArgumentType[_T]] = None,
secondary: Optional[_RelationshipSecondaryArgument] = None,
*,
uselist: Optional[bool] = None,
collection_class: Optional[
Union[Type[Collection[Any]], Callable[[], Collection[Any]]]
] = None,
primaryjoin: Optional[_RelationshipJoinConditionArgument] = None,
secondaryjoin: Optional[_RelationshipJoinConditionArgument] = None,
back_populates: Optional[str] = None,
order_by: _ORMOrderByArgument = False,
backref: Optional[ORMBackrefArgument] = None,
overlaps: Optional[str] = None,
post_update: bool = False,
cascade: str = "save-update, merge",
viewonly: bool = False,
attribute_options: Optional[_AttributeOptions] = None,
lazy: _LazyLoadArgumentType = "select",
passive_deletes: Union[Literal["all"], bool] = False,
passive_updates: bool = True,
active_history: bool = False,
enable_typechecks: bool = True,
foreign_keys: Optional[_ORMColCollectionArgument] = None,
remote_side: Optional[_ORMColCollectionArgument] = None,
join_depth: Optional[int] = None,
comparator_factory: Optional[
Type[RelationshipProperty.Comparator[Any]]
] = None,
single_parent: bool = False,
innerjoin: bool = False,
distinct_target_key: Optional[bool] = None,
load_on_pending: bool = False,
query_class: Optional[Type[Query[Any]]] = None,
info: Optional[_InfoType] = None,
omit_join: Literal[None, False] = None,
sync_backref: Optional[bool] = None,
doc: Optional[str] = None,
bake_queries: Literal[True] = True,
cascade_backrefs: Literal[False] = False,
_local_remote_pairs: Optional[_ColumnPairs] = None,
_legacy_inactive_history_style: bool = False,
):
super().__init__(attribute_options=attribute_options)
self.uselist = uselist
self.argument = argument
self._init_args = _RelationshipArgs(
_RelationshipArg("secondary", secondary, None),
_RelationshipArg("primaryjoin", primaryjoin, None),
_RelationshipArg("secondaryjoin", secondaryjoin, None),
_RelationshipArg("order_by", order_by, None),
_RelationshipArg("foreign_keys", foreign_keys, None),
_RelationshipArg("remote_side", remote_side, None),
)
self.post_update = post_update
self.viewonly = viewonly
if viewonly:
self._warn_for_persistence_only_flags(
passive_deletes=passive_deletes,
passive_updates=passive_updates,
enable_typechecks=enable_typechecks,
active_history=active_history,
cascade_backrefs=cascade_backrefs,
)
if viewonly and sync_backref:
raise sa_exc.ArgumentError(
"sync_backref and viewonly cannot both be True"
)
self.sync_backref = sync_backref
self.lazy = lazy
self.single_parent = single_parent
self.collection_class = collection_class
self.passive_deletes = passive_deletes
if cascade_backrefs:
raise sa_exc.ArgumentError(
"The 'cascade_backrefs' parameter passed to "
"relationship() may only be set to False."
)
self.passive_updates = passive_updates
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.distinct_target_key = distinct_target_key
self.doc = doc
self.active_history = active_history
self._legacy_inactive_history_style = _legacy_inactive_history_style
self.join_depth = join_depth
if omit_join:
util.warn(
"setting omit_join to True is not supported; selectin "
"loading of this relationship may not work correctly if this "
"flag is set explicitly. omit_join optimization is "
"automatically detected for conditions under which it is "
"supported."
)
self.omit_join = omit_join
self.local_remote_pairs = _local_remote_pairs
self.load_on_pending = load_on_pending
self.comparator_factory = (
comparator_factory or RelationshipProperty.Comparator
)
util.set_creation_order(self)
if info is not None:
self.info.update(info)
self.strategy_key = (("lazy", self.lazy),)
self._reverse_property: Set[RelationshipProperty[Any]] = set()
if overlaps:
self._overlaps = set(re.split(r"\s*,\s*", overlaps)) # type: ignore # noqa: E501
else:
self._overlaps = ()
# mypy ignoring the @property setter
self.cascade = cascade # type: ignore
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive"
)
self.backref = None
else:
self.backref = backref
def _warn_for_persistence_only_flags(self, **kw: Any) -> None:
for k, v in kw.items():
if v != self._persistence_only[k]:
# we are warning here rather than warn deprecated as this is a
# configuration mistake, and Python shows regular warnings more
# aggressively than deprecation warnings by default. Unlike the
# case of setting viewonly with cascade, the settings being
# warned about here are not actively doing the wrong thing
# against viewonly=True, so it is not as urgent to have these
# raise an error.
util.warn(
"Setting %s on relationship() while also "
"setting viewonly=True does not make sense, as a "
"viewonly=True relationship does not perform persistence "
"operations. This configuration may raise an error "
"in a future release." % (k,)
)
def instrument_class(self, mapper: Mapper[Any]) -> None:
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
    class Comparator(util.MemoizedSlots, PropComparator[_PT]):
        """Produce boolean, comparison, and other operators for
        :class:`.RelationshipProperty` attributes.

        See the documentation for :class:`.PropComparator` for a brief
        overview of ORM level operator definition.

        .. seealso::

            :class:`.PropComparator`

            :class:`.ColumnProperty.Comparator`

            :class:`.ColumnOperators`

            :ref:`types_operators`

            :attr:`.TypeEngine.comparator_factory`

        """

        __slots__ = (
            "entity",
            "mapper",
            "property",
            "_of_type",
            "_extra_criteria",
        )

        prop: RODescriptorReference[RelationshipProperty[_PT]]
        _of_type: Optional[_EntityType[_PT]]

        def __init__(
            self,
            prop: RelationshipProperty[_PT],
            parentmapper: _InternalEntityType[Any],
            adapt_to_entity: Optional[AliasedInsp[Any]] = None,
            of_type: Optional[_EntityType[_PT]] = None,
            extra_criteria: Tuple[ColumnElement[bool], ...] = (),
        ):
            """Construction of :class:`.RelationshipProperty.Comparator`
            is internal to the ORM's attribute mechanics.

            """
            self.prop = prop
            self._parententity = parentmapper
            self._adapt_to_entity = adapt_to_entity
            if of_type:
                self._of_type = of_type
            else:
                self._of_type = None
            self._extra_criteria = extra_criteria

        def adapt_to_entity(
            self, adapt_to_entity: AliasedInsp[Any]
        ) -> RelationshipProperty.Comparator[Any]:
            # return a copy of this comparator bound to the given aliased
            # entity.
            # NOTE(review): _extra_criteria is not propagated here, unlike
            # in of_type() / and_() below -- confirm this is intentional.
            return self.__class__(
                self.prop,
                self._parententity,
                adapt_to_entity=adapt_to_entity,
                of_type=self._of_type,
            )

        entity: _InternalEntityType[_PT]
        """The target entity referred to by this
        :class:`.RelationshipProperty.Comparator`.

        This is either a :class:`_orm.Mapper` or :class:`.AliasedInsp`
        object.

        This is the "target" or "remote" side of the
        :func:`_orm.relationship`.

        """

        mapper: Mapper[_PT]
        """The target :class:`_orm.Mapper` referred to by this
        :class:`.RelationshipProperty.Comparator`.

        This is the "target" or "remote" side of the
        :func:`_orm.relationship`.

        """

        def _memoized_attr_entity(self) -> _InternalEntityType[_PT]:
            # lazy population of the ``entity`` slot (MemoizedSlots
            # protocol); honors an of_type() override if present.
            if self._of_type:
                return inspect(self._of_type)  # type: ignore
            else:
                return self.prop.entity

        def _memoized_attr_mapper(self) -> Mapper[_PT]:
            # lazy population of the ``mapper`` slot (MemoizedSlots
            # protocol).
            return self.entity.mapper

        def _source_selectable(self) -> FromClause:
            # the FROM clause representing the "parent" side of the join;
            # uses the aliased entity's selectable when adapted.
            if self._adapt_to_entity:
                return self._adapt_to_entity.selectable
            else:
                return self.property.parent._with_polymorphic_selectable

        def __clause_element__(self) -> ColumnElement[bool]:
            # render this comparator as its join condition (primaryjoin,
            # plus secondaryjoin for many-to-many), e.g. for use in
            # select() constructs.
            adapt_from = self._source_selectable()
            if self._of_type:
                of_type_entity = inspect(self._of_type)
            else:
                of_type_entity = None

            (
                pj,
                sj,
                source,
                dest,
                secondary,
                target_adapter,
            ) = self.prop._create_joins(
                source_selectable=adapt_from,
                source_polymorphic=True,
                of_type_entity=of_type_entity,
                alias_secondary=True,
                extra_criteria=self._extra_criteria,
            )
            if sj is not None:
                return pj & sj
            else:
                return pj

        def of_type(self, class_: _EntityType[Any]) -> PropComparator[_PT]:
            r"""Redefine this object in terms of a polymorphic subclass.

            See :meth:`.PropComparator.of_type` for an example.

            """
            return RelationshipProperty.Comparator(
                self.prop,
                self._parententity,
                adapt_to_entity=self._adapt_to_entity,
                of_type=class_,
                extra_criteria=self._extra_criteria,
            )

        def and_(
            self, *criteria: _ColumnExpressionArgument[bool]
        ) -> PropComparator[Any]:
            """Add AND criteria.

            See :meth:`.PropComparator.and_` for an example.

            .. versionadded:: 1.4

            """
            exprs = tuple(
                coercions.expect(roles.WhereHavingRole, clause)
                for clause in util.coerce_generator_arg(criteria)
            )

            return RelationshipProperty.Comparator(
                self.prop,
                self._parententity,
                adapt_to_entity=self._adapt_to_entity,
                of_type=self._of_type,
                extra_criteria=self._extra_criteria + exprs,
            )

        def in_(self, other: Any) -> NoReturn:
            """Produce an IN clause - this is not implemented
            for :func:`_orm.relationship`-based attributes at this time.

            """
            raise NotImplementedError(
                "in_() not yet supported for "
                "relationships.  For a simple "
                "many-to-one, use in_() against "
                "the set of foreign key values."
            )

        # https://github.com/python/mypy/issues/4266
        __hash__ = None  # type: ignore

        def __eq__(self, other: Any) -> ColumnElement[bool]:  # type: ignore[override]  # noqa: E501
            """Implement the ``==`` operator.

            In a many-to-one context, such as::

              MyClass.some_prop == <some object>

            this will typically produce a
            clause such as::

              mytable.related_id == <some id>

            Where ``<some id>`` is the primary key of the given
            object.

            The ``==`` operator provides partial functionality for non-
            many-to-one comparisons:

            * Comparisons against collections are not supported.
              Use :meth:`~.Relationship.Comparator.contains`.
            * Compared to a scalar one-to-many, will produce a
              clause that compares the target columns in the parent to
              the given target.
            * Compared to a scalar many-to-many, an alias
              of the association table will be rendered as
              well, forming a natural join that is part of the
              main body of the query. This will not work for
              queries that go beyond simple AND conjunctions of
              comparisons, such as those which use OR. Use
              explicit joins, outerjoins, or
              :meth:`~.Relationship.Comparator.has` for
              more comprehensive non-many-to-one scalar
              membership tests.
            * Comparisons against ``None`` given in a one-to-many
              or many-to-many context produce a NOT EXISTS clause.

            """
            if other is None or isinstance(other, expression.Null):
                if self.property.direction in [ONETOMANY, MANYTOMANY]:
                    # collection side compared to None -> NOT EXISTS
                    return ~self._criterion_exists()
                else:
                    return _orm_annotate(
                        self.property._optimized_compare(
                            None, adapt_source=self.adapter
                        )
                    )
            elif self.property.uselist:
                raise sa_exc.InvalidRequestError(
                    "Can't compare a collection to an object or collection; "
                    "use contains() to test for membership."
                )
            else:
                return _orm_annotate(
                    self.property._optimized_compare(
                        other, adapt_source=self.adapter
                    )
                )

        def _criterion_exists(
            self,
            criterion: Optional[_ColumnExpressionArgument[bool]] = None,
            **kwargs: Any,
        ) -> Exists:
            # build the correlated EXISTS subquery shared by any() / has()
            # and the None-comparison forms of __eq__ / __ne__.
            where_criteria = (
                coercions.expect(roles.WhereHavingRole, criterion)
                if criterion is not None
                else None
            )

            if getattr(self, "_of_type", None):
                info: Optional[_InternalEntityType[Any]] = inspect(
                    self._of_type
                )
                assert info is not None
                target_mapper, to_selectable, is_aliased_class = (
                    info.mapper,
                    info.selectable,
                    info.is_aliased_class,
                )
                if self.property._is_self_referential and not is_aliased_class:
                    to_selectable = to_selectable._anonymous_fromclause()

                single_crit = target_mapper._single_table_criterion
                if single_crit is not None:
                    if where_criteria is not None:
                        where_criteria = single_crit & where_criteria
                    else:
                        where_criteria = single_crit
            else:
                is_aliased_class = False
                to_selectable = None

            if self.adapter:
                source_selectable = self._source_selectable()
            else:
                source_selectable = None

            (
                pj,
                sj,
                source,
                dest,
                secondary,
                target_adapter,
            ) = self.property._create_joins(
                dest_selectable=to_selectable,
                source_selectable=source_selectable,
            )

            for k in kwargs:
                # keyword arguments become equality criteria against
                # attributes of the target class
                crit = getattr(self.property.mapper.class_, k) == kwargs[k]
                if where_criteria is None:
                    where_criteria = crit
                else:
                    where_criteria = where_criteria & crit

            # annotate the *local* side of the join condition, in the case
            # of pj + sj this is the full primaryjoin, in the case of just
            # pj its the local side of the primaryjoin.
            if sj is not None:
                j = _orm_annotate(pj) & sj
            else:
                j = _orm_annotate(pj, exclude=self.property.remote_side)

            if (
                where_criteria is not None
                and target_adapter
                and not is_aliased_class
            ):
                # limit this adapter to annotated only?
                where_criteria = target_adapter.traverse(where_criteria)

            # only have the "joined left side" of what we
            # return be subject to Query adaption.  The right
            # side of it is used for an exists() subquery and
            # should not correlate or otherwise reach out
            # to anything in the enclosing query.
            if where_criteria is not None:
                where_criteria = where_criteria._annotate(
                    {"no_replacement_traverse": True}
                )

            crit = j & sql.True_._ifnone(where_criteria)

            if secondary is not None:
                ex = (
                    sql.exists(1)
                    .where(crit)
                    .select_from(dest, secondary)
                    .correlate_except(dest, secondary)
                )
            else:
                ex = (
                    sql.exists(1)
                    .where(crit)
                    .select_from(dest)
                    .correlate_except(dest)
                )
            return ex

        def any(
            self,
            criterion: Optional[_ColumnExpressionArgument[bool]] = None,
            **kwargs: Any,
        ) -> ColumnElement[bool]:
            """Produce an expression that tests a collection against
            particular criterion, using EXISTS.

            An expression like::

                session.query(MyClass).filter(
                    MyClass.somereference.any(SomeRelated.x==2)
                )

            Will produce a query like::

                SELECT * FROM my_table WHERE
                EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
                AND related.x=2)

            Because :meth:`~.Relationship.Comparator.any` uses
            a correlated subquery, its performance is not nearly as
            good when compared against large target tables as that of
            using a join.

            :meth:`~.Relationship.Comparator.any` is particularly
            useful for testing for empty collections::

                session.query(MyClass).filter(
                    ~MyClass.somereference.any()
                )

            will produce::

                SELECT * FROM my_table WHERE
                NOT (EXISTS (SELECT 1 FROM related WHERE
                related.my_id=my_table.id))

            :meth:`~.Relationship.Comparator.any` is only
            valid for collections, i.e. a :func:`_orm.relationship`
            that has ``uselist=True``.  For scalar references,
            use :meth:`~.Relationship.Comparator.has`.

            """
            if not self.property.uselist:
                raise sa_exc.InvalidRequestError(
                    "'any()' not implemented for scalar "
                    "attributes. Use has()."
                )

            return self._criterion_exists(criterion, **kwargs)

        def has(
            self,
            criterion: Optional[_ColumnExpressionArgument[bool]] = None,
            **kwargs: Any,
        ) -> ColumnElement[bool]:
            """Produce an expression that tests a scalar reference against
            particular criterion, using EXISTS.

            An expression like::

                session.query(MyClass).filter(
                    MyClass.somereference.has(SomeRelated.x==2)
                )

            Will produce a query like::

                SELECT * FROM my_table WHERE
                EXISTS (SELECT 1 FROM related WHERE
                related.id==my_table.related_id AND related.x=2)

            Because :meth:`~.Relationship.Comparator.has` uses
            a correlated subquery, its performance is not nearly as
            good when compared against large target tables as that of
            using a join.

            :meth:`~.Relationship.Comparator.has` is only
            valid for scalar references, i.e. a :func:`_orm.relationship`
            that has ``uselist=False``.  For collection references,
            use :meth:`~.Relationship.Comparator.any`.

            """
            if self.property.uselist:
                raise sa_exc.InvalidRequestError(
                    "'has()' not implemented for collections. " "Use any()."
                )
            return self._criterion_exists(criterion, **kwargs)

        def contains(
            self, other: _ColumnExpressionArgument[Any], **kwargs: Any
        ) -> ColumnElement[bool]:
            """Return a simple expression that tests a collection for
            containment of a particular item.

            :meth:`~.Relationship.Comparator.contains` is
            only valid for a collection, i.e. a
            :func:`_orm.relationship` that implements
            one-to-many or many-to-many with ``uselist=True``.

            When used in a simple one-to-many context, an
            expression like::

                MyClass.contains(other)

            Produces a clause like::

                mytable.id == <some id>

            Where ``<some id>`` is the value of the foreign key
            attribute on ``other`` which refers to the primary
            key of its parent object. From this it follows that
            :meth:`~.Relationship.Comparator.contains` is
            very useful when used with simple one-to-many
            operations.

            For many-to-many operations, the behavior of
            :meth:`~.Relationship.Comparator.contains`
            has more caveats. The association table will be
            rendered in the statement, producing an "implicit"
            join, that is, includes multiple tables in the FROM
            clause which are equated in the WHERE clause::

                query(MyClass).filter(MyClass.contains(other))

            Produces a query like::

                SELECT * FROM my_table, my_association_table AS
                my_association_table_1 WHERE
                my_table.id = my_association_table_1.parent_id
                AND my_association_table_1.child_id = <some id>

            Where ``<some id>`` would be the primary key of
            ``other``. From the above, it is clear that
            :meth:`~.Relationship.Comparator.contains`
            will **not** work with many-to-many collections when
            used in queries that move beyond simple AND
            conjunctions, such as multiple
            :meth:`~.Relationship.Comparator.contains`
            expressions joined by OR. In such cases subqueries or
            explicit "outer joins" will need to be used instead.
            See :meth:`~.Relationship.Comparator.any` for
            a less-performant alternative using EXISTS, or refer
            to :meth:`_query.Query.outerjoin`
            as well as :ref:`orm_queryguide_joins`
            for more details on constructing outer joins.

            kwargs may be ignored by this operator but are required for API
            conformance.
            """
            if not self.prop.uselist:
                raise sa_exc.InvalidRequestError(
                    "'contains' not implemented for scalar "
                    "attributes.  Use =="
                )

            clause = self.prop._optimized_compare(
                other, adapt_source=self.adapter
            )

            if self.prop.secondaryjoin is not None:
                # many-to-many: attach the clause's negation so that
                # ~contains() renders a NOT EXISTS rather than negating
                # the implicit join
                clause.negation_clause = self.__negated_contains_or_equals(
                    other
                )

            return clause

        def __negated_contains_or_equals(
            self, other: Any
        ) -> ColumnElement[bool]:
            # shared negated form used by ~contains() and __ne__()
            if self.prop.direction == MANYTOONE:
                state = attributes.instance_state(other)

                def state_bindparam(
                    local_col: ColumnElement[Any],
                    state: InstanceState[Any],
                    remote_col: ColumnElement[Any],
                ) -> BindParameter[Any]:
                    dict_ = state.dict
                    return sql.bindparam(
                        local_col.key,
                        type_=local_col.type,
                        unique=True,
                        callable_=self.prop._get_attr_w_warn_on_none(
                            self.prop.mapper, state, dict_, remote_col
                        ),
                    )

                def adapt(col: _CE) -> _CE:
                    if self.adapter:
                        return self.adapter(col)
                    else:
                        return col

                if self.property._use_get:
                    # "== None" is intentional here: renders "IS NULL"
                    return sql.and_(
                        *[
                            sql.or_(
                                adapt(x)
                                != state_bindparam(adapt(x), state, y),
                                adapt(x) == None,
                            )
                            for (x, y) in self.property.local_remote_pairs
                        ]
                    )

            criterion = sql.and_(
                *[
                    x == y
                    for (x, y) in zip(
                        self.property.mapper.primary_key,
                        self.property.mapper.primary_key_from_instance(other),
                    )
                ]
            )

            return ~self._criterion_exists(criterion)

        def __ne__(self, other: Any) -> ColumnElement[bool]:  # type: ignore[override]  # noqa: E501
            """Implement the ``!=`` operator.

            In a many-to-one context, such as::

              MyClass.some_prop != <some object>

            This will typically produce a clause such as::

              mytable.related_id != <some id>

            Where ``<some id>`` is the primary key of the
            given object.

            The ``!=`` operator provides partial functionality for non-
            many-to-one comparisons:

            * Comparisons against collections are not supported.
              Use
              :meth:`~.Relationship.Comparator.contains`
              in conjunction with :func:`_expression.not_`.
            * Compared to a scalar one-to-many, will produce a
              clause that compares the target columns in the parent to
              the given target.
            * Compared to a scalar many-to-many, an alias
              of the association table will be rendered as
              well, forming a natural join that is part of the
              main body of the query. This will not work for
              queries that go beyond simple AND conjunctions of
              comparisons, such as those which use OR. Use
              explicit joins, outerjoins, or
              :meth:`~.Relationship.Comparator.has` in
              conjunction with :func:`_expression.not_` for
              more comprehensive non-many-to-one scalar
              membership tests.
            * Comparisons against ``None`` given in a one-to-many
              or many-to-many context produce an EXISTS clause.

            """
            if other is None or isinstance(other, expression.Null):
                if self.property.direction == MANYTOONE:
                    return _orm_annotate(
                        ~self.property._optimized_compare(
                            None, adapt_source=self.adapter
                        )
                    )
                else:
                    return self._criterion_exists()
            elif self.property.uselist:
                raise sa_exc.InvalidRequestError(
                    "Can't compare a collection"
                    " to an object or collection; use "
                    "contains() to test for membership."
                )
            else:
                return _orm_annotate(self.__negated_contains_or_equals(other))

        def _memoized_attr_property(self) -> RelationshipProperty[_PT]:
            # lazy population of the ``property`` slot; ensures mapper
            # configuration has run before the property is handed out.
            self.prop.parent._check_configure()
            return self.prop
def _with_parent(
self,
instance: object,
alias_secondary: bool = True,
from_entity: Optional[_EntityType[Any]] = None,
) -> ColumnElement[bool]:
assert instance is not None
adapt_source: Optional[_CoreAdapterProto] = None
if from_entity is not None:
insp: Optional[_InternalEntityType[Any]] = inspect(from_entity)
assert insp is not None
if insp_is_aliased_class(insp):
adapt_source = insp._adapter.adapt_clause
return self._optimized_compare(
instance,
value_is_parent=True,
adapt_source=adapt_source,
alias_secondary=alias_secondary,
)
    def _optimized_compare(
        self,
        state: Any,
        value_is_parent: bool = False,
        adapt_source: Optional[_CoreAdapterProto] = None,
        alias_secondary: bool = True,
    ) -> ColumnElement[bool]:
        """Produce a comparison clause for this relationship against a
        mapped instance, reusing the lazy loader's pre-built criterion
        with bind parameters resolved from the instance's attributes.

        :param state: a mapped instance (inspected into an
         :class:`.InstanceState`), or ``None`` for a NULL comparison.
        :param value_is_parent: when True, the instance is the "parent"
         side (as in with_parent()); otherwise the criterion is reversed.
        :param adapt_source: optional clause adapter applied to the
         final criterion.
        :param alias_secondary: when True, alias the secondary table
         for many-to-many.
        """
        if state is not None:
            try:
                state = inspect(state)
            except sa_exc.NoInspectionAvailable:
                state = None

            if state is None or not getattr(state, "is_instance", False):
                raise sa_exc.ArgumentError(
                    "Mapped instance expected for relationship "
                    "comparison to object.   Classes, queries and other "
                    "SQL elements are not accepted in this context; for "
                    "comparison with a subquery, "
                    "use %s.has(**criteria)." % self
                )
        reverse_direction = not value_is_parent

        if state is None:
            return self._lazy_none_clause(
                reverse_direction, adapt_source=adapt_source
            )

        # pick the lazy-load criterion matching the comparison direction
        if not reverse_direction:
            criterion, bind_to_col = (
                self._lazy_strategy._lazywhere,
                self._lazy_strategy._bind_to_col,
            )
        else:
            criterion, bind_to_col = (
                self._lazy_strategy._rev_lazywhere,
                self._lazy_strategy._rev_bind_to_col,
            )

        if reverse_direction:
            mapper = self.mapper
        else:
            mapper = self.parent

        dict_ = attributes.instance_dict(state.obj())

        def visit_bindparam(bindparam: BindParameter[Any]) -> None:
            # replace each lazy-clause bind parameter with a callable
            # that resolves the value from the instance at execution time
            if bindparam._identifying_key in bind_to_col:
                bindparam.callable = self._get_attr_w_warn_on_none(
                    mapper,
                    state,
                    dict_,
                    bind_to_col[bindparam._identifying_key],
                )

        if self.secondary is not None and alias_secondary:
            criterion = ClauseAdapter(
                self.secondary._anonymous_fromclause()
            ).traverse(criterion)

        # clone the criterion so that per-instance bind callables do not
        # mutate the shared lazy-load clause
        criterion = visitors.cloned_traverse(
            criterion, {}, {"bindparam": visit_bindparam}
        )

        if adapt_source:
            criterion = adapt_source(criterion)
        return criterion
    def _get_attr_w_warn_on_none(
        self,
        mapper: Mapper[Any],
        state: InstanceState[Any],
        dict_: _InstanceDict,
        column: ColumnElement[Any],
    ) -> Callable[[], Any]:
        """Create the callable that is used in a many-to-one expression.

        E.g.::

            u1 = s.query(User).get(5)

            expr = Address.user == u1

        Above, the SQL should be "address.user_id = 5". The callable
        returned by this method produces the value "5" based on the identity
        of ``u1``.

        """

        # in this callable, we're trying to thread the needle through
        # a wide variety of scenarios, including:
        #
        # * the object hasn't been flushed yet and there's no value for
        #   the attribute as of yet
        #
        # * the object hasn't been flushed yet but it has a user-defined
        #   value
        #
        # * the object has a value but it's expired and not locally present
        #
        # * the object has a value but it's expired and not locally present,
        #   and the object is also detached
        #
        # * The object hadn't been flushed yet, there was no value, but
        #   later, the object has been expired and detached, and *now*
        #   they're trying to evaluate it
        #
        # * the object had a value, but it was changed to a new value, and
        #   then expired
        #
        # * the object had a value, but it was changed to a new value, and
        #   then expired, then the object was detached
        #
        # * the object has a user-set value, but it's None and we don't do
        #   the comparison correctly for that so warn
        #

        prop = mapper.get_property_by_column(column)

        # by invoking this method, InstanceState will track the last known
        # value for this key each time the attribute is to be expired.
        # this feature was added explicitly for use in this method.
        state._track_last_known_value(prop.key)

        lkv_fixed = state._last_known_values

        def _go() -> Any:
            # invoked at statement execution time; prefers the current
            # attribute value, falling back to the last known value only
            # when the current one is unavailable (e.g. detached+expired)
            assert lkv_fixed is not None
            last_known = to_return = lkv_fixed[prop.key]
            existing_is_available = (
                last_known is not LoaderCallableStatus.NO_VALUE
            )

            # we support that the value may have changed.  so here we
            # try to get the most recent value including re-fetching.
            # only if we can't get a value now due to detachment do we return
            # the last known value
            current_value = mapper._get_state_attr_by_column(
                state,
                dict_,
                column,
                passive=PassiveFlag.PASSIVE_OFF
                if state.persistent
                else PassiveFlag.PASSIVE_NO_FETCH ^ PassiveFlag.INIT_OK,
            )

            if current_value is LoaderCallableStatus.NEVER_SET:
                if not existing_is_available:
                    raise sa_exc.InvalidRequestError(
                        "Can't resolve value for column %s on object "
                        "%s; no value has been set for this column"
                        % (column, state_str(state))
                    )
            elif current_value is LoaderCallableStatus.PASSIVE_NO_RESULT:
                if not existing_is_available:
                    raise sa_exc.InvalidRequestError(
                        "Can't resolve value for column %s on object "
                        "%s; the object is detached and the value was "
                        "expired" % (column, state_str(state))
                    )
            else:
                to_return = current_value
            if to_return is None:
                util.warn(
                    "Got None for value of column %s; this is unsupported "
                    "for a relationship comparison and will not "
                    "currently produce an IS comparison "
                    "(but may in a future release)" % column
                )
            return to_return

        return _go
def _lazy_none_clause(
self,
reverse_direction: bool = False,
adapt_source: Optional[_CoreAdapterProto] = None,
) -> ColumnElement[bool]:
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
criterion = adapt_criterion_to_null(criterion, bind_to_col)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def __str__(self) -> str:
return str(self.parent.class_.__name__) + "." + self.key
    def merge(
        self,
        session: Session,
        source_state: InstanceState[Any],
        source_dict: _InstanceDict,
        dest_state: InstanceState[Any],
        dest_dict: _InstanceDict,
        load: bool,
        _recursive: Dict[Any, object],
        _resolve_conflict_map: Dict[_IdentityKeyType[Any], object],
    ) -> None:
        """Merge the value of this relationship attribute from the source
        instance onto the destination instance, as part of
        :meth:`.Session.merge`.

        Recurses into each related object via ``session._merge``; the
        ``_recursive`` map guards against cycles through backrefs.
        """
        if load:
            # cycle guard: if a reverse property already merged the
            # source instance, don't merge back in the other direction
            for r in self._reverse_property:
                if (source_state, r) in _recursive:
                    return

        if "merge" not in self._cascade:
            return

        if self.key not in source_dict:
            return

        if self.uselist:
            impl = source_state.get_impl(self.key)

            assert is_has_collection_adapter(impl)
            instances_iterable = impl.get_collection(source_state, source_dict)

            # if this is a CollectionAttributeImpl, then empty should
            # be False, otherwise "self.key in source_dict" should not be
            # True
            assert not instances_iterable.empty if impl.collection else True

            if load:
                # for a full merge, pre-load the destination collection,
                # so that individual _merge of each item pulls from identity
                # map for those already present.
                # also assumes CollectionAttributeImpl behavior of loading
                # "old" list in any case
                dest_state.get_impl(self.key).get(
                    dest_state, dest_dict, passive=PassiveFlag.PASSIVE_MERGE
                )

            dest_list = []
            for current in instances_iterable:
                current_state = attributes.instance_state(current)
                current_dict = attributes.instance_dict(current)
                _recursive[(current_state, self)] = True
                obj = session._merge(
                    current_state,
                    current_dict,
                    load=load,
                    _recursive=_recursive,
                    _resolve_conflict_map=_resolve_conflict_map,
                )
                if obj is not None:
                    dest_list.append(obj)

            if not load:
                # "load=False" merge: populate the destination collection
                # without firing attribute events
                coll = attributes.init_state_collection(
                    dest_state, dest_dict, self.key
                )
                for c in dest_list:
                    coll.append_without_event(c)
            else:
                dest_impl = dest_state.get_impl(self.key)
                assert is_has_collection_adapter(dest_impl)
                dest_impl.set(
                    dest_state,
                    dest_dict,
                    dest_list,
                    _adapt=False,
                    passive=PassiveFlag.PASSIVE_MERGE,
                )
        else:
            # scalar reference: merge the single related object, if any
            current = source_dict[self.key]
            if current is not None:
                current_state = attributes.instance_state(current)
                current_dict = attributes.instance_dict(current)
                _recursive[(current_state, self)] = True
                obj = session._merge(
                    current_state,
                    current_dict,
                    load=load,
                    _recursive=_recursive,
                    _resolve_conflict_map=_resolve_conflict_map,
                )
            else:
                obj = None

            if not load:
                dest_dict[self.key] = obj
            else:
                dest_state.get_impl(self.key).set(
                    dest_state, dest_dict, obj, None
                )
def _value_as_iterable(
self,
state: InstanceState[_O],
dict_: _InstanceDict,
key: str,
passive: PassiveFlag = PassiveFlag.PASSIVE_OFF,
) -> Sequence[Tuple[InstanceState[_O], _O]]:
"""Return a list of tuples (state, obj) for the given
key.
returns an empty list if the value is None/empty/PASSIVE_NO_RESULT
"""
impl = state.manager[key].impl
x = impl.get(state, dict_, passive=passive)
if x is LoaderCallableStatus.PASSIVE_NO_RESULT or x is None:
return []
elif is_has_collection_adapter(impl):
return [
(attributes.instance_state(o), o)
for o in impl.get_collection(state, dict_, x, passive=passive)
]
else:
return [(attributes.instance_state(x), x)]
    def cascade_iterator(
        self,
        type_: str,
        state: InstanceState[Any],
        dict_: _InstanceDict,
        visited_states: Set[InstanceState[Any]],
        halt_on: Optional[Callable[[InstanceState[Any]], bool]] = None,
    ) -> Iterator[Tuple[Any, Mapper[Any], InstanceState[Any], _InstanceDict]]:
        """Iterate the related objects that the given cascade operation
        (``type_``, e.g. "save-update", "delete", "refresh-expire")
        should be applied to, yielding
        ``(obj, mapper, state, dict_)`` tuples and recording each yielded
        state in ``visited_states``.
        """
        # assert type_ in self._cascade

        # only actively lazy load on the 'delete' cascade
        if type_ != "delete" or self.passive_deletes:
            passive = PassiveFlag.PASSIVE_NO_INITIALIZE
        else:
            passive = PassiveFlag.PASSIVE_OFF | PassiveFlag.NO_RAISE

        if type_ == "save-update":
            # include pending additions/removals, not just the loaded value
            tuples = state.manager[self.key].impl.get_all_pending(state, dict_)
        else:
            tuples = self._value_as_iterable(
                state, dict_, self.key, passive=passive
            )

        skip_pending = (
            type_ == "refresh-expire" and "delete-orphan" not in self._cascade
        )

        for instance_state, c in tuples:
            if instance_state in visited_states:
                continue

            if c is None:
                # would like to emit a warning here, but
                # would not be consistent with collection.append(None)
                # current behavior of silently skipping.
                # see [ticket:2229]
                continue

            assert instance_state is not None
            instance_dict = attributes.instance_dict(c)

            if halt_on and halt_on(instance_state):
                continue

            if skip_pending and not instance_state.key:
                continue

            instance_mapper = instance_state.manager.mapper

            if not instance_mapper.isa(self.mapper.class_manager.mapper):
                raise AssertionError(
                    "Attribute '%s' on class '%s' "
                    "doesn't handle objects "
                    "of type '%s'"
                    % (self.key, self.parent.class_, c.__class__)
                )

            visited_states.add(instance_state)

            yield c, instance_mapper, instance_state, instance_dict
@property
def _effective_sync_backref(self) -> bool:
if self.viewonly:
return False
else:
return self.sync_backref is not False
@staticmethod
def _check_sync_backref(
rel_a: RelationshipProperty[Any], rel_b: RelationshipProperty[Any]
) -> None:
if rel_a.viewonly and rel_b.sync_backref:
raise sa_exc.InvalidRequestError(
"Relationship %s cannot specify sync_backref=True since %s "
"includes viewonly=True." % (rel_b, rel_a)
)
if (
rel_a.viewonly
and not rel_b.viewonly
and rel_b.sync_backref is not False
):
rel_b.sync_backref = False
    def _add_reverse_property(self, key: str) -> None:
        """Link this relationship with its back_populates counterpart
        named ``key`` on the target mapper, validating direction and
        viewonly/sync_backref compatibility in both directions.
        """
        other = self.mapper.get_property(key, _configure_mappers=False)
        if not isinstance(other, RelationshipProperty):
            raise sa_exc.InvalidRequestError(
                "back_populates on relationship '%s' refers to attribute '%s' "
                "that is not a relationship.  The back_populates parameter "
                "should refer to the name of a relationship on the target "
                "class." % (self, other)
            )
        # viewonly and sync_backref cases
        # 1. self.viewonly==True and other.sync_backref==True -> error
        # 2. self.viewonly==True and other.viewonly==False and
        #    other.sync_backref==None -> warn sync_backref=False, set to False
        self._check_sync_backref(self, other)
        # 3. other.viewonly==True and self.sync_backref==True -> error
        # 4. other.viewonly==True and self.viewonly==False and
        #    self.sync_backref==None -> warn sync_backref=False, set to False
        self._check_sync_backref(other, self)

        self._reverse_property.add(other)
        other._reverse_property.add(self)

        other._setup_entity()

        if not other.mapper.common_parent(self.parent):
            raise sa_exc.ArgumentError(
                "reverse_property %r on "
                "relationship %s references relationship %s, which "
                "does not reference mapper %s"
                % (key, self, other, self.parent)
            )

        if (
            other._configure_started
            and self.direction in (ONETOMANY, MANYTOONE)
            and self.direction == other.direction
        ):
            # a pair of one-to-many/one-to-many or many-to-one/many-to-one
            # backrefs is almost always a missing remote_side
            raise sa_exc.ArgumentError(
                "%s and back-reference %s are "
                "both of the same direction %r.  Did you mean to "
                "set remote_side on the "
                "many-to-one side ?" % (other, self, self.direction)
            )
    @util.memoized_property
    def entity(self) -> _InternalEntityType[_T]:
        """Return the target mapped entity, which is an inspect() of the
        class or aliased class that is referred towards.

        """
        self.parent._check_configure()
        # _check_configure() triggers mapper configuration, during which
        # _setup_entity() assigns ``self.entity`` directly into the
        # instance __dict__; that assignment shadows this memoized
        # property, so the attribute access below returns the configured
        # value rather than re-entering this function.
        return self.entity
@util.memoized_property
def mapper(self) -> Mapper[_T]:
"""Return the targeted :class:`_orm.Mapper` for this
:class:`.RelationshipProperty`.
"""
return self.entity.mapper
    def do_init(self) -> None:
        """Complete configuration of this relationship once mappers are
        available.

        The steps below are order-sensitive: dependent arguments must be
        resolved before the target entity is set up, which must precede
        join-condition construction, which the cascade checks and
        backref generation rely upon.
        """
        self._check_conflicts()
        self._process_dependent_arguments()
        self._setup_entity()
        self._setup_registry_dependencies()
        self._setup_join_conditions()
        self._check_cascade_settings(self._cascade)
        self._post_init()
        self._generate_backref()
        self._join_condition._warn_for_conflicting_sync_targets()
        super().do_init()
        # cache the lazy="select" loader strategy; used heavily by
        # _optimized_compare() and _lazy_none_clause()
        self._lazy_strategy = cast(
            "LazyLoader", self._get_strategy((("lazy", "select"),))
        )
def _setup_registry_dependencies(self) -> None:
self.parent.mapper.registry._set_depends_on(
self.entity.mapper.registry
)
def _process_dependent_arguments(self) -> None:
"""Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
"""
# accept callables for other attributes which may require
# deferred initialization. This technique is used
# by declarative "string configs" and some recipes.
init_args = self._init_args
for attr in (
"order_by",
"primaryjoin",
"secondaryjoin",
"secondary",
"foreign_keys",
"remote_side",
):
rel_arg = getattr(init_args, attr)
rel_arg._resolve_against_registry(self._clsregistry_resolvers[1])
# remove "annotations" which are present if mapped class
# descriptors are used to create the join expression.
for attr in "primaryjoin", "secondaryjoin":
rel_arg = getattr(init_args, attr)
val = rel_arg.resolved
if val is not None:
rel_arg.resolved = _orm_deannotate(
coercions.expect(
roles.ColumnArgumentRole, val, argname=attr
)
)
secondary = init_args.secondary.resolved
if secondary is not None and _is_mapped_class(secondary):
raise sa_exc.ArgumentError(
"secondary argument %s passed to to relationship() %s must "
"be a Table object or other FROM clause; can't send a mapped "
"class directly as rows in 'secondary' are persisted "
"independently of a class that is mapped "
"to that same table." % (secondary, self)
)
# ensure expressions in self.order_by, foreign_keys,
# remote_side are all columns, not strings.
if (
init_args.order_by.resolved is not False
and init_args.order_by.resolved is not None
):
self.order_by = tuple(
coercions.expect(
roles.ColumnArgumentRole, x, argname="order_by"
)
for x in util.to_list(init_args.order_by.resolved)
)
else:
self.order_by = False
self._user_defined_foreign_keys = util.column_set(
coercions.expect(
roles.ColumnArgumentRole, x, argname="foreign_keys"
)
for x in util.to_column_set(init_args.foreign_keys.resolved)
)
self.remote_side = util.column_set(
coercions.expect(
roles.ColumnArgumentRole, x, argname="remote_side"
)
for x in util.to_column_set(init_args.remote_side.resolved)
)
def declarative_scan(
self,
decl_scan: _ClassScanMapperConfig,
registry: _RegistryType,
cls: Type[Any],
originating_module: Optional[str],
key: str,
mapped_container: Optional[Type[Mapped[Any]]],
annotation: Optional[_AnnotationScanType],
extracted_mapped_annotation: Optional[_AnnotationScanType],
is_dataclass_field: bool,
) -> None:
argument = extracted_mapped_annotation
if extracted_mapped_annotation is None:
if self.argument is None:
self._raise_for_required(key, cls)
else:
return
argument = extracted_mapped_annotation
assert originating_module is not None
is_write_only = mapped_container is not None and issubclass(
mapped_container, WriteOnlyMapped
)
if is_write_only:
self.lazy = "write_only"
self.strategy_key = (("lazy", self.lazy),)
is_dynamic = mapped_container is not None and issubclass(
mapped_container, DynamicMapped
)
if is_dynamic:
self.lazy = "dynamic"
self.strategy_key = (("lazy", self.lazy),)
argument = de_optionalize_union_types(argument)
if hasattr(argument, "__origin__"):
arg_origin = argument.__origin__ # type: ignore
if isinstance(arg_origin, type) and issubclass(
arg_origin, abc.Collection
):
if self.collection_class is None:
if _py_inspect.isabstract(arg_origin):
raise sa_exc.ArgumentError(
f"Collection annotation type {arg_origin} cannot "
"be instantiated; please provide an explicit "
"'collection_class' parameter "
"(e.g. list, set, etc.) to the "
"relationship() function to accompany this "
"annotation"
)
self.collection_class = arg_origin
elif not is_write_only and not is_dynamic:
self.uselist = False
if argument.__args__: # type: ignore
if isinstance(arg_origin, type) and issubclass(
arg_origin, typing.Mapping # type: ignore
):
type_arg = argument.__args__[-1] # type: ignore
else:
type_arg = argument.__args__[0] # type: ignore
if hasattr(type_arg, "__forward_arg__"):
str_argument = type_arg.__forward_arg__
argument = resolve_name_to_real_class_name(
str_argument, originating_module
)
else:
argument = type_arg
else:
raise sa_exc.ArgumentError(
f"Generic alias {argument} requires an argument"
)
elif hasattr(argument, "__forward_arg__"):
argument = argument.__forward_arg__ # type: ignore
argument = resolve_name_to_real_class_name(
argument, originating_module
)
# we don't allow the collection class to be a
# __forward_arg__ right now, so if we see a forward arg here,
# we know there was no collection class either
if (
self.collection_class is None
and not is_write_only
and not is_dynamic
):
self.uselist = False
# ticket #8759
# if a lead argument was given to relationship(), like
# `relationship("B")`, use that, don't replace it with class we
# found in the annotation. The declarative_scan() method call here is
# still useful, as we continue to derive collection type and do
# checking of the annotation in any case.
if self.argument is None:
self.argument = cast("_RelationshipArgumentType[_T]", argument)
    @util.preload_module("sqlalchemy.orm.mapper")
    def _setup_entity(self, __argument: Any = None) -> None:
        """Resolve the target argument into a mapped entity, setting
        ``self.entity`` and ``self.target``.

        The argument may be a string name (resolved through the class
        registry), a callable returning the target, a class, or a mapper.
        No-op if ``entity`` has already been resolved.
        """
        if "entity" in self.__dict__:
            return

        mapperlib = util.preloaded.orm_mapper

        if __argument:
            argument = __argument
        else:
            argument = self.argument

        resolved_argument: _ExternalEntityType[Any]

        if isinstance(argument, str):
            # we might want to cleanup clsregistry API to make this
            # more straightforward
            resolved_argument = cast(
                "_ExternalEntityType[Any]",
                self._clsregistry_resolve_name(argument)(),
            )
        elif callable(argument) and not isinstance(
            argument, (type, mapperlib.Mapper)
        ):
            # lambda / deferred-callable form: call it to get the target
            resolved_argument = argument()
        else:
            resolved_argument = argument

        entity: _InternalEntityType[Any]

        if isinstance(resolved_argument, type):
            entity = class_mapper(resolved_argument, configure=False)
        else:
            try:
                entity = inspect(resolved_argument)
            except sa_exc.NoInspectionAvailable:
                entity = None  # type: ignore

            # anything inspectable must expose a .mapper to be a valid
            # relationship target
            if not hasattr(entity, "mapper"):
                raise sa_exc.ArgumentError(
                    "relationship '%s' expects "
                    "a class or a mapper argument (received: %s)"
                    % (self.key, type(resolved_argument))
                )

        self.entity = entity  # type: ignore
        self.target = self.entity.persist_selectable
    def _setup_join_conditions(self) -> None:
        """Build the :class:`.JoinCondition` for this relationship and copy
        its computed join expressions and column collections onto ``self``.
        """
        self._join_condition = jc = JoinCondition(
            parent_persist_selectable=self.parent.persist_selectable,
            child_persist_selectable=self.entity.persist_selectable,
            parent_local_selectable=self.parent.local_table,
            child_local_selectable=self.entity.local_table,
            primaryjoin=self._init_args.primaryjoin.resolved,
            secondary=self._init_args.secondary.resolved,
            secondaryjoin=self._init_args.secondaryjoin.resolved,
            parent_equivalents=self.parent._equivalent_columns,
            child_equivalents=self.mapper._equivalent_columns,
            consider_as_foreign_keys=self._user_defined_foreign_keys,
            local_remote_pairs=self.local_remote_pairs,
            remote_side=self.remote_side,
            self_referential=self._is_self_referential,
            prop=self,
            support_sync=not self.viewonly,
            can_be_synced_fn=self._columns_are_mapped,
        )
        # copy the JoinCondition's derived state onto this property
        self.primaryjoin = jc.primaryjoin
        self.secondaryjoin = jc.secondaryjoin
        self.secondary = jc.secondary
        self.direction = jc.direction
        self.local_remote_pairs = jc.local_remote_pairs
        self.remote_side = jc.remote_columns
        self.local_columns = jc.local_columns
        self.synchronize_pairs = jc.synchronize_pairs
        self._calculated_foreign_keys = jc.foreign_key_columns
        self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
@property
def _clsregistry_resolve_arg(
self,
) -> Callable[[str, bool], _class_resolver]:
return self._clsregistry_resolvers[1]
@property
def _clsregistry_resolve_name(
self,
) -> Callable[[str], Callable[[], Union[Type[Any], Table, _ModNS]]]:
return self._clsregistry_resolvers[0]
    @util.memoized_property
    @util.preload_module("sqlalchemy.orm.clsregistry")
    def _clsregistry_resolvers(
        self,
    ) -> Tuple[
        Callable[[str], Callable[[], Union[Type[Any], Table, _ModNS]]],
        Callable[[str, bool], _class_resolver],
    ]:
        """Memoized (name-resolver, arg-resolver) pair from the class
        registry, scoped to the parent mapped class."""
        _resolver = util.preloaded.orm_clsregistry._resolver
        return _resolver(self.parent.class_, self)
def _check_conflicts(self) -> None:
"""Test that this relationship is legal, warn about
inheritance conflicts."""
if self.parent.non_primary and not class_mapper(
self.parent.class_, configure=False
).has_property(self.key):
raise sa_exc.ArgumentError(
"Attempting to assign a new "
"relationship '%s' to a non-primary mapper on "
"class '%s'. New relationships can only be added "
"to the primary mapper, i.e. the very first mapper "
"created for class '%s' "
% (
self.key,
self.parent.class_.__name__,
self.parent.class_.__name__,
)
)
@property
def cascade(self) -> CascadeOptions:
"""Return the current cascade setting for this
:class:`.RelationshipProperty`.
"""
return self._cascade
@cascade.setter
def cascade(self, cascade: Union[str, CascadeOptions]) -> None:
self._set_cascade(cascade)
    def _set_cascade(self, cascade_arg: Union[str, CascadeOptions]) -> None:
        """Normalize and apply a new cascade setting.

        Coerces strings to :class:`.CascadeOptions`, restricts the set to
        viewonly-compatible cascades when ``viewonly`` is in effect,
        validates against the mapper once configured, and propagates the
        change to the dependency processor.
        """
        cascade = CascadeOptions(cascade_arg)

        if self.viewonly:
            # viewonly relationships only support a restricted subset
            cascade = CascadeOptions(
                cascade.intersection(CascadeOptions._viewonly_cascades)
            )

        if "mapper" in self.__dict__:
            # mapper already resolved; full validation is possible now
            self._check_cascade_settings(cascade)
        self._cascade = cascade

        if self._dependency_processor:
            self._dependency_processor.cascade = cascade
    def _check_cascade_settings(self, cascade: CascadeOptions) -> None:
        """Validate a cascade configuration against this relationship's
        direction and flags, raising :class:`.ArgumentError` on illegal
        combinations and registering delete-orphan handling.
        """
        # delete-orphan on the "many" side requires single_parent=True
        if (
            cascade.delete_orphan
            and not self.single_parent
            and (self.direction is MANYTOMANY or self.direction is MANYTOONE)
        ):
            raise sa_exc.ArgumentError(
                "For %(direction)s relationship %(rel)s, delete-orphan "
                "cascade is normally "
                'configured only on the "one" side of a one-to-many '
                "relationship, "
                'and not on the "many" side of a many-to-one or many-to-many '
                "relationship. "
                "To force this relationship to allow a particular "
                '"%(relatedcls)s" object to be referred towards by only '
                'a single "%(clsname)s" object at a time via the '
                "%(rel)s relationship, which "
                "would allow "
                "delete-orphan cascade to take place in this direction, set "
                "the single_parent=True flag."
                % {
                    "rel": self,
                    "direction": "many-to-one"
                    if self.direction is MANYTOONE
                    else "many-to-many",
                    "clsname": self.parent.class_.__name__,
                    "relatedcls": self.mapper.class_.__name__,
                },
                code="bbf0",
            )

        # passive_deletes='all' disables all delete bookkeeping, which is
        # incompatible with delete cascades
        if self.passive_deletes == "all" and (
            "delete" in cascade or "delete-orphan" in cascade
        ):
            raise sa_exc.ArgumentError(
                "On %s, can't set passive_deletes='all' in conjunction "
                "with 'delete' or 'delete-orphan' cascade" % self
            )

        if cascade.delete_orphan:
            # register this relationship for orphan detection on the target
            self.mapper.primary_mapper()._delete_orphans.append(
                (self.key, self.parent.class_)
            )
def _persists_for(self, mapper: Mapper[Any]) -> bool:
"""Return True if this property will persist values on behalf
of the given mapper.
"""
return (
self.key in mapper.relationships
and mapper.relationships[self.key] is self
)
def _columns_are_mapped(self, *cols: ColumnElement[Any]) -> bool:
"""Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.RelationshipProperty`.
"""
secondary = self._init_args.secondary.resolved
for c in cols:
if secondary is not None and secondary.c.contains_column(c):
continue
if not self.parent.persist_selectable.c.contains_column(
c
) and not self.target.c.contains_column(c):
return False
return True
    def _generate_backref(self) -> None:
        """Interpret the 'backref' instruction to create a
        :func:`_orm.relationship` complementary to this one."""

        # backrefs are only generated for primary mappers
        if self.parent.non_primary:
            return
        if self.backref is not None and not self.back_populates:
            kwargs: Dict[str, Any]
            # backref may be a plain string or a (name, kwargs) pair
            if isinstance(self.backref, str):
                backref_key, kwargs = self.backref, {}
            else:
                backref_key, kwargs = self.backref
            mapper = self.mapper.primary_mapper()

            if not mapper.concrete:
                # guard against a name collision anywhere in the target's
                # inheritance hierarchy
                check = set(mapper.iterate_to_root()).union(
                    mapper.self_and_descendants
                )
                for m in check:
                    if m.has_property(backref_key) and not m.concrete:
                        raise sa_exc.ArgumentError(
                            "Error creating backref "
                            "'%s' on relationship '%s': property of that "
                            "name exists on mapper '%s'"
                            % (backref_key, self, m)
                        )

            # determine primaryjoin/secondaryjoin for the
            # backref. Use the one we had, so that
            # a custom join doesn't have to be specified in
            # both directions.
            if self.secondary is not None:
                # for many to many, just switch primaryjoin/
                # secondaryjoin. use the annotated
                # pj/sj on the _join_condition.
                pj = kwargs.pop(
                    "primaryjoin",
                    self._join_condition.secondaryjoin_minus_local,
                )
                sj = kwargs.pop(
                    "secondaryjoin",
                    self._join_condition.primaryjoin_minus_local,
                )
            else:
                pj = kwargs.pop(
                    "primaryjoin",
                    self._join_condition.primaryjoin_reverse_remote,
                )
                sj = kwargs.pop("secondaryjoin", None)
                if sj:
                    raise sa_exc.InvalidRequestError(
                        "Can't assign 'secondaryjoin' on a backref "
                        "against a non-secondary relationship."
                    )

            foreign_keys = kwargs.pop(
                "foreign_keys", self._user_defined_foreign_keys
            )
            parent = self.parent.primary_mapper()
            # mirror configuration flags onto the new relationship unless
            # explicitly overridden in the backref kwargs
            kwargs.setdefault("viewonly", self.viewonly)
            kwargs.setdefault("post_update", self.post_update)
            kwargs.setdefault("passive_updates", self.passive_updates)
            kwargs.setdefault("sync_backref", self.sync_backref)
            self.back_populates = backref_key
            relationship = RelationshipProperty(
                parent,
                self.secondary,
                primaryjoin=pj,
                secondaryjoin=sj,
                foreign_keys=foreign_keys,
                back_populates=self.key,
                **kwargs,
            )
            mapper._configure_property(
                backref_key, relationship, warn_for_existing=True
            )

        if self.back_populates:
            self._add_reverse_property(self.back_populates)
    @util.preload_module("sqlalchemy.orm.dependency")
    def _post_init(self) -> None:
        """Finalize configuration once join conditions are known: derive
        ``uselist`` from the direction if unset, and create the dependency
        processor for non-viewonly relationships."""
        dependency = util.preloaded.orm_dependency

        if self.uselist is None:
            # collections by default, unless the direction is many-to-one
            self.uselist = self.direction is not MANYTOONE
        if not self.viewonly:
            self._dependency_processor = (  # type: ignore
                dependency.DependencyProcessor.from_relationship
            )(self)
@util.memoized_property
def _use_get(self) -> bool:
"""memoize the 'use_get' attribute of this RelationshipLoader's
lazyloader."""
strategy = self._lazy_strategy
return strategy.use_get
@util.memoized_property
def _is_self_referential(self) -> bool:
return self.mapper.common_parent(self.parent)
    def _create_joins(
        self,
        source_polymorphic: bool = False,
        source_selectable: Optional[FromClause] = None,
        dest_selectable: Optional[FromClause] = None,
        of_type_entity: Optional[_InternalEntityType[Any]] = None,
        alias_secondary: bool = False,
        extra_criteria: Tuple[ColumnElement[bool], ...] = (),
    ) -> Tuple[
        ColumnElement[bool],
        Optional[ColumnElement[bool]],
        FromClause,
        FromClause,
        Optional[FromClause],
        Optional[ClauseAdapter],
    ]:
        """Produce the join expressions and source/destination selectables
        for this relationship, delegating to
        ``self._join_condition.join_targets()``.

        Returns a 6-tuple of (primaryjoin, secondaryjoin,
        source_selectable, dest_selectable, secondary, target_adapter).
        The ``aliased`` flag accumulated below tells join_targets whether
        column adaption against non-canonical selectables is needed.
        """
        aliased = False

        if alias_secondary and self.secondary is not None:
            aliased = True

        if source_selectable is None:
            if source_polymorphic and self.parent.with_polymorphic:
                source_selectable = self.parent._with_polymorphic_selectable

        if of_type_entity:
            # an explicit of_type() target overrides the default mapper
            dest_mapper = of_type_entity.mapper
            if dest_selectable is None:
                dest_selectable = of_type_entity.selectable
                aliased = True
        else:
            dest_mapper = self.mapper

        if dest_selectable is None:
            dest_selectable = self.entity.selectable
            if self.mapper.with_polymorphic:
                aliased = True

            # self-referential joins require an anonymous alias of the
            # destination so the two sides are distinguishable
            if self._is_self_referential and source_selectable is None:
                dest_selectable = dest_selectable._anonymous_fromclause()
                aliased = True
        elif (
            dest_selectable is not self.mapper._with_polymorphic_selectable
            or self.mapper.with_polymorphic
        ):
            aliased = True

        single_crit = dest_mapper._single_table_criterion
        # also adapt when the source is any non-canonical selectable
        aliased = aliased or (
            source_selectable is not None
            and (
                source_selectable
                is not self.parent._with_polymorphic_selectable
                or source_selectable._is_subquery
            )
        )

        (
            primaryjoin,
            secondaryjoin,
            secondary,
            target_adapter,
            dest_selectable,
        ) = self._join_condition.join_targets(
            source_selectable,
            dest_selectable,
            aliased,
            single_crit,
            extra_criteria,
        )
        if source_selectable is None:
            source_selectable = self.parent.local_table
        if dest_selectable is None:
            dest_selectable = self.entity.local_table
        return (
            primaryjoin,
            secondaryjoin,
            source_selectable,
            dest_selectable,
            secondary,
            target_adapter,
        )
def _annotate_columns(element: _CE, annotations: _AnnotationDict) -> _CE:
    """Return a copy of ``element`` with ``annotations`` applied to every
    :class:`.ColumnClause` contained within it."""

    def clone(elem: _CE) -> _CE:
        if isinstance(elem, expression.ColumnClause):
            elem = elem._annotate(annotations.copy())  # type: ignore
        # recurse into child elements, cloning via this same function
        elem._copy_internals(clone=clone)
        return elem

    if element is not None:
        element = clone(element)
    # break the self-reference held by _copy_internals
    clone = None  # type: ignore # remove gc cycles
    return element
class JoinCondition:
    """Analyzes and annotates a relationship's join conditions.

    Determines primaryjoin/secondaryjoin, marks columns as
    foreign/local/remote, derives synchronization pairs and the
    relationship direction (see ``__init__`` for the pipeline).
    """

    # join expressions; primaryjoin_initial is the raw user-supplied form
    primaryjoin_initial: Optional[ColumnElement[bool]]
    primaryjoin: ColumnElement[bool]
    secondaryjoin: Optional[ColumnElement[bool]]
    secondary: Optional[FromClause]
    prop: RelationshipProperty[Any]

    # (source, destination) column pairs used for value synchronization
    synchronize_pairs: _ColumnPairs
    secondary_synchronize_pairs: _ColumnPairs
    direction: RelationshipDirection

    # "persist" selectables are the full mapped selectables; "local"
    # selectables are the table local to each mapper
    parent_persist_selectable: FromClause
    child_persist_selectable: FromClause
    parent_local_selectable: FromClause
    child_local_selectable: FromClause

    _local_remote_pairs: Optional[_ColumnPairs]
    def __init__(
        self,
        parent_persist_selectable: FromClause,
        child_persist_selectable: FromClause,
        parent_local_selectable: FromClause,
        child_local_selectable: FromClause,
        *,
        primaryjoin: Optional[ColumnElement[bool]] = None,
        secondary: Optional[FromClause] = None,
        secondaryjoin: Optional[ColumnElement[bool]] = None,
        parent_equivalents: Optional[_EquivalentColumnMap] = None,
        child_equivalents: Optional[_EquivalentColumnMap] = None,
        consider_as_foreign_keys: Any = None,
        local_remote_pairs: Optional[_ColumnPairs] = None,
        remote_side: Any = None,
        self_referential: Any = False,
        prop: RelationshipProperty[Any],
        support_sync: bool = True,
        can_be_synced_fn: Callable[..., bool] = lambda *c: True,
    ):
        """Store the configuration and run the full analysis pipeline."""
        self.parent_persist_selectable = parent_persist_selectable
        self.parent_local_selectable = parent_local_selectable
        self.child_persist_selectable = child_persist_selectable
        self.child_local_selectable = child_local_selectable
        self.parent_equivalents = parent_equivalents
        self.child_equivalents = child_equivalents
        self.primaryjoin_initial = primaryjoin
        self.secondaryjoin = secondaryjoin
        self.secondary = secondary
        self.consider_as_foreign_keys = consider_as_foreign_keys
        self._local_remote_pairs = local_remote_pairs
        self._remote_side = remote_side
        self.prop = prop
        self.self_referential = self_referential
        self.support_sync = support_sync
        self.can_be_synced_fn = can_be_synced_fn

        # analysis pipeline: each step relies on annotations produced by
        # the previous steps, so this ordering is significant
        self._determine_joins()
        assert self.primaryjoin is not None

        self._sanitize_joins()
        self._annotate_fks()
        self._annotate_remote()
        self._annotate_local()
        self._annotate_parentmapper()
        self._setup_pairs()
        self._check_foreign_cols(self.primaryjoin, True)
        if self.secondaryjoin is not None:
            self._check_foreign_cols(self.secondaryjoin, False)
        self._determine_direction()
        self._check_remote_side()
        self._log_joins()
    def _log_joins(self) -> None:
        """Log the fully-computed join configuration for debugging."""
        log = self.prop.logger
        log.info("%s setup primary join %s", self.prop, self.primaryjoin)
        log.info("%s setup secondary join %s", self.prop, self.secondaryjoin)
        log.info(
            "%s synchronize pairs [%s]",
            self.prop,
            ",".join(
                "(%s => %s)" % (l, r) for (l, r) in self.synchronize_pairs
            ),
        )
        log.info(
            "%s secondary synchronize pairs [%s]",
            self.prop,
            ",".join(
                "(%s => %s)" % (l, r)
                for (l, r) in self.secondary_synchronize_pairs or []
            ),
        )
        log.info(
            "%s local/remote pairs [%s]",
            self.prop,
            ",".join(
                "(%s / %s)" % (l, r) for (l, r) in self.local_remote_pairs
            ),
        )
        log.info(
            "%s remote columns [%s]",
            self.prop,
            ",".join("%s" % col for col in self.remote_columns),
        )
        log.info(
            "%s local columns [%s]",
            self.prop,
            ",".join("%s" % col for col in self.local_columns),
        )
        log.info("%s relationship direction %s", self.prop, self.direction)
def _sanitize_joins(self) -> None:
"""remove the parententity annotation from our join conditions which
can leak in here based on some declarative patterns and maybe others.
"parentmapper" is relied upon both by the ORM evaluator as well as
the use case in _join_fixture_inh_selfref_w_entity
that relies upon it being present, see :ticket:`3364`.
"""
self.primaryjoin = _deep_deannotate(
self.primaryjoin, values=("parententity", "proxy_key")
)
if self.secondaryjoin is not None:
self.secondaryjoin = _deep_deannotate(
self.secondaryjoin, values=("parententity", "proxy_key")
)
    def _determine_joins(self) -> None:
        """Determine the 'primaryjoin' and 'secondaryjoin' attributes,
        if not passed to the constructor already.

        This is based on analysis of the foreign key relationships
        between the parent and target mapped selectables.

        """
        if self.secondaryjoin is not None and self.secondary is None:
            raise sa_exc.ArgumentError(
                "Property %s specified with secondary "
                "join condition but "
                "no secondary argument" % self.prop
            )

        # find a join between the given mapper's mapped table and
        # the given table. will try the mapper's local table first
        # for more specificity, then if not found will try the more
        # general mapped table, which in the case of inheritance is
        # a join.
        try:
            consider_as_foreign_keys = self.consider_as_foreign_keys or None
            if self.secondary is not None:
                # many-to-many: derive each half against the secondary
                # table unless supplied explicitly
                if self.secondaryjoin is None:
                    self.secondaryjoin = join_condition(
                        self.child_persist_selectable,
                        self.secondary,
                        a_subset=self.child_local_selectable,
                        consider_as_foreign_keys=consider_as_foreign_keys,
                    )
                if self.primaryjoin_initial is None:
                    self.primaryjoin = join_condition(
                        self.parent_persist_selectable,
                        self.secondary,
                        a_subset=self.parent_local_selectable,
                        consider_as_foreign_keys=consider_as_foreign_keys,
                    )
                else:
                    self.primaryjoin = self.primaryjoin_initial
            else:
                if self.primaryjoin_initial is None:
                    self.primaryjoin = join_condition(
                        self.parent_persist_selectable,
                        self.child_persist_selectable,
                        a_subset=self.parent_local_selectable,
                        consider_as_foreign_keys=consider_as_foreign_keys,
                    )
                else:
                    self.primaryjoin = self.primaryjoin_initial
        except sa_exc.NoForeignKeysError as nfe:
            # re-raise with relationship-specific guidance
            if self.secondary is not None:
                raise sa_exc.NoForeignKeysError(
                    "Could not determine join "
                    "condition between parent/child tables on "
                    "relationship %s - there are no foreign keys "
                    "linking these tables via secondary table '%s'. "
                    "Ensure that referencing columns are associated "
                    "with a ForeignKey or ForeignKeyConstraint, or "
                    "specify 'primaryjoin' and 'secondaryjoin' "
                    "expressions." % (self.prop, self.secondary)
                ) from nfe
            else:
                raise sa_exc.NoForeignKeysError(
                    "Could not determine join "
                    "condition between parent/child tables on "
                    "relationship %s - there are no foreign keys "
                    "linking these tables. "
                    "Ensure that referencing columns are associated "
                    "with a ForeignKey or ForeignKeyConstraint, or "
                    "specify a 'primaryjoin' expression." % self.prop
                ) from nfe
        except sa_exc.AmbiguousForeignKeysError as afe:
            # re-raise with relationship-specific guidance
            if self.secondary is not None:
                raise sa_exc.AmbiguousForeignKeysError(
                    "Could not determine join "
                    "condition between parent/child tables on "
                    "relationship %s - there are multiple foreign key "
                    "paths linking the tables via secondary table '%s'. "
                    "Specify the 'foreign_keys' "
                    "argument, providing a list of those columns which "
                    "should be counted as containing a foreign key "
                    "reference from the secondary table to each of the "
                    "parent and child tables." % (self.prop, self.secondary)
                ) from afe
            else:
                raise sa_exc.AmbiguousForeignKeysError(
                    "Could not determine join "
                    "condition between parent/child tables on "
                    "relationship %s - there are multiple foreign key "
                    "paths linking the tables. Specify the "
                    "'foreign_keys' argument, providing a list of those "
                    "columns which should be counted as containing a "
                    "foreign key reference to the parent table." % self.prop
                ) from afe
@property
def primaryjoin_minus_local(self) -> ColumnElement[bool]:
return _deep_deannotate(self.primaryjoin, values=("local", "remote"))
@property
def secondaryjoin_minus_local(self) -> ColumnElement[bool]:
assert self.secondaryjoin is not None
return _deep_deannotate(self.secondaryjoin, values=("local", "remote"))
    @util.memoized_property
    def primaryjoin_reverse_remote(self) -> ColumnElement[bool]:
        """Return the primaryjoin condition suitable for the
        "reverse" direction.

        If the primaryjoin was delivered here with pre-existing
        "remote" annotations, the local/remote annotations
        are reversed.  Otherwise, the local/remote annotations
        are removed.

        """
        if self._has_remote_annotations:

            def replace(element: _CE, **kw: Any) -> Optional[_CE]:
                # swap 'remote' -> 'local' and vice versa on each element
                if "remote" in element._annotations:
                    v = dict(element._annotations)
                    del v["remote"]
                    v["local"] = True
                    return element._with_annotations(v)
                elif "local" in element._annotations:
                    v = dict(element._annotations)
                    del v["local"]
                    v["remote"] = True
                    return element._with_annotations(v)

                return None

            return visitors.replacement_traverse(self.primaryjoin, {}, replace)
        else:
            if self._has_foreign_annotations:
                # TODO: coverage
                return _deep_deannotate(
                    self.primaryjoin, values=("local", "remote")
                )
            else:
                return _deep_deannotate(self.primaryjoin)
def _has_annotation(self, clause: ClauseElement, annotation: str) -> bool:
for col in visitors.iterate(clause, {}):
if annotation in col._annotations:
return True
else:
return False
@util.memoized_property
def _has_foreign_annotations(self) -> bool:
return self._has_annotation(self.primaryjoin, "foreign")
@util.memoized_property
def _has_remote_annotations(self) -> bool:
return self._has_annotation(self.primaryjoin, "remote")
    def _annotate_fks(self) -> None:
        """Annotate the primaryjoin and secondaryjoin
        structures with 'foreign' annotations marking columns
        considered as foreign.

        """
        # respect pre-existing foreign() annotations supplied by the user
        if self._has_foreign_annotations:
            return

        if self.consider_as_foreign_keys:
            # an explicit foreign_keys collection was given
            self._annotate_from_fk_list()
        else:
            # infer from actual ForeignKey metadata / secondary membership
            self._annotate_present_fks()
    def _annotate_from_fk_list(self) -> None:
        """Apply 'foreign' annotations to columns that are members of the
        user-supplied ``consider_as_foreign_keys`` collection."""

        def check_fk(element: _CE, **kw: Any) -> Optional[_CE]:
            if element in self.consider_as_foreign_keys:
                return element._annotate({"foreign": True})
            return None

        self.primaryjoin = visitors.replacement_traverse(
            self.primaryjoin, {}, check_fk
        )
        if self.secondaryjoin is not None:
            self.secondaryjoin = visitors.replacement_traverse(
                self.secondaryjoin, {}, check_fk
            )
    def _annotate_present_fks(self) -> None:
        """Apply 'foreign' annotations by inspecting actual ForeignKey
        references between the compared columns, or membership in the
        secondary table when one is present."""
        if self.secondary is not None:
            secondarycols = util.column_set(self.secondary.c)
        else:
            secondarycols = set()

        def is_foreign(
            a: ColumnElement[Any], b: ColumnElement[Any]
        ) -> Optional[ColumnElement[Any]]:
            # returns whichever of a/b should be considered "foreign",
            # or None when no FK relationship can be determined
            if isinstance(a, schema.Column) and isinstance(b, schema.Column):
                if a.references(b):
                    return a
                elif b.references(a):
                    return b

            if secondarycols:
                # columns of the secondary table are the foreign side
                if a in secondarycols and b not in secondarycols:
                    return a
                elif b in secondarycols and a not in secondarycols:
                    return b

            return None

        def visit_binary(binary: BinaryExpression[Any]) -> None:
            if not isinstance(
                binary.left, sql.ColumnElement
            ) or not isinstance(binary.right, sql.ColumnElement):
                return

            if (
                "foreign" not in binary.left._annotations
                and "foreign" not in binary.right._annotations
            ):
                col = is_foreign(binary.left, binary.right)
                if col is not None:
                    if col.compare(binary.left):
                        binary.left = binary.left._annotate({"foreign": True})
                    elif col.compare(binary.right):
                        binary.right = binary.right._annotate(
                            {"foreign": True}
                        )

        self.primaryjoin = visitors.cloned_traverse(
            self.primaryjoin, {}, {"binary": visit_binary}
        )
        if self.secondaryjoin is not None:
            self.secondaryjoin = visitors.cloned_traverse(
                self.secondaryjoin, {}, {"binary": visit_binary}
            )
    def _refers_to_parent_table(self) -> bool:
        """Return True if the join condition contains column
        comparisons where both columns are in both tables.

        """
        pt = self.parent_persist_selectable
        mt = self.child_persist_selectable
        result = False

        def visit_binary(binary: BinaryExpression[Any]) -> None:
            nonlocal result
            c, f = binary.left, binary.right
            # both sides derive from both the parent and the child
            # selectables -> self-referential comparison
            if (
                isinstance(c, expression.ColumnClause)
                and isinstance(f, expression.ColumnClause)
                and pt.is_derived_from(c.table)
                and pt.is_derived_from(f.table)
                and mt.is_derived_from(c.table)
                and mt.is_derived_from(f.table)
            ):
                result = True

        visitors.traverse(self.primaryjoin, {}, {"binary": visit_binary})
        return result
def _tables_overlap(self) -> bool:
"""Return True if parent/child tables have some overlap."""
return selectables_overlap(
self.parent_persist_selectable, self.child_persist_selectable
)
    def _annotate_remote(self) -> None:
        """Annotate the primaryjoin and secondaryjoin
        structures with 'remote' annotations marking columns
        considered as part of the 'remote' side.

        The strategy is selected by precedence: explicit annotations,
        secondary table, user-supplied pairs/remote_side, self-reference,
        overlapping tables, fully distinct tables.
        """
        if self._has_remote_annotations:
            return

        if self.secondary is not None:
            self._annotate_remote_secondary()
        elif self._local_remote_pairs or self._remote_side:
            self._annotate_remote_from_args()
        elif self._refers_to_parent_table():
            # self-referential: FK-annotated columns are the remote side
            self._annotate_selfref(
                lambda col: "foreign" in col._annotations, False
            )
        elif self._tables_overlap():
            self._annotate_remote_with_overlap()
        else:
            self._annotate_remote_distinct_selectables()
    def _annotate_remote_secondary(self) -> None:
        """annotate 'remote' in primaryjoin, secondaryjoin
        when 'secondary' is present.

        """
        assert self.secondary is not None
        fixed_secondary = self.secondary

        def repl(element: _CE, **kw: Any) -> Optional[_CE]:
            # columns of the secondary table are the remote side
            if fixed_secondary.c.contains_column(element):
                return element._annotate({"remote": True})
            return None

        self.primaryjoin = visitors.replacement_traverse(
            self.primaryjoin, {}, repl
        )

        assert self.secondaryjoin is not None
        self.secondaryjoin = visitors.replacement_traverse(
            self.secondaryjoin, {}, repl
        )
    def _annotate_selfref(
        self, fn: Callable[[ColumnElement[Any]], bool], remote_side_given: bool
    ) -> None:
        """annotate 'remote' in primaryjoin, secondaryjoin
        when the relationship is detected as self-referential.

        ``fn`` selects which columns count as remote;
        ``remote_side_given`` suppresses the non-column warning when the
        user supplied an explicit remote side.
        """

        def visit_binary(binary: BinaryExpression[Any]) -> None:
            equated = binary.left.compare(binary.right)
            if isinstance(binary.left, expression.ColumnClause) and isinstance(
                binary.right, expression.ColumnClause
            ):
                # assume one to many - FKs are "remote"
                if fn(binary.left):
                    binary.left = binary.left._annotate({"remote": True})
                # don't mark both sides of a column-equals-itself comparison
                if fn(binary.right) and not equated:
                    binary.right = binary.right._annotate({"remote": True})
            elif not remote_side_given:
                self._warn_non_column_elements()

        self.primaryjoin = visitors.cloned_traverse(
            self.primaryjoin, {}, {"binary": visit_binary}
        )
    def _annotate_remote_from_args(self) -> None:
        """annotate 'remote' in primaryjoin, secondaryjoin
        when the 'remote_side' or '_local_remote_pairs'
        arguments are used.

        """
        if self._local_remote_pairs:
            # the two arguments are mutually exclusive
            if self._remote_side:
                raise sa_exc.ArgumentError(
                    "remote_side argument is redundant "
                    "against more detailed _local_remote_side "
                    "argument."
                )

            remote_side = [r for (l, r) in self._local_remote_pairs]
        else:
            remote_side = self._remote_side

        if self._refers_to_parent_table():
            self._annotate_selfref(lambda col: col in remote_side, True)
        else:

            def repl(element: _CE, **kw: Any) -> Optional[_CE]:
                # use set() to avoid generating ``__eq__()`` expressions
                # against each element
                if element in set(remote_side):
                    return element._annotate({"remote": True})
                return None

            self.primaryjoin = visitors.replacement_traverse(
                self.primaryjoin, {}, repl
            )
    def _annotate_remote_with_overlap(self) -> None:
        """annotate 'remote' in primaryjoin, secondaryjoin
        when the parent/child tables have some set of
        tables in common, though is not a fully self-referential
        relationship.

        """

        def visit_binary(binary: BinaryExpression[Any]) -> None:
            # apply the classification to both orientations of the
            # comparison so either side may be marked remote
            binary.left, binary.right = proc_left_right(
                binary.left, binary.right
            )
            binary.right, binary.left = proc_left_right(
                binary.right, binary.left
            )

        # only consult parentmapper annotations when parent and target
        # are actually different mappers
        check_entities = (
            self.prop is not None and self.prop.mapper is not self.prop.parent
        )

        def proc_left_right(
            left: ColumnElement[Any], right: ColumnElement[Any]
        ) -> Tuple[ColumnElement[Any], ColumnElement[Any]]:
            if isinstance(left, expression.ColumnClause) and isinstance(
                right, expression.ColumnClause
            ):
                if self.child_persist_selectable.c.contains_column(
                    right
                ) and self.parent_persist_selectable.c.contains_column(left):
                    right = right._annotate({"remote": True})
                elif (
                    check_entities
                    and right._annotations.get("parentmapper") is self.prop.mapper
                ):
                    right = right._annotate({"remote": True})
                elif (
                    check_entities
                    and left._annotations.get("parentmapper") is self.prop.mapper
                ):
                    left = left._annotate({"remote": True})
            else:
                self._warn_non_column_elements()

            return left, right

        self.primaryjoin = visitors.cloned_traverse(
            self.primaryjoin, {}, {"binary": visit_binary}
        )
    def _annotate_remote_distinct_selectables(self) -> None:
        """annotate 'remote' in primaryjoin, secondaryjoin
        when the parent/child tables are entirely
        separate.

        """

        def repl(element: _CE, **kw: Any) -> Optional[_CE]:
            # columns of the child selectable are remote, except those
            # that belong only to the parent's local table
            if self.child_persist_selectable.c.contains_column(element) and (
                not self.parent_local_selectable.c.contains_column(element)
                or self.child_local_selectable.c.contains_column(element)
            ):
                return element._annotate({"remote": True})
            return None

        self.primaryjoin = visitors.replacement_traverse(
            self.primaryjoin, {}, repl
        )
def _warn_non_column_elements(self) -> None:
util.warn(
"Non-simple column elements in primary "
"join condition for property %s - consider using "
"remote() annotations to mark the remote side." % self.prop
)
    def _annotate_local(self) -> None:
        """Annotate the primaryjoin and secondaryjoin
        structures with 'local' annotations.

        This annotates all column elements found
        simultaneously in the parent table
        and the join condition that don't have a
        'remote' annotation set up from
        _annotate_remote() or user-defined.

        """
        if self._has_annotation(self.primaryjoin, "local"):
            return

        if self._local_remote_pairs:
            # user supplied explicit pairs; their left side is local
            local_side = util.column_set(
                [l for (l, r) in self._local_remote_pairs]
            )
        else:
            local_side = util.column_set(self.parent_persist_selectable.c)

        def locals_(element: _CE, **kw: Any) -> Optional[_CE]:
            if "remote" not in element._annotations and element in local_side:
                return element._annotate({"local": True})
            return None

        self.primaryjoin = visitors.replacement_traverse(
            self.primaryjoin, {}, locals_
        )
    def _annotate_parentmapper(self) -> None:
        """Attach a 'parentmapper' annotation to each local/remote column:
        remote columns belong to the target mapper, local columns to the
        parent mapper."""

        def parentmappers_(element: _CE, **kw: Any) -> Optional[_CE]:
            if "remote" in element._annotations:
                return element._annotate({"parentmapper": self.prop.mapper})
            elif "local" in element._annotations:
                return element._annotate({"parentmapper": self.prop.parent})
            return None

        self.primaryjoin = visitors.replacement_traverse(
            self.primaryjoin, {}, parentmappers_
        )
    def _check_remote_side(self) -> None:
        """Validate the computed local/remote pairs, raising when none
        could be determined and warning when a 'remote'-marked column is
        actually local-only."""
        if not self.local_remote_pairs:
            raise sa_exc.ArgumentError(
                "Relationship %s could "
                "not determine any unambiguous local/remote column "
                "pairs based on join condition and remote_side "
                "arguments.  "
                "Consider using the remote() annotation to "
                "accurately mark those elements of the join "
                "condition that are on the remote side of "
                "the relationship." % (self.prop,)
            )
        else:
            # columns present on the parent side only
            not_target = util.column_set(
                self.parent_persist_selectable.c
            ).difference(self.child_persist_selectable.c)

            for _, rmt in self.local_remote_pairs:
                if rmt in not_target:
                    util.warn(
                        "Expression %s is marked as 'remote', but these "
                        "column(s) are local to the local side.  The "
                        "remote() annotation is needed only for a "
                        "self-referential relationship where both sides "
                        "of the relationship refer to the same tables."
                        % (rmt,)
                    )
    def _check_foreign_cols(
        self, join_condition: ColumnElement[bool], primary: bool
    ) -> None:
        """Check the foreign key columns collected and emit error
        messages.

        ``primary`` selects whether the primaryjoin or secondaryjoin's
        synchronize pairs are consulted. Returns silently when the join
        condition is usable; otherwise raises :class:`.ArgumentError`
        with the most specific diagnosis available.
        """

        can_sync = False

        foreign_cols = self._gather_columns_with_annotation(
            join_condition, "foreign"
        )

        has_foreign = bool(foreign_cols)

        if primary:
            can_sync = bool(self.synchronize_pairs)
        else:
            can_sync = bool(self.secondary_synchronize_pairs)

        if (
            self.support_sync
            and can_sync
            or (not self.support_sync and has_foreign)
        ):
            return

        # from here below is just determining the best error message
        # to report.  Check for a join condition using any operator
        # (not just ==), perhaps they need to turn on "viewonly=True".
        if self.support_sync and has_foreign and not can_sync:
            err = (
                "Could not locate any simple equality expressions "
                "involving locally mapped foreign key columns for "
                "%s join condition "
                "'%s' on relationship %s."
                % (
                    primary and "primary" or "secondary",
                    join_condition,
                    self.prop,
                )
            )
            err += (
                "  Ensure that referencing columns are associated "
                "with a ForeignKey or ForeignKeyConstraint, or are "
                "annotated in the join condition with the foreign() "
                "annotation. To allow comparison operators other than "
                "'==', the relationship can be marked as viewonly=True."
            )

            raise sa_exc.ArgumentError(err)
        else:
            err = (
                "Could not locate any relevant foreign key columns "
                "for %s join condition '%s' on relationship %s."
                % (
                    primary and "primary" or "secondary",
                    join_condition,
                    self.prop,
                )
            )
            err += (
                "  Ensure that referencing columns are associated "
                "with a ForeignKey or ForeignKeyConstraint, or are "
                "annotated in the join condition with the foreign() "
                "annotation."
            )
            raise sa_exc.ArgumentError(err)
def _determine_direction(self) -> None:
    """Determine if this relationship is one to many, many to one,
    many to many.

    Sets ``self.direction`` to MANYTOMANY, ONETOMANY or MANYTOONE, or
    raises ArgumentError when the placement of foreign key columns is
    ambiguous or absent.
    """
    if self.secondaryjoin is not None:
        # presence of an association table decides it outright
        self.direction = MANYTOMANY
    else:
        parentcols = util.column_set(self.parent_persist_selectable.c)
        targetcols = util.column_set(self.child_persist_selectable.c)

        # fk collection which suggests ONETOMANY.
        onetomany_fk = targetcols.intersection(self.foreign_key_columns)

        # fk collection which suggests MANYTOONE.
        manytoone_fk = parentcols.intersection(self.foreign_key_columns)

        if onetomany_fk and manytoone_fk:
            # fks on both sides.  test for overlap of local/remote
            # with foreign key.
            # we will gather columns directly from their annotations
            # without deannotating, so that we can distinguish on a column
            # that refers to itself.

            # 1. columns that are both remote and FK suggest
            # onetomany.
            onetomany_local = self._gather_columns_with_annotation(
                self.primaryjoin, "remote", "foreign"
            )

            # 2. columns that are FK but are not remote (e.g. local)
            # suggest manytoone.
            manytoone_local = {
                c
                for c in self._gather_columns_with_annotation(
                    self.primaryjoin, "foreign"
                )
                if "remote" not in c._annotations
            }

            # 3. if both collections are present, remove columns that
            # refer to themselves.  This is for the case of
            # and_(Me.id == Me.remote_id, Me.version == Me.version)
            if onetomany_local and manytoone_local:
                self_equated = self.remote_columns.intersection(
                    self.local_columns
                )
                onetomany_local = onetomany_local.difference(self_equated)
                manytoone_local = manytoone_local.difference(self_equated)

            # at this point, if only one or the other collection is
            # present, we know the direction, otherwise it's still
            # ambiguous.
            if onetomany_local and not manytoone_local:
                self.direction = ONETOMANY
            elif manytoone_local and not onetomany_local:
                self.direction = MANYTOONE
            else:
                raise sa_exc.ArgumentError(
                    "Can't determine relationship"
                    " direction for relationship '%s' - foreign "
                    "key columns within the join condition are present "
                    "in both the parent and the child's mapped tables.  "
                    "Ensure that only those columns referring "
                    "to a parent column are marked as foreign, "
                    "either via the foreign() annotation or "
                    "via the foreign_keys argument." % self.prop
                )
        elif onetomany_fk:
            self.direction = ONETOMANY
        elif manytoone_fk:
            self.direction = MANYTOONE
        else:
            raise sa_exc.ArgumentError(
                "Can't determine relationship "
                "direction for relationship '%s' - foreign "
                "key columns are present in neither the parent "
                "nor the child's mapped tables" % self.prop
            )
def _deannotate_pairs(
self, collection: _ColumnPairIterable
) -> _MutableColumnPairs:
"""provide deannotation for the various lists of
pairs, so that using them in hashes doesn't incur
high-overhead __eq__() comparisons against
original columns mapped.
"""
return [(x._deannotate(), y._deannotate()) for x, y in collection]
def _setup_pairs(self) -> None:
    """Populate ``synchronize_pairs``, ``local_remote_pairs`` and
    ``secondary_synchronize_pairs`` by walking binary expressions in
    the primary (and, if set, secondary) join conditions."""
    sync_pairs: _MutableColumnPairs = []
    lrp: util.OrderedSet[
        Tuple[ColumnElement[Any], ColumnElement[Any]]
    ] = util.OrderedSet([])
    secondary_sync_pairs: _MutableColumnPairs = []

    def go(
        joincond: ColumnElement[bool],
        collection: _MutableColumnPairs,
    ) -> None:
        def visit_binary(
            binary: BinaryExpression[Any],
            left: ColumnElement[Any],
            right: ColumnElement[Any],
        ) -> None:
            # local/remote pair: the non-remote side is "local", kept
            # only if it is actually writable per can_be_synced_fn
            if (
                "remote" in right._annotations
                and "remote" not in left._annotations
                and self.can_be_synced_fn(left)
            ):
                lrp.add((left, right))
            elif (
                "remote" in left._annotations
                and "remote" not in right._annotations
                and self.can_be_synced_fn(right)
            ):
                lrp.add((right, left))
            # sync pairs require plain equality; ordered as
            # (source column, foreign()-annotated target column)
            if binary.operator is operators.eq and self.can_be_synced_fn(
                left, right
            ):
                if "foreign" in right._annotations:
                    collection.append((left, right))
                elif "foreign" in left._annotations:
                    collection.append((right, left))

        visit_binary_product(visit_binary, joincond)

    for joincond, collection in [
        (self.primaryjoin, sync_pairs),
        (self.secondaryjoin, secondary_sync_pairs),
    ]:
        if joincond is None:
            continue
        go(joincond, collection)

    # store deannotated copies so later hashing/comparison is cheap
    self.local_remote_pairs = self._deannotate_pairs(lrp)
    self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
    self.secondary_synchronize_pairs = self._deannotate_pairs(
        secondary_sync_pairs
    )
# Class-level registry mapping a sync target column to the
# relationships (and their source columns) that write to it; consulted
# by _warn_for_conflicting_sync_targets to detect two relationships
# copying different sources into the same column.  Weak keys let
# disposed mappers and columns be garbage collected.
_track_overlapping_sync_targets: weakref.WeakKeyDictionary[
    ColumnElement[Any],
    weakref.WeakKeyDictionary[
        RelationshipProperty[Any], ColumnElement[Any]
    ],
] = weakref.WeakKeyDictionary()
def _warn_for_conflicting_sync_targets(self) -> None:
    """Emit the "relationship X will copy column ..." warning when two
    relationships synchronize different source columns into the same
    target column (see the ``overlaps`` parameter for silencing)."""
    if not self.support_sync:
        return

    # we would like to detect if we are synchronizing any column
    # pairs in conflict with another relationship that wishes to sync
    # an entirely different column to the same target.  This is a
    # very rare edge case so we will try to minimize the memory/overhead
    # impact of this check
    for from_, to_ in [
        (from_, to_) for (from_, to_) in self.synchronize_pairs
    ] + [
        (from_, to_) for (from_, to_) in self.secondary_synchronize_pairs
    ]:
        # save ourselves a ton of memory and overhead by only
        # considering columns that are subject to a overlapping
        # FK constraints at the core level.  This condition can arise
        # if multiple relationships overlap foreign() directly, but
        # we're going to assume it's typically a ForeignKeyConstraint-
        # level configuration that benefits from this warning.

        if to_ not in self._track_overlapping_sync_targets:
            self._track_overlapping_sync_targets[
                to_
            ] = weakref.WeakKeyDictionary({self.prop: from_})
        else:
            other_props = []
            prop_to_from = self._track_overlapping_sync_targets[to_]

            for pr, fr_ in prop_to_from.items():
                if (
                    not pr.mapper._dispose_called
                    and pr not in self.prop._reverse_property
                    and pr.key not in self.prop._overlaps
                    and self.prop.key not in pr._overlaps
                    # note: the "__*" symbol is used internally by
                    # SQLAlchemy as a general means of suppressing the
                    # overlaps warning for some extension cases, however
                    # this is not currently
                    # a publicly supported symbol and may change at
                    # any time.
                    and "__*" not in self.prop._overlaps
                    and "__*" not in pr._overlaps
                    and not self.prop.parent.is_sibling(pr.parent)
                    and not self.prop.mapper.is_sibling(pr.mapper)
                    and not self.prop.parent.is_sibling(pr.mapper)
                    and not self.prop.mapper.is_sibling(pr.parent)
                    and (
                        self.prop.key != pr.key
                        or not self.prop.parent.common_parent(pr.parent)
                    )
                ):
                    other_props.append((pr, fr_))

            if other_props:
                util.warn(
                    "relationship '%s' will copy column %s to column %s, "
                    "which conflicts with relationship(s): %s. "
                    "If this is not the intention, consider if these "
                    "relationships should be linked with "
                    "back_populates, or if viewonly=True should be "
                    "applied to one or more if they are read-only. "
                    "For the less common case that foreign key "
                    "constraints are partially overlapping, the "
                    "orm.foreign() "
                    "annotation can be used to isolate the columns that "
                    "should be written towards.   To silence this "
                    "warning, add the parameter 'overlaps=\"%s\"' to the "
                    "'%s' relationship."
                    % (
                        self.prop,
                        from_,
                        to_,
                        ", ".join(
                            sorted(
                                "'%s' (copies %s to %s)" % (pr, fr_, to_)
                                for (pr, fr_) in other_props
                            )
                        ),
                        ",".join(sorted(pr.key for pr, fr in other_props)),
                        self.prop,
                    ),
                    code="qzyx",
                )
            # record ourselves as a writer to this target column
            self._track_overlapping_sync_targets[to_][self.prop] = from_
@util.memoized_property
def remote_columns(self) -> Set[ColumnElement[Any]]:
    # deannotated columns marked remote() in the join condition(s)
    return self._gather_join_annotations("remote")
@util.memoized_property
def local_columns(self) -> Set[ColumnElement[Any]]:
    # deannotated columns marked "local" in the join condition(s)
    return self._gather_join_annotations("local")
@util.memoized_property
def foreign_key_columns(self) -> Set[ColumnElement[Any]]:
    # deannotated columns marked foreign() in the join condition(s)
    return self._gather_join_annotations("foreign")
def _gather_join_annotations(
self, annotation: str
) -> Set[ColumnElement[Any]]:
s = set(
self._gather_columns_with_annotation(self.primaryjoin, annotation)
)
if self.secondaryjoin is not None:
s.update(
self._gather_columns_with_annotation(
self.secondaryjoin, annotation
)
)
return {x._deannotate() for x in s}
def _gather_columns_with_annotation(
    self, clause: "ColumnElement[Any]", *annotation: "Iterable[str]"
) -> "Set[ColumnElement[Any]]":
    """Return every column element inside *clause* whose annotations
    include all of the given annotation keys."""
    required = set(annotation)
    found = set()
    for col in visitors.iterate(clause, {}):
        if required.issubset(col._annotations):
            found.add(cast(ColumnElement[Any], col))
    return found
def join_targets(
    self,
    source_selectable: Optional[FromClause],
    dest_selectable: FromClause,
    aliased: bool,
    single_crit: Optional[ColumnElement[bool]] = None,
    extra_criteria: Tuple[ColumnElement[bool], ...] = (),
) -> Tuple[
    ColumnElement[bool],
    Optional[ColumnElement[bool]],
    Optional[FromClause],
    Optional[ClauseAdapter],
    FromClause,
]:
    """Given a source and destination selectable, create a
    join between them.

    This takes into account aliasing the join clause
    to reference the appropriate corresponding columns
    in the target objects, as well as the extra child
    criterion, equivalent column sets, etc.

    Returns a 5-tuple of (primaryjoin, secondaryjoin, secondary,
    target_adapter, dest_selectable).
    """
    # place a barrier on the destination such that
    # replacement traversals won't ever dig into it.
    # its internal structure remains fixed
    # regardless of context.
    dest_selectable = _shallow_annotate(
        dest_selectable, {"no_replacement_traverse": True}
    )

    primaryjoin, secondaryjoin, secondary = (
        self.primaryjoin,
        self.secondaryjoin,
        self.secondary,
    )

    # adjust the join condition for single table inheritance,
    # in the case that the join is to a subclass
    # this is analogous to the
    # "_adjust_for_single_table_inheritance()" method in Query.

    if single_crit is not None:
        if secondaryjoin is not None:
            secondaryjoin = secondaryjoin & single_crit
        else:
            primaryjoin = primaryjoin & single_crit

    if extra_criteria:

        def mark_unrelated_columns_as_ok_to_adapt(
            elem: SupportsAnnotations, annotations: _AnnotationDict
        ) -> SupportsAnnotations:
            """note unrelated columns in the "extra criteria" as OK
            to adapt, even though they are not part of our "local"
            or "remote" side.

            see #9779 for this case

            """

            parentmapper_for_element = elem._annotations.get(
                "parentmapper", None
            )
            if (
                parentmapper_for_element is not self.prop.parent
                and parentmapper_for_element is not self.prop.mapper
            ):
                return _safe_annotate(elem, annotations)
            else:
                return elem

        extra_criteria = tuple(
            _deep_annotate(
                elem,
                {"ok_to_adapt_in_join_condition": True},
                annotate_callable=mark_unrelated_columns_as_ok_to_adapt,
            )
            for elem in extra_criteria
        )

        # extra criteria attaches to the secondary join when present,
        # else directly to the primary join
        if secondaryjoin is not None:
            secondaryjoin = secondaryjoin & sql.and_(*extra_criteria)
        else:
            primaryjoin = primaryjoin & sql.and_(*extra_criteria)

    if aliased:
        if secondary is not None:
            secondary = secondary._anonymous_fromclause(flat=True)
            primary_aliasizer = ClauseAdapter(
                secondary, exclude_fn=_ColInAnnotations("local")
            )
            secondary_aliasizer = ClauseAdapter(
                dest_selectable, equivalents=self.child_equivalents
            ).chain(primary_aliasizer)
            if source_selectable is not None:
                primary_aliasizer = ClauseAdapter(
                    secondary, exclude_fn=_ColInAnnotations("local")
                ).chain(
                    ClauseAdapter(
                        source_selectable,
                        equivalents=self.parent_equivalents,
                    )
                )

            secondaryjoin = secondary_aliasizer.traverse(secondaryjoin)
        else:
            primary_aliasizer = ClauseAdapter(
                dest_selectable,
                exclude_fn=_ColInAnnotations("local"),
                equivalents=self.child_equivalents,
            )
            if source_selectable is not None:
                primary_aliasizer.chain(
                    ClauseAdapter(
                        source_selectable,
                        exclude_fn=_ColInAnnotations("remote"),
                        equivalents=self.parent_equivalents,
                    )
                )
            secondary_aliasizer = None

        primaryjoin = primary_aliasizer.traverse(primaryjoin)
        target_adapter = secondary_aliasizer or primary_aliasizer
        # the returned adapter is handed to callers for further use;
        # clear the exclusion filter so it adapts all columns
        target_adapter.exclude_fn = None
    else:
        target_adapter = None
    return (
        primaryjoin,
        secondaryjoin,
        secondary,
        target_adapter,
        dest_selectable,
    )
def create_lazy_clause(
    self, reverse_direction: bool = False
) -> Tuple[
    ColumnElement[bool],
    Dict[str, ColumnElement[Any]],
    Dict[ColumnElement[Any], ColumnElement[Any]],
]:
    """Build the criterion used by the lazy loader: the join condition
    with "known-side" columns replaced by bind parameters.

    Returns (lazywhere, bind_to_col, equated_columns) where
    ``bind_to_col`` maps bind parameter keys back to the columns they
    replaced, and ``equated_columns`` maps each column to its
    counterpart across the join.
    """
    binds: Dict[ColumnElement[Any], BindParameter[Any]] = {}
    equated_columns: Dict[ColumnElement[Any], ColumnElement[Any]] = {}

    has_secondary = self.secondaryjoin is not None

    if has_secondary:
        lookup = collections.defaultdict(list)
        for l, r in self.local_remote_pairs:
            lookup[l].append((l, r))
            equated_columns[r] = l
    elif not reverse_direction:
        for l, r in self.local_remote_pairs:
            equated_columns[r] = l
    else:
        # reverse direction: remote side is the known side
        for l, r in self.local_remote_pairs:
            equated_columns[l] = r

    def col_to_bind(
        element: ColumnElement[Any], **kw: Any
    ) -> Optional[BindParameter[Any]]:
        # replace a column on the known side with an anonymous bind
        # parameter (one per distinct column), else leave it alone
        if (
            (not reverse_direction and "local" in element._annotations)
            or reverse_direction
            and (
                (has_secondary and element in lookup)
                or (not has_secondary and "remote" in element._annotations)
            )
        ):
            if element not in binds:
                binds[element] = sql.bindparam(
                    None, None, type_=element.type, unique=True
                )
            return binds[element]
        return None

    lazywhere = self.primaryjoin
    if self.secondaryjoin is None or not reverse_direction:
        lazywhere = visitors.replacement_traverse(
            lazywhere, {}, col_to_bind
        )

    if self.secondaryjoin is not None:
        secondaryjoin = self.secondaryjoin
        if reverse_direction:
            secondaryjoin = visitors.replacement_traverse(
                secondaryjoin, {}, col_to_bind
            )
        lazywhere = sql.and_(lazywhere, secondaryjoin)

    bind_to_col = {binds[col].key: col for col in binds}

    return lazywhere, bind_to_col, equated_columns
class _ColInAnnotations:
"""Serializable object that tests for a name in c._annotations."""
__slots__ = ("name",)
def __init__(self, name: str):
self.name = name
def __call__(self, c: ClauseElement) -> bool:
return (
self.name in c._annotations
or "ok_to_adapt_in_join_condition" in c._annotations
)
class Relationship(  # type: ignore
    RelationshipProperty[_T],
    _DeclarativeMapped[_T],
    WriteOnlyMapped[_T],  # not compatible with Mapped[_T]
    DynamicMapped[_T],  # not compatible with Mapped[_T]
):
    """Describes an object property that holds a single item or list
    of items that correspond to a related database table.

    Public constructor is the :func:`_orm.relationship` function.

    .. seealso::

        :ref:`relationship_config_toplevel`

    .. versionchanged:: 2.0 Added :class:`_orm.Relationship` as a Declarative
       compatible subclass for :class:`_orm.RelationshipProperty`.

    """

    # adds no state beyond the bases, so cached SQL may be shared
    inherit_cache = True
    """:meta private:"""
|
8,670 | e486e0ab91a8f5671435f5bbcf5340a62a970d3a | class SmartChineseAnalyzer:
def __init__(self):
    # NOTE(review): stub — never assigns self.stopwords, which
    # create_components reads; calling create_components on a fresh
    # instance will raise AttributeError.
    pass
def create_components(self, filename):
    """Build the token-stream pipeline for *filename*.

    NOTE(review): broken stub — ``tokenizer`` and ``result`` are only
    assigned in the commented-out lines below, so as written this
    method always raises NameError.  Restore the pipeline (or remove
    the method) before use.
    """
    #tokenizer = SentenceTokenize(filename)
    #result = WordTokenFilter(tokenizer)
    #result = PorterStemFilter(result)
    if self.stopwords:
        result = StopFilter(result, self.stopwords)
    return TokenStreamComponents(tokenizer, result)
|
8,671 | ef5c51a5c706387b62ef3f40c7cadf7dbef6d082 | from flask_minify.utils import get_optimized_hashing
class MemoryCache:
    """Tiny bounded in-memory cache keyed by hashed strings.

    ``store_key_getter`` (optional callable) partitions the cache into
    per-context sub-stores.  ``limit`` is the maximum number of entries
    per store; 0 disables caching entirely.
    """

    def __init__(self, store_key_getter=None, limit=0):
        self.store_key_getter = store_key_getter
        self.limit = limit
        self._cache = {}
        self.hashing = get_optimized_hashing()

    @property
    def store(self):
        """The active sub-store (per store_key_getter) or the whole cache."""
        if self.store_key_getter:
            return self._cache.setdefault(self.store_key_getter(), {})
        return self._cache

    @property
    def limit_exceeded(self):
        """True when the active store is at (or past) capacity."""
        return len(self.store) >= self.limit

    def __getitem__(self, key):
        # returns None for a missing key instead of raising KeyError
        return self.store.get(key)

    def __setitem__(self, key, value):
        if self.limit_exceeded:
            # evict one entry to stay within the configured limit
            self.store.popitem()
        self.store.update({key: value})

    def get_or_set(self, key, getter):
        """Return the cached value for *key*, computing it via *getter*
        on a miss.  With limit == 0, caching is bypassed entirely."""
        if self.limit == 0:
            return getter()

        hashed_key = self.hashing(key.encode("utf-8")).hexdigest()

        # Fix: test for a genuine miss (None) rather than truthiness,
        # so falsy cached results (e.g. an empty minified string) are
        # not recomputed on every call.
        if self[hashed_key] is None:
            self[hashed_key] = getter()

        return self[hashed_key]

    def clear(self):
        """Drop every store and entry."""
        del self._cache
        self._cache = {}
|
8,672 | 42187f460a64572d2581ed5baec41eaff47466f8 | version https://git-lfs.github.com/spec/v1
oid sha256:91f725dc0dba902c5c2c91c065346ab402c8bdbf4b5b13bdaec6773df5d06e49
size 964
|
8,673 | 83be35b79dcaa34f9273281976ebb71e81c58cdd | import logging
import os
import time
from datetime import datetime
from pathlib import Path
from configargparse import ArgumentParser
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.x509.oid import ExtensionOID
from cryptography.x509.extensions import ExtensionNotFound
from prettylog import basic_config
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
# Command-line / environment / config-file options; configargparse merges
# all three sources (env vars use the APP_ prefix).
parser = ArgumentParser(
    default_config_files=[os.path.join("/etc/ssl-exporter.conf")],
    auto_env_var_prefix="APP_",
)
parser.add_argument("--host-address", type=str, default="0.0.0.0")
# Use an int default directly; the former default="9001" only worked
# because argparse re-parses string defaults through type=.
parser.add_argument("--port", type=int, default=9001)
parser.add_argument("--cert-paths", nargs="+", type=Path)
parser.add_argument("--log-level", type=str, default="INFO")
parser.add_argument("--log-format", type=str, default="color")

arguments = parser.parse_args()

log = logging.getLogger()
class SslExporter(object):
    """Prometheus collector exposing days-until-expiry per certificate."""

    # class-level dict shared across instances; rebuilt on every scrape
    gauges = {}

    def __init__(self, cert_paths):
        self.cert_paths = cert_paths

    def collect(self):
        """Prometheus collector entry point; yields metric families."""
        # fresh metric family each scrape so stale series disappear
        self.gauges["ssl_valid_days"] = GaugeMetricFamily(
            "ssl_valid_days",
            "Ssl cert valid days",
            value=None,
            labels=["domain", "file_name", "serial_number"],
        )
        for path in self.cert_paths:
            if not path.exists():
                # NOTE(review): exit(1) inside a scrape kills the whole
                # exporter on a missing file — confirm this is intended
                log.error("File %r does not exists", path)
                exit(1)
            self.get_metrics(path)
        for name, data in self.gauges.items():
            yield data

    def get_metrics(self, path: Path):
        """Parse one PEM file and add one sample per SAN DNS name."""
        with path.open("rb") as f:
            try:
                cert = x509.load_pem_x509_certificate(
                    f.read(), default_backend()
                )
            except ValueError:
                log.exception("Cannot read certificate - %r", path)
                return []
        file_name = path.name
        log.debug("File name of cert - %r", file_name)
        # NOTE(review): assumes not_valid_after is naive UTC so it is
        # comparable with datetime.utcnow() — verify for this crypto lib
        not_valid_after = cert.not_valid_after
        log.debug("Ssl not valid after date - %r", str(not_valid_after))
        left = not_valid_after - datetime.utcnow()
        log.debug("Ssl cert valid days - %r", left.days)
        log.debug("Ssl cert serial number - %r", cert.serial_number)
        try:
            ext = cert.extensions.get_extension_for_oid(
                ExtensionOID.SUBJECT_ALTERNATIVE_NAME
            )
            dns_names_list = ext.value.get_values_for_type(x509.DNSName)
        except ExtensionNotFound:
            # no SAN extension: emit a single sample under a placeholder
            dns_names_list = ["noname"]
        log.debug("DNS names of cert - %r", dns_names_list)
        for domain in dns_names_list:
            self.gauges["ssl_valid_days"].add_metric(
                [domain, file_name, str(cert.serial_number)], int(left.days)
            )
def main():
    """Configure logging, start the metrics HTTP endpoint, register the
    collector, then block forever (scrapes are served in a thread)."""
    basic_config(
        level=arguments.log_level.upper(),
        buffered=False,
        log_format=arguments.log_format,
    )
    start_http_server(addr=arguments.host_address, port=arguments.port)
    collector = SslExporter(arguments.cert_paths)
    REGISTRY.register(collector)
    # keep the main thread alive; prometheus_client handles requests
    while True:
        time.sleep(1)


if __name__ == "__main__":
    main()
|
8,674 | dac8dbb0eba78d4f8dfbe3284325735324a87dc2 | """
Time-optimal approach.
Idea:
    Call the two integers that sum to the target value num1 and num2.
    Build a dictionary mapping each number seen in the array to its index.
    Iterate over the array: num1 is the current element and num2 is
    target - num1.  If num2 is already in the dictionary, return the
    stored index of num2 (its position in nums) together with i (the
    position of num1).  Otherwise, record num1's index i in the dictionary.
"""
def two_sum(nums, target):
    """Return the indices of the two entries of *nums* whose sum equals
    *target*, or None when no such pair exists.  One pass: each value's
    index is recorded so the needed complement is found in O(1)."""
    seen = {}
    for index, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return [seen[complement], index]
        seen[value] = index


print(two_sum([14, 2, 31, 4], 6))
|
8,675 | 60d8276a5715899823b12ffdf132925c6f2693bd | from __future__ import annotations
from typing import TYPE_CHECKING
from datetime import datetime
from sqlalchemy import Column, ForeignKey, String, DateTime, Float, Integer
from sqlalchemy.orm import relationship
from app.db.base_class import Base
if TYPE_CHECKING:
from .account import Account # noqa: F401
from .code import Code # noqa: F401
class Voucher(Base):
    """Voucher record; one row per redeemable code."""

    __tablename__ = 't_juju_voucher'

    code = Column(String(100), index=True, unique=True)          # voucher code
    serial_no = Column(String(120), index=True, unique=True)     # printed serial number
    amount = Column(Float, default=0, nullable=False)            # face value
    # presumably the voucher's validity/issue timestamp — TODO confirm
    vtime = Column(DateTime(), nullable=False)
    vtype = Column(String(50), ForeignKey("t_juju_code.vtype"))  # type, FK to code table
    comment = Column(String(150), nullable=True)
    create_time = Column(DateTime(), default=datetime.now)
    update_time = Column(DateTime(), default=datetime.now,
                         onupdate=datetime.now)
    owner_id = Column(Integer, ForeignKey("t_juju_account.id"))      # owning account
    modifier_id = Column(Integer, ForeignKey("t_juju_account.id"))   # last modifier
8,676 | c87ede0e3c6d4cc305450f68b4cf61fb63986760 | import uvicore
from uvicore.support import module
from uvicore.typing import Dict, List
from uvicore.support.dumper import dump, dd
from uvicore.contracts import Email
@uvicore.service()
class Mail:
    """Fluent builder for composing and sending an email.

    The mailer name/options default to app config but can be overridden
    per instance.  Message fields may be supplied to ``__init__`` or via
    the chainable setters, then dispatched with ``await send()``.
    """

    def __init__(self, *,
        mailer: str = None,
        mailer_options: Dict = None,
        to: List = None,
        cc: List = None,
        bcc: List = None,
        from_name: str = None,
        from_address: str = None,
        subject: str = None,
        html: str = None,
        text: str = None,
        attachments: List = None,
    ) -> None:
        # Get mailer and options from config
        self._config = uvicore.config.app.mail.clone()
        self._mailer = mailer or self._config.default
        self._mailer_options = self._config.mailers[self._mailer].clone().merge(mailer_options)

        # New message superdict.  List parameters default to None rather
        # than a shared mutable [] (the classic mutable-default pitfall);
        # they are normalized to fresh lists here, preserving the old
        # observable defaults.
        self._message: Email = Email()
        self._message.to = to if to is not None else []
        self._message.cc = cc if cc is not None else []
        self._message.bcc = bcc if bcc is not None else []
        self._message.from_name = from_name or self._config.from_name
        self._message.from_address = from_address or self._config.from_address
        self._message.subject = subject
        self._message.html = html
        self._message.text = text
        self._message.attachments = attachments if attachments is not None else []

    # ------------------------------------------------------------------
    # Fluent setters: each overwrites one field and returns self so the
    # calls can be chained, e.g. Mail().to([...]).subject('hi').send()
    # ------------------------------------------------------------------

    def mailer(self, mailer: str):
        self._mailer = mailer
        self._mailer_options = self._config.mailers[self._mailer].clone()
        return self

    def mailer_options(self, options: Dict):
        self._mailer_options.merge(Dict(options))
        return self

    def to(self, to: List):
        self._message.to = to
        return self

    def cc(self, cc: List):
        self._message.cc = cc
        return self

    def bcc(self, bcc: List):
        self._message.bcc = bcc
        return self

    def from_name(self, from_name: str):
        self._message.from_name = from_name
        return self

    def from_address(self, from_address: str):
        self._message.from_address = from_address
        return self

    def subject(self, subject: str):
        self._message.subject = subject
        return self

    def html(self, html: str):
        self._message.html = html
        return self

    def text(self, text: str):
        self._message.text = text
        return self

    def attachments(self, attachments: List):
        self._message.attachments = attachments
        return self

    async def send(self):
        """Dispatch the composed message through the configured driver."""
        # Use dynamic module based on mailer driver
        driver = module.load(self._mailer_options.driver).object
        await driver.send(self._message, self._mailer_options)
8,677 | 4a8a733a965e25ad7ef53600fad6dd47343655b0 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 12 16:38:22 2017
@author: secoder
"""
import io
import random
import nltk
from nltk.tokenize import RegexpTokenizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from collections import OrderedDict
from collections import Counter
from sklearn.metrics import pairwise_distances
import numpy as np
import scipy
import json
import codecs
from dateutil import parser
import time
import datetime
import operator
#import cPickle as pickle
#
#import traceback
from skimage import filters
import unicodedata as ud
from config import project_name
class recommendationsys:
def __init__(self, nyear):
    """Initialize the recommender: resolve project data-file paths,
    set tuning constants, then build clusters and the bigram CFD.

    :param nyear: cutoff year; authors whose newest paper predates it
        are treated as inactive (compared as a unix timestamp).
    """
    # by default we will filter out those don't have publications in recent 10 years
    self.activityyear = 10

    self.debug = 0          # debugmsg prints messages at level >= this
    self.nremd = 3          # number of recommendations to produce

    #----------------------
    PROJECT_DIRECTORY = 'output/project/' + project_name

    self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'
    self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'
    self.f_years = PROJECT_DIRECTORY + '/years_target.txt'
    self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'
    self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'
    self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'
    self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'

    self.npaper = 10        # minimum publication count to be considered
    # convert the cutoff year into a unix timestamp for comparisons
    self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())

    self.keywordthreshold = 10   # minimum corpus bigram frequency
    #----------------------

    self.debugmsg('start init', 0)
    self.docluster()
    self.initNLTKConditionalFreqDist()
    self.filterN = len(self.authors)
    self.debugmsg('end init\n', 0)
"""
"""
def debugmsg(self, msg, lvl):
    """Print *msg* when the instance debug level is at or below *lvl*."""
    if lvl < self.debug:
        return
    print(msg)
"""
"""
def resentpublicationsidx(self, authoridx):
    """Recent publications (newest first) for the author at index
    *authoridx*; empty list when the author is inactive (newest paper
    older than self.nyear) or has fewer than self.npaper papers."""
    #print 'start recentpublications\n'
    resentpub = []
    idx = self.authortitlesidx[authoridx]
    # sort by years
    years = [self.years[i] for i in idx]
    years = np.array(years)
    years = years.argsort()
    idx = np.array(idx)[years]
    idx = idx.tolist()
    idx.reverse()
    # if the most recent publication is before the 'nyears'
    # remove this one from the list
    if (int(self.years[idx[0]]) < self.nyear) or (len(idx) < self.npaper):
        return resentpub
    # ----
    for i in idx:
        authorsjson = []
        for author in self.coathors[i]:
            authorsjson.append(OrderedDict([("name", author)]))
        date = datetime.datetime.fromtimestamp(self.years[i]).strftime("%Y-%m-%d %H:%M:%S")
        resentpub.append(OrderedDict([("title", self.rawtitles[i]), ("authors", authorsjson), ("year", date), ("publicationVenue", self.booktitle[i])]))
    #print 'end recentpublications\n'
    return resentpub
"""
"""
def resentpublications(self, name):
    """Recent publications for the author given by *name* (str or
    bytes), newest first; empty list when the author is inactive or
    has fewer than self.npaper papers.

    The body previously duplicated :meth:`resentpublicationsidx`
    line-for-line; the name is now resolved to an author index and the
    work delegated so the two code paths cannot drift apart.
    """
    if isinstance(name, str):
        idx = self.authordict.get(name)
    else:
        # bytes input: decode before the dictionary lookup
        idx = self.authordict.get(name.decode('utf-8'))
    return self.resentpublicationsidx(idx)
def initNLTKConditionalFreqDist(self):
    """Build a bigram ConditionalFreqDist (self.cfd) over the whole
    corpus (self.allcorp); used later to score candidate keywords."""
    self.debugmsg('start initNLTK CFD\n', 0)
    pairs=[]   # NOTE(review): dead — overwritten by the generator below
    # for title in self.titles:
    #     pairs = pairs + list(nltk.bigrams(title.split()))
    pairs = nltk.bigrams(self.allcorp)
    self.cfd = nltk.ConditionalFreqDist(pairs)
    self.debugmsg('end initNLTK CFD\n', 0)
def keyword(self, name):
    """Topic keywords for the author given by *name* (str or bytes).

    Resolves the name to an author index and delegates to
    :meth:`keywordbyidx`, which builds bigram keywords from the
    author's publication titles.  The active logic previously
    duplicated keywordbyidx verbatim and was buried under ~40 lines of
    dead, commented-out tf-idf code, now removed.

    :returns: list of OrderedDict entries of the form
        ``{"topic": <phrase>}``, most significant first.
    """
    if isinstance(name, str):
        idx = self.authordict.get(name)
    else:
        # bytes input: decode before the dictionary lookup
        idx = self.authordict.get(name.decode('utf-8'))
    return self.keywordbyidx(idx)
"""
"""
def keywordbyidx(self, idx):
    """Bigram topic keywords for the author at index *idx*, wrapped as
    a list of ``{"topic": <phrase>}`` OrderedDict entries."""
    words = self.authorcontents[idx].lower().split()
    return [
        OrderedDict([("topic", entry[0])])
        for entry in self.bigramkeywords(words)
    ]
"""
"""
def bigramkeywords(self, text):
    """Extract bigram keywords from *text* (a pre-split token list).

    A bigram qualifies when its corpus-wide frequency (self.cfd) meets
    self.keywordthreshold AND it is either very common (>= 25) or
    occurs more than once in the author's own text.  If nothing
    qualifies, the single most frequent bigram is returned as a
    fallback; otherwise plural/singular duplicates are merged.

    :returns: list of [phrase, corpus_count, local_count] entries,
        most frequent first.
    """
    #print 'start bigramkeyword\n'
    content = text

    userpairs = list(nltk.bigrams(content))

    # in case there is no valid keywords due to our requirement
    # the one with highest occurrence will be pick from the backup plan
    keywordsbackup = []
    # the valid keywords
    keywords = []
    for p in userpairs:
        pairsdic = self.cfd[p[0]]
        n = pairsdic[p[1]]
        if n >= self.keywordthreshold:
            keywords.append((p, n))
        keywordsbackup.append((p, n))

    finalkeywords = []
    uniqkeywords = set(keywords)
    keywords = sorted(uniqkeywords, key=lambda keywords: keywords[1])
    for p in keywords:
        if (p[1] >= 25) or (userpairs.count(p[0]) > 1):
            finalkeywords.append([' '.join(p[0]), p[1], userpairs.count(p[0])])
    finalkeywords.reverse()

    if not finalkeywords:
        # fallback: take the single highest-frequency bigram
        uniqkeywords = set(keywordsbackup)
        keywordsbackup = sorted(uniqkeywords, key=lambda keywordsbackup: keywordsbackup[1])
        # NOTE(review): userpairs.count(keywordsbackup[0]) counts a
        # (pair, n) tuple against a list of pairs, so it is always 0;
        # possibly userpairs.count(keywordsbackup[-1][0]) was intended.
        finalkeywords.append([' '.join(keywordsbackup[-1][0]), keywordsbackup[-1][1], userpairs.count(keywordsbackup[0])])
    else:
        # deal with plural
        pluralidx = self.findpluralbigram(finalkeywords)
        self.removepluralbigram(finalkeywords, pluralidx)
    #print 'end bigramkeyword\n'
    return finalkeywords
"""
"""
def removepluralbigram(self, bigram, pluralidx):
    """Merge plural/singular duplicate bigrams in-place.

    *pluralidx* is a list of index groups (from findpluralbigram);
    within each group, later entries are folded into the first one
    (their counts summed) and then removed from *bigram*.
    """
    if not pluralidx:
        print('empty')
        return

    removed = 0
    previous = 0
    for group in pluralidx:
        keep = group[0]
        for pos in group[1:]:
            if pos > previous:
                # earlier removals shifted the list; compensate
                pos -= removed
                bigram[keep][1] += bigram[pos][1]
                bigram.remove(bigram[pos])
                removed += 1
            previous = pos
"""
"""
def findpluralbigram(self, keywordsinfo):
    """Group indices of bigrams that collide after stripping a trailing
    's' from each word (e.g. 'neural network' / 'neural networks').

    :param keywordsinfo: list of [phrase, ...] entries.
    :returns: list of index groups, one per colliding normalized form
        that occurs more than once.
    """
    normalized = []
    for entry in keywordsinfo:
        key = ''.join(
            word[:-1] if word[-1] == 's' else word
            for word in entry[0].split()
        )
        normalized.append(key)

    groups = []
    for key in list(set(normalized)):
        positions = [
            pos for pos, other in enumerate(normalized) if other == key
        ]
        if len(positions) > 1:
            groups.append(positions)
    return groups
"""
"""
def mycoauthorsV2(self, name):
    """Coauthors of *name* with cooperation counts (descending),
    computed from the precomputed self.coauthornetV2 adjacency list.

    :returns: tuple of (list of OrderedDicts, coauthor indices sorted
        by count descending, matching counts).
    """
    if isinstance(name, str):
        idx = self.authordict.get(name)
    else:
        # bytes input: decode before the dictionary lookup
        idx = self.authordict.get(name.decode('utf-8'))

    coauthorship = self.coauthornetV2[idx]

    uniqcoauthors = np.array(list(set(coauthorship)))
    coauthorcount = []
    # count occurrences of each distinct coauthor
    for i in uniqcoauthors:
        coauthorcount.append(coauthorship.count(i))
    countidx = np.argsort(coauthorcount)
    # reverse it to descend order
    countidx = countidx[::-1]
    coauthorcount = np.array(coauthorcount)
    result = []
    for i in countidx:
        result.append(OrderedDict([("name", self.authors[uniqcoauthors[i]]), ("cooperationCount", coauthorcount[i])]))
    return (result, list(uniqcoauthors[countidx]), list(coauthorcount[countidx]))
"""
"""
def mycoauthorsV3(self, name):
    """Coauthors of *name* with cooperation counts (descending),
    derived by scanning the per-paper coauthor index lists.

    :returns: tuple of (list of OrderedDicts, coauthor indices sorted
        by count descending, matching counts).
    """
    if isinstance(name, str):
        idx = self.authordict.get(name)
    else:
        # bytes input: decode before the dictionary lookup
        idx = self.authordict.get(name.decode('utf-8'))

    coauthors = []
    for i in self.coauthorsidx:
        if idx in i:
            # remove itself
            t = i[:]
            t.remove(idx)
            coauthors.extend(t)

    coauthors = np.array(coauthors)
    unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
    unicoauthors = unicoauthors[coauthorcount.argsort()]
    coauthorcount.sort()

    result = []
    for i in range(len(coauthorcount)):
        result.append(OrderedDict([("name", self.authors[unicoauthors[-(i + 1)]]), ("cooperationCount", coauthorcount[-(i + 1)])]))
    return (result, list(unicoauthors[::-1]), list(coauthorcount[::-1]))
"""
"""
def mycoauthorsV4(self, name):
    """Coauthors of *name* with cooperation counts (descending).

    The body previously duplicated :meth:`mycoauthorsV3` line-for-line;
    delegate to it so the two versions cannot drift apart.  Kept as a
    separate method because external callers use the V4 name.

    :returns: tuple of (list of OrderedDicts, coauthor indices sorted
        by count descending, matching counts).
    """
    return self.mycoauthorsV3(name)
"""
"""
def mycoauthorsV4byidx(self, idx):
    """Same computation as mycoauthorsV3/V4, but keyed directly by the
    author index *idx* instead of a name lookup."""
    coauthors = []
    for i in self.coauthorsidx:
        if idx in i:
            # remove itself
            t = i[:]
            t.remove(idx)
            coauthors.extend(t)

    coauthors = np.array(coauthors)
    unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
    unicoauthors = unicoauthors[coauthorcount.argsort()]
    coauthorcount.sort()

    result = []
    for i in range(len(coauthorcount)):
        result.append(OrderedDict([("name", self.authors[unicoauthors[-(i + 1)]]), ("cooperationCount", coauthorcount[-(i + 1)])]))
    return (result, list(unicoauthors[::-1]), list(coauthorcount[::-1]))
"""
"""
def mycoauthorsV4bymentionlist(self, name):
    """Return `name`'s mention targets ranked by mention count.

    Falls back to a single {'None': 0} entry when the user has no
    outgoing mentions in self.mentionnetwork.
    """
    mentions = self.mentionnetwork.get(name, {'None': 0})
    # most-mentioned first
    ordered = sorted(mentions.items(), key=operator.itemgetter(1), reverse=True)
    return [OrderedDict([("name", who), ("cooperationCount", cnt)])
            for who, cnt in ordered]
"""
"""
def mycoauthorsbyyear(self, idx, year):
    """Co-author indices/counts of author `idx`, using only papers whose
    year value is <= `year`.

    Returns (co-author indices most-frequent first, matching counts).
    """
    cutoff = np.where(np.array(self.years) <= year)[0]
    selected_papers = [self.coauthorsidx[k] for k in cutoff]
    gathered = []
    for members in selected_papers:
        if idx in members:
            rest = members[:]
            rest.remove(idx)  # drop the author himself
            gathered.extend(rest)
    uniq, freq = np.unique(np.array(gathered), return_counts=True)
    uniq = uniq[freq.argsort()]
    freq.sort()
    return (list(uniq[::-1]), list(freq[::-1]))
"""
find the new coauthors for a user in the current year compared with a previous year.
example: mynewcoauthors(23, 2014, 2015) will return the new coauthors
in 2015 relative to the year 2014 for user 23; 23 is the index of a user
"""
def mynewcoauthors(self, userIdx, yearPre, yearCur):
    """Co-authors gained between `yearPre` and `yearCur` (as a numpy array).

    Example: mynewcoauthors(23, 2014, 2015) returns the co-author indices
    that appear in user 23's network up to 2015 but not up to 2014.
    """
    earlier, _ = self.mycoauthorsbyyear(userIdx, yearPre)
    current, _ = self.mycoauthorsbyyear(userIdx, yearCur)
    return np.setdiff1d(current, earlier)
"""
Call the weakties after mynewcoauthors() to find the common nodes
between a user and his/her coming new coauthors in the year before
their coauthorship
"""
def weakties(self, userX, userY, year):
    """Common neighbors between userX and userY up to `year`.

    Returns (bridge nodes, cooperation counts on userX's side,
    cooperation counts on userY's side).  If the two users are already
    direct co-authors (a strong tie), returns three empty lists.
    """
    netX, countsX = self.mycoauthorsbyyear(userX, year)
    if userY in netX:
        return ([], [], [])
    netY, countsY = self.mycoauthorsbyyear(userY, year)
    bridges = list(set(netX).intersection(netY))
    xweights = []
    yweights = []
    for node in bridges:
        xweights.append(countsX[netX.index(node)])
        yweights.append(countsY[netY.index(node)])
    return (bridges, xweights, yweights)
"""
2nd hoop connection
"""
def secondhoopties(self, userX, userY, year):
    """Enumerate 3-hop connections userX -> a -> b -> userY.

    For every intermediate pair (a, b) that links the two users, appends
    [[a, b], [w(userX,a), w(a,b), w(b,userY)]] where w are cooperation
    counts, and returns the collected list.

    Bug fixes vs. the original: the `year` argument was ignored (2016 was
    hard-coded in every lookup) and the collected `result` was never
    returned (the function implicitly returned None).
    """
    result = []
    coauthors1, count1 = self.mycoauthorsbyyear(userX, year)
    for a in coauthors1:
        coauthors2, count2 = self.mycoauthorsbyyear(a, year)
        for b in coauthors2:
            coauthors3, count3 = self.mycoauthorsbyyear(b, year)
            if userY in coauthors3:
                result.append([[a, b],
                               [count1[coauthors1.index(a)],
                                count2[coauthors2.index(b)],
                                count3[coauthors3.index(userY)]]])
    return result
"""
Get all the content(paper titles) of the userIdx before
the 'year'(include the year)
"""
def getcontentbyyear(self, userIdx, year):
    """Return the paper titles of `userIdx` dated <= `year`, newest first.

    Bug fix: the original sorted a detached copy of the per-title year
    array and then used positions from that sorted copy to index the
    *unsorted* title-index list, so the returned titles did not match the
    selected years.  Here every title index stays paired with its year.
    """
    title_idx = self.authortitlesidx[userIdx]
    # (year, title_index) pairs, most recent year first
    dated = sorted(((self.years[i], i) for i in title_idx), reverse=True)
    return [self.titles[i] for (y, i) in dated if y <= year]
"""
return the most frequent participated venue of a user
"""
def getVenue(self, userIdx):
    """Return the venue (booktitle index) the author participated in most."""
    tally = Counter(self.authorbooktitleidx[userIdx])
    return tally.most_common(1)[0][0]
"""
only consider the recent 10 papers
"""
def contentsimilarity(self, userX, userY, year):
    """Content-based distance between two authors' recent paper titles.

    Takes at most the 10 most recent titles of each author (as returned by
    getcontentbyyear), stems every token, builds a two-document term-count
    matrix and returns their pairwise cosine *distance*.

    Returns -1 when either author has no content up to `year`.

    NOTE(review): despite the local name `cosinesimilarity`, sklearn's
    pairwise_distances with metric='cosine' returns 1 - cosine similarity,
    i.e. a distance in [0, 2] — callers should treat smaller as more
    similar; confirm intended semantics.
    """
    contentX = self.getcontentbyyear(userX, year)
    if not contentX:
        return -1
    contentX = contentX[0:10]  # only consider the 10 most recent papers
    contentY = self.getcontentbyyear(userY, year)
    if not contentY:
        return -1
    contentY = contentY[0:10]
    # build the joint token list of both contents; remember where X ends
    contents = []
    for i in contentX:
        contents.extend(i.split(' '))
    lenx = len(contents)
    for i in contentY:
        contents.extend(i.split(' '))
    # normalize the different forms of words (stemming)
    stemmer = nltk.stem.PorterStemmer()
    stems = [stemmer.stem(t) for t in contents]
    # reconstruct content for userX and userY using the normalized words
    newcontentX = stems[0:lenx]
    newcontentY = stems[lenx:]
    vectorizer = CountVectorizer()
    v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(newcontentY)])
    cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0]
    return cosinesimilarity
"""
network similarity
"""
def networksimilarity(self, userX, userY, year):
    """Network similarity of userX and userY up to `year`.

    Implements the FG/MFG edge-count ratio from the paper
    "User similarities on social networks": FG is userX's friendship
    graph (ego network), MFG the mutual friendship graph induced by the
    weak-tie bridge nodes between the two users.

    Returns (ns, edgesFG, edgesMFG, cx, cy) where ns is
    log(edgesMFG) / log(2 * edgesFG), or -1 when either edge count is 0,
    and cx/cy are the bridge-node cooperation counts from weakties().
    """
    # first calculate FG(userX): edges from userX to each co-author plus
    # edges among the co-authors themselves (each counted once via the
    # coauthors[n:] tail slice)
    coauthors, c = self.mycoauthorsbyyear(userX, year)
    edgesFG = len(coauthors)
    n = 0
    for i in coauthors:
        subcoauthors, c = self.mycoauthorsbyyear(i, year)
        con = list(set(subcoauthors).intersection(coauthors[n:]))
        edgesFG = edgesFG + len(con)
        n = n + 1
    # second, calculate MFG(userX, userY): two edges per bridge node (to
    # userX and to userY) plus edges among the bridge nodes
    weakties, cx, cy = self.weakties(userX, userY, year)
    edgesMFG = 2 * len(weakties)
    n = 0
    for i in weakties:
        subcoauthors, c = self.mycoauthorsbyyear(i, year)
        con = list(set(subcoauthors).intersection(weakties[n:]))
        edgesMFG = edgesMFG + len(con)
        n = n + 1
    # last calculate the network similarity (guard against log(0))
    if edgesFG * edgesMFG:
        ns = np.log(edgesMFG)/np.log(2 * edgesFG)
    else:
        ns = -1
    return (ns, edgesFG, edgesMFG, cx, cy)
"""
text processing, normalize the words to their prototype, such as
plural form, progressive, etc
"""
def textnormalizing(self, text):
    """Collapse inflected word forms onto a base form already in `text`.

    For each word, if stripping a known suffix ('s', 'es', 'ies'->'y',
    'ing', 'ing' with doubled consonant, or 'ing'->'e') yields a word that
    already occurs in the list, the word is replaced in place by that base
    form.  Mutates `text` and also returns it.

    Bug fix: the last 'ing' fallback used to execute `text[c] = c + 1`
    (storing an integer position instead of the word) and never advanced
    the position counter, corrupting the list and misaligning every later
    replacement.
    """
    c = 0
    for word in text:
        # network -> networks
        if word[-1] == 's':
            base = word[:-1]
            if base in text:
                text[c] = base
                c += 1
                continue
        # bus -> buses
        if word[-2:] == 'es':
            base = word[:-2]
            if base in text:
                text[c] = base
                c += 1
                continue
        # study -> studies
        if word[-3:] == 'ies':
            base = word[:-3] + 'y'
            if base in text:
                text[c] = base
                c += 1
                continue
        # network -> networking / get -> getting / explore -> exploring
        if word[-3:] == 'ing':
            base = word[:-3]
            if base in text:
                text[c] = base
                c += 1
                continue
            base = word[:-4]           # doubled consonant: get -> getting
            if base in text:
                text[c] = base
                c += 1
                continue
            base = word[:-3] + 'e'     # explore -> exploring
            if base in text:
                text[c] = base         # fixed: was `text[c] = c + 1`
                c += 1
                continue
        c += 1
    return text
"""
"""
"""
radius of the cluster
"""
def radiusofcluster(self, labels, nth, dismatrix):
    """Record min/max member distance of cluster `nth` on the instance.

    Sets self.mindis, self.maxdis and self.radius (= maxdis) from the
    `nth` column of `dismatrix`, restricted to the rows whose label
    equals `nth`.  Returns nothing.
    """
    members = np.where(labels == nth)[0]
    member_dis = dismatrix[members, nth]
    self.mindis = min(member_dis)
    self.maxdis = max(member_dis)
    self.radius = self.maxdis
"""
show contents in the same cluster
"""
def showcontents(self, labels, nth, allcontents):
    """Return the entries of `allcontents` whose label equals `nth`."""
    members = np.asarray(np.where(labels == nth)).flatten()
    return [allcontents[k] for k in members]
"""
check if there is digtial in the string
"""
def digstring(self, s):
    """Return True if `s` contains at least one decimal digit."""
    return any(ch.isdigit() for ch in s)
"""
compute the distance between two points a and b
"""
def distance(self, a, b):
    """Euclidean distance between two vectors.

    Accepts dense sequences or single-row scipy sparse matrices; sparse
    inputs are densified to their first row before computing.
    """
    if scipy.sparse.issparse(a):
        a = a.toarray()[0]
    if scipy.sparse.issparse(b):
        b = b.toarray()[0]
    diff = np.array(a) - np.array(b)
    return np.sqrt(np.square(diff).sum())
"""
"""
def updatecoauthornetworkV2(self, net, authors, namelist):
    """Fold one paper's author list into the co-authorship network `net`.

    `net` is a list indexed by author id; entry i accumulates the ids of
    everyone who co-published with author i.  Mutates `net` in place.

    NOTE(review): new authors are appended, which assumes their id equals
    the current length of `net` — verify caller keeps ids dense/ordered.
    """
    idxs = [authors.index(name) for name in namelist]
    for author_idx in idxs:
        partners = idxs[:]
        partners.remove(author_idx)  # one occurrence of the author himself
        if not net:
            net.append(partners)
        elif author_idx > len(net) - 1:
            net.append(partners)
        else:
            net[author_idx].extend(partners)
"""
load the person or organization label
"""
def per_org_label(self):
    """Load person/organization labels from self.f_perorglabel.

    Each line is "<id> <label>"; the result is stored as the dict
    self.labels mapping id -> label.
    """
    labels = {}
    with codecs.open(self.f_perorglabel, 'r', 'utf-8') as fh:
        for line in fh:
            parts = line.split()
            labels[parts[0]] = parts[1]
    self.labels = labels
"""
"""
def mention_network(self):
    """Parse self.f_mentionnetwork into {source: {target: count}}.

    Each input line is '"'-delimited: source, target, count.  Lines are
    grouped by consecutive identical sources.

    Bug fixes vs. the original: (1) on a source change the accumulated
    dict was stored under the *new* source's key instead of the old one;
    (2) the first line of every new source group was dropped; (3) the
    final source group was never stored at all.
    """
    network = {}
    source = None
    target = {}
    with codecs.open(self.f_mentionnetwork, 'r', 'utf-8') as fh:
        for line in fh:
            items = line.split('"')
            if source is None:
                source = items[0]
            if items[0] != source:
                # flush the finished group under its own key
                network[source] = target
                source = items[0]
                target = {}
            target[items[1]] = int(items[2])
    if source is not None:
        network[source] = target
    return network
"""
"""
def docluster(self):
    """Load the corpus files and build all per-author feature structures.

    Reads titles, years, booktitles, authors and author-id files (paths on
    self.f_*), populates the author/title/co-authorship indices and ends by
    fitting a count vectorizer + TF-IDF transform over the per-author
    concatenated titles (self.tfidf / self.tfidfarray / self.featurenames).
    """
    tokenizer = RegexpTokenizer(r'\w+')
    self.rawtitles = []
    self.titles = []
    self.allcorp = []
    sw = set(nltk.corpus.stopwords.words('english'))
    self.debugmsg('start titles \n', 0)
    f = codecs.open(self.f_titles,'r','utf-8')
    for line in f:
        # remove the '\n' at the end
        if line[-1] == '\n':
            line = line[:-1]
        self.rawtitles.append(line)
        line = line.lower()
        tokenlist = tokenizer.tokenize(line)
        self.allcorp += tokenlist
        # collect all the words except digitals and stopwords
        # NOTE(review): `~(...)` on a bool gives -1/-2, which is always
        # truthy, so the digit filter never excludes anything; this was
        # probably meant to be `and not self.digstring(w)`.
        tokenlist = ' '.join([w for w in tokenlist if (w.lower() not in sw) & ~(self.digstring(w))])
        self.titles.append(tokenlist)
    f.close()
    # per-author accumulators and lookup tables
    self.authordict = {}           # author name -> author index
    self.authors = []              # author index -> name
    self.authorcontents = []       # filtered titles concatenated per author
    self.authorrawcontents = []    # raw titles concatenated per author
    self.authortitlesidx = []      # author index -> list of title indices
    self.authorbooktitleidx = []   # author index -> list of booktitle indices
    self.coathors = []             # per-paper author name lists
    self.coauthorsidx = []         # undirected links, e.g. dblp coauthorship network
    self.mentionnetwork = {}       # directed links, e.g. tweet mention network
    self.id_name = {}              # author name -> external id
    self.coauthornetV2 = []
    # read in the mention network
    self.mentionnetwork = self.mention_network()
    # read years (stored as unix timestamps, one per title)
    self.debugmsg('start year \n', 0)
    self.years = []
    f = codecs.open(self.f_years,'r','utf-8')
    for line in f:
        # remove \n
        if line[-1] == '\n':
            line = line[:-1]
        if line == '':
            line = 0
        # NOTE(review): parser.parse(0) raises for the empty-line case
        # above — confirm empty year lines cannot occur in the data.
        timestamp = time.mktime(parser.parse(line).timetuple())
        self.years.append(int(timestamp))
    f.close()
    # read conference / venue names
    self.debugmsg('start booktitle \n', 0)
    self.booktitle = []
    f = codecs.open(self.f_booktitle,'r','utf-8')
    for line in f:
        # remove the \n at the end
        line = line[:-1]
        self.booktitle.append(line)
    f.close()
    # read authors: one comma-separated author list per paper
    self.debugmsg('start authors \n', 0)
    i = 0   # paper index
    m = 0   # next fresh author index
    f = codecs.open(self.f_authors,'r','utf-8')
    for line in f:
        # remove the last '\n'
        line = line[:-1]
        # split the authors by ','
        newline = line.split(",")
        namelist = newline
        self.coathors.append(namelist)
        authoridx = []
        for name in newline:
            # dictionary version of the author lookup
            idx = self.authordict.get(name)
            if idx is not None:
                self.authortitlesidx[idx].append(i)
                self.authorbooktitleidx[idx].append(i)
                self.authorcontents[idx] = self.authorcontents[idx] + ' ' + self.titles[i]
                self.authorrawcontents[idx] = self.authorrawcontents[idx] + ' ' + self.rawtitles[i]
            else:
                self.authors.append(name)
                self.authordict[name] = m
                self.authorcontents.append(self.titles[i])
                self.authorrawcontents.append(self.rawtitles[i])
                self.authortitlesidx.append([i])
                self.authorbooktitleidx.append([i])
                idx = m
                m = m + 1
            authoridx.append(idx)
        self.coauthorsidx.append(authoridx)
        i = i + 1
    f.close()
    # read external author ids; consecutive duplicate lines are skipped
    f = codecs.open(self.f_authors_id,'r','utf-8')
    i = 0
    preline = ''
    for line in f:
        if preline != line:
            if line[-1] == '\n':
                newline = line[:-1]
            self.id_name[self.authors[i]] = newline
            preline = line
            i = i + 1
        else:
            continue
    f.close()
    # load the per/org classification result into self.labels
    self.per_org_label()
    # term-count matrix over the per-author contents, then TF-IDF weighting
    self.vectorizer = CountVectorizer(max_df=0.95, min_df=1,stop_words='english')
    X = self.vectorizer.fit_transform(self.authorcontents)
    Xarray = X
    transformer = TfidfTransformer()
    self.tfidf = transformer.fit_transform(Xarray)
    self.tfidfarray = self.tfidf
    self.featurenames = self.vectorizer.get_feature_names()
"""
"""
def recommendationV3(self, name, n):
    """Build 3 x n recommendations for `name` (coauthorship variant).

    Ranks all authors by cosine distance in TF-IDF space, Otsu-splits the
    distances into close/medium/far groups, then collects `n` valid
    candidates per group via getremdinfo().  The shuffled result is stored
    on self.result and returned as OrderedDict(name, recommendations).

    Bug fix: the duplicate filter used `~remdidx.count(i)`; since ~0 == -1
    and ~1 == -2 are both truthy, it never rejected anything.  Replaced
    with a real membership test.
    """
    self.nremd = n
    self.debugmsg('Will generate recommendations in 3 groups and ' + str(n) + ' for each group', 1)
    self.debugmsg('find the idx', 0)
    if isinstance(name, str):
        name = ud.normalize('NFC', name)
        authorIdx = self.authordict.get(name)
    else:
        name = name.decode('utf-8')
        name = ud.normalize('NFC', name)
        authorIdx = self.authordict.get(name)
    self.myidx = authorIdx
    self.debugmsg('get the feature vector', 0)
    featuretfidf = self.tfidfarray[authorIdx]
    self.debugmsg('start distance computing \n', 0)
    (self.closeauthors, self.closeauthordis) = self.nNNlinesearch(self.tfidfarray, featuretfidf, 0)
    self.debugmsg('end distance computing \n', 0)
    # here we can define the range to apply the otsu for recommendations,
    # e.g. self.closeauthordis[0:1000] or all of them
    self.debugmsg('start otsuifilter\n', 0)
    splitidx = self.otsufilter(self.closeauthordis)
    self.debugmsg('end otsufilter\n', 0)
    # splitidx contains the first index of the three groups (close,
    # medium, far); collect self.nremd valid candidates from each
    recommendations = []
    remdidx = []  # indices already accepted, to avoid duplicates
    for i in splitidx:
        n = 0
        backwardcount = 1
        while n != self.nremd:
            if self.closeauthors[i] != self.myidx:  # skip myself
                remdinfo = self.getremdinfo(i)
                # fixed: was `remdinfo and ~remdidx.count(i)` (always true)
                if remdinfo and i not in remdidx:
                    recommendations.append(remdinfo)
                    n = n + 1
                    remdidx.append(i)
            i = i + 1
            # didn't find the required number of valid candidates before
            # the end of the list: search backwards from the group start
            if (i == len(self.closeauthordis)) or (backwardcount > 1):
                if backwardcount == 1:
                    backwardstart = i - self.nremd
                i = backwardstart - backwardcount
                backwardcount = backwardcount + 1
    # randomize the order of the recommendations
    random.shuffle(recommendations)
    self.result = OrderedDict([("name", name), ("recommendations", recommendations)])
    self.debugmsg('end recommendationV3 \n', 0)
    return self.result
"""
"""
def recommendationV4(self, name, n):
    """Build 3 x n recommendations for `name` (mention-network variant).

    Identical to recommendationV3 except candidates are vetted through
    getremdinfoV2() (mention-list based, filters organizations).  Stores
    and returns OrderedDict(name, recommendations).

    Bug fix: the duplicate filter used `~remdidx.count(i)`, which is
    always truthy (~0 == -1, ~1 == -2); replaced with a membership test.
    """
    self.nremd = n
    self.debugmsg('Will generate recommendations in 3 groups and ' + str(n) + ' for each group', 1)
    self.debugmsg('find the idx', 0)
    if isinstance(name, str):
        name = ud.normalize('NFC', name)
        authorIdx = self.authordict.get(name)
    else:
        name = name.decode('utf-8')
        name = ud.normalize('NFC', name)
        authorIdx = self.authordict.get(name)
    self.myidx = authorIdx
    self.debugmsg('get the feature vector', 0)
    featuretfidf = self.tfidfarray[authorIdx]
    self.debugmsg('start distance computing \n', 0)
    (self.closeauthors, self.closeauthordis) = self.nNNlinesearch(self.tfidfarray, featuretfidf, 0)
    self.debugmsg('end distance computing \n', 0)
    # here we can define the range to apply the otsu for recommendations,
    # e.g. self.closeauthordis[0:1000] or all of them
    self.debugmsg('start otsuifilter\n', 0)
    splitidx = self.otsufilter(self.closeauthordis)
    self.debugmsg('end otsufilter\n', 0)
    # splitidx contains the first index of the three groups (close,
    # medium, far); collect self.nremd valid candidates from each
    recommendations = []
    remdidx = []  # indices already accepted, to avoid duplicates
    for i in splitidx:
        n = 0
        backwardcount = 1
        while n != self.nremd:
            if self.closeauthors[i] != self.myidx:  # skip myself
                remdinfo = self.getremdinfoV2(i)
                # fixed: was `remdinfo and ~remdidx.count(i)` (always true)
                if remdinfo and i not in remdidx:
                    recommendations.append(remdinfo)
                    n = n + 1
                    remdidx.append(i)
            i = i + 1
            # didn't find the required number of valid candidates before
            # the end of the list: search backwards from the group start
            if (i == len(self.closeauthordis)) or (backwardcount > 1):
                if backwardcount == 1:
                    backwardstart = i - self.nremd
                i = backwardstart - backwardcount
                backwardcount = backwardcount + 1
    # randomize the order of the recommendations
    random.shuffle(recommendations)
    self.result = OrderedDict([("name", name), ("recommendations", recommendations)])
    self.debugmsg('end recommendationV4 \n', 0)
    return self.result
"""
find n nearset neighbors of point p in given space using linear search
if n == 0, sort all the points in space
"""
def nNNlinesearch(self, space, p, n):
    """Linear-scan nearest neighbors of point `p` in `space` (cosine).

    Returns (indices, distances) sorted by increasing distance; if n > 0
    only the first n entries are kept, otherwise all points are returned.
    The query point itself (self.myidx, distance 0) is removed.
    """
    dists = pairwise_distances(space, p, metric='cosine').flatten()
    order = dists.argsort()
    dists.sort()
    if n > 0:
        order = order[0:n]
        dists = dists[0:n]
    # drop myself: my own distance is always 0
    myself = np.where(order == self.myidx)[0][0]
    return (np.delete(order, myself), np.delete(dists, myself))
"""
split the distance in to 3 groups using otsu filtering
return the first index of each group
"""
def otsufilter(self, tdis):
    """Split sorted distances into 3 groups with two Otsu thresholds.

    Returns a 3-element int array with the starting index of each group:
    [0, middle of the medium group minus one, len - 3] — i.e. the close
    group starts at 0, the "medium" pointer sits mid-group, and the "far"
    pointer selects the last 3 entries.
    """
    boundaries = np.zeros(3, int)
    t1 = filters.threshold_otsu(tdis)
    t2 = filters.threshold_otsu(tdis[tdis > t1])
    n_close = len(tdis[tdis < t1])
    # middle of the medium band [t1, t2)
    boundaries[1] = n_close + (len(tdis[tdis < t2]) - n_close) // 2 - 1
    # last 3 entries form the far group
    boundaries[2] = len(tdis) - 3
    return boundaries
"""
extract the detailed information of the recommendation by its index in
the closeauthors.
Ignore unqualified candidates that have few papers or are not active
recently, and also remove my co-authors
"""
def getremdinfo(self, clsidx):
    """Detail record for the candidate at position `clsidx` in closeauthors.

    Returns an OrderedDict(name, relevancy, coAuthors, researchTopics,
    recentPublications), or [] when the candidate has no recent
    publications or is already a co-author of the current user.
    """
    # get the author index from closeauthors
    remdidx = self.closeauthors[clsidx]
    recentpub = self.resentpublicationsidx(remdidx)
    if recentpub:
        name = self.authors[remdidx]
        [coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)
        if idx.count(self.myidx):
            # candidate already co-authored with me: not a recommendation
            return []
        researchtopic = self.keywordbyidx(remdidx)
        return OrderedDict([("name",name), ("relevancy",self.closeauthordis[clsidx]),("coAuthors",coauthors),("researchTopics",researchtopic), ("recentPublications",recentpub)])
    else:
        # inactive candidate (no recent publications)
        return []
"""
extract the detailed information of the recommendation by its index in
the closeauthors.
Ignore unqualified candidates that have few papers or are not active
recently, and also remove known people in the mention network
"""
def getremdinfoV2(self, clsidx):
    """Detail record for the candidate at position `clsidx` (mention variant).

    Unlike getremdinfo, candidates the user already mentions and
    candidates classified as organizations (self.labels[id] == 'org') are
    rejected, and the coAuthors field comes from the mention network.
    Returns an OrderedDict, or [] when the candidate is filtered out.
    """
    # get the author index from closeauthors
    remdidx = self.closeauthors[clsidx]
    username = self.authors[self.myidx]
    recentpub = self.resentpublicationsidx(remdidx)
    if recentpub:
        name = self.authors[remdidx]
        mentionlist = self.mentionnetwork[username]
        if name in mentionlist:
            # the user already interacts with this person: skip
            return []
        # only recommend persons, not organizations
        remdid = self.id_name[name]
        if self.labels[remdid] == 'org':
            return []
        # get the recommendation's mention list
        coauthors = self.mycoauthorsV4bymentionlist(name)
        researchtopic = self.keywordbyidx(remdidx)
        return OrderedDict([("name",name), ("relevancy",self.closeauthordis[clsidx]),("coAuthors", coauthors),("researchTopics",researchtopic), ("recentPublications",recentpub)])
    else:
        # inactive candidate (no recent publications)
        return []
"""
"""
def updatedistance(self):
    """Split self.closeauthors into 1st- and 2nd-degree connections.

    Uses self.coauthornet (adjacency weights) relative to self.myidx.
    Returns (deg1 positions, normalized deg1 weights, deg2 positions,
    accumulated deg2 weights) where positions index into closeauthors.
    """
    # direct (1st degree) connection weights in the coauthorship network
    direct = self.coauthornet[self.myidx, self.closeauthors]
    deg1idx = np.where(direct > 0)[0]
    # everyone with no direct link is a 2nd-degree candidate
    deg2idx = np.where(direct == 0)[0]
    deg2 = np.zeros(deg2idx.size)
    for neighbor in self.closeauthors[deg1idx]:
        deg2 = deg2 + self.coauthornet[neighbor, self.closeauthors[deg2idx]]
    deg1 = direct[direct > 0]
    deg1 = deg1 / max(deg1)
    return (deg1idx, deg1, deg2idx, deg2)
"""
return the top N recommendations:
recommendations, coauthors, researchtopics, recentpub(at least 3 and no
morethan 5 years)
"""
def filteredrecommendations(self, n):
    """Return up to `n` recommendation records from self.recommendauthor.

    Walks the pre-ranked candidate list, skipping existing co-authors and
    authors without recent publications; also records the surviving
    candidates on self.filteredauthors and their positions on
    self.filteridx (used later by filteredcloseauthordis).  Research
    topics are deferred (placeholder "TBD") because keyword extraction is
    expensive.
    """
    recommendations = []
    self.filteridx = []
    self.filteredauthors = []
    i = 0
    for name in self.recommendauthor:
        [coauthors, idx, c] = self.mycoauthorsV4(name)
        # remove candidates who are already my co-authors
        if idx.count(self.myidx):
            i = i+1
            continue
        recentpub = self.resentpublications(name)
        # check if the recentpub is empty, i.e. the author is inactive
        if not recentpub:
            i = i+1
            continue
        self.filteredauthors.append(name)
        # keyword extraction takes too much time: use a placeholder here
        researchtopic = []
        researchtopic.append(OrderedDict([("topic", "TBD")]))
        recommendations.append(OrderedDict([("name",name), ("relevancy",self.closeauthordis[i]),("coAuthors",coauthors),("researchTopics",researchtopic), ("recentPublications",recentpub)]))
        # save the picked index
        self.filteridx.append(i)
        i = i+1
        # only need the top n recommendations
        if len(self.filteridx) == n:
            break
    return recommendations
"""
"""
def thresholdrecommendations(self, remds, n):
    """Pick n recommendations spread over 3 Otsu distance groups.

    Splits the filtered distance list with two Otsu thresholds, records
    the group start offsets on self.trd, takes n/3 entries from the top
    of each group, fills in their real researchTopics via self.keyword,
    and returns the selected records.
    """
    thredremd = []
    self.trd = np.zeros(3)
    tdis = self.filteredcloseauthordis()
    t1 = filters.threshold_otsu(tdis)
    t2 = filters.threshold_otsu(tdis[tdis>t1])
    # start offsets: group 0 starts at 0, group 1 after the close band,
    # group 2 after the medium band
    self.trd[1] = len(tdis[tdis<t1])
    self.trd[2] = len(tdis) - len(tdis[tdis>t2])
    for i in range(3):
        for j in range(int(n/3)):
            k = int(self.trd[i]+j)
            name = remds[k]['name']
            # keyword extraction is done only for the selected few
            researchtopic = self.keyword(name)
            remds[k]['researchTopics'] = researchtopic
            thredremd.append(remds[k])
    return thredremd
"""
"""
def filteredcloseauthordis(self):
    """Distances of only the candidates kept by the last filtering pass."""
    keep = self.filteridx
    return self.closeauthordis[keep]
"""
"""
def save_json(self, filename):
    """Write self.result as UTF-8 JSON to the project output directory.

    NOTE(review): relies on a module-level `project_name` global — it is
    not defined in this block's visible scope; confirm it exists.
    """
    out_dir = 'output/project/' + project_name + '/'
    with io.open(out_dir + filename + '.json', 'w', encoding="utf-8") as outfile:
        outfile.write(json.dumps(self.result, ensure_ascii=False))
|
8,678 | 74028a7b317c02c90603ad24c1ddb35a1d5d0e9d | student = []
# Interactive grade register: collect names + two grades per student,
# list the averages, then let the user look students up by ID.
while True:
    name = str(input('Name: ')).capitalize().strip()
    grade1 = float(input('Grade 1: '))
    grade2 = float(input('Grade 2: '))
    avgrade = (grade1 + grade2) / 2
    student.append([name, [grade1, grade2], avgrade])
    # Ask whether to continue; accept exactly one of Y/y/N/n.
    # (Bug fixes: the original `resp not in 'NnYy'` accepted the empty
    # string — '' is a substring of any string — and `resp == 'N'` never
    # stopped on a lower-case 'n'.)
    resp = ''
    while resp not in ('Y', 'y', 'N', 'n'):
        resp = str(input('Another student? [Y/N]')).strip()
    if resp in ('N', 'n'):
        break
print('-=' * 15)
print(f'{"No.":<4}{"Name:":<10}{"Average Grade:":>8}')
print('-=' * 15)
for i, a in enumerate(student):
    print(f'{i:<4}{a[0]:<8}{a[2]:>8.1f}')
while True:
    print('-=' * 20)
    opt = int(input('Enter the student ID to show the grades: (999 to exit) '))
    if opt == 999:
        print('Exiting...')
        break
    # Reject negative IDs too: Python would silently index from the end.
    if 0 <= opt < len(student):
        print(f'Grades of {student[opt][0]} are {student[opt][1]}')
print('Have a nice day!!!')
|
8,679 | 606abf8501d85c29051df4bf0276ed5b098ee6c5 | from django.contrib import admin
from search.models import PrimaryCategory,PlaceCategory
class PrimaryCategoryAdmin(admin.ModelAdmin):
    """Admin for primary categories; bulk actions and deletion disabled."""
    list_display = ('primary_name', 'is_active', 'description', 'image',)
    actions = None

    def has_delete_permission(self, request, obj=None):
        # Primary categories must never be deleted through the admin.
        return False
class PlaceCategoryAdmin(admin.ModelAdmin):
    """Admin for place categories, showing the owning primary category."""
    list_display = ('category_name', 'is_paid', 'description', 'is_active', 'image', 'primary_category')
    actions = None

    def primary_category(self, obj):
        # List-column callable: name of the related primary category.
        return obj.primary_category.primary_name

    def has_delete_permission(self, request, obj=None):
        # Place categories must never be deleted through the admin.
        return False
# Register both category models with their customized admin classes.
admin.site.register(PrimaryCategory,PrimaryCategoryAdmin)
admin.site.register(PlaceCategory,PlaceCategoryAdmin)
|
8,680 | 932502c93dd7dfc095adfe2ab88b4404396d9845 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo_concurrency import processutils as putils
import six
from cinder import context
from cinder import exception
from cinder.tests.unit.targets import targets_fixture as tf
from cinder import utils
from cinder.volume.targets import iet
class TestIetAdmDriver(tf.TargetDriverFixture):
    """Unit tests for the IET (iSCSI Enterprise Target) volume driver.

    All external effects (ietadm execution, /proc/net/iet files, config
    files) are mocked; the tests assert the driver's parsing logic and its
    translation of process errors into cinder exceptions.
    """

    def setUp(self):
        super(TestIetAdmDriver, self).setUp()
        self.target = iet.IetAdm(root_helper=utils.get_root_helper(),
                                 configuration=self.configuration)

    def test_get_target(self):
        # Simulated /proc/net/iet content: one target with one session.
        tmp_file = six.StringIO()
        tmp_file.write(
            'tid:1 name:iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n'  # noqa
            ' sid:844427031282176 initiator:iqn.1994-05.com.redhat:5a6894679665\n'  # noqa
            ' cid:0 ip:10.9.8.7 state:active hd:none dd:none')
        tmp_file.seek(0)
        with mock.patch('six.moves.builtins.open') as mock_open:
            mock_open.return_value = contextlib.closing(tmp_file)
            self.assertEqual('1',
                             self.target._get_target(
                                 'iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45'  # noqa
                             ))

            # Test the failure case: failed to handle the config file
            mock_open.side_effect = MemoryError()
            self.assertRaises(MemoryError,
                              self.target._get_target,
                              '')

    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target',
                return_value=0)
    @mock.patch('cinder.utils.execute')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch.object(iet, 'LOG')
    def test_create_iscsi_target(self, mock_log, mock_chown, mock_exists,
                                 mock_execute, mock_get_targ):
        mock_execute.return_value = ('', '')
        tmp_file = six.StringIO()
        with mock.patch('six.moves.builtins.open') as mock_open:
            mock_open.return_value = contextlib.closing(tmp_file)
            self.assertEqual(
                0,
                self.target.create_iscsi_target(
                    self.test_vol,
                    0,
                    0,
                    self.fake_volumes_dir))
            self.assertTrue(mock_execute.called)
            self.assertTrue(mock_open.called)
            self.assertTrue(mock_get_targ.called)

            # Test the failure case: failed to chown the config file
            mock_open.side_effect = putils.ProcessExecutionError
            self.assertRaises(exception.ISCSITargetCreateFailed,
                              self.target.create_iscsi_target,
                              self.test_vol,
                              0,
                              0,
                              self.fake_volumes_dir)

            # Test the failure case: failed to set new auth
            mock_execute.side_effect = putils.ProcessExecutionError
            self.assertRaises(exception.ISCSITargetCreateFailed,
                              self.target.create_iscsi_target,
                              self.test_vol,
                              0,
                              0,
                              self.fake_volumes_dir)

    @mock.patch('cinder.utils.execute')
    @mock.patch('os.path.exists', return_value=True)
    def test_update_config_file_failure(self, mock_exists, mock_execute):
        # Test the failure case: conf file does not exist
        mock_exists.return_value = False
        mock_execute.side_effect = putils.ProcessExecutionError
        self.assertRaises(exception.ISCSITargetCreateFailed,
                          self.target.update_config_file,
                          self.test_vol,
                          0,
                          self.fake_volumes_dir,
                          "foo bar")

    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target',
                return_value=1)
    @mock.patch('cinder.utils.execute')
    def test_create_iscsi_target_already_exists(self, mock_execute,
                                                mock_get_targ):
        # An existing target id must be returned without failing.
        mock_execute.return_value = ('fake out', 'fake err')
        self.assertEqual(
            1,
            self.target.create_iscsi_target(
                self.test_vol,
                1,
                0,
                self.fake_volumes_dir))
        self.assertTrue(mock_get_targ.called)
        self.assertTrue(mock_execute.called)

    @mock.patch('cinder.volume.targets.iet.IetAdm._find_sid_cid_for_target',
                return_value=None)
    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('cinder.utils.execute')
    def test_remove_iscsi_target(self, mock_execute, mock_exists, mock_find):
        # Test the normal case
        self.target.remove_iscsi_target(1,
                                        0,
                                        self.testvol['id'],
                                        self.testvol['name'])
        mock_execute.assert_any_call('ietadm',
                                     '--op',
                                     'delete',
                                     '--tid=1',
                                     run_as_root=True)

        # Test the failure case: putils.ProcessExecutionError
        mock_execute.side_effect = putils.ProcessExecutionError
        self.assertRaises(exception.ISCSITargetRemoveFailed,
                          self.target.remove_iscsi_target,
                          1,
                          0,
                          self.testvol['id'],
                          self.testvol['name'])

    def test_find_sid_cid_for_target(self):
        # Parse session id and connection id out of /proc/net/iet output.
        tmp_file = six.StringIO()
        tmp_file.write(
            'tid:1 name:iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n'  # noqa
            ' sid:844427031282176 initiator:iqn.1994-05.com.redhat:5a6894679665\n'  # noqa
            ' cid:0 ip:10.9.8.7 state:active hd:none dd:none')
        tmp_file.seek(0)
        with mock.patch('six.moves.builtins.open') as mock_open:
            mock_open.return_value = contextlib.closing(tmp_file)
            self.assertEqual(('844427031282176', '0'),
                             self.target._find_sid_cid_for_target(
                                 '1',
                                 'iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45',  # noqa
                                 'volume-83c2e877-feed-46be-8435-77884fe55b45'  # noqa
                             ))

    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target',
                return_value=1)
    @mock.patch('cinder.utils.execute')
    @mock.patch.object(iet.IetAdm, '_get_target_chap_auth')
    def test_create_export(self, mock_get_chap, mock_execute,
                           mock_get_targ):
        mock_execute.return_value = ('', '')
        mock_get_chap.return_value = ('QZJbisGmn9AL954FNF4D',
                                      'P68eE7u9eFqDGexd28DQ')
        expected_result = {'location': '10.9.8.7:3260,1 '
                           'iqn.2010-10.org.openstack:testvol 0',
                           'auth': 'CHAP '
                           'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'}
        ctxt = context.get_admin_context()
        self.assertEqual(expected_result,
                         self.target.create_export(ctxt,
                                                   self.testvol,
                                                   self.fake_volumes_dir))
        self.assertTrue(mock_execute.called)

    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target_chap_auth',
                return_value=None)
    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target',
                return_value=1)
    def test_ensure_export(self, mock_get_target, mock_get_chap):
        # ensure_export must delegate to create_iscsi_target with the
        # portal taken from the configuration.
        ctxt = context.get_admin_context()
        with mock.patch.object(self.target, 'create_iscsi_target'):
            self.target.ensure_export(ctxt,
                                      self.testvol,
                                      self.fake_volumes_dir)
            self.target.create_iscsi_target.assert_called_once_with(
                'iqn.2010-10.org.openstack:testvol',
                1, 0, self.fake_volumes_dir, None,
                portals_ips=[self.configuration.iscsi_ip_address],
                portals_port=int(self.configuration.iscsi_port),
                check_exit_code=False,
                old_name=None)
|
8,681 | 7c6ada250770e04b395dda774a78042da69e2854 | from collections import Counter
def main():
    """Read N and N integers; print "Yes" if they split into XOR triples.

    Accepted multiset shapes (each corresponding to triples (a, b, a ^ b)):
      * every element is 0;
      * exactly two distinct values, 0 and some x, with counts N/3 and
        2N/3 (triples of the form (x, x, 0));
      * exactly three distinct values, each appearing N/3 times, where the
        two most common XOR to the third.
    NOTE(review): inferred from the checks below — looks like a
    competitive-programming solution; confirm against the original
    problem statement.
    """
    N = int(input())
    A = tuple(map(int, input().split()))
    c = Counter(A).most_common()
    if c[0][0] == 0 and c[0][1] == N:
        print("Yes")
    elif len(c) == 2 and c[0][1] == 2*N//3 and c[1][0] == 0 and c[1][1] == N//3:
        print("Yes")
    elif len(c) == 3 and int(c[0][0])^int(c[1][0]) == int(c[2][0]) and c[0][1] == c[1][1] and c[1][1] == c[2][1]:
        print("Yes")
    else:
        print("No")

if __name__ == "__main__":
    main()
8,682 | 130581ddb0394dcceabc316468385d4e21959b63 | import unittest
from domain.Activity import Activity
from domain.NABException import NABException
from domain.Person import Person
from domain.ActivityValidator import ActivityValidator
from repository.PersonRepository import PersonRepository
from repository.PersonFileRepository import PersonFileRepository
from repository.ActivityRepository import ActivityRepository
from repository.ActivityFileRepository import ActivityFileRepository
from controller.StatsController import StatsController
class StatsControllerTestCase(unittest.TestCase):
    """Tests for StatsController's filtering/sorting queries.

    The fixture holds two people (ids 1 and 2) with two activities each;
    every test asserts the exact ordering returned by the controller.
    """

    def setUp(self):
        pR = PersonRepository()
        aR = ActivityRepository()
        self.L = StatsController(pR, aR)
        self.p = Person(1, "John", "1", "A")
        self.q = Person(2, "Mary", "1", "B")
        self.a1 = Activity(self.p, "2015.12.20", "12:12", "Swimming")
        self.a2 = Activity(self.p, "2016.01.20", "12:12", "Mapping")
        self.a3 = Activity(self.q, "2015.12.21", "12:12", "Swimming")
        self.a4 = Activity(self.q, "2015.12.20", "10:12", "Reading")
        pR.add(self.p)
        pR.add(self.q)
        aR.add(self.a1)
        aR.add(self.a2)
        aR.add(self.a3)
        aR.add(self.a4)

    def test_activities_for_person_alphabetically(self):
        # Sorted by activity name; unknown person id yields an empty list.
        L = self.L
        a1 = self.a1
        a2 = self.a2
        a3 = self.a3
        a4 = self.a4
        assert L.activities_for_person_alphabetically(1) == [a2, a1]
        assert L.activities_for_person_alphabetically(2) == [a4, a3]
        assert L.activities_for_person_alphabetically(4) == []

    def test_activities_for_person_by_date(self):
        # Sorted chronologically (date, then time).
        L = self.L
        a1 = self.a1
        a2 = self.a2
        a3 = self.a3
        a4 = self.a4
        assert L.activities_for_person_by_date(1) == [a1, a2]
        assert L.activities_for_person_by_date(2) == [a4, a3]
        assert L.activities_for_person_by_date(4) == []

    def test_people_with_activities_in_interval(self):
        # Inclusive date-interval filter over people.
        L = self.L
        p = self.p
        q = self.q
        assert L.people_with_activities_in_interval("2015.12.20", "2016.01.01") == [p, q]
        assert L.people_with_activities_in_interval("2000.01.01", "2010.01.01") == []
        assert L.people_with_activities_in_interval("2016.01.01", "2017.01.01") == [p]
        assert L.people_with_activities_in_interval("2015.12.21", "2015.12.21") == [q]

    def test_activities_in_interval_alphabetically(self):
        # Interval filter over activities, sorted by activity name.
        L = self.L
        a1 = self.a1
        a2 = self.a2
        a3 = self.a3
        a4 = self.a4
        assert L.activities_in_interval_alphabetically("2015.12.20", "2016.01.01") == [a4, a1, a3]
        assert L.activities_in_interval_alphabetically("2000.01.01", "2010.01.01") == []
        assert L.activities_in_interval_alphabetically("2016.01.01", "2017.01.01") == [a2]
        assert L.activities_in_interval_alphabetically("2015.12.21", "2015.12.21") == [a3]

    def test_activities_in_interval_by_date(self):
        # Interval filter over activities, sorted chronologically.
        L = self.L
        a1 = self.a1
        a2 = self.a2
        a3 = self.a3
        a4 = self.a4
        assert L.activities_in_interval_by_date("2015.12.20", "2016.01.01") == [a4, a1, a3]
        assert L.activities_in_interval_by_date("2000.01.01", "2010.01.01") == []
        assert L.activities_in_interval_by_date("2016.01.01", "2017.01.01") == [a2]
        assert L.activities_in_interval_by_date("2015.12.21", "2015.12.21") == [a3]
8,683 | e7d63c3b56459297eb67c56e93a3c640d93e5f6d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tensorflow
from pyspark.sql.functions import split
from pyspark.ml.fpm import FPGrowth
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark.sql.functions import udf, array
import re
from pyspark.sql.types import *
import pyspark.sql.functions as F
# Matches plain decimal prices like "12.34".
# NOTE(review): unused in this script -- confirm before deleting.
price_pattern = re.compile(r'^\d+\.\d\d$')
# Local Spark session using all 40 cores of a single executor.
myconf = SparkConf()
myconf.setAppName("test").setMaster("local[40]")
myconf.set('spark.executor.instances','40')
myconf.set('spark.driver.memory','6G')
#myconf.set('spark.executor.memory','1G')
myconf.set('spark.executor.cores','40')
myconf.set('spark.task.cpus','40')
# Pin the spark-package for the MongoDB connector used below.
myconf.set("spark.jars.packages","org.mongodb.spark:mongo-spark-connector_2.11:2.4.1")
spark = SparkSession.builder.config(conf=myconf).getOrCreate()
# Silence log4j chatter below FATAL.
logger = spark._jvm.org.apache.log4j
logger.LogManager.getRootLogger().setLevel(logger.Level.FATAL)
# Host substrings to exclude from the analysis (vendor/search traffic).
filter_hosts=["vivo","google.com","google.cn","oppomobile","baidu.com","hicloud"]
@udf(returnType=BooleanType())
def filter_host(item):
    """Keep a row only when its host contains none of the blacklisted substrings."""
    return all(blocked not in item for blocked in filter_hosts)
# Host substrings that must be present for a row to be kept.
contains_hosts = ["jd.com"]

@udf(returnType=BooleanType())
def contains_host(item):
    """Keep a row only when its host contains at least one listed substring."""
    return any(wanted in item for wanted in contains_hosts)
# Load captured traffic from MongoDB using the split-vector partitioner.
df=spark.read.format("mongo").option("uri","mongodb://192.168.0.13:27017/jicheng.autopkgcatpure20210420").option("spark.mongodb.input.partitioner","MongoSplitVectorPartitioner").load()
# Drop blacklisted hosts; keep only the columns involved in the pivot.
df=df.filter(filter_host('host')).select(['app_id','host','session_id'])
# Sorted distinct host list -> deterministic pivot column order.
hosts=df.select(['host']).distinct().rdd.map(lambda r : r['host']).collect()
hosts.sort()
# One row per (app, session); one column per host holding its hit count.
df1=df.groupBy('app_id','session_id') \
.pivot('host', hosts) \
.agg(F.count('host')).fillna(0)
# Materialize to pandas and dump to CSV.
df2=df1.toPandas()
df2.to_csv("tf22.csv")
8,684 | e403be68894ba283d71a0b71bb0bfd0adfab8c41 | import logging
def log_func(handler):
    """Tornado-style access logger: severity tracks the response status class.

    2xx/3xx -> logging.info, 4xx -> logging.warning, 5xx+ -> logging.error.
    The format arguments are passed lazily so formatting only happens when
    the record is actually emitted.
    """
    if handler.get_status() < 400:
        log_method = logging.info
    elif handler.get_status() < 500:
        log_method = logging.warning
    else:
        log_method = logging.error
    request_time = 1000.0 * handler.request.request_time()
    # BUG FIX: the original format string had seven placeholders for six
    # arguments ("%d %s %s (%s) %s %s %.2fms"), which broke message
    # formatting at emit time.
    log_method("%d %s %s (%s) %s %.2fms",
               handler.get_status(), handler.request.method,
               handler.request.uri, handler.request.remote_ip,
               handler.request.arguments,
               request_time)
# Tornado application settings.
configs = dict(
    LOG_LEVEL=logging.INFO,           # logging level
    debug=True,                       # debug mode
    log_function=log_func,            # access-log callback
    template_path='views',            # HTML template directory
    static_path='statics',            # static assets (css, js, img)
    static_url_prefix='/statics/',    # URL prefix for static assets
    cookie_secret='suoning',          # secret string used to sign cookies
    xsrf_cookies=True,                # enable cross-site request forgery protection
)
|
8,685 | 5c179752f4c4e1d693346c6edddd79211a895735 | valor1=input("Ingrese Primera Cantidad ")
valor2=input("Ingrese Segunda Cantidad ")
Total = valor1 + valor2
print "El total es: " + str(Total)
|
8,686 | ec2d3bbfce06c498790afd491931df3f391dafbe | ../PyFoam/bin/pyFoamPlotWatcher.py |
8,687 | 022f588455d8624d0b0107180417f65816254cb1 | class car:
def info(self):
print(self.speed,self. color,self.model)
def increment(self):
print('increment')
def decrement(self):
print ('decrement')
# Build one car, configure it, and show its details.
BMW = car()
BMW.speed = 320
BMW.color = 'red'
BMW.model = 1982
BMW.info()
# NOTE(review): Camry never gets a `model` attribute and info() is never
# called on it; calling Camry.info() as-is would raise AttributeError.
Camry = car()
Camry.speed = 220
Camry.color = 'blue'
|
8,688 | a494b3469682a909b76e67e1b78ad25affe99f24 | # Your code here
d = dict()
count = 0
fave_fast_food = input("Fave fast food restaurant: ")
for i in range(1, 11):
if fave_fast_food in d:
d[fave_fast_food] += 1
else:
d[fave_fast_food] = 1
count+= 1
fave_fast_food = input("Fave fast food restaurant: ")
for k,v in d.items():
print('Fast Food Resturants that are ' + k + ": " + str(v))
maximum = max(d, key=d.get) # Just use 'min' instead of 'max' for minimum.
print("The fast food restaurant " + maximum + " has this many votes:", d[maximum]) |
8,689 | a87ab07bb1502a75a7e705cd5c92db829ebdd966 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import json
from flask import jsonify
from flask import make_response
from MultipleInterfaceManager.settings import STATUS_CODE
def _render(resp):
    """Wrap `resp` (a plain dict) in a Flask JSON response object."""
    # CORS header deliberately left disabled:
    # response.headers["Access-Control-Allow-Origin"] = "*"
    return make_response(jsonify(resp))
def json_list_render(code, data, limit, offset, message = None):
    """Render a paginated list response; message defaults to STATUS_CODE[code]."""
    if message is None:
        message = STATUS_CODE.get(code)
    payload = {
        "code": code,
        "limit": limit,
        "offset": offset,
        "message": message,
        "data": data,
    }
    return _render(payload)
def json_detail_render(code, data = [], message = None):
    """Render a detail response; message defaults to STATUS_CODE[code]."""
    if message is None:
        message = STATUS_CODE.get(code)
    payload = {
        "code": code,
        "message": message,
        "data": data,
    }
    return _render(payload)
def json_token_render(code, token, message = None):
    """Render an auth-token response; message defaults to STATUS_CODE[code]."""
    if message is None:
        message = STATUS_CODE.get(code)
    payload = {
        "code": code,
        "token": token,
        "message": message,
    }
    return _render(payload)
def json_detail_render_sse(code, data = [], message = None):
    """Serialize a detail payload to a JSON string (for SSE streaming,
    where a full Flask response object is not wanted)."""
    if message is None:
        message = STATUS_CODE.get(code)
    return json.dumps(dict(code=code, message=message, data=data))
|
8,690 | 24891cdefcd061f04e7b7768b1bde4e32b78adcc | import heapq
from util import edit_distance
def autocomplete(suggest_tree, bktree, prefix, count=5):
    """Suggest top completions for a prefix given a SuggestTree and BKTree.

    Exact-prefix completions are ranked primarily by their suggest-tree
    weight and secondarily by a proximity score; when the prefix has no
    completions, fall back to the BK-tree's fuzzy matches, closest first.
    """
    weights = suggest_tree.completion_weights(prefix)
    if not weights:
        # Fuzzy fallback: nearest words by Levenshtein distance.
        matches = bktree.search(prefix)
        return heapq.nsmallest(count, matches,
                               key=lambda m: edit_distance(prefix, m))

    def rank(completion):
        # Primary: stored weight; secondary: shorter-suffix proximity.
        return (weights[completion],
                completion_proximity_score(prefix, completion))

    return heapq.nlargest(count, weights.keys(), key=rank)
def completion_proximity_score(prefix, completion):
    """Score so that shorter completions rank higher; an exact match wins outright."""
    if prefix == completion:
        return float("inf")
    return 1.0 / float(len(completion))
|
8,691 | b934770e9e57a0ead124e245f394433ce853dec9 | import time
import machine
from machine import Timer
import network
import onewire, ds18x20
import ujson
import ubinascii
from umqtt.simple import MQTTClient
import ntptime
import errno
class Error(Exception):
    """Fatal failure: raised to abort the measurement cycle entirely."""
#Thrown if an error that is not fatal occurs,
#goes to deep sleep and continues as normal.
#For example no wifi connection at this time.
class Warning(Exception):
pass
def gettimestr():
    """Return the current RTC time formatted as 'YYYYMMDD HHMM'."""
    now = machine.RTC().datetime()
    date_part = "%04d" % now[0] + "%02d" % now[1] + "%02d" % now[2]
    time_part = "%02d" % now[4] + "%02d" % now[5]
    return date_part + " " + time_part
def deepsleep():
    """Arm RTC alarm 0 for 60 s and enter deep sleep; the device resets on wake."""
    # configure RTC.ALARM0 to be able to wake the device
    rtc = machine.RTC()
    rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)
    # set RTC.ALARM0 to fire after 60 seconds (waking the device)
    rtc.alarm(rtc.ALARM0, 60000)
    # put the device to sleep
    machine.deepsleep()
# Watchdog countdown, decremented once per second by timercallback.
timer_index = 20

def timercallback(tim):
    """Periodic 1 s watchdog tick; forces deep sleep when the countdown hits 0."""
    global timer_index
    if timer_index == 0:
        print("Timer reached 0, something went wrong -> sleep.")
        deepsleep()
    print("Timer index " + str(timer_index))
    timer_index -= 1
# Pulling GPIO4 low acts as a maintenance switch: skip the measurement cycle.
stoppin = machine.Pin(4, mode=machine.Pin.IN, pull=machine.Pin.PULL_UP)
if stoppin.value() == 0:
    print("Pin down, stop")
else:
    try:
        # Watchdog: if the cycle hangs, timercallback forces deep sleep.
        tim = Timer(-1)
        tim.init(period=1000, mode=Timer.PERIODIC, callback=timercallback)
        try:
            f = open('config.json', 'r')
            config = ujson.loads(f.readall())
        except OSError as e:
            if e.args[0] == errno.MP_ENOENT or e.args[0] == errno.MP_EIO:
                print("I/O error({0}): {1}".format(e.args[0], e.args[1]))
                raise Error
        # The DS18B20 bus is on the GPIO pin named in the config.
        ONEWIREPIN = config['ONEWIREPIN']
        dat = machine.Pin(ONEWIREPIN)
        # create the onewire object
        ds = ds18x20.DS18X20(onewire.OneWire(dat))
        # scan for devices on the bus
        roms = ds.scan()
        print('found devices:', roms)
        if len(roms) > 0:
            ds.convert_temp()
            # DS18B20 needs up to 750 ms for a 12-bit conversion.
            time.sleep_ms(750)
        # Check if we have wifi, and wait for connection if not.
        print("Check wifi connection.")
        wifi = network.WLAN(network.STA_IF)
        i = 0
        while not wifi.isconnected():
            if i > 10:
                print("No wifi connection.")
                raise Warning
            print(".")
            time.sleep(1)
            i = i + 1
        try:
            print("Get time.")
            ntptime.settime()
        except OSError as e:
            # BUG FIX: the second errno test was a plain `if`, so its `else:
            # raise` re-raised the error even on the ETIMEDOUT path; use elif
            # so each known errno is handled exactly once.
            if e.args[0] == errno.ETIMEDOUT:  # OSError: [Errno 110] ETIMEDOUT
                print("Timeout error, didn't get ntptime.")
                # Without correct time (fresh boot, not deep-sleep wake),
                # we cannot continue.
                if machine.reset_cause() != machine.DEEPSLEEP:
                    raise Warning
            elif e.args[0] == -2:  # OSError: dns error
                print("DNS error, didn't get ntptime.")
                if machine.reset_cause() != machine.DEEPSLEEP:
                    raise Warning
            else:
                raise
        _time = gettimestr()
        print("Open MQTT connection.")
        c = MQTTClient("umqtt_client", config['MQTT_BROKER'])
        c.connect()
        # Optionally report the battery voltage from the ADC.
        if config['MEASURE_VOLTAGE']:
            adc = machine.ADC(0)
            voltage = adc.read()
            topic = "raw/esp8266/" + ubinascii.hexlify(machine.unique_id()).decode() + "/voltage"
            message = _time + " " + str(voltage)
            c.publish(topic, message)
        # Loop the DS18B20 sensors and send results to the MQTT broker.
        for rom in roms:
            print("topic " + config['MQTT_TOPIC'] + ubinascii.hexlify(rom).decode())
            topic = config['MQTT_TOPIC'] + ubinascii.hexlify(rom).decode() + "/temperature"
            print(_time)
            print(ds.read_temp(rom))
            message = _time + ' ' + str(ds.read_temp(rom))
            c.publish(topic, message)
        c.disconnect()
        deepsleep()
    except Warning:
        deepsleep()
    except Error as e:
        # BUG FIX: the original used a bare `except Error:` and then printed
        # `e.args[0]`/`e.args[1]` -- `e` was never bound (NameError) and Error
        # is raised without args anyway; bind it and print the exception itself.
        print("Error: {0}".format(e))
|
8,692 | 5d3b9005b8924da36a5885201339aa41082034cd | from selenium.webdriver.common.by import By
class BasePageLocators:
    """Locators shared by every page (header links and the user icon)."""
    LOGIN_LINK = (By.CSS_SELECTOR, "#login_link")
    BASKET_LINK = (By.CSS_SELECTOR, '[class="btn btn-default"]:nth-child(1)')
    USER_ICON = (By.CSS_SELECTOR, ".icon-user")
class LoginPageLocators:
    """Locators for the login/registration page forms and fields."""
    LOG_IN_FORM = (By.CSS_SELECTOR, "#login_form")
    REGISTER_FORM = (By.CSS_SELECTOR, "#register_form")
    REGISTRATION_EMAIL = (By.CSS_SELECTOR, '#id_registration-email')
    REGISTRATION_PASSWORD = (By.CSS_SELECTOR, '#id_registration-password1')
    REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR, '#id_registration-password2')
    REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR, '[name="registration_submit"]')
class BasketPageLocators:
    """Locators for the basket page and the add-to-basket flash messages."""
    BASKET_STATUS = (By.CSS_SELECTOR, '#content_inner')
    NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(1) > .alertinner strong')
    PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(3) > .alertinner strong')
class ProductPageLocators:
    """Locators for a single product page (name, price, add button)."""
    ADD_IN_BASKET = (By.CSS_SELECTOR, '.btn-add-to-basket')
    SHIPMENT_PRICE = (By.CSS_SELECTOR, '.product_main .price_color')
    SHIPMENT_NAME = (By.CSS_SELECTOR, '.product_main h1')
|
8,693 | 252a6b97f108b7fdc165ccb2a7f61ce31f129d3d | import sys
from collections import namedtuple
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, \
QHBoxLayout, QStackedWidget, QListWidget, QListWidgetItem
from PyQt5.QtCore import Qt, QSize
from runWidget import RunWidget
from recordWidget import RecordWidget
def QListWidget_qss():
    """Return the QSS stylesheet applied to the left-hand tab list.

    NOTE(review): the trailing 'HistoryPanel:hover' rule names a widget
    class that does not appear in this file -- presumably defined
    elsewhere; confirm it is still needed.
    """
    return '''
    QListWidget{
        outline: 0px;
    }
    QListWidget {
        min-width: 30px;
        max-width: 50px;
        color: Black;
        background: #CCCCCC;
    }
    QListWidget::Item:selected {
        background: #888888;
        border-left: 5px solid red;
    }
    HistoryPanel:hover {
        background: rgb(52, 52, 52);
    }
    '''
class MainCentralWidget(QWidget):
    """Central widget: a vertical tab list on the left driving a stacked
    page area on the right."""

    def __init__(self):
        super().__init__()
        bar = self.getTabBar(('录制', '运行'))
        pages = self.getTabPage()
        # Selecting a row in the sidebar switches the visible page.
        bar.currentRowChanged.connect(pages.setCurrentIndex)
        layout = QHBoxLayout(spacing=0)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(bar)
        layout.addWidget(pages)
        self.setLayout(layout)

    def getTabBar(self, names):
        """Build the left-hand tab list with one centered 50x50 item per name."""
        bar = QListWidget()
        bar.setStyleSheet(QListWidget_qss())
        bar.setFrameShape(QListWidget.NoFrame)
        bar.setItemAlignment(Qt.AlignCenter)
        bar.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        for label in names:
            entry = QListWidgetItem(label)
            entry.setTextAlignment(Qt.AlignCenter)
            entry.setSizeHint(QSize(50, 50))
            bar.addItem(entry)
        bar.setCurrentRow(0)
        return bar

    def getTabPage(self):
        """Build the stacked pages in the same order as the tab names."""
        pages = QStackedWidget()
        pages.addWidget(RecordWidget())
        pages.addWidget(RunWidget())
        return pages
class MainWindow(QMainWindow):
    """Top-level 900x300 application window hosting the tabbed central widget."""

    def __init__(self):
        super().__init__()
        self.setGeometry(50, 50, 900, 300)
        self.setWindowTitle('AutoMouse')
        self.setCentralWidget(MainCentralWidget())
        # Show immediately on construction.
        self.show()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the app, build the window, run the loop.
    app = QApplication(sys.argv)
    main_window = MainWindow()
    sys.exit(app.exec_())
8,694 | 57bc34c6a23c98fd031ea6634441d4d135c06590 | import sys
sys.path.append("./")
from torchtext.datasets import Multi30k
from torchtext.data import Field
from torchtext import data
import pickle
import models.transformer as h
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from metrics.metrics import bleu
import numpy as np
from torch.autograd import Variable
from utils import plot_training_curve,plot_loss_curves
from torch import nn
import torch
import time
import matplotlib.pyplot as plt
import seaborn
global max_src_in_batch, max_tgt_in_batch
def batch_size_fn(new, count, sofar):
    """Return the padded token count if example `new` joins the current batch.

    Tracks the running max source/target lengths in module globals; torchtext
    calls this to size dynamic, token-count-aware batches.
    """
    global max_src_in_batch, max_tgt_in_batch
    if count == 1:
        # First example of a fresh batch: reset the running maxima.
        max_src_in_batch = 0
        max_tgt_in_batch = 0
    fields = vars(new)
    max_src_in_batch = max(max_src_in_batch, len(fields["src"]))
    # +2 accounts for the <sos>/<eos> tokens added to the target.
    max_tgt_in_batch = max(max_tgt_in_batch, len(fields["trg"]) + 2)
    return max(count * max_src_in_batch, count * max_tgt_in_batch)
class Batch:
    "Object for holding a batch of data with mask during training."

    def __init__(self, src, trg=None, pad=0):
        # src/trg are (batch, seq) index tensors; `pad` is the padding id.
        self.src = src
        # Mask padding positions in the source (extra dim broadcasts over heads).
        self.src_mask = (src != pad).unsqueeze(-2)
        if trg is not None:
            # Teacher forcing: feed trg[:-1] as input, predict trg[1:].
            self.trg = trg[:, :-1]
            self.trg_y = trg[:, 1:]
            self.trg_mask = \
                self.make_std_mask(self.trg, pad)
            # Count of real (non-pad) target tokens, used for loss scaling.
            self.ntokens = (self.trg_y != pad).data.sum()

    @staticmethod
    def make_std_mask(tgt, pad):
        "Create a mask to hide padding and future words."
        tgt_mask = (tgt != pad).unsqueeze(-2)
        # Combine the padding mask with the causal (no-peek) mask.
        tgt_mask = tgt_mask & Variable(
            subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))
        return tgt_mask
class MyIterator(data.Iterator):
    # Groups examples of similar length to minimize padding, following the
    # annotated-transformer batching recipe.

    def create_batches(self):
        if self.train:
            def pool(d, random_shuffler):
                # Pull pools of 100 batches, sort each pool by length, cut
                # into token-count-sized batches, then shuffle those batches.
                for p in data.batch(d, self.batch_size * 100):
                    p_batch = data.batch(
                        sorted(p, key=self.sort_key),
                        self.batch_size, self.batch_size_fn)
                    for b in random_shuffler(list(p_batch)):
                        yield b
            self.batches = pool(self.data(), self.random_shuffler)
        else:
            # Deterministic, length-sorted batches for validation/testing.
            self.batches = []
            for b in data.batch(self.data(), self.batch_size,
                                self.batch_size_fn):
                self.batches.append(sorted(b, key=self.sort_key))
def subsequent_mask(size):
    """Return a (1, size, size) boolean mask that is True at (i, j) iff j <= i,
    i.e. each position may attend only to itself and earlier positions."""
    lower = np.tril(np.ones((1, size, size), dtype='uint8'))
    return torch.from_numpy(lower) == 1
def greedy_decode(model, src, src_mask, max_len, start_symbol):
    """Greedy decoding: repeatedly append the argmax token until max_len.

    Returns the (1, <=max_len) tensor of decoded token ids, starting with
    `start_symbol`. Note there is no early stop on <eos>; callers truncate.
    """
    memory = model.encode(src, src_mask)
    ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)
    for _ in range(max_len - 1):
        causal = Variable(subsequent_mask(ys.size(1)).type_as(src.data))
        out = model.decode(memory, src_mask, Variable(ys), causal)
        prob = model.generator(out[:, -1])
        _, next_word = torch.max(prob, dim=1)
        next_word = next_word.data[0]
        step = torch.ones(1, 1).type_as(src.data).fill_(next_word)
        ys = torch.cat([ys, step], dim=1)
    return ys
def visualise_attention(tgt_sent, sent):
    """Plot attention heatmaps for decoder layers 2, 4 and 6 (indices 1, 3, 5).

    Three panels per layer group: encoder self-attention, decoder
    self-attention, and decoder source-attention, four heads each. Relies on
    the module-global `model`, whose attention sublayers cache their last
    weights in `.attn` after a forward pass. `sent` is the source token list,
    `tgt_sent` the decoded target token list.
    """
    def draw(data, x, y, ax):
        # One head's attention matrix; axis labels only on the first head.
        seaborn.heatmap(data,
                        xticklabels=x, square=True, yticklabels=y,
                        vmin=0.0, vmax=1.0, cbar=False, ax=ax)

    for layer in range(1, 6, 2):
        fig, axs = plt.subplots(1, 4, figsize=(16, 5))
        print("Encoder Layer", layer + 1)
        for h in range(4):
            vals = model.encoder.layers[layer].self_attn.attn[0, h].data.cpu()
            draw(vals, sent, sent if h == 0 else [], ax=axs[h])
        plt.show()

    for layer in range(1, 6, 2):
        fig, axs = plt.subplots(1, 4, figsize=(16, 5))
        print("Decoder Self Layer", layer + 1)
        for h in range(4):
            vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:len(tgt_sent), :len(tgt_sent)].cpu()
            draw(vals, tgt_sent, tgt_sent if h == 0 else [], ax=axs[h])
        plt.show()
        print("Decoder Src Layer", layer + 1)
        fig, axs = plt.subplots(1, 4, figsize=(16, 5))
        for h in range(4):
            # BUG FIX: the source-attention panel must read `src_attn`, not
            # `self_attn` (which was already plotted above) -- the decoder
            # layer caches encoder-decoder attention separately.
            vals = model.decoder.layers[layer].src_attn.attn[0, h].data[:len(tgt_sent), :len(sent)].cpu()
            draw(vals, sent, tgt_sent if h == 0 else [], ax=axs[h])
        plt.show()
class SimpleLossCompute:
    """Compute the per-batch loss and, when an optimizer is supplied,
    backpropagate and take a step."""

    def __init__(self, generator, criterion, opt=None):
        self.generator = generator
        self.criterion = criterion
        self.opt = opt

    def __call__(self, x, y, norm):
        logits = self.generator(x)
        flat_logits = logits.contiguous().view(-1, logits.size(-1))
        flat_targets = y.contiguous().view(-1)
        # Normalize by the real token count before backprop.
        loss = self.criterion(flat_logits, flat_targets) / norm
        if self.opt is not None:
            # Training mode: backprop, step, and clear gradients.
            loss.backward()
            self.opt.step()
            self.opt.optimizer.zero_grad()
        return loss.data.item() * norm
def rebatch(pad_idx, batch):
    """Transpose torchtext's (seq, batch) tensors to (batch, seq) and wrap
    them in our masked Batch object."""
    src = batch.src.transpose(0, 1)
    trg = batch.trg.transpose(0, 1)
    return Batch(src, trg, pad_idx)
def evaluate(data_iter, model, criterion):
    """Run one evaluation epoch (eval mode, no grad, no optimizer) and
    return the list of per-batch losses."""
    model.eval()
    with torch.no_grad():
        loss_fn = SimpleLossCompute(model.generator, criterion, opt=None)
        batches = (rebatch(pad_idx, b) for b in data_iter)
        return run_epoch(batches, model, loss_fn)
def run_epoch(data_iter, model, loss_compute):
    """Run one pass over `data_iter`, logging throughput every 50 steps;
    return the list of per-batch losses."""
    tick = time.time()
    total_tokens = 0
    losses = []
    tokens_since_log = 0
    for i, batch in enumerate(data_iter):
        out = model.forward(batch.src, batch.trg,
                            batch.src_mask, batch.trg_mask)
        loss = loss_compute(out, batch.trg_y, batch.ntokens)
        losses.append(loss.item())
        total_tokens += batch.ntokens
        tokens_since_log += batch.ntokens
        if i % 50 == 1:
            elapsed = time.time() - tick
            print("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
                  (i, loss, tokens_since_log / elapsed))
            tick = time.time()
            tokens_since_log = 0
    return losses
# Tokenization fields: German source, English target, both lowercased and
# wrapped in explicit <sos>/<eos> markers.
SRC = Field(tokenize = "spacy",
            tokenizer_language="de_core_news_sm",
            init_token = '<sos>',
            eos_token = '<eos>',
            lower = True)
TRG = Field(tokenize = "spacy",
            tokenizer_language="en_core_web_sm",
            init_token = '<sos>',
            eos_token = '<eos>',
            lower = True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
MAX_LEN = 100
# Multi30k De->En, keeping only sentence pairs of at most MAX_LEN tokens.
train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'), fields = (SRC, TRG)
                                                    , filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN)
# Vocabulary built from the training split only; tokens seen <2 times -> <unk>.
SRC.build_vocab(train_data.src, min_freq=2)
TRG.build_vocab(train_data.trg, min_freq=2)
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
# NOTE(review): `device` is recomputed here, identical to the one above.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 64
# Token-count-aware bucketing iterators (see MyIterator / batch_size_fn).
train_iter = MyIterator(train_data, batch_size=BATCH_SIZE, device=device,
                        repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                        batch_size_fn=batch_size_fn, train=True)
valid_iter = MyIterator(valid_data, batch_size=BATCH_SIZE, device=device,
                        repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                        batch_size_fn=batch_size_fn, train=False)
test_iter = MyIterator(test_data, batch_size=BATCH_SIZE, device=device,
                       repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                       batch_size_fn=batch_size_fn, train=False)
# Restore a previously trained 6-layer transformer checkpoint.
model_name = "harvard_transformer2_state"
args = (INPUT_DIM, OUTPUT_DIM)
kwargs = {"N" : 6}
model = h.make_model(*args, **kwargs).to(device)
state = torch.load(model_name + ".pt", map_location=device)
model.load_state_dict(state["state_dict"])
losses = state["loss"]
# Evaluate on the test split and report perplexity.
pad_idx = TRG.vocab.stoi["<pad>"]
criterion_test = nn.CrossEntropyLoss(ignore_index=pad_idx)
test_losses = evaluate(test_iter, model, criterion_test)
losses["test"].append(test_losses)
test_loss = torch.tensor(sum(test_losses) / len(test_losses))
print(test_loss)
print('Perplexity:', torch.exp(test_loss))
# Alternative demo sentences kept for reference:
# sentence = [SRC.preprocess("eine gruppe von menschen steht vor einem iglu .")]
# real_translation = TRG.preprocess("a man in a blue shirt is standing on a ladder and cleaning a window")
# sentence = [SRC.preprocess("eine gruppe von menschen steht vor einem iglu .")]
# real_translation = TRG.preprocess("a group of people stands in front of an igloo.")
sentence = [SRC.preprocess("ein mann mit kariertem hut in einer schwarzen jacke und einer schwarz-weiß gestreiften hose spielt auf einer bühne mit einem sänger und einem weiteren gitarristen im hintergrund auf einer e-gitarre .")]
real_translation = TRG.preprocess("a man in a black jacket and checkered hat wearing black and white striped pants plays an electric guitar on a stage with a singer and another guitar player in the background .")
# Greedy-decode the demo sentence and print both translations.
src = SRC.process(sentence).to(device).T
src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
model.eval()
out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
translation = ["<sos>"]
for i in range(1, out.size(1)):
    sym = TRG.vocab.itos[out[0, i]]
    translation.append(sym)
    if sym == "<eos>":
        break
print(' '.join(translation))
print(' '.join(real_translation))
# plot_loss_curves(losses["train"], losses["val"])
visualise_attention(translation, ["<sos>"] + sentence[0] + ["<eos>"])
# candidate = []
# reference = []
# for i, batch in enumerate(test_iter):
# src = batch.src.transpose(0, 1)[:1]
# src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
# model.eval()
# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
# translation = []
# for i in range(1, out.size(1)):
# sym = TRG.vocab.itos[out[0, i]]
# if sym == "<eos>": break
# translation.append(sym)
# print("Translation: \t", ' '.join(translation))
# target = []
# for i in range(1, batch.trg.size(0)):
# sym = TRG.vocab.itos[batch.trg.data[i, 0]]
# if sym == "<eos>": break
# target.append(sym)
# print("Target: \t", ' '.join(target))
# print()
# candidate.append(translation)
# reference.append([target])
# score = bleu(candidate, reference)
# print(score)
# # state["bleu"] = bleu
# # save_model_state("harvard_transformer2_state.pt", model, {"args" : args, "kwargs" : kwargs}, epoch+1, state["loss"], state["bleu"])
# dataset = load_dataset('wmt14', 'de-en', 'test')['test']['translation']
# trainloader = DataLoader(dataset, batch_size=1, shuffle=True)
# model.eval()
# candidate = []
# reference = []
# for val in trainloader:
# de=val['de']
# en=val['en']
# de_tokens = [SRC.preprocess(sentence) for sentence in de]
# en_tokens = [TRG.preprocess(sentence) for sentence in en]
# src = SRC.process(de_tokens).to(device).T[:1]
# trg = TRG.process(en_tokens).to(device).T[:1]
# src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
# translation = []
# for i in range(1, out.size(1)):
# sym = TRG.vocab.itos[out[0, i]]
# if sym == "<eos>": break
# translation.append(sym)
# target = []
# for i in range(1, trg.size(1)):
# sym = TRG.vocab.itos[trg[0, i]]
# if sym == "<eos>": break
# target.append(sym)
# candidate.append(translation)
# reference.append([target])
# print(bleu(candidate, reference))
|
8,695 | f6401eca2dc0ea86a934e859c35fa2d6c85a61b3 | import turtle
hexagon = turtle.Turtle()
for i in range (6):
hexagon.forward(100)
hexagon.left(60)
|
8,696 | 4932a357cfd60cb65630345e75794ebf58b82c82 | import matplotlib; matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from uncertainties import ufloat
#Holt Werte aus Textdatei
I, U = np.genfromtxt('werte2.txt', unpack=True)
#Definiert Funktion mit der ihr fitten wollt (hier eine Gerade)
def f(x, A, B):
return A*x + B
#Erstellt linspace von Bereich, in dem Ausgleichsfunktion erstellt wird
x_plot = np.linspace(50, 160, 1000)
#Fittet
params, covariance_matrix = curve_fit(f, I, U)
errors = np.sqrt(np.diag(covariance_matrix))
#Plottet Fit
plt.plot(x_plot, f(x_plot, *params), 'k-', label='Anpassungsfunktion', linewidth=0.5)
#Gibt berechnete Parameter aus
print(params)
print(np.sqrt(np.diag(covariance_matrix)))
plt.gcf().subplots_adjust(bottom=0.18)
#Plot eurer eigentlichen Messwerte
plt.plot(I , U, 'r.', label='Messwerte', Markersize=4)
plt.xlim(50, 160)
plt.ylim(1.7,2.3)
plt.legend()
plt.grid()
plt.xlabel(r'$I\,/\,\mathrm{mA}$')
plt.ylabel(r'$U_K\,/\,\mathrm{V}$')
plt.savefig('plot2.pdf')
|
8,697 | 641cbe2f35925d070249820a2e3a4f1cdd1cf642 | # -*- coding: utf-8 -*-
"""
app definition
"""
from django.apps import AppConfig
class CoopHtmlEditorAppConfig(AppConfig):
    """Django application configuration for the coop_html_editor package."""
    name = 'coop_html_editor'
    verbose_name = "Html Editor"
|
8,698 | 3164eab8dc221149c9f865645edf9991d810d2ac | import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import time
import sys
class ConsensusSimulation:
    """Model a general consensus problem on a graph.

    See R. Olfati-Saber et al., "Consensus and Cooperation in Networked
    Multi-Agent Systems", DOI: 10.1109/JPROC.2006.887293.
    """

    def __init__(self,
                 topology,
                 dynamics,
                 dynamics_args,
                 time_step=0.01,
                 x_init=None,
                 convergence_warning=True,
                 delay=0):
        """Set up the simulation.

        topology: networkx.Graph of agents.
        dynamics: callable f(x, *dynamics_args) giving dx/dt.
        dynamics_args: extra arguments forwarded to `dynamics`.
        time_step: Euler integration step.
        x_init: optional initial state vector (defaults to 1..n).
        convergence_warning: warn when the graph is disconnected.
        delay: communication delay in integration steps (see sim_delay).
        """
        # Check arguments are of the correct form.
        if isinstance(topology, nx.Graph):
            self.graph = topology
            self.size = len(self.graph)
        else:
            print("Argument Error: topology must be type", type(nx.Graph()))
        if callable(dynamics):
            self.f = dynamics
            # BUG FIX: the original briefly set self.f_arg = (dynamics_args, 1)
            # behind a len() probe, only to overwrite it on the next line; the
            # dead assignment (which crashed on unsized arguments) is removed.
            self.f_arg = dynamics_args
        else:
            print("Argument Error: dynamics must be a function")
        self.dt = time_step
        self.tau = delay
        # Default initial state: the column vector (1, 2, ..., n)^T.
        if x_init is None:
            self.x = np.linspace(1, self.size, self.size)
            self.x = self.x.reshape(self.size, 1)
        else:
            self.x = x_init.copy().reshape(self.size, 1)
        # The Laplacian matrix: the core operator of the consensus dynamics.
        self.L = nx.laplacian_matrix(self.graph).todense()
        self.X = list()
        self.T = list()
        # A disconnected graph will generally never reach consensus.
        self.warn = convergence_warning
        self.d_max = max(np.array(self.graph.degree)[:, 1])
        # Stability bound for uniform delay: tau < pi / (4 * d_max).
        self.tau_max = (np.pi) / (4 * self.d_max)

    def disagreement(self):
        """Return 0.5 * x^T L x, the 'error'/inhomogeneity of the state."""
        return 0.5 * (np.dot(np.dot(np.transpose(self.x), self.L), self.x)).item(0)

    def agreement(self, tol=1e-6):
        """Return True once the disagreement has fallen below `tol`."""
        return self.disagreement() < tol

    def run_sim(self, record_all=False, update_every=1.0):
        """Run the undelayed simulation until consensus; return elapsed wall time.

        record_all: keep every Euler step instead of roughly one sample per
        unit of simulated time. update_every: wall-clock seconds between
        progress prints.
        """
        t = 0
        self.x_init = self.x
        self.X = list()
        self.T = list()
        flag = False
        self.X.append(self.x)
        self.T.append(0)
        start = time.time()
        time_since_last_update = 0.0
        progress = 1
        while not self.agreement():
            start_it = time.time()
            if t == 0 and self.warn and not nx.is_connected(self.graph):
                print("Graph not connected, consensus algorithm will probably not converge!")
                print("Simulating to 5 seconds...")
                flag = True
            if flag and time.time() - start > 5:
                # Give up after 5 wall-clock seconds on disconnected graphs.
                break
            # Core simulation: a simple explicit Euler step of dx/dt = f(x).
            self.x = self.x + self.dt * self.f(self.x, *self.f_arg)
            if record_all:
                self.X.append(self.x)
                self.T.append(time.time() - start)
            elif t - np.floor(t) < 1e-2:
                # Record roughly once per unit of simulated time.
                self.X.append(self.x)
                self.T.append(time.time() - start)
            t = t + self.dt
            time_since_last_update += time.time() - start_it
            if time_since_last_update >= update_every:
                sys.stdout.write("\r" + "Iteration: {}, disagreement: {}, time: {}".format(progress, self.disagreement(), time.time() - start))
                sys.stdout.flush()
                time_since_last_update = 0.0
                progress += 1
        print("")
        return self.T[-1]

    def sim_delay(self, delay=1, runtime=100):
        """Run the simulation with a communication delay of `delay` steps.

        Each Euler step is driven by the state recorded `delay` steps
        earlier; stops at consensus or after `runtime` wall-clock seconds.
        """
        t = 0
        self.tau = delay
        self.x_init = self.x
        self.X = list()
        self.T = list()
        flag = False
        # Seed the history so delayed lookups are defined from the start.
        for i in range(0, delay + 1):
            self.X.append(self.x)
            self.T.append(0)
        start = time.time()
        while not self.agreement():
            if self.T[-1] > runtime:
                break
            if t == 0 and self.warn and not nx.is_connected(self.graph):
                print("Graph not connected, consensus algorithm will probably not converge!")
                print("Simulating to 5 seconds...")
                flag = True
            if flag and time.time() - start > 5:
                break
            # Euler step using the `delay`-steps-old state.
            self.x = self.X[-1]
            if len(self.X) - delay >= 0:
                index = len(self.X) - delay
                self.x = self.X[-1] + self.dt * self.f(self.X[index], *self.f_arg)
            self.X.append(self.x)
            self.T.append(time.time() - start)
            t = t + self.dt
        return self.T[-1]

    def plot(self, weight_average=False):
        """Plot every agent's trajectory plus the expected consensus line.

        weight_average: mark the degree-weighted average of the initial
        state instead of the plain mean.
        """
        if len(self.X) == 0 or len(self.T) == 0:
            # BUG FIX: the original fell through after this message and then
            # indexed an empty array (IndexError); bail out instead.
            print("Nothing to plot...")
            return
        x = np.array(self.X)
        for i in range(0, x.shape[1]):
            plt.plot(self.T, x[:, i, 0])
        if weight_average:
            # Degree-weighted average of the initial state.
            w_i = np.zeros(self.size)
            s = sum(np.array(self.graph.degree)[:, 1])
            x0 = self.x_init
            for i in nx.nodes(self.graph):
                w_i[i] = self.graph.degree(i) / s
                x0[i] = x0[i] * w_i[i]
            plt.plot(np.linspace(0, self.T[-1], 10), np.zeros(10) + sum(x0),
                     label="Connected graph consensus: " + str(sum(x0)),
                     color='red', marker='s')
        else:
            plt.plot(np.linspace(0, self.T[-1], 10), np.zeros(10) + np.mean(self.x_init),
                     label="Connected graph consensus: " + str(round(np.mean(self.x_init), 3)),
                     color='red', marker='s')
        plt.grid()
        plt.xlabel("Time (seconds)")
        plt.ylabel("State")
        plt.title("Convergence of consensus algorithm")
        plt.legend()

    def print_delay(self):
        """Print a label and return the configured delay in seconds."""
        print("Delay in seconds")
        return self.dt * self.tau

    def delay_stable_max(self):
        """Return the theoretical max stable delay pi / (4 * d_max).

        BUG FIX: the original called an undefined `maximum_degree()` helper
        (NameError); the maximum degree is already cached as self.d_max.
        """
        return (np.pi) / (4 * self.d_max)
|
8,699 | 5ccfad17ede9f685ea9ef9c514c0108a61c2dfd6 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 13:39:05 2017
@author: jaredhaeme15
"""
import cv2
import numpy as np
from collections import deque
import imutils
import misc_image_tools
frameFileName = r"H:\Summer Research 2017\Whirligig Beetle pictures and videos\large1.mp4"
cap = cv2.VideoCapture(r"H:\Summer Research 2017\Whirligig Beetle pictures and videos\large1.mp4")
while(1):
successFlag, frame = cap.read()
if not successFlag:
cv2.waitKey(0)
break
lower_hsv_thresholdcr = np.array([0,250,250])
upper_hsv_thresholdcr = np.array([10,255,255])
gray = np.float32(cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY))
dst = cv2.cornerHarris(gray,2,3,0.04)
#result is dilated for marking the corners, not important
dst = cv2.dilate(dst,None)
frameWithRedCorners = np.copy(frame)
# Threshold for an optimal value, it may vary depending on the image.
frameWithRedCorners[dst>0.005*dst.max()]=[0,0,255]
hsv = cv2.cvtColor(frameWithRedCorners, cv2.COLOR_BGR2HSV)
#construct a mask for the color "green", then perform
# a series of dilations and erosions to remove any small
# blobs left in the mask
crmask = cv2.inRange(hsv, lower_hsv_thresholdcr, upper_hsv_thresholdcr)
cntscr = cv2.findContours(crmask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
cv2.imshow("Frame", frameWithRedCorners)
k = cv2.waitKey(10000) & 0xFF
if k == 27: # esc key
break
cv2.destroyAllWindows()
cap.release() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.