blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
919b34168c6995dd3e33d56cea20abf55d47fcd9 | 318b737f3fe69171f706d2d990c818090ee6afce | /demo/jd-recommendation/serving/predict.py | 2b17f9268e1eec771811ed0f8b9293813a3e79da | [
"Apache-2.0"
] | permissive | 4paradigm/OpenMLDB | e884c33f62177a70749749bd3b67e401c135f645 | a013ba33e4ce131353edc71e27053b1801ffb8f7 | refs/heads/main | 2023-09-01T02:15:28.821235 | 2023-08-31T11:42:02 | 2023-08-31T11:42:02 | 346,976,717 | 3,323 | 699 | Apache-2.0 | 2023-09-14T09:55:44 | 2021-03-12T07:18:31 | C++ | UTF-8 | Python | false | false | 1,059 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 4Paradigm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module of request predict in script"""
import requests
# Endpoint of the locally running prediction service.
url = "http://127.0.0.1:8887/predict"
# One hand-crafted sample request. Field values mirror one row of the
# training data; 'time'/'eventTime' are epoch milliseconds and 'time1' is
# the same timestamp in human-readable form.
req = {"reqId": "200080_5505_2016-03-15 20:43:04",
       "eventTime": 1458045784000,
       "main_id": "681271",
       "pair_id": "200080_5505",
       "user_id": "200080",
       "sku_id": "5505",
       "time": 1458045784000,
       "split_id": 1,
       "time1":"2016-03-15 20:43:04"}
# POST the request as JSON and print the raw response body.
res = requests.post(url, json=req)
print(res.text)
| [
"noreply@github.com"
] | noreply@github.com |
ab39a91bd73b9190c2b2fb44a1f6afacac8a6ec1 | f5734b34df71d8610e8b17cd5eab9b401fe0a634 | /evaluate.py | 0ab8b5c2b884dbdef178065d80a6d46e6acc6bb8 | [] | no_license | toytag/LanguageModeling | 86902b4d18b7d155c828dfa2bddb8b64936bcf60 | f56abc5332757bf06626fcc9beac421da77cc6e8 | refs/heads/master | 2022-11-10T21:22:01.474550 | 2020-07-01T18:37:13 | 2020-07-01T18:37:13 | 273,858,178 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | import torch
import torch.nn.functional as F
from tokenizer import WordTokenizer
from model import LanguageModel
if __name__ == '__main__':
    # Load the trained word tokenizer and the latest model checkpoint.
    tokenizer = WordTokenizer('models/tokenizer/tokenizer.json')
    checkpoint = torch.load('models/lm/latest.pth')
    model = LanguageModel(n_vocab=10000)
    model.load_state_dict(checkpoint['model_state_dict'])
    # Probe sentence whose final token is <mask>; the model should fill it in.
    input_tokens = tokenizer.encode('a king is a man and a queen is a <mask>')
    input_tokens = torch.LongTensor([input_tokens])
    # src_mask is 1 everywhere except the last (masked) position.
    # NOTE(review): presumably the model treats 0 as "hidden"; confirm
    # against the LanguageModel forward() contract.
    input_mask = torch.ones_like(input_tokens)
    input_mask[0, -1] = 0
    with torch.no_grad():
        model.eval()
        output, *_ = model(input_tokens, src_mask=input_mask)
        # Greedy decode: pick the most probable vocabulary id per position.
        output = torch.argmax(F.softmax(output, dim=-1), dim=-1).squeeze(0).numpy()
print(tokenizer.decode(output)) | [
"tangzhzh@shanghaitech.edu.cn"
] | tangzhzh@shanghaitech.edu.cn |
84a35b2fba4d1eebc2c4306bd0a7b28a108ea36d | aa14ff35ba9f7e94276599f1d80244911d5944f5 | /catkin_ws/src/turtlesim_expl/src/pipes/pose_pipe.py | d8c53a18fdd834bbcacfcac48f9776d29840966f | [] | no_license | tum-i4/rritbed | 7d542b190796c14139dc34d571259cf61e2ba72b | f268988b607118c8aa7e94eff6165e44c9a40dff | refs/heads/master | 2022-04-26T03:47:46.474795 | 2022-03-16T14:36:49 | 2022-03-17T17:50:49 | 131,807,366 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,255 | py | #!/usr/bin/env python
""" Module for the PosePipe class """
import random
from pose_processor import PoseProcessor, CC_STR, POI_STR, TSP_STR
class PosePipe(object):
    """Subscribe to a pose topic and process it with a chosen PoseProcessor."""

    # One shared PoseProcessor instance per supported processing mode.
    _POSSIBLE_PROCESSORS = {
        CC_STR: PoseProcessor(CC_STR),
        POI_STR: PoseProcessor(POI_STR),
        TSP_STR: PoseProcessor(TSP_STR)
    }

    @staticmethod
    def get_possible_processors():
        """Return the names of all available processors."""
        return PosePipe._POSSIBLE_PROCESSORS.keys()

    @staticmethod
    def create(intrusion, intrusion_field, **kwargs):
        """Create a PosePipe with a randomly chosen PoseProcessor.

        kwargs: one entry per possible processor giving the likelihood that
        it is chosen. All likelihoods must be positive integers.
        """
        if len(kwargs) != len(PosePipe._POSSIBLE_PROCESSORS):
            raise ValueError("Invalid number of arguments given!")
        if any([not isinstance(weight, int) or weight < 0 for weight in kwargs.values()]):
            raise ValueError("All arguments must be positive integers.")
        # Every supplied key must name a known processor.
        for key in kwargs:
            if key not in PosePipe._POSSIBLE_PROCESSORS:
                raise KeyError("Given key [{}] not found".format(key))
        # Build a weighted pool: each name appears 'likelihood' times.
        weighted_pool = []
        for name, likelihood in kwargs.items():
            weighted_pool += [name] * likelihood
        chosen_name = random.choice(weighted_pool)
        chosen_processor = PosePipe._POSSIBLE_PROCESSORS[chosen_name]
        chosen_processor.set_intrusion(intrusion, intrusion_field)
        return PosePipe(chosen_processor)

    def __init__(self, processor):
        """Ctor: wrap the given PoseProcessor."""
        object.__init__(self)
        assert isinstance(processor, PoseProcessor)
        self._selected_processor = processor

    def get_processor_name(self):
        """Return the name of this pipe's processor."""
        return self._selected_processor.name

    def process(self, request, label):
        """Delegate processing of the given request to the processor."""
        return self._selected_processor.process(request, label=label)
if __name__ == "__main__":
    # Smoke test: build a pipe with fixed likelihoods and run one request.
    demo_pipe = PosePipe.create(intrusion=None, intrusion_field="INVALID", cc=50, poi=25, tsp=25)
    print("Possible processors: {}".format(demo_pipe.get_possible_processors()))
    print("Processor name: {}".format(demo_pipe.get_processor_name()))
    demo_request = PoseProcessor.add_to_request({}, 5, 10)
    print("Process [x: 5] [y: 10]: {}".format(demo_pipe.process(demo_request, label=True)))
| [
"vauhochzett@users.noreply.github.com"
] | vauhochzett@users.noreply.github.com |
d5655e2f75b95748ef37bafad03e58914c293918 | 86904ef9990ba9381ac12ced3ff166a881994fcf | /Attackers/PGD_ad.py | dd998bf394f3ab39a780d7262c80b3532df286e2 | [] | no_license | Ailon-Island/MNIST-attacker | 2e2c21095b73669981febf7aaf32f7165c9faea4 | 7dc49ec5e41ad188a433b4513a592421b1336363 | refs/heads/main | 2023-06-24T00:21:54.952085 | 2021-07-28T03:50:58 | 2021-07-28T03:50:58 | 389,301,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,418 | py | # 我试着在 PGD 的基础上,改成直接利用归一化的梯度进行 perturbation 的方式,使得对原图像作等量改变的时候,对模型的计算产生更大的影响
import torch
import torch.nn.functional as F
from random_chooser import random_chooser
from projector import project
from attacker import attacker
class PGD_ad_attacker(attacker):
def __init__(self, k, epsilon, alpha, model, training = True, train_lambda=1., train_attack_on = False, device='cpu'):
super(PGD_ad_attacker, self).__init__(model, epsilon, training, train_lambda, train_attack_on, device)
self.k = k
self.alpha = alpha
def generate_perturbation(self, img_raw, target):
# randomize the input image
if isinstance(self.k, random_chooser):
k = self.k.choice(size=img_raw.shape[0], dtype=torch.long).to(self.device).view(-1, 1, 1, 1)
else:
k = torch.full([img_raw.shape[0]], self.k, device=self.device, ).view(-1, 1, 1, 1)
k_max = torch.max(k)
if isinstance(self.eps, random_chooser):
eps = self.eps.choice(size=img_raw.shape[0]).to(self.device).view(-1, 1, 1, 1)
else:
eps = self.eps
if isinstance(self.alpha, random_chooser):
alpha = self.alpha.choice(size=img_raw.shape[0]).to(self.device).view(-1, 1, 1, 1)
else:
alpha = self.alpha
init_perb = torch.rand(img_raw.shape).to(self.device) * 2 - 1
img = img_raw + project(init_perb, eps)
img = torch.clamp(img, 0, 1) # control the values in image
for t in range(k_max.item()):
continue_perturbation = t < k
continue_perturbation = continue_perturbation.view(-1, 1, 1, 1)
img.detach_()
img.requires_grad = True
# get the gradient of the t-th image
loss = F.nll_loss(self.model(img), target)
self.model.zero_grad()
loss.backward()
grad = img.grad.data
# get the (t+1)-th image
perturbation = project(grad, alpha)
img = img + perturbation * continue_perturbation # perturbed
perturbation = img - img_raw # true perturbation
perturbation = project(perturbation, eps)
img = img_raw + perturbation
img = torch.clamp(img, 0, 1) # 保证不超出数据范围
return img | [
"74121851+Ailon-Island@users.noreply.github.com"
] | 74121851+Ailon-Island@users.noreply.github.com |
cf4ca5b50b35fc04d8bf757191e45210c72776af | 79b254dc2b2df1ff3cb2cf1ed3d6e446797a2a76 | /mysite/admin.py | 6e7224c4f33e18eb86d4f7f1d325c8f745127edf | [] | no_license | SamVonderahe/TechPointSOS | 8c23869e84e6c00525a4e1c81ac1f2e33195399f | 517d4a2dbe0be2db4cf5e690b9fd6fff06a7610c | refs/heads/master | 2022-11-18T11:24:40.015999 | 2020-07-24T03:44:24 | 2020-07-24T03:44:24 | 276,447,638 | 2 | 2 | null | 2020-07-10T06:58:05 | 2020-07-01T17:57:22 | Python | UTF-8 | Python | false | false | 363 | py | from django.contrib import admin
from mysite.models import Food
from mysite.models import Pantry
from mysite.models import Need
from mysite.models import Donor
from mysite.models import Hours
@admin.register(Food)
@admin.register(Pantry)
@admin.register(Need)
@admin.register(Hours)
@admin.register(Donor)
class ViewAdmin(admin.ModelAdmin):
pass
| [
"noreply@github.com"
] | noreply@github.com |
d89a4dc3330b472019968ea7e7a9892fc8c24ade | 438b7d6f9edb127ab19af5ad434f1df716117486 | /tempCodeRunnerFile.py | cfab979d7cb817adf252c6b0925eca2955af7bd0 | [] | no_license | ntl870/BTL-DSP-2 | 3c93fd1b97e7c74ebc5f189166a753e7e62b466c | 2ef89dea68c0c22b73207b5d5ff94b41316d3769 | refs/heads/main | 2023-02-06T20:37:14.089479 | 2020-12-25T05:15:24 | 2020-12-25T05:15:24 | 319,061,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py |
# print(pos + 75) | [
"ntlong870@gmail.com"
] | ntlong870@gmail.com |
cb0aea2b59411ffb92e3cddc9a98775ba3f3be0a | b024068e0b6a1e62dca5a1db41b5c46b2b7f70cd | /venv/Scripts/easy_install-script.py | 547bdb2ab5da6c35153a5940e67081b47b5d4731 | [] | no_license | brimcrob/PythonMathCode | 2bdf77e5d50ae89cdc6c93baa5bfc3fa5af21eaa | 4837bab6ce6abf3396f1794759be04b561c9a5e8 | refs/heads/master | 2021-01-03T00:12:26.794467 | 2020-02-11T18:43:29 | 2020-02-11T18:43:29 | 239,828,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | #!C:\Users\BrianM\PycharmProjects\MathProject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
# Auto-generated console-script wrapper (created by setuptools at install
# time); it simply dispatches to the 'easy_install' entry point.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"brimcrob@gmail.com"
] | brimcrob@gmail.com |
e62e7b72e72a454971171e61c2ef825762d5c059 | b27322173ff8307fabfad4f890bcfa25af9a95f5 | /functions.py | 9e8e3d00f259c69820ad9f69ab74691f394b168c | [] | no_license | dreemorra/MChA | d37d61bd6f0e98cb7976ffb02e937fbc02289eaa | fb0a1b29fc2514aaa2a15917efe7e8fba780a94b | refs/heads/master | 2021-02-10T17:57:56.963290 | 2019-05-18T19:51:57 | 2019-05-18T19:51:57 | 244,405,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,933 | py | import numpy as np
import math
def read_matrix(fname: str) -> np.array:
    """Read a space-separated numeric matrix from a text file."""
    rows = []
    with open(fname, 'r') as source:
        for raw_line in source.readlines():
            rows.append(raw_line.strip().split(" "))
    return np.array(rows, dtype=float)
def matrix_multiplication(m1: np.array, m2: np.array):
    """Return the matrix product m1 * m2 computed row-by-row."""
    left = m1.copy()
    right = m2.copy()
    right_columns = list(zip(*right))
    product = []
    for row in left:
        product.append([sum(a * b for a, b in zip(row, column))
                        for column in right_columns])
    return np.array(product)
def norm(arr: np.array):
    """Return the infinity norm of a matrix: the maximum absolute row sum."""
    row_sums = (np.sum(np.abs(row)) for row in arr)
    return max(row_sums)
def apply_mat(A: np.array, x: np.array):
    """Multiply matrix A by vector x, returning the result as a vector."""
    components = []
    for row in A:
        components.append(np.sum(row * x))
    return np.array(components)
def gauss_invert(A: np.array) -> np.array:
    """Invert matrix A by Gauss-Jordan elimination (no pivoting).

    The identity matrix is transformed by the same row operations that
    reduce A to the identity; the result is A^-1.
    """
    work = A.copy()
    inverse = np.identity(len(work))
    for col in range(len(work)):
        pivot = work[col, col]
        inverse[col] /= pivot
        work[col] /= pivot
        # eliminate the current column from every other row
        for row in range(len(work)):
            if row == col:
                continue
            factor = work[row, col]
            inverse[row] -= inverse[col] * factor
            work[row] -= work[col] * factor
    return inverse
def square_invert(A: np.array) -> np.array:
    """Invert A via the square-root (Cholesky) method.

    Each row of the result is obtained by solving A*x = e_i for the
    corresponding column e_i of the identity matrix.
    """
    source = A.copy()
    identity = np.identity(len(source))
    result = np.zeros(source.shape)
    for index in range(len(source)):
        result[index] = square_root_method(source, identity[index], True)
    return result
def transp_mat(A: np.array) -> np.array:
    """Return the transpose of the (square) matrix A; A itself is untouched."""
    size = len(A)
    result = A.copy()
    # swap the lower triangle with the upper triangle in place on the copy
    for row in range(size):
        for col in range(row):
            result[row, col], result[col, row] = result[col, row], result[row, col]
    return result
def determinant(A: np.array) -> np.array:
    """Compute the determinant of the symmetrized matrix A^T * A.

    Builds the Cholesky factor U with A^T*A = U^T*U; the determinant is then
    the product of the squared diagonal entries of U. Note that this equals
    det(A)**2, not det(A).
    """
    work = A.copy()
    work = matrix_multiplication(transp_mat(work), work)  # symmetrize
    size = len(work)
    U = np.zeros(work.shape)
    for i in range(size):
        # diagonal entry: sqrt(a[i,i] - sum of squares above it in column i)
        diagonal = work[i, i] - sum(U[k, i] ** 2 for k in range(i))
        U[i, i] = math.sqrt(diagonal)
        # remaining entries of row i
        for j in range(i + 1, size):
            off_diag = work[i, j] - sum(U[k, i] * U[k, j] for k in range(i))
            U[i, j] = off_diag / U[i, i]
    det = 1
    for i in range(size):
        det *= U[i][i] ** 2
    return det
def gaussian_elim(A: np.array, b: np.array) -> np.array:
    """Solve the linear system A*x = b by Gaussian elimination (no pivoting)."""
    A = A.copy()
    b = b.copy()
    size = len(A)
    # forward sweep: reduce A to upper triangular form with unit diagonal
    for row in range(size):
        pivot = A[row, row]
        b[row] /= pivot
        A[row] /= pivot
        for lower in range(row + 1, size):
            factor = A[lower, row]
            b[lower] -= b[row] * factor
            A[lower] -= A[row] * factor
    # back substitution (diagonal entries are 1 after the forward sweep)
    solution = b
    for row in range(size - 1, -1, -1):
        for col in range(size - 1, row, -1):
            solution[row] -= (A[row, col] * solution[col]) / A[row, row]
    return solution
def jacobi_method(A: np.array, b: np.array) -> np.array:
    """Solve the linear system A*x = b by simple iteration (Jacobi method).

    The system is rewritten as x = B*x + b with B = E - D^-1*A (each
    equation divided by its diagonal coefficient), then the fixed-point
    iteration is run for an a-priori computed number of steps that
    guarantees an accuracy of 0.01 in the infinity norm.

    NOTE(review): when the convergence condition norm(B) < 1 fails, the
    function implicitly returns None -- callers should check for that.
    """
    A = A.copy()
    b = b.copy()
    E = np.identity(len(A))
    # transform to the form x = (E-A)x + b, i.e. x = Bx + b
    for i in range(len(A)):
        b[i] /= A[i, i]
        A[i] /= A[i, i]
    B = E - A
    if norm(B) < 1:
        # initial guess: every component set to 1/2
        x0 = np.array([1/2 for i in range(len(b))])
        x = apply_mat(B, x0) + b
        # a-priori bound on the number of iterations needed for 0.01 accuracy
        k = math.floor(math.log(0.01/(norm(x-x0))*(1-norm(B)), norm(B)))
        print(f"Jacobi iterations: {k+1}")
        # iterate x_{k+1} = B*x_k + b
        for i in range(k+1):
            x = apply_mat(B, x) + b
        return x
def gauss_seidel_method(A: np.array, b: np.array) -> np.array:
    """Solve the linear system A*x = b with the Gauss-Seidel method.

    The system is rewritten as x = B*x + b with B = E - D^-1*A (each
    equation divided by its diagonal coefficient); sweeps are repeated
    until successive iterates differ by less than 0.01 in infinity norm.

    NOTE(review): the test guarding the iteration compares abs(B[i,i])
    (always 0 after the transformation) with a signed row sum -- this looks
    like a mis-stated diagonal-dominance check; confirm the intended
    convergence criterion. If the test fails, no iteration is performed and
    the function implicitly returns None.
    """
    A = A.copy()
    b = b.copy()
    E = np.identity(len(A))
    # transform to the form x = (E-A)x + b, i.e. x = Bx + b
    for i in range(len(A)):
        b[i] /= A[i, i]
        A[i] /= A[i, i]
    B = E - A
    if np.sum([abs(B[i,i]) > np.sum(B[i]) - B[i,i] for i in range(len(A))]):
        x0 = b
        x = np.array(apply_mat(B, x0) + b)
        # a-priori iteration estimate, printed for diagnostics only
        k = math.floor(math.log(0.01/(norm(x-x0))*(1-norm(B)), norm(B)))
        print(f"Seidel iterations: {k+1}")
        while norm(x - x0) > 0.01:
            # BUG FIX: x0 must be a copy. The original aliased x0 = x, so the
            # in-place updates below made norm(x - x0) identically zero and
            # the loop always stopped after a single sweep.
            x0 = x.copy()
            # Seidel sweep: x[i] uses already-updated components x[:i] and
            # previous-iterate components x0[i+1:]
            for i in range(len(A)):
                x[i] = np.sum(B[i, :i]*x[:i]) + np.sum(B[i, i+1:len(A)]*x0[i+1:len(A)]) + b[i]
        return x
def square_root_method(A: np.array, b: np.array, isInverse: bool = 0) -> np.array:
    """Solve the linear system A*x = b by the square-root (Cholesky) method.

    When isInverse is falsy the system is first symmetrized to
    (A^T*A)*x = A^T*b; when truthy, A is assumed already symmetric
    positive-definite (used by square_invert, which passes identity columns).
    The factorization A = U^T*U is built, then the two triangular systems
    U^T*y = b and U*x = y are solved.
    """
    A = A.copy()
    b = b.copy()
    # symmetrization (skipped when called from square_invert)
    if isInverse == 0:
        b = apply_mat(transp_mat(A), b)
        A = matrix_multiplication(transp_mat(A), A)
    U = np.zeros(A.shape)
    for i in range(len(A)):
        # U[i,i] = sqrt(a[i,i] - sum(u[k, i]**2 for k from 1 to i-1))
        s = A[i,i]
        for k in range(i):
            s-= U[k, i]**2
        U[i, i] = math.sqrt(s)
        # U[i, j] = (a[i,j] - sum(u[k,i]*u[k, j] for k from 1 to i-1), j = i+1, n) / u[i,i]
        for j in range(i+1, len(A)):
            s = A[i, j]
            for k in range(i):
                s-= U[k, i]*U[k, j]
            U[i, j] = s/U[i, i]
    y = np.zeros(b.shape)
    x = np.zeros(b.shape)
    Ut = transp_mat(U)
    # solve the lower-triangular system Ut*y = b by forward substitution
    for i in range(len(y)):
        s = b[i]
        for k in range(i):
            s-=Ut[i, k]*y[k]
        y[i] = s/Ut[i, i]
    # solve the upper-triangular system U*x = y by back substitution
    for i in range(len(y)-1, -1, -1):
        s = y[i]
        for k in range(i+1, len(A)):
            s-=U[i, k]*x[k]
        x[i] = s/U[i,i]
    return x
def max_elem(A: np.array):
    """Find the largest-magnitude element A[i0][j0] strictly above the diagonal.

    Returns a tuple (absolute value, row index, column index). Because the
    comparison uses >=, ties resolve to the last position scanned.
    """
    best_value = 0.0
    best_row = 0
    best_col = 0
    size = len(A)
    for row in range(size):
        for col in range(row + 1, size):
            magnitude = math.fabs(A[row, col])
            if magnitude >= best_value:
                best_value = magnitude
                best_row, best_col = row, col
    return best_value, best_row, best_col
def jacobi_eigenvalue(A: np.array):
    """Compute eigenvalues of the symmetrized matrix A^T*A by Jacobi rotations.

    Repeatedly zeroes out the largest off-diagonal element with a Givens
    rotation until the sum of squared off-diagonal elements drops below
    0.001. Returns (vector of eigenvalues, matrix of eigenvectors).

    NOTE(review): the rotation angle divides by A[i,i] - A[j,j]; if the two
    diagonal entries coincide this raises ZeroDivisionError -- confirm
    inputs avoid that case.
    """
    A = A.copy()
    UVectors = np.identity(len(A))
    # symmetrization
    A = matrix_multiplication(transp_mat(A), A)
    sumOfElements = 1
    while sumOfElements > 0.001:
        # largest off-diagonal element: (value, i, j)
        kek = max_elem(A)
        # angle f chosen so that the rotated matrix A(new) = UT*A*U has
        # a[i][j] == 0
        f = math.atan(2*A[kek[1], kek[2]]/(A[kek[1], kek[1]] - A[kek[2], kek[2]])) / 2
        # build the Givens rotation matrix from the identity
        U = np.identity(len(A))
        U[kek[1], kek[1]], U[kek[2], kek[2]] = math.cos(f), math.cos(f)
        U[kek[2], kek[1]] = math.sin(f)
        U[kek[1], kek[2]] = -1*math.sin(f)
        # accumulate eigenvectors: U = U[0]*U[1]*...*U[k-2]*U[k-1]
        UVectors = matrix_multiplication(UVectors, U)
        # A(new) = UT*A*U
        A = transp_mat(U) @ A @ U
        # convergence measure: sum of squared off-diagonal elements
        sumOfElements = 0
        for i in range(len(A)):
            for j in range(len(A)):
                if i != j:
                    sumOfElements += A[i][j] ** 2
    return np.array([A[i,i] for i in range(len(A))]), np.array(UVectors)
| [
"satturn@bitbucket.org"
] | satturn@bitbucket.org |
a4ce40b628c5233a0ceb47770c61b8eedccb5638 | af2f52c48681a241cf2d99791ceada25e5a85967 | /hw3/hw3.py | 28192302b4d443ab147279cf14f83aadda98b921 | [] | no_license | dinadaas/Programming-in-Python | 300d064888e563d9b01ee3750ea6142dfaae3ee0 | ebaf782a36b3b6b74cdf5f134c4699a2bf2b4590 | refs/heads/master | 2021-06-29T13:25:03.814141 | 2017-09-19T05:12:31 | 2017-09-19T05:12:31 | 104,029,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,622 | py | from collections import defaultdict
import time
######################### Problem 1 and 2 code below #####################
class Constraint:
    """A single constraint of a CSP.

    Attributes:
        name: human-readable label used for debugging output.
        domain: list of variables the constraint acts on.
        predicate: boolean function taking one argument per domain variable.
    """
    def __init__(self, name, domain, pred):
        self.name = name
        self.domain = domain
        self.predicate = pred

    def isSatisfied(self, vars):
        """Evaluate the predicate on the given assignment dictionary.

        Raises KeyError if any variable in the domain is missing from vars.
        """
        arguments = [vars[variable] for variable in self.domain]
        return self.predicate(*arguments)
class CSP:
    """Defines a constraint satisfaction problem. Contains 4 members:
    - variables: a list of variables
    - domains: a dictionary mapping variables to domains
    - constraints: a list of Constraints.
    - incidentConstraints: a dict mapping each variable to a list of
      constraints acting on it.
    """
    def __init__(self,variables=[],domains=[]):
        """Input: a list of variables and a list of domains, one per variable.
        Note: The variable names must be unique, otherwise undefined behavior
        will result.
        """
        self.variables = variables[:]
        self.domains = dict(zip(variables,domains))
        self.constraints = []
        self.incidentConstraints = dict((v,[]) for v in variables)
    def addVariable(self,var,domain):
        """Adds a new variable with a given domain. var must not already
        be present in the CSP, otherwise ValueError is raised."""
        if var in self.domains:
            # BUG FIX: this previously concatenated the undefined name 'val',
            # which raised NameError instead of the intended ValueError.
            raise ValueError("Variable with name "+var+" already exists in CSP")
        self.variables.append(var)
        self.domains[var] = domain
        self.incidentConstraints[var] = []
    def addConstraint(self,varlist,pred,name=None):
        """Adds a constraint with the domain varlist, the predicate pred,
        and optionally a name for printing."""
        if name is None:
            name = "c("+",".join(str(v) for v in varlist)+")"
        self.constraints.append(Constraint(name,varlist,pred))
        # register the new constraint with every variable it touches
        for v in varlist:
            self.incidentConstraints[v].append(self.constraints[-1])
    def addUnaryConstraint(self,var,pred,name=None):
        """Adds a unary constraint with the argument var, the predicate pred,
        and optionally a name for printing."""
        self.addConstraint((var,),pred,name)
    def addBinaryConstraint(self,var1,var2,pred,name=None):
        """Adds a binary constraint with the arguments (var1,var2), the
        predicate pred, and optionally a name for printing."""
        self.addConstraint((var1,var2),pred,name)
    def fixValue(self,var,value,name=None):
        """Adds a constraint that states var = value."""
        if name is None:
            name = str(var)+'='+str(value)
        self.addUnaryConstraint(var,lambda x:x==value,name)
    def nAryConstraints(self,n,var=None):
        """Returns a list of all n-ary constraints in the CSP if var==None,
        or if var is given, returns a list of all n-ary constraints involving
        var."""
        if var is None:
            return [c for c in self.constraints if len(c.domain)==n]
        else:
            return [c for c in self.incidentConstraints[var] if len(c.domain)==n]
    def incident(self,*vars):
        """incident(var1,...,varn) will return a list of constraints
        that involve all of var1 to varn."""
        if len(vars)==0: return self.constraints
        # intersect the constraint sets of all requested variables
        res = set(self.incidentConstraints[vars[0]])
        for v in vars[1:]:
            res &= set(self.incidentConstraints[v])
        return [c for c in res]
    def isConstraintSatisfied(self,c,partialAssignment):
        """Checks if the partial assignment satisfies the constraint c.
        If the partial assignment doesn't cover the domain, this returns
        None."""
        try:
            res = c.isSatisfied(partialAssignment)
            return res
        except KeyError:
            # some variable in c's domain is unassigned -- undetermined
            return None
    def isValid(self,partialAssignment,*vars):
        """Checks if the assigned variables in a partial assignment
        are mutually compatible. Only checks those constraints
        involving assigned variables, and ignores any constraints involving
        unassigned ones.
        If no extra arguments are given, checks all constraints relating
        assigned variables.
        If extra arguments var1,...,vark are given, this only checks
        constraints that are incident to those given variables."""
        for c in self.incident(*vars):
            # a constraint only counts against validity when it is fully
            # covered by the assignment and evaluates to False
            if self.isConstraintSatisfied(c,partialAssignment)==False:
                return False
        return True
def streetCSP():
"""Returns a CSP corresponding to the street puzzle covered in class."""
nationalityVars = ['N1','N2','N3','N4','N5']
colorVars = ['C1','C2','C3','C4','C5']
drinkVars = ['D1','D2','D3','D4','D5']
jobVars = ['J1','J2','J3','J4','J5']
animalVars = ['A1','A2','A3','A4','A5']
nationalities = ['E','S','J','I','N']
colors = ['R','G','W','Y','B']
drinks = ['T','C','M','F','W']
jobs = ['P','S','Di','V','Do']
animals = ['D','S','F','H','Z']
csp = CSP(nationalityVars+colorVars+drinkVars+jobVars+animalVars,
[nationalities]*5+[colors]*5+[drinks]*5+[jobs]*5+[animals]*5)
#TODO: fill me in. Slide 18 is filled in for you. Don't forget to enforce
#that all nationalities, colors, drinks, jobs, and animals are distinct!
for Ni in range(5):
for Nn in range(5):
if Ni != Nn:
csp.addBinaryConstraint(nationalityVars[Ni], nationalityVars[Nn], lambda x,y: x!=y)
for Ci in range(5):
for Cn in range(5):
if Ci != Cn:
csp.addBinaryConstraint(colorVars[Ci], colorVars[Cn], lambda x,y: x!=y)
for Di in range(5):
for Dn in range(5):
if Di != Dn:
csp.addBinaryConstraint(drinkVars[Di], drinkVars[Dn], lambda x,y: x!=y)
for Ji in range(5):
for Jn in range(5):
if Ji != Jn:
csp.addBinaryConstraint(jobVars[Ji], jobVars[Jn], lambda x,y: x!=y)
for Ai in range(5):
for An in range(5):
if Ai != An:
csp.addBinaryConstraint(animalVars[Ai], animalVars[An], lambda x,y: x!=y)
#Englishman lives in the red house
for Ni,Ci in zip(nationalityVars,colorVars):
csp.addBinaryConstraint(Ni,Ci,lambda x,y:(x=='E')==(y=='R'),'Englishman lives in the red house')
#Japanese is a painter
for Ni,Ji in zip(nationalityVars,jobVars):
csp.addBinaryConstraint(Ni,Ji,lambda x,y:(x=='J')==(y=='P'),'Japanese is a painter')
#Spaniard has a dog
for Ni,Ai in zip(nationalityVars,animalVars):
csp.addBinaryConstraint(Ni,Ai,lambda x,y:(x=='S')==(y=='D'),'Spaniard has a dog')
#Italian drinks tea
for Ni,Di in zip(nationalityVars,drinkVars):
csp.addBinaryConstraint(Ni,Di,lambda x,y:(x=='I')==(y=='T'),'Italian drinks tea')
#Sculptor breeds Snails
for Ji,Ai in zip(jobVars,animalVars):
csp.addBinaryConstraint(Ji,Ai,lambda x,y:(x=='S')==(y=='S'),'Sculptor breeds Snails')
#Violinist drinks Fruit juice
for Ji,Di in zip(jobVars,drinkVars):
csp.addBinaryConstraint(Ji,Di,lambda x,y:(x=='V')==(y=='F'),'Violinist drinks Fruit juice')
#Diplomat lives in the Yellow house
for Ji,Ci in zip(jobVars,colorVars):
csp.addBinaryConstraint(Ji,Ci,lambda x,y:(x=='Di')==(y=='Y'),'Diplomat lives in the Yellow house')
#owner of the green house drinks Coffee
for Ci,Di in zip(jobVars,colorVars):
csp.addBinaryConstraint(Ci,Di,lambda x,y:(x=='G')==(y=='C'),'owner of the green house drinks Coffee')
#Norwegian lives in first house
csp.fixValue('N1','N','Norwegian lives in the first house')
#Second house is blue
csp.fixValue('C2','B','Second house is blue')
#owner of the middle house drinks Milk
csp.fixValue('D3','M','owner of the middle house drinks Milk')
#green house is to the right of the white house
for Ci,Cn in zip(colorVars[:-1],colorVars[1:]):
csp.addBinaryConstraint(Ci,Cn,lambda x,y:(x=='W')==(y=='G'),'Green house is to the right of the white house')
csp.addUnaryConstraint('C5',lambda x:x!='W','Green house is to the right of the white house')
csp.addUnaryConstraint('C1',lambda x:x!='G','Green house is to the right of the white house')
#Fox is in the house next to the Doctors
for Ai,Ji,An in zip(animalVars[0:3],jobVars[1:4],animalVars[2:5]):
t=[Ai,Ji,An]
csp.addConstraint(['A1','A2','A3','A4','A5','J1','J2','J3','J4','J5'],lambda A1,A2,A3,A4,A5,J1,J2,J3,J4,J5: ((A1=='F') and (J2=='Do')) or ((A2=='F') and (J1=='Do')) or ((A2=='F') and (J3=='Do')) or ((A3=='F') and (J2=='Do')) or ((A3=='F') and (J4=='Do')) or ((A4=='F') and (J3=='Do')) or ((A4=='F') and (J5=='Do')) or ((A5=='F') and (J4=='Do')), 'Fox next to Doctor')
#Horse is next to the Diplomats
for Ai,Ji,An in zip(animalVars[0:3],jobVars[1:4],animalVars[2:5]):
t=[Ai,Ji,An]
csp.addConstraint(['A1','A2','A3','A4','A5','J1','J2','J3','J4','J5'],lambda A1,A2,A3,A4,A5,J1,J2,J3,J4,J5: ((A1=='H') and (J2=='Di')) or ((A2=='H') and (J1=='Di')) or ((A2=='H') and (J3=='Di')) or ((A3=='H') and (J2=='Di')) or ((A3=='H') and (J4=='Di')) or ((A4=='H') and (J3=='Di')) or ((A4=='H') and (J5=='Di')) or ((A5=='H') and (J4=='Di')), 'Horse next to Diplomat')
print "CSP has",len(csp.constraints),"constraints"
#TODO:
return csp
def p1():
    """Problem 1 driver: builds the street CSP and sanity-checks isValid."""
    csp = streetCSP()
    # print all constraints shared by the first three nationality variables
    for x in csp.incident('N1', 'N2', 'N3'):
        print x.name
    # the known solution of the street puzzle
    solution = dict([('A1', 'F'), ('A2', 'H'), ('A3', 'S'), ('A4', 'D'), ('A5', 'Z'),
                     ('C1', 'Y'), ('C2', 'B'), ('C3', 'R'), ('C4', 'W'), ('C5', 'G'),
                     ('D1', 'W'), ('D2', 'T'), ('D3', 'M'), ('D4', 'F'), ('D5', 'C'),
                     ('J1', 'Di'), ('J2', 'Do'), ('J3', 'S'), ('J4', 'V'), ('J5', 'P'),
                     ('N1', 'N'), ('N2', 'I'), ('N3', 'E'), ('N4', 'S'), ('N5', 'J')])
    # invalid: drinks D1/D2 are swapped relative to the solution
    invalid1 = dict([('A1', 'F'), ('A2', 'H'), ('A3', 'S'), ('A4', 'D'), ('A5', 'Z'),
                     ('C1', 'Y'), ('C2', 'B'), ('C3', 'R'), ('C4', 'W'), ('C5', 'G'),
                     ('D1', 'T'), ('D2', 'W'), ('D3', 'M'), ('D4', 'F'), ('D5', 'C'),
                     ('J1', 'Di'), ('J2', 'Do'), ('J3', 'S'), ('J4', 'V'), ('J5', 'P'),
                     ('N1', 'N'), ('N2', 'I'), ('N3', 'E'), ('N4', 'S'), ('N5', 'J')])
    # invalid: the fox appears in two houses (A1 and A2)
    invalid2 = dict([('A1', 'F'), ('A2', 'F'), ('A3', 'S'), ('A4', 'D'), ('A5', 'Z'),
                     ('C1', 'Y'), ('C2', 'B'), ('C3', 'R'), ('C4', 'W'), ('C5', 'G'),
                     ('D1', 'W'), ('D2', 'T'), ('D3', 'M'), ('D4', 'F'), ('D5', 'C'),
                     ('J1', 'Di'), ('J2', 'Do'), ('J3', 'S'), ('J4', 'V'), ('J5', 'P'),
                     ('N1', 'N'), ('N2', 'I'), ('N3', 'E'), ('N4', 'S'), ('N5', 'J')])
    print "Valid assignment valid?",csp.isValid(solution)
    print "Invalid assignment valid?",csp.isValid(invalid1)
    print "Invalid assignment valid?",csp.isValid(invalid2)
    #you may wish to check the solver once you've solved problem 2
    #solver = CSPBacktrackingSolver(csp)
    #res = solver.solve()
    #print "Result:",sorted(res.items())
############################ Problem 2 code below #######################
# Global instrumentation counters: countCheck counts assignments tried with
# forward checking enabled, noCheck counts assignments tried with plain
# validity checking (both incremented inside CSPBacktrackingSolver.search).
countCheck = 0
noCheck = 0
class CSPBacktrackingSolver:
    """ A CSP solver that uses backtracking.
    A state is a partial assignment dictionary {var1:value1,...,vark:valuek}.
    Also contains a member oneRings that is a dict mapping each variable to
    all variables that share a constraint.
    """
    def __init__(self,csp,doForwardChecking=True,doConstraintPropagation=False):
        self.csp = csp
        self.doForwardChecking = doForwardChecking
        self.doConstraintPropagation = doConstraintPropagation
        #compute 1-rings: for each variable, the set of variables sharing
        #at least one constraint with it
        self.oneRings = dict((v,set()) for v in csp.variables)
        for c in csp.constraints:
            cdomain = set(c.domain)
            for v in c.domain:
                self.oneRings[v] |= cdomain
        #a variable is not part of its own 1-ring
        for v in csp.variables:
            if v in self.oneRings[v]:
                self.oneRings[v].remove(v)
    def solve(self):
        """Solves the CSP, returning an assignment if solved, or False if
        failed."""
        domains = self.initialDomains()
        return self.search({},domains)
    def search(self,partialAssignment,domains):
        """Runs recursive backtracking search.

        Returns the completed assignment dict on success, False on failure.
        Note: partialAssignment is mutated in place and restored on backtrack.
        """
        #all variables assigned -> solution found
        if len(partialAssignment)==len(self.csp.variables):
            return partialAssignment
        if self.doConstraintPropagation:
            domains = self.constraintPropagation(partialAssignment,domains)
            #contradiction detected
            if any(len(d)==0 for (v,d) in domains.iteritems()):
                return False
        #indent trace output by search depth
        indent = " "*len(partialAssignment)
        X = self.pickVariable(partialAssignment,domains)
        values = self.orderValues(partialAssignment,domains,X)
        for v in values:
            partialAssignment[X] = v
            if self.doForwardChecking:
                global countCheck
                countCheck = countCheck + 1
                print indent+"Trying",X,"=",v
                #do forward checking
                newDomains = self.forwardChecking(partialAssignment,X,domains)
                if any(len(d)==0 for (v,d) in newDomains.iteritems()):
                    #contradiction, go on to next value
                    emptyvars = [v for (v,d) in newDomains.iteritems() if len(d)==0]
                    print indent+"  Forward checking found contradiction on",emptyvars[0]
                    continue
                #recursive call
                res = self.search(partialAssignment,newDomains)
                if res!=False: return res
            else:
                #check whether the assignment X=v is valid
                if self.csp.isValid(partialAssignment,X):
                    global noCheck
                    noCheck = noCheck + 1
                    print indent+"Trying",X,"=",v
                    #recursive call
                    res = self.search(partialAssignment,domains)
                    if res!=False: return res
            #remove the partial assignment to X, backtrack
            del partialAssignment[X]
        return False
    def initialDomains(self):
        """Does the basic step of checking all unary constraints"""
        domains = dict()
        for v,domain in self.csp.domains.iteritems():
            #save only valid constraints
            vconstraints = self.csp.nAryConstraints(1,v)
            #keep only domain values satisfying every unary constraint on v
            dvalid = [val for val in domain if all(c.predicate(val) for c in vconstraints)]
            domains[v] = dvalid
        return domains
    def pickVariable(self,partialAssignment,domains):
        """Return an unassigned variable to assign next.

        Implements most-constrained-variable (minimum remaining values),
        with a most-constraining-variable tie-break.
        NOTE(review): the tie-break compares the two incident-constraint
        LISTS with '>' rather than their lengths -- in Python 2 this is an
        arbitrary element-wise comparison, not a degree comparison; confirm
        whether len() was intended.
        """
        #TODO (Problem 2): implement heuristics
        vals = [(v, len(domain)) for v,domain in domains.iteritems() if v not in partialAssignment]
        minVal = 'x'
        minLength = 100000000
        for i in vals:
            if i[1] < minLength:
                minLength = i[1]
                minVal = i[0]
            if i[1] == minLength:
                if self.csp.incident(i[0]) > self.csp.incident(minVal):
                    minLength = i[1]
                    minVal = i[0]
        return minVal
    def orderValues(self,partialAssignment,domains,var):
        """Return an ordering on the domain domains[var]"""
        #TODO (Bonus): implement heuristics. Currently doesn't do anything
        return domains[var]
    def constraintPropagation(self,partialAssignment,domains):
        """domains is a dict mapping vars to valid values.
        Return a copy of domains but with all invalid values removed."""
        #TODO (Bonus): implement AC3. Currently doesn't do anything
        return domains
    def forwardChecking(self,partialAssignment,var,domains):
        """domains is a dict mapping vars to valid values. var has just been
        assigned.
        Return a copy of domains but with all invalid values removed"""
        resdomain = dict()
        #do a shallow copy for all unaffected domains, this saves time
        for v,domain in domains.iteritems():
            resdomain[v] = domain
        #var's domain collapses to its assigned value
        resdomain[var] = [partialAssignment[var]]
        #TODO: comment this line to perform forward checking
        #return resdomain
        #TODO: perform forward checking on binary constraints
        #NOTE: be sure not to modify the resdomains directly, but to create
        # new lists
        for c in self.csp.incidentConstraints[var]:
            #If the domain has size k and exactly k-1 entries are filled, then
            #do forward checking. If so, 'unassigned' will contain the name of
            #the unassigned variable.
            kassigned = 0
            unassigned = None
            for v in c.domain:
                if v in partialAssignment:
                    kassigned += 1
                else:
                    unassigned = v
            if kassigned+1 == len(c.domain):
                #print "Forward checking",unassigned
                validvalues = []
                #TODO (Problem 2): check whether each values in the domain of unassigned
                #(resdomain[unassigned]) is compatible under c. May want to use
                #self.csp.isConstraintSatisfied(c,assignment). If compatible,
                #append the value to validvalues
                for x in resdomain[unassigned]:
                    #build a trial assignment = partialAssignment + {unassigned: x}
                    assignment = dict()
                    for v,y in partialAssignment.iteritems():
                        assignment[v] = y
                    assignment[unassigned] = x
                    if self.csp.isConstraintSatisfied(c, assignment):
                        validvalues.append(x)
                    del assignment
                resdomain[unassigned] = validvalues
                if len(validvalues)==0:
                    #print "Domain of",unassigned,"emptied due to",c.name
                    #early terminate, this setting is a contradiction
                    return resdomain
        return resdomain
def nQueensCSP(n):
    """Build a CSP for the n-queens problem.

    One variable Q1..Qn per column, each with domain 1..n (the row of
    the queen in that column).  For every pair of columns three binary
    constraints forbid the same row and the two diagonals.
    """
    # 'variables' instead of 'vars' (avoid shadowing the builtin);
    # list(range(...)) so the domain is a real list on Python 3 too.
    variables = ['Q' + str(i) for i in range(1, n + 1)]
    domain = list(range(1, n + 1))
    csp = CSP(variables, [domain] * len(variables))
    for i in range(1, n + 1):
        for j in range(1, i):
            Qi = 'Q' + str(i)
            Qj = 'Q' + str(j)
            ofs = i - j
            # The ofs=ofs default argument freezes the current offset in
            # each lambda; otherwise all closures would share the final
            # loop value.
            csp.addBinaryConstraint(Qi, Qj, (lambda x, y: x != y),
                                    Qi + "!=" + Qj)
            csp.addBinaryConstraint(Qi, Qj, (lambda x, y, ofs=ofs: x != (y + ofs)),
                                    Qi + "!=" + Qj + "+" + str(i - j))
            csp.addBinaryConstraint(Qi, Qj, (lambda x, y, ofs=ofs: x != (y - ofs)),
                                    Qi + "!=" + Qj + "-" + str(i - j))
    return csp
def p2():
csp = nQueensCSP(4)
solver = CSPBacktrackingSolver(csp,doForwardChecking=True)
res = solver.solve()
print "Result:",sorted(res.items())
#TODO: implement forward checking, change False to True
csp = nQueensCSP(4)
solver = CSPBacktrackingSolver(csp,doForwardChecking=True)
res = solver.solve()
print "Result:",sorted(res.items())
raw_input()
csp = nQueensCSP(8)
solver = CSPBacktrackingSolver(csp,doForwardChecking=True)
res = solver.solve()
print "Result:",sorted(res.items())
raw_input()
csp = nQueensCSP(12)
solver = CSPBacktrackingSolver(csp,doForwardChecking=True)
res = solver.solve()
print "Result:",sorted(res.items())
raw_input()
csp = nQueensCSP(16)
solver = CSPBacktrackingSolver(csp,doForwardChecking=True)
res = solver.solve()
print "Result:",sorted(res.items())
#TODO: see how high you can crank n!
############################ Problem 4 code below #######################
def marginalize(probabilities, index):
    """Sum variable Xi out of a joint distribution.

    Given P(X1,...,Xn) as a dict mapping assignment tuples to
    probabilities, return P(X1,...,Xi-1,Xi+1,...,Xn).

    - probabilities: map from tuples of variable values to probabilities
    - index: the position i of the variable to marginalize out

    Generalizes the old hard-coded binary n=2 / n=3 cases: works for
    any number of variables and any hashable value set.
    """
    result = {}
    for assignment, p in probabilities.items():
        # Drop position `index`, accumulate mass on the reduced tuple.
        reduced = assignment[:index] + assignment[index + 1:]
        result[reduced] = result.get(reduced, 0.0) + p
    return result
def marginalize_multiple(probabilities, indices):
    """Marginalize out several variables at once.

    Indices are removed from highest to lowest so that the positions of
    the remaining variables stay valid after each step.
    """
    result = probabilities
    for idx in sorted(indices, reverse=True):
        result = marginalize(result, idx)
    return result
def condition1(probabilities, index, value):
    """Condition a joint distribution on Xi = value by explicit division.

    Given P(X1,...,Xn) return P(X1,...,Xi-1,Xi+1,...,Xn | Xi=value),
    computed as P(..., Xi=value, ...) / P(Xi=value).

    Bug fix: the old version looked the denominator up under the key
    (index,) or a hard-coded constant instead of (value,), so it was
    only correct when `value` happened to coincide with that constant.
    Also generalized beyond binary variables and n <= 3.
    """
    # Denominator P(Xi=value): the total mass of all assignments that
    # agree with the evidence (i.e. the marginal of Xi evaluated at value).
    denom = sum(p for assignment, p in probabilities.items()
                if assignment[index] == value)
    result = {}
    for assignment, p in probabilities.items():
        if assignment[index] == value:
            reduced = assignment[:index] + assignment[index + 1:]
            result[reduced] = result.get(reduced, 0.0) + p
    return dict((k, v / denom) for k, v in result.items())
def normalize(probabilities):
    """Return a copy of the distribution scaled so its values sum to 1.

    Uses .items() (not the Python-2-only .iteritems()) so it also runs
    under Python 3.
    """
    vtotal = sum(probabilities.values())
    return dict((k, v / vtotal) for k, v in probabilities.items())
def condition2(probabilities, index, value):
    """Condition a joint distribution on Xi = value by normalization.

    Same result as condition1, but computed by selecting the entries
    consistent with the evidence and renormalizing, instead of dividing
    by an explicitly computed marginal.  Generalized to any number of
    variables and values (the old version hard-coded binary n=2 / n=3
    tables).
    """
    selected = {}
    for assignment, p in probabilities.items():
        if assignment[index] == value:
            # Drop position `index`, accumulate the unnormalized mass.
            reduced = assignment[:index] + assignment[index + 1:]
            selected[reduced] = selected.get(reduced, 0.0) + p
    return normalize(selected)
def p4():
pAB = {(0,0):0.5,
(0,1):0.3,
(1,0):0.1,
(1,1):0.1}
pA = marginalize(pAB,1)
print (pA[(0,)],pA[(1,)]),"should be",(0.8,0.2)
pABC = {(0,0,0):0.2,
(0,0,1):0.3,
(0,1,0):0.06,
(0,1,1):0.24,
(1,0,0):0.02,
(1,0,1):0.08,
(1,1,0):0.06,
(1,1,1):0.04}
print "marginalized p(A,B): ",dict(marginalize(pABC,2))
pA = marginalize(marginalize(pABC,2),1)
print (pA[(0,)],pA[(1,)]),"should be",(0.8,0.2)
pA_B = condition1(pAB,1,1)
print (pA_B[(0,)],pA_B[(1,)]),"should be",(0.75,0.25)
pA_B = condition2(pAB,1,1)
print (pA_B[(0,)],pA_B[(1,)]),"should be",(0.75,0.25)
pAB_C = condition1(pABC,2,1)
print "p(A,B|C): ",dict(pAB_C)
pAB_C = condition2(pABC,2,1)
print "p(A,B|C): ",dict(pAB_C)
pA_BC = condition1(condition1(pABC,2,1),1,1)
print "p(A|B,C): ",dict(pA_BC)
pA_BC = condition2(condition2(pABC,2,1),1,1)
print "p(A|BC): ",dict(pA_BC)
if __name__=='__main__':
print "###### Problem 1 ######"
p1()
raw_input()
print
print "###### Problem 2 ######"
p2()
raw_input()
print
print "###### Problem 4 ######"
p4()
| [
"dina.daas@duke.edu"
] | dina.daas@duke.edu |
ea4800a0af01ed28d90e42a14e1db14972484790 | 88e8cad27f94c0463a4ebb0bdd93258964538b7f | /test/venv/distinct.py | 3751850eb69be1251d64a3e4beec3e1718746c5f | [] | no_license | ljx4471817/python_maoyan_xiebushengzheng | 48b243112ede861d508c9a67532a582459a60df1 | 429c8518934f2b262795849ecc23903171027a95 | refs/heads/master | 2020-03-24T05:01:16.149301 | 2018-08-08T11:14:24 | 2018-08-08T11:14:24 | 142,472,281 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # 获取的评论可能有重复,为了最终统计的真实性,需做去重处理
def main(old, new):
    """Copy the review file `old` to `new`, dropping duplicate lines.

    Duplicates are detected on the raw line (including its trailing
    newline).  Each kept line is written as line + '\n'; since the input
    lines keep their own '\n', the output is double-spaced — preserved
    here because the original script produced the same output.
    """
    seen = set()  # O(1) membership test instead of scanning a list
    # Context managers guarantee both files are closed and flushed
    # (the original never closed either handle).
    with open(old, 'r', encoding='UTF-8') as oldfile, \
            open(new, 'w', encoding='UTF-8') as newfile:
        for line in oldfile:
            if line not in seen:
                newfile.write(line + '\n')
                seen.add(line)
if __name__ =='__main__':
main(r'D:\邪不压正影评\xie_zheng23.txt', r'D:\邪不压正影评\xie_zheng_new23.txt') | [
"313135277@qq.com"
] | 313135277@qq.com |
79b12beb277459c9737c3397ebad566584d4c414 | 1244b0c0a0c72b9682f74b0b067934044b83e814 | /main.py | 2b0609b8d3a0d15949dbbec82c3565b96cac89f7 | [] | no_license | arivvid27/Ultimate-Dictionary | af2dab25ba1065d871f3136d9cf7290258bf10f1 | 763ad382b59dfe939c423f305e6d2209cd0214bd | refs/heads/master | 2023-02-20T18:19:11.542741 | 2021-01-27T03:01:04 | 2021-01-27T03:01:04 | 333,279,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | from replit import db
from PyDictionary import PyDictionary
from time import sleep
# Interactive REPL: a personal dictionary backed by the Replit key/value
# store (`db`), falling back to PyDictionary for unknown words.
PyDict = PyDictionary()

while True:
    print('#' * 5, 'USER MANUAL', '#' * 5)
    user = input("""
INSERT = [insert]
KNOW MEANING = [know]
SHOW ALL CREATED WORDS = [show]
DELETE = [delete]
QUIT = [quit]
>>> """)
    if user == 'insert':
        user_input_name = input('What is the name? > ')
        user_input_define = input('What is the definition > ')
        print('Ok')
        print('INSERTING...')
        sleep(3)
        try:
            db[user_input_name] = user_input_define
            print('Success!')
        except Exception:
            print('Oops. There seems to be an error. Please run this on repl.it, or comment below so arivvid27 can fix it.')
    elif user == 'know':
        user_know = input('What word do you want to know the meaning for? > ')
        if user_know in db:
            try:
                print(db[user_know])
            except Exception:
                print('Oops. There seems to be an error. Please comment below so arivvid27 can fix it.')
        else:
            # PyDictionary.meaning returns None for unknown words; the old
            # code had an unreachable third elif for this case and would
            # just print None.
            meaning = PyDict.meaning(user_know)
            if meaning is not None:
                print(meaning)
            else:
                print('Oops. The word you are looking for does not seem to exist. Please check your spelling, or insert a word of your own.')
    elif user == 'show':
        # 'key', not the old 'for keys in keys' which shadowed the iterable.
        for key in db.keys():
            print(key)
        sleep(5)
    elif user == 'delete':
        user_delete = input('What word do you want to delete? > ')
        # Guard the delete: the old code crashed with KeyError on a
        # word that was never inserted.
        if user_delete in db:
            del db[user_delete]
            print('Deleted')
        else:
            print('I don\'t know that')
    elif user == 'quit':
        print('ok!')
        exit()
    else:
        print('I don\'t know that')
"arivvid27@outlook.com"
] | arivvid27@outlook.com |
f65bffa85b6ace7dff888a6ae57104777026d021 | 48bcb32b77f1773642830e3029c8d455e0b6e5c3 | /clustering.py | c3cee4293823cd237736acf64d1e513a4fcbe956 | [] | no_license | tyleratk/churn-case-study | 7dcc43c961c0f5d0c6bb8ffd4fa0234001e445b8 | 399191dbe1961097732a80d5683acf8bda615544 | refs/heads/master | 2021-05-04T01:45:52.925016 | 2018-02-07T21:09:04 | 2018-02-07T21:09:04 | 120,362,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,145 | py | from sklearn.cluster import KMeans
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
from time import time
from sklearn import metrics
from sklearn.decomposition import PCA
def get_x(file_name):
    """Load the KNN-imputed feature matrix from a .npz archive.

    Parameters
    ----------
    file_name : str
        Path to an .npz file containing an 'X_filled_knn' array.

    Returns
    -------
    numpy.ndarray
        The stored matrix.
    """
    # Context manager closes the underlying zip file; np.load otherwise
    # keeps the handle open.
    with np.load(file_name) as npz:
        return npz['X_filled_knn']
def rss(data, labels):
    """Total within-cluster residual sum of squares.

    For each cluster label, sum the squared deviations of its member
    rows from the cluster centroid, then add the per-cluster totals.
    """
    total = 0.0
    for label in np.unique(labels):
        members = data[labels == label]
        centroid = members.mean(axis=0)
        total += ((members - centroid) ** 2).sum()
    return total
def make_elbow_plot(data, ks, plotname=None):
    """Fit KMeans for every k in `ks` and plot k against the resulting
    RSS (an "elbow plot" for choosing the number of clusters).

    Saves the figure to `plotname` when given, otherwise shows it.
    """
    rss_values = []
    for k in ks:
        print('Running with k', k)
        model = KMeans(n_clusters=k)
        model.fit(data)
        rss_values.append(rss(data, model.labels_))
    fig, ax = plt.subplots()
    ax.plot(ks, rss_values)
    ax.set_xlabel('k')
    ax.set_ylabel('RSS')
    ax.set_title('Elbow Plot')
    if plotname:
        plt.savefig(plotname)
    else:
        plt.show()
def bench_k_means(estimator, name, data):
    """Fit `estimator` on `data` and print one row of clustering metrics.

    NOTE(review): relies on the module-level globals `labels` and
    `sample_size`, which are assigned in the __main__ block — confirm
    before reusing this function elsewhere.
    """
    start = time()
    estimator.fit(data)
    elapsed = time() - start
    pred = estimator.labels_
    row = (name, elapsed, estimator.inertia_,
           metrics.homogeneity_score(labels, pred),
           metrics.completeness_score(labels, pred),
           metrics.v_measure_score(labels, pred),
           metrics.adjusted_rand_score(labels, pred),
           metrics.adjusted_mutual_info_score(labels, pred),
           metrics.silhouette_score(data, pred,
                                    metric='euclidean',
                                    sample_size=sample_size))
    print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f' % row)
if __name__ == '__main__':
    # Build the churn label: a rider counts as "active" if their last
    # trip happened in June or later.
    df = pd.read_csv('train.csv')
    df['last_trip_date'] = pd.to_datetime(df['last_trip_date'])
    df['active'] = (df['last_trip_date'].dt.month >= 6).astype(int)
    cols = df.columns
    n_samples, n_features = df.shape

    # Module-level settings also read by bench_k_means().
    n_clusters = 3
    sample_size = 300
    y = df.pop('active').values
    labels = y

    X = get_x('X_filled_knn.npz')

    # Project onto two principal components and cluster in the plane so
    # the decision regions can be visualized.
    reduced_data = PCA(n_components=2).fit_transform(X)
    kmeans = KMeans(init='k-means++', n_clusters=n_clusters, n_init=50)
    kmeans.fit(reduced_data)

    # Evaluate the clustering on a fine mesh to color each region.
    h = .02  # mesh step size
    x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
    y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    plt.figure(1)
    plt.clf()
    plt.imshow(Z, interpolation='nearest',
               extent=(xx.min(), xx.max(), yy.min(), yy.max()),
               cmap=plt.cm.Paired,
               aspect='auto', origin='lower')
    plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)

    # Mark the centroids with white crosses.
    centroids = kmeans.cluster_centers_
    plt.scatter(centroids[:, 0], centroids[:, 1],
                marker='x', s=169, linewidths=3,
                color='w', zorder=10)
    plt.title('K-means clustering (PCA-reduced data)\n')
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.xticks(())
    plt.yticks(())
    plt.show()
| [
"tyler.atk@hotmail.com"
] | tyler.atk@hotmail.com |
93bec70c16ee77fe017a5bf0e0438fc1eaa2d99d | 2030aff0dfca64c8982f064335b120e20e866d41 | /programming/photo/admin.py | e8a4bfac72c087b04959d4837bf3452e4d9e7175 | [] | no_license | rheehyerin/hyerin_glazers | dd10b914e8282ab73a8ed99a8940a7d65999825f | 2267ecbd9a1d526d017634068fac90950bccb5f4 | refs/heads/master | 2021-01-17T12:52:53.010542 | 2016-08-05T11:29:50 | 2016-08-05T11:29:50 | 59,261,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | from django.contrib import admin
from photo.models import Photo
# Register your models here.
# Expose the Photo model in the Django admin with the default ModelAdmin.
admin.site.register(Photo) | [
"hyerin rhee"
] | hyerin rhee |
fb803c13d2775e370e83cc94af6382e554a403cb | 4e9325fa3c1fbaad93253f96de7f9cdffd54032b | /.ci-scripts/run-linter.py | b461e9e1c6f3706d72bc56a34e3186a499a663c7 | [] | no_license | zhangshijie1998/MIPS-CPU | 4cbfa9a403973b7c83ee0fe1a47ae1631e2d1cf1 | 1ede83912a04e8f5e21a058dccd783636ceba854 | refs/heads/main | 2023-01-12T15:45:47.359644 | 2020-11-13T01:27:54 | 2020-11-13T01:27:54 | 312,323,932 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,032 | py | #!/usr/bin/env python3
from lxml import etree
import chardet
import sys
import os
from pathlib import Path
from typing import Tuple, List, Set
import shutil
import subprocess
import traceback
def is_header(f: Path):
    """Return True when the Verilog file contains no 'endmodule', i.e.
    it looks like an include/header file rather than a module source."""
    with f.open('rb') as fd:
        return b'endmodule' not in fd.read()
def transcoding(src: Path, target: Path):
    """Re-encode `src` as UTF-8 into `target`.

    The source encoding is guessed with chardet; low-confidence guesses
    (<= 0.5) fall back to GBK.  If decoding fails, the error is logged
    and undecodable bytes are replaced, so the output is always valid
    UTF-8.
    """
    with src.open("rb") as fd:
        rawdata = fd.read()
    res = chardet.detect(rawdata)
    encoding = res['encoding'] if res['confidence'] > 0.5 else "GBK"
    try:
        s = rawdata.decode(encoding)
    except (UnicodeDecodeError, LookupError, TypeError):
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt):  UnicodeDecodeError for undecodable bytes,
        # LookupError/TypeError for an unknown or None encoding name
        # coming back from chardet.
        print("Transcoding", src, ':')
        traceback.print_exc()
        s = rawdata.decode(encoding, 'replace')
    with target.open("wb") as wfd:
        wfd.write(s.encode('utf-8'))
def parse_project(xpr: Path) -> Tuple[str, Set[str], Set[str]]:
    """Collect lint inputs from a Vivado .xpr project file.

    Every design/block source file is copied (transcoded to UTF-8) into
    a `.lint` shadow tree next to the project.  Returns a tuple of
    (top module name, set of source files, set of include directories).
    """
    prjdir = xpr.parent
    srcdir = prjdir / (xpr.stem + ".srcs")
    target = prjdir / '.lint'
    topname = ''
    srclist = set()
    inclist = set()
    tree = etree.parse(str(xpr))
    for fileset in tree.xpath("/Project/FileSets/FileSet"):
        fs_type = fileset.attrib['Type']
        if fs_type not in ('DesignSrcs', 'BlockSrcs'):
            continue
        for child in fileset:
            if child.tag == 'File':
                # Expand Vivado path variables; IP cores (.xci) are
                # replaced by their generated Verilog stub files.
                raw = child.attrib['Path']
                raw = raw.replace('$PSRCDIR', str(srcdir))
                raw = raw.replace('$PPRDIR', str(prjdir))
                raw = raw.replace(".xci", '_stub.v')
                vlog = Path(raw)
                if not vlog.is_file():
                    print("Source file", vlog, "does not exist")
                    continue
                vlog_target = target / vlog.relative_to(prjdir)
                vlog_target.parent.mkdir(exist_ok=True, parents=True)
                transcoding(vlog, vlog_target)
                inclist.add(str(vlog_target.parent))
                # Header-only files become include candidates but are
                # not passed to the linter as compilation units.
                if not is_header(vlog_target):
                    srclist.add(str(vlog_target))
            elif child.tag == 'Config' and fs_type == 'DesignSrcs':
                topname = child.xpath("./Option[@Name='TopModule']")[0].attrib['Val']
    return (topname, srclist, inclist)
def run_linter(prjdir: Path, topname: str, srclist: Set[str], inclist: Set[str]):
    """Invoke `verilator --lint-only` on the collected sources.

    Prints a diagnostic if verilator exits with a non-zero status.
    (The unused `linter_log` variable from the old version is removed.)
    """
    args = ["verilator", "--lint-only", "-Wall", "-Wno-DECLFILENAME",
            "-Wno-PINCONNECTEMPTY", "-Wno-UNUSED",
            "--top-module", topname]
    # sorted() makes the command line deterministic across runs; the
    # inputs are sets, whose iteration order is otherwise arbitrary.
    args += ['-I' + i for i in sorted(inclist)]
    args += sorted(srclist)
    res = subprocess.run(args)
    if res.returncode != 0:
        print("Return code of verilator is", res.returncode)
if __name__ == "__main__":
    # Usage: run-linter.py <project.xpr>
    try:
        project_file = Path(sys.argv[1])
        top, sources, includes = parse_project(project_file)
        run_linter(project_file.parent, top, sources, includes)
    except:
        # Print the failure but still exit 0, as the original script did.
        traceback.print_exc()
| [
"69901447+zhangshijie1998@users.noreply.github.com"
] | 69901447+zhangshijie1998@users.noreply.github.com |
ab421663cc8094e7c6a552e148f312ceaa20c3f7 | d9cf44ed3e734ce27d7d6d8ca0d95654a27d76d6 | /src/escaping/GenericReadTest.py | 432541c2346546d3c0105f5d187010adb1a33672 | [] | no_license | skill-lang/pythonTest | 87d273fc018302fc18e207b4744a559d98ace2f0 | 2891d6bee891d9885701c9ce1afbb62767b8b455 | refs/heads/master | 2020-07-02T07:47:47.377793 | 2019-08-09T12:20:55 | 2019-08-09T12:20:55 | 201,461,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,259 | py |
import unittest
from tempfile import TemporaryFile
from python.src.escaping.api import *
from python.src.common.CommonTest import CommonTest
class GenericReadTest(unittest.TestCase, CommonTest):
    """
    Tests the file reading capabilities.
    """

    # Relative path from the test working directory to the repository root.
    _ROOT = "../../../../"

    def read(self, s):
        """Open the given test resource read-only."""
        return SkillFile.open(self._ROOT + s, Mode.Read, Mode.ReadOnly)

    def _assert_readable(self, resource):
        """Open `resource` and assert that a SkillFile was produced."""
        self.assertIsNotNone(self.read(resource))

    def test_writeGeneric(self):
        path = self.tmpFile("write.generic")
        sf = SkillFile.open(path.name)
        self.reflectiveInit(sf)

    def test_writeGenericChecked(self):
        path = self.tmpFile("write.generic.checked")

        # create a name -> type map of the freshly written state
        types = dict()
        sf = SkillFile.open(path.name)
        self.reflectiveInit(sf)
        for t in sf.allTypes():
            types[t.name()] = t

        # read the file back and check skill IDs against the written state
        # NOTE(review): assumes the storage-pool iterator exposes
        # hasNext()/next(), as the original code did — confirm.
        sf2 = SkillFile.open(path.name, Mode.Read)
        for t in sf2.allTypes():
            expected = types.get(t.name()).__iter__()
            for o in t:
                # BUG FIX: assertTrue(msg, condition) passed the message
                # string as the condition, so the check could never fail.
                # The condition must come first, the message second.
                self.assertTrue(expected.hasNext(),
                                "too few instances in read state")
                # assertEqual instead of the deprecated assertEquals alias.
                self.assertEqual(o.getSkillID(), expected.next().getSkillID())

    def test_escaping_read_accept_age_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/age.sf")

    def test_escaping_read_accept_age16_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/age16.sf")

    def test_escaping_read_accept_ageUnrestricted_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/ageUnrestricted.sf")

    def test_escaping_read_accept_aircraft_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/aircraft.sf")

    def test_escaping_read_accept_annotationNull_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/annotationNull.sf")

    def test_escaping_read_accept_annotationString_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/annotationString.sf")

    def test_escaping_read_accept_annotationTest_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/annotationTest.sf")

    def test_escaping_read_accept_coloredNodes_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/coloredNodes.sf")

    def test_escaping_read_accept_container_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/container.sf")

    def test_escaping_read_accept_crossNodes_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/crossNodes.sf")

    def test_escaping_read_accept_date_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/date.sf")

    def test_escaping_read_accept_emptyBlocks_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/emptyBlocks.sf")

    def test_escaping_read_accept_emptyFile_sf(self):
        # Note: this resource lives under [[all]], unlike its siblings.
        self._assert_readable("src/test/resources/genbinary/[[all]]/accept/emptyFile.sf")

    def test_escaping_read_accept_fourColoredNodes_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/fourColoredNodes.sf")

    def test_escaping_read_accept_localBasePoolOffset_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/localBasePoolOffset.sf")

    def test_escaping_read_accept_noFieldRegressionTest_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/noFieldRegressionTest.sf")

    def test_escaping_read_accept_nodeFirstBlockOnly_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/nodeFirstBlockOnly.sf")

    def test_escaping_read_accept_partial_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/partial.sf")

    def test_escaping_read_accept_restrictionsAll_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/restrictionsAll.sf")

    def test_escaping_read_accept_trivialType_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/trivialType.sf")

    def test_escaping_read_accept_twoNodeBlocks_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/twoNodeBlocks.sf")

    def test_escaping_read_accept_twoTypes_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/twoTypes.sf")

    def test_escaping_read_accept_unicode_reference_sf(self):
        self._assert_readable("src/test/resources/genbinary/[[empty]]/accept/unicode-reference.sf")
| [
"feldentm@informatik.uni-stuttgart.de"
] | feldentm@informatik.uni-stuttgart.de |
1c171829107154b30dfead06e0f1004688090e91 | 5919d50c19a24334987f3dc8fa7e2430c51a1917 | /Hash Function/proof_of_work.py | 5da2326fda61d0affbed0d9c588c752d18ec50e5 | [] | no_license | chrisyy2003/CryptoTools | a4397d02473be63b04a7b686fce713cebb4c0005 | 101c4e3235acc67112504710179675d4eef7e3ee | refs/heads/master | 2022-12-02T17:24:00.420743 | 2020-08-14T09:13:56 | 2020-08-14T09:13:56 | 256,257,758 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | from pwn import *
from Crypto.Util.number import *
import re
import string
import hashlib
import random
# Target service; the proof-of-work challenge is read from this socket.
ADDRESS = '127.0.0.1'
PORT = 10000
# Use the PORT constant — the original repeated the literal 10000 here,
# so editing PORT alone silently had no effect.
sh = remote(ADDRESS, PORT)
def proof_of_work():
    """Solve the server's proof-of-work challenge.

    The server sends a line of the form
        sha256(XXXX+TsygnWMdo40yVu6J) == 8d06431e754bdd45f3a3e0375c6...
    Parse out the known suffix and the target digest, then brute-force
    the 4-character alphanumeric prefix XXXX whose sha256 matches.
    """
    challenge = sh.recvline().decode()
    suffix = re.findall(r'\(XXXX\+(.*?)\)', challenge)[0]
    digest = re.findall(r'== (.*?)\n', challenge)[0]
    print(suffix, digest)

    def matches(candidate):
        full = (candidate + suffix).encode()
        return hashlib.sha256(full).hexdigest() == digest

    # pwntools multi-threaded brute-forcer: fixed length 4 over [a-zA-Z0-9].
    return util.iters.mbruteforce(
        matches, string.ascii_letters + string.digits, 4, 'fixed')
# Solve the challenge, answer the server's prompt with the found prefix,
# then hand the connection over to the user.
prefix = proof_of_work()
sh.sendlineafter('Give me XXXX:\n', prefix)
sh.interactive()
| [
"1017975501@qq.com"
] | 1017975501@qq.com |
e2ff3e605f5d643adb4a22ce53b2aa918cf781f4 | e5c9fc4dc73536e75cf4ab119bbc642c28d44591 | /src/leetcodepython/array/day_week_1185.py | d66cac9a636edc8be0acebb4fb26b98b46b0000b | [
"MIT"
] | permissive | zhangyu345293721/leetcode | 0a22034ac313e3c09e8defd2d351257ec9f285d0 | 50f35eef6a0ad63173efed10df3c835b1dceaa3f | refs/heads/master | 2023-09-01T06:03:18.231266 | 2023-08-31T15:23:03 | 2023-08-31T15:23:03 | 163,050,773 | 101 | 29 | null | 2020-12-09T06:26:35 | 2018-12-25T05:58:16 | Java | UTF-8 | Python | false | false | 1,803 | py | # -*- coding:utf-8 -*-
'''
/**
* This is the solution of No. 1185 problem in the LeetCode,
* the website of the problem is as follow:
* https://leetcode-cn.com/problems/day-of-the-week
*
* The description of problem is as follow:
* ==========================================================================================================
* 给你一个日期,请你设计一个算法来判断它是对应一周中的哪一天。
*
* 输入为三个整数:day、month 和 year,分别表示日、月、年。
*
* 您返回的结果必须是这几个值中的一个 {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}。
* 示例 1:
*
* 输入:day = 31, month = 8, year = 2019
* 输出:"Saturday"
* 示例 2:
*
* 输入:day = 18, month = 7, year = 1999
* 输出:"Sunday"
* 示例 3:
*
* 输入:day = 15, month = 8, year = 1993
* 输出:"Sunday"
*
* 来源:力扣(LeetCode)
* ==========================================================================================================
*
* @author zhangyu (zhangyuyu417@gmail.com)
*/
'''
import datetime
class Solution:
    """LeetCode 1185: map a calendar date to its English weekday name."""

    def day_of_the_week(self, day: int, month: int, year: int) -> str:
        """Return the weekday name ("Monday".."Sunday") for the date.

        datetime.date.weekday() yields 0 for Monday through 6 for
        Sunday, which indexes the names list directly (no intermediate
        enumerate-dict needed).
        """
        names = ["Monday", "Tuesday", "Wednesday", "Thursday",
                 "Friday", "Saturday", "Sunday"]
        return names[datetime.date(year, month, day).weekday()]
if __name__ == '__main__':
    # Manual smoke test: 2020-02-08 was a Saturday.
    result = Solution().day_of_the_week(8, 2, 2020)
    print(result)
    assert result == 'Saturday'
| [
"zhangyu_xtb@geekplus.cc"
] | zhangyu_xtb@geekplus.cc |
89fa8c90ed25654494a457945231b21e298c8c69 | 39179ab5fdb0478b18ab29e06e767573a2a651a4 | /main.py | bfadfbc486c5df4a45aff4f4a67018dafbc22255 | [] | no_license | MichalTesnar/Fractals | 560dddf16fa359a21c41cb526da8f8e2a4ffaf75 | e19421e980f0992bfc380f90564884ae4f837174 | refs/heads/master | 2021-07-03T11:36:14.825930 | 2021-01-24T08:24:02 | 2021-01-24T08:24:02 | 218,233,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | # Importování konstant z constants.py
from constants import *
# Hlavní funkce pro spuštění dalších skriptů pro tvorbu fraktálů
def main():
    """Entry point: let the user pick a fractal script to run, then
    loop on pygame events until the window is closed."""
    # Initialize pygame and clear the drawing surface.
    pygame.init()
    DISPLAY.fill(WHITE)

    # Menu text is Czech and kept verbatim ("Which fractal would you
    # like? 1: square, 2: tree, 3: triangle, 4: snowflake, 5: Cantor set").
    print("Který fraktál byste rád?" "\n"
          "1: Čtverec;" "\n"
          "2: Strom;" "\n"
          "3: Trojúhelník;" "\n"
          "4: Vločka;" "\n"
          "5: Kantorovo diskontiuum;")
    choice = int(input())
    print(choice)

    # Each drawing module renders its fractal as a side effect of being
    # imported, so "running" a script just means importing it.
    scripts = {1: 'square', 2: 'tree', 3: 'triangle',
               4: 'snowflake', 5: 'CantorLine'}
    if choice in scripts:
        __import__(scripts[choice])

    # Pygame main loop: on window close, save a screenshot and exit.
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.image.save(DISPLAY, "screenshot.jpg")
                pygame.quit()
                sys.exit()
        pygame.display.update()
# Spuštění vlastního souboru
# Run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
| [
"michal.tesnar007@gmail.com"
] | michal.tesnar007@gmail.com |
d8e0ba47cc6f27c5c00b272d32fbb87d58db0a46 | d41027f30776a7658d6c2ed5a82283e3fc05b153 | /Sensehat_humidity.py | 92b3899a9a323d1fb37dde717bb042c370fa018d | [] | no_license | SamipThulung/IoT-RaspberryPI- | 03a2e8ebef3c2e57265ea325ee9d65d3ce66284d | badfe5fefa15c359a8aff9a7015fd44d5fb3c319 | refs/heads/master | 2020-05-23T22:37:48.239063 | 2019-05-16T07:52:43 | 2019-05-16T07:52:43 | 186,978,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | #Islington College
# Humidity bar graph on the Raspberry Pi Sense HAT: the higher the relative
# humidity, the more of the 8x8 LED matrix is lit green.
from sense_hat import SenseHat

sense = SenseHat()
green = (0, 255, 0)      # colour of the "filled" part of the bar
white = (255, 255, 255)  # colour of the "empty" part of the bar
while True:
    humidity = sense.humidity  # read relative humidity (%) from the sensor
    # Map the 0-100 % range onto the 64 pixels of the LED matrix.
    humidity_value = 64 * humidity / 100
    print(humidity)  # also log the raw reading to the console
    pixels = []
    for index in range(64):
        pixels.append(green if index < humidity_value else white)
    sense.set_pixels(pixels)  # redraw the whole matrix
"samipthulung3@gmail.com"
] | samipthulung3@gmail.com |
ecccc8d7b9dfbdf9a933c5d88bf273a484f80c5e | b44caf63cf9f902bd86419a26a3039104093c636 | /Code/PokemonMovesScript.py | 66b1e862864931b6607032ca9aeea32ba917c168 | [] | no_license | hamishtodd1/typeOptimalPokemonRun | 2d6d34fb731415985d1c42667f30b09df3221ed7 | 5bb5f819cf94f9c1477c341d6c610bd67955fbe9 | refs/heads/master | 2021-07-12T18:37:23.165890 | 2021-06-20T17:37:59 | 2021-06-20T17:37:59 | 128,482,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,419 | py | #todo: pokemon test script, which you can run things through
import sys
ptypelist = ["Normal", "Fire", "Water", "Electric", "Grass", "Ice", "Fighting", "Poison", "Ground", "Flying", "Psychic", "Bug", "Rock", "Ghost", "Dragon"]
# Generation-I type-effectiveness chart.  Each attacking type maps to a tuple
# of damage multipliers against the defending types, in ptypelist order.
# Values are transcribed 1:1 from the original if/elif chain, including the
# deliberate Gen-I quirk Ghost->Psychic == 0.
# NOTE(review): Bug->Ghost is 1 here, while the official Gen-I chart has 0.5;
# kept as-is to preserve the original behaviour -- confirm before changing.
_CHART = {
    #            Nor  Fir  Wat  Ele  Gra  Ice  Fig  Poi  Gro  Fly  Psy  Bug  Roc  Gho  Dra
    "Normal":   (  1,   1,   1,   1,   1,   1,   1,   1,   1,   1,   1,   1, 0.5,   0,   1),
    "Fire":     (  1, 0.5, 0.5,   1,   2,   2,   1,   1,   1,   1,   1,   2, 0.5,   1, 0.5),
    "Water":    (  1,   2, 0.5,   1, 0.5,   1,   1,   1,   2,   1,   1,   1,   2,   1, 0.5),
    "Electric": (  1,   1,   2, 0.5, 0.5,   1,   1,   1,   0,   2,   1,   1,   1,   1, 0.5),
    "Grass":    (  1, 0.5,   2,   1, 0.5,   1,   1, 0.5,   2, 0.5,   1, 0.5,   2,   1, 0.5),
    "Ice":      (  1,   1, 0.5,   1,   2, 0.5,   1,   1,   2,   2,   1,   1,   1,   1,   2),
    "Fighting": (  2,   1,   1,   1,   1,   2,   1, 0.5,   1, 0.5, 0.5, 0.5,   2,   0,   1),
    "Poison":   (  1,   1,   1,   1,   2,   1,   1, 0.5, 0.5,   1,   1,   2, 0.5, 0.5,   1),
    "Ground":   (  1,   2,   1,   2, 0.5,   1,   1,   2,   1,   0,   1, 0.5,   2,   1,   1),
    "Flying":   (  1,   1,   1, 0.5,   2,   1,   2,   1,   1,   1,   1,   2, 0.5,   1,   1),
    "Psychic":  (  1,   1,   1,   1,   1,   1,   2,   2,   1,   1, 0.5,   1,   1,   1,   1),
    "Bug":      (  1, 0.5,   1,   1,   2,   1, 0.5,   2,   1, 0.5,   2,   1,   1,   1,   1),
    "Rock":     (  1,   2,   1,   1,   1,   2, 0.5,   1, 0.5,   2,   1,   2,   1,   1,   1),
    "Ghost":    (  0,   1,   1,   1,   1,   1,   1,   1,   1,   1,   0,   1,   1,   2,   1),
    "Dragon":   (  1,   1,   1,   1,   1,   1,   1,   1,   1,   1,   1,   1,   1,   1,   2),
}
#a for attacker, d for defender. 1, 2, 0.5, or 0
def effective(a, d):
    """Return the Gen-I damage multiplier of attack type `a` against type `d`.

    Unknown type names print a warning and fall back to 1, matching the
    original if-chain's behaviour.
    """
    if a in _CHART and d in ptypelist:
        return _CHART[a][ptypelist.index(d)]
    print("got baaaaad inputs for effective: %s was attack, %s was defence" % (a, d))
    return 1
def twoeffective(a, d1, d2):
    """Multiplier of attack type `a` against a (possibly dual-typed) defender.

    Mono-typed defenders are expressed as d1 == d2 and counted only once.
    Always returns a float.
    """
    component1 = effective(a, d1)
    # Only multiply in the second type when it actually differs.
    component2 = effective(a, d2) if d2 != d1 else 1
    return float(component1) * float(component2)
def read_opponent_type( line ):
    """Parse up to two Pokemon type abbreviations out of a walkthrough line.

    Returns a (type1, type2) pair of full type names.  A mono-typed opponent
    is returned as (t, t); a line without any known abbreviation returns
    ("", "").  If more than two abbreviations match, later matches overwrite
    type2 (same as the original if-chain).
    """
    # Abbreviation(s) -> type name, in the exact order the original checks
    # ran, so the type1/type2 assignment order is preserved.  Both "FGT" and
    # "FTG" are accepted for Fighting (the source data uses both spellings).
    codes = (
        (("FIR",), "Fire"),
        (("NRM",), "Normal"),
        (("ELE",), "Electric"),
        (("PSN",), "Poison"),
        (("FGT", "FTG"), "Fighting"),
        (("GRN",), "Ground"),
        (("GRS",), "Grass"),
        (("FLY",), "Flying"),
        (("GST",), "Ghost"),
        (("DRG",), "Dragon"),
        (("PSY",), "Psychic"),
        (("WTR",), "Water"),
        (("ICE",), "Ice"),
        (("BUG",), "Bug"),
        (("RCK",), "Rock"),
    )
    type1 = ""
    type2 = ""
    for abbrevs, name in codes:
        if any(code in line for code in abbrevs):
            if type1 == "":
                type1 = name
            else:
                type2 = name
    # A single-typed opponent gets its one type in both slots.
    if type2 == "":
        type2 = type1
    return type1, type2
O = [] #for opponents: distinct defender type-sets (1- or 2-element sets) found in the walkthrough
# Scan the walkthrough dump for trainer lines (they start with '#') and
# collect every distinct opponent type combination into O.
with open("PokemonTrainers.txt" ) as walkthru:
    for line in walkthru:
        # Last four characters before the trailing newline; used below to
        # skip "... Exp." summary lines.
        last_four_chars = line[len(line) - 5 : len(line) - 1]
        if line[0] == "#" and last_four_chars != "Exp.":
            #if last_four_chars != "Exp.":
            #if line[len(line) - 9]!= "%":
            #print("funny line: %s our last four: %s" % (line[: len(line) - 1],last_four_chars))
            type1, type2 = read_opponent_type( line )
            if type1 == "" or type2 == "":
                print("bad input:", line)
            # A set collapses mono-typed opponents (type1 == type2) to one element.
            ptypes = set([])
            ptypes.add(type1)
            ptypes.add(type2)
            # Linear de-duplication against the combinations gathered so far.
            alreadygotit = 0
            for typeset in O:
                if ptypes == typeset:
                    alreadygotit = 1
            #and have something to say how many there are of each type!
            if alreadygotit == 1:
                continue
            #print(line[6:17], ptypes)
            O.append(ptypes)
#check for "if a move can take out this kind, it can take out THIS kind"
#but HMMM we may well be interested in moves that can only take out a subset? It might be the only subset we need
'''i1 = 0
while i1 < len(O):
strong_against_i1 = set([])
for attacktype in ptypelist:
if len( O[i1] ) == 2:
if twoeffective( attacktype, list(O[i1])[0], list(O[i1])[1] ) >= 2:
strong_against_i1.add(attacktype)
elif twoeffective( attacktype, list(O[i1])[0], list(O[i1])[0] ) >= 2:
strong_against_i1.add(attacktype)
i2 = i1+1
while i2 < len(O):
strong_against_i2 = set([])
for attacktype in ptypelist:
if len( O[i2] ) == 2:
if twoeffective( attacktype, list(O[i2])[0], list(O[i2])[1] ) >= 2:
strong_against_i2.add(attacktype)
elif twoeffective( attacktype, list(O[i2])[0], list(O[i2])[0] ) >= 2:
strong_against_i2.add(attacktype)
if strong_against_i2 <= strong_against_i1:
print("if you've got one of these moves %s that can take care of %s, then you can also take care of %s, which requires these moves %s" % (strong_against_i1,O[i1],O[i2],strong_against_i2) )
O.pop(i2)
else:
i2 = i2 + 1
i1 = i1 + 1'''
#also, moves that are objectively inferior to other moves? eg normal and everything else?
#That may not exist though. A may be strong against all that B is and more, but B may have more weaknesses
print("distinct opponents: %s" % O)
# For every unordered 4-type moveset, find the opponents that none of the
# four move types is super-effective (>= 2x) against.  The original computed
# this twice (once to count knockouts, once to list non-knockouts); a single
# pass produces both.
for movetype1 in ptypelist:
    #we need a bug for our psychic, fighting, and ground
    for movetype2 in ptypelist:
        if ptypelist.index(movetype2) <= ptypelist.index(movetype1): continue
        for movetype3 in ptypelist:
            if ptypelist.index(movetype3) <= ptypelist.index(movetype2): continue
            for movetype4 in ptypelist:
                if ptypelist.index(movetype4) <= ptypelist.index(movetype3): continue
                moveset = (movetype1, movetype2, movetype3, movetype4)
                # Opponents none of the four move types can knock out.
                nonknockouts = []
                for o in O:
                    types = list(o)
                    # Mono-typed opponents are stored as a 1-element set.
                    second = types[1] if len(o) == 2 else types[0]
                    if not any(twoeffective(attacktype, types[0], second) >= 2
                               for attacktype in moveset):
                        nonknockouts.append(o)
                knockouts = len(O) - len(nonknockouts)
                if knockouts >= 27:  #change this number if you want to be less strict
                    #if {'Fighting'} not in nonknockouts: #can use this to make sure a few aren't printed
                    print("we have a big one: %s, %s, %s, %s although it can't do %s" % (
                        movetype1, movetype2, movetype3, movetype4, nonknockouts))
'''
How to interpret these results?
One thing to do would be to look at which ones are unable to do obscure types that you can attach to a specific pokemon. There might be subsets though!
Our anti-normals
Grass, Fighting, Bug, Rock although it can't do [{'Electric'}, {'Fighting'}, {'Dragon'}]
Ice, Fighting, Bug, Rock although it can't do [{'Electric'}, {'Fighting'}, {'Water'}]
- so find one in your team that's NOT STRONG to these types, they are your anti-normals.
Unlikely scenario: there is an electric or fighting type that doesn't HAVE any of those moves, and actually has lots of moves that can only be defended against by this weak-to-fighting type
Double Kick 30
Low Kick 50
Rolling Kick 60
Jump Kick 70
Submission 80
Hi Jump Kick 85
Leech life 20
Pin Missile 14
Twineedle 25
Rock Throw 50
Rock Slide 75
Peck 35
Wing Attack 35
Fly 70
Drill Peck 80
Sky Attack 140
Bone club 65
Dig 100
Earthquake 100
aurora beam 65
ice punch 75
ice beam 95
blizzard 120
absorb 20
vine whip 35
mega drain 40
razor leaf 55
petal dance 70
solar beam 120
confusion 50
psybeam 65
psychic 90
''' | [
"hamish.todd1@gmail.com"
] | hamish.todd1@gmail.com |
d6ee0bd3568242e4bc6eb8ecacc74d4cf4df7530 | 89c071ebaf9878ec4666148fa04d941621efa4db | /SendEmailApp/wsgi.py | 14ae275248dc9996a101b72cf36f818d015ab884 | [] | no_license | ali-py3/Send_Email_with_Dj | ab8cb49b2388135f372239146bdfba0fd216c5ed | 273f0c0e8be26e999bbe0fd7b8f40e4263315b3d | refs/heads/main | 2023-07-05T05:59:53.946720 | 2021-09-01T19:36:02 | 2021-09-01T19:36:02 | 402,179,670 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for SendEmailApp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Make sure Django knows which settings module to use before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SendEmailApp.settings')
# Module-level WSGI callable that application servers import (see docstring).
application = get_wsgi_application()
| [
"man.pyyy@gmail.com"
] | man.pyyy@gmail.com |
597a9df0a49588dbd4e817b3c9e418c5967c0c9c | 61425d834741e9a043899a5e4ad9007bc3c387ba | /Code/Problem Set 2/2.6.py | bd6275ff41a0460bb130bf8f3f8ad40eb5f0a91c | [] | no_license | Plecebo/Data-Analysis-P2 | 84c99a9559ba51e416e7977f7ccdaea3e0514c82 | a8ed08cdbe82920992a87605d3c1f227453915de | refs/heads/master | 2016-09-06T18:14:15.086719 | 2015-08-24T01:20:19 | 2015-08-24T01:20:19 | 40,058,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | def create_master_turnstile_file(filenames, output_file):
'''
Write a function that takes the files in the list filenames, which all have the
columns 'C/A, UNIT, SCP, DATEn, TIMEn, DESCn, ENTRIESn, EXITSn', and consolidates
them into one file located at output_file. There should be ONE row with the column
headers, located at the top of the file. The input files do not have column header
rows of their own.
For example, if file_1 has:
'C/A, UNIT, SCP, DATEn, TIMEn, DESCn, ENTRIESn, EXITSn'
line 1 ...
line 2 ...
and another file, file_2 has:
'C/A, UNIT, SCP, DATEn, TIMEn, DESCn, ENTRIESn, EXITSn'
line 3 ...
line 4 ...
line 5 ...
We need to combine file_1 and file_2 into a master_file like below:
'C/A, UNIT, SCP, DATEn, TIMEn, DESCn, ENTRIESn, EXITSn'
line 1 ...
line 2 ...
line 3 ...
line 4 ...
line 5 ...
'''
with open(output_file, 'w') as master_file:
master_file.write('C/A,UNIT,SCP,DATEn,TIMEn,DESCn,ENTRIESn,EXITSn\n')
for filename in filenames:
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
master_file.write(line)
| [
"larry@schwerzler.com"
] | larry@schwerzler.com |
7704976a916bf3892c53447a2e1714f2a596d4ec | b2cbc9bcd0566695c32b028772e6316ce81fb7fe | /password_generator/settings.py | 135060064a7b764ce5d449aefa2eebd34c8b5ce9 | [] | no_license | AvinashDhamnani/Django-password-generator | fed156e8c6e29bf102e500bbf4a2226ecad743cf | 536701955d9c4a14e6e1d65a2bcd38dc2afde677 | refs/heads/master | 2023-02-26T08:20:31.872858 | 2021-02-05T10:46:48 | 2021-02-05T10:46:48 | 336,248,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,115 | py | """
Django settings for password_generator project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = 'fxs&v7&4+xbtc4^-aiq%az3+yqxry50j(2$im=@8h(%qly32%8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable while DEBUG is True; list the served hostnames here
# before turning DEBUG off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'generator',  # project app (the password generator itself)
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'password_generator.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'password_generator.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Development default: file-based SQLite next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"avinashdhamnani31@gmail.com"
] | avinashdhamnani31@gmail.com |
fdaa58c6c8d679b9e5214b8455713c37be838bfc | fa097257d8ec4167db24b17076a38e60dbbb0b36 | /Code/27. Quadratic primes.py | fe1e7e6ad3f76dd53febf405107f55c1dfee5f04 | [] | no_license | SergeyShk/Project-Euler | 5e0d5bb3f03e2baaa25bd895f53603026fb147c7 | 6f3019ca88a545bf85e714526aa6ca661f89e4a9 | refs/heads/master | 2021-08-16T03:04:28.000466 | 2020-04-15T20:13:29 | 2020-04-15T20:13:29 | 159,189,991 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | '''
Euler discovered the remarkable quadratic formula:
n^2+n+41
It turns out that the formula will produce 40 primes for the consecutive integer values 0≤n≤39. However, when n=40,40^2+40+41=40(40+1)+41 is divisible by 41, and certainly when n=41,41^2+41+41 is clearly divisible by 41.
The incredible formula n^2−79n+1601 was discovered, which produces 80 primes for the consecutive values 0≤n≤79. The product of the coefficients, −79 and 1601, is −126479.
Considering quadratics of the form:
n^2+an+b, where |a|<1000 and |b|≤1000
where |n| is the modulus/absolute value of n
e.g. |11|=11 and |−4|=4
Find the product of the coefficients, a and b, for the quadratic expression that produces the maximum number of primes for consecutive values of n, starting with n=0.
'''
def problem_27(a, b):
    """Solve Project Euler problem 27 for coefficient bounds |a| and |b|.

    Searches quadratics n^2 + a*n + b for the pair of coefficients producing
    the longest run of primes for consecutive n starting at 0, and returns
    the product of those coefficients.  Ties keep the first pair found.

    Fixes over the original: values < 2 (negatives, 0 and 1) are no longer
    counted as prime (the old test only looked for small divisors, so e.g.
    1 and -3 passed), and the loop variables no longer shadow the arguments.
    """
    def _is_prime(num):
        # Negative numbers, 0 and 1 are not prime by definition.
        if num < 2:
            return False
        for i in range(2, int(num ** 0.5) + 1):
            if num % i == 0:
                return False
        return True

    best_a, best_b, best_n = 0, 0, 0
    for coeff_a in range(-a, a + 1):
        for coeff_b in range(-b, b + 1):
            # Count consecutive primes produced starting at n = 0.
            n = 0
            while _is_prime(n * n + coeff_a * n + coeff_b):
                n += 1
            if n > best_n:
                best_a, best_b, best_n = coeff_a, coeff_b, n
    return best_a * best_b
print(problem_27(1000, 1000)) | [
"kouki.sergey@gmail.com"
] | kouki.sergey@gmail.com |
b072a98d63408faeeb970f8289713393ca20a94d | 37a8128efba642e8c7613652621297fc6bbd1dd3 | /courses/migrations/0001_initial.py | 24b8cce96f6c77123a662b202c27e64502c8980b | [] | no_license | amosISA/elearning_django | be75c2e6eeaa10d7202d96a1d8e139d5b6828b44 | f676076be3a7a2a20c45f565be01e4ba3fcdd274 | refs/heads/master | 2021-05-09T13:33:04.174597 | 2018-01-26T13:15:01 | 2018-01-26T13:15:01 | 119,036,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,254 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-01-26 10:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see the header comment); do not hand-edit an
    # applied migration -- create a new migration for schema changes instead.
    # Initial schema of the courses app: Subject -> Course -> Module.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Course: slug-addressable, owned by a user, ordered newest-first.
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('slug', models.SlugField(max_length=200, unique=True)),
                ('overview', models.TextField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='courses_created', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-created',),
            },
        ),
        # Module: a section belonging to a single course.
        migrations.CreateModel(
            name='Module',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField(blank=True)),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='modules', to='courses.Course')),
            ],
        ),
        # Subject: top-level category, ordered alphabetically.
        migrations.CreateModel(
            name='Subject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('slug', models.SlugField(max_length=200, unique=True)),
            ],
            options={
                'ordering': ('title',),
            },
        ),
        # Added after Course creation because Subject is declared later above.
        migrations.AddField(
            model_name='course',
            name='subject',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='courses', to='courses.Subject'),
        ),
    ]
| [
"amosisa700@gmail.com"
] | amosisa700@gmail.com |
520f26abdaa8e6c9525b21e583470adbafaf529c | c62710853eaaaaa554fe2ef348db1b615ec814fb | /dnn/dongyaxing/PosTagging/HMMforword.py | 21ae4aeeb9c1ad9bfb8c1b29e5269d5541909492 | [] | no_license | CodingWD/course | e38d75871e17627d1ce24beef6700ef65e606640 | e80eeba61be9c0960002259a452babce44ee84a1 | refs/heads/master | 2023-04-06T21:47:54.940420 | 2021-04-15T10:25:30 | 2021-04-15T10:25:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,325 | py | # coding=utf-8
import numpy as np
class HiddenMarkov:
    """Hidden Markov model demo: forward algorithm and Viterbi decoding.

    Q: state set, V: observation alphabet, A: transition matrix,
    B: emission matrix, X: observed sequence, PI: initial distribution.
    """
    def forward(self, Q, V, A, B, X, PI):
        """Forward algorithm: print alpha values and P(X) step by step."""
        N = len(Q)  # number of hidden states (how many boxes)
        M = len(X)  # length of the observation sequence (how many draws)
        alphas = np.zeros((N, M))  # forward-probability matrix alphas
        T = M  # number of time steps == length of the observation sequence
        for t in range(T):  # time step t
            indexOfXi = V.index(X[t])  # index of observation X[t] in V, i.e. red or white ball
            for i in range(N):  # iterate over every state (box)
                if t == 0:  # initialisation step
                    alphas[i][t] = PI[t][i] * B[i][indexOfXi]  # fill column 0 of alphas: first-step probabilities
                    print("alphas_1(%d) = p%d * B%d(x1) = %f" % (i, i, i, alphas[i][t]))
                else:
                    # dot product of the previous alpha column with column i of A
                    alphas[i][t] = np.dot([alpha[t - 1] for alpha in alphas], [a[i] for a in A]) * B[i][indexOfXi]
                    print("alpha_%d(%d) = [sigma(i=0,..,%d ) alpha_%d(i)*ai%d] * B%d(x%d) = %f" % (
                        t, i, len(A), t - 1, i, i, t, alphas[i][t]))
        print(alphas)
        P = sum(alpha[M - 1] for alpha in alphas)  # P(X): sum over the last column of alphas
        print("P = %f" % P)
    def viterbi(self, Q, V, A, B, X, PI):
        """Viterbi algorithm: print deltas/psis and the most probable state path."""
        N = len(Q)  # number of hidden states (how many boxes)
        M = len(X)  # length of the observation sequence (how many draws)
        deltas = np.zeros((N, M))  # delta matrix: best path probability ending in each state
        psis = np.zeros((N, M))  # backpointers: for each step, the best predecessor state
        Y = np.zeros((1, M))  # the most probable (maximum probability) state sequence
        for t in range(M):  # time step t
            realT = t + 1  # 1-based step number, used only for printing
            indexofXi = V.index(X[t])
            for i in range(N):  # box i
                realI = i + 1  # 1-based box number, used only for printing
                if t == 0:  # first draw
                    deltas[i][0] = PI[0][i] * B[i][indexofXi]  # initial value
                    psis[i][0] = 0  # no predecessor at t=0, so psis[i][0] is 0
                    print("delta_1(%d) = pi_%d * B%d(x1) = %.2f * %.2f = %.2f" % (realI, realI, realI, PI[0][i], B[i][indexofXi], deltas[i][0]))
                    print('psis_1(%d) = 0' % realT)  # no source-box decision exists on the first draw, fixed at 0
                    print()
                else:
                    deltas[i][t] = np.max(
                        np.multiply([delta[t-1] for delta in deltas], [a[i] for a in A])
                    ) * B[i][indexofXi]
                    temp = np.max(np.multiply([delta[t-1] for delta in deltas], [a[i] for a in A]))  # keep the max for printing
                    print("delta_%d(%d) = max[delta_%d(j) * Aj%d] * B%d(x%d) = %.2f * %.2f = %.5f" % (realT, realI, t, realI, realI, realT, temp, B[i][indexofXi], deltas[i][t]))
                    psis[i][t] = np.argmax(np.multiply([delta[t-1] for delta in deltas], [a[i] for a in A])) + 1
                    print("psis_%d(%d) = argmax[delta_%d(j)Aj%d] = %d" % (realT, realI, t, realI, psis[i][t]))
                    print()
        print("deltas: \n", deltas)
        print("psis:\n", psis)
        print()
        # Index of the maximum in the last column of deltas (1-based).
        Y[0][M-1] = np.argmax([delta[M-1] for delta in deltas]) + 1  # numbering starts at 1; argmax over the final column
        print("Y[0][M-1]: \n", Y[0][M-1])
        print("Y%d=argmax[deltaT(i)]=%d" % (M, Y[0][M-1]))  # endpoint of the optimal path
        print()
        for t in range(M-2, -1, -1):  # backtrack the optimal path right-to-left; Y is filled backwards too
            Y[0][t] = psis[int(Y[0][t+1]) - 1][t+1]  # psis stores 1-based state numbers, hence the -1 index shift
            print("Y%d = psis%d(Y%d) = %d" % (t+1, t+2, t+2, Y[0][t]))
        print()
        print("最大概率的状态序列 Y 是:", Y)
# Classic three-box / red-white-ball HMM example.
Q = [1, 2, 3]  # hidden states: box 1..3
V = ['红', '白']  # observation alphabet: red ('红') / white ('白') ball
A = [[0.5, 0.2, 0.3], [0.3, 0.5, 0.2], [0.2, 0.3, 0.5]]  # state transition matrix
B = [[0.5, 0.5], [0.4, 0.6], [0.7, 0.3]]  # emission probabilities per box
X = ['红', '白', '红']  # observed sequence: red, white, red
PI = [[0.2, 0.4, 0.4]] # initial distribution, declared as a 1-row, 3-column array
hmm = HiddenMarkov()
# hmm.forward(Q, V, A, B, X, PI)
hmm.viterbi(Q, V, A, B, X, PI)
| [
"379992467@qq.com"
] | 379992467@qq.com |
9c221234f925ac473501277c79617d2849dd2eea | 8d9e19ac32bab45f608e09e7c8836e91ac7d0914 | /container_workflow_tool/main.py | 73fff29ce36b8e9e363baa398810995d1b421dda | [
"MIT"
] | permissive | injeti-manohar/container-workflow-tool | 22ed533bf74c9a102f1b595672d90ad5d1d49558 | 25f98751e3584e52e55db0fdd641922cd67e848a | refs/heads/master | 2020-05-02T03:30:37.865495 | 2018-12-13T14:20:26 | 2019-01-07T13:59:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,767 | py | # description : Script for helping with the rebuild of container images.
# author : pkubat@redhat.com
# notes : Rewritten from a shell script originally created by hhorak@redhat.com.
# python_version : 3.x
import subprocess
import os
import shutil
import re
import tempfile
import pprint
import getpass
import logging
from copy import copy
from git import Repo
import container_workflow_tool.utility as u
from container_workflow_tool.koji import KojiAPI
from container_workflow_tool.distgit import DistgitAPI
from container_workflow_tool.utility import RebuilderError
from container_workflow_tool.decorators import needs_base, needs_brewapi, needs_dhapi
from container_workflow_tool.decorators import needs_distgit
from container_workflow_tool.config import Config
class ImageRebuilder:
"""Class for rebuilding Container images."""
    def __init__(self, base_image, rebuild_reason=None, config="default.yaml", release="current"):
        """ Init method of ImageRebuilder class
        Args:
            base_image (str): image id to be used as a base image
            rebuild_reason (str, optional): reason for the rebuild,
                used in commit
            config (str, optional): configuration file to be used
            release (str, optional): image set/release inside the
                configuration file (defaults to "current")
        """
        self.base_image = base_image
        # Lazily-created API helpers, see the _setup_* methods.
        self.brewapi = None
        self.dhapi = None
        self.distgit = None
        self.commit_msg = None
        self.args = None
        # Working-directory override; when unset a per-base-image tempdir is used.
        self.tmp_workdir = None
        self.repo_url = None
        self.jira_header = None
        self.conf_name = config
        self.rebuild_reason = rebuild_reason
        # Image filtering knobs (populated from CLI args by _setup_args).
        self.do_image = None
        self.exclude_image = None
        self.do_set = None
        self.check_script = None
        self.image_set = None
        # When truthy, _check_kerb_ticket() skips the klist check.
        self.disable_klist = None
        self.latest_release = None
        # Set up logging first, then load the configuration (self.conf).
        self._setup_logger()
        self.set_config(self.conf_name, release=release)
@classmethod
def from_args(cls, args):
"""
Creates an ImageRebuilder instance from argparse arguments.
"""
rebuilder = ImageRebuilder(base_image=args.base)
rebuilder._setup_args(args)
return rebuilder
    def _setup_args(self, args):
        """Apply parsed argparse options onto this instance.

        Command-specific options are read with getattr() guards because
        they only exist on some subcommands' namespaces.
        """
        self.args = args
        if args.config:
            # Format "config.yaml[:image_set]"; image set defaults to 'current'.
            conf = args.config.split(':')
            config_fn = conf[0]
            image_set = conf[1] if len(conf) > 1 else 'current'
            self.set_config(config_fn, image_set)
        if args.tmp:
            self.set_tmp_workdir(args.tmp)
        if args.clear_cache:
            self.clear_cache()
        if args.do_image:
            self.set_do_images(args.do_image)
        if args.exclude_image:
            self.set_exclude_images(args.exclude_image)
        if args.do_set:
            self.set_do_set(args.do_set)
        self.logger.setLevel(u._transform_verbosity(args.verbosity))
        # Command specific
        # TODO: generalize?
        if getattr(args, 'repo_url', None) is not None and args.repo_url:
            self.set_repo_url(args.repo_url)
        if getattr(args, 'commit_msg', None) is not None:
            self.set_commit_msg(args.commit_msg)
        if getattr(args, 'rebuild_reason', None) is not None and args.rebuild_reason:
            self.rebuild_reason = args.rebuild_reason
        if getattr(args, 'check_script', None) is not None and args.check_script:
            self.check_script = args.check_script
        if getattr(args, 'disable_klist', None) is not None and args.disable_klist:
            self.disable_klist = args.disable_klist
        if getattr(args, 'latest_release', None) is not None and args.latest_release:
            self.latest_release = args.latest_release
        # Image set to build
        if getattr(args, 'image_set', None) is not None and args.image_set:
            self.image_set = args.image_set
def _get_set_from_config(self, layer):
i = getattr(self.conf, layer, [])
if i is None:
err_msg = "Image set '{}' not found in config.".format(layer)
raise RebuilderError(err_msg)
return i
    def _setup_distgit(self):
        # Lazily construct the dist-git helper; an existing instance is
        # reused so repeated calls are cheap and idempotent.
        if not self.distgit:
            self.distgit = DistgitAPI(self.base_image, self.conf,
                                      self.rebuild_reason, copy(self.logger))
    def _setup_brewapi(self):
        # Lazily construct the koji/brew client; reuses an existing instance.
        # The logger is copied so the API client can adjust it independently.
        if not self.brewapi:
            self.brewapi = KojiAPI(self.conf, copy(self.logger),
                                   self.latest_release)
def _setup_dhapi(self):
from dhwebapi.dhwebapi import DockerHubWebAPI, DockerHubException
if not self.dhapi:
token = None
username = None
password = None
try:
token = self.conf.DOCKERHUB_TOKEN
self.dhapi = DockerHubWebAPI(token=token)
return
except (AttributeError, DockerHubException):
pass
try:
username = self.conf.DOCKERHUB_USERNAME
password = self.conf.DOCKERHUB_PASSWORD
except AttributeError:
if username is None:
username = input("Dockerhub username: ")
if password is None:
password = getpass.unix_getpass(prompt="Password for user " + username + ": ")
self.dhapi = DockerHubWebAPI(username, password)
def _setup_logger(self, level=logging.INFO, user_logger=None):
# If a logger has been provided, do not setup own
if user_logger and isinstance(user_logger, logging.Logger):
logger = user_logger
else:
logger = u.setup_logger("main", level)
self.logger = logger
return logger
def _check_kerb_ticket(self):
if not self.disable_klist:
ret = subprocess.run(["klist"], stdout=subprocess.DEVNULL)
if ret.returncode:
raise(RebuilderError("Kerberos token not found."))
def _change_workdir(self, path):
self.logger.info("Using working directory: " + path)
os.chdir(path)
    @needs_base
    def _get_tmp_workdir(self, setup_dir=True):
        """Return the temporary working directory for the current base image.

        Preference order: an explicitly configured workdir, an existing
        temp directory left over from a previous run (matched by a
        base-image-derived prefix), and finally a freshly created one
        (only when *setup_dir* is True).

        Args:
            setup_dir (bool, optional): create a directory if none exists.

        Returns:
            Path to the workdir, or None when nothing exists and
            *setup_dir* is False.
        """
        # Check if the workdir has been set by the user
        if self.tmp_workdir:
            return self.tmp_workdir
        tmp = None
        # ':' is awkward in directory names; use '-' in the prefix instead.
        tmp_id = self.base_image.replace(':', '-')
        # Check if there is an existing tempdir for the build
        for f in os.scandir(tempfile.gettempdir()):
            if os.path.isdir(f.path) and f.name.startswith(tmp_id):
                tmp = f.path
                break
        else:
            # for/else: reached only when no existing directory was found.
            if setup_dir:
                tmp = tempfile.mkdtemp(prefix=tmp_id)
        return tmp
def set_do_images(self, val):
self.do_image = val
def set_exclude_images(self, val):
self.exclude_image = val
def set_do_set(self, val):
self.do_set = val
def _get_images(self):
images = []
if self.do_set:
# Use only the image sets the user asked for
for layer in self.do_set:
images += self._get_set_from_config(layer)
else:
# Go through all known layers and create a single image list
for (order, layer) in self.conf.layers.items():
i = getattr(self.conf, layer, [])
images += i
return self._filter_images(images)
def _filter_images(self, base):
if self.do_image:
return [i for i in base if i["component"] in self.do_image]
elif self.exclude_image:
return [i for i in base if i["component"] not in self.exclude_image]
else:
return base
def _prebuild_check(self, image_set, branches=[]):
tmp = self._get_tmp_workdir(setup_dir=False)
if not tmp:
raise RebuilderError("Temporary directory structure does not exist. Pull upstream first.")
self.logger.info("Checking for correct repository configuration ...")
releases = branches
for image in image_set:
component = image["component"]
cwd = os.path.join(tmp, component)
try:
repo = Repo(cwd)
except GitError as e:
self.logger.error("Failed to open repository for {}", component)
raise e
# This checks if any of the releases can be found in the name of the checked-out branch
if releases and not [i for i in releases if i in str(repo.active_branch)]:
raise RebuilderError("Unexpected active branch for {}: {}".format(component,
repo.active_branch))
def _build_images(self, image_set, custom_args=[], branches=[]):
if not image_set:
# Nothing to build
self.logger.warn("No images to build, exiting.")
return
if not branches:
# Fill defaults from config if not provided
for release in self.conf.releases:
branches += [self.conf.releases[release]["current"]]
self._prebuild_check(image_set, branches)
procs = []
tmp = self._get_tmp_workdir(setup_dir=False)
for image in image_set:
component = image["component"]
cwd = os.path.join(tmp, component)
self.logger.info("Building image {} ...".format(component))
args = [u._get_packager(self.conf), 'container-build']
if custom_args:
args.extend(custom_args)
proc = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
# Append the process and component information for later use
procs.append((proc, component))
self.logger.info("Fetching tasks...")
for proc, component in procs:
self.logger.debug("Query component: {}".format(component))
# Iterate until a taskID is found
for stdout in iter(proc.stdout.readline, ""):
if "taskID" in stdout:
self.logger.info("{} - {}".format(component,
stdout.strip()))
break
else:
# If we get here the command must have failed
# The error will get printed out later when getting all builds
temp = "Could not find task for {}!"
self.logger.warning(temp.format(component))
self.logger.info("Waiting for builds...")
timeout = 30
while procs:
self.logger.debug("Looping over all running builds")
for proc, image in procs:
out = err = None
try:
self.logger.debug("Waiting {} seconds for {}".format(timeout,
image))
out, err = proc.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
msg = "{} not yet finished, checking next build"
self.logger.debug(msg.format(image))
continue
self.logger.info("{} build has finished".format(image))
if err:
# Write out stderr if we encounter an error
err = u._4sp(err)
self.logger.error(err)
procs.remove((proc, image))
def _get_config_path(self, config):
if not os.path.isabs(config):
base_path = os.path.abspath(__file__)
dir_path = os.path.dirname(base_path)
path = os.path.join(dir_path, "config/", config)
else:
path = config
return path
def _not_yet_implemented(self):
print("Method not yet implemented.")
    @needs_brewapi
    def get_brew_builds(self, print_time=True):
        """Returns information about builds in brew

        Builds a wiki-style table ("||" header cells, "|" data cells) with
        one row per image that has an NVR in brew.

        Args:
            print_time (bool, optional): Print time finished for a build.

        Returns:
            str: Resulting brew build text
        """
        output = []
        header = "||Component||Build||Image_name||"
        if print_time:
            header += "Build finished||"
        header += "Archives||"
        output.append(header)
        nvrs = (self.brewapi.get_nvrs(self._get_images()))
        for item in nvrs:
            nvr, name, component, *rest = item
            # No nvr found for the image, might not have been built
            if nvr is None:
                continue
            else:
                template = "|{0}|{1}|{2}|"
                # Version-release: the last two dash-separated NVR fields.
                vr = re.search(".*-([^-]*-[^-]*)$", nvr).group(1)
                build_id = self.brewapi.get_buildinfo(nvr)["build_id"]
                archives = self.brewapi.brew.listArchives(build_id)
                archive = archives[0]["extra"]
                # Image name as recorded in the container's "name" label.
                name = archive["docker"]["config"]["config"]["Labels"]["name"]
                image_name = "{name}:{vr}".format(name=name, vr=vr)
                result = template.format(component, nvr, image_name)
                if print_time:
                    result += self.brewapi.get_time_built(nvr) + '|'
                result += str(len(archives))
                output.append(result)
        return '\n'.join(output)
def set_config(self, conf_name, release="current"):
"""
Use a configuration file other than the current one.
The configuration file used must be located in the standard 'config' directory.
Args:
config(str): Name of the configuration file (filename)
release(str, optional): ID of the release to be used inside the config
"""
path = self._get_config_path(conf_name)
self.logger.debug("Setting config to {}", path)
with open(path) as f:
newconf = Config(f, release)
self.conf = newconf
# Set config for every module that is set up
if self.brewapi:
self.brewapi.conf = newconf
if self.distgit:
self.distgit.conf = newconf
def set_tmp_workdir(self, tmp):
"""
Sets the temporary working directory to the one provided.
The directory has to already exist.
Args:
tmp(str): location of the directory to be used
"""
if os.path.isdir(tmp):
self.tmp_workdir = os.path.abspath(tmp)
else:
raise RebuilderError("Provided working directory does not exist.")
    @needs_distgit
    def set_commit_msg(self, msg):
        """
        Set the commit message to some other than the default one.

        Args:
            msg(str): Message to be written into the commit.
        """
        # Delegates to the dist-git layer, which owns commit creation.
        self.distgit.set_commit_msg(msg)
    def clear_cache(self):
        """Clears various caches used in the rebuilding process

        Removes the on-disk git/temp working directory, resets the cached
        NVR list and clears the brew object cache when a client exists.
        """
        self.logger.info("Removing cached data and git storage.")
        # Clear ondisk storage for git and the brew cache
        tmp = self._get_tmp_workdir(setup_dir=False)
        shutil.rmtree(tmp, ignore_errors=True)
        # If the working directory has been set by the user, recreate it
        # (the user-supplied path is expected to keep existing).
        if self.tmp_workdir:
            os.makedirs(tmp)
        # Clear koji object caches
        self.nvrs = []
        if self.brewapi:
            self.brewapi.clear_cache()
    def set_repo_url(self, repo_url):
        """Repofile url setter

        Sets the url of .repo file used for the build.
        NOTE(review): only stored here; presumably consumed by the build
        command -- it is not referenced elsewhere in this class.

        Args:
            repo_url: url of .repo file used for the build
        """
        self.repo_url = repo_url
def list_images(self):
"""Prints list of images that we work with"""
for i in self._get_images():
print(i["component"])
def print_upstream(self):
"""Prints the upstream name and url for images used in config"""
template = "{component} {img_name} {ups_name} {url}"
for i in self._get_images():
ups_name = re.search(".*\/([a-zA-Z0-9-]+).git",
i["git_url"]).group(1)
print(template.format(component=i["component"], url=i["git_url"],
ups_name=ups_name, img_name=i["name"]))
def show_config_contents(self):
"""Prints the symbols and values of configuration used"""
for key in self.conf:
value = getattr(self.conf, key)
# Do not print clutter the output with unnecessary content
if key in ["raw"]:
continue
print(key + ":")
pprint.pprint(value, compact=True, width=256, indent=4)
def build_images(self, image_set=None):
"""
Build images specified by image_set (or self.image_set)
"""
if image_set is None and self.image_set is None:
raise RebuilderError("image_set is None, build cancelled.")
if image_set is None:
image_set = self.image_set
image_config = self._get_set_from_config(image_set)
images = self._filter_images(image_config)
self._build_images(images)
def print_brew_builds(self, print_time=True):
"""Prints information about builds in brew
Args:
print_time (bool, optional): Print time finished for a build.
Returns:
str: Resulting brew build text
"""
print(self.get_brew_builds(print_time=print_time))
# Dist-git method wrappers
    @needs_distgit
    def pull_downstream(self):
        """Pulls downstream dist-git repositories and does not make any further changes to them

        Additionally runs a script against each repository if check_script is set, checking its exit value.
        Requires a Kerberos ticket for dist-git access.
        """
        self._check_kerb_ticket()
        tmp = self._get_tmp_workdir()
        self._change_workdir(tmp)
        images = self._get_images()
        for i in images:
            self.distgit._clone_downstream(i["component"], i["git_branch"])
        # If check script is set, run the script provided for each config entry
        if self.check_script:
            for i in images:
                self.distgit.check_script(i["component"], self.check_script,
                                          i["git_branch"])
@needs_distgit
def pull_upstream(self):
"""Pulls upstream git repositories and does not make any further changes to them
Additionally runs a script against each repository if check_script is set, checking its exit value.
"""
tmp = self._get_tmp_workdir()
self._change_workdir(tmp)
images = self._get_images()
for i in images:
# Use unversioned name as a path for the repository
ups_name = i["name"].split('-')[0]
repo = self.distgit._clone_upstream(i["git_url"],
ups_name,
commands=i["commands"])
# If check script is set, run the script provided for each config entry
if self.check_script:
for i in images:
ups_name = i["name"].split('-')[0]
self.distgit.check_script(i["component"], self.check_script,
os.path.join(ups_name, i["git_path"]))
    @needs_distgit
    def push_changes(self):
        """Pushes changes for all components into downstream dist-git repository

        Raises:
            RebuilderError: when the temp working tree does not exist yet.
        """
        # Check for kerberos ticket
        self._check_kerb_ticket()
        tmp = self._get_tmp_workdir(setup_dir=False)
        if not tmp:
            raise RebuilderError("Temporary directory structure does not exist. Pull upstream/rebase first.")
        self._change_workdir(tmp)
        images = self._get_images()
        self.distgit.push_changes(tmp, images)
    def dist_git_rebase(self):
        """
        Do a rebase against a new base/s2i image.

        Does not pull in upstream changes of layered images.
        """
        # Same pipeline as dist_git_changes, just with rebase semantics.
        self.dist_git_changes(rebase=True)
    @needs_distgit
    def dist_git_changes(self, rebase=False):
        """Method to merge changes from upstream into downstream

        Pulls both downstream and upstream repositories into a temporary directory.
        Merge is done by copying tracked files from upstream into downstream.

        Args:
            rebase (bool, optional): Specifies whether a rebase should be done instead.
        """
        # Check for kerberos ticket
        self._check_kerb_ticket()
        tmp = self._get_tmp_workdir()
        self._change_workdir(tmp)
        images = self._get_images()
        self.distgit.dist_git_changes(images, rebase)
        self.logger.info("\nGit location: " + tmp)
        # Only print CLI follow-up hints when invoked from the command line
        # (self.args is set by _setup_args).
        if self.args:
            template = "./rebuild-helper {} git show"
            self.logger.info("You can view changes made by running:")
            self.logger.info(template.format('--base ' + self.base_image + (' --tmp ' + self.tmp_workdir if self.tmp_workdir else "")))
        if self.args:
            self.logger.info("To push and build run: rebuild-helper git push && rebuild-helper build [base/core/s2i] --repo-url link-to-repo-file")
    @needs_distgit
    def merge_future_branches(self):
        """Merges current branch with future branches

        Requires a Kerberos ticket and an existing working tree.
        """
        # Check for kerberos ticket
        self._check_kerb_ticket()
        tmp = self._get_tmp_workdir()
        self._change_workdir(tmp)
        images = self._get_images()
        self.distgit.merge_future_branches(images)
    @needs_distgit
    def show_git_changes(self, components=None):
        """Shows changes made to tracked files in local downstream repositories

        Walks through all downstream repositories and calls 'git-show' on each of them.

        Args:
            components (list of str, optional): List of components to show changes
                for; defaults to every configured image's component.
        """
        if not components:
            images = self._get_images()
            components = [i["component"] for i in images]
        tmp = self._get_tmp_workdir()
        self._change_workdir(tmp)
        self.distgit.show_git_changes(tmp, components)
    @needs_dhapi
    def update_dh_description(self):  # TODO: handle login if config changes during a run
        """Push each image's upstream README.md as its Docker Hub description.

        Pulls the upstream repositories first, then uploads the README of
        every image to the matching "centos" namespace repository.
        """
        self.pull_upstream()
        imgs = self._get_images()
        for img in imgs:
            #FIXME: Will not work with new config
            # NOTE(review): this unpacks tuple-style image records, while the
            # rest of this class indexes image dicts -- needs updating.
            name, version, component, branch, url, path, *rest = img
            with open(os.path.join(name.split('-')[0], path, "README.md")) as f:
                desc = "".join(f.readlines())
            self.dhapi.set_repository_full_description(namespace="centos", repo_name=name.replace("rhel", "centos"), full_description=desc)
| [
"pkubat@redhat.com"
] | pkubat@redhat.com |
9d500d189cf164e347ced5b1896ebe1a652c66be | b815fa2bf58f4b992037b54f4f5ca7b0c43fef1e | /3D_Latent_Space_GAN/run_model.py | 1b2f310c9f69503176d118b011ac64fa852d6712 | [] | no_license | Fleford/GAN_with_Kalman_filters | c661845d3643a0204a466f65c1fc52f752eb2b27 | d3b4fd616bf6036fdb7065b6faf6d42435f04daf | refs/heads/master | 2022-01-16T13:00:58.814026 | 2022-01-05T05:18:51 | 2022-01-05T05:18:51 | 219,023,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,938 | py | import torch
import numpy as np
import torch.nn.functional as F
from torch import nn, optim
from progan_modules import Generator
from utils import get_texture2D_iter
from matplotlib import pyplot as plt
def generate_condition(input_matrix, density=10):
    """Sample sparse conditioning points from a reference field.

    Builds a random binary mask by multiplying ``density + 1`` independent
    0/1 draws (so a cell survives with probability 1 / 2**(density + 1))
    and applies it to the reference array.

    Returns:
        tuple: (masked reference values, binary mask), both float32 tensors
        on the module-level ``device``.
    """
    # ref_k_array = np.loadtxt("k_array_ref_gan.txt")
    ref_k_array = torch.as_tensor(input_matrix, dtype=torch.float32)
    random_matrix = torch.randint_like(ref_k_array, 2)
    for x in range(density):
        # Each extra multiplication thins the surviving points by ~half.
        random_matrix = random_matrix * torch.randint_like(ref_k_array, 2)
    # Enlarge condition points
    # NOTE(review): with sf = 1 the pool/interpolate pair below is a no-op;
    # presumably sf > 1 was once used to grow each point into a patch.
    sf = 1
    avg_downsampler = torch.nn.MaxPool2d((sf, sf), stride=(sf, sf))
    random_matrix = avg_downsampler(random_matrix)
    random_matrix = F.interpolate(random_matrix, scale_factor=sf, mode="nearest")
    output_matrix = ref_k_array * random_matrix
    # output_matrix = torch.zeros_like(input_matrix)
    return torch.as_tensor(output_matrix, dtype=torch.float32, device=device), torch.as_tensor(random_matrix, dtype=torch.float32, device=device)
# Experiment setup: load a trained ProGAN generator checkpoint to inspect
# latent-space structure (symmetry / mixing of z vectors) below.
device = 'cuda:0'
# device = 'cpu'
b_size = 32
input_z_size = 32
generator = Generator(in_channel=128, input_z_channels=input_z_size, pixel_norm=False, tanh=False).to(device)
generator.load_state_dict(torch.load('trial_test18_2021-06-24_12_58/checkpoint/400000_g.model'))
# Prepare z vectors for training (top_bottom)
# Second half of the batch reuses the first half's bottom rows but takes its
# top rows from the batch-reversed first half.
gen_z_first_half = torch.randn(b_size//2, input_z_size, 2, 2).to(device)
gen_z_top_swap = torch.flip(gen_z_first_half[:, :, 0:gen_z_first_half.shape[2]//2, :], dims=[0])
gen_z_bttm = gen_z_first_half[:, :, gen_z_first_half.shape[2]//2:gen_z_first_half.shape[2], :]
gen_z_second_half = torch.cat((gen_z_top_swap, gen_z_bttm), dim=2)
gen_z = torch.cat((gen_z_first_half, gen_z_second_half), dim=0)
print(gen_z_top_swap.shape)
print(gen_z_bttm.shape)
print(gen_z_first_half[0, 0])
print(gen_z_first_half[-1, 0])
print(gen_z_top_swap[0, 0])
print(gen_z_bttm[0, 0])
print(gen_z_second_half[0, 0])
print(gen_z_second_half.shape)
print(gen_z.shape)
print(gen_z[gen_z.shape[0]//2, 0])
# Prep imgs for spatial loss function (top_bottom)
# Same top/bottom swap construction, but at the 8x8 feature-map resolution.
gen_imgs_first_half = torch.randn(b_size//2, input_z_size, 8, 8).to(device)
gen_imgs_top_swap = torch.flip(gen_imgs_first_half[:, :, 0:gen_imgs_first_half.shape[2]//2, :], dims=[0])
gen_imgs_bttm = gen_imgs_first_half[:, :, gen_imgs_first_half.shape[2]//2:gen_imgs_first_half.shape[2], :]
gen_imgs_second_half = torch.cat((gen_imgs_top_swap, gen_imgs_bttm), dim=2)
gen_imgs = torch.cat((gen_imgs_first_half, gen_imgs_second_half), dim=0)
# gen_img_true_z_swap = gen_imgs_top_swap_bttm[gen_imgs_top_swap_bttm.shape[0]//2:, :, :, :]

# print(gen_img_true_z_swap.shape)
# print(gen_img_true_z_swap[0, 0])
print(gen_imgs_top_swap.shape)
print(gen_imgs_bttm.shape)
print(gen_imgs_first_half[0, 0])
print(gen_imgs_first_half[-1, 0])
print(gen_imgs_top_swap[0, 0])
print(gen_imgs_bttm[0, 0])
print(gen_imgs_second_half[0, 0])
print(gen_imgs[gen_imgs.shape[0]//2, 0])
print(gen_imgs.shape)
# sample input data: vector for Generator
# Experiments: a 180-degree rotated z and a 90/10 linear blend of two z's.
gen_z_a = torch.randn(b_size, input_z_size, 2, 2).to(device)
gen_z_b = torch.randn(b_size, input_z_size, 2, 2).to(device)
gen_z_a_rot180 = torch.rot90(gen_z_a, k=2, dims=(2, 3))
# gen_z_a_top = gen_z_a[:,:,0,:]
# gen_z_b_bttm = gen_z_b[:,:,1,:]
# gen_z_ab = torch.stack((gen_z_a_top, gen_z_b_bttm), dim=2)
gen_z_ab = 0.9 * gen_z_a + 0.1 * gen_z_b
print(gen_z_a[0, 0])
print(gen_z_a_rot180[0, 0])
# print(gen_z_b[0, 0])
# print(gen_z_ab[0, 0])
# generate conditioned images
# step=7 / alpha=1.0: full-resolution output of the progressive generator.
fake_image_a = generator(gen_z_a, step=7, alpha=1.0)
fake_image_a_rot180 = generator(gen_z_a_rot180, step=7, alpha=1.0)
# fake_image_b = generator(gen_z_b, step=7, alpha=1.0)
# fake_image_ab = generator(gen_z_ab, step=7, alpha=1.0)
# # plot sample of fake images
# plt.matshow(fake_image_a[0, 0].cpu().detach().numpy())
# plt.matshow(fake_image_a_rot180[0, 0].cpu().detach().numpy())
# plt.matshow(fake_image_b[0, 0].cpu().detach().numpy())
# plt.matshow(fake_image_ab[0, 0].cpu().detach().numpy())
# plt.show()
print()
# Prepare z vectors for training (left_right)
# Mirror of the top/bottom construction above, swapping left columns instead.
gen_z_first_half = torch.randn(b_size//2, input_z_size, 2, 2).to(device)
gen_z_left_swap = torch.flip(gen_z_first_half[:, :, :, 0:gen_z_first_half.shape[3]//2], dims=[0])
gen_z_right = gen_z_first_half[:, :, :, gen_z_first_half.shape[3]//2:gen_z_first_half.shape[3]]
gen_z_second_half = torch.cat((gen_z_left_swap, gen_z_right), dim=3)
gen_z = torch.cat((gen_z_first_half, gen_z_second_half), dim=0)
print(gen_z_left_swap.shape)
print(gen_z_right.shape)
print(gen_z_first_half[0, 0])
print(gen_z_first_half[-1, 0])
print(gen_z_left_swap[0, 0])
print(gen_z_right[0, 0])
print(gen_z_second_half[0, 0])
print(gen_z_second_half.shape)
print(gen_z.shape)
print(gen_z[gen_z.shape[0]//2, 0])
# # Prep imgs for spatial loss function (left_right) (To Do)
# gen_imgs_first_half = torch.randn(b_size//2, input_z_size, 8, 8).to(device)
# gen_imgs_left_swap = torch.flip(gen_imgs_first_half[:, :, :, 0:gen_imgs_first_half.shape[3]//2], dims=[0])
# gen_imgs_right = gen_imgs_first_half[:, :, :, gen_imgs_first_half.shape[3]//2:gen_imgs_first_half.shape[3]]
# gen_imgs_second_half = torch.cat((gen_imgs_left_swap, gen_imgs_right), dim=3)
# gen_imgs = torch.cat((gen_imgs_first_half, gen_imgs_second_half), dim=0)
# # gen_img_true_z_swap = gen_imgs_top_swap_bttm[gen_imgs_top_swap_bttm.shape[0]//2:, :, :, :]
#
# # print(gen_img_true_z_swap.shape)
# # print(gen_img_true_z_swap[0, 0])
# print(gen_imgs_left_swap.shape)
# print(gen_imgs_right.shape)
# print(gen_imgs_first_half[0, 0])
# print(gen_imgs_first_half[-1, 0])
# print(gen_imgs_left_swap[0, 0])
# print(gen_imgs_right[0, 0])
# print(gen_imgs_second_half[0, 0])
# print(gen_imgs[gen_imgs.shape[0]//2, 0])
# print(gen_imgs.shape)
# # Create conditioning mask
# cond_mask = torch.zeros_like(gen_imgs)
# cond_mask[:, :, :, 0:cond_mask.shape[3]//4] = 1
# cond_mask[:, :, :, -cond_mask.shape[3]//4:] = 1
# print(cond_mask[0, 0])
# BUG FIX: removed a stray trailing breakpoint() -- a leftover debug hook
# that would halt the script and hang any non-interactive run.
"fleford@gmail.com"
] | fleford@gmail.com |
e08bf0a06ee1f02e5f21832b2129596a74bd048a | 39bc937620f94c0c9a897b144940855d13006146 | /testchild.py | 095e05d488883ecf7fe17bd67b249f8f07a02efe | [] | no_license | Mechalla17/Test-II | c3f9f115ea10d59b375e3b19a9a7cf3c3d3bd6bb | f17adbd0c4cd568ac51accf5944eeb4304478683 | refs/heads/main | 2023-01-06T09:06:52.644400 | 2020-10-20T09:10:59 | 2020-10-20T09:10:59 | 305,645,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | ## Adding a new file in Branch test
print("inside Branch test")
| [
"noreply@github.com"
] | noreply@github.com |
9c89bef6abd9a663b880012ce3075eaedaf4478a | 9a2505b34a16c29f0789a82bbf9b28ecfa5fd153 | /main.py | b5d4f66c0bc621124cb92812f855cf3e4e709acd | [] | no_license | Grandvitar/Project_41-for-HRs- | 0dc4e8d4f7f3c248aeb30bf927fa52c5e5bf7728 | 00b8a76cf020b3ea70a6bb8f81f7f75bbc47d933 | refs/heads/master | 2023-02-21T20:04:09.911862 | 2021-01-25T21:45:08 | 2021-01-25T21:45:08 | 325,548,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | import Models.View
Models.View.UI()
| [
"malex1982@mail.ru"
] | malex1982@mail.ru |
72efc772d005b199ba2344008550607a08ac3f5d | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Mastering_Probabilistic_Graphical_Models_Using_Python/pgmpy/factors/JointProbabilityDistribution.py | 4e8ceff854544881423360c5c64189a912298156 | [
"MIT"
] | permissive | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 9,729 | py | #!/usr/bin/env python3
from itertools import product
import numpy as np
from pgmpy.factors import Factor
from pgmpy.independencies import Independencies
class JointProbabilityDistribution(Factor):
"""
Base class for Joint Probability Distribution
Public Methods
--------------
conditional_distribution(values)
create_bayesian_model()
get_independencies()
pmap()
marginal_distribution(variables)
minimal_imap()
"""
def __init__(self, variables, cardinality, values):
"""
Initialize a Joint Probability Distribution class.
Defined above, we have the following mapping from variable
assignments to the index of the row vector in the value field:
+-----+-----+-----+-------------------------+
| x1 | x2 | x3 | P(x1, x2, x2) |
+-----+-----+-----+-------------------------+
| x1_0| x2_0| x3_0| P(x1_0, x2_0, x3_0) |
+-----+-----+-----+-------------------------+
| x1_1| x2_0| x3_0| P(x1_1, x2_0, x3_0) |
+-----+-----+-----+-------------------------+
| x1_0| x2_1| x3_0| P(x1_0, x2_1, x3_0) |
+-----+-----+-----+-------------------------+
| x1_1| x2_1| x3_0| P(x1_1, x2_1, x3_0) |
+-----+-----+-----+-------------------------+
| x1_0| x2_0| x3_1| P(x1_0, x2_0, x3_1) |
+-----+-----+-----+-------------------------+
| x1_1| x2_0| x3_1| P(x1_1, x2_0, x3_1) |
+-----+-----+-----+-------------------------+
| x1_0| x2_1| x3_1| P(x1_0, x2_1, x3_1) |
+-----+-----+-----+-------------------------+
| x1_1| x2_1| x3_1| P(x1_1, x2_1, x3_1) |
+-----+-----+-----+-------------------------+
Parameters
----------
variables: list
List of scope of Joint Probability Distribution.
cardinality: list, array_like
List of cardinality of each variable
value: list, array_like
List or array of values of factor.
A Joint Probability Distribution's values are stored in a row
vector in the value using an ordering such that the left-most
variables as defined in the variable field cycle through their
values the fastest.
Examples
--------
>>> from pgmpy.factors import JointProbabilityDistribution
>>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8)/8)
>>> print(prob)
print(prob)
x1 x2 x3 P(x1, x2, x3)
x1_0 x2_0 x3_0 0.125
x1_0 x2_0 x3_1 0.125
x1_0 x2_1 x3_0 0.125
x1_0 x2_1 x3_1 0.125
x1_1 x2_0 x3_0 0.125
x1_1 x2_0 x3_1 0.125
x1_1 x2_1 x3_0 0.125
x1_1 x2_1 x3_1 0.125
"""
if np.isclose(np.sum(values), 1):
Factor.__init__(self, variables, cardinality, values)
else:
raise ValueError("The probability values doesn't sum to 1.")
def __repr__(self):
var_card = ", ".join(['{var}:{card}'.format(var=var, card=card)
for var, card in zip(self.variables, self.cardinality)])
return "<Joint Distribution representing P({var_card}) at {address}>".format(address=hex(id(self)),
var_card=var_card)
    def __str__(self):
        # Render via Factor's table formatter, labelling values with "P".
        return self._str(phi_or_p='P')
    def marginal_distribution(self, variables, inplace=True):
        """
        Returns the marginal distribution over variables.

        Parameters
        ----------
        variables: string, list, tuple, set, dict
            Variable or list of variables over which marginal distribution needs
            to be calculated

        inplace: bool, optional
            When True (default) this distribution is modified in place;
            otherwise the marginalized copy is returned.

        Examples
        --------
        >>> from pgmpy.factors import JointProbabilityDistribution
        >>> values = np.random.rand(12)
        >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], values/np.sum(values))
        >>> prob.marginal_distribution(['x1', 'x2'])
        >>> print(prob)
        x1    x2      P(x1, x2)
        x1_0  x2_0    0.290187723512
        x1_0  x2_1    0.203569992198
        x1_0  x2_2    0.00567786144202
        x1_1  x2_0    0.116553704043
        x1_1  x2_1    0.108469538521
        x1_1  x2_2    0.275541180284
        """
        # Marginalize out every variable NOT in the requested set; a single
        # variable name is first wrapped into a one-element list.
        return self.marginalize(list(set(list(self.variables)) -
                                     set(variables if isinstance(
                                         variables, (list, set, dict, tuple)) else [variables])),
                                inplace=inplace)
    def check_independence(self, event1, event2, event3=None):
        """
        Check if the Joint Probability Distribution satisfies the given independence condition.

        NOTE(review): when *event3* is given, ``conditional_distribution``
        appears to modify this distribution in place (no copy is made) --
        confirm before relying on *self* afterwards.

        Parameters
        ----------
        event1: list or string
            random variable whose independence is to be checked.

        event2: list or string
            random variable from which event1 is independent.

        event3: list or string
            event1 is independent of event2 given event3.

        For random variables say X, Y, Z to check if X is independent of Y given Z.
        event1 should be either X or Y.
        event2 should be either Y or X.
        event3 should Z.

        Examples
        --------
        >>> from pgmpy.factors import JointProbabilityDistribution
        >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)
        >>> prob.check_independence('x1', 'x2')
        True
        >>> prob.check_independence(['x1'], ['x2'], 'x3')
        True
        """
        if event3:
            self.conditional_distribution(event3)
        # Pairwise check: P(x, y) must equal P(x) * P(y) for every
        # (x, y) pair drawn from event1 x event2.
        for variable_pair in product(event1, event2):
            if (self.marginal_distribution(variable_pair, inplace=False) !=
                    self.marginal_distribution(variable_pair[0], inplace=False) *
                    self.marginal_distribution(variable_pair[1], inplace=False)):
                return False
        return True
def get_independencies(self, condition=None):
"""
Returns the independent variables in the joint probability distribution.
Returns marginally independent variables if condition=None.
Returns conditionally independent variables if condition!=None
Parameter
---------
condition: array_like
Random Variable on which to condition the Joint Probability Distribution.
Examples
--------
>>> from pgmpy.factors import JointProbabilityDistribution
>>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(8)/8)
>>> prob.get_independencies()
"""
if condition:
self.conditional_distribution(condition)
independencies = Independencies()
from itertools import combinations
for variable_pair in combinations(list(self.variables), 2):
from copy import deepcopy
if JointProbabilityDistribution.marginal_distribution(deepcopy(self), variable_pair) == \
JointProbabilityDistribution.marginal_distribution(deepcopy(self), variable_pair[0]) * \
JointProbabilityDistribution.marginal_distribution(deepcopy(self), variable_pair[1]):
independencies.add_assertions(variable_pair)
return independencies
    def conditional_distribution(self, values):
        """
        Returns Conditional Probability Distribution after setting values to 1.

        Modifies this distribution in place: reduce() and normalize() are
        expected to operate on *self* (their return values are unused).

        Parameters
        ----------
        values: string or array_like
            The values on which to condition the Joint Probability Distribution.

        Examples
        --------
        >>> from pgmpy.factors import JointProbabilityDistribution
        >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8)/8)
        >>> prob.conditional_distribution(('x1', 1))
        >>> print(prob)
        x2    x3      P(x1, x2)
        x2_0  x3_0    0.25
        x2_0  x3_1    0.25
        x2_1  x3_0    0.25
        x2_1  x3_1    0.25
        """
        self.reduce(values)
        # Renormalize so the reduced table is a valid distribution again.
        self.normalize()
    def minimal_imap(self, order):
        """
        Returns a Bayesian Model which is minimal IMap of the Joint Probability Distribution
        considering the order of the variables.

        Parameters
        ----------
        order: array-like
            The order of the random variables.

        Examples
        --------
        >>> from pgmpy.factors import JointProbabilityDistribution
        >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)
        >>> bayesian_model = prob.minimal_imap(order=['x2', 'x1', 'x3'])
        >>> bayesian_model
        <pgmpy.models.models.models at 0x7fd7440a9320>
        """
        from pgmpy import models as bm
        import itertools
        def combinations(u):
            # Yield every subset of u (all sizes 0..len(u)).
            for r in range(len(u) + 1):
                for i in itertools.combinations(u, r):
                    yield i
        G = bm.BayesianModel()
        # For each variable, find a subset of its predecessors that renders
        # it independent of the rest, and use that subset as its parents.
        for variable_index in range(len(order)):
            u = order[:variable_index]
            for subset in combinations(u):
                if self.check_independence(order[variable_index], set(u)-set(subset), subset):
                    G.add_edges_from([(variable, order[variable_index]) for variable in subset])
        return G
    def pmap(self):
        """Not implemented: construct a perfect map (P-map) of the distribution."""
        pass
| [
"bb@b.om"
] | bb@b.om |
9b608e7d269389d21bba2c5a9d91fea46a0498d9 | fff7170249d4c2bd0eb8578c96bdc4fc90cf3a5d | /TableTopKnight/TableTopKnight/urls.py | 8277549be3aec0ede0f08b0ae7ea32636ce74d3e | [] | no_license | colin3131/Table-Top-Knight | e1a272d3385eb003cd089dbcec41b841086009f2 | c648ba844a72b6e3ea7b4b62115896ca149f9b72 | refs/heads/master | 2023-04-30T21:53:33.395123 | 2020-01-06T22:56:05 | 2020-01-06T22:56:05 | 218,636,770 | 0 | 0 | null | 2023-04-21T20:48:03 | 2019-10-30T22:18:58 | HTML | UTF-8 | Python | false | false | 1,051 | py | """TableTopKnight URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
# Route the admin site and delegate everything else to the events app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('events.urls')),
]
# Serve uploaded media (e.g. thumbnails) directly in development only;
# in production the web server should serve MEDIA_ROOT instead.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"cps38@pitt.edu"
] | cps38@pitt.edu |
e26b36dc12b5610f39bdaf50670cbb889b02a9b9 | 9741a626ea58ef334d4280af1a756be06f69c871 | /apps/departamentos/views.py | 2837d3f8e966127b6dfa9fb4e31fc11de7367c79 | [] | no_license | diogo20lemos/gestao_rh | a8a86c7afcad0649dbdbb64e4855e08907917068 | d78139812b13a679699160e0c2be382548864f0f | refs/heads/master | 2023-01-12T11:51:44.918602 | 2020-11-17T16:44:54 | 2020-11-17T16:44:54 | 287,319,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | from django.shortcuts import render
from django.urls import reverse_lazy
from .models import Departamento
from django.views.generic import (
ListView,
CreateView,
UpdateView,
DeleteView,
)
class DepartamentosList(ListView):
model = Departamento
def get_queryset(self):
empresa_logada = self.request.user.funcionario.empresa
return Departamento.objects.filter(empresa=empresa_logada)
class DepartamentosCreate(CreateView):
model = Departamento
fields = ['nome']
def form_valid(self, form):
departamento = form.save(commit=False)
departamento.empresa = self.request.user.funcionario.empresa
departamento.save()
return super(DepartamentosCreate, self).form_invalid(form)
class DepartamentoUpdate(UpdateView):
model = Departamento
fields = ['nome']
class DepartamentoDelete(DeleteView):
model = Departamento
fields = ['nome']
success_url = reverse_lazy('list_departamentos')
| [
"diogolemos@EDiagnose.local"
] | diogolemos@EDiagnose.local |
8574cc177650e40a4bc27530edb2d08b72bb71ce | 3e95aae140dbea822e3542399871a5b573114ab7 | /python 3.7.4.py(1).py | 7a7db44425a3db141e38f0bb8f92284cfabebc97 | [] | no_license | meghana-221810402017/12-07-19 | 5885b53503b47e5835cfd8bb188ab8f85fae5611 | 52f795c8cae11504e9e4fed0aeecc6e07a21e83c | refs/heads/master | 2020-06-19T04:59:18.161188 | 2019-07-13T03:28:20 | 2019-07-13T03:28:20 | 196,571,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from turtle import *
for i in range(20):
forward(100)
left(90)
forward(10)
left(90)
forward(100)
right(90)
forward(10)
right(90)
pencolor('red')
for i in range(90):
undo()
| [
"noreply@github.com"
] | noreply@github.com |
1e616727b698fb933c3679722bfecdc53bf353af | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/kid/way_team_business/family/face_issue_city/number/year/health_kerberos_back.py | 655af272c1deb6d1ca6522de2970d1ca8ae96cfa | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,934 | py | using System;
using System.Net.Http;
using System.Text;
using System.Threading.Tasks;
// Install Newtonsoft.Json with NuGet
using Newtonsoft.Json;
namespace translate_sample
{
class Program
{
private const string key_var = "TRANSLATOR_TEXT_SUBSCRIPTION_KEY";
private static readonly string subscriptionKey = "9264e06fc74e856e6ad7039efbb924c4";
private const string endpoint_var = "TRANSLATOR_TEXT_ENDPOINT";
private static readonly string endpoint = Environment.GetEnvironmentVariable(endpoint_var);
static Program()
{
if (null == subscriptionKey)
{
throw new Exception("Please set/export the environment variable: " + key_var);
}
if (null == endpoint)
{
throw new Exception("Please set/export the environment variable: " + endpoint_var);
}
}
// The code in the next section goes here.
// This sample requires C# 7.1 or later for async/await.
// Async call to the Translator Text API
static public async Task TranslateTextRequest(string subscriptionKey, string endpoint, string route, string inputText)
{
object[] body = new object[] { new { Text = inputText } };
var requestBody = JsonConvert.SerializeObject(body);
using (var client = new HttpClient())
using (var request = new HttpRequestMessage())
{
// Build the request.
// Set the method to Post.
request.Method = HttpMethod.Post;
// Construct the URI and add headers.
request.RequestUri = new Uri(endpoint + route);
request.Content = new StringContent(requestBody, Encoding.UTF8, "application/json");
request.Headers.Add("a19ae802003b91b483269a2d3ca373a1", subscriptionKey);
// Send the request and get response.
HttpResponseMessage response = await client.SendAsync(request).ConfigureAwait(false);
// Read response as a string.
string result = await response.Content.ReadAsStringAsync();
// Deserialize the response using the classes created earlier.
TranslationResult[] deserializedOutput = JsonConvert.DeserializeObject<TranslationResult[]>(result);
// Iterate over the deserialized results.
foreach (TranslationResult o in deserializedOutput)
{
// Print the detected input language and confidence score.
Console.WriteLine("Detected input language: {0}\nConfidence score: {1}\n", o.DetectedLanguage.Language, o.DetectedLanguage.Score);
// Iterate over the results and print each translation.
foreach (Translation t in o.Translations)
{
Console.WriteLine("Translated to {0}: {1}", t.To, t.Text);
}
}
}
}
static async Task Main(string[] args)
{
// This is our main function.
// Output languages are defined in the route.
// For a complete list of options, see API reference.
// https://docs.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate
string route = "/translate?api-version=3.0&to=de&to=it&to=ja&to=th";
// Prompts you for text to translate. If you'd prefer, you can
// provide a string as textToTranslate.
Console.Write("Type the phrase you'd like to translate? ");
string textToTranslate = Console.ReadLine();
await TranslateTextRequest(subscriptionKey, endpoint, route, textToTranslate);
Console.WriteLine("Press any key to continue.");
Console.ReadKey();
}
}
}
| [
"soric.matko@gmail.com"
] | soric.matko@gmail.com |
3f2d82a98c7780670df6738341d6c92a64e95c4f | 8b71aaab38dbe1adac0c3dfa97bd39997272e0d1 | /main.py | 35138ba0d0bd7dd7909fbd7bb9db47ccdd44538f | [
"MIT"
] | permissive | sreekesari-vangeepuram/visual-card-generator | 39486d0d0565d8400c3d1e4f2b6f77ea8a1d2add | f39b253c21d98119e44ab741d992bde7987354c3 | refs/heads/main | 2023-07-16T17:03:04.148380 | 2021-09-07T15:41:33 | 2021-09-07T15:41:33 | 339,816,805 | 1 | 1 | null | 2021-08-11T08:28:56 | 2021-02-17T18:22:34 | Python | UTF-8 | Python | false | false | 5,388 | py | """
Copyright © 2021
Vangeepuram Sreekesari
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from sys import argv
from PIL import Image, ImageDraw, ImageFilter, ImageFont
# If you are developing an API
# just change the parameters
# in your convenient way!
global vc_size, profile_pic_size, overlay_location, watermark_location, uname_fntsz, other_fntsz
vc_size, profile_pic_size = (<int>, <int>), (<int>, <int>)
overlay_location = (vc_size[0] // 2 - profile_pic_size[0] // 2,
vc_size[1] // 2 - profile_pic_size[1] // 2)
uname_fntsz, other_fntsz = <int>, <int>
profile_pic_path = argv[1]
color = argv[2]
# --------------------------------------------------
username = "<username>"
user_handle = f"@{'<userhandle>'}"
user_location = "<user-location>"
# --------------------------------------------------
def crop_center(pil_img, crop_width, crop_height):
img_width, img_height = pil_img.size
offset = 5
return pil_img.crop(((img_width - crop_width) // 2 + offset,
(img_height - crop_height) // 2 + offset,
(img_width + crop_width) // 2 + offset,
(img_height + crop_height) // 2 + offset))
crop_max_square = lambda pil_img: crop_center(pil_img, min(pil_img.size), min(pil_img.size))
def mask_circle_transparent(pil_img, blur_radius, offset=0):
"Returns a card after masking the profile pic"
offset += blur_radius * 2
mask = Image.new("L", pil_img.size, 0)
draw = ImageDraw.Draw(mask)
draw.ellipse((offset, offset, pil_img.size[0] - offset, pil_img.size[1] - offset), fill = 255)
mask = mask.filter(ImageFilter.GaussianBlur(blur_radius)) # Filtering the mask
result = pil_img.copy() # Buffer of same type to add alpha-gradient with mask
result.putalpha(mask)
return result
def render_text(image, text, text_location, font_size):
"Returns a card by rendering the given text"
card = ImageDraw.Draw(image)
font_path = "./etc/font.ttf"
if "|" not in text:
card.text(text_location, text, font=ImageFont.truetype(font_path, font_size))
else:
card.text(text_location, text.split("|")[0], font=ImageFont.truetype(font_path, font_size))
width, height = card.textsize(text.split("|")[0], font=ImageFont.truetype(font_path, font_size))
n_spaces = width // len(text.split("|")[0]) + 2 # since word-size is diff. based on font-style
card.text((text_location[0] + width + n_spaces, text_location[1] + height // 5),
text.split("|")[1], font=ImageFont.truetype(font_path, other_fntsz))
return image
def create_broder(image, y):
"Returns a card by rendering border line to text"
card = ImageDraw.Draw(image)
x1, x2 = 0, vc_size[0] # To vary the length of the border-line
y1 = y2 = y # To drag down the border-line
line_segment, line_color = [(x1, y1), (x2, y2)], (255,255,255,128)
card.line(line_segment, fill = line_color, width=1)
return image
def stamp_watermark(image, filepath_of_watermark):
"Returns the card by stamping the watermark at bottom right corner"
offset = 10 # Distance between image border and watermark
watermark = Image.open(filepath_of_watermark).convert("RGBA")
wm_size = (watermark.size[0] // (offset + 5), watermark.size[1] // (offset + 5))
watermark = watermark.resize(wm_size)
watermark_location = (vc_size[0] - wm_size[0] - offset,
vc_size[1] - wm_size[1] - offset) # Bottom right corner
image.paste(watermark, watermark_location, mask=watermark)
watermark.close()
return image
visual_card = Image.new("RGBA", vc_size, color)
visual_card = stamp_watermark(visual_card, "<watermark-filepath>")
profile_pic = Image.open(profile_pic_path)
profile_pic = crop_max_square(profile_pic).resize((profile_pic_size), Image.LANCZOS)
# In fn-call of `mask_circle_transparent`, increase 2nd arg to create blur effect at border
profile_pic = mask_circle_transparent(profile_pic, 0)
visual_card.paste(profile_pic, overlay_location, mask=profile_pic) # Overlay profile-pic on visual-card
visual_card = render_text(visual_card, f'{username}|{user_handle}', (uname_fntsz - 10, 10), uname_fntsz)
visual_card = render_text(visual_card, user_location, (uname_fntsz - 10, 35), other_fntsz)
visual_card = create_broder(visual_card, 60)
visual_card.show()
#visual_card.save("./visual_card.png")
| [
"kesari.vangeepuram@gmail.com"
] | kesari.vangeepuram@gmail.com |
73cf29f013b0e80778d15c35bf65169249f13894 | ed84375e88a09106f045e621820e0f13fca0d733 | /tests/mock_tests/_test_cfgparser.py | 4e7dbd0c1b072107dbc8e9cb77aa19665c73e635 | [
"Apache-2.0"
] | permissive | probablytom/PyMuTester | 554319b6e8b5343ea1021818552ac73329cdd509 | 855ce779f6db3748d12c7573ffcca35c3a488ce6 | refs/heads/master | 2021-01-18T20:25:26.948618 | 2014-10-07T14:54:32 | 2014-10-07T14:54:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,741 | py | #
# [miketeo/20111120]
# This is the ConfigParser module from Python 2.7
# We have adapted it slightly for our unit test
#
from mock_modules import ConfigParser
import StringIO
import unittest
import UserDict
from test import test_support
class SortedDict(UserDict.UserDict):
def items(self):
result = self.data.items()
result.sort()
return result
def keys(self):
result = self.data.keys()
result.sort()
return result
def values(self):
# XXX never used?
result = self.items()
return [i[1] for i in result]
def iteritems(self): return iter(self.items())
def iterkeys(self): return iter(self.keys())
__iter__ = iterkeys
def itervalues(self): return iter(self.values())
class TestCaseBase(unittest.TestCase):
allow_no_value = False
def newconfig(self, defaults=None):
if defaults is None:
self.cf = self.config_class(allow_no_value=self.allow_no_value)
else:
self.cf = self.config_class(defaults,
allow_no_value=self.allow_no_value)
return self.cf
def fromstring(self, string, defaults=None):
cf = self.newconfig(defaults)
sio = StringIO.StringIO(string)
cf.readfp(sio)
return cf
def test_basic(self):
config_string = (
"[Foo Bar]\n"
"foo=bar\n"
"[Spacey Bar]\n"
"foo = bar\n"
"[Commented Bar]\n"
"foo: bar ; comment\n"
"[Long Line]\n"
"foo: this line is much, much longer than my editor\n"
" likes it.\n"
"[Section\\with$weird%characters[\t]\n"
"[Internationalized Stuff]\n"
"foo[bg]: Bulgarian\n"
"foo=Default\n"
"foo[en]=English\n"
"foo[de]=Deutsch\n"
"[Spaces]\n"
"key with spaces : value\n"
"another with spaces = splat!\n"
)
if self.allow_no_value:
config_string += (
"[NoValue]\n"
"option-without-value\n"
)
cf = self.fromstring(config_string)
L = cf.sections()
L.sort()
E = [r'Commented Bar',
r'Foo Bar',
r'Internationalized Stuff',
r'Long Line',
r'Section\with$weird%characters[' '\t',
r'Spaces',
r'Spacey Bar',
]
if self.allow_no_value:
E.append(r'NoValue')
E.sort()
eq = self.assertEqual
eq(L, E)
# The use of spaces in the section names serves as a
# regression test for SourceForge bug #583248:
# http://www.python.org/sf/583248
eq(cf.get('Foo Bar', 'foo'), 'bar')
eq(cf.get('Spacey Bar', 'foo'), 'bar')
eq(cf.get('Commented Bar', 'foo'), 'bar')
eq(cf.get('Spaces', 'key with spaces'), 'value')
eq(cf.get('Spaces', 'another with spaces'), 'splat!')
if self.allow_no_value:
eq(cf.get('NoValue', 'option-without-value'), None)
self.assertNotIn('__name__', cf.options("Foo Bar"),
'__name__ "option" should not be exposed by the API!')
# Make sure the right things happen for remove_option();
# added to include check for SourceForge bug #123324:
self.assertTrue(cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report existence of option")
self.assertFalse(cf.has_option('Foo Bar', 'foo'),
"remove_option() failed to remove option")
self.assertFalse(cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report non-existence of option"
" that was removed")
self.assertRaises(ConfigParser.NoSectionError,
cf.remove_option, 'No Such Section', 'foo')
eq(cf.get('Long Line', 'foo'),
'this line is much, much longer than my editor\nlikes it.')
def test_case_sensitivity(self):
cf = self.newconfig()
cf.add_section("A")
cf.add_section("a")
L = cf.sections()
L.sort()
eq = self.assertEqual
eq(L, ["A", "a"])
cf.set("a", "B", "value")
eq(cf.options("a"), ["b"])
eq(cf.get("a", "b"), "value",
"could not locate option, expecting case-insensitive option names")
self.assertTrue(cf.has_option("a", "b"))
cf.set("A", "A-B", "A-B value")
for opt in ("a-b", "A-b", "a-B", "A-B"):
self.assertTrue(
cf.has_option("A", opt),
"has_option() returned false for option which should exist")
eq(cf.options("A"), ["a-b"])
eq(cf.options("a"), ["b"])
cf.remove_option("a", "B")
eq(cf.options("a"), [])
# SF bug #432369:
cf = self.fromstring(
"[MySection]\nOption: first line\n\tsecond line\n")
eq(cf.options("MySection"), ["option"])
eq(cf.get("MySection", "Option"), "first line\nsecond line")
# SF bug #561822:
cf = self.fromstring("[section]\nnekey=nevalue\n",
defaults={"key":"value"})
self.assertTrue(cf.has_option("section", "Key"))
def test_default_case_sensitivity(self):
cf = self.newconfig({"foo": "Bar"})
self.assertEqual(
cf.get("DEFAULT", "Foo"), "Bar",
"could not locate option, expecting case-insensitive option names")
cf = self.newconfig({"Foo": "Bar"})
self.assertEqual(
cf.get("DEFAULT", "Foo"), "Bar",
"could not locate option, expecting case-insensitive defaults")
def test_parse_errors(self):
self.newconfig()
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n extra-spaces: splat\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n extra-spaces= splat\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n:value-without-option-name\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n=value-without-option-name\n")
self.parse_error(ConfigParser.MissingSectionHeaderError,
"No Section!\n")
def parse_error(self, exc, src):
sio = StringIO.StringIO(src)
self.assertRaises(exc, self.cf.readfp, sio)
def test_query_errors(self):
cf = self.newconfig()
self.assertEqual(cf.sections(), [],
"new ConfigParser should have no defined sections")
self.assertFalse(cf.has_section("Foo"),
"new ConfigParser should have no acknowledged sections")
self.assertRaises(ConfigParser.NoSectionError,
cf.options, "Foo")
self.assertRaises(ConfigParser.NoSectionError,
cf.set, "foo", "bar", "value")
self.get_error(ConfigParser.NoSectionError, "foo", "bar")
cf.add_section("foo")
self.get_error(ConfigParser.NoOptionError, "foo", "bar")
def get_error(self, exc, section, option):
try:
self.cf.get(section, option)
except exc, e:
return e
else:
self.fail("expected exception type %s.%s"
% (exc.__module__, exc.__name__))
def test_boolean(self):
cf = self.fromstring(
"[BOOLTEST]\n"
"T1=1\n"
"T2=TRUE\n"
"T3=True\n"
"T4=oN\n"
"T5=yes\n"
"F1=0\n"
"F2=FALSE\n"
"F3=False\n"
"F4=oFF\n"
"F5=nO\n"
"E1=2\n"
"E2=foo\n"
"E3=-1\n"
"E4=0.1\n"
"E5=FALSE AND MORE"
)
for x in range(1, 5):
self.assertTrue(cf.getboolean('BOOLTEST', 't%d' % x))
self.assertFalse(cf.getboolean('BOOLTEST', 'f%d' % x))
self.assertRaises(ValueError,
cf.getboolean, 'BOOLTEST', 'e%d' % x)
def test_weird_errors(self):
cf = self.newconfig()
cf.add_section("Foo")
self.assertRaises(ConfigParser.DuplicateSectionError,
cf.add_section, "Foo")
def test_write(self):
config_string = (
"[Long Line]\n"
"foo: this line is much, much longer than my editor\n"
" likes it.\n"
"[DEFAULT]\n"
"foo: another very\n"
" long line\n"
)
if self.allow_no_value:
config_string += (
"[Valueless]\n"
"option-without-value\n"
)
cf = self.fromstring(config_string)
output = StringIO.StringIO()
cf.write(output)
expect_string = (
"[DEFAULT]\n"
"foo = another very\n"
"\tlong line\n"
"\n"
"[Long Line]\n"
"foo = this line is much, much longer than my editor\n"
"\tlikes it.\n"
"\n"
)
if self.allow_no_value:
expect_string += (
"[Valueless]\n"
"option-without-value\n"
"\n"
)
self.assertEqual(output.getvalue(), expect_string)
def test_set_string_types(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
# Check that we don't get an exception when setting values in
# an existing section using strings:
class mystr(str):
pass
cf.set("sect", "option1", "splat")
cf.set("sect", "option1", mystr("splat"))
cf.set("sect", "option2", "splat")
cf.set("sect", "option2", mystr("splat"))
try:
unicode
except NameError:
pass
else:
cf.set("sect", "option1", unicode("splat"))
cf.set("sect", "option2", unicode("splat"))
def test_read_returns_file_list(self):
file1 = test_support.findfile("cfgparser.1")
# check when we pass a mix of readable and non-readable files:
cf = self.newconfig()
parsed_files = cf.read([file1, "nonexistent-file"])
self.assertEqual(parsed_files, [file1])
self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
# check when we pass only a filename:
cf = self.newconfig()
parsed_files = cf.read(file1)
self.assertEqual(parsed_files, [file1])
self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
# check when we pass only missing files:
cf = self.newconfig()
parsed_files = cf.read(["nonexistent-file"])
self.assertEqual(parsed_files, [])
# check when we pass no files:
cf = self.newconfig()
parsed_files = cf.read([])
self.assertEqual(parsed_files, [])
# shared by subclasses
def get_interpolation_config(self):
return self.fromstring(
"[Foo]\n"
"bar=something %(with1)s interpolation (1 step)\n"
"bar9=something %(with9)s lots of interpolation (9 steps)\n"
"bar10=something %(with10)s lots of interpolation (10 steps)\n"
"bar11=something %(with11)s lots of interpolation (11 steps)\n"
"with11=%(with10)s\n"
"with10=%(with9)s\n"
"with9=%(with8)s\n"
"with8=%(With7)s\n"
"with7=%(WITH6)s\n"
"with6=%(with5)s\n"
"With5=%(with4)s\n"
"WITH4=%(with3)s\n"
"with3=%(with2)s\n"
"with2=%(with1)s\n"
"with1=with\n"
"\n"
"[Mutual Recursion]\n"
"foo=%(bar)s\n"
"bar=%(foo)s\n"
"\n"
"[Interpolation Error]\n"
"name=%(reference)s\n",
# no definition for 'reference'
defaults={"getname": "%(__name__)s"})
def check_items_config(self, expected):
cf = self.fromstring(
"[section]\n"
"name = value\n"
"key: |%(name)s| \n"
"getdefault: |%(default)s|\n"
"getname: |%(__name__)s|",
defaults={"default": "<default>"})
L = list(cf.items("section"))
L.sort()
self.assertEqual(L, expected)
class ConfigParserTestCase(TestCaseBase):
config_class = ConfigParser.ConfigParser
def test_interpolation(self):
cf = self.get_interpolation_config()
eq = self.assertEqual
eq(cf.get("Foo", "getname"), "Foo")
eq(cf.get("Foo", "bar"), "something with interpolation (1 step)")
eq(cf.get("Foo", "bar9"),
"something with lots of interpolation (9 steps)")
eq(cf.get("Foo", "bar10"),
"something with lots of interpolation (10 steps)")
self.get_error(ConfigParser.InterpolationDepthError, "Foo", "bar11")
def test_interpolation_missing_value(self):
self.get_interpolation_config()
e = self.get_error(ConfigParser.InterpolationError,
"Interpolation Error", "name")
self.assertEqual(e.reference, "reference")
self.assertEqual(e.section, "Interpolation Error")
self.assertEqual(e.option, "name")
def test_items(self):
self.check_items_config([('default', '<default>'),
('getdefault', '|<default>|'),
('getname', '|section|'),
('key', '|value|'),
('name', 'value')])
def test_set_nonstring_types(self):
cf = self.newconfig()
cf.add_section('non-string')
cf.set('non-string', 'int', 1)
cf.set('non-string', 'list', [0, 1, 1, 2, 3, 5, 8, 13, '%('])
cf.set('non-string', 'dict', {'pi': 3.14159, '%(': 1,
'%(list)': '%(list)'})
cf.set('non-string', 'string_with_interpolation', '%(list)s')
self.assertEqual(cf.get('non-string', 'int', raw=True), 1)
self.assertRaises(TypeError, cf.get, 'non-string', 'int')
self.assertEqual(cf.get('non-string', 'list', raw=True),
[0, 1, 1, 2, 3, 5, 8, 13, '%('])
self.assertRaises(TypeError, cf.get, 'non-string', 'list')
self.assertEqual(cf.get('non-string', 'dict', raw=True),
{'pi': 3.14159, '%(': 1, '%(list)': '%(list)'})
self.assertRaises(TypeError, cf.get, 'non-string', 'dict')
self.assertEqual(cf.get('non-string', 'string_with_interpolation',
raw=True), '%(list)s')
self.assertRaises(ValueError, cf.get, 'non-string',
'string_with_interpolation', raw=False)
class RawConfigParserTestCase(TestCaseBase):
config_class = ConfigParser.RawConfigParser
def test_interpolation(self):
cf = self.get_interpolation_config()
eq = self.assertEqual
eq(cf.get("Foo", "getname"), "%(__name__)s")
eq(cf.get("Foo", "bar"),
"something %(with1)s interpolation (1 step)")
eq(cf.get("Foo", "bar9"),
"something %(with9)s lots of interpolation (9 steps)")
eq(cf.get("Foo", "bar10"),
"something %(with10)s lots of interpolation (10 steps)")
eq(cf.get("Foo", "bar11"),
"something %(with11)s lots of interpolation (11 steps)")
def test_items(self):
self.check_items_config([('default', '<default>'),
('getdefault', '|%(default)s|'),
('getname', '|%(__name__)s|'),
('key', '|%(name)s|'),
('name', 'value')])
def test_set_nonstring_types(self):
cf = self.newconfig()
cf.add_section('non-string')
cf.set('non-string', 'int', 1)
cf.set('non-string', 'list', [0, 1, 1, 2, 3, 5, 8, 13])
cf.set('non-string', 'dict', {'pi': 3.14159})
self.assertEqual(cf.get('non-string', 'int'), 1)
self.assertEqual(cf.get('non-string', 'list'),
[0, 1, 1, 2, 3, 5, 8, 13])
self.assertEqual(cf.get('non-string', 'dict'), {'pi': 3.14159})
class SafeConfigParserTestCase(ConfigParserTestCase):
config_class = ConfigParser.SafeConfigParser
def test_safe_interpolation(self):
# See http://www.python.org/sf/511737
cf = self.fromstring("[section]\n"
"option1=xxx\n"
"option2=%(option1)s/xxx\n"
"ok=%(option1)s/%%s\n"
"not_ok=%(option2)s/%%s")
self.assertEqual(cf.get("section", "ok"), "xxx/%s")
self.assertEqual(cf.get("section", "not_ok"), "xxx/xxx/%s")
def test_set_malformatted_interpolation(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
self.assertEqual(cf.get('sect', "option1"), "foo")
self.assertRaises(ValueError, cf.set, "sect", "option1", "%foo")
self.assertRaises(ValueError, cf.set, "sect", "option1", "foo%")
self.assertRaises(ValueError, cf.set, "sect", "option1", "f%oo")
self.assertEqual(cf.get('sect', "option1"), "foo")
# bug #5741: double percents are *not* malformed
cf.set("sect", "option2", "foo%%bar")
self.assertEqual(cf.get("sect", "option2"), "foo%bar")
def test_set_nonstring_types(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
# Check that we get a TypeError when setting non-string values
# in an existing section:
self.assertRaises(TypeError, cf.set, "sect", "option1", 1)
self.assertRaises(TypeError, cf.set, "sect", "option1", 1.0)
self.assertRaises(TypeError, cf.set, "sect", "option1", object())
self.assertRaises(TypeError, cf.set, "sect", "option2", 1)
self.assertRaises(TypeError, cf.set, "sect", "option2", 1.0)
self.assertRaises(TypeError, cf.set, "sect", "option2", object())
def test_add_section_default_1(self):
cf = self.newconfig()
self.assertRaises(ValueError, cf.add_section, "default")
def test_add_section_default_2(self):
cf = self.newconfig()
self.assertRaises(ValueError, cf.add_section, "DEFAULT")
class SafeConfigParserTestCaseNoValue(SafeConfigParserTestCase):
allow_no_value = True
class SortedTestCase(RawConfigParserTestCase):
def newconfig(self, defaults=None):
self.cf = self.config_class(defaults=defaults, dict_type=SortedDict)
return self.cf
def test_sorted(self):
self.fromstring("[b]\n"
"o4=1\n"
"o3=2\n"
"o2=3\n"
"o1=4\n"
"[a]\n"
"k=v\n")
output = StringIO.StringIO()
self.cf.write(output)
self.assertEquals(output.getvalue(),
"[a]\n"
"k = v\n\n"
"[b]\n"
"o1 = 4\n"
"o2 = 3\n"
"o3 = 2\n"
"o4 = 1\n\n")
def test_main():
test_support.run_unittest(
ConfigParserTestCase,
RawConfigParserTestCase,
SafeConfigParserTestCase,
SortedTestCase,
SafeConfigParserTestCaseNoValue,
)
if __name__ == "__main__":
test_main()
| [
"miketeo@miketeo.net"
] | miketeo@miketeo.net |
99097c7a60ccd21798abfd905881f2092e9aec1e | 7ac7f7e401a545670b7dcd6d1229a600089f6cf7 | /BasicoBlog/urls.py | 813faa1f004d645d8ddad94e5f196f9b19ecdc22 | [] | no_license | nicolasgeronimorodi/BasicoDjangoBlog | 29d60438a18bcc0062c2b0e8d457806ba9f73507 | 5cfedbf4aef09a2ea0653add1fbdaae4e03c1959 | refs/heads/main | 2023-08-23T19:35:41.068920 | 2021-09-27T13:21:26 | 2021-09-27T13:21:26 | 410,059,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | """BasicoBlog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include("BlogApp.urls"))
]
| [
"nicolasgeronimorodi@gmail.com"
] | nicolasgeronimorodi@gmail.com |
24628a937c4bb015580dcf7db437fbac6c5eb40d | 13696a9691b173d75b11b4aee22b79d4ea6b7c0b | /test/test_order_line_item.py | 0f2cd0135271ef7e6096299470663b97c0befed0 | [
"Apache-2.0"
] | permissive | square/connect-python-sdk | 410613bc4b04f0f70176275591a16c9e49e25ede | e00e2889b2dd2c55048219cbe64db79962a68633 | refs/heads/master | 2023-06-15T09:24:17.190416 | 2019-08-15T17:44:41 | 2019-08-15T17:44:41 | 64,772,029 | 53 | 45 | Apache-2.0 | 2020-12-20T18:41:31 | 2016-08-02T16:07:17 | Python | UTF-8 | Python | false | false | 1,186 | py | # coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import squareconnect
from squareconnect.rest import ApiException
from squareconnect.models.order_line_item import OrderLineItem
class TestOrderLineItem(unittest.TestCase):
""" OrderLineItem unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testOrderLineItem(self):
"""
Test OrderLineItem
"""
model = squareconnect.models.order_line_item.OrderLineItem()
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
eaf3893eecde7d8134acabf5412ec6cabea694dc | 36f2147d46cf0bb8f85acc81bfe2b4486405e33a | /ZXYJ_GG-master/PyCharm/Wdpc/Dog2.py | 9b6985b14f136ee13f528f45e34265eea60357ef | [] | no_license | LDPGG/dm | 4aab9b54a6f4e6dae57baede40c9b5f094c3607e | 7b0e267418fd2345923fb1b4c48d53347eec13dd | refs/heads/master | 2021-03-06T20:09:20.126015 | 2020-03-10T06:06:20 | 2020-03-10T06:06:20 | 157,844,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | # -*- coding: utf-8 -*-
# @Time : 2018/6/14 0014 下午 6:30
# @Author : 刘登攀阿!!
# @FileName: Dog2.py
# @Software: PyCharm
from selenium import webdriver
# 这是我自己封装的一堆方法
from A_method.Slide import *
import unittest
import time
import HTMLTestRunner
class Store(unittest.TestCase):
"测试"
def setUp(self):
self.driver = webdriver.Chrome()
self.driver.maximize_window()
self.driver.get('https://www.baidu.com/')
def su(self):
self.driver.find_element_by_id('kw').send_keys('python')
time.sleep(1)
self.driver.find_element_by_id('su').click()
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
testunit = unittest.TestSuite()
testunit.addTest(Store('su'))
# route 是我自己封装的文件路径,我把所有的测试报告都放在一个文件夹的,这里你自己可以改一下
# fliename = route('tets.html')
fp = open('tets.html', 'wb')
runner = HTMLTestRunner.HTMLTestRunner(stream=fp,
title='测试用例',
description='用例执行情况')
runner.run(testunit)
fp.close()
| [
"1583141776@qq.com"
] | 1583141776@qq.com |
2c299242c9b1cd7d9f2e8d23fd9594deda9e697f | f90b2068ca01ae2f9b1bc21b346ad0b0b406912d | /exchange_scrapper/migrations/0004_exchange_rss_urls.py | b83c6c78a9890e666c85e200d83f1fd22a5eaf36 | [] | no_license | spam128/ecb-exchange-rate | 2509e3d269f646b9116ddc9447050455360777d0 | f7d6fc38126f1e1a4cd470e21eec4dc4dd8b8e0c | refs/heads/master | 2020-03-24T03:16:54.713073 | 2018-07-28T09:35:21 | 2018-07-28T09:35:21 | 142,412,387 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | # Generated by Django 2.0.7 on 2018-07-26 11:11
from django.db import migrations, models
import requests
from bs4 import BeautifulSoup
url_with_rss = 'https://www.ecb.europa.eu/home/html/rss.en.html'
exchanges_list_class = 'zebraList'
def add_exchange_rss(apps, schema_editor):
MyModel = apps.get_model('exchange_scrapper', 'ExchangeRSS')
try:
for exchange_data in get_urls():
MyModel.objects.create(name=exchange_data[1][-4:-1], slug=exchange_data[0])
except:
raise ('something went wrong')
def get_urls():
urls = []
response = requests.get(url_with_rss)
html = BeautifulSoup(response.text, 'html.parser')
for ul in html.find_all('ul', class_=exchanges_list_class):
for li in ul.find_all('li'):
a = li.find('a')
url = a['href'], a.get_text()
if url[0].startswith('/rss/fxref'):
urls.append(url)
return urls
class Migration(migrations.Migration):
dependencies = [
('exchange_scrapper', '0003_auto_20180726_1111'),
]
operations = [
migrations.RunPython(add_exchange_rss),
]
| [
"corpuscallosum@localhost.localdomain"
] | corpuscallosum@localhost.localdomain |
8c64015c2a91b9f94438206ce782b3045cbe1caf | 43b62e422a31f803d40676323465a5956467ede0 | /setup.py | d463b69792686d2b4554444297c620bfc7b2bade | [
"BSD-3-Clause"
] | permissive | declaresub/pygments-xojo | e88dfe4946869fa80721df02f1428d51c2c0697d | a4109d1effda8105e8dcc52f547c44329afde134 | refs/heads/master | 2016-09-05T18:52:46.629046 | 2016-02-16T02:23:56 | 2016-02-16T02:23:56 | 39,641,892 | 5 | 3 | null | 2016-02-16T02:23:57 | 2015-07-24T15:28:39 | Python | UTF-8 | Python | false | false | 1,670 | py | # -*- coding: utf-8 -*-
"""Pygments-Xojo
^^^^^^^^^^^^^
Pygments-Xojo adds support for the Xojo language to the Pygments syntax highlighting
package.
:copyright: Copyright 2015 Charles Yeomans.
:license: BSD, see LICENSE for details.
"""
from setuptools import setup
from setuptools.command.test import test as TestCommand
import io
import string
class Tox(TestCommand):
    """``python setup.py test`` command that delegates the run to tox."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Import here, cause outside the eggs aren't loaded.
        # ``sys`` is imported locally as well: this module never imports it
        # at file level (only setuptools/io/string), so the original
        # ``sys.exit(errcode)`` raised NameError instead of exiting cleanly.
        import sys
        import tox
        errcode = tox.cmdline(self.test_args)
        sys.exit(errcode)
def package_version():
    """Extract ``__version__`` from the package __init__ without importing it."""
    with io.open('pygments_xojo/__init__.py', 'r', encoding='utf-8') as source:
        for line in source:
            if not line.strip().startswith('__version__'):
                continue
            # Take everything right of the first '=' and trim quotes/space.
            return line.split('=', 1)[1].strip(string.whitespace + '"\'')
    raise Exception('Unable to read package version.')
# Package metadata. The lexer and style are exposed through Pygments'
# plugin entry points, so ``pygments`` discovers them automatically once
# the package is installed.
setup(name='pygments-xojo',
      version=package_version(),
      author='Charles Yeomans',
      author_email='charles@declaresub.com',
      license='BSD License',
      url='https://github.com/declaresub/pygments-xojo',
      description='Pygments highlighting for the Xojo language',
      long_description = __doc__,
      keywords = 'syntax highlighting xojo',
      platforms = 'any',
      packages=['pygments_xojo'],
      install_requires=['pygments'],
      entry_points = {'pygments.lexers': ['xojo = pygments_xojo.lexer:XojoLexer'], 'pygments.styles': ['xojo = pygments_xojo.styles:XojoStyle']},
      cmdclass = {'test': Tox}
      )
| [
"charles@declareSub.com"
] | charles@declareSub.com |
eaa3e0daae4766d32f9000a254fa9857a49ac1d1 | 1a9d8e128b6d44abe15c2c82dd2e795909fa8a0c | /sentimental.py | 01350ab95536a121ce559147619e2189cb0cfa10 | [] | no_license | cozy-an/FinalProject | 68c4b9ff025bf421d4c55209916cad58eecdc02c | 0f0c4cc8e634f11e140aaf2938f5d646152f7e37 | refs/heads/master | 2020-03-17T05:56:42.875655 | 2018-05-23T03:39:58 | 2018-05-23T03:39:58 | 133,334,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,818 | py | import cv2
import argparse
import io
import time
import json
from google.cloud import vision
def video2frame(video, save_path):
    """Dump every frame of *video* as <save_path>NNNNN.jpg and run face detection on each."""
    capture = cv2.VideoCapture(video)
    frame_idx = 0
    while True:
        ok, frame = capture.read()
        if not ok:
            break
        print('Read a new Frame : ', ok)
        fname = "{}.jpg".format("{0:05d}".format(frame_idx))
        cv2.imwrite(save_path + fname, frame)
        detect_faces(save_path + fname)
        # Pause one second between frames.
        time.sleep(1)
        frame_idx += 1
    print("{} images are extracted in {}.".format(frame_idx, save_path))
def detect_faces(path):
    """Print Cloud Vision face-emotion likelihoods for the image at *path*."""
    client = vision.ImageAnnotatorClient()
    with io.open(path, 'rb') as image_file:
        payload = image_file.read()
    response = client.face_detection(image=vision.types.Image(content=payload))
    # Index -> name mapping for google.cloud.vision likelihood enums.
    likelihoods = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
                   'LIKELY', 'VERY_LIKELY')
    print('Faces:')
    for face in response.face_annotations:
        print('anger: {}'.format(likelihoods[face.anger_likelihood]))
        print('joy: {}'.format(likelihoods[face.joy_likelihood]))
        print('surprise: {}'.format(likelihoods[face.surprise_likelihood]))
        corners = ','.join('({},{})'.format(vertex.x, vertex.y)
                           for vertex in face.bounding_poly.vertices)
        print('face bounds: {}'.format(corners))
video2frame("C:/Users/Student/Desktop/iuiuiu.mp4","c:/users/Student/desktop/capture/") | [
"seafood123@naver.com"
] | seafood123@naver.com |
f69577fd592c5d9f9b74f57aac0cca16c85f5801 | 8841ea034263e5a0ad330ef85443c171a33d3c1d | /core/views.py | aa668dafd9de02d401f200451dfdc2ad7e678d36 | [] | no_license | peteryao/foodster | c63fe7461f48d6aa337fd630dfbd8947df19bd1c | cb18ae2754075faa671febae24081ee6635fcd94 | refs/heads/master | 2020-07-07T22:20:07.154370 | 2014-10-10T21:08:53 | 2014-10-10T21:08:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.models import User, Permission
from django.contrib.auth.decorators import login_required
def index(request):
    """Render the landing page with an empty template context."""
    return render(request, 'core/index.html', {})
"peteryao916@gmail.com"
] | peteryao916@gmail.com |
e3c5584e012ccde4876ed48134754e141f6a08da | de7f4188de1e9c5a90ce70c3c21ad9a3fa14ffca | /star2.py | 4b1137e138bcc6dfd22a7b12644d75a611852641 | [] | no_license | aryajar/practice-in-7-23 | 7118fd31e87c4e58dcd1ffb6711c8c3e1513658c | 19d917e9e9a9e0e62d1508049068ea6d4e57d3a8 | refs/heads/master | 2022-11-22T12:43:25.789849 | 2020-07-24T08:33:02 | 2020-07-24T08:33:02 | 281,873,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | n = int(input("Enter a number: "))
i = 1
while i <= n:
print(" * " * i)
i += 1
| [
"wangshengnan@stockhn.com"
] | wangshengnan@stockhn.com |
e43f1526ecb3cb7aaef25924f0510556b16f8538 | 0c84df2bab1acad781ed19f9f86433aa187c52b8 | /venv/bin/pip2 | 2f447db901ebc4773b1eb9e17b9eb91314bd7fa2 | [] | no_license | drunkrabbit1/botfid1 | ccc02e42ea72d9a8f5bc3bb30419caac6c9f17f4 | 9f67d7ca3d65785a06e366ed2b349bb72d5b822e | refs/heads/master | 2022-04-08T20:53:46.177140 | 2020-03-06T14:49:39 | 2020-03-06T14:49:39 | 245,441,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | #!/home/fallen/PycharmProjects/bot1/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
# Console-script shim generated for the virtualenv: normalize argv[0]
# (strip any -script.pyw/.exe wrapper suffix) and delegate to pip's CLI.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"acer5742zg@mail.ru"
] | acer5742zg@mail.ru | |
554a4a4b666e10e9da61bdc24bf006d10058c256 | 4702fc119749eb4d1ff44f13d65ea090a2ac0a38 | /notes/venv/Scripts/pip-script.py | 6a12ac11b465ab9c257641dfa1ee608a48a030f2 | [] | no_license | KennedyDotCom/CSE | b2a2d6cc0d881e281a480de556e6ac8cf9429e5f | 693a93792c4cfb2e0d107ef254516cf7bcce75f1 | refs/heads/master | 2020-04-02T10:37:35.239513 | 2019-05-22T16:45:13 | 2019-05-22T16:45:13 | 154,347,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | #!C:\Users\p9rn\Documents\Github\CSE\notes\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"44372252+KennedyDotCom@users.noreply.github.com"
] | 44372252+KennedyDotCom@users.noreply.github.com |
9517efb47d7084f3fc2cc2ee9b05e5d3e0b05ea3 | b9ca45806679dd8bae4111923ba3de90dffb792e | /policy/nn_policy.py | d2b798295284d1eca7589f4fbee5feb0cdeab20c | [] | no_license | souradip-chakraborty/RoMBRL | 6d24bda52e15101c7625bd16c826292acbe6cf97 | a3772cee003af18a129c5b8799f978f2fad96294 | refs/heads/master | 2023-03-15T16:24:10.996972 | 2020-08-02T11:14:55 | 2020-08-02T11:14:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,245 | py | # -*- coding: utf-8 -*-
"""
nn_policy.py
Created on : February 28, 2019
Author : anonymous
Name : Anonymous
"""
import tensorflow as tf
import numpy as np
import sys
import os
import logging
import rllab.misc.logger as rllab_logger
from sandbox.rocky.tf.envs.base import TfEnv
# sys.path.append(os.path.abspath(os.path.join("lib", "me_trpo")))
from environments.bnn_env import BayesNeuralNetEnv
from rllab_algos.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from lib.utils.env_helpers import evaluate_fixed_init_trajectories, evaluate_fixed_init_trajectories_2
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
"""
Extend from project ME-TRPO
"""
class NNPolicy(object):
    """Gaussian MLP policy trained with TRPO against a learned dynamics model.

    The policy (an rllab ``GaussianMLPPolicy``) is optimized inside a
    ``BayesNeuralNetEnv`` that wraps the Bayesian-NN dynamics model
    ``dyn_model`` when one is given, otherwise directly in ``env``.
    Candidate policies are checkpointed/restored based on a validation cost
    evaluated on a fixed set of initial states. Extends code from ME-TRPO.
    """

    def __init__(self, session, env, dyn_model, n_timestep, n_states, n_actions, log_dir,
                 policy_params=None, policy_opt_params=None):
        """Build the policy network, the TRPO optimizer and validation data.

        session: a live tf.Session; env: an rllab TfEnv wrapper;
        dyn_model: Bayesian NN dynamics model or None (then TRPO runs on
        the real env); log_dir: where policy checkpoints are written.
        policy_params / policy_opt_params are dict-like configs — see the
        keys read below for the expected entries.
        """
        self.policy_params = policy_params
        self.policy_opt_params = policy_opt_params
        self.env = env
        self.tf_sess = session
        self.n_states = n_states
        self.n_actions = n_actions
        self.n_timestep = n_timestep
        self.scope_name = "training_policy"
        self.policy_saver = None
        self.log_dir = log_dir
        self.bnn_model = dyn_model
        # Parameters assign
        self.n_envs = policy_opt_params["batch_size"]
        self.min_iters = policy_opt_params["num_iters_threshold"]
        self.reset_non_increasing = policy_opt_params["reset_non_increasing"]
        # Initial value
        self.min_validation_cost = np.inf
        self.non_increase_counter = 0
        self.training_policy, self.policy_model = self._build_policy_from_rllab(env=env, n_actions=self.n_actions)
        self.policy_in, self.policy_out, self.stochastic = self._initialize_policy(self.policy_model, self.n_states)
        self.algo_policy, self.cost_np_vec = self._init_bnn_trpo(dyn_model, self.training_policy, self.n_timestep)
        # Op that resets the policy's log-std parameter back to log(1.0).
        self.reset_op = tf.assign(self.training_policy._l_std_param.param, np.log(1.0) * np.ones(self.n_actions))
        # Create validation data
        self.policy_validation_init, self.policy_validation_reset_init = \
            self._init_validation_data(self.n_envs, policy_opt_params["validation_is_correct"])

    def _init_validation_data(self, policy_opt_batch_size=500, is_correct=False):
        """Sample a fixed batch of initial observations/reset states for validation.

        When ``is_correct`` is True the observations double as reset states.
        Otherwise the full simulator state (MuJoCo qpos/qvel, or ``_state``
        for non-MuJoCo envs) is captured alongside each observation.
        """
        policy_validation_init = []
        policy_validation_reset_init = []
        if is_correct:
            policy_validation_init = np.array([self.env.reset() for i in range(policy_opt_batch_size)])
            policy_validation_reset_init = np.copy(policy_validation_init)
        else:
            for i in range(policy_opt_batch_size):
                init = self.env.reset()
                # Unwrap to the innermost env (two wrapper layouts exist).
                if hasattr(self.env._wrapped_env, '_wrapped_env'):
                    inner_env = self.env._wrapped_env._wrapped_env
                else:
                    inner_env = self.env._wrapped_env.env.unwrapped
                if hasattr(inner_env, "model"):
                    # MuJoCo env: full state is the concatenated qpos/qvel.
                    reset_init = np.concatenate(
                        [inner_env.model.data.qpos[:, 0],
                         inner_env.model.data.qvel[:, 0]])
                else:
                    reset_init = inner_env._state
                # Sanity check: resetting to the captured state must
                # reproduce the observation (only for the rllab wrapper).
                if hasattr(self.env._wrapped_env, '_wrapped_env'):
                    assert np.allclose(init, inner_env.reset(reset_init))
                policy_validation_init.append(init)
                policy_validation_reset_init.append(reset_init)
            policy_validation_init = np.array(policy_validation_init)
            policy_validation_reset_init = np.array(policy_validation_reset_init)
        return policy_validation_init, policy_validation_reset_init

    def _build_policy_from_rllab(self, env, n_actions):
        """Return both rllab policy and policy model function.

        ``policy_model`` maps an observation batch to actions: the policy
        mean plus ``stochastic`` * N(0, 1) * std (so stochastic=0.0 gives
        the deterministic mean action).
        """
        sess = self.tf_sess
        scope_name = self.scope_name
        # Initialize training_policy to copy from policy
        training_policy = GaussianMLPPolicy(
            name=scope_name,
            env_spec=env.spec,
            hidden_sizes=self.policy_params["hidden_layers"],
            init_std=self.policy_opt_params["trpo"]["init_std"],
            output_nonlinearity=eval(self.policy_params["output_nonlinearity"])
        )
        training_policy_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='training_policy')
        sess.run([tf.variables_initializer(training_policy_vars)])

        def policy_model(x, stochastic=1.0, collect_summary=False):
            dist_info_sym = training_policy.dist_info_sym(x, dict())
            mean_var = dist_info_sym["mean"]
            log_std_var = dist_info_sym["log_std"]
            mean_var += stochastic * tf.random_normal(shape=(tf.shape(x)[0], n_actions)) * tf.exp(log_std_var)
            return mean_var
        return training_policy, policy_model

    def session_policy_out(self, observation, stochastic=0.0):
        """Run the policy graph on a batch of observations (deterministic by default)."""
        action = self.tf_sess.run(self.policy_out, feed_dict={self.policy_in: observation, self.stochastic: stochastic})
        return action

    def _initialize_policy(self, policy_model, n_states):
        """Create the policy placeholders/output op, init variables, build a Saver."""
        # Initial tf variables
        policy_scope = self.scope_name
        policy_in = tf.placeholder(tf.float32, shape=(None, n_states), name='policy_in')
        stochastic = tf.placeholder(tf.float32, shape=(None), name='stochastic')
        policy_out = policy_model(policy_in, stochastic=stochastic)
        tf.add_to_collection("policy_in", policy_in)
        tf.add_to_collection("stochastic", stochastic)
        tf.add_to_collection("policy_out", policy_out)
        """ Prepare variables and data for learning """
        # Initialize all variables
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=policy_scope)
        init_op = tf.initialize_variables(var_list)
        self.tf_sess.run(init_op)
        # Policy saver
        self.policy_saver = tf.train.Saver(var_list)
        return policy_in, policy_out, stochastic

    def _init_bnn_trpo(self, bnn_model, training_policy, time_step):
        """Build the TRPO optimizer over the BNN-simulated env (or the real one).

        Returns (TRPO instance, vectorized cost function of the inner env).
        """
        if hasattr(self.env._wrapped_env, '_wrapped_env'):
            inner_env = self.env._wrapped_env._wrapped_env
        else:
            inner_env = self.env._wrapped_env.env.unwrapped
        cost_np_vec = inner_env.cost_np_vec
        batch_size = self.policy_opt_params["trpo"]["batch_size"]
        if bnn_model is not None:
            # Roll out in the learned dynamics model instead of the real env.
            bnn_env = TfEnv(BayesNeuralNetEnv(env=self.env,
                                              inner_env=inner_env,
                                              cost_np=cost_np_vec,
                                              bnn_model=bnn_model,
                                              sam_mode=None))
        else:
            bnn_env = self.env
        baseline = LinearFeatureBaseline(env_spec=self.env.spec)
        algo = TRPO(
            env=bnn_env,
            policy=training_policy,
            baseline=baseline,
            batch_size=batch_size,
            max_path_length=time_step,
            discount=self.policy_opt_params["trpo"]["discount"],
            step_size=self.policy_opt_params["trpo"]["step_size"],
            # sampler_args=sampler_args, # params for VectorizedSampler
        )
        return algo, cost_np_vec

    def _evaluate_cost_bnn_env(self, bnn_model, time_step, policy_training_init):
        """Roll the deterministic policy through each BNN ensemble member.

        Returns an array with one accumulated (undiscounted, gamma=1) cost
        per network in the ensemble.
        """
        if hasattr(self.env._wrapped_env, '_wrapped_env'):
            inner_env = self.env._wrapped_env._wrapped_env
        else:
            inner_env = self.env._wrapped_env.env.unwrapped
        cost_np = inner_env.cost_np
        gamma = 1.0
        _policy_costs = []
        for i in range(bnn_model.model.n_nets):
            x = policy_training_init
            _policy_cost = 0
            for t in range(time_step):
                u = np.clip(self.session_policy_out(x), *self.env.action_space.bounds)
                x_next, _ = bnn_model.predict(np.concatenate([x, u], axis=1),
                                              return_individual_predictions=True,
                                              model_idx=i)
                _policy_cost += (gamma ** t) * cost_np(x, u, x_next)
                # Move forward 1 step.
                x = x_next
            _policy_costs.append(_policy_cost)
        return np.array(_policy_costs)

    def get_action(self, observation, action_noise, **kwargs):
        """Deterministic policy action plus Gaussian exploration noise.

        ``kwargs['action_bounds']`` must be the (low, high) clip bounds.
        """
        if len(observation.shape) == 1:
            observation = observation[np.newaxis]
        # action = self.tf_sess.run(self.policy_out,
        #                           feed_dict={self.policy_in: observation})
        # action = self.session_policy_out(observation, stochastic=1.0)
        action = self.session_policy_out(observation, stochastic=0.0)
        # More noisy as t increases, max_var = 1.0
        n_particles, n_actions = action.shape
        action += action_noise * np.random.randn(n_particles, n_actions)
        return np.clip(action, *kwargs['action_bounds'])

    def optimize_policy(self):
        """Run TRPO iterations, checkpointing the best-validating policy.

        Tracks a validation cost (BNN-estimated when a dynamics model is
        present, otherwise measured in the real env) and restores the best
        checkpoint at the end. Returns (mean_validation_costs,
        real_validation_costs) logged across iterations.
        """
        iteration = self.policy_opt_params["max_iters"]
        cost_np_vec = self.cost_np_vec
        algo = self.algo_policy
        real_env = self.env
        """ Re-initialize Policy std parameters. """
        # After ``reset_non_increasing`` optimizations without improvement,
        # re-initialize the whole policy scope and start tracking afresh.
        if self.non_increase_counter == self.reset_non_increasing:
            self.tf_sess.run(tf.variables_initializer(tf.global_variables(self.scope_name)))
            self.non_increase_counter = 0
            self.min_validation_cost = np.inf
        logging.debug("Before reset policy std %s " %
                      np.array2string(np.exp(self.training_policy._l_std_param.param.eval()),
                                      formatter={'float_kind': '{0:.5f}'.format}))
        self.tf_sess.run([self.reset_op])
        """ Optimize policy via rllab. """
        min_iter = self.min_iters
        min_validation_cost = np.inf  # self.min_validation_cost
        min_idx = 0
        mean_validation_costs, real_validation_costs = [], []
        reset_idx = np.arange(len(self.policy_validation_reset_init))
        for j in range(iteration):
            # Validate on a random 5% subsample of the fixed reset states.
            np.random.shuffle(reset_idx)
            reset_val = reset_idx[:len(self.policy_validation_reset_init) // 20]
            algo.start_worker()
            with rllab_logger.prefix('itr #%d | ' % int(j + 1)):
                paths = algo.obtain_samples(j)
                samples_data = algo.process_samples(j, paths)
                algo.optimize_policy(j, samples_data)
            """ Do validation cost """
            if (j + 1) % self.policy_opt_params["log_every"] == 0:
                if self.bnn_model:
                    estimate_validation_cost = self._evaluate_cost_bnn_env(self.bnn_model,
                                                                           self.n_timestep,
                                                                           self.policy_validation_init)
                else:
                    estimate_validation_cost = evaluate_fixed_init_trajectories_2(
                        real_env,
                        self.session_policy_out,
                        self.policy_validation_reset_init[reset_val],
                        cost_np_vec, self.tf_sess,
                        max_timestep=self.n_timestep,
                        gamma=1.00,
                    )
                mean_validation_cost = np.mean(estimate_validation_cost)
                validation_cost = mean_validation_cost
                np.random.shuffle(reset_idx)
                # Cost measured in the real environment, for logging only.
                real_validation_cost = evaluate_fixed_init_trajectories_2(
                    real_env,
                    self.session_policy_out,
                    self.policy_validation_reset_init[reset_val],
                    cost_np_vec, self.tf_sess,
                    max_timestep=self.n_timestep,
                    gamma=1.00
                )
                real_validation_costs.append(real_validation_cost)
                mean_validation_costs.append(mean_validation_cost)
                logging.info('iter %d' % j)
                logging.info("%s\n"
                             "\tVal cost:\t%.3f\n"
                             "\tReal cost:\t%.3f\n" % (
                                 np.array2string(estimate_validation_cost, formatter={'float_kind': '{0:.5f}'.format}),
                                 validation_cost, real_validation_cost))
                """ Store current best policy """
                if validation_cost < min_validation_cost:
                    min_idx = j
                    min_validation_cost = validation_cost
                    # Save
                    logging.info('\tSaving policy')
                    self.policy_saver.save(self.tf_sess,
                                           os.path.join(self.log_dir, 'policy.ckpt'),
                                           write_meta_graph=False)
                # Early stop: no new best for min_iter iterations and the
                # current cost is clearly worse than the best.
                if j - min_idx > min_iter and mean_validation_cost - min_validation_cost > 1.0:  # tolerance
                    break
        """ Log and restore """
        logging.info("Stop at iteration %d and restore the current best at %d: %.3f"
                     % (j + 1, min_idx + 1, min_validation_cost))
        self.policy_saver.restore(self.tf_sess, os.path.join(self.log_dir, 'policy.ckpt'))
        min_real_cost = min(real_validation_costs)
        if min_real_cost < self.min_validation_cost:
            self.min_validation_cost = min_real_cost
            self.non_increase_counter = 0
        else:
            self.non_increase_counter += 1
        # Final evaluation over the *full* validation set in the real env.
        real_final_cost = evaluate_fixed_init_trajectories_2(
            real_env,
            self.session_policy_out,
            self.policy_validation_reset_init,
            cost_np_vec, self.tf_sess,
            max_timestep=self.n_timestep,
            gamma=1.00
        )
        real_validation_costs.append(real_final_cost)
        logging.info("Final Real cost: %.3f" % real_final_cost)
        logging.info("Best in all iters %.3f, non increasing in %d" %
                     (self.min_validation_cost, self.non_increase_counter))
        return mean_validation_costs, real_validation_costs
| [
"thobotics@gmail.com"
] | thobotics@gmail.com |
29ce2971da5b09e22031f2f7ec2487d1879401ac | 4acad432dc4ae82d3d964445e180c6de54b528ea | /animate.py | a574c2c07f7d3005b951a8eb2839bdd075e1cccd | [] | no_license | rahulrchandran/animate-videos | 1ad014338dc48a8790161df0cd6b6be58dca65ae | c410b7b9b52a541e0ed1d4eb23b9abdb687deb0b | refs/heads/main | 2022-12-20T15:33:16.956612 | 2020-10-07T07:46:50 | 2020-10-07T07:46:50 | 301,962,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,410 | py | import cv2
import numpy as np
import glob
import os
from os.path import isfile, join
class Cartoonizer:
    """Turns the image at a given path into a cartoon-styled picture."""

    def __init__(self):
        # How many pyrDown/pyrUp passes and bilateral-filter passes to apply.
        self.numDownSamples = 1
        self.numBilateralFilters = 7

    def render(self, img_rgb):
        """Load the image file named by *img_rgb* and return its cartoonized array."""
        source = cv2.imread(img_rgb)
        # Shrink the image, smooth it with repeated small bilateral filters
        # (cheaper than one large filter), then restore the original scale.
        smooth = source
        for _ in range(self.numDownSamples):
            smooth = cv2.pyrDown(smooth)
        for _ in range(self.numBilateralFilters):
            smooth = cv2.bilateralFilter(smooth, 9, 9, 7)
        for _ in range(self.numDownSamples):
            smooth = cv2.pyrUp(smooth)
        # Build an edge mask from a bilateral-blurred grayscale copy.
        gray = cv2.cvtColor(source, cv2.COLOR_RGB2GRAY)
        for _ in range(self.numBilateralFilters):
            blurred = cv2.bilateralFilter(gray, 9, 9, 7)
        edges = cv2.adaptiveThreshold(blurred, 255,
                                      cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                      cv2.THRESH_BINARY, 9, 5)
        # Back to 3 channels so it can be bit-ANDed with the color image.
        edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
        # pyrDown/pyrUp may change dimensions by a pixel; crop both images
        # to their common area before combining.
        rows = min(len(smooth), len(edges))
        cols = min(len(smooth[0]), len(edges[0]))
        return cv2.bitwise_and(smooth[0:rows, 0:cols], edges[0:rows, 0:cols])
def createVideo():
    """Assemble temp/*.jpg (in filename order) into project.avi at 15 fps."""
    # Sort the *filenames*, not the decoded frames: the original called
    # ``img_array.sort()`` on a list of numpy arrays, which raises
    # ValueError ("truth value of an array ... is ambiguous").
    filenames = sorted(glob.glob('temp/*.jpg'))
    img_array = []
    size = None
    for filename in filenames:
        img = cv2.imread(filename)
        height, width, layers = img.shape
        size = (width, height)
        img_array.append(img)
    if size is None:
        # No frames found; avoid creating an empty/invalid video file.
        return
    out = cv2.VideoWriter('project.avi', cv2.VideoWriter_fourcc(*'DIVX'), 15, size)
    for i in range(len(img_array)):
        # Log the file being written (the original printed the raw array).
        print(filenames[i])
        out.write(img_array[i])
    out.release()
def convert_frames_to_video(pathIn1, pathIn2, pathOut, fps):
    """Interleave frames from two folders into one video file.

    Each frame from *pathIn1* is written 6 times, then the matching frame
    from *pathIn2* 6 times; remaining frames of *pathIn2* are appended at
    the end. Frames are read in numeric filename order (name without its
    4-character extension is parsed as an int).
    """
    def _load_frames(folder):
        # Shared loading pass — this code was duplicated verbatim for both
        # input folders in the original.
        frames = []
        size = None
        files = [f for f in os.listdir(folder) if isfile(join(folder, f))]
        # for sorting the file names properly
        files.sort(key=lambda x: int(x[0:-4]))
        for name in files:
            filename = folder + name
            img = cv2.imread(filename)
            height, width, layers = img.shape
            size = (width, height)
            print(filename)
            frames.append(img)
        return frames, size

    frame_array1, size1 = _load_frames(pathIn1)
    frame_array2, size2 = _load_frames(pathIn2)
    # The original used the size of the last frame read (i.e. from path 2).
    size = size2 or size1
    out = cv2.VideoWriter(pathOut, cv2.VideoWriter_fourcc(*'MP4V'), fps, size)
    print(len(frame_array1))
    print(len(frame_array2))
    j = 0
    for i in range(len(frame_array1)):
        # 6 repeats of each frame slow the playback of both streams.
        for _ in range(6):
            out.write(frame_array1[i])
        for _ in range(6):
            out.write(frame_array2[i])
        j = i
    # NOTE(review): this starts at the last interleaved index, so
    # frame_array2[j] is written once more before the remainder — kept
    # as-is to preserve the original output exactly.
    while j < len(frame_array2):
        out.write(frame_array2[j])
        j = j + 1
    out.release()
'''
cartoonize an image
tmp_canvas = Cartoonizer()
image = "div.jpg" #File_name will come here
image = tmp_canvas.render(image)
cv2.imwrite('div_out.jpg',image)
cv2.destroyAllWindows()
'''
#entries = os.listdir('before/')
#for entry in entries:
# print(entry)
#cap= cv2.VideoCapture('./before/'+entry)
'''
# input is a video file -- extract each frame as a jpeg image
cap= cv2.VideoCapture('david.mp4')
i=0
j=0
#tmp_canvas = Cartoonizer()
while(cap.isOpened()):
ret, frame = cap.read()
i+=1
if ret == False:
break
#frame = tmp_canvas.render(frame)
cv2.imwrite('temp/david/'+str(j)+'.jpg',frame)
j+=1
cap.release()
'''
'''
# To read images from folder and convert it to a video
pathIn1 = './temp/tony/'
pathIn2 = './temp/david/'
pathOut = './after/IM.mp4'
fps = 29.97
convert_frames_to_video(pathIn1, pathIn2, pathOut, fps)
#createVideo()
#cv2.destroyAllWindows()
#include<iostream>
'''
# Re-encode div_1.mp4 to ./div_after.mp4 at 25 fps.
cap = cv2.VideoCapture('div_1.mp4')
pathOut = './div_after.mp4'
fps = 25
out = None
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # End of stream: cap.read() returns (False, None). Without this
        # check the original crashed on ``frame.shape`` after the last frame.
        break
    height, width, layers = frame.shape
    size = (width, height)
    if out is None:
        # Lazily create the writer once the frame size is known.
        out = cv2.VideoWriter(pathOut, cv2.VideoWriter_fourcc(*'MP4V'), fps, size)
    out.write(frame)
if out is not None:
    out.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
1205d3d16e5a6074d8edaa6eaa57bd2a4e540189 | 1041799995b29f8cb0138d57f94b6ebf649cb86d | /file.py | f161d03ebb1b5c28330addf1d456b3912fc932d7 | [] | no_license | sriison/python_basics | 484a4ef6d7f6466e24b85da2af9d4d976e46e4b0 | 4491ced7d9cb7cfc2e6d622cb7c36c816b4bdb50 | refs/heads/master | 2020-12-23T06:50:42.666498 | 2020-03-04T12:01:35 | 2020-03-04T12:01:35 | 237,073,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | import os
# Earlier append experiment, kept for reference:
# file = open("srini.txt",'r')
# file.write("forth line added\n")
# file.write("fifth line added\n")
# file.close()
# Read via a context manager so the handle is closed even if the read or
# print fails, and avoid shadowing the builtin name ``str``.
with open("srini.txt", "r+") as handle:
    contents = handle.read()
print(contents[0])
os.remove("srini.txt")
| [
"sriison@gmail.com"
] | sriison@gmail.com |
b461c2f44b87c7937d92dd2c038a59e357d0657b | e242d30d9c3398a88ae50a6990760ab63e426b70 | /website/talleres/taller_2/migrations/0001_initial.py | becc107f2a40942aabd56052e37799cfed8188b2 | [] | no_license | minigonche/sistemas_de_recomendacion_2020 | b6a82a163c4a80d5cb7c2f5dadfae68744099db9 | 645bba398c033ff2063958b0be60d4bd4d27cab2 | refs/heads/master | 2021-01-02T12:09:00.480070 | 2020-05-09T14:07:59 | 2020-05-09T14:07:59 | 239,615,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,411 | py | # Generated by Django 2.2.5 on 2020-05-01 15:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: Business, Review and User tables for the Yelp-style data."""

    initial = True
    dependencies = [
    ]
    operations = [
        # Businesses with location, category and aggregate rating fields.
        migrations.CreateModel(
            name='Business',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.CharField(max_length=40)),
                ('business_id', models.CharField(max_length=40)),
                ('categories', models.CharField(max_length=40)),
                ('city', models.CharField(max_length=40)),
                ('is_open', models.IntegerField(null=True)),
                ('latitude', models.FloatField(null=True)),
                ('longitude', models.FloatField(null=True)),
                ('name', models.CharField(max_length=40)),
                ('postal_code', models.CharField(max_length=40)),
                ('review_count', models.IntegerField(null=True)),
                ('stars', models.FloatField(null=True)),
                ('state', models.CharField(max_length=40)),
            ],
        ),
        # Reviews link users and businesses by their external string ids
        # (plain CharFields, not foreign keys).
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.CharField(max_length=40)),
                ('business_id', models.CharField(max_length=40)),
                ('last_review', models.DateField(null=True)),
                ('year_review', models.PositiveIntegerField(null=True)),
                ('stars', models.PositiveIntegerField(null=True)),
                ('attractive_count', models.PositiveIntegerField(null=True)),
                ('review_id', models.CharField(max_length=40, null=True)),
            ],
        ),
        # Users with per-type compliment counters and activity aggregates.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('average_stars', models.FloatField(null=True)),
                ('compliment_cool', models.IntegerField(null=True)),
                ('compliment_cute', models.IntegerField(null=True)),
                ('compliment_funny', models.IntegerField(null=True)),
                ('compliment_hot', models.IntegerField(null=True)),
                ('compliment_list', models.IntegerField(null=True)),
                ('compliment_more', models.IntegerField(null=True)),
                ('compliment_note', models.IntegerField(null=True)),
                ('compliment_photos', models.IntegerField(null=True)),
                ('compliment_plain', models.IntegerField(null=True)),
                ('compliment_profile', models.IntegerField(null=True)),
                ('compliment_writer', models.IntegerField(null=True)),
                ('cool', models.IntegerField(null=True)),
                ('fans', models.IntegerField(null=True)),
                ('funny', models.IntegerField(null=True)),
                ('name', models.CharField(max_length=40)),
                ('review_count', models.IntegerField(null=True)),
                ('useful', models.IntegerField(null=True)),
                ('user_id', models.CharField(max_length=40)),
                ('yelping_since', models.DateField(null=True)),
            ],
        ),
    ]
| [
"minigonche@gmail.com"
] | minigonche@gmail.com |
3da22ec7f9fd090ed4375400c5ac1f5bbde09816 | 2be2fcb96de91621a152241843db340b917ce6cb | /src/parsers.py | 26c85178a44ea128f9e263b2f6438f7e58b7bc91 | [] | no_license | chebbit/scraper | 21e3878193ba795cd73e3db57a1dd74d708e0a94 | ea54a9d06dc0973bbe86f3667e3e880c696c1fd4 | refs/heads/master | 2022-12-14T09:09:15.126841 | 2020-01-08T10:05:54 | 2020-01-08T10:05:54 | 232,534,620 | 0 | 0 | null | 2022-12-08T03:24:28 | 2020-01-08T10:09:25 | Python | UTF-8 | Python | false | false | 1,278 | py | from bs4 import BeautifulSoup
from html2text import HTML2Text
from abc import ABC, abstractmethod
class ABCParser(ABC):
    """Interface for HTML cleaners: one classmethod turning raw HTML into text."""

    @classmethod
    @abstractmethod
    def cleaned_data(cls, text: str) -> str:
        ...
class ReutersParser(ABCParser):
    """Extracts the plain-text headline and body of a reuters.com article."""

    headline_block = ('h1', {'class': 'ArticleHeader_headline'})
    body_block = ('div', {'class': 'StandardArticleBody_body'})
    garbage_blocks = [
        ('div', {'class': 'RelatedCoverage_related-coverage-module'}),
    ]

    @classmethod
    def cleaned_data(cls, text: str) -> str:
        """Return the article headline and body as clean text."""
        document = BeautifulSoup(text, features="html.parser")
        # Drop boilerplate sections before extracting the article text.
        for selector in cls.garbage_blocks:
            junk = document.find(*selector)
            if junk:
                junk.clear()
        headline = document.find(*cls.headline_block)
        body = document.find(*cls.body_block)
        # Strip the remaining HTML markup, ignoring images and links.
        converter = HTML2Text()
        converter.ignore_images = True
        converter.ignore_links = True
        return converter.handle(str(headline) + str(body))
| [
"nikolaev.segroup@gmail.com"
] | nikolaev.segroup@gmail.com |
e170237f3ca1ca16746589e235b82415a84693a4 | c39c8ca66c7046c87d45d6544697e1d2e17963c0 | /sy/net/__init__.py | 0053e6f3bc3eadd7a8c65536d21bebe19e11ce6e | [
"BSD-3-Clause"
] | permissive | afajl/sy | 20bb9d8390bf1c972726742313e3e6907bd27f4e | 04efc8f98d64eddebd15ab30c28133631872bd20 | refs/heads/master | 2021-01-20T21:24:14.320094 | 2011-06-14T17:58:20 | 2011-06-14T17:58:20 | 841,197 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | import sy
# sub modules
import ip
import intf
import remote
# sub functions
from remote import sendmail, download
| [
"p@feedem.net"
] | p@feedem.net |
bc72522978e6d458c5412f2e2b041154e7366c33 | 91087607cf165e2cd743ac844f1e8fc1bc09224a | /Triangle_Begin.py | 9a1b2235776aad2c1db16bc69c9eac2a1cdb96e3 | [] | no_license | cullorsvm/python-community | 9f4d19a89bfc015b5934abd27cda6044fb3568b7 | 22b2507e44e6a50c3321bd747492dec88f402bb0 | refs/heads/master | 2020-12-25T03:00:43.747840 | 2018-08-15T00:50:54 | 2018-08-15T00:50:54 | 51,409,295 | 4 | 4 | null | 2016-03-04T02:46:04 | 2016-02-09T23:42:45 | null | UTF-8 | Python | false | false | 1,302 | py | ##Python program to determine the classification type of triangle
## by inputting 3 side lengths.
## Aug 2018
#TEST PLAN to run your program to check your code:
#test for first block
#i=0,j=0,k=0 -> if any side = 0 then 4, end program
#test for second block
#i=1,j=1,k=1 -> if all sides are same then 6
#i=1,j=2,k=3 -> if all sides diff then 0
#i=1,j=1,k=2 -> 1
#i=1,j=2,k=1 -> 2
#i=2,j=1,k=1 -> 3
#i=2,j=2,k=1 -> 3
#test for third block when tri == 0:
#i=5,j=1,k=2 -> TRUE --> 4
#i=2,j=3,k=4 -> FALSE --> 1
#i=3,j=4,k=6 -> FALSE --> 1
#i=7,j=3,k=4 -> TRUE --> 4
#test for fourth block of code
#i=1,j=1,k=1 -> (Tri = 6) = 3
#i=2,j=2,k=3 -> (Tri == 1 and T) = 2
#i=2,j=3,k=2 -> (Tri == 2 and T) = 2
#i=1,j=2,k=2 -> (Tri == 3 and T) = 2
#i=2,j=1,k=1 -> (Tri == 3 and F) = 4
def triangle_classification_algorithm(i, j, k):
    """Classify a triangle from its three side lengths.

    Returns (per the result table in this file):
        1 -- scalene (all sides different)
        2 -- isosceles (exactly two sides equal)
        3 -- equilateral (all sides equal)
        4 -- the lengths cannot form a triangle
    """
    # A non-positive side, or failing the triangle inequality (the two
    # shorter sides must sum to MORE than the longest), is no triangle.
    a, b, c = sorted((i, j, k))
    if a <= 0 or a + b <= c:
        return 4
    if i == j == k:
        return 3
    if i == j or j == k or i == k:
        return 2
    return 1
##MAIN PROGRAM
#result table
# 1 --> scalene triangle
# 2 --> isosceles triangle
# 3 --> equilateral triangle
# 4 --> side lengths cannot form a triangle
# Read the three side lengths, classify them, and print the result code
# (1 scalene, 2 isosceles, 3 equilateral, 4 not a triangle).
i= int(input("Enter Lenght 1: "))
j= int(input("Enter Lenght 2: "))
k= int(input("Enter Lenght 3: "))
print (triangle_classification_algorithm (i,j,k))
print("End of script.")
| [
"noreply@github.com"
] | noreply@github.com |
1962137d2e58710043d0fd6cf9a72dad38216214 | 5f300418ce1605eb41276b0a9a79fe5f46fa8198 | /users/queries.py | 5563883ddbbe4db0399fe5fea0dadfdbd3b0b71f | [] | no_license | Academia-MagicKode/FastAPI | daaeea85152717a484a32309acf77be92435b53d | 139942870a5ee76a1e29bcbfb5d1262af0b2a832 | refs/heads/master | 2023-05-09T02:03:45.804484 | 2021-05-29T14:37:47 | 2021-05-29T14:37:47 | 371,967,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,000 | py | from passlib.context import CryptContext
from typing import Optional
from sqlalchemy.orm import Session
from starlette.status import HTTP_400_BAD_REQUEST
from config.database import get_db
from fastapi.security import OAuth2PasswordBearer
from fastapi import HTTPException, status, Depends
from datetime import timedelta , datetime
from jose import JWTError, jwt
import os
import re
from .schema import TokenData
from .models import User
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
SECRET_KEY = os.environ["SECRET_KEY"]
ALGORITHM = os.environ["ALGORITHM"]
ACCESS_TOKEN_EXPIRE_MINUTES = 1440
#---------Hashing password---------------------------------------------------
def verify_password(plain_password, hashed_password):
    """True if *plain_password* matches *hashed_password* (module-level bcrypt context)."""
    return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
    """Return the bcrypt hash of *password* using the module-wide context."""
    hashed = pwd_context.hash(password)
    return hashed
#--------USER MODEL QUERIES-----------------------------------------------------------
def create(request, db: Session = Depends(get_db)):
    """Persist a new User built from *request*.

    Raises HTTP 400 when the email is malformed or the insert fails
    (treated as a username/email uniqueness violation).
    """
    check_valid_email(request.email)
    try:
        account = User(
            username=request.username,
            email=request.email,
            password=get_password_hash(request.password),
        )
        db.add(account)
        db.commit()
    except Exception:
        # Any commit failure is reported as a duplicate, matching the
        # original behaviour.
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                            detail="username or email already exists")
    db.refresh(account)
    return account
def detail(id, db: Session = Depends(get_db)):
    """Return the User with primary key *id*, or raise HTTP 404."""
    record = db.query(User).filter(User.id == id).first()
    if record:
        return record
    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                        detail=f"User not found id:{id}")
def check_valid_email(email: str):
    """Validate *email* against a simple lowercase-ASCII address pattern.

    Returns None on success; raises HTTP 400 when the address does not
    match. Note the pattern only accepts lowercase letters.
    """
    # Raw string literal: the original non-raw literal relied on Python
    # leaving unknown escapes such as "\." intact, which emits a
    # DeprecationWarning on modern interpreters.
    pattern = r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$'
    if re.match(pattern, email) is None:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                            detail="Invalid Email Address")
#-------AUTHENTICATION------------------------------------------------------------
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
    """Return a signed JWT carrying *data* plus an ``exp`` expiry claim.

    When *expires_delta* is falsy, a 15-minute lifetime is used.
    """
    payload = dict(data)  # leave the caller's mapping untouched
    lifetime = expires_delta if expires_delta else timedelta(minutes=15)
    payload.update({"exp": datetime.utcnow() + lifetime})
    return jwt.encode(payload, SECRET_KEY, algorithm=ALGORITHM)
def authenticate(username, password, db: Session = Depends(get_db)):
    """Verify credentials and return a bearer-token payload.

    Raises HTTP 404 when the user does not exist or the password is wrong.
    """
    account = db.query(User).filter(User.username == username).first()
    if not account:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"User not found with username: {username}")
    if not verify_password(password, account.password):
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"Incorrect password for username: {username}")
    token = create_access_token(
        data={"sub": account.username},
        expires_delta=timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES),
    )
    return {"access_token": token, "token_type": "bearer"}
async def get_current_user(db: Session = Depends(get_db), token: str = Depends(oauth2_scheme)):
    """Resolve the bearer *token* to its User row.

    Raises HTTP 401 when the token cannot be decoded, carries no subject,
    or does not correspond to an existing user.
    """
    unauthorized = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )
    try:
        claims = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        subject = claims.get("sub")
        if subject is None:
            # Not a JWTError, so this propagates straight out of the try.
            raise unauthorized
        token_data = TokenData(username=subject)
    except JWTError:
        raise unauthorized
    account = db.query(User).filter(User.username == token_data.username).first()
    if not account:
        raise unauthorized
    return account
| [
"magickode7@gmail.com"
] | magickode7@gmail.com |
91c25e9e1439da3790676816b093c0f9a27f9de5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03559/s351538346.py | 07b30fc56f836903884b21d2c40f39a3645029b7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | import bisect
# Count triples (A, B, C) with A from a, B from b, C from c and A < B < C.
N=int(input().strip())
a=list(map(int, input().split()))
b=list(map(int, input().split()))
c=list(map(int, input().split()))
a.sort()
#b.sort(reverse=True)
c.sort()
ans=0
#for i in range(len(b)):
#    la=bisect.bisect_left(a, b[i])
#    ra=bisect.bisect_right(c, b[i])
#    ans+=la*(len(c)-ra)
#    print(la,(len(c)-ra))
for i in range(len(b)):
    # la: number of a-values strictly below b[i];
    # len(c) - ra: number of c-values strictly above b[i].
    la=bisect.bisect_left(a, b[i])
    ra=bisect.bisect_right(c, b[i])
    #print(la*(len(c)-ra))
    ans+=la*(len(c)-ra)
#print(ans)
#la=bisect.bisect_left(a, 8)
#ra=bisect.bisect_right(a, 8)
#print(la,len(a)-ra)
print(ans)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
22cf102caef9ddc4f6356032fb4a19c3ba880ae9 | 0acfaf0b296d60543fe0f0357887f6834601a4d4 | /posts/models.py | 907bad2f9b9be0161c6688452c13693aa0f72163 | [
"MIT"
] | permissive | haTrang061299/mb_project | 51d8f0c7005929f269f7badaf417690aa6f2215c | a3152ab2cf206b2575d80783944f6f04a1b480c4 | refs/heads/master | 2020-04-09T15:00:55.803865 | 2018-12-04T20:17:43 | 2018-12-04T20:17:43 | 151,310,595 | 0 | 0 | null | 2018-10-04T16:54:46 | 2018-10-02T19:22:55 | Python | UTF-8 | Python | false | false | 143 | py | from django.db import models
class Post(models.Model):
    """A single free-text post."""

    text = models.TextField()

    def __str__(self):
        # Show only the first 50 characters in admin/listing contexts.
        preview = self.text[:50]
        return preview
| [
"noreply@github.com"
] | noreply@github.com |
c4b7a470507b0ce2d15d37a30a866863b40666d2 | fb50b04d429530ec2759dfabebd1ed733cbcaea3 | /utility/hash_util.py | 8e6df1e3bf626a759e693e553fdac28608195d5f | [] | no_license | dxnter/blockchain | c5565590316a3786f30449e90d1e294591c70b99 | 44eb610b1317b57fc44692925bae21772147ebbb | refs/heads/master | 2020-03-28T08:32:12.842778 | 2018-09-25T18:39:39 | 2018-09-25T18:39:39 | 147,971,934 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | import hashlib as hl
import json
def hash_string_256(string):
"""Create a SHA256 hash for a given input string.
Arguments:
:string: The string which should be hashed.
"""
return hl.sha256(string).hexdigest()
def hash_block(block):
"""Hashes a block and returns a string representation of it.
Arguments:
:block: The block that should be hashed.
"""
hashable_block = block.__dict__.copy()
hashable_block['transactions'] = [tx.to_ordered_dict()
for tx in hashable_block['transactions']]
return hash_string_256(json.dumps(hashable_block, sort_keys=True).encode())
| [
"contact@danfoster.io"
] | contact@danfoster.io |
6b0f05b24305838a791d7539f7b5f6e7fa6c8395 | 850d778687e3692ab2a38d4d2227391d92c21e6b | /atcoder.jp/arc008/arc008_4/Main.py | 1bc7f5184d2c991ab49a12ce1a26ad20d78090fc | [] | no_license | Valkyrja3607/AtCoder | 77e2e5e66c0e8e12bb902c35f679119c6576fad7 | 9218a50b1eb83e4498845d15d9dda41fab90ed73 | refs/heads/master | 2023-07-15T20:38:52.911301 | 2018-05-30T17:56:22 | 2018-05-30T17:56:22 | 294,980,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | n,m=[int(j) for j in input().split()]
pab=[[float(j) for j in input().split()] for i in range(m)]
ll=[]
for p,a,b in pab:
    ll.append(p)
ll.sort()
from collections import Counter
l=Counter()
for i in range(m):
    # Rank of each sorted key p; used as the segment-tree leaf index.
    l[ll[i]]=i
# Change leaf k's linear map to y = x*a + b, in O(log N).
def update(k,x,y):
    k += num
    seg_min[k] = x
    seg_max[k] = y
    while k>1:
        k //= 2
        # Each internal node stores the composition of its children's
        # linear maps: (a2*x + b2) applied after (a1*x + b1)
        # gives a1*a2*x + (b1*a2 + b2).
        seg_min[k] = seg_min[k*2]*seg_min[k*2+1]
        seg_max[k] = seg_max[k*2]*seg_min[k*2+1]+seg_max[k*2+1]
num=2**(m-1).bit_length()
seg_min=[1]*2*num  # multiplicative coefficients (identity map: a = 1)
seg_max=[0]*2*num  # additive offsets (identity map: b = 0)
ans1=1
ans2=1
for p,a,b in pab:
    update(l[p],a,b)
    # The root holds the fully composed map; evaluate it at x = 1.
    t=seg_min[1]+seg_max[1]
    ans1=min(ans1,t)
    ans2=max(ans2,t)
print(ans1)
print(ans2)
| [
"purinjolly@gmail.com"
] | purinjolly@gmail.com |
0f8f9412df4f2e0e7c0c0310a5cb36e26229891d | c781f71ee4323f2df637043e08c422e5340fb2fa | /doc-generator/tests/test_version_added.py | 371248b7cdd202734680a6c07845031ccbf675b1 | [
"BSD-3-Clause"
] | permissive | digideskio/Redfish-Tools | 4a4a4e61bb617d04643c304a5ed7ca40c015935d | 9b6597225647f447519cdc6011166c1ed6b6e415 | refs/heads/master | 2020-04-23T06:39:24.956353 | 2019-02-07T21:34:45 | 2019-02-07T21:34:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,490 | py | # Copyright Notice:
# Copyright 2018 Distributed Management Task Force, Inc. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/Redfish-Tools/blob/master/LICENSE.md
"""
File: test_version_added.py
Brief: test(s) for correct detection of "Version Added".
"""
import os
import copy
from unittest.mock import patch
import pytest
from doc_generator import DocGenerator
from .discrepancy_list import DiscrepancyList
testcase_path = os.path.join('tests', 'samples')
# Baseline doc_generator configuration shared by the tests below;
# each test deep-copies it before adding test-specific entries.
base_config = {
    'expand_defs_from_non_output_schemas': False,
    'excluded_by_match': ['@odata.count', '@odata.navigationLink'],
    'profile_resources': {},
    'units_translation': {},
    'excluded_annotations_by_match': ['@odata.count', '@odata.navigationLink'],
    'excluded_schemas': [],
    'excluded_properties': ['@odata.id', '@odata.context', '@odata.type'],
    'uri_replacements': {},
    'profile': {},
    'escape_chars': [],
    'output_format': 'markdown',
}
@patch('urllib.request') # so we don't make HTTP requests. NB: samples should not call for outside resources.
def test_version_added_metadata(mockRequest):
    """ Verify metadata contains expected version_added info.
    Note that there is an additional step, after generating this metadata, for generating metadata
    within property data ... so possibly this test should be replaced.
    """
    config = copy.deepcopy(base_config)
    input_dir = os.path.abspath(os.path.join(testcase_path, 'version_added', 'AccountService'))
    # This is a partial list of versions that should be detected.
    expected_versions = {
        'AccountLockoutThreshold': {},
        'LDAP': {'version': '1.3.0'},
        'LocalAccountAuth': {'version': '1.3.0'},
        'PrivilegeMap': {'version': '1.1.0'},
        'Actions': {'version': '1.2.0',
                    'Oem': { 'version': '1.2.0'},
                    },
        'AdditionalExternalAccountProviders': { 'version': '1.3.0' },
        'definitions': { 'AccountProviderTypes': {'enum': {'ActiveDirectoryService': {'version': '1.3.0'},
                                                           'RedfishService': {'version': '1.3.0'},
                                                           'OEM': {'version': '1.3.0'},
                                                           'LDAPService': {'version': '1.3.0'},
                                                           },
                                                  'version': '1.3.0',
                                                  },
                         # WORKAROUND for properties incorrectly included in errata versions:
                         # 'Actions': { 'version': '1.2.2',
                         #              'Oem': { 'version': '1.2.2' },
                         #              },
                         'Actions': { 'version': '1.3.0',
                                      'Oem': { 'version': '1.3.0' },
                                      },
                         'LDAPSearchSettings': { 'version': '1.3.0',
                                                 'BaseDistinguishedNames': {'version': '1.3.0'},
                                                 },
                         'AccountService': { 'LDAP': { 'version': '1.3.0' },
                                             'LocalAccountAuth': { 'version': '1.3.0' },
                                             'AccountLockoutThreshold': { },
                                             'PrivilegeMap': { 'version': '1.1.0'},
                                             'Actions': { 'version': '1.2.0' }
                                             },
                         }
        }
    # Point the generator at the local sample schemas (no network access).
    config['uri_to_local'] = {'redfish.dmtf.org/schemas/v1': input_dir}
    config['local_to_uri'] = { input_dir : 'redfish.dmtf.org/schemas/v1'}
    docGen = DocGenerator([ input_dir ], '/dev/null', config)
    output = docGen.generate_docs()
    meta = docGen.property_data['redfish.dmtf.org/schemas/v1/AccountService.json']['doc_generator_meta']
    # Recursively compare expectations against the generated metadata,
    # collecting human-readable mismatch descriptions.
    discrepancies = DiscrepancyList()
    for name, data in expected_versions.items():
        if name == 'version': continue
        _version_compare(meta, name, data, discrepancies, [])
    assert [] == discrepancies
def _version_compare(meta, name, data, discrepancies, context):
context = copy.copy(context)
context.append(name)
key_meta = meta.get(name)
if key_meta is None:
discrepancies.append(' > '.join(context) + ' not found in metadata')
else:
if data.get('version', '1.0.0') != key_meta.get('version', '1.0.0'):
discrepancies.append(' > '.join(context) + ' version is "' + key_meta.get('version', '(none)') + '", expected "'
+ data.get('version', '1.0.0') + '"')
for childname, childdata in data.items():
if childname == 'version': continue
_version_compare(key_meta, childname, childdata, discrepancies, context)
@patch('urllib.request') # so we don't make HTTP requests. NB: samples should not call for outside resources.
def test_version_added_output_AccountService(mockRequest):
    """ Verify markdown output contains expected version_added info.
    This means pulling the correct version strings from the metadata
    """
    config = copy.deepcopy(base_config)
    input_dir = os.path.abspath(os.path.join(testcase_path, 'version_added', 'AccountService'))
    # Markdown fragments that must appear verbatim in the generated doc.
    expected_version_strings = [ '**LDAP** *(v1.3+)*', '**LDAPService** {',
                                 '**LocalAccountAuth** *(v1.3+)*',
                                 '**PrivilegeMap** *(v1.1+)*', '**Actions** *(v1.2+)*'
                                 ]
    config['uri_to_local'] = {'redfish.dmtf.org/schemas/v1': input_dir}
    config['local_to_uri'] = { input_dir : 'redfish.dmtf.org/schemas/v1'}
    docGen = DocGenerator([ input_dir ], '/dev/null', config)
    output = docGen.generate_docs()
    discrepancies = DiscrepancyList()
    for expected in expected_version_strings:
        if expected not in output:
            discrepancies.append('"' + expected + '" not found')
    assert [] == discrepancies
@patch('urllib.request') # so we don't make HTTP requests. NB: samples should not call for outside resources.
def test_version_added_output_Chassis(mockRequest):
    """ Verify markdown output contains expected version_added info.
    This means pulling the correct version strings from the metadata.
    The Chassis example gave us some distinct scenarios.
    """
    config = copy.deepcopy(base_config)
    input_dir = os.path.abspath(os.path.join(testcase_path, 'version_added', 'Chassis'))
    # Markdown fragments that must appear verbatim in the generated doc.
    expected_version_strings = ['| **Actions** { |', '| **Links** { |', # string to match property without version
                                # WORKAROUND for properties incorrectly included in errata versions:
                                # '**PowerState** *(v1.0.1+)*'
                                '**PowerState** *(v1.1+)*']
    config['uri_to_local'] = {'redfish.dmtf.org/schemas/v1': input_dir}
    config['local_to_uri'] = { input_dir : 'redfish.dmtf.org/schemas/v1'}
    docGen = DocGenerator([ input_dir ], '/dev/null', config)
    output = docGen.generate_docs()
    discrepancies = DiscrepancyList()
    for expected in expected_version_strings:
        if expected not in output:
            discrepancies.append('"' + expected + '" not found')
    assert [] == discrepancies
"afarrell@secondrise.com"
] | afarrell@secondrise.com |
cb578ad64b97aa30543f73ee4a63efd055b2b249 | 6331677339411ef3bcdd08d6de6b42c662354ca8 | /com/subhash/hackerrank/ai/botbuilding/BotSavesPrinces.py | 000e4751cf1f3d0c34ec37bc941d7a84afae72f7 | [] | no_license | subhashtodkari/py_tutorials | 6183dc5b8459f26f8a3b39af14615a13201d113d | 7ca63dbf8f34c96d92c92107794bfa6578f51d1f | refs/heads/master | 2020-07-06T16:08:50.433461 | 2020-07-05T13:38:05 | 2020-07-05T13:38:05 | 203,076,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py |
def displayPathtoPrincess(n, grid):
    """Print UP/DOWN then LEFT/RIGHT moves taking the bot (grid centre)
    to the princess 'p' (HackerRank "Bot saves princess").

    NOTE(review): pX/pY are only assigned when 'p' is found in the first
    or last column, so this assumes the princess sits in a corner (as the
    original puzzle guarantees); otherwise a NameError would follow --
    confirm against the caller's input contract.
    """
    # 1-based centre coordinates of the (odd) n x n grid.
    mX = int(n / 2) + 1
    mY = mX
    # print("{}, {}".format(mX, mY))
    for i in range(1, n + 1):
        line = grid[i-1]
        lineLne = len(line)
        if line[0] == 'p':
            pX = 1
            pY = i
        elif line[lineLne - 1] == 'p':
            pX = n
            pY = i
    # print("{}, {}".format(pX, pY))
    xDir = "LEFT" if pX < mX else "RIGHT"
    yDir = "UP" if pY < mY else "DOWN"
    # For odd n, n - mX equals the centre-to-corner distance per axis.
    for i in range(mX, n):
        print(yDir)
    for i in range(mX, n):
        print(xDir)
# Driver: read the grid size and rows, then print the path.
m = int(input())
grid = []
for i in range(0, m):
    grid.append(input().strip())
displayPathtoPrincess(m, grid)
| [
"subhashtodkari@gmail.com"
] | subhashtodkari@gmail.com |
bdf4dbf77e299f9f907fbfee7fb51c087cc5d369 | af42e539576474a634815d06397120fb5ffa87a4 | /Q6.py | 005d82500b056cc5e9627ebe909833e5dff5ad0a | [] | no_license | laithtareq/Quiz | 11b22ae222594d80f8be45ada29f08393b8ad860 | d540d6fb103c43a8abaa8e640e1897163914db73 | refs/heads/master | 2022-12-28T11:26:07.848560 | 2020-10-03T12:36:10 | 2020-10-03T12:36:10 | 297,966,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py |
'''
Question 6
The fractional_part function divides the numerator by the denominator,
and returns just the fractional part (a number between 0 and 1).
Complete the body of the function so that it returns the right number.
Note: Since division by 0 produces an error, if the denominator is 0,
the function should return 0 instead of attempting the division.
'''
def fractional_part(numerator, denominator):
    """Return the fractional part of numerator / denominator.

    As the exercise statement above requires, division by zero is never
    attempted: when *denominator* is 0 the function returns 0 instead.
    (The original code performed the division unconditionally and raised
    ZeroDivisionError for a 0 denominator.)
    """
    if denominator == 0:
        return 0
    quotient = numerator / denominator
    # Subtract the integer part to keep only the fraction.
    return quotient - int(quotient)


print(fractional_part(5, 5)) # Should be 0
print(fractional_part(5, 4)) # Should be 0.25
print(fractional_part(5, 3)) # Should be 0.66...
print(fractional_part(5, 2)) # Should be 0.5
print(fractional_part(5, 0)) # Should be 0
print(fractional_part(0, 5)) # Should be 0
"laith96.t@gmail.com"
] | laith96.t@gmail.com |
677d130c4fd8ad52ce8ea7898bfb1738ec5fb4db | 529c3dd901bd15118b73cefb2f418ad451fdd913 | /pyqt/bilesenler.py | 0306a2f01277dfecf3df9a10816569a5fab4de48 | [] | no_license | sevketcakir/bbgk2020 | 080b86c01c31dfd781a013365eecded31f61e6af | 87b61ea1844262c240e1c520a8a3a6299029c65c | refs/heads/main | 2023-02-13T13:48:59.065059 | 2021-01-05T08:40:28 | 2021-01-05T08:40:28 | 307,322,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from PyQt5 import uic
from PyQt5.QtWidgets import QMainWindow, QApplication
class UI(QMainWindow):
    """Main window loaded from the Qt Designer file ``bilesenler.ui``."""
    def __init__(self):
        super(UI, self).__init__()
        uic.loadUi('bilesenler.ui', self)
        # Populate the gender combo box defined in the .ui file
        # (items are Turkish: 'Kadın' = female, 'Erkek' = male).
        self.cinsiyet.addItems(['Kadın', 'Erkek'])
        self.show()
if __name__ == '__main__':
    app = QApplication([])
    w = UI()
    app.exec_()
"cakiaccounts@gmail.com"
] | cakiaccounts@gmail.com |
fc13bca1882e5fb5e2d69812e298f8a8ac00c19f | 1244d7d75a8bee555af4f1cecefe1890c93d8766 | /h.py | 382e8955d9028f64d786f025ad3925d7de01c885 | [] | no_license | puspita-sahoo/hacker_rank | 7725e3e6722ac4716dccac3b86e3a18aa73b846f | d5a9e0aff78d07a2474918303b82b33fc02598c9 | refs/heads/master | 2023-07-29T14:55:04.435766 | 2021-09-13T17:06:57 | 2021-09-13T17:06:57 | 406,060,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | class Person:
def __init__(self,initialAge):
# Add some more code to run some checks on initialAge
if initialAge > 0:
self.age = initialAge
else:
self.age = 0
print("Age is not valid, setting age to 0.")
def amIOld(self):
# Do some computations in here and print out the correct statement to the console
if self.age <= 13:
print("You are young.")
elif self.age > 13 and self.age < 18:
print("You are a teenager.")
else:
print("You are old.")
def yearPasses(self):
# age of the person in here
self.age += 3
self.amIOld()
t = int(input())  # number of test cases
for i in range(0, t):
    age = int(input())
    p = Person(age)
    p.amIOld()
    # Each yearPasses() call both ages the person by 3 years and prints
    # a bucket line, so this inner loop produces three lines of output.
    for j in range(0, 3):
        p.yearPasses()
    p.amIOld()
    print("")
"puspita.private@gmail.com"
] | puspita.private@gmail.com |
53942ff921d2764c52d3a2d1043a07453d184706 | 67ee4c88350d376efdb5c561d701b347cf02ac36 | /uncleloader/transform_tmp.py | 9f43fae71d79ac93284550cd9e047047f039f463 | [] | no_license | SCP-173-cool/uncleloader | de2003a73c1689be986e10066d63b514673f2c7c | e41b2dbe60d73812b064b7e5569bbe43c79ee9a2 | refs/heads/master | 2022-11-09T19:12:35.325562 | 2018-10-26T06:28:42 | 2018-10-26T06:28:42 | 150,403,408 | 4 | 1 | null | 2022-10-26T20:30:59 | 2018-09-26T09:38:24 | Python | UTF-8 | Python | false | false | 4,300 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 26 18:06:36 2018
@author: loktarxiao
"""
import numpy as np
import cv2
import random
paddings = {'z': cv2.BORDER_CONSTANT, 'r': cv2.BORDER_REFLECT_101}
interpolations = {'bilinear': cv2.INTER_LINEAR, 'bicubic': cv2.INTER_CUBIC, 'nearest': cv2.INTER_NEAREST}
def _apply_perspective(img, M, shape, interp_mode='bilinear', padding_mode='r'):
    # Warp *img* by the 3x3 homography *M*.
    # NOTE(review): cv2.warpPerspective's dsize argument is (width, height);
    # callers must pass (cols, rows) -- verify each call site.
    return cv2.warpPerspective(img, M, shape,
                            flags=interpolations[interp_mode],
                            borderMode=paddings[padding_mode])
def image_resize(img, size=(150, 150), interp_mode='bilinear'):
    # Resize *img* to *size* = (width, height) with the chosen interpolation.
    return cv2.resize(img, size, interpolation=interpolations[interp_mode])
def resize_shorter(img, shorter_length=300, interp_mode='bilinear'):
    # Resize so the shorter image side equals *shorter_length*, keeping the
    # aspect ratio. cv2.resize takes size as (width, height).
    rows, cols = img.shape[:2]
    if rows >= cols:
        # Portrait (or square): width is the shorter side.
        size = (int(shorter_length), int(1.0*rows*shorter_length/cols))
    else:
        # Landscape: height is the shorter side.
        size = (int(1.0*cols*shorter_length/rows), int(shorter_length))
    return cv2.resize(img, size, interpolation=interpolations[interp_mode])
def random_flip_left_right(img):
    """Mirror *img* horizontally with probability 0.5."""
    if random.random() <= 0.5:
        return img
    return cv2.flip(img, 1)
def random_flip_up_down(img):
    """Mirror *img* vertically with probability 0.5."""
    if random.random() <= 0.5:
        return img
    return cv2.flip(img, 0)
def random_rotate(img, rotage_range=(0, 180), random_position=False, interp_mode='bilinear', padding_mode='r'):
    """Rotate *img* by a random angle drawn from *rotage_range*.

    When *random_position* is True the rotation centre is picked uniformly
    from the central half of the image; otherwise the image centre is used.
    """
    angle = np.random.uniform(rotage_range[0], rotage_range[1])
    rows, cols = img.shape[:2]
    if random_position:
        cen_x = int(np.random.uniform(cols/4., 3*cols/4.))
        cen_y = int(np.random.uniform(rows/4., 3*rows/4.))
    else:
        cen_x = int(cols/2)
        cen_y = int(rows/2)
    M = cv2.getRotationMatrix2D((cen_x, cen_y), angle, 1)
    # Extend the 2x3 affine matrix to a 3x3 homography.
    M = np.concatenate([M, [[0, 0, 1]]], axis=0)
    # BUG FIX: cv2.warpPerspective takes dsize as (width, height);
    # the original passed (rows, cols), transposing the output size
    # for non-square images.
    img = _apply_perspective(img, M, (cols, rows), interp_mode, padding_mode)
    return img
def random_crop(img, crop_size):
    """Cut a random (crop_size[0], crop_size[1]) window out of *img*.

    Args:
        img: array of shape (rows, cols, channels).
        crop_size: (height, width) of the window; must not exceed the image.

    Returns:
        The cropped sub-array img[x:x+h, y:y+w, :].
    """
    rows, cols = img.shape[:2]
    # ">=" (was ">"): a crop exactly the size of the image is valid --
    # np.random.uniform(0, 0) then deterministically yields offset 0.
    assert rows >= crop_size[0]
    assert cols >= crop_size[1]
    start_x = int(np.random.uniform(0, rows - crop_size[0]))
    start_y = int(np.random.uniform(0, cols - crop_size[1]))
    end_x = int(start_x + crop_size[0])
    end_y = int(start_y + crop_size[1])
    return img[start_x:end_x, start_y:end_y, :]
def random_shear(img, range_x=(-0.5, 0.5), range_y=(0, 0), interp_mode='bilinear', padding_mode='r'):
    """Apply a random shear with x/y factors drawn from the given ranges."""
    rows, cols = img.shape[:2]
    shear_x = np.random.uniform(range_x[0], range_x[1])
    shear_y = np.random.uniform(range_y[0], range_y[1])
    M = np.array([1, shear_x, 0, shear_y, 1, 0, 0, 0, 1]).reshape((3, 3)).astype(np.float32)
    # BUG FIX: cv2.warpPerspective takes dsize as (width, height); the
    # original passed (rows, cols), transposing the output size for
    # non-square images.
    img = _apply_perspective(img, M, (cols, rows), interp_mode, padding_mode)
    return img
def random_rescale(img, range_x=(0.5, 1.5), range_y=(1, 1), interp_mode='bilinear', padding_mode='r'):
    """Scale *img* by random x/y factors drawn from the given ranges."""
    rows, cols = img.shape[:2]
    scale_x = np.random.uniform(range_x[0], range_x[1])
    scale_y = np.random.uniform(range_y[0], range_y[1])
    M = np.array([scale_x, 0, 0, 0, scale_y, 0, 0, 0, 1]).reshape((3, 3)).astype(np.float32)
    # BUG FIX: cv2.warpPerspective takes dsize as (width, height); the
    # original passed (rows, cols), transposing the output size for
    # non-square images.
    img = _apply_perspective(img, M, (cols, rows), interp_mode, padding_mode)
    return img
def random_hsv(img, h_range=(-720, 720), s_range=(-40, 40), v_range=(-40, 40)):
    """Randomly jitter hue/saturation/value of an RGB uint8 image.

    Raises TypeError for any dtype other than uint8.
    """
    h_add = np.random.uniform(h_range[0], h_range[1])
    s_add = np.random.uniform(s_range[0], s_range[1])
    v_add = np.random.uniform(v_range[0], v_range[1])
    img = img.copy()
    dtype = img.dtype
    if dtype != np.uint8:
        raise TypeError
    img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # Work in int32 so the additive shifts cannot wrap around in uint8.
    h, s, v = cv2.split(img_hsv.astype(np.int32))
    # OpenCV hue lives in [0, 180); "% 180" lets large h_add wrap correctly.
    h = np.clip(abs((h + h_add) % 180), 0, 180).astype(dtype)
    s = np.clip(s + s_add, 0, 255).astype(dtype)
    v = np.clip(v + v_add, 0, 255).astype(dtype)
    img_hsv_shifted = cv2.merge((h, s, v))
    img = cv2.cvtColor(img_hsv_shifted, cv2.COLOR_HSV2RGB)
    return img
return img
def transform(img, label):
    """Training-time augmentation pipeline: shear, resize, crop, HSV jitter.

    Returns the augmented image together with the unchanged *label*.
    """
    #img = random_rescale(img)
    img = random_shear(img, range_x=(-0.5, 0.5), range_y=(-0.5, 0.5))
    #img = random_rotate(img, random_position=False)
    #img = image_resize(img, size=(400, 400))
    img = resize_shorter(img, shorter_length=300)
    img = random_crop(img, crop_size=(225, 225))
    img = random_hsv(img)
    #img = random_flip_left_right(img)
    return img, label
"scp173.cool@gmail.com"
] | scp173.cool@gmail.com |
acd418a40b6482a37dd5aa799b76f3af35162cad | 74720302705a7582ed6df2f2513fd0efaf94851b | /src/pymessagebus/_messagebus.py | c5b6da8beae2edbd7db9f995d6780856bf45c617 | [
"MIT"
] | permissive | laukamp/pymessagebus | c4218891d33898e712c8be861c79dfe11fbc63e8 | 5860dce85a30455cd04f34fbeb3eca433c931dbb | refs/heads/master | 2021-09-24T00:42:15.195119 | 2018-09-30T22:58:15 | 2018-09-30T22:58:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,010 | py | from collections import defaultdict
import typing as t
import pymessagebus.api as api
class MessageBus(api.MessageBus):
    """In-memory message bus: maps message classes to handler callables and
    threads every ``handle()`` call through an optional middleware chain."""

    def __init__(self, *, middlewares: t.List[api.Middleware] = None) -> None:
        # One list of handlers per concrete message class.
        self._handlers: t.Dict[type, t.List[t.Callable]] = defaultdict(list)
        # Build the middleware pipeline once; its innermost link is the
        # method that actually fans the message out to the handlers.
        self._middlewares_chain = self._get_middlewares_callables_chain(
            middlewares, self._trigger_handlers_for_message_as_a_middleware
        )

    def add_handler(self, message_class: type, message_handler: t.Callable) -> None:
        """Register *message_handler* for messages of *message_class*."""
        if not isinstance(message_class, type):
            raise api.MessageHandlerMappingRequiresAType(
                f"add_handler() first argument must be a type, got '{type(message_class)}"
            )
        if not callable(message_handler):
            raise api.MessageHandlerMappingRequiresACallable(
                f"add_handler() second argument must be a callable, got '{type(message_handler)}"
            )
        self._handlers[message_class].append(message_handler)

    def handle(self, message: object) -> t.List[t.Any]:
        """Run *message* through the middleware chain; returns the handler
        results, or [] when no handler is registered for its type."""
        if not self.has_handler_for(message.__class__):
            return []
        result = self._middlewares_chain(message)
        return result

    def has_handler_for(self, message_class: type) -> bool:
        """Return True when at least one handler is registered for the class."""
        return message_class in self._handlers

    def _trigger_handlers_for_message_as_a_middleware(
        self, message: object, unused_next: t.Callable
    ) -> t.List[t.Any]:
        # Innermost "middleware": invoke every registered handler in order
        # and collect their return values.
        handlers: t.List[t.Callable] = self._handlers[message.__class__]
        results = [self._trigger_handler(message, handler) for handler in handlers]
        return results

    @staticmethod
    def _get_middlewares_callables_chain(
        middlewares: t.Union[t.List[api.Middleware], None], message_handler: t.Callable
    ) -> t.Callable[[object], t.Any]:
        """
        The algorithm comes from the source code of Tactician (PHP CommandBus):
        https://github.com/thephpleague/tactician/blob/master/src/CommandBus.php#L50 :-)
        Middlewares are wrapped from the inside out, so the first middleware
        in the list is the outermost one at call time.
        """
        all_middlewares = (middlewares or []) + [message_handler]
        # the last "middleware" is actually the execution of the target Message Handler,
        # so it won't make any use of the "next" parameter but we have to provide it.
        # --> let's use a no-op lambda as the last middleware's "next" parameter:
        chain = lambda _: None
        for middleware in reversed(all_middlewares):
            chain = MessageBus._get_middleware_callable_for_middleware(
                middleware, chain
            )
        return chain

    @staticmethod
    def _get_middleware_callable_for_middleware(
        middleware: api.Middleware, next_middleware: t.Callable
    ) -> t.Callable[[object], t.Any]:
        # The binding happens in this helper (not inline in the loop above)
        # so each closure captures its own middleware/next pair rather than
        # the loop variable (late-binding pitfall).
        def middleware_callable(message: object):
            return middleware(message, next_middleware)

        return middleware_callable

    @staticmethod
    def _trigger_handler(message: object, handler: t.Callable) -> t.Any:
        """Invoke a single handler with the message."""
        return handler(message)
"olivier@rougemine.com"
] | olivier@rougemine.com |
93f3fe2d44390bb3ee1652a7fd897636bdcf3618 | ef045d6f8c4cfd57783c05031471fc2cb8a893c6 | /Hw5/likehood.py | d3e820eb52e6ea871bc9ec7031ead307480cfa9e | [] | no_license | MartinIMR/NLP | d2400a9aed250963a520a5aa9f566b078b9693fb | aef181af5766c4a696db0f9b211e92df1c90f574 | refs/heads/master | 2020-07-12T11:46:03.908437 | 2019-12-05T07:41:16 | 2019-12-05T07:41:16 | 204,811,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,986 | py | import nltk
import numpy as np
def load_data(file_name):
    # Unpickle and return the object stored in *file_name*.
    # SECURITY NOTE: pickle.load can execute arbitrary code from the file;
    # only use with trusted data.
    from pickle import load
    # "input" shadows the builtin here; kept as-is (local scope only).
    input = open(file_name,"rb")
    data = load(input)
    input.close()
    return data
def obtain_vocabulary(articles):
    """Return the sorted list of distinct tokens across all *articles*."""
    tokens = nltk.word_tokenize(" ".join(articles))
    return sorted(set(tokens))
def obtain_frequencies(articles):
    """Return a dict mapping each token to its corpus-wide count."""
    tokens = nltk.word_tokenize(" ".join(articles))
    return dict(nltk.FreqDist(tokens))
def E_step(prob_back, prob_top, back_vector, topic_vector):
    """E-step: posterior probability that each word was generated by the
    topic (latent z = 0), element-wise over the vocabulary."""
    topic_mass = topic_vector * prob_top
    total_mass = topic_mass + back_vector * prob_back
    return np.divide(topic_mass, total_mass)
def M_step(counts, z0_probs):
    """M-step: re-estimate the (normalized) topic distribution from the
    expected topic-attributed word counts."""
    expected_counts = np.multiply(counts, z0_probs)
    return expected_counts / np.sum(expected_counts)
def compute_likelihood(article, vocabulary, back_vector, topic_vector,
                       back_prob, topic_prob, iterations=50, print_iterations=False):
    """Fit a single-topic mixture for *article* with EM; return the topic.

    Args:
        article: raw text of one document.
        vocabulary: ordered word list defining the vector layout.
        back_vector: background word distribution (same order as vocabulary).
        topic_vector: initial topic distribution estimate.
        back_prob, topic_prob: mixture weights of background vs. topic.
        iterations: number of EM iterations to run.
        print_iterations: when truthy, print the final document log-likelihood.

    Returns:
        The re-estimated topic distribution vector.
    """
    words = nltk.Text(nltk.word_tokenize(article))
    # Count occurrences of every vocabulary word in this article.
    counts = np.array([words.count(word) for word in vocabulary])
    for _ in range(iterations):
        z0 = E_step(back_prob, topic_prob, back_vector, topic_vector)
        topic_vector = M_step(counts, z0)
    logarithms = np.log(back_vector * back_prob +
                        topic_vector * topic_prob)
    # BUG FIX: the document log-likelihood is sum_w c(w) * log(...); the
    # original summed the unweighted logs and left the count-weighted
    # `product` array unused.
    weighted = np.multiply(logarithms, counts)
    document_likelihood = np.sum(weighted)
    if print_iterations:
        print("Document likelihood is:")
        print(document_likelihood)
    return topic_vector
if __name__=='__main__':
    articles = load_data("lemmatized_articles.pkl")
    vocabulary = obtain_vocabulary(articles) #Vocabulary
    fd = obtain_frequencies(articles) #All text frequencies
    """ Create the background distribution vector """
    background_vector = []
    for word in vocabulary:
        background_vector.append(fd[word])
    background_vector = np.array(background_vector)
    total_count = np.sum(background_vector)
    background_vector = background_vector/total_count
    """ Create the topic distribution vector (uniform start) """
    dimension = len(vocabulary)
    topic_vector = np.full(shape = dimension, fill_value = (1/dimension))
    back_prob = 0.5 #Probability of background
    topic_prob = 0.5 #Probability of topic
    i = 0
    topics = []
    # NOTE(review): i is never incremented in this loop, so every article
    # prints "For article 0" -- likely meant to use enumerate().
    for article in articles:
        print("For article ",i,":")
        topics.append(compute_likelihood(article, vocabulary, background_vector,
                                         topic_vector,back_prob,topic_prob,20,True))
    # NOTE(review): this prints a slice of the article text, not the
    # fitted topic vectors stored in `topics` -- confirm intent.
    for i in range(len(articles)):
        print("Article ",i," has topics:")
        print(articles[i][:10])
| [
"martin.mimr@gmail.com"
] | martin.mimr@gmail.com |
7d9f3aef2fbc3cfed4fd88318a5795b3e7bda781 | a2da457b874d305c8b4c97ea1ea5ee0a289aa96e | /ejecucion1.py | f3903cdb9f3e0dd8aa8b33d13bda62568fa1cbf1 | [] | no_license | capidani/TFG | 3579328d4fa9237a907dd83864a9128c473dd713 | 8809d0cad28d72c26354bd086ed5f4a96eac2e7c | refs/heads/master | 2021-01-01T04:04:17.911246 | 2017-08-21T06:48:02 | 2017-08-21T06:48:02 | 97,119,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | import numpy as np
import matplotlib.pyplot as plt
# display plots in this notebook
#%matplotlib inline
# set display defaults
plt.rcParams['figure.figsize'] = (10, 10) # large images
plt.rcParams['image.interpolation'] = 'nearest' # don't interpolate: show square pixels
plt.rcParams['image.cmap'] = 'gray' # use grayscale output rather than a (potentially misleading) color heatmap
| [
"danielcapitanlopez@MacBook-Pro-de-Daniel-2.local"
] | danielcapitanlopez@MacBook-Pro-de-Daniel-2.local |
a20506a5babd07a07115c1c7769e74271b52356c | c6194deac4c0aab6a1aac1de08a78897c6e07430 | /tcp_server1.py | 8863c00e97c57a12c437c109ed86180de8dea843 | [] | no_license | d-sato2/PepperSocket | 2347bbf04f9f4e7aa929d06e361dc500ba67101d | 5e86afd1bee249410cda6645683f0318f30f4b2d | refs/heads/master | 2020-12-24T18:50:48.758758 | 2016-04-15T01:31:40 | 2016-04-15T01:31:40 | 56,220,408 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | from __future__ import print_function
import socket
from contextlib import closing
import datetime
def main():
    """Run a blocking TCP server that timestamps each connection.

    NOTE(review): this appears to target Python 2 (``from __future__
    import print_function`` at the top and ``conn.send`` is given a str,
    which Python 3 rejects) -- confirm the intended runtime.
    """
    host = '192.168.1.179'  # NOTE(review): hard-coded LAN address
    port = 4000
    backlog = 10
    bufsize = 4096
    now = datetime.datetime.now()
    # Millisecond-precision timestamp, e.g. "20160415013140.0123".
    mili = now.strftime("%Y%m%d%H%M%S.") + "%04d" % (now.microsecond // 1000)
    print (mili)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with closing(sock):
        sock.bind((host, port))
        sock.listen(backlog)
        while True:
            # Handle one client at a time: receive a message, echo back
            # the current PC time.
            conn, address = sock.accept()
            with closing(conn):
                msg = conn.recv(bufsize)
                print(msg)
                now = datetime.datetime.now()
                mili = now.strftime("%Y%m%d%H%M%S.") + "%04d" % (now.microsecond // 1000)
                print("PCtime:" + mili)
                conn.send("PCtime:" + mili)
        # Unreachable: the accept loop above never terminates normally.
        return
if __name__ == '__main__':
    main()
| [
"daisuke.satou2@e-harp.jp"
] | daisuke.satou2@e-harp.jp |
6272820bf35b5e435d6c1b6caa1a5391eaa7f81a | 178c12bcfff6100d68b07ff6666b34e474d670b9 | /numpy_dir/numpy_code.py | 618693b8605b62a5c7c2dfad571ef8e1c6c47cfd | [
"MIT"
] | permissive | rongliangzi/review_your_python_code | e70d604f3d8fe613213ae745a3f2d8a6950d886c | 5b429422a14117edfa7c1d6bd27586efd5f0554a | refs/heads/master | 2020-05-24T09:52:19.037248 | 2019-06-07T12:26:32 | 2019-06-07T12:26:32 | 187,216,055 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,203 | py | # encoding: utf-8
"""
@author: Liangzi Rong
@contact: 289770334@qq.com
"""
import time
import numpy as np
import sys
# Sequential-read efficiency: numpy array vs. nested Python list.
def get_np_list(size=(200, 100), iteration=100):
    """Benchmark element access and summation on an ndarray vs. a list of
    lists of the same shape, printing ids, sizes and elapsed times."""
    np_matrix = np.arange(0, size[0]*size[1]).reshape(size)
    list_matrix = [[i*size[1]+j for j in range(size[1])] for i in range((size[0]))]
    print('id(np_matrix[0]): {0}, id(np_matrix[1]): {1}'.format(id(np_matrix[0]), id(np_matrix[1])))
    print('id(list_matrix[0]): {0}, id(list_matrix[1]): {1}, data length: {2}'.
          format(id(list_matrix[0]), id(list_matrix[1]), sys.getsizeof(list_matrix[0])))
    # Timed element-by-element traversal of the ndarray.
    last_time = time.time()
    for i in range(iteration):
        for j in range(size[0]):
            for k in range(size[1]):
                _ = np_matrix[j][k]
    print('\ntime(s) for iterating np_matrix: ', time.time()-last_time)
    last_time = time.time()
    for i in range(iteration):
        for j in range(size[0]):
            for k in range(size[1]):
                _ = list_matrix[j][k]
    print('\ntime(s) for iterating list_matrix: ', time.time()-last_time)
    last_time = time.time()
    # Vectorized numpy sum.
    # NOTE(review): the label below says "list_matrix sum" but this
    # section times np_matrix.sum() -- the printed label is misleading.
    for _ in range(iteration):
        _ = np_matrix.sum()
    print(np_matrix.sum())
    print('\ntime(s) for list_matrix sum: ', time.time() - last_time)
    last_time = time.time()
    # Manual Python-level sum over the nested list.
    s = 0
    for _ in range(iteration):
        s = 0
        for i in range(size[0]):
            for j in range(size[1]):
                s += list_matrix[i][j]
    print(s)
    print('\ntime(s) for list_matrix sum: ', time.time() - last_time)
    last_time = time.time()
last_time = time.time()
# built-in flip and reverse
def flip():
    """Show that np.fliplr / np.flipud match the equivalent slice reversals."""
    raw = np.arange(0, 12).reshape((2, 2, 3))
    print(raw)
    # Reverse along axis 1 (left-right): builtin vs. slicing.
    raw_flip_lr = np.fliplr(raw)
    raw_reversed_lr = raw[:, ::-1, ...]
    # Reverse along axis 0 (up-down): builtin vs. slicing.
    raw_flip_ud = np.flipud(raw)
    raw_reversed_ud = raw[::-1, ...]
    for variant in (raw_flip_lr, raw_reversed_lr, raw_flip_ud, raw_reversed_ud):
        print('\n', variant)
# copy array/复制向量,如将一个(2,3)的扩展为(3,2,3)的,可以(2,3)->(1,2,3)->(3,2,3)
def repeat():
    """Demonstrate np.repeat vs np.tile for duplicating array contents.

    np.repeat copies individual elements (flattening the result unless an
    axis is given; a tuple of counts allows a different repetition per slice
    along that axis), while np.tile copies the whole array per the reps tuple.
    """
    raw = np.linspace(1, 6, 6).reshape([1, 2, 3])
    print('raw shape: ', raw.shape, '\nraw array: ', raw)
    # No axis: every scalar is repeated and the result is flattened to 1-D.
    raw_rp = np.repeat(raw, 2)
    print('\n2 repeats every element and flatten array shape(1D): ', raw_rp.shape, '\ncontent: ', raw_rp)
    # axis=0: whole sub-arrays are repeated, rank is preserved.
    raw_rp = np.repeat(raw, 2, axis=0)
    print('\n2 repeats specify axis(dimensions not changed) shape:', raw_rp.shape, '\ncontent ', raw_rp)
    # A tuple of counts repeats each slice along the axis a different number of times.
    raw_rp = np.repeat(raw, (1, 2), axis=1)
    print('\n2 repeats dif repetitions in specific axis(dimensions not changed): ', raw_rp.shape, '\ncontent: ', raw_rp)
    # np.tile duplicates the array itself according to the reps tuple.
    raw_tile = np.tile(raw, (2, 1, 1))
    print('\n2 repeats tile(dimensions not changed) shape: ', raw_tile.shape, '\ncontent: ', raw_tile)
# Script entry point: run the access-speed benchmark by default; the other
# demos can be enabled by uncommenting the calls below.
if __name__ == '__main__':
    get_np_list()
    # flip()
    # repeat()
| [
"289770334@qq.com"
] | 289770334@qq.com |
6b6c96040720436d156488cae64c337265f073ad | a49e8de43e4c9293de92b542fb96954e55e0e70d | /test_autoui.py | e97a5b5a84e6f901ec72c678cef8fa846f116296 | [] | no_license | tianxue1129/tx | 98e0b138dc296af28d7da22e44c20c40fae63500 | 037de465c0dee0fe2ff84daa516fe919f4db593a | refs/heads/master | 2023-02-12T06:10:31.220676 | 2021-01-10T10:18:20 | 2021-01-10T10:18:20 | 326,430,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | import pytest
from time import sleep
from selenium import webdriver
def test_main():
    """End-to-end UI smoke test for the supplier portal.

    Logs in with fixed credentials, navigates to the "all orders" list,
    searches for one hard-coded order number and asserts that the status
    cell of the result row contains the expected refund text.

    NOTE(review): depends on a local chromedriver at d:\\chromedriver.exe,
    a test-environment URL and hard-coded credentials/order number — this
    is environment specific and not portable.
    """
    # Create the webdriver instance.
    wd = webdriver.Chrome(r'd:\chromedriver.exe')
    wd.maximize_window()
    wd.implicitly_wait(5)
    wd.get('http://web4tst.dogotsn.cn/supplier/pages/login.html')
    sleep(1)
    wd.find_element_by_name('username').send_keys('13611011011')
    wd.find_element_by_name('password').send_keys('12345678')
    wd.find_element_by_id('loginSub').click()
    sleep(4)
    # Open the orders menu, then the "all orders" ("全部订单") page.
    wd.find_element_by_xpath('//*[@id="side-menu"]/li[2]').click()
    sleep(1)
    wd.find_element_by_link_text('全部订单').click()
    sleep(1)
    wd.switch_to.frame('iframe1')  # switch into the right-hand content frame
    # Debug helpers kept for reference:
    # page_id = wd.page_source
    # print(page_id)
    # page_text = wd.find_element_by_id('tb_departments').text
    # print(page_text)
    # title = wd.find_element_by_class_name('panel-heading').text
    # print(title)
    # assert u'全部订单列表' in title  # check that the orders page opened
    wd.find_element_by_id('orderNo').send_keys('5201228141913619936')
    wd.find_element_by_id('queryBtn').click()
    # 13th column of the result row is the order status.
    content = wd.find_element_by_xpath('//*[@id="tb_departments"]/tbody/tr/td[13]').text
    print(content)
    # Expect the status to contain "full refund" ("全部退款").
    # NOTE(review): 'aaaaaaaaa' is a placeholder failure message.
    assert u'全部退款' in content , 'aaaaaaaaa'
if __name__ == '__main__':
pytest.main(['-s','test_autoui.py','--html=./report/result.html'])
| [
"wenxindexuetx@163.com"
] | wenxindexuetx@163.com |
4ca86bb778fd36f53883880894e2099c2a4fc0a9 | 899f9a470794c741478e50304f9057b073c533ad | /RealBE.py | ff9623f683e7863f7c09dc5c0cb26b9a416988be | [] | no_license | SachitShroff/CalHacks4-processing | cbe877dbc0c43d4075d99513f34f51f6516a13d8 | 95f3336ecc7a025cadc7db6ae3881c700cc4aabe | refs/heads/master | 2021-07-09T15:40:29.144337 | 2017-10-08T17:34:35 | 2017-10-08T17:34:35 | 106,084,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,346 | py | from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
import time
import os
import urllib2
import timeit
import json
import requests
import httplib
import google.gax.errors
A_CLASS_NAME = "61A" #TODO: SET FINAL CLASS NAME
B_CLASS_NAME = "61B" #TODO: SET FINAL CLASS NAME
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "CalHacks-8f56181f7e42.json"
GCS_API_KEY = "AIzaSyBy6MNhLhTbs0vKoBqsoYdyQDq6ceCoNag"
#audioURIS = ["gs://calhacksproject/audiofiles/test2.wav"]
#delimiters = set([" ", ",", ".", "!", "?"])
AZ_API_KEY = "22c658f2b8784b55aed7e2f689008908"
def processAudio(audioURIS, className=None):
    """Transcribe each audio file, extract keyword topics and related links,
    then POST the aggregated payload to the backend web server.

    Args:
        audioURIS: list of GCS URIs ("gs://...") pointing at audio files.
        className: course name attached to the uploaded payload.
    """
    passData = []
    # Convert speech to text (one transcription per audio file).
    print("\nTRANSCRIBING AUDIO\n")
    transcriptions = transcribeAudio(audioURIS)
    print(transcriptions)
    # Analyze each transcription for keywords and related resources.
    keywords = []
    print("\nFINDING KEYWORDS AND INFO:\n")
    for i in range(len(audioURIS)):
        audioURI = audioURIS[i]
        transcription = transcriptions[i]
        keywords = getKeyWords(transcription)
        relatedResources = getRelatedResources(keywords)
        passData.append({"Audio_URL": audioURI, "Topics": keywords, "Related_Resources": relatedResources})
    data = {"ClassName": className, "Videos": passData}
    print("Sending post request with info to backend web server server")
    r = requests.post("https://safe-spire-89119.herokuapp.com/api/v1/classes/upload", data=json.dumps(data))
def transcribeAudio(audioURIS):
    """Asynchronously transcribes the audio files specified by the audioURIS.

    Starts one long-running Google Cloud Speech operation per URI, then waits
    for every operation (up to 600 s each) and returns one concatenated
    best-alternative transcript per audio file, in input order.

    NOTE(review): a GaxError during start-up is only logged, which would then
    trip the len(operations) == len(audioURIS) assert below — confirm whether
    failed URIs should instead be dropped from the batch.
    """
    operations = []
    for audioURI in audioURIS:
        try:
            client = speech.SpeechClient()
            audio = types.RecognitionAudio(uri=audioURI)
            config = types.RecognitionConfig(
                encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,  # must match the uploaded files' format
                sample_rate_hertz=16000,  # must match the uploaded files' sample rate
                language_code='en-US')
            operation = client.long_running_recognize(config, audio)
            operations.append(operation)
        except google.gax.errors.GaxError as e:
            print("ERROR:")
            print(audioURI)
    assert len(operations) == len(audioURIS), "An operation was not generated for every ausio file"
    responses = []
    for operation in operations:
        # Block until this file's transcription finishes (or 10 min elapse).
        response = operation.result(timeout=600)
        responses.append(response)
    assert len(operations) == len(responses), "Responses were not generated for every operation"
    transcriptions = []
    for response in responses:
        text = ""
        for result in response.results:
            # Keep only the top-confidence alternative of each result chunk.
            text = text + result.alternatives[0].transcript
        transcriptions.append(text)
    assert len(transcriptions) == len(operations), "A transcription was not generated for evey audio file"
    return transcriptions
def getKeyWords(transcription):
    """Extract key phrases from a transcription via the Azure Text Analytics
    v2.0 key-phrase API.

    The text is split into <=4900-character chunks (the API caps per-document
    size), all chunks are sent in a single request, and the union of the
    returned key phrases is deduplicated; at most 6 phrases are returned.

    Args:
        transcription: full transcription text of one audio file.
    Returns:
        list[str]: up to 6 unique key phrases (set order, so unordered).
    """
    uri = 'westcentralus.api.cognitive.microsoft.com'
    path = '/text/analytics/v2.0/keyPhrases'
    headers = {'Content-Type':'application/json', 'Ocp-Apim-Subscription-Key':AZ_API_KEY}
    # BUG FIX: the old loop `for i in range(len(transcription)//4900)` dropped
    # the trailing partial chunk and sent *no* documents at all for any
    # transcription shorter than 4900 characters.  Step-slicing covers the
    # whole text.
    inputJsons = []
    for docId, start in enumerate(range(0, len(transcription), 4900), start=1):
        text = transcription[start:start + 4900]
        inputJsons.append({"language": "en", "id": docId, "text": text})
    if not inputJsons:
        # Nothing to analyse; avoid an empty-documents API call.
        return []
    inputJson = {"documents": inputJsons}
    numDocuments = len(inputJsons)
    print(inputJson)
    body = json.dumps(inputJson)
    conn = httplib.HTTPSConnection(uri)
    conn.request("POST", path, body, headers)
    response = conn.getresponse()
    print("\nRESPONSE READ")
    temp = response.read()
    decoder = json.JSONDecoder()
    obj = decoder.decode(temp)
    print("\nOBJ")
    print(obj)
    keywords = []
    for i in range(numDocuments):
        keywords = keywords + obj['documents'][i]['keyPhrases']
    print(list(set(keywords)))
    return list(set(keywords))[0:6]
"""
uri = 'westcentralus.api.cognitive.microsoft.com'
path = '/text/analytics/v2.0/keyPhrases'
def GetKeyPhrases (documents):
"Gets the sentiments for a set of documents and returns the information."
headers = {'Ocp-Apim-Subscription-Key': AZ_API_KEY}
conn = httplib.HTTPSConnection (uri)
body = json.dumps (documents)
conn.request ("POST", path, body, headers)
response = conn.getresponse ()
return response.read ()
"""
def getRelatedResources(keywords):
    """Build one "I'm Feeling Lucky" Google search URL per keyword.

    Args:
        keywords: list of keyword phrases (spaces are encoded as '+').
    Returns:
        list[str]: one search URL per input keyword, in order.
    """
    links = ["http://www.google.com/search?q=" + kw.replace(" ", "+") + "&btnI"
             for kw in keywords]
    print(links)
    return links
"""
def processAU(audioURIS):
#Initialize empty topics dictionary (will have "topic" : [[List of impotant tokens (summary) with "importance"], [video URLs], [related keywords]] )
topics = {}
#Convert speech to text (text transcription for each audio file)
operations=[] # Eache element is [client, operation]
for audioURI in audioURIS:
client = speech.SpeechClient()
operation = client.long_running_recognize(audio=speech.types.RecognitionAudio(uri=audioURI),
config=speech.types.RecognitionConfig(encoding='FLAC', #TODO: CHANGE THIS
language_code='en-US',
sample_rate_hertz=44100)) #TODO: CHANGE THIS
operations.append(operation)
retryCount = 0
for operation in operations:
operation.poll()
while retryCount <= 100 and not allOpsComplete(operations):
if retryCount == 100:
for operation in operations:
print(operation.complete) # This line will error
retryCount += 1
time.sleep(3*i)
for operation in operations:
operation.poll()
transcriptions = []
for i in range(len(audioURIS)):
text = ""
operation = operations[i]
for result in operation.results:
for alternative in result.alternatives:
text = text + alternative.transcript
transcriptions.append(text)
return transcriptions
def allOpsComplete(operations):
for operation in operations:
if not operation.complete:
return False
"""
def gen61bFiles():
    """Return the GCS URIs of the 107 CS61B lecture audio files (a flat list)."""
    bucket = "gs://calhacksproject/CS61B/"
    return [bucket + "CS61B(" + str(i) + ").wav" for i in range(1, 108)]
def gen61aFiles():
    """Return the CS61A lecture audio GCS URIs, grouped into batches of <=9.

    File 90 is skipped (absent from the bucket), leaving 106 files.

    Returns:
        list[list[str]]: batches of at most 9 URIs covering every file.
    """
    names = []
    prefix = "CS61A ("
    suffix = ").wav"
    for i in range(1, 108):
        if (i != 90):
            fileName = prefix + str(i) + suffix
            names.append(fileName)
    temp = ["gs://calhacksproject/CS61A/" + name for name in names]
    # BUG FIX: the old floor-division chunking, `range(len(temp)//9)`,
    # silently dropped the trailing partial batch (the last 7 files were
    # never processed).  Step-slicing emits every batch including the tail.
    return [temp[i:i + 9] for i in range(0, len(temp), 9)]
def mainProcess(process61A=True, process61B=True, startIndex = 0):
    """Drive the full audio pipeline for the CS61A and/or CS61B lecture sets.

    Args:
        process61A: when True, process the CS61A batches.
        process61B: when True, process the CS61B files.
        startIndex: batch/file index to resume from (for restarts).
    """
    if process61A:
        print("Generating 61A Filenames")
        sixOneAFiles = gen61aFiles()
        print("Starting 61a processing " + str(len(sixOneAFiles)))
        for i in range(startIndex, len(sixOneAFiles)):
            fileBunch = sixOneAFiles[i]  # a batch (list) of URIs
            processAudio(fileBunch, className=A_CLASS_NAME)
    if process61B:
        print("Generating 61B FIlenames")
        sixOneBFiles = gen61bFiles()
        print("Starting 61b processing")
        for i in range(startIndex, len(sixOneBFiles)):
            # NOTE(review): gen61bFiles() returns a *flat* list of URIs, so
            # each "fileBunch" here is a single URI string, unlike the 61A
            # batches above — confirm processAudio handles both shapes.
            fileBunch = sixOneBFiles[i]
            processAudio(fileBunch, className=B_CLASS_NAME)
"""
print ("TESTING")
print("Running sample file (10 mins, LINEAR16, 16kHz)")
start_time = timeit.default_timer()
processAudio(audioURIS, className="TESTING")
elapsed = timeit.default_timer() - start_time
print("TIME ELAPSED:")
print(elapsed)
"""
| [
"sachit.shroff@gmail.com"
] | sachit.shroff@gmail.com |
2cacd8a9325db31a6a26faf26f05f6263f778e9c | 2aaa47d425662314e8c283c9ba3e720889d8f07e | /script/config.py | 2bd8b14357c723e21d7c6586053c9c75f8e2d091 | [
"MIT"
] | permissive | youngspring1/Parse12306 | ca903786ae1c4b0f27d37d8b2dc66b6804e31e67 | 09614e287853989573fa303b91ec1e673c818a74 | refs/heads/master | 2021-05-11T20:00:20.007838 | 2018-01-14T14:27:24 | 2018-01-14T14:27:24 | 117,429,115 | 16 | 9 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | # mongodb
# MongoDB connection settings used by the scraper scripts.
db_host = 'localhost'
db_port = 27017       # default MongoDB port
db_name = '12306'     # database holding the parsed 12306 data
"xuyc@touzhiwang.com"
] | xuyc@touzhiwang.com |
4c2986b068e75e8ac4291076722d55fe4d52745f | ae657ab18358ba7d54769b83dab58269fb6f30e8 | /DistilBert/model/no_BERT_MTL.py | 3e0778d26bc0f1937a73e4978ef08bdb141b82fd | [] | no_license | jailbreakaaaa/MTL-DistilBert-CNN | b3b02cb9695646f7451f5855c29c539e0e885e79 | 837c6a576695433604e9d0fa9a25cfee00837872 | refs/heads/master | 2023-06-26T15:37:40.932524 | 2021-07-14T09:07:45 | 2021-07-14T09:07:45 | 385,863,635 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,612 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import pytorch_lightning as pl
import numpy as np
from transformers import AutoModel, AutoTokenizer
#
class get_embedding(nn.Module):
    """GloVe embedding lookup layer.

    Loads a frozen pre-trained GloVe table plus a word->id map from the
    paths in `args`, and maps batches of tokenised sentences to padded
    embedding tensors.
    """

    def __init__(self, args):
        super(get_embedding, self).__init__()
        self.args = args
        self.init_glove()
        self.word_dim = args.glove_dim  # embedding dimensionality

    def forward(self, x):
        # x: list of token lists, one per sentence.
        return self.get_glove(x)

    def init_glove(self):
        """
        load the GloVe model
        """
        self.word2id = np.load(self.args.word2id_file, allow_pickle=True).tolist()
        self.glove = nn.Embedding(self.args.vocab_size, self.args.glove_dim)
        emb = torch.from_numpy(np.load(self.args.glove_file, allow_pickle=True)).to(self.args.device)
        self.glove.weight.data.copy_(emb)
        self.word_dim = self.args.glove_dim
        # Keep the GloVe vectors fixed during training.
        self.glove.weight.requires_grad = False

    def get_glove(self, sentence_lists):
        """
        get the glove word embedding vectors for a sentences
        """
        max_len = max(map(lambda x: len(x), sentence_lists))
        # Map tokens to ids; unknown words fall back to id 0.
        sentence_lists = list(map(lambda x: list(map(lambda w: self.word2id.get(w, 0), x)), sentence_lists))
        # Right-pad every sentence to max_len with the last vocab id
        # (used here as the padding id).
        sentence_lists = list(map(lambda x: x + [self.args.vocab_size - 1] * (max_len - len(x)), sentence_lists))
        sentence_lists = torch.LongTensor(sentence_lists).to(self.args.device)
        return self.glove(sentence_lists)
class BERT(nn.Module):
    """Frozen pre-trained transformer encoder (HuggingFace AutoModel) used
    purely as a feature extractor: token lists in, encoder hidden states out."""

    def __init__(self, args):
        super(BERT, self).__init__()
        self.args = args
        self.bert_tokenizer = AutoTokenizer.from_pretrained(args.bert_path)
        self.bert = AutoModel.from_pretrained(args.bert_path)
        # Freeze every transformer parameter; the encoder is never fine-tuned.
        for param in self.bert.parameters():
            param.requires_grad = False
        # (an optional pooling/classification head was sketched here and is
        # kept disabled)

    def forward(self, x):
        # x: list of token lists, one per sentence.
        word_emb = self.get_embedding(x)
        return word_emb

    def get_embedding(self, sentence_lists):
        """Re-join the pre-split tokens, tokenise with the model's own
        tokenizer, and return element [0] of the model output (the last
        hidden states)."""
        sentence_lists = [' '.join(x) for x in sentence_lists]
        ids = self.bert_tokenizer(sentence_lists, padding=True, return_tensors="pt")
        inputs = ids['input_ids']
        if self.args.use_gpu:
            inputs = inputs.to(self.args.device)
        return self.bert(inputs)[0]
class GRU_attn(nn.Module):
    """Variable-length GRU encoder using pack_padded_sequence.

    Sentences are sorted by length (as packing requires), packed, run
    through the GRU, unpacked, and restored to the original batch order.
    Despite the name, the attention machinery is currently commented out;
    forward returns the raw per-step outputs.
    """

    def __init__(self, glove_dim, enc_hid_size, rnn_layers, bidirectional, dec_hid_size, dropout_rnn, device="cuda"):
        super(GRU_attn, self).__init__()
        self.device = device
        self.rnn = nn.GRU(glove_dim, enc_hid_size, rnn_layers,
                          batch_first=True, bidirectional=bidirectional)
        # (projection, Attention, dropout and pooling layers are disabled —
        # dec_hid_size / dropout_rnn are accepted but currently unused)

    def forward(self, x, seq_len):
        # Sort the batch by descending length; remember the permutation so
        # the outputs can be un-sorted afterwards.
        sent_len, idx_sort = np.sort(seq_len)[::-1], np.argsort(-seq_len)
        idx_unsort = np.argsort(idx_sort)
        idx_sort = torch.from_numpy(idx_sort).to(self.device)
        sent_variable = x.index_select(0, idx_sort)
        # Handling padding in Recurrent Networks
        sent_packed = nn.utils.rnn.pack_padded_sequence(sent_variable, np.ascontiguousarray(sent_len, dtype=np.float32),
                                                        batch_first=True)
        sent_output, sent_hidden = self.rnn(sent_packed)
        sent_output = nn.utils.rnn.pad_packed_sequence(sent_output, batch_first=True)[0]  # (batch, max_len, hid*dirs)
        # Un-sort by length
        idx_unsort = torch.from_numpy(idx_unsort).to(self.device)
        sent_output = sent_output.index_select(0, idx_unsort)  # (batch, seq_len, enc_hid * num_directions)
        # (attention-weighted pooling over sent_output is disabled)
        return sent_output
class GRU(nn.Module):
    """Fixed-length GRU encoder: no packing, returns the full output sequence.

    Only glove_dim, enc_hid_size, rnn_layers and bidirectional are used; the
    remaining constructor arguments are accepted for interface parity with
    GRU_attn.
    """

    def __init__(self, glove_dim, enc_hid_size, rnn_layers, bidirectional, dec_hid_size, dropout_rnn, device="cuda"):
        super(GRU, self).__init__()
        self.rnn = nn.GRU(glove_dim, enc_hid_size, rnn_layers,
                          batch_first=True, bidirectional=bidirectional)

    def forward(self, x):
        # Discard the final hidden state; callers only need per-step outputs.
        outputs, _ = self.rnn(x)
        return outputs
return sent_output
class CNN_layers(nn.Module):
    """Parallel 1-D convolution bank with max-over-time pooling.

    One Conv1d per (channel-count, kernel-size) pair is applied to the same
    input; each branch is ReLU-activated, max-pooled over the time axis, and
    the pooled features are concatenated along the channel dimension.
    """

    def __init__(self, num_channels, kernel_sizes, glove_dim, device="cuda"):
        super(CNN_layers, self).__init__()
        self.convs = nn.ModuleList()
        for out_ch, ksize in zip(num_channels, kernel_sizes):
            self.convs.append(nn.Conv1d(in_channels=glove_dim,
                                        out_channels=out_ch,
                                        kernel_size=ksize).to(device))
        # Re-initialise every branch: Kaiming weights, zero bias.
        for conv in self.convs:
            nn.init.kaiming_normal_(conv.weight.data)
            nn.init.uniform_(conv.bias, 0, 0)
        self.pool = nn.AdaptiveMaxPool1d(1)  # max over the time dimension

    def forward(self, x):
        # x: (batch, glove_dim, seq_len) -> (batch, sum(num_channels))
        pooled = [self.pool(F.relu(conv(x))).squeeze(-1) for conv in self.convs]
        return torch.cat(pooled, dim=1)
class TransformerModel(nn.Module):
    """Transformer encoder with sinusoidal positional encoding followed by
    max-pooling over the sequence, yielding one fixed-size vector per batch
    element.

    Constructor args: ninp = model/embedding width, nhead = attention heads,
    nhid = feed-forward width, nlayers = stacked encoder layers.
    """

    def __init__(self, ninp, nhead, nhid, nlayers, dropout=0.5):
        super(TransformerModel, self).__init__()
        from torch.nn import TransformerEncoder, TransformerEncoderLayer
        self.model_type = 'Transformer'
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        # (the token-embedding layer, causal-mask helper, weight init and
        # linear decoder from the reference implementation are disabled —
        # this module is used purely as an encoder)
        self.pool = nn.AdaptiveMaxPool1d(1)

    def forward(self, src):
        # src is expected to be already embedded; only positions are added.
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src)
        # NOTE(review): the transpose(1, 2) pooling below treats the output
        # as (batch, seq_len, embed), i.e. batch-first, while
        # PositionalEncoding slices by x.size(0) as if seq-first — confirm
        # which layout callers actually pass.
        output = self.pool(output.transpose(1, 2)).squeeze(-1)
        return output
class PositionalEncoding(nn.Module):
    """Additive sinusoidal positional encoding (Vaswani et al., 2017).

    A fixed (max_len, 1, d_model) sine/cosine table is registered as a
    buffer; forward adds the first seq_len rows to the input and applies
    dropout.  Expects input shaped (seq_len, batch, d_model).
    """

    def __init__(self, d_model, dropout=0.1, max_len=450):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)  # even dims: sine
        table[:, 1::2] = torch.cos(positions * freqs)  # odd dims: cosine
        # Shape (max_len, 1, d_model) so it broadcasts over the batch axis.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention over encoder outputs.

    Scores every encoder time step against the decoder state and returns a
    softmax distribution over source positions, shape (batch, src_len).
    """

    def __init__(self, enc_hid_dim, dec_hid_dim):
        super().__init__()
        # Projects [decoder state ; encoder output] down to dec_hid_dim.
        self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim, bias=False)
        # Collapses each energy vector to a scalar score.
        self.v = nn.Linear(dec_hid_dim, 1, bias=False)

    def forward(self, s, enc_output):
        # s: (batch, dec_hid_dim); enc_output: (batch, src_len, enc_hid_dim * 2)
        src_len = enc_output.shape[1]
        # Broadcast the decoder state across every source position.
        repeated = s.unsqueeze(1).repeat(1, src_len, 1)
        combined = torch.cat((repeated, enc_output), dim=2)
        energy = torch.tanh(self.attn(combined))      # (batch, src_len, dec_hid_dim)
        scores = self.v(energy).squeeze(2)            # (batch, src_len)
        return F.softmax(scores, dim=1)
class MultiTaskLossWrapper(nn.Module):
    """Wraps the multi-task model and combines its per-head losses.

    total = 0.9 * target-classification loss + 0.1 * task-identification
    loss; the weights are fixed hand-tuned constants (the learnable
    log-variance weighting and the adversarial discriminator loss are
    commented out).
    """

    def __init__(self, model, num=2, device="cuda"):
        super(MultiTaskLossWrapper, self).__init__()
        self.model = model
        # (learnable per-task loss weights are disabled; fixed weights below)
        self.device = device

    def forward(self, input, targets):
        # input: the wrapped model's input dict; targets: [target_labels, task_labels].
        outputs = self.model(input)
        targets = torch.LongTensor(targets).to(self.device)
        # Weighted cross-entropy per head; the target head dominates.
        target_loss = F.cross_entropy(outputs[0], targets[0]) * 0.9
        task_loss = F.cross_entropy(outputs[1], targets[1]) * 0.1
        # dis_loss = F.cross_entropy(outputs[2], targets[1]) * -0.05
        result = {"target": outputs[0].argmax(dim=1).tolist(), "task": outputs[1].argmax(dim=1).tolist()}
        return result, {"target": target_loss, "task": task_loss, "total": target_loss+task_loss}
class generate(nn.Module):
    """Multi-task text classifier.

    GloVe embeddings feed (a) a per-task "private" CNN branch whose pooled
    features drive target classification, and (b) a shared CNN branch whose
    features drive task recognition.  forward() returns
    [target_logits, task_logits, None]; the trailing None is a placeholder
    for the (disabled) adversarial discriminator head.
    """

    def __init__(self, args):
        super(generate, self).__init__()
        self.embedding = get_embedding(args)
        # BUG FIX: `nn.ModuleList([CNN_layers(...)] * args.task_num)` placed
        # the *same* CNN_layers instance in every slot, so all tasks silently
        # shared one "private" branch.  Build an independent branch per task.
        self.private_layer = nn.ModuleList(
            [CNN_layers(args.num_channels, args.kernel_sizes, args.glove_dim)
             for _ in range(args.task_num)])
        self.task_recognized = CNN_layers(args.num_channels, args.kernel_sizes, args.glove_dim)
        # Classifier heads; the non-bidirectional branch additionally leaves
        # room for GRU features that are currently disabled.
        if args.bidirectional:
            self.fc_target = nn.Linear(sum(args.num_channels), args.output_size)
            self.fc_task = nn.Linear(sum(args.num_channels), args.task_num)
        else:
            self.fc_target = nn.Linear(sum(args.num_channels) + args.enc_hid_size, args.output_size)
            self.fc_task = nn.Linear(sum(args.num_channels) + args.enc_hid_size, args.task_num)
        nn.init.xavier_normal_(self.fc_target.weight)
        nn.init.xavier_normal_(self.fc_task.weight)
        # Kept for state-dict compatibility; unused by the active forward path.
        self.pool = nn.AdaptiveMaxPool1d(1)

    def forward(self, input):
        # input: {"x": token lists, "task_id": int, "seq_len": lengths (unused)}
        emb = self.embedding(input["x"])
        # Conv1d expects (batch, channels, seq_len), hence the permute.
        private_layer = self.private_layer[input["task_id"]](emb.permute(0, 2, 1))
        task_recognized = self.task_recognized(emb.permute(0, 2, 1))
        target = self.fc_target(private_layer)
        task = self.fc_task(task_recognized)
        return [target, task, None]
| [
"43905277+jailbreakaaaa@users.noreply.github.com"
] | 43905277+jailbreakaaaa@users.noreply.github.com |
93307e57c171d6735b35433ae3686998b93ed784 | 66c732374449a15f4f002459a869cd68636e19d6 | /courses/migrations/0004_auto_20200628_1719.py | 2becb5605bae77913e75bbdeaa7a1aecb0aaf702 | [] | no_license | Sujit115/DjangoWebRepo | 54d6a3410ef9e24af0505b1796d820ca7d549a4d | 5546fa18e5e236705cdfab996b4aea71e20fc24d | refs/heads/master | 2022-11-22T11:28:14.331086 | 2020-07-21T18:25:38 | 2020-07-21T18:25:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | # Generated by Django 3.0.7 on 2020-06-28 17:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration for the `courses` app.

    Adjusts existing field definitions: the contributor image upload path
    and several max_length values on ClassContent, ClassData and LevelData.
    """

    dependencies = [
        ('courses', '0003_classcontent_subject_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='classcontent',
            name='contributer_image',
            field=models.ImageField(upload_to='ContributerImage'),
        ),
        migrations.AlterField(
            model_name='classcontent',
            name='contributer_name',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='classcontent',
            name='subject_name',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='classdata',
            name='class_id',
            field=models.CharField(max_length=40),
        ),
        migrations.AlterField(
            model_name='leveldata',
            name='module_name',
            field=models.CharField(max_length=20),
        ),
    ]
| [
"oscarmike286@gmail.com"
] | oscarmike286@gmail.com |
a4a5d7c166a9d300707f6a1c1407b5a9c15b1ace | 14e3a6d5d5ef1a7fc576c0670361fc908630b495 | /python/clx/eda/__init__.py | f5615c081f99808a87e4ee8d070f7f3c2db6964d | [
"Apache-2.0"
] | permissive | rapidsai/clx | 3b6e49b53704de7f81fcd923ae88148a6ed5f031 | 68c14f460b5d3ab41ade9b2450126db0d2536745 | refs/heads/branch-23.04 | 2023-05-25T09:37:15.553353 | 2023-05-19T16:07:00 | 2023-05-19T16:07:00 | 186,716,715 | 169 | 93 | Apache-2.0 | 2023-05-19T16:07:02 | 2019-05-14T23:47:32 | Jupyter Notebook | UTF-8 | Python | false | false | 42 | py | from clx.eda.eda import EDA # noqa: F401
| [
"noreply@github.com"
] | noreply@github.com |
d4f6b8b6a9dbdc67fbd6571ac93cc66ea53e0867 | 1d22f82abc38dd85a844cb6a19ec83ffae2a434d | /experimental/socket_test.py | 6cf9425c7fc13a617f4424a30cbd6e0fade911e0 | [] | no_license | pbarton666/learninglab | c4d5602d148618ee2848a4954d8d93eae24be9ef | f2ad15b77aefcf65bd19e00f3f61687b4f13b737 | refs/heads/master | 2022-12-14T05:55:33.346925 | 2021-07-13T17:21:42 | 2021-07-13T17:21:42 | 84,211,135 | 2 | 3 | null | 2022-12-08T06:51:28 | 2017-03-07T14:52:08 | Jupyter Notebook | UTF-8 | Python | false | false | 78 | py | import socketserver
s=socketserver.BaseServer(1,2)
import socket, socketserver | [
"pbarton@SEB01.COM"
] | pbarton@SEB01.COM |
8afff7ef51720353c451541ed3d0512f11363eb5 | edc0ce5e5312392fcffdc578f0f3d63b48264c93 | /bin/chardetect | ffeab36972b52aa626de82eb576c84e71536a21a | [] | no_license | peytondodd/mugshots-cli | 2867a7cfc8e62d83a5cf1f73e1bcde926ae1ac26 | 279c69065d5aa6c73a145f57687657c6124404f8 | refs/heads/master | 2020-07-27T13:26:04.604491 | 2019-08-21T22:00:34 | 2019-08-21T22:00:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | #!/Users/home/Projects/mython/mython/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"zacharytepic@gmail.com"
] | zacharytepic@gmail.com | |
8f40ef9482f347822d768b8d3cdfbf7aa02c70ab | 244e81a34a40db489e33f506f118f20c2b4109c1 | /scripts/__init__.py | d80acb6381bdb5c74938ea011e7b1871b69235b4 | [] | no_license | cyberbikepunk/archive | 945e50ef2c870c31389b68c9e5b161e9e78b6917 | fc33ce6645628a91601c0300390d9f6a0bd01338 | refs/heads/master | 2021-01-20T18:40:18.867066 | 2016-07-24T18:01:46 | 2016-07-24T18:01:46 | 64,078,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """ This module pulls all the ETL scripts together in one place. """
from scripts.uk_drivers_sync_police import load_into_warehouse
from scripts.german_payroll_wizard import produce_delivery_count_table
all_scripts = {
'sync_police': load_into_warehouse,
'payroll_germany': produce_delivery_count_table
}
def run_etl_scripts(scripts):
    """Execute every ETL callable in *scripts*, in order."""
    for job in scripts:
        job()
| [
"loic@cyberpunk.bike"
] | loic@cyberpunk.bike |
7d5d1ab8699c782798a9997fa32dd09d21c2226c | b59cb56a8427f5abbcfa83f14133b150d40f4c34 | /LeetCode/Python3/symmetric-tree.py | 7452c2a322147496bc25d58baf79e141c25a52f4 | [] | no_license | eudaemonic-one/Lifelong-Learning | 95311699b531d65db8f8d10e108c2af26c82a8d5 | 0c0e13fe3f429158d4ff6ece903253176675a6b5 | refs/heads/master | 2023-07-16T15:43:29.460487 | 2021-08-23T19:32:17 | 2021-08-23T19:32:17 | 191,079,025 | 43 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Check whether a binary tree is a mirror of itself (LeetCode 101).

    The annotations are string literals so the class also defines outside
    LeetCode, where TreeNode is predeclared.
    """

    def isSymmetricIterative(self, root: "TreeNode") -> bool:
        """BFS variant: compare mirrored node pairs via an explicit queue.

        Renamed from isSymmetric — the file previously defined two methods
        with the same name, so this one was silently shadowed (dead code).
        """
        queue = [root, root]
        while queue:
            t1, t2 = queue.pop(0), queue.pop(0)
            if not t1 and not t2:
                continue
            elif not t1 or not t2:
                return False
            elif t1.val != t2.val:
                return False
            # Enqueue the two mirrored pairs of children.
            queue.append(t1.left)
            queue.append(t2.right)
            queue.append(t1.right)
            queue.append(t2.left)
        return True

    def isSymmetric(self, root: "TreeNode") -> bool:
        """Recursive variant: a tree is symmetric iff it mirrors itself."""
        def isMirror(t1, t2):
            if not t1 and not t2:
                return True
            elif not t1 or not t2:
                return False
            return t1.val == t2.val and isMirror(t1.right, t2.left) and isMirror(t1.left, t2.right)
        return isMirror(root, root)
| [
"brookcui97@gmail.com"
] | brookcui97@gmail.com |
c9b5896507f9ebed9f80e3a70c1f2a839c06dffc | 220ac20b650538fcd9bca971a7c4c2fad665dc78 | /gui.py | dbc5e15da0b35028c3b6da357b8fe7f34d31f61b | [] | no_license | Dennis-who/Simple-chat-system | 735a9fc0fcd2649df30b5ad6ba53aa73d44adac6 | 3180716d080b7dd0ba711d7be10ce98918e3f5a0 | refs/heads/main | 2023-07-09T17:35:53.821721 | 2021-08-05T16:50:00 | 2021-08-05T16:50:00 | 393,113,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,256 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 14 09:36:07 2020
@author: zhangyumeng & zhouyuewen
"""
import tkinter as tk
import tkinter.messagebox
#import pickle
import time
from chat_utils import *
import chat_client_class as chat_client
import argparse
import threading
parser = argparse.ArgumentParser(description='chat client argument')
parser.add_argument('-d', type=str, default=None, help='server IP addr')
args = parser.parse_args()
client = chat_client.Client(args)
client.init_chat()
h = 800
w = 1000
class GUI():
def __init__(self):
self.window = tk.Tk()
self.window.title("Login page")
self.canvas = tk.Canvas(self.window, height=h, width=w)
self.canvas.pack()
self.frame = tk.Frame(self.window, bg='#ced4db')
self.frame.place(relwidth=1, relheight=1)
self.textFrame = tk.Frame(self.frame, bg='white').place(relwidth=0.5, relheight=0.3,relx=0.2, rely=0.2)
self.title = tk.Label(self.textFrame, text="Welcome!", font="Times 48 bold",bg='#ced4db', fg="#3a93f8").place(relx=0.33, rely=0.25)
tk.Label(self.textFrame, text="A chatting app could connet the world.", font="Times 24 italic", bg='#ced4db', fg="#3a93f8").place(
relx=0.27,
rely=0.35)
self.usr_name = tk.StringVar()
self.usr_name.set("")
self.label = tk.Label(self.frame, text="Name", bg='white', font="Times 18")
self.label.place(relx=0.23, rely=0.65, relwidth=0.07, relheight=0.05)
self.entry = tk.Entry(self.frame, font=40,textvariable=self.usr_name)
self.entry.place(relwidth=0.32, relheight=0.05,relx=0.3, rely=0.65)
#button
self.button = tk.Button(self.frame, text="Log In", font="Times 18",command=self.fun1)
self.button.place(relx=0.41, rely=0.8, relwidth=0.08, relheight=0.05)
self.window.mainloop()
def fun1(self):
self.window.destroy()
self.name = self.usr_name.get()
ok = tk.messagebox.showinfo(title='Welcome', message='Chat away! ' + self.name)
client.login(self.name)
if ok == 'ok':
self.chat_away()
def chat_away(self):
self.window2 = tk.Tk()
self.window2.geometry("1000x800")
self.window2.title("Let's chat!")
'''创建分区'''
self.f_msglist = tk.Frame(self.window2,height = 400,width = 700, bg='#ced4db') #创建<消息列表分区 >
self.f_msgsend = tk.Frame(self.window2,height = 400,width = 700, bg='#ced4db') #创建<发送消息分区 >
self.f_floor = tk.Frame(self.window2,height = 90,width = 700, bg='#ced4db') #创建<按钮分区>
self.f_right = tk.Frame(self.window2, height = 800,width = 300, bg='#ced4db') #创建《功能分区>
'''创建控件'''
self.txt_msglist = tk.Text(self.f_msglist) #消息列表分区中创建文本控件
self.txt_msglist.tag_config('green',foreground = 'blue') #消息列表分区中创建标签
self.txt_msgsend = tk.Text(self.f_msgsend) #发送消息分区中创建文本控件
def chat_looping():
while True:
self.system_msg = client.proc()
client.output()
time.sleep(CHAT_WAIT)
if self.system_msg != '' and self.system_msg != None :
self.txt_msglist.insert(tk.END, self.system_msg + '\n')
reading_thread = threading.Thread(target = chat_looping)
reading_thread.daemon = True
reading_thread.start()
def cancle_msg():
self.txt_msgsend.delete('0.0',tk.END)
def send_msg():
# name=self.usr_name
self.msg = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())+'\n'
self.txt_msglist.insert(tk.END,self.msg,'green') #添加时间
self.txt_msglist.insert(tk.END,self.txt_msgsend.get('0.0',tk.END))
self.msg_content = str(self.txt_msgsend.get('0.0',tk.END)[:-1])#获取发送消息,添加文本到消息列表
client.console_input.append(self.msg_content)
self.txt_msgsend.delete('0.0',tk.END) #清空发送消息
def get_time():
client.console_input.append('time')
def find_who():
client.console_input.append('who')
def chat_with():
self.window3 = tk.Tk()
self.window3.geometry("500x200")
self.window3.title("Select a person")
self.label_p = tk.Label(self.window3, text='Who do you want to chat with?', bg='white', font="Times 18").place(relx=0.25, rely=0.2)
self.chat_name = tk.StringVar()
self.chat_name.set("")
self.entry_p = tk.Entry(self.window3,textvariable=self.chat_name).place(relx=0.29, rely=0.4)
self.button_p = tk.Button(self.window3, text='Confirm', font="Times 18", command=chat_confirm).place(relx=0.42, rely=0.7)
self.window3.mainloop()
def chat_confirm():
self.window3.destroy()
client.console_input.append('c'+ self.chat_name.get())
def search_term():
self.window4 = tk.Tk()
self.window4.geometry("500x200")
self.window4.title("Select a term")
self.label_s = tk.Label(self.window4, text='What word do you want to search?', bg='white', font="Times 18").place(
relx=0.25, rely=0.2)
self.search_term = tk.StringVar()
self.search_term.set("")
self.entry_s = tk.Entry(self.window4, textvariable=self.search_term).place(relx=0.29, rely=0.4)
self.button_s = tk.Button(self.window4, text='Confirm', font="Times 18", command=search_confirm).place(relx=0.42,
rely=0.7)
self.window4.mainloop()
def search_confirm():
self.window4.destroy()
client.console_input.append('?'+ self.search_term.get())
def get_son():
self.window5 = tk.Tk()
self.window5.geometry("500x200")
self.window5.title("Select a sonnet")
self.label_n = tk.Label(self.window5, text='Which sonnet do you want to get?', bg='white', font="Times 18").place(
relx=0.25, rely=0.2)
self.sonnet_num = tk.StringVar()
self.sonnet_num.set("")
self.entry_n = tk.Entry(self.window5, textvariable=self.sonnet_num).place(relx=0.29, rely=0.4)
self.button_n = tk.Button(self.window5, text='Confirm', font="Times 18", command=sonnet_confirm).place(relx=0.42,rely=0.7)
def sonnet_confirm():
self.window5.destroy()
client.console_input.append('p' + self.sonnet_num.get())
def quit_sys():
client.console_input.append('q')
self.window2.destroy()
# self.txt_msgsend.bind('<KeyPress-Up>',tk.msgsendEvent) #发送消息分区中,绑定‘UP’键与消息发送。
self.button_send = tk.Button(self.f_floor,text = 'Send',font="Times 15" ,activeforeground = "#3a93f8", height= 3, width=10,command = send_msg)
self.button_cancel = tk.Button(self.f_floor,text = 'Cancel',font="Times 15", activeforeground = "#3a93f8", height= 3, width=10,command = cancle_msg) #按钮分区中创建按钮并绑定发送消息函数
self.button_time = tk.Button(self.f_right, text='Time',font="Times 15" ,activeforeground = "#3a93f8", height= 2, width=10, command = get_time)
self.button_who = tk.Button(self.f_right, text='Find',font="Times 15" ,activeforeground = "#3a93f8", height= 2, width=10, command = find_who)
self.button_chat = tk.Button(self.f_right, text='Chat',font="Times 15" ,activeforeground = "#3a93f8", height= 2, width=10, command = chat_with)
self.button_search = tk.Button(self.f_right, text='Search',font="Times 15" ,activeforeground = "#3a93f8", height= 2, width=10, command = search_term)
self.button_sonnet = tk.Button(self.f_right, text='Sonnet',font="Times 15" ,activeforeground = "#3a93f8", height= 2, width=10, command = get_son)
self.button_quit = tk.Button(self.f_right, text='Quit',font="Times 15", activeforeground = "#3a93f8", height= 2, width=10, command = quit_sys)
self.instruction = tk.Label(self.f_right,text= "\nChoose one of the following commands\n \
Time: calendar time in the system\n \
Find: to find out who else are there\n \
Chat: to connect to the peer and chat\n \
Search: to search one term in your chat logs\n \
Sonnet: to get one sonnet\n \
Quit: to leave the chat system\n\n",font="Times 13 italic",bg="white", fg='#3a93f8').place(relx=0.05, rely=0.675, relwidth=0.9)
'''分区布局'''
# self.canvas2.pack()
self.f_msglist.grid(row = 0,column = 0 ) #消息列表分区
self.f_msgsend.grid(row = 1,column = 0) #发送消息分区
self.f_floor.grid(row = 2,column = 0) #按钮分区
self.f_right.grid(row=0, column=2, rowspan = 3) #功能键分区
self.txt_msglist.grid() #消息列表文本控件加载
self.txt_msgsend.grid() #消息发送文本控件加载
self.button_send.place(relx=0.3,rely = 0.2),#sticky = W) #发送按钮控件加载
self.button_cancel.place(relx=0.6,rely = 0.2)#取消按钮控件加载
self.button_time.place(relx=0.37,rely = 0.1)
self.button_who.place(relx=0.37,rely = 0.2)
self.button_chat.place(relx=0.37,rely = 0.3)
self.button_search.place(relx=0.37,rely = 0.4)
self.button_sonnet.place(relx=0.37,rely = 0.5)
self.button_quit.place(relx=0.37,rely = 0.6)
# self.instruction.place(relx=0.37,rely = 0.7)
# self.instruction.pack()
# print(162)
self.window2.mainloop()
gui = GUI()
| [
"noreply@github.com"
] | noreply@github.com |
925db904b3e3bce5fd3d07069328ae9b575f7401 | 1e99d202f94d26d8af5405a8c8284a5ffc345bba | /user/models.py | 09b73ff470a86207cf8f06d317e689aca1d5b450 | [] | no_license | madlad33/drf_pythonmate | 889b6a057ab9ac60b1e1138c2eb4ebc96d873e7c | 0b47ed41e847b0e0a7920e008867cdf971bddd6c | refs/heads/master | 2023-02-18T09:02:35.313419 | 2021-01-11T17:03:19 | 2021-01-11T17:03:19 | 328,583,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.base_user import BaseUserManager
# Create your models here.
class UserManager(BaseUserManager):
def create_user(self,email,password=None,**extrafields):
if not email:
raise ValueError("Email is a required field")
user = self.model(email=self.normalize_email(email),**extrafields)
user.set_password(password)
user.save(using= self._db)
return user
def create_superuser(self,email,password,**extrafields):
user = self.create_user(email,password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class CustomUser(AbstractUser):
username = None
email = models.EmailField(max_length=255,unique=True)
USERNAME_FIELD = 'email'
objects = UserManager()
REQUIRED_FIELDS = ['password']
def __str__(self):
return self.email
class Client(models.Model):
user = models.OneToOneField(CustomUser,on_delete=models.CASCADE) | [
"tanmay.milky33@gmail.com"
] | tanmay.milky33@gmail.com |
eb56f7f4c91d2c693b5a4daae68f5e75b7997b80 | 6622f5a6bb4b2b7c2c20be57af51f0800ea532d6 | /blog/urls.py | 2248d6e08e21eb81b77b5eef51a5e372945d4fe7 | [] | no_license | kenichikawaguchi/my_second_blog | f14e760756fe15a091922e3060c6fc41bc0d9e5b | df9e2974147a6ae8b384b1ede39b1190246045f6 | refs/heads/master | 2020-04-16T10:47:00.067831 | 2019-01-19T00:30:56 | 2019-01-19T00:30:56 | 165,516,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.post_list, name='post_list'),
path('post/<int:pk>/', views.post_detail, name='post_detail'),
path('post/new', views.post_new, name='post_new'),
path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
path('drafts/', views.post_draft_list, name='post_draft_list'),
path('post/<pk>/publish/', views.post_publish, name='post_publish'),
path('post/<pk>/remove/', views.post_remove, name='post_remove'),
path('post/<int:pk>/comment/', views.add_comment_to_post, name='add_comment_to_post'),
path('comment/<int:pk>/approve/', views.comment_approve, name='comment_approve'),
path('comment/<int:pk>/remove/', views.comment_remove, name='comment_remove'),
]
| [
"xianzaikaishiba@gmail.com"
] | xianzaikaishiba@gmail.com |
691ef5198f3ad92abf4d5c6852d3b1a73022750f | 1cedf10cef4b37d38386232d066f3260570c0758 | /squery/repbin/get_md5_url.py | db4d749a7b47a9f3ad28b8f11fa318e7858aab58 | [
"Apache-2.0"
] | permissive | seahurt/OGSManage | eab799ebab00fc6cf1b1f849229cb50a3af498b5 | 65b37ec31d979cc8fc886d0f13bfe8043e898adb | refs/heads/master | 2021-09-05T09:24:06.235617 | 2018-01-26T02:26:53 | 2018-01-26T02:26:53 | 113,641,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,067 | py | # -*-coding:utf-8-*-
# !/usr/bin/env python
# Author : Jiucheng
# Email : chenjiucheng@1gene.com.com
# Last modified :
# Description :
# update : 2017/5/26
import hashlib
import requests
# import logging
from retrying import retry
import fire
# logging.basicConfig(filename = 'md5.log', level = logging.INFO, format = '%(asctime)s %(message)s',
# datefmt = '%m/%d/%Y %I:%M:%S %p')
def lazy_property(func):
name = '_lazy_' + func.__name__
@property
def lazy(self):
if hasattr(self, name):
return getattr(self, name)
else:
value = func(self)
setattr(self, name, value)
return value
return lazy
class SampleInfo(object):
"""
Get sample information from url:"http://medicine.1gene.com.cn/v1/api/reportInfo"
"""
def __init__(self, sample_id):
self.num = 0
self.id = sample_id
# self.md5 = self.get_md5()
# self.data = self.get_data()
@lazy_property
def get_md5(self, key = 'JZIn1cr75aE0dag1gene'):
def md5_code(string):
return hashlib.md5(str(string).encode('utf-8')).hexdigest()
key_md5 = md5_code(key)
id_md5 = md5_code(self.id)
url_md5 = key_md5 + id_md5
return md5_code(url_md5)
@lazy_property
@retry(stop_max_attempt_number = 5, wait_fixed = 5000)
def data(self):
url = 'http://medicine.1gene.com.cn/v1/api/reportInfo'
params = {'id': self.id, 'signature': self.get_md5}
try:
resp = requests.get(url, params)
except requests.exceptions.ConnectionError as e:
self.num += 1
raise ValueError('Connection timeout %d times. Wrong info: {!r%}'.format(self.num, e))
# logging.error('Connection timeout %d times. Wrong info: {!r%}'.format(self.num, e))
else:
return resp.json()
if __name__ == '__main__':
fire.Fire(SampleInfo)
# a = SampleInfo('OG175710801')
| [
"haozi.vv@gmail.com"
] | haozi.vv@gmail.com |
cf42af40c4c65693b1a172c9c045c8f026c1b200 | fbac1fc19ca5736aa2ed4a8d846760bec35d9ec6 | /django_tutorial/modules/Borrow.py | 6db032a63b7f984b1bceebc7e60957551bcdfc57 | [] | no_license | dsmsfans/Router | 390984b0ee1045d13f55f8935d4a974ce3a23a36 | c685d50c799abadc8405c7bb64df7781cab08587 | refs/heads/master | 2020-03-27T02:41:02.178550 | 2018-09-04T06:22:09 | 2018-09-04T06:22:09 | 145,810,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | import pandas as pd
import time
pd.set_option('display.max_columns',None)
pd.set_option('display.max_rows',None)
data = pd.read_excel('Borrow.xlsx')
Title = ['時間戳記','Brand','Model','Name']
borrow_list = []
def Insert():
#Time
local_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
borrow_list.append(local_time)
#Brand
insert = input("Brand: ")
borrow_list.append(insert)
#Model
insert = input("Model: ")
borrow_list.append(insert)
#Name
insert = input("Name: ")
borrow_list.append(insert)
ques = input("Save the record?[y/n] ")
if ques == 'y':
data_toexcel()
def data_toexcel():
global data
output = pd.DataFrame([borrow_list],columns = Title)
data = data.append(output,ignore_index=True)
data.to_excel('Borrow.xlsx',na_rep=True,index=False)
print(data.tail(5))
Insert() | [
"aaa40066@gmail.com"
] | aaa40066@gmail.com |
626cf71c48464cb893de5a9ed1725d4cb7e0a2be | ffa2c5f5a3c5d55edd02d7bff0c1b2ef89051e31 | /src/autobahn/statistics.py | 288819c9312eba656dfa67dd779b70f34ca4070c | [
"MIT"
] | permissive | nnguyen19/Autobahn | 946269cb9a073dd5a5eec62cccebb13fab655ab9 | 7833c70394311dfdb3a1e870988c39fde5a15447 | refs/heads/main | 2023-03-19T23:32:04.487815 | 2021-03-09T03:05:19 | 2021-03-09T03:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,860 | py | """Utilities to compute statistics on the datasets."""
import collections
from typing import Dict, Mapping, Tuple
import numpy as np
import torch
import torch.utils.data
import torch_geometric.data
import tqdm
from autobahn import decompositions, utils
def _update_append_dictionary(accumulator: dict, update: dict):
for k, v in update.items():
accumulator.setdefault(k, []).append(v.clone())
def _make_values_to_array(d: dict):
return {k: torch.cat(v) for k, v in d.items()}
class _TransformShuffleDataset(torch.utils.data.Dataset):
def __init__(self, base_dataset: torch.utils.data.Dataset, transform=None, limit: int=None, generator=None):
length = min(limit, len(base_dataset)) if limit is not None else len(base_dataset)
self._base_dataset = base_dataset
self._permutation = torch.randperm(len(base_dataset), generator=generator).narrow(0, 0, length)
self._transform = transform
def __getitem__(self, idx):
base_idx = int(self._permutation[idx])
value = self._base_dataset[base_idx]
if self._transform is not None:
value = self._transform(value)
return value
def __len__(self):
return len(self._permutation)
def compute_dataset_statistics(dataset: Mapping[int, torch_geometric.data.Data],
max_sample: int=10000, progress: bool=False,
statistics_fn=None):
"""Computes statistics on the given dataset.
Parameters
----------
dataset : Mapping[int, Data]
A dataset of graphs to analyze
max_sample : int
Subsample the dataset to this number of observations to compute statistics.
progress : bool
If True, indicates that a progress bar should be printed.
"""
if statistics_fn is None:
statistics_fn = path_and_cycle_statistics
stats = {
'num_nodes': [],
'num_edges': []
}
path_stats = {}
cycle_stats = {}
dataset = _TransformShuffleDataset(dataset, statistics_fn, max_sample)
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=20, shuffle=False,
num_workers=0 if max_sample < 2000 else 8)
if progress:
dataloader = tqdm.tqdm(dataloader)
for s, p, c in dataloader:
_update_append_dictionary(stats, s)
_update_append_dictionary(path_stats, p)
_update_append_dictionary(cycle_stats, c)
stats = _make_values_to_array(stats)
path_stats = _make_values_to_array(path_stats)
cycle_stats = _make_values_to_array(cycle_stats)
return stats, path_stats, cycle_stats
def global_statistics(data: torch_geometric.data.Data) -> Dict[str, int]:
return {
'num_nodes': data.num_nodes,
'num_edges': data.num_edges,
}
def path_statistics(data: torch_geometric.data.Data, max_length: int=6) -> Dict[int, int]:
num_paths = {}
for i in range(3, max_length + 1):
paths = decompositions.get_path_list(data, i)
num_paths[i] = len(paths)
return collections.Counter(num_paths)
def cycle_statistics(data: torch_geometric.data.Data, max_length: int=6) -> Dict[int, int]:
_, cycles, _, _ = decompositions.cycle_decomposition(data, max_length)
cycle_length, cycle_count = np.unique([len(c) for c in cycles], return_counts=True)
return collections.Counter(dict(zip(cycle_length, cycle_count)))
def path_and_cycle_statistics(data: torch_geometric.data.Data, max_path_length=6, max_cycle_length=6) -> Tuple[Dict[str, int], Dict[int, int], Dict[int, int]]:
"""Computes global, cycle and path statistics for the given data sample."""
global_stat = global_statistics(data)
path_stat = path_statistics(data, max_path_length)
cycle_stat = cycle_statistics(data, max_cycle_length)
return global_stat, path_stat, cycle_stat
| [
"wz2247@nyu.edu"
] | wz2247@nyu.edu |
b67c766d3fb4b8a6075ff3866004f62ed4a8b3d9 | 5bdcc62a5b51d8da7f5fce71b5adeb919a2da7cd | /爬虫编程/案例集合/05/练习实例63.py | 8879969cf5d504d87fb2fd34c2f004adcb713736 | [] | no_license | kingofnorth1/python | ffbde81342a74d4d20c8a2e2e303c270968709ad | f20f014984f4cd1d56443b62df32549d798c693b | refs/heads/master | 2023-09-03T02:17:21.962256 | 2021-11-14T02:08:33 | 2021-11-14T02:08:33 | 422,926,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | """
题目:画椭圆。
画椭圆。 #!/usr/bin/python
# -*- coding: UTF-8 -*-
if __name__ == '__main__':
from Tkinter import *
x = 360
y = 160
top = y - 30
bottom = y - 30
canvas = Canvas(width = 400,height = 600,bg = 'white')
for i in range(20):
canvas.create_oval(250 - top,250 - bottom,250 + top,250 + bottom)
top -= 5
bottom += 5
canvas.pack()
mainloop()
""" | [
"344423164@qq.com"
] | 344423164@qq.com |
9a5c1c8d4f358589a5a518cc0e191b06f084541c | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/Accounting/FPythonCode/FAccountingRollForward.py | aadcf523a93669d6ecde20daab2b4b7f22aa7ead | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,284 | py | """ Compiled: 2020-09-18 10:38:46 """
#__src_file__ = "extensions/accounting/etc/FAccountingRollForward.py"
# operations
from FOperationsCollectionUtils import PopObject
# accounting
from FAccountingEngineEOFY import IAccountingEOFYEngine
from FAccountingCreation import CreateRollForwardJournals
from FAccountingPairReverser import PerformCancellation
from FAccountingReader import ReadRollForwardPairs
from FAccountingDRCRPairGenerator import GenerateDRCRPairs
from FAccountingCalculations import IsAmountZero
#-------------------------------------------------------------------------
class BalanceRollForward(IAccountingEOFYEngine.IRollForwardProvider):
#-------------------------------------------------------------------------
def __init__(self, fiscalYear):
self.__provider = None
self.__toRollForwardBalances = dict()
#-------------------------------------------------------------------------
def PO_Init(self, provider):
self.__provider = provider
#-------------------------------------------------------------------------
def PO_Clear(self):
self.__toRollForwardBalances.clear()
#-------------------------------------------------------------------------
def RFP_IsValidForRollForward(self, balance):
return balance.ChartOfAccount().HasActiveRollForwardTAccount() and \
(not IsAmountZero(balance.Amount()) or not IsAmountZero(balance.BaseAmount()))
#-------------------------------------------------------------------------
def RFP_AddForRollForward(self, key, balance):
self.__toRollForwardBalances[key] = balance
#-------------------------------------------------------------------------
def RFP_RollForward(self, book, fiscalYear, endPeriod, keyFunc):
accountMapper = self.__provider.LKMP_TAccountLedgerKeyMapper()
oldPairs = dict((self.__FindKey(pair, keyFunc), pair) for pair in ReadRollForwardPairs(book, fiscalYear))
for key, balance in self.__toRollForwardBalances.items():
oldPair = PopObject(oldPairs, key)
rollforwardAmount, rollforwardBaseAmount = self.__CalculateRollForwardAmount(balance, oldPair)
newPair = next(GenerateDRCRPairs(CreateRollForwardJournals(rollforwardAmount, rollforwardBaseAmount, balance, endPeriod, accountMapper), True))
self.__ProcessPairs(oldPair, newPair, keyFunc, balance.AccountingPeriod())
#-------------------------------------------------------------------------
def __FindKey(self, pair, keyFunc):
for journal in pair.Journals():
if journal.Account().RollForwardTAccount():
return keyFunc(journal)
return None
#-------------------------------------------------------------------------
def __CalculateRollForwardAmount(self, balance, oldPair):
rollforwardAmount = balance.Amount()
rollforwardBaseAmount = balance.BaseAmount()
if oldPair:
for journal in oldPair.Journals():
if journal.Balance() == balance.Original():
rollforwardAmount -= journal.Amount()
rollforwardBaseAmount -= journal.BaseAmount()
return rollforwardAmount, rollforwardBaseAmount
#-------------------------------------------------------------------------
def __ProcessPairs(self, oldPair, newPair, keyFunc, startPeriod):
if newPair and oldPair:
connectedPairs = [pair for pair in PerformCancellation(oldPair, None, None)]
connectedPairs.append(newPair)
for pair in connectedPairs:
self.__SetBalanceRef(pair, keyFunc, startPeriod)
self.__provider.STPUP_AddConnectedPairsForUpdate(connectedPairs)
elif newPair:
self.__SetBalanceRef(newPair, keyFunc, startPeriod)
self.__provider.STPUP_AddPairForUpdate(newPair)
#-------------------------------------------------------------------------
def __SetBalanceRef(self, pair, keyFunc, startPeriod):
for journal in pair.Journals():
key = keyFunc(journal)
balanceForKey = self.__provider.BC_GetOrCreateBalanceForKey(key, journal, startPeriod)
journal.Balance(balanceForKey)
| [
"nencho.georogiev@absa.africa"
] | nencho.georogiev@absa.africa |
4f6edf20cdebe577b8864010fef1e297df7d682c | e18a353582609732c795401f1a01bc762bd939f2 | /top/python/test_reweighting.py | 091f786c198a373f5f95cb41e3770088680d9081 | [] | no_license | s-farry/workspaces | 06741807bb464bb0712d52108c2d1b7ae62b1353 | 0dcf3868dcbe110206ea88ff5c9e04a3b44b1ca1 | refs/heads/master | 2020-04-03T00:45:39.152227 | 2017-06-15T16:33:33 | 2017-06-15T16:33:33 | 64,213,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,370 | py | from Jawa import MWTemplate
from ROOT import TFile, TTree, TCut, TPaveText, TLine
# Open the generated top-pair tuple; it carries the EFT reweighting
# branches (rwgt_1..rwgt_N) consumed by the template below.
f = TFile("/user2/sfarry/workspaces/top/output.root")
t = f.Get("topTuple")
# Build a multi-weight template of the absolute (anti)top rapidity and
# normalise every histogram (nominal and reweighted) to unit entries.
mwt = MWTemplate("top_eft")
mwt.AddTree(t)
mwt.ApplyCut()
# |y| in 50 bins over [0, 4] for both the top and the antitop.
for branch in ("top_rap", "antitop_rap"):
    mwt.AddVar(branch, "abs(%s)" % branch, 50, 0, 4)
# Register the 18 EFT reweighting branches (rwgt_1 .. rwgt_18).
for idx in range(1, 19):
    wname = "rwgt_%d" % idx
    mwt.AddWeight(wname, wname)
mwt.FillVars()
norm = 1.0 / t.GetEntries()
mwt.Scale(norm)
mwt.ScaleAllWeights(norm)
mwt.SaveToFile()
# Kinematic selections, kept for later use: forward/backward ordering of
# the top vs antitop rapidity, an LHCb-acceptance region and a central
# (general-purpose-detector style) region.
fwd = TCut("abs(top_rap) > abs(antitop_rap)")
bwd = TCut("abs(top_rap) < abs(antitop_rap)")
lhcb = TCut("top_rap > 2.0 && top_rap < 4.5 && antitop_rap > 2.0 && antitop_rap < 4.5")
gpd = TCut("abs(top_rap) < 2.5 && abs(antitop_rap) < 2.5")

# Dashed black guide line at ratio == 1 for the comparison pads.
line = TLine(0.0, 1.0, 5.0, 1.0)
line.SetLineColor(1)
line.SetLineStyle(2)

# Coefficient label for the C^{(1,3)}_{qq} plot (currently not drawn).
c13qq_label = TPaveText(0.2, 0.38, 0.33, 0.5, 'NDC')
c13qq_label.AddText('C^{(1,3)}_{qq}')
# NOTE(review): disabled variant that would build a forward-only template
# with a different binning (100 bins over [0, 5]) and 24 weights; kept as a
# triple-quoted string so it can be re-enabled easily.
'''
mwt_fwd = MWTemplate("top_eft_fwd")
mwt_fwd.AddTree(f.Get("topTuple"))
mwt_fwd.ApplyCut()
mwt_fwd.AddVar("top_rap", "abs(top_rap)", 100, 0, 5)
mwt_fwd.AddWeight("central", "w")
for i in range(24):
    mwt_fwd.AddWeight("rwgt_"+str(i+1), "rwgt_"+str(i+1))
mwt_fwd.FillVars()
mwt_fwd.SaveToFile()
'''
# Apply the standard LHCb plotting style before any Plot object is created.
from Style import *
SetLHCbStyle()
from PlotTools import *
# Overview plot: |y(top)| for the first eleven reweighting variations
# overlaid on a single canvas, with every variation compared to rwgt_1.
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_%d" % w) for w in range(1, 12)])
for hist in p.plots:
    hist.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'test.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 4, 6, 9, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist'] * 12)
p.setProp('fillstyles', 0)
# NOTE(review): comparison indices run to 12 but only 11 histograms are
# loaded - presumably PlotTools ignores out-of-range entries; verify.
p.setProp('toCompare', {0: list(range(1, 13))})
p.setProp('ycomplims', [0.8, 1.15])
p.setProp('ylabel', '#sigma [pb]')
p.setProp('xlabel', 'y (top)')
p.drawROOT()
# ---------------------------------------------------------------------------
# One |y(top)| plot per EFT operator: the SM prediction (rwgt_1) overlaid
# with the sample reweighted to the Wilson coefficient set to +4 and -4,
# plus a ratio pad of each variation to the SM.  The eight sections below
# previously duplicated the same ~17 lines each; they are now driven by a
# single helper and a configuration table.
# ---------------------------------------------------------------------------

# Paired colours: the SM in black, then each +/- variation sharing a colour.
_EFT_COLORS = [1, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43,
               44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33]


def _draw_eft_plot(filename, weights, coeff, ycomplims):
    """Draw |y(top)| for the SM weight and the +/-4 variations of one operator.

    filename  -- output pdf name, written to the figures directory
    weights   -- (SM, +4, -4) reweighting indices into the template
    coeff     -- TLatex string naming the Wilson coefficient
    ycomplims -- y-axis range of the ratio (comparison) pad
    Returns the configured Plot after drawing it.
    """
    plot = Plot([mwt.GetWeightHist("top_rap", "rwgt_%d" % w) for w in weights])
    for hist in plot.plots:
        hist.UseCurrentStyle()
    plot.AutoYlims(ylow=0.000001)
    plot.setProp('forcestyle', True)
    plot.setProp('filename', filename)
    plot.setProp('location', '/user2/sfarry/workspaces/top/figures')
    plot.setProp('colors', _EFT_COLORS)
    plot.setProp('drawOpts', ['hist' for _ in range(17)])
    plot.setProp('fillstyles', 0)
    plot.setProp('toCompare', {0: [1, 2]})  # ratio of both variations to the SM
    plot.setProp('ycomplims', ycomplims)
    plot.setProp('compObjs', [line])        # dashed guide at ratio == 1
    plot.setProp('labels', ['%s = 0' % coeff,
                            '%s = +4' % coeff,
                            '%s = -4' % coeff])
    plot.setProp('ylabel', '#sigma [pb]')
    plot.setProp('xlabel', 'y (top)')
    plot.setProp('leglims', [0.7, 0.6, 0.9, 0.9])
    plot.drawROOT()
    return plot


# (pdf name, (SM, +4, -4) weight indices, coefficient label, ratio-pad y limits)
_EFT_PLOTS = [
    ('topeft_C13qq.pdf', (1, 2, 3),   'C^{1,3}_{qq}', [0.5, 1.49]),
    ('topeft_C81qq.pdf', (1, 4, 5),   'C^{8,1}_{qq}', [0.8, 1.2]),
    ('topeft_C83qq.pdf', (1, 6, 7),   'C^{8,3}_{qq}', [0.8, 1.2]),
    ('topeft_C8ut.pdf',  (1, 8, 9),   'C^{8}_{ut}',   [0.8, 1.2]),
    ('topeft_C8dt.pdf',  (1, 10, 11), 'C^{8}_{dt}',   [0.8, 1.2]),
    ('topeft_C1qu.pdf',  (1, 12, 13), 'C^{1}_{qu}',   [0.8, 1.2]),
    ('topeft_C1qd.pdf',  (1, 14, 15), 'C^{1}_{qd}',   [0.8, 1.2]),
    ('topeft_C1qt.pdf',  (1, 16, 17), 'C^{1}_{qt}',   [0.8, 1.2]),
]

for _fname, _weights, _coeff, _ylims in _EFT_PLOTS:
    p = _draw_eft_plot(_fname, _weights, _coeff, _ylims)
# ---------------------------------------------------------------------------
# Top/antitop rapidity asymmetry plots for the first three operators,
# again comparing the +/-4 coefficient variations to the SM.  The three
# previously-duplicated sections are driven by one helper.
# ---------------------------------------------------------------------------

def _draw_asymm_plot(filename, weights):
    """Draw the top-vs-antitop rapidity asymmetry for one operator.

    For each reweighting index in *weights* the asymmetry histogram
    (h_top - h_antitop) / (h_top + h_antitop) is formed bin-by-bin via
    TH1::GetAsymmetry from the |y| templates.  Returns the drawn Plot.
    """
    plot = Plot([mwt.GetWeightHist("top_rap", "rwgt_%d" % w).GetAsymmetry(
        mwt.GetWeightHist("antitop_rap", "rwgt_%d" % w)) for w in weights])
    for hist in plot.plots:
        hist.UseCurrentStyle()
    plot.setProp('forcestyle', True)
    plot.setProp('filename', filename)
    plot.setProp('location', '/user2/sfarry/workspaces/top/figures')
    plot.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42,
                            43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48,
                            49, 49, 30, 31, 32, 33])
    plot.setProp('drawOpts', ['hist' for _ in range(17)])
    plot.setProp('fillstyles', 0)
    plot.setProp('toCompare', {0: [1, 2]})  # each variation vs the SM
    plot.setProp('ycomplims', [0.8, 1.2])
    plot.drawROOT()
    return plot


for _fname, _weights in [('topeft_asymm_C13qq.pdf', (1, 2, 3)),
                         ('topeft_asymm_C81qq.pdf', (1, 4, 5)),
                         ('topeft_asymm_C83qq.pdf', (1, 6, 7))]:
    p = _draw_asymm_plot(_fname, _weights)
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,8,9)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C8ut.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,10,11)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C8dt.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,12,13)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C1qu.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,14,15)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C1qd.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,16,17)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C1qt.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
| [
"sfarry@hep.ph.liv.ac.uk"
] | sfarry@hep.ph.liv.ac.uk |
f2c507fa9170a3c812dc51c78d62b2998c90f32a | f46a4661d391b99ca59e9506602640e2246e10e2 | /app.py | 06bf7efc15b69c0b2801e47c8fb11623caa4bcf8 | [
"MIT"
] | permissive | zcbrand/sdev300flaskapp | 11726eb55ec294e7f5c297525044660e74dd6dfc | e28ed03b4d765d5c39ebc96ba911ac7ce142d3d3 | refs/heads/main | 2023-04-20T05:28:34.174652 | 2021-05-12T00:56:42 | 2021-05-12T00:56:42 | 361,536,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,808 | py | """
Author: Zachary Brandenburg
"""
import secrets
from datetime import datetime
from flask import Flask, render_template, request, flash, redirect, session
from auth.login import valid_login, user_exists, register_user, complexity, \
password_is_not_common, reset_password, matches_last_password
# Application object; templates and static files come from the usual folders.
app = Flask('sdev300flaskapp',
            template_folder='templates',
            static_folder='static')
# Fresh random secret on every start: existing sessions are invalidated on restart.
secret = secrets.token_urlsafe(32)
app.secret_key = secret
@app.route('/')
def index():
    """Landing page HOME: send anonymous visitors to /login, else render home."""
    if not session.get('logged_in'):
        return redirect('/login')
    else:
        # Static content for the main page. (The original left a stray string
        # literal statement here that did nothing; replaced with this comment.)
        nav = [
            {'name': 'SlevinLabs', 'url': 'https://www.slevinlabs.com'},
            {'name': 'Register', 'url': '/register'},
            {'name': 'Login', 'url': '/login'}
        ]
        description = '''SlevinLabs is a leading provider of cutting-edge technologies and
        services, offering scalable solutions for companies of all sizes. Founded
        by a group of friends who started by scribbling their ideas on a piece of
        paper, today we offer smart, innovative services to dozens of clients
        worldwide. We built our solutions by closely listening to our potential
        clientele and understanding their expectations with our product. We know how
        to analyze this information and customize our offering to changing market
        needs. Why not join our fast growing customer base? Get in touch today to
        learn more about the SlevinLabs story.'''
        items = [
            'Provide software solutions in an ordered timely manner',
            'Consult teams to align goals with Agile methodologies',
            'Set up your business for success'  # BUG FIX: was "Set upt you business"
        ]
        services = [
            {'service': 'Platinum', 'contents': 'Custom seminar for your entire org',  # BUG FIX: was "you're"
             'price': 'Contact'},
            {'service': 'Gold', 'contents': 'Private Seminar for team of 15, Silver, Bronze',
             'price': '$10,000'},
            {'service': 'Silver', 'contents': 'Private seminar for one individual, Bronze',
             'price': '$1,000'},
            {'service': 'Bronze', 'contents': 'Access to our video library', 'price': '$100'}
        ]
        time = datetime.now().isoformat(' ')  # page render timestamp
        return render_template(
            'home.html',
            user=session['username'],
            nav=nav,
            services=services,
            title='SlevinLabs',
            description=description,
            subtitle='WHAT WE DO',
            subsub='Easy. Fast. Secure.',
            items=items,
            time=time
        )
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Login page: start a session on valid credentials, else flash an error."""
    if request.method == 'POST':
        if valid_login(request.form.get('username'), request.form.get('password')):
            session['logged_in'] = True
            session['username'] = request.form.get('username')
            return redirect('/')
        flash('Incorrect Username or Password')
        return redirect('/login')
    if request.method == 'GET':
        return render_template('login.html', title='Login')
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Registration page: validate the submitted credentials and create the
    account, or re-display the form with an error message."""
    error = None
    if request.method == 'POST':
        # Validation checks run in order; the first failure wins.
        if not request.form['username']:
            error = 'Please enter a Username.'
        elif not request.form['password']:
            error = 'Please enter a Password.'
        elif user_exists(request.form.get('username')):
            error = 'You are already registered'
        elif not password_is_not_common(request.form.get('password')):
            error = 'Password is in a list of known passwords'
        elif not complexity(request.form.get('password')):
            error = 'Make your password more complex'
        if error is not None:
            flash(error)
            return redirect('/register')
        register_user(request.form.get('username'), request.form.get('password'))
        flash('You are registered')
        return redirect('/login')
    if request.method == 'GET':
        return render_template('register.html', title='Register')
@app.route('/change_password', methods=['GET', 'POST'])
def change_password():
    """Password update: verify the old credentials, then validate and store
    the new password."""
    error = None
    if request.method == 'POST':
        if not request.form['username']:
            error = 'Please enter a Username.'
        elif not request.form['password']:
            # NOTE(review): the fields used below are old_password/new_password —
            # confirm a 'password' field actually exists on this form.
            error = 'Please enter a Password.'
        # BUG FIX: reject when the old credentials are WRONG; the original set
        # the error when valid_login() SUCCEEDED (missing `not`, cf. login()).
        elif not valid_login(request.form.get('username'), request.form.get('old_password')):
            error = 'Incorrect username or password'
        # BUG FIX: reject when the new password IS common; the original check was
        # inverted relative to the same check in register().
        elif not password_is_not_common(request.form.get('new_password')):
            error = 'Password is in a list of known passwords'
        elif not complexity(request.form.get('new_password')):
            error = 'Make your password more complex'
        elif matches_last_password(request.form.get('username'), request.form.get('new_password')):
            error = 'Password must not match previous'
        if error is None:
            reset_password(request.form.get('username'), request.form.get('new_password'),
                           request.form.get('old_password'))
            flash('Password Updated')
            return redirect('/login')
        else:
            flash(error)
            return redirect('/change_password')
    if request.method == 'GET':
        return render_template(
            'change_password.html',
            title='Change Password'
        )
| [
"zac@zacharycraig.com"
] | zac@zacharycraig.com |
36b8165874527ec6b6a038f2526a3b40284cad6c | 80075edf813fa1c7ef3126b153e7ab2f6c42f0be | /xml/Reading_XML_File_From_Python_Code.py | 992076a09487b050d0949ebe21ed811ab5e7f2c2 | [] | no_license | keshavkummari/python-nit-930pm | a7e16701d981145f8fdc27e741169ef76616bc8a | aa3bb7654c091e3d04098483525768e287604c38 | refs/heads/master | 2020-04-01T15:00:20.366890 | 2018-11-29T17:14:41 | 2018-11-29T17:14:41 | 153,316,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,404 | py | # A simple XML file, later parse it with Python minidom.
'''
staff.xml
<?xml version="1.0"?>
<company>
<name>Mkyong Enterprise</name>
<staff id="1001">
<nickname>mkyong</nickname>
<salary>100,000</salary>
</staff>
<staff id="1002">
<nickname>yflow</nickname>
<salary>200,000</salary>
</staff>
<staff id="1003">
<nickname>alex</nickname>
<salary>20,000</salary>
</staff>
</company>
'''
"""
<?xml version="1.0"?>
<company>
<name>Online Ucator</name>
<staff id="1001">
<nickname>Minnu</nickname>
<salary>100,000</salary>
</staff>
<staff id="1002">
<nickname>Keshav</nickname>
<salary>200,000</salary>
</staff>
<staff id="1003">
<nickname>Jessi</nickname>
<salary>20,000</salary>
</staff>
</company>
"""
#2. DOM Example 1
#A simple Python minidom example.
# dom-example.py
from xml.dom import minidom

# Parse the sample document; getElementsByTagName returns a NodeList.
doc = minidom.parse("staff.xml")

# The company name is the first <name> element in the document.
company_name = doc.getElementsByTagName("name")[0]
print(company_name.firstChild.data)

# One line per <staff> entry: its id attribute plus nickname/salary children.
for staff in doc.getElementsByTagName("staff"):
    sid = staff.getAttribute("id")
    nickname = staff.getElementsByTagName("nickname")[0]
    salary = staff.getElementsByTagName("salary")[0]
    print("id:%s, nickname:%s, salary:%s" %
          (sid, nickname.firstChild.data, salary.firstChild.data))
| [
"keshav.kummari@gmail.com"
] | keshav.kummari@gmail.com |
073d78c72797a498ee7bb2d1df9ab5ad1b900dad | 47c3cd8a60943f5d6d78afdfb399adc599b92840 | /milk.py | a95551bffb3b6526872e2be60fbac769ab162d4a | [] | no_license | den4uk/milkman | 30d071787a4af253eb307513db2226bd3161b0d0 | 464281ac799621fc03cc2f77c38c3d0b0b2ed991 | refs/heads/master | 2021-09-23T17:41:38.106055 | 2018-09-25T21:21:30 | 2018-09-25T21:21:30 | 116,485,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,025 | py | from flask import Flask, request, render_template, session, redirect, url_for, flash
from flask_mail import Mail, Message
from functools import wraps
from datetime import timedelta, date
from itertools import cycle, count
from collections import OrderedDict
from passlib.hash import sha256_crypt
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, PasswordField, DateField, SelectField, TextField, validators
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
import atexit
app = Flask(__name__)
app.config.from_pyfile('config.py')  # DB URI, mail credentials etc. come from config.py
db = SQLAlchemy(app)
mail = Mail(app)
# Background scheduler for the daily reminder job; jobs persist in the same DB.
cron = BackgroundScheduler(
    jobstores={'default': SQLAlchemyJobStore(url=app.config['SQLALCHEMY_DATABASE_URI'])}
)
# Decorator for admin login requirements
def login_required(f):
    """View decorator: let logged-in admins through, send everyone else to login."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if session.get('admin'):
            return f(*args, **kwargs)
        return redirect(url_for('login'))
    return decorated_function
class Settings(db.Model):
    """Singleton settings row (id=1): admin credentials, rota schedule and e-mail text."""
    id = db.Column(db.Integer, primary_key=True)
    user = db.Column(db.String(20), default='admin')  # admin login name
    passwd = db.Column(db.String(100), nullable=False, default=sha256_crypt.encrypt('admin'))  # sha256-crypt hash
    offset = db.Column(db.Integer, nullable=False, default=0)  # shifts the rotation start point
    period = db.Column(db.Integer, nullable=False, default=7)  # days between buys
    starts = db.Column(db.Date, nullable=False, default=date(2018,1,1))  # rota epoch date
    display = db.Column(db.String(50), default='Milk App')  # e-mail sender display name
    subject = db.Column(db.String(100), default='Reminder: Buy Milk')
    body = db.Column(db.Text, nullable=False, default='Please buy milk. Thank you!')  # may contain {name}
    hour = db.Column(db.Integer, nullable=False, default=7)  # hour of day the reminder fires
class Milkmen(db.Model):
    """A person in the milk-buying rotation."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(20), nullable=False)
    email = db.Column(db.String(50))  # optional; reminders are only sent when set
    active = db.Column(db.Boolean, default=True)  # inactive people are skipped by the rota
class LoginForm(FlaskForm):
    """Admin sign-in form (field labels are rendered by the template)."""
    user = StringField('', [validators.DataRequired()])
    passwd = PasswordField('', [validators.DataRequired()])
class UserForm(FlaskForm):
    """Add/edit a milkman: display name plus an optional e-mail address."""
    name = StringField('', [validators.DataRequired()])
    email = StringField('')
class ChangePasswordForm(FlaskForm):
    """Change the admin credentials; the two password fields must match."""
    user = StringField('', [validators.DataRequired()])
    passwd = PasswordField('Password', [
        validators.DataRequired(),
        validators.Length(min=6, message='Too short!'),
        validators.EqualTo('passwd2', message='Passwords must match!'),
    ])
    # Confirmation field checked by the EqualTo validator above.
    passwd2 = PasswordField('', [validators.DataRequired()])
class SettingsForm(FlaskForm):
    """Rota settings edited from the dashboard."""
    offset = IntegerField('Offset')  # shifts which milkman is "next"
    starts = DateField('Start Date', [validators.DataRequired()])
    period = SelectField(
        label='How Frequently',
        choices=[('1', 'Everyday'), ('2', 'Every 2 days'), ('3', 'Every 3 days'), ('7', 'Weekly'), ('14', 'Bi-Weekly')],
    )
class EmailForm(FlaskForm):
    """Reminder e-mail settings: sender display name, subject, body and send hour."""
    display = StringField('From Name', [validators.DataRequired()])
    subject = StringField('Subject', [validators.DataRequired()])
    body = TextField('E-Mail Body', [validators.DataRequired()])
    # 24 choices "00:00".."23:00"; the stored value is the stringified hour.
    hour = SelectField('Send Time At', choices=[(h, '{}:00'.format(h.zfill(2))) for h in map(str,range(24))])
# Milkman finder algorithm
def milkmen(get_first=False):
    """Build the upcoming buy rota for all active milkmen.

    Walks future buy dates (every ``period`` days from ``starts``, shifted by
    ``offset`` periods) and tags each active milkman with his next ``buy_date``
    (a transient attribute, read e.g. by send_mail). Returns one entry per
    active milkman, or stops after the first date when *get_first* is true.
    """
    s = Settings.query.one_or_none()
    # BUG FIX (naming/perf): the original bound the query to a local called
    # `milkmen`, shadowing this function, and re-ran the COUNT query on every
    # loop iteration; the count is hoisted out here.
    active = Milkmen.query.filter_by(active=True)
    total = active.count()
    rota = []
    for person, day in zip(cycle(active), count(s.period * s.offset, s.period)):
        buy_date = s.starts + timedelta(days=day)
        # Skip dates that are already more than one period in the past.
        if buy_date > (date.today() - timedelta(days=s.period)):
            person.buy_date = buy_date
            rota.append(person)
        if len(rota) == total or get_first:
            break
    return rota
@app.route('/')
def index():
    """Public landing page: render the upcoming milk rota."""
    rota = milkmen()
    return render_template("index.html", data=rota)
@app.route('/admin', methods=['GET', 'POST'])
def login():
    """Admin login: verify credentials against the stored Settings row."""
    if session.get('admin'):
        return redirect('/dashboard')
    form = LoginForm(request.form)
    if request.method == 'POST' and form.validate_on_submit():
        account = Settings.query.filter_by(user=form.user.data).first()
        if account and sha256_crypt.verify(form.passwd.data, account.passwd):
            session.update({'admin': True})
            return redirect('/dashboard')
        flash('Wrong login credentials!', category='danger')
    return render_template('login.html', form=form)
@app.route('/dashboard', methods=['GET', 'POST'])
@login_required
def dashboard():
    """Admin dashboard: edit rota settings or nudge the rotation offset."""
    settings = Settings.query.get_or_404(1)
    form = SettingsForm(request.form, obj=settings)
    posted = request.method == 'POST'
    # 'controls' submit button: save the full settings form.
    if posted and 'controls' in request.form and form.validate_on_submit():
        form.populate_obj(settings)
        db.session.commit()
        return redirect('/dashboard')
    # 'move_offset' buttons: shift the rota one slot up or down.
    if posted and 'move_offset' in request.form:
        direction = request.form.get('move_offset')
        if direction == 'up':
            settings.offset -= 1
        if direction == 'down':
            settings.offset += 1
        db.session.commit()
        return redirect('/dashboard')
    return render_template('dashboard.html', form=form, data=milkmen())
@app.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Update the stored admin username/password (password is re-hashed)."""
    settings = Settings.query.get_or_404(1)
    form = ChangePasswordForm(request.form)
    form.user.data = settings.user  # pre-fill the current login name
    if request.method == 'POST' and form.validate_on_submit():
        settings.user = request.form.get('user')
        settings.passwd = sha256_crypt.encrypt(request.form.get('passwd'))
        db.session.commit()
        # BUG FIX: typo in the flash message ("detalils" -> "details").
        flash('Your login details were changed!', category='success')
        return redirect('/change-password')
    return render_template('change_pw.html', form=form)
@app.route('/manage', methods=['GET', 'POST'])
@login_required
def user_manage():
    """List milkmen and handle the add / toggle-active / delete actions."""
    form = UserForm(request.form)
    if request.method == 'POST':
        if 'active_user' in request.form:
            target = Milkmen.query.get_or_404(request.form.get('active_user'))
            target.active = not target.active
        elif 'del_user' in request.form:
            target = Milkmen.query.get_or_404(request.form.get('del_user'))
            db.session.delete(target)
        elif 'add_user' in request.form and form.validate_on_submit():
            db.session.add(Milkmen(name=form.name.data, email=form.email.data))
        db.session.commit()
        return redirect('/manage')
    return render_template('manage.html', users=Milkmen.query.all(), form=form, action='Add')
@app.route('/manage/<int:user>', methods=['GET', 'POST'])
@login_required
def user_edit(user=None):
    """Edit one milkman's name/e-mail; the URL variable *user* is his id."""
    milkman = Milkmen.query.get_or_404(user)
    form = UserForm(request.form, obj=milkman)
    if request.method == 'POST' and form.validate_on_submit():
        form.populate_obj(milkman)
        db.session.commit()
        return redirect('/manage')
    return render_template('manage.html', users=Milkmen.query.all(), form=form, action='Save')
@app.route('/email-settings', methods=['GET', 'POST'])
@login_required
def email_settings():
    """Edit the reminder e-mail template and reschedule the daily send job."""
    prefs = Settings.query.get_or_404(1)
    form = EmailForm(request.form, obj=prefs)
    if request.method == 'POST' and form.validate_on_submit():
        form.populate_obj(prefs)
        db.session.commit()
        # Move the scheduled job to the newly chosen hour.
        # NOTE(review): assumes exactly one job exists; get_jobs()[0] would
        # raise IndexError otherwise — confirm start_cron always ran first.
        scheduled = cron.get_jobs()[0]
        cron.reschedule_job(scheduled.id, trigger='cron', hour=prefs.hour, minute=0)
        flash('Success! E-Mail Settings updated.', category='success')
        return redirect('/email-settings')
    return render_template('email.html', form=form)
@app.route('/logout')
def logout():
    """Clear the whole session (drops the admin flag) and go to the landing page."""
    session.clear()
    return redirect('/')
def send_mail():
    """Daily cron job: e-mail the next milkman if today is his buy date and he
    has an e-mail address on file."""
    settings = Settings.query.get_or_404(1)
    person = milkmen(get_first=True)[0]
    if not person.email or person.buy_date != date.today():
        return
    msg = Message(
        subject=settings.subject,
        sender=(settings.display, app.config['MAIL_USERNAME']),
        recipients=[person.email],
    )
    msg.body = settings.body.format(name=person.name)
    with app.app_context():
        mail.send(msg)
@app.before_first_request
def start_cron():
    """Start the background scheduler on the first request.

    Schedules send_mail() daily at the configured hour and registers an
    atexit hook so the scheduler shuts down with the process.
    """
    atexit.register(lambda: cron.shutdown(wait=False))
    settings = Settings.query.get_or_404(1)
    cron.add_job(send_mail, 'cron', hour=settings.hour, minute=0)
    cron.start()
# Initialises the DB and creates the admin user; run from CLI
def init_db():
    """Create all tables and insert the default Settings row if missing (CLI use)."""
    db.init_app(app)
    db.create_all()
    if not Settings.query.one_or_none():
        db.session.add(Settings())
        db.session.commit()
# Dev entry point: run the built-in server with the debugger enabled.
if __name__ == '__main__':
    app.run(debug=True)
| [
"den@saz.lt"
] | den@saz.lt |
1f874d56c7ddd52297a1017d98f80668658e6bcc | 08a059f91391dabf51ca015c1658e91e597b9dfb | /combineDatabase.py | 7ec0a7e328b12ffacb6e43af6a8f55b8f18a6c30 | [] | no_license | xiahuadong1981/DenseNet_centerloss-CapsulNet-InceptionV3_use | da7025fb467093744ea28400a9b7d6c3e431b184 | 2ca78db213f20950752a0782fc1d9af378ef5de0 | refs/heads/master | 2020-06-29T19:40:55.467871 | 2019-03-02T10:10:01 | 2019-03-02T10:10:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,826 | py | # coding=utf-8
import os
import shutil
# 得到库的所有不同手指文件夹的路径
def get_folder_dir(path):
    """Return the full path of every sub-directory (at any depth) below *path*."""
    return [os.path.join(parent, sub)
            for parent, subdirs, _files in os.walk(path, topdown=False)
            for sub in subdirs]
# 将不同手指的文件夹复制到另外一个大的文件夹里
def copy_folders_to(folder_dir, dst_folder):
    """Copy each per-finger class folder in *folder_dir* into *dst_folder*.

    If the destination folder already exists the files are merged into it
    (flattened: any sub-directory structure under the source is not
    recreated); otherwise the whole tree is copied.

    BUG FIX: the destination name was derived with f.split('/')[-1] and a
    "%s/%s" path join, which are not portable across OS path separators;
    replaced with os.path.basename / os.path.join.
    """
    for src in folder_dir:
        print(src)  # progress log, kept from the original
        dst_dir = os.path.join(dst_folder, os.path.basename(src))
        if os.path.exists(dst_dir):
            # Merge: copy every file found anywhere under src into dst_dir.
            for root, dirs, files in os.walk(src, topdown=False):
                for name in files:
                    shutil.copy(os.path.join(root, name), os.path.join(dst_dir, name))
        else:
            shutil.copytree(src, dst_dir)
# 一个个地复制
# Merge every source database's per-finger folders into one combined set.
dst_folder = '/home/users/wdd/myDatabase/SD_TH_ML_CP_set'
source_roots = (
    '/home/users/wdd/myDatabase/Finger_ROI/fvdb_ROI',
    '/home/users/wdd/myDatabase/Finger_ROI/MALAY_rawData_ROI',
    '/home/users/wdd/myDatabase/Finger_ROI/SDUMLA_rawData_ROI',
    '/home/users/wdd/myDatabase/Finger_ROI/SDUMLA_splitValidSet',
    '/home/users/wdd/myDatabase/Finger_ROI/TSINGHUA_rawData_ROI',
)
for source_root in source_roots:
    copy_folders_to(get_folder_dir(source_root), dst_folder)
| [
"wdongdongde"
] | wdongdongde |
bab242cced1e1ad5251f1876544fa92f2c8f4c73 | 8ee5dcbdbd407eb5f294d430813b16eca22f571c | /data/HW3/hw3_359.py | 802a68fe38d1f46796648e9870bd99992298710a | [] | no_license | MAPLE-Robot-Subgoaling/IPT | 5e60e2ee4d0a5688bc8711ceed953e76cd2ad5d9 | f512ea3324bfdceff8df63b4c7134b5fcbb0514e | refs/heads/master | 2021-01-11T12:31:00.939051 | 2018-08-13T23:24:19 | 2018-08-13T23:24:19 | 79,373,489 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | temp = float(input("Please enter the temperature: "))
scale = input("Please enter 'C' for Celsius, or 'K' for Kelvin: ")
MELTING_POINT_C = 32
BOILING_POINT_C = 100
MELTING_POINT_K = 273.15
BOILING_POINT_K = 373.15
def main():
    """Classify water's phase from the module globals *temp* and *scale*.

    NOTE(review): MELTING_POINT_C is 32 — that is the Fahrenheit melting point;
    in Celsius water melts at 0, so the 0..32 range is labelled "solid" here —
    confirm intended. Also, temperatures below 0 print nothing in either scale.
    """
    if scale == "C":
        if temp >= 0 and temp < MELTING_POINT_C:
            print("At this temperature, water is a (frozen) solid.")
        elif temp >= MELTING_POINT_C and temp < BOILING_POINT_C:
            print("At this temperature, water is a liquid.")
        elif temp >= BOILING_POINT_C:
            print("At this temperature, water is a gas.")
    else:
        # Any scale other than "C" is treated as Kelvin.
        if temp >= 0 and temp < MELTING_POINT_K:
            print("At this temperature, water is a (frozen) solid.")
        elif temp >= MELTING_POINT_K and temp < BOILING_POINT_K:
            print("At this temperature, water is a liquid.")
        elif temp >= BOILING_POINT_K:
            print("At this temperature, water is a gas.")
main()
| [
"mneary1@umbc.edu"
] | mneary1@umbc.edu |
4c9ccd62216fb7af1a4b09ae50e4c1f46c8b7d2a | d50ca2e1baee9c8dc0c14e11145e21030003f89d | /main.py | ed5681e307ecb529e726fe835841e98a8741895b | [] | no_license | Team1559/Vision2018 | 0f7d626c3ca142c222fe600eceb29a968e1425ff | 1da67d2127bc06daf06e296e1793b04d25ad6111 | refs/heads/master | 2018-11-04T17:10:40.481269 | 2018-02-18T19:39:00 | 2018-02-18T19:39:00 | 117,614,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | import cv2
import numpy as np
import stereo
import server
import LEDFinder
import cubeFinder
import calc
import usb
import PID
camera = usb.USBCamera(0)
#cameraR and cameraL
#cubes = stereo.Stereo(cameraR, cameraL, 8)
#lights = stereo.LEDStereo(cameraR, cameraL, 8)
cube = cubeFinder.CubeFinder(camera)
target = "none"
server.startServer()
while 1:
target = server.getData()
print target
if target is "c":
cube.find()
server.putData("c",cube.angle,0)
#if target is "r":
# if lights.getColor() is not "red":
# lights.setRed()
# lights.track()
# server.putData("r",lights.angle,lights.distance)
#if target is "b":
# if lights.getColor() is not "blue":
# lights.setBlue()
# lights.track()
# server.putData("b",lights.angle,lights.distance)
| [
"wdmerges@gmail.com"
] | wdmerges@gmail.com |
1c13c144a468c1cbf0319fc908921f463ab44ab4 | 986f86d754d93729533f0d96bfd08cea83f8a497 | /results_script.py | 8c83445eeacf6d70e29d1f601304e22c8cc6a9e6 | [] | no_license | vidhartbhatia/somnia | dd1f7507a1ebc3fb1dad1f7b60da0b696ff9250a | f8297c8497a14e27608b7a1db737ac6344b4c535 | refs/heads/master | 2022-07-18T05:50:13.451979 | 2020-05-13T04:01:26 | 2020-05-13T04:01:26 | 255,775,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | # importing csv module
import csv
import datetime
import time
import statistics
import os
import pprint
import numpy as np
pp = pprint.PrettyPrinter()
pprint = pp.pprint  # NOTE(review): this shadows the imported pprint module
folder_name = "Test_var_agg"
file_name = "meds 5-12/51234"
ROW_LIMIT = None # set to none if want all
# initializing the titles and rows list
# fields = []
rows = []
T = time.time()
# reading csv file
with open(f"{folder_name}{os.sep}{file_name}_results.csv", 'r') as csvfile:
    # creating a csv reader object
    csvreader = csv.DictReader(csvfile)
    # # extracting field names through first row
    fields = csvreader.fieldnames
    rows = list(csvreader)[:ROW_LIMIT]
    # NOTE(review): slicing with [:None] above already keeps everything, so
    # this second slice is redundant.
    if ROW_LIMIT!=None:
        rows = rows[:ROW_LIMIT]
# get total number of rows
print(f"Total no. of rows: {len(rows)}")
print(rows[0]['predicted'][-1])
# Build a column: the 'predicted' header followed by the LAST character of
# each row's prediction string (e.g. the class digit of "class 1").
predicted = ['predicted']
for i in range(len(rows)):
    predicted.append(rows[i]['predicted'][-1])
# zip of one list yields 1-tuples, i.e. one value per output CSV row.
rows = zip(predicted)
# NOTE(review): outFile is the SAME path as the input — this overwrites the
# original results file with the single 'predicted' column.
outFile = f"{folder_name}{os.sep}{file_name}_results.csv"
os.makedirs(os.path.dirname(outFile), exist_ok=True)
with open(outFile, "w", newline ='') as f:
    writer = csv.writer(f)
    for row in rows:
        writer.writerow(row)
| [
"medhapotluri@gmail.com"
] | medhapotluri@gmail.com |
559da7f533a0595e47899cdeb1079e5f659d4a17 | a5b3fe8308a6206f9e0fb21ba8cd08e239ade60a | /dotpy_implementation/functions.py | 8e498a1d58a8d2872c4803361dead853fc27b7cb | [] | no_license | jakelourie1502/Connect4 | 95a69d956bece6df9e0ed065eff924ee52032e5d | 978667786cd03e8ac13bc48740b75208d220ebc3 | refs/heads/main | 2023-08-16T11:22:15.473734 | 2021-10-08T15:44:32 | 2021-10-08T15:44:32 | 415,041,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,622 | py | import numpy as np
from IPython.display import clear_output
from tabulate import tabulate
import copy
import time
import torch
from torch.nn import Linear, ReLU, Softmax, Sigmoid
from model_and_dataloader import Linear_Model, Dataset
def training_loop(model, training_epochs, games_per_epoch, mcts_search, batch_size, crit1, crit2, mini_epochs, cupt):
    """AlphaZero-style self-play loop: generate games, train a candidate copy
    of *model*, then score it against the baseline agents.

    Returns (trained candidate model, scores vs monty, scores vs the old agent).
    """
    candidate_mod = Linear_Model()
    candidate_mod.load_state_dict(model.state_dict())  # start from the current weights
    for i in range(training_epochs):
        m = 0  # cumulative number of logged positions this epoch
        episodes = {}
        for game in range(games_per_epoch):
            states, prob_vecs, rewards = run_episode(mcts_search, candidate_mod, cupt, [], [], [])
            m += len(rewards)
            episodes[game] = {
                'states': states,
                'prob_vecs': prob_vecs,
                'rewards': rewards,
                'm': m  # cumulative count: used as the slice end in tensorfy_data
            }
        x, y_probs, y_rewards = tensorfy_data(episodes, m)
        dLoader = dataloader_func(x, y_probs, y_rewards, batch_size)
        # NOTE(review): `optimizer` is read from module scope here — confirm it
        # exists and targets candidate_mod's parameters wherever this is run.
        candidate_mod = train_system(dLoader, candidate_mod, optimizer, crit1, crit2, mini_epochs)
    # BUG FIX: these lines used the undefined names `mcts_searches` and
    # `candidate_model` (NameError at runtime).
    candidate_agent = Alpha_player(simulations=mcts_search, model=candidate_mod, cupt=1)
    old_agent = Alpha_player(simulations=mcts_search, model=model, cupt=1)
    score_v_monty = pit_two_agents(candidate_agent, magic_monty1, 4)
    print(f'Score v Monty: {np.mean(score_v_monty)}')
    # BUG FIX: the "old agent" match pitted the candidate against monty again,
    # leaving old_agent unused.
    score_v_old_agent = pit_two_agents(candidate_agent, old_agent, 5)
    print(f'Score v old agent: {np.mean(score_v_old_agent)}')
    return candidate_mod, score_v_monty, score_v_old_agent
def run_episode(mcts_searches,candidate_mod,cupt, states, prob_vecs, rewards, gamma=0.9):
    """Play one self-play game, appending (state, MCTS policy, player sign) per move.

    States are stored from the mover's perspective (board * player). Each move
    first records the current player's sign in *rewards*; once the game ends
    the whole list is scaled by the outcome (+1/-1 win, 0 draw). Returns the
    three (mutated) lists.
    """
    board = create_board()
    current_player = 1
    is_done = False  # NOTE(review): never read — the loop exits via break
    while True:
        root = AZ_node(board, action= None, player=current_player, model=candidate_mod, cupt=cupt, gamma=gamma) #node with starting board
        # Expand the search tree with the configured number of MCTS simulations.
        for i in range(mcts_searches):
            node, reward = root.perform_mcts_search() #gets valid options frm board
            node.mcts_propagate(reward)
        #log
        states.append(root.board.flatten() * root.player) #if current_player = -1, store the state as *= -1 so it's in first person mode.
        prob_vecs.append(root.sampled_prob_vector())
        rewards.append(current_player) #we can use this to multiply by the reward later
        #makemove: play the highest-probability column of the sampled policy.
        col = np.argmax(root.sampled_prob_vector())
        row = get_next_open_row(board,col)
        drop_piece(board,row,col,current_player)
        #check win and add rewards
        if winning_move(board,current_player):
            reward = current_player
            rewards = [x*reward for x in rewards]  # +1 for the winner's moves, -1 for the loser's
            break
        if board.all() != 0:  # every cell non-zero -> board full -> draw
            reward = 0
            rewards = [x*0 for x in rewards]
            break
        current_player*=-1
    return states, prob_vecs, rewards
def tensorfy_data(episodes, m):
    """Stack the per-game logs in *episodes* into (states, policies, rewards)
    tensors of *m* total positions.

    Each game dict carries its CUMULATIVE position count under 'm', which is
    used as the slice end when filling the output tensors.
    """
    x = torch.zeros((m, 42))
    y_P = torch.zeros((m, 7))
    y_R = torch.zeros((m, 1))
    start = 0
    for game in episodes.values():
        end = game['m']
        x[start:end] = torch.from_numpy(np.array(game['states']))
        y_P[start:end] = torch.from_numpy(np.array(game['prob_vecs']))
        y_R[start:end] = torch.from_numpy(np.array(game['rewards']).reshape(-1, 1))
        start = end
    return x, y_P, y_R
def dataloader_func(x, y_probs, y_rewards, batch_size):
    """Wrap the training tensors in a Dataset and return a shuffling DataLoader."""
    return torch.utils.data.DataLoader(
        Dataset(x, y_probs, y_rewards),
        batch_size=batch_size,
        shuffle=True,
    )
def train_system(dataloader, model, optimizer, crit1, crit2, epochs):
    """Train *model* on (state, policy, value) batches and return it.

    Per batch the loss is crit1(value, target_value) - crit2(policy,
    target_policy): the crit2 term enters with a minus sign, i.e. it is
    maximised during optimisation.

    BUG FIX: the original ignored *epochs* entirely and always made a single
    pass over the data; the callers pass `mini_epochs` here expecting more.
    """
    model.train()
    for _ in range(epochs):
        for x, y_p, y_r in dataloader:
            x, y_p, y_r = x.float(), y_p.float(), y_r.float()
            probas, reward = model(x)
            loss1 = crit1(reward, y_r)
            loss2 = crit2(probas, y_p)
            loss = loss1 - loss2
            print(loss)  # per-batch loss log, kept from the original
            loss.backward()
            optimizer.step(); optimizer.zero_grad()
    return model
def pit_two_agents(agent_1, agent_2, matches):
    """Play *matches*//2 pairs of games (alternating who starts) and return
    the list of winners in play order."""
    winners = []
    for _ in range(matches // 2):
        winners.append(play_game(create_board(), agent_1, agent_2, printy=False))
        winners.append(play_game(create_board(), agent_1, agent_2, printy=False, starting_player=-1))
    return winners
"jakelourie1502@gmail.com"
] | jakelourie1502@gmail.com |
1dc64fad675de185f0f91ac6e71d954af38ebb21 | ba444d59bc744645a5af48a7c5788db23c69c121 | /project/views.py | 1cfe876c68ed331540d10f317178b3f507e27996 | [] | no_license | baloram-roy/tracking_app | bd7a5af1ee71bb303e1f1d1e13b296203eb69c06 | 6fa5846b5d19ccf2508e545d45e5d9c1ad5cb4db | refs/heads/master | 2023-08-28T20:29:01.673969 | 2021-10-24T17:59:02 | 2021-10-24T17:59:02 | 412,880,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,660 | py | # import from python:
from datetime import datetime, timedelta
# import from django:
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth.models import User
from django.urls.base import reverse_lazy
from django.views.generic import ListView, CreateView, UpdateView, DeleteView, DetailView
from .models import Entry, Project, Task
# Create your views here.
# Home view:
#
def home(request):
    """Home view: render the index page with all tasks, projects and entries
    plus the next seven dates (the tracking calendar header)."""
    tasks = Task.objects.all()
    projects = Project.objects.all()
    entries = Entry.objects.all()
    # BUG FIX (idiom): the original shadowed the builtin `list` (and bound a
    # loop variable named `time`); build the date list with a comprehension.
    week = [datetime.today() + timedelta(i) for i in range(0, 7)]
    context = {
        'tasks': tasks,
        'entrys': entries,  # key spelling kept unchanged — the template reads 'entrys'
        'date': week,
        'project': projects
    }
    return render(request, 'project/index.html', context)
def project_list_create(request):
    """List all projects; on POST also create a new project from the form data."""
    project = Project.objects.all()  # queryset shown in the list view
    if request.method == 'POST':
        title = request.POST.get('title')
        client_name = request.POST.get('client_name')
        if title and client_name:
            project = Project.objects.create(
                title=title,
                client_name=client_name,
                created_by=request.user
            )
            # BUG FIX: objects.create() already saves the row; the original's
            # extra project.save() issued a second, redundant UPDATE.
            return redirect('project_list')
    context = {'object_list': project}
    return render(request, 'project/project_list.html', context)
def project_detail(request, pk):
    """Show one project with its tasks split by status; POST adds a task.

    POST always redirects back to this page (post/redirect/get); the
    task is only created when a non-empty title was submitted, matching
    the original behavior.  Raises Http404 for an unknown project.

    Fix: removed a leftover debug ``print(title)`` and the unused
    ``task_create`` binding.
    """
    project = get_object_or_404(Project, pk=pk)
    if request.method == 'POST':
        title = request.POST.get('title')
        if title:
            Task.objects.create(
                name=title, project=project, created_by=request.user)
        return redirect('project_detail', pk=project.id)
    # Split tasks by workflow status for the two template columns.
    task_runing = project.task.filter(status=Task.RUNING)
    task_done = project.task.filter(status=Task.DONE)
    context = {
        'project': project,
        'task_runing': task_runing,
        'task_done': task_done,
    }
    return render(request, 'project/project_details.html', context)
# class ProjectListView(ListView):
# model = Project
# template_name = "project/project_list.html"
# class ProjectDetailView(DetailView):
# model = Project
# template_name = "project/project_details.html"
# class ProjectCreateView(CreateView):
# model = Project
# template_name = "project/project_create.html"
# fields = ['title', 'client_name']
# success_url = reverse_lazy('project_list')
# def form_valid(self, form: Project):
# form.instance.created_by = self.request.user
# return super().form_valid(form)
class ProjectUpdateView(UpdateView):
    """Edit an existing project's title and client name.

    On a valid form the instance's ``created_by`` is stamped with the
    requesting user before saving, then the browser is sent back to the
    project list.
    """

    model = Project
    template_name = "project/project_update.html"
    fields = ['title', 'client_name']
    success_url = reverse_lazy('project_list')

    def form_valid(self, form):
        # NOTE(review): this re-assigns the creator on every edit —
        # confirm that overwriting the original author is intended.
        # (A commented-out UserPassesTestMixin-style ``test_func``
        # restricting edits to the creator existed here.)
        form.instance.created_by = self.request.user
        return super().form_valid(form)
def delete_project(request, pk):
    """Delete the project identified by ``pk`` and return to the list.

    Raises Http404 when no such project exists.
    NOTE(review): deleting on a plain GET is vulnerable to CSRF and
    link prefetching — consider requiring POST.
    """
    get_object_or_404(Project, pk=pk).delete()
    return redirect('project_list')
# Task section start here:
###################
##################
def task_detail(request, pk, task_id):
    """Show one task; POST logs a time entry (hours/minutes) against it.

    Raises Http404 when the project or task does not exist.

    Fix: after creating an entry the view previously fell through and
    re-rendered the page, so a browser refresh re-submitted the form and
    duplicated the entry.  It now redirects back to itself
    (post/redirect/get), matching every other POST handler in this file.
    The unused ``entry`` binding was also removed.
    """
    project = get_object_or_404(Project, pk=pk)
    task = get_object_or_404(Task, pk=task_id)
    if request.method == 'POST':
        hours = int(request.POST.get('hours'))
        minutes = int(request.POST.get('minutes'))
        # Combine the submitted date with the current time of day.
        date = '%s %s' % (request.POST.get('date'), datetime.now().time())
        Entry.objects.create(
            project=project, task=task, minutes=(hours * 60) + minutes,
            created_by=request.user, created_at=date)
        return redirect('task_detail', pk=project.id, task_id=task.id)
    context = {
        'project': project,
        'task': task,
        # Original author flagged this value as problematic; it is a
        # naive datetime — verify the template expects that.
        'today': datetime.today(),
    }
    return render(request, 'project/task_detail.html', context)
def task_edit(request, pk, task_id):
    """Edit a task's name and status.

    GET renders the edit form.  POST with a non-empty title saves both
    fields and redirects to the task detail page; otherwise the form is
    shown again.  Raises Http404 when the project or task is missing.
    """
    project = get_object_or_404(Project, pk=pk)
    task = get_object_or_404(Task, pk=task_id)
    if request.method == 'POST':
        new_name = request.POST.get('title')
        new_status = request.POST.get('status')
        if new_name:
            task.name = new_name
            task.status = new_status
            task.save()
            return redirect('task_detail', pk=project.id, task_id=task.id)
    return render(request, 'project/task_edit.html',
                  {'project': project, 'task': task})
def edit_entry(request, pk, task_id, entry_id):
    """Edit an existing time entry on a task.

    POST replaces the entry's date and duration, then redirects to the
    task detail page.  GET pre-fills the form with the stored duration
    split into hours and minutes.  Raises Http404 when the project,
    task or entry cannot be found.
    """
    project = get_object_or_404(Project, pk=pk)
    task = get_object_or_404(Task, pk=task_id)
    entry = get_object_or_404(Entry, pk=entry_id)
    if request.method == 'POST':
        h = int(request.POST.get('hours'))
        m = int(request.POST.get('minutes'))
        # Submitted date combined with the current time of day.
        entry.created_at = '%s %s' % (request.POST.get('date'),
                                      datetime.now().time())
        entry.minutes = (h * 60) + m
        entry.save()
        return redirect('task_detail', pk=project.id, task_id=task.id)
    hours, minutes = divmod(entry.minutes, 60)
    return render(request, 'project/entry_edit.html', {
        'project': project,
        'task': task,
        'entry': entry,
        'hours': hours,
        'minutes': minutes,
    })
| [
"baloramroyram@gmail.com"
] | baloramroyram@gmail.com |
bdfe96afd844c8fdacaf562d9acad3dcc65138ff | d854df66e28a4a870ef95ba816087f1fce704619 | /02 Vannucci/03 Magnet_curr_bias_lockin/Sub_Scripts/GUI.py | 7bef74b84b01f1c5928b5a61f164f27b22836413 | [] | no_license | Brucewanghahei/QMD_Custom_Program | 7c93020027872856ece2bcb1cb6a91c0abe0d7d4 | ef5015194ac070e49676c3f6cbc8d3bd9ce0ba99 | refs/heads/master | 2021-08-31T16:03:29.919885 | 2017-12-22T00:53:07 | 2017-12-22T00:53:07 | 115,059,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,744 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUI.ui'
#
# Created: Sun Jul 31 15:33:31 2016
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shim generated by pyuic4: on PyQt builds that expose
# QtCore.QString, use its fromUtf8 converter; on builds without it
# (presumably Python 3 / string API v2 — confirm for this install),
# fall back to the identity function.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # No QString available: plain Python strings are used as-is.
        return s
# Translation shim generated by pyuic4: older PyQt4 APIs require an
# explicit encoding argument (QApplication.UnicodeUTF8); when that
# attribute is absent, call translate() without it.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        # Four-argument form with the UTF-8 encoding flag.
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer API: encoding argument no longer exists.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(1915, 1104)
self.centralwidget = QtGui.QWidget(MainWindow)
font = QtGui.QFont()
font.setPointSize(12)
self.centralwidget.setFont(font)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout_2 = QtGui.QGridLayout(self.centralwidget)
self.gridLayout_2.setContentsMargins(-1, 0, -1, 0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setContentsMargins(-1, -1, -1, 0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.scrollArea = QtGui.QScrollArea(self.centralwidget)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1893, 1080))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.groupBox_condition = QtGui.QGroupBox(self.scrollAreaWidgetContents)
self.groupBox_condition.setGeometry(QtCore.QRect(10, 920, 321, 61))
font = QtGui.QFont()
font.setPointSize(10)
self.groupBox_condition.setFont(font)
self.groupBox_condition.setObjectName(_fromUtf8("groupBox_condition"))
self.label_condition = QtGui.QLabel(self.groupBox_condition)
self.label_condition.setGeometry(QtCore.QRect(10, 20, 301, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_condition.setFont(font)
self.label_condition.setText(_fromUtf8(""))
self.label_condition.setObjectName(_fromUtf8("label_condition"))
self.groupBox_control = QtGui.QGroupBox(self.scrollAreaWidgetContents)
self.groupBox_control.setEnabled(True)
self.groupBox_control.setGeometry(QtCore.QRect(10, 560, 321, 61))
font = QtGui.QFont()
font.setPointSize(10)
self.groupBox_control.setFont(font)
self.groupBox_control.setObjectName(_fromUtf8("groupBox_control"))
self.pushButton_Start = QtGui.QPushButton(self.groupBox_control)
self.pushButton_Start.setGeometry(QtCore.QRect(30, 20, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_Start.setFont(font)
self.pushButton_Start.setObjectName(_fromUtf8("pushButton_Start"))
self.pushButton_Pause = QtGui.QPushButton(self.groupBox_control)
self.pushButton_Pause.setGeometry(QtCore.QRect(130, 20, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_Pause.setFont(font)
self.pushButton_Pause.setObjectName(_fromUtf8("pushButton_Pause"))
self.pushButton_Stop = QtGui.QPushButton(self.groupBox_control)
self.pushButton_Stop.setGeometry(QtCore.QRect(230, 20, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_Stop.setFont(font)
self.pushButton_Stop.setObjectName(_fromUtf8("pushButton_Stop"))
self.tabWidget_visa = QtGui.QTabWidget(self.scrollAreaWidgetContents)
self.tabWidget_visa.setGeometry(QtCore.QRect(10, 10, 321, 221))
font = QtGui.QFont()
font.setPointSize(12)
self.tabWidget_visa.setFont(font)
self.tabWidget_visa.setObjectName(_fromUtf8("tabWidget_visa"))
self.tab_228 = QtGui.QWidget()
self.tab_228.setObjectName(_fromUtf8("tab_228"))
self.label_chooseVisa_1 = QtGui.QLabel(self.tab_228)
self.label_chooseVisa_1.setGeometry(QtCore.QRect(20, 0, 201, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_chooseVisa_1.setFont(font)
self.label_chooseVisa_1.setObjectName(_fromUtf8("label_chooseVisa_1"))
self.pushButton_select_1 = QtGui.QPushButton(self.tab_228)
self.pushButton_select_1.setGeometry(QtCore.QRect(30, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_select_1.setFont(font)
self.pushButton_select_1.setObjectName(_fromUtf8("pushButton_select_1"))
self.comboBox_visa_1 = QtGui.QComboBox(self.tab_228)
self.comboBox_visa_1.setGeometry(QtCore.QRect(10, 30, 301, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.comboBox_visa_1.setFont(font)
self.comboBox_visa_1.setObjectName(_fromUtf8("comboBox_visa_1"))
self.label_visaname_1 = QtGui.QLabel(self.tab_228)
self.label_visaname_1.setGeometry(QtCore.QRect(20, 120, 181, 27))
font = QtGui.QFont()
font.setPointSize(12)
self.label_visaname_1.setFont(font)
self.label_visaname_1.setObjectName(_fromUtf8("label_visaname_1"))
self.pushButton_close_1 = QtGui.QPushButton(self.tab_228)
self.pushButton_close_1.setGeometry(QtCore.QRect(130, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_close_1.setFont(font)
self.pushButton_close_1.setObjectName(_fromUtf8("pushButton_close_1"))
self.pushButton_update_1 = QtGui.QPushButton(self.tab_228)
self.pushButton_update_1.setGeometry(QtCore.QRect(230, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_update_1.setFont(font)
self.pushButton_update_1.setObjectName(_fromUtf8("pushButton_update_1"))
self.label_visa_1 = QtGui.QLabel(self.tab_228)
self.label_visa_1.setGeometry(QtCore.QRect(0, 150, 311, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.label_visa_1.setFont(font)
self.label_visa_1.setText(_fromUtf8(""))
self.label_visa_1.setObjectName(_fromUtf8("label_visa_1"))
self.tabWidget_visa.addTab(self.tab_228, _fromUtf8(""))
self.tab_229 = QtGui.QWidget()
self.tab_229.setObjectName(_fromUtf8("tab_229"))
self.pushButton_update_2 = QtGui.QPushButton(self.tab_229)
self.pushButton_update_2.setGeometry(QtCore.QRect(230, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_update_2.setFont(font)
self.pushButton_update_2.setObjectName(_fromUtf8("pushButton_update_2"))
self.pushButton_close_2 = QtGui.QPushButton(self.tab_229)
self.pushButton_close_2.setGeometry(QtCore.QRect(130, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_close_2.setFont(font)
self.pushButton_close_2.setObjectName(_fromUtf8("pushButton_close_2"))
self.pushButton_select_2 = QtGui.QPushButton(self.tab_229)
self.pushButton_select_2.setGeometry(QtCore.QRect(30, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_select_2.setFont(font)
self.pushButton_select_2.setObjectName(_fromUtf8("pushButton_select_2"))
self.comboBox_visa_2 = QtGui.QComboBox(self.tab_229)
self.comboBox_visa_2.setGeometry(QtCore.QRect(10, 30, 301, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.comboBox_visa_2.setFont(font)
self.comboBox_visa_2.setObjectName(_fromUtf8("comboBox_visa_2"))
self.label_visaname_2 = QtGui.QLabel(self.tab_229)
self.label_visaname_2.setGeometry(QtCore.QRect(20, 120, 181, 27))
font = QtGui.QFont()
font.setPointSize(12)
self.label_visaname_2.setFont(font)
self.label_visaname_2.setObjectName(_fromUtf8("label_visaname_2"))
self.label_chooseVisa_2 = QtGui.QLabel(self.tab_229)
self.label_chooseVisa_2.setGeometry(QtCore.QRect(20, 0, 201, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_chooseVisa_2.setFont(font)
self.label_chooseVisa_2.setObjectName(_fromUtf8("label_chooseVisa_2"))
self.label_visa_2 = QtGui.QLabel(self.tab_229)
self.label_visa_2.setGeometry(QtCore.QRect(0, 150, 311, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.label_visa_2.setFont(font)
self.label_visa_2.setText(_fromUtf8(""))
self.label_visa_2.setObjectName(_fromUtf8("label_visa_2"))
self.tabWidget_visa.addTab(self.tab_229, _fromUtf8(""))
self.tab_230 = QtGui.QWidget()
self.tab_230.setObjectName(_fromUtf8("tab_230"))
self.pushButton_update_3 = QtGui.QPushButton(self.tab_230)
self.pushButton_update_3.setGeometry(QtCore.QRect(230, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_update_3.setFont(font)
self.pushButton_update_3.setObjectName(_fromUtf8("pushButton_update_3"))
self.pushButton_close_3 = QtGui.QPushButton(self.tab_230)
self.pushButton_close_3.setGeometry(QtCore.QRect(130, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_close_3.setFont(font)
self.pushButton_close_3.setObjectName(_fromUtf8("pushButton_close_3"))
self.pushButton_select_3 = QtGui.QPushButton(self.tab_230)
self.pushButton_select_3.setGeometry(QtCore.QRect(30, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_select_3.setFont(font)
self.pushButton_select_3.setObjectName(_fromUtf8("pushButton_select_3"))
self.comboBox_visa_3 = QtGui.QComboBox(self.tab_230)
self.comboBox_visa_3.setGeometry(QtCore.QRect(10, 30, 301, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.comboBox_visa_3.setFont(font)
self.comboBox_visa_3.setObjectName(_fromUtf8("comboBox_visa_3"))
self.label_visaname_3 = QtGui.QLabel(self.tab_230)
self.label_visaname_3.setGeometry(QtCore.QRect(20, 120, 181, 27))
font = QtGui.QFont()
font.setPointSize(12)
self.label_visaname_3.setFont(font)
self.label_visaname_3.setObjectName(_fromUtf8("label_visaname_3"))
self.label_chooseVisa_3 = QtGui.QLabel(self.tab_230)
self.label_chooseVisa_3.setGeometry(QtCore.QRect(20, 0, 201, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_chooseVisa_3.setFont(font)
self.label_chooseVisa_3.setObjectName(_fromUtf8("label_chooseVisa_3"))
self.label_visa_3 = QtGui.QLabel(self.tab_230)
self.label_visa_3.setGeometry(QtCore.QRect(0, 150, 311, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.label_visa_3.setFont(font)
self.label_visa_3.setText(_fromUtf8(""))
self.label_visa_3.setObjectName(_fromUtf8("label_visa_3"))
self.tabWidget_visa.addTab(self.tab_230, _fromUtf8(""))
self.tab_231 = QtGui.QWidget()
self.tab_231.setObjectName(_fromUtf8("tab_231"))
self.pushButton_update_4 = QtGui.QPushButton(self.tab_231)
self.pushButton_update_4.setGeometry(QtCore.QRect(230, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_update_4.setFont(font)
self.pushButton_update_4.setObjectName(_fromUtf8("pushButton_update_4"))
self.pushButton_close_4 = QtGui.QPushButton(self.tab_231)
self.pushButton_close_4.setGeometry(QtCore.QRect(130, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_close_4.setFont(font)
self.pushButton_close_4.setObjectName(_fromUtf8("pushButton_close_4"))
self.pushButton_select_4 = QtGui.QPushButton(self.tab_231)
self.pushButton_select_4.setGeometry(QtCore.QRect(30, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_select_4.setFont(font)
self.pushButton_select_4.setObjectName(_fromUtf8("pushButton_select_4"))
self.comboBox_visa_4 = QtGui.QComboBox(self.tab_231)
self.comboBox_visa_4.setGeometry(QtCore.QRect(10, 30, 301, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.comboBox_visa_4.setFont(font)
self.comboBox_visa_4.setObjectName(_fromUtf8("comboBox_visa_4"))
self.label_visaname_4 = QtGui.QLabel(self.tab_231)
self.label_visaname_4.setGeometry(QtCore.QRect(20, 120, 181, 27))
font = QtGui.QFont()
font.setPointSize(12)
self.label_visaname_4.setFont(font)
self.label_visaname_4.setObjectName(_fromUtf8("label_visaname_4"))
self.label_chooseVisa_4 = QtGui.QLabel(self.tab_231)
self.label_chooseVisa_4.setGeometry(QtCore.QRect(20, 0, 201, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_chooseVisa_4.setFont(font)
self.label_chooseVisa_4.setObjectName(_fromUtf8("label_chooseVisa_4"))
self.label_visa_4 = QtGui.QLabel(self.tab_231)
self.label_visa_4.setGeometry(QtCore.QRect(0, 150, 311, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.label_visa_4.setFont(font)
self.label_visa_4.setText(_fromUtf8(""))
self.label_visa_4.setObjectName(_fromUtf8("label_visa_4"))
self.tabWidget_visa.addTab(self.tab_231, _fromUtf8(""))
self.tab_232 = QtGui.QWidget()
self.tab_232.setObjectName(_fromUtf8("tab_232"))
self.pushButton_update_5 = QtGui.QPushButton(self.tab_232)
self.pushButton_update_5.setGeometry(QtCore.QRect(230, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_update_5.setFont(font)
self.pushButton_update_5.setObjectName(_fromUtf8("pushButton_update_5"))
self.pushButton_close_5 = QtGui.QPushButton(self.tab_232)
self.pushButton_close_5.setGeometry(QtCore.QRect(130, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_close_5.setFont(font)
self.pushButton_close_5.setObjectName(_fromUtf8("pushButton_close_5"))
self.pushButton_select_5 = QtGui.QPushButton(self.tab_232)
self.pushButton_select_5.setGeometry(QtCore.QRect(30, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_select_5.setFont(font)
self.pushButton_select_5.setObjectName(_fromUtf8("pushButton_select_5"))
self.comboBox_visa_5 = QtGui.QComboBox(self.tab_232)
self.comboBox_visa_5.setGeometry(QtCore.QRect(10, 30, 301, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.comboBox_visa_5.setFont(font)
self.comboBox_visa_5.setObjectName(_fromUtf8("comboBox_visa_5"))
self.label_visaname_5 = QtGui.QLabel(self.tab_232)
self.label_visaname_5.setGeometry(QtCore.QRect(20, 120, 181, 27))
font = QtGui.QFont()
font.setPointSize(12)
self.label_visaname_5.setFont(font)
self.label_visaname_5.setObjectName(_fromUtf8("label_visaname_5"))
self.label_chooseVisa_5 = QtGui.QLabel(self.tab_232)
self.label_chooseVisa_5.setGeometry(QtCore.QRect(20, 0, 201, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_chooseVisa_5.setFont(font)
self.label_chooseVisa_5.setObjectName(_fromUtf8("label_chooseVisa_5"))
self.label_visa_5 = QtGui.QLabel(self.tab_232)
self.label_visa_5.setGeometry(QtCore.QRect(0, 150, 311, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.label_visa_5.setFont(font)
self.label_visa_5.setText(_fromUtf8(""))
self.label_visa_5.setObjectName(_fromUtf8("label_visa_5"))
self.tabWidget_visa.addTab(self.tab_232, _fromUtf8(""))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.comboBox_visa_6 = QtGui.QComboBox(self.tab)
self.comboBox_visa_6.setGeometry(QtCore.QRect(10, 30, 301, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.comboBox_visa_6.setFont(font)
self.comboBox_visa_6.setObjectName(_fromUtf8("comboBox_visa_6"))
self.pushButton_update_6 = QtGui.QPushButton(self.tab)
self.pushButton_update_6.setGeometry(QtCore.QRect(230, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_update_6.setFont(font)
self.pushButton_update_6.setObjectName(_fromUtf8("pushButton_update_6"))
self.label_visaname_6 = QtGui.QLabel(self.tab)
self.label_visaname_6.setGeometry(QtCore.QRect(20, 120, 181, 27))
font = QtGui.QFont()
font.setPointSize(12)
self.label_visaname_6.setFont(font)
self.label_visaname_6.setObjectName(_fromUtf8("label_visaname_6"))
self.label_visa_6 = QtGui.QLabel(self.tab)
self.label_visa_6.setGeometry(QtCore.QRect(0, 150, 311, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.label_visa_6.setFont(font)
self.label_visa_6.setText(_fromUtf8(""))
self.label_visa_6.setObjectName(_fromUtf8("label_visa_6"))
self.pushButton_select_6 = QtGui.QPushButton(self.tab)
self.pushButton_select_6.setGeometry(QtCore.QRect(30, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_select_6.setFont(font)
self.pushButton_select_6.setObjectName(_fromUtf8("pushButton_select_6"))
self.label_chooseVisa_6 = QtGui.QLabel(self.tab)
self.label_chooseVisa_6.setGeometry(QtCore.QRect(20, 0, 201, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_chooseVisa_6.setFont(font)
self.label_chooseVisa_6.setObjectName(_fromUtf8("label_chooseVisa_6"))
self.pushButton_close_6 = QtGui.QPushButton(self.tab)
self.pushButton_close_6.setGeometry(QtCore.QRect(130, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_close_6.setFont(font)
self.pushButton_close_6.setObjectName(_fromUtf8("pushButton_close_6"))
self.tabWidget_visa.addTab(self.tab, _fromUtf8(""))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.comboBox_visa_7 = QtGui.QComboBox(self.tab_2)
self.comboBox_visa_7.setGeometry(QtCore.QRect(10, 30, 301, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.comboBox_visa_7.setFont(font)
self.comboBox_visa_7.setObjectName(_fromUtf8("comboBox_visa_7"))
self.pushButton_update_7 = QtGui.QPushButton(self.tab_2)
self.pushButton_update_7.setGeometry(QtCore.QRect(230, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_update_7.setFont(font)
self.pushButton_update_7.setObjectName(_fromUtf8("pushButton_update_7"))
self.label_visaname_7 = QtGui.QLabel(self.tab_2)
self.label_visaname_7.setGeometry(QtCore.QRect(20, 120, 181, 27))
font = QtGui.QFont()
font.setPointSize(12)
self.label_visaname_7.setFont(font)
self.label_visaname_7.setObjectName(_fromUtf8("label_visaname_7"))
self.label_visa_7 = QtGui.QLabel(self.tab_2)
self.label_visa_7.setGeometry(QtCore.QRect(0, 150, 311, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.label_visa_7.setFont(font)
self.label_visa_7.setText(_fromUtf8(""))
self.label_visa_7.setObjectName(_fromUtf8("label_visa_7"))
self.pushButton_select_7 = QtGui.QPushButton(self.tab_2)
self.pushButton_select_7.setGeometry(QtCore.QRect(30, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_select_7.setFont(font)
self.pushButton_select_7.setObjectName(_fromUtf8("pushButton_select_7"))
self.label_chooseVisa_7 = QtGui.QLabel(self.tab_2)
self.label_chooseVisa_7.setGeometry(QtCore.QRect(20, 0, 201, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_chooseVisa_7.setFont(font)
self.label_chooseVisa_7.setObjectName(_fromUtf8("label_chooseVisa_7"))
self.pushButton_close_7 = QtGui.QPushButton(self.tab_2)
self.pushButton_close_7.setGeometry(QtCore.QRect(130, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_close_7.setFont(font)
self.pushButton_close_7.setObjectName(_fromUtf8("pushButton_close_7"))
self.tabWidget_visa.addTab(self.tab_2, _fromUtf8(""))
self.tab_3 = QtGui.QWidget()
self.tab_3.setObjectName(_fromUtf8("tab_3"))
self.comboBox_visa_8 = QtGui.QComboBox(self.tab_3)
self.comboBox_visa_8.setGeometry(QtCore.QRect(10, 30, 301, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.comboBox_visa_8.setFont(font)
self.comboBox_visa_8.setObjectName(_fromUtf8("comboBox_visa_8"))
self.pushButton_update_8 = QtGui.QPushButton(self.tab_3)
self.pushButton_update_8.setGeometry(QtCore.QRect(230, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_update_8.setFont(font)
self.pushButton_update_8.setObjectName(_fromUtf8("pushButton_update_8"))
self.label_visaname_8 = QtGui.QLabel(self.tab_3)
self.label_visaname_8.setGeometry(QtCore.QRect(20, 120, 181, 27))
font = QtGui.QFont()
font.setPointSize(12)
self.label_visaname_8.setFont(font)
self.label_visaname_8.setObjectName(_fromUtf8("label_visaname_8"))
self.label_visa_8 = QtGui.QLabel(self.tab_3)
self.label_visa_8.setGeometry(QtCore.QRect(0, 150, 311, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.label_visa_8.setFont(font)
self.label_visa_8.setText(_fromUtf8(""))
self.label_visa_8.setObjectName(_fromUtf8("label_visa_8"))
self.pushButton_select_8 = QtGui.QPushButton(self.tab_3)
self.pushButton_select_8.setGeometry(QtCore.QRect(30, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_select_8.setFont(font)
self.pushButton_select_8.setObjectName(_fromUtf8("pushButton_select_8"))
self.label_chooseVisa_8 = QtGui.QLabel(self.tab_3)
self.label_chooseVisa_8.setGeometry(QtCore.QRect(20, 0, 201, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_chooseVisa_8.setFont(font)
self.label_chooseVisa_8.setObjectName(_fromUtf8("label_chooseVisa_8"))
self.pushButton_close_8 = QtGui.QPushButton(self.tab_3)
self.pushButton_close_8.setGeometry(QtCore.QRect(130, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_close_8.setFont(font)
self.pushButton_close_8.setObjectName(_fromUtf8("pushButton_close_8"))
self.tabWidget_visa.addTab(self.tab_3, _fromUtf8(""))
self.tab_4 = QtGui.QWidget()
self.tab_4.setObjectName(_fromUtf8("tab_4"))
self.comboBox_visa_9 = QtGui.QComboBox(self.tab_4)
self.comboBox_visa_9.setGeometry(QtCore.QRect(10, 30, 301, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.comboBox_visa_9.setFont(font)
self.comboBox_visa_9.setObjectName(_fromUtf8("comboBox_visa_9"))
self.pushButton_update_9 = QtGui.QPushButton(self.tab_4)
self.pushButton_update_9.setGeometry(QtCore.QRect(230, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_update_9.setFont(font)
self.pushButton_update_9.setObjectName(_fromUtf8("pushButton_update_9"))
self.label_visaname_9 = QtGui.QLabel(self.tab_4)
self.label_visaname_9.setGeometry(QtCore.QRect(20, 120, 181, 27))
font = QtGui.QFont()
font.setPointSize(12)
self.label_visaname_9.setFont(font)
self.label_visaname_9.setObjectName(_fromUtf8("label_visaname_9"))
self.label_visa_9 = QtGui.QLabel(self.tab_4)
self.label_visa_9.setGeometry(QtCore.QRect(0, 150, 311, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.label_visa_9.setFont(font)
self.label_visa_9.setText(_fromUtf8(""))
self.label_visa_9.setObjectName(_fromUtf8("label_visa_9"))
self.pushButton_select_9 = QtGui.QPushButton(self.tab_4)
self.pushButton_select_9.setGeometry(QtCore.QRect(30, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_select_9.setFont(font)
self.pushButton_select_9.setObjectName(_fromUtf8("pushButton_select_9"))
self.label_chooseVisa_9 = QtGui.QLabel(self.tab_4)
self.label_chooseVisa_9.setGeometry(QtCore.QRect(20, 0, 201, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_chooseVisa_9.setFont(font)
self.label_chooseVisa_9.setObjectName(_fromUtf8("label_chooseVisa_9"))
self.pushButton_close_9 = QtGui.QPushButton(self.tab_4)
self.pushButton_close_9.setGeometry(QtCore.QRect(130, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_close_9.setFont(font)
self.pushButton_close_9.setObjectName(_fromUtf8("pushButton_close_9"))
self.tabWidget_visa.addTab(self.tab_4, _fromUtf8(""))
self.tab_5 = QtGui.QWidget()
self.tab_5.setObjectName(_fromUtf8("tab_5"))
self.comboBox_visa_10 = QtGui.QComboBox(self.tab_5)
self.comboBox_visa_10.setGeometry(QtCore.QRect(10, 30, 301, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.comboBox_visa_10.setFont(font)
self.comboBox_visa_10.setObjectName(_fromUtf8("comboBox_visa_10"))
self.pushButton_update_10 = QtGui.QPushButton(self.tab_5)
self.pushButton_update_10.setGeometry(QtCore.QRect(230, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_update_10.setFont(font)
self.pushButton_update_10.setObjectName(_fromUtf8("pushButton_update_10"))
self.label_visaname_10 = QtGui.QLabel(self.tab_5)
self.label_visaname_10.setGeometry(QtCore.QRect(20, 120, 181, 27))
font = QtGui.QFont()
font.setPointSize(12)
self.label_visaname_10.setFont(font)
self.label_visaname_10.setObjectName(_fromUtf8("label_visaname_10"))
self.label_visa_10 = QtGui.QLabel(self.tab_5)
self.label_visa_10.setGeometry(QtCore.QRect(0, 150, 311, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.label_visa_10.setFont(font)
self.label_visa_10.setText(_fromUtf8(""))
self.label_visa_10.setObjectName(_fromUtf8("label_visa_10"))
self.pushButton_select_10 = QtGui.QPushButton(self.tab_5)
self.pushButton_select_10.setGeometry(QtCore.QRect(30, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_select_10.setFont(font)
self.pushButton_select_10.setObjectName(_fromUtf8("pushButton_select_10"))
self.label_chooseVisa_10 = QtGui.QLabel(self.tab_5)
self.label_chooseVisa_10.setGeometry(QtCore.QRect(20, 0, 201, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_chooseVisa_10.setFont(font)
self.label_chooseVisa_10.setObjectName(_fromUtf8("label_chooseVisa_10"))
self.pushButton_close_10 = QtGui.QPushButton(self.tab_5)
self.pushButton_close_10.setGeometry(QtCore.QRect(130, 80, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton_close_10.setFont(font)
self.pushButton_close_10.setObjectName(_fromUtf8("pushButton_close_10"))
self.tabWidget_visa.addTab(self.tab_5, _fromUtf8(""))
self.textEdit = QtGui.QTextEdit(self.scrollAreaWidgetContents)
self.textEdit.setGeometry(QtCore.QRect(10, 240, 321, 311))
self.textEdit.setObjectName(_fromUtf8("textEdit"))
self.textEditDisplay = QtGui.QTextEdit(self.scrollAreaWidgetContents)
self.textEditDisplay.setGeometry(QtCore.QRect(10, 630, 321, 281))
self.textEditDisplay.setObjectName(_fromUtf8("textEditDisplay"))
self.tabWidget = QtGui.QTabWidget(self.scrollAreaWidgetContents)
self.tabWidget.setEnabled(True)
self.tabWidget.setGeometry(QtCore.QRect(340, 10, 1551, 961))
self.tabWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setPointSize(12)
self.tabWidget.setFont(font)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab_213 = QtGui.QWidget()
self.tab_213.setObjectName(_fromUtf8("tab_213"))
self.gridLayout_40 = QtGui.QGridLayout(self.tab_213)
self.gridLayout_40.setObjectName(_fromUtf8("gridLayout_40"))
self.scrollAreaGL_scan_ST_4 = QtGui.QScrollArea(self.tab_213)
self.scrollAreaGL_scan_ST_4.setMinimumSize(QtCore.QSize(0, 0))
self.scrollAreaGL_scan_ST_4.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.scrollAreaGL_scan_ST_4.setSizeIncrement(QtCore.QSize(0, 0))
self.scrollAreaGL_scan_ST_4.setBaseSize(QtCore.QSize(0, 0))
self.scrollAreaGL_scan_ST_4.setAutoFillBackground(False)
self.scrollAreaGL_scan_ST_4.setFrameShape(QtGui.QFrame.NoFrame)
self.scrollAreaGL_scan_ST_4.setFrameShadow(QtGui.QFrame.Plain)
self.scrollAreaGL_scan_ST_4.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.scrollAreaGL_scan_ST_4.setWidgetResizable(False)
self.scrollAreaGL_scan_ST_4.setObjectName(_fromUtf8("scrollAreaGL_scan_ST_4"))
self.scrollAreaWidgetContents_47 = QtGui.QWidget()
self.scrollAreaWidgetContents_47.setGeometry(QtCore.QRect(0, 0, 4351, 900))
self.scrollAreaWidgetContents_47.setObjectName(_fromUtf8("scrollAreaWidgetContents_47"))
self.curvewidget_1 = CurveWidget(self.scrollAreaWidgetContents_47)
self.curvewidget_1.setGeometry(QtCore.QRect(0, 10, 711, 401))
self.curvewidget_1.setOrientation(QtCore.Qt.Horizontal)
self.curvewidget_1.setObjectName(_fromUtf8("curvewidget_1"))
self.curvewidget_3 = CurveWidget(self.scrollAreaWidgetContents_47)
self.curvewidget_3.setGeometry(QtCore.QRect(720, 10, 711, 401))
self.curvewidget_3.setOrientation(QtCore.Qt.Horizontal)
self.curvewidget_3.setObjectName(_fromUtf8("curvewidget_3"))
self.curvewidget_2 = CurveWidget(self.scrollAreaWidgetContents_47)
self.curvewidget_2.setGeometry(QtCore.QRect(0, 460, 711, 401))
self.curvewidget_2.setOrientation(QtCore.Qt.Horizontal)
self.curvewidget_2.setObjectName(_fromUtf8("curvewidget_2"))
self.curvewidget_4 = CurveWidget(self.scrollAreaWidgetContents_47)
self.curvewidget_4.setGeometry(QtCore.QRect(720, 460, 711, 401))
self.curvewidget_4.setOrientation(QtCore.Qt.Horizontal)
self.curvewidget_4.setObjectName(_fromUtf8("curvewidget_4"))
self.curvewidget_7 = CurveWidget(self.scrollAreaWidgetContents_47)
self.curvewidget_7.setGeometry(QtCore.QRect(2160, 10, 711, 401))
self.curvewidget_7.setOrientation(QtCore.Qt.Horizontal)
self.curvewidget_7.setObjectName(_fromUtf8("curvewidget_7"))
self.curvewidget_6 = CurveWidget(self.scrollAreaWidgetContents_47)
self.curvewidget_6.setGeometry(QtCore.QRect(1440, 460, 711, 401))
self.curvewidget_6.setOrientation(QtCore.Qt.Horizontal)
self.curvewidget_6.setObjectName(_fromUtf8("curvewidget_6"))
self.curvewidget_8 = CurveWidget(self.scrollAreaWidgetContents_47)
self.curvewidget_8.setGeometry(QtCore.QRect(2160, 460, 711, 401))
self.curvewidget_8.setOrientation(QtCore.Qt.Horizontal)
self.curvewidget_8.setObjectName(_fromUtf8("curvewidget_8"))
self.curvewidget_5 = CurveWidget(self.scrollAreaWidgetContents_47)
self.curvewidget_5.setGeometry(QtCore.QRect(1440, 10, 711, 401))
self.curvewidget_5.setOrientation(QtCore.Qt.Horizontal)
self.curvewidget_5.setObjectName(_fromUtf8("curvewidget_5"))
self.curvewidget_11 = CurveWidget(self.scrollAreaWidgetContents_47)
self.curvewidget_11.setGeometry(QtCore.QRect(3620, 10, 711, 401))
self.curvewidget_11.setOrientation(QtCore.Qt.Horizontal)
self.curvewidget_11.setObjectName(_fromUtf8("curvewidget_11"))
self.curvewidget_10 = CurveWidget(self.scrollAreaWidgetContents_47)
self.curvewidget_10.setGeometry(QtCore.QRect(2880, 460, 711, 401))
self.curvewidget_10.setOrientation(QtCore.Qt.Horizontal)
self.curvewidget_10.setObjectName(_fromUtf8("curvewidget_10"))
self.curvewidget_12 = CurveWidget(self.scrollAreaWidgetContents_47)
self.curvewidget_12.setGeometry(QtCore.QRect(3620, 460, 711, 401))
self.curvewidget_12.setOrientation(QtCore.Qt.Horizontal)
self.curvewidget_12.setObjectName(_fromUtf8("curvewidget_12"))
self.curvewidget_9 = CurveWidget(self.scrollAreaWidgetContents_47)
self.curvewidget_9.setGeometry(QtCore.QRect(2890, 10, 711, 401))
self.curvewidget_9.setOrientation(QtCore.Qt.Horizontal)
self.curvewidget_9.setObjectName(_fromUtf8("curvewidget_9"))
self.scrollAreaGL_scan_ST_4.setWidget(self.scrollAreaWidgetContents_47)
self.gridLayout_40.addWidget(self.scrollAreaGL_scan_ST_4, 0, 0, 1, 1)
self.tabWidget.addTab(self.tab_213, _fromUtf8(""))
self.tab_214 = QtGui.QWidget()
self.tab_214.setObjectName(_fromUtf8("tab_214"))
self.widget_4 = QtGui.QWidget(self.tab_214)
self.widget_4.setGeometry(QtCore.QRect(780, 470, 751, 451))
self.widget_4.setObjectName(_fromUtf8("widget_4"))
self.mplwidget_4 = MatplotlibWidget(self.widget_4)
self.mplwidget_4.setGeometry(QtCore.QRect(230, 150, 16, 16))
self.mplwidget_4.setObjectName(_fromUtf8("mplwidget_4"))
self.widget_1 = QtGui.QWidget(self.tab_214)
self.widget_1.setGeometry(QtCore.QRect(10, 10, 751, 451))
self.widget_1.setObjectName(_fromUtf8("widget_1"))
self.mplwidget_1 = MatplotlibWidget(self.widget_1)
self.mplwidget_1.setGeometry(QtCore.QRect(230, 150, 16, 16))
self.mplwidget_1.setObjectName(_fromUtf8("mplwidget_1"))
self.widget_3 = QtGui.QWidget(self.tab_214)
self.widget_3.setGeometry(QtCore.QRect(780, 10, 751, 451))
self.widget_3.setObjectName(_fromUtf8("widget_3"))
self.mplwidget_3 = MatplotlibWidget(self.widget_3)
self.mplwidget_3.setGeometry(QtCore.QRect(230, 150, 16, 16))
self.mplwidget_3.setObjectName(_fromUtf8("mplwidget_3"))
self.widget_2 = QtGui.QWidget(self.tab_214)
self.widget_2.setGeometry(QtCore.QRect(10, 470, 751, 451))
self.widget_2.setObjectName(_fromUtf8("widget_2"))
self.mplwidget_2 = MatplotlibWidget(self.widget_2)
self.mplwidget_2.setGeometry(QtCore.QRect(230, 150, 16, 16))
self.mplwidget_2.setObjectName(_fromUtf8("mplwidget_2"))
self.tabWidget.addTab(self.tab_214, _fromUtf8(""))
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionArray_Builder = QtGui.QAction(MainWindow)
font = QtGui.QFont()
font.setPointSize(10)
self.actionArray_Builder.setFont(font)
self.actionArray_Builder.setObjectName(_fromUtf8("actionArray_Builder"))
self.actionAgilent_Single_scan = QtGui.QAction(MainWindow)
self.actionAgilent_Single_scan.setObjectName(_fromUtf8("actionAgilent_Single_scan"))
self.actionKeithley_Single_scan = QtGui.QAction(MainWindow)
self.actionKeithley_Single_scan.setObjectName(_fromUtf8("actionKeithley_Single_scan"))
self.actionKeithley__Sweep = QtGui.QAction(MainWindow)
self.actionKeithley__Sweep.setObjectName(_fromUtf8("actionKeithley__Sweep"))
self.actionLockIn_Single_Scan = QtGui.QAction(MainWindow)
self.actionLockIn_Single_Scan.setObjectName(_fromUtf8("actionLockIn_Single_Scan"))
self.actionResonant_Single_Scan = QtGui.QAction(MainWindow)
self.actionResonant_Single_Scan.setObjectName(_fromUtf8("actionResonant_Single_Scan"))
self.actionKeithley_Stepper_Single_Scan = QtGui.QAction(MainWindow)
self.actionKeithley_Stepper_Single_Scan.setObjectName(_fromUtf8("actionKeithley_Stepper_Single_Scan"))
self.actionSee_Visa_List = QtGui.QAction(MainWindow)
self.actionSee_Visa_List.setObjectName(_fromUtf8("actionSee_Visa_List"))
self.actionTwo_s_with_Lock_in = QtGui.QAction(MainWindow)
self.actionTwo_s_with_Lock_in.setObjectName(_fromUtf8("actionTwo_s_with_Lock_in"))
self.actionSweep_Scan = QtGui.QAction(MainWindow)
self.actionSweep_Scan.setObjectName(_fromUtf8("actionSweep_Scan"))
self.actionLockInSweep = QtGui.QAction(MainWindow)
self.actionLockInSweep.setObjectName(_fromUtf8("actionLockInSweep"))
self.actionDouble_Lock_In = QtGui.QAction(MainWindow)
self.actionDouble_Lock_In.setObjectName(_fromUtf8("actionDouble_Lock_In"))
self.actionVoltage_Bias = QtGui.QAction(MainWindow)
self.actionVoltage_Bias.setObjectName(_fromUtf8("actionVoltage_Bias"))
self.actionCurrent_Bias = QtGui.QAction(MainWindow)
self.actionCurrent_Bias.setObjectName(_fromUtf8("actionCurrent_Bias"))
self.actionJV_1 = QtGui.QAction(MainWindow)
self.actionJV_1.setObjectName(_fromUtf8("actionJV_1"))
self.actionJV_2 = QtGui.QAction(MainWindow)
self.actionJV_2.setObjectName(_fromUtf8("actionJV_2"))
self.actionJV_3 = QtGui.QAction(MainWindow)
self.actionJV_3.setObjectName(_fromUtf8("actionJV_3"))
self.actionJS_1 = QtGui.QAction(MainWindow)
self.actionJS_1.setObjectName(_fromUtf8("actionJS_1"))
self.actionJS_2 = QtGui.QAction(MainWindow)
self.actionJS_2.setObjectName(_fromUtf8("actionJS_2"))
self.actionRS_1 = QtGui.QAction(MainWindow)
self.actionRS_1.setObjectName(_fromUtf8("actionRS_1"))
self.actionRS_2 = QtGui.QAction(MainWindow)
self.actionRS_2.setObjectName(_fromUtf8("actionRS_2"))
self.actionJS_3 = QtGui.QAction(MainWindow)
self.actionJS_3.setObjectName(_fromUtf8("actionJS_3"))
self.actionRS_3 = QtGui.QAction(MainWindow)
self.actionRS_3.setObjectName(_fromUtf8("actionRS_3"))
self.actionBW_1 = QtGui.QAction(MainWindow)
self.actionBW_1.setObjectName(_fromUtf8("actionBW_1"))
self.actionBW_2 = QtGui.QAction(MainWindow)
self.actionBW_2.setObjectName(_fromUtf8("actionBW_2"))
self.actionBW_3 = QtGui.QAction(MainWindow)
self.actionBW_3.setObjectName(_fromUtf8("actionBW_3"))
self.actionST_1 = QtGui.QAction(MainWindow)
self.actionST_1.setObjectName(_fromUtf8("actionST_1"))
self.actionST_2 = QtGui.QAction(MainWindow)
self.actionST_2.setObjectName(_fromUtf8("actionST_2"))
self.actionST_3 = QtGui.QAction(MainWindow)
self.actionST_3.setObjectName(_fromUtf8("actionST_3"))
self.retranslateUi(MainWindow)
self.tabWidget_visa.setCurrentIndex(0)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.groupBox_condition.setTitle(_translate("MainWindow", "Condition", None))
self.groupBox_control.setTitle(_translate("MainWindow", "Control", None))
self.pushButton_Start.setText(_translate("MainWindow", "Start", None))
self.pushButton_Pause.setText(_translate("MainWindow", "Pause", None))
self.pushButton_Stop.setText(_translate("MainWindow", "Stop", None))
self.label_chooseVisa_1.setText(_translate("MainWindow", "Choose Visa:", None))
self.pushButton_select_1.setText(_translate("MainWindow", "Select", None))
self.label_visaname_1.setText(_translate("MainWindow", "VISA name:", None))
self.pushButton_close_1.setText(_translate("MainWindow", "Close", None))
self.pushButton_update_1.setText(_translate("MainWindow", "Update", None))
self.tabWidget_visa.setTabText(self.tabWidget_visa.indexOf(self.tab_228), _translate("MainWindow", "Visa1", None))
self.pushButton_update_2.setText(_translate("MainWindow", "Update", None))
self.pushButton_close_2.setText(_translate("MainWindow", "Close", None))
self.pushButton_select_2.setText(_translate("MainWindow", "Select", None))
self.label_visaname_2.setText(_translate("MainWindow", "VISA name:", None))
self.label_chooseVisa_2.setText(_translate("MainWindow", "Choose Visa:", None))
self.tabWidget_visa.setTabText(self.tabWidget_visa.indexOf(self.tab_229), _translate("MainWindow", "Visa2", None))
self.pushButton_update_3.setText(_translate("MainWindow", "Update", None))
self.pushButton_close_3.setText(_translate("MainWindow", "Close", None))
self.pushButton_select_3.setText(_translate("MainWindow", "Select", None))
self.label_visaname_3.setText(_translate("MainWindow", "VISA name:", None))
self.label_chooseVisa_3.setText(_translate("MainWindow", "Choose Visa:", None))
self.tabWidget_visa.setTabText(self.tabWidget_visa.indexOf(self.tab_230), _translate("MainWindow", "Visa3", None))
self.pushButton_update_4.setText(_translate("MainWindow", "Update", None))
self.pushButton_close_4.setText(_translate("MainWindow", "Close", None))
self.pushButton_select_4.setText(_translate("MainWindow", "Select", None))
self.label_visaname_4.setText(_translate("MainWindow", "VISA name:", None))
self.label_chooseVisa_4.setText(_translate("MainWindow", "Choose Visa:", None))
self.tabWidget_visa.setTabText(self.tabWidget_visa.indexOf(self.tab_231), _translate("MainWindow", "Visa4", None))
self.pushButton_update_5.setText(_translate("MainWindow", "Update", None))
self.pushButton_close_5.setText(_translate("MainWindow", "Close", None))
self.pushButton_select_5.setText(_translate("MainWindow", "Select", None))
self.label_visaname_5.setText(_translate("MainWindow", "VISA name:", None))
self.label_chooseVisa_5.setText(_translate("MainWindow", "Choose Visa:", None))
self.tabWidget_visa.setTabText(self.tabWidget_visa.indexOf(self.tab_232), _translate("MainWindow", "Visa5", None))
self.pushButton_update_6.setText(_translate("MainWindow", "Update", None))
self.label_visaname_6.setText(_translate("MainWindow", "VISA name:", None))
self.pushButton_select_6.setText(_translate("MainWindow", "Select", None))
self.label_chooseVisa_6.setText(_translate("MainWindow", "Choose Visa:", None))
self.pushButton_close_6.setText(_translate("MainWindow", "Close", None))
self.tabWidget_visa.setTabText(self.tabWidget_visa.indexOf(self.tab), _translate("MainWindow", "Visa6", None))
self.pushButton_update_7.setText(_translate("MainWindow", "Update", None))
self.label_visaname_7.setText(_translate("MainWindow", "VISA name:", None))
self.pushButton_select_7.setText(_translate("MainWindow", "Select", None))
self.label_chooseVisa_7.setText(_translate("MainWindow", "Choose Visa:", None))
self.pushButton_close_7.setText(_translate("MainWindow", "Close", None))
self.tabWidget_visa.setTabText(self.tabWidget_visa.indexOf(self.tab_2), _translate("MainWindow", "Visa7", None))
self.pushButton_update_8.setText(_translate("MainWindow", "Update", None))
self.label_visaname_8.setText(_translate("MainWindow", "VISA name:", None))
self.pushButton_select_8.setText(_translate("MainWindow", "Select", None))
self.label_chooseVisa_8.setText(_translate("MainWindow", "Choose Visa:", None))
self.pushButton_close_8.setText(_translate("MainWindow", "Close", None))
self.tabWidget_visa.setTabText(self.tabWidget_visa.indexOf(self.tab_3), _translate("MainWindow", "Visa8", None))
self.pushButton_update_9.setText(_translate("MainWindow", "Update", None))
self.label_visaname_9.setText(_translate("MainWindow", "VISA name:", None))
self.pushButton_select_9.setText(_translate("MainWindow", "Select", None))
self.label_chooseVisa_9.setText(_translate("MainWindow", "Choose Visa:", None))
self.pushButton_close_9.setText(_translate("MainWindow", "Close", None))
self.tabWidget_visa.setTabText(self.tabWidget_visa.indexOf(self.tab_4), _translate("MainWindow", "Visa9", None))
self.pushButton_update_10.setText(_translate("MainWindow", "Update", None))
self.label_visaname_10.setText(_translate("MainWindow", "VISA name:", None))
self.pushButton_select_10.setText(_translate("MainWindow", "Select", None))
self.label_chooseVisa_10.setText(_translate("MainWindow", "Choose Visa:", None))
self.pushButton_close_10.setText(_translate("MainWindow", "Close", None))
self.tabWidget_visa.setTabText(self.tabWidget_visa.indexOf(self.tab_5), _translate("MainWindow", "Visa10", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_213), _translate("MainWindow", "Curve Plot", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_214), _translate("MainWindow", "Analysis", None))
self.actionArray_Builder.setText(_translate("MainWindow", "Array Builder", None))
self.actionAgilent_Single_scan.setText(_translate("MainWindow", "Single Scan", None))
self.actionKeithley_Single_scan.setText(_translate("MainWindow", "Single Scan", None))
self.actionKeithley__Sweep.setText(_translate("MainWindow", " Sweep", None))
self.actionLockIn_Single_Scan.setText(_translate("MainWindow", "Single Scan", None))
self.actionResonant_Single_Scan.setText(_translate("MainWindow", "Single Scan", None))
self.actionKeithley_Stepper_Single_Scan.setText(_translate("MainWindow", "Single Scan", None))
self.actionSee_Visa_List.setText(_translate("MainWindow", "See Visa List", None))
self.actionTwo_s_with_Lock_in.setText(_translate("MainWindow", "Two s with Lock-in", None))
self.actionSweep_Scan.setText(_translate("MainWindow", "Sweep Scan", None))
self.actionLockInSweep.setText(_translate("MainWindow", "Sweep Scan", None))
self.actionDouble_Lock_In.setText(_translate("MainWindow", "Double Lock-In", None))
self.actionVoltage_Bias.setText(_translate("MainWindow", "Voltage", None))
self.actionCurrent_Bias.setText(_translate("MainWindow", "Current", None))
self.actionJV_1.setText(_translate("MainWindow", "Program 1", None))
self.actionJV_2.setText(_translate("MainWindow", "Program 2", None))
self.actionJV_3.setText(_translate("MainWindow", "Program 3", None))
self.actionJS_1.setText(_translate("MainWindow", "Program 1", None))
self.actionJS_2.setText(_translate("MainWindow", "Program 2", None))
self.actionRS_1.setText(_translate("MainWindow", "Program 1", None))
self.actionRS_2.setText(_translate("MainWindow", "Program 2", None))
self.actionJS_3.setText(_translate("MainWindow", "Program 3", None))
self.actionRS_3.setText(_translate("MainWindow", "Program 3", None))
self.actionBW_1.setText(_translate("MainWindow", "Program 1", None))
self.actionBW_2.setText(_translate("MainWindow", "Program 2", None))
self.actionBW_3.setText(_translate("MainWindow", "Program 3", None))
self.actionST_1.setText(_translate("MainWindow", "Program 1", None))
self.actionST_2.setText(_translate("MainWindow", "Program 2", None))
self.actionST_3.setText(_translate("MainWindow", "Program 3", None))
from matplotlibwidget import MatplotlibWidget
from guiqwt.plot import CurveWidget
| [
"qw68@duke.edu"
] | qw68@duke.edu |
0e95a6a91e5b22b1b26ace811080c9100aae1997 | 8a1650940b9d1a136c61faaec6bb77fba5310d26 | /application.py | 7fe2f78601febb29ec53ff1c4165117c3f8b31ef | [] | no_license | garthtee/flask-api-foundation | 82c45a882a6a2f263ea6cd4e13470cff8779332b | 685408b0b34efb4ce712377a0883e4cd1f00f8a5 | refs/heads/master | 2023-02-09T10:33:51.911052 | 2019-06-09T18:33:51 | 2019-06-09T18:33:51 | 191,041,727 | 2 | 0 | null | 2023-02-02T06:29:01 | 2019-06-09T18:33:38 | Python | UTF-8 | Python | false | false | 205 | py | import os
from api.app import app
if __name__ == '__main__':
    # Debug mode defaults to ON (preserving the original behavior) but can
    # now be disabled without a code change, e.g. DEBUG=false in production.
    # SECURITY NOTE: Flask/Werkzeug debug mode exposes an interactive
    # debugger that allows arbitrary code execution -- never leave it
    # enabled on a publicly reachable host.
    app.debug = os.environ.get('DEBUG', 'true').strip().lower() in ('1', 'true', 'yes')
    # Bind address/port come from the environment (IP / PORT), following
    # common PaaS conventions; fall back to all interfaces on port 8080.
    host = os.environ.get('IP', '0.0.0.0')
    port = int(os.environ.get('PORT', 8080))
    app.run(host=host, port=port)
"tolandgarth@gmail.com"
] | tolandgarth@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.