index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
5,300 | 39f1595374147c71bc2d4c945a0f1149891f1883 | import cx_Oracle
import datetime
# Reporting window bounds, DD.MM.YYYY.  Only SDATE is interpolated into the
# query below; FDATE is currently unused.
SDATE = '01.01.2014'
FDATE = '01.01.2020'
#p.PRESZAB,
#GDR_RATE.RFLUID,
#p.NRES
#join GDR_RATE on GDR_RATE.IDWELL = p.IDWELL and GDR_RATE.DTBGN = p.DTBGN and GDR_RATE.NRES = p.NRES)
# For every pressure measurement (GDR_MSRPRESS) taken after SDATE, pick the
# inclinometry sample closest to the gauge depth (RN = 1 over
# abs(MD - DPDEVICE)) and derive the gauge's true vertical depth TVDDEVICE.
# 57.2958 converts degrees to radians for Oracle's cos().
pbu_query_raw = f"""
select
WELLNAME,
DTBGN,
DPDEVICE,
(TVDSS-(MD - DPDEVICE)*(cos(INKL/57.2958))) as TVDDEVICE
from(
select
p.IDWELL as IDWELL,
BASP_REGISTRYWELL.WELLNAME as WELLNAME,
p.DTBGN as DTBGN,
GDR_TEST.DPDEVICE as DPDEVICE,
itb.MD as MD,
itb.TVDSS as TVDSS,
itb.INKL as INKL,
itb.AZIM as AZIM,
row_number() over(partition by p.IDWELL, p.DTBGN order by abs(itb.MD-GDR_TEST.DPDEVICE) asc) as RN
from GDR_MSRPRESS p
join GDR_TEST on GDR_TEST.IDWELL = p.IDWELL and GDR_TEST.DTBGN = p.DTBGN and GDR_TEST.NRES = p.NRES
join BASP_REGISTRYWELL on BASP_REGISTRYWELL.IDWELL = p.IDWELL
join (select
RSRC_REGISTRYINKL.IDWELL as IDWELL,
i.DPTINKL as MD,
i.AGLINKL as INKL,
i.AZMINKL as AZIM,
i.AOINKL as TVDSS
from RSRC_INKL i
JOIN RSRC_REGISTRYINKL ON i.IDINKL = RSRC_REGISTRYINKL.IDINKL
order by RSRC_REGISTRYINKL.IDWELL, i.DPTINKL) itb
on itb.IDWELL=p.IDWELL and itb.MD > GDR_TEST.DPDEVICE
where p.DTBGN > TO_DATE('{SDATE}','DD.MM.YYYY')
order by p.DTBGN, p.IDWELL
)
where RN = 1
order by IDWELL, DTBGN
""" # PBU press
def get_data_from_database_cns(connection, query_string, delimiter=';'):
    """Execute *query_string* on *connection* and print the result set.

    Prints a header row of column names, then one delimited line per row.
    NULLs print as empty fields; datetimes as DD.MM.YYYY HH:MM:SS.

    Fixes: a list comprehension used purely for its print side effect was
    replaced by a plain loop, and ``w == None`` by ``w is None``.
    """
    with connection.cursor() as cur:
        cur.execute(query_string)
        # Header row: column names come first in each cursor.description entry.
        for col in cur.description:
            print(col[0], end=delimiter)
        print()
        for result in cur:
            for w in result:
                if w is None:
                    print("", end=delimiter)
                elif isinstance(w, datetime.datetime):
                    print(f"{w:%d.%m.%Y %H:%M:%S}", end=delimiter)
                else:
                    print(f"{w}", end=delimiter)
            print()
def connect_database():
    """Open a cx_Oracle connection to the WQ2 read-only schema.

    NOTE(review): host, port, service name and credentials are hard-coded,
    and the password equals the user name -- consider moving these to
    configuration/secrets before wider use.
    """
    host_name = '10.201.194.37'
    port_number = 1521
    service_name = 'WQ2'
    user = 'WQ2_RO'
    password = user
    dsn_tns = cx_Oracle.makedsn(host_name, port_number, service_name)
    return cx_Oracle.connect(user, password, dsn_tns)
def connect_and_query():
    """Connect, dump the PBU query results to stdout, and always close.

    Fix: the connection is now closed in a ``finally`` block so it is not
    leaked when the query or the printing raises.
    """
    connection = connect_database()  # print(connection.version)
    try:
        get_data_from_database_cns(connection, pbu_query_raw, ' ')
    finally:
        connection.close()

connect_and_query()
|
5,301 | 97afa67cbe20900e2388994481abebe772e22818 | import pandas as pd
# Load the expected-triples list; then drop comment rows.
triples = pd.read_csv("SollTripel.csv", sep=",", skip_blank_lines=True, skipinitialspace=True)
triples.columns = ["triple", "found"]
# Bug fix: `"#" not in triples.triple` evaluates to a single bool (it tests
# membership against the Series index), so `triples[...]` raised a KeyError.
# Filter row-wise instead, keeping rows whose triple has no '#'.
triples = triples[~triples["triple"].str.contains("#", na=False)]
print(triples)
5,302 | f52bac3e658a34b82721746364fab11d25d470c4 | from .VimaptException import VimaptException
class VimaptAbortOperationException(VimaptException):
    """Raised to abort the current vimapt operation; carries no extra state."""
    pass
|
5,303 | 251d589a5815d77d2bc375d8d4a7d41e79a2a5cd | # Mostra entre as 7 pessoas, quantas pessoas são maiores de idade.
# Ask for 7 birth years and count how many people are of legal age.
num1 = 0  # running count of people counted as "maiores de idade"
for c in range(0, 7):
    pe1 = int(input('Digite o ano de nascimento: '))
    pe1 = 2019 - pe1  # approximate age (ignores whether the birthday has passed)
    # NOTE(review): the message says "maiores de idade" (18+) but this tests
    # >= 21 -- confirm which threshold is intended.
    if pe1 >= 21:
        num1 = num1 + 1
print(f'Entre as 7 pessoas, {num1} pessoas são maiores de idade.')
5,304 | 941dac77fe60081ffa113c437a356d59837f5883 | from collections import OrderedDict as odict
from vent.gui import styles
# GUI parameter tables.
# 'abs_range' is the full displayable sensor range, 'safe_range' the
# no-alarm band, 'decimals' the precision shown in the GUI.
MONITOR = odict({
    'oxygen': {
        'name': 'O2 Concentration',
        'units': '%',
        'abs_range': (0, 100),
        'safe_range': (60, 100),
        'decimals' : 1
    },
    'temperature': {
        'name': 'Temperature',
        'units': '\N{DEGREE SIGN}C',
        'abs_range': (0, 50),
        'safe_range': (20, 30),
        'decimals': 1
    },
    'humidity': {
        'name': 'Humidity',
        'units': '%',
        'abs_range': (0, 100),
        'safe_range': (20, 75),
        'decimals': 1
    },
    'vte': {
        'name': 'VTE',
        'units': '%',
        'abs_range': (0, 100),
        'safe_range': (20, 80),
        'decimals': 1
    }
})

# Operator-settable control targets; 'value' is the default setpoint.
CONTROL = {
    'oxygen': {
        'name': 'O2 Concentration',
        'units': '%',
        'abs_range': (0, 100),
        'value': 80,
        'decimals': 1
    },
    'temperature': {
        'name': 'Temperature',
        'units': '\N{DEGREE SIGN}C',
        'abs_range': (0, 50),
        'value': 23,
        'decimals': 1
    },
}

# Waveform plot configuration; colors come from vent.gui.styles.
PLOTS = {
    'flow': {
        'name': 'Flow (L/s)',
        'abs_range': (0, 100),
        'safe_range': (20, 80),
        'color': styles.SUBWAY_COLORS['yellow'],
    },
    'pressure': {
        'name': 'Pressure (mmHg)',
        'abs_range': (0, 100),
        'safe_range': (20, 80),
        'color': styles.SUBWAY_COLORS['orange'],
    }
}
5,305 | 8247b045a5aed4d0f3db6bc2c0edd985f2c4ba30 | import os
def get_os_env_value(key):
    """Look up *key* in the process environment; None when it is unset."""
    return os.environ.get(key)
def get_mysql_uri(user, password, host, database):
    """Build a SQLAlchemy connection URI for MySQL via the PyMySQL driver."""
    return "mysql+pymysql://{}:{}@{}/{}".format(user, password, host, database)
# Master MySQL connection settings, read from the environment at import time.
MASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')
MASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value('MASTER_MYSQL_DATABASE_PASSWORD')
MASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')
MASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value('MASTER_MYSQL_DATABASE_DB_CASAONE')
# SQLALCHEMY_POOL_RECYCLE = 60 * 10
# SQLALCHEMY_POOL_TIMEOUT = 60 * 20
# NOTE(review): TRACK_MODIFICATIONS and ECHO are both enabled -- very
# verbose/costly; confirm this is intended outside development.
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_ECHO = True
SQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER, MASTER_MYSQL_DATABASE_PASSWORD,
                                        MASTER_MYSQL_DATABASE_HOST, MASTER_MYSQL_DATABASE_DB_CASAONE)
# Ping pooled connections before use so stale ones are replaced transparently.
SQLALCHEMY_ENGINE_OPTIONS = {
    "pool_pre_ping": True
}
|
5,306 | 59de17ea4e714e17e3a7dd966bd0d93ba73f4503 | #!/usr/bin/env python
from pymongo import GEO2D
from GlobalConfigs import eateries
eateries.create_index([("eatery_coordinates", GEO2D)])
eateries.ensure_index([("eatery_coordinates", pymongo.GEOSPHERE)])
for e in eateries.find({"eatery_coordinates": {"$near": [latitude, longitude]}}).limit(5):
print e.get("eatery_coordinates"), e.get("eatery_name")
|
5,307 | 4dea0967a0ee3e9eb3b46145739dfeb233f3a120 | '''
Trolls are attacking your comment section!
A common way to deal with this situation is to remove all of the vowels from the trolls' comments, neutralizing the threat.
Your task is to write a function that takes a string and return a new string with all vowels removed.
For example, the string "This website is for losers LOL!" would become "Ths wbst s fr lsrs LL!".
Note: for this kata y isn't considered a vowel.
'''
#%%
def disemvowel(string):
    """Return *string* with every ASCII vowel removed ('y' is not a vowel)."""
    consonants_only = [ch for ch in string if ch not in "aeiouAEIOU"]
    return "".join(consonants_only)

string = "hEllo"
dis = disemvowel(string)
dis
#%%
def disemvowel(s):
    """Return *s* with every ASCII vowel removed ('y' is kept).

    Bug fix: ``s.translate(None, "aeiouAEIOU")`` was the Python 2 str API
    and raises TypeError on Python 3; build a deletion table with
    ``str.maketrans`` instead.
    """
    return s.translate(str.maketrans("", "", "aeiouAEIOU"))

e = "Hello"
i = disemvowel(e)
i
# %%
|
5,308 | 5ab20c1cd2dc0d0ad881ee52008d00c2317084f9 | from django.db import models
# Create your models here.
class Position(models.Model):
    """A job position/title that an Employee record can reference."""
    title = models.CharField(max_length=50)

    def __str__(self):
        return self.title
class Employee(models.Model):
    """A candidate/employee record holding contact data and CV-derived text.

    NOTE(review): `age` and `email` are plain CharFields, and `sexe` is a
    ForeignKey to Position -- the field name and its target look mismatched;
    confirm the intended schema before relying on it.
    """
    nom = models.CharField(max_length=100)
    prenom = models.CharField(max_length=100)
    age= models.CharField(max_length=15)
    sexe= models.ForeignKey(Position,on_delete=models.CASCADE)
    portable = models.CharField(max_length=100)
    email = models.CharField(max_length=100)
    formation = models.CharField(max_length=100000)
    experiences1 = models.CharField(max_length=100000)
    experiences2 = models.CharField(max_length=100000)
    experiences3 = models.CharField(max_length=100000)
    competences = models.CharField(max_length=100000)
    divers = models.CharField(max_length=1000)
    linkedin = models.CharField(max_length=1000)
    CV=models.FileField(upload_to ='media/pdf')
    # Dead code kept as a reference: pdfminer-based extraction of the CV text.
    """
    from pdfminer.pdfinterp import PDFResourceManager
    from pdfminer.pdfinterp import PDFPageInterpreter
    from pdfminer.converter import TextConverter
    from pdfminer.pdfpage import PDFPage
    from io import BytesIO
    import argparse
    rsrcmgr = PDFResourceManager()
    retstr = BytesIO()
    device = TextConverter(rsrcmgr, retstr)
    #with open(path, "rb") as fp: # open in 'rb' mode to read PDF bytes
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    for page in PDFPage.get_pages(CV, check_extractable=True):
        interpreter.process_page(page)
    device.close()
    text = retstr.getvalue()
    retstr.close()
    pdf_text = text.decode("utf-8")
    #########"""

    def __str__(self):
        # Concatenates the CV path and the free-text fields for display/search.
        return (str(self.CV)+"*"+" "+str(self.formation)+" "+str(self.competences)+" "+str(self.experiences1)+" "+str(self.experiences2)+" "+str(self.experiences3))
5,309 | d5acda0d5d066d381a7f6310eb4fe6280d7e84de | import unittest
from collections import Counter
class Solution(object):
    """Recover an original array from its 'doubled' array (LeetCode 2007)."""

    def findOriginalArray(self, changed):
        """Return the original array whose elements plus their doubles form
        *changed*, or [] when no such array exists.

        :type changed: List[int]
        :rtype: List[int]
        """
        if len(changed) % 2:
            return []
        remaining = Counter(changed)
        changed.sort()  # ascending, so originals are visited before doubles
        originals = []
        for value in changed:
            if remaining.get(value, 0) <= 0:
                continue  # already consumed as somebody's double
            remaining[value] -= 1
            doubled = value * 2
            if remaining.get(doubled, 0) <= 0:
                return []  # unmatched element -> not a doubled array
            remaining[doubled] -= 1
            originals.append(value)
        return originals
class TestSolution(unittest.TestCase):
    """Smoke test for Solution.findOriginalArray."""
    def test_findOriginalArray(self):
        solution = Solution()
        self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1, 3, 4])

if __name__ == '__main__':
    unittest.main()
|
def solution(citations):
    """Return the h-index of the given citation counts.

    h-index: the largest h such that at least h papers have >= h citations.

    Fix: the original fell off the end of the loop (returning None) when no
    paper qualified, e.g. for [0] or []; now returns 0 in that case.
    """
    citations.sort()  # ascending, so the first qualifying index gives max h
    for i, c in enumerate(citations):
        if c >= len(citations) - i:
            return len(citations) - i
    return 0

print(solution([3,0,6,1,5]))
5,311 | 12f035962925c5380c782e8fad23f16fe9fb9435 | #cerner_2^5_2019
#Mason Seeger submission 1
from random import randint as r
import operator as o
#Only works with valid integers. A function for quick math brain training.
def randomMath():
    """Quiz the user on random arithmetic until 10 answers are correct.

    Note: '/' is scored as floor division (operator.floordiv) and operands
    are 1..10, so every expected answer is an integer and division by zero
    cannot occur.  Non-integer input raises ValueError (unhandled).
    """
    correct = 0
    while(correct<10):
        str_ops = ['+', '-', '*', '/', '%']
        ops = {'+': o.add, '-': o.sub, '*': o.mul, '/': o.floordiv, '%': o.mod}
        x = r(1,10)
        y = r(1,10)
        op = str_ops[r(0,4)]  # pick one of the five operators at random
        inp = input(str(x) + op + str(y) + '=')
        if int(inp) == ops[op](x, y):
            correct+=1
            print("Correct! Only " + str(10-correct) + ' correct answers to go!')
        else:
            print("Wrong! " + str(10-correct) + ' correct answers to go!')
    print("Congrats!! Good brain training.")

randomMath()
|
5,312 | dc13ca17bff8e2a5254c7758bd7274926bafd454 | from dataclasses import dataclass
from datetime import date
@dataclass
class Book:
    """A single library item together with its lending status."""

    id: int
    title: str
    author: str
    genre: str
    published: date
    status: str = 'Available'

    def __str__(self):
        # Short one-line identification of the book.
        return '{}: {} by {}'.format(self.id, self.title, self.author)

    def get_more_information(self):
        # Multi-line detail view; labels are in Polish (genre, publish date, status).
        details = "Gatunek: {}\nData publikacji: {}\nStatus: {}"
        return details.format(self.genre, self.published, self.status)
|
5,313 | bc53af24bb46d2be3122e290c4732b312f4ebdf5 | from get_info import parse_matches as pm
def all_match_data(year):
    """Collect every match from parse_matches() played in season *year*.

    Prints each match with a 1-based game ID and returns the matching
    records as a list.

    :param year: Specific format YYYY between 2008 - 2017
    :return: list of match records for that season
    """
    season_matches = [match for match in pm() if match[1] == year]
    for game_id, match in enumerate(season_matches, start=1):
        print(
            f'Game ID: {game_id} Match date: {match[3]} {match[4]} vs '
            f'{match[5]}')
    return season_matches
|
5,314 | 422873f89468b1faabed96f72f463b6294b85276 | # Generated by Django 3.0.5 on 2020-05-12 13:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the `idcard` model/table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='idcard',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, null=True)),
                ('employment_id', models.CharField(max_length=20, null=True)),
                ('customer_account_no', models.CharField(max_length=20, null=True)),
                ('circle', models.CharField(max_length=20, null=True)),
                ('company_name', models.CharField(max_length=20, null=True)),
                ('department', models.CharField(max_length=20, null=True)),
                # The only required (non-nullable) text field.
                ('certificate_no', models.CharField(max_length=20)),
                ('date', models.CharField(max_length=20, null=True)),
            ],
        ),
    ]
|
5,315 | 97e7ca02d85267492a0dcbbda9d8754a0a3735a5 | from core.detector import Detector
from utils.augmentations import *
from torchvision.transforms.transforms import Compose
from config.mask_config import *
from config.train_config import model_info
np.random.seed(3)
colors = np.random.randint(128, 256, (100, 3))
def to_image(det):
    """Run the mask/face detector over a fixed set of test images and show each.

    NOTE(review): `val_trans` is built but never applied here -- presumably
    normalization happens inside `det.predict`; confirm.
    """
    size = 512
    val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]
    val_trans = Compose(val_trans)
    for i in range(5, 200):
        # NOTE(review): trailing space in the path is suspicious -- confirm.
        path = f"D:/temp_data/mask/test/{i}.jpg "
        print(path)
        image = cv2.imread(path)
        image = cv2.resize(image, (size, size))
        bboxes = det.predict(image.copy(), size, (0.2, 0.2))
        for cid, bbox in bboxes[0].items():
            # Class id 1 is "mask", anything else is labelled "face".
            cls = "mask" if cid == 1 else "face"
            for b in bbox:
                prob = b[-1]  # last element is the confidence score
                b = b[:4].astype(int)
                cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid].tolist(), 1, cv2.LINE_AA)
                cv2.putText(image, "{}:{}".format(cls, int(prob*100)), (b[0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)
        # Show the annotated image and wait for a key press before the next one.
        cv2.imshow("image", image)
        cv2.waitKey()
def to_video(det):
    """Run the detector live on the local webcam stream; press 'q' to quit."""
    size = 512
    # NOTE(review): built but unused here -- presumably normalization happens
    # inside `det.predict`; confirm.
    val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]
    # 0 opens the local camera; a URL would open a network camera; a file path a local video.
    cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
    cap.set(3, 1920)  # set capture resolution
    cap.set(4, 1080)
    cap.set(cv2.CAP_PROP_FPS, 30)
    ret, frame = cap.read()
    while (True):
        ret, frame = cap.read()
        frame = frame[:, ::-1]  # mirror horizontally
        frame = frame[:, 440: -440]  # crop 1920-wide frame to a centered square
        image = cv2.resize(frame, (size, size))
        bboxes = det.predict(image.copy(), size, (0.5, 0.5))
        for cid, bbox in bboxes[0].items():
            cls = "mask" if cid == 1 else "face"
            for b in bbox:
                prob = b[-1]  # confidence score
                b = b[:4].astype(int)
                cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid].tolist(), 1, cv2.LINE_AA)
                cv2.putText(image, "{}:{}".format(cls, int(prob * 100)), (b[0], b[1]), cv2.FONT_ITALIC, 1,
                            colors[cid].tolist(), 2)
        cv2.imshow("image", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
if __name__ == '__main__':
    # Build the detector from project config, load a checkpoint, run the demo.
    det = Detector(classes_info, model_info, "cuda")
    det.load_model("checkpoints/2021-03-08 00.11.56/epoch=331_4.7689.pth")
    # to_image(det)
    to_video(det)
|
5,316 | 084579152a2cc7feb2c31e0209ce1e32f4905d81 | import chars2vec
import sklearn.decomposition
import matplotlib.pyplot as plt
import csv
# Load Inutition Engineering pretrained model
# Models names: 'eng_50', 'eng_100', 'eng_150' 'eng_200', 'eng_300'
from sklearn.cluster import KMeans
# Load the pretrained 50-dimensional English character-level embedding model.
c2v_model = chars2vec.load_model('eng_50')
words=[]
etichette=[]  # labels (Italian: "etichette") read from column 1 of the CSV
with open('datasetParsing2DEF.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    line_count = 0
    for row in csv_reader:
        if line_count == 0:
            # First row holds the column names.
            print(f'Column names are {", ".join(row)}')
            line_count += 1
        else:
            print(row[1],row[2])
            words.append(row[2])
            etichette.append(row[1])
            line_count += 1
    print(f'Processed {line_count} lines.')
# Create word embeddings
word_embeddings = c2v_model.vectorize_words(words)
print(word_embeddings)
# Cluster the embeddings into 4 groups.
kmeans = KMeans(
    init="random",
    n_clusters=4,
    n_init=10,
    max_iter=200,
    random_state=30)
kmeans.fit(word_embeddings),
y_kmeans = kmeans.predict(word_embeddings)
print(y_kmeans)
i=0;
# Plot each word at its first two embedding dimensions.
# NOTE(review): only dims 0 and 1 of the 50-d embedding are plotted --
# sklearn.decomposition is imported but unused, so a PCA projection was
# presumably intended; confirm.
for j in range(0,len(y_kmeans)):
    print(etichette[i])
    print(word_embeddings[j,0])
    print(word_embeddings[j,1])
    print()
    #plt.scatter(word_embeddings[:, 0], word_embeddings[:, 1],marker=('$' + etichette[i] + '$'),c=y_kmeans, s=1800)
    plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1],
                marker=('$' + 'O'+ '$'),
                s=30, label=j)
    i=i+1
# Overlay the cluster centers.
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)
plt.show()
|
5,317 | f225fbf363f1b170704418ed339f2e57ca790975 | from django import forms
from django.utils.translation import gettext_lazy as _
import django_filters
from elasticsearch_dsl.query import Q
class BaseSearchFilterSet(django_filters.FilterSet):
    """django-filter FilterSet driving an elasticsearch-dsl search object.

    Subclasses can extend `query_fields` to search more document fields.
    Fields listed in `facet_config` are applied as post-filters so facet
    aggregations are not narrowed by their own selection.
    """
    query_fields = ["content"]

    # Free-text query parameter, rendered as a plain text input.
    q = django_filters.CharFilter(
        method="auto_query",
        widget=forms.TextInput(
            attrs={"placeholder": _("Enter search term"), "class": "form-control"}
        ),
    )

    def __init__(self, *args, **kwargs):
        # Facet configuration and the owning view are injected by the caller.
        self.facet_config = kwargs.pop("facet_config", {})
        self.view = kwargs.pop("view", None)
        super().__init__(*args, **kwargs)

    def apply_filter(self, qs, name, *args, **kwargs):
        # Faceted fields use post_filter so aggregations ignore this filter.
        if name in self.facet_config:
            return qs.post_filter(name, *args, **kwargs)
        return qs.filter(*args, **kwargs)

    def filter_queryset(self, queryset):
        """
        Filter the queryset with the underlying form's `cleaned_data`. You must
        call `is_valid()` or `errors` before calling this method.
        This method should be overridden if additional filtering needs to be
        applied to the queryset before it is cached.
        """
        for name, value in self.form.cleaned_data.items():
            queryset = self.filters[name].filter(queryset, value)
        # assert isinstance(queryset, models.QuerySet), \
        #     "Expected '%s.%s' to return a QuerySet, but got a %s instead." \
        #     % (type(self).__name__, name, type(queryset).__name__)
        return queryset

    def auto_query(self, qs, name, value):
        # Free-text search across query_fields with AND semantics; lenient so
        # type mismatches in the index do not raise.
        if value:
            return qs.set_query(
                Q(
                    "simple_query_string",
                    query=value,
                    fields=self.query_fields,
                    default_operator="and",
                    lenient=True,
                )
            )
        return qs
|
5,318 | e3b8bec0cc7df217052a3182f9a862f0e3622afd | #!python
import pdb
import argparse
import os
import re
import sys
import string
from utilpack import path
from subprocess import Popen
from subprocess import PIPE
def popen(cmd):
    """Run *cmd* (split on whitespace) and return its captured stdout as bytes."""
    argv = cmd.split()
    stdout_data, _ = Popen(argv, stdout=PIPE).communicate()
    return stdout_data
def debug (s):
    # Intentionally a no-op: evaluates `s` and discards it.  Replace the body
    # with a print to enable tracing of the callers' debug() messages.
    s
# Parsed setup.ini contents keyed by distribution ('test'/'curr'/'prev').
# Starts as 0 so get_setup_ini() can tell "not yet loaded" from "loaded".
dists = 0

def get_setup_ini (setup_ini_filename):
    """Parse a Cygwin-style setup.ini into the global `dists` mapping.

    The file is split into per-package chunks on '\n\n@ '.  Within a chunk,
    'key: value' lines are collected into a record; a '[dist]' line switches
    which distribution the record is filed under; quoted values may span
    multiple lines.  Idempotent: returns immediately once `dists` is set.

    NOTE: Python 2 code -- uses the `string` module, a print statement and a
    string exception ('URG'); it will not run unmodified under Python 3.
    """
    global dists
    if dists:
        return
    dists = {'test': {}, 'curr': {}, 'prev' : {}}
    chunks = string.split (open (setup_ini_filename).read (), '\n\n@ ')
    for i in chunks[1:]:
        lines = string.split (i, '\n')
        name = string.strip (lines[0])
        debug ('package: ' + name)
        packages = dists['curr']
        records = {'sdesc': name}
        j = 1
        while j < len (lines) and string.strip (lines[j]):
            debug ('raw: ' + lines[j])
            if lines[j][0] == '#':
                # Comment line: skip.
                j = j + 1
                continue
            elif lines[j][0] == '[':
                # '[dist]' line: file the record so far, switch distribution.
                debug ('dist: ' + lines[j][1:5])
                packages[name] = records.copy ()
                packages = dists[lines[j][1:5]]
                j = j + 1
                continue
            try:
                key, value = map (string.strip,
                                  string.split (lines[j], ': ', 1))
            except:
                print lines[j]
                raise 'URG'
            if value[0] == '"' and value.find ('"', 1) == -1:
                # Opening quote with no closing quote: value continues on
                # following lines until a quote appears.
                while 1:
                    j = j + 1
                    value += '\n' + lines[j]
                    if lines[j].find ('"') != -1:
                        break
            records[key] = value
            j = j + 1
        packages[name] = records
def error (msg):
    # Diagnostic helper: prefix the message with the program name.
    # (Python 2 print statement.)
    print sys.argv[0] + ": " + msg
def find_line(inifile, target_package, section, filename):
    """Locate the first line containing *section* inside *target_package*'s
    stanza of *inifile*.

    Returns (zero-based line number, line text), or None when the package
    marker '@ <target_package>' is not found.  *filename* is unused but kept
    for call compatibility.  Raises RuntimeError when the package exists but
    no line contains *section*.

    Fixes: Python 2 ``file()`` -> ``open()`` (with a context manager), and
    ``raise("urg")`` -- raising a plain string is illegal in Python 3 --
    replaced with RuntimeError.
    """
    with open(inifile) as fh:
        ini = fh.readlines()
    marker = "@ " + target_package
    ln = 0
    found = False
    for l in ini:
        if l[0:len(marker)] == marker:
            found = True
            break
        ln = ln + 1
    if not found:
        error("urg")
        return None
    endln = len(ini)
    while ln < endln:
        #print ini line under inspection
        if section in ini[ln]:
            return ln, ini[ln]
        ln += 1
    raise RuntimeError("urg")
def gen_diff(diff_filename, packagename, linenum, oldline,\
             filename, basename, section):
    """Build a 4-line ed-style diff replacing *oldline* (at zero-based
    *linenum*) with a freshly computed 'section: path size md5' line for
    *filename*.  *diff_filename* is unused here (the caller writes the file).

    Fixes: the md5 is computed with hashlib instead of shelling out to the
    external ``md5sum`` binary, and the local previously named ``len``
    (shadowing the builtin) is renamed ``size``.
    """
    import hashlib
    # Generate the md5 of the package file's contents.
    with open(filename, "rb") as fh:
        md5 = hashlib.md5(fh.read()).hexdigest()
    # Generate the length.
    size = str(os.stat(filename).st_size)
    # Generate the new line, e.g.:
    #install: release-2/testpkg/testpkg-0.0.1-0.tar.bz 3140 fbbe05f50b9273be640c312857f70619
    newline = section + ": " + "release-2/" + packagename + "/" + basename + " " + size + " " + md5 + "\n"
    # Use the old and new lines to create a diff, e.g.:
    #19916c19916
    #< install: release-2/testpkg/testpkg-0.0.1-0.tar.bz 3140 fbbe05f50b9273be640c312857f70619
    #---
    #> install: release-2/testpkg/testpkg-0.0.1-0.tar.bz 3140 69906b3bc3a249056201c398cb928bef
    # Add one: we're zero-based internally but diff uses 1-based line numbers.
    diff = [0,0,0,0]
    diff[0] = str(linenum + 1) + "c" + str(linenum + 1) + "\n"
    diff[1] = "< " + oldline
    diff[2] = "---\n"
    diff[3] = "> " + newline
    # Return the diff
    return diff
def main():
    """Patch setup-2.ini md5sums: for each given package file, emit a
    '<basename>.diff' file correcting the matching install:/source: line.

    NOTE: Python 2 code (``file()`` builtin); `dists` is filled as a side
    effect of get_setup_ini().
    """
    global dists
    parser = argparse.ArgumentParser(description = " Fixes md5sum in setup-2.ini to match newly built package. It is an error for given files not to exist in the .ini under that package."\
        "Example usage: " + sys.argv[0] + " testpkg test-pkg-0.0.1-0-src.tar.bz test-pkg-0.0.1-0.tar.bz"
    )
    parser.add_argument("inifile",\
        help="The setup.ini to patch.", metavar="INI")
    parser.add_argument("package",\
        help="The package name to fix the md5sums for.", metavar="PKG")
    parser.add_argument("files",\
        help="The package files to fix.", nargs = "*", metavar="FILES")
    options = parser.parse_args()
    target_package = options.package
    target_files = []
    for f in options.files:
        target_files.append(f)
    # Yeah I know this looks wrong but that's globals for you
    get_setup_ini(options.inifile)
    inifile = options.inifile
    pkgs = dists["curr"]
    namekeys = pkgs.keys()
    if target_package not in namekeys:
        error(target_package + " is not in " + inifile)
        return 1
    # Validate every file appears in the package's install: or source: line.
    sections = ["install", "source"]
    for f in target_files:
        basename = path(f).basename()
        found_section = 0
        for s in sections:
            if basename in pkgs[target_package][s]:
                found_section = s
                break
        if not found_section:
            error(basename + " is not in install: or source: of " +\
                target_package + "in " + inifile )
            return 1
    #def gen_diff(diff_filename, packagename, linenum, oldline,\
    #             filename, basename, section):
    # Emit one .diff per file.
    for f in target_files:
        basename = path(f).basename()
        #def find_line(inifile, target_package, section, filename):
        (linenum, line) = find_line(inifile, target_package,\
            found_section, basename)
        diff_filename = basename + ".diff"
        diff = gen_diff(diff_filename, target_package, linenum, line,\
            f, basename, found_section)
        df = file(diff_filename, "w")
        df.writelines(diff)
        df.close()
    #selected = []
    #for package in dists["curr"].keys():
    #    if "Base" in dists["curr"][package]["category"]:
    #        selected.append(package)
    #selected.sort()
    #for i in selected:
    #    print i

if __name__ == "__main__":
    main()
|
5,319 | 833053a5a75636267feaad5ddaa21dce1de34038 | #!/usr/bin/env python
"""
maskAOI.py
Dan Fitch 20150618
"""
from __future__ import print_function
import sys, os, glob, shutil, fnmatch, math, re, numpy, csv
from PIL import Image, ImageFile, ImageDraw, ImageColor, ImageOps, ImageStat
# Let PIL buffer large progressive JPEG writes in one block.
ImageFile.MAXBLOCK = 1048576

DEBUG = False

# Input/output locations.
AOI_DIR='/study/reference/public/IAPS/IAPS/IAPS_2008_1-20_800x600BMP/IAPS_2008_AOIs/'
IMG_DIR='/study/midus/IAPS2005png/'
SALIENCY_DIR='/home/fitch/aoi/saliency/'
SUN_SALIENCY_DIR='/home/fitch/aoi/sunsaliency/'
# Labels for the masks produced by createAOIMasks: "0" = all AOIs combined,
# "E" = "emotional" AOIs, "1".."4" = individual shapes.
MASK_NAMES = ["0", "E", "1", "2", "3", "4"]
def RepresentsInt(s):
    """True when *s* parses as a (possibly negative) integer, else False."""
    try:
        int(s)
    except ValueError:
        return False
    return True
#Function to return only the main, averaged AOI files (the .OBT) and their coordinates.
def getCoordinates(picturename):
    """Parse <picturename>.OBT in AOI_DIR into a list of integer AOI records.

    Lines are split on ', ', ' ' or '='; only integer tokens are kept.
    Empty records and the bare [0] record are skipped.  Returns [] when the
    OBT file does not exist.

    NOTE(review): under Python 3 `map` is lazy, so `content` would be a map
    object and `content != [0]` always True -- this was written for
    Python 2 list semantics; confirm the target interpreter.
    """
    #Load one current image
    aoiName = picturename + ".OBT"
    aoiList = []
    obtfile = "{0}/{1}".format(AOI_DIR, aoiName)
    if not os.path.exists(obtfile):
        if DEBUG: print("WARNING: No OBT file found for " + picturename)
        return []
    with open(obtfile) as file:
        stringContent = file.readlines()
        for string in stringContent:
            dirtyContent = re.split(", | |=", string)
            content = map(int, [ x for x in dirtyContent if RepresentsInt(x) ])
            if content and content != [0]:
                aoiList.append(content)
    return aoiList
def drawAOI(aoi, i, d):
    """Dispatch one AOI record to the matching shape drawer.

    Shape code 1 means rectangle; any other code is drawn as an ellipse.
    The remaining four values are the shape coordinates.
    """
    shape_drawer = drawOneRect if aoi[0] == 1 else drawOneEllipse
    shape_drawer(aoi[1:5], i, d)
# Function to display the AOI as masks
def createAOIMasks(pictureName, size):
    """Build a list of grayscale ('L') AOI masks of *size* for *pictureName*.

    Returned layout matches MASK_NAMES: [0] all AOIs combined, [1] the
    "emotional" AOIs (records after the first), then one mask per AOI.
    Returns None when the picture has no AOI file.
    """
    if DEBUG: print("Displaying AOIs for picture {0}".format(pictureName))
    aoiList = getCoordinates(pictureName)
    if aoiList == []: return None
    masks = []
    # L is grayscale
    img = Image.new("L", size, 0)
    draw = ImageDraw.Draw(img)
    for aoi in aoiList:
        drawAOI(aoi, img, draw)
    masks.append(img)
    # Now the "emotional" masks, index 2 and up theoretically
    emo = Image.new("L", size, 0)
    emo_draw = ImageDraw.Draw(emo)
    for aoi in aoiList[1:]:
        drawAOI(aoi, emo, emo_draw)
    masks.append(emo)
    # Now we draw each mask individually
    for aoi in aoiList:
        individual = Image.new("L", size, 0)
        individual_draw = ImageDraw.Draw(individual)
        drawAOI(aoi, individual, individual_draw)
        masks.append(individual)
    return masks
def drawOneEllipse(aoi, img, draw):
    """Draw one filled white ellipse on *draw*.

    aoi layout: [center_x, center_y, radius_x, radius_y].
    *img* is kept only for signature compatibility with drawOneRect.
    Cleanup: removed the unused imgDim/imgArea/w/h locals.
    """
    #Draw one ellipse on the figure given
    if DEBUG: print("Ellipse centered at [{0}, {1}] with {2} {3}".format(aoi[0], aoi[1], aoi[2], aoi[3]))
    cx = aoi[0]
    cy = aoi[1]
    LeftX = cx - aoi[2]
    RightX = cx + aoi[2]
    TopY = cy - aoi[3]
    BottomY = cy + aoi[3]
    draw.ellipse(((LeftX,TopY),(RightX,BottomY)), fill="white", outline="white")
def drawOneRect(aoi, img, draw):
    """Draw one filled white axis-aligned rectangle on *draw*.

    aoi layout (as parsed from the .OBT file): [LeftX, BottomY, RightX, TopY].
    *img* is kept only for signature compatibility.
    Cleanup: removed the unused imgDim/imgArea locals.
    """
    #Draw one rectangle on the figure given
    if DEBUG: print("Rectangle with Coordinates {0}".format(aoi))
    TopY = aoi[3]
    BottomY = aoi[1]
    LeftX = aoi[0]
    RightX = aoi[2]
    if DEBUG: print("  Top:{0}, Bottom:{1}, Left:{2}, Right: {3}".format(TopY, BottomY, LeftX, RightX))
    draw.rectangle(((LeftX,TopY),(RightX,BottomY)), fill="white", outline="white")
def stat(img, mask=None):
    """ImageStat.Stat of *img*, restricted to *mask* when one is given.

    Fix: compare against None with ``is`` instead of ``==`` (equality on
    arbitrary objects can be overloaded and expensive; identity is correct).
    """
    if mask is None:
        return ImageStat.Stat(img)
    return ImageStat.Stat(img, mask)
def brightness(img, mask=None):
    """RMS value of the image's first band, optionally restricted to *mask*."""
    return stat(img,mask).rms[0]
def luminance(c):
    """Rec. 709 luminance of an RGB or RGBA tuple of 0-255 channel values.

    For RGBA input the luminance is scaled by alpha/255 (hokey, but matches
    the original intent).  NaN results are clamped to 0.0.  Raises for
    tuples that are not length 3 or 4.

    Bug fix: the original assigned b = c[1] and g = c[2], swapping the green
    and blue channels so blue got the 0.7152 weight; the standard channel
    order r, g, b is now used.
    """
    if len(c) < 3 or len(c) > 4:
        raise Exception("Luminance got values: ", c)
    r = c[0]
    g = c[1]
    b = c[2]
    lum = r*0.2126 + g*0.7152 + b*0.0722
    if len(c) == 4:
        # Multiply by alpha... kind of hokey but should work for most cases
        result = lum * (c[3] / 255.0)
    else:
        result = lum
    if math.isnan(result):
        return 0.0
    else:
        return result
def complexity(pictureName, key, img):
    """Proxy for visual complexity: byte size of *img* saved as a JPEG.

    Side effect: leaves the JPEG at masks/<pictureName>-<key>.jpg (the
    cleanup remove is commented out).
    """
    name = "masks/{0}-{1}.jpg".format(pictureName, key)
    img.save(name, quality=80, format="JPEG", optimize=True, progressive=True)
    size = os.path.getsize(name)
    #os.remove(name)
    return size
def results_for_mask(withColors, original, pictureName, key, mask):
    """Compute luminance (and optionally color/complexity) statistics for the
    areas of *original* inside and outside *mask*.

    Returns a dict whose keys are prefixed with *key*; returns {} when a
    statistic is undefined (an empty mask raises ZeroDivisionError in PIL).
    """
    # We also want the area outside of the mask
    mask_inverted = ImageOps.invert(mask)
    stats_mask = stat(mask)
    stats_in = stat(original, mask)
    stats_out = stat(original, mask_inverted)
    # Complexity uses the resultant image saved as jpg, so we need to prepare some actual images
    stats_in_image = Image.new('RGBA', original.size, "black")
    stats_in_image.paste(original, mask=mask)
    stats_out_image = Image.new('RGBA', original.size, "black")
    stats_out_image.paste(original, mask=mask_inverted)
    try:
        if withColors:
            return {
                key + '_mask_lum': stats_mask.mean[0] / 256.0,
                key + '_in_lum': luminance(stats_in.mean) / 256.0,
                key + '_in_r': stats_in.mean[0] / 256.0,
                key + '_in_g': stats_in.mean[1] / 256.0,
                key + '_in_b': stats_in.mean[2] / 256.0,
                key + '_in_complexity': complexity(pictureName, key + "in", stats_in_image),
                key + '_out_lum': luminance(stats_out.mean) / 256.0,
                key + '_out_r': stats_out.mean[0] / 256.0,
                key + '_out_g': stats_out.mean[1] / 256.0,
                key + '_out_b': stats_out.mean[2] / 256.0,
                key + '_out_complexity': complexity(pictureName, key + "out", stats_out_image),
            }
        else:
            return {
                key + '_in_lum': luminance(stats_in.mean) / 256.0,
                key + '_out_lum': luminance(stats_out.mean) / 256.0,
            }
    except ZeroDivisionError:
        return {}
def do_saliency(original, masks, path, prefix, pictureName, results):
    """Load *pictureName*'s saliency map from *path*, resize it to the
    original image, and add saliency stats (keys starting with *prefix*)
    into *results* in place.

    Also records the sum of the dot product between the grayscale saliency
    map and the rotated all-AOIs mask as '<prefix>_aoi_dotproduct_sum'.
    """
    saliency = Image.open(path + pictureName + ".png")
    if saliency.mode != "RGBA":
        saliency = saliency.convert("RGBA")
    saliency = saliency.resize(original.size)
    stats_saliency = stat(saliency)
    results[prefix + '_lum'] = luminance(stats_saliency.mean) / 256.0
    for i, mask in zip(MASK_NAMES, masks):
        stuff = results_for_mask(False, saliency, pictureName, prefix + i, mask)
        results.update(stuff)
    saliency_bw = saliency.convert("L")
    s_array = numpy.array(saliency_bw)
    m_array = numpy.array(masks[0])
    # rot90 aligns the mask axes for the matrix product.
    dot = numpy.dot(s_array, numpy.rot90(m_array))
    results[prefix + "_aoi_dotproduct_sum"] = numpy.sum(dot)
def write_stats(writer, filename, pictureName):
    """Compute every statistic for one image and write one CSV row.

    Returns False (writing nothing) when the picture has no AOI masks,
    True on success.
    """
    original = Image.open(IMG_DIR + filename)
    if original.mode != "RGBA":
        # P is palette. Did you know BMP *and* PNG files can have 8-bit palettes? WHAAAT
        original = original.convert("RGBA")
    # First, draw the AOI masks in white on black
    # This returns a list, the first mask is ALL AOIs, the second is the "emotional" ones >=2, and the rest are each individual shape
    masks = createAOIMasks(pictureName, original.size)
    if masks == None:
        print("No masks found in: " + filename)
        return False
    stats_orig = stat(original)
    results = {
        'image_name': pictureName,
        'orig_lum': luminance(stats_orig.mean) / 256.0,
        'orig_r': stats_orig.mean[0] / 256.0,
        'orig_g': stats_orig.mean[1] / 256.0,
        'orig_b': stats_orig.mean[2] / 256.0,
        'orig_complexity': complexity(pictureName, "original", original),
    }
    for i, mask in zip(MASK_NAMES, masks):
        stuff = results_for_mask(True, original, pictureName, 'aoi' + i, mask)
        results.update(stuff)
    # And finally we get the saliency image and resize it and do a bunch of garbage with it and the AOI masks
    do_saliency(original, masks, SALIENCY_DIR, "saliency", pictureName, results)
    do_saliency(original, masks, SUN_SALIENCY_DIR, "sun_saliency", pictureName, results)
    writer.writerow(results)
    if DEBUG: print("Generated stats for " + filename)
    return True
# Build the CSV header (field order must match the result keys produced by
# write_stats) and process every PNG in IMG_DIR.
# NOTE(review): opening in 'wb' for csv.writer is Python-2 style; under
# Python 3 csv wants text mode with newline='' -- confirm the interpreter.
with open('stats.csv', 'wb') as csvfile:
    per_mask_fields = [
        '_mask_lum',
        '_in_lum',
        '_in_r',
        '_in_g',
        '_in_b',
        '_in_complexity',
        '_out_lum',
        '_out_r',
        '_out_g',
        '_out_b',
        '_out_complexity',
    ]
    per_saliency_fields = [
        '_in_lum',
        '_out_lum',
    ]
    fields = [
        'image_name',
        'orig_lum',
        'orig_r',
        'orig_g',
        'orig_b',
        'orig_complexity',
    ]
    for i in MASK_NAMES:
        for f in per_mask_fields:
            fields.append("aoi{0}{1}".format(i,f))
    fields.append("saliency_aoi_dotproduct_sum")
    fields.append("saliency_lum")
    for i in MASK_NAMES:
        for f in per_saliency_fields:
            fields.append("saliency{0}{1}".format(i,f))
    fields.append("sun_saliency_aoi_dotproduct_sum")
    fields.append("sun_saliency_lum")
    for i in MASK_NAMES:
        for f in per_saliency_fields:
            fields.append("sun_saliency{0}{1}".format(i,f))
    writer = csv.DictWriter(csvfile, fieldnames=fields)
    # Header row: each field name mapped to itself.
    writer.writerow(dict(zip(fields,fields)))
    for filename in sorted(os.listdir(IMG_DIR)):
        if not ".png" in filename:
            continue
        pictureName = filename.replace(".png", "")
        try:
            write_stats(writer, filename, pictureName)
        except:
            # Report which file failed, then re-raise so the run stops.
            print("Error on file " + pictureName, file=sys.stderr)
            raise
|
5,320 | faa53db9dd581b6508fb9e4042ec86ebaf850e60 | from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
# Print the standard classification metrics for a finished evaluation run.
# NOTE(review): `true_labels` and `guesses` are not defined in this snippet --
# it must be run in a context (e.g. a notebook) where both already exist.
print(accuracy_score(true_labels, guesses))
print(recall_score(true_labels, guesses))
print(precision_score(true_labels, guesses))
print(f1_score(true_labels, guesses))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(true_labels, guesses))
# RNA codon table: each row is [amino acid 3-letter code, codon, codon, ...].
# 'Stop' marks the three termination codons.
aminotable = [
    ['Ile' , 'AUU','AUC','AUA'], #0
    ['Leu' , 'CUU','CUC','CUA','CUG','UUA','UUG'], #1
    ['Val' , 'GUU','GUC','GUA','GUG'], #2
    ['Phe' , 'UUU','UUC'], #3
    ['Met' , 'AUG'], #4
    ['Cys' , 'UGU','UGC'], #5
    ['Ala' , 'GCU','GCC','GCA','GCG'], #6
    ['Gly', 'GGU', 'GGC', 'GGA', 'GGG'], #7
    ['Pro' , 'CCU', 'CCC', 'CCA', 'CCG'], #8
    ['Thr' , 'ACU', 'ACC', 'ACA', 'ACG'], #9
    ['Ser' , 'UCU', 'UCC', 'UCA', 'UCG', 'AGU', 'AGC'], #10
    ['Tyr' , 'UAU', 'UAC'], #11
    ['Trp' , 'UGG'], #12
    ['Gln' , 'CAA', 'CAG'], #13
    ['Asn' , 'AAU', 'AAC'], #14
    ['His' , 'CAU', 'CAC'], #15
    ['Glu' , 'GAA', 'GAG'], #16
    ['Asp' , 'GAU', 'GAC'], #17
    ['Lys', 'AAA', 'AAG'], #18
    ['Arg' , 'CGU', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], #19
    ['Stop' , 'UAA', 'UAG', 'UGA'], #20
]
# Read an RNA sequence and print the translated amino acid sequence.
sequence = input("\nEnter RNA Sequence : ")
print('Original sequence: ',sequence,'\n')
n = 0  # start index of the current codon (advances in steps of 3)
seqlength = len(sequence)
print('Amino Sequence: ')
while (n < seqlength):
    codon = sequence[n:n+3]
    # Linear scan of the table; each row is [name, codon, codon, ...].
    for amino in aminotable:
        for i in range(len(amino) - 1):
            match = amino[i+1]
            if (codon == match) :
                print(amino[0], end = '-')
                # break exits only the inner loop; remaining rows are still
                # scanned, which is harmless because codons are unique.
                break
    n += 3
print('\n\n\nEnd of program')
|
5,322 | 436b89b91aed14525f847e6488b452b7ca0e1b70 | from enum import Enum
class AggregationTypes(Enum):
    """Closed set of aggregation modes; values are their wire/config names."""
    NO_AGG = 'NO-AGG'
    STATIC = 'STATIC'
    SUB_HOUR = 'SUB-HOUR'
    DYNAMIC = 'DYNAMIC'
|
5,323 | e2840eb1b0d731d6b0356835ba371d05ba351ff6 | """APP Cloud Connect errors"""
class CCEError(Exception):
    """Base class for all Cloud Connect errors."""
    pass
class ConfigException(CCEError):
    """Raised for configuration errors."""
    pass
class FuncException(CCEError):
    """Raised when an ext (external) function call fails."""
    pass
class HTTPError(CCEError):
    """Raised when an HTTP request returns an error."""

    def __init__(self, reason=None):
        """Initialize HTTPError with an optional human-readable *reason*."""
        self.reason = reason
        super(HTTPError, self).__init__(reason)
class StopCCEIteration(CCEError):
    """Raised to exit from the engine iteration early."""
    pass
class CCESplitError(CCEError):
    """Raised to exit the job from within a Split Task."""
    pass
class QuitJobError(CCEError):
    """Raised to abort the current job (name-derived; no extra state)."""
    pass
|
class Fail(Exception):
    """Raised (and immediately caught) for students scoring below 40."""
    def __init__(self, message):
        # Pass the message through so str(exception) yields it.
        super().__init__(message)
class Student:
    """One student's roll number, name and marks."""
    def __init__(self, rollNo, name, marks):
        self.rollNo = rollNo
        self.name = name
        self.marks = marks

    def displayDetails(self):
        """Print the student's details; print a Fail notice below 40 marks."""
        # NOTE(review): prints name before roll number although input is
        # collected roll-number first -- confirm the intended column order.
        print('{} \t {} \t {}'.format(self.name, self.rollNo, self.marks))
        try:
            if self.marks < 40:
                raise Fail('Student {} has Scored {} marks and has Failed '.format(
                    self.name, self.marks))
        except Fail as f:
            # The exception is used purely as a message carrier here.
            print(f)
# Read N students from stdin as comma-separated "roll,name,marks" records,
# then print the details of each one.
students = []
num = int(input('Enter the number of Students : '))
for idx in range(num):
    raw = input('Enter Roll-no,Name,Marks of Student {} : '.format(idx + 1))
    rollNo, name, marks = raw.split(',')
    print('----------------------------------------')
    students.append(Student(rollNo, name, int(marks)))
print('DETAILS OF STUDENTS ARE : ')
for student in students:
    student.displayDetails()
|
5,325 | 92f4f1c8a4e04b07ed7c05d5bb733c0b9c28bd05 | # i change it for change1
# change 1.py in master
i = 1  # NOTE(review): purpose unclear from this file alone; surrounding comments look like commit-note leftovers
# fix bug for boss
|
5,326 | fc2afc99dc754b58c36bc76c723727337851cc3e | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
# Create your views here.
def check(request):
    """Send authenticated users to the worker page, everyone else to login."""
    if request.user.is_authenticated:
        return redirect('/worker/')
    return redirect('/auth/login/')
def loginpg(request):
    """Render the login form."""
    return render(request, 'registration/login.html')
def logoutpg(request):
    """Log the current user out, then render the logout confirmation page."""
    logout(request)
    return render(request, 'registration/logout.html')
def auth(request):
    """Validate posted credentials; log the user in and redirect on success,
    otherwise render the login error page."""
    username = request.POST['username']
    password = request.POST['password']
    user = authenticate(request, username=username, password=password)
    if user is None:
        return render(request, 'registration/login_error.html')
    login(request, user)
    return redirect('/worker/')
|
5,327 | a721adaaa69bf09c2ea259f12bea05515c818679 | # TrackwayDirectionStage.py
# (C)2014-2015
# Scott Ernst
from __future__ import print_function, absolute_import, unicode_literals, division
from collections import namedtuple
import math
from pyaid.number.NumericUtils import NumericUtils
from cadence.analysis.CurveOrderedAnalysisStage import CurveOrderedAnalysisStage
from cadence.analysis.shared.LineSegment2D import LineSegment2D
from pyaid.number.PositionValue2D import PositionValue2D
from cadence.analysis.shared.plotting.MultiScatterPlot import MultiScatterPlot
from cadence.svg.CadenceDrawing import CadenceDrawing
#*************************************************************************************************** TrackwayDirectionStage
class TrackwayDirectionStage(CurveOrderedAnalysisStage):
    """Analysis stage that estimates trackway travel direction by averaging
    track headings over sliding sample windows of several sizes, plotting the
    sampled direction curves, and drawing them onto each sitemap."""

    #===============================================================================
    #                                                                     C L A S S

    # One direction sample along a trackway curve.
    SAMPLE_DATA_NT = namedtuple('SAMPLE_DATA_NT', [
        'directionAngle',   # Angle instance for the calculated trackway heading
        'position',         # Spatial position of the angle reference point
        'curvePoint',       # For plotting (curvePosition, directionAngle, curvePosUnc, directionAngleUnc)
        'curvePosition',    # ValueUncertainty object representing position along curve
        'track' ])          # Track used to reference this sample

    # Output folder name for the sitemap drawings created by this stage.
    MAPS_FOLDER_NAME = 'Trackway-Direction'

    # Series colors, index-aligned with the per-window-size sample list.
    COLORS = ['#AAAAAA', 'black', 'blue', 'green', 'red']

    #_______________________________________________________________________________
    def __init__(self, key, owner, **kwargs):
        """Creates a new instance of TrackwayDirectionStage."""
        super(TrackwayDirectionStage, self).__init__(
            key, owner,
            label='Trackway Direction',
            **kwargs)
        self._paths = []  # plot PDF paths accumulated for the merged report

    #===============================================================================
    #                                                                 G E T / S E T

    #_______________________________________________________________________________
    @property
    def trackHeadingData(self):
        # Heading samples produced by this owner's 'heading' stage.
        return self.owner.getStage('heading').trackwaysData

    #_______________________________________________________________________________
    @property
    def trackwayDirectionData(self):
        # Results dict {trackway.uid: {'trackway', 'samples'}} held in the owner cache.
        return self.owner.cache.get('trackwayDirectionData')

    #===============================================================================
    #                                                             P R O T E C T E D

    #_______________________________________________________________________________
    def _preAnalyze(self):
        # Reset the output cache before any sitemap is processed.
        self.owner.cache.set('trackwayDirectionData', {})

    #_______________________________________________________________________________
    def _analyzeSitemap(self, sitemap):
        """Wrap the per-trackway analysis in a SAMPLED-DIRECTION drawing session."""
        self._createDrawing(sitemap, 'SAMPLED-DIRECTION', self.MAPS_FOLDER_NAME)
        super(TrackwayDirectionStage, self)._analyzeSitemap(sitemap)
        self._saveDrawing(sitemap)

    #_______________________________________________________________________________
    def _analyzeTrackway(self, trackway, sitemap):
        # Skip trackways the heading stage produced no data for.
        if trackway.uid not in self.trackHeadingData:
            return

        bundle = self.owner.getSeriesBundle(trackway)

        # Create a list of window sizes to test trimmed to account for small trackways with fewer
        # points than a specified size
        maxWindowSize = min(8, int(0.5*float(bundle.count)))
        windowSizes = [1, 2, 4, 6, 8]
        while maxWindowSize < windowSizes[-1]:
            # NOTE(review): if maxWindowSize < 1 this pops every entry and then
            # windowSizes[-1] raises IndexError — confirm bundle.count >= 2 here.
            windowSizes.pop()

        samples = []
        for i in windowSizes:
            # For each valid window size create a sample entry
            samples.append({'size':i + 1, 'values':self._sampleTrackway(trackway, i + 1) })

        self._plotTrackwaySamples(trackway, samples)
        self._drawTrackwaySamples(sitemap, samples)
        self.trackwayDirectionData[trackway.uid] = {'trackway':trackway, 'samples':samples}

    #_______________________________________________________________________________
    def _drawTrackwaySamples(self, sitemap, samples):
        """Draw each window-size series as a polyline with circle markers on the
        sitemap drawing, one color per series."""
        drawing = sitemap.cache.get('drawing')

        for sample in samples:
            color = self.COLORS[samples.index(sample)]

            # Need at least two points to draw a line segment.
            if len(sample['values']) < 2:
                continue

            prev = sample['values'][0].position
            for value in sample['values'][1:]:
                pos = value.position
                drawing.line(
                    prev.toMayaTuple(), pos.toMayaTuple(),
                    stroke=color, stroke_width=1, stroke_opacity='0.75')
                prev = pos

            for value in sample['values']:
                pos = value.position
                drawing.circle(
                    pos.toMayaTuple(), 5,
                    stroke='none', fill=color, fill_opacity='0.75')

    #_______________________________________________________________________________
    def _plotTrackwaySamples(self, trackway, samples):
        """Plot direction (degrees) versus curve position (m) for every
        window-size series and save the figure as a temp PDF."""
        bundle = self.owner.getSeriesBundle(trackway)

        plot = MultiScatterPlot(
            title='%s Direction Sampling %s' % (trackway.name, bundle.echoStatus(asPercent=True)),
            xLabel='Trackway Curve Position (m)',
            yLabel='Direction (degrees)')

        for sample in samples:
            color = self.COLORS[samples.index(sample)]
            data = []
            for value in sample['values']:
                data.append(value.curvePoint)
            plot.addPlotSeries(data=data, color=color, line=True)

        self._paths.append(plot.save(self.getTempFilePath(extension='pdf')))

    #_______________________________________________________________________________
    def _sampleTrackway(self, trackway, windowSize):
        """
        Samples the trackway with a sliding window of ``windowSize`` entries,
        producing weighted-average heading/position samples, and returns the
        resulting list of SAMPLE_DATA_NT records.
        @type trackway: * """
        window = []
        samples = []

        entries = self.trackHeadingData[trackway.uid]['entries']
        analysisTrackway = trackway.getAnalysisPair(self.analysisSession)

        for entry in entries:
            # For each track entry in the trackways data add that to the sample window and update
            # the samples result
            window.append(entry)

            if len(window) < windowSize:
                # Don't create a sample until the sub-sample list exceeds the sample window size
                continue

            xTests = []         # X spatial position values
            yTests = []         # Y spatial position values
            angleTests = []     # Heading angle values
            curvePosTests = []  # Curve position values

            for item in window:
                # Calculate weighted averages for various properties of the current sample window
                angle = item.headingAngle
                angleTests.append(angle.valueDegrees)

                # Create a ValueUncertainty for the curve position by using the fractional
                # positional uncertainty over the spatial length of the curve
                posValue = item.track.positionValue
                posUnc = math.sqrt(posValue.xUnc**2 + posValue.yUnc**2)
                curvePos = item.track.getAnalysisPair(self.analysisSession).curvePosition
                curvePosUnc = abs(posUnc/analysisTrackway.curveLength)
                curvePosTests.append(NumericUtils.toValueUncertainty(curvePos, curvePosUnc))

                pv = item.track.positionValue
                xTests.append(pv.xValue)
                yTests.append(pv.yValue)

            directionAngleMean = NumericUtils.weightedAverage(*angleTests)
            curvePositionMean = NumericUtils.weightedAverage(*curvePosTests)
            xValue = NumericUtils.weightedAverage(*xTests)
            yValue = NumericUtils.weightedAverage(*yTests)
            position = PositionValue2D(
                x=xValue.raw, xUnc=xValue.rawUncertainty,
                y=yValue.raw, yUnc=yValue.rawUncertainty)

            # Remove the oldest sample from the to make room for a new sample in the next iteration
            window.pop(0)

            if len(samples) > 0:
                # Compare this sample to the previous one and if it does not differ
                # significantly then continue to continue to the next iteration
                last = samples[-1].directionAngle
                totalUnc = last.rawUncertainty + directionAngleMean.rawUncertainty
                deviation = abs(directionAngleMean.raw - last.raw)/totalUnc
                if deviation < 2.0:
                    continue

            samples.append(self.SAMPLE_DATA_NT(
                directionAngle=directionAngleMean,
                position=position,
                curvePoint=(
                    curvePositionMean.value, directionAngleMean.value,
                    curvePositionMean.uncertainty, directionAngleMean.uncertainty),
                curvePosition=curvePositionMean,
                track=entry.track ))

        # Pad both ends of the sample list out to the first/last tracks.
        self._extendSamplesToTrackwayStart(entries[0], samples)
        self._extendSampleToTrackwayEnd(entries[-1], samples)
        return samples

    #_______________________________________________________________________________
    def _extendSamplesToTrackwayStart(self, firstEntry, samples):
        """Prepend a sample at the first track by projecting it onto the line
        through the first two existing samples."""
        if len(samples) < 2 or samples[0].track == firstEntry.track:
            # If there aren't enough samples, or the samples already extend to the end of the
            # trackway, return the samples without adding on an end point
            return

        line = LineSegment2D(
            start=samples[0].position.clone(),
            end=samples[1].position.clone())

        firstTrack = firstEntry.track
        analysisTrack = firstTrack.getAnalysisPair(self.analysisSession)
        position = line.closestPointOnLine(firstTrack.positionValue, False)

        samples.insert(0, self.SAMPLE_DATA_NT(
            directionAngle=samples[0].directionAngle.clone(),
            position=position,
            curvePoint=(
                analysisTrack.curvePosition, samples[0].directionAngle.value,
                # NOTE(review): uncertainty taken from samples[-1] while the angle
                # comes from samples[0]; the end-extension uses a single sample for
                # both — looks like a copy-paste slip, confirm intended.
                0, samples[-1].directionAngle.uncertainty),
            curvePosition=samples[0].curvePosition.clone(),
            track=firstTrack ))

    #_______________________________________________________________________________
    def _extendSampleToTrackwayEnd(self, lastEntry, samples):
        """Append a sample at the last track by projecting it onto the line
        through the last two existing samples."""
        if len(samples) < 2 or samples[-1].track == lastEntry.track:
            # If there aren't enough samples, or the samples already extend to the end of the
            # trackway, return the samples without adding on an end point
            return

        line = LineSegment2D(
            start=samples[-2].position.clone(),
            end=samples[-1].position.clone())

        lastTrack = lastEntry.track
        analysisTrack = lastTrack.getAnalysisPair(self.analysisSession)
        position = line.closestPointOnLine(lastTrack.positionValue, False)

        ha = samples[-1].directionAngle.clone()

        samples.append(self.SAMPLE_DATA_NT(
            directionAngle=ha,
            position=position,
            curvePoint=(analysisTrack.curvePosition, ha.value, 0, ha.uncertainty),
            curvePosition=samples[-1].curvePosition.clone(),
            track=lastTrack ))

    #_______________________________________________________________________________
    def _postAnalyze(self):
        # Merge the per-trackway plots into a single report PDF.
        self.mergePdfs(self._paths, 'Trackway-Direction.pdf')
|
5,328 | 4a5185fac7d6c09daa76b5d0d5aee863028a6bce | from functools import partial
import torch
from torch import nn
from src.backbone.layers.conv_block import ConvBNAct, MBConvConfig, MBConvSE, mobilenet_v2_init
from src.backbone.mobilenet_v2 import MobileNetV2
from src.backbone.utils import load_from_zoo
class MobileNetV3(MobileNetV2):
    """MobileNetV3 backbone built on the MobileNetV2 skeleton.

    Rebuilds the last feature stage as a 1x1 ConvBNAct and replaces the
    classifier with Linear + activation; ``forward`` returns the pooled,
    classified embedding passed through the (inherited) dropout layer.
    """
    def __init__(self, residual_config, last_channel=1280, dropout=0.2, stochastic_depth=0.0,
                 block=MBConvSE, act_layer=nn.Hardswish, norm_layer=nn.BatchNorm2d):
        super(MobileNetV3, self).__init__(residual_config, dropout, stochastic_depth, block, act_layer, norm_layer)
        # Last feature stage: expand the final block's input channels by its
        # expand ratio via a 1x1 conv.
        in_ch = self.layer_infos[-1].in_ch
        out_ch = in_ch * self.layer_infos[-1].expand_ratio
        self.features[-1] = ConvBNAct(in_ch, out_ch, kernel_size=1, stride=1, norm_layer=self.norm_layer, act=self.act)
        self.classifier = nn.Sequential(
            nn.Linear(out_ch, last_channel),
            act_layer(inplace=True),
        )
        self.out_channels = last_channel

    def forward(self, x):
        # features -> global average pool -> flatten -> classifier -> dropout
        # NOTE(review): dropout is applied AFTER the classifier head here — confirm intended.
        return self.dropout(self.classifier(torch.flatten(self.avg_pool(self.features(x)), 1)))
def get_mobilenet_v3(model_name: str, pretrained=True, **kwargs) -> nn.Module:
    """Get a MobileNetV3 model.

    The changes from mobilenet_v2:
        - change input channel to 16 and last stage structure to avoid redundancy
        - change activation to nn.relu, nn.Hardsigmoid, nn.Hardswish to reduce computational cost
        - apply se unit (larger hidden_dim than efficientnet)

    :param model_name: 'mobilenet_v3_large' or 'mobilenet_v3_small'
    :param pretrained: load zoo weights when True
    :raises ValueError: for an unknown model_name (previously this fell through
        and crashed later with UnboundLocalError on residual_config)
    """
    if model_name not in ('mobilenet_v3_large', 'mobilenet_v3_small'):
        raise ValueError('unknown model_name: {!r}'.format(model_name))

    mbconfig = partial(MBConvConfig, depth_mult=1.0, width_mult=1.0, norm_layer=nn.BatchNorm2d,
                       se_act2=partial(nn.Hardsigmoid, inplace=True), se_reduction_ratio=4, se_reduce_mode='adjust')

    if model_name == 'mobilenet_v3_large':
        residual_config = [
            #        expand k  s  in   out  layers act
            mbconfig(1,    3, 1,  16,  16, 1, act=nn.ReLU, use_se=False),
            mbconfig(4,    3, 2,  16,  24, 1, act=nn.ReLU, use_se=False),
            mbconfig(3,    3, 1,  24,  24, 1, act=nn.ReLU, use_se=False),
            mbconfig(3,    5, 2,  24,  40, 1, act=nn.ReLU, use_se=True),
            mbconfig(3,    5, 1,  40,  40, 2, act=nn.ReLU, use_se=True),
            mbconfig(6,    3, 2,  40,  80, 1, act=nn.Hardswish, use_se=False),
            mbconfig(2.5,  3, 1,  80,  80, 1, act=nn.Hardswish, use_se=False),
            mbconfig(2.3,  3, 1,  80,  80, 1, act=nn.Hardswish, use_se=False),
            mbconfig(2.3,  3, 1,  80,  80, 1, act=nn.Hardswish, use_se=False),
            mbconfig(6,    3, 1,  80, 112, 1, act=nn.Hardswish, use_se=True),
            mbconfig(6,    3, 1, 112, 112, 1, act=nn.Hardswish, use_se=True),
            mbconfig(6,    5, 2, 112, 160, 1, act=nn.Hardswish, use_se=True),
            mbconfig(6,    5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),
            mbconfig(6,    5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),
        ]
        last_channel = 1280
    else:  # mobilenet_v3_small
        residual_config = [
            #        expand k  s  in  out layers act
            mbconfig(1,    3, 2, 16, 16, 1, act=nn.ReLU, use_se=True),
            mbconfig(4.5,  3, 2, 16, 24, 1, act=nn.ReLU, use_se=False),
            mbconfig(3.5,  3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),
            mbconfig(4,    5, 2, 24, 40, 1, act=nn.Hardswish, use_se=True),
            mbconfig(6,    5, 1, 40, 40, 1, act=nn.Hardswish, use_se=True),
            mbconfig(6,    5, 1, 40, 40, 1, act=nn.Hardswish, use_se=True),
            mbconfig(3,    5, 1, 40, 48, 1, act=nn.Hardswish, use_se=True),
            mbconfig(3,    5, 1, 48, 48, 1, act=nn.Hardswish, use_se=True),
            mbconfig(6,    5, 2, 48, 96, 1, act=nn.Hardswish, use_se=True),
            mbconfig(6,    5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True),
            mbconfig(6,    5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True),
        ]
        last_channel = 1024

    model = MobileNetV3(residual_config, last_channel=last_channel, block=MBConvSE, act_layer=nn.Hardswish, norm_layer=nn.BatchNorm2d)
    mobilenet_v2_init(model)

    if pretrained:
        load_from_zoo(model, model_name)

    return model
5,329 | c02af2ecd980da4ceff133c13072ad7c6b724041 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hilma import Mesh, loadPly, savePly
# Load a PLY mesh, extract its geometry into plain Python tuples, and write
# the (unchanged) mesh back out.
mesh = Mesh()
loadPly("head.ply", mesh)

verts = [(v.x, v.y, v.z) for v in mesh.getVertices()]
faces = [(t.x, t.y, t.z) for t in mesh.getTrianglesIndices()]
edges = [(e.x, e.y) for e in mesh.getLinesIndices()]
uvs = []

savePly("out.ply", mesh, False)
|
5,330 | 666e839b4d66dc4eede4e7325bfd4f4b801fd47d | from django.urls import path, re_path
from app.views import UploaderAPIView, TeacherListAPIView, TeacherDetailAPIView
app_name = "directory"  # URL namespace used by reverse()/{% url %} lookups

urlpatterns = [
    # The regex routes make the trailing slash optional.
    re_path(r"^directory/uploader/?$", UploaderAPIView.as_view(), name="teacher_uploader"),
    re_path(r"^directory/teachers/?$", TeacherListAPIView.as_view(), name="teacher_list"),
    path("directory/teachers/<int:pk>/", TeacherDetailAPIView.as_view(), name="teacher_detail"),
]
|
5,331 | 94264e121bb31a08cbd9766be1ff16173d2838ed | import pandas
class _RegressionModelTable(object):
    """Bookkeeping table mapping label names to candidate regression models,
    their evaluation scores, and the finally-selected fitted model per label.

    When more than one model is supplied, ``function_to_evaluate_model`` and
    ``function_to_select_model`` are mandatory (enforced in _check_model_inputs).
    """

    def __init__(self, regression_models, function_to_evaluate_model=None, function_to_select_model=None):
        # Accept a single model or a list of models.
        if not isinstance(regression_models, list):
            regression_models = [regression_models]
        self._check_model_inputs(regression_models, function_to_evaluate_model, function_to_select_model)
        self._function_to_evaluate_model = function_to_evaluate_model
        self._function_to_select_model = function_to_select_model
        self._regression_model_list = regression_models
        self._table_evaluation_dict = {}      # label -> [score per model]
        self._all_fit_models_table_dict = {}  # label -> [fitted model per candidate]
        self._fit_model_table_dict = {}       # label -> selected fitted model

    @property
    def pandas_table(self):
        """Evaluation scores as a DataFrame: rows are labels, columns are models."""
        model_names = [model.__str__() for model in self._regression_model_list]
        df = pandas.DataFrame(self._table_evaluation_dict, index=model_names)
        df = df.transpose()
        return df

    @classmethod
    def _check_model_inputs(cls, regression_models, function_to_evaluate_model, function_to_select_model):
        # Multiple candidates require both an evaluator and a selector.
        if len(regression_models) > 1:
            if function_to_select_model is None or function_to_evaluate_model is None:
                raise ValueError("Functions to evaluate and select regression models must be specified "
                                 "in case of regression model list.")

    def initialize_tables(self, label_names):
        """Reset the evaluation/selection tables for the given labels."""
        n_models = len(self._regression_model_list)
        self._table_evaluation_dict = {LABEL_NAME: [None]*n_models for LABEL_NAME in label_names}
        self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in label_names}

    def evaluate_label_models(self, x, y, label_name):
        """Fit/evaluate every candidate model on (x, y) and record the
        fitted models and their scores under ``label_name``."""
        label_tuple_list = list(map(lambda model: self.evaluate_model(model, x, y), self._regression_model_list))
        self._all_fit_models_table_dict[label_name] = [T[0] for T in label_tuple_list]
        self._table_evaluation_dict[label_name] = [T[1] for T in label_tuple_list]

    def evaluate_model(self, model, x, y):
        """Delegate to the user-supplied evaluator; returns (fitted_model, score)."""
        model, value = self._function_to_evaluate_model(model, x, y)
        return model, value

    def return_selected_label_model(self, label_name):
        """Pick the best fitted model for ``label_name`` (trivial when there is
        only one candidate); raises ValueError when scores are missing."""
        if len(self._regression_model_list) == 1:
            return self._all_fit_models_table_dict[label_name][0]
        if self._is_any_none_in_list(self._table_evaluation_dict[label_name]):
            raise ValueError("Some models were not evaluated")
        return self._function_to_select_model(self._all_fit_models_table_dict[label_name], self._table_evaluation_dict[label_name])

    @staticmethod
    def _is_any_none_in_list(list_):
        # True when at least one entry is None.
        return any(list(map(lambda x: x is None, list_)))

    def set_label_regression_model(self, model, label_name):
        self._fit_model_table_dict[label_name] = model

    def return_label_regression_model(self, label_name):
        return self._fit_model_table_dict[label_name]

    @classmethod
    def _predict_func(cls, model, x_instance, n_samples):
        # Thin prediction wrapper; assumes the model exposes predict(x, n_samples).
        return model.predict(x_instance, n_samples)
|
5,332 | 7f58179efecd5a0d691a5c6d83b808f2cd2fcba3 | from RestClient4py.client import RestClient
from API_Wrap import util
import os
import json
kakao_native_app_key, kakao_rest_api_key, kakao_javascript_key, kakao_admin_key = util.kakao_auth()
client = RestClient()
client.set_header("Authorization", "KakaoAK {}".format(kakao_rest_api_key))
client.set_header("Accept", "*/*")
"""
https://developers.kakao.com/docs/restapi/translation
"""
# Language codes accepted by the Kakao translation endpoint. Shared by the
# src_lang and target_lang checks so the two lists cannot drift apart (the
# original duplicated the list and the error table verbatim).
_TRANSLATION_LANG_CODES = ("kr", "en", "jp", "cn", "vi", "id", "ar", "bn", "de", "es", "fr", "hi", "it", "ms", "nl",
                           "pt", "ru", "th", "tr")

# Code table appended to the language-validation errors; the adjacent literals
# concatenate with no separators, exactly as in the original messages.
_TRANSLATION_LANG_TABLE = (
    "--------------------------------------------------------------"
    "Number | Language Code | Language"
    "1 | kr | 한국어"
    "2 | en | 영어"
    "3 | jp | 일본어"
    "4 | cn | 중국어"
    "5 | vi | 베트남어"
    "6 | id | 인도네시아어"
    "7 | ar | 아랍어"
    "8 | bn | 뱅갈어"
    "9 | de | 독일어"
    "10 | es | 스페인어"
    "11 | fr | 프랑스어"
    "12 | hi | 힌디어"
    "13 | it | 이탈리아어"
    "14 | ms | 말레이시아어"
    "15 | nl | 네덜란드어"
    "16 | pt | 포르투갈어"
    "17 | ru | 러시아어"
    "18 | th | 태국어"
    "19 | tr | 터키어")


def _check_lang(value, param_name):
    """Raise AttributeError unless ``value`` is a supported language code string.

    AttributeError (not ValueError) is kept for backward compatibility with the
    original validation behavior.
    """
    if type(value) != str:
        raise AttributeError("[ERROR] {} parameter should be string type".format(param_name))
    elif value not in _TRANSLATION_LANG_CODES:
        raise AttributeError("[ERROR] {} parameter should be one of below language codes".format(param_name)
                             + _TRANSLATION_LANG_TABLE)


def translation(query, src_lang, target_lang):
    """Translate ``query`` from ``src_lang`` to ``target_lang`` via the Kakao
    translation REST API.

    :param query: text to translate, at most 5,000 characters
    :param src_lang: source language code (see _TRANSLATION_LANG_CODES)
    :param target_lang: target language code (see _TRANSLATION_LANG_CODES)
    :raises AttributeError: on any invalid parameter (original behavior)
    :return: the HTTP response from client.post
    """
    if type(query) != str:
        raise AttributeError("[ERROR] query parameter should be string type")
    elif len(query) > 5000:
        raise AttributeError("[ERROR] Maximum length of query parameter should be same or less than 5,000 chars")
    _check_lang(src_lang, "src_lang")
    _check_lang(target_lang, "target_lang")
    postData = {
        "query": query,
        "src_lang": src_lang,
        "target_lang": target_lang
    }
    return client.post("https://kapi.kakao.com/v1/translation/translate", data=postData)
5,333 | f494dc99febfad99b371d72f542556a9024bc27d | #
# Copyright (c) 2018-2020 by Kristoffer Paulsson <kristoffer.paulsson@talenten.se>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
from unittest import TestCase
from angelos.document.statements import Statement, Verified, Trusted, Revoked
class TestStatement(TestCase):
    """Unit tests for the Statement document type."""
    def setUp(self):
        self.instance = Statement()

    def tearDown(self):
        del self.instance

    def test_apply_rules(self):
        # A freshly constructed Statement must satisfy its own policy rules.
        self.assertTrue(self.instance.apply_rules())
class TestVerified(TestCase):
    """Unit tests for the Verified statement type."""
    def setUp(self):
        self.instance = Verified()

    def tearDown(self):
        del self.instance

    def test_apply_rules(self):
        # A freshly constructed Verified must satisfy its own policy rules.
        self.assertTrue(self.instance.apply_rules())
class TestTrusted(TestCase):
    """Unit tests for the Trusted statement type."""
    def setUp(self):
        self.instance = Trusted()

    def tearDown(self):
        del self.instance

    def test_apply_rules(self):
        # A freshly constructed Trusted must satisfy its own policy rules.
        self.assertTrue(self.instance.apply_rules())
class TestRevoked(TestCase):
    """Unit tests for the Revoked statement type."""
    def setUp(self):
        self.instance = Revoked()

    def tearDown(self):
        del self.instance

    def test_apply_rules(self):
        # A freshly constructed Revoked must satisfy its own policy rules.
        self.assertTrue(self.instance.apply_rules())
5,334 | fc4cf800c663abf20bfba7fcc1032e09a992641b | __author__ = 'asistente'
#from __future__ import absolute_import
from unittest import TestCase
from selenium import webdriver
from selenium.webdriver.common.by import By
class FunctionalTest(TestCase):
    """Selenium end-to-end tests against the BuscoAyuda site on localhost:8000.

    Fixes over the original: the login sequence duplicated in test_login and
    test_Editar is extracted into _login_as, and the Windows paths use escaped /
    raw strings (the original 'C:\\chromedriver\\developer.jpg' relied on the
    invalid escape sequences '\\c' and '\\d' passing through literally, which is
    deprecated and will become a SyntaxError).
    """

    CHROMEDRIVER_PATH = 'C:\\chromedriver\\chromedriver.exe'
    IMAGE_PATH = r'C:\chromedriver\developer.jpg'

    def setUp(self):
        self.browser = webdriver.Chrome(self.CHROMEDRIVER_PATH)
        self.browser.implicitly_wait(2)

    def tearDown(self):
        self.browser.quit()

    def _login_as(self, username, password):
        """Open the home page and submit the login form with the credentials."""
        self.browser.get('http://localhost:8000')
        self.browser.find_element_by_id('id_login').click()
        self.browser.find_element_by_id('username').send_keys(username)
        self.browser.find_element_by_id('password').send_keys(password)
        self.browser.find_element_by_id('id_ingresar').click()
        self.browser.implicitly_wait(3)

    def test_title(self):
        self.browser.get('http://localhost:8000')
        self.assertIn('BuscoAyuda', self.browser.title)

    def test_registro(self):
        """Register a new provider and check it shows up in the listing."""
        self.browser.get('http://localhost:8000')
        self.browser.find_element_by_id('id_register').click()
        self.browser.find_element_by_id('id_nombre').send_keys('Rafael')
        self.browser.find_element_by_id('id_apellidos').send_keys('Medrano')
        self.browser.find_element_by_id('id_aniosExperiencia').send_keys('7')
        self.browser.find_element_by_xpath(
            "//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']").click()
        self.browser.find_element_by_id('id_telefono').send_keys('3135555555')
        self.browser.find_element_by_id('id_correo').send_keys('re.medrano@uniandes.edu.co')
        self.browser.find_element_by_id('id_imagen').send_keys(self.IMAGE_PATH)
        self.browser.find_element_by_id('id_username').send_keys('re.medrano')
        self.browser.find_element_by_id('id_password').send_keys('prueba123')
        self.browser.find_element_by_id('id_grabar').click()
        self.browser.implicitly_wait(3)
        span = self.browser.find_element(By.XPATH, '//span[text()="Rafael Medrano"]')
        self.assertIn('Rafael Medrano', span.text)

    def test_verDetalle(self):
        """Clicking a provider in the listing opens its detail view."""
        self.browser.get('http://localhost:8000')
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        span.click()
        self.browser.implicitly_wait(3)
        h2 = self.browser.find_element(By.XPATH, '//h2[text()="Betzy Editado Montanez Editado"]')
        self.assertIn('Betzy Editado Montanez Editado', h2.text)

    def test_login(self):
        self._login_as('ba.montanez', 'prueba123')
        span = self.browser.find_element(By.XPATH, '//span[text()=" Logout"]')
        self.assertIn('Logout', span.text)

    def test_Editar(self):
        """Log in and edit the profile, then verify the updated name is shown."""
        self._login_as('ba.montanez', 'prueba123')
        self.browser.find_element_by_id('id_editar').click()
        nombre = self.browser.find_element_by_id('id_nombre')
        nombre.clear()
        nombre.send_keys('Betzy Editado')
        apellidos = self.browser.find_element_by_id('id_apellidos')
        apellidos.clear()
        apellidos.send_keys('Montanez Editado')
        experiencia = self.browser.find_element_by_id('id_aniosExperiencia')
        experiencia.clear()
        experiencia.send_keys('10')
        self.browser.find_element_by_xpath(
            "//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']").click()
        telefono = self.browser.find_element_by_id('id_telefono')
        telefono.clear()
        telefono.send_keys('313555666')
        correo = self.browser.find_element_by_id('id_correo')
        correo.clear()
        correo.send_keys('ba.montanez01@uniandes.edu.co')
        self.browser.find_element_by_id('id_imagen').send_keys(self.IMAGE_PATH)
        nombreUsuario = self.browser.find_element_by_id('id_username')
        nombreUsuario.clear()
        nombreUsuario.send_keys('ba.montanez2')
        clave = self.browser.find_element_by_id('id_password')
        clave.clear()
        clave.send_keys('prueba1234')
        self.browser.find_element_by_id('id_editar').click()
        self.browser.implicitly_wait(3)
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        self.assertIn('Betzy Editado Montanez Editado', span.text)

    def test_Comentar(self):
        """Post a comment on a provider detail page and check it is displayed."""
        self.browser.get('http://localhost:8000')
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        span.click()
        self.browser.implicitly_wait(3)
        # Implicitly asserts the detail page loaded (raises if the h2 is absent).
        self.browser.find_element(By.XPATH, '//h2[text()="Betzy Editado Montanez Editado"]')
        self.browser.find_element_by_id('correo').send_keys('prueba@prueba.com')
        self.browser.find_element_by_id('comentario').send_keys('Comentario Prueba')
        self.browser.find_element_by_id('id_comentar').click()
        self.browser.implicitly_wait(6)
        span = self.browser.find_element(By.XPATH, '//p[text()="Comentario Prueba"]')
        self.assertIn('Comentario Prueba', span.text)

    def test_listado(self):
        self.browser.get('http://localhost:8000')
        self.browser.implicitly_wait(3)
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        self.assertIn('Betzy Editado Montanez Editado', span.text)
        self.browser.implicitly_wait(3)
        span = self.browser.find_element(By.XPATH, '//span[text()="pepito perez"]')
        self.assertIn('pepito perez', span.text)

    def test_buscar(self):
        self.browser.get('http://localhost:8000')
        self.browser.find_element_by_id('buscar').send_keys('Betzy Editado Montanez Editado')
        self.browser.find_element_by_id('id_buscar').click()
        self.browser.implicitly_wait(6)
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        self.assertIn('Betzy Editado Montanez Editado', span.text)
5,335 | 34a523b31e5567d2a8aec95c5820792d1ae80892 | from django.db import models
# Create your models here.
from user.models import User
class Post(models.Model):
    """Blog post record; the body text lives in the related Content row."""
    class Meta:
        db_table = 'bl_post'
    id = models.AutoField(primary_key=True)
    title = models.CharField(max_length=200, null=False)
    pubdate = models.DateTimeField(null=False)
    # Author
    # NOTE(review): no on_delete argument — Django < 2.0 signature; required in 2.0+.
    author = models.ForeignKey(User)
    # Body text: stored in the Content model (see its OneToOneField back to Post).
    def __repr__(self):
        # NOTE(review): self.content resolves through the reverse one-to-one
        # accessor from Content; this raises if no Content row exists — confirm.
        return "<Post {} {} {} {} [{}] >".format(self.id, self.title,self.author,self.content,self.author.id)
    __str__ = __repr__
class Content(models.Model):
    """Post body text, linked one-to-one with Post."""
    class Meta:
        db_table = 'bl_content'
    # No explicit id needed: Django adds the automatic primary key.
    post = models.OneToOneField(Post, to_field='id')  # db column: post_id
    content = models.TextField(null=False)
    def __repr__(self):
        # Show only the first 40 characters of the body.
        return "<Content {} {} {} >".format(self.id,self.post.id, self.content[:40])
    __str__ = __repr__
|
5,336 | 548eebb9628374df320021c714454e05d2c606c0 | from __future__ import absolute_import, print_function, division, unicode_literals
import tensorflow as tf
def get_encoder(conf):
    """Build the encoder selected by ``conf.encoder``.

    Supported values: 'linear' (two-layer MLP), 'rand_linear' (MLP with
    Gaussian noise), and anything starting with 'cifar' (ResNet backbone,
    which also reads conf.k and conf.linear).

    :raises ValueError: for an unrecognized encoder name (previously the
        function silently returned None, deferring the failure to the caller)
    """
    if conf.encoder == 'linear':
        model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2),
                                     tf.keras.layers.ReLU(),
                                     tf.keras.layers.Dense(conf.d_model)])
        return model
    if conf.encoder == 'rand_linear':
        return get_stochastic_linear(conf)
    if conf.encoder[:5] == 'cifar':
        return ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k=conf.k, linear=conf.linear)
    raise ValueError('unknown encoder type: {!r}'.format(conf.encoder))
def get_stochastic_linear(conf):
    """Two-layer MLP of width conf.d_model*2 -> conf.d_model with Gaussian
    noise (stddev 0.3) injected before each Dense layer."""
    model = tf.keras.Sequential([tf.keras.layers.GaussianNoise(.3),
                                 tf.keras.layers.Dense(conf.d_model * 2),
                                 tf.keras.layers.ReLU(),
                                 tf.keras.layers.GaussianNoise(.3),
                                 tf.keras.layers.Dense(conf.d_model)])
    return model
# noinspection PyAbstractClass
# noinspection PyAbstractClass
class BasicBlock(tf.keras.layers.Layer):
    """Two-conv residual block with a projection shortcut whenever the stride
    or channel count changes."""

    EXPANSION = 1  # basic blocks do not expand their output channels

    def __init__(self, channels, filters, strides=1):
        super().__init__()
        self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3, strides=strides, padding='same',
                                             use_bias=False)
        self.bn_1 = tf.keras.layers.BatchNormalization()
        self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3, strides=1, padding='same',
                                             use_bias=False)
        self.bn_2 = tf.keras.layers.BatchNormalization()
        # Identity shortcut by default; 1x1 conv + BN projection on shape change.
        self.shortcut = tf.keras.Sequential()
        if strides != 1 or channels != (filters * self.EXPANSION):
            self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION * filters, kernel_size=1, strides=strides,
                                                     use_bias=False))
            self.shortcut.add(tf.keras.layers.BatchNormalization())

    def call(self, inputs, training=True, mask=None):
        # conv-bn-relu -> conv-bn, add the shortcut, then a final relu.
        x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training), training=training))
        x = self.bn_2(self.conv_2(x, training=training), training=training)
        x += self.shortcut(inputs, training=training)
        return tf.nn.relu(x)
# noinspection PyAbstractClass
# noinspection PyAbstractClass
class ResNet(tf.keras.Model):
    """ResNet feature extractor with a 3x3/stride-1 stem (CIFAR-style input)
    and an optional Dense projection head to ``low_dim`` features."""

    def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1, k=10, linear=True):
        super().__init__()
        self.channels = 64       # running channel count consumed by _make_layer
        self.pool_len = pool_len
        self.k = k               # stored only; not referenced inside this class
        self.linear = linear
        self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same', use_bias=False)
        self.bn_1 = tf.keras.layers.BatchNormalization()
        self.base = int(64 * width)
        self.residual = tf.keras.Sequential([
            self._make_layer(block, self.base, num_blocks[0], stride=1),
            self._make_layer(block, self.base * 2, num_blocks[1], stride=2),
            self._make_layer(block, self.base * 4, num_blocks[2], stride=2),
            self._make_layer(block, self.base * 8, num_blocks[3], stride=2)
        ])
        if self.linear:
            self.fc = tf.keras.layers.Dense(low_dim)
        self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len, data_format='channels_last')

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block in a stage may downsample; the rest use stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.channels, planes, stride))
            self.channels = planes * block.EXPANSION
        return tf.keras.Sequential(layers)

    def call(self, inputs, training=True, mask=None):
        x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training), training=training))
        x = self.residual(x, training=training)
        x = self.pool(x)
        batch_size = tf.shape(x)[0]
        x = tf.reshape(x, [batch_size, -1])  # flatten per sample
        if self.linear:
            x = self.fc(x, training=training)
        return x
def test_resnet():
    """Smoke test: build a ResNet-34-style model and run one dummy batch."""
    model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)
    a = tf.ones([7, 32, 32, 3])  # batch of 7 CIFAR-sized RGB images
    b = model(a)
    print(b)  # expected embedding shape: (7, 128)

if __name__ == '__main__':
    test_resnet()
|
5,337 | ed5ba72443b70c84941af3d112e0246cb3ae97d9 | ####################################
## Readable code versus less code ##
####################################
import threading
from web_server.general_api import general_api as api
# Module-level logger for this timer helper, obtained from the project API.
# NOTE(review): double-underscore attribute access on a module is not
# name-mangled, but confirm `__get_logger` is the intended public accessor.
logger = api.__get_logger('ConnTimeout.run')
class ConnTimeout(object):
    """Self-rescheduling timer that calls `function` against a rotating
    pool of servers (api.MN_RKEY0 .. MN_RKEY<servers>), one attempt per tick.

    Args:
        timeout: seconds between attempts.
        function: callable invoked as function(*args, server_key).
        servers: highest server index to rotate through.
        args: extra positional arguments prepended on every call.
        kwargs: kept for interface compatibility; never forwarded.
    """
    def __init__(self, timeout, function, servers=5, args=None, kwargs=None):
        self.timeout = timeout
        self.timer = None  # created lazily by start()/_handler()
        self.count = 0  # index of the server to try next
        self.f = function
        self.servers = servers
        # FIX: the original used mutable defaults (args=[], kwargs=[]),
        # which are shared across all instances; kwargs was even a list
        # although a mapping is expected.  Use None sentinels instead.
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        super(ConnTimeout, self).__init__()

    def start(self):
        """Arm the timer; _handler fires after `timeout` seconds."""
        return self._start()

    def _start(self):
        self.timer = threading.Timer(self.timeout, self._handler)
        self.timer.start()

    def is_alive(self):
        """Return True/False when a timer exists, None if never started."""
        return self._is_alive()

    def _is_alive(self):
        if self.timer:
            return self.timer.is_alive()
        else:
            return self.timer

    def _handler(self):
        """Timer callback: advance the server counter, re-arm, then try once.

        NOTE(review): because the comparison is `<`, `count` cycles through
        servers + 1 values (0..servers) — confirm that is intended.
        """
        if self.count < self.servers:
            self.count += 1
        else:
            self.count = 0
        # Recursive re-arm: each tick schedules the next one.
        self.timer = threading.Timer(self.timeout, self._handler)
        self.timer.start()
        call_args = self.args[:]  # copy so self.args is never mutated
        call_args.append(api.MN_RKEY + str(self.count))
        logger.info(" trying to connect to " + api.MN_RKEY + str(self.count))
        self.f(*call_args)
        del call_args[:]

    def stop(self):
        """Cancel the pending timer.  Returns True when one was cancelled.

        FIX: guard against `self.timer` being None — the original raised
        AttributeError when stop() was called before start().
        """
        if self.timer and self.timer.is_alive():
            self.timer.cancel()
            logger.info("timer killed...")
            return True
        return False
## other approach, didn't like that has to keep the main thread running by force
## using while inside main
## http://code.activestate.com/recipes/496800-event-scheduling-threadingtimer/
"""
import thread
import threading
class Operation(threading._Timer):
def __init__(self, *args, **kwargs):
threading._Timer.__init__(self, *args, **kwargs)
def run(self):
while True:
self.finished.clear()
self.finished.wait(self.interval)
if not self.finished.isSet():
self.function(*self.args, **self.kwargs)
else:
return
self.finished.set()
class Manager(object):
def add_operation(self, operation, interval, args=[], kwargs={}):
self.op = Operation(interval, operation, args, kwargs)
thread.start_new_thread(self.op.run, ())
def cancel(self):
if self.op:
self.op.cancel()
if __name__ == '__main__':
# Print "Hello World!" every 5 seconds
import time
def hello():
print "Hello World!"
timer = Manager()
timer.add_operation(hello, 5)
while True:
time.sleep(.1)
"""
|
5,338 | f23b002ec0eefa376890e255b1ac0137e3a1c989 | from django.urls import path
from player.views import (
MusicListView, MusicPlayView, MusicPauseView, MusicUnPauseView,
NextSongView, PreviousSongView
)
# Route table for the music player app: listing, playback control and
# track-navigation endpoints.
# NOTE(review): the handlers are PascalCase, which usually indicates
# class-based views; if they are CBVs they must be registered via
# `View.as_view()` — confirm these are plain view functions.
urlpatterns = [
    path('list/', MusicListView, name="music_list"),
    path('play/<str:name>/', MusicPlayView, name="play_music"),
    path('pause/', MusicPauseView, name="pause_music"),
    path('unpause/', MusicUnPauseView, name="unpause_music"),
    path('nextsong/', NextSongView, name="next_song"),
    path('prevsong/', PreviousSongView, name="previous_song"),
]
|
5,339 | b4a96d5df56acd545e9919e202c462ee710a0339 | #print pathToConnectionsList(['A','C','B','D','E'])
#['EA','CB','AC','BD', 'DE']
#print independantPathPieces()
#print pathToConnectionsList(pathGenerator())
#print geneFormatToPathSegmentsMini(['CD', 'AB', 'BE', 'EC']) #DA
#print independantPathPieces(['EAC', 'CBD', 'ACB', 'BDE', 'DEA'])
#print greedyCrossover(['EC', 'CD', 'AB', 'BE','DF','FA'],['EC', 'XX', 'XX', 'XX','XX','xx'], 3)
#['ABECD', '', '__', '__']
# def joinPathBits(pathBits):
# index = 0
# for index in range(len(pathBits)):
# # figure out nex and prev point
# while matchFound:
# matchFound = False
# next = pathBits[index][-1]
# prev = pathBits[index][0]
# while True
# index2 = 1
# if next == pathBits[index2][0] and next != '_':
# join one way
# matchFound = True
# elif prev == pathBits[index2][-1] and prev != '_':
# join another
# matchFound = True
# def findpaths(segments):
# path_starts = {} # path_start:path
# path_ends = {} # path_end:path
# starts = {} # start:end of a segment
# #path_prefixes = []
# for segment in segments:
# starts[segment[0]] = segment[1]
# for start in starts:
# next = segment[start]
# if next in starts: # a longer path's been found
def writeToGene(toOrFromPos, whichCodon, whichGene, whatToWrite):
    """Write one character into the 'to' or 'from' half of a codon.

    Returns the gene unchanged when the slot already holds the value,
    a NEW gene list when the slot was blank ('_'), or an error string
    when the slot is occupied by a conflicting value.
    """
    if toOrFromPos == 'to':
        pos = 1
    if toOrFromPos == 'from':
        pos = 0
    # Sentinel codon 88 means "nowhere": leave the gene untouched.
    # (Original author's hack: relies on '_' not being a location code and
    # on the problem having fewer than 88 cities.)
    if whichCodon == 88:
        return whichGene
    current = whichGene[whichCodon][pos]
    if current == whatToWrite:
        # Slot already holds the requested value: nothing to do.
        return whichGene
    if current != "_":
        # Occupied by a different value — signal the inconsistency.
        return "ERROR, NON CONSISTANT VALUE ALREADY IN POS."
    codon_chars = list(whichGene[whichCodon])
    codon_chars[pos] = whatToWrite
    # Build a new list rather than mutating the caller's gene in place.
    return whichGene[:whichCodon] + ["".join(codon_chars)] + whichGene[whichCodon + 1:]
#print writeToGene('to',2,['__','__','__','__','__','__','xx','xx'],'o')
#writeToGene('to',3,['','','','','','','',''],"x")
def tspGeneTemplater(gene,locCodes):
    """Propagate known edges of a partial TSP gene into a fillable template.

    For every filled codon (from,to) at position i, writes the reciprocal
    entries: the codon at index(to) gets locCodes[i] in its 'from' half and
    the codon at index(from) gets locCodes[i] in its 'to' half.
    """
    # assumes that it gets a valid gene which was constructed by common elements in two parents and an additional random element from on parent.
    gene = codeBlankSpots(gene)
    # NOTE(review): this is an alias, not a copy — it only works because
    # writeToGene returns a new list instead of mutating in place.
    genecopy = gene
    charset = theCharset()
    for codonLoc in range(len(gene)):
        codon = gene[codonLoc]
        if codon !='__':
            whereFrom = codon[0]
            whereTo = codon[1]
            current = locCodes[codonLoc]
            whereFromIndex = charset.index(whereFrom)
            whereToIndex = charset.index(whereTo)
            current = locCodes[codonLoc]  # redundant reassignment kept from original
            genecopy = writeToGene('from',whereToIndex,genecopy,current)
            genecopy = writeToGene('to',whereFromIndex,genecopy,current)
    #at this point we should have a template!!!!
    # that we can fill in.
    return genecopy
def templateToGene(gene):
    """(Unfinished) fill a fully-templated gene into a complete valid gene.

    WARNING(review): this function appears to be an abandoned draft:
      * it returns `connectionList`, which is never defined -> NameError;
      * the `while cont:` loop never clears `cont` and re-writes the same
        'from' slot every iteration;
      * `anEmptyToSpot`/`currentLoc` are unbound when no codon has an
        empty 'to' half.
    Kept byte-for-byte for reference; see completeTSPGene for the working
    variant.
    """
    # GETS A FULLY TEMPLATED GENE
    # MUST NOW FILL UP THE CHARS TO MAKE A VALID GENE! WHAT A DAUNTING TASK!!
    # FIRST WE GET THE CHARSETS WE ARE WORKING WITH
    # ONE FOR TO AND ONE FOR FROM POSITIONS
    #init
    chars = theCharset()[0:len(gene)]
    toChars = chars
    fromChars = chars
    # remove already existing chars
    for codon in gene:
        if codon[0] != "_": fromChars = fromChars.replace(codon[0],'',1)
        if codon[1] != "_":
            toChars = toChars.replace(codon[1],'',1)
        else:
            anEmptyToSpot = gene.index(codon)
            currentLoc = chars[anEmptyToSpot]
    # now we have a list of to and from chars that need to be placed in a valid configuration.
    # choose a blank spot to start from (anEmptyTospot)
    gene = writeToGene('from',anEmptyToSpot,gene,currentLoc)
    cont = True
    while cont:
        toLoc = random.choice(toChars)
        toChars = toChars.replace(toLoc,'',1)
        gene = writeToGene('from',anEmptyToSpot,gene,currentLoc)
        currentLoc = toLoc
    writeToGene('to',2,['__','__','x_','__','__','__','xx','xx'],'o')
    return connectionList
def geneFormatToPathSegments(gene):
    """Expand a gene into 3-char edge segments: 'from' + location + 'to'.

    Codons equal to '__' (fully blank) are skipped; partially blank
    codons are still emitted with their '_' placeholders.
    """
    charset = theCharset()
    segments = []
    for index, codon in enumerate(gene):
        location = charset[index]
        if codon == '__':
            continue
        segments.append(str(codon[0]) + str(location) + str(codon[1]))
    return segments
def indPathPieces(segmentsList):
    """(Unfinished) pair segments whose middle char matches another's tail.

    WARNING(review): computes `newSegment` but never stores or returns it,
    so the function always returns None.  Also note `thisSegment[1:2]` is a
    1-char string while `anotherSegment[-2:]` is 2 chars for any segment of
    length >= 2, so the comparison can only match 1-char segments.
    Superseded by independantPathPieces.
    """
    for thisSegment in segmentsList:
        for anotherSegment in segmentsList:
            if thisSegment[1:2] == anotherSegment[-2:]:
                newSegment = thisSegment
def independantPathPieces(path_segments=None):
    """Merge overlapping path segments into a minimal set of path strings.

    Each segment is a string of location codes; a segment ending with some
    code is concatenated with a segment starting with that code.  '_' is a
    tombstone marking a segment already merged into another, so it never
    participates in further joins.

    FIX: the original overwrote `path_segments` with a hard-coded sample
    list (['EA','CB','AC','BD','DE']), ignoring the caller's argument, and
    printed Python-2 debug output on every iteration.  The parameter is now
    honoured (default: empty list) and the debug prints are removed.
    """
    if path_segments is None:
        path_segments = []
    # Work on a copy so the caller's list is not mutated in place.
    segments = list(path_segments)
    index = 0
    while index < len(segments):
        nxt = segments[index][-1]
        for j in range(len(segments)):
            prev = segments[j][0]
            if nxt == prev and nxt != '_':
                # Append the matching segment (minus its duplicated head)
                # and tombstone it.
                segments[index] = segments[index] + segments[j][1:]
                segments[j] = '_'
                nxt = segments[index][-1]
        index += 1
    # Drop the tombstones.
    return [s for s in segments if s != '_']
def makeTSPGeneX(numLocations):
    """Create an empty TSP "connection list" gene for `numLocations` cities.

    NOTE(review): Python 2 code (print statement / quit()), kept as-is.
    `path`, `locIndex` and `locValue` are computed but never used — the
    function effectively just returns `numLocations` empty strings.
    """
    # this time we are going to do things smarter.
    if numLocations < 3 or numLocations > 94:
        print "MAX LOCATIONS IS 94, MIN LOCATIONS IS 3."
        quit()
    # intialize
    locationsCharset = theCharset()[0:numLocations]
    path = pathMaker(numLocations)
    #fromLocations = locationsCharset
    locIndex = dict()
    locValue = dict()
    # BUILD THE INDEX AND VALUE DICTS
    for i in range(numLocations):
        locIndex[locationsCharset[i]] = i
        locValue[i] = locationsCharset[i]
    connectionList = ["" for x in range(numLocations)]
    return connectionList
def completeTSPGene(pGene):
    """Fill the remaining blanks of a partial TSP gene into a full tour.

    Starts the walk at a codon whose 'to' half is empty, then repeatedly
    links codons pairwise, picking random unused 'to' locations.

    NOTE(review): Python 2 code (print statements), kept byte-for-byte.
    `anEmptyToSpot`/`currentLoc` are unbound when no codon has an empty
    'to' half — confirm callers guarantee at least one blank.
    """
    # this time we are going to do things smarter.
    numLocations = len(pGene)
    # intialize
    locationsCharset = theCharset()[0:numLocations]
    toLocations = locationsCharset
    fromLocations = locationsCharset
    locIndex = dict()
    locValue = dict()
    # BUILD THE INDEX AND VALUE DICTS
    for i in range(numLocations):
        locIndex[locationsCharset[i]] = i
        locValue[i] = locationsCharset[i]
    #connectionList = ["__" for x in range(numLocations)]
    # remove existing options from charsrets.
    for codon in pGene:
        if codon[0] != "_": fromLocations = fromLocations.replace(codon[0],'',1)
        if codon[1] != "_":
            toLocations = toLocations.replace(codon[1],'',1)
        else:
            # grab details about a codon where the to location is empty.
            anEmptyToSpot = pGene.index(codon)
            currentLoc = locationsCharset[anEmptyToSpot]
    # we define an empty fromLoc, we have a currentLoc, and we get a toLoc!
    fromLoc = "_"
    #toLoc = random.choice(toLocations)
    #toLocations = toLocations.replace(currentLoc, "")
    for i in range(numLocations+1):
        print len(toLocations)
        print len(fromLocations)
        print "wherefrom: " + fromLoc
        print "currentloc: " + currentLoc
        print "to locs options: " + str(toLocations)
        print "from locs: " + str(fromLocations)
        print pGene
        print
        #place the from loc in the from position of the current loc
        if fromLoc != "_":
            pGene[locIndex[currentLoc]] = str(fromLoc) + str(pGene[locIndex[currentLoc]][1])
            fromLocations = fromLocations.replace(fromLoc,'',1)
        if len(toLocations) == 0:
            pGene[locIndex[currentLoc]] = str(fromLoc[0] ) + str(pGene[locIndex[currentLoc]][1])
            return pGene
        toLoc = pGene[locIndex[currentLoc]][1]
        if toLoc == "_":
            # get a to loc only if needed
            #if len(toLocations) == 2 and len(fromLocations) == 1 and (fromLocations == toLoc)
            toLoc = currentLoc
            while (toLoc == currentLoc) or (toLoc == fromLoc) :
                if len(toLocations) == 0:
                    toLoc = locValue[anEmptyToSpot]
                else:
                    toLoc = random.choice(toLocations)
        toLocations = toLocations.replace(toLoc, "")
        #place it in the to position of the current loc
        pGene[locIndex[currentLoc]] = str(pGene[locIndex[currentLoc]][0]) + str(toLoc)
        #prepare to move to the new loc!
        fromLoc = currentLoc
        currentLoc = toLoc
    pGene[locIndex[currentLoc]] = str(fromLoc) + str(pGene[locIndex[currentLoc]][0])
    return pGene
#print completeTSPGene(['__','CD','_B','B_','__','__','AC','FI','HA']) |
5,340 | 59b2c9d279168a806e59fb7529ab12d7b86107bc | # 213. 打家劫舍 II
# 你是一个专业的小偷,计划偷窃沿街的房屋,每间房内都藏有一定的现金。这个地方所有的房屋都 围成一圈 ,这意味着第一个房屋和最后一个房屋是紧挨着的。
# 同时,相邻的房屋装有相互连通的防盗系统,如果两间相邻的房屋在同一晚上被小偷闯入,系统会自动报警 。
# 给定一个代表每个房屋存放金额的非负整数数组,计算你 在不触动警报装置的情况下 ,能够偷窃到的最高金额。
class Solution:
    """LeetCode 213 — House Robber II.

    Houses form a circle, so the first and last house are adjacent and
    cannot both be robbed.  Two equivalent solutions are provided:
    `rob` (reduce to two linear sub-problems) and `rob2` (two explicit
    robbed/skipped DP passes).
    """

    def rob(self, nums) -> int:
        """Max loot on a circular street: drop either the first or the
        last house and solve the remaining straight row twice."""
        count = len(nums)
        if count == 0:
            return 0
        if count == 1:
            return nums[0]
        without_first = self.helper(nums[1:], count - 1)
        without_last = self.helper(nums[:-1], count - 1)
        return max(without_first, without_last)

    def helper(self, nums, n):
        """Classic linear house-robber DP over the first `n` houses,
        using two rolling values instead of a full dp array."""
        if n == 1:
            return nums[0]
        prev2 = nums[0]
        prev1 = max(nums[0], nums[1])
        for i in range(2, n):
            prev2, prev1 = prev1, max(prev1, prev2 + nums[i])
        return prev1

    def rob2(self, nums) -> int:
        """Alternative formulation: two robbed/skipped DP passes, one
        assuming house 0 is robbed and one assuming it is skipped."""
        n = len(nums)
        if nums == []:
            return 0
        if len(nums) == 1:
            return nums[0]
        # Pass 1: house 0 robbed — the answer may only end "not robbed"
        # because house n-1 neighbours house 0.
        robbed, skipped = nums[0], float('-inf')
        for value in nums[1:]:
            robbed, skipped = skipped + value, max(skipped, robbed)
        best_with_first = skipped
        # Pass 2: house 0 skipped — last house is unconstrained.
        robbed, skipped = float('-inf'), 0
        for value in nums[1:]:
            robbed, skipped = skipped + value, max(skipped, robbed)
        return max(skipped, robbed, best_with_first)
|
5,341 | a521befba58aa85c2fcfe6006db4b161123585f1 | # Copyright 2018-present Kensho Technologies, LLC.
from .utils import create_vertex_statement, get_random_date, get_uuid
# Fixed set of event names; one Event vertex is generated per entry.
EVENT_NAMES_LIST = (
    "Birthday",
    "Bar Mitzvah",
    "Coronation",
    "Re-awakening",
)
def _create_event_statement(event_name):
    """Return a SQL statement to create an Event vertex."""
    fields = {
        'name': event_name,
        'event_date': get_random_date(),
        'uuid': get_uuid(),
    }
    return create_vertex_statement('Event', fields)
def get_event_generation_commands():
    """Return a list of SQL statements to create all event vertices."""
    # Comprehension replaces the manual append loop (same order, same output).
    return [_create_event_statement(event_name)
            for event_name in EVENT_NAMES_LIST]
|
5,342 | b0ab97f5c05cdeee4c01460109a76cef75ac72ce | print("Convertidor de pies y pulgadas a centímetros")
# Feet + inches -> centimetres (1 foot = 12 inches, 1 inch = 2.54 cm).
pies = float(input("Escriba una cantidad de pies: "))
pulgadas = float(input("Escriba una cantidad de pulgadas: "))
pulgadas_totales = pies * 12 + pulgadas
cm = pulgadas_totales * 2.54
print("{} pies y {} pulgadas son {} cm".format(pies, pulgadas, cm))
5,343 | df1486afcc99e03510512ed6ed3e8b3471459d50 | import pkgutil
import mimetypes
import time
from datetime import datetime
from pywb.utils.wbexception import NotFoundException
from pywb.utils.loaders import BlockLoader
from pywb.utils.statusandheaders import StatusAndHeaders
from pywb.framework.basehandlers import BaseHandler, WbUrlHandler
from pywb.framework.wbrequestresponse import WbResponse
from pywb.warc.recordloader import ArcWarcRecordLoader
from pywb.warc.resolvingloader import ResolvingLoader
from views import J2TemplateView
from replay_views import ReplayView
from pywb.framework.memento import MementoResponse
from pywb.utils.timeutils import datetime_to_timestamp
#=================================================================
class SearchPageWbUrlHandler(WbUrlHandler):
    """
    Loads a default search page html template to be shown when
    the wb_url is empty
    """
    def __init__(self, config):
        self.search_view = (J2TemplateView.
                            create_template(config.get('search_html'),
                                            'Search Page'))

        self.is_frame_mode = config.get('framed_replay', False)
        self.response_class = WbResponse

        if self.is_frame_mode:
            html = config.get('frame_insert_html', 'ui/frame_insert.html')
            self.frame_insert_view = (J2TemplateView.
                                      create_template(html, 'Frame Insert'))

            self.banner_html = config.get('banner_html', 'banner.html')

            # Memento headers only make sense for framed replay here.
            if config.get('enable_memento', False):
                self.response_class = MementoResponse
        else:
            self.frame_insert_view = None
            self.banner_html = None

    def render_search_page(self, wbrequest, **kwargs):
        # Render the configured search template, or a plain-text fallback.
        if self.search_view:
            return self.search_view.render_response(wbrequest=wbrequest,
                                                    prefix=wbrequest.wb_prefix,
                                                    **kwargs)
        else:
            return WbResponse.text_response('No Lookup Url Specified')

    def __call__(self, wbrequest):
        # root search page
        if wbrequest.wb_url_str == '/':
            return self.render_search_page(wbrequest)

        # render top level frame if in frame mode
        # (not supported in proxy mode)
        if (self.is_frame_mode and wbrequest.wb_url and
            not wbrequest.wb_url.is_query() and
            not wbrequest.options['is_proxy']):
            if wbrequest.wb_url.is_top_frame:
                return self.get_top_frame_response(wbrequest)
            else:
                # mark inner-frame requests so templates can adapt
                wbrequest.final_mod = 'tf_'

        return self.handle_request(wbrequest)

    def get_top_frame_params(self, wbrequest):
        # Build the template parameters for the top-level frame insert page.
        embed_url = wbrequest.wb_url.to_str(mod='')

        if wbrequest.wb_url.timestamp:
            timestamp = wbrequest.wb_url.timestamp
        else:
            # no timestamp in the url: default to "now"
            timestamp = datetime_to_timestamp(datetime.utcnow())

        params = dict(embed_url=embed_url,
                      wbrequest=wbrequest,
                      timestamp=timestamp,
                      url=wbrequest.wb_url.url,
                      banner_html=self.banner_html)

        return params

    def get_top_frame_response(self, wbrequest):
        # Render the frame-insert html that embeds the actual replay page.
        params = self.get_top_frame_params(wbrequest)

        headers = [('Content-Type', 'text/html; charset=utf-8')]
        status_headers = StatusAndHeaders('200 OK', headers)

        template_result = self.frame_insert_view.render_to_string(**params)
        body = template_result.encode('utf-8')

        return self.response_class(status_headers, [body], wbrequest=wbrequest)
#=================================================================
# Standard WB Handler
#=================================================================
class WBHandler(SearchPageWbUrlHandler):
    """Standard web-archive replay handler: performs a CDX index lookup and
    either replays the matched capture or renders a query/listing response.
    """
    def __init__(self, query_handler, config=None):
        super(WBHandler, self).__init__(config)

        self.index_reader = query_handler

        cookie_maker = config.get('cookie_maker')
        record_loader = ArcWarcRecordLoader(cookie_maker=cookie_maker)

        paths = config.get('archive_paths')

        resolving_loader = ResolvingLoader(paths=paths,
                                           record_loader=record_loader)

        self.replay = ReplayView(resolving_loader, config)

        self.fallback_handler = None
        self.fallback_name = config.get('fallback')

    def resolve_refs(self, handler_dict):
        # Late-bind the optional fallback handler by name once all
        # handlers have been registered.
        if self.fallback_name:
            self.fallback_handler = handler_dict.get(self.fallback_name)

    def handle_request(self, wbrequest):
        try:
            cdx_lines, output = self.index_reader.load_for_request(wbrequest)
        except NotFoundException as nfe:
            return self.handle_not_found(wbrequest, nfe)

        if output != 'text' and wbrequest.wb_url.is_replay():
            return self.handle_replay(wbrequest, cdx_lines)
        else:
            return self.handle_query(wbrequest, cdx_lines, output)

    def handle_query(self, wbrequest, cdx_lines, output):
        return self.index_reader.make_cdx_response(wbrequest,
                                                   cdx_lines,
                                                   output)

    def handle_replay(self, wbrequest, cdx_lines):
        cdx_callback = self.index_reader.cdx_load_callback(wbrequest)

        return self.replay.render_content(wbrequest,
                                          cdx_lines,
                                          cdx_callback)

    def handle_not_found(self, wbrequest, nfe):
        # No fallback configured (or a query/identity request): re-raise the
        # original NotFoundException.  The bare `raise` is valid because this
        # method is only called from handle_request's except block.
        if (not self.fallback_handler or
            wbrequest.wb_url.is_query() or
            wbrequest.wb_url.is_identity):
            raise

        return self.fallback_handler(wbrequest)

    def __str__(self):
        return 'Web Archive Replay Handler'
#=================================================================
# Static Content Handler
#=================================================================
class StaticHandler(BaseHandler):
    """Serves static files from `static_path`, guessing the content type
    from the file extension.
    """
    def __init__(self, static_path):
        mimetypes.init()

        self.static_path = static_path
        self.block_loader = BlockLoader()

    def __call__(self, wbrequest):
        url = wbrequest.wb_url_str.split('?')[0]  # drop any query string
        full_path = self.static_path + url

        try:
            data = self.block_loader.load(full_path)

            try:
                # Seekable stream: report an exact Content-Length.
                data.seek(0, 2)
                size = data.tell()
                data.seek(0)
                headers = [('Content-Length', str(size))]
            except IOError:
                headers = None

            # NOTE(review): `reader` is computed but never used — the
            # response below streams `data` directly, so this
            # wsgi.file_wrapper branch is effectively dead code.
            if 'wsgi.file_wrapper' in wbrequest.env:
                reader = wbrequest.env['wsgi.file_wrapper'](data)
            else:
                reader = iter(lambda: data.read(), '')

            content_type, _ = mimetypes.guess_type(full_path)

            return WbResponse.text_stream(data,
                                          content_type=content_type,
                                          headers=headers)

        except IOError:
            raise NotFoundException('Static File Not Found: ' +
                                    wbrequest.wb_url_str)

    def __str__(self):  # pragma: no cover
        return 'Static files from ' + self.static_path
#=================================================================
# Debug Handlers
#=================================================================
class DebugEchoEnvHandler(BaseHandler):  # pragma: no cover
    """Debug handler: echoes the raw WSGI environ as plain text."""
    def __call__(self, wbrequest):
        return WbResponse.text_response(str(wbrequest.env))
#=================================================================
class DebugEchoHandler(BaseHandler):  # pragma: no cover
    """Debug handler: echoes the parsed wbrequest object as plain text."""
    def __call__(self, wbrequest):
        return WbResponse.text_response(str(wbrequest))
|
5,344 | a179d3d2f04a101eaa60b5964c2b1cd77071633f | from envs import DATASET_FOLDER
from os.path import join
import json
import collections
from tqdm import tqdm
def add_space(context_list):
    """Normalise punctuation spacing and re-insert inter-sentence spaces.

    For every [title, sentences] context: ' .' -> '.', ' ,' -> ',', strip,
    then prefix each sentence with a single space EXCEPT the very first
    sentence of the very first context (so joining the sentences later
    yields correctly spaced text).

    FIX: the original duplicated the whole sentence-cleaning loop in two
    branches (idx == 0 vs the rest) that differed only for sentence 0 of
    context 0; the branches are merged with identical behaviour.

    Returns a new list; the input is not modified.
    """
    space_context = []
    for ctx_idx, context in enumerate(context_list):
        title, sent_list = context[0], context[1]
        cleaned = []
        for sent_idx, sent in enumerate(sent_list):
            sent = sent.replace(' .', '.').replace(' ,', ',').strip()
            if ctx_idx == 0 and sent_idx == 0:
                # Only the document-initial sentence gets no leading space.
                cleaned.append(sent)
            else:
                cleaned.append(' ' + sent)
        space_context.append([title, cleaned])
    return space_context
def find_answer(answer, sents):
    """Return the index of the first sentence containing `answer`, else -1."""
    return next(
        (idx for idx, sentence in enumerate(sents) if answer in sentence),
        -1,
    )
def find_in_answer_context(answer, context):
    """Return the index (0 or 1) of the context paragraph containing the
    answer, or -1 when no paragraph contains it.

    NOTE(review): asserts the answer occurs in at most two paragraphs;
    `assert` is stripped under `python -O`, so this is a debug-only check.
    """
    founds = []
    for ctx_idx, ctx in enumerate(context):
        ans_idx = find_answer(answer=answer, sents=ctx[1])
        if ans_idx >= 0:
            founds.append(1)
            # if ctx_idx == 0:
            #     print('{} : {}: {}'.format(ctx_idx, ans_idx, len(ctx[1])))
        else:
            founds.append(0)
    ans_found_idx = -1
    assert sum(founds) <= 2
    if sum(founds) > 0:
        # Prefer the first paragraph when the answer occurs there.
        if founds[0] == 1:
            ans_found_idx = 0
        else:
            ans_found_idx = 1
    return ans_found_idx
def fintuner_in_answer_context(answer, context, supporting_facts):
    """Return True when the answer occurs in the first paragraph at a
    sentence index greater than 0 AND there is more than one distinct
    supporting fact.

    NOTE(review): `ans_idx > 0` also excludes a hit in sentence 0 —
    confirm this is intended and not an off-by-one (find_answer returns
    0 for sentence 0 and -1 for "not found").
    """
    ans_idx = find_answer(answer=answer, sents=context[0][1])
    # De-duplicate supporting facts on (title, sentence index).
    support_facts = set([(x[0], x[1]) for x in supporting_facts])
    if ans_idx > 0 and len(support_facts) > 1:
        # if (context[0][0], ans_idx) not in support_facts:
        #     print(ans_idx, len(context[0][1]))
        #     print(supporting_facts)
        return True
    return False
def docred_refiner():
    """Scan the converted DocRED dump and print answer-location statistics.

    Counts, per example: which paragraph (0/1) contains the answer, how
    often no paragraph contains it, how often the answer sits in the first
    paragraph of a multi-sentence context, how many examples are "tunable"
    per fintuner_in_answer_context, and how many distinct titles occur.

    NOTE(review): `answer_not_found`, `examples` and the REFINEd output
    path are declared but never used — the refined file is never written.
    """
    DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER, 'data_processed/docred/docred_multihop_para.json')
    DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,
                                            'data_raw/converted_docred_total.json')  # converted_docred_total.json
    REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER, 'data_raw/refined_converted_docred_total.json')
    with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8') as reader:
        raw_data = json.load(reader)
    with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8') as reader:
        para_data = json.load(reader)
    print('loading {} data from {}'.format(len(raw_data), DOCRED_OUTPUT_PROCESSED_raw_file))
    examples = []
    answer_position = []
    answer_not_found = []
    no_answer_found = 0
    first_one_sent = 0
    title_dict = {}
    tunable_count = 0
    for case in tqdm(raw_data):
        # print(case)
        key = case['_id']
        answer = case['answer']
        context = case['context']
        support_facts = case['supporting_facts']
        # Title of the first paragraph; the trailing two chars are dropped
        # (presumably a suffix added during conversion — TODO confirm).
        title = context[0][0][:-2].strip()
        if title not in title_dict:
            title_dict[title] = 1
        else:
            title_dict[title] = title_dict[title] + 1
        fine_tune_flag = fintuner_in_answer_context(answer=answer, supporting_facts=support_facts, context=context)
        if fine_tune_flag:
            tunable_count = tunable_count + 1
        ans_find_idx = find_in_answer_context(answer=answer, context=context)
        if ans_find_idx >= 0:
            answer_position.append(ans_find_idx)
        else:
            no_answer_found = no_answer_found + 1
        if ans_find_idx == 0 and len(context[0][1]) > 1:
            first_one_sent = first_one_sent + 1
        # for ctx_idx, ctx in enumerate(context):
        #     is_answer_found = find_answer(answer=answer, sents=ctx[1])
        #     if is_answer_found:
        #         answer_position.append(ctx_idx)
        #         break
        #     else:
        #         continue
        # for key_name, key_value in case.items():
        #     if key_name != 'context':
        #         print('{}: {}'.format(key_name, key_value))
        #     else:
        #         for ctx_idx, ctx in enumerate(key_value):
        #             print('{}: {}'.format(ctx_idx + 1, ctx))
        # context = case['context']
        # space_context = add_space(context_list=context)
        # case['context'] = space_context
        # examples.append(case)
        # print(context)
        # print('-' * 50)
        # print(add_space(context_list=context))
        # print('*' * 100)
    print(len(raw_data))
    print(len(answer_position))
    print(sum(answer_position))
    print('no answer found = {}'.format(no_answer_found))
    print('first one sent = {}'.format(first_one_sent))
    print('tunable count = {}'.format(tunable_count))
    print('title number = {}'.format(len(title_dict)))
    # sorted_title_dict = sorted(title_dict.items(), key=lambda kv: kv[1])
    # for key, value in sorted_title_dict:
    #     print('{}: {}'.format(key, value))
def docred_checker():
    """Pretty-print every raw converted-DocRED example for manual review.

    Prints all non-context fields directly and enumerates the context
    paragraphs one per line.  `para_data` and `examples` are loaded or
    declared but never used (leftovers from the refiner variant).
    """
    DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER, 'data_processed/docred/docred_multihop_para.json')
    DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER, 'data_raw/converted_docred_total.json') #converted_docred_total.json
    # Saved_raw_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER, 'data_raw/space_converted_docred_total.json')
    with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8') as reader:
        raw_data = json.load(reader)
    with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8') as reader:
        para_data = json.load(reader)
    print('loading {} data from {}'.format(len(raw_data), DOCRED_OUTPUT_PROCESSED_raw_file))
    examples = []
    for case in tqdm(raw_data):
        # print(case)
        key = case['_id']
        for key_name, key_value in case.items():
            if key_name != 'context':
                print('{}: {}'.format(key_name, key_value))
            else:
                for ctx_idx, ctx in enumerate(key_value):
                    print('{}: {}'.format(ctx_idx + 1, ctx))
        # context = case['context']
        # space_context = add_space(context_list=context)
        # case['context'] = space_context
        # examples.append(case)
        # print(context)
        # print('-' * 50)
        # print(add_space(context_list=context))
        print('*' * 100)
        # print('key {}'.format(key))
        # print(para_data[key])
    # json.dump(examples, open(Saved_raw_DOCRED_OUTPUT_PROCESSED, 'w'))
|
5,345 | 55c00ce4c1657dc5ce78e5eeccd8e9625c0590dc | import requests
import json
import logging
import time
from alto.server.components.datasource import DBInfo, DataSourceAgent
class CRICAgent(DataSourceAgent):
    """Data-source agent that loads a CRIC topology dump and flags IP
    prefixes belonging to the local ASN as `is_local` in the endpoint DB.
    """
    def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):
        super().__init__(dbinfo, name, namespace)
        self.uri = self.ensure_field(cfg, 'uri')  # HTTP(S) URL or local file path
        self.local_asn = cfg.get('local_asn', None)
        self.refresh_interval = cfg.get('refresh_interval', None)  # seconds; None -> default 60 in run()
        self.netroute_map = dict()  # NOTE(review): never populated in this class

        logging.info("Loading databases")
        self.db = [ self.request_db(t) for t in ['endpoint']]

    def update(self):
        """Fetch the CRIC dump and mark every prefix of the local ASN."""
        eb_trans = self.db[0].new_transaction()
        cric_dict = dict()
        if self.uri.startswith('http'):
            # SECURITY(review): verify=False disables TLS certificate
            # validation — confirm this is intentional for this endpoint.
            data = requests.get(self.uri, verify=False)
            cric_dict = json.loads(data.content)
        else:
            with open(self.uri, 'r') as f_cric:
                cric_dict = json.load(f_cric)

        # CRIC structure: {site: {'netroutes': {id: {'networks': {af: [prefix,...]},
        #                                            'asn': int}}}}
        for _, rcsite_obj in cric_dict.items():
            netroutes = rcsite_obj.get('netroutes', dict())
            for _, netroute in netroutes.items():
                for _, ipprefixes in netroute['networks'].items():
                    for ipprefix in ipprefixes:
                        asn = netroute.get('asn')
                        if asn == self.local_asn:
                            eb_trans.add_property(ipprefix, {'is_local': True})
        eb_trans.commit()

    def run(self):
        """Poll forever, refreshing every `refresh_interval` seconds (default 60)."""
        if self.refresh_interval is None:
            self.refresh_interval = 60
        while True:
            self.update()
            time.sleep(self.refresh_interval)
|
5,346 | 10c9566503c43e806ca89e03955312c510092859 | import datetime
import json
import re
import time
import discord
from utils.ext import standards as std, checks, context, logs
# Pattern sources used by the automod message filters.
# FIX: the originals were plain string literals containing `\.`, `\/`, `\+`
# etc.; those are invalid escape sequences that raise SyntaxWarning on
# modern Python.  Raw-string prefixes keep the values byte-identical.
DISCORD_INVITE = r'(discord(app\.com\/invite|\.com(\/invite)?|\.gg)\/?[a-zA-Z0-9-]{2,32})'
EXTERNAL_LINK = r'((https?:\/\/(www\.)?|www\.)[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6})'
EVERYONE_MENTION = r'@(here|everyone)'

discordRegex = re.compile(DISCORD_INVITE, re.IGNORECASE)
linkRegex = re.compile(EXTERNAL_LINK, re.IGNORECASE)
everyoneRegex = re.compile(EVERYONE_MENTION)
def findWord(word):
    """Return a search callable matching `word` as a whole word,
    case-insensitively.

    NOTE(review): `word` is interpolated into the pattern unescaped, so
    regex metacharacters in it alter matching; confirm callers only pass
    plain words before adding re.escape (which would change behaviour for
    callers relying on pattern syntax).
    """
    pattern = re.compile(r'\b(%s)\b' % word, flags=re.IGNORECASE)
    return pattern.search
async def managePunishment(ctx, punishment, reason):
    """Delete the offending message and apply the configured punishment.

    Punishment codes: 1 = kick, 2 = permanent ban, 3 = temp ban,
    4 = temp mute (role-based).  Temp actions also insert a timer row so
    the bot can lift them later.  Builds matching mod-log and user-DM
    embeds and hands them to logs.createEmbedLog.
    """
    await ctx.message.delete()
    user: discord.Member = ctx.author
    # Truncate to respect Discord's 1024-char embed-field limit.
    msg = ctx.message.content if len(ctx.message.content) < 1015 else f'{ctx.message.content[:1015]}...'
    reason = f'Automoderation: {reason}'
    embed: discord.Embed = std.getBaseModEmbed(reason, ctx.author, ctx.me)
    userEmbed: discord.Embed = std.getBaseModEmbed(reason)
    userEmbed.add_field(name=f'{std.folder_emoji} **Server**', value=ctx.guild.name, inline=False)
    userEmbed.add_field(name=f'{std.list_emoji} **__Message__**', value=msg, inline=False)
    embed.add_field(name=f'{std.channel_emoji} **__Channel__**', value=ctx.channel.mention, inline=False)
    embed.add_field(name=f'{std.list_emoji} **__Message__**', value=msg, inline=False)
    data = await ctx.bot.db.fetchrow('SELECT bantime, mutetime, muterole FROM automod.config WHERE sid = $1', ctx.guild.id)
    if punishment == 1:
        # Kick: only if the bot has the permission; otherwise silently skipped.
        if checks.hasPermsByName(ctx, ctx.me, 'kick_members'):
            embed.title = 'AUTOMODERATION [KICK]'
            userEmbed.title = 'AUTOMODERATION [KICK]'
            await ctx.guild.kick(user, reason=reason)
    elif punishment == 2:
        # Permanent ban.
        if checks.hasPermsByName(ctx, ctx.me, 'ban_members'):
            embed.title = 'AUTOMODERATION [BAN]'
            userEmbed.title = 'AUTOMODERATION [BAN]'
            await ctx.guild.ban(user, reason=reason)
    elif punishment == 3:
        # Temp ban: schedule an unban timer (type 0) at now + bantime.
        if checks.hasPermsByName(ctx, ctx.me, 'ban_members'):
            embed.title = 'AUTOMODERATION [TEMPBAN]'
            userEmbed.title = 'AUTOMODERATION [TEMPBAN]'
            unixTime = time.time() + data['bantime']
            embed.add_field(name=f'{std.date_emoji} **__Entbann__**', value=datetime.datetime.fromtimestamp(unixTime).strftime('%d. %m. %Y um %H:%M:%S'))
            await ctx.db.execute('INSERT INTO extra.timers (sid, objid, type, time, data) VALUES ($1, $2, $3, $4, $5)',
                                 ctx.guild.id, user.id, 0, unixTime, json.dumps({'reason': reason}))
            await ctx.guild.ban(user, reason=reason)
    elif punishment == 4:
        # Temp mute: requires a configured mute role; schedules an unmute
        # timer (type 1) at now + mutetime.
        if checks.hasPermsByName(ctx, ctx.me, 'manage_roles'):
            muteRole = ctx.guild.get_role(data['muterole'])
            if muteRole is None:
                # No mute role configured: nothing to do (no log entry either).
                return
            embed.title = 'AUTOMODERATION [TEMPMUTE]'
            userEmbed.title = 'AUTOMODERATION [TEMPMUTE]'
            unixTime = time.time() + data['mutetime']
            embed.add_field(name=f'{std.date_emoji} **__Entmute__**', value=datetime.datetime.fromtimestamp(unixTime).strftime('%d. %m. %Y um %H:%M:%S'))
            await ctx.db.execute('INSERT INTO extra.timers (sid, objid, type, time, data) VALUES ($1, $2, $3, $4, $5)',
                                 ctx.guild.id, user.id, 1, unixTime, json.dumps({'reason': reason}))
            await user.add_roles(muteRole, reason=reason)
    await logs.createEmbedLog(ctx=ctx, modEmbed=embed, userEmbed=userEmbed, member=user, ignoreMMSG=True, ignoreNoLogging=True)
async def add_points(ctx: context, addPoints, modType, user: discord.Member = None):
    """Add automod punishment points to a user and, once the guild's point
    limit is reached, apply the configured action (kick/ban/tempban/tempmute).

    Parameters:
        ctx: command/message context (provides bot, guild, channel, message).
        addPoints: number of points this violation is worth.
        modType: human-readable violation label used in logs and reasons.
        user: explicit target; defaults to the message author when None.
    """
    await ctx.message.delete()
    if user is not None:
        punishedUser: discord.Member = user
    else:
        punishedUser: discord.Message = ctx.author
    await ctx.bot.db.execute(
        'INSERT INTO automod.users (uid, sid, points, time, reason) VALUES ($1, $2, $3, $4, $5)',
        punishedUser.id, ctx.guild.id, addPoints, time.time(), f'Automoderation: {modType}')
    # Sum only the points accrued within the last 30 days (2592000 seconds).
    points = await ctx.bot.db.fetchval('SELECT sum(points) FROM automod.users WHERE uid = $1 AND sid = $2 AND $3 - time < 2592000', punishedUser.id, ctx.guild.id, time.time())
    data = await ctx.bot.db.fetchrow("SELECT action, maxpoints, muterole, mutetime, bantime FROM automod.config WHERE sid = $1", ctx.guild.id)
    msg: discord.Message = ctx.message
    action = data['action']
    maxPoints = data['maxpoints']
    # Fall back to 24h (86400 s) when no explicit mute/ban duration is configured.
    unixTimeMute = unixTimeBan = time.time() + 86400
    if data['mutetime']:
        unixTimeMute: float = time.time() + data['mutetime']
    if data['bantime']:
        unixTimeBan: float = time.time() + data['bantime']
    # Embed field values are length-limited; truncate long messages.
    message = msg.content if len(msg.content) < 1015 else f'{ctx.message.content[:1015]}...'
    embed: discord.Embed = std.getBaseModEmbed(f'{modType} [+{addPoints}]', punishedUser)
    userEmbed: discord.Embed = std.getBaseModEmbed(f'{modType} [+{addPoints}]')
    userEmbed.add_field(name=f'{std.folder_emoji} **Server**', value=ctx.guild.name)
    embed.title = f'AUTOMODERATION [LOG]'
    userEmbed.title = f'AUTOMODERATION [LOG]'
    if user is not None:
        embed.add_field(name=f'{std.supporter_emoji} **__Moderator__**', value=ctx.author.mention, inline=False)
    embed.add_field(name=f'{std.channel_emoji} **__Channel__**', value=ctx.channel.mention, inline=False)
    embed.add_field(name=f'{std.invite_emoji} **__Punkte__**', value=f'{points}/{maxPoints}', inline=False)
    userEmbed.add_field(name=f'{std.invite_emoji} **__Punkte__**', value=f'{points}/{maxPoints}', inline=False)
    if user is None:
        userEmbed.add_field(name=f'{std.list_emoji} **__Message__**', value=message, inline=False)
        embed.add_field(name=f'{std.list_emoji} **__Message__**', value=message, inline=False)
    if points >= maxPoints:
        if action is None:
            embed.title = 'AUTOMODERATION [LOG]'
        if action == 1:
            if checks.hasPermsByName(ctx, ctx.me, 'kick_members'):
                embed.title = 'AUTOMODERATION [KICK]'
                await punishedUser.kick(reason="Automoderation")
                await ctx.bot.db.execute("DELETE FROM automod.users WHERE uid = $1 AND sid = $2", punishedUser.id, msg.guild.id)
            else:
                return
        if action == 2:
            # BUGFIX: banning requires the ban_members permission, not
            # kick_members (was a copy-paste from the kick branch above).
            if checks.hasPermsByName(ctx, ctx.me, 'ban_members'):
                embed.title = 'AUTOMODERATION [BAN]'
                await punishedUser.ban(reason="Automoderation")
                await ctx.bot.db.execute("DELETE FROM automod.users WHERE uid = $1 AND sid = $2", punishedUser.id, msg.guild.id)
            else:
                return
        if action == 3:
            if checks.hasPermsByName(ctx, ctx.me, 'ban_members'):
                embed.add_field(name=f'{std.date_emoji} **__Entbann__**', value=datetime.datetime.fromtimestamp(unixTimeBan).strftime('%d. %m. %Y um %H:%M:%S'))
                embed.title = 'AUTOMODERATION [TEMPBAN]'
                await punishedUser.ban(reason="Automoderation: Punktesystem")
                # NOTE(review): other queries use ctx.bot.db while the timer
                # inserts use ctx.db -- presumably the same pool; confirm.
                await ctx.db.execute('INSERT INTO extra.timers (sid, objid, type, time, data) VALUES ($1, $2, $3, $4, $5)',
                                     ctx.guild.id, punishedUser.id, 0, unixTimeBan, json.dumps({'reason': 'Automoderation: Punktesystem'}))
            else:
                return
        if action == 4:
            if checks.hasPermsByName(ctx, ctx.me, 'manage_roles'):
                muteRole = ctx.guild.get_role(data['muterole'])
                if muteRole is None:
                    return
                embed.add_field(name=f'{std.date_emoji} **__Entmute__**', value=datetime.datetime.fromtimestamp(unixTimeMute).strftime('%d. %m. %Y um %H:%M:%S'))
                embed.title = 'AUTOMODERATION [TEMPMUTE]'
                await punishedUser.add_roles(muteRole, reason='Automoderation')
                await ctx.bot.db.execute("DELETE FROM automod.users WHERE uid = $1 AND sid = $2", punishedUser.id, msg.guild.id)
                await ctx.db.execute('INSERT INTO extra.timers (sid, objid, type, time, data) VALUES ($1, $2, $3, $4, $5)',
                                     ctx.guild.id, punishedUser.id, 1, unixTimeMute, json.dumps({'reason': 'Automoderation: Punktesystem'}))
            else:
                return
    await logs.createEmbedLog(ctx=ctx, modEmbed=embed, userEmbed=userEmbed, member=punishedUser, ignoreNoLogging=True, ignoreMMSG=True)
async def automod(ctx):
    """Scan ctx.message with every enabled automod filter (word blacklist,
    Discord invites, links, caps, mass mentions) and route violations to the
    point system (state == 5) or a direct punishment."""
    bot = ctx.bot
    guild: discord.Guild = ctx.guild
    msg: discord.Message = ctx.message
    channel: discord.TextChannel = ctx.channel
    blState = await bot.get(guild.id, 'state')
    if not await bot.get(guild.id, 'automod'):
        return
    # --- blacklisted words ---
    if blState:
        words = await bot.get(guild.id, 'words')
        if words:
            for word in words:
                if findWord(word)(msg.content.lower()):
                    if not await checks.ignores_automod(ctx):
                        data = await bot.db.fetchrow('SELECT points, whitelist FROM automod.blacklist WHERE sid = $1', guild.id)
                        if data['whitelist'] is not None:
                            if channel.id in data['whitelist']:
                                return
                        if blState == 5:
                            return await add_points(ctx, data['points'], 'Blacklisted Word')
                        else:
                            return await managePunishment(ctx, blState, 'Blacklisted Word')
    # --- Discord invite links ---
    if discordRegex.findall(msg.content):
        if await checks.ignores_automod(ctx):
            return
        data = await bot.db.fetchrow("SELECT state, whitelist, partner, points FROM automod.invites WHERE sid = $1", guild.id)
        if not data:
            return
        if not (state := data['state']):
            return
        if data['whitelist'] is not None:
            if channel.id in data['whitelist']:
                return
        # Own guild plus configured partner guilds are exempt.
        whitelistedServers = [guild.id]
        if partner := data['partner']:
            whitelistedServers.extend([int(guildID) for guildID in partner])
        hasInvite: bool = False
        for invite in discordRegex.findall(msg.content):
            try:
                invite = await bot.fetch_invite(invite[0])
            except discord.NotFound:
                continue
            except discord.Forbidden:
                # Invite cannot be resolved; treat it as a violation.
                if state == 5:
                    return await add_points(ctx, data['points'], 'Invite')
                else:
                    return await managePunishment(ctx, state, 'Invite')
            if invite.guild.id not in whitelistedServers:
                hasInvite = True
                break
        if hasInvite:
            if state == 5:
                return await add_points(ctx, data['points'], 'Invite')
            else:
                return await managePunishment(ctx, state, 'Invite')
    # --- non-invite links ---
    elif linkRegex.findall(msg.content):
        if await checks.ignores_automod(ctx):
            return
        data = await bot.db.fetchrow('SELECT points, state, links, whitelist, iswhitelist FROM automod.links WHERE sid = $1', guild.id)
        if not data:
            return
        if not (state := data['state']):
            return
        if data['whitelist'] is not None:
            if channel.id in data['whitelist']:
                return
        # Domains always considered safe, extended with the configured list.
        links = ['discord.gg', 'discord.com', 'discordapp.com', 'plyoox.net']
        if (linksData := data['links']) is not None:
            links.extend(linksData)
        linksObj = linkRegex.findall(msg.content)
        for linkObj in linksObj:
            link = linkObj[0].replace(linkObj[1], '')
            if data['iswhitelist']:
                # Whitelist mode: punish links NOT on the list.
                if link not in links:
                    if state == 5:
                        return await add_points(ctx, data['points'], 'Link')
                    else:
                        return await managePunishment(ctx, state, 'Link')
            else:
                # Blacklist mode: punish links ON the list.
                if link in links:
                    if state == 5:
                        return await add_points(ctx, data['points'], 'Link')
                    else:
                        return await managePunishment(ctx, state, 'Link')
    # --- excessive caps ---
    if not msg.clean_content.islower() and len(msg.content) > 15:
        if await checks.ignores_automod(ctx):
            return
        lenCaps = len(re.findall(r'[A-ZÄÖÜ]', msg.clean_content))
        percent = lenCaps / len(msg.content)
        # More than 70% upper-case characters counts as shouting.
        if percent > 0.7:
            data = await bot.db.fetchrow("SELECT points, state, whitelist FROM automod.caps WHERE sid = $1", msg.guild.id)
            if not data:
                return
            if not (state := data['state']):
                return
            if data['whitelist'] is not None:
                if channel.id in data['whitelist']:
                    return
            if state == 5:
                return await add_points(ctx, data['points'], 'Caps')
            else:
                return await managePunishment(ctx, state, 'Caps')
    # --- mass mentions ---
    if len(msg.raw_mentions) + len(msg.raw_role_mentions) + len(everyoneRegex.findall(msg.content)) >= 3:
        if await checks.ignores_automod(ctx):
            return
        # Self-mentions do not count.
        lenMentions = sum(m != ctx.author.id for m in msg.raw_mentions) + len(msg.raw_role_mentions)
        data = await bot.db.fetchrow(
            "SELECT state, points, count, whitelist, everyone FROM automod.mentions WHERE sid = $1",
            guild.id)
        if not data:
            return
        if not (state := data['state']):
            return
        if data['whitelist'] is not None:
            if channel.id in data['whitelist']:
                return
        if data['everyone']:
            lenMentions += len(everyoneRegex.findall(msg.content))
        if lenMentions >= data['count']:
            if state == 5:
                return await add_points(ctx, data['points'], 'Mentions')
            else:
                # BUGFIX: the reason label here was 'Caps' (copy-paste from
                # the caps branch); this is the mentions filter.
                return await managePunishment(ctx, state, 'Mentions')
|
5,347 | 0ad2e6d7e3fd61943fc1dfe6662110a6f48c1bd5 | from .parse_categories import extract_categories
from .parse_sections import extract_sections
from .utils import remove_xml_comments
def parse_page(page):
    """Parse a wiki page dict into (title, sections, categories).

    Returns None for redirect pages.  On a section-parse failure returns the
    4-tuple (title, 'Can not parse', None, None) -- NOTE(review): callers
    must handle both the 3-tuple success shape and this 4-tuple error shape;
    kept as-is for backward compatibility.
    """
    if 'redirect' in page.keys():
        return
    page_text = page['revision']['text']['#text']
    page_text = remove_xml_comments(page_text)
    title = page['title']
    categories = extract_categories(page_text)
    try:
        sections = extract_sections(page_text)
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        return title, 'Can not parse', None, None
    return title, sections, categories
|
5,348 | 4d07795543989fe481e1141756f988d276f82c02 | """
7-4. Pizza Toppings: Write a loop that prompts the user to enter a series of
pizza toppings until they enter a 'quit' value. As they enter each topping,
print a message saying you’ll add that topping to their pizza.
"""
if __name__ == '__main__':
    # Keep prompting until the user types 'quit'; acknowledge each topping.
    while (topping := input("What topping would you like? (enter 'quit' when you are done.) ")) != "quit":
        print("I'll add %s to your pizza!" % topping)
5,349 | 51bdbec732bebd73a84b52c6d1d39eead047d29e | from __future__ import absolute_import
import unittest
import yaml
import os
from bok_choy.web_app_test import WebAppTest
from .pages.job_config_history_subpage import JobConfigHistorySubPage
class TestJobConfigHistory(WebAppTest):
    """Checks that the Jenkins Job Config History plugin settings match the
    values declared in job_config_history.yml (under CONFIG_PATH)."""

    def setUp(self):
        super(TestJobConfigHistory, self).setUp()
        config_path = os.getenv('CONFIG_PATH')
        try:
            # Context manager guarantees the file handle is closed.
            with open("{}/job_config_history.yml".format(config_path), 'r') as config_file:
                yaml_contents = config_file.read()
        except IOError:
            # BUGFIX: the error used to be silently swallowed (`pass`) and
            # the next line then crashed with NameError; fail with a clear
            # message instead.
            self.fail("Unable to read {}/job_config_history.yml".format(config_path))
        self.job_config_history = yaml.safe_load(yaml_contents)
        self.config_page = JobConfigHistorySubPage(self.browser)

    def test_job_config_history(self):
        """
        Verify the Jenkins Config History plugin has been configured
        properly.
        """
        self.config_page.visit()
        self.config_page.expand_advanced()
        assert self.job_config_history['HISTORY_ROOT_DIR'] == self.config_page.get_history_root_dir()
        assert self.job_config_history['MAX_HISTORY_ENTRIES'] == self.config_page.get_max_history_entries()
        assert str(self.job_config_history['SKIP_DUPLICATE_HISTORY']).lower() == self.config_page.get_skip_duplicate_history()
        assert self.job_config_history['SHOW_BUILD_BADGES'] == self.config_page.get_show_build_badges()
5,350 | 2ad1b44027b72499c1961f2d2b1c12c356c63d2b | import numpy,math,random
from scipy.io.wavfile import write
# (note name, frequency in Hz) pairs for one low octave (C1 ≈ 32.7 Hz);
# higher octaves are derived later by doubling per octave.
notes=[('c',32.7),('c#',34.65),('d',36.71),('d#',38.89),('e',41.2),('f',43.65),
('f#',46.25),('g',49),('g#',51.91),('a',55),('a#',58.27),('b',61.47)]
#notes={'c':32.7,'c#':34.65,'d':36.71,'d#':38.89,'e':41.2,'f':43.65,'f#':46.25,
#       'g':49,'g#':51.91,'a':55,'a#':58.27,'b':61.47}
tempo=80  # beats per minute
beatLen=1/(tempo/60)  # seconds per beat
# note-length multipliers relative to a quarter note
# (q=quarter, h=half, dh=dotted half, w=whole, e=eighth, s=sixteenth)
noteTypes={'q':1,'h':2,'dh':3,'w':4,'e':.5,'s':.25,}
def make_wave(freq, time=1, amp=1, phase=0, samplerate=44100, bitspersample=16):
    """Synthesize a sine tone as a list of signed-integer PCM samples.

    A Gaussian envelope centred on the middle of the note fades it in/out.

    Parameters:
        freq: tone frequency in Hz.
        time: duration in seconds.
        amp: amplitude scale (1 = full scale).
        phase: starting phase, in units of one sample's phase increment.
        samplerate: samples per second.
        bitspersample: sample bit depth; samples fit a signed int this wide.

    Returns:
        list[int]: the rendered samples (len == int(samplerate * time)).
    """
    bytelist = []
    TwoPiDivSamplerate = 2*math.pi/samplerate
    increment = TwoPiDivSamplerate * freq
    incadd = phase*increment
    # Hoisted loop invariants (previously recomputed every iteration);
    # also removed the unused locals `count` and `mid`.
    nsamples = int(samplerate*time)
    fullscale = 2**(bitspersample - 1) - 1
    for i in range(nsamples):
        # Fold the phase accumulator back into [-fullscale, fullscale]
        # (kept from the original; prevents unbounded growth of incadd).
        if incadd > fullscale:
            incadd = fullscale - (incadd - fullscale)
        elif incadd < -fullscale:
            incadd = -fullscale + (-fullscale - incadd)
        # Gaussian window: peak at the note centre, sigma = nsamples/4.
        f = math.e**(-((i-nsamples/2)**2)/(2*(nsamples/4)**2))
        bytelist.append(int(round(f*amp*fullscale*math.sin(incadd))))
        incadd += increment
    return bytelist
# Render two octaves of the chromatic scale as one continuous sample stream.
data = []
for octave in range(2,4):
    for note in notes:
        f=note[1]
        # Double the base frequency per octave; each note lasts 0.3 s.
        data+=make_wave(f*2**octave,.3)
# Normalize to the full signed 16-bit range before writing the WAV file.
scaled = numpy.int16(data/numpy.max(numpy.abs(data)) * 32767)
print(scaled)
write('test0.wav', 44100, scaled)
5,351 | ec39dae7217ddc48b1ab5163d234542cb36c1d48 | # Generated by Django 3.1.1 on 2020-10-14 16:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Recreate Category.main_cat as a nullable FK to Store.maincategory.

    Drop-then-add: note this discards any values previously stored in the
    old main_cat column.
    """

    dependencies = [
        ('Store', '0004_remove_product_mcat'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='category',
            name='main_cat',
        ),
        migrations.AddField(
            model_name='category',
            name='main_cat',
            # SET_NULL keeps categories alive when their main category is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='Store.maincategory'),
        ),
    ]
def maior(a,b):
    """Return the larger of the two values."""
    return a if a > b else b
# Read two integers from the user and print the larger one.
a = int(input("Digite o 1 valor: "))
b = int(input("Digite o 2 valor: "))
print(maior(a,b))
5,353 | 5c06229f8e80a7225620f25941cc5276a9021e53 | #===============================================================================
# @author: Daniel V. Stankevich
# @organization: RMIT, School of Computer Science, 2012
#
#
# This package contains representations of the following models:
# 'Particle' - an atomic element
# 'Swarm' - a set of particles
# 'Neighbourhood' - particles topology
# 'KnapsackSolution' - representation for solution of the problem
# 'TSPSolution' - representation for solution of the problem
#===============================================================================
#===============================================================================
# GENERIC MODELS
#===============================================================================
#---- Particle representation
class ParticleModel:
    """A single PSO particle: position, velocity, personal best,
    neighbourhood best, and current fitness (-1 = not yet evaluated)."""

    # Class-level defaults mirror the per-instance fields set in __init__.
    _position = None
    _velocity = None
    _bestPosition = None
    _nbBestPosition = None
    _fitness = -1

    def __init__(self):
        # Start every field from its "empty" default.
        for attr in ('_position', '_velocity', '_bestPosition', '_nbBestPosition'):
            setattr(self, attr, None)
        self._fitness = -1
#---- Swarm representation
class SwarmModel:
    """A swarm: its particles, neighbourhood topology, and global best."""

    _particles = None
    _neighbourhoods = None
    _bestPosition = None
    _bestPositionFitness = -1

    def __init__(self):
        # Each swarm owns a fresh particle list; bests start unset.
        self._particles, self._neighbourhoods = [], None
        self._bestPosition, self._bestPositionFitness = None, -1
#---- Neighbourhood representation
class NeighbourhoodModel:
    """A particle neighbourhood and the best position found within it."""

    _particles = []
    _bestPosition = None
    _bestPositionFitness = -1

    def __init__(self, particles):
        # Seed the neighbourhood with its member particles; the best
        # position/fitness are discovered during optimisation.
        self._particles = particles
        self._bestPositionFitness = -1
        self._bestPosition = None
#===============================================================================
# PROBLEM SPECIFIC MODELS
#===============================================================================
#---- Knapsack Problem Solution representation
class KnapsackSolutionModel:
    """Knapsack problem instance: candidate items and knapsack capacity."""

    _items = []
    _knapsackSize = None

    def __init__(self, items, size):
        # Capacity first, then the item pool -- order is irrelevant here.
        self._knapsackSize = size
        self._items = items
#---- TSP Problem Solution representation
class TSPSolutionModel:
    """TSP instance: edge weights, city count, start node, and best path."""

    _edges = {}
    _startNode = None
    _numOfCities = None
    _bestPath = []

    def __init__(self, edges, numOfCities, startNode):
        self._edges = edges
        self._numOfCities = numOfCities
        self._startNode = startNode
        # BUGFIX: give every instance its own best-path list; previously all
        # instances shared (and would mutate) the single class-level list.
        self._bestPath = []
5,354 | fd7961d3a94b53ae791da696bb2024165db8b8fc | import pandas as pd
import csv
import numpy as np
import matplotlib.pyplot as plt
#import csv file with recorded left, right servo angles and their corresponding roll and pitch values
df = pd.read_csv('C:/Users/yuyan.shi/Desktop/work/head-neck/kinematics/tabblepeggy reference tables/mid_servo_angle_2deg_3.csv') #change address to csv file address
#remove all the NaN rows (non-numeric cells become NaN first)
df = df.apply (pd.to_numeric, errors='coerce')
df = df.dropna()
#scatter plot of all available left and right servo angles
plt.scatter(df['left_rel_angle'], df['right_rel_angle'])
plt.xlabel('Left servo angle(deg)')
plt.ylabel('Right servo angle(deg)')
plt.title('Plot of left and right servo values')
plt.show()
#scatter plot of all available roll and pitch angles
plt.scatter(df['roll'], df['pitch'])
plt.xlabel('Roll(deg)')
plt.ylabel('Pitch(deg)')
plt.title('Plot of roll and pitch values')
plt.show()
#change roll/pitch to integer
df['roll'] = df['roll'].astype('int8')
df['pitch'] = df['pitch'].astype('int8')
#sort df by roll(ascending) and then pitch(ascending)
# NOTE(review): dead code -- the groupby below reads `df`, not `df_sorted`,
# so this sorted frame is discarded immediately.
df_sorted = df.sort_values(by=['roll', 'pitch']).reset_index(drop=True)
#group dataframe by roll and pitch values (i.e. collect the data sets with the same roll and pitch outputs) and calculate the mean for left and right servo values
df_sorted = df.groupby(['pitch','roll']).mean().reset_index()
#change left and right servo values to integer
df_sorted['left_rel_angle'] = df_sorted['left_rel_angle'].astype('int8')
df_sorted['right_rel_angle'] = df_sorted['right_rel_angle'].astype('int8')
#group left and right servo value together into a tuple
df_sorted['servo_angles'] = df_sorted[['left_rel_angle', 'right_rel_angle']].apply(tuple, axis=1)
#change table format to row index:pitch, column index: roll, create two tables with left and right servo angles
df_sorted_left = df_sorted.pivot(index ='pitch', columns='roll', values='left_rel_angle')
df_sorted_right = df_sorted.pivot(index ='pitch', columns='roll', values='right_rel_angle')
#for every cell that is empty, write it the value of its left- or right-most adjacent available cell
df_sorted_left.bfill(axis ='columns', inplace = True)
df_sorted_left.ffill(axis ='columns', inplace = True)
df_sorted_right.bfill(axis ='columns', inplace = True)
df_sorted_right.ffill(axis ='columns', inplace = True)
#change table type to integer
df_sorted_left = df_sorted_left.astype('int8')
df_sorted_right = df_sorted_right.astype('int8')
#save the left and right servo table files locally (debugging step)
df_sorted_left.to_csv (r'C:/Users/yuyan.shi/Desktop/test files/left_test.csv')
df_sorted_right.to_csv (r'C:/Users/yuyan.shi/Desktop/test files/right_test.csv')
#create empty data table and row
data = []
row = []
for i in range(-55,52): #for i in pitch range (rows); check the left_test.csv or right_test.csv file to find out the range of pitch values
    row = []
    for j in range(-21, 23): #for j in roll range (column); check the left_test.csv or right_test.csv file to find out the range of roll values
        tup = (df_sorted_left[j][i], df_sorted_right[j][i]) #create a tuple in the format of (left_servo_angle, right_servo_angle)
        # print(i,j)
        # print(tup)
        row.append(tup) #append tuple to row
    data.append(row) #append row to data
df_concat = pd.DataFrame(data=data)
# df_concat = df_concat.applymap(str)
df_concat = df_concat.astype(str)
df_concat.to_csv (r'C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')
# df_concat = df_concat.str.replace('(','{')
# df_concat = df_concat.str.replace(')','},')
# df_concat.to_csv (r'C:/Users/yuyan.shi/Desktop/test files/tabblepeggy_2_angle_reference_TEST.csv')
'''
Run the next two lines after you open the csv file and edited the following:
1. change all "(" to "{"
2. change all ")" to "}"
3. delete the first column (index column)
'''
# df_concat = pd.read_csv('C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')
# np.savetxt(r'C:/Users/yuyan.shi/Desktop/test files/mid_servo_2deg_1.h', df_concat, fmt='%s', newline="}, \n {", header="#ifndef NECK_H_\n#define NECK_H_")
5,355 | 8e05b2723d8c50354e785b4bc7c5de8860aa706d | import torch
import torchvision
from torch import nn
def get_resnet18(pre_imgnet = False, num_classes = 64):
    """Build a torchvision ResNet-18 with a replaced classification head.

    Parameters:
        pre_imgnet: load ImageNet-pretrained weights when True.
        num_classes: output dimension of the final fully-connected layer.

    Returns:
        The constructed model.
    """
    model = torchvision.models.resnet18(pretrained = pre_imgnet)
    # BUGFIX: honour num_classes instead of the hard-coded 64, so callers
    # requesting a different head size actually get one (default unchanged).
    model.fc = nn.Linear(512, num_classes)
    return model
|
5,356 | 4a13a0d7aa2371d7c8963a01b7cc1b93f4110d5e | # 백준 문제(2021.5.22)
# 10039번) 상현이가 가르치는 아이폰 앱 개발 수업의 수강생은 원섭, 세희, 상근, 숭, 강수이다.
# 어제 이 수업의 기말고사가 있었고, 상현이는 지금 학생들의 기말고사 시험지를 채점하고 있다.
# 기말고사 점수가 40점 이상인 학생들은 그 점수 그대로 자신의 성적이 된다.
# 하지만, 40점 미만인 학생들은 보충학습을 듣는 조건을 수락하면 40점을 받게 된다.
# 보충학습은 거부할 수 없기 때문에, 40점 미만인 학생들은 항상 40점을 받게 된다.
# 학생 5명의 점수가 주어졌을 때, 평균 점수를 구하는 프로그램을 작성하시오.
# Average five exam scores; any score below 40 is raised to 40 first
# (the mandatory remedial-class rule).
total = 0
for _ in range(5):
    total += max(int(input()), 40)
print(total//5)
5,357 | d6c06a465c36430e4f2d355450dc495061913d77 | import os
import sys
import glob
import argparse
import shutil
import subprocess
import numpy as np
from PIL import Image
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import save_image
sys.path.append(os.pardir)
from models import sagan
from common.dataset.dataset import FaceDataset
from common.utils.config import Config
def parse_args(argv=None):
    """Parse command-line options for the GAN test script.

    Parameters:
        argv: optional explicit argument list; defaults to sys.argv[1:].
              (Added for testability; existing zero-argument callers are
              unaffected.)

    Returns:
        argparse.Namespace with config, gpu, gen, N, row and mode.
    """
    parser = argparse.ArgumentParser(description='MultiClassGAN')
    parser.add_argument('config', type=str)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--gen', type=str, required=True)
    parser.add_argument('--N', type=int, default=16)
    parser.add_argument('--row', type=int, default=4)
    parser.add_argument('--mode', choices=['random', 'morphing', 'attention'], default='random')
    args = parser.parse_args(argv)
    return args
def main():
    """Load a trained generator checkpoint and render samples in the
    requested mode (random grid / morphing gif / attention gif)."""
    global device
    args = parse_args()
    cfg = Config.from_file(args.config)
    out = cfg.train.out
    if not os.path.exists(out):
        os.makedirs(out)
    # Set device (GPU if available and requested, else CPU)
    cuda = torch.cuda.is_available()
    if cuda and args.gpu >= 0:
        print('# cuda available! #')
        device = torch.device(f'cuda:{args.gpu}')
    else:
        device = 'cpu'
    gen = getattr(sagan, cfg.models.generator.name)(z_dim=cfg.models.generator.z_dim, norm=cfg.models.generator.norm).to(device)
    # restore generator weights from the checkpoint
    if args.gen is not None:
        if os.path.isfile(args.gen):
            gen.load_state_dict(torch.load(args.gen)['gen_state_dict'])
        else:
            print(f'=> no checkpoint found at {args.gen}')
            sys.exit()
    # arrange path: outputs go to <checkpoint dir>/../test/
    top, gen_file = os.path.split(args.gen)
    top, _ = os.path.split(top)
    out_dir = os.path.join(top, 'test')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    gen_name, ext = os.path.splitext(gen_file)
    # Render args.N independent outputs in the selected mode.
    for i in range(args.N):
        out_file = os.path.join(out_dir, gen_name + f'_{args.mode}_{i}')
        if args.mode == 'random':
            inference(gen, args, cfg, out_file + '.png')
        elif args.mode == 'morphing':
            #inference(gen, args, cfg, out_file + '.png')
            inference_gif(gen, args, cfg, out_file + '.gif')
        elif args.mode == 'attention':
            inference_attention(gen, args, cfg,out_file + '.gif')
def get_limited_z(size, dim, _min=-0.5, _max=0.5):
    """Sample a (size, dim) tensor of N(0,1) values truncated strictly
    inside (_min, _max).

    Uses vectorized rejection sampling: out-of-range entries are redrawn in
    bulk until all values fall inside the interval.  Same per-element
    truncated-normal distribution as the original element-by-element Python
    loop, but O(1) Python overhead per retry pass instead of O(size*dim).
    """
    z = torch.randn(size, dim)
    mask = (z <= _min) | (z >= _max)
    while mask.any():
        # Redraw only the rejected entries.
        z[mask] = torch.randn(int(mask.sum()))
        mask = (z <= _min) | (z >= _max)
    return z
def inference_attention(gen, args, cfg, out_file, frame_nums=8, fps=20):
    """Render a morphing gif pairing each generated image with its
    (upsampled) self-attention map; frames are written to disk and then
    assembled with ImageMagick `convert`."""
    out_dir, _ = os.path.split(out_file)
    frame_dir = os.path.join(out_dir, 'frames')
    if not os.path.exists(frame_dir):
        os.makedirs(frame_dir)
    gen.train()
    z0 = Variable(get_limited_z(32, cfg.models.generator.z_dim, _min=-1.0, _max=1.0)).to(device)
    image_list = []
    for t in range(frame_nums):
        z1 = Variable(get_limited_z(32, cfg.models.generator.z_dim, _min=-0.5, _max=0.5)).to(device)
        for i in range(fps):
            # Linear interpolation between consecutive latent codes.
            alpha = i / fps
            z = (1 - alpha) * z0 + alpha * z1
            fake, attn = gen(z)
            # Attention comes back flattened; reshape to a square map
            # (assumes a square attention grid -- TODO confirm).
            attn_size = int(np.sqrt(attn.shape[1]))
            attn = attn.permute(0, 2, 1).view(attn.shape[0], -1, attn_size, attn_size)
            attn = F.upsample(attn, scale_factor=4, mode='bilinear')
            attn0 = attn[:1,3:6,:,:].repeat(1,1,1,1)
            # Min-max normalise the attention slice for visualisation.
            _min = torch.min(attn0)
            _max = torch.max(attn0)
            attn0 = (attn0 - _min) / (_max - _min)
            # Map generator output from [-1, 1] to [0, 1], pair with attention.
            fake_image = (fake[:1,:,:,:] + 1.0) * 0.5
            fake_image = torch.cat((fake_image, attn0), dim=0)
            save_image(fake_image.data.cpu(), os.path.join(frame_dir, f'{t*fps+i:04d}.png'), nrow=2)
        z0 = z1
    # Assemble the frames into a gif via ImageMagick (external dependency).
    cmd = ['convert','-layers','optimize','-loop','0','-delay','10',f'{frame_dir}/*.png',f'{out_file}']
    subprocess.run(cmd)
    print(f'saving image to {out_file}')
    #image_list[0].save(out_file, save_all=True, append_images=image_list[1:], duration=200, loop=1)
def inference(gen, args, cfg, out_file):
    """Save a grid of args.row**2 generated images: independent samples in
    'random' mode, or a 2-D latent interpolation grid in 'morphing' mode."""
    gen.train()
    if args.mode == 'random':
        z = Variable(get_limited_z(args.row**2, cfg.models.generator.z_dim)).to(device)
    elif args.mode == 'morphing':
        # Four corner latents; each grid cell is a bilinear blend of them.
        z0 = Variable(get_limited_z(1, cfg.models.generator.z_dim)).to(device)
        z1 = Variable(get_limited_z(1, cfg.models.generator.z_dim)).to(device)
        z2 = Variable(get_limited_z(1, cfg.models.generator.z_dim)).to(device)
        z3 = Variable(get_limited_z(1, cfg.models.generator.z_dim)).to(device)
        for i in range(args.row**2):
            # alpha indexes rows, beta indexes columns of the grid.
            alpha = (i // args.row) / args.row
            beta = (i % args.row) / args.row
            z = z0 if i == 0 else torch.cat((z, (1.0 - alpha) * (1.0 - beta) * z0 + (1.0 - alpha) * beta * z1 + alpha * (1.0 - beta) * z2 + alpha * beta * z3),dim=0)
    x_fake, _ = gen(z)
    # Map generator output from [-1, 1] to [0, 1] before saving.
    x_fake = (x_fake[:,:,:,:] + 1.0) * 0.5
    save_image(x_fake.data.cpu(), out_file, nrow=args.row)
def inference_gif(gen, args, cfg, out_file, frame_nums=8, fps=20):
    """Render a latent-morphing gif: interpolate between random latent
    codes, save per-frame 3x3 grids, then assemble with ImageMagick
    `convert`."""
    out_dir, _ = os.path.split(out_file)
    frame_dir = os.path.join(out_dir, 'frames')
    if not os.path.exists(frame_dir):
        os.makedirs(frame_dir)
    gen.train()
    z0 = Variable(get_limited_z(32, cfg.models.generator.z_dim, _min=-0.5, _max=0.5)).to(device)
    image_list = []
    for t in range(frame_nums):
        z1 = Variable(get_limited_z(32, cfg.models.generator.z_dim, _min=-0.5, _max=0.5)).to(device)
        for i in range(fps):
            # Linear interpolation between consecutive latent codes.
            alpha = i / fps
            z = (1 - alpha) * z0 + alpha * z1
            fake, _ = gen(z)
            # Map generator output from [-1, 1] to [0, 1]; keep 9 samples.
            fake_image = (fake[:9,:,:,:] + 1.0) * 0.5
            save_image(fake_image.data.cpu(), os.path.join(frame_dir, f'{t*fps+i:04d}.png'), nrow=3)
        z0 = z1
    print(f'saving image to {out_file}')
    # Assemble the frames into a gif via ImageMagick (external dependency).
    cmd = ['convert','-layers','optimize','-loop','0','-delay','10',f'{frame_dir}/*.png',f'{out_file}']
    subprocess.run(cmd)
    #image_list[0].save(out_file, save_all=True, append_images=image_list[1:], duration=200, loop=1)
# Script entry point.
if __name__ == '__main__':
    main()
|
5,358 | bc0846397a5ad73b1c4b85e12864b27ef4fd08d7 | import ctypes
import win32con
import request_spider
from selenium_tickets_spider import *
from threading import Thread
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import Qt, QThread, pyqtSignal
import sys, time, re
import datetime
# Cross-thread flags polled by the background threads below.
SESSION_DATA = False  # set True when new session data should be forwarded to the UI
SHOW_S_P = False      # gates refreshing the session/price combo boxes
class Worker(QThread):
    """Background thread that polls the SESSION_DATA flag and re-emits it
    as a Qt signal so the GUI thread can react."""

    valueChanged = pyqtSignal(int)  # value-changed signal
    handle = -1  # Win32 thread handle (set in run; -1 until then)

    def run(self):
        global SESSION_DATA,EXIT_COND
        try:
            # Grab a Win32 handle to this thread (Windows-only API).
            self.handle = ctypes.windll.kernel32.OpenThread(  # @UndefinedVariable
                win32con.PROCESS_ALL_ACCESS, False, int(QThread.currentThreadId()))
        except Exception as e:
            print('get thread handle failed', e)
        # print('thread id', int(QThread.currentThreadId()))
        # Poll loop: forward the SESSION_DATA flag as a Qt signal.
        while True:
            if SESSION_DATA:
                self.valueChanged.emit(1024)
                SESSION_DATA = False
            time.sleep(0.1)

    def exit_thread(self):
        # Hard-exits the whole process (os._exit skips all cleanup).
        # NOTE(review): `os` is presumably provided by the star import of
        # selenium_tickets_spider above -- confirm.
        os._exit(122)
class Ui_MainWindow(QMainWindow):
threads = []
keywordJudge = ''
    def __init__(self):
        """Start the log-pump daemon threads and the background Worker."""
        super(Ui_MainWindow, self).__init__()
        # self.ex = Example()
        self.buy_succeed_count = 0  # running count of successful purchases
        # Daemon threads that continuously pump log output into the UI.
        for func in [self.output_buy_record, self.output_login_status,self.output_register_record]:
            thr = Thread(target=func)
            thr.setDaemon(True)
            thr.start()
        # Worker thread: forwards SESSION_DATA changes to ex.create_c.
        self._thread = Worker(self)
        self._thread.finished.connect(self._thread.deleteLater)
        self._thread.valueChanged.connect(ex.create_c)
        self._thread.start()
    def setupUi(self, MainWindow):
        """Build the three-tab main window (login / purchase / register),
        wire button handlers, and set all display text."""
        # MainWindow.setStyleSheet("#MainWindow{background-color: yellow}")
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(640, 478)
        # MainWindow.setMinimumSize(640, 478)
        # MainWindow.setMaximumSize(640, 478)
        # Disable the maximize button
        MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
        # Fix the window size
        MainWindow.setFixedSize(self.width(), self.height())
        # MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461))
        self.tabWidget.setObjectName("tabWidget")
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        # Login button
        self.pushButton = QtWidgets.QPushButton(self.tab)
        self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30))
        self.pushButton.setObjectName("pushButton")
        # Input box for the number of accounts to log in
        self.lineEdit_tab = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))
        self.lineEdit_tab.setPlaceholderText(" 请输入登陆个数")
        # Login log label
        self.label_0 = QtWidgets.QLabel(self.tab)
        self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))
        self.label_0.setObjectName("label_0")
        # Log text browser on the login tab
        self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)
        self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))
        self.textBrowser_2.setObjectName("textBrowser_2")
        # Login tab
        self.tabWidget.addTab(self.tab, "")
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        # NOTE(review): self.tab is added to the tab widget a second time
        # here -- looks like a copy-paste slip; confirm intended behaviour.
        self.tabWidget.addTab(self.tab, "")
        self.tab_3 = QtWidgets.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.lineEdit = QtWidgets.QLineEdit(self.tab_2)
        self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))
        self.lineEdit.setObjectName("lineEdit")
        # "Search product name" button
        self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)
        self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_2.clicked.connect(self.search_1)
        self.label = QtWidgets.QLabel(self.tab_2)
        self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.tab_2)
        self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))
        self.label_2.setObjectName("label_2")
        self.comboBox = QtWidgets.QComboBox(self.tab_2)
        self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))
        self.comboBox.setObjectName("comboBox")
        # self.comboBox.currentText()
        self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)
        self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))
        self.comboBox_2.setObjectName("comboBox_2")
        # Quantity selection label
        self.label_3 = QtWidgets.QLabel(self.tab_2)
        self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))
        self.label_3.setObjectName("label_3")
        # Quantity input box
        self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)
        self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))
        self.lineEdit_1.setObjectName("lineEdit_1")
        # "Purchased so far" labels
        self.label_6 = QtWidgets.QLabel(self.tab_2)
        self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))
        self.label_6.setObjectName("label_6")
        self.label_7 = QtWidgets.QLabel(self.tab_2)
        self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))
        self.label_7.setObjectName("label_7")
        self.label_7.setStyleSheet("font-size:16px;color:red")  # set font colour
        self.label_8 = QtWidgets.QLabel(self.tab_2)
        self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))
        self.label_8.setObjectName("label_8")
        self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)
        self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))
        self.lineEdit_8.setObjectName("lineEdit_8")
        self.lineEdit_8.setText('4')
        # Buy button: click after all options are chosen
        self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)
        self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31))
        self.pushButton_3.setObjectName("pushButton_3")
        self.pushButton_3.clicked.connect(self.search_2)
        # Quit-program button
        self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)
        self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))
        self.pushButton_quit.setObjectName("pushButton_quit")
        self.pushButton_quit.clicked.connect(self.exit_quit)
        self.label_4 = QtWidgets.QLabel(self.tab_2)
        self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))
        self.label_4.setObjectName("label_4")
        # Purchase log output
        self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2)
        self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))
        self.textBrowser_1.setObjectName("textBrowser")
        # Add display data
        # self.textBrowser_1.append('购买日志')
        # Ticket-grabbing center tab
        self.tabWidget.addTab(self.tab_2, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Account registration tab
        self.tabWidget.addTab(self.tab_3, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Register button
        self.pushButton_4 = QtWidgets.QPushButton(self.tab_3)
        self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30))
        self.pushButton_4.setObjectName("pushButton")
        # Input box for the number of accounts to register
        self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)
        self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))
        self.lineEdit_tab3.setPlaceholderText(" 请输入注册个数")
        # Registration log output
        self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)
        self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))
        self.textBrowser_3.setObjectName("textBrowser_3")
        self.label_5 = QtWidgets.QLabel(self.tab_3)
        self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))
        self.label_5.setObjectName("label_5")
        # Display text and signal wiring
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "城市售票网-抢票"))
        self.pushButton.setText(_translate("MainWindow", "点击登录"))
        self.pushButton.clicked.connect(self.login)
        self.pushButton_4.clicked.connect(self.register)
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "账号登录"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "抢购中心"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "账号注册"))
        self.label_0.setText(_translate("MainWindow", "登录日志:"))
        self.pushButton_2.setText(_translate("MainWindow", "搜索名称"))
        self.pushButton_3.setText(_translate("MainWindow", "点击购买"))
        self.pushButton_quit.setText(_translate("MainWindow", "退出程序"))
        self.pushButton_4.setText(_translate("MainWindow", "点击注册"))
        self.label.setText(_translate("MainWindow", "已择场次:"))
        self.label_2.setText(_translate("MainWindow", "已择价格:"))
        self.label_3.setText(_translate("MainWindow", "购买总数量:"))
        self.label_4.setText(_translate("MainWindow", "购买日志:"))
        self.label_5.setText(_translate("MainWindow", "注册日志:"))
        self.label_6.setText(_translate("MainWindow", "已购买:"))
        self.label_7.setText(_translate("MainWindow", "0"))
        self.label_8.setText(_translate("MainWindow", "每个账号购买数量:"))
        self.textBrowser_3.setText("")
        self.textBrowser_2.setText("")
        self.textBrowser_1.setText("")
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    # Executed when the login button is clicked
    def login(self):
        """Validate the requested account count, read stored login IDs from
        infomation.txt, and spawn one login browser thread per account."""
        try:
            regiterSum = int(self.lineEdit_tab.text())
        except Exception as err:
            res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)  # message box prompt
            return
        ipList = [""]
        # ipList = request_tickets_spider.get_ip_list(10)
        self.textBrowser_2.append("开始登陆,请等待...")
        userinfo_list = []
        with open('infomation.txt', 'rt', encoding='utf-8') as f:
            info_record = re.findall("'loginId': '(.*?)'", f.read())
            for loginId in info_record:
                userinfo_list.append(loginId)
        # One daemon thread per account (multithreaded login)
        for thr in userinfo_list[:regiterSum]:
            grabber = BuyUrbtix()
            ip = random.choice(ipList)
            Thread_name = Thread(target=grabber.openSite, args=(thr,ip))
            self.threads.append(Thread_name)
            Thread_name.setDaemon(True)
            Thread_name.start()
# 点击搜索按钮执行
def search_1(self):
keyword = self.lineEdit.text()
self.textBrowser_1.append("正在查询 %s 的所有场次和价格..." % keyword)
if keyword == self.keywordJudge:
self.textBrowser_1.append("请等待...")
self.keywordJudge = ''
return
self.keywordJudge = keyword
Thread_name = Thread(target=self.refresh)
self.threads.append(Thread_name)
Thread_name.start()
Thread_01 = Thread(target=self.show_session_data)
self.threads.append(Thread_01)
Thread_01.start()
    # Mirror the chosen sessions and prices into the main window.
    def show_session_data(self):
        """Poll forever; when the picker window publishes a selection
        (SHOW_S_P set), copy it into the two combo boxes and consume it."""
        global SHOW_S_P
        self.comboBox_2.clear()
        self.comboBox.clear()
        while True:
            # if self.ex.sessionName and self.ex.sessionPrice:
            if ex.sessionName and ex.sessionPrice and SHOW_S_P:
                for i,eventDateName in enumerate(ex.sessionName):
                    self.comboBox_2.addItem(eventDateName, i)
                for i,price in enumerate(ex.sessionPrice):
                    self.comboBox.addItem(str(price), i)# price entry
                self.comboBox.setCurrentIndex(0)
                self.comboBox_2.setCurrentIndex(0)
                # Consume the published selection so it is not re-added.
                ex.sessionName.clear()
                ex.sessionPrice.clear()
                SHOW_S_P = False
            time.sleep(0.2)
    # Fetch query results and report status to the UI.
    def refresh(self):
        """Query sessions/prices for the entered keyword (runs on a thread);
        publishes results via ex.eventDateList and the SESSION_DATA flag."""
        try:
            if self.lineEdit.text():
                global eventDateList
                keyword = self.lineEdit.text()
                my_attr['selNum'] = self.lineEdit_8.text()
                ex.eventDateList = request_spider.get_date_url(keyword)
                if ex.eventDateList:
                    self.textBrowser_1.append("查询成功,请在选择界面选择场次和价格...")
                    global SESSION_DATA
                    SESSION_DATA = True
                    # ex.create_c()
                else:
                    self.textBrowser_1.append("查询失败,请确定您查询的节目存在...")
            else:
                # Empty keyword: terminate this worker thread.
                sys.exit()
        except Exception as err:
            self.textBrowser_1.append("查询失败,请确定您查询的节目存在...")
            print(err)
            sys.exit()
# 日志更新
def output_login_status(self):
# 登录成功输出
while True:
# 登陆日志
login_record_list = login_record()
if login_record_list:
for i in login_record_list:
self.textBrowser_2.append(i)
self.textBrowser_2.moveCursor(self.textBrowser_2.textCursor().End)
login_record_list.remove(i)
time.sleep(0.1)
# 购买日志
def output_buy_record(self):
while True:
buy_record_list = buy_record()
if buy_record_list:
for record in buy_record_list:
if "购买成功" in record:
self.buy_succeed_count += 1
self.label_7.setText(str(self.buy_succeed_count))
self.textBrowser_1.append(record)
self.textBrowser_1.moveCursor(self.textBrowser_1.textCursor().End)
buy_record_list.remove(record)
time.sleep(0.1)
# 注册日志
def output_register_record(self):
while True:
register_record_list = register_record()
if register_record_list:
for i in register_record_list:
self.textBrowser_3.append(i)
self.textBrowser_3.moveCursor(self.textBrowser_3.textCursor().End)
register_record_list.remove(i)
time.sleep(0.1)
    # Runs after purchase options are chosen and the buy button is clicked.
    def search_2(self):
        """Validate inputs and hand the selection over to the buying workers
        via the shared my_attr dict."""
        if not self.lineEdit_1.text():
            self.textBrowser_1.append("请输入购买总数量...")
            return
        # A purchase is already configured and in progress.
        if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']:
            self.textBrowser_1.append("正在购买,请等待...")
            return
        # Not on sale yet: wait for the sale time on a daemon thread.
        if ex.saleTime:
            Thread_name = Thread(target=self.wait_sale)
            Thread_name.setDaemon(True)
            Thread_name.start()
            return
        my_attr['gross'] = self.lineEdit_1.text()
        my_attr['selNum'] = self.lineEdit_8.text()
        my_attr['selPrice'] = ex.eventPrice
        my_attr['selSeatUrl'] = ex.eventUrl
        self.textBrowser_1.append("开始购买,请您耐心等待...")
    def wait_sale(self):
        """Block until the published sale time (ex.saleTime: [Y, M, D, H]),
        then publish the purchase parameters to the buying workers."""
        dateList = ex.saleTime
        print("%s年%s月%s日%s时开始售票,等待购买!" % tuple(dateList))
        self.textBrowser_1.append("%s年%s月%s日%s时开始售票,等待购买!" % tuple(dateList))
        while True:
            # Sale timestamp: the four fields padded to YYYYMMDDHH0000.
            saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) + '0000', "%Y%m%d%H%M%S")))
            if saleTimestamp <= int(time.time()):
                print("%s年%s月%s日%s时开始售票,开始购买!" % tuple(dateList))
                self.textBrowser_1.append("%s年%s月%s日%s时开始售票,开始购买!" % tuple(dateList))
                break
            time.sleep(1)
        my_attr['gross'] = self.lineEdit_1.text()
        my_attr['selNum'] = self.lineEdit_8.text()
        my_attr['selPrice'] = ex.eventPrice
        my_attr['selSeatUrl'] = ex.eventUrl
        self.textBrowser_1.append("开始购买,请您耐心等待...")
#点击注册执行并打印注册
def register(self):
self.textBrowser_3.append("开始注册,请等待...")
try:
regiterSum = int(self.lineEdit_tab3.text())
except Exception as err:
res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) # 提示框
return
threads = []
for _ in range(regiterSum):
uper = Register()
Thread_name = Thread(target=uper.registerInfo)
Thread_name.setDaemon(True)
Thread_name.start()
threads.append(Thread_name)
# 退出程序
def exit_quit(self):
global EXIT_COND
res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.Yes | QMessageBox.No) # 提示框
if res == QMessageBox.Yes:
self._thread.exit_thread()
time.sleep(1)
sys.exit()
else:
pass
class Example(QMainWindow):
    """Pop-up picker window for choosing sessions and prices of an event.

    NOTE(review): these are *class-level* mutable attributes, shared by every
    instance — confirm that only one Example window ever exists.
    """
    sessionList = []
    priceList = []
    sessionListEvn = []
    priceListEvn = []
    eventDateList = []
    eventUrl = []
    eventPrice = []
    sessionName = []
    sessionPrice = []
    saleTime = []
    buyNum = 1
    def __init__(self):
        super(QMainWindow, self).__init__()
        self.setWindowTitle('城市售票网') # main window
        self.resize(680, 800)
        # Disable maximizing
        self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
        # Fix the window size
        self.setFixedSize(self.width(), self.height())
        self.w = QWidget()
        self.w.setFixedWidth(680)
        self.w.setFixedHeight(540)
        self.setCentralWidget(self.w)
        self.topFiller = QWidget()
        # Put the layout into window w
        # Create a scroll area
        self.scroll = QScrollArea()
        self.scroll.setWidget(self.topFiller) # scroll area wraps self.topFiller
        self.vbox = QVBoxLayout() # box layout
        self.vbox.addWidget(self.scroll) # put the scroll area into the layout
        self.w.setLayout(self.vbox)
        self.initUI()
    def closeEvent(self, QCloseEvent):
        res = QMessageBox.question(self,'提示','您确定选择无误吗?',QMessageBox.Yes|QMessageBox.No,QMessageBox.No) # Yes/No buttons; No (default) keeps the window open
        if res == QMessageBox.Yes:
            global SHOW_S_P
            SHOW_S_P = True
            QCloseEvent.accept()
            self.cb1.setChecked(False)
            self.cb2.setChecked(False)
        else:
            QCloseEvent.ignore()
    def initUI(self):
        # Create the select-all checkboxes
        self.cb1 = QCheckBox('全选',self.topFiller)
        self.cb1.move(20,30)
        self.cb2 = QCheckBox('全选',self)
        self.cb2.move(20, 570)
        # Create the buttons
        bt1 = QPushButton('确定',self)
        bt2 = QPushButton('刷新',self)
        bt1.move(20,760)
        bt2.move(120,760)
        # stateChanged fires whenever the user toggles a checkbox,
        # so connect it to the matching slot function.
        self.cb1.stateChanged.connect(self.changecb1) # session select-all -> slot
        self.cb2.stateChanged.connect(self.changecb2) # price select-all -> slot
        bt1.clicked.connect(self.pitch_on) # collect the selected items
        bt2.clicked.connect(self.create_c) # rebuild the checkbox lists
    def create_c(self):
        """(Re)build the session and price checkboxes from eventDateList."""
        if self.eventDateList:
            self.sessionList = [eventDateName['eventDateName'] for eventDateName in self.eventDateList]
            self.priceList = [price for price in self.eventDateList[0]['priceList']]
            # print(self.priceList)
            # print(self.sessionList)
            ex.show()
        else:
            ex.show()
            QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)
            return
        # Clear the widgets left over from the previous search
        if self.sessionListEvn and self.priceListEvn:
            for s_evn in self.sessionListEvn:
                s_evn.deleteLater()
            for p_evn in self.priceListEvn:
                p_evn.deleteLater()
            self.sessionListEvn.clear()
            self.priceListEvn.clear()
            self.eventPrice.clear()
            self.eventUrl.clear()
        # Show the session checkboxes
        for i,item in enumerate(self.sessionList):
            cb = QCheckBox(item, self.topFiller)
            cb.move(30, 60+30*i)
            self.sessionListEvn.append(cb)
            cb.show()
        self.topFiller.setMinimumSize(580,(len(self.sessionList)+5)*30) # size the scrollable area
        # Show the price checkboxes (two columns)
        for i,item in enumerate(self.priceList):
            cb_1 = QCheckBox(str(item), self)
            if i % 2 == 0:
                i = i // 2 + 1
                cb_1.move(30, 570+30*i)
            else:
                i = i // 2 + 1
                cb_1.move(330, 570+30*i)
            self.priceListEvn.append(cb_1)
            cb_1.show()
    def pitch_on(self):
        """Collect the checked sessions/prices; close once something is chosen."""
        if self.sessionList:
            for i in self.sessionListEvn: # iterate all session checkboxes
                if i.isChecked(): # only the checked ones
                    for eventDate in self.eventDateList: # scan all event data
                        if eventDate['eventDateName'] == i.text(): # match by label
                            if 'saleDate' in eventDate:
                                self.saleTime = eventDate['saleDate']
                                # print(eventDate['saleDate'])
                            self.eventUrl.append(eventDate["eventUrl"]) # keep the selected URL
                            self.sessionName.append(eventDate['eventDateName'])
            for i in self.priceListEvn:
                if i.isChecked():
                    if i.text() in self.eventDateList[0]['priceList']:
                        self.eventPrice.append(str(self.eventDateList[0]['priceList'].index(i.text())))
                        self.sessionPrice.append(i.text())
            # With data selected, close the window; otherwise prompt the user
            if self.eventPrice and self.eventUrl:
                self.close()
            else:
                res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?', QMessageBox.Yes | QMessageBox.No,
                                           QMessageBox.No) # Yes/No buttons; No (default) keeps the window open
                if res == QMessageBox.Yes:
                    self.close()
        else:
            print("输入内容不存在!")
    # Select-all slot (sessions)
    def changecb1(self):
        if self.cb1.checkState() == Qt.Checked:
            for qcb in self.sessionListEvn:
                qcb.setChecked(True)
        elif self.cb1.checkState() == Qt.Unchecked:
            for qcb in self.sessionListEvn:
                qcb.setChecked(False)
    # Select-all slot (prices)
    def changecb2(self):
        if self.cb2.checkState() == Qt.Checked:
            for qcb in self.priceListEvn:
                qcb.setChecked(True)
        elif self.cb2.checkState() == Qt.Unchecked:
            for qcb in self.priceListEvn:
                qcb.setChecked(False)
    # Refresh helper: wait until data arrives, then rebuild once.
    def refresh_cb(self):
        while True:
            if self.sessionList and self.priceList:
                self.create_c()
                break
            time.sleep(0.2)
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv) # the QApplication hosting the program
    ex = Example()
    MainWindow = QtWidgets.QMainWindow() # QMainWindow that carries the widgets/controls
    ui = Ui_MainWindow() # instance of the generated UI class
    ui.setupUi(MainWindow) # build the UI onto the QMainWindow created above
    MainWindow.show() # show the main window
    # ex.show()
    sys.exit(app.exec_())
|
5,359 | 849343561dd9bdcfc1da66c604e1bfa4aa10ddf3 | # bot.py
import os
import sqlite3
import json
import datetime
from dotenv import load_dotenv
import discord
from discord.ext import commands
from discord.ext.commands import Bot
from cogs.utils import helper as h
# Member intents are required to receive guild member events.
intents = discord.Intents.default()
intents.members = True
# Configuration is read from the local .env file.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
PREFIX = os.getenv('BOT_PREFIX')
# Absolute directory of this file; used for state/db paths.
dir_path = os.path.dirname(os.path.realpath(__file__))
extensions = ['cogs.general', 'cogs.events', 'cogs.moderation']
class LLKEventsBot(Bot):
    """LLK events Discord bot.

    On construction it loads (or initializes) two JSON state files
    (embed_id.json, roles.json) and opens the events SQLite DB; cog
    extensions are loaded in on_ready().
    """
    def __init__(self):
        super().__init__(
            description="Bot created by Oto#2494",
            command_prefix=PREFIX,
            owner_id=271992863175344130,
            intents=intents,
            help_command=None
        )
        # NOTE(review): assumes the db/ directory already exists; on_ready()
        # only creates it after login — confirm first-run behaviour.
        print('\nLoading embed data...')
        try:
            with open(f'{dir_path}/db/embed_id.json', 'r+') as f:
                try:
                    self.embed_data = json.load(f)
                    if self.embed_data:
                        self.embed_id = self.embed_data['eventEmbed']['id']
                except (ValueError, KeyError):
                    # File exists but is empty/corrupt: reset to a blank record.
                    self.embed_data = {"eventEmbed": {
                        "id": None}}
                    self.embed_id = self.embed_data['eventEmbed']['id']
                    json.dump(self.embed_data, f, indent=4)
        except OSError:
            # File missing: create it with a blank record. (Fixes the
            # original, which opened the file without binding it to `f` and
            # then read the nonexistent attribute self.bot.embed_id.)
            with open(f'{dir_path}/db/embed_id.json', 'w+') as f:
                self.embed_data = {"eventEmbed": {
                    "id": None
                }}
                self.embed_id = self.embed_data['eventEmbed']['id']
                json.dump(self.embed_data, f, indent=4)
        print('Loading permissions data...')
        try:
            # Path made consistent with the other state files (the original
            # read the CWD-relative 'db/roles.json' but wrote under dir_path).
            with open(f'{dir_path}/db/roles.json', 'r+') as f:
                try:
                    self.perms_data = json.load(f)
                    if self.perms_data:
                        self.perms = self.perms_data['permissions']
                except Exception as e:
                    print(f'{e}')
        except OSError:
            with open(f'{dir_path}/db/roles.json', 'w+') as f:
                self.perms_data = {"permissions": {
                    "admins": [],
                    "mods": [],
                    "hosts": []
                }}
                self.perms = self.perms_data['permissions']
                json.dump(self.perms_data, f, indent=4)
        print('Loading roles DB...')
        self.conn = sqlite3.connect(f'{dir_path}/db/events.db')
        self.cursor = self.conn.cursor()
        self.cursor.execute("""
            CREATE TABLE IF NOT EXISTS events (
                event_id STRING NOT NULL,
                user_id STRING NOT NULL,
                description STRING NOT NULL,
                target STRING NOT NULL
            )
        """)
    async def on_ready(self):
        """Create runtime directories, load cog extensions, set presence."""
        if not os.path.exists('db'):
            os.makedirs('db')
        if not os.path.exists('logs'):
            os.makedirs('logs')
        print('\nLoading extensions...')
        for extension in extensions:
            print(f'Loading {extension}')
            # Use self instead of the module-level `bot` global.
            self.load_extension(extension)
        await self.change_presence(activity=discord.Game(f'{PREFIX}help'))
        print(f'\nLogged in as: {self.user.name} - {self.user.id}\nVersion: {discord.__version__}\n')
    async def on_command_error(self, ctx, error):
        """Translate common command errors into user-facing messages and log
        anything unexpected to a dated file under logs/."""
        if isinstance(error, commands.BotMissingPermissions):
            await ctx.send(f'I have no permission to do that')
            return
        elif isinstance(error, commands.CheckFailure):
            await ctx.send(f'You have no permission to use this command')
            return
        elif isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(f'You forgot to inform the following parameter: {error.param}')
        else:
            d = datetime.datetime.now()
            with open(f'logs/{d.year}-{d.month}-{d.day}.log', 'a', encoding='utf8') as f:
                f.write(
                    '-------------\n'
                    f'{d.hour}:{d.minute}:{d.second}.{d.microsecond}\n'
                    f'Command: {ctx.message.content}\n'
                    f'Author: {ctx.author}\n'
                    f'Exception: {type(error)}\n'
                    f'Description: {error}\n'
                    '-------------\n\n'
                )
            await ctx.send(f'It seems something went wrong:```{error}```')
            return
# Instantiate and start the bot (blocks until the connection closes).
bot = LLKEventsBot()
bot.run(TOKEN)
|
5,360 | df4c03d9faedf2d347593825c7221937a75a9c10 | from api import *
# Shared API instance; its `n` is assigned per run by first_bad_version().
version_api = api(0)
def is_bad_version(v):
    """Return True if version *v* is bad, as reported by the version API."""
    return version_api.is_bad(v)
def first_bad_version(n):
    """Binary-search versions 1..n for the first bad one.

    Returns a tuple (first_bad_version, number_of_api_calls).
    """
    # -- DO NOT CHANGE THIS SECTION
    version_api.n = n
    # --
    calls = 0
    lo, hi = 1, n
    while lo < hi:
        # Midpoint without overflow concerns; equals (lo + hi) // 2.
        probe = lo + (hi - lo) // 2
        calls += 1
        if is_bad_version(probe):
            hi = probe          # probe is bad: answer is at or before it
        else:
            lo = probe + 1      # probe is good: answer is strictly after it
    return lo, calls
|
5,361 | c0218acadb9e03359ac898cf3bb4898f516400e5 | from setuptools import setup
# Packaging metadata for the google-drive-helpers distribution.
setup(name='google-drive-helpers',
      version='0.1',
      description='Helper functions for google drive',
      url='https://github.com/jdoepfert/google-drive-helpers',
      license='MIT',
      packages=['gdrive_helpers'],
      install_requires=[
          'google-api-python-client',
      ],
      zip_safe=False)
|
5,362 | 8af3bb1b33a01353cd7f26c9496485e36d954edb | import json
import webapp2
import requests
import requests_toolbelt.adapters.appengine
from . import mongodb
import datetime
from bson.json_util import dumps
class RestHandler(webapp2.RequestHandler):
    """Base handler: JSON responses plus thin wrappers over the Trello card API."""
    def dispatch(self):
        # time.sleep(1)
        super(RestHandler, self).dispatch()
    def send_json(self, content):
        """Write already-serialized JSON with the proper content type."""
        self.response.headers['content-type'] = 'application/json'
        self.response.write(content)
    def update_trello_card(self, id, params):
        """PUT an update to the Trello card with the given id."""
        card_url = 'https://api.trello.com/1/cards/' + id
        return requests.put(card_url, params=params)
    def create_trello_card(self, params):
        """POST a new Trello card."""
        card_url = 'https://api.trello.com/1/cards'
        return requests.post(card_url, params=params)
    def delete_trello_card(self, id, params):
        """DELETE the Trello card with the given id."""
        card_url = 'https://api.trello.com/1/cards/' + id
        return requests.delete(card_url, params=params)
class TasksHandler(RestHandler):
    """GET /rest/tasks?board_id=...: list tasks attached to a board."""
    def get(self):
        board_id = self.request.get('board_id')
        data = mongodb.list_where_value_matches('tasks', 'boards', board_id)
        self.send_json(dumps(data))
class CreateTaskHandler(RestHandler):
    """POST /rest/task/create: create Trello card(s) for a task and persist it."""
    def post(self):
        # SECURITY NOTE: hardcoded Trello credentials; these should be moved
        # to configuration/environment and rotated.
        key = '61cf04749fda864dd404009216cbe106'
        token = '2caecaa0245326fcc4b949a4780ad7fdcb8cd8d77b4394ad8590d244dbfa542f'
        payload = json.loads(self.request.body)
        phase_id = payload['phase']['projectManager']['listId']
        team_phase_id = payload['phase']['team']['listId']
        params = { 'idList': phase_id, 'name': payload['name'], 'desc': payload['desc'], 'key': key, 'token': token }
        team_params = { 'idList': team_phase_id, 'name': payload['name'], 'desc': payload['desc'], 'key': key, 'token': token }
        requests_toolbelt.adapters.appengine.monkeypatch()
        response = self.create_trello_card(params)
        payload['projectManagementTrelloId'] = json.loads(response.text)['id']
        if team_phase_id is not None:
            response = self.create_trello_card(team_params)
            # print() form works on both Python 2 and 3 (was a bare
            # Python-2-only `print response` statement).
            print(response)
            payload['teamTrelloId'] = json.loads(response.text)['id']
        payload['dateLastActivity'] = datetime.datetime.utcnow().isoformat()
        mongodb.create(payload, 'tasks')
        self.send_json(dumps(response.text))
class UpdateTaskHandler(RestHandler):
    """POST /rest/task/update: sync a task's Trello card(s) and persist it."""
    def post(self):
        # SECURITY NOTE: hardcoded Trello credentials; should come from config.
        key = '61cf04749fda864dd404009216cbe106'
        token = '2caecaa0245326fcc4b949a4780ad7fdcb8cd8d77b4394ad8590d244dbfa542f'
        payload = json.loads(self.request.body)
        phase_id = payload['phase']['projectManager']['listId']
        team_phase_id = payload['phase']['team']['listId']
        params = { 'idList': phase_id, 'name': payload['name'], 'desc': payload['desc'], 'key': key, 'token': token }
        team_params = { 'idList': team_phase_id, 'name': payload['name'], 'desc': payload['desc'], 'key': key, 'token': token }
        requests_toolbelt.adapters.appengine.monkeypatch()
        response = self.update_trello_card(payload['projectManagementTrelloId'], params)
        if team_phase_id is not None:
            # No team card yet: create one; otherwise update the existing card.
            if payload.get('teamTrelloId') is None:
                response = self.create_trello_card(team_params)
                payload['teamTrelloId'] = json.loads(response.text)['id']
            else:
                response = self.update_trello_card(payload['teamTrelloId'], team_params)
        else:
            # Team phase removed entirely: delete the team card.
            if payload['phase']['team']['phase'] is None:
                params = { 'key': key, 'token': token }
                response = self.delete_trello_card(payload['teamTrelloId'], params)
        payload['dateLastActivity'] = datetime.datetime.utcnow().isoformat()
        mongodb.update(payload, payload['_id'], 'tasks')
        self.send_json(dumps(response.text))
class DeleteTaskHandler(RestHandler):
    """POST /rest/task/delete: remove a task's Trello card(s) and DB record."""
    def post(self):
        # SECURITY NOTE: hardcoded Trello credentials; should come from config.
        key = '61cf04749fda864dd404009216cbe106'
        token = '2caecaa0245326fcc4b949a4780ad7fdcb8cd8d77b4394ad8590d244dbfa542f'
        payload = json.loads(self.request.body)
        params = { 'key': key, 'token': token }
        requests_toolbelt.adapters.appengine.monkeypatch()
        response = self.delete_trello_card(payload['projectManagementTrelloId'], params)
        if payload.get('teamTrelloId') is not None:
            response = self.delete_trello_card(payload['teamTrelloId'], params)
        mongodb.delete(payload['_id']['$oid'], 'tasks')
        self.send_json(dumps(response.text))
# URL routes for the task REST API.
APP = webapp2.WSGIApplication([
    ('/rest/tasks', TasksHandler),
    ('/rest/task/create', CreateTaskHandler),
    ('/rest/task/update', UpdateTaskHandler),
    ('/rest/task/delete', DeleteTaskHandler)
], debug=True)
|
5,363 | 1cccb37a7195b1555513a32ef33b35b0edcd5eb1 | import requests
import datetime
import collections
import csv
import sys
import os
import os.path
# One daily OHLC row parsed from Yahoo's CSV (all fields coerced to float).
History = collections.namedtuple('History', ['open', 'high', 'low', 'close', 'volume', 'adjustment'])
def history(symbol, since, until):
    """Yield daily History rows for *symbol* between the given dates,
    oldest first, from Yahoo's ichart CSV endpoint.

    Yahoo's month parameters are 0-based, hence the `- 1`.
    NOTE(review): this endpoint was retired by Yahoo long ago — confirm
    the data source before reuse.
    """
    response = requests.get('http://ichart.finance.yahoo.com/table.csv?s=%s&d=%d&e=%d&f=%d&g=d&a=%d&b=%d&c=%d&ignore=.csv' % (
        symbol,
        until.month - 1,
        until.day,
        until.year,
        since.month - 1,
        since.day,
        since.year,
    ))
    # Reverse the rows (Yahoo returns newest first), skip header and trailing
    # blank, drop the date column, and coerce the remaining fields to floats.
    for row in csv.reader(response.text.split('\n')[::-1][1:-1]):
        yield History._make(map(float, row[1:]))
def last(symbol, start, number):
    """Return history for roughly *number* trading days before *start*.

    NOTE(review): the weekend adjustments look suspect — a Monday `until`
    is only moved back one day (to Sunday), and a Saturday `until` is not
    handled at all. Confirm the intended window before relying on it.
    """
    until = start - datetime.timedelta(days=1)
    if until.weekday() == 6:
        until -= datetime.timedelta(days=2)
    elif until.weekday() == 0:
        until -= datetime.timedelta(days=1)
    since = until - datetime.timedelta(days=number - 1)
    if since.weekday() in [0, 6]:
        since -= datetime.timedelta(days=2)
    return history(symbol, since, until)
def recent(symbol):
    """Return today's quote for *symbol* as a History row.

    Note: `.next()` on the csv reader is Python 2 syntax — this module
    targets Python 2, like the `print` statements below.
    """
    response = requests.get('http://download.finance.yahoo.com/d/quotes.csv?s=%s&f=d1ohgpvp&e=.csv' % symbol)
    return History._make(map(float, csv.reader(response.text.split('\n', 1)).next()[1:]))
def qualify(symbol):
    """Screen *symbol*: True when the close two days ago is below
    yesterday's low, and yesterday's close is above today's low."""
    today = datetime.date.today()
    # 'yy' = two days ago, 'y' = yesterday (the last two trading days).
    data = dict(zip(['yy', 'y'], last(symbol, today, 2)))
    try:
        data['t'] = recent(symbol)
    except ValueError:
        # Today's quote line could not be parsed as numbers.
        return False
    return data['yy'].close < data['y'].low and data['y'].close > data['t'].low
def process():
    """Print every qualifying symbol, taken from argv or the data/ directory."""
    if len(sys.argv) > 1:
        symbols = sys.argv[1:]
    else:
        # Fall back to filenames (minus extension) under ./data next to this script.
        symbols = []
        for entry in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')):
            symbols.append(entry.rsplit('.', 1)[0])
    for symbol in symbols:
        symbol = symbol.upper()
        if symbol.strip() and qualify(symbol):
            # Python 2 print statement — this script targets Python 2.
            print symbol
if __name__ == '__main__':
    process()
|
5,364 | 1ee5139cb1613977f1c85619404b3dcc6e996382 | def adder(x, y):
return x + y
# Demonstration calls: + works across types (string/list concat, float add).
adder('one', 'two')
adder([3, 4], [9, 0, 33])
adder(4.3, 3.5)
5,365 | b35686f7feec2c4a905007f3c105b6fa05b87297 | '''
swea 2806 N-Queen
'''
def nqueen(depth, n, history):
    """Count N-queens placements by DFS.

    ``history[r]`` is the column of the queen placed in row ``r``.
    Increments the module-global counter ``cnt`` once per full placement.
    """
    global cnt
    if depth == n:
        cnt += 1
        return
    for col in range(n):
        # Column clash.
        if col in history:
            continue
        # Diagonal clash: equal |row delta| and |column delta|.
        if any(abs(depth - row) == abs(col - placed)
               for row, placed in enumerate(history)):
            continue
        history.append(col)
        nqueen(depth + 1, n, history)
        history.pop()
# Driver: first input line is the number of test cases; each case is a
# board size N. Prints "#<case> <placement count>".
for t in range(int(input())):
    cnt = 0
    nqueen(0, int(input()), [])
    print("#{} {}".format(t+1, cnt))
5,366 | 31ed798118f20005b5a26bc1fc0053b7d0a95657 | # Demo - train the decoders & use them to stylize image
from __future__ import print_function
from train import train
from infer import stylize
from utils import list_images
# Mode switch: True trains the decoders, False runs stylization.
IS_TRAINING = True
# for training
TRAINING_IMGS_PATH = 'MS_COCO'
ENCODER_WEIGHTS_PATH = 'vgg19_normalised.npz'
MODEL_SAVE_PATH = 'models/autoencoder'
MODEL_SAVE_SUFFIX = '-done'
DEBUG = True
LOGGING_PERIOD = 10
# NOTE(review): "AUTUENCODER" is a typo for "AUTOENCODER"; the name is kept
# because main() references it.
AUTUENCODER_LEVELS_TRAIN = [5, 4, 3, 2, 1]
# for inferring (stylize)
CONTENTS_DIR = 'images/content'
STYLES_DIR = 'images/style'
OUTPUT_DIR = 'outputs'
STYLE_RATIO = 0.8
REPEAT_PIPELINE = 1
AUTUENCODER_LEVELS_INFER = [3, 2, 1]
def main():
    """Train the decoders or stylize images, depending on IS_TRAINING."""
    if IS_TRAINING:
        training_imgs_paths = list_images(TRAINING_IMGS_PATH)
        train(training_imgs_paths,
              ENCODER_WEIGHTS_PATH,
              MODEL_SAVE_PATH,
              autoencoder_levels=AUTUENCODER_LEVELS_TRAIN,
              debug=DEBUG,
              logging_period=LOGGING_PERIOD)
        print('\n>>>>>> Successfully done training...\n')
    else:
        contents_path = list_images(CONTENTS_DIR)
        styles_path = list_images(STYLES_DIR)
        model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX
        stylize(contents_path,
                styles_path,
                OUTPUT_DIR,
                ENCODER_WEIGHTS_PATH,
                model_path,
                style_ratio=STYLE_RATIO,
                repeat_pipeline=REPEAT_PIPELINE,
                autoencoder_levels=AUTUENCODER_LEVELS_INFER)
        print('\n>>>>>> Successfully done stylizing...\n')
if __name__ == '__main__':
    main()
|
5,367 | f12bdfc054e62dc244a95daad9682790c880f20d | from unittest.case import TestCase
from datetime import datetime
from src.main.domain.Cohort import Cohort
from src.main.domain.Group import Group
from src.main.util.TimeFormatter import TimeFormatter
__author__ = 'continueing'
class CohortTest(TestCase):
    """Unit tests for Cohort's weekly group partitioning."""
    def testAnalyzeNewGroups(self):
        """A 4-week range split at 7-day intervals yields 4 weekly groups."""
        cohort = Cohort(aStartDate=TimeFormatter.toDatetime('2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime('2014-06-01 23:59:59'), aInterval = 7)
        groups = cohort.groups
        group = Group(anId=1, aStartDate=TimeFormatter.toDatetime('2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-11 23:59:59'), aNickname="5월 1째 주")
        self.assertEqual(groups[0].period, group.period)
        group = Group(anId=2, aStartDate=TimeFormatter.toDatetime('2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-18 23:59:59'), aNickname="5월 2째 주")
        self.assertEqual(groups[1].period, group.period)
        group = Group(anId=3, aStartDate=TimeFormatter.toDatetime('2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-25 23:59:59'), aNickname="5월 3째 주")
        self.assertEqual(groups[2].period, group.period)
        group = Group(anId=3, aStartDate=TimeFormatter.toDatetime('2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-06-01 23:59:59'), aNickname="5월 4째 주")
        self.assertEqual(groups[3].period, group.period)
        # len() is the idiomatic spelling (was groups.__len__()).
        self.assertEqual(len(groups), 4)
    def testSnapshots(self):
        # Deliberate placeholder: the network round trip is too slow to test.
        self.fail("should test this! but take too long network time")
|
5,368 | 1d8e48aab59869831defcccdd8902230b0f3daa7 | import random
import pygame
pygame.init()
# Cube size (pieces per edge)
cubeSize = 2
# GUI-related constants
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 204, 0)
ORANGE = (255, 102, 0)
WHITE = (255, 255, 255)
GREY = (128, 128, 128)
pieceSize = 50
gridSize = pieceSize * cubeSize
screen = pygame.display.set_mode((gridSize * 7, gridSize * 5))
clock = pygame.time.Clock()
font = pygame.font.SysFont('Arial', 12 * cubeSize, True)
class Cube:
    """State of a cubeSize³ Rubik's cube held as a flat list of Piece objects."""
    def __init__(self):
        self.pieces = [Piece([i, j, k]) for k in range(cubeSize) for j in range(cubeSize) for i in range(cubeSize)]
    # Sort the Piece list back into location order (z, then y, then x).
    def sortPieces(self):
        self.pieces.sort(key=lambda x: x.location[2] * cubeSize * cubeSize + x.location[1] * cubeSize + x.location[0])
    # Extract the six face color arrays from the Piece list.
    def getPlaneArray(self):
        planeArray = []
        for direction in range(6):
            planeArray.append([piece.colorState[direction] for piece in self.pieces
                               if piece.location[direction % 3] == (cubeSize - 1) * (1 - direction // 3)])
        return planeArray
    # Count stickers that do not match their face's solved color.
    def countUnmatched(self):
        colorList = [RED, BLUE, WHITE, ORANGE, GREEN, YELLOW]
        planArray = self.getPlaneArray()
        unmatched = 0
        for i, plane in enumerate(planArray):
            unmatched += len(plane) - plane.count(colorList[i])
        return unmatched
    # Rotate one layer: `axis` is a one-hot [x,y,z] vector, `target` the
    # layer index along that axis, `direction` the turn sign.
    def rotate(self, axis, target, direction):
        for piece in self.pieces:
            if piece.location[axis.index(1)] == target:
                piece.rotateLocation(axis, direction)
                piece.rotateColorState(axis, direction)
    # Rotate one layer in the opposite direction.
    def inverseRotate(self, axis, target, direction):
        self.rotate(axis, target, -direction)
    # Scramble the cube with random layer rotations (animated).
    def mixCube(self):
        mixCount = cubeSize * 20
        for _ in range(mixCount):
            axis = [0, 0]
            axis.insert(random.choice(range(3)), 1)
            self.rotate(axis, random.choice(range(cubeSize)), random.choice([1, -1]))
            self.displayCube()
            pygame.display.update()
            pygame.time.wait(10)
    # Draw the cube's color layout on the GUI; in input mode, build
    # clickable PushButtons instead of plain rectangles.
    def displayCube(self, userInput=None, inputButtonArray=None):
        planeArray = self.getPlaneArray()
        for i, color in enumerate(planeArray[0]):
            start = [gridSize * 2, gridSize * 2]
            left = start[0] + (i % cubeSize) * pieceSize
            top = start[1] - (i // cubeSize - (cubeSize - 1)) * pieceSize
            if userInput:
                inputButtonArray[0].append(PushButton([left, top], [pieceSize, pieceSize], WHITE))
                inputButtonArray[0][i].show()
                inputButtonArray[0][i].allocatedPiece = self.pieces[cubeSize * (i + 1) - 1]
            else:
                pygame.draw.rect(screen, color, [[left, top], [pieceSize, pieceSize]])
                pygame.draw.rect(screen, BLACK, [[left, top], [pieceSize, pieceSize]], 1)
        for i, color in enumerate(planeArray[1]):
            start = [gridSize * 3 + 5, gridSize * 2]
            left = start[0] + ((cubeSize - 1) - i % cubeSize) * pieceSize
            top = start[1] - (i // cubeSize - (cubeSize - 1)) * pieceSize
            if userInput:
                inputButtonArray[1].append(PushButton([left, top], [pieceSize, pieceSize], WHITE))
                inputButtonArray[1][i].show()
                inputButtonArray[1][i].allocatedPiece = \
                    self.pieces[(i // cubeSize + 1) * cubeSize * cubeSize - (cubeSize - i % cubeSize)]
            else:
                pygame.draw.rect(screen, color, [[left, top], [pieceSize, pieceSize]])
                pygame.draw.rect(screen, BLACK, [[left, top], [pieceSize, pieceSize]], 1)
        for i, color in enumerate(planeArray[2]):
            start = [gridSize * 2, gridSize * 1 - 5]
            left = start[0] + (i // cubeSize) * pieceSize
            top = start[1] + (i % cubeSize) * pieceSize
            if userInput:
                inputButtonArray[2].append(PushButton([left, top], [pieceSize, pieceSize], WHITE))
                inputButtonArray[2][i].show()
                inputButtonArray[2][i].allocatedPiece = self.pieces[i + cubeSize * cubeSize * (cubeSize - 1)]
            else:
                pygame.draw.rect(screen, color, [[left, top], [pieceSize, pieceSize]])
                pygame.draw.rect(screen, BLACK, [[left, top], [pieceSize, pieceSize]], 1)
        for i, color in enumerate(planeArray[3]):
            start = [gridSize * 4 + 10, gridSize * 2]
            left = start[0] + ((cubeSize - 1) - i % cubeSize) * pieceSize
            top = start[1] - (i // cubeSize - (cubeSize - 1)) * pieceSize
            if userInput:
                inputButtonArray[3].append(PushButton([left, top], [pieceSize, pieceSize], WHITE))
                inputButtonArray[3][i].show()
                inputButtonArray[3][i].allocatedPiece = self.pieces[i * cubeSize]
            else:
                pygame.draw.rect(screen, color, [[left, top], [pieceSize, pieceSize]])
                pygame.draw.rect(screen, BLACK, [[left, top], [pieceSize, pieceSize]], 1)
        for i, color in enumerate(planeArray[4]):
            start = [gridSize * 1 - 5, gridSize * 2]
            left = start[0] + (i % cubeSize) * pieceSize
            top = start[1] - (i // cubeSize - (cubeSize - 1)) * pieceSize
            if userInput:
                inputButtonArray[4].append(PushButton([left, top], [pieceSize, pieceSize], WHITE))
                inputButtonArray[4][i].show()
                inputButtonArray[4][i].allocatedPiece = \
                    self.pieces[(i // cubeSize) * cubeSize * cubeSize + i % cubeSize]
            else:
                pygame.draw.rect(screen, color, [[left, top], [pieceSize, pieceSize]])
                pygame.draw.rect(screen, BLACK, [[left, top], [pieceSize, pieceSize]], 1)
        for i, color in enumerate(planeArray[5]):
            start = [gridSize * 2, gridSize * 3 + 5]
            left = start[0] + (i // cubeSize) * pieceSize
            top = start[1] + ((cubeSize - 1) - i % cubeSize) * pieceSize
            if userInput:
                inputButtonArray[5].append(PushButton([left, top], [pieceSize, pieceSize], WHITE))
                inputButtonArray[5][i].show()
                inputButtonArray[5][i].allocatedPiece = self.pieces[i]
            else:
                pygame.draw.rect(screen, color, [[left, top], [pieceSize, pieceSize]])
                pygame.draw.rect(screen, BLACK, [[left, top], [pieceSize, pieceSize]], 1)
class Piece:
    # location: [x, y, z], colorState = [x, y, z, -x, -y, -z]
    # ('E' marks an interior facet with no sticker).
    def __init__(self, location):
        self.location = location
        self.colorState = [RED if location[0] == cubeSize - 1 else 'E',
                           BLUE if location[1] == cubeSize - 1 else 'E',
                           WHITE if location[2] == cubeSize - 1 else 'E',
                           ORANGE if location[0] == 0 else 'E',
                           GREEN if location[1] == 0 else 'E',
                           YELLOW if location[2] == 0 else 'E']
    # Piece location under a layer rotation: keep the component parallel to
    # the rotation axis, translate so the axis passes through the origin,
    # apply the 90° rotation, then translate back.
    def rotateLocation(self, rotateAxis, direction):
        def rotate90Matrix(x, y, z):
            if not direction:
                return [x, y, z]
            else:
                newX = rotateAxis[0] * x + rotateAxis[1] * (direction * z) + rotateAxis[2] * (-direction * y)
                newY = rotateAxis[0] * (-direction * z) + rotateAxis[1] * y + rotateAxis[2] * (direction * x)
                newZ = rotateAxis[0] * (direction * y) + rotateAxis[1] * (-direction * x) + rotateAxis[2] * z
                return [newX, newY, newZ]
        trans = (cubeSize - 1) / 2
        temp = list(map(lambda x: x - trans, self.location))
        self.location = list(map(lambda x: int(x + trans), rotate90Matrix(temp[0], temp[1], temp[2])))
    # Piece color orientation under a layer rotation: the component parallel
    # to the axis stays fixed while the other four shift one slot around the
    # ring; X and Z rotate forward for +direction, Y forward for -direction.
    def rotateColorState(self, rotateAxis, direction):
        temp = self.colorState
        axisFactor = [[i, j] for i, j in enumerate(self.colorState) if i % 3 == rotateAxis.index(1)]
        for i, factor in enumerate(axisFactor):
            del(temp[factor[0] - i])
        temp = list(map(lambda x: temp[(x - direction * (rotateAxis[1] * -2 + 1)) % 4], range(4)))
        for factor in axisFactor:
            temp.insert(factor[0], factor[1])
        self.colorState = temp
class PushButton(pygame.Rect):
    """Clickable colored rectangle; may carry a reference to a cube Piece."""
    def __init__(self, pos, size, color):
        super().__init__(pos, size)
        self.color = color
        # Piece whose sticker this button edits (0 = unassigned).
        self.allocatedPiece = 0
    def show(self):
        """Draw the filled rectangle with a black border."""
        pygame.draw.rect(screen, self.color, [[self.left, self.top], [self.width, self.height]])
        pygame.draw.rect(screen, BLACK, [[self.left, self.top], [self.width, self.height]], 1)
    def addText(self, displayText):
        """Render a centered label on the button."""
        text = font.render(displayText, True, BLACK)
        text_rect = text.get_rect(center=(self.left + self.width // 2, self.top + self.height // 2))
        screen.blit(text, text_rect)
    def changeColor(self):
        # Step backwards through the palette (wraps via negative index).
        colorList = [RED, BLUE, WHITE, ORANGE, GREEN, YELLOW]
        self.color = colorList[colorList.index(self.color) - 1]
        self.show()
class AI:
    """Depth-limited search solver operating on a copy of the global cube."""
    def __init__(self):
        self.virtualCube = Cube()
        self.virtualCube.pieces = cube.pieces.copy()
        self.path = []
    # TODO: recheck the use of sortPieces — it must run before getPlaneArray.
    def selectRotation(self):
        # First pass uses a simple lookahead search (A*-style); consider
        # extracting it into its own module later.
        self.path.append(self.virtualCube.getPlaneArray())
        lookahead = 1
        totalBranch = pow(3 * cubeSize * 3, lookahead + 1)
        print("Total branch: %d" % totalBranch)
        searched = self.searchPath(lookahead)
        print("Best Rotation:", searched[1])
        print("Cost: %d" % searched[0])
        print("=" * 100)
        return searched[1]
    def searchPath(self, lookahead):
        """Try every layer rotation, recursing `lookahead` more levels;
        return (lowest cost, best rotation)."""
        lowestCost = float('inf')
        bestRotation = 0
        for axis in range(3):
            for target in range(cubeSize):
                for direction in [-1, 0, 1]:
                    rotation = [[1 if i == axis else 0 for i in range(3)], target, direction]
                    self.virtualCube.rotate(*rotation)
                    # Skip states already on the current path.
                    if self.virtualCube.getPlaneArray() in self.path:
                        self.virtualCube.inverseRotate(*rotation)
                        continue
                    if not lookahead:
                        currentCost = self.calculateCost()
                        print("branch cost: %d" % currentCost)
                    else:
                        currentCost = self.searchPath(lookahead - 1)[0]
                    if lowestCost > currentCost:
                        lowestCost = currentCost
                        bestRotation = rotation
                    self.virtualCube.inverseRotate(*rotation)
        return lowestCost, bestRotation
    def calculateCost(self):
        # if self.virtualCube.getPlaneArray() in self.path:
        #     return float('inf')
        rotation = 0
        unmatched = self.virtualCube.countUnmatched()
        return rotation + unmatched
# Get the cube's initial state from the user.
def getUserInput():
    """Handle start-screen clicks; returns False once setup is finished."""
    for event in pygame.event.get():
        if event.type == pygame.MOUSEBUTTONDOWN:
            # Shuffle the cube randomly.
            if startingButtons[0].collidepoint(pygame.mouse.get_pos()):
                screen.fill(WHITE)
                cube.mixCube()
                return False
            # Let the user enter the cube's color layout directly.
            elif startingButtons[1].collidepoint(pygame.mouse.get_pos()):
                inputButtonArray = [[] for _ in range(6)]
                confirmButton = PushButton([gridSize * 5, gridSize * 4], [gridSize, gridSize // 2], GREY)
                screen.fill(WHITE)
                confirmButton.show()
                confirmButton.addText("Confirm")
                cube.displayCube(True, inputButtonArray)
                pygame.display.update()
                # Block here until the user confirms their input.
                while True:
                    if userInputDone(inputButtonArray, confirmButton):
                        return False
    return True
# The user enters colors by clicking.
def userInputDone(inputButtonArray, confirmButton):
    """Process one batch of events; True once Confirm has been clicked."""
    for inputEvent in pygame.event.get():
        if inputEvent.type == pygame.MOUSEBUTTONDOWN:
            # A click on a sticker button cycles its color.
            for planeButton in inputButtonArray:
                for button in planeButton:
                    if button.collidepoint(pygame.mouse.get_pos()):
                        button.changeColor()
                        pygame.display.update()
            # Confirm: copy the chosen colors onto the allocated pieces.
            if confirmButton.collidepoint(pygame.mouse.get_pos()):
                for d, planeButton in enumerate(inputButtonArray):
                    for button in planeButton:
                        button.allocatedPiece.colorState[d] = button.color
                screen.fill(WHITE)
                cube.displayCube()
                pygame.display.update()
                return True
    return False
if __name__ == "__main__":
    # Initialize the cube.
    cube = Cube()
    # Set up the user-input objects and the GUI.
    startingButtons = [PushButton([gridSize * 1, gridSize * 2], [gridSize * 2, gridSize], GREY),
                       PushButton([gridSize * 4, gridSize * 2], [gridSize * 2, gridSize], GREY)]
    rotationCountDisplay = PushButton([gridSize * 4, gridSize * 4], [gridSize * 2, gridSize // 2], WHITE)
    screen.fill(WHITE)
    startingButtons[0].show()
    startingButtons[0].addText("Mix Randomly")
    startingButtons[1].show()
    startingButtons[1].addText("User Input")
    pygame.display.update()
    # Main loop.
    running = True
    selecting = True
    rotationCount = 0
    ai_1 = AI()
    while running:
        # User-input loop.
        while selecting:
            selecting = getUserInput()
        # Get the next rotation from the AI and apply it to the cube.
        nextRotate = ai_1.selectRotation()
        cube.rotate(*nextRotate)
        cube.sortPieces()
        rotationCount += 1
        # Display the current cube state in the GUI.
        cube.displayCube()
        rotationCountDisplay.show()
        rotationCountDisplay.addText("Rotate: %d" % rotationCount)
        pygame.display.update()
        pygame.time.wait(0)
        # Cube solved.
        if not cube.countUnmatched():
            print("Cube Solved!!!")
            # NOTE(review): busy-wait pins a CPU core and ignores window
            # events; a pygame event loop would be friendlier.
            while True:
                pass
|
5,369 | dd792c502317288644d4bf5d247999bb08d5f401 | from collections import deque
# BFS over board cells 1..100: fewest dice rolls, with warps (snakes/ladders)
# taken immediately and for free when landed on.
jump = {}
ladder_count, snake_count = map(int, input().split())
for _ in range(ladder_count + snake_count):
    start, end = map(int, input().split())
    jump[start] = end

dist = [-1] * 101  # -1 marks unvisited cells
dist[1] = 0
queue = deque([1])
while queue:
    cell = queue.popleft()
    for roll in range(1, 7):
        nxt = cell + roll
        if nxt > 100 or dist[nxt] != -1:
            continue
        dist[nxt] = dist[cell] + 1
        if nxt in jump:
            dest = jump[nxt]
            if dist[dest] == -1:
                dist[dest] = dist[cell] + 1
                queue.append(dest)
        else:
            queue.append(nxt)
print(dist[100])
|
5,370 | 82a3fca0261b4bde43f7bf258bb22e5b2ea8c28d | import pickle
import time
start = time.time()
# Use a context manager so the file is closed even if unpickling fails
# (the original open()/close() pair leaked the handle on error).
with open('my_classifier.pickle', 'rb') as f:
    cl = pickle.load(f)
print(cl.classify("Where to travel in bangalore ?"))
print(cl.classify("Name a golf course in Myrtle beach ."))
print(cl.classify("What body of water does the Danube River flow into ?"))
#print("Accuracy: {0}".format(cl.accuracy(test)))
print(time.time()-start)
|
5,371 | 693f2a56578dfb1e4f9c73a0d33c5585070e9f9e | import cv2
import numpy as np
"""
# Create a black image
image = np.zeros((512,512,3), np.uint8)
# Can we make this in black and white?
image_bw = np.zeros((512,512), np.uint8)
cv2.imshow("Black Rectangle (Color)", image)
cv2.imshow("Black Rectangle (B&W)", image_bw)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = np.zeros((512,512,3), np.uint8)
cv2.line(image, (0,0), (511,511), (255,127,0), 5) #Start Position , End positon of a line , RGB , 5 >> Thickness
cv2.imshow("Blue Line", image)
cv2.imwrite("blueline.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = np.zeros((512,512,3), np.uint8)
cv2.rectangle(image, (100,100), (300,250), (127,50,127), -1)
cv2.imshow("Rectangle", image)
cv2.imwrite("Rectangle.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = np.zeros((512,512,3), np.uint8)
cv2.circle(image, (350, 350), 100, (15,75,50), -1)
cv2.imshow("Circle", image)
cv2.imwrite("circle.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = np.zeros((512,512,3), np.uint8)
# Let's define four points
pts = np.array( [[10,50], [400,50], [90,200], [50,500]], np.int32)
# Let's now reshape our points in form required by polylines
pts = pts.reshape((-1,1,2))
cv2.polylines(image, [pts], True, (0,0,255), 3)
cv2.imshow("Polygon", image)
cv2.imwrite("polygon.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
"""
# Draw anti-aliased text onto a black 512x512 canvas, show it and save it.
image = np.zeros((512,512,3), np.uint8)
cv2.putText(image, 'Hello World!', (75,290), cv2.FONT_HERSHEY_COMPLEX, 2, (100,170,0), 3)
cv2.imshow("Hello World!", image)
cv2.imwrite("Text.jpg",image)
cv2.waitKey(0)  # wait for a key press before the windows are destroyed
cv2.destroyAllWindows() |
5,372 | 9843f957435b74e63a6fe4827cc17c824f11c7d6 | import time
t0 = time.time()
# ------------------------------
days_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def count_days(start_date, end_date, ref_date, target_day):
    """Count first-of-the-month days between start_date and end_date that
    fall on weekday *target_day*.

    :param start_date: (month, day, year) tuple for the first day counted from
    :param end_date: (month, day, year) tuple for the last month considered
    :param ref_date: (month, day, year, weekday) for Jan 1 of the year exactly
        one year before start_date; weekday encoding is 1=Monday .. 7=Sunday
    :param target_day: weekday to count (1=Monday .. 7=Sunday)
    :return: number of matching first-of-month days
    """
    def _is_leap(y):
        # Full Gregorian rule. Bug fix: the original treated every year
        # divisible by 4 as a leap year (its century test required %100==0
        # AND %400==0), so e.g. the reference year 1900 got 366 days and the
        # whole weekday chain was shifted by one.
        return y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)

    # Local month-length table (index 1..12); the original mutated the
    # module-level days_in_month list as a side effect.
    month_lengths = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

    month, day, year = start_date
    end_month, _end_day, end_year = end_date
    ref_year = ref_date[2]
    ref_day_of_week = ref_date[3]

    ref_days_in_year = 366 if _is_leap(ref_year) else 365

    # Weekday of Jan 1 of the start year, derived from the reference year.
    day_of_week = ref_day_of_week + ref_days_in_year % 7
    day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week

    day_counter = 0
    # Advance to the first of the next month (the start month's own first
    # day is not tested, matching the original behavior).
    if day_of_week != 1:
        day_of_week += month_lengths[month] - day + 1
        day_of_week %= 7
        month += 1
    while year <= end_year:
        month_lengths[2] = 29 if _is_leap(year) else 28
        while ((year != end_year) and (month <= 12)) or \
              ((year == end_year) and (month <= end_month)):
            # Step to the 1st of the following month and test its weekday.
            day_of_week += month_lengths[month] % 7
            day_of_week = day_of_week % 7 if day_of_week > 7 else day_of_week
            if day_of_week == target_day:
                day_counter += 1
            month += 1
        month = 1
        year += 1
    return day_counter
print(count_days( (1, 1, 1901), (12, 31, 2000), (1, 1, 1900, 1), 7))
# ------------------------------
t1 = time.time()
print(f"program took {(t1-t0)*1000} milliseconds")
|
5,373 | 20671470c087719fa9ea8ffa25be55e9ade67681 | # p.85 (문자 갯수 카운팅)
message = \
'It was a bright cold day in April, and the clocks were striking thirteen.'
print(message, type(message))

# Count every character in one O(n) pass with Counter instead of calling
# message.count() (an O(n) scan) once per character of the string.
from collections import Counter
char_counts = Counter(message)
for msg in message:
    print(msg, char_counts[msg])
# Counter preserves first-occurrence order, matching the original dict.
msg_dict = dict(char_counts)
print(msg_dict)
5,374 | ba336094d38a47457198919ce60969144a8fdedb | # Generated by Django 3.1.6 on 2021-02-27 23:29
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the *_ID foreign-key fields to their bare related-model names."""

    dependencies = [
        ('RMS', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='inventorytable',
            old_name='Restaurant_ID',
            new_name='Restaurant',
        ),
        migrations.RenameField(
            model_name='menuitemstable',
            old_name='Restaurant_ID',
            new_name='Restaurant',
        ),
        migrations.RenameField(
            model_name='reciperequirementstable',
            old_name='Ingredient_ID',
            new_name='Ingredient',
        ),
        migrations.RenameField(
            model_name='reciperequirementstable',
            old_name='Item_ID',
            new_name='Item',
        ),
        migrations.RenameField(
            model_name='reciperequirementstable',
            old_name='Restaurant_ID',
            new_name='Restaurant',
        ),
        migrations.RenameField(
            model_name='seatmanagementtable',
            old_name='Restaurant_ID',
            new_name='Restaurant',
        ),
    ]
|
5,375 | 1673214215043644e1a878ed7c30b69064f1a022 | import datetime
class assignmentObject:
    """Simple record pairing an assignment's name with its due day."""
    def __init__(self,name,day):
        self.name = name
        self.day = day |
5,376 | 88731049227629ed84ff56922d7ac11d4a137984 | from core.models import Atom
from core.models.vector3d import cVector3D
from fractions import Fraction
class SpaceGroup(object):
    """Crystallographic space group: lattice data plus its symmetry operations."""

    def __init__(self,
                 index=None,
                 name=None,
                 lattice_system=None,
                 lattice_centering=None,
                 inversion=None,
                 symmetry=None,
                 asymmetric_unit=None,
                 unique_axis=None):
        self.index = index
        self.name = name
        self.lattice_system = lattice_system
        self.lattice_centering = lattice_centering  # a Centering instance (or None)
        self.inversion = inversion  # CentroSymmetric / NonCentroSymmetric
        self.symmetry = symmetry  # base list of SymmetryOperation objects
        self.asymmetric_unit = asymmetric_unit
        self.unique_axis = unique_axis
        # Operations expanded with the inversion center but NOT centering.
        self.non_centering_symmetry = []
        # All operations after applying inversion and lattice centering.
        self.full_symmetry = []
        self.__compute_full_symmetry()

    @property
    def identity(self):
        """The identity operation x,y,z."""
        return SymmetryOperation('x,y,z')

    def __append_identity(self):
        # Ensure the identity operation is always present, and placed first.
        if 'x,y,z' not in [i.operation_string for i in self.symmetry]:
            self.symmetry = [self.identity] + self.symmetry

    def __add_inversion_symmetry(self):
        # Copy the base operations; for centrosymmetric groups also append
        # the inverted image of each (range() is evaluated once, so the
        # newly appended operations are not re-inverted).
        for op in self.symmetry:
            self.non_centering_symmetry.append(op)
        if isinstance(self.inversion,CentroSymmetric):
            for i in range(len(self.non_centering_symmetry)):
                op = self.non_centering_symmetry[i]
                self.non_centering_symmetry.append(op.inversion())

    def __add_centering_symmetry(self):
        # Append the centering translations applied to every operation so far.
        if self.lattice_centering:
            for i in range(len(self.full_symmetry)):
                op = self.full_symmetry[i]
                centering_ops = self.lattice_centering.transform(op)
                self.full_symmetry += centering_ops

    def __compute_full_symmetry(self):
        """Populate non_centering_symmetry and full_symmetry."""
        self.__append_identity()
        self.__add_inversion_symmetry()
        self.full_symmetry = [op for op in self.non_centering_symmetry]
        self.__add_centering_symmetry()
        return self.full_symmetry
class SymmetryOperation(object):
    """One symmetry operation, e.g. "x,y,z+1/2", applicable to coordinates."""

    def __init__(self, operation_string):
        """
        Initialize a symmetry operation object from a string representation of symmetry operation.
        :param operation_string: A string (as read in from res/cif file) representing a symmetry operation
        for a space group.
        """
        self.operation_string = operation_string.lower()
        self.operation_function = None
        self.__set_operation_function()

    def __set_operation_function(self):
        """
        Convert the string form of the symmetry operation into the form of a mathematical
        function that can be directly applied to a vector to transform a point to a
        symmetry related point.
        """
        # Currently every operation uses the generic evaluator symm_eval;
        # the lazy guard keeps an already-set function untouched.
        if self.operation_function is not None:
            return self.operation_function
        else:
            self.operation_function = symm_eval

    def transform_scaled_position(self, data):
        """
        Applying this symmetry operation on a 3D coordinate to transform it to a symmetry-related
        position in the crystal.
        :param data: A vector (:class:`entdecker.core.models.vector3d.cVector3D`)
                     representing the fractional coordinates on which the symmetry
                     operation will be applied upon.
        :return: Symmetry transformed vector.
        """
        return self.operation_function(prepare_operation(self.operation_string), data)

    def transform_atom(self, atom):
        """Return a new Atom with this operation applied to its scaled position."""
        return Atom(label=atom.label, scaled_position=self.transform_scaled_position(atom.scaled_position))

    def inversion(self):
        """Return a new operation that is this one negated component-wise."""
        func = lambda x: "-1*(%s),-1*(%s),-1*(%s)" % tuple(x.split(","))
        return self.__class__(func(self.operation_string))
class Symmetry(object):
    """Factory that parses symmetry-operation strings into objects."""

    @staticmethod
    def get(value):
        """Parse a ';'-separated string into a list of SymmetryOperation.

        Returns [] for empty/None or 'UNKNOWN' input.
        """
        # Bug fix: the original used "value is ''", which compares object
        # identity, not equality (a SyntaxWarning since Python 3.8 and an
        # implementation-dependent result). "not value" also guards None.
        if not value or value == 'UNKNOWN':
            return []
        return [SymmetryOperation(v) for v in value.split(';')]
class Inversion(object):
    """Tri-state flag for whether a space group has an inversion center."""
    # TODO - this needs to be fixed later to make it consistent with the Centering class!
    YES = True
    NO = False
    UNKNOWN = None
class InversionFactory(object):
    """Builds the inversion object from a res-file LATT directive."""
    @staticmethod
    def construct(latt):
        # SHELX convention: a positive LATT value means centrosymmetric,
        # a negative one non-centrosymmetric.
        if int(latt) > 0:
            return CentroSymmetric()
        else:
            return NonCentroSymmetric()
class CentroSymmetric(Inversion):
    """Inversion present: transform() negates an operation string component-wise."""
    @staticmethod
    def transform(op):
        # op is the raw operation string, e.g. "x,y,z" -> "-1*(x),-1*(y),-1*(z)".
        func = lambda x: "-1*(%s),-1*(%s),-1*(%s)" % tuple(x.split(","))
        return func(op)
class NonCentroSymmetric(Inversion):
    """No inversion center: transform() is the identity."""
    @staticmethod
    def transform(op):
        return op
class Centering(object):
    """Lattice centering: a letter code plus its extra lattice translations."""

    # Centering letter -> SHELX LATT code ('H' has no LATT code).
    _LATT_CODES = {'P': 1, 'I': 2, 'R': 3, 'F': 4, 'A': 5, 'B': 6, 'C': 7}

    def __init__(self, letter, additional_lattice_points):
        self.letter = letter
        self.additional_lattice_points = additional_lattice_points

    def transform(self, op):
        """Return a list of *op* translated by each additional lattice point."""
        additional_ops = []
        for point in self.additional_lattice_points:
            func = lambda x: "{0}+{3},{1}+{4},{2}+{5}".format(*(x.split(",") + list(point)))
            additional_ops.append(op.__class__(func(op.operation_string)))
        return additional_ops

    @classmethod
    def primitive(cls):
        return cls('P', [])

    @classmethod
    def body_centered(cls):
        return cls('I', [(0.5, 0.5, 0.5)])

    @classmethod
    def hexagonal(cls):
        return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0),
                         (Fraction(1, 3), Fraction(2, 3), 0.0)])

    @classmethod
    def rhombohedral(cls):
        return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),
                         (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])

    @classmethod
    def face_centered(cls):
        return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])

    @classmethod
    def base_centered_A(cls):
        return cls('A', [(0.0, 0.5, 0.5)])

    @classmethod
    def base_centered_B(cls):
        return cls('B', [(0.5, 0.0, 0.5)])

    @classmethod
    def base_centered_C(cls):
        return cls('C', [(0.5, 0.5, 0.0)])

    @classmethod
    def construct(cls, latt):
        """
        Given the LATT directive in a res file, return the corresponding centered lattice type.
        :param latt: the (possibly signed) integer value specified in LATT
        :return: centered lattice type, or None for an unknown code
        """
        # Table dispatch replaces the original if/elif ladder.
        factories = {
            1: cls.primitive,
            2: cls.body_centered,
            # default setting from reading in a res file is Rhombohedral
            3: cls.rhombohedral,
            4: cls.face_centered,
            5: cls.base_centered_A,
            6: cls.base_centered_B,
            7: cls.base_centered_C,
        }
        factory = factories.get(abs(latt))
        return factory() if factory is not None else None

    def get_LATT_code(self):
        """Inverse of construct(): the LATT code for this centering (None for 'H')."""
        return self._LATT_CODES.get(self.letter)
class AsymmetricUnit(object):
    """Named fractional-coordinate ranges [[xmin,xmax],[ymin,ymax],[zmin,zmax]]."""
    UNKNOWN = [[0, 1.00], [0, 1.00], [0, 1.00]]
    FULL = [[0, 1.00], [0, 1.00], [0, 1.00]]
    HALF_X = [[0, 0.50], [0, 1.00], [0, 1.00]]
    HALF_Y = [[0, 1.00], [0, 0.50], [0, 1.00]]
    HALF_Z = [[0, 1.00], [0, 1.00], [0, 0.50]]
    QUART_Y = [[0, 1.00], [0, 0.25], [0, 1.00]]
    HALF_X_QUART_Y = [[0, 0.50], [0, 0.25], [0, 1.00]]
    HALF_XZ = [[0, 0.50], [0, 1.00], [0, 0.50]]
    HALF_XY = [[0, 0.50], [0, 0.50], [0, 1.00]]
    EIGHT_Z = [[0, 1.00], [0, 1.00], [0, 0.125]]
class UniqueAxis(object):
    """Index of the unique crystallographic axis (-1 when unknown / not applicable)."""
    UNKNOWN = -1
    NA = -1
    X = 0
    Y = 1
    Z = 2
def symm_eval(s, data):
    """Evaluate a prepared symmetry-operation string at the point *data*.

    :param s: comma-separated expressions in x, y, z (see prepare_operation)
    :param data: cVector3D holding the fractional coordinates
    :return: cVector3D of the transformed coordinates
    """
    x, y, z = data.x, data.y, data.z
    # NOTE(review): eval() executes arbitrary code — acceptable only because
    # these strings come from trusted res/cif records, never from user input.
    out = list(map(eval, s.split(",")))
    return cVector3D(out[0], out[1], out[2])
def prepare_operation(s):
    """Clean up a symmetry-operation string for use with eval or exec.

    Common crystallographic fractions are rewritten as float divisions, all
    whitespace is stripped and the result is lower-cased.

    :param s: Input string e.g. "x,y,z+1/2"
    :type s: string
    :rtype: string
    """
    # Applied in this fixed order; pairs are disjoint, so order is cosmetic.
    replacements = (
        ("1/4", "1.0/4.0"),
        ("1/2", "1.0/2.0"),
        ("3/4", "3.0/4.0"),
        ("1/3", "1.0/3.0"),
        ("2/3", "2.0/3.0"),
        ("1/6", "1.0/6.0"),
        ("5/6", "5.0/6.0"),
    )
    cleaned = s
    for fraction, decimal in replacements:
        cleaned = cleaned.replace(fraction, decimal)
    return cleaned.replace(" ", "").lower()
|
5,377 | a777c6d76ef2ae15544a91bcfba0dbeabce0470a | from practice.demo4 import paixu
if __name__ == '__main__':
    # Read the upper bound from the user and run the sorting demo.
    n=int(input("请输入最大的数字范围:"))
    paixu(n) |
5,378 | 0271c45a21047b948946dd76f147692bb16b8bcf | import requests
# Fetch a product page and print its (re-decoded) beginning.
url='https://item.jd.com/100008348550.html'
try:
    r=requests.get(url)
    r.raise_for_status()
    print(r.encoding)
    # Use the encoding sniffed from the content, not the HTTP header.
    r.encoding=r.apparent_encoding
    print(r.text[:1000])
    print(r.apparent_encoding)
except requests.RequestException:
    # Bug fix: a bare "except:" also swallowed KeyboardInterrupt/SystemExit
    # and programming errors; only request failures should print 'error'.
    print('error')
|
5,379 | 9ba5af7d2b6d4f61bb64a055efb15efa8e08d35c | from selenium import webdriver
from urllib.request import urlopen, Request
from subprocess import check_output
import json
#from flask import Flask
# https://data-live.flightradar24.com/zones/fcgi/feed.js?bounds=-32.27,-34.08,-73.15,-70.29
def get_json_aviones(north, south, west, east):
    """Fetch the flightradar24 JSON feed for the given bounding box.

    :param north, south, west, east: bounding-box coordinates
    :return: parsed JSON payload of planes in the box
    """
    #driver = webdriver.Chrome('/Users/luisl/Desktop/Pega Altavoz/chromedriver')
    driver = webdriver.PhantomJS("phantomjs")
    # Bug fix: the URL was previously built and executed through eval(),
    # which is unnecessary and dangerous; call driver.get() directly.
    feed_url = ('https://data-live.flightradar24.com/zones/fcgi/feed.js'
                '?bounds={},{},{},{}'.format(north, south, west, east))
    driver.get(feed_url)
    json_aviones = json.loads(driver.find_element_by_tag_name("pre").text)
    driver.close()
    return json_aviones
#######################
def get_json_buques(centerx, centery, zoom):
    """Collect vessel rows for the given map view.

    Runs the GetBarcos.js PhantomJS script (up to 5 attempts) to obtain the
    data URLs, then fetches and concatenates their 'data.rows' payloads.
    """
    ## TEST 1 - combine with phantomjs
    count = 0
    while True:
        ignore = False
        count += 1
        print(centerx, centery, zoom)
        out = check_output(["phantomjs", "GetBarcos.js", str(centerx), str(centery), str(zoom)])
        links = json.loads(out)
        if links[0] != 0:
            break
        else:
            print("get_json_buques FAILED -------------- trying again")
            if count == 5:
                # Give up after 5 failures; the loop below is then skipped
                # and an empty list is returned.
                ignore = True
                break
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'vessel-image': '00853fc25189416456442da74396a0288d02',
        'x-requested-with': 'XMLHttpRequest'}
    webpage = []
    for link in links:
        if not ignore:
            req = Request(link, headers=headers)
            webpage.extend(json.loads(urlopen(req).read().decode())['data']['rows'])
##    try:
##        with open("data", "w") as file:
##            file.write(json.dumps(webpage[0]))
##    except Exception as e:
##        print(e)
    return webpage
#######################
#app = Flask(__name__)
#
#
#@app.route('/')
# def hello_world():
# return json.dumps({'aviones': get_json_aviones(),
# 'buques': get_json_buques()})
#
#
#t = Timer(10.0, hello_world)
# t.start()
if __name__ == "__main__":
get_json_buques(-71, -33, 9)
# get_json_aviones(32.27, -34.08, -73.15, -70.29)
|
5,380 | bcc3d4e9be0de575c97bb3bf11eeb379ab5be458 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 2 22:49:00 2020
@author: Drew
____________________________________________________________________
basic_github_auto_uploader.py - A Basic Automated GitHub Uploader
____________________________________________________________________
1. Requirements:
Version: Python 3.7
Built-in Libs: base64, os, shutil, time, datetime
Dependencies: pygithub, Git (maybe)
2. Description:
This file automatically uploads subdirectories as new repositories in
GitHub. You will need an internet connection to do this.
The first function [subdir_maker(directory)] will sort the subdirectories
in the folder.
The second function [daily_github_upload(subdirs)] will do the actual repo
creation and commit.
    The second function can be run on a schedule using a for loop and
time.sleep or a dedicated scheduling library. You need to restart the
script if you add new subdirectories that you want to upload.
3. Running Instructions:
Place this file in a root directory where you keep your project
subdirectories. Keep the file structure in the subdirectories flat (don't
make subdirectories in the subdirectory) as this is not handled in this
simplified script. Also, support for PDFs is a bit sketchy.
Be sure to replace the Github key in the second function with your own
generated key. You can configure the README.MD file as well to say a
custom message.
4. Performance:
Performance is poor for now. The script needs to run constantly and uses
quite a bit of memory. A more efficient future version will be made.
"""
# Import libraries that we need to use.
import os, shutil, base64, time, datetime
from github import Github, InputGitTreeElement
# Function 1: Given a directory/file path, return all the subdirectories in
# the given directory in a list of strings. Uses the os library.
# Individual files should not be left in the directory.
def subdir_maker(directory):
    """Return all directories found below *directory* (recursively).

    Uses os.walk; the starting directory itself, which os.walk yields first,
    is excluded from the result.
    """
    walked = [root for root, _dirs, _files in os.walk(directory)]
    # Drop the leading entry: os.walk's first yield is *directory* itself.
    return walked[1:]
# Function 2: When invoked with a filepath, upload all the files.
# Does not support subdirectories within the subdirectory.
# Also, cannot be empty!
def daily_github_upload(sub_to_repo):
    """Create a GitHub repository named after the subdirectory, commit every
    file in it, then delete the local subdirectory.

    :param sub_to_repo: filesystem path of the subdirectory to upload
    """
    # Github client; initialize with your own 40-character access token
    # generated on GitHub itself.
    g = Github('****************************************')
    current_subdir = sub_to_repo
    # The subdirectory name (after the last backslash) becomes the repo name.
    title = current_subdir[current_subdir.rindex("\\")+1:]
    repo = g.get_user().create_repo(title)
    # Initialize with a README.MD file. You can configure this as needed.
    repo.create_file("README.MD","A readme file","This was an auto-upload on "
                     + str(datetime.datetime.now()))
    commit_message = "This was automatically committed."
    file_list = []   # absolute paths of files to commit
    file_names = []  # bare file names, used as paths inside the repo
    for subdir, dirs, files in os.walk(current_subdir):
        for file in files:
            print(os.path.join(subdir, file))
            file_list.append(os.path.join(subdir, file))
            file_names.append(file)
    # Build a git tree on top of the current master head.
    master_ref = repo.get_git_ref('heads/master')
    master_sha = master_ref.object.sha
    base_tree = repo.get_git_tree(master_sha)
    element_list = list()
    for i, entry in enumerate(file_list):
        # Bug fix: "entry.endswith('.png' or '.pdf' or '.xlsx')" evaluated to
        # endswith('.png') only ('or' returns its first truthy operand), and
        # binary files were read in text mode, so b64encode() received str
        # and crashed. Binary types are now read as bytes and encoded; the
        # 'with' blocks also close the handles the original leaked.
        if entry.endswith(('.png', '.pdf', '.xlsx')):
            with open(entry, 'rb') as input_file:
                data = base64.b64encode(input_file.read()).decode('ascii')
        else:
            with open(entry) as input_file:
                data = input_file.read()
        # Wrap each file in the format required to add it to a git tree.
        element = InputGitTreeElement(file_names[i], '100644', 'blob', data)
        element_list.append(element)
    tree = repo.create_git_tree(element_list, base_tree)
    parent = repo.get_git_commit(master_sha)
    # Commit and advance master to the new commit.
    commit = repo.create_git_commit(commit_message, tree, [parent])
    master_ref.edit(commit.sha)
    # Remove the uploaded subdirectory so it is not re-uploaded next run.
    shutil.rmtree(current_subdir)
def main():
    """Upload each subdirectory of this script's directory once per day."""
    # Collect the subdirectories that existed at startup; new ones require
    # a restart (see module docstring).
    subs = subdir_maker(os.path.dirname(os.path.realpath(__file__)))
    # Upload each subdirectory in turn; 'subs' is kept so we can report
    # what was uploaded.
    for i in range(len(subs)):
        daily_github_upload(subs[i])
        print("_"*40 + "\n\n" + "Uploaded {0} to Github. ".format(i) + "\n" + "_"*40)
        # Sleep for 24 hours, then upload the next subdirectory.
        time.sleep(86400)
|
5,381 | e4ce10f5db56e4e2e1988da3cee542a4a09785a8 | from . import mongo
col = mongo.cli['Cupidbot']['timer']
async def add_time(chat, time):
    """Insert a new {chat, time} document.

    NOTE(review): this PyMongo call is blocking despite the async def —
    consider an async driver such as motor.
    """
    return col.insert_one({'chat': chat, 'time': time})
async def get_time(chat):
    """Return the stored document for *chat*, or None if absent (blocking PyMongo call)."""
    return col.find_one({'chat': chat})
async def update_time(chat, time):
    """Overwrite the stored time for *chat* (blocking PyMongo call)."""
    return col.update_one({'chat': chat}, {'$set': {'chat': chat, 'time': time}})
|
5,382 | 1ce5b97148885950983e39b7e99d0cdfafe4bc16 | from turtle import *
def drawSquare():
    """Draw a 100x100 square, ending back at the start position and heading."""
    side_count = 4
    for _ in range(side_count):
        forward(100)
        left(90)
if __name__ == '__main__':
    # Draw two squares 200 units apart, pen lifted while moving between them.
    drawSquare()
    up()
    forward(200)
    down()
    drawSquare()
    mainloop()  # keep the window open until the user closes it
|
5,383 | 6267c999d3cec051c33cbcde225ff7acaa6bff74 | import sys
from random import randint
# Emit n random 'Add' order lines: A,<id>,<side>,<qty>,<price>.
if len(sys.argv) != 2:
    # Fix: Python 2 print statements are syntax errors on the Python 3
    # used throughout the rest of this codebase.
    print("Usage: generate.py <number of orders>")
    sys.exit(1)

n = int(sys.argv[1])
for i in range(0, n):
    action = 'A'
    orderid = i + 1
    side = 'S' if (randint(0, 1) == 0) else 'B'
    quantity = randint(1, 100)
    price = randint(100, 200)
    print(action + ',' + str(orderid) + ',' + side + ',' + str(quantity) + ',' + str(price))
|
5,384 | 39ecbf914b0b2b25ce4290eac4198199b90f95e0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
# Module-level logger.
lgr = logging.getLogger(__name__)
# Bug fix: Logger.log() requires an integer level as its first argument;
# lgr.log("hello") raised TypeError at import time. Use an explicit level.
lgr.info("hello")
import database
import csv
import codecs
class Stop(object):
    """A single transit stop parsed from a GTFS-style CSV row."""

    def __init__(self, arg):
        # Whitelist of columns copied from the input row into self.d.
        self.fields = [
            'stop_id',
            'stop_name',
            'stop_lat',
            'stop_lon',
            'stop_calle',
            'stop_numero',
            'stop_entre',
            'stop_esquina'
        ]
        self.d = {}
        self.parse(arg)

    def __repr__(self):
        return str(self.d)

    def parse(self, dictParams):
        """Copy the whitelisted fields of *dictParams* into self.d.

        stop_id is converted to int; raw bytes values are decoded as UTF-8.
        """
        for k, v in dictParams.items():
            # Bug fix: the original tested "str(k) in 'stop_id'", a substring
            # check that also matched keys like 'stop' or 'id'; compare for
            # equality instead.
            if k == 'stop_id':
                v = int(v)
            # Bug fix: codecs.decode() on a str raises TypeError on Python 3;
            # only raw bytes need decoding.
            if isinstance(v, bytes):
                v = codecs.decode(v, 'utf-8')
            if k in self.fields:
                self.d.update({k: v})

    def save(self, db):
        """Persist this stop through the given database interface."""
        db.insert('stops', **self.d)
def saveStops(stops):
    """Persist every Stop in *stops* into the local SQLite database."""
    db = database.dbInterface('../database/cba-1.0.1.sqlite')
    for stop_id, stop in stops.items():
        stop.save(db)
    db.close()
def addFromFile(stops, filename):
    """Merge the stops from ../incoming/<filename> into *stops*.

    Rows whose stop_id is already present are NOT overwritten; they are
    collected and returned in a dict of repeated entries.
    """
    repeated = {}
    with open('../incoming/'+ filename) as csvFile:
        reader = csv.DictReader(csvFile)
        for r in reader:
            stop_id = r['stop_id']
            stop = Stop(r)
            if stop_id in stops:
                # NOTE(review): this comparison has no effect ('pass' body);
                # every duplicate id is reported, identical or not.
                if stop.d != stops[stop_id].d:
                    pass
                repeated[stop_id] = stop
                print("stop already in collection, skipping")
                print(r)
                print(stops[stop_id])
            else:
                stops[stop_id] = stop
    return repeated
def show(stops):
    """Print each (stop_id, Stop) pair — debugging helper."""
    for stop_id, stop in stops.items():
        print(stop_id, stop)
def main():
    """Load stops from all operators, report duplicates and save the rest."""
    stops = {}
    # Later files never overwrite stops already loaded; duplicates are
    # accumulated in 'repeated' for inspection.
    repeated = addFromFile(stops, 'asf/stops.csv')
    repeated.update(addFromFile(stops, 'ccba/stops.csv'))
    repeated.update(addFromFile(stops, 'coniferal/stops.csv'))
    repeated.update(addFromFile(stops, 'ersa/stops.csv'))
    # show(stops)
    show(repeated)
    saveStops(stops)
if __name__ == '__main__':
main() |
5,385 | 5dd79f8ebd74099871d4367cafd83359c4f24e26 | #!/usr/bin/python3
import os
import sys
import subprocess
# Compile src/<argv[1]> with g++ -O3 into ./bin/<name> and then run it.
path = sys.argv[1]
name, ext = os.path.splitext(path)
options = ['g++',
           '-O3',
           'src/' + path,
           '-o', f'./bin/{name}',
           '-std=c++11',
           '-lgmp']  # link against GNU MP
subprocess.call(options)
# Run the freshly built binary (runs even if compilation failed).
subprocess.call([f'./bin/{name}'])
|
5,386 | 55d184a9342b40fe027913e46933325bb00e33a6 | from django.contrib import admin
from .models import User, UserProfile, Lead, Agent, Category
# Expose the CRM models in the Django admin site.
admin.site.register(User)
admin.site.register(UserProfile)
admin.site.register(Lead)
admin.site.register(Agent)
admin.site.register(Category)
|
5,387 | 44097da54a0bb03ac14196712111a1489a956689 | #proper clarification for requirement is required
import boto3
s3_resource = boto3.resource('s3')
# NOTE(review): YOUR_BUCKET_NAME, first_bucket_name, first_file_name and
# second_bucket_name are undefined in this snippet — it will NameError as-is;
# supply them before running.
s3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME, CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
# Upload a local file, then delete that object from the second bucket.
s3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name, Key=first_file_name)
s3_resource.Object(second_bucket_name, first_file_name).delete()
|
5,388 | cf2c57dbb2c1160321bcd6de98691db48634d5d6 | # Generated by Django 2.2.2 on 2019-07-17 10:02
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
    """Creates the Subscription model (email + topic + optional department)."""

    dependencies = [
        ('users', '0003_delete_userprofile'),
    ]
    operations = [
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=255)),
                ('subscribe_to', models.CharField(choices=[('jobs', 'Jobs'), ('posts', 'Posts'), ('newsletter', 'Newsletter')], max_length=100)),
                ('department', modelcluster.fields.ParentalKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='department_subscriptions', to='users.Department')),
            ],
            options={
                'verbose_name': 'Subscription',
                'verbose_name_plural': 'Subscriptions',
            },
        ),
    ]
|
5,389 | 0f4fa9f8835ae22032af9faa6c7cb10af3facd79 | def parse_detail_for_one_course(page, course, no_info_course):
    print(f'{course["name"]} is processing**: {course["url"]}')
    # Maps page labels (Dutch and English variants) to our output field names.
    # NOTE(review): 'map' shadows the builtin map() inside this function.
    map = {"Locatie": "location",
           "Location": "location",
           "Startdatum": "effective_start_date",
           "Start date": "effective_start_date",
           "Duur": "duration_desc",
           "Wekelijkse studie": "duration_desc",
           "Expensive": "duration_desc",
           "Colleges": "consecutive_desc",
           "Languages": "languages",
           "Languages ": "languages",
           "Talen": "languages",
           "Fee": "price",
           "Fee ": "price",
           "Fairy ": "price",
           "Weekly study": "second_duration_desc",
           "Accreditations ": "third_duration_desc",
           "Investering": "price"}
    # Output skeleton: every field defaults to the empty string.
    info = {"location": "",
            "effective_start_date": "",
            "duration_desc": "",
            "consecutive_desc": "",
            "languages": "",
            "price": "",
            "second_duration_desc": "",
            "third_duration_desc": ""}
    info_div = page.find('div', attrs={"class": "program-general-info"})
    info_sessions = None
    if info_div:
        info_sessions = info_div.find_all('div', attrs={"class": "info-item"})
    if not info_sessions:
        # No info panel found: remember the course for later inspection.
        print(f'-------{course["url"]} not div')
        no_info_course.append(course)
    elif info_sessions:
        for info_session in info_sessions:
            try:
                label = info_session.find('label')
                label_text = label.text.strip()
                info_attr = map.get(label_text, '')
                # Prefix match for the "Wekelijkse ..." label variants.
                if "Wekeli" in label_text:
                    info_attr = "duration_desc"
                elif "Permanente educatie" in label_text:
                    continue
                elif "Accreditaties" in label_text:
                    continue
                elif "Deadline voor aanmelding" in label_text:
                    continue
                res = info_session.find('div')
                res_text = res.text.strip()
                info[info_attr] = res_text
            except Exception as e:
                # Tolerate malformed items; log and keep going.
                print(f'{course["url"]} has problem of {e}')
                continue
    # print(title)
    # Merge the scraped fields over the course metadata.
    detail = {**course, **info}
    # pprint(detail)
    return detail
# page = requests.get("https://www.nyenrode.nl/opleidingen/p/collegereeks-excellent-leiderschap")
# page = requests.get("https://www.nyenrode.nl/opleidingen/p/behavioral-and-cultural-governance")
# page = requests.get("https://www.nyenrode.nl/opleidingen/p/advanced-management-program")
# page = requests.get("https://www.nyenrode.nl/opleidingen/p/mba-thesis")
# course = {"name": "",
# "url": ""}
# page = page.text
# page = bs4.BeautifulSoup(page, 'html.parser')
#
# detail = get_detail_for_one_course(page, course, [])
# pprint(detail)
|
5,390 | 0b8cb522c531ac84d363b569a3ea4bfe47f61993 | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Module for mimic explainer and explainable surrogate models."""
from .mimic_explainer import MimicExplainer
__all__ = ["MimicExplainer"]
|
5,391 | bd1fbdf70bae7d5853bac8fae83343dfa188ca19 | from django import http
from django.utils import simplejson as json
import urllib2
import logging
from google.appengine.api import urlfetch
import cmath
import math
from ams.forthsquare import ForthSquare
from ams.twitter import Twitter
OAUTH_TOKEN='3NX4ATMVS35LKIP25ZOKIVBRGAHFREKGNHTAKQ5NPGMCWOE0'
DEFAULT_RADIUS = 500.0
DEFAULT_LIMIT = 5
forthsquare = ForthSquare()
twitter = Twitter()
#arts, education,events, food, night, outdoors, professional, residence, shop, travel
CATEGORIES = [('arts','4d4b7104d754a06370d81259'),\
('education','4d4b7105d754a06372d81259'),\
('events','4d4b7105d754a06373d81259'),\
('food','4d4b7105d754a06374d81259'),\
('night','4d4b7105d754a06376d81259'),\
('outdoors','4d4b7105d754a06377d81259'),\
('professional','4d4b7105d754a06375d81259'),\
('residence','4e67e38e036454776db1fb3a'),\
('shop','4d4b7105d754a06378d81259'),\
('travel','4d4b7105d754a06379d81259')]
def venues(request):
    """Return nearby Foursquare venues as JSON (Python 2 / GAE view).

    Query params: ll=lat,lng (required), categories (comma-separated names),
    and optionally alpha/beta/gamma + radius for the radar filter.
    """
    if not request.GET or not request.GET.get(u'll'):
        return http.HttpResponseBadRequest('Wrong data')
    pos = request.GET.get(u'll')
    lat0 = float(pos.split(',')[0])
    lng0 = float(pos.split(',')[1])
    categories = []
    param_categories = request.GET.get(u'categories')
    print 'PARAM CATEGORIES: ' + str(param_categories)
    if param_categories:
        categories = param_categories.split(',')
    print '0-point coord:', lat0, lng0
    radius = '&radius=%s' %(DEFAULT_RADIUS)
    limit = '&limit=%s' %(DEFAULT_LIMIT)
    # NOTE(review): only the LAST category's response survives this loop —
    # json_data is overwritten each iteration, and it is undefined when
    # 'categories' is empty. Confirm intended behavior.
    for categoryName in categories:
        categoryId = getCategoryId(categoryName)
        filter_url_categories = '&categoryId=%s' %(categoryId)
        json_data = forthsquare.venues(pos, limit, radius, filter_url_categories)
        # print 'data:', json_data
    venues = json_data['response']['venues']
    print "got some venues"
    if request.GET.get('alpha') and request.GET.get('beta') and request.GET.get('gamma'):
        print 'point2'
        alpha = float(request.GET.get('alpha'))
        beta = float(request.GET.get('beta'))
        gamma = float(request.GET.get('gamma'))
        # NOTE(review): the parsed radius value is discarded here — the float()
        # result is never assigned.
        if request.GET.get('radius'):
            float(request.GET.get('radius'))
        else:
            radius = DEFAULT_RADIUS
        venuesInRadar = list()
        for i in venues:
            # print i
            if 'url' in i:
                print i['url']
            if i and i.get('location') and i.get('location').get('lat') and i.get('location').get('lng') and i.get('location').get('distance'):
                lat = float(i.get('location').get('lat'))
                lng = float(i.get('location').get('lng'))
                distance = float(i.get('location').get('distance'))
                print 'lat:', lat
                print 'lng:', lng
                print 'distance:', distance
                # NOTE(review): DEFAULT_SPREAD_ANGLE and venueInRadarRange are
                # not defined in this module — this branch raises NameError.
                if venueInRadarRange(lat0, lng0, alpha, beta, gamma, radius, DEFAULT_SPREAD_ANGLE, lat, lng, distance):
                    # res = {'lat' : lat, 'lng': lng, 'id' : i['id'].encode("utf-8"), 'name' : i['name'].encode("utf-8") }
                    venuesInRadar.append(forthsquare.getVenueData(i))
        print len(venuesInRadar)
        print venuesInRadar
        print 'point3'
    else:
        # No radar parameters: return every venue found.
        venuesInRadar = list()
        for i in venues:
            venuesInRadar.append(forthsquare.getVenueData(i))
    '''
    if len(venuesInRadar) == 1:
        # return detailed information
        response = http.HttpResponse(venuesInRadar[0],
                                     content_type='application/json')
        response["Access-Control-Allow-Origin"] = "*"
        return response
    '''
    print venuesInRadar
    print type(venuesInRadar)
    print 'point4'
    response = http.HttpResponse(json.dumps(venuesInRadar),
                                 content_type='application/json')
    response["Access-Control-Allow-Origin"] = "*"
    return response
def getCategoryId(category):
    """Return the foursquare category id registered for *category*.

    Scans CATEGORIES in order and returns the id of the first matching
    name, or None when the name is unknown (same fall-through as before).
    """
    return next((cat_id for cat_name, cat_id in CATEGORIES if cat_name == category), None)
def comments(request):
    """Proxy the foursquare venue "tips" endpoint.

    Expects GET parameter ``id`` (a foursquare venue id); relays the
    provider's JSON payload with a permissive CORS header, or responds
    400 when ``id`` is missing.
    """
    # id = '4a688ba1f964a52088ca1fe3'
    if not request.GET or not request.GET.get(u'id'):
        return http.HttpResponseBadRequest('Wrong data')
    id = request.GET.get(u'id')
    foursquareRequest = 'https://api.foursquare.com/v2/venues/%s/tips?sort=recent&oauth_token=%s&v=20140517' % (id, OAUTH_TOKEN)
    foursquareResponse = urllib2.urlopen(foursquareRequest)
    json_raw = foursquareResponse.read()
    # BUG FIX: json_raw is already a JSON document; wrapping it in
    # json.dumps() double-encoded it into a quoted JSON *string*.
    # Relay the raw body unchanged instead.
    response = http.HttpResponse(json_raw,
                                 content_type='application/json')
    response["Access-Control-Allow-Origin"] = "*"
    return response
def getPhoto(id):
    """Fetch the most recent photo entry for venue *id* from foursquare.

    Returns the decoded JSON payload, or None when the provider sent an
    empty body.
    """
    foursquareRequest = 'https://api.foursquare.com/v2/venues/%s/photos?limit=1&sort=recent&oauth_token=%s&v=20140517' % (id, OAUTH_TOKEN)
    foursquareResponse = urllib2.urlopen(foursquareRequest)
    json_raw = foursquareResponse.read()
    if not json_raw:
        return None
    # Leftover debug print of the full payload removed.
    json_data = json.loads(json_raw)
    return json_data
def photos(request):
    """Proxy the foursquare venue "photos" endpoint (most recent photo).

    Expects GET parameter ``id``; responds 404 when foursquare returns an
    empty body, otherwise relays the JSON payload with a CORS header.
    """
    # id = '4a688ba1f964a52088ca1fe3'
    if not request.GET or not request.GET.get(u'id'):
        return http.HttpResponseBadRequest('Wrong data')
    id = request.GET.get(u'id')
    foursquareRequest = 'https://api.foursquare.com/v2/venues/%s/photos?limit=1&sort=recent&oauth_token=%s&v=20140517' % (id, OAUTH_TOKEN)
    foursquareResponse = urllib2.urlopen(foursquareRequest)
    json_raw = foursquareResponse.read()
    if not json_raw:
        # BUG FIX: http.DoesNotExist is not a django.http response class;
        # the original line would have raised AttributeError at runtime.
        return http.HttpResponseNotFound('photo does not exist')
    # BUG FIX: json_raw is already JSON text; json.dumps() double-encoded
    # it into a quoted JSON string.  Relay the raw body instead.
    response = http.HttpResponse(json_raw,
                                 content_type='application/json')
    response["Access-Control-Allow-Origin"] = "*"
    return response
def tweets(request):
    """Return recent tweets around the GET parameter ``ll`` (lat,lng)."""
    search_radius = "0.02km"
    if not request.GET or not request.GET.get(u'll'):
        return http.HttpResponseBadRequest('Wrong data')
    position = request.GET.get(u'll')
    payload = twitter.tweets(search_radius, position)
    response = http.HttpResponse(payload,
                                 content_type='application/json')
    response["Access-Control-Allow-Origin"] = "*"
    return response
|
5,392 | e598091fc6c05b1d7f9f35f2ae58494fed53f9af | # Write a Python program to print alphabet pattern 'G'.
# Build the 7x7 letter 'G' out of asterisks, one character cell at a time.
result = ''
for row in range(7):
    for col in range(7):
        left_edge = col == 0 and row not in (0, 6)
        top_or_bottom = row in (0, 6) and 0 < col < 6
        right_edge = row in (1, 4, 5) and col == 6
        middle_bar = row == 3 and col not in (1, 2)
        result = result + ('*' if (left_edge or top_or_bottom or right_edge or middle_bar) else ' ')
    result = result + '\n'
print(result) |
5,393 | 7819e41d567daabe64bd6eba62461d9e553566b3 | from socketserver import StreamRequestHandler, TCPServer
from functools import partial
class EchoHandler(StreamRequestHandler):
    """Line-oriented echo handler that prefixes every echoed line with a
    configurable acknowledgement byte string (keyword-only ``ack``)."""

    def __init__(self, *args, ack, **kwargs):
        # ack must be assigned before the base __init__ runs, because
        # BaseRequestHandler.__init__ invokes handle() immediately.
        self.ack = ack
        # BUG FIX: the original called ``super.__init__(...)`` (missing
        # call parentheses on super), which raises TypeError instead of
        # initialising the base handler.
        super().__init__(*args, **kwargs)

    def handle(self):
        # Echo each received line back, prefixed with the ack marker.
        for line in self.rfile:
            self.wfile.write(self.ack + line)
# serv = TCPServer(('', 15000), EchoHandler)
# Bind the echo service on every interface, port 15000.  functools.partial
# pre-binds the keyword-only ``ack`` prefix that EchoHandler.__init__ expects,
# since TCPServer instantiates the handler class itself per connection.
serv = TCPServer(('', 15000), partial(EchoHandler, ack=b'RECEIVE:'))
# serv = TCPServer(('', 15000), lambda *args, **kwargs: EchoHandler(*args, ack=b'RECEIVED:', **kwargs)) # equivalent
serv.serve_forever()  # blocks forever, serving one connection at a time
|
5,394 | e0b28fdcbc3160bcccbb032949317a91a32eeb1b | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 12:47:30 2019
Title: MP4-Medical Image Processing
@author: MP4 Team
"""
# Validate window controller
class ValidateWindowCtr(object):
    """Controller for the validation window: three matplotlib subplots
    (transformed volume, ground truth, segmentation) whose slices can be
    scrolled independently with the mouse wheel while the cursor hovers
    over the corresponding axes.
    """

    # Initialization
    def __init__(self, fig, im_trans, im_truth, im_segmen, vol_trans, vol_truth, vol_segmen, ax_trans, ax_truth, ax_segmen, index_trans, index_truth, index_segmen):
        self.fig = fig
        self.im_trans, self.im_truth, self.im_segmen = im_trans, im_truth, im_segmen
        self.vol_trans, self.vol_truth, self.vol_segmen = vol_trans, vol_truth, vol_segmen
        self.ax_trans, self.ax_truth, self.ax_segmen = ax_trans, ax_truth, ax_segmen
        # index_* are one-element lists so the current slice number is
        # shared by reference with the caller.
        self.index_trans, self.index_truth, self.index_segmen = index_trans, index_truth, index_segmen
        self.txt_trans = self.ax_trans.text(0, 600, 'Slice No: '+str(self.index_trans[-1]), color='b')
        self.txt_truth = self.ax_truth.text(0, 10, 'Slice No: '+str(self.index_truth[-1]), color='b')
        self.txt_segmen = self.ax_segmen.text(0, 600, 'Slice No: '+str(self.index_segmen[-1]), color='b')
        self.scroll_trans = None
        self.scroll_truth = None
        self.scroll_segmen = None
        self.fig.canvas.mpl_connect('axes_enter_event', self.fig_enter_event)
        self.fig.canvas.mpl_connect('axes_leave_event', self.fig_leave_event)

    # Enable scrolling image
    def fig_enter_event(self, event):
        # Attach the wheel handler matching whichever axes the cursor entered.
        if self.ax_trans.in_axes(event):
            self.scroll_trans = self.fig.canvas.mpl_connect('scroll_event', self.trans_subplot_scroll)
        elif self.ax_truth.in_axes(event):
            self.scroll_truth = self.fig.canvas.mpl_connect('scroll_event', self.truth_subplot_scroll)
        elif self.ax_segmen.in_axes(event):
            self.scroll_segmen = self.fig.canvas.mpl_connect('scroll_event', self.segmen_subplot_scroll)

    # Disable scrolling image
    def fig_leave_event(self, event):
        self.fig.canvas.mpl_disconnect(self.scroll_trans)
        self.fig.canvas.mpl_disconnect(self.scroll_truth)
        self.fig.canvas.mpl_disconnect(self.scroll_segmen)

    def _scroll(self, event, vol, im, txt, index):
        """Shared wheel handler: step the slice index, redraw image + label.

        Scrolling down may take the index negative (down to -vol.shape[0]),
        which indexes the volume from the end -- behaviour preserved from
        the three original (duplicated) handlers this helper replaces.
        """
        if event.button == 'down' and (index[-1] > -1*vol.shape[0]):
            index[-1] -= 1
        if event.button == 'up' and (index[-1] < vol.shape[0]-1):
            index[-1] += 1
        im.set_data(vol[index[-1]])
        txt.set_text('Slice No: '+str(index[-1]))
        self.fig.canvas.draw_idle()

    # Scroll voxel image
    def trans_subplot_scroll(self, event):
        self._scroll(event, self.vol_trans, self.im_trans, self.txt_trans, self.index_trans)

    # Scroll ground truth image
    def truth_subplot_scroll(self, event):
        self._scroll(event, self.vol_truth, self.im_truth, self.txt_truth, self.index_truth)

    # Scroll segmented image
    def segmen_subplot_scroll(self, event):
        self._scroll(event, self.vol_segmen, self.im_segmen, self.txt_segmen, self.index_segmen)
|
5,395 | 5b6241907cc97f82d6c6e0a461f4f71a9a567204 | #Program to create and store Employee Salary Records in a file
import os
def appendEmployee(eno, name, basic, fname="Employee.txt"):
    """Append one employee salary record to *fname* (CSV, one line each).

    Derived fields: HRA = 10% of basic, DA = 73% of basic,
    gross = basic + HRA + DA, tax = 30% of gross, net = gross - tax.
    *fname* is new and defaults to the original hard-coded file name.
    """
    hra = basic*0.10
    da = basic*0.73
    gross = basic+hra+da
    tax = gross*0.3
    net = gross-tax
    line = str(eno)+","+name+","+str(basic)+","+str(hra)+","+str(da)+","+str(gross)+","+str(tax)+","+str(net)+"\n"
    # 'with' guarantees the file is closed even if the write fails.
    with open(fname, "a") as fh:
        fh.write(line)
def displayEmployees(fname="Employee.txt"):
    """Print every employee record stored in *fname* (new optional
    parameter, defaults to the original hard-coded file name)."""
    # 'with' guarantees the file is closed even on errors while printing.
    with open(fname, "r") as fh:
        for line in fh:
            emp = line.split(",")
            print("\nEmployee No:", emp[0], "\nEmployee Name:", emp[1], "\nBasic:", emp[2], "\nHRA:", emp[3], "\nDA:", emp[4], "\nGross Salary:", emp[5], "\nIncome Tax:", emp[6], "\nNet Salary:", emp[7])
def searchEmployee(eno, fname="Employee.txt"):
    """Print the record whose employee number equals *eno*, or a
    not-found message.  *fname* is a new optional parameter defaulting
    to the original hard-coded file name."""
    found = False
    with open(fname, "r") as fh:
        for line in fh:
            emp = line.split(",")
            if int(emp[0]) == eno:
                print("\nEmployee No:", emp[0], "\nEmployee Name:", emp[1], "\nBasic:", emp[2], "\nHRA:", emp[3], "\nDA:", emp[4], "\nGross Salary:", emp[5], "\nIncome Tax:", emp[6], "\nNet Salary:", emp[7])
                found = True
                break
    if not found:
        print("Employee record not found")
def deleteEmployee(eno, fname="Employee.txt"):
    """Remove the record(s) with employee number *eno* from *fname*.

    Rewrites the file through a sibling temporary file (``fname + ".tmp"``
    instead of the original fixed "Temp.txt", so it lands next to *fname*).
    Prints how many records were removed, or that none matched.
    """
    count = 0
    tmpname = fname + ".tmp"
    with open(fname, "r") as fh, open(tmpname, "w") as ftemp:
        for line in fh:
            emp = line.split(",")
            if int(emp[0]) != eno:
                ftemp.write(line)
            else:
                count += 1
    if count == 0:
        print("Employee record not found")
        os.remove(tmpname)
    else:
        print("No of Employee records deleted:", count)
        os.remove(fname)
        os.rename(tmpname, fname)
def modifyEmployee(eno, fname="Employee.txt"):
    """Interactively replace the record with employee number *eno*.

    Shows the existing record, prompts (via input()) for a new name and
    basic salary, recomputes the derived salary fields and rewrites the
    file through a sibling temporary file.  *fname* is a new optional
    parameter defaulting to the original hard-coded file name.
    """
    count = 0
    tmpname = fname + ".tmp"
    with open(fname, "r") as fh, open(tmpname, "w") as ftemp:
        for line in fh:
            emp = line.split(",")
            if int(emp[0]) == eno:
                print("Existing Employee record:")
                print("\nEmployee No:", emp[0], "\nEmployee Name:", emp[1], "\nBasic:", emp[2], "\nHRA:", emp[3], "\nDA:", emp[4], "\nGross Salary:", emp[5], "\nIncome Tax:", emp[6], "\nNet Salary:", emp[7])
                print("Enter New Employee details")
                emp[1] = input("Enter Employee Name:")
                emp[2] = int(input("Enter Employee Basic Salary:"))
                emp[3] = emp[2]*0.10            # HRA
                emp[4] = emp[2]*0.73            # DA
                emp[5] = emp[2]+emp[3]+emp[4]   # Gross
                emp[6] = emp[5]*0.3             # Tax
                emp[7] = emp[5]-emp[6]          # Net
                # (stray debug print of emp[0] removed)
                line = emp[0]+","+emp[1]+","+str(emp[2])+","+str(emp[3])+","+str(emp[4])+","+str(emp[5])+","+str(emp[6])+","+str(emp[7])+"\n"
                count += 1
            ftemp.write(line)
    if count == 0:
        print("Employee record not found")
        os.remove(tmpname)
    else:
        print("No of Employee records modified:", count)
        os.remove(fname)
        os.rename(tmpname, fname)
if __name__ == "__main__":
    # Simple text-menu driver for the employee record utilities.
    while True:
        choice = int(input("1->New Employee 2->Display Employee records 3->Search Employee 4->Delete Employee 5->Modify Employee 6->Exit\n"))
        if choice == 1:
            # Prompts are evaluated left-to-right: number, name, basic.
            new_no = int(input("Enter Employee No:"))
            new_name = input("Enter Employee Name:")
            new_basic = int(input("Enter Employee Basic salary:"))
            appendEmployee(new_no, new_name, new_basic)
        elif choice == 2:
            displayEmployees()
        elif choice == 3:
            searchEmployee(int(input("Enter Employee No to search:")))
        elif choice == 4:
            deleteEmployee(int(input("Enter Employee No to delete:")))
        elif choice == 5:
            modifyEmployee(int(input("Enter Employee No to modify:")))
        else:
            # Any other choice (including 6) exits the loop.
            break
|
5,396 | 6eec95932ef445ba588f200233495f59c4d77aac | from sklearn.datasets import fetch_mldata
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
import numpy as np
import os
import tarfile
import pickle
import subprocess
import sys
if sys.version_info.major == 2:
# Backward compatibility with python 2.
from six.moves import urllib
urlretrieve = urllib.request.urlretrieve
else:
from urllib.request import urlretrieve
def get_gpu_name():
    """Return the list of GPU names reported by nvidia-smi.

    On any failure (binary missing, query error) the exception is printed
    and None is returned implicitly, as in the original.
    """
    try:
        proc = subprocess.run(["nvidia-smi", "--query-gpu=gpu_name", "--format=csv"],
                              stdout=subprocess.PIPE)
        lines = proc.stdout.decode("utf-8").split('\n')
        # Drop the CSV header row and the trailing empty line.
        return lines[1:-1]
    except Exception as e:
        print(e)
def read_batch(src):
    '''Unpack one pickled batch file at *src* and return its contents.'''
    with open(src, 'rb') as handle:
        if sys.version_info.major == 2:
            return pickle.load(handle)
        # Python 3: the batches were pickled by Python 2, so byte strings
        # must be decoded as latin1.
        return pickle.load(handle, encoding='latin1')
def shuffle_data(X, y):
    """Return X and y permuted in unison by a single random permutation."""
    order = np.arange(len(X))
    np.random.shuffle(order)
    return X[order], y[order]
def yield_mb(X, y, batchsize=64, shuffle=False):
assert len(X) == len(y)
if shuffle:
X, y = shuffle_data(X, y)
# Only complete batches are submitted
for i in range(len(X)//batchsize):
yield X[i*batchsize:(i+1)*batchsize], y[i*batchsize:(i+1)*batchsize]
def download_cifar(download_dir, src="http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"):
    '''Load the training and testing data

    Downloads the CIFAR-10 python archive from *src* into *download_dir*
    (download and extraction are both skipped when the tarball is already
    present), then returns (x_train, x_test, y_train, y_test) with pixel
    rows still flat and labels as integer vectors.

    NOTE(review): assumes the published archive layout
    "cifar-10-batches-py/" with data_batch_1..5 and test_batch --
    verify if *src* is ever pointed elsewhere.
    '''
    if not os.path.isfile("{}/cifar-10-python.tar.gz".format(download_dir)):
        print ('Downloading ' + src)
        fname, h = urlretrieve(src, '{}/cifar-10-python.tar.gz'.format(download_dir))
        print ('Done.')
        print ('Extracting files...')
        with tarfile.open(fname) as tar:
            tar.extractall(download_dir)
        print ('Done.')
    print ('Preparing train set...')
    # Concatenate the five pickled training batches into single arrays.
    train_list = [read_batch('{0}/cifar-10-batches-py/data_batch_{1}'.format(download_dir, i + 1)) for i in range(5)]
    x_train = np.concatenate([t['data'] for t in train_list])
    y_train = np.concatenate([t['labels'] for t in train_list])
    print ('Preparing test set...')
    tst = read_batch('{0}/cifar-10-batches-py/test_batch'.format(download_dir))
    x_test = tst['data']
    y_test = np.asarray(tst['labels'])
    print ('Done.')
    return x_train, x_test, y_train, y_test
def download_imdb(src="https://s3.amazonaws.com/text-datasets/imdb.npz"):
    '''Load the training and testing data

    Downloads the pre-tokenised IMDB sentiment dataset (word-index arrays)
    to a throwaway local file, loads the four arrays from the npz archive,
    and always removes the download afterwards.
    '''
    # FLAG: should we host this on azure?
    print ('Downloading ' + src)
    fname, h = urlretrieve(src, './delete.me')
    print ('Done.')
    try:
        print ('Extracting files...')
        with np.load(fname) as f:
            x_train, y_train = f['x_train'], f['y_train']
            x_test, y_test = f['x_test'], f['y_test']
        print ('Done.')
    finally:
        # Remove the temporary download even if loading failed.
        os.remove(fname)
    return x_train, x_test, y_train, y_test
def cifar_for_library(download_dir, channel_first=True, one_hot=False):
    """Return CIFAR-10 as (x_train, x_test, y_train, y_test).

    Pixels are scaled to [0, 1] float32 and reshaped to (N, 3, 32, 32)
    (or (N, 32, 32, 3) when channel_first=False); labels are int32,
    optionally one-hot encoded.
    """
    # Raw data
    x_train, x_test, y_train, y_test = download_cifar(download_dir)
    # Scale pixel intensity
    x_train = x_train/255.0
    x_test = x_test/255.0
    # Reshape
    x_train = x_train.reshape(-1, 3, 32, 32)
    x_test = x_test.reshape(-1, 3, 32, 32)
    # Channel last
    if not channel_first:
        x_train = np.swapaxes(x_train, 1, 3)
        x_test = np.swapaxes(x_test, 1, 3)
    # One-hot encode y
    if one_hot:
        # FIX: sklearn's OneHotEncoder(categorical_features='all') was
        # deprecated in 0.20 and removed in 0.24.  An identity-matrix
        # lookup produces the same dense encoding for integer class
        # labels 0..C-1 (all 10 classes are present in CIFAR-10 training).
        n_classes = int(y_train.max()) + 1
        eye = np.eye(n_classes)
        y_train = eye[y_train]
        y_test = eye[y_test]
    # dtypes
    x_train = x_train.astype(np.float32)
    x_test = x_test.astype(np.float32)
    y_train = y_train.astype(np.int32)
    y_test = y_test.astype(np.int32)
    return x_train, x_test, y_train, y_test
def imdb_for_library(seq_len=100, max_features=20000, one_hot=False):
    ''' Replicates same pre-processing as:
    https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py

    Returns (x_train, x_test, y_train, y_test): int32 word-index sequences
    padded/truncated to *seq_len* with vocabulary capped at *max_features*
    (out-of-vocabulary words replaced by OOV_CHAR); labels optionally
    one-hot encoded.
    '''
    # 0 (padding), 1 (start), 2 (OOV)
    START_CHAR = 1
    OOV_CHAR = 2
    INDEX_FROM = 3
    # Raw data (has been encoded into words already)
    x_train, x_test, y_train, y_test = download_imdb()
    # Combine for processing
    idx = len(x_train)
    _xs = np.concatenate([x_train, x_test])
    # Words will start from INDEX_FROM (shift by 3)
    _xs = [[START_CHAR] + [w + INDEX_FROM for w in x] for x in _xs]
    # Max-features - replace words bigger than index with oov_char
    # E.g. if max_features = 5 then keep 0, 1, 2, 3, 4 i.e. words 3 and 4
    if max_features:
        print("Trimming to {} max-features".format(max_features))
        _xs = [[w if (w < max_features) else OOV_CHAR for w in x] for x in _xs]
    # Pad to same sequences
    print("Padding to length {}".format(seq_len))
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacement (same platform int dtype).
    xs = np.zeros((len(_xs), seq_len), dtype=int)
    for o_idx, obs in enumerate(_xs):
        # Match keras pre-processing of taking last elements
        obs = obs[-seq_len:]
        for i_idx in range(len(obs)):
            if i_idx < seq_len:
                xs[o_idx][i_idx] = obs[i_idx]
    # One-hot
    if one_hot:
        # FIX: OneHotEncoder(categorical_features='all') was removed in
        # scikit-learn 0.24; identity-matrix lookup is equivalent for
        # integer labels 0..C-1.
        n_classes = int(y_train.max()) + 1
        eye = np.eye(n_classes)
        y_train = eye[y_train.astype(int)]
        y_test = eye[y_test.astype(int)]
    # dtypes
    x_train = np.array(xs[:idx]).astype(np.int32)
    x_test = np.array(xs[idx:]).astype(np.int32)
    y_train = y_train.astype(np.int32)
    y_test = y_test.astype(np.int32)
    return x_train, x_test, y_train, y_test
|
5,397 | 8a7904881d936a3cb421ed5550856b600894fcee | #!/bin/python
import sys
import notify2
import subprocess
from time import sleep
def notification(message: str):
    """Create (but do not show) a desktop notification carrying *message*.

    The caller decides what to do with the returned object:
    1. show()   -> pops up a brand-new notification
    2. update() -> replaces the payload of the same popup without issuing
                   a new one

    Usage : python <filename.py> typeObj:str value:int objective:str
        typeObj:   RAM/SWAP/NORMAL
        value:     current usage of RAM or SWAP (for NORMAL, the value = 0)
        objective: show/update
    """
    # Register with the notification daemon, then build the popup.
    notify2.init("notifywhenLOAD")
    popup = notify2.Notification("Emergency Alert!", message)
    popup.set_timeout(12000)
    return popup
def main():
    """Dispatch on argv: typeObj (RAM/SWAP/NORMAL), value, objective
    (show/update), and raise a desktop notification accordingly."""
    a = notification(f"{sys.argv[1]} exceeds {sys.argv[2]}")
    if sys.argv[1] in ["RAM", "SWAP"] and sys.argv[3] == "update":
        a.update(f"{sys.argv[1]} Alert!! Warning for death")
        # a.update('river')
        a.set_urgency(2)   # critical urgency
        a.show()
    elif sys.argv[1] in ["RAM", "SWAP"] and sys.argv[3] == "show":
        a.set_timeout(10000)
        a.set_urgency(1)   # normal urgency
        a.show()
    elif sys.argv[1] == "NORMAL":
        a.update("ChiLLax!!! Nothing to worry about")
        a.set_urgency(0)   # low urgency
        a.show()


if __name__ == "__main__":
    # BUG FIX: main() previously ran unconditionally, so merely importing
    # this module fired a notification (and crashed without argv set up).
    main()
|
5,398 | b750673829873c136826ae539900451559c042c8 | #Alexis Langlois
'''
Fichier de test pour l'algorithme Adaboost avec arbres de décision (@nbTrees).
'''
import numpy as np
from sklearn.utils import shuffle
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from adaboost_trees import AdaboostTrees
#Trees
nbTrees = 20

#Train dataset
X = np.loadtxt('train_data')
y = np.loadtxt('train_labels')
X, y = shuffle(X, y)

#Data normalization (shift to zero minimum, scale to unit maximum)
X -= X.min()
X /= X.max()

#Instanciation
forest = AdaboostTrees(nbTrees)

#Training
forest.train(X, y)

#Test dataset
X = np.loadtxt('test_data')
y = np.loadtxt('test_labels')
X, y = shuffle(X, y)

#Data normalization
X -= X.min()
X /= X.max()

#Predictions
predictions = forest.predict(X)

#Report
# BUG FIX: the report referenced undefined names (predicted, tags, preds)
# and would have crashed with NameError; it now uses the actual test
# labels (y) and predictions.  Prints are parenthesized so the script
# also parses on Python 3.
print(classification_report(y, predictions))
print('Accuracy: ' + str(accuracy_score(y, predictions)))
5,399 | 8ef20a7a93d6affabe88dad4e5d19613fe47dd0f | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn import svm
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from numpy import genfromtxt
def create_graph(model_dir='/home/ubuntu/hdd/tensorFlowDic/',
                 graph_file='classify_image_graph_def.pb'):
    """Creates a graph from saved GraphDef file and returns a saver.

    The model location was hard-coded; it is now exposed as parameters
    whose defaults are the original values (backward compatible).
    """
    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(os.path.join(model_dir, graph_file), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
def feature_extraction(image):
    """Run the JPEG at path *image* through the loaded Inception graph and
    return the squeezed 'pool_3' activation as a flat feature vector."""
    raw_bytes = tf.gfile.FastGFile(image, 'rb').read()
    with tf.Session() as sess:
        pool3 = sess.graph.get_tensor_by_name('pool_3:0')
        features = sess.run(pool3, {'DecodeJpeg/contents:0': raw_bytes})
    return np.squeeze(features)
# create_graph()
# input_x = np.zeros((0,2048))
# for i in range(1,7001):
# imageName = str(i).zfill(5)
# image = '/home/ubuntu/caffe/examples/images/joey/'+imageName+".jpg"
# pre = feature_extraction(image)
# print ("Finish extracting features of training image "+image)
# input_x = np.vstack((input_x,pre))
# print(input_x.shape)
# Extract pool_3 features for the 970 validation/test images.
test_x = np.zeros((0, 2048))
for i in range(1, 971):
    imageName = str(i).zfill(5)
    image = '/home/ubuntu/caffe/examples/images/val/' + imageName + ".jpg"
    pre = feature_extraction(image)
    print("Finish extracting features of test image " + image)
    test_x = np.vstack((test_x, pre))
print(test_x.shape)

# Training labels: column 1 of train.csv, skipping the header row.
input_label = genfromtxt('/home/ubuntu/caffe/examples/images/Files/train.csv', delimiter=',')
input_label = input_label[1:7001, 1].reshape(-1)

# BUG FIX: np.load on an .npz returns an NpzFile archive, not an array,
# so input_x.shape (and the SVC fit below) would fail.  A single unnamed
# array saved with np.savez_compressed is stored under the key 'arr_0'.
input_x = np.load("tensorFlow_train.npz")["arr_0"]
print('input_x shape ', input_x.shape)
print('input_label shape ', input_label.shape)

# np.savez_compressed("tensorFlow_train", input_x)
np.savez_compressed("tensorFlow_test", test_x)

# Hold out 10% of the training features to estimate generalisation.
X_train, X_test, y_train, y_test = train_test_split(input_x, input_label, test_size=0.1, random_state=42)
clf = SVC(C=500.0, decision_function_shape='ovr', max_iter=-1, probability=False)
clf.fit(X_train, y_train)
print('training accuracy is', clf.score(X_train, y_train))
print('validation accuracy is', clf.score(X_test, y_test))

# Refit on the full training set and predict the test images.
clf = SVC(C=500.0, decision_function_shape='ovr', max_iter=-1, probability=False)
clf.fit(input_x, input_label)
y_pred = clf.predict(test_x)

# Write the submission file; 'with' closes the handle deterministically
# (the original handle was never closed).
filename = "predict_inception_v3.csv"
with open(filename, "w") as f:
    f.write('Id,Prediction\n')
    # NOTE(review): padding with 2000 zeros when fewer than 1000
    # predictions exist looks arbitrary -- confirm the expected
    # submission row count with the competition spec.
    if len(y_pred) < 1000:
        zeros = np.zeros(2000)
        y_pred = np.append(y_pred, zeros).reshape(-1)
    for i in range(0, len(y_pred)):
        f.write('{0},{1}\n'.format(i + 1, int(y_pred[i])))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.