text stringlengths 38 1.54M |
|---|
from flask import Flask, render_template, redirect
from school_distant.data import db_session
from school_distant.data.users import User
from school_distant.data.test import Tests
from school_distant.data.tasks import Tasks
from school_distant.data.form import RegisterForm, LoginForm, TestsForm, TasksForm
from flask_login import LoginManager, login_user, login_required, logout_user, current_user
db_session.global_init("db/school.sqlite")
app = Flask(__name__)
login_manager = LoginManager()
login_manager.init_app(app)
app.config['SECRET_KEY'] = 'yandexlyceum_secret_key'
COUNT_OF_OTHER_QUESTIONS = 0
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: restore a User from the session by primary key."""
    db = db_session.create_session()
    return db.query(User).get(user_id)
@app.route("/")
@app.route("/school")
def index():
    """Landing page: render every test stored in the database."""
    db = db_session.create_session()
    all_tests = db.query(Tests)
    return render_template("school.html", test=all_tests)
@app.route('/register', methods=['GET', 'POST'])
def reqister():
    """Register a new user; redirect to /login on success.

    NOTE(review): the function name keeps the original 'reqister' typo —
    renaming it would change the Flask endpoint name.
    """
    form = RegisterForm()
    if not form.validate_on_submit():
        return render_template('registration.html', title='Регистрация', form=form)
    # Guard clauses: mismatched passwords, then duplicate e-mail.
    if form.password.data != form.password_again.data:
        return render_template('registration.html', title='Регистрация', form=form, message="Пароли не совпадают")
    db = db_session.create_session()
    if db.query(User).filter(User.email == form.email.data).first():
        return render_template('registration.html', title='Регистрация', form=form,
                               message="Такой пользователь уже есть")
    new_user = User(name=form.name.data, email=form.email.data, clas=form.clas.data, occupation=form.occupation.data)
    new_user.set_password(form.password.data)
    db.add(new_user)
    db.commit()
    return redirect('/login')
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and open a Flask-Login session."""
    form = LoginForm()
    if form.validate_on_submit():
        db = db_session.create_session()
        account = db.query(User).filter(User.email == form.email.data).first()
        if account and account.check_password(form.password.data):
            login_user(account, remember=form.remember_me.data)
            return redirect("/school")
        # Wrong credentials: re-render the form with an error message.
        return render_template('log_in.html', message="Неправильный логин или пароль", form=form)
    return render_template('log_in.html', title='Авторизация', form=form)
@app.route('/logout')
@login_required
def logout():
    """Terminate the current session and return to the main page."""
    logout_user()
    return redirect("/school")
@app.route('/test', methods=['GET', 'POST'])
@login_required
def add_tests():
    """Create a Tests row from the submitted form, then redirect to /task.

    Side effect: stores the declared question count in the module-level
    COUNT_OF_OTHER_QUESTIONS so /task knows how many questions to collect.
    NOTE(review): a single module global is shared by all users/requests —
    concurrent test authors will clobber each other's counters; confirm
    single-user usage.
    """
    global COUNT_OF_OTHER_QUESTIONS
    form = TestsForm()
    if form.validate_on_submit():
        session = db_session.create_session()
        tests = Tests()
        tests.title = form.title.data
        tests.subject = form.subject.data
        tests.count_of_questions = form.count_of_questions.data
        tests.user_id = form.user_id.data
        # merge() re-attaches the (detached) current_user to this session
        # so the new test can be linked to them.
        current_user.tests.append(tests)
        session.merge(current_user)
        session.commit()
        COUNT_OF_OTHER_QUESTIONS = int(form.count_of_questions.data)
        return redirect('/task')
    return render_template('tests.html', title='Добавление теста', form=form)
@app.route('/task', methods=['GET', 'POST'])
@login_required
def add_tasks():
    """Add one question for the test most recently created via /test.

    Uses the module-level COUNT_OF_OTHER_QUESTIONS as a countdown: keeps
    redirecting back to /task until the declared number of questions has
    been entered, then returns to /school.  NOTE(review): shared global —
    not safe with concurrent users (see add_tests).
    """
    global COUNT_OF_OTHER_QUESTIONS
    form = TasksForm()
    if form.validate_on_submit():
        session = db_session.create_session()
        tasks = Tasks()
        tasks.title = form.title.data
        tasks.ans1 = form.ans1.data
        tasks.ans2 = form.ans2.data
        tasks.ans3 = form.ans3.data
        tasks.ans4 = form.ans4.data
        tasks.correct_answer = form.correct_answer.data
        session.add(tasks)
        session.commit()
        if COUNT_OF_OTHER_QUESTIONS > 0:
            COUNT_OF_OTHER_QUESTIONS -= 1
            return redirect('/task')
        else:
            return redirect('/school')
    return render_template('task.html', title='Добавление вопроса', form=form)
@app.route('/tests_delete/<int:id>', methods=['GET', 'POST'])
@login_required
def news_delete(id):
    """Delete one of the current user's tests by id; 404 when absent/not owned."""
    from flask import abort  # bug fix: abort was called but never imported (NameError at runtime)
    session = db_session.create_session()
    test = session.query(Tests).filter(Tests.id == id, Tests.user == current_user).first()
    if test:
        session.delete(test)
        session.commit()
    else:
        abort(404)
    return redirect('/school')
if __name__ == '__main__':
app.run(port=8082, host='127.0.0.1')
|
import MySQLdb
import random
import database_creds as dbc
#SQL
check_query = 'SELECT * FROM raw_data LIMIT 1;'
raw_data_insert = 'INSERT INTO raw_data (user_id, event_id, amount) VALUES (%s ,%s, %s);'
# Constants
rows_interval = (90000,100000)
user_id_interval = (1,100)
event_id_interval = (1,100)
amount_interval = (-100000, 100000)
def generate_random(i):
    """Yield ``i`` random (user_id, event_id, amount) tuples drawn from the
    module-level interval constants."""
    for _ in range(i):
        yield (
            random.randint(user_id_interval[0], user_id_interval[1]),
            random.randint(event_id_interval[0], event_id_interval[1]),
            random.randint(amount_interval[0], amount_interval[1]),
        )
if __name__ == '__main__':
    # Seed the raw_data table with random rows, but only when it is empty.
    try:
        db = MySQLdb.connect(dbc.host, dbc.user, dbc.passwd, dbc.db_name)
        cur = db.cursor()
    except MySQLdb.Error:
        # bug fix: the bare 'except:' also swallowed SystemExit/KeyboardInterrupt.
        exit('Connection failed. Something went wrong')
    try:
        cur.execute(check_query)
        if not cur.fetchone():
            cur.executemany(raw_data_insert,
                            generate_random(random.randint(rows_interval[0], rows_interval[1])))
            db.commit()
        else:
            exit('raw_data is full')
    finally:
        # bug fix: cursor/connection were never closed.
        cur.close()
        db.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last-Updated : <2014/01/03 23:24:05 by samui>
import webapp2
import jinja2
import os
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)+"/../.."),
#loader=jinja2.FileSystemLoader("/Users/samui/temp/python/GAE/google_appengine/product/aroundmethod/"),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class BaseTemplate:
    """Renders a page template wrapped with the shared base/header/footer partials."""

    @classmethod
    def render(cls, template, params):
        """Render ``template`` after injecting the common chrome into ``params``.

        ``params`` is mutated in place (the three ``common_*`` keys are added).
        """
        # bug fix: the first parameter of a @classmethod is the class, so it
        # is named ``cls`` (it was misleadingly called ``self``); also fixed
        # the 'headTempate' local-name typo.
        base_template = JINJA_ENVIRONMENT.get_template('template/common/base.html')
        head_template = JINJA_ENVIRONMENT.get_template('template/common/_header.html')
        foot_template = JINJA_ENVIRONMENT.get_template('template/common/_footer.html')
        params.update({
            'common_base': base_template.render({'title': 'SimpleTitle'}),
            'common_head': head_template.render({'uri_for': webapp2.uri_for}),
            'common_footer': foot_template.render({'uri_for': webapp2.uri_for}),
        })
        body_template = JINJA_ENVIRONMENT.get_template(template)
        return body_template.render(params)
|
import sys
sys.stdin = open("2309.txt")
nine = [int(input()) for _ in range(9)]
flag = 0
A= []
def seven(n, k, s, small):
    """DFS over the 9 heights in the global ``nine``: pick 7 summing to 100.

    n     -- index of the next candidate in ``nine``
    k     -- number of heights chosen so far
    s     -- running sum of the chosen heights
    small -- list of chosen heights

    Prints the first solution found (sorted ascending) and sets the global
    ``flag`` so all remaining branches terminate early.
    """
    global flag
    if 7 - k > 9 - n:
        # bug fix: not enough elements remain to reach 7 picks.  The original
        # only returned here when k == 7 (an unreachable combination), so
        # exhausted exclude-branches fell through and indexed nine[9],
        # raising IndexError.
        return
    if flag == 1:      # a solution was already printed
        return
    if s > 100:        # overshoot — heights are positive, prune
        return
    if k == 7:
        if s == 100:
            small.sort()
            for i in range(7):
                print(small[i])
            flag = 1
    else:
        seven(n + 1, k + 1, s + nine[n], small + [nine[n]])  # include nine[n]
        seven(n + 1, k, s, small)                            # exclude nine[n]
seven(0,0,0,[]) |
def combine_known_transpos(dataset, combo_transpo, normal_transpo):
    """Look up a transposition entry for every word of a [line][sentence][word]
    nested dataset and build a parallel nested structure.

    Words longer than 20 characters take combo_transpo's entry wholesale;
    shorter words pair normal_transpo's entry with element [1] of the combo
    entry.  NOTE(review): relies on ``mylower`` defined elsewhere in the
    module, and raises KeyError for words missing from either mapping —
    confirm inputs are pre-filtered.
    """
    out = [[[] for _sentence in line] for line in dataset]
    for i, line in enumerate(dataset):
        for j, sentence in enumerate(line):
            for word in sentence:
                word = mylower(word)
                # NOTE(review): debug print left in — very noisy on large data.
                print(word in combo_transpo, word in normal_transpo)
                if len(word) > 20:
                    out[i][j].append(combo_transpo[word])
                else:
                    out[i][j].append([normal_transpo[word], combo_transpo[word][1]])
    return out
def eval_cosine_similarity(embedding):
    """Compute all pairwise cosine similarities over ``embedding`` and write
    the overall mean/max/min (one per line) to ``sim_eval.txt``.

    Scores are accumulated in chunks of 100000 to bound memory; chunk means
    are combined by ``calc_mean`` weighted by chunk size (presumably a
    weighted mean — confirm).  Relies on module-level ``progressbar``,
    ``np``, ``cosine_similarity`` and ``calc_mean``.  O(n^2) pairs,
    including self-pairs and both (a, b) / (b, a) orderings.
    """
    similarity_scores = []
    sim_mean = []
    sim_max = []
    sim_min = []
    size = []   # number of scores in each flushed chunk
    bar = progressbar.ProgressBar(max_value=len(embedding)**2)
    i = 0       # total pairs processed (drives the progress bar)
    j = 0       # pairs in the current chunk
    for key1 in embedding:
        for key2 in embedding:
            similarity_scores.append(cosine_similarity(embedding[key1], embedding[key2]))
            j += 1
            i += 1
            if j == 100000:
                # Flush the chunk: keep its aggregates, drop the raw scores.
                similarity_scores = np.array(similarity_scores)
                sim_max.append(np.max(similarity_scores))
                sim_mean.append(np.mean(similarity_scores))
                sim_min.append(np.min(similarity_scores))
                size.append(j)
                j = 0
                similarity_scores = []
            bar.update(i)
    if j != 0:
        # Flush the final partial chunk.
        similarity_scores = np.array(similarity_scores)
        sim_max.append(np.max(similarity_scores))
        sim_mean.append(np.mean(similarity_scores))
        sim_min.append(np.min(similarity_scores))
        size.append(j)
    sim_mean = calc_mean(sim_mean, size)
    sim_max = np.max(sim_max)
    sim_min = np.min(sim_min)
    with open("sim_eval.txt", "w") as f:
        f.write(str(sim_mean) + "\n")
        f.write(str(sim_max) + "\n")
        f.write(str(sim_min) + "\n")
def create_pre_mapping(distribution, orig_words, dic):
    """Build a word -> [probability-vector, candidate-tuple] pre-mapping.

    Each ``distribution`` item is a pair (p, c): a probability vector over
    the dictionary ``dic`` and a (possibly empty) list of (word, score)
    candidates.  When the distribution's argmax wins (and its dictionary
    word is longer than 3 chars) a one-hot vector is stored; otherwise the
    top candidate is kept with weight 1 and a zero vector.
    """
    pre_map = {}
    for idx, (probs, candidates) in enumerate(distribution):
        best = np.argmax(probs)

        def one_hot():
            vec = np.zeros_like(probs)
            vec[best] = 1
            return vec

        if not candidates:
            if len(dic[best]) > 3:
                pre_map[orig_words[idx]] = [one_hot(), tuple()]
        else:
            total = sum(score for _word, score in candidates)
            if probs[best] / (1 + total) > candidates[0][1]:
                if len(dic[best]) > 3:
                    pre_map[orig_words[idx]] = [one_hot(), tuple()]
            else:
                pre_map[orig_words[idx]] = [np.zeros_like(probs), ((candidates[0][0], 1),)]
    return pre_map
def get_word_vec_from_distribution(word_distribution, dic, embeddings):
    """Weighted average of the embedding vectors selected by the non-zero
    entries of ``word_distribution``.

    word_distribution -- per-dictionary-index weights
    dic               -- maps index -> word
    embeddings        -- maps word -> vector
    """
    selected = [(embeddings[dic[idx]], weight)
                for idx, weight in enumerate(word_distribution)
                if weight != 0]
    vecs = np.array([vec for vec, _w in selected])
    weights = [w for _vec, w in selected]
    return np.average(vecs, axis=0, weights=weights)
def sentence_average_from_word_embeddings(posterior, dic, embeddings):
    """For each word distribution in ``posterior``, append the average of all
    word vectors seen so far.

    NOTE(review): the vector accumulator is never reset, so entry ``i`` is a
    running mean over words 0..i rather than a per-word vector — confirm
    this is the intended behaviour.
    """
    running_averages = []
    seen_vecs = []
    for distribution in posterior:
        seen_vecs.append(get_word_vec_from_distribution(distribution, dic, embeddings))
        running_averages.append(np.average(np.array(seen_vecs), axis=0))
    return running_averages
def load_vectors(fname):
    """Load fastText-style word vectors from ``fname``.

    The first line holds "<count> <dim>"; each following line is
    "<word> <v1> ... <vd>".  Returns {word: np.ndarray of floats}.
    """
    # bug fix: the file handle was never closed; use a context manager.
    with io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        n, d = map(int, fin.readline().split())  # header counts (not used further)
        data = {}
        for line in fin:
            tokens = line.rstrip().split(' ')
            data[tokens[0]] = np.array(list(map(float, tokens[1:])))
    return data
import numpy as np

# Demo: np.diff computes differences between consecutive elements.
arr = np.array([10, 15, 25, 5])
newarr = np.diff(arr)       # first-order differences: [5, 10, -20]
print(newarr)
############
arr = np.array([10, 15, 25, 5])
newarr = np.diff(arr, n=2)  # n = how many times diff is applied: [5, -30]
print(newarr)
import os
from time import sleep
import tkinter.messagebox
import tkinter
import sato
if __name__ == '__main__':
    # Compose a SATO SBPL print job (DataMatrix 2D code plus text fields)
    # and send it to a SATO label printer over USB.
    ct = sato.ComThread()
    usb = sato.USB()
    vid = 0x0828  # USB vendor id — NOTE(review): unused below; confirm open_usb() finds the device itself
    pid = 0x0159  # USB product id (also unused)
    # print(f"check serial ports {ct.check_com()}")
    # print(f"open serial port {ct.open_com()}")
    dev = usb.open_usb()
    print(dev)
    barcode = "11234"
    barcodeBytes = barcode.encode("utf-8")  # convert to byte form
    # b'\x1bH520\x1bV109\x1bL0202\x1bS' + barcodeBytes +
    #satoString = b'\x1bA\x1bN\x1bR\x1bR\x1bH070\x1bV00002\x1bL0202\x1bS\x1bB103080*' + barcodeBytes + b'*\x1bH0200\x1bV00009\x1bL0202\x1bS' + barcodeBytes + b'\x1bQ1\x1bZ'
    # Payload in an ISO 15434-style envelope ('[)>' RS 06 GS ... RS EOT) with
    # GS-separated fields — presumably the DataMatrix content; confirm
    # against the label specification.
    data = b'[)>\x1E06\x1D' + \
        b'VH2TM\x1D' + \
        b'P1234567890\x1D' + \
        b'SALC1\x1D' + \
        b'EOptional\x1D' + \
        b'T201210132NA1234567\x1D' + \
        b'COptional\x1D' + \
        b'\x1E\x04'
    # satoString = b'\x1bA\x1bN\x1bH420\x1bV00002\x1bL0202\x1bS\x1b2D50,10,10,000,000\x1bDN%04d,' % len(data) + \
    #              data + \
    #              b'\x1bQ1\x1bZ'
    # SBPL commands below (ESC = \x1b): A start job, Hnnn/Vnnn position,
    # 2D50,... DataMatrix setup, DN <len>,<payload>, L0202 magnification,
    # S print text, Q1 quantity, Z end job — TODO verify against SBPL manual.
    satoString = b'\x1bA\x1bH670\x1bV00016\x1b2D50,06,06,032,032\x1bDN%04d,' % len(data) + \
        data + \
        b'\x1bH960\x1bV00016\x1bL0202\x1bS' + b'H2TM' + \
        b'\x1bH960\x1bV00062\x1bL0202\x1bS' + b'BBBB' + \
        b'\x1bH960\x1bV00108\x1bL0202\x1bS' + b'0001' + \
        b'\x1bQ1\x1bZ'
    satoString1 = b'\x1bA\x1bH30\x1bV00016\x1b2D50,06,06,032,032\x1bDN%04d,' % len(data) + \
        data + \
        b'\x1bH320\x1bV00016\x1bL0202\x1bS' + b'H2TM' + \
        b'\x1bH320\x1bV00062\x1bL0202\x1bS' + b'BBBB' + \
        b'\x1bH320\x1bV00108\x1bL0202\x1bS' + b'0001' + \
        b'\x1bQ1\x1bZ'
    # Two-up layout: left block at H50/H280, right block at H670/H900.
    satoString2 = b'\x1bA\x1bH50\x1bV00016\x1b2D50,06,06,032,032\x1bDN%04d,' % len(data) + \
        data + \
        b'\x1bH280\x1bV00016\x1bL0202\x1bS' + b'\x1b$A,80,80,0\x1b$=' + b'H2TM' + \
        b'\x1bH280\x1bV0102\x1bL0202\x1bS' + b'BBBB' + \
        b'\x1bH280\x1bV0158\x1bL0202\x1bS' + b'0001' + \
        b'\x1bH670\x1bV0016\x1b2D50,06,06,032,032\x1bDN%04d,' % len(data) + \
        data + \
        b'\x1bH900\x1bV0016\x1bL0202\x1bS' + b'\x1b$A,80,80,0\x1b$=' + b'H2TM' + \
        b'\x1bH900\x1bV0102\x1bL0202\x1bS' + b'BBBB' + \
        b'\x1bH900\x1bV0158\x1bL0202\x1bS' + b'0001' + \
        b'\x1bQ1\x1bZ'
    print(satoString)
    print(len(satoString))
    # ct.send_data(satoString)
    usb.write_data(1, satoString2)  # only the two-up layout is actually sent
|
from django.shortcuts import render, get_object_or_404
from .models import *
from django.db.models import Max
# Create your views here.
def index(request):
    """Home page: blog posts ranked by likes, plus sidebar content."""
    posts = Blog.objects.order_by('-likes')
    about = Sobre.objects.last()
    faqs = Faq.objects.order_by('-publidata')[:3]
    authors = Autore.objects.all()[:3]
    quote = Frase.objects.last()
    return render(request, 'index.html', {
        'post': posts,
        'sobre': about,
        'faq': faqs,
        'autore': authors,
        'frase': quote,
    })
def blog(request, id_blog):
    """Detail page for a single blog post; 404 when the id does not exist."""
    entry = get_object_or_404(Blog, pk=id_blog)
    return render(request, 'blog.html', {'chave': entry})
|
class Solution:
    """'Translate number' DP: digits map to letters individually, and the
    pairs 10..25 also map to letters — count the distinct translations."""

    def translateNum(self, num: int) -> int:
        """Return the number of distinct translations of ``num``.

        f[i] = translations of the first i digits.  A single digit always
        extends every translation; a two-digit suffix in 10..25 additionally
        contributes the translations ending two digits back.
        """
        s = str(num)
        n = len(s)
        if n == 0:  # unreachable for int input (str(0) == "0"); kept for safety
            return 1
        f = [0] * (n + 1)
        f[0] = 1
        for i in range(1, n + 1):
            # A lone digit is always translatable.  (The original guarded
            # this with an always-true '0' <= s[i-1] <= '9' check — removed.)
            f[i] = f[i - 1]
            if i > 1 and 10 <= int(s[i - 2:i]) <= 25:
                f[i] += f[i - 2]
        return f[n]
print(Solution().translateNum(12158))
|
# Read a course's total hours and report the absence allowance (25%).
cargahoraria = int(input("Digite a carga horária: "))
maximofaltas = int(cargahoraria * (25 / 100))  # int() truncates, not rounds
print("O máximo de faltas que você pode ter é: ", maximofaltas)
print("Insert M for male and F for female:")
sex = input().upper()
if sex not in ["M", "F"]:
print("You can insert only M for male or F for female.")
exit()
print(sex)
print("Insert your age in years:")
age = int(input())
print(age)
print("Insert your height in cm")
height = int(input())
print(height)
print("Insert your body mass in kg:")
mass = int(input())
print(mass)
def calc_BMR(age, mass, height, sex):
    """Mifflin-St Jeor basal metabolic rate.

    BMR = 10*mass + 6.25*height - 5*age + 5 (male) or -161 (female),
    with mass in kg, height in cm, age in years.
    """
    sex_constant = 5 if sex == "M" else -161
    # bug fix: the original computed -(5 * -age), i.e. ADDED 5*age instead of
    # subtracting it, inflating the result by 10*age.
    BMR = (10 * mass) + (6.25 * height) - (5 * age) + sex_constant
    return BMR
print(f"Your BMR is: {calc_BMR(age, mass, height, sex)}")
print("Press enter to continue.")
input()
# kar je za # je komentar
|
from flask import Flask, jsonify, render_template, request
import spidev
import time
import os
import threading
import sched
import json
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
relayPin = 26
GPIO.setup(relayPin,GPIO.OUT)
GPIO.output(relayPin,0)
light_history = []
moisture_history = []
nameLightDataFile = 'lightdata.json'
nameMoistureDataFile='moisturedata.json'
light_channel = 0
moisture_channels=[1,2,3]
# Load the history
with open(nameMoistureDataFile) as data_file:
moisture_history = json.load(data_file)
with open(nameLightDataFile) as data_file:
light_history = json.load(data_file)
spi = spidev.SpiDev()
spi.open(0,0)
# Function to read SPI data from MCP3008 chip
# Channel must be an integer 0-7
def ReadChannel(channel):
    """Read one 10-bit ADC value (0-1023) from the MCP3008 over SPI.

    channel -- ADC channel number, expected 0-7 (per the comment above).
    """
    # Send: start bit, single-ended mode + channel select, padding byte;
    # the 10 data bits come back split across the last two response bytes.
    adc = spi.xfer2([1,(8+channel)<<4,0])
    data = ((adc[1]&3) << 8) + adc[2]
    return data
# Function to convert data to voltage level,
# rounded to specified number of decimal places.
def ConvertVolts(data, places):
    """Convert a raw 10-bit ADC reading to volts on a 3.3 V reference,
    rounded to ``places`` decimal places."""
    return round(data * 3.3 / 1023.0, places)
def getJSON(currentValue, history):
    """JSON response carrying the current reading and a history list.

    Must run inside a Flask request context (uses jsonify).
    """
    return jsonify({"id":currentValue,"message":"use /history for the complete history", "history":history})
def savehistory(history):
    # NOTE(review): despite the generic name this always writes the MOISTURE
    # data file — only call it with moisture_history.
    with open(nameMoistureDataFile,'w') as outfile:
        json.dump(history,outfile)
def saveLightHistory(history):
    """Persist the light history list to the light JSON data file."""
    with open(nameLightDataFile,'w') as outfile:
        json.dump(history,outfile)
def turnMeasuringDevicesOn():
    # Energize the sensor relay.  (Python 2 print statements below — this
    # module is Python 2 code.)
    GPIO.output(relayPin,1)
    print 'devices on'
def turnMeasuringDevicesOff():
    # De-energize the sensor relay.
    GPIO.output(relayPin,0)
    print 'devices off'
def measure_data():
    """Sample all sensors once.

    Returns [light_volts, moisture1_volts, moisture2_volts, moisture3_volts].
    """
    light_level = ReadChannel(light_channel)
    light_volts = ConvertVolts(light_level,2)
    moisture_channels=[1,2,3]  # shadows the identical module-level constant
    voltsToReturn=[]
    voltsToReturn.append(light_volts)
    for channel in moisture_channels:
        moisture_level=ReadChannel(channel)
        moisture_volt =ConvertVolts(moisture_level,2)
        voltsToReturn.append(moisture_volt)
    return voltsToReturn
def foo():
    """Periodic sampling job: power the sensors, let them settle, record one
    reading, persist both histories, power down, then re-arm itself.

    Re-schedules every 60 seconds — the original trailing comment said
    'run every hour', which contradicts the 60-second Timer interval.
    """
    print(time.ctime())
    turnMeasuringDevicesOn()
    time.sleep(3)  # settle time before sampling
    data = measure_data()
    light_history.append(data[0])
    moisture_history.append(data[1::])  # the three moisture readings
    savehistory(moisture_history)
    saveLightHistory(light_history)
    time.sleep(3)
    turnMeasuringDevicesOff()
    threading.Timer(60,foo).start()  # run again in 60 seconds
foo()
app= Flask(__name__)
@app.route("/")
def hello():
    """Root endpoint: placeholder current value plus the last 500 light samples."""
    return getJSON(0,light_history[-500::])
@app.route("/history")
def getHistory():
    """Full light history as JSON.

    Bug fix: the original referenced an undefined name ``history`` and raised
    NameError on every request; the module stores this list as light_history.
    """
    return jsonify({"id": 0, "history": light_history})
@app.route("/moisture")
def getMoisture():
    """Full moisture history (list of 3-element readings) as JSON."""
    return jsonify({"id":0, "history":moisture_history})
if __name__=="__main__":
app.run(host='0.0.0.0',port=8010,debug=True)
|
import sys
from xopen import xopen as open
def filter_vcf(inVcf, outVcf, minQUAL=20, minDP=10):
    """Stream a VCF, keeping only confident non-reference single-sample calls.

    A record passes when its genotype contains no '0' allele, QUAL >= minQUAL
    and the sample's DP >= minDP.  ALT is collapsed to the called allele.
    Header lines ('#...') are copied through unchanged.

    inVcf  -- path to the (possibly compressed, via xopen) input VCF
    outVcf -- writable text file object
    """
    for line in open(inVcf):
        if line.startswith('#'):
            outVcf.write(line)
            continue
        temp = line.rstrip().split('\t')
        if len(temp) <= 9:            # no sample column
            continue
        sample = temp[9]
        CHROM, POS, ID, REF, ALT, QUAL, FILTER, INFO, FORMAT = temp[:9]
        gt = sample.split(':')[0]
        gts = gt.split('/')
        if '0' in set(gts):           # skip calls with a ref allele (0/0, 0/1, ...)
            continue
        if float(QUAL) < minQUAL:     # low QUAL
            continue
        dp = sample.split(':')[FORMAT.split(':').index('DP')]
        if int(dp) < minDP:           # low DP
            continue
        alt = gts[0]
        ALTs = ALT.split(',')
        ALT = ALTs[int(alt) - 1]      # keep only the called ALT allele
        line = [CHROM, POS, ID, REF, ALT, QUAL, FILTER, INFO, FORMAT, sample]
        # bug fix: 'print >> outVcf, ...' is Python 2 syntax — a SyntaxError
        # on Python 3, which the xopen import implies; write explicitly.
        outVcf.write('\t'.join(line) + '\n')
if __name__ == '__main__':
filter_vcf(inVcf=sys.argv[1], outVcf=sys.stdout, minQUAL=20, minDP=10)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""--- Day 5: Doesn't He Have Intern-Elves For This? ---
Santa needs help figuring out which strings in his text file are naughty or
nice.
A nice string is one with all of the following properties:
It contains at least three vowels (aeiou only), like aei, xazegov, or
aeiouaeiouaeiou. It contains at least one letter that appears twice in a row,
like xx, abcdde (dd), or aabbccdd (aa, bb, cc, or dd). It does not contain the
strings ab, cd, pq, or xy, even if they are part of one of the other
requirements.
For example:
- ugknbfddgicrmopn is nice because it has at least three vowels (u...i...o...), a
double letter (...dd...), and none of the disallowed substrings.
- aaa is nice because it has at least three vowels and a double letter, even
though the letters used by different rules overlap.
- jchzalrnumimnmhp is naughty because it has no double letter.
- haegwjzuvuyypxyu is naughty because it contains the string xy.
- dvszwmarrgswjxmb is naughty because it contains only one vowel.
How many strings are nice?
"""
import sys
import click
from string import ascii_lowercase as ALPHABET
FORBIDDEN = ['ab', 'cd', 'pq', 'xy']
VOWELS = 'aeiou'
def num_vowels(text):
    """Count occurrences of vowels (the VOWELS constant) in ``text``."""
    return sum(1 for char in text if char in VOWELS)
def repeated_chars(text, repeats=2):
    """True if any letter appears ``repeats`` times in a row in ``text``."""
    return any(letter * repeats in text for letter in ALPHABET)
def forbidden_patterns(text):
    """True if ``text`` contains any of the disallowed FORBIDDEN substrings."""
    return any(bad in text for bad in FORBIDDEN)
def nice_string(line):
    """Part-1 niceness: >= 3 vowels, a doubled letter, no forbidden pair."""
    return (num_vowels(line) >= 3
            and repeated_chars(line)
            and not forbidden_patterns(line))
def total_nice_strings(text):
    """Count nice entries in a whitespace-separated blob of strings."""
    return sum(nice_string(entry) for entry in text.split())
def non_overlapping_pair(text):
    """True if some two-letter pair appears twice without overlapping."""
    for start, first in enumerate(text[:-2]):
        pair = '{}{}'.format(first, text[start + 1])
        if pair in text[start + 2:]:
            return True
    return False
def has_letter_hop(text):
    """True if some letter repeats with exactly one letter between (like xyx)."""
    return any(ch == text[pos + 2] for pos, ch in enumerate(text[:-2]))
def nicer_string(text):
    """Part-2 niceness: a repeated non-overlapping pair plus a letter hop."""
    return non_overlapping_pair(text) and has_letter_hop(text)
def total_nicer_strings(text):
    """Count part-2 nice entries in a whitespace-separated blob of strings."""
    return sum(nicer_string(entry) for entry in text.split())
def calculate_solution_1(text):
    """Part 1 answer: the number of nice strings."""
    return total_nice_strings(text)


def calculate_solution_2(text):
    """Part 2 answer: the number of nicer strings."""
    return total_nicer_strings(text)
@click.command()
@click.option('--source_file', default='data/05.txt',
              help='source data file for problem')
def main(source_file):
    """Simple solution to adventofcode problem 5.

    Reads the puzzle input file and prints the part-1 and part-2 counts.
    """
    data = ''
    with open(source_file) as source:
        data = source.read()
    print('Santa found {} entries on the nice list.'.format(
        calculate_solution_1(data)))
    print('Santa found {} entries on the nicer list.'.format(
        calculate_solution_2(data)))
if __name__ == "__main__":
    # click commands return None; sys.exit(None) exits with status 0.
    sys.exit(main())
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation
import random
def persisted_rw(nk, x0, p):
    """Persistent (correlated) 1-D random walk.

    nk -- number of recorded positions
    x0 -- starting position (stored at index 0)
    p  -- probability of KEEPING the previous step direction

    Returns an ndarray of ``nk`` positions; consecutive entries differ by 1.
    """
    positions = np.zeros(nk)
    positions[0] = x0
    x = x0
    step = random.choice([-1, 1])
    for k in range(1, nk):
        if random.random() > p:   # with probability 1-p: reverse direction
            step = -step
        x += step
        positions[k] = x
    return positions
# --- Mean squared displacement <r^2>(t) of the persistent random walk ---
n1 = 100           # walk length (time steps)
t = np.arange(n1)
ns = 10000         # number of independent walks to average over
x0 = 0

# First ensemble: low persistence, p = 0.2
p = 0.2
r1 = np.zeros(n1)
for i in range(ns):
    xt = persisted_rw(n1, x0, p)
    for l in range(n1):
        r1[l] = r1[l] + (xt[l] - x0) ** 2
for i in range(n1):
    r1[i] = r1[i] / ns
# bug fix: this curve is computed with p = 0.2 but was labelled 'p=0.8'
plt.plot(t, r1, label='p=0.2')

# Second ensemble: high persistence, p = 0.8
p = 0.8
r2 = np.zeros(n1)
for i in range(ns):
    xt = persisted_rw(n1, x0, p)
    for l in range(n1):
        r2[l] = r2[l] + (xt[l] - x0) ** 2
for i in range(n1):
    r2[i] = r2[i] / ns
plt.plot(t, r2, label='p=0.8')

plt.ylabel('<r^2>')
plt.xlabel('t')
tytul = "skorelowane błądzenia losowe"  # Polish: "correlated random walks"
plt.title(tytul)
plt.legend()  # bug fix: labels were set but no legend was ever drawn
plt.show()
# definition of MSD / axes
from rest_framework import serializers
from .models import *
class FormDSerializer(serializers.ModelSerializer):
    """DRF ModelSerializer exposing every field of the FormD model."""
    class Meta:
        model = FormD
        fields = "__all__"
import json
import requests
import sys
import pandas as pd
class gitPulls ():
    """Collects open pull requests across all k8-proxy GitHub repositories.

    Reads credentials from a JSON config file with keys "gituser", "token"
    and "url" (a base API URL to which 'owner/repo' is appended directly).
    """
    def __init__(self, config):
        # config -- path to the JSON credentials file described above.
        with open(config) as f:
            data = json.load(f)
        self.username = data["gituser"]
        self.token = data ["token"]
        self.url = data["url"]
        self.session = requests.Session()
        self.repositories = self.get_repos()
    def get_repos(self):
        """Return 'k8-proxy/<name>' for every repository of the k8-proxy user.

        NOTE(review): unauthenticated and unpaginated — only the first page
        of repositories is returned; confirm that is acceptable.
        """
        url = 'https://api.github.com/users/k8-proxy/repos'
        try:
            repos = pd.DataFrame(self.session.get(url=url).json())
            return repos['name'].apply(lambda s: 'k8-proxy/'+s if isinstance(s, str) else s).to_list()
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
    def getPullRequests(self):
        """Fetch open PRs for every repository.

        Returns a DataFrame with columns: Repository Name, PR Link,
        Reviewer(s), Label(s), Date Created.
        """
        d = []
        for repo in self.repositories:
            #print ("\n:::::Fetching pull requests from " + repo)
            try:
                temp_url = self.url + repo + "/pulls"
                req = requests.get(temp_url, auth=(self.username, self.token))
                data = req.json()
                # On success GitHub returns a list; on error a dict with a
                # "message" key.  The inner try exploits that: indexing a
                # list with "message" raises TypeError (the success path).
                # NOTE(review): 'break' aborts ALL remaining repos on the
                # first server error — 'continue' was presumably intended.
                try:
                    print("Error! A message from github server: \n" + data["message"])
                    break
                except:
                    #print("Success!")
                    empty = True
                    counter = 0
                    for pullRequest in data:
                        link = pullRequest["html_url"]
                        name = pullRequest["base"]["repo"]["name"]
                        date = str(pullRequest["created_at"])
                        empty = False
                        counter = counter + 1
                        #print("\n:" + str(counter))
                        #print("PR Link: " + link)
                        #print("Repository Name: " + name)
                        # Comma-join requested reviewers; "not assigned" when none.
                        all_reviewers = ""
                        for reviewer in pullRequest["requested_reviewers"]:
                            all_reviewers += reviewer["login"] + ", "
                        if (all_reviewers == ""):
                            all_reviewers = "not assigned"
                        else:
                            all_reviewers = all_reviewers[:-2]  # drop trailing ", "
                        # Comma-join labels; "none" when there are no labels.
                        all_labels = ""
                        for label in pullRequest["labels"]:
                            all_labels += label["name"] + ", "
                        if (all_labels == ""):
                            all_labels = "none"
                        else:
                            all_labels = all_labels[:-2]
                        #print("Reviewer(s): " + all_reviewers)
                        #print("Label(s): " + all_labels)
                        #print("Date Created: " + date)
                        d.append([name, link, all_reviewers, all_labels, date])
                    #if (empty):
                    #print("\n:0")
                    #print("\nThere are no pull requests in " + repo)
            except:
                # NOTE(review): bare except hides real failures (network
                # errors, schema changes) — consider narrowing.
                print("\nUnexpected error: ", sys.exc_info()[0])
        df = pd.DataFrame(d, columns=['Repository Name', 'PR Link', 'Reviewer(s)', 'Label(s)','Date Created'])
        return df
|
"""
聊天室机制思路(重点思想)
功能 : 类似qq群功能
【1】 有人进入聊天室需要输入姓名,姓名不能重复
* 客户端input姓名,服务端验证是否存在姓名,服务端有记录
【2】 有人进入聊天室时,其他人会收到通知:xxx 进入了聊天室
【3】 一个人发消息,其他人会收到:xxx : xxxxxxxxxxx
* 经过服务器转发客户端消息
【4】 有人退出聊天室,则其他人也会收到通知:xxx退出了聊天室
* 随时发送随时接受(sendto,recvfrom)发送接收互不干扰
【5】 扩展功能:服务器可以向所有用户发送公告:管理员消息: xxxxxxxxx
# 服务器有多个客户端
# 服务器向所有客户端发送消息(公告)
# 架构先写服务端,功能先写客户端
# 写一个测一个
# 抓大放小
执行流程架构
1.需求认知
2.基本的技术类型
× 网络传输 -->TCP(一个客户端断开,服务器用不了) UDP(群聊)
× 服务端需要存储用户(存储什么信息,怎么存(用户名,地址)(数据结构:列表[(name,adderss),] {name:adderss})
× 服务器要给所有人发消息(要知道所有人地址)
× 转发机制(发送接收互不干扰)(发送消息,接受消息各一个进程)
× 区分请求类型(固定模型思路)--》定协议
3.功能模块划分(拆)
一个功能一个函数
函数封装
× 架构模型搭建
× 进入聊天室
× 聊天
× 退出聊天室
4.每一个功能具体实现逻辑
× 架构模型搭建
客户端:创建udp套接字,准备给服务端发请求
服务端:创建udp套接字,等待客户端请求
(每个客户端发的请求不同,请求类型也不同)
× 进入聊天室
客户端:
输入姓名
将姓名发送给服务端
等待服务端反馈结果
服务端:
接收客户端姓名
判断该用户是否存在
将结果发送给客户端
【如果不能进入聊天室(原因),结束】
【能进入,(ok),存储用户信息;告知他人】
× 聊天
客户端:
创建新进程
父进程负责循环输入内容,发送消息
子进程负责循环接收,打印
服务端:
接收消息
转发给其他人
× 退出聊天室
客户端:
输入quit(或者直接ctrl+c)
告知服务端
进程退出
服务端;
接收消息
将其从用户字典中删除
告知其他人
5.通信协议(双方的约定)
客户端请求类型:
进入聊天室
L name (请求类型 请求参量)
聊天
C name msg
退出聊天室
Q ****
6.优化修改
× 注释添加
× 代码的重构
× 细节修改
"""
|
array = [1, 1, 2, 5, 5, 5, 7, 7, 9, 9, 9, 9, 9, 11]


def upper_bound(array, target):
    """Index of the first element strictly greater than ``target``.

    Equivalent to bisect.bisect_right on a sorted list.  Bug fix: the
    original returned None when no element exceeded target, crashing
    freq_count for the largest value.
    """
    for i, value in enumerate(array):
        if value > target:
            return i
    return len(array)


def lower_bound(array, target):
    """Index of the first element >= ``target`` (bisect.bisect_left).

    Bug fix: for targets absent from the list the original returned
    ``previous - 1``, which made freq_count report a frequency of 1 for
    values that never occur (the author's 'revisit this' note).
    """
    for i, value in enumerate(array):
        if value >= target:
            return i
    return len(array)


def freq_count(array, target):
    """Report how many times ``target`` occurs in the sorted ``array``."""
    ub = upper_bound(array, target)
    lb = lower_bound(array, target)
    print(lb)
    print(ub)
    return f"Frequency is {ub - lb}"


print(freq_count(array, 1))
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 20 20:01:26 2021
@author: Aaron
"""
import os
import pandas as pd
import numpy as np
import seaborn as sns
#from textblob import TextBlob
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
import datetime
sns.set()
os.getcwd()
os.chdir('C:\\Users\\Aaron\\Desktop\\Boehringer\\Codes\\Python\\Miros')
os.getcwd()
data=pd.read_csv("dataset.csv")
#Inspeccion de lo sdatos
list(data.columns)
data.head()
data.shape
data.info()
data.describe()
#mayor detalle de las variables:
data['Subscriber Type'].value_counts()
data['Start Station'].value_counts()
data['End Station'].value_counts()
#pivot tables
pivot1 = pd.pivot_table(data.iloc[:,[3,9]], index = 'Start Station', aggfunc='count')
print(pivot1)#total de suscriptores por terminal
pivot2 = pd.pivot_table(data, index = ['Start Station','Subscriber Type'],
aggfunc='count', margins=True)
print(pivot2)#debemos recortar esta tabla
pivot2.head()
pivot2.shape
print(pivot2.iloc[:,6])#total de suscriptores por tipo para cada terminal
#Duracion promedio de los viajes por tipo de suscriptor
data[['Subscriber Type', 'Duration']].groupby(['Subscriber Type'], as_index = False).mean()
sns.factorplot(x='Subscriber Type', y ='Duration', data=data, kind="bar", size=3)
plt.show()
#Histogramas con distribucion de las variables:
sns.distplot(data['Duration'])
plt.show()
sns.distplot(data['Bike #'])
plt.show()
sns.distplot(data['Start Station'])
plt.show()
#%%
%matplotlib inline
#Matriz de Correlacion lineal
data.corr()
#heatmap de las correlaciones
plt.figure(figsize=(8,4))
sns.heatmap(datos.corr(),cmap='Greens',annot=False)
#matriz de scatter plots para todas la variables, se puede hacer por grupos o individual
scatter_matrix(data)
#%%
#Gragica de los suscriptores y terminales por fecha
#creamos otro dataframe con las columnas de fechas y terminales
time_data=data.iloc[:,[2,3,4,5,6,7,9]]#dataset de fechas, estaciones y suscriptores
list(time_data.columns)
time_data.head()
#Ahora aplicamos la transformacion de formsto de fecha
#ejemplo de como transformar strings a date time
fecha = pd.to_datetime(data['Start Date'])#correcta transformacion de fechas
print(fecha)
#fecha vs suscriptores
time_data=data.sort_values('Start Date', ascending=True)
plt.plot(time_data['Start Date'], time_data['Subscriber Type'])
plt.xticks(rotation='vertical')
#fecha vs start terminal
time_data=time_data.sort_values('Start Date', ascending=True)
plt.plot(time_data['Start Date'], time_data['Start Terminal'])
plt.xticks(rotation='vertical')
#%%
"""
Para crear las graficas de la duracion min, promedio y maxima de los tiempos por mes y de
acuerdo a los tipos de suscriptores, primero distinguimos valores por mes, después, ya
para graficar debemos separar las duraciones minimas y medias en una grafica y las
duraciones maximas en otra ya que los valores al ser muy distintos; al visualizacion no
se alcanzan a distinguir adecuadamente
"""
#Transformas la data a tipo datetime
data['Start Date']=pd.to_datetime(data['Start Date'])
#creamos una variable que devuelva el mes
data['Month']=data['Start Date'].apply(lambda x: x.strftime('%B'))
#observamos la nueva columna
list(data.columns)
data.head()
data.tail()
#Veamos la distribucion de los meses
data['Month'].unique()
data['Month'].value_counts()
#Ahor avamos a filtrar el dataset por meses, y se puede analizar por mes
March=data[data['Month']=='March']
April=data[data['Month']=='April']
May=data[data['Month']=='May']
June=data[data['Month']=='June']
July=data[data['Month']=='July']
August=data[data['Month']=='August']
#Calculamos el vector de medias y los valores minimos y maximos para todas la variables:
means = data.groupby('Month').mean()
type(means)
means = pd.DataFrame(means)#transformamos a dataframe
means
means['Month']=means.index
means
#Ls minimos
mini = data.groupby('Month').min()
mini = pd.DataFrame(mini)#transformamos a dataframe
mini['Month']=mini.index
#Ahora los maximos
maxi = data.groupby('Month').max()
maxi = pd.DataFrame(maxi)#transformamos a dataframe
maxi['Month']=maxi.index
#Agrupamo slos resultados en un solo dataset (el de lo sminimos)
mini['Maxi']=maxi['Duration']#le anadimos las columnas
mini['Means']=means['Duration']
mini.shape
values=mini.iloc[:,[0,9,10,11]]#filtramos
values.rename(columns = {'Duration': 'Min'}, inplace=True)#renombramos
values=values.reset_index(drop= True)#reindexamos
print(values.columns)
values
#Para graficar vamos a separar min con media y max por separado
min_mean=values.iloc[:,[0,1,3]]
min_mean.plot(x='Month', kind='bar',stacked=False,
title='Duration Min & Mean by month')
#Los maximos
values.iloc[:,[1,2]].plot(x='Month', kind='bar',stacked=False,
title='Max duration by month')
#######AHORA GRAFICAMOS LAS DURACIONES DE ACUERDO A LAS PREFENCIAS DE LOS SUCRIPTORES
sus_type=data.iloc[:,[1,9]]
sus_type.head()
means_s = sus_type.groupby('Subscriber Type').mean()
means_s = pd.DataFrame(means_s)#transformamos a dataframe
means_s
means_s['Subscriber Type']=means_s.index
means_s
#Los minimos
mini_s = sus_type.groupby('Subscriber Type').min()
mini_s = pd.DataFrame(mini_s)#transformamos a dataframe
mini_s['Subscriber Type']=mini_s.index
#Ahora los maximos
maxi_s = sus_type.groupby('Subscriber Type').max()
maxi_s = pd.DataFrame(maxi_s)#transformamos a dataframe
maxi_s['Month']=maxi_s.index
#Agrupamo slos resultados en un solo dataset (el de lo sminimos)
mini_s['Maxi']=maxi_s['Duration']#le anadimos las columnas
mini_s['Means']=means_s['Duration']
mini_s.shape
mini_s
values_s=mini_s
values_s
values_s.rename(columns = {'Duration': 'Min'}, inplace=True)#renombramos
values_s=values_s.reset_index(drop= True)#reindexamos
print(values_s.columns)
values_s
#Para graficar vamos a separar min con media y max por separado
values_s.iloc[:,[0,1,3]].plot(x='Subscriber Type', kind='bar',stacked=False,
title='Duration Min & Mean by suscriber type')
#Los maximos
values_s.iloc[:,[1,2]].plot(x='Subscriber Type', kind='bar',stacked=False,
title='Max duration by suscriber type')
#%%
#MValores faltantes:
sns.heatmap(datos.isnull(), cbar=False, yticklabels=False, cmap='viridis')
from dateutil.parser import parse, parser
import datetime
parse('03/01/2014 12:16:00 a. m.').date()
print(datetime.date(2014, 3, 1))
df['A'] = df['A'].apply(add_2)
print (df)
time_data['Start Date'] = time_data['Start Date'].apply(parse)
#time_data['Start Date'] = time_data['Start Date'].apply(date())
print (time_data)
type(time_data['Start Date'])
def string_date_time(dstr):
    """Normalize a date string to 'YYYY-MM-DD'.

    NOTE(review): ``parser`` here is the class imported from dateutil.parser,
    so ``parser.parse(dstr)`` calls an unbound method on the class — the
    module-level ``parse`` function (also imported above) is presumably what
    was meant; confirm before relying on this helper.
    """
    d = parser.parse(dstr)
    d = d.strftime("%Y-%m-%d")
    return d
string_date_time(time_data['Start Date'])
###
import datetime as dt
datetime.datetime.strptime
Enero=data[(data['Start Date']>=datetime.datetime.strptimedate(2014,1,1)) &
(data['Start Date']<=datetime.datetime.strptime.date(2014,1,31))]
Enero=data[(data['Start Date']>=datetime.date(2014,1,1)) &
(data['Start Date']<=datetime.date(2014,1,31))]
type(data['Start Date'])
dt.datetime.date(2014,1,1)
dt.datetime.date(2014,1,1)
dt.datetime.strptime()
data_fecha = data.set_index('Start Date')
data_fecha.head()
# Se crean las fechas con la librería datetime
ene_inic = dt.datetime(2014, 1, 1)
ene_fin = dt.datetime(2014, 1, 31)
# Filtro por fecha
data_fecha.loc[ene_inic: ene_fin]
date_string = "12/11/2018"
date_object = dt.strptime(date_string, "%d %m %Y")
print("date_object =", date_object)
###
###
#Vamos a graficar por mes las medias y los valores minimos y maximos:
rango = np.arange(6)
width = 0.2
# plot data in grouped manner of bar type
plt.bar(rango-0.2, mini['Duration'], width, color='cyan')
plt.bar(rango, means['Duration'], width, color='orange')
plt.bar(rango+0.2, maxi['Duration'], width, color='green')
plt.xticks(rango, ['March', 'April', 'May', 'June', 'July', 'August'])
plt.xlabel("Months")
plt.ylabel("Values")
plt.legend(["Min", "Mean", "Max"])
plt.show()
###
#para la grafica:
# create data
df = pd.DataFrame([['A', 10, 20, 10, 30], ['B', 20, 25, 15, 25], ['C', 12, 15, 19, 6],
['D', 10, 29, 13, 19]],
columns=['Team', 'Round 1', 'Round 2', 'Round 3', 'Round 4'])
# view data
print(df)
# plot grouped bar chart
df.plot(x='Team',
kind='bar',
stacked=False,
title='Grouped Bar Graph with dataframe')
"""
Para crear las graficas de la duracion min, promedio y maxima de los tiempos por mes y de
acuerdo a los tipos de suscriptores, primero distinguimos valores por mes, después, ya
para graficar debemos separar las duraciones minimas y medias en una grafica y las
duraciones maximas en otra ya que los valores al ser muy distintos; al visualizacion no
se alcanzan a distinguir adecuadamente
"""
# Convert the raw column to datetime dtype.
data['Start Date']=pd.to_datetime(data['Start Date'])
# Derive a month-name column (%B -> full month name, e.g. "March").
data['Month']=data['Start Date'].apply(lambda x: x.strftime('%B'))
# Inspect the new column.
list(data.columns)
data.head()
data.tail()
# Distribution of the months present in the data.
data['Month'].unique()
data['Month'].value_counts()
# Split the dataset by month so each month can be analysed separately.
March=data[data['Month']=='March']
April=data[data['Month']=='April']
May=data[data['Month']=='May']
June=data[data['Month']=='June']
July=data[data['Month']=='July']
August=data[data['Month']=='August']
# Per-month means of every column.
means = data.groupby('Month').mean()
type(means)
means = pd.DataFrame(means)  # ensure a DataFrame
means
means['Month']=means.index
means
# Per-month minima.
mini = data.groupby('Month').min()
mini = pd.DataFrame(mini)  # ensure a DataFrame
mini['Month']=mini.index
# Per-month maxima.
maxi = data.groupby('Month').max()
maxi = pd.DataFrame(maxi)  # ensure a DataFrame
maxi['Month']=maxi.index
# Collect the three statistics into one frame (based on the minima frame).
mini['Maxi']=maxi['Duration']  # append the extra columns
mini['Means']=means['Duration']
mini.shape
values=mini.iloc[:,[0,9,10,11]]  # presumably Duration(min), Month, Maxi, Means -- TODO confirm positions
values.rename(columns = {'Duration': 'Min'}, inplace=True)  # rename
values=values.reset_index(drop= True)  # reindex
print(values.columns)
values
# For plotting, split min+mean from max (very different magnitudes).
min_mean=values.iloc[:,[0,1,3]]
min_mean.plot(x='Month', kind='bar',stacked=False,
              title='Duration Min & Mean by month')
# The maxima.
values.iloc[:,[1,2]].plot(x='Month', kind='bar',stacked=False,
                          title='Max duration by month')
####### NOW PLOT THE DURATIONS BY SUBSCRIBER TYPE #######
sus_type=data.iloc[:,[1,9]]  # presumably Subscriber Type and Duration -- TODO confirm positions
sus_type.head()
# Mean duration per subscriber type.
means_s = sus_type.groupby('Subscriber Type').mean()
means_s = pd.DataFrame(means_s)  # ensure a DataFrame
means_s
means_s['Subscriber Type']=means_s.index
means_s
# The minima.
mini_s = sus_type.groupby('Subscriber Type').min()
mini_s = pd.DataFrame(mini_s)  # ensure a DataFrame
mini_s['Subscriber Type']=mini_s.index
# Now the maxima.
maxi_s = sus_type.groupby('Subscriber Type').max()
maxi_s = pd.DataFrame(maxi_s)  # ensure a DataFrame
# NOTE(review): the column is named 'Month' but holds subscriber types --
# looks like a copy/paste slip from the per-month section above.
maxi_s['Month']=maxi_s.index
# Collect the three statistics into one frame (based on the minima frame).
mini_s['Maxi']=maxi_s['Duration']  # append the extra columns
mini_s['Means']=means_s['Duration']
mini_s.shape
mini_s
values_s=mini_s
values_s
values_s.rename(columns = {'Duration': 'Min'}, inplace=True)  # rename
values_s=values_s.reset_index(drop= True)  # reindex
print(values_s.columns)
values_s
# For plotting, split min+mean from max (very different magnitudes).
values_s.iloc[:,[0,1,3]].plot(x='Subscriber Type', kind='bar',stacked=False,
                              title='Duration Min & Mean by suscriber type')
# The maxima.
values_s.iloc[:,[1,2]].plot(x='Subscriber Type', kind='bar',stacked=False,
                            title='Max duration by suscriber type')
from enemy import Enemy
from pygame.math import Vector2
from pygame import mixer
import pygame
import random
class Cat(Enemy):
    """A cat enemy that spawns one third of the way down the screen at a
    random horizontal position clear of obstacles, and screams at the
    player dog when it attacks."""

    def __init__(self, game, speed):
        """Load the sprite, set health, and re-roll the spawn position
        until it does not collide with any of the game's obstacle rects."""
        super().__init__(game, speed)
        self.icon = pygame.image.load('kotek.png')
        self.health = 5
        screen_w, screen_h = self.game.screen.get_size()
        while True:
            self.pos = Vector2(random.randint(0, screen_w - 80), screen_h / 3)
            self.rect = pygame.Rect(self.pos[0], self.pos[1], 64, 64)
            # collidelist returns -1 when the rect touches no obstacle.
            if self.rect.collidelist(self.game.rects) == -1:
                break

    def attack(self):
        """Hit the player and play the scream sound."""
        self.game.player.be_attacked()
        self.meow(True)

    def can_attack(self):
        """True when the player dog is in range and not already attacked."""
        dog = self.game.player
        return bool(dog.is_close_to(self) and not dog.attacked)

    def draw(self):
        """Blit the sprite plus a red/green health bar and a blue outline."""
        screen = self.game.screen
        x, y = self.pos[0], self.pos[1]
        screen.blit(self.icon, self.pos)
        # Red segment: the missing portion of the health bar.
        pygame.draw.rect(screen, (255, 0, 0), (x + self.health * 11, y - 15, 55 - self.health * 11 , 10))
        # Green segment: remaining health.
        pygame.draw.rect(screen, (0, 255, 0), (x, y - 15, self.health * 11 + 1, 10))
        # Blue outline around the 64x64 sprite box.
        pygame.draw.rect(screen, (0, 0, 255), (x, y, 64, 64), 2)

    @staticmethod
    def meow(attack=True):
        """Play the scream sound when attacking, otherwise a normal meow."""
        if attack:
            sound = mixer.Sound('cat_scream.wav')
            sound.set_volume(0.5)
            sound.play()
        else:
            sound = mixer.Sound('cat_meow_x.wav')
            sound.play()
# Dictionary demo: city id -> city name.
cities = { 101:"Pune", 102:"Mumbai", 105 : "Delhi", 103:"Chennai" }
cities[104]="Bengaluru"  # insert a new key
cities[103]="Kolkatta"   # overwrite an existing key
print(cities) # unordered
print(cities[102])
#val=cities[109] # KeyError
print(cities.get(103, "NIL"))  # .get with default: key present
print(cities.get(107, "NIL"))  # .get with default: key missing -> "NIL"
print(104 in cities)       # membership tests keys, not values
print(105 not in cities)
for k in cities:           # iterating a dict yields its keys
    print(cities[k])
for kid,name in cities.items():  # key/value pairs
    print(kid, name)
print(len(cities))
|
import pandas as pd
from datetime import datetime, timedelta
# Pandas settings
pd.options.display.max_rows = 200
class Parser:
    """Parses an activity-log CSV and incrementally builds a per-student
    feature table in ``self.output``.

    Column *positions* in the CSV are configurable; the corresponding
    column names are resolved once in ``__init__`` and reused by every
    ``add_*`` method.
    """

    def __init__(
        self,
        csv_path,
        date_col=0,
        name_col=5,
        event_col=3,
        component_col=2,
        context_col=1,
        index_dtype=str,
    ):
        """
        Creates a new parser with log data taken from the csv
        found in the `csv_path`.

        :param index_dtype: dtype of the student identifier column
                            (``str`` or ``int``).
        """
        # Load csv with pandas; the date column is parsed as datetime.
        # NOTE: the deprecated `infer_datetime_format` flag was dropped --
        # pandas infers the format on its own.
        self.log = pd.read_csv(
            csv_path,
            parse_dates=[date_col],
            # encoding="ascii",
        )
        # Resolve column *names* from the configured positions.
        self.date, self.name, self.event = self.log.columns.values[
            [date_col, name_col, event_col]
        ]
        self.component, self.context = self.log.columns.values[
            [component_col, context_col]
        ]
        # Get index dtype
        self.index_dtype = index_dtype
        # Sort by students and then ascending time
        self.log.sort_values([self.name, self.date], inplace=True)
        index = pd.Index(self.log[self.name].unique(), name=self.name)
        self.output = pd.DataFrame(index=index)
        # Time elapsed since the previous row, used for session detection.
        # NOTE: the first row of each student inherits the gap to the
        # previous student's last row; only the boundary row is affected.
        self.log["delta"] = self.log[self.date] - self.log.shift(1)[self.date]

    def _filtered_log(self, period, event_filter=None, component_filter=None):
        """Return ``self.log`` restricted to ``period`` (inclusive bounds)
        and to rows whose event/component contains any filter keyword."""
        log = self.log
        if event_filter:
            mask = log[self.event].apply(
                lambda c: any(keyword in c for keyword in event_filter)
            )
            log = log[mask]
        if component_filter:
            mask = log[self.component].apply(
                lambda c: any(keyword in c for keyword in component_filter)
            )
            log = log[mask]
        mask = log[self.date].apply(lambda c: c >= period[0] and c <= period[1])
        return log[mask]

    def add_event_counts(
        self,
        prefix,
        period,
        event_filter=None,
        component_filter=None,
        clip_val=30,
    ):
        """
        Adds one column per event type (named ``prefix`` + event, clipped
        to ``clip_val`` characters) counting how many times each student
        performed that event within ``period``.
        """
        log = self._filtered_log(period, event_filter, component_filter)
        # Number of each event performed by each user
        event_counts = (
            log[[self.name, self.event]]
            .groupby([self.name, self.event])
            .size()
            .unstack(fill_value=0)
        )
        # Rename columns
        event_counts = event_counts.add_prefix(prefix)
        # Clip long column names
        if clip_val:
            for i, name in enumerate(event_counts.columns.values):
                event_counts.columns.values[i] = name[:clip_val]
        # Merge event counts with output.
        # NOTE: inner merge -- students with no events in the period drop out.
        self.output = self.output.merge(event_counts, on=self.name)

    def add_component_counts(
        self, prefix, period, event_filter=None, component_filter=None
    ):
        """
        Adds one column per component (named ``prefix`` + component)
        counting how many log rows each student produced for that
        component within ``period``.
        """
        log = self._filtered_log(period, event_filter, component_filter)
        # Number of rows per (student, component) pair.
        component_counts = (
            log[[self.name, self.component]]
            .groupby([self.name, self.component])
            .size()
            .unstack(fill_value=0)
        )
        # Rename columns
        component_counts = component_counts.add_prefix(prefix)
        # Merge component counts with output
        self.output = self.output.merge(component_counts, on=self.name)

    def add_session_counts(self, colname, period, delta=timedelta(minutes=20)):
        """
        Adds, as ``colname``, the number of sessions per student within
        ``period``: a new session starts whenever two consecutive rows of
        the same student are at least ``delta`` apart.
        """
        # Apply period
        mask = self.log[self.date].apply(
            lambda c: c >= period[0] and c <= period[1]
        )
        log = self.log[mask]
        # Session number per row: cumulative count of gaps >= delta.
        # `transform` keeps the original row index, so the assignment below
        # aligns correctly (rows outside the period stay NaN).
        self.log["session_number"] = log.groupby(self.name)["delta"].transform(
            lambda t: (t >= delta).cumsum()
        )
        # Save: max session number + 1 == number of sessions.
        self.output[colname] = (
            self.log.groupby(self.name)["session_number"].max() + 1
        )

    def add_events_per_forum(
        self, prefix, period, component_name="Foro", clip_val=30
    ):
        """
        Adds one column per forum (context) counting each student's rows
        whose component equals ``component_name``.

        NOTE(review): `period` is accepted but not applied here -- presumably
        the counts were meant to be restricted to it as in the other methods.
        """
        # Get events per forum
        log_context = self.log[[self.name, self.context]]
        log_forums = log_context.loc[self.log[self.component] == component_name]
        per_forum = (
            log_forums.groupby([self.name, self.context])
            .size()
            .unstack(fill_value=0)
        )
        # Rename
        per_forum = per_forum.add_prefix(prefix)
        # Clip long column names
        if clip_val:
            for i, name in enumerate(per_forum.columns.values):
                per_forum.columns.values[i] = name[:clip_val]
        # Save
        self.output = self.output.merge(per_forum, on=self.name)

    def add_scores(
        self, csv_path, name_col=0, score_filter=None, non_numeric="remove"
    ):
        """
        Appends columns of scores to the data, taken from the csv path.

        @score_filter: optional list of score column names to keep.
        @non_numeric: Can be 'remove', 'nan', 'leave',
        or a number to replace it.
        """
        scores = pd.read_csv(csv_path)
        name_col = scores.columns.values[name_col]
        # Use score filter
        if score_filter:
            scores = scores[[name_col] + score_filter]
        if non_numeric != "leave":
            # Coerce the *score* columns to numeric (non-numeric -> NaN).
            # BUG FIX: the name column is now skipped so string identifiers
            # are not clobbered to NaN.
            for column in scores.columns.values:
                if column == name_col:
                    continue
                scores[column] = pd.to_numeric(scores[column], errors="coerce")
            # BUG FIX: dropna()/fillna() return new frames -- the previous
            # code discarded their results, making both branches no-ops.
            if non_numeric == "remove":
                # Remove all NaN rows
                scores = scores.dropna()
            elif not isinstance(non_numeric, str):
                # Replace all NaNs with the given value
                scores = scores.fillna(non_numeric)
        # If indexes were ints with missing values, fix float conversion
        if self.index_dtype == int:
            scores.dropna(inplace=True)
            scores[name_col] = scores[name_col].astype(int)
        # Save
        self.output = self.output.merge(
            scores, left_on=self.name, right_on=name_col
        )

    def write_file(self, output_path):
        """Write the accumulated feature table to ``output_path`` as CSV."""
        self.output.to_csv(
            output_path, quoting=None, index=False  # encoding="ascii",
        )
if __name__ == "__main__":
    # Example run over the primaria2 dataset, split into two periods.
    period1 = (datetime(2016, 9, 1), datetime(2016, 12, 1))
    period2 = (datetime(2016, 12, 1), datetime(2017, 4, 1))
    # This CSV's column layout differs from the Parser defaults.
    parser = Parser(
        "./data/primaria2.csv",
        date_col=1,
        name_col=0,
        event_col=2,
        component_col=5,
        context_col=4,
        index_dtype=int,
    )
    parser.add_event_counts("period1_", period1)
    parser.add_event_counts("period2_", period2)
    # parser.add_component_counts("total_", period)
    parser.add_session_counts("period1_sessions", period1)
    parser.add_session_counts("period2_sessions", period2)
    parser.add_events_per_forum("period1_", period1)
    parser.add_events_per_forum("period2_", period2)
    # Keep only students whose FINAL grade is numeric.
    parser.add_scores(
        "data/notas2.csv", score_filter=["FINAL"], non_numeric="remove"
    )
    parser.write_file("data/output.csv")
|
import random
from sii3 import *
import unittest
class testClasters(unittest.TestCase):
    '''Unit tests for the clustering helpers in sii3.'''

    def setUp(self):
        pass

    def tearDown(self):
        '''Teardown hook (nothing to clean up).'''
        pass

    def test_d(self):
        """d() returns the distance between two points."""
        origin = (1, 1)
        for other, expected in (((1, 2), 1), ((10, 1), 9)):
            self.assertEqual(d(origin, other), expected)
        # Distance from a point to itself is zero.
        far = (10, 1)
        self.assertEqual(d(far, far), 0)

    def test_points(self):
        # Smoke test: initPoints() must run without raising.
        initPoints()
if __name__ == '__main__':
    # Run the whole suite when this file is executed directly.
    unittest.main()
from mpl_toolkits.mplot3d import Axes3D
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import numpy as np
import sys
# Command-line arguments. NOTE(review): captured but never used below.
args = sys.argv
# Load the sample values (whitespace-separated floats).
x = np.loadtxt('norm.dat')
fig = plt.figure()
plt.hist(x,bins=100)
# The output file name encodes the sample size, mean and variance.
plt.savefig('norm-len{}mean{:.5f}var{:.5f}.png'.format(len(x),np.mean(x),np.var(x)))
|
from selenium import webdriver
# Fetch the cookies.
def get_cookies():
    """Open https://www.lagou.com/ in a real Chrome session via Selenium and
    print each cookie dict.

    The commented-out blocks are earlier experiments: hiding the webdriver
    flag via CDP, and assembling a Cookie header string by hand.
    NOTE(review): the driver is never quit, so the browser stays open.
    """
    chrome_options = webdriver.ChromeOptions()
    driver = webdriver.Chrome(options=chrome_options)
    # driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
    #     "source": """
    #     Object.defineProperty(navigator, 'webdriver', {
    #       get: () => undefined
    #     })
    #     """
    # })
    driver.get("https://www.lagou.com/")
    for i in driver.get_cookies():
        print(i)
    #print(driver.get_cookies())
    # Assemble the Cookie header string.
    # cookies_dic = {
    #     "JSESSIONID": driver.get_cookie("JSESSIONID")["value"],
    #     "WEBTJ-ID": driver.get_cookie("WEBTJ-ID")["value"],
    #     "user_trace_token": driver.get_cookie("user_trace_token")["value"],
    #     "LGUID": driver.get_cookie("LGUID")["value"],
    #     "_ga": driver.get_cookie("_ga")["value"],
    #     "_gid": driver.get_cookie("_gid")["value"],
    #     "index_location_city": "%E5%85%A8%E5%9B%BD",
    #     "__lg_stoken__": "4cf05d1f4f3dfda3ed5280e46fea64ef21eddc1e1e4871646793a47e0905a8e515049ab213546726a140de84e1b884e8d61d9254a19d799ffae29d5ed814930d6077e032f55e",
    #     "X_MIDDLE_TOKEN": "ef6f1e7dba29f2358e96502363802085",
    #     "TG-TRACK-CODE": "index_search",
    #     "SEARCH_ID": "a5fdae06e7bc42a4b425dabf8f6344c4",
    #     "LGSID": driver.get_cookie("LGSID")["value"],
    #     "PRE_UTM": driver.get_cookie("PRE_UTM")["value"],
    #     "PRE_HOST": driver.get_cookie("PRE_HOST")["value"],
    #     "PRE_SITE": driver.get_cookie("PRE_SITE")["value"],
    #     "PRE_LAND": driver.get_cookie("PRE_LAND")["value"],
    #     "sensorsdata2015jssdkcross": driver.get_cookie("sensorsdata2015jssdkcross")["value"],
    #     "Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6": driver.get_cookie("Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6")[
    #         "value"],
    #     "LGRID": driver.get_cookie("LGRID")["value"]
    # }
    #
    # cookie = ""
    # for i in range(0, len(list(cookies_dic.keys()))):
    #     cookie = cookie + list(cookies_dic.keys())[i] + "=" + str(list(cookies_dic.values())[i]) + ";"
    # print(cookie)

get_cookies()
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management.core.resources import Directory
from resource_management.core.resources import Execute
from resource_management.libraries.functions import default
from resource_management.libraries.script.script import Script
def setup_extensions():
    """Deploy Hadoop custom extensions for the current service.

    Reads the hadoop.custom-extensions.* settings from core-site, refreshes
    the local extension directory when the current service is YARN or one
    of the configured services, then delegates to the Hive and HBase
    handlers.
    """
    import params

    # Hadoop Custom extensions
    hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
    hadoop_custom_extensions_services = default("/configurations/core-site/hadoop.custom-extensions.services", "")
    hadoop_custom_extensions_owner = default("/configurations/core-site/hadoop.custom-extensions.owner", params.hdfs_user)
    hadoop_custom_extensions_services = [ service.strip().upper() for service in hadoop_custom_extensions_services.split(",") ]
    # YARN always participates, regardless of the configured service list.
    hadoop_custom_extensions_services.append("YARN")
    hadoop_custom_extensions_hdfs_dir = "/iop/ext/{0}/hadoop".format(params.stack_version_formatted)
    hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(Script.get_stack_root())

    if params.current_service in hadoop_custom_extensions_services:
        # Always start from a clean local directory; re-download only when
        # the feature is enabled.
        clean_extensions(hadoop_custom_extensions_local_dir)
        if hadoop_custom_extensions_enabled:
            download_extensions(hadoop_custom_extensions_owner, params.user_group,
                                hadoop_custom_extensions_hdfs_dir,
                                hadoop_custom_extensions_local_dir)

    setup_extensions_hive()

    hbase_custom_extensions_services = []
    hbase_custom_extensions_services.append("HBASE")
    if params.current_service in hbase_custom_extensions_services:
        setup_hbase_extensions()
def setup_extensions_hive():
    """Deploy Hive custom extensions from HDFS to the local directory.

    Only runs for the HIVE service on HIVE_SERVER / HIVE_CLIENT roles.
    """
    import params

    hive_custom_extensions_enabled = default("/configurations/hive-site/hive.custom-extensions.enabled", False)
    hive_custom_extensions_owner = default("/configurations/hive-site/hive.custom-extensions.owner", params.hdfs_user)
    hive_custom_extensions_hdfs_dir = "/iop/ext/{0}/hive".format(params.stack_version_formatted)
    hive_custom_extensions_local_dir = "{0}/current/ext/hive".format(Script.get_stack_root())

    impacted_components = ['HIVE_SERVER', 'HIVE_CLIENT'];
    role = params.config.get('role','')

    # Run copying for HIVE_SERVER and HIVE_CLIENT
    if params.current_service == 'HIVE' and role in impacted_components:
        # Clean first; re-download only when the feature is enabled.
        clean_extensions(hive_custom_extensions_local_dir)
        if hive_custom_extensions_enabled:
            download_extensions(hive_custom_extensions_owner, params.user_group,
                                hive_custom_extensions_hdfs_dir,
                                hive_custom_extensions_local_dir)
def download_extensions(owner_user, owner_group, hdfs_source_dir, local_target_dir):
    """
    Download custom-extension files from HDFS into `local_target_dir`.

    NOTE(review): Python 2 source (octal literals like 0755 below).

    :param owner_user: user owner of the HDFS directory
    :param owner_group: group owner of the HDFS directory
    :param hdfs_source_dir: the HDFS directory from where the files are being pull
    :param local_target_dir: the location of where to download the files
    :return: Will return True if successful, otherwise, False.
    """
    import params

    # Only download when the target directory does not exist yet.
    if not os.path.isdir(local_target_dir):
        import tempfile

        # Create a secure random temp directory
        tmp_dir=tempfile.mkdtemp()
        cmd = ('chown', '-R', params.hdfs_user, tmp_dir)
        Execute(cmd, sudo=True)
        cmd = ('chmod', '755', tmp_dir)
        Execute(cmd, sudo=True)

        # Ensure the parent of the target exists, owned by root.
        Directory(os.path.dirname(local_target_dir),
                  owner="root",
                  mode=0755,
                  group="root",
                  create_parents=True)

        # Make sure the HDFS source directory exists with the right owner.
        params.HdfsResource(hdfs_source_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=owner_user,
                            group=owner_group,
                            mode=0755)

        # copy from hdfs to tmp_dir
        params.HdfsResource(tmp_dir,
                            type="directory",
                            action="download_on_execute",
                            source=hdfs_source_dir,
                            user=params.hdfs_user,
                            mode=0644,
                            replace_existing_files=True)

        # Execute command is not quoting correctly.
        cmd = ('mv', tmp_dir, local_target_dir)
        # Move into place only when something was actually downloaded.
        only_if_cmd = "ls -d {tmp_dir}/*".format(tmp_dir=tmp_dir)
        Execute(cmd, only_if=only_if_cmd, sudo=True)

        only_if_local = 'ls -d "{local_target_dir}"'.format(local_target_dir=local_target_dir)
        Execute(("chown", "-R", "root:root", local_target_dir),
                sudo=True,
                only_if=only_if_local)

        # Flush the queued HDFS operations.
        params.HdfsResource(None,action="execute")

    return True
def clean_extensions(local_dir):
    """
    Delete the local extensions directory if it exists.

    :param local_dir: The local directory where the extensions are stored.
    :return: Will return True if successful, otherwise, False.
    """
    if os.path.isdir(local_dir):
        Directory(local_dir,
                  action="delete",
                  owner="root")
    return True
def setup_hbase_extensions():
    """Provision the HDFS directory for HBase custom extensions when the
    feature is enabled in hbase-site."""
    import params

    # HBase Custom extensions
    hbase_custom_extensions_enabled = default("/configurations/hbase-site/hbase.custom-extensions.enabled", False)
    hbase_custom_extensions_owner = default("/configurations/hbase-site/hbase.custom-extensions.owner", params.hdfs_user)
    hbase_custom_extensions_hdfs_dir = "/iop/ext/{0}/hbase".format(params.stack_version_formatted)

    if hbase_custom_extensions_enabled:
        download_hbase_extensions(hbase_custom_extensions_owner, params.user_group,
                                  hbase_custom_extensions_hdfs_dir)
def download_hbase_extensions(owner_user, owner_group, hdfs_source_dir):
    """
    Ensure the HBase extensions directory exists in HDFS.

    Unlike download_extensions(), nothing is copied locally here.

    :param owner_user: user owner of the HDFS directory
    :param owner_group: group owner of the HDFS directory
    :param hdfs_source_dir: the HDFS directory from where the files are
    :return: Will return True if successful, otherwise, False.
    """
    import params

    params.HdfsResource(hdfs_source_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=owner_user,
                        group=owner_group,
                        mode=0755)
    # Flush the queued HDFS operations.
    params.HdfsResource(None,action="execute")
    return True
|
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from .models import User, UserProfile
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Create an empty UserProfile whenever a new User row is inserted."""
    if created:
        UserProfile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Persist the related profile every time the user is saved."""
    instance.profile.save()
@receiver(post_save, sender=User)
def create_or_update_chatkit_user(sender, instance, created, **kwargs):
    """Mirror the saved user into Chatkit: create on insert, else update.

    Best-effort: any failure is swallowed so a chat-service outage never
    blocks saving a user.
    """
    try:
        # Imported lazily so this module loads even without the chat stack.
        from seedorf.chatkit.client import create_client

        client = create_client()
        client.token = client.create_admin_token()
        user_uuid = str(instance.uuid)
        user_name = instance.name
        user_avatar = instance.profile.avatar.url if instance.profile.avatar else None
        if created:
            client.create_user(user_uuid, user_name, user_avatar)
        else:
            client.update_user(user_uuid, user_name, user_avatar)
    except Exception:
        # NOTE(review): silent swallow also hides programming errors;
        # consider logging the exception.
        pass
@receiver(post_delete, sender=User)
def delete_chatkit_user(sender, instance, **kwargs):
    """Remove the mirrored Chatkit user when the User row is deleted.

    Best-effort: failures are swallowed (see create_or_update above).
    """
    try:
        from seedorf.chatkit.client import create_client

        client = create_client()
        client.token = client.create_admin_token()
        client.delete_user(str(instance.uuid))
    except Exception:
        # NOTE(review): consider logging instead of silently ignoring.
        pass
|
# valueIterationAgents.py
# -----------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import mdp, util
from learningAgents import ValueEstimationAgent
class ValueIterationAgent(ValueEstimationAgent):
    """
    * Please read learningAgents.py before reading this.*

    A ValueIterationAgent takes a Markov decision process
    (see mdp.py) on initialization and runs value iteration
    for a given number of iterations using the supplied
    discount factor.
    """
    def __init__(self, mdp, discount = 0.9, iterations = 100):
        """
        Run `iterations` sweeps of batch value iteration over `mdp` and
        keep the resulting state values in self.values; the agent then
        acts greedily with respect to them.

        Useful mdp methods:
          mdp.getStates(), mdp.getPossibleActions(state),
          mdp.getTransitionStatesAndProbs(state, action),
          mdp.getReward(state, action, nextState), mdp.isTerminal(state)
        """
        self.mdp = mdp
        self.discount = discount
        self.iterations = iterations
        self.values = util.Counter() # A Counter is a dict with default 0

        # Batch updates: every sweep computes each state's new value from
        # the *previous* sweep's values, then swaps the table in whole.
        states = self.mdp.getStates()
        while self.iterations > 0:
            nextValues = util.Counter()
            for s in states:
                best = None
                for a in self.mdp.getPossibleActions(s):
                    q = self.computeQValueFromValues(s, a)
                    if best is None or q > best:
                        best = q
                # States with no legal actions (e.g. terminal) keep value 0.
                nextValues[s] = 0 if best is None else best
            self.values = nextValues
            self.iterations -= 1

    def getValue(self, state):
        """
        Return the value of the state (computed in __init__).
        """
        return self.values[state]

    def computeQValueFromValues(self, state, action):
        """
        Q(state, action) under the stored values:
        sum over successors of prob * (reward + discount * V(successor)).
        """
        total = 0
        for successor, prob in self.mdp.getTransitionStatesAndProbs(state, action):
            step_reward = self.mdp.getReward(state, action, successor)
            total += prob * (step_reward + self.discount * self.values[successor])
        return total

    def computeActionFromValues(self, state):
        """
        Return the best action in `state` under the current values, or
        None when the state has no legal actions (argMax of an empty
        Counter). Ties are resolved by util.Counter.argMax.
        """
        scores = util.Counter()
        for a in self.mdp.getPossibleActions(state):
            scores[a] = self.computeQValueFromValues(state, a)
        return scores.argMax()

    def getPolicy(self, state):
        """Alias for computeActionFromValues."""
        return self.computeActionFromValues(state)

    def getAction(self, state):
        "Returns the policy at the state (no exploration)."
        return self.computeActionFromValues(state)

    def getQValue(self, state, action):
        """Alias for computeQValueFromValues."""
        return self.computeQValueFromValues(state, action)
|
class BaseProvider:
    """Abstract provider interface: concrete subclasses must override
    every method below; each base implementation only raises."""

    def get_catalog(self, **kwargs):
        """Return the catalog."""
        raise NotImplementedError

    def get_merchant(self, merchant_id):
        """Return the merchant identified by `merchant_id`."""
        raise NotImplementedError

    def login(self, username, password):
        """Authenticate with the given credentials."""
        raise NotImplementedError

    def post_order(self, **kwargs):
        """Submit an order."""
        raise NotImplementedError
|
from .test_case import TestCase
from infi.unittest.parameters import iterate
from os import path
class ConsoleScriptsTestCase(TestCase):
    """Tests for the projector `console-scripts` plugin commands."""

    def test_add_and_remove_a_valid_entry_point(self):
        """add/remove should register and unregister an entry point."""
        from infi.projector.plugins.builtins.console_scripts import ConsoleScriptsPlugin
        plugin = ConsoleScriptsPlugin()
        with self.temporary_directory_context():
            self.projector("repository init a.b.c none short long")
            # Append a callable for the entry point to reference.
            with open(path.join("src", "a", "b", "c", "__init__.py"), 'a') as fd:
                fd.write("\ndef foo():\n pass\n")
            self.assertFalse('foo' in plugin.get_set().get().keys())
            self.projector("console-scripts add foo a.b.c:foo")
            self.assertTrue('foo' in plugin.get_set().get().keys())
            self.projector("console-scripts remove foo")
            self.assertFalse('foo' in plugin.get_set().get().keys())

    def test_list(self):
        """`console-scripts list` should pprint the entry-point dict."""
        from infi.projector.plugins.builtins.console_scripts import ConsoleScriptsPlugin
        from mock import patch, Mock
        with patch("pprint.pprint") as pprint, patch.object(ConsoleScriptsPlugin, "get_set") as get_set:
            console_scripts = dict()
            get_set.return_value.get.return_value = console_scripts
            def side_effect(*args, **kwargs):
                # pprint must be called with exactly the scripts dict.
                called_console_scripts, = args
                self.assertEqual(console_scripts, called_console_scripts)
            pprint.side_effect = side_effect
            with self.temporary_directory_context():
                self.projector("repository init a.b.c none short long")
                self.projector("console-scripts list")
            self.assertTrue(pprint.called)
|
# K-Means Clustering in OpenCV
import numpy as np
import cv2 as cv
# Load the image and flatten it to an N x 3 array of BGR pixels.
img = cv.imread('dcbrtg.jpg')
Z = img.reshape((-1, 3))
# convert to np.float32 (cv.kmeans requires float32 samples)
Z = np.float32(Z)
# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 4
ret, label, center = cv.kmeans(Z, K, None, criteria, 10, cv.KMEANS_RANDOM_CENTERS)
# Now convert back into uint8, and make original image
center = np.uint8(center)
res = center[label.flatten()]  # replace each pixel with its cluster centre
res2 = res.reshape(img.shape)
# Otsu threshold on the grayscale quantised image.
# NOTE(review): `thresh` is computed but never displayed or saved.
gray = cv.cvtColor(res2, cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
cv.imshow('res2', res2)
cv.waitKey(0)
cv.destroyAllWindows()
|
{
"targets": [
{
"target_name": "boost-smart_ptr",
"type": "static_library",
"include_dirs": [
"1.57.0/smart_ptr-boost-1.57.0/include"
],
"all_dependent_settings": {
"include_dirs": [
"1.57.0/smart_ptr-boost-1.57.0/include"
]
},
"sources": [
"1.57.0/smart_ptr-boost-1.57.0/src/*.cpp"
],
"dependencies": [
"../boost-predef/boost-predef.gyp:*",
"../boost-mpl/boost-mpl.gyp:*",
"../boost-throw_exception/boost-throw_exception.gyp:*",
"../boost-align/boost-align.gyp:*",
"../boost-config/boost-config.gyp:*",
"../boost-assert/boost-assert.gyp:*",
"../boost-core/boost-core.gyp:*"
]
}
]
} |
'''
On a certain day the nurses at a hospital worked these hours:
Howard 8, Pease 10, Campbell 9, Grace 8, McCarthy 7 and Murphy 12.
Compute the average number of hours worked per nurse (as a float).
'''
# Hours per nurse, in the order listed above.
nurseWorked = [8, 10, 9, 8, 7, 12]
# True division already yields a float; float() kept for emphasis.
avgWorked = float(sum(nurseWorked) / len(nurseWorked))
print(f"The avg work is {avgWorked:.2f}")
|
import sys
# NOTE(review): Python 2 script (print statement at the bottom).
# Reads "a,b" range pairs (one per line) from the file named on the
# command line, then counts primes below 1000 inside the range.
zeefile = open(sys.argv[1], 'r')
for x in zeefile:
    a, b = x.strip().split(',')
    top = int(b)
    bottom = int(a)
# NOTE(review): only the LAST line's range survives the loop above --
# presumably every line was meant to be processed.
mystring = ''  # NOTE(review): unused
nums = []
# Trial division over 2..999; collect primes inside [bottom, top].
for p in range(2, 1000):
    for i in range(2, p):
        if p % i == 0:
            break
    else:
        # for/else: runs only when no divisor was found, i.e. p is prime.
        if p >= bottom and p <= top:
            nums.append(p)
print len(nums)
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class Users(models.Model):
    """Extend res.users with a computed display name ("login / partner
    name") used as the record name, and order users by login."""
    _inherit = ['res.users']
    _rec_name = 'display_name'
    _order = 'login'

    # Recomputed whenever login or the partner's name changes.
    display_name = fields.Char(string='Display Name', compute='_compute_display_name')

    @api.one
    @api.depends('login', 'partner_id.name')
    def _compute_display_name(self):
        """Join login and partner name with ' / ', skipping empty parts."""
        names = [self.login, self.partner_id.name]
        self.display_name = ' / '.join(filter(None, names))

    @api.model
    def create(self, vals):
        """Create the user, defaulting email to the login."""
        new_record = super(Users, self).create(vals)
        # Set default email by login, and avoiding the issue while creating "Template User" in auth_signup module.
        if new_record.login != 'portaltemplate':
            new_record.email = new_record.login
        return new_record
|
#!/usr/bin/env python
#import execjs
import execjs
import os
# Directory holding the bundled jsfuck.js implementation.
path=os.path.abspath('.')+'/Library/pyjsfuck'

class JSFuck():
    """Thin wrapper around the JSFuck JavaScript encoder, executed through
    whatever JS engine execjs discovers."""
    def __init__(self):
        # Compile jsfuck.js once; encode() then just calls into it.
        # NOTE(review): the file handle is never closed.
        f = open(path+'/jsfuck.js', 'r')
        jsf_code = f.read()
        js = execjs.get()
        #print "Using Engine %s" % js.name
        self.jsf_int = js.compile(jsf_code)
        pass
    def encode(self, code):
        # NOTE(review): the third argument '1' is passed straight to the JS
        # JSFuck function -- confirm its meaning against jsfuck.js.
        return self.jsf_int.call('JSFuck',code,'1')
import os
from flask import Flask, jsonify
from flask_restful import Api
# from flask_jwt import JWT
from flask_jwt_extended import JWTManager
from db import db
from ma import ma
from blacklist import BLACKLIST
from authentication import authenticate, identity
from resources.user import (
UserRegister,
User,
UserLogin,
UserTokenRefresh,
UserLogout,
# UserEmailActivation
)
from resources.confirmation import UserConfirmation
from resources.item import Item, ItemList
from resources.store import Store, StoreList
# Application setup: secrets and DB connection come from the environment.
app = Flask(__name__)
app.secret_key = os.environ.get('APP_SECRET_KEY')
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['PROPAGATE_EXCEPTIONS'] = True
app.config['JWT_BLACKLIST_ENABLED'] = True # allowing blacklist feature
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh'] # allowing token blacklist for access & refresh token
api = Api(app)

@app.before_first_request
def create_tables():
    """Create all database tables before the first request is served."""
    db.create_all()
# jwt = JWT(app, authenticate, identity)
jwt = JWTManager(app)

@jwt.user_claims_loader
def add_claims_to_jwt(identity):
    """Attach an is_admin claim; identity 1 is treated as the admin."""
    if identity == 1:
        return {'is_admin': True}
    return {'is_admin': False}

# Check whether the token is blacklisted, see config["JWT_BLACKLIST_ENABLED"].
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
    return decrypted_token['jti'] in BLACKLIST

@jwt.expired_token_loader
def expired_token_callback():
    """Callback for expired tokens (overrides the library default body)."""
    return jsonify({'message': 'The token has expired', 'action': False}), 401

# Callback when a fresh token is required but a stale one was sent.
# The library has a default; this customises the response body.
@jwt.needs_fresh_token_loader
def token_not_fresh_callback():
    return jsonify({'message': 'The token is not fresh.', 'action': False}), 401

@jwt.unauthorized_loader
def missing_token_callback(error):
    """Callback when no access token is present in the request header."""
    return jsonify({'message': 'Request header does not contain an access token', 'action': False}), 401

# Callback for tokens that fail validation.
# The library has a default; this customises the response body.
@jwt.invalid_token_loader
def invalid_token_callback(error):
    return jsonify({'message': 'Invalid token.', 'action': False}), 401

@jwt.revoked_token_loader
def revoked_token_callback():
    """Callback for revoked (blacklisted) tokens."""
    return jsonify({'message': 'The token has been revoked', 'action': False}), 401
# REST routes.
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(Store, '/store/<string:name>')
api.add_resource(StoreList, '/stores')
api.add_resource(UserRegister, '/register')
api.add_resource(User, '/user/<int:id_>')
# api.add_resource(UserEmailActivation, '/user/activate/<int:user_id>')
api.add_resource(UserConfirmation, '/confirm_user/<string:confirmation_id>')
api.add_resource(UserLogin, '/login')
api.add_resource(UserTokenRefresh, '/refresh')
api.add_resource(UserLogout, '/logout')

if __name__ == '__main__':
    # Bind the SQLAlchemy/Marshmallow extensions and start the dev server.
    db.init_app(app)
    ma.init_app(app)
    app.run(debug=True, port=5000)
|
import pandas as pd
import numpy as np
import json
def get_data(filename):
    """Load dates and the value matrix from a JSON data file.

    The file must contain a 'dates' key (ISO "%Y-%m-%d" strings) and a
    'dataset' key (nested list of numbers).

    :return: (pandas DatetimeIndex, numpy float16 array) pair.
    """
    with open(filename) as handle:
        payload = json.load(handle)
    parsed_dates = pd.to_datetime(payload['dates'], format="%Y-%m-%d")
    values = np.array(payload['dataset'], dtype=np.float16)
    return parsed_dates, values
|
import sqlite3 as sl
import os,logging
import pandas as pd
from peewee import *
# Input CSV produced by the transform step, and the sqlite file peewee writes.
TRANSFORMED_FILENAME='mushrooms_transformed.csv'
DB_NAME='mushrooms_database.db'
db=SqliteDatabase(DB_NAME)
class BaseModel(Model):
    # Shared peewee base so every model binds to the same sqlite database.
    class Meta:
        database=db
class AllCategories(BaseModel):
    # One row per distinct categorical value; the value itself is the key.
    name=CharField(primary_key=True)

    class Meta:
        # NOTE(review): redundant — BaseModel.Meta already sets the database.
        database=db
class Mushrooms(BaseModel):
    # One row per mushroom sample; every attribute is a category value that
    # references a row of AllCategories.
    id=AutoField()
    cap_shape=ForeignKeyField(AllCategories)
    cap_color=ForeignKeyField(AllCategories)
    odor=ForeignKeyField(AllCategories)
    gill_size=ForeignKeyField(AllCategories)
    gill_color=ForeignKeyField(AllCategories)
    stalk_color_above_ring=ForeignKeyField(AllCategories)
    veil_color=ForeignKeyField(AllCategories)
    ring_type=ForeignKeyField(AllCategories)
    spore_print_color=ForeignKeyField(AllCategories)
    population=ForeignKeyField(AllCategories)
    habitat=ForeignKeyField(AllCategories)
# Takes the whole data frame and yields every unique cell value in the dataset.
def get_unique_categories(data):
    """Return the distinct cell values found anywhere in *data*.

    :param data: pandas DataFrame of categorical columns.
    :return: list of unique values across all columns (order unspecified,
        as in the original set-based implementation).
    """
    # A single growing set replaces the original per-column
    # list->set->list->extend->set round-trips.
    unique_values = set()
    for col in data.columns:
        unique_values.update(data[col].values)
    return list(unique_values)
def main():
    # ETL driver: read the transformed CSV, create the sqlite schema via
    # peewee, then bulk-insert the unique categories followed by the rows.
    logging.basicConfig( format='%(filename)s - %(levelname)s - %(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
    logging.info("Reading Data")
    data=pd.read_csv(TRANSFORMED_FILENAME)
    logging.info("Connecting to Database")
    db.connect()
    logging.info("Creating Tables")
    db.create_tables([ AllCategories, Mushrooms])
    # Categories must exist first: Mushrooms rows reference them by key.
    unique_cols=get_unique_categories(data)
    unique_cols=[{'name':x} for x in unique_cols]
    logging.info("Inserting Unique Categories")
    AllCategories.insert_many(unique_cols).execute()
    logging.info("Preparing Mushrooms data")
    # peewee field names use underscores while the CSV headers use hyphens.
    new_renamed_cols={}
    for x in data.columns:
        new_renamed_cols[x]=x.replace('-','_')
    data=data.rename(columns=new_renamed_cols)
    # Convert each row to a plain dict for peewee's insert_many().
    data_source=[]
    cols=list(data.columns)
    for i,x in data.iterrows():
        values=[x[col_name] for col_name in cols]
        single_dict={}
        single_dict.update(zip(cols, values))
        data_source.append(single_dict)
    logging.info("Inserting Mushroom Data")
    Mushrooms.insert_many(data_source).execute()


if __name__ == "__main__":
    main()
|
__author__ = 'tung'
class Users:
    """Simple mutable credential holder (username + password)."""

    def __init__(self, username='', password=''):
        self._username = username
        self._password = password

    @property
    def username(self):
        """The account name."""
        return self._username

    @username.setter
    def username(self, new_name):
        self._username = new_name

    @property
    def password(self):
        """The account secret."""
        return self._password

    @password.setter
    def password(self, new_password):
        self._password = new_password
|
import os
import string
import sqlite3
def select_authorizer(*args):
    """sqlite3 authorizer callback that permits every operation.

    The positional arguments describe the attempted action and are
    deliberately ignored.
    """
    return sqlite3.SQLITE_OK
class SensorLog:
    """Container for the b-steps of one sensor log.

    Bug fix: ``b_steps`` used to be a class-level list, so every
    instance (and every call that appended to it) shared and kept
    growing the same list. It is now per-instance state.
    """

    def __init__(self):
        self.b_steps = []  # list of B_step objects, filled by reconstruct()
class B_step:
    """One b-step: its segments plus the detected crossing points.

    Bug fix: the three lists used to be class-level attributes shared by
    every instance; they are now created per instance.
    """

    def __init__(self):
        self.closed_segments = []
        self.compat_segments = []
        self.crossing_points = []
class Joined_Segment:
    # A closed segment concatenated with a compatible segment, plus
    # bookkeeping about where the two were joined.
    # NOTE(review): these are class-level defaults; since all are immutable
    # and reconstruct() assigns fresh values per instance, the sharing is
    # harmless here.
    id_closed=0
    id_compat=0
    average=0
    segment=None
    subsequence=None
    index_crossing=0
    position_crossing=None
def insert_in_database(b_set, id, c):
    """Insert every joined segment of *b_set* into the ``segment`` table.

    :param b_set: iterable of joined segments; each needs ``.segment``,
        ``.index_crossing`` and ``.position_crossing`` attributes.
    :param id: identifier of the b-step these segments belong to.
    :param c: open sqlite3 cursor (caller commits).
    """
    # Iterate the collection directly instead of indexing via range(len()).
    for joined_segment in b_set:
        c.execute(
            "INSERT INTO segment VALUES (?, ?, ?, ?, ?);",
            (None, joined_segment.segment, id,
             joined_segment.position_crossing, joined_segment.index_crossing),
        )
    return
def construct_database(final_array_sub_seq,pathDirectoryLog):
exists = os.path.isfile(
(pathDirectoryLog ))
if exists:
os.remove(pathDirectoryLog )
##creo il database dove andrò a salvare i vari log
conn = sqlite3.connect(pathDirectoryLog)
conn.set_authorizer(select_authorizer)
c = conn.cursor()
# Create tables
c.execute('''CREATE TABLE segment(id_segment INTEGER PRIMARY KEY AUTOINCREMENT, sequence TEXT ,id_b_step INTEGER, crossing_positions TEXT, index_crossing INTEGER)''')
# Save (commit) the changes
conn.commit()
for i in range(0,len(final_array_sub_seq)):
insert_in_database(final_array_sub_seq[i],i, c)
conn.commit()
conn.close()
def is_contained(i, segment, sub_sequence):
    """Check whether *sub_sequence* matches *segment* starting at *i*.

    Each element of *segment* is itself indexable; only its first item
    (``segment[k][0]``) is compared against the corresponding symbol of
    *sub_sequence*.

    The original hard-coded six nested bounds checks (and could raise
    IndexError for subsequences shorter than six); this version handles
    any subsequence length and simply returns False when the match would
    run past the end of *segment*.  For length-6 subsequences (the only
    length produced in this module) the result is identical.
    """
    n = len(sub_sequence)
    if i + n > len(segment):
        return False
    return all(sub_sequence[k] == segment[i + k][0] for k in range(n))
def find_subsequence(segment, sub_sequence):
    """Count the start positions of *segment* where *sub_sequence* matches."""
    return sum(
        1
        for start in range(len(segment))
        if is_contained(start, segment, sub_sequence)
    )
def calculate_max_averages(joined_segment, b_step):
    """Return the best-scoring entry of *b_step*.

    Among entries whose ``id_closed`` matches *joined_segment*'s, the one
    with the highest ``average`` wins (later entries win ties).  As in the
    original, the scan starts from ``b_step[0]`` regardless of its id.
    """
    best = b_step[0]
    for candidate in b_step[1:]:
        if candidate.id_closed == joined_segment.id_closed and best.average <= candidate.average:
            best = candidate
    return best
def calculate_averages(subsequence, sensor_log):
    """Count occurrences of *subsequence* across every segment string.

    Scans the closed and compatible segments of every b-step in
    *sensor_log* and sums ``str.count`` occurrences; returns 0 when there
    are none.

    Cleanup: the original also tallied the number of matching segments in
    a ``total`` variable and copied the count into ``average``, but neither
    ever influenced the returned value — that dead code was removed.  The
    returned value is unchanged.
    """
    count_occurences = 0
    for b_step in sensor_log.b_steps:
        for seg in b_step.closed_segments:
            count_occurences += seg.count(subsequence)
        for seg in b_step.compat_segments:
            count_occurences += seg.count(subsequence)
    return count_occurences
def get_crossing_point(array, point):
    """Resolve a crossing entry matching sensor symbol *point*.

    Entries look like '<symbol>_<line number>'.  Returns
    '<translated sensor>_<translated timestamp>' for the first matching
    entry whose two translations both succeed, otherwise None.
    """
    for entry in array:
        parts = entry.split("_")
        if parts[0] == point:
            sensor_name = translate(parts[0])
            timestamp = translate_timestamp(int(parts[1]))
            #print(sensor_name)
            if sensor_name is not None and timestamp is not None:
                return sensor_name + "_" + timestamp
    return None
def get_indexes(array):
    """Count how often the symbol at position 6 changes while walking
    *array* in order."""
    changes = 0
    current = array[0][6]
    for item in array[1:]:
        if item[6] != current:
            current = item[6]
            changes += 1
    return changes
def isContained(array, sensor):
    """Return True when any entry's first symbol equals *sensor*.

    Each entry is a nested sequence; the symbol compared is
    ``entry[0][0]``.
    """
    return any(entry[0][0] == sensor for entry in array)
def translate_timestamp(n_line):
    """Return 'date time' taken from line *n_line* (1-based) of the
    dataset-paths file, or None when that line does not exist.

    Lines are tab-separated; the first two fields are date and time.
    Fixes a resource leak: the original opened the file and never closed
    it; `with` now guarantees the handle is released.
    """
    path = "C:\\Users\\Dario\\Desktop\\multi-user-segmentation-master\\data\\DatasetPaths.txt"
    with open(path, "r") as f:
        for counter, line in enumerate(f, start=1):
            if counter == n_line:
                parsed_line = line.split("\t")
                return parsed_line[0] + " " + parsed_line[1]
    return None
def translate(my_char):
    """Look up *my_char* in the symbol-dictionary file and return its
    translation, or None when the symbol is unknown.

    Each line is '<symbol>\\t\\t<translation>'.  Fixes a resource leak:
    the original opened the file and never closed it.
    """
    path = "C:\\Users\\Dario\\Desktop\\multi-user-segmentation-master\\data\\DatasetPaths_simplified_dict.txt"
    wanted = str(my_char).strip()
    with open(path, "r") as f:
        for line in f:
            parsed_line = line.split("\t\t")
            if parsed_line[0].strip() == wanted:
                return parsed_line[1].strip()
    return None
def max_is_contained(max_joined_segment, b_step):
    """True when *b_step* already holds an entry with the same closed id."""
    return any(item.id_closed == max_joined_segment.id_closed for item in b_step)
def reconstruct(ssl):
    # Rebuilds compact string segments from the raw structures in *ssl*,
    # joins every closed segment with every compatible segment of the same
    # b-step, keeps one best-scoring join per closed segment and finally
    # stores everything in the trajectories sqlite database.
    array_sub_seq=[]  # NOTE(review): never used — candidate for removal
    sensor_log= SensorLog()
    # Phase 1: flatten each raw segment (list of indexable items) into a
    # string of first symbols; segments of length <= 4 are discarded.
    for i in range(0, len(ssl.b_steps)):
        b_step= B_step()
        b_step.closed_segments=[]
        b_step.compat_segments=[]
        for j in range(0, len(ssl.b_steps[i].closed_segments)):
            if (len(ssl.b_steps[i].closed_segments[j]) <= 4 ):
                continue
            s = ""
            for h in range(0,len(ssl.b_steps[i].closed_segments[j])):
                s+=ssl.b_steps[i].closed_segments[j][h][0]
            b_step.closed_segments.append(s)
        for m in range(0, len(ssl.b_steps[i].compat_segments)):
            if(len(ssl.b_steps[i].compat_segments[m])<=4 ):
                continue
            x = ""
            for u in range(0, len(ssl.b_steps[i].compat_segments[m])):
                x+=ssl.b_steps[i].compat_segments[m][u][0]
            b_step.compat_segments.append(x)
        b_step.crossing_points=ssl.b_steps[i].crossing_points
        #print( b_step.compat_segments)
        sensor_log.b_steps.append(b_step)
    # Phase 2: build every closed x compatible pairing per b-step.
    final_array=[]
    for i in range(0, len(sensor_log.b_steps)):
        b_step=[]
        for j in range(0, len(sensor_log.b_steps[i].closed_segments)):
            for m in range(0, len(sensor_log.b_steps[i].compat_segments)):
                if(len(sensor_log.b_steps[i].closed_segments[j]+sensor_log.b_steps[i].compat_segments[m])!=0):
                    joined_segment=Joined_Segment()
                    joined_segment.id_closed=j
                    joined_segment.id_compat=m
                    # Junction window: last 3 symbols of the closed segment
                    # followed by the first 3 of the compatible one.
                    joined_segment.subsequence=sensor_log.b_steps[i].closed_segments[j][-3:]+sensor_log.b_steps[i].compat_segments[m][:3]
                    joined_segment.segment=sensor_log.b_steps[i].closed_segments[j]+sensor_log.b_steps[i].compat_segments[m]
                    joined_segment.index_crossing=len(sensor_log.b_steps[i].closed_segments[j])-1
                    joined_segment.position_crossing=get_crossing_point(sensor_log.b_steps[i].crossing_points, sensor_log.b_steps[i].compat_segments[m][:1])
                    b_step.append( joined_segment)
        if( b_step):
            final_array.append(b_step)
    print(len(final_array))
    # Phase 3: score every join by how often its junction subsequence
    # occurs across the whole log.
    # NOTE(review): the score is stored in .occurences while
    # calculate_max_averages compares .average (which stays 0) — this
    # looks inconsistent; confirm intended behaviour.
    array_joined_segments=[]
    for i in range(0, len(final_array)):
        for j in range(0, len(final_array[i])):
            joined_segment=final_array[i][j]
            joined_segment.occurences=calculate_averages(joined_segment.subsequence, sensor_log)
    # Phase 4: keep one winner per closed-segment id within each b-step.
    for i in range(0, len(final_array)):
        b_step=[]
        for j in range(0, len(final_array[i])):
            joined_segment = final_array[i][j]
            max_joined_segment=calculate_max_averages(joined_segment,final_array[i])
            if(max_is_contained(max_joined_segment, b_step)==False):
                b_step.append(max_joined_segment)
        array_joined_segments.append(b_step)
    # Debug dump of the surviving joins, then persist them.
    for i in range(0, len(array_joined_segments)):
        print(i)
        for j in range(0, len(array_joined_segments[i])):
            joined_segment = array_joined_segments[i][j]
            print(joined_segment.segment)
            print(joined_segment.id_closed)
            print(joined_segment.position_crossing)
    print(len(final_array))
    print(len(array_joined_segments))
    construct_database(array_joined_segments,"C:\\Users\\Dario\\Desktop\\multi-user-segmentation-master\\data\\trajectoriesDB.db")
    return
|
# Decorator demo 1: wrap a fixed two-argument function with banner printing.
def my_decorator(func):
    def wrap_func(x,y):
        print('*******')
        func(x,y)
        print('*******')
    return wrap_func


@my_decorator
def hello(greeting,emoji):
    print(greeting,emoji)


hello('hellooo',':)')
# Decorator demo 2: the same wrapper generalised to any signature via
# *args/**kwargs (these definitions deliberately shadow the versions above).
def my_decorator(func):
    def wrap_func(*args,**kwargs):
        print('*******')
        func(*args,**kwargs)
        print('*******')
    return wrap_func


@my_decorator
def hello(greeting,emoji = ':)'):
    print(greeting,emoji)


hello('hellooo')
#performance
from time import time


def performance(fn):
    # Decorator that reports how long the wrapped call took.
    def wrapper(*args,**kwargs):
        t1 = time()
        result = fn(*args,**kwargs)
        t2 = time()
        print(f'took {t2-t1}s')
        return result
    return wrapper


@performance
def long_time():
    # Busy loop used purely to have something measurable.
    for i in range(10000000):
        i*5


long_time()
user1 = {
    'name': 'Sorna',
    'valid': True
}


def authenticated(fn):
    # Only call the wrapped function when the global user dict is valid.
    # NOTE(review): the wrapper implicitly returns None when 'valid' is
    # False — intentional in this tutorial, but worth knowing.
    def wrapper(*args, **kwargs):
        if user1['valid']:
            return fn(*args, **kwargs)
    return wrapper


@authenticated
def message_friends(user):
    print('message has been sent')


message_friends(user1)
|
# Read a circle's radius, then print its circumference and area
# (formatted to one decimal place; pi is approximated as 3.14).
radius = float(input('圆的半径为: '))
C = 2 * 3.14 * radius  # circumference
S = 3.14 * (radius ** 2)  # area
print('圆的周长为 %.1f' % C)
print('圆的面积为 %.1f' % S)
|
import os
import json
import pickle
import numpy as np
import pandas as pd
from xgboost import XGBClassifier
# Load the trained model and the training-time column layout produced by
# the training pipeline (paths are relative to the app root).
model = pickle.load(open("model/model.pickle", "rb"))
columns = pickle.load(open("model/model_columns.pickle", "rb"))


def recommendation():
    # Placeholder — the recommendation logic currently lives inside
    # predict_ratings().
    pass
def predict_ratings(data):
    """Predict the probability of a 5-star rating for one sample and
    suggest the single feature whose activation raises it the most.

    :param data: dict of feature name -> scalar value for one sample.
    :return: dict with keys 'result' (current probability as str),
        'add' (best feature to switch on) and 'improvement'
        (probability with that feature on, as str).
    """
    # Wrap each scalar in a list so the dict becomes a one-row frame.
    data2 = {}
    for k, v in data.items():
        data2[k] = [v]
    query = pd.get_dummies(pd.DataFrame.from_dict(data2))
    # Align with the training-time layout: add missing dummies as zeros,
    # then order the columns exactly as the model expects.
    for col in columns:
        if col not in query.columns:
            query[col] = 0
    query = query[columns]
    prob_5stars = model.predict_proba(query)[0, 1]
    print(query.shape)
    # Recommendation: re-score with each candidate feature switched on.
    # NOTE(review): columns[14:42] presumably spans the toggleable
    # amenity dummies — confirm against the training pipeline.
    score = {}
    for col in list(columns[14:42]):
        rec = query.copy()
        if query[col][0] == 0:
            # .loc avoids pandas chained-assignment (SettingWithCopy)
            # pitfalls of the original `rec[col][0] = 1`.
            rec.loc[0, col] = 1
        prob_new = model.predict_proba(rec)[0, 1]
        score[col] = prob_new
    highest = max(score, key=score.get)
    result = {
        'result': str(prob_5stars),
        'add': highest,
        'improvement': str(score.get(highest))
    }
    return result


if __name__ == '__main__':
    pass  # print(predict_ratings(example))
|
import logging
from collections import Sequence
import numpy as np
from qtpy.QtCore import QObject, Signal
from qtpy.QtWidgets import QMainWindow
from model.preferences import LOGGING_LEVEL
from ui.logs import Ui_logsForm
class LogViewer(QMainWindow, Ui_logsForm):
    '''
    A window which displays logging.
    '''
    # Signals used to push UI changes back to the owning RollingLogger.
    change_level = Signal(str)
    set_size = Signal(int)
    set_exclude_filter = Signal(str)

    def __init__(self, max_size):
        super(LogViewer, self).__init__()
        self.setupUi(self)
        self.maxRows.setValue(max_size)
        self.logViewer.setMaximumBlockCount(max_size)

    def closeEvent(self, event):
        '''
        Propagates the window close event (the window is hidden, not
        destroyed, so it can be reshown).
        '''
        self.hide()

    def set_log_size(self, size):
        '''
        Updates the log size.
        :param size: the new size.
        '''
        self.set_size.emit(size)
        self.logViewer.setMaximumBlockCount(size)

    def set_log_level(self, level):
        '''
        Updates the log level.
        :param level: the new level.
        '''
        self.change_level.emit(level)

    def set_excludes(self):
        # Publish the comma-separated exclusion filter typed by the user.
        self.set_exclude_filter.emit(self.excludes.text())

    def refresh(self, data):
        '''
        Refreshes the displayed data.
        :param data: the data.
        '''
        self.logViewer.clear()
        for d in data:
            if d is not None:
                self.logViewer.appendPlainText(d)

    def append_msg(self, msg):
        '''
        Shows the message and keeps the view scrolled to the bottom.
        :param msg: the msg.
        '''
        self.logViewer.appendPlainText(msg)
        self.logViewer.verticalScrollBar().setValue(self.logViewer.verticalScrollBar().maximum())
class MessageSignals(QObject):
    '''Qt signal holder so the logging handler can emit across threads.'''
    append_msg = Signal(str, name='append_msg')
class RollingLogger(logging.Handler):
    '''
    A logging handler that keeps the most recent messages in a ring
    buffer, optionally filters them by substring excludes, and streams
    them to a LogViewer window when one is open.

    Bug fix: ``np.object`` (used when rebuilding the buffer) was removed
    in NumPy 1.24; the supported spelling is the builtin ``object``.
    '''

    def __init__(self, preferences, size=1000, parent=None):
        super().__init__()
        self.__buffer = RingBuffer(size, dtype=object)
        self.__signals = MessageSignals()
        self.__visible = False
        self.__window = None
        self.__preferences = preferences
        self.__excludes = []
        self.parent = parent
        self.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(threadName)s - %(name)s - %(funcName)s - %(message)s'))
        # Restore the persisted level, falling back to INFO.
        level = self.__preferences.get(LOGGING_LEVEL)
        if level is not None and level in logging._nameToLevel:
            level = logging._nameToLevel[level]
        else:
            level = logging.INFO
        self.__root = self.__init_root_logger(level)
        self.__levelName = logging.getLevelName(level)

    def __init_root_logger(self, level):
        # Install this handler on the root logger so everything is captured.
        root_logger = logging.getLogger()
        root_logger.setLevel(level)
        root_logger.addHandler(self)
        return root_logger

    def emit(self, record):
        '''Buffer the formatted record unless an exclude matches it.'''
        msg = self.format(record)
        if not any(e in msg for e in self.__excludes):
            self.__buffer.append(msg)
            self.__signals.append_msg.emit(msg)

    def show_logs(self):
        '''
        Creates (once) and shows the log viewer window, wired to this
        handler's signals, then replays the buffered messages into it.
        '''
        if self.__window is None:
            self.__window = LogViewer(self.__buffer.maxlen)
            self.__window.set_size.connect(self.set_size)
            self.__window.change_level.connect(self.change_level)
            self.__window.set_exclude_filter.connect(self.set_excludes)
            self.__signals.append_msg.connect(self.__window.append_msg)
            level_idx = self.__window.logLevel.findText(self.__levelName)
            self.__window.logLevel.setCurrentIndex(level_idx)
        self.__window.show()
        self.__window.refresh(self.__buffer)

    def set_excludes(self, excludes):
        '''Apply a comma-separated exclude filter and rebuild the buffer
        without the now-excluded messages.'''
        self.__excludes = excludes.split(',')
        if len(self.__excludes) > 0:
            old_buf = self.__buffer
            # builtin `object`, not np.object (removed in NumPy 1.24)
            self.__buffer = RingBuffer(old_buf.maxlen, dtype=object)
            for m in old_buf:
                if not any(e in m for e in self.__excludes):
                    self.__buffer.append(m)
            if self.__window is not None:
                self.__window.refresh(self.__buffer)

    def set_size(self, size):
        '''
        Changes the size of the log cache, keeping buffered messages.
        '''
        old_buf = self.__buffer
        # builtin `object`, not np.object (removed in NumPy 1.24)
        self.__buffer = RingBuffer(size, dtype=object)
        self.__buffer.extend(old_buf)
        if self.__window is not None:
            self.__window.refresh(self.__buffer)

    def change_level(self, level):
        '''
        Change the root logger level and persist the choice.
        :param level: the new level name.
        '''
        logging.info(f"Changing log level from {self.__levelName} to {level}")
        self.__root.setLevel(level)
        self.__levelName = level
        self.__preferences.set(LOGGING_LEVEL, self.__levelName)
def to_millis(start, end, precision=1):
    '''
    Difference between two times, expressed in milliseconds.
    :param start: start time in seconds.
    :param end: end time in seconds.
    :param precision: decimal places to round to (default 1).
    :return: delta in millis.
    '''
    delta_seconds = end - start
    return round(delta_seconds * 1000, precision)
class RingBuffer(Sequence):
    # NOTE(review): `Sequence` must come from collections.abc on Python
    # 3.10+; the module-level `from collections import Sequence` stopped
    # working there and should be updated.

    def __init__(self, capacity, dtype=np.float64):
        """
        Create a new ring buffer with the given capacity and element type
        Parameters
        ----------
        capacity: int
            The maximum capacity of the ring buffer
        dtype: data-type, optional
            Desired type of buffer elements. Use a type like (float, 2) to
            produce a buffer with shape (N, 2)
        """
        self.__buffer = np.empty(capacity, dtype)
        self.__left_idx = 0
        self.__right_idx = 0
        self.__capacity = capacity
        # items added since the last take_event_count()
        self.__event_count = 0

    def unwrap(self):
        """ Copy the data from this buffer into unwrapped (linear) form """
        return np.concatenate((
            self.__buffer[self.__left_idx:min(self.__right_idx, self.__capacity)],
            self.__buffer[:max(self.__right_idx - self.__capacity, 0)]
        ))

    def take_event_count(self, if_multiple=None):
        '''
        :param if_multiple: if set, only take the event count if it is a multiple of the supplied value.
        :return: the count of items added since the last take if the count is taken.
        '''
        count = self.__event_count
        if if_multiple is None or count % if_multiple == 0:
            self.__event_count = 0
            return count
        else:
            return None

    def _fix_indices(self):
        """
        Enforce our invariant that 0 <= self._left_index < self._capacity
        """
        if self.__left_idx >= self.__capacity:
            self.__left_idx -= self.__capacity
            self.__right_idx -= self.__capacity
        elif self.__left_idx < 0:
            self.__left_idx += self.__capacity
            self.__right_idx += self.__capacity

    @property
    def idx(self):
        """(left, right) virtual index pair — mainly for debugging."""
        return self.__left_idx, self.__right_idx

    @property
    def is_full(self):
        """ True if there is no more space in the buffer """
        return len(self) == self.__capacity

    # numpy compatibility
    def __array__(self):
        return self.unwrap()

    @property
    def dtype(self):
        return self.__buffer.dtype

    @property
    def shape(self):
        return (len(self),) + self.__buffer.shape[1:]

    @property
    def maxlen(self):
        return self.__capacity

    def append(self, value):
        """Append on the right, evicting the oldest item when full."""
        if self.is_full:
            if not len(self):
                # capacity of zero: nothing can ever be stored
                return
            else:
                self.__left_idx += 1
        self.__buffer[self.__right_idx % self.__capacity] = value
        self.__right_idx += 1
        self.__event_count += 1
        self._fix_indices()

    def peek(self):
        """Return the most recently appended item without removing it,
        or None when the buffer is empty.

        Bug fix: the original called ``logger.debug`` here but no module
        level ``logger`` exists, so every peek() raised NameError.
        """
        if len(self) == 0:
            return None
        idx = (self.__right_idx % self.__capacity) - 1
        res = self.__buffer[idx]
        return res

    def append_left(self, value):
        """Append on the left, evicting the newest item when full."""
        if self.is_full:
            if not len(self):
                return
            else:
                self.__right_idx -= 1
        self.__left_idx -= 1
        self._fix_indices()
        self.__buffer[self.__left_idx] = value
        self.__event_count += 1

    def extend(self, values):
        """Append many values on the right; only the newest `capacity`
        items are retained."""
        lv = len(values)
        if len(self) + lv > self.__capacity:
            if not len(self):
                return
        if lv >= self.__capacity:
            # wipe the entire array! - this may not be threadsafe
            self.__buffer[...] = values[-self.__capacity:]
            self.__right_idx = self.__capacity
            self.__left_idx = 0
            return
        ri = self.__right_idx % self.__capacity
        sl1 = np.s_[ri:min(ri + lv, self.__capacity)]
        sl2 = np.s_[:max(ri + lv - self.__capacity, 0)]
        self.__buffer[sl1] = values[:sl1.stop - sl1.start]
        self.__buffer[sl2] = values[sl1.stop - sl1.start:]
        self.__right_idx += lv
        self.__left_idx = max(self.__left_idx, self.__right_idx - self.__capacity)
        self.__event_count += len(values)
        self._fix_indices()

    def extend_left(self, values):
        """Prepend many values; only the oldest `capacity` items are
        retained."""
        lv = len(values)
        if len(self) + lv > self.__capacity:
            if not len(self):
                return
        if lv >= self.__capacity:
            # wipe the entire array! - this may not be threadsafe
            self.__buffer[...] = values[:self.__capacity]
            self.__right_idx = self.__capacity
            self.__left_idx = 0
            return
        self.__left_idx -= lv
        self._fix_indices()
        li = self.__left_idx
        sl1 = np.s_[li:min(li + lv, self.__capacity)]
        sl2 = np.s_[:max(li + lv - self.__capacity, 0)]
        self.__buffer[sl1] = values[:sl1.stop - sl1.start]
        self.__buffer[sl2] = values[sl1.stop - sl1.start:]
        self.__right_idx = min(self.__right_idx, self.__left_idx + self.__capacity)
        self.__event_count += len(values)

    def __len__(self):
        return self.__right_idx - self.__left_idx

    def __getitem__(self, item):
        # handle simple (b[1]) and basic (b[np.array([1, 2, 3])]) fancy indexing specially
        if not isinstance(item, tuple):
            item_arr = np.asarray(item)
            if issubclass(item_arr.dtype.type, np.integer):
                item_arr = (item_arr + self.__left_idx) % self.__capacity
                return self.__buffer[item_arr]
        # for everything else, get it right at the expense of efficiency
        return self.unwrap()[item]

    def __iter__(self):
        # alarmingly, this is comparable in speed to using itertools.chain
        return iter(self.unwrap())

    # Everything else
    def __repr__(self):
        return '<RingBuffer of {!r}>'.format(np.asarray(self))
|
# __author__ = 'ak'
import requests
import json
import sys
def run(msg,urls,atMobiles):
    # Post *msg* to a DingTalk group-robot webhook as a text message,
    # optionally @-mentioning the given mobile numbers.
    # :param msg: report body appended after the fixed header line.
    # :param urls: full webhook URL including the access token.
    # :param atMobiles: list of mobile numbers to mention (may be empty).
    data1 = {
        "msgtype":"text",
        "text":{
            "content":"事业部今日Bug统计:"+'\n'+msg
        },
        "at":{
            "atMobiles":atMobiles,
            "isAtAll":False
        }
    }
    header = {'Content-Type':'application/json; charset=utf-8'}
    print(data1)
    temp1 = requests.post(url = urls,data = json.dumps(data1),headers = header)
    print(temp1.text)
#
# if __name__ == '__main__':
# urls1 = 'https://oapi.dingtalk.com/robot/send?access_token'\
# '=8cf0b365b2929e300f3ced7c622c1f9f08df8d9424b704ffe7cb93a61d11541d'
# msg1 = sys.argv[1]
# atMobiles1 = []
# run(msg = msg1,urls = urls1,atMobiles = atMobiles1)
|
def cointoss():
    """Simulate 5000 fair coin tosses, printing a running tally.

    Bug fix: the body used the Python 2 ``print`` statement, which is a
    SyntaxError under Python 3 (the language the rest of this codebase
    uses); output formatting is unchanged.  The ``import random`` was
    also hoisted out of the loop.
    """
    import random
    counttails = 0
    countheads = 0
    for k in range(1, 5001):
        if round(random.random()) == 0:
            coin = "tail"
            counttails += 1
        else:
            coin = "head"
            countheads += 1
        print("Attempt #" + str(k), ": Throwing a coin.... it's a", coin, "! ... Got", countheads, "heads so far and", counttails, "tails so far.")


cointoss()
import random
#--TO DO---
#Make a UI
#Simplify some of these features maybe using classes
#reads the list of responses from 'possibleresponses.txt'
#returns them in a list called responses
# Reads the list of responses from 'possibleresponses.txt' and returns
# them as a list.
def getResponses():
    """Return the saved responses, one per line, newline-stripped.

    Fixes a resource leak: the original opened the file and never
    closed it; `with` now guarantees the handle is released.
    """
    filename = "possibleresponses.txt"
    with open(filename, 'r') as openfile:
        # replace() is a no-op for a final line with no trailing newline
        return [line.replace('\n', '') for line in openfile]
#reads all responses from the file and copies them to prevent overwrites
#opens 'possibleresponses.txt' for writing, and enters all previously copied responses so the aren't overwritten
#begins a listener loop that prompts the user to enter a new response, writes the response, and stops when the keyword '/end' is used.
def inputResponses():
    """Interactively append new responses to 'possibleresponses.txt'.

    Existing responses are read first and rewritten so they are not
    lost, then the user is prompted in a loop; entering '/stop' ends
    the loop and returns False.

    Fixes: the file handle is now closed via `with` (the original never
    closed it, so buffered writes could be lost), and the dead
    `run == False` comparison (== where = was meant) was removed — it
    had no effect because the following `return False` exited anyway.
    """
    print("Type /stop to stop entering responses.")
    savedResponses = getResponses()
    filename = "possibleresponses.txt"
    with open(filename, 'w') as openfile:
        for response in savedResponses:
            openfile.write(response + '\n')
        while True:
            entry = input("Enter a New Response: ")
            if entry == '/stop':
                return False
            openfile.write(entry + '\n')
#draws from the pool of responses obtained through getResponses()
#picks a random response from that list
# Draws from the pool of responses obtained through getResponses() and
# picks one at random.
def getAnswer():
    """Return one randomly chosen response from the saved pool."""
    pool = getResponses()
    chosen_index = random.randint(0, len(pool) - 1)
    return pool[chosen_index]
#lists all keywords that can be entered by the user
# Lists all keywords that can be entered by the user.
def getOptions():
    """Print every supported command keyword with its description."""
    options = {
        '/help':' Show a list of options.',
        '/new':' Input a new response.',
        '/list':' Lists all possible responses.',
        '/quit':' Exit the program.',
        '/stop':' Ends the sequence for entering new responses.'
    }
    for keyword, description in options.items():
        print(keyword, description)
def checkEntry(entry):
    """Dispatch one user entry.

    Returns False only for '/quit' (which ends the main loop); every
    other entry performs its action and returns True.
    """
    if entry == '/quit':
        return False
    if entry == '/help':
        getOptions()
    elif entry == '/new':
        inputResponses()
    elif entry == '/list':
        print(getResponses())
    else:
        # anything else is treated as a question to answer
        print(getAnswer())
    return True
#main loop of the program. Prompts user to ask a question and generates a random response.
#ends when the keyword /quit is used, and gives a list of options when /help is entered.
def mainLoop():
    """Prompt loop: read a question, handle it via checkEntry(), stop
    when checkEntry() returns False ('/quit')."""
    run=True
    while run:
        print("")
        entry=input("Please, ask me anything: ")
        print("")
        run=checkEntry(entry)


print("Welcome! Ask me your questions, or enter '/help' for a list of options.")
mainLoop()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# undone
from urllib import parse
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
import time
from spider.constant import *
def get_filename(keyword):
    """Build the CSV output path for *keyword* under ../data/question_list/."""
    directory = '../data/question_list/'
    extension = '.csv'
    return directory + keyword + extension
def get_url(keyword):
    """Percent-encode *keyword* and return the YouTube search URL for it."""
    print("(一) 编码关键词 <" + keyword + "> 中...")
    base_url = "https://www.youtube.com/results?search_query="
    encoded_keyword = parse.quote(keyword, safe='/', encoding='utf-8')
    url = base_url + encoded_keyword
    print("编码成功, url链接: " + url)
    return url
def page_shake(driver):
    # Guard against page-rendering glitches: scroll to the bottom, wait,
    # then scroll back up so the dynamic loading keeps firing.
    # NOTE(review): to_bottom / to_top / sleep_time come from
    # spider.constant (star import) — presumably JS snippets and a delay;
    # confirm there.
    driver.execute_script(to_bottom)  # move the scrollbar to the bottom
    time.sleep(sleep_time)
    driver.execute_script(to_top)
###
def get_html(url):
# 基于web driver来应对知乎的动态加载措施
print("(二) 访问页面中...")
driver = webdriver.Firefox() # 打开浏览器
driver.get(url) # 打开网页 - 知乎关键词检索后的网页
page_shake(driver)
for i in range(max_num):
time.sleep(sleep_time)
driver.execute_script(to_bottom) # 将滚动条移动到页面的底部
print("正在进行第" + str(i+1) + "次置底操作...")
i += 1
html = driver.page_source # get html
driver.close()
print("成功访问, 已获取html源码")
return html
def get_comments_list(url):
    """Fetch *url*, parse the rendered page and return the comments as a
    list of {'author', 'content'} dicts, one per comment thread."""
    html = get_html(url)
    soup = BeautifulSoup(html, features='lxml')
    print("(三) 数据处理中...")
    threads = soup.find_all('ytd-comment-thread-renderer', {'class': 'style-scope ytd-item-section-renderer'})
    comments_list = [
        {
            'author': thread.find('a', {'id': 'author-text'}).get_text().strip(),
            'content': thread.find('div', {'id': 'content'}).get_text(),
        }
        for thread in threads
    ]
    print("已成功解析出所有相关问题列表")
    return comments_list
def save2csv(data, filename):
    """Write *data* (list of dicts) to *filename* as UTF-8 CSV, keeping
    the header row and the index column."""
    frame = pd.DataFrame(data)
    frame.to_csv(filename, sep=',', header=True, index=True, encoding='utf-8')
    print("(四) 数据已保存在 <" + filename + "> ...")
def spider(keyword):
    """Run the full pipeline for *keyword*: build the search URL, scrape
    the comment list and save it to CSV.

    Bug fix: the original called the undefined name ``get_question_list``
    (NameError at runtime); the scraping function defined in this module
    is ``get_comments_list``.
    """
    print("开始检索关键字 <" + keyword + ">...")
    filename = get_filename(keyword)
    url = get_url(keyword)
    question_list = get_comments_list(url)
    save2csv(question_list, filename)
    print("完成检索")
def main():
    # Scrape comments for the default demo keyword.
    spider("这就是街舞")


# Bug fix: the original compared __name__ to 'main', which is never true
# (the value for direct execution is '__main__'), so main() could not run
# as a script.
if __name__ == '__main__':
    main()
|
"""
A small and convenient cross process FIFO queue service based on
TCP protocol.
"""
import logging
import threading
from collections import deque
from queue import Full, Empty
from time import monotonic
from ._commu_proto import *
from .exceptions import UnknownCmd, Empty, Full
from .utils import (
Unify_encoding,
md5,
new_thread,
get_logger,
get_builtin_name,
helper,
)
class _ClientStatistic:
    """Book-keeping record for a single connected client."""

    def __init__(self, client_addr, conn: TcpConn):
        self.client_addr = client_addr
        # String form of the address, used as this client's identity/key.
        self.me = str(client_addr)
        self.conn = conn
class _WkSvrHelper:
def __init__(self, wk_inst, client_key):
self.wk_inst = wk_inst
self.client_key = client_key
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.wk_inst.remove_client(self.client_key)
class WuKongQueue:
    def __init__(
        self, host="localhost", port=8848, name="", maxsize=0, **kwargs
    ):
        """
        :param host: host for queue server listen
        :param port: port for queue server listen
        :param name: queue's str identity
        :param maxsize: queue max size (0 means unbounded)
        A number of optional keyword arguments may be specified, which
        can alter the default behaviour.
        max_clients: max number of clients
        log_level: pass with stdlib logging.DEBUG/INFO/WARNING.., to control
        the WuKongQueue's logging level that output to stderr
        auth_key: it is a string used for client authentication. If is None,
        the client does not need authentication
        socket_connect_timeout: maximum socket operations time allowed during
        connection establishment, client's tcp connection with established
        connections but not authenticated in time will be disconnected
        socket_timeout: maximum socket operations time allowed after successful
        connection, prevent the client from disconnecting in a way that the
        server cannot sense, thus making the resources unable to be released.
        """
        self.name = name or get_builtin_name()
        self.addr = (host, port)
        self._tcp_svr = None
        self.max_clients = kwargs.pop("max_clients", 0)
        log_level = kwargs.pop("log_level", logging.DEBUG)
        self._logger = get_logger(self, log_level)
        self.socket_connect_timeout = kwargs.pop("socket_connect_timeout", 30)
        self.socket_timeout = kwargs.pop(
            "socket_timeout", self.socket_connect_timeout
        )
        # key->"-".join(client.addr)
        # value-> `_ClientStatistic`
        self.client_stats = {}
        self.maxsize = maxsize
        self.queue = deque()
        # mutex must be held whenever the queue is mutating. All methods
        # that acquire mutex must release it before returning. mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = threading.Lock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = threading.Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = threading.Condition(self.mutex)
        # Notify all_tasks_done whenever the number of unfinished tasks
        # drops to zero; thread waiting to join() is notified to resume
        self.all_tasks_done = threading.Condition(self.mutex)
        self.unfinished_tasks = 0
        self._statistic_lock = threading.Lock()
        # if closed is True, server would not to listen connection request
        # from network until execute self.run() again.
        self.closed = True
        auth_key = kwargs.pop("auth_key", None)
        self._prepare_process(auth_key=auth_key)
        # Start accepting connections immediately on construction.
        self.run()
def _prepare_process(self, auth_key):
if auth_key is not None:
self._auth_key = md5(auth_key.encode(Unify_encoding))
else:
self._auth_key = None
    def run(self):
        """
        if not running, clients can't connect to server,but server side
        is still available
        """
        # Only (re)start when currently closed; repeated calls are no-ops.
        if self.closed:
            self._tcp_svr = TcpSvr(*self.addr)
            # NOTE(review): on_running is not defined in this part of the
            # class — presumably a hook defined further down; also,
            # `self.closed` is apparently reset elsewhere (confirm).
            self.on_running()
            # Accept loop runs in a background thread.
            new_thread(self._run)
    def close(self):
        """
        close only makes sense for the clients, server side is still
        available.
        Note: When close is executed, all connected clients will be
        disconnected immediately
        """
        self.closed = True
        if self._tcp_svr:
            self._tcp_svr.close()
            self._tcp_svr = None
        # Drop every tracked client connection under the statistics lock.
        with self._statistic_lock:
            for client_stat in self.client_stats.values():
                client_stat.conn.close()
            self.client_stats.clear()
        self._logger.debug(
            "<WuKongQueue [{}] listened {} was closed>".format(
                self.name, self.addr
            )
        )
def __repr__(self):
return "<WuKongQueue listened {}, " "is_closed:{}>".format(
self.addr, self.closed
)
    def __enter__(self):
        """Support `with WuKongQueue(...) as svr:` usage."""
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Disconnect all clients when leaving a `with` block."""
        self.close()
    def helper(self):
        """If the place the server was created isn't the same as where it
        is used, you can use helper to close the server more easily:
        ```
        with svr.helper():
            ...
        # this is equivalent to:
        with svr:
            ...
        ```
        """
        # `helper` here is the utility from .utils, not this method.
        return helper(self)
    def _qsize(self):
        # Current item count; caller must already hold self.mutex.
        return len(self.queue)
    def get(self, block=True, timeout=None, convert_method=None):
        """Remove and return an item from the queue.
        :param block
        :param timeout
        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until an item is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Empty exception if no item was available within that time.
        Otherwise ('block' is false), return an item if one is immediately
        available, else raise the Empty exception ('timeout' is ignored
        in that case).
        :param convert_method: eventually, `get` returns convert_method(item)
        """
        with self.not_empty:
            if not block:
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                # Block indefinitely until a producer notifies not_empty.
                while not self._qsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a non-negative number")
            else:
                # Deadline-based wait; re-check the predicate after every
                # wake-up to guard against spurious notifications.
                endtime = monotonic() + timeout
                while not self._qsize():
                    remaining = endtime - monotonic()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self.queue.popleft()
            # A slot was freed; wake one blocked producer.
            self.not_full.notify()
            return convert_method(item) if convert_method is not None else item
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
:param item: value for put
:param block
:param timeout
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case)
"""
with self.not_full:
if self.maxsize > 0:
if not block:
if self._qsize() >= self.maxsize:
raise Full
elif timeout is None:
while self._qsize() >= self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = monotonic() + timeout
while self._qsize() >= self.maxsize:
remaining = endtime - monotonic()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self.queue.append(item)
self.unfinished_tasks += 1
self.not_empty.notify()
    def put_nowait(self, item):
        """
        Put an item into the queue without blocking.
        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the Full exception.
        :param item: value for put
        :return: whatever put() returns
        """
        return self.put(item, block=False)
    def get_nowait(self, convert_method=None) -> bytes:
        """
        Remove and return an item from the queue without blocking.
        Only get an item if one is immediately available. Otherwise
        raise the Empty exception.
        :param convert_method: optional callable applied to the item
        :return: the item (or convert_method(item))
        """
        return self.get(block=False, convert_method=convert_method)
def full(self) -> bool:
"""Return True if the queue is full, False otherwise
"""
with self.mutex:
return 0 < self.maxsize <= self._qsize()
def empty(self) -> bool:
"""Return True if the queue is empty, False otherwise
"""
with self.mutex:
return not self._qsize()
    def qsize(self) -> int:
        """Return the approximate size of the queue.

        Approximate because other clients may enqueue/dequeue between
        this call and any use of the returned value.
        """
        with self.mutex:
            return self._qsize()
def reset(self, maxsize=None):
"""reset clears current queue and creates a new queue with
maxsize, if maxsize is None, use initial value of maxsize
"""
with self.mutex:
self.maxsize = maxsize if maxsize else self.maxsize
self.queue.clear()
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
with self.all_tasks_done:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError("task_done() called too many times")
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
with self.all_tasks_done:
while self.unfinished_tasks:
self.all_tasks_done.wait()
    def connected_clients(self):
        """Return the number of currently registered client connections."""
        with self._statistic_lock:
            return len(self.client_stats)
def remove_client(self, client_key):
with self._statistic_lock:
try:
self.client_stats[client_key].conn.close()
self.client_stats.pop(client_key)
except KeyError:
pass
    @staticmethod
    def _parse_socket_msg(conn: TcpConn, **kw):
        # Read one framed message from the connection and unwrap it.
        # Returns None when the received message is not valid; callers
        # treat None as "stop serving this connection".
        ignore_socket_timeout = kw.pop("ignore_socket_timeout", False)
        reply_msg = conn.read(ignore_socket_timeout=ignore_socket_timeout)
        if not reply_msg.is_valid():
            return
        reply_msg.unwrap()
        return reply_msg
    def _auth(self, conn: TcpConn, client_stat: _ClientStatistic):
        """Authenticate a freshly connected client; register it on success.

        Returns True when no auth key is configured, or when the client
        sent a QUEUE_AUTH_KEY command carrying the matching key (the
        client gets QUEUE_OK / QUEUE_FAIL accordingly).
        """
        def auth_core():
            # No key configured: every connection is accepted.
            if self._auth_key is None:
                return True
            reply_msg = self._parse_socket_msg(conn=conn)
            if reply_msg is not None:
                cmd = reply_msg.queue_params_object.cmd
                args = reply_msg.queue_params_object.args
                if cmd == QUEUE_AUTH_KEY:
                    if args["auth_key"] == self._auth_key:
                        conn.write(QUEUE_OK)
                        return True
                    else:
                        conn.write(QUEUE_FAIL)
                        return False
            # Invalid message or wrong command: reject.
            return False
        if auth_core():
            # Authenticated: switch from the connect timeout to the normal
            # socket timeout and record the client in the statistics map.
            client_stat.conn.sock.settimeout(self.socket_timeout)
            with self._statistic_lock:
                self.client_stats[client_stat.me] = client_stat
            return True
        return False
    def on_running(self):
        """Mark the server as open and log the listening address."""
        if self.closed:
            self.closed = False
        self._logger.debug(
            "<WuKongQueue [%s] is listening to %s" % (self.name, self.addr)
        )
    def _run(self):
        """Accept loop: greet, authenticate, and spawn a handler per client.

        Runs until the listening socket is closed, at which point accept()
        raises OSError and the loop exits.
        """
        while True:
            try:
                sock, addr = self._tcp_svr.accept()
                sock.settimeout(self.socket_connect_timeout)
            except OSError:
                # Listening socket was closed (see close()): stop serving.
                return
            tcp_conn = TcpConn(sock=sock)
            client_stat = _ClientStatistic(client_addr=addr, conn=tcp_conn)
            with self._statistic_lock:
                if self.max_clients > 0:
                    if self.max_clients <= len(self.client_stats):
                        # client will receive an empty byte, that represents
                        # the server's client slots are full!
                        tcp_conn.close()
                        continue
            # send hi message on connected
            ok = tcp_conn.write(QUEUE_HI)
            if ok:
                # it's a must to authenticate firstly
                if self._auth(conn=tcp_conn, client_stat=client_stat):
                    # Serve this client's commands on a dedicated thread.
                    new_thread(
                        self.process_conn,
                        kw={"conn": tcp_conn, "me": client_stat.me},
                    )
                    self._logger.info(
                        "[server:%s] new client from %s"
                        % (self.addr, str(addr))
                    )
                    continue
                # auth failed!
                tcp_conn.close()
                continue
            else:
                # please report this problem with your python version and
                # wukongqueue package version on
                # https://github.com/chaseSpace/wukongqueue/issues
                self._logger.fatal("write_wukong_data err:%s" % tcp_conn.err)
                return
    def process_conn(self, me, conn: TcpConn):
        """Serve one client's queue commands; runs as a thread at all.

        Reads framed messages from *conn* and dispatches each command
        until the stream yields an invalid message (reply_msg is None).
        NOTE(review): _WkSvrHelper presumably unregisters the client on
        exit — confirm against its definition.
        """
        with _WkSvrHelper(wk_inst=self, client_key=me):
            while True:
                reply_msg = self._parse_socket_msg(
                    conn=conn, ignore_socket_timeout=True
                )
                if reply_msg is None:
                    return
                cmd = reply_msg.queue_params_object.cmd
                args = reply_msg.queue_params_object.args
                data = reply_msg.queue_params_object.data
                # Instruction for cmd and data interaction:
                # 1. if only queue_cmd, just send WukongPkg(QUEUE_OK)
                # 2. if there's arg or data besides queue_cmd, use
                #    wrap_queue_msg(queue_cmd=QUEUE_CMD, arg={}, data=b'')
                #
                # Communicate with client normally
                #
                # GET: pop an item, or report the queue is empty.
                if cmd == QUEUE_GET:
                    try:
                        item = self.get(
                            block=args["block"], timeout=args["timeout"]
                        )
                    except Empty:
                        conn.write(QUEUE_EMPTY)
                    else:
                        conn.write(
                            wrap_queue_msg(queue_cmd=QUEUE_DATA, data=item)
                        )
                # PUT: append the payload, or report the queue is full.
                elif cmd == QUEUE_PUT:
                    try:
                        self.put(
                            data, block=args["block"], timeout=args["timeout"]
                        )
                    except Full:
                        conn.write(QUEUE_FULL)
                    else:
                        conn.write(QUEUE_OK)
                # STATUS QUERY
                elif cmd == QUEUE_QUERY_STATUS:
                    # FULL | EMPTY | NORMAL
                    if self.full():
                        conn.write(QUEUE_FULL)
                    elif self.empty():
                        conn.write(QUEUE_EMPTY)
                    else:
                        conn.write(QUEUE_NORMAL)
                # PING -> PONG
                elif cmd == QUEUE_PING:
                    conn.write(QUEUE_PONG)
                # QSIZE
                elif cmd == QUEUE_SIZE:
                    conn.write(
                        wrap_queue_msg(queue_cmd=QUEUE_DATA, data=self.qsize())
                    )
                # MAXSIZE
                elif cmd == QUEUE_MAXSIZE:
                    conn.write(
                        wrap_queue_msg(queue_cmd=QUEUE_DATA, data=self.maxsize)
                    )
                # RESET
                elif cmd == QUEUE_RESET:
                    self.reset(args["maxsize"])
                    conn.write(QUEUE_OK)
                # CLIENTS NUMBER
                elif cmd == QUEUE_CLIENTS:
                    with self._statistic_lock:
                        clients = len(self.client_stats.keys())
                    conn.write(
                        wrap_queue_msg(queue_cmd=QUEUE_DATA, data=clients)
                    )
                # TASK_DONE: forward any ValueError back to the client.
                elif cmd == QUEUE_TASK_DONE:
                    reply = {"cmd": QUEUE_OK, "err": ""}
                    try:
                        self.task_done()
                    except ValueError as e:
                        reply["cmd"] = QUEUE_FAIL
                        reply["err"] = e
                    conn.write(
                        wrap_queue_msg(
                            queue_cmd=reply["cmd"], exception=reply["err"]
                        )
                    )
                # JOIN: blocks this handler thread until all tasks are done.
                elif cmd == QUEUE_JOIN:
                    self.join()
                    conn.write(QUEUE_OK)
                else:
                    raise UnknownCmd(cmd)
|
from dataclasses import dataclass
class Hero:
    """A comic-book hero with normalized attributes.

    String fields are capitalized on construction; height/weight are
    stored as floats and strength as an int.
    """
    name: str
    identity: str
    company: str
    height: float
    weight: float
    gender: str
    eyes_color: str
    hair_color: str
    strength: int
    intelligence: str

    def __init__(self, name, identity, company, height, weight, gender, eyes_color, hair_color, strength, intelligence):
        self.name = name.capitalize()
        self.identity = identity.capitalize()
        self.company = company.capitalize()
        self.height = float(height)
        self.weight = float(weight)
        self.gender = gender.capitalize()
        self.eyes_color = eyes_color.capitalize()
        self.hair_color = hair_color.capitalize()
        self.strength = int(strength)
        self.intelligence = intelligence.capitalize()

    def __str__(self, hide_identity=True):
        """Human-readable summary; the identity is masked by default."""
        # BUG FIX (idiom): replaced the fragile `cond and a or b` trick
        # with an explicit conditional expression.
        shown_identity = "secret" if hide_identity else self.identity
        return (f'Name: {self.name}, identity: {shown_identity}, '
                f'company: {self.company}, height: {self.height}cm, weight: {self.weight}kg, '
                f'gender: {self.gender}, eyes color: {self.eyes_color}, hair color: {self.hair_color}, '
                f'strength: {self.strength}, intelligence: {self.intelligence}')

    def __getitem__(self, key):
        """Allow attribute access by subscript, e.g. hero['strength']."""
        return getattr(self, key)

    def to_dict(self):
        """Return all attributes as a plain dict."""
        return {
            'name': self.name,
            'identity': self.identity,
            'company': self.company,
            'height': self.height,
            'weight': self.weight,
            'gender': self.gender,
            'eyes_color': self.eyes_color,
            'hair_color': self.hair_color,
            'strength': self.strength,
            'intelligence': self.intelligence
        }

    def get_name(self, hide_identity=True):
        """Return the hero's name, optionally followed by '| identity'."""
        return f'{self.name} {("" if hide_identity else ("| " + self.identity))}'

    def get_name_and_attr(self, attr, hide_identity=True):
        """Return the (optionally unmasked) name plus one attribute value."""
        return f'{self.get_name(hide_identity)}| {attr}: {self[attr]}'
|
"""
lesson 1 : create the scaler
"""
__all__ = [
'scaler',
# 'undulator',
]
from ...session_logs import logger
logger.info(__file__)
from apstools.devices import use_EPICS_scaler_channels
from ophyd.scaler import ScalerCH
scaler = ScalerCH("sky:scaler1", name="scaler")
scaler.wait_for_connection()
scaler.match_names()
use_EPICS_scaler_channels(scaler)
|
#Usage:
#The old and the new password may be any length; every occurrence of the
#old password in the files below is replaced with the new one.
#python proxy_set.py 'old_pswd' 'new_pswd'
import sys
import time
import os
#Below are the files that contain proxy settings
# Files that contain proxy settings.
filename = ['/etc/wgetrc', '/etc/apt/apt.conf.d/apt.conf', '/etc/environment']

for conf_path in filename:
    # BUG FIX: the original never closed any of its file handles; use
    # context managers so content is flushed and descriptors released.
    with open(conf_path, 'r') as src:
        content = src.read()

    # Write a timestamped backup before touching the original file.
    back_file = conf_path + time.strftime("_%Y%m%d-%Hh%M")
    with open(back_file, 'w') as backup:
        backup.write(content)

    # Count and report occurrences of the old password (sys.argv[1]).
    # Generalized: the original slice-scan only worked for passwords of
    # exactly 10 characters; str.find handles any length (and, like the
    # original scan, counts overlapping occurrences).
    appear = 0
    start = 0
    while True:
        idx = content.find(sys.argv[1], start)
        if idx < 0:
            break
        appear += 1
        print('Found in', conf_path, appear)
        start = idx + 1

    # Rewrite the file with every old password replaced by the new one.
    with open(conf_path, 'w') as dst:
        dst.write(content.replace(sys.argv[1], sys.argv[2]))

# NOTE: 'export' runs in a child shell and cannot change this process's
# (or the caller's) environment, so the variable must be set manually.
# SECURITY: this command embeds hard-coded credentials; move them out of
# the source and build the value from the script arguments instead.
set_pro = 'export http_proxy=http://u157740:Daqwpmnb27@10.38.253.17:8080'
os.system(set_pro)
#mv_comm = 'mv wgetrc_* apt.conf_* environment_* backup/'
#os.system(mv_comm)
|
import requests
from bs4 import BeautifulSoup
HEADERS = {'User-Agent': 'Mozilla/5.0'}
DOMAIN = "http://synergy-journal.ru/"
url = "http://synergy-journal.ru/archive/10"
def parse_articles():
    """Fetch the archive page at *url* and return the article hrefs.

    An article is a <div class="r"> whose data-record-type attribute is
    "374"; the href of its first <a> is collected.
    """
    # BUG FIX: the original referenced an undefined name `bs` (the page
    # was never fetched/parsed) and returned nothing.
    response = requests.get(url)
    bs = BeautifulSoup(response.text, "html.parser")
    articles = [x for x in bs.find_all("div", "r")
                if x.get("data-record-type") == "374"]
    return [x.find("a")["href"] for x in articles]


def main():
    """Entry point: print every article link found in the archive."""
    for href in parse_articles():
        print(href)


if __name__ == "__main__":
    # BUG FIX: main() was called but never defined.
    main()
import requests
import hashlib
import imgcodeidentify
from PIL import Image
from aip import AipOcr
import re
import optparse
def get_class(username,password,year,term,flag,path):
    """Log in to the university portal and save the score image to *path*.

    Retries the captcha/login cycle until the site accepts the
    credentials, then downloads the rendered score sheet for the given
    year/term and writes it to *path* as a JPEG.
    """
    header = {
        "Host":"bkjw.sxu.edu.cn",
        "Origin":"http://bkjw.sxu.edu.cn",
        "Content-Type":"application/x-www-form-urlencoded",
        "Referer":"http://bkjw.sxu.edu.cn/_data/login.aspx",
        "Upgrade-Insecure-Requests":"1",
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
    }
    host = "http://bkjw.sxu.edu.cn/sys/ValidateCode.aspx"
    login_url = "http://bkjw.sxu.edu.cn/_data/login.aspx"
    score_url = "http://bkjw.sxu.edu.cn/xscj/Stu_MyScore.aspx"
    while True:
        # Fetch a fresh captcha image for this login attempt.
        s = requests.Session()
        r = s.get(host,headers=header)
        with open("imgCode.jpg",'wb+') as w:
            w.write(r.content)
        print("[*]:已经获取验证码")
        yzm = get_code()
        # The site expects the password double-MD5'd with the magic
        # suffix "10108", truncated/uppercased to 30 hex chars.
        h1 = hashlib.md5()
        h1.update(password.encode(encoding='utf-8'))
        hex_password = h1.hexdigest()
        temp_pwd = username+hex_password[:30].upper()+"10108"
        h2 = hashlib.md5()
        h2.update(temp_pwd.encode(encoding='utf-8'))
        hex_temp = h2.hexdigest()
        dsdsdsdsdxcxdfgfg = hex_temp[:30].upper() # password field
        txt_asmcdefsddsd = username # username field
        # The captcha answer goes through the same double-MD5 scheme.
        h3 = hashlib.md5()
        h3.update(yzm.upper().encode(encoding='utf-8'))
        hex_temp_yzm = h3.hexdigest()[:30].upper()+'10108'
        h4 = hashlib.md5()
        h4.update(hex_temp_yzm.encode(encoding='utf-8'))
        fgfggfdgtyuuyyuuckjg = h4.hexdigest()[:30].upper() # captcha field
        # Fixed ASP.NET form state values captured from the login page.
        __VIEWSTATE = "/wEPDwULLTE4ODAwNjU4NjBkZA=="
        __EVENTVALIDATION = "/wEWAgLnybi8BAKZwe+vBg=="
        pcInfo = "Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64;+rv:61.0)+Gecko/20100101+Firefox/61.0Windows+NT+10.0;+Win64;+x645.0+(Windows)+SN:NULL"
        Sel_Type = "STU"
        typeName = "学生"
        values = {}
        values["__VIEWSTATE"] = __VIEWSTATE
        values["__EVENTVALIDATION"] = __EVENTVALIDATION
        values["dsdsdsdsdxcxdfgfg"] = dsdsdsdsdxcxdfgfg
        values["fgfggfdgtyuuyyuuckjg"] = fgfggfdgtyuuyyuuckjg
        values["pcInfo"] = pcInfo
        values["Sel_Type"] = Sel_Type
        values["txt_asmcdefsddsd"] = txt_asmcdefsddsd
        values["txt_pewerwedsdfsdff"] = ""
        values["txt_sdertfgsadscxcadsads"] = ""
        values["typeName"] = typeName
        print("[*]:正在尝试登录")
        t = s.post(login_url,data=values,headers=header)
        # Retry on "登录失败" (login failed) — usually a bad captcha read.
        if "登录失败" in t.text:
            print("[*]:登录失败,马上重新尝试")
            continue
        else:
            print("[*]:登录成功")
            break
    # Download the rendered score image for the requested year/term.
    r = s.get("http://bkjw.sxu.edu.cn/xscj/Stu_MyScore_Drawimg.aspx?x=1&h=2&w=782&xnxq="+str(year)+str(term)+"&xn="+str(year)+"&xq="+str(term)+"&rpt=1&rad=2&zfx="+str(flag),headers=header)
    with open(path,"wb") as jpg:
        jpg.write(r.content)
    print("[*]:成绩图像保存成功")
def get_code():
    """OCR the saved captcha (imgCode.jpg) and return the recognized code.

    Repeatedly cleans the image and queries the Baidu OCR service;
    returns '' when no recognition ever succeeds.
    """
    # SECURITY NOTE: API credentials are hard-coded here; move them to
    # configuration or environment variables.
    APP_ID = '11519354'
    API_KEY = 'tLlZhgC4kwx8ArqEhBXzCvRw'
    SECRET_KEY = 'GnpZ0XXBFgZXz8v0aYTGIMhHRMmlRKSd'
    client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
    print("[*]:正在处理验证码图片")
    code = ""
    # NOTE(review): the loop always runs 30 times, even after a successful
    # read, and the last successful result wins — confirm this is intended.
    for j in range(30):
        # Denoise the captcha: threshold, then strip interference lines
        # and isolated points.
        imgcodeidentify.deal_img("imgCode.jpg")
        imgcodeidentify.interference_line(imgcodeidentify.deal_img("imgCode.jpg"),"imgCode.jpg")
        imgcodeidentify.interference_point(imgcodeidentify.interference_line(imgcodeidentify.deal_img("imgCode.jpg"),"imgCode.jpg"),"imgCode.jpg")
        try:
            print("[*]:正在识别验证码")
            code = client.basicGeneral(get_img_content("imgCode.jpg"))["words_result"][0]["words"]
            code = re.findall('[a-zA-Z0-9]*',code)[0]
            print("[*]:已经得到验证码" + code)
        except Exception:
            # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; best-effort retry should only hide ordinary errors.
            pass
    return code
def get_img_content(path):
    """Read and return the raw bytes of the file at *path*."""
    with open(path, 'rb') as fh:
        return fh.read()
# Command-line interface: credentials, term selection, score type flag,
# and the output path for the downloaded score image.
opt = optparse.OptionParser()
opt.add_option('-u', dest='username', type='string')
opt.add_option('-p', dest='password', type='string')
opt.add_option('-y', dest='year', type='string')
opt.add_option('-t', dest='term', type='string')
opt.add_option('-f', dest='format', type='string')
opt.add_option('-P', dest='jpg_path', type='string')
(options, args) = opt.parse_args()

username = options.username
password = options.password
year = options.year
term = options.term
# Renamed local from 'format' so the builtin of the same name is not shadowed.
fmt = options.format
path = options.jpg_path

get_class(username, password, year, term, fmt, path)
|
from django.db import models
# Create your models here.
class sosmed(models.Model):
    """A social-media style post with an uploaded media file."""

    judul = models.CharField(max_length=255)
    postby = models.CharField(max_length=255)
    lokasi = models.CharField(max_length=255)
    body = models.TextField()
    category = models.CharField(max_length=255)
    media = models.FileField(upload_to='post/')
    likes = models.CharField(max_length=255)
    slug = models.SlugField()

    def __str__(self):
        """Identify a post by its primary key and title."""
        return f'{self.id}, {self.judul}'
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import numpy as np
"""
Predict the next day closing price
"""
def predict_one_ahead(model, data):
prediction = model.predict(data)
prediction = prediction.reshape((prediction.size,))
return prediction
"""
Predict entire sequence by starting with first window, making a prediction,
shifting the window over by 1, and appending that prediction to the end of next
window.
"""
def predict_entire_sequence(model, data, window_size):
# Start with the first window
curr_window = data[0]
predictions = []
for _ in range(len(data)):
# Predict the next day
predictions.append(model.predict(curr_window[np.newaxis,:,:])[0,0])
# Shift window over by 1 (with the next day prediction appended)
curr_window = curr_window[1:]
curr_window = np.insert(curr_window, [window_size-1], predictions[-1], axis=0)
return predictions
"""
Predict sequences by same process as predict_entire_sequence, except only for
prediction_length days. After prediction_length days, use the test set window and
repeat. This is mainly for tendency predictions (generally trending up, or generally
trending down).
"""
def predict_in_sequences(model, data, window_size, prediction_length):
prediction_sequences = []
# Partition the test set into groups of prediction_len size
for i in range(len(data)/prediction_length):
curr_window = data[i*prediction_length] # Starting window
predictions = []
for _ in range(prediction_length):
predictions.append(model.predict(curr_window[np.newaxis,:,:])[0,0]) # Next day prediction
# Shift window over by 1 (with next day prediction appended)
curr_window = curr_window[1:]
curr_window = np.insert(curr_window, [window_size-1], predictions[-1], axis=0)
prediction_sequences.append(predictions)
return prediction_sequences
|
import PySide.QtCore as qc
import PySide.QtGui as qg
from PySide.QtGui import QPen, QColor, QBrush, QLinearGradient
try:
import maya.utils as utils
except:
pass
#-------------------------------------------------------------#
class CustomSlider(qg.QSlider):
    """Horizontal QSlider with custom painting.

    Draws a rounded dark track with a gradient handle, a green glow that
    fades in/out on hover, and short-lived glow 'trails' at recently
    visited values while the handle is dragged.
    """
    # Shared pens/gradients for the track and handle.
    _pen_dark = qg.QPen(qg.QColor(0 , 1, 3),1, qc.Qt.SolidLine)
    _pen_bright = qg.QPen(qg.QColor(25,26,29),1.5, qc.Qt.SolidLine)
    _gradient_inner = qg.QLinearGradient(0,9,0,15)
    _gradient_inner.setColorAt(0,qg.QColor(69,73,66))
    _gradient_inner.setColorAt(1,qg.QColor(17,18,20))
    _gradient_outer = qg.QLinearGradient(0,9,0,15)
    _gradient_outer.setColorAt(0,qg.QColor(53,57,60))
    _gradient_outer.setColorAt(1,qg.QColor(33,34,36))
    # Precomputed glow brushes: index 1..10 is the glow intensity; each
    # entry holds brushes for the concentric glow rings plus the trail.
    _glowBrushes = {}
    for index in range(1, 11):
        _glowBrushes[index] = [QBrush(QColor(0, 255, 0, 1 * index)),
                               QBrush(QColor(0, 255, 0, 3 * index)),
                               QBrush(QColor(0, 255, 0, 15 * index)),
                               QBrush(QColor(0, 255, 0, 25.5 * index)),
                               QBrush(QColor(125, 200, 100, 15 * index))]
    _pen_Shadow = QPen(QColor(9, 10, 12), 1, qc.Qt.SolidLine)
    _pen_clear = QPen(QColor(0, 0, 0, 0), 1, qc.Qt.SolidLine)
    _brushClear = QBrush(QColor(0, 0, 0, 0))
    _brushBorder = QBrush(QColor(9, 10, 12))

    def __init__(self,*args,**kwargs):
        qg.QSlider.__init__(self,*args,**kwargs)
        self.setOrientation(qc.Qt.Horizontal)
        self.setFixedHeight(22)
        self.setMinimumWidth(50)
        # Hover-glow animation state.
        self.hover = False
        self._glow_index = 0
        self._anim_timer = qc.QTimer()
        self._anim_timer.timeout.connect(self._animateGlow)
        # Trail-tracking state: one fade counter per slider value.
        self._track = False
        self._tracking_points = {}
        self._anim_followTimer = qc.QTimer()
        self._glow_index2 = 0
        self._anim_followTimer.timeout.connect(self._animateGlow2)
        self._anim_followTimer.timeout.connect(self._removeTrackingPoints)
        self.valueChanged.connect(self._trackChanges)

    def setRange(self,*args,**kwargs):
        """Forward to QSlider and resize the trail-tracking buffer."""
        qg.QSlider.setRange(self,*args,**kwargs)
        self._updateTracking()

    def setMinimum(self,*args,**kwargs):
        """Forward to QSlider and resize the trail-tracking buffer."""
        qg.QSlider.setMinimum(self,*args,**kwargs)
        self._updateTracking()

    def setMaximum(self,*args,**kwargs):
        """Forward to QSlider and resize the trail-tracking buffer."""
        qg.QSlider.setMaximum(self,*args,**kwargs)
        self._updateTracking()

    def _updateTracking(self):
        # One fade counter per representable value in the current range.
        self._tracking_points = [0] * (abs(self.maximum() - self.minimum()) + 1)

    def setValue(self,*args,**kwargs):
        """Forward to QSlider and clear all pending glow trails."""
        qg.QSlider.setValue(self,*args,**kwargs)
        for index in range(len(self._tracking_points)):
            self._tracking_points[index] = 0
    #-----------------------------------------------------------------------------------------------#
    def mouseMoveEvent(self,event):
        # While dragging, run the follow timer that fades the trails.
        qg.QSlider.mouseMoveEvent(self,event)
        if self._anim_followTimer.isActive():
            return
        self._anim_followTimer.start(20)
    #-----------------------------------------------------------------------------------------------#
    def _trackChanges(self,value):
        # Record a fresh (full-intensity) trail at the visited value.
        value = value - self.minimum()
        self._tracking_points[value] = 10
    #-----------------------------------------------------------------------------------------------#
    def _removeTrackingPoints(self):
        # Fade every active trail by one step; stop the timer once the
        # last one reaches zero.
        self._track = False
        for index , value in enumerate(self._tracking_points):
            if value > 0:
                self._tracking_points[index] -= 1
                if value == 0:
                    self._anim_followTimer.stop()
                self._track = True
        if self._track is False:
            self._anim_followTimer.stop()
    #-----------------------------------------------------------------------------------------------#
    def _animateGlow2(self):
        # Secondary glow counter used while following the mouse; clamps
        # at 10 then counts back down to zero.
        if self._glow_index2 >= 10:
            self._glow_index2 = 10
            while self._glow_index2 > 0:
                self._glow_index2 -= 1
                if self._glow_index2 == 0:
                    self._anim_followTimer.stop()
        self.update()
    #-----------------------------------------------------------------------------------------------#
    def _animateGlow(self):
        # Ramp the hover glow up towards 8-10 while hovered, and back
        # down to 0 after the cursor leaves; repaint via the Maya-safe
        # deferred call.
        if self.hover:
            if self._glow_index >= 10:
                self._glow_index = 10
                while self._glow_index > 8:
                    self._glow_index -= 0.25
                    if self._glow_index == 8:
                        self._anim_timer.stop()
            else:
                self._glow_index += 1
        else:
            if self._glow_index <= 0:
                self._glow_index = 0
                self._anim_timer.stop()
            else:
                self._glow_index -= 1
        #print self._glow_index
        utils.executeDeferred(self.update)
    #-----------------------------------------------------------------------------------------------#
    def enterEvent(self, event):
        """Start the glow fade-in when the cursor enters the widget."""
        if not self.isEnabled():
            return
        self.hover = True
        self._startAnim()

    def leaveEvent(self, event):
        """Start the glow fade-out when the cursor leaves the widget."""
        if not self.isEnabled():
            return
        self.hover = False
        self._startAnim()

    def _startAnim(self):
        # Kick the ~50 fps animation timer if it is not already running.
        if self._anim_timer.isActive():
            return
        self._anim_timer.start(20)
    #-----------------------------------------------------------------------------------------------------#
    def paintEvent(self,event):
        """Custom-paint the track, trails, hover glow and handle."""
        painter = qg.QPainter(self)
        option = qg.QStyleOption()
        option.initFrom(self)
        x = option.rect.x()
        y = option.rect.y()
        height = option.rect.height() - 1
        width = option.rect.width() - 1
        painter.setRenderHint(qg.QPainter.Antialiasing)
        painter.setRenderHint(qg.QPainter.TextAntialiasing)
        # Rounded dark border/background.
        painter.setPen(self._pen_Shadow)
        painter.setBrush(self._brushBorder)
        painter.drawRoundedRect(qc.QRect(x+1,y+1,width-1,height-1),10,10)
        # Two-tone groove line across the middle.
        mid_height = (height / 2) + 1
        painter.setPen(self._pen_dark)
        painter.drawLine(10, mid_height,width-8,mid_height)
        painter.setRenderHint(qg.QPainter.Antialiasing,False)
        painter.setPen(self._pen_bright)
        painter.drawLine(10, mid_height,width-8,mid_height)
        painter.setRenderHint(qg.QPainter.Antialiasing,True)
        # Map the current value to a pixel position along the groove.
        minimum = self.minimum()
        maximum = self.maximum()
        value_range = maximum - minimum
        value = self.value() - minimum
        increment = ((width - 20) / float(value_range))
        center = 10 + (increment * value)
        center_point = qc.QPoint(x + center, y + mid_height)
        painter.setPen(self._pen_clear)
        glowIndex = self._glow_index
        glowBrushes = self._glowBrushes
        # Fading trails at recently visited values.
        if self._track is True:
            for index, track_value in enumerate(self._tracking_points):
                if track_value == 0:
                    continue
                track_center = 10 + (increment * index)
                painter.setBrush(glowBrushes[track_value][4])
                painter.drawEllipse(qc.QPoint(track_center,mid_height),7,7)
        # Concentric hover-glow rings around the handle.
        if glowIndex > 0:
            for index, size in zip(range(4),range(13,8,-1)):
                painter.setBrush(glowBrushes[glowIndex][index])
                painter.drawEllipse(center_point,size,size)
        # The handle itself: outer then inner gradient circles.
        painter.setBrush(qg.QBrush(self._gradient_outer))
        painter.drawEllipse(center_point,8,8)
        painter.setBrush(qg.QBrush(self._gradient_inner))
        painter.drawEllipse(center_point,7,7)
    #-----------------------------------------------------------------------------------------------#
#-----------------------------------------------------------------------------------------------#
|
'''
20180126 jlhung v1.0
'''
c = 0
while True:
try :
n = input()
except EOFError:
break
a = []
for i in n:
if i == "\"":
if c == 0:
a.append("``")
c = 1
else:
a.append("''")
c = 0
else:
a.append(i)
print("".join(a))
|
s = {"name":"cherry","idno":101,"class":10,"marks":[90,89,42,35,77,82]}
print(s.items())
print(s.keys())
print(s.values())
print("------find total marks-----------")
print(len(s["marks"]))
print(sum(s["marks"]))
print("total=",sum(s["marks"])/(len(s["marks"])))
print("---------find pass or fail-------")
for x in range(len(s["marks"])):
if x >= 45:
print(x,"(pass)")
else:
print(x,"(fail)") |
#! /usr/bin/env python
#
def spiral_gnuplot ( header, n, x, y, u, v, s ):

#*****************************************************************************80
#
## SPIRAL_GNUPLOT writes the spiral vector field to files for GNUPLOT.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    20 January 2015
#
#  Author:
#
#    John Burkardt
#
#  Parameters:
#
#    Input, string HEADER, a header to be used to name the files.
#
#    Input, integer N, the number of evaluation points.
#
#    Input, real X(N), Y(N), the coordinates of the evaluation points.
#
#    Input, real U(N), V(N), the velocity components.
#
#    Input, real S, a scale factor for the velocity vectors.
#
#
#  Write the data file: one " x y s*u s*v" row per evaluation point.
#
  data_filename = header + '_data.txt'
  data_unit = open ( data_filename, 'w' )
  for i in range ( 0, n ):
    data_unit.write ( ' %g' % ( x[i] ) )
    data_unit.write ( ' %g' % ( y[i] ) )
    data_unit.write ( ' %g' % ( s * u[i] ) )
    data_unit.write ( ' %g' % ( s * v[i] ) )
    data_unit.write ( '\n' )
  data_unit.close ( )
#
#  print() calls keep this file runnable under both Python 2 and 3.
#
  print ( '' )
  print ( ' Data written to "%s".' % ( data_filename ) )
#
#  Write the command file.
#
  command_filename = header + '_commands.txt'
  plot_filename = header + '.png'
  command_unit = open ( command_filename, 'w' )
  command_unit.write ( '# %s\n' % ( command_filename ) )
  command_unit.write ( '#\n' )
  command_unit.write ( 'set term png\n' )
  command_unit.write ( 'set output "%s"\n' % ( plot_filename ) )
  command_unit.write ( '#\n' )
  command_unit.write ( '# Add titles and labels.\n' )
  command_unit.write ( '#\n' )
  command_unit.write ( 'set xlabel "<--- X --->"\n' )
  command_unit.write ( 'set ylabel "<--- Y --->"\n' )
  command_unit.write ( 'set title "Spiral velocity flow"\n' )
  command_unit.write ( 'unset key\n' )
  command_unit.write ( '#\n' )
  command_unit.write ( '# Add grid lines.\n' )
  command_unit.write ( '#\n' )
  command_unit.write ( 'set grid\n' )
  command_unit.write ( 'set size ratio -1\n' )
  command_unit.write ( '#\n' )
  command_unit.write ( '# Timestamp the plot.\n' )
  command_unit.write ( '#\n' )
  command_unit.write ( 'set timestamp\n' )
  command_unit.write ( 'plot "%s" using 1:2:3:4 with vectors \\\n' % ( data_filename ) )
  command_unit.write ( '  head filled lt 2 linecolor rgb "blue"\n' )
  command_unit.write ( 'quit\n' )
#
#  BUG FIX: the original closed data_unit a second time here and never
#  closed command_unit, leaving its buffered output potentially unflushed.
#
  command_unit.close ( )
  print ( ' Commands written to "%s".' % ( command_filename ) )

  return
def spiral_gnuplot_test ( ):

#*****************************************************************************80
#
## SPIRAL_GNUPLOT_TEST generates a field on a regular grid and plots it.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    20 January 2015
#
#  Author:
#
#    John Burkardt
#
  from grid_2d import grid_2d
  from uv_spiral import uv_spiral
#
#  print() calls keep this file runnable under both Python 2 and 3.
#
  print ( '' )
  print ( 'SPIRAL_GNUPLOT_TEST:' )
  print ( '  Generate a spiral velocity field on a regular grid.' )
  print ( '  Store in GNUPLOT data and command files.' )
#
#  Evaluate the field on a 21x21 unit-square grid.
#
  x_lo = 0.0
  x_hi = 1.0
  x_num = 21
  y_lo = 0.0
  y_hi = 1.0
  y_num = 21
  [ x, y ] = grid_2d ( x_num, x_lo, x_hi, y_num, y_lo, y_hi )
  n = x_num * y_num
  c = 1.0
  [ u, v ] = uv_spiral ( n, x, y, c )
#
#  Write the data and command files, scaling the vectors by 0.05.
#
  header = 'spiral'
  s = 0.05
  spiral_gnuplot ( header, n, x, y, u, v, s )

  print ( '' )
  print ( 'SPIRAL_GNUPLOT_TEST:' )
  print ( '  Normal end of execution.' )
  return
if ( __name__ == '__main__' ):
  from timestamp import timestamp
  # Timestamp, run the demonstration, and timestamp again.
  timestamp ( )
  spiral_gnuplot_test ( )
  timestamp ( )
|
class Member:
    """One participant in a splitwise group.

    Tracks the member's percentage share, what they personally spent,
    and their share of the group's common spending.
    """
    def __init__(self,percentage):
        self.percentage_share=percentage
        self.spent=0
        self.common_spent=0

    @property
    def debit_pending(self):
        # BUG FIX: this was computed once in __init__ (always 0) and never
        # updated afterwards, so details_printer always showed 0; as a
        # property it always reflects the current spent/common_spent.
        return self.spent-self.common_spent
class splitwise:
    """Interactive expense-sharing group.

    Members live in dict_person keyed by 1-based user id; all amounts
    are gathered through input() prompts on the console.
    """
    def __init__(self,group):
        self.total_member=0
        self.group=group
        self.dict_person=dict()
        print(f"Group : {group} is created")
    def add_member(self,person):
        # Prompt for and register *person* additional members, each with
        # their own percentage share.
        print("Enter percentage share of user like (55)")
        for i in range(self.total_member,self.total_member+person):
            p=int(input(f"Enter percentage share of user {i+1}:"))
            self.dict_person[i+1]=Member(p)
        self.total_member +=person
    def distribute(self):
        # Ask one user for an amount spent, then split it evenly across
        # every member's common_spent (unknown ids are ignored).
        u=int(input("Enter user id to spent money :"))
        if u in self.dict_person.keys():
            amount=int(input("Amount of money you spent :"))
            self.dict_person[u].spent +=amount
            distribution=(amount/len(self.dict_person))
            for i in range(len(self.dict_person)):
                self.dict_person[i+1].common_spent +=distribution
    def details_printer(self):
        # Print spending details for every member in id order.
        print("\n\n\n")
        for i in range(len(self.dict_person)):
            print(f"\t\tPerson {i+1}\n")
            print(f"Amount You spent {self.dict_person[i+1].spent}")
            print(f"Amount You in common {self.dict_person[i+1].common_spent}")
            print(f"Amount You have to pay {self.dict_person[i+1].debit_pending}")
            print("\n\n")
print("\n\n")
if __name__ == "__main__":
print("\n\n")
print("\t\t\tWelcome to splitwise\n\n")
print("1.Create Group ")
print("2.Exit ")
o=int(input("Enter Your Options(1,2): "))
if(o == 1):
n=input("Enter your Group Name : ")
p=int(input("Enter no of person in Group : "))
gp1=splitwise(n)
gp1.add_member(p)
while 1:
i=int(input("Enter 1 to spent money \nEnter 2 : To print details of money \n"))
if i==1:
gp1.distribute()
elif i==2 :
gp1.details_printer()
else:
break
|
import pygame
from constants import *
from pygame.locals import *
from inventaire import Inventory
class Player(pygame.sprite.Sprite):
"""docstring for Player"""
def __init__(self, name="Coloro",x=0,y=0, image="img/player/player.png"):
super().__init__()
self.name = name
self.x = x
self.y = y
self.image = pygame.image.load(image)
self.image_temp = pygame.image.load(image)
self.rect = self.image.get_rect()
self.rect.x = self.x
self.rect.y = self.y
self.inventaire = Inventory(64)
self.money = 0.5
self.initStats()
def initStats(self):
# Statistiques du joueur
self.HP_MAX = PLAYER_HP
self.HP = self.HP_MAX
self.DMG = PLAYER_DMG
self.tps = PLAYER_TPS # Tire par seconde
self.cooldown = FPS // self.tps
self.cooldown_max = FPS // self.tps
self.shot_speed = PLAYER_SHOOT_SPEED
self.speed = PLAYER_SPEED
self.colorbuff= GRAY
#cd
self.healed=0
self.healCD=HEAL_COOLDOWN
#bonus
self.HP_MAX_bonus = 0
self.DMG_bonus = 0
self.tps_bonus = 0 # Tire par seconde
self.shot_speed_bonus = 0
self.speed_bonus = 0
# Gestion de l'invincibilité apres avoir recus un coup
self.get_hit = False
self.invicibility_frame = 120
self.curent_invicibility_frame = self.invicibility_frame
self.filter_on = False
# Gestion salle
self.current_room_id = 0
def updateStats(self,initPartie=False):
#on stocke les effets des items équipés
effetsItems={"hp":0,"dmg":0,"tps":0,"speed":0,"shot_speed":0}
for item in self.inventaire.equipement.values():
if item!=False:
for stat in item.stats:
effetsItems[stat]+=item.stats[stat]
#on met a jour les stats en prenant en compte les bonus
self.HP_MAX = PLAYER_HP + self.HP_MAX_bonus + effetsItems["hp"]
if initPartie:
self.HP = self.HP_MAX
self.DMG = PLAYER_DMG + self.DMG_bonus + effetsItems["dmg"]
self.tps = PLAYER_TPS + self.tps_bonus +effetsItems["tps"]
self.shot_speed = PLAYER_SHOOT_SPEED +self.shot_speed_bonus + effetsItems["shot_speed"]
self.speed = PLAYER_SPEED +self.speed_bonus + effetsItems["speed"]
#on cap si besoin
if self.HP_MAX >STATS_MAX["hp"]:
self.HP_MAX = STATS_MAX["hp"]
if self.DMG >STATS_MAX["dmg"]:
self.DMG =STATS_MAX["dmg"]
if self.tps >STATS_MAX["tps"]:
self.tps =STATS_MAX["tps"]
if self.shot_speed >STATS_MAX["shot_speed"]:
self.shot_speed =STATS_MAX["shot_speed"]
if self.speed >STATS_MAX["speed"]:
self.speed =STATS_MAX["speed"]
self.cooldown = FPS // self.tps
self.cooldown_max = FPS // self.tps
def getStatsDico(self):
return {"hp":self.HP_MAX,"dmg":self.DMG,"tps":self.tps,"speed":self.speed,"shot_speed":self.shot_speed}
def pos(self):
return(self.rect.x+0.5*self.rect.width,self.rect.y+0.5*self.rect.height)
def move(self,direction, walls):
if direction=="UP":
self.rect.y-=self.speed
elif direction=="DOWN":
self.rect.y+=self.speed
elif direction =="RIGHT":
self.rect.x+=self.speed
elif direction=="LEFT":
self.rect.x-=self.speed
block_hit_list = pygame.sprite.spritecollide(self, walls, False)
for block in block_hit_list:
# Si le joueur se déplace en direction d'un mur, cela
# met le coté du joueur qui touche le mur sur le coté
# du mur touché
if direction == "RIGHT":
self.rect.right = block.rect.left
elif direction == "LEFT":
self.rect.left = block.rect.right
elif direction == "DOWN":
self.rect.bottom = block.rect.top
elif direction == "UP":
self.rect.top = block.rect.bottom
def invicibility_after_getting_hit(self):
    """Blink the sprite during the post-hit invincibility window.

    While `get_hit` is set, counts `curent_invicibility_frame` down and toggles
    between the damaged and normal sprite every 10 frames; when the counter
    reaches zero, invincibility ends and the counter is reset.
    """
    if self.get_hit:
        self.curent_invicibility_frame -= 1
        # Toggle the damage overlay every 10 frames to create a blink effect.
        if self.curent_invicibility_frame % 10 == 0:
            self.filter_on = not self.filter_on
        if self.filter_on:
            # NOTE(review): the sprite image is re-loaded from disk every frame;
            # caching both surfaces once would avoid repeated file I/O.
            self.image = pygame.image.load("img/player/player_dmg.png")
        else:
            self.image = pygame.image.load("img/player/player.png")
        if self.curent_invicibility_frame <= 0:
            self.get_hit = False
            self.curent_invicibility_frame = self.invicibility_frame
def update(self):
    """Per-frame update: advance the shot cooldown and the hit-blink state."""
    self.cooldown += 1
    self.invicibility_after_getting_hit()
    # tps may have changed (items); keep the frame-based cooldown cap in sync.
    self.cooldown_max = FPS // self.tps
|
# Demonstrates case-sensitive string comparison and lexicographic ordering.
# country = 'Korea'
country = 'korea'
# Equality is case-sensitive: 'korea' != 'Korea'.
if country == 'Korea':
    print('한국입니다.')
if country != 'Korea':
    print('한국이 아닙니다.')
print('-'*15)
# Strings compare lexicographically by code point: 'k' > 'j'.
if 'korea' > 'japan':
    print('한국이 더 크다.')
if 'korea' < 'japan':
    print('일본이 더 크다')
print('-'*15)
# In ASCII, uppercase letters have smaller code points than lowercase ones.
print('Korea' > 'KoreA')
print('Korea' > 'KoReA')
from config import get_env
import requests
from bs4 import BeautifulSoup
import urllib.request
class Actions:
    """Slack-facing actions: scrape a web page for an image and post it to Slack."""

    def __init__(self, slackhelper):
        # Helper object exposing post_image(url), stored for later API calls.
        self.slackhelper = slackhelper

    def find_image(self, website, tag, image_details):
        """
        Grabs the website content and parses it using beautifulsoup. Searches
        through the soup using the passed in tag and image details. If the image
        is found it calls the slack post message API and returns a true json. Else,
        it returns a false json.

        image_details is a string like "class=foo bar" or "id=main-img".
        Raises ValueError if the attribute is neither "class" nor "id"
        (previously this fell through with `middle` unbound -> NameError).
        """
        response = requests.get(website)
        soup = BeautifulSoup(response.text, "html.parser")
        attribute, _, value = image_details.partition("=")
        if attribute == "class":
            middle = "."
        elif attribute == "id":
            middle = "#"
        else:
            raise ValueError("image_details must start with 'class=' or 'id='")
        selector = tag
        for single in value.split(" "):  # a class attribute may hold several classes
            selector += middle + single
        images = soup.select(selector)
        if len(images) > 0:  # make sure at least one image was found
            picture_url = images[0]['src']  # extract the src url (no second select)
            self.slackhelper.post_image(picture_url)  # call the slack api
            return {"image_found": True}
        else:
            return {"image_found": False}
from django.contrib import admin
from django.db import models
from .models import FacebookSession
from .forms import FacebookAccessInput
@admin.register(FacebookSession)
class FacebookAccessAdmin(admin.ModelAdmin):
    """Admin for FacebookSession: shows the shortened token and its validity."""
    list_display = ('short_token', 'is_valid',)
    # Render every CharField with the custom FacebookAccessInput widget.
    formfield_overrides = {
        models.CharField: {'widget': FacebookAccessInput},
    }
|
# Build a list with a for-expression: squares of the even numbers 0..18.
# A single-line construction like this is called a "list comprehension".
array = [i * i for i in range(0, 20, 2)]
print(array)
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from onmt.modules.dropout import variational_dropout
from .gaussian import Gaussian, ScaleMixtureGaussian
from .utils import flatten_list, unflatten
class PositionWiseFeedForward(nn.Module):
    """Bayesian position-wise feed-forward block (Transformer FFN).

    The weights and biases of the two linear projections are not fixed
    parameters: they are sampled from a variational posterior q(w)
    parameterised by (mu, rho), with a scale-mixture Gaussian prior
    (Bayes-by-Backprop style).  The class docstring previously said
    "Multi-headed attention", which does not match the code below.
    """
    def __init__(self, model_size, inner_size, dropout=0., variational=False, activation='relu'):
        super().__init__()
        self.model_size = model_size
        self.inner_size = inner_size
        self.dropout = dropout
        self.bias = True
        self.variational = variational
        self.activation = activation
        # two variables to record the (sum) of priors for all linear variables
        self.log_prior = 0
        self.log_variational_posterior = 0
        # Uninitialised tensors for the two projections' parameters
        # (mu = posterior mean, rho parameterises the posterior scale).
        in_proj_weight_mu = torch.Tensor(inner_size, model_size)
        in_proj_weight_rho = torch.Tensor(inner_size, model_size)
        out_proj_weight_mu = torch.Tensor(model_size, inner_size)
        out_proj_weight_rho = torch.Tensor(model_size, inner_size)
        in_proj_bias_mu = torch.Tensor(inner_size)
        in_proj_bias_rho = torch.Tensor(inner_size)
        out_proj_bias_mu = torch.Tensor(model_size)
        out_proj_bias_rho = torch.Tensor(model_size)
        # Flatten all parameters into two 1-D tensors; indices/shapes let
        # forward() unflatten samples back into the four tensors.
        mu, self.indices, self.shapes = \
            flatten_list([in_proj_weight_mu, out_proj_weight_mu, in_proj_bias_mu, out_proj_bias_mu])
        rho, _, _ = flatten_list([in_proj_weight_rho, out_proj_weight_rho, in_proj_bias_rho, out_proj_bias_rho])
        self.mu = Parameter(mu)
        self.rho = Parameter(rho)
        self.weight = Gaussian(self.mu, self.rho)
        self.weight_prior = ScaleMixtureGaussian()
        self.reset_parameters()
        try:
            from apex.mlp.mlp import mlp_function
            self.optimized = 2
            self.fast_mlp_func = mlp_function
        except ModuleNotFoundError as e:
            self.optimized = 2
        # NOTE(review): both branches above set optimized = 2, so the apex
        # fast path in forward() is never taken; presumably the try-branch
        # was meant to set 1 — confirm before changing.
    def reset_parameters(self):
        # Xavier-style std for mu; rho starts strongly negative (tiny sigma).
        std_ = math.sqrt(2.0 / (self.model_size + self.inner_size))
        nn.init.normal_(self.mu, 0.0, std_)
        nn.init.normal_(self.rho, -5, 0.1)
    def forward(self, input, sample=False, calculate_log_probs=False):
        """Sample weights from q(w), then apply linear -> relu -> dropout -> linear."""
        calculate_log_probs = calculate_log_probs or self.training
        sample = sample or self.training
        # (MCMC)
        # Sample the weights from the variational posterior distribution q(w)
        sampled_weights, log_variational_posterior = self.weight.sample(sample, calculate_log_probs)
        in_proj_weight, out_proj_weight, in_proj_bias, out_proj_bias = \
            unflatten(sampled_weights, self.indices, self.shapes)
        if self.optimized == 2 or not input.is_cuda:
            hidden = F.linear(input, in_proj_weight, in_proj_bias)
            hidden = F.relu(hidden, inplace=True)
            if self.variational:
                hidden = variational_dropout(hidden, p=self.dropout, training=self.training)
            else:
                hidden = F.dropout(hidden, p=self.dropout, training=self.training)
            hidden = F.linear(hidden, out_proj_weight, out_proj_bias)
        else:
            # Apex MLP does not support dropout so instead we use dropconnect
            # Theoretically they should be the same ^^
            weights = [in_proj_weight,
                       out_proj_weight]
            biases = [in_proj_bias,
                      out_proj_bias]
            seq_len, bsz, hidden_size = input.size(0), input.size(1), input.size(2)
            # True = bias, 1 = relu
            hidden = self.fast_mlp_func(True, 1, input.view(seq_len*bsz, -1), *weights, *biases)
            hidden = hidden.view(seq_len, bsz, hidden_size)
        if calculate_log_probs:
            # KL Divergence between prior and (variational) posterior
            self.log_variational_posterior = log_variational_posterior
            self.log_prior = self.weight_prior.log_prob(sampled_weights)
        return hidden
|
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify, render_template
# Reflect the existing sqlite schema into automapped ORM classes and set up Flask.
engine = create_engine("sqlite:///belly_button_biodiversity.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
session = Session(engine)
app = Flask(__name__, static_url_path='/static')
@app.route("/")
def welcome():
return render_template("index.html")
@app.route('/names')
# List of sample names.
def names():
    """Return every sample id, prefixed with "BB_", as JSON."""
    rows = engine.execute("SELECT SAMPLEID FROM samples_metadata;")
    names_dict = {"sample names": ["BB_" + str(row.SAMPLEID) for row in rows]}
    return jsonify([names_dict])
@app.route('/otu')
def descriptions():
    """Return the description of every operational taxonomic unit as JSON."""
    rows = engine.execute("SELECT lowest_taxonomic_unit_found FROM otu;")
    otu_desc_dict = {"OTU descriptions": [str(row.lowest_taxonomic_unit_found) for row in rows]}
    return jsonify([otu_desc_dict])
@app.route('/metadata/<sample>')
def sample(sample):
    """Return demographic metadata for one sample (URL form "BB_<id>") as JSON."""
    s = sample[3:]  # strip the "BB_" prefix
    # Parameterized query: prevents SQL injection and needs only ONE execution
    # (the previous version interpolated user input and re-ran the query per column).
    query = ("SELECT AGE, BBTYPE, ETHNICITY, GENDER, LOCATION, SAMPLEID "
             "FROM samples_metadata WHERE SAMPLEID = ?;")
    row = engine.execute(query, (s,)).fetchone()
    sample_dict = {"Age": row.AGE, "bbtype": row.BBTYPE, "Ethnicity": row.ETHNICITY,
                   "Gender": row.GENDER, "Location": row.LOCATION, "Sample ID": row.SAMPLEID}
    return jsonify([sample_dict])
@app.route('/wfreq/<sample>')
def washfreq(sample):
    """Return the wash frequency for one sample (URL form "BB_<id>") as JSON."""
    s = sample[3:]  # strip the "BB_" prefix
    # Parameterized query to prevent SQL injection.
    query = "SELECT WFREQ FROM samples_metadata WHERE SAMPLEID = ?;"
    row = engine.execute(query, (s,)).fetchone()
    return jsonify([{"Wash Frequency": row.WFREQ}])
@app.route('/samples/<sample>')
def sample_name(sample):
    """Return OTU ids and sample values for one sample column, sorted descending."""
    s = sample
    # A column name cannot be bound as a SQL parameter, so whitelist it to a
    # plain identifier before interpolating — prevents SQL injection.
    if not s.isidentifier():
        return jsonify({"error": "invalid sample name"}), 400
    query = "SELECT %s AS value , otu_id FROM samples ORDER BY %s DESC ;" % (s, s)
    rows = engine.execute(query).fetchall()  # fetch once instead of re-running the query
    sample_dict = {"otu_ids": [r.otu_id for r in rows],
                   "sample_values": [r.value for r in rows]}
    return jsonify([sample_dict])
if __name__ == "__main__":
app.run(debug=True)
|
#!/usr/bin/env python
#this program simply removes the same records in groundtruth
# Copy groundtruth.log to groundtruth.csv, dropping consecutive duplicate lines.
# Context managers guarantee both files are closed even if writing fails
# (the previous version left them open on error).
with open('groundtruth.log', 'r') as filein, open('groundtruth.csv', 'w+') as fileout:
    prev = ""
    for line in filein:
        # Only *consecutive* duplicates are removed, as before.
        if line != prev:
            fileout.write(line)
        prev = line
|
import discord
import random
from discord.ext import commands
from discord import FFmpegPCMAudio
from dotenv import load_dotenv
import os
load_dotenv(dotenv_path="config")
bot = commands.Bot(command_prefix="!")
@bot.event
async def on_ready():
    # Fired once the bot is connected and ready to receive commands.
    print("Le bot est prêt.")
@bot.command()
async def Bonjour(ctx):
    # Simple greeting command: "!Bonjour".
    await ctx.send("Bonjour !")
@bot.command()
async def l2mdp(ctx):
    """Join the caller's voice channel and play a random "2 minutes du peuple" track."""
    if ctx.author.voice:  # the author must be in a voice channel
        channel = ctx.author.voice.channel
        voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
        if voice is None or not voice.is_connected():
            vc = await channel.connect()
            voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
        nbAleatoire = random.randint(1, 5)
        # Raw string so the backslashes in the Windows path can never be
        # interpreted as escape sequences.
        chemin = r'Z:\Musiques\Les2minutesdupeuple Track 00' + str(nbAleatoire) + '.mp3'
        await ctx.send('**Now playing:** {}'.format(chemin))
        voice.play(discord.FFmpegPCMAudio(executable='ffmpeg', source=chemin))
        voice.is_playing()
    else:  # the author is not in a voice channel: tell them to join one
        await ctx.send("Connectez vous a un channel vocal en premier.")
@bot.command()
async def pause(ctx):
    """Pause playback if the bot is currently playing (removed unused local)."""
    if ctx.voice_client.is_playing():
        ctx.voice_client.pause()
    else:
        await ctx.send("Le bot ne joue rien en ce moment.")
@bot.command()
async def resume(ctx):
    """Resume playback if the bot is currently paused (removed unused local)."""
    if ctx.voice_client.is_paused():
        ctx.voice_client.resume()
    else:
        await ctx.send("Le bot n'est pas en pause.")
@bot.command()
async def quit(ctx):
    # Disconnect the bot from the caller's voice channel.
    if ctx.author.voice:
        await ctx.voice_client.disconnect()
    else:
        await ctx.send("Connectez vous a un channel vocal en premier.")
@bot.command()
async def join(ctx):
    # Join the caller's current voice channel.
    if ctx.author.voice:
        await ctx.author.voice.channel.connect()
    else:
        await ctx.send("Connectez vous a un channel vocal en premier.")
@bot.command()
async def logout(ctx):
    # Go offline, leave voice and shut the bot down.
    await bot.change_presence(status=discord.Status.offline)
    await ctx.voice_client.disconnect()
    await bot.close()
bot.run(os.getenv("TOKEN")) |
name = ["Anna", "Eli", "Pariece", "Brendan", "Amy", "Shane", "Oscar"]
favorite_animal = ["horse", "cat", "spider", "giraffe", "ticks", "dolphins", "llamas","nobody likes monkeys"]
def zipit(list1,list2):
newlist = zip(list1,list2)
print newlist
newerlist = dict(newlist)
print newerlist
zipit(name,favorite_animal)
# def testing(list1,list2):
# if len(list2) > len(list1):
# zip(list2,list1)
# else:
# zip(list1,list2)
|
#! /usr/bin/python
# -*- coding: utf-8-*-
from tts import *
from datetime import date
import RPi.GPIO as GPIO
from time import sleep, time
import random
# Tomorrow's class schedule, indexed by (today's weekday + 1) % 7
# (0 = Monday ... 6 = Sunday); the last two entries cover the weekend.
classes = [u"明天是周一,课程有:班队会,语文,语文,品德与生活,校本习惯,语文,体育活动",
           u"明天是周二,课程有:数学,外语,体育,语文,校本或语文,校本或语文",
           u"明天是周三,课程有:数学,语文,音乐,美术,外语,体育",
           u"明天是周四,课程有:数学,语文,美术,品德与生活,体育,专题教育",
           u"明天是周五,课程有:数学,语文,书法,外语,音乐,体育",
           u"明天是休息日, 祝你周末愉快!",
           u"明天是休息日, 祝你周末愉快!" ]
# A random well-wishing phrase is appended to every announcement.
wishes = [u"祝你有一个好心情!", u"祝你过得开心!", u"祝你好运!",
          u"加油吧,未来的科学家!", u"祝你学到更多的知识!",
          u"我等着你来不断改进我的功能哦!", u"祝你度过丰富多彩的一天",
          u"报告完毕,长官!"]
last_report_time = 0
def on_change(channel):
    # GPIO edge callback: speak tomorrow's schedule, software-debounced so a
    # bouncing button cannot trigger twice within one second.
    global last_report_time
    if (time() - last_report_time) < 1:
        return
    print "start to report class"
    todayIndex = (date.today().weekday()+1)%7
    speaker.speakText(classes[todayIndex])
    speaker.speakText(random.choice(wishes))
    last_report_time = time()
# Button on BCM pin 25, pulled down; rising edge triggers the report.
GPIO.setmode(GPIO.BCM)
GPIO.setup(25, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
try:
    GPIO.add_event_detect(25, GPIO.RISING, callback=on_change, bouncetime=1000)
    # Idle forever; the work happens in the GPIO callback thread.
    while(True):
        sleep(10)
except KeyboardInterrupt:
    GPIO.cleanup()
finally:
    GPIO.cleanup()
|
# -*- coding: utf-8 -*-
from app.obj2png.src.ObjFile import ObjFile
"""
Created on Sat Jul 7 00:40:00 2018
@author: Peter M. Clausen, pclausen
MIT License
Copyright (c) 2018 pclausen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
#import ObjFile
import sys
import os
import glob
def obj2png(obj, az, el):
    """Convert .obj file(s) matching `obj` to .png renderings.

    obj: a single path or a glob pattern; az/el: optional view angles (degrees).
    Exits with status 1 if a matched path is missing or is not a .obj file.
    """
    if '*' in obj:
        objs = glob.glob(obj)
    else:
        # Fix: a plain (non-glob) path previously left `objs` unbound -> NameError.
        objs = [obj]
    azim = az if az is not None else None
    elevation = el if el is not None else None
    for objfile in objs:
        if os.path.isfile(objfile) and '.obj' in objfile:
            outfile = objfile.replace('.obj', '.png')
            print('Converting %s to %s' % (objfile, outfile))
            ob = ObjFile(objfile)
            ob.Plot(outfile, elevation=elevation, azim=azim)
        else:
            print('File %s not found or not file type .obj' % objfile)
            sys.exit(1)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Obj to png using MatPlotLib')
    parser.add_argument("-i", "--infiles",
                        dest='objfiles',
                        nargs='+',
                        help="File or files to be converted to png")
    parser.add_argument("-o", "--outfile",
                        dest='outfile',
                        help="Output file(s). Default: infile.png")
    parser.add_argument("-a", "--azimuth",
                        dest='azim',
                        type=float,
                        help="Azimuth angle of view in degrees.")
    parser.add_argument("-e", "--elevation",
                        dest='elevation',
                        type=float,
                        help="Elevation angle of view in degrees.")
    parser.add_argument("-q", "--quality",
                        dest='quality',
                        help="Image quality (HIGH,MEDIUM,LOW). Default: LOW")
    parser.add_argument("-s", "--scale",
                        dest='scale',
                        type=float,
                        help="Scale picture by descreasing boundaries. Lower than 1. gives a larger object.")
    parser.add_argument("-v", "--view",
                        dest='view',
                        action='store_true',
                        help="View instead of creating picture file.")
    parser.add_argument("-A", "--Animate",
                        dest='animate',
                        action='store_true',
                        help="Animate instead of creating picture file as animation, from elevation -180:180 and azim -180:180")
    args = parser.parse_args()
    print(args)
    objfiles = args.objfiles
    if '*' in objfiles[0]:
        objfiles = glob.glob(objfiles[0])
    # Map quality names to output DPI.
    res = {'HIGH': 1200, 'MEDIUM': 600, 'LOW': 300}
    dpi = None
    if args.quality:
        if type(args.quality) == int:
            dpi = args.quality
        elif args.quality.upper() in res:
            dpi = res[args.quality.upper()]
    azim = args.azim if args.azim is not None else None
    elevation = args.elevation if args.elevation is not None else None
    scale = args.scale if args.scale else None
    animate = args.animate if args.animate else None
    for objfile in objfiles:
        if os.path.isfile(objfile) and '.obj' in objfile:
            objfile = str(os.path.dirname(objfile)) + '/' + str(os.path.basename(objfile))
            print(objfile)
            outfile = objfile.replace('.obj', '.png')
            if args.outfile:
                outfile = args.outfile
            if args.view:
                outfile = None  # Plot() shows interactively when outfile is None
            else:
                print('Converting %s to %s' % (objfile, outfile))
            # BUG FIX: the file imports `from ... import ObjFile` (the class),
            # so `ObjFile.ObjFile(...)` raised AttributeError; call the class
            # directly, consistent with the obj2png() helper above.
            ob = ObjFile(objfile)
            ob.Plot(outfile, elevation=elevation, azim=azim, dpi=dpi, scale=scale, animate=animate)
        else:
            print('File %s not found or not file type .obj' % objfile)
            sys.exit(1)
|
import pdfplumber
# pdfplumber table-extraction tuning: columns are detected from ruling lines,
# rows from text alignment.
table_settings = {
    "vertical_strategy": "lines",
    "horizontal_strategy": "text",
    "intersection_x_tolerance": 15
}
def get_pdf():
    """Extract word tables from pages 7+ of the vocab PDF and return the rows.

    The accumulated rows were previously built and then discarded; they are
    now returned so callers can use them (per-page printing is kept).
    """
    pdf = pdfplumber.open("data/강상홍_voca.pdf")
    table_data = []
    for index, page in enumerate(pdf.pages):
        if index < 6:  # the leading pages contain no vocabulary table
            continue
        table_data += page.extract_table()
        print(page.extract_table())
    return table_data

get_pdf()
|
import tensorflow as tf
class FocusLoss(tf.keras.losses.Loss):
    """Balanced squared-error loss that reweights object vs background pixels.

    Pixels where y_true != 0 count as "object"; squared errors on object and
    background pixels are normalised separately so sparse objects are not
    swamped by the background.
    """
    def __init__(self, threshold=0.5, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): threshold is stored but never used in call() — confirm intent.
        self.threshold = threshold
    def call(self, y_true, y_pred):
        """Compute focal loss for y and y_pred.
        Args:
            y_true: [batch, height, width, points_num]
            y_pred: [batch, height, width, points_num].
        Returns:
            the focus loss (scalar tensor).
        """
        batch_size = tf.shape(y_true)[0]
        height = tf.shape(y_true)[1]
        width = tf.shape(y_true)[2]
        y_pred = tf.sigmoid(y_pred)
        # Mask of "object" pixels (non-zero targets), as float for arithmetic.
        object_mask = tf.math.not_equal(y_true, 0.0)
        object_mask = tf.cast(object_mask, tf.float32)
        object_num = tf.math.reduce_sum(object_mask)
        other_num = tf.cast(height*width, tf.float32) - object_num
        # NOTE(review): if no object pixels exist, object_num and object_percent
        # are 0 and the divisions below yield inf/nan — confirm inputs guarantee
        # at least one object pixel.
        object_percent = object_num / tf.cast(height*width, tf.float32)
        y_true_object = tf.expand_dims(y_true * object_mask, axis=-1)
        y_pred_object = tf.expand_dims(y_pred * object_mask, axis=-1)
        y_true_other = tf.expand_dims(y_true * (1.0-object_mask), axis=-1)
        y_pred_other = tf.expand_dims(y_pred * (1.0-object_mask), axis=-1)
        # Separately normalised squared errors for object and background regions.
        loss_object = tf.math.reduce_sum(tf.math.square(y_true_object-y_pred_object)) / object_num / object_percent
        loss_other = tf.math.reduce_sum(tf.math.square(y_true_other-y_pred_other)) / other_num / (1.0-object_percent)
        loss = (loss_object+loss_other) / tf.cast(batch_size, tf.float32)
        return loss
|
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.db.models import Count
from app.models import Hit
def home(request):
    """Render home.html with hit counts grouped by (day, url, ip)."""
    context = RequestContext(request)
    template_name = 'home.html'
    data = {}
    # `extra` adds a computed `day` column (SQL date()); hits are counted per group.
    data['all_ips_by_date'] = Hit.objects.extra({'day': 'date(visit_datetime)'}).distinct().order_by('-day', 'url__url', 'ip', 'hits').values('ip', 'day', 'url__url').annotate(hits=Count('url'))
    return render_to_response(template_name, data, context_instance=context)
from django.contrib import admin
from locations.models import Location
@admin.register(Location)
class LocationAdmin(admin.ModelAdmin):
    # Show the primary key and the address in the changelist.
    list_display = ('id', 'address_info',)
|
#### Imports ####
import numpy as np
import tensorflow as tf
from sklearn.datasets import make_blobs
#### SKLearn Blobs ####
class clusterData:
    """Synthetic Gaussian-blob dataset (sklearn make_blobs) with one-hot labels.

    Train and test sets are drawn with the same seed; the test set is the tail
    of a larger draw so it extends beyond the training points.
    """
    def __init__(self,
                 n_features = 2,
                 n_classes = 2,
                 n_training_samples = 200,
                 n_testing_samples = 200,
                 cluster_std = 0.5,
                 center_box = (-2,2),
                 seed = 0):
        # Store the configuration.
        self.n_features = n_features
        self.n_classes = n_classes
        self.n_training_samples = n_training_samples
        self.n_testing_samples = n_testing_samples
        self.cluster_std = cluster_std
        self.center_box = center_box
        self.seed = seed
        # Draw the training blobs.
        self.x_train, self.y_train_raw = make_blobs(
            n_samples=self.n_training_samples,
            n_features=self.n_features,
            centers=self.n_classes,
            center_box=self.center_box,
            cluster_std=self.cluster_std,
            random_state=self.seed)
        # One-hot encode the training labels (categorical encoding).
        self.y_train = np.eye(self.n_classes)[self.y_train_raw]
        # Draw a larger set with the same seed and keep only the tail as test data.
        self.x_test, self.y_test_raw = make_blobs(
            n_samples=self.n_testing_samples + self.n_training_samples,
            n_features=self.n_features,
            centers=self.n_classes,
            center_box=self.center_box,
            cluster_std=self.cluster_std,
            random_state=self.seed)
        self.x_test = self.x_test[-self.n_testing_samples:]
        self.y_test_raw = self.y_test_raw[-self.n_testing_samples:]
        # One-hot encode the test labels.
        self.y_test = np.eye(self.n_classes)[self.y_test_raw]
    def trainData(self):
        """Return (features, one-hot labels, raw labels) for training."""
        return self.x_train, self.y_train, self.y_train_raw
    def testData(self):
        """Return (features, one-hot labels, raw labels) for testing."""
        return self.x_test, self.y_test, self.y_test_raw
#### MNIST ####
class mnistData:
    """MNIST dataset (flattened, normalized) with a train/validation split."""
    def __init__(self, percent_train=0.8):
        # BUG FIX: `self` was missing from the signature, so instantiating the
        # class raised immediately; the interface is otherwise unchanged.
        self.n_features = 784      # 28*28 pixels, flattened for an MLP
        self.n_classes = 10
        self.normal_val = 255      # max pixel value, used for normalization
        self.percent_train = percent_train
        ## Loading Data ##
        mnist = tf.keras.datasets.mnist
        (self.x_train, self.y_train_raw), (self.x_test, self.y_test_raw) = mnist.load_data()
        # Flattening for mlp
        self.x_train = self.x_train.reshape(self.x_train.shape[0], self.n_features)
        self.x_test = self.x_test.reshape(self.x_test.shape[0], self.n_features)
        ## Normalizing Data ##
        self.x_train = np.divide(self.x_train, self.normal_val)
        self.x_test = np.divide(self.x_test, self.normal_val)
        ## Splitting Data into Validation and Train Sets ##
        train_samples = int(self.percent_train * len(self.x_train))
        self.val_x = self.x_train[train_samples:]
        self.x_train = self.x_train[:train_samples]
        target_samples = int(self.percent_train * len(self.y_train_raw))
        self.val_y = self.y_train_raw[target_samples:]
        self.y_train_raw = self.y_train_raw[:target_samples]
        ## One Hot Encoding Y_train for training ##
        self.y_train = np.zeros((self.y_train_raw.shape[0], self.n_classes))
        self.y_train[np.arange(self.y_train_raw.size), self.y_train_raw] = 1
        ## One Hot Encoding Y_test for testing ##
        self.y_test = np.zeros((self.y_test_raw.shape[0], self.n_classes))
        self.y_test[np.arange(self.y_test_raw.size), self.y_test_raw] = 1
    def trainData(self):
        """Return (features, one-hot labels, raw labels) for the training split."""
        return self.x_train, self.y_train, self.y_train_raw
    def testData(self):
        """Return (features, one-hot labels, raw labels) for the test set."""
        return self.x_test, self.y_test, self.y_test_raw
# Generated by Django 3.1.2 on 2020-11-15 03:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.2; avoid hand-editing applied migrations.
    dependencies = [
        ('payroll', '0014_deduction_ded_bef_or_after'),
    ]
    operations = [
        # Order employees by employee_number in default querysets.
        migrations.AlterModelOptions(
            name='employee',
            options={'managed': True, 'ordering': ['employee_number']},
        ),
        migrations.AlterField(
            model_name='deduction',
            name='employer_rate',
            field=models.DecimalField(decimal_places=3, default=0, max_digits=8),
        ),
        migrations.AlterField(
            model_name='deduction',
            name='max_for_year',
            field=models.DecimalField(blank=True, decimal_places=3, default=0, max_digits=15, null=True),
        ),
        migrations.AlterField(
            model_name='employee',
            name='basic_pay',
            field=models.FloatField(default=0, verbose_name='Basic Pay'),
        ),
    ]
|
import matplotlib.pyplot as plt
# EER (equal error rate) versus the maximum number of i-vectors per person,
# one series per spoken command.
x = [3, 4, 5, 6, 7, 8, 9, 10]
EER_d = [0.328, 0.316, 0.297, 0.286, 0.293, 0.278, 0.287, 0.293]
EER_m = [0.292, 0.266, 0.284, 0.292, 0.295, 0.278, 0.278, 0.281]
EER_s = [0.318, 0.293, 0.316, 0.307, 0.309, 0.283, 0.290, 0.301]
EER_t = [0.333, 0.3, 0.29, 0.29, 0.29, 0.298, 0.287, 0.284]
plt.plot(x, EER_d, color="b")
plt.plot(x, EER_m, color="r")
plt.plot(x, EER_s, color="g")
plt.plot(x, EER_t, color="k")
plt.grid()
plt.xlabel("Maksymalna liczba i-vectorow na osobe")
plt.ylabel("EER")
plt.legend(["Otworz drzwi", "Wlacz muzyke", "Zapal swiatlo", "Zwieksz temperature"])
plt.show()
# (removed a stray bare `print` expression — a leftover no-op that printed nothing)
|
import pyb
from pyb import I2C
# Exercise the pyb I2C buses; bus 1 is then probed via the on-board accelerometer.
i2c = I2C(1)
i2c2 = I2C(2)
i2c.init(I2C.MASTER, baudrate=400000)
print(i2c.scan())
i2c.deinit()
# use accelerometer to test i2c bus
accel_addr = 76
pyb.Accel() # this will init the bus for us
print(i2c.scan())
print(i2c.is_ready(accel_addr))
# Read one byte from register 7, write 0 to register 0, then raw send/recv.
print(i2c.mem_read(1, accel_addr, 7, timeout=500))
i2c.mem_write(0, accel_addr, 0, timeout=500)
i2c.send(7, addr=accel_addr)
i2c.recv(1, addr=accel_addr)
|
import zipline
from zipline.api import order, record, symbol
import logging
import matplotlib.pyplot as plt
from datetime import datetime
# Backtest configuration.  NOTE(review): only AAPL is actually traded below.
tickers = ['AAPL', 'NVDA', 'GOOG', 'INTC']
start = datetime(2013, 1, 1)
end = datetime(2017, 1, 1)
LOGGER = logging.getLogger(__name__)
def initialize(context):
    """zipline hook called once before the backtest starts (currently just logs)."""
    LOGGER.info('Initialize')
    # environment = Environment(tickers,
    #                           from_date=datetime(2007, 1, 1),
    #                           to_date=datetime(2013, 1, 1))
    #
    # test_environment = Environment(tickers,
    #                                from_date=start,
    #                                to_date=end,
    #                                scaler=environment.scaler)
    #
    # agent = Agent(environment.state_size(),
    #               environment.action_size())
    # agent.load('model.h5')
def handle_data(context, data):
    """Per-bar zipline hook: buy 10 AAPL shares and record the current price."""
    asset = symbol('AAPL')
    order(asset, 10)
    record(AAPL=data.current(asset, 'price'))
if __name__ == '__main__':
    # Run the backtest, then plot portfolio value and AAPL price stacked.
    perf = zipline.run_algorithm(start=start,
                                 end=end,
                                 initialize=initialize,
                                 capital_base=1000,
                                 handle_data=handle_data)
    LOGGER.info(perf)
    LOGGER.info('Creating plot')
    fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True)
    perf.portfolio_value.plot(ax=ax1)
    ax1.set_ylabel('portfolio value')
    perf.AAPL.plot(ax=ax2)
    ax2.set_ylabel('AAPL stock price')
    fig.savefig('STOCK.png')
    plt.close()
|
from gym.envs.registration import register
# Register every Boeing flight-control environment variant with Gym.
# Entry points all live in gym_Boeing.envs; ids follow "<scenario>-v<N>".
register(
    id='boeing-safe-v0',
    entry_point='gym_Boeing.envs:BoeingSafe',
)
register(
    id='boeing-danger-v0',
    entry_point='gym_Boeing.envs:BoeingDanger',
)
register(
    id='normalized-danger-v0',
    entry_point='gym_Boeing.envs:NormalizedDanger',
)
register(
    id='boeing-danger-v1',
    entry_point='gym_Boeing.envs:FailureDanger',
)
register(
    id='boeing-danger-v2',
    entry_point='gym_Boeing.envs:EvalDanger',
)
register(
    id='failure-train-v0',
    entry_point='gym_Boeing.envs:FailureMode1',
)
register(
    id='boeing-danger-v3',
    entry_point='gym_Boeing.envs:Longitudinal',
)
register(
    id='failure-test-v0',
    entry_point='gym_Boeing.envs:FailureMode2',
)
register(
    id='failure-train-v1',
    entry_point='gym_Boeing.envs:FailureMode3',
)
register(
    id='failure-test-v1',
    entry_point='gym_Boeing.envs:FailureMode4',
)
register(
    id='failure-train-v2',
    entry_point='gym_Boeing.envs:FailureMode5',
)
register(
    id='simple-model-v0',
    entry_point='gym_Boeing.envs:SimpleModel',
)
register(
    id='ineffective-throtle-v0',
    entry_point='gym_Boeing.envs:FailureMode6',
)
register(
    id='faultyA-train-v0',
    entry_point='gym_Boeing.envs:FailureMode7',
)
register(
    id='faultyA-test-v0',
    entry_point='gym_Boeing.envs:FailureMode8',
)
register(
    id='combined-modes-v0',
    entry_point='gym_Boeing.envs:FailureMode9',
)
register(
    id='failure-train-v3',
    entry_point='gym_Boeing.envs:FailureMode10',
)
register(
    id='failure-test-v3',
    entry_point='gym_Boeing.envs:FailureMode11',
)
register(
    id='actuation-train-v0',
    entry_point='gym_Boeing.envs:FailureMode12',
)
register(
    id='four-modes-train-v0',
    entry_point='gym_Boeing.envs:FailureMode13',
)
register(
    id='four-modes-test-v0',
    entry_point='gym_Boeing.envs:FailureMode14',
)
register(
    id='demonstration-v0',
    entry_point='gym_Boeing.envs:Demo',
)
register(
    id='demonstration-v1',
    entry_point='gym_Boeing.envs:Demo2',
)
import random
def keyFinder():
    """Print every stored record whose line contains the requested account name."""
    try:
        accountKey = input("What account are you looking for?")
        with open('allpsswrds.txt', 'r') as file:
            for line in file:
                if accountKey in line:
                    print("Here is your key for: " + accountKey)
                    print(line)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being silently swallowed.
        print("An error occurred during the process, sorry please try again.")
def keyMaker():
    """Generate a random 15-char password, show the record and append it to the file."""
    email = input("What email did you use?")
    uName = input("What username did you use?")
    account = input("What is this for?")
    password_length = 15
    list_of_char = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
                    'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
                    'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7',
                    '8', '9', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')']
    # Sample without replacement, matching the original generator's behaviour.
    real_pass = ''.join(random.sample(list_of_char, password_length))
    keys = "email: " + str(email) + " | uname: " + str(uName) + " | account: " + str(account) + " | password: " + str(
        real_pass)
    print("Here is the key for " + account)
    print(keys)
    # Context manager guarantees the file is closed even if a write fails
    # (the previous version left the handle open on error).
    with open('allpsswrds.txt', 'a') as file:
        file.write('\n')
        file.write(keys)
def customKey():
    """Store a user-chosen password record, showing it and appending it to the file."""
    email = input("What email did you use?")
    uName = input("What username did you use?")
    account = input("What account is this for?")
    password = input("What would you like your password to be?")
    keys = "email: " + str(email) + " | uname: " + str(uName) + " | account: " + str(account) + " | password: " + str(
        password)
    print("Here is the key for " + account)
    print(keys)
    # Context manager guarantees the file is closed even if a write fails.
    with open('allpsswrds.txt', 'a') as file:
        file.write('\n')
        file.write(keys)
runAgain = "yes"
print("This Program is to make a key for an account, and once made can be searched upon in the file it makes. \n"
" A key is the account that this is for,the username used, \n"
"the password, either randomly made or custom made by you, \n"
"and the email used for the account that you are storing. \n"
"\n"
"Have fun and stay safe!\n")
while runAgain == "yes" or runAgain == "y" or runAgain == "Yes" or runAgain == "Y":
task = input("Are you finding a key or, making a key?")
if task == "finding" or task == "Finding" or task == "f" or task == "F" or task == "find" or task == "Find":
keyFinder()
elif task == "making" or task == "Making" or task == "m" or task == "M" or task == "make" or task == "Make":
typeOfKey = input("Do you want to make a random key or, make a custom key?")
if typeOfKey == "random" or typeOfKey == "Random" or typeOfKey == "r" or typeOfKey == "R":
keyMaker()
print("\n")
elif typeOfKey == "custom" or typeOfKey == "Custom" or typeOfKey == "c" or typeOfKey == "C":
customKey()
print("\n")
else:
print("That's not a valid option.")
else:
print("That's not a valid option.")
runAgain = input("Do you want to run the program again?")
|
import math
import torch
from torch.autograd import grad
# probability space computations
def phi(X, W):
    """Random feature map in probability space: exp(X @ W - |x|^2 / 2), row-wise."""
    half_sq_norm = X.pow(2).sum(-1, keepdim=True) / 2
    return torch.exp(X @ W - half_sq_norm)
def rff(X, Y, W):
    """Row-normalized random-feature kernel estimate between rows of X and Y."""
    # Feature maps (phi inlined): exp(x @ W - |x|^2 / 2).
    A = torch.exp(X @ W - X.pow(2).sum(-1, keepdim=True) / 2)
    B = torch.exp(Y @ W - Y.pow(2).sum(-1, keepdim=True) / 2)
    values = A @ B.T
    # Normalize each row to sum to 1.
    return values / values.sum(-1, keepdim=True)
# log space computations
def log_phi(X, W):
    """Log of the random feature map: X @ W - |x|^2 / 2, row-wise."""
    half_sq_norm = X.pow(2).sum(-1, keepdim=True) / 2
    return X @ W - half_sq_norm
def logbmm(A, B):
    """Numerically-stable log of exp(A) @ exp(B).T, via pairwise logsumexp."""
    pairwise = A.unsqueeze(1) + B.unsqueeze(0)
    return pairwise.logsumexp(-1)
def log_rff(X, Y, W):
    """Log-space version of rff: log of the row-normalized kernel estimate."""
    scores = logbmm(log_phi(X, W), log_phi(Y, W))
    return scores.log_softmax(-1)
# shift by max
def shift_log_phi(X, W):
    """log_phi shifted so each row's maximum feature score is 0 (for stability)."""
    scores = X @ W - X.pow(2).sum(-1, keepdim=True) / 2
    return scores - scores.max(-1, keepdim=True)[0]
def shift_log_rff(X, Y, W):
    """rff in log space with max-shifted scores; shifts cancel in the softmax."""
    A = shift_log_phi(X, W)
    B = log_phi(Y, W)
    B = B - B.max()
    return logbmm(A, B).log_softmax(-1)
# detach and shift max
def detach_shift_log_phi(X, W):
    """Scaled log feature map with a *detached* max-shift.

    The 1/sqrt(d) scaling enters as an additive log constant; the per-row max
    is subtracted for stability but detached so it carries no gradient.
    """
    d = W.shape[0]
    log_scale = math.log(1 / math.sqrt(d))
    scores = log_scale + X @ W - X.pow(2).sum(-1, keepdim=True) / 2
    return scores - scores.max(-1, keepdim=True)[0].detach()
def detach_shift_log_rff(X, Y, W):
    """rff in log space with detached max-shifts (gradient-safe stabilization)."""
    A = detach_shift_log_phi(X, W)
    B = log_phi(Y, W)
    B = B - B.max().detach()
    return logbmm(A, B).log_softmax(-1)
# Sanity check: the four formulations should yield the same probabilities;
# gradients are compared to see the effect of the (detached) shifts.
n = 32
num_features = 128
d = 64
T = 1
X = torch.randn(n, d) / T
Y = torch.randn(n, d) / T
W = torch.randn(d, num_features) / T
X.requires_grad = True
out1 = rff(X, Y, W)
out2 = log_rff(X, Y, W).exp()
out3 = shift_log_rff(X, Y, W).exp()
out4 = detach_shift_log_rff(X, Y, W).exp()
grad1, = grad(out1.sum(), X)
grad2, = grad(out2.sum(), X)
grad3, = grad(out3.sum(), X)
grad4, = grad(out4.sum(), X)
|
# Hint: You may not need all of these. Remove the unused functions.
class Ticket:
    """A directed travel leg from `source` to `destination`."""

    def __init__(self, source, destination):
        # Plain value object: just record both endpoints.
        self.source = source
        self.destination = destination
def reconstruct_trip(tickets, length):
    """Return `length` destinations in travel order, starting from source 'NONE'.

    Each ticket's destination is the next ticket's source, so following the
    source -> destination mapping from the sentinel 'NONE' yields the itinerary.
    """
    next_stop = {ticket.source: ticket.destination for ticket in tickets}
    route = []
    current = 'NONE'
    for _ in range(length):
        current = next_stop[current]
        route.append(current)
    return route
from mininet.topo import Topo
from mininet.node import Docker
from mininet.link import TCLink
# s1
# ______| |_______________
# s2 s3
# ___| |___ ___| |___
# h1 h2 | |
# h3---s4 s5---h4
# |___ __|
# | |
# s6
# |
# h5
class MyTopo(Topo):
    """Example topology"""

    def __init__(self):
        Topo.__init__(self)
        host_ips = ['10.0.0.11',
                    '10.0.0.22',
                    '10.0.0.33',
                    '10.0.0.44',
                    '10.0.0.55']
        # one Docker host per IP: h1..h5
        for idx, ip in enumerate(host_ips, start=1):
            self.addHost('h' + str(idx), cls=Docker, ip=ip, dimage="testbed:basic")
        # six plain switches: s1..s6
        for idx in range(1, 7):
            self.addSwitch('s' + str(idx))
        hosts = self.hosts()
        switches = self.switches()
        # backbone (switch-to-switch) links, 10ms each
        for a, b in [(0, 1), (0, 2), (2, 3), (2, 4), (3, 5), (4, 5)]:
            self.addLink(switches[a], switches[b], cls=TCLink, delay='10ms', bw=10)
        # access (host-to-switch) links, 20ms each
        for h, s in [(0, 1), (1, 1), (2, 3), (3, 4), (4, 5)]:
            self.addLink(hosts[h], switches[s], cls=TCLink, delay='20ms', bw=10)
topos = {'mytopo': MyTopo} # terminal version: "sudo mn --custom ./topo/topo_example.py --topo=mytopo"
|
import SCons, os
# Common sources linked into every simulator binary.
sources = [
    'EvoBlockSim.cc',
    'api_adapter.c',
    'evoBlock.c',
]
# Build environment: compile everything (including .c files) with g++.
env = Environment()
env.Replace(CC = 'g++')
env.Append(CCFLAGS = '-O3 -fopenmp -Wall -ggdb -Wno-deprecated')
env.Append(CPPPATH = ['#'])
defaultTargets = []
# One `evoBlockSim-<name>` binary per candidate .c solution in solutions/.
for entry in os.listdir('solutions'):
    base, ext = os.path.splitext(entry)
    if ext == '.c':
        defaultTargets.append('evoBlockSim-' + base)
        env.Program('evoBlockSim-' + base, sources + ['solutions/' + entry])
Default(defaultTargets)
|
# coding=utf-8
from framework.data_proc.jsonLib import get_value_from_json
from framework.http.httpLib import HttpLib
from framework.support.MyLogger import log_info
from project.api_call.baseApi import BaseApi
from project.configuration.statusCode import status_code_200
from project.configuration.configReader import parse_value_from_users_tokens
class LikesApi(BaseApi):
    """Client wrapper for the VK "Likes" API methods."""

    def __init__(self, account_id):
        super(LikesApi, self).__init__()
        # access token of the account this client acts on behalf of
        self.token = parse_value_from_users_tokens()[account_id]

    def is_liked(self, user_id, item_id_obj, type_obj='photo'):
        """Check whether the object is in the given user's "Likes" list.

        Supported ``type_obj`` values: post, comment, photo, audio, video,
        note, photo_comment, video_comment, topic_comment.

        Returns a ``(liked, copied)`` pair:
            liked  -- 1 if the current user has liked the object, else 0;
            copied -- 1 if the current user has reposted it, else 0.
        """
        url = '{api}likes.isLiked'.format(api=self.api_url)
        params = {
            'type': type_obj,
            'owner_id': user_id,
            'item_id': item_id_obj,
            'access_token': self.token,
            'v': self.api_version,
        }
        res = HttpLib(url=url, params=params).send_get()
        status_code = res.response.status_code
        assert status_code == status_code_200, '"Likes.isLiked" FAILED. {text}'.format(text=res.response.text)
        response = get_value_from_json(res.response.json(), 'response')
        return get_value_from_json(response, 'liked'), get_value_from_json(response, 'copied')

    def add(self, owner_id, item_id_obj, type_obj='photo'):
        """Add the object to the current user's "Likes" list.

        Supported ``type_obj`` values are the same as for :meth:`is_liked`.

        Returns the response's ``likes`` field: the current number of users
        who have the object in their "Likes" list.
        """
        url = '{api}likes.add'.format(api=self.api_url)
        params = {
            'type': type_obj,
            'owner_id': owner_id,
            'item_id': item_id_obj,
            'access_token': self.token,
            'v': self.api_version,
        }
        res = HttpLib(url=url, params=params).send_get()
        status_code = res.response.status_code
        assert status_code == status_code_200, '"Likes.add" FAILED. {text}'.format(text=res.response.text)
        log_info('Лайк проставлен. Пользователь: id{user_id}.'.format(user_id=owner_id))
        response = get_value_from_json(res.response.json(), 'response')
        return get_value_from_json(response, 'likes')
|
import sqlite3
import time
import pandas as pd
import sqlite3
# --- Backtest state ---------------------------------------------------------
# Portfolio and trade bookkeeping.
equityBTC = 5
equityAlt = 0
# Candle (OHLC bar) accumulation state; candles are built from raw ticks.
candleCount = 0
candleOpenTime = 0
candleOpen = 0
candleCloseTime = 0
candleClose = 0
candleLow = 0
canceledTrades = 0
candleHigh = 0
candlePricesList = []
candleLowList = []
candleHighList = []
ATR = 0  # here: SMA(high) - SMA(low) over the history window, not classic ATR
action = "sell"  # state machine: "sell" -> "closeBuy" -> "sell"
openBuyPrice = 0
loosingTrades = 0
profitableTrades = 0
badTrades = 0
profit = 0
profitRow = 0
candleHighestList = []
candleLowestList = []
profitTarget = 0
lossTarget = 0
candleSize = 1  # candle width in minutes
historySize = 5  # candles in the moving-average / Donchian window
potentialLoss = 0
potentialProfit = 0
profitWantedDefault = 0.006
lossWantedDefault = 0.006
profitWanted = 0
ATRFilterLow = 0.0000
ATRFilterHigh = 100
ATRList = []
DCLowList = []  # Donchian-channel lows
candleOpenList = []
candleCloseList = []
SMA = 0
SMALow = 0
SMAHigh = 0
candleVolume = 0
ATRDict = {}
feeList = []
# Exchange fees (maker currently zero)
makerFee = 0.0 / 100
takerFee = 0.03 / 100
geomP = 2  # geometric-progression stake doubling cap
inRow = 0
hit = 0
feeHigh = 0
bought = True
lossFee = 0
candleOpenPrice = 0
profitPrice = 0
lossPrice = 0
notEnoughBTC = 0
altTradeList = []
btcTradeList = []
DCHighList = []
DCMidList = []
# for i in range(10, 500, 20):
#     key = str((i - 10)) + ":" + str(i)
#     ATRDict[key] = 0
# Tick data source: sqlite table of (ts, price, volume) rows.
conn = sqlite3.connect('gdax_0.1.db')
cur = conn.cursor()
cur.execute("SELECT * FROM quotes_BTC_LTC ORDER BY ts") # WHERE timestamp >?", (1483318861,))
index = 0
price = 0
DCLow = 0
DCHigh = 0
DCMid = 0
# Replay every tick: build 1-minute candles, maintain indicators, and run the
# buy/close state machine on each tick.
for row in cur:
    index += 1
    if (index > 1):  # skip the first row (used only to prime the loop)
        ts = int(row[0])
        price = float(row[1])
        volume = float(row[2])
        # print(ts)
        if (index == 2):
            # first usable tick opens the first candle
            candleOpenTime = ts
            candleCloseTime = candleOpenTime + 60 * candleSize
            candleOpenPrice = price
            # print(time.strftime("%d %b %Y %H:%M:%S", time.localtime(candleOpenTime)))
            # print(candleOpenPrice)
        if ((ts >= candleOpenTime) and (ts < candleCloseTime)):
            # tick falls inside the current candle
            candlePricesList.append(price)
        else:
            # candle rollover: close the old candle, open a new one
            candleOpenTime = ts
            candleCloseTime = candleOpenTime + 60 * candleSize
            print(time.strftime("%d %b %Y %H:%M:%S", time.localtime(candleOpenTime)))
            candleOpenPrice = price
            candleLowPrice = min(candlePricesList)
            candleHighPrice = max(candlePricesList)
            candleLowList.append(candleLowPrice)
            candleHighList.append(candleHighPrice)
            candleOpenList.append(candleOpenPrice)
            candleCount += 1
            candlePricesList.clear()
            candlePricesList.append(candleOpenPrice)
            if (candleCount >= historySize):
                # window full: refresh moving averages and Donchian bounds,
                # then slide the window by one candle
                # if (candleOpenTime >= 1486900260 and candleOpenTime <= 1486909360):
                #     print(ATR)
                SMALow = sum(candleLowList) / len(candleLowList)
                SMAHigh = sum(candleHighList) / len(candleHighList)
                SMA = sum(candleOpenList) / len(candleOpenList)
                ATR = SMAHigh - SMALow
                DCLowList.append(min(candleLowList))
                DCHighList.append(max(candleHighList))
                # DCMidList.append(DCHigh - DCLow)
                DCLow = DCLowList[-1]
                DCHigh = DCHighList[-1]
                # print("DCLow", DCLow)
                # DCMid = DCMidList.pop()
                # DCLow = min(candleLowList)
                # DCHigh = max(candleHighList)
                # DCMid = DCHigh - DCLow
                candleLowList.pop(0)
                candleHighList.pop(0)
                candleOpenList.pop(0)
        if (ATR == 0):
            # indicators not warmed up yet (or flat window): no trading
            # action = "cancelBuy"
            continue
        else:
            # if (candleCount >= historySize):
            #     # for key, value in ATRDict.items():
            #     #     startRange = str(key).split(":")[0]
            #     #     endRange = str(key).split(":")[1]
            #     #     if (ATR > float(startRange) and ATR < float(endRange)):
            #     #         ATRDict[key] += volume
            #     print(ATR)
            # Entry: buy when price is below the low SMA but above the
            # Donchian low; stake is doubled per consecutive win (geomP cap).
            if (action == "sell" or action == "cancelBuy") and (
                    candleOpenPrice < SMALow and candleOpenPrice > DCLow):  # and ATR < ATRFilterHigh and ATR > ATRFilterLow):
                profitTarget = profitWantedDefault
                lossTarget = profitTarget  # SMAHigh-SMA
                # if(profitTarget <= 0 or lossTarget <= 0):
                #     continue
                openBuyPrice = price  # round(candleOpen - 1.5 * ATR, 2)
                # print("price", price)
                # print("ATR", ATR)
                # print("SMA", SMA)
                # print("SMAHigh", SMAHigh)
                # print("openBuyPrice", openBuyPrice)
                profitWanted = profitWantedDefault * 2 ** inRow
                equityAlt = profitWanted / profitTarget
                profitPrice = openBuyPrice + profitTarget
                lossPrice = openBuyPrice - lossTarget
                profitFee = openBuyPrice * takerFee + profitPrice * makerFee
                lossFee = openBuyPrice * makerFee + lossPrice * takerFee
                potentialProfit = equityAlt * (profitTarget - profitFee)
                potentialLoss = equityAlt * (lossTarget + lossFee)
                altTradeList.append(equityAlt)
                btcTradeList.append(equityAlt * price)
                feeList.append(lossFee * equityAlt)
                action = "closeBuy"
                # print(ts)
                # if (equityBTC / openBuyPrice >= equityAlt):
                # #     potentialRatio = potentialProfit / potentialLoss
                # #     wantedRatio = profitWantedDefault / lossWantedDefault
                # #     if (potentialRatio >= 8 / 13):
                # #         feeHigh += 1
                # #     else:
                #
                #     action = "closeBuy"
                # #     print("Open Buy at:", openBuyPrice)
                # #     print("Wanted Profit:", profitWanted)
                #     continue
                # else:
                #     notEnoughBTC += 1
                #     print("Not Enough $$$")
                #     continue
                # print("Not Enough BTC !!!")
                # print("openBuyPrice", openBuyPrice)
            # NOTE(review): "openBuy" is never assigned anywhere above, so
            # this branch looks unreachable -- confirm before relying on it.
            if (action == "openBuy"):
                if (price < openBuyPrice):
                    action = "closeBuy"
                    # print("Alt bought:", equityAlt)
                    # print("Equity BTC:", equityBTC)
                    continue
                elif (price > profitPrice):  # or ATR > ATRFilterHigh): # and ATR > ATRFilterHigh and ATR < ATRFilterLow):
                    action = "cancelBuy"
                    canceledTrades += 1
                    # print("Canceled")
                    continue
            # Exit: take profit above profitPrice, stop out below lossPrice.
            if (action == "closeBuy"):
                if (price > profitPrice):
                    action = "sell"
                    profitableTrades += 1
                    equityBTC += potentialProfit
                    inRow += 1
                    if (inRow == geomP):
                        inRow = 0
                        hit += 1
                    # print("Profit")
                    continue
                elif (price < lossPrice):
                    action = "sell"
                    loosingTrades += 1
                    equityBTC -= potentialLoss
                    inRow = 0
                    # print("Loss")
                    continue
# --- Summary report ---------------------------------------------------------
# NOTE(review): totalTrades can be zero if no trade ever completed, which
# would make the percentage prints divide by zero -- confirm acceptable.
# feeList = openBuyPrice * makerFee + profitPrice * makerFee
# print("candleLow", candleLow)
totalTrades = profitableTrades + loosingTrades
# print("Bad trades:", badTrades / (totalTrades + badTrades) * 100, "%")
print("Canceled trades:", canceledTrades)
print("Loosing trades:", (loosingTrades / totalTrades) * 100, "%")
print("Profitable trades:", (profitableTrades / totalTrades) * 100, "%")
print("Total trades:", totalTrades)
print("Final equity BTC:", equityBTC)
print("Hits:", hit)
print("Fee too high:", feeHigh)
print("Not Enough BTC", notEnoughBTC)
print("Geometric Progression:", geomP)
print("Max Alt needed", max(altTradeList))
print("Avr Alt needed", sum(altTradeList) / len(altTradeList))
print("Max BTC needed", max(btcTradeList))
# NOTE(review): divisor below uses len(altTradeList), not len(btcTradeList);
# the lists grow in lockstep so the result is the same -- confirm intended.
print("Avr BTC needed", sum(btcTradeList) / len(altTradeList))
print("Max Fee", max(feeList))
print("Avr Fee", sum(feeList) / len(feeList))
print(candleCount)
# for key, value in ATRDict.items():
#     print(key + "-" + str(round(value / 1000)))
|
from PIL import Image
import sys
def strip_extension(file_name):
    """Return *file_name* without its final extension.

    Splits on the LAST dot, so multi-dot names keep their earlier parts
    (the original split on the first dot: "a.b.jpg" -> "a").
    """
    return file_name.rsplit('.', 1)[0]
# Side-by-side merge of two equally sized images given on the command line.
image_one_name = sys.argv[1]
image_two_name = sys.argv[2]
print("merging {} with {}".format(image_one_name, image_two_name))
with Image.open(image_one_name) as image_one, Image.open(image_two_name) as image_two:
    if image_one.size != image_two.size:
        raise Exception('Images must be of the same size')
    w, h = image_one.size
    # twice the width: left half is image one, right half is image two
    merged = Image.new('RGB', (w * 2, h))
    merged.paste(im=image_one, box=(0, 0))
    merged.paste(im=image_two, box=(w, 0))
    out_name = strip_extension(image_one_name) + "_merge_" + strip_extension(image_two_name) + ".jpg"
    merged.save(out_name)
|
import asyncio
from typing import Any
from ..errors import InvalidCallbackTypeError
def assert_sync_callback(
    candidate: Any
) -> None:
    """Assert that the candidate is a valid synchronous callback.

    Raises InvalidCallbackTypeError when the candidate is not callable or
    is a coroutine function (which must be awaited, not called directly).
    """
    is_sync = callable(candidate) and not asyncio.iscoroutinefunction(candidate)
    if not is_sync:
        raise InvalidCallbackTypeError(
            f'Invalid synchronous callback {candidate}'
        )
def assert_async_callback(
    candidate: Any
) -> None:
    """Assert that the candidate is a valid asynchronous callback.

    Raises InvalidCallbackTypeError when the candidate is not a coroutine
    function (i.e. not defined with ``async def``).
    """
    if not asyncio.iscoroutinefunction(candidate):
        raise InvalidCallbackTypeError(
            f'Invalid asynchronous callback {candidate}'
        )
|
'''
987. Vertical Order Traversal of a Binary Tree
https://leetcode.com/problems/vertical-order-traversal-of-a-binary-tree/
Given binary tree, return vertical order traversal. This means that
from root node, the vertical line is x=0, left child is on line x=-1,
right child is on line x=1, and so on.
Example 1:
Input: [3,9,20,null,null,15,7]
Output: [[9],[3,15],[20],[7]]
Example 2:
Input: [1,2,3,4,5,6,7]
Output: [[4],[2],[1,5,6],[3],[7]]
'''
class TreeNode:
    """Binary tree node holding a value and optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
def verticalTraversal(self, root: TreeNode) -> [[int]]:
colMap = {}
x = 0
y = 0
self.helper(root, colMap, x, y)
out=[]
for k in sorted(colMap.keys()):
tupList = sorted(colMap[k], key=lambda x: x[1])
yMap = {}
for _,t in enumerate(tupList):
rv = t[0]; yv = t[1]
if yv in yMap.keys():
yMap[yv].append(rv)
else:
yMap[yv] = [rv]
#print("yMap: ",yMap)
col = []
for k in sorted(yMap.keys()):
col += sorted(yMap[k])
out.append(col)
return out
def helper(self, root, colMap, x, y):
if root == None:
return
if x in colMap.keys():
colMap[x].append((root.val,y))
else:
colMap[x] = [(root.val,y)]
#print("colMap: ", colMap)
self.helper(root.left, colMap, x-1, y+1)
self.helper(root.right, colMap, x+1, y+1)
def buildTree(self, arr):
if not arr:
return None
def h(arr, i):
if i > len(arr)-1:
return None
if arr[i]==None:
return None
root = TreeNode(arr[i])
print("node created: ", root.val)
root.left = h(arr, 2*i+1)
root.right = h(arr, 2*i+2)
return root
root = h(arr, 0)
return root
def test1(self):
arr = [3,9,20,None,None,15,7]
expected = [[9],[3,15],[20],[7]]
root = self.buildTree(arr)
res = self.verticalTraversal(root)
print("res: ", res)
def test2(self):
arr = [1,2,3,4,5,6,7]
expected = [[4],[2],[1,5,6],[3],[7]]
root = self.buildTree(arr)
res = self.verticalTraversal(root)
print("res: ", res)
if res==expected:
print("test2 pass")
def test3(self):
arr = [0,2,1,3,None,None,None,4,5,None,7,6,None,10,8,11,9]
expected = [[4,10,11],[3,6,7],[2,5,8,9],[0],[1]]
root = self.buildTree(arr)
res = self.verticalTraversal(root)
print("res: ", res)
if res==expected:
print("test3 pass")
# Run the ad-hoc tests (test1/test2 kept commented for reference).
# The stray trailing "|" artifact after sol.test3() was a syntax error.
sol = Solution()
#sol.test1()
#sol.test2()
sol.test3()
from flask import Flask, render_template, request, jsonify
from flask_mysqldb import MySQL
from config import DB_CONFIG
import json
import numpy as np
app = Flask(__name__)
# MySQL connection settings come from config.DB_CONFIG
app.config['MYSQL_HOST'] = DB_CONFIG['host']
app.config['MYSQL_USER'] = DB_CONFIG['user']
app.config['MYSQL_PASSWORD'] = DB_CONFIG['password']
app.config['MYSQL_DB'] = DB_CONFIG['database']
mysql = MySQL(app)
@app.route('/commodity')
def commodity():
    """Return price rows for a commodity between two dates, plus mean/variance.

    Query args:
        commodity -- 'gold' or 'silver'; anything else yields an empty list
                     (the old code returned None, a 500 in Flask).
        start, end -- date bounds; `end` is bound first, matching the
                      original query's BETWEEN date(end) AND date(start).
    """
    commodity = request.args.get('commodity')
    start = request.args.get('start')
    end = request.args.get('end')
    # Whitelist the table name -- never interpolate raw user input into SQL.
    if commodity not in ('gold', 'silver'):
        return json.dumps([])
    cur = mysql.connection.cursor()
    cur.execute("""SELECT * FROM {table}
                WHERE date(date) BETWEEN date(%s) AND date(%s)
                ORDER BY date desc""".format(table=commodity), [end, start])
    headers = [x[0] for x in cur.description]
    rows = cur.fetchall()
    json_res = []
    prices = []
    for res in rows:
        json_res.append(dict(zip(headers, res)))
        # Strip ALL thousands separators. The old gold branch concatenated
        # only the first two comma groups ("1,234,567" -> 1234) and crashed
        # on comma-free prices; silver never handled separators at all.
        prices.append(float(str(res[2]).replace(',', '')))
    if prices:  # guard: the old code divided by zero on an empty result set
        json_res.append(['mean', round(sum(prices) / len(prices), 2)])
        # float(): np.var returns np.float64, which json.dumps rejects
        json_res.append(['variance', float(np.var(prices))])
    return json.dumps(json_res)
if __name__ == '__main__':
    # Development server only; use a real WSGI server in production.
    app.run(host='localhost', port=8080, debug=True)
|
from django.urls import path

from . import views  # was "from .import views" -- legal but misleading

# URL namespace, e.g. reverse('accounts:signin')
app_name = 'accounts'

urlpatterns = [
    path('edithotel/<int:id>', views.edit_hotel, name='edithotel'),
    path('register', views.register, name='register'),
    path('signin', views.sign_in, name='signin'),
    path('signout', views.signout, name='signout'),
    path('editprofile', views.edit_profile, name='editprofile'),
    path('addhotel', views.add_hotel, name='addhotel'),
]
from PyObjCTools.TestSupport import TestCase, min_os_level
import WebKit
class TestWKSnapshotConfiguration(TestCase):
    """Check the Objective-C bridge metadata of WKSnapshotConfiguration."""

    @min_os_level("10.15")
    def testMethods(self):
        # afterScreenUpdates is bridged as a BOOL property: BOOL result on
        # the getter, BOOL first argument on the setter.
        self.assertResultIsBOOL(WebKit.WKSnapshotConfiguration.afterScreenUpdates)
        self.assertArgIsBOOL(WebKit.WKSnapshotConfiguration.setAfterScreenUpdates_, 0)
|
class Solution:
    def maximumGap(self, nums: 'List[int]') -> int:
        """Return the largest difference between successive elements of the
        sorted form of *nums*; 0 when there are fewer than two elements.

        Sorts in place, then takes the max adjacent difference. The string
        annotation avoids the NameError the bare `List` raised at class
        creation when typing.List was not imported; the redundant second
        index `j` and the needless abs() after sorting are gone.
        """
        if len(nums) < 2:
            return 0
        nums.sort()
        return max(b - a for a, b in zip(nums, nums[1:]))
|
from .Profile import Profile
class PipeProfile(Profile):
    """The PipeProfile object defines the properties of a circular pipe profile.
    The PipeProfile object is derived from the Profile object.
    Notes
    -----
    This object can be accessed by:
    .. code-block:: python
        import section
        mdb.models[name].profiles[name]
        import odbSection
        session.odbs[name].profiles[name]
    The corresponding analysis keywords are:
        - BEAM SECTION
    """

    # NOTE(review): this is an API stub -- r and t are documented but not
    # stored on the instance; the real behavior lives in the Abaqus kernel.
    def __init__(self, name: str, r: float, t: float):
        """This method creates a PipeProfile object.
        Notes
        -----
        This function can be accessed by:
        .. code-block:: python
            mdb.models[name].PipeProfile
            session.odbs[name].PipeProfile
        Parameters
        ----------
        name
            A String specifying the repository key.
        r
            A Float specifying the outer radius of the pipe. For more information, see [Beam
            cross-section
            library](https://help.3ds.com/2021/English/DSSIMULIA_Established/SIMACAEELMRefMap/simaelm-c-beamcrosssectlib.htm?ContextScope=all).
        t
            A Float specifying the wall thickness of the pipe.
        Returns
        -------
        A PipeProfile object.
        Raises
        ------
        RangeError
        """
        super().__init__()
        pass

    def setValues(self):
        """This method modifies the PipeProfile object.
        Raises
        ------
        RangeError
        """
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.