index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
4,500 | b3bace532f687edc966c6aef5f454bde9367204f | from sys import exit
# Outside
def outside():
    """Opening outdoor scene: route the player to Rewe, the HSV bar, or die()."""
    print """
Oyoiyoi ... The train isn't running due to the HVV being complete crap.
Well, the Rewe around the corner is still open
and there's a HSV bar around the corner.
You want to get your drank on, right now!
Where do you go?
"""
    choice = raw_input("> ")
    if choice == "Rewe":
        print "Off to Rewe!"
        Rewe()
    elif choice == "HSV bar":
        # Going straight to the bar means arriving sober (no booze bought).
        print "Off to the HSV bar!"
        bar_sober()
    else:
        die()
# Outsid - Rewe
def Rewe():
    """Rewe scene: the player buys liquor, then heads to the bar or home."""
    print """
Ohhaohaoha...Rewe ist packed!
..and you're still sober!
What is this, gradeschool?
Forget about the beer, just get in line at the register
and grab whatever hard liquor is at kids' reach
(way to go Germany..). \n
\n
Alrgiht, now you're back outside
and your nipps are about to freeze off!
Where are you going to go now?
Time for the HSV bar,
or are you done with the world and want to just go home?
"""
    choice = raw_input("> ")
    if choice == "HSV bar":
        print "To the HSV bar!"
        way_bar()
    elif choice == "go home":
        print "Homeward bound!"
        way_inside()
    else:
        die()
# Outside - Rewe - way to the bar
def way_bar():
    """Walk to the bar; how many bottles are drunk decides the branch.

    < 3 -> bar_sober, 3-5 -> bar_buzzed, >= 6 -> loose.
    Raises ValueError if the input is not an integer.
    """
    print """
You managed to grab a box of some good ol' German Schnapps!
These 12 little babies are gonna get you in the right kind of mood.
You have about a 5 to 10 minute walk ahead of you..
How many mini bottles of goodness will you gulp down on the way?
"""
    choice = raw_input("> ")
    how_much = int(choice)
    if how_much < 3:
        bar_sober()
    elif 6 > how_much >= 3:
        bar_buzzed()
    else:
        loose("""
Well, I mean you did want to get wierd tonight,
but this just escalated far too quickly! You need
to get ahold of yourself! Now you've thrown your cookies
all over the sidewalk..Though I am a bit proud of you, you better just
go home and sleep..if you can find your way.
""")
# Outside - Rewe - way back home
def way_inside():
    """Walk home; same drink-count branching as way_bar but toward the inside_* scenes.

    < 3 -> inside_sober, 3-5 -> inside_buzzed, >= 6 -> loose.
    """
    print """
You managed to grab a box of some good ol' German Schnapps!
These 12 little babies are gonna get you in the right kind of mood.
You have about a 5 to 10 minute walk ahead of you..
How many mini bottles of goodness will you gulp down on the way?
"""
    choice = raw_input("> ")
    how_much = int(choice)
    if how_much < 3:
        inside_sober()
    elif 6 > how_much >= 3:
        inside_buzzed()
    else:
        loose("""
Well, I mean you did want to get wierd tonight,
but this just escalated far too quickly! You need
to get ahold of yourself! Now you've thrown your cookies
all over the sidewalk..Though I am a bit proud of you, you better just
go home and sleep..if you can find your way.
""")
# Outside - Rewe - Inside(buzzed)
def inside_buzzed():
    """At home, buzzed: choose whether to invite friends over."""
    print """
Now you're a little buzzed and all warm in your humble abode!
You could kick it here with ya bad self, or ask if some peeps want to
come over.
Do you want to invite people to come get wrecked with you?
"""
    choice = raw_input("> ")
    if choice == "yes":
        print "Okay, let's get this party goin'!"
        inside_buzzed_invite()
    elif choice == "no":
        print "There's only enough liquor for numero uno."
        inside_buzzed_alone()
    else:
        die()
# Outside - Rewe - Inside(buzzed) Invite
def inside_buzzed_invite():
    """Invitation falls through; funnels into inside_buzzed_alone()."""
    print """
Congrats..
Both of your two friends are busy.
Well, so much for trying to be social!
Guess you'll be hanging out alone after all.
"""
    inside_buzzed_alone()
# Outside - Rewe - Inside(buzzed) ALone
def inside_buzzed_alone():
    """Buzzed-and-alone ending: a win."""
    print """
Now you're a little buzzed and all warm in your humble abode!
Time to watch episodes of 'Intervention'
and drink everytime someone makes a worse life choice
than you have!
"""
    win("Yay for not being at the very bottom!")
# Inside (sober)
def inside_sober():
    """At home, sober: choose whether to invite friends over."""
    print """
Alright alright alright.
You could kick it here with ya bad self, or ask if some peeps want to
come over.
Do you want to invite people to come get wrecked with you?
"""
    choice = raw_input("> ")
    if choice == "yes":
        print "It'll be nice to have some social interaction."
        inside_sober_invite()
    elif choice == "no":
        print "Ew gross, people are icky."
        inside_sober_alone()
    else:
        die()
# Inside (sober) invite
def inside_sober_invite():
    """Sober-with-guests ending: a loss."""
    print """
Wow you're feeling socially acceptable today!
Three people are now at your place and you don't have much alcohol.
Way to go, you anti-social worm.
You're not drunk enough to be entertaining!
You forgot you can't handle being responsible for social encounters.
Akwardness.
Overwhelms.
You!
"""
    loose("You're an anxious mess.")
# Inside(sober) - Alone
def inside_sober_alone():
    """Sober-and-alone ending: a win."""
    print """
Wohoo! Time to drink what you've got and play some sweet video games until your eyes bleed!
Who needs other people to enjoy themselves?
Being socially awkward rules!
And the best part is:
You don't have to wear pants!
"""
    win("This is the (anti-social) life!")
# Outside - Rewe - bar(buzzed)
def bar_buzzed():
    """Arriving at the bar buzzed: the jackpot ending, a win."""
    print """
On the way to the bar, you see the disco lights flashing
and you can here the German Schlager music being accompanied
by the voices of old people.
Nice.
The few bottles of liquor you drank are kicking in just in time!
You've consumed the perfect amount for this kind of thing!
Once you get in the bar everyone cheers, even though you don't know them!
Some old lady is celebrating the death of her husband and buying rounds
for everyone.
"""
    win("You hit the party jackpot!")
# Outside - Bar(sober)
def bar_sober():
    """Arriving at the bar sober: a loss (no cash, no courage)."""
    print """
So now you're inside, and people seem to be having a good time.
The problem is: they are drunk; you are not!
You then realize that you can't pay with card here
and you don't have enough cash for a drink..
Even if you brought booze with you, you wouldn't be able to
drink it in here. Way to go..
Because you're too sober to be socially acceptable, you can't
find the courage to ask the people celebrating if you can join.
"""
    loose("You're uncomfortable and go home as the anxious mess that alaways had been.")
# End of game, added to the variable of "why"
def win(why):
    """Print the victory reason and terminate the program with status 0."""
    print why, " Bitchin'."
    exit(0)
def loose(why):
    """Print the losing reason and terminate with status 0 (name kept as-is;
    renaming the misspelled 'loose' would break existing callers)."""
    print why, " Laaame.."
    exit (0)
def die():
    """Dead-end for unrecognized input.

    NOTE(review): unlike win()/loose(), this never calls exit(); control
    falls back to the caller after printing -- confirm that is intended.
    """
    print """
How dare you think out of the box?! You die sober!!
"""
# Begining of game
def start():
    """Entry point: first choice between going out and staying home."""
    print """
It's Friday night and you want to get hammered!
Do you want to go out or stay home?
"""
    choice = raw_input("> ")
    if choice == "out":
        outside()
    elif choice == "stay home":
        inside_sober()
    else:
        die()
start()
|
4,501 | a9a067ee3b176d2f2ca558b69ce2bc598bb31d22 | from celery.task.schedules import crontab
from celery.decorators import periodic_task
from celery.utils.log import get_task_logger
from bbapp.scripts.getScores import doScoresScrape, fixScores
logger = get_task_logger(__name__)
@periodic_task(
    run_every=(crontab(minute='*/10')),
    name="scrape_espn_feed",
    ignore_result=True
)
def scrape_espn_feed():
    """Every 10 minutes: scrape the ESPN scores feed and normalize MLB scores.

    (The previous docstring -- "Saves latest image from Flickr" -- was a
    copy-paste leftover and described a different task entirely.)
    """
    thescores = doScoresScrape()
    fixScores(thescores, 'MLB')
    logger.info("Scores scraped")
4,502 | da19bc4fc999bd48a3d55b8cb5f47ba6208bc02b | # Duy B. Lam
# 61502602
# Project 3
# A module that reads the input and constructs the objects
# that will generate the program's output. This is the only
# module that should have an if __name__ == '__main__' block
# to make it executable; you would execute this module to run your program.
import Module1
#USED TO RETREIVE THE NUMBER OF LOCATIONS
def tripQuantity() -> int:
    """Read the number of trip locations from stdin, echo it, and return it.

    Raises ValueError if the line is not an integer.

    Fix: the original wrapped the read in try/finally and referenced the
    local in the finally block; when int() raised, the finally block itself
    raised UnboundLocalError, masking the real error.
    """
    locationQ = int(input())
    # Debug echo, preserved from the original finally block.
    print('locationQ: ' + str(locationQ))
    return locationQ
#USED TO RETREIVE THE NUMBER OF REQUESTED OUTPUTS
def outputQ() -> int:
    """Read the number of requested outputs from stdin, echo it, and return it.

    Raises ValueError if the line is not an integer.

    Fixes: (1) same try/finally UnboundLocalError masking as tripQuantity;
    (2) the local no longer shadows the function's own name.
    """
    quantity = int(input())
    # Debug echo, preserved from the original finally block.
    print('output quantity:' + str(quantity))
    return quantity
#USED TO RECORD SEARCH LOCATIONS
def quantityToLocations(tripQ: int) -> list:
    """Collect tripQ location strings from stdin, in entry order."""
    return [input() for _ in range(tripQ)]
#USED TO RECORD OUTPUT OPTIONS
def quantityToOutput(outputQ: int) -> list:
    """Collect outputQ output-option strings from stdin, in entry order."""
    return [input() for _ in range(outputQ)]
if __name__ == '__main__':
    #USED TO GET USER INPUTS
    locationQ = tripQuantity()
    locationList = quantityToLocations(locationQ) #print to double check
    #CREATES A NEW SEARCH INSTANCE AND IT'S REQUEST URL
    # First entry is the origin, the rest are destinations.
    newSearch = Module1.URL()
    newSearch.set_from_location(locationList[0])
    newSearch.set_to_location(locationList[1:len(locationList)])
    print(str(newSearch.get_To_Location()))
    newSearch.set_request_url()
    newSearch_request_url = newSearch.get_Request_URL() #print to double check
    #THIS FUNCTION MAKES THE REQUEST AND GATHERS RESPONSE INTO DICTIONARY
    newSearch_reponse = newSearch.search_request_response()
    #USED TO GET USER OUTPUTS
    #outputQ = outputQ()
    #outputList = quantityToOutput(outputQ)
    #print(outputList)
'''
#USED TO REQUEST MAPQUEST SEARCH
x = urllib.request.urlopen(url)
#USED TO DECODE MAPQUEST RESPONSE
y = x.read().decode(encoding = 'utf-8')
print(y) # USE decoded response string to check with pretty json
#USED TO CONVERT DECODED STRING TO DICT/LISTS
z = json.loads(y) #dictionary of mapquest response which also includes lists
print(type(z['route']['locations']))
locationsList = z['route']['locations']
print(locationsList)
print(locationsList[1]['latLng'])
i = 0
if i < len(locationsList):
    for key in locationsList[i]:
        if key == 'latLng':
            print(locationsList[i][key])
        i+=1
#### i = 0
#### if i < len(locationsList):
#### if locationList[i] == 'latLng':
#### print(locationsList[i])
####
#print (y)
'''
|
4,503 | 58fb2676b599b5f7fb9041cfae113a9d428d8ef8 | #Horror_Novel_Generator.py
import markovify as mk
import random as rng
from fpdf import FPDF
def makePDF(filename):
    """Render a text file as a PDF next to it under Abominations\\.

    The file's first line is the underlined, centered title; every
    following line becomes a wrapped paragraph.

    Fix: the file handle is now closed via a context manager even if
    reading raises (the original leaked the handle on error).
    """
    with open(filename, "r") as file:
        # First line is the title; the remainder are the paragraphs.
        title = file.readline()
        pars = list(file)
    #Format PDF
    pdf = FPDF(unit='pt')
    pdf.add_page()
    pdf.set_font("Helvetica", "U", 16)
    pdf.cell(595, 16, txt=title, ln=1, align="C")
    pdf.set_font("Helvetica", size=12)
    for par in pars:
        pdf.multi_cell(0, 12, txt=par)
    # title[0:-1] strips the trailing newline for the output filename.
    fileTitle = ("Abominations\\%s.pdf" % title[0:-1])
    pdf.output(fileTitle)
def generate(model, file, sent_num):
    """Write sent_num Markov sentences to *file*, in paragraphs of 4-7.

    model must expose make_sentence(); file is any writable text stream.
    Each paragraph starts with a tab and its length is drawn from
    randrange(4, 8).

    Fix: markovify's make_sentence() returns None when it cannot build a
    sentence; the original crashed there with TypeError (None + " ").
    Such misses now simply produce no text for that slot.
    """
    counter = 0
    par_length = 0
    file.write("\t")
    for i in range(sent_num):
        if counter == 0:
            # Pick a fresh paragraph length of 4-7 sentences.
            par_length = rng.randrange(4, 8)
        sentence = model.make_sentence()
        if sentence is not None:
            file.write(sentence + " ")
        if counter == par_length:
            file.write("\n\t")
            counter = -1
        counter += 1
def main():
    """Build Markov models from three novels and emit remixed stories.

    Produces a text file (and, for most, a PDF) per model under
    Abominations\\, plus one long story from a combined model.
    """
    def load_model(path):
        # Read a source novel and build its Markov model; the handle is
        # closed even if mk.Text raises.
        with open(path, "r") as book:
            return mk.Text(book.read())

    def write_story(model, path, title, sent_num):
        # One "w" open replaces the original's w+ / close / reopen-"a" dance,
        # which truncated and reopened each file for no benefit.
        with open(path, "w") as output:
            output.write("%s\n\n" % title)
            generate(model, output, sent_num)

    #Create Models
    dracula = load_model("Dracula.txt")
    frankenstein = load_model("Frankenstein.txt")
    mountains = load_model("AtMoM.txt")
    #Create Samples From Individual Book Models
    write_story(dracula, "Abominations\\DifferentDracula.txt", "Different Dracula", 200)
    makePDF("Abominations\\DifferentDracula.txt")
    write_story(frankenstein, "Abominations\\NewFrankenstein.txt", "New Frankenstein", 200)
    makePDF("Abominations\\NewFrankenstein.txt")
    write_story(mountains, "Abominations\\OriginalCthulhu.txt", "Original Cthulhu", 200)
    # NOTE(review): the original never calls makePDF for OriginalCthulhu;
    # preserved as-is -- confirm whether that was intentional.
    #Combine Models and output combined story
    model = mk.combine([frankenstein, dracula])
    write_story(model, "Abominations\\Pure_Horror.txt", "Pure Horror", 55000)
    makePDF("Abominations\\Pure_Horror.txt")
main()
|
4,504 | d13957c3d3f4d34279dc660d80ca91ca84ba4a77 | # As variáveis abaixo estão recebendo uma função anônima
# PEP 8 (E731): a named function is preferred over assigning a lambda --
# it gets a proper __name__ and can carry a docstring.
def contador_letras(lista):
    """Return the length of each item in *lista*, preserving order."""
    return [len(x) for x in lista]

lista_animais = ['cachorro', 'pato', 'marreco']
print(contador_letras(lista_animais))
|
4,505 | 3343844bf49cb3f4d655613475e44a140ac3106d | from django.db import models
# Create your models here.
# (stored value, human-readable label) pairs for Appointment.status.
STATUS_CHOICES=(
    ('Pending','Pending'),
    ('Completed','Completed'))
class Appointment(models.Model):
    """A patient appointment: contact details, schedule, notes, and status."""
    first_name=models.CharField(max_length=100)
    last_name=models.CharField(max_length=100)
    # Required (null=False); 12 chars allows a country-prefixed number.
    phone_number=models.CharField(max_length=12,null=False)
    date=models.DateField(null=True)
    time=models.TimeField(default="10:00")
    # NOTE(review): field name is misspelled ('presciption'); renaming would
    # require a schema migration, so it is flagged rather than changed.
    presciption = models.TextField(max_length=100,default="Write here")
    status = models.CharField(max_length=10,choices=STATUS_CHOICES,default="Pending")
    def __str__(self):
        # NOTE(review): no separator between first and last name -- confirm
        # whether "JaneDoe" is the intended display form.
        return self.first_name + self.last_name
4,506 | f47e4d6ff079b6ac2320467d87b34ae82face032 | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class SearchConfig(AppConfig):
    """Django app configuration for the 'search' app."""
    name = 'search'
    # Translatable label shown in the Django admin.
    verbose_name = _("Search")
|
4,507 | 9e9403ea1c128e07803d080b337003055759c5ae | # project/tests/test_tmdb.py
import unittest
import json
from project.server import db
from project.server.models import Tmdb
from project.tests.base import BaseTestCase
class TestTmdb(BaseTestCase):
    """
    Testing if we have the good responses from the api

    NOTE(review): these tests hit the live TMDB API (no mocking), so they
    require network access and a valid API key, and can fail for reasons
    unrelated to the code under test.
    """
    def test_discover(self):
        """ Testing the TMDB API discover endpoint """
        response = Tmdb.discover()
        self.assertTrue(int(response.status_code) == 200)
        data = response.json()
        self.assertTrue(isinstance(data['results'], list))
        # TODO check if all the shows are in the good format (can be from_dict/to_dict)
    def test_search(self):
        """ Testing the TMDB API search endpoint """
        response = Tmdb.search('ozark')
        self.assertTrue(int(response.status_code) == 200)
        data = response.json()
        self.assertTrue(isinstance(data['results'], list))
        # TODO check if all the shows are in the good format (can be from_dict/to_dict)
    def test_detail(self):
        """ Testing the TMDB API get show """
        # 69740 is a fixed TMDB show id used across these tests.
        response = Tmdb.detail(69740)
        self.assertTrue(int(response.status_code) == 200)
        data = response.json()
        self.assertTrue(data['id'])
        self.assertTrue(data['name'])
        # TODO check if all the shows are in the good format (can be from_dict/to_dict)
    def test_similar(self):
        """ Testing the TMDB API similar endpoint """
        response = Tmdb.similar(69740)
        self.assertTrue(int(response.status_code) == 200)
        data = response.json()
        self.assertTrue(isinstance(data['results'], list))
        # TODO check if all the shows are in the good format (can be from_dict/to_dict)
    def test_seasons(self):
        """ Testing the TMDB API seasons endpoint """
        response = Tmdb.season(tmdb_show_id = 69740, season_number = 1)
        self.assertTrue(int(response.status_code) == 200)
        data = response.json()
        self.assertTrue(isinstance(data['episodes'], list))
        # TODO check if all the shows are in the good format (can be from_dict/to_dict)
if __name__ == '__main__':
unittest.main()
|
4,508 | 851cfd4e71ffd2d5fed33616abca4444474669a3 | def four_Ow_four(error):
'''
method to render the 404 error page
'''
return render_template('fourOwfour.html'),404 |
4,509 | 2c89f12d633da8da4d500dca910662d351b0958f | #!/usr/bin/python
# -*- coding:utf-8 -*-
################################################################
# 服务器程序
################################################################
import json
import time
import traceback
from flask import Flask, abort, render_template, redirect, send_from_directory, request, make_response
from flask.ext.bootstrap import Bootstrap
from tools.http_tools import WeiboHandler
from tools.db_operation.db_tools import save_user_log_info, get_user_log_info, batch_put_info, CONTENT_INFO, SCRAP_INFO, put_info, get_info, put_scrap_info, get_scraped_weibo_info
from tools.__init__ import debug_flag
from tools.scrap_tools import scrap_user
from multiprocessing import Process
# NOTE(review): 'global' at module scope is a no-op; these two statements
# can be removed.
global log_handler
global search_user_list
# Per-username WeiboHandler cache and per-username cached search results.
log_handler = {}
search_user_list = {}
# Scraper subprocesses started by to_scrap (never joined -- TODO confirm).
process_list = []
server = Flask(__name__)
bootstrap = Bootstrap(server)
def read_wh(username):
    """Return the cached WeiboHandler for username, creating one on first use."""
    if log_handler.get(username) is None:
        log_handler[username] = WeiboHandler(username, '', 'flask_server/static/png/')
    return log_handler[username]
def read_cookie():
    """Build the template's user list from the 'username' cookie (empty if absent)."""
    username = request.cookies.get('username')
    if username is None:
        user_list = []
    else:
        user_list = [{'username': username}]
    return user_list
@server.route('/')
def index():
    """Render the landing page with the logged-in user (if any)."""
    user_list = read_cookie()
    return render_template('index.html', user_list=user_list)
@server.route('/signup')
def sign_up():
    """Redirect to Weibo's own signup page."""
    return redirect('http://weibo.com/signup/signup.php')
@server.route('/login', methods=['POST'])
def log_in():
    """Handle the login form: run the Weibo login handshake, set the cookie.

    Returns JSON: stat 200 with a forward URL on success, stat 502 with a
    failure reason and a fresh captcha number otherwise.
    """
    username = request.form['id']
    wh = read_wh(username)
    wh.passwd = request.form['passwd']
    vercode = request.form['vercode']
    log_flag = request.form['logflag']
    if log_flag == '1':
        # Client says it is already logged in: just confirm and set cookie.
        resp = make_response(json.dumps({'stat': '200', 'furl': request.form['ip']}))
        resp.set_cookie('username', username)
        return resp
    # log_handler.prelog_data = get_user_log_info(username)
    data2, replace_url = wh.do_log_req(vercode)
    # retcode 0 from Weibo means the credential step succeeded.
    if int(data2['retcode'][0]) == 0:
        wh.final_log_req(replace_url)
        resp = make_response(json.dumps({'stat': '200', 'furl': request.form['ip']}))
        resp.set_cookie('username', username)
        return resp
    print 'Log in failed ... retcode:', data2['retcode'][0], ', reason:', data2['reason'][0].decode('gbk')
    no = wh.get_vercode()
    return json.dumps({'stat': '502', 'reason': data2['reason'][0].decode('gbk'), 'vercode_no': no})
@server.route('/check_log', methods=['POST'])
def check_log():
    """Report login state: 200 if logged in, 502 if a captcha pin is needed,
    501 otherwise."""
    username = request.form['id']
    wh = read_wh(username)
    wh.check_log_status(wh.open_weibo_page())
    if wh.log_flag:
        return json.dumps({'stat': '200'})
    prelog = wh.prelog()
    # save_user_log_info(username, prelog)
    try:
        if prelog['showpin'] == 1:
            no = wh.get_vercode()
            return json.dumps({'stat': '502', 'vercode_no': no})
        return json.dumps({'stat': '501'})
    except Exception, e:
        # A missing 'showpin' key (or similar) is treated as the no-pin path.
        return json.dumps({'stat': '501'})
@server.route('/logout')
def log_out():
    """Clear the username cookie and return to the landing page."""
    resp = make_response(redirect('/'))
    resp.set_cookie('username', '', expires=0)
    return resp
@server.route('/static/<path:path>')
def send_static_file(path):
    """Serve files from the static directory."""
    return send_from_directory('static', path)
@server.route('/search_user/<word>')
def search_user(word):
    """Search Weibo users for *word* and cache the result for this user."""
    username = request.cookies.get('username')
    wh = read_wh(username)
    # NOTE(review): this returns a bare dict (not a JSON string/Response)
    # when the cookie is missing, and read_wh(None) above has already
    # created a handler keyed by None -- both look unintended.
    if username is None:
        return {'stat': '404'}
    search_user_list[username] = wh.get_user_list(word)
    if debug_flag:
        print search_user_list
    return json.dumps({'stat': '200', 'result': search_user_list[username]})
@server.route('/scrap/<user_no>')
def to_scrap(user_no):
    """Kick off a background process scraping the user picked from the last search."""
    username = request.cookies.get('username')
    if username is None:
        return render_template('index.html')
    user = search_user_list[username][int(user_no)]
    # Next scrap id = last stored id + 1 (0 when the table is empty).
    last_record = get_info(SCRAP_INFO, cond=' 1=1 order by id desc limit 1')
    scrap_id = 0 if len(last_record) == 0 else (int(last_record[0]['id']) + 1)
    put_scrap_info(scrap_id, username, user['user_id'], '开始爬取%s的所有微博内容...' % user['title'])
    sp = Process(target=scrap_process, name='%s_%s_%s' % (username, user['user_id'], scrap_id), args=(username, user, scrap_id))
    sp.start()
    process_list.append(sp)
    return redirect('/scrap_listen?d=%s' % scrap_id)
@server.route('/scrap_listen', methods=['GET'])
def scrap_listen():
    """Render the progress page for the scrap job given by the ?d= parameter."""
    scrap_id = request.args.get('d')
    if debug_flag:
        print scrap_id
    user_list = read_cookie()
    return render_template('scrap_listen.html', scrap_id=scrap_id, user_list=user_list)
@server.route('/read_scrap/<scrap_id>/<last_message_id>')
def read_scrap(scrap_id, last_message_id):
    """Return scrap-log rows newer than last_message_id as JSON.

    NOTE(review): URL path values are interpolated straight into the SQL
    condition string -- SQL injection risk; should be parameterized.
    """
    data = get_info(SCRAP_INFO, cond=' scrap_id=%s and id > %s ' % (scrap_id, last_message_id))
    return json.dumps(data)
def scrap_process(username, user, scrap_id):
    """Subprocess body: scrape all of a user's posts and persist them,
    recording success or failure in the scrap-info table."""
    try:
        wh = read_wh(username)
        data_list = scrap_user(wh, user, scrap_id, 0)
        batch_put_info(CONTENT_INFO, data_list)
        put_scrap_info(scrap_id, username, user['user_id'], '爬取完毕!共爬取%s%s条微博.保存至数据库....' % (user['title'], len(data_list)), 1)
    except Exception, e:
        traceback.print_exc()
        put_scrap_info(scrap_id, username, user['user_id'], '出现异常,数据未保存,请重新爬取数据!', -1)
@server.route('/search')
def search_scrap_result():
    """Render the page for searching previously scraped posts."""
    user_list = read_cookie()
    return render_template('/search.html', user_list=user_list)
@server.route('/search_scraped_weibo/<username>', methods=['GET'])
def search_scraped_weibo(username):
    """Return a user's scraped posts as JSON, optionally filtered by ?keyword=."""
    print 'here'  # NOTE(review): leftover debug prints
    keyword = request.args.get('keyword')
    print 'there'
    if keyword is None:
        weibo_list = get_scraped_weibo_info(username)
    else:
        weibo_list = get_scraped_weibo_info(username, keyword)
    return json.dumps({'stat': '200', 'result': weibo_list})
|
4,510 | e1c68c7eb899718dd1c28dc6e95d5538c2b8ad74 | import copy
import math
import operator
import numpy as np, pprint
def turn_left(action):
    """Map a unit direction vector to the direction 90 degrees to its left.

    Returns None for anything that is not one of the four unit moves.
    """
    left_of = {
        (-1, 0): (0, -1),
        (0, 1): (-1, 0),
        (1, 0): (0, 1),
        (0, -1): (1, 0),
    }
    return left_of.get(action)
def turn_right(action):
    """Map a unit direction vector to the direction 90 degrees to its right.

    Returns None for anything that is not one of the four unit moves.
    """
    right_of = {
        (-1, 0): (0, 1),
        (0, 1): (1, 0),
        (1, 0): (0, -1),
        (0, -1): (-1, 0),
    }
    return right_of.get(action)
def addTwoTuples(a, b):
    """Return the element-wise sum of two tuples (truncates to the shorter)."""
    return tuple(x + y for x, y in zip(a, b))
def argmax(seq, fn):
    """Return the element of *seq* with the highest fn score.

    On ties, the earliest element wins. seq must be non-empty.
    """
    best, best_score = seq[0], fn(seq[0])
    for candidate in seq[1:]:
        score = fn(candidate)
        if score > best_score:
            best, best_score = candidate, score
    return best
def go(current_state, action, grid_size):
    """Apply *action* to *current_state* on a grid_size x grid_size board.

    Moves that would leave the board return the state unchanged.
    """
    x = current_state[0] + action[0]
    y = current_state[1] + action[1]
    if 0 <= x < grid_size and 0 <= y < grid_size:
        return (x, y)
    return current_state
def play(env, policy):
    """Simulate *policy* on *env* for 10 seeded episodes; return the mean utility.

    Movement is stochastic: the intended move is taken with p=0.7, a left
    turn, right turn, or reversal with p=0.1 each (decided by thresholds on
    a pre-drawn stream of uniform samples, seeded per episode for
    reproducibility).
    """
    utility_values = []
    for j in range(10):
        pos = env.start_loc
        utility = 0
        np.random.seed(j)
        swerve = np.random.random_sample(1000000)
        k = 0
        while pos != env.terminal_loc:
            move = policy[pos]
            if swerve[k] > 0.7:
                if swerve[k] > 0.8:
                    if swerve[k] > 0.9:
                        move = turn_right(turn_right(move))
                    else:
                        move = turn_right(move)
                else:
                    move = turn_left(move)
            k += 1
            pos = go(pos, move, env.grid_size)
            utility += env.get_reward(pos)
        utility_values.append(utility)
    # print utility_values
    # Mean over the 10 episodes, floored to an int.
    cost = int(math.floor(sum(utility_values) / len(utility_values)))
    return cost
class GridMDP:
    """Markov decision process over a square grid with noisy movement.

    States are (x, y) cells; actions are the four unit moves. Rewards are
    -1 per step, -101 on obstacles, and 99 at the terminal cell.
    """
    def __init__(self, grid_size):
        self.grid_size = grid_size
        self.action_dim = (4,)
        # North, south, East, West
        self.action_coordinates = [(-1, 0), (1, 0), (0, 1), (0, -1)]
        # Step cost of -1 everywhere; obstacles and the goal overwrite later.
        self.rewards = [[-1 for x in range(grid_size)] for y in range(grid_size)]
        self.gamma = 0.9
        self.epsilon = 0.1
        self.states = [(x, y) for x in range(grid_size) for y in range(grid_size)]
        self.utility = None
        self.policy = None
        # Transition table: state number -> action index -> [(prob, next_state)].
        self.T = None
    def __deepcopy__(self, memodict={}):
        """Copy rewards/policy; the transition dict is shallow-copied since
        its entries are never mutated in place."""
        copy_object = GridMDP(self.grid_size)
        copy_object.rewards = copy.deepcopy(self.rewards)
        copy_object.T = dict(self.T)
        # copy_object.T = self.generate_trans_matrix()
        copy_object.policy = copy.deepcopy(self.policy)
        return copy_object
    # For every obstacle add a -101 as reward
    def add_obstacles(self, list_of_obstacles):
        for obstacle in list_of_obstacles:
            self.rewards[obstacle[0]][obstacle[1]] = -101
    # Keep a track of every start location
    def add_start_location(self, start_loc):
        self.start_loc = start_loc
    # Update the reward as 99 for every end location
    def add_end_location(self, end_loc):
        """Mark end_loc terminal: reward 99 and self-absorbing transitions."""
        self.terminal_loc = end_loc
        self.rewards[end_loc[0]][end_loc[1]] = 99
        end_loc_no = end_loc[0] * self.grid_size + end_loc[1]
        action_list = {}
        # No of action co-ordinates
        for i in range(4):
            action_list[i] = self.turn(end_loc, None)
        self.T[end_loc_no] = action_list
    def get_reward(self, state):
        return self.rewards[state[0]][state[1]]
    def get_actions(self, state):
        """All four moves, except at the terminal state (only None)."""
        if state == self.terminal_loc:
            return [None]
        else:
            return self.action_coordinates
    def go(self, current_state, action):
        return go(current_state, action, self.grid_size)
    def turn(self, current_state, action):
        """Return [(prob, next_state)]: 0.7 intended, 0.1 each for right,
        left, and reverse; the terminal/None action stays put."""
        if action is None:
            return [(0, current_state)]
        else:
            return [(0.7, self.go(current_state, action)),
                    (0.1, self.go(current_state, turn_right(action))),
                    (0.1, self.go(current_state, turn_left(action))),
                    (0.1, self.go(current_state, turn_left(turn_left(action))))]
    def run_with_trans_matrix(self):
        """Value iteration using the precomputed transition table; returns
        the greedy policy once the utility change falls below the
        epsilon*(1-gamma)/gamma convergence bound."""
        utility1 = dict([(s, 0) for s in self.states])
        while True:
            delta = 0
            revised_utility1 = utility1
            for s in self.states:
                state_no = s[0] * self.grid_size + s[1]
                u = utility1[s]
                max_util = - float("inf")
                for i in range(len(self.action_coordinates)):
                    # a = self.action_coordinates[i]
                    util = 0
                    for (p, s1) in self.T[state_no][i]:
                        util += (p * revised_utility1[s1])
                    if util > max_util:
                        max_util = util
                utility1[s] = self.get_reward(s) + self.gamma * max_util
                delta = max(delta, abs(u - utility1[s]))
            if delta < self.epsilon * (1 - self.gamma) / self.gamma:
                break
        self.utility = utility1
        pi = self.get_policy()
        return pi
    def get_policy(self):
        """Greedy policy with respect to the current utility estimate."""
        policy = {}
        for s in self.states:
            policy[s] = argmax(self.get_actions(s), lambda a: self.expected_utility(a, s, self.utility))
        self.policy = policy
        return policy
    def expected_utility(self, a, s, utility):
        return sum([p * utility[s1] for (p, s1) in self.turn(s, a)])
    def generate_trans_matrix(self):
        """Build self.T for every state and action index."""
        transmat = {}
        action_list = {}
        for s in range(self.grid_size * self.grid_size):
            x_coord = s // self.grid_size
            y_coord = s % self.grid_size
            state = (x_coord, y_coord)
            for i in range(len(self.action_coordinates)):
                action_list[i] = self.turn(state, self.action_coordinates[i])
            transmat[s] = action_list
            # Fresh dict per state so entries are not shared.
            action_list = {}
        self.T = transmat
def read_file(input_file_name):
    """Parse the problem file and build the environment.

    File layout: grid size, car count, obstacle count, then obstacle
    coordinates, car start coordinates, and car end coordinates (one per
    line, written "y,x" and reversed on read).

    Returns [GridMDP, {car_index: {"Start": (x, y), "End": (x, y)}}].
    """
    with open(input_file_name, "r") as file:
        # Read 1st line for the grid size
        line = file.readline().rstrip()
        grid_size = int(line)
        environment = GridMDP(grid_size)
        # Read 2nd line for the no. of cars
        line = file.readline().rstrip()
        no_of_cars = int(line)
        # Read 3rd line for the no. of obstacles
        line = file.readline().rstrip()
        no_of_obstacles = int(line)
        location_of_obstacles = []
        car_locations = {}
        for i in range(0, no_of_cars):
            car_locations[i] = {}
        # Read all the obstacles co-ordinates
        while len(location_of_obstacles) != no_of_obstacles:
            loc = map(int, file.readline().rstrip().split(",")[::-1])
            location_of_obstacles.append(tuple(loc))
        car_cnt = 0
        # Read all the car start location co-ordinates
        while car_cnt < no_of_cars:
            loc = map(int, file.readline().rstrip().split(",")[::-1])
            car_locations[car_cnt]["Start"] = tuple(loc)
            car_cnt += 1
        car_cnt = 0
        # Read all the car terminal location co-ordinates
        while car_cnt < no_of_cars:
            loc = map(int, file.readline().rstrip().split(",")[::-1])
            car_locations[car_cnt]["End"] = tuple(loc)
            car_cnt += 1
        if len(location_of_obstacles) > 0:
            environment.add_obstacles(location_of_obstacles)
        environment.generate_trans_matrix()
        return [environment, car_locations]
def execute(inputfile="grading_case/input30.txt"):
    """Solve the MDP for each car and print its simulated mean utility.

    Policies are cached per end location (value iteration is end-location
    dependent only) so repeated goals are not re-solved.
    NOTE(review): this file is Python 2 (statement print below).
    """
    original_grid, car_locations = read_file(inputfile)
    endloc_policy = {}
    # f = open("output.txt", "w")
    for car, locations in car_locations.items():
        start_loc = locations["Start"]
        end_loc = locations["End"]
        if start_loc == end_loc:
            # Already at the goal: fixed payoff, no simulation needed.
            cost = 100
        else:
            if end_loc in endloc_policy:
                # Reuse the solved grid/policy for this end location.
                grid = copy.deepcopy(endloc_policy[end_loc])
                grid.add_end_location(end_loc)
                pi = grid.policy
            else:
                grid = copy.deepcopy(original_grid)
                grid.add_end_location(end_loc)
                pi = grid.run_with_trans_matrix()
                endloc_policy[end_loc] = grid
            grid.add_start_location(start_loc)
            cost = play(grid, pi)
        print cost
        # f.write(str(cost) + "\n")
    # f.close()
if __name__ == "__main__":
execute()
|
class Node:
    """A singly linked list node holding a value and a next pointer."""
    def __init__(self, dataVal=None):
        self.dataVal = dataVal
        self.nextVal = None
class LinkedList:
    """A hand-rolled singly linked list with insert/delete/reverse helpers.

    NOTE(review): several methods have latent bugs, flagged inline; they are
    documented rather than changed because callers may rely on the current
    behavior.
    """
    def __init__(self):
        self.headVal = None
    def atBeginning(self, data):
        """Insert a new node at the head; returns the node."""
        NewNode = Node(data)
        NewNode.nextVal = self.headVal
        self.headVal = NewNode
        return NewNode
    def atEnd(self, data):
        """Append a new node at the tail (O(n)); returns the node."""
        NewNode = Node(data)
        NewNode.nextVal = None
        if self.headVal is None:
            self.headVal = NewNode
            return NewNode
        last = self.headVal
        while(last.nextVal):
            last = last.nextVal
        last.nextVal = NewNode
        return NewNode
    def inBetween(self, n1, n2, data):
        """Splice a new node between n1 and n2 (assumes n1.nextVal is n2)."""
        NewNode = Node(data)
        n1.nextVal = NewNode
        NewNode.nextVal = n2
        return NewNode
    def deleteNode(self,node):
        """Remove the first node whose value equals node's value.

        NOTE(review): if no match exists, last.nextVal becomes None and the
        comparison on the next pass raises AttributeError -- confirm callers
        only delete present nodes.
        """
        last = self.headVal
        if self.headVal == node:
            self.headVal = node.nextVal
            return
        else:
            while(last):
                if (last.nextVal.dataVal) == (node.dataVal):
                    if last.nextVal is not None:
                        last.nextVal = node.nextVal
                        return
                    else:
                        self.headVal.nextVal = None
                last = last.nextVal
    def printList(self):
        """Print each node's value, head to tail."""
        self.printVal = self.headVal
        while self.printVal is not None:
            # print(self.printVal.dataVal)
            print(self.printVal.dataVal)
            self.printVal = self.printVal.nextVal
    # def isPalindrome(self):
    def deleteNthNode(self, n):
        """Delete the n-th node (counting the head as 1).

        NOTE(review): the counter starts at 2, so it actually unlinks the
        node *after* position n-1; verify the intended indexing.
        """
        last = self.headVal
        i=2
        while(last):
            if i == n:
                prevNode = last
                nextNode = last.nextVal.nextVal
                prevNode.nextVal = nextNode
                return
            i+=1
            last = last.nextVal
    def getNthNode(self, n):
        """Print the value of the n-th node (1-based); no-op if out of range."""
        curr = self.headVal
        i=1
        while(curr):
            if i == n:
                print (curr.dataVal)
                return
            i+=1
            curr = curr.nextVal
    def reverse(self, node):
        """Recursively reverse the sublist starting at node, repointing headVal
        to the old tail. Prints are debug leftovers."""
        print("2:", node.dataVal)
        if node.nextVal == None:
            self.headVal = node
            return
        print("3:", node.dataVal)
        self.reverse(node.nextVal)
        tmp = node.nextVal
        tmp.nextVal = node
        node.nextVal = None
    def reverseLinkedList(self):
        """Reverse the whole list in place via reverse().

        After the first reverse() the old head's nextVal is None, so the
        while loop runs exactly once.
        """
        curr = self.headVal
        print("1:", curr.dataVal)
        while(curr):
            self.reverse(curr)
            curr = curr.nextVal
    def checkPalindrome(self):
        """NOTE(review): broken/unfinished -- the loop condition
        `curr.dataVal is None` is immediately false for any populated list
        and the method returns nothing."""
        curr = self.headVal
        firstNode = self.headVal
        nextNode = curr.nextVal
        ans = False
        while(curr.dataVal is None):
            if curr.dataVal == firstNode.dataVal:
                ans = True
# Demo: build the list 1 -> 2 -> 3 -> 4 and reverse it in place.
# (The commented-out lines below are earlier experiments kept by the author.)
list1 = LinkedList()
list1.headVal = Node(1)
e2 = Node(2)
e3 = Node(3)
e4 = Node(4)
list1.headVal.nextVal = e2
e2.nextVal = e3
e3.nextVal = e4
e4.nextVal = None
# list1.headVal = Node('Sunday')
# e2 = Node('Monday')
# e3 = Node('Tuesday')
# e4 = Node('Wednesday')
# e5 = Node('Thursday')
# e6 = Node('Friday')
# e7 = Node('Saturday')
# list1.headVal.nextVal = e2
# e2.nextVal = e3
# e3.nextVal = e4
# e4.nextVal = e5
# e5.nextVal = e6
# e6.nextVal = e7
# e8 = list1.atBeginning('MyTestJanuary')
# e9 = list1.atEnd('MyTestDecember')
# e10 = list1.inBetween(list1.headVal, e2, 'I hate this')
# e11 = list1.inBetween(e6, e7, 'I love this')
# list1.deleteNode(e2)
# list1.printList()
# list1.deleteNode(list1.headVal)
# list1.printList()
# print(">>>>>>>>>",type(e6), type(e8), type(e10))
# list1.deleteNode(e9)
# print("Deleting the last node>>>>>>")
# list1.deleteNthNode(3)
# list1.getNthNode(3)
# list1.printList()
list1.reverseLinkedList()
# list1.printList()
# e10 = list1.atBeginning('1')
# e8.nextVal = None
|
4,512 | bf60e34190f4c453c85baaf2fbbff027fb77b7c8 | import os
import sendgrid
class Mail:
    """One outgoing message sent through the legacy SendGrid v2 client.

    NOTE(review): Python 2 code (statement prints); the prints look like
    debug leftovers.
    """
    def __init__(self, to, subject, msg):
        self.to = to
        self.subject = subject
        self.msg = msg
    def send(self):
        """Send the message; the API key comes from the SENDGRID_KEY env var
        (empty string if unset)."""
        sg = sendgrid.SendGridClient(os.environ.get('SENDGRID_KEY', ''))
        message = sendgrid.Mail()
        message.add_to(self.to)
        print self.to
        message.set_subject(self.subject)
        print self.subject
        # Same body is used for both HTML and plain-text parts.
        message.set_html(self.msg)
        message.set_text(self.msg)
        print self.msg
        message.set_from('cx-col <encuestas@cx-col.com>')
        print (sg.send(message))
4,513 | d5c6582547df540ffc9c73d10a3405ec97487bba | #!/usr/bin/env python
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import subprocess
import sys
import os
def main():
    """Write a C++ translation unit exposing the test-fixtures path.

    Command line: --fixtures_location_file is the file to write,
    --fixtures_location is the path embedded in GetFixturesPath().
    """
    parser = argparse.ArgumentParser(
      description='Create the symbol specifying the location of test fixtures.')
    parser.add_argument('--fixtures_location_file', type=str, required=True)
    parser.add_argument('--fixtures_location', type=str, required=True)
    args = parser.parse_args()
    source = (
        'namespace flutter {namespace testing {const char* GetFixturesPath() '
        '{return "%s";}}}' % args.fixtures_location
    )
    with open(args.fixtures_location_file, 'w') as file:
        file.write(source)
if __name__ == '__main__':
sys.exit(main())
|
4,514 | bf1d54015a9ae529f4fda4fa9b9f7c874ec3b240 | #!/usr/bin/python
import serial
import time
import sys
senderId="\x01"
receiverId="\x00"
#openSerial just opens the serial connection
def openSerial(port):
    """Open *port* at 300 baud, 8 data bits, 2 stop bits; return the handle."""
    #Some configuration for the serial port
    ser = serial.Serial()
    ser.baudrate = 300
    ser.port = port
    ser.bytesize = 8
    ser.stopbits = 2
    ser.open()
    return ser
def initializePort(ser, payloadLen, sender, receiver, layerVersion="\x02"):
    """Write the 5-byte link header: sender, receiver, layer version,
    payload length, and protocol type (each a single raw byte)."""
    #SenderID
    ser.write(sender)
    #ReceiverID
    ser.write(receiver)
    #layerconfig: At the moment layer2
    ser.write(layerVersion)
    #payloadlen
    ser.write(payloadLen)
    #USART Protocol type: No one reads this field at the moment
    ser.write("\x01")
def main():
    """Open the given serial port, send the header, and echo received bytes
    until a NUL terminator arrives.

    Usage: sender.py <port> [anything] -- a third argument switches to the
    "reliable transport" layer version (0x03). Python 2 code.
    """
    if (len(sys.argv) < 2):
        print "sender.py <port>"
        sys.exit(1)
    layerVersion = "\x02"
    if (len(sys.argv) >= 3):
        layerVersion = "\x03"
        print "Use reliable transport"
    ser = openSerial(sys.argv[1])
    # Give the device time to settle after opening the port.
    time.sleep(2)
    initializePort(ser, payloadLen="\x01", sender="\x01", receiver="\x00", layerVersion=layerVersion)
    time.sleep(0.5)
    char = 1
    while (char != "\x00"):
        char = ser.read(1)
        sys.stdout.write(char)
        sys.stdout.flush()
    print ""
    # NOTE(review): char is already "\x00" here, so this second loop never
    # executes -- confirm whether a second message read was intended.
    while (char != "\x00"):
        char = ser.read(1)
        sys.stdout.write(char)
        sys.stdout.flush()
    ser.close()
if __name__ == '__main__':
main()
|
4,515 | fe1d47b63e88935f8b2eb4bac883f3028d6f560b | from flask import render_template, request, redirect, url_for
from flask_login import current_user
from application import app, db, login_required
from application.auth.models import User
from application.memes.models import Meme
from application.comments.forms import CommentForm
# only a dummy new comment form
@app.route("/comments/new/")
@login_required(role="ANY")
def comments_form():
    # Renders an empty CommentForm; any logged-in role may view it.
    return render_template("comments/new.html", form = CommentForm())
4,516 | 1ff2f06349ab1906a1649bdb83828fbdb3cf584f | #!/usr/bin/env python
# coding: utf-8
#%%:
import secrets
import hashlib
import base64
import ecdsa
from sys import byteorder
#%%:
class k_box:
    """Secp256k1 key pair: holds a private key and derives public forms.

    Private attributes:
        __priv_key: 32 raw private-key bytes.
        __pub_key: 64-byte uncompressed public point (x || y), no prefix.
    """

    def __init__(self, string = 0, file = 0):
        # string: optional 64-char hex private key; otherwise a random
        # 256-bit key is drawn from the secrets module.
        # NOTE(review): the `file` parameter is accepted but never used.
        if string != 0:
            if not(len(string) == 64):
                raise Exception("Bad len")
            self.__priv_key = bytes.fromhex(string)
        else:
            self.__priv_key = secrets.randbits(256).to_bytes(32,byteorder=byteorder)
        self.__pub_key = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.SECP256k1).verifying_key.to_string()

    def get_secret_key(self):
        # Raw 32-byte private key.
        return self.__priv_key

    def get_public_key(self)->bytearray:
        # Uncompressed SEC1 form: 0x04 prefix + x + y.
        return (0x04.to_bytes(1,byteorder=byteorder) + self.__pub_key)

    def get_public_key_compresed(self) -> bytearray:
        # Compressed SEC1 form: 0x02/0x03 parity prefix + x coordinate.
        return (b'\x03' if self.__pub_key[-1] % 2 else b'\x02') + self.__pub_key[0:32]

    def get_address(self) -> str:
        # Base58Check address: version byte 0x00 + hash160 + 4-byte checksum.
        e_pub = self.get_encrypted_pub_key()
        main_net_key = 0x00.to_bytes(1,byteorder=byteorder) + e_pub
        check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()).digest()[:4]
        hex_addr = main_net_key + check_sum
        return base58_encode(hex_addr)

    def get_encrypted_pub_key(self):
        # hash160 of the compressed public key: RIPEMD160(SHA256(pub)).
        sha = hashlib.sha256(self.get_public_key_compresed()).digest()
        result = hashlib.new(name='ripemd160', data=sha).digest()
        return result

    def sign(self, message:bytes = 0) -> bytearray:
        # ECDSA signature of *message* with the private key.
        sk = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.SECP256k1 )
        return sk.sign(message)

    def verify(self, signature, message):
        # NOTE(review): encodes the message here while sign() takes bytes --
        # callers must pass str to verify; confirm this asymmetry is intended.
        vk = ecdsa.VerifyingKey.from_string(self.__pub_key, curve=ecdsa.SECP256k1)
        return vk.verify(signature, message.encode())
#%%:
def covert_to_address(pub_key:bytes) -> str:
    """Hash a public key (SHA-256 then RIPEMD-160) into a Base58Check address."""
    digest = hashlib.new(name='ripemd160', data=hashlib.sha256(pub_key).digest()).digest()
    versioned = b'\x00' + digest
    checksum = hashlib.sha256(hashlib.sha256(versioned).digest()).digest()[:4]
    return base58_encode(versioned + checksum)
#%%:
def base58_encode(n:bytearray)->str:
    """Encode *n* in Bitcoin-style Base58; leading zero bytes become '1's."""
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    hex_repr = n.hex()
    # Each leading zero byte contributes two leading '0' hex chars.
    zero_bytes = (len(hex_repr) - len(hex_repr.lstrip('0'))) // 2
    value = int.from_bytes(n, byteorder="big")
    digits = []
    while value > 0:
        value, rem = divmod(value, 58)
        digits.append(alphabet[rem])
    return '1' * zero_bytes + ''.join(reversed(digits))
def base58_decode(s):
    """Decode a base58-encoding string, returning bytes"""
    if not s:
        return b''
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    # Accumulate the big-endian integer value of the string.
    value = 0
    for ch in s:
        if ch not in alphabet:
            raise Exception('Character %r is not a valid base58 character' % ch)
        value = value * 58 + alphabet.index(ch)
    # Integer -> bytes via hex, left-padded to a whole number of bytes.
    hex_repr = '%x' % value
    if len(hex_repr) % 2:
        hex_repr = '0' + hex_repr
    decoded = bytearray.fromhex(hex_repr)
    # Restore the zero bytes encoded as leading '1' characters.
    pad = 0
    for ch in s[:-1]:
        if ch != alphabet[0]:
            break
        pad += 1
    return b'\x00' * pad + decoded
# def base58_decode(s:str, len):
# alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
# result = 0
# for c in s:
# result = result * 58 + alphabet.index(c)
# return bytearray.fromhex(f"{result:0x}".rjust(len * 2, '0'))
# %%:
def to_WIF(key:str):
    """Convert a 64-hex-char private key into Wallet Import Format."""
    if len(key) != 64:
        raise Exception("Bad key len")
    # Version byte 0x80 + key, then a 4-byte double-SHA256 checksum.
    payload = bytes.fromhex("80" + key)
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[0:4]
    return base58_encode(payload + checksum)
def f_import_private(filename):
    """Load a WIF-encoded private key from *filename* and build a k_box.

    The file is now read through a context manager so the handle is closed
    even if reading or decoding fails (the original leaked it on error).
    """
    with open(filename, 'r') as handle:
        wif_key = handle.read()
    key = from_WIF(wif_key)
    return k_box(string=key.hex())
#%%:
def from_WIF(wif_key):
    """Decode a WIF string back to the raw 32-byte private key."""
    if len(wif_key) != 51:
        raise Exception("Bad len of WIF key")
    decoded = base58_decode(wif_key)
    # Layout: version byte, 32 key bytes, 4 checksum bytes.
    checksum = decoded[-4:]
    key = decoded[1:33]
    expected = hashlib.sha256(hashlib.sha256(0x80.to_bytes(1,"big") + key).digest()).digest()[0:4]
    if expected != checksum:
        raise Exception("Bad checksum")
    return key
#%%:
def uncompress_key(comp_key: bytearray):
    """Expand a SEC1 compressed secp256k1 point to its uncompressed form.

    Bug fix: the parity prefix is byte 0 of the key (0x02 = even y,
    0x03 = odd y); the old code compared comp_key[1] with the *string* '2',
    which is always False, so the wrong square root could be chosen.
    """
    x = int.from_bytes(comp_key[1:], byteorder='big')
    is_even = comp_key[0] == 2
    # Derive y from x: solve y^2 = x^3 + ax + b over F_p, then pick the
    # root whose parity matches the prefix.
    curve = ecdsa.SECP256k1.curve
    a, b, p = curve.a(), curve.b(), curve.p()
    alpha = (pow(x, 3, p) + a * x + b) % p
    beta = ecdsa.numbertheory.square_root_mod_prime(alpha, p)
    if (beta % 2) == is_even:
        beta = p - beta
    return bytearray.fromhex( f"04{x:064x}{beta:064x}")
4,517 | a2fc9d947c75eaaaeafcd92750c99f4cfcdb9d7d | # python3
from random import randint
def partition3(array, left, right):
    """Three-way (Dutch-flag) partition of array[left..right] around array[right].

    Returns (lo, hi): afterwards array[lo..hi] holds elements equal to the
    pivot, everything left of lo is smaller, everything right of hi larger.
    """
    pivot = array[right]
    begin = left - 1  # end of the "< pivot" region
    end = left - 1    # end of the "<= pivot" region
    for j in range(left, right):
        if array[j] < pivot:
            begin += 1
            array[begin], array[j] = array[j], array[begin]
            end += 1
            # The element displaced out of the "<" region may itself equal
            # the pivot; keep it adjacent to the equal region.
            if array[j] == pivot:
                array[end], array[j] = array[j], array[end]
        elif array[j] == pivot:
            end += 1
            array[end], array[j] = array[j], array[end]
    # Move the pivot just after the equal region.
    array[end + 1], array[right] = array[right], array[end + 1]
    return begin + 1, end + 1


def randomized_quick_sort(array, left, right):
    """Sort array[left..right] in place with 3-way randomized quicksort.

    Bug fix: partition3 takes its pivot from array[right], but the randomly
    chosen element used to be swapped to array[left], so the randomization
    had no effect (quadratic on adversarial input). The random pick is now
    swapped to the right end before partitioning.
    """
    if left >= right:
        return
    k = randint(left, right)
    array[right], array[k] = array[k], array[right]
    small, equal = partition3(array, left, right)
    randomized_quick_sort(array, left, small - 1)
    randomized_quick_sort(array, equal + 1, right)
if __name__ == '__main__':
    # Read n, then the n integers, sort in place, print space-separated.
    input_n = int(input())
    elements = list(map(int, input().split()))
    assert len(elements) == input_n
    randomized_quick_sort(elements, 0, len(elements) - 1)
    print(*elements)
|
4,518 | 32e3eed2e279706bca2925d3d9d897a928243b4c | class Handlers():
change_store = "/change_store"
change_status = "/change_status"
mail = "/mail"
get_status = "/get_status"
create_order = "/create_order"
ask_store = "/ask_store"
check = "/check"
test = "/test"
|
4,519 | 71a0900dc09b1ff55e4e5a4cc7cab617b9c73406 | from django.shortcuts import render, get_object_or_404
# Create your views here.
from django.http import HttpResponse
from .models import Post
from django.utils import timezone
def list_of_posts(request):
    """Render all posts published up to now, oldest first."""
    published = Post.objects.filter(published_date__lte=timezone.now())
    posts = published.order_by('published_date')
    return render(request, 'blog/list_of_posts.html', {'posts': posts})
def post_detail(request, pk):
    """Render the detail page for one post, 404 if it does not exist."""
    context = {'post': get_object_or_404(Post, pk=pk)}
    return render(request, 'blog/post_detail.html', context)
|
4,520 | c55b6fed92a5f4f2961c6f8d5b150b22a5f622e8 | import datetime
import time
import requests
from config import url
from data import DistrictList
import random
import pymysql
def base_url():
    """Return the API base URL taken from config.

    The previous version built a throwaway request dict (including unused
    headers) only to read this single value back out of it.
    """
    return url
# Generate `length` random decimal digits, optionally appended to a prefix.
def random_Num(length, string=None):
    """Return *length* random digits joined after the given prefix chars.

    Args:
        length: number of random digits to generate.
        string: optional list of prefix characters; it is appended to in
            place when provided (preserving the old caller-visible
            mutation). Bug fix: the old default `string=[]` was a shared
            mutable default, so repeated calls without a prefix kept
            accumulating digits from earlier calls.

    Returns:
        The prefix plus the new random digits as a single string.
    """
    if string is None:
        string = []
    for i in range(length):
        string.append(str(random.randint(0, 9)))
    return ''.join(string)
# a = random_Num(9, ['1','3'])
# b = random_Num(6, ['粤','B'])
# c = random_Num(9)
# print(a,b,c)
# Generate an ID-card number together with the matching birthday.
def generator():
    """Generate a random mainland-China citizen ID and a birthday timestamp.

    Returns:
        (id, timstamp): the 18-character ID (district code + birth date +
        sequence number + check character) and the birth date as a
        millisecond Unix timestamp.
    """
    # Random district code and a birth date up to ~45 years in the past.
    districtcode = DistrictList[random.randint(0, len(DistrictList) - 1)]['code']
    date = datetime.datetime.now() - datetime.timedelta(weeks=random.randint(1, 2350))
    birthDay = date.strftime('%Y%m%d')
    randomNum = str(random.randint(100, 300))
    idnum = districtcode + birthDay + randomNum
    # GB 11643-1999 check digit: weighted sum of the first 17 digits mod 11.
    weight = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]
    # Remainder -> check character. Bug fix: remainder 8 maps to '4'
    # (the sequence is 1 0 X 9 8 7 6 5 4 3 2), not '5' as before.
    checkcode = {'0': '1', '1': '0', '2': 'X', '3': '9', '4': '8', '5': '7',
                 '6': '6', '7': '5', '8': '4', '9': '3', '10': '2'}
    count = 0
    for i in range(0, len(idnum)):
        count = count + int(idnum[i]) * weight[i]
    id = idnum + checkcode[str(count % 11)]
    # Millisecond timestamp of the generated birth date.
    timstamp = int(time.mktime(date.timetuple()) * 1000)
    return id, timstamp
# One (id, timestamp) pair is generated at import time and shared by the
# two accessors below.
a = generator()


def returnId():
    # The generated 18-character ID number.
    return a[0]


def returnTimestamp():
    # The matching birth-date timestamp in milliseconds.
    return a[1]
# Shared helper for running a query against the test database.
def query_mysql(sql, *params, database="zbcf_injury_test"):
    """Execute *sql* with *params* and return the first row as a dict.

    Returns None when the query yields no rows. The cursor and connection
    are now closed in finally blocks so an error in execute/fetch no
    longer leaks the connection.
    NOTE(review): credentials are hard-coded; move them to configuration.
    """
    conn = pymysql.connect(host="rm-wz97oujls3998784i.mysql.rds.aliyuncs.com", user="testuser",
                           password="testuser@2018", database=database, charset='utf8',
                           cursorclass=pymysql.cursors.DictCursor)
    try:
        cursor = conn.cursor()
        try:
            cursor.execute(sql, params)
            return cursor.fetchone()
        finally:
            cursor.close()
    finally:
        conn.close()
# 模拟订单超过48/12小时/7天
# 只需将数据库过期时间设置为当前时间
# def orderTimeOut(operation_id):
# now = datetime.datetime.now()
# now = now.strftime('%Y-%m-%d %H:%M:%S')
# # delta = datetime.timedelta(days=outTime)
# # now = now + delta
# print(now)
# # sql = "UPDATE t_auth_info t SET end_effect_time = str_to_date(\'%s\','%%Y-%%m-%%d %%H:%%i:%%s') WHERE t.operation_id = '%s'"
# sql = "UPDATE t_auth_info t SET end_effect_time = '%s' WHERE t.operation_id = '%s'"
# params = [now, operation_id]
# update_result = query_mysql(sql, *params)
# return update_result
# # return now.strftime('%Y-%m-%d %H:%M:%S')
# # 有什么问题??
# a = orderTimeOut(289)
# print(a)
# Simulate an order exceeding its 48h/12h/7-day window by setting the
# auth record's expiry time to "now" in the database.
def orderTimeOut(order_id, database="zbcf_injury_test"):
    """Expire the auth record belonging to *order_id*; return affected rows.

    The UPDATE now uses driver-side parameter binding instead of %-string
    formatting (which was an SQL-injection risk), and the connection and
    cursor are closed in finally blocks.
    """
    conn = pymysql.connect(host="rm-wz97oujls3998784i.mysql.rds.aliyuncs.com", user="testuser",
                           password="testuser@2018", database=database, charset='utf8',
                           cursorclass=pymysql.cursors.DictCursor)
    try:
        cursor = conn.cursor()
        try:
            now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            sql = ("UPDATE t_auth_info t SET end_effect_time = %s "
                   "WHERE t.operation_id = (SELECT id from t_operation where order_id = %s)")
            effectRows = cursor.execute(sql, (now, order_id))
            conn.commit()
            print('make order time out!')
            return effectRows
        finally:
            cursor.close()
    finally:
        conn.close()
# a = orderTimeOut(260)
# print(a)
def sleep(num):
    # Thin wrapper around time.sleep, kept for test-script readability.
    time.sleep(num)
# Look up the login code (token) by order id.
def queryLoginNum(orderId):
    # Latest non-deleted token for the operation belonging to this order.
    sql = "SELECT token from t_auth_info t where t.operation_id = (SELECT id from t_operation where order_id = '%s') and t.del_flag = '0'"
    query_result = query_mysql(sql, orderId)
    print(orderId)
    return query_result['token']
# a = queryLoginNum(418)
# print (a)
# Look up the login code (token) by operation id.
def opetationId_queryLoginNum(operation_Id):
    # NOTE(review): name is misspelled ("opetation") but kept for callers.
    sql = "SELECT token from t_auth_info where operation_id = '%s' and del_flag = '0'"
    query_result = query_mysql(sql, operation_Id)
    return query_result['token']
# a = queryLoginNum(290)
# print (a)
# Return an orderId (placeholder, not implemented yet).
def queryOrderId():
    pass


# Return an operationId (placeholder, not implemented yet).
def queryOperationId():
    pass


# Query the order status (placeholder, not implemented yet).
def queryOrderStatus():
    pass
# # 登录PC端获取token,拼接到headers中
# def setup_hook_token(request):
# #print(request)
# url_path="http://testrenshang.cias.cn/injury/user/pc/login"
# header={"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
# payload={"loginName": "haadmin003", "loginPass": "Y2lhczEyMzQ1Ng==", "verifyCode": "tubd"}
# req=requests.post(url=url_path, headers=header, params=payload).json()
# token=req['data']['token']
# request["headers"]['token']=token
# # print(token,'\n', req)
# # print(request)
# # request = {'headers':{'Content-Type': 'application/json;charset=UTF-8', 'method': 'GET', 'url': '$uri', 'token': '$token'}}
# # setup_hook_token(request)
# Log in on the H5 (mobile web) side and fetch a session token.
def getH5Token(accessCode):
    url = "http://testrsapp.cias.cn/injury/user/h5/login"
    headers = {"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
    data = {"accessCode": accessCode}
    req = requests.post(url=url, headers=headers, data=data).json()
    return req['data']['token']
# a = getH5Token('31583310')
# print(a)
# Log in on the web (PC) side and fetch a session token.
def getWebToken(accessCode):
    # NOTE(review): accessCode is accepted but never used -- the request
    # always logs in with the fixed test account below; confirm intent.
    url = "http://testrsapp.cias.cn/injury/user/pc/login"
    headers = {"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
    data = {"loginName": "haadmin003", "loginPass": "Y2lhczEyMzQ1Ng==", "verifyCode": "tubd"}
    req = requests.post(url=url, headers=headers, data=data).json()
    return req['data']['token']
# a = getWebToken('31583310')
# print(a)
# |
4,521 | 9dead39e41fd0f3cff43501c659050885a50fec3 | try:
a=100
b=a/0
print(b)
except ZeroDivisionError as z:
print("Error= ",z) |
4,522 | 79f03af05fb40f5f5247b582eabae2dc125e6b52 | # THIS FILE WAS CREATED IN THIS DIRECTORY EARLIER, NOW MOIVED TO ROOT OF THE REPO
print "Hello buddy"
print "Let's get started"
spy_name = raw_input ("What is your spy name? ")
if len(spy_name) >3:
print "Welcome " + spy_name + ". Glad to have you with us."
spy_salutation= raw_input("What's your title? ")
if spy_salutation == "Mr." or spy_salutation =="Ms.":
spy_name = spy_salutation + " " + spy_name
print "Welcome " + spy_name + ". Let me know about you a bit more."
spy_age = input("Please enter your age")
if 50>spy_age>18:
print "Your age is Correct."
spy_rating = input("Please enter your rating ")
if spy_rating>=5.0:
print "Great spy"
elif 3.5<=spy_rating<5.0:
print "Good spy"
elif 2<=spy_rating<3.5:
print "Not bad."
else :
print "Not good. Need hardwork"
spy_is_active = True
print "Authentication process completed successfully. Welcome " +spy_name+ "age: " + str(spy_age) + " and rating: " + str(spy_rating) + " Glad to have ypou with us."
else:
print "Sorry, you are not eligible to be a spy"
else:
print "Invalid Information."
else:
print "Opps! please enter a valid name."
|
4,523 | 6f13ebe7355d530ba3403aab54b313ecf35b1261 | import turtle
import random
# Draws a simple house scene (walls, roof, door, three windows, doorknob)
# with turtle graphics.
# NOTE(review): loop bodies below were re-indented from an unindented
# source; extents are the most plausible reading of the drawing commands --
# verify against the original rendering.
shaan = turtle.Turtle()
#shaan.color(50,50,50)
#shaan.begin_fill()
# Walls: a 200 x 250 rectangle.
for i in range (2):
    shaan.forward(200)
    shaan.right(90)
    shaan.forward(250)
    shaan.right(90)
shaan.left(60)
# Roof outline.
for i in range(4):
    shaan.forward(200)
    shaan.right(120)
shaan.forward(100)
shaan.left(150)
shaan.forward(100)
shaan.right(90)
shaan.forward(20)
shaan.right(90)
shaan.forward(135)
shaan.left(30)
shaan.forward(60)
shaan.right(120)
shaan.forward(32.5)
shaan.pu()
shaan.left(90)
shaan.forward(60)
shaan.pd()
# First window (25 x 25 square), then reposition.
for i in range(4):
    shaan.forward(25)
    shaan.right(90)
shaan.forward(25)
shaan.right(90)
shaan.forward(25)
shaan.pu()
shaan.forward(30)
shaan.pd()
# Second window.
for i in range(4):
    shaan.forward(25)
    shaan.right(90)
shaan.forward(25)
shaan.pu()
shaan.forward(30)
shaan.pd()
# Third window.
for i in range(4):
    shaan.forward(25)
    shaan.right(90)
shaan.forward(25)
shaan.pu()
shaan.forward(32.5)
shaan.pd()
# Door.
shaan.left(90)
shaan.forward(165)
shaan.left(90)
shaan.forward(100)
shaan.left(90)
shaan.forward(100)
shaan.right(90)
shaan.forward(50)
shaan.right(90)
shaan.forward(100)
shaan.right(180)
shaan.forward(75)
shaan.pu()
shaan.left(90)
shaan.forward(20)
shaan.pd()
# Doorknob: small filled circle.
shaan.begin_fill()
shaan.circle(5,360)
shaan.end_fill()
shaan.pu()
shaan.forward(1000)
turtle.done()
#shaan.end_fill()
4,524 | fb332808890e369d1439d1dba61244a0f7b89301 | #!/usr/bin/env python
import rospy
from racecar_control.msg import drive_param
import curses
forward = 0;
left = 0;

# curses provides raw, unbuffered arrow-key input in the terminal.
stdscr = curses.initscr()
curses.cbreak()
stdscr.keypad(1)

rospy.init_node('keyop', anonymous=True)
pub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)

stdscr.refresh()

key = ''
# Main key loop: arrows adjust velocity/steering, Delete stops, q quits.
# A drive_param message is published on every iteration.
while key != ord('q'):
    key = stdscr.getch()
    stdscr.refresh()
    if key == curses.KEY_UP:
        # Velocity is clamped to [-40, 40].
        forward = forward + 1;
        if forward >= 40:
            forward = 40
        elif forward < -40:
            forward = -40
        stdscr.addstr(2, 20, "Up ")
        stdscr.addstr(2, 25, '%.2f' % forward)
        stdscr.addstr(5, 20, " ")
    elif key == curses.KEY_DOWN:
        forward = forward - 1;
        if forward >= 40:
            forward = 40
        elif forward < -40:
            forward = -40
        stdscr.addstr(2, 20, "Down")
        stdscr.addstr(2, 25, '%.2f' % forward)
        stdscr.addstr(5, 20, " ")
    if key == curses.KEY_LEFT:
        # Steering angle is clamped to [-0.78, 0.78] radians.
        left = left + 0.1;
        if left >= 0.78:
            left = 0.78
        elif left < -0.78:
            left = -0.78
        stdscr.addstr(3, 20, "left")
        stdscr.addstr(3, 25, '%.2f' % left)
        stdscr.addstr(5, 20, " ")
    elif key == curses.KEY_RIGHT:
        left = left - 0.1;
        if left >= 0.78:
            left = 0.78
        elif left < -0.78:
            left = -0.78
        stdscr.addstr(3, 20, "rgt ")
        stdscr.addstr(3, 25, '%.2f' % left)
        stdscr.addstr(5, 20, " ")
    if key == curses.KEY_DC:
        # Delete key: full stop.
        left = 0
        forward = 0
        stdscr.addstr(5, 20, "Stop")
    # Publish the current command.
    msg = drive_param()
    msg.velocity = forward
    msg.angle = left
    pub.publish(msg)

curses.endwin()
|
4,525 | 8e3f23733235d73fab14e80ee0a3706ae351c7a2 | vozrast=int(input("сколько вам лет?"))
print ("через 10 лет вам бóдет", vozrast+10) |
4,526 | 7ba2377b7d4f8d127cfee63c856d20753da9b7c6 | import requests
from datetime import datetime, timedelta
from . import base
class YoutubeVerifier(base.SimpleVerifier):
    """Checks whether the OAuth user is subscribed to a given channel."""

    def __init__(self, channel_id, access_token):
        self.channel_id = channel_id
        self.access_token = access_token
        self.headers = {
            'Authorization': 'Bearer {}'.format(self.access_token)
        }

    def _get_subscription(self):
        """
        Gets information if user is subscribed to channel
        :rtype: dict
        :return: decoded subscriptions payload, or {} on a non-200 response
        """
        response = requests.get(
            'https://www.googleapis.com/youtube/v3/subscriptions',
            params={
                'part': 'snippet',
                'mine': 'true',
                'forChannelId': self.channel_id
            },
            headers=self.headers
        )
        if response.status_code == 200:
            return response.json()
        return {}

    def get_follower_info(self):
        """
        Get follower information from youtube api subscriptions
        :rtype: dict
        :return: follower info
        """
        payload = self._get_subscription()
        # Bug fix: the API returns {'items': []} when the user is not
        # subscribed; dict.get's default only applies when the key is
        # missing, so the old `payload.get('items', [{}])[0]` raised
        # IndexError on an empty list.
        items = payload.get('items') or [{}]
        snippet = items[0].get('snippet', {})
        #: re-processing publishedAt
        if 'publishedAt' in snippet:
            snippet['publishedAt'] = datetime.strptime(
                snippet['publishedAt'],
                "%Y-%m-%dT%H:%M:%S.%fZ"
            )
        else:
            #: if nothing has been found publishedAt set to future
            snippet['publishedAt'] = datetime.now() + timedelta(days=1)
        return snippet
|
4,527 | 26ac0c94d0ab70d90854ca2c913ef0f633b54a3c | #!/usr/bin/env python
import rospy
import cv2
from geometry_msgs.msg import PoseStamped
class PositionReader:
    """Re-publishes visp_auto_tracker object poses, dropping repeats."""

    def __init__(self):
        self.image_sub = rospy.Subscriber(
            "/visp_auto_tracker/object_position", PoseStamped, self.callback)
        self.pub = rospy.Publisher('object_position', PoseStamped, queue_size=10)
        rospy.init_node('PositionReader', anonymous=False)
        # Last forwarded pose, used to suppress duplicate messages.
        self.data = PoseStamped()

    def callback(self, data):
        # Forward only when the x coordinate changed since the last message.
        if(self.data.pose.position.x != data.pose.position.x):
            self.pub.publish(data)
            print(data)
            self.data = data


if __name__ == '__main__':
    try:
        PositionReader()
        rospy.spin()
    except rospy.ROSInterruptException:
        cv2.destroyAllWindows()
        pass
|
4,528 | 173e6017884a1a4df64018b306ea71bcaa1c5f1d | #!flask/bin/python
from config import SQLALCHEMY_DATABASE_URI
from app.models import Patient, Appointment, PhoneCalls
from app import db
import os.path
# Create the schema, then seed it with one of each record type.
db.create_all()
# Patient.generate_fake();
# Appointment.generate_fake();
# PhoneCalls.generate_fake();
Patient.add_patient();
Appointment.add_appointment();
PhoneCalls.add_call();
4,529 | 5f77e93d63c696363c30f019019acd22c694308b | from datetime import date
from django.test import TestCase
from model_mommy import mommy
from apps.debtors.models import Debtor
from apps.invoices.models import Invoice, InvoiceStatusChoices
from apps.invoices.services import InvoiceService
class InvoiceServiceTestCase(TestCase):
    """Unit tests for InvoiceService create/update/delete operations."""

    def setUp(self) -> None:
        self.invoice_service = InvoiceService()
        self.debtor_1 = mommy.make(Debtor)
        self.invoice_1 = mommy.make(Invoice, debtor=self.debtor_1)

    def test_create_invoice(self):
        # create_invoice should persist every supplied field.
        invoice = self.invoice_service.create_invoice(
            amount=12.1, status=InvoiceStatusChoices.OVERDUE,
            due_date=date(2019, 4, 1), debtor=self.debtor_1)
        self.assertEqual(invoice.amount, 12.1)
        self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE)
        self.assertEqual(invoice.due_date, date(2019, 4, 1))
        self.assertEqual(invoice.debtor, self.debtor_1)

    def test_update_invoice(self):
        # Unknown attributes must be ignored, not set on the model.
        updated_invoice = self.invoice_service.update_invoice(
            instance=self.invoice_1, status=InvoiceStatusChoices.PAID,
            random_attr='foo')
        self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID)
        self.assertFalse(hasattr(updated_invoice, 'random_attr'))

    def test_delete_invoice(self):
        # Deleting the only invoice empties the table.
        self.invoice_service.delete_invoice(instance=self.invoice_1)
        self.assertFalse(Invoice.objects.all().exists())
4,530 | 602df213c0d588404597c566001cd9c96b5034d0 | __author__ = 'laispace.com'
import sqlite3
dbname = 'alloyteam.db'


def createTable():
    """Create the posts table if it does not already exist."""
    conn = sqlite3.connect(dbname)
    cursor = conn.cursor()
    cursor.execute('''CREATE TABLE IF NOT EXISTS posts
              (url text primary key,
              title text,
              date text,
              authorLink text,
              authorName text,
              view text)
              ''')
    conn.commit()
    conn.close()
def createPosts(posts):
    """Upsert each (url, title, date, authorLink, authorName, view) row."""
    conn = sqlite3.connect(dbname)
    cursor = conn.cursor()
    for entry in posts:
        cursor.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', entry)
    conn.commit()
    conn.close()
def readPosts():
    """Return every stored post row as a list of tuples."""
    conn = sqlite3.connect(dbname)
    cursor = conn.cursor()
    cursor.execute('SELECT * FROM posts')
    rows = cursor.fetchall()
    conn.commit()
    conn.close()
    return rows
def dropTable():
    """Delete the posts table entirely, if present."""
    conn = sqlite3.connect(dbname)
    cursor = conn.cursor()
    cursor.execute('DROP table IF EXISTS posts')
    conn.commit()
    conn.close()
4,531 | 00b06b5e6465bae3eab336441b283a9831bb93c0 | a=int(raw_input())
if (a%2)==0:
print("Even")
else:
print("Odd")
|
4,532 | fa09937ce64952795ae27cb91bf2c52dfb3ef4da | # Generated by Django 3.1.3 on 2020-11-18 13:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Adds a required `seller` FK on Book; existing rows get user id 1.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('bookstore', '0003_auto_20201118_1325'),
    ]

    operations = [
        migrations.AddField(
            model_name='book',
            name='seller',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
            preserve_default=False,
        ),
    ]
|
4,533 | 25dd7ea4a154e5693c65f8c42107224efee42516 | import pandas as pd
from fbprophet import Prophet
import os
from utils.json_utils import read_json, write_json
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.metrics import mean_absolute_error
# Input/output locations for the 311-cases time-series experiment.
root_dir = "/home/charan/Documents/workspaces/python_workspaces/Data/ADL_Project/"
final_df_path = os.path.join(root_dir, "final_data/311_Cases_master_with_desc_with_prediction.csv")
test_train_df = os.path.join(root_dir, "final_data/Data_with_no_desc.csv")
dept_category = read_json(os.path.join(root_dir, "dept/dept_category.json"))
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the MAPE (%) of predictions against the ground truth.

    Note: undefined when y_true contains zeros (division by zero).
    """
    actual = np.array(y_true)
    predicted = np.array(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return np.mean(relative_errors) * 100
# Per-department error metrics, written to time_series_metrics.json below.
value_dict = {}
# Load the master data; restrict train/test rows to years after 2015.
final_df = pd.read_csv(final_df_path)
test_train_df = pd.read_csv(test_train_df)
test_train_df = test_train_df[test_train_df['CREATION YEAR'] > 2015]
train_split = 80  # NOTE(review): unused -- the split below uses test_size=0.2.
final_df['DAYS TO CLOSE'].fillna(0, inplace=True)
print(final_df['CREATION DATE'].isna().sum())
print(final_df['DAYS TO CLOSE'].isna().sum())
# Strip thousands separators so the column can be cast to float.
test_train_df['DAYS TO CLOSE'] = test_train_df['DAYS TO CLOSE'].apply(lambda x: str(x).replace(",", ""))
list_of_dataframes = []
# Fit one Prophet model per department on an 80/20 split, record MAE/MAPE,
# then predict close times for the full dataset.
for each_dept in sorted(list(dept_category.values())):
    print(f' processing - {each_dept}')
    each_test_train = test_train_df[test_train_df.DEPARTMENT == each_dept].reset_index()
    each_dept_df = final_df[final_df.DEPARTMENT == each_dept].reset_index()
    test_time_train = each_test_train[['CREATION DATE', 'DAYS TO CLOSE']]
    each_df = each_dept_df[['CREATION DATE', 'DAYS TO CLOSE']]
    # Prophet expects columns named ds (date) and y (value).
    each_df.rename(columns={'CREATION DATE': 'ds', 'DAYS TO CLOSE': 'y'}, inplace=True)
    test_time_train.rename(columns={'CREATION DATE': 'ds', 'DAYS TO CLOSE': 'y'}, inplace=True)
    # test_time_train.y.apply(lambda x: str(x).replace(",", ""))
    test_time_train.y = test_time_train.y.astype('float64')
    test_time_train.y.fillna(0, inplace=True)
    train, test = train_test_split(test_time_train, test_size=0.2)
    m = Prophet()
    m.fit(train)
    forecast = m.predict(test)
    mae_value = mean_absolute_error(test['y'].values, forecast['yhat'].values)
    mape_error = mean_absolute_percentage_error(test['y'].values, forecast['yhat'].values)
    print(f'mean absolute error : {mae_value},MAPE {mape_error} , department {each_dept}')
    metric_dict = {'MAE': mae_value, 'MAPE': mape_error}
    value_dict[each_dept] = metric_dict
    # Save the forecast plot per department.
    fig1 = m.plot(forecast)
    fig1.savefig(each_dept + ".png")
    # Predict close times for every case in this department.
    whole_result = m.predict(each_df)
    each_df['TIME_PRED'] = whole_result['yhat']
    each_df['CASE ID'] = each_dept_df['CASE ID']
    list_of_dataframes.append(each_df)
write_json(value_dict, "time_series_metrics.json")
final_pred = pd.concat(list_of_dataframes)
final_pred.to_csv("final_val.csv", header=True, index=False)
|
4,534 | 047b3398a73c9e7d75d43eeeab85f52c05ff90c3 | """
This file contains the ScoreLoop which is used to show
the user thw at most 10 highest scores made by the player
"""
import pygame
from score_fetcher import fetch_scores
from entities.sprite_text import TextSprite
class ScoreLoop:
def __init__(self):
self.scores = fetch_scores()
self.sprites = pygame.sprite.Group()
self.get_score_sprites()
self.space_cooldown = True
def get_score_sprites(self):
rank = 1
for score in self.scores:
self.sprites.add(
TextSprite(str(score), 256, 100+50*rank, True)
)
rank += 1
def increment(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
if self.space_cooldown:
return None
return "startloop"
self.space_cooldown = False
return None
def get_sprites(self):
"""retruns sprites for the UI"""
return self.sprites
if __name__ == "__main__":
pass
|
4,535 | 012d9b5aa13c557ad958343cadf935b73c808a56 | """
定义函数,根据年、月、日计算星期。
0 星期一
1 星期二
....
"""
import time
def get_week(year, month, day):
    """Return the Chinese weekday name for the given calendar date."""
    names = ("星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日")
    parsed = time.strptime("%d-%d-%d" % (year, month, day), "%Y-%m-%d")
    # Index 6 of the struct_time tuple is tm_wday (Monday == 0).
    return names[parsed[6]]


print(get_week(2020, 1, 16))
|
4,536 | 89ec04280ecfdfcba1923e2742e31d34750f894f | import falcon
import json
from sqlalchemy.exc import SQLAlchemyError
from db import session
import model
import util
class AchievementGrant(object):
    def on_post(self, req, resp):
        """
        Grant an achievement to a list of users.

        Payload format:
        {
            "users": [ id ],
            "task": (null|id),
            "achievement": id
        }
        """
        try:
            user = req.context['user']
            data = json.loads(req.stream.read().decode('utf-8'))

            # Only logged-in organizers may grant achievements.
            if (not user.is_logged_in()) or (not user.is_org()):
                resp.status = falcon.HTTP_400
                return

            errors = []
            # Default "access denied" result; overwritten below on success
            # or with the collected per-user errors.
            req.context['result'] = {
                'errors': [{
                    'status': '401',
                    'title': 'Unauthorized',
                    'detail': 'Přístup odepřen.'
                }]
            }

            for u in data['users']:
                if not data['task']:
                    data['task'] = None
                else:
                    # A task-bound achievement requires the user to have
                    # submitted (an evaluation of) a module of that task.
                    evl = session.query(model.Evaluation).\
                        filter(model.Evaluation.user == u).\
                        join(model.Module,
                             model.Module.id == model.Evaluation.module).\
                        filter(model.Module.task == data['task']).\
                        first()
                    if not evl:
                        errors.append({
                            'title': ("Uživatel " + str(u) +
                                      " neodevzdal vybranou úlohu\n")
                        })
                        continue

                # Skip users who already hold this achievement.
                if session.query(model.UserAchievement).\
                        get((u, data['achievement'])):
                    errors.append({
                        'title': ("Uživateli " + str(u) +
                                  " je již trofej přidělena\n")
                    })
                else:
                    ua = model.UserAchievement(
                        user_id=u,
                        achievement_id=data['achievement'],
                        task_id=data['task']
                    )
                    session.add(ua)

            # NOTE(review): commit placement (after the loop) inferred from
            # the unindented source -- confirm against history.
            session.commit()

            if len(errors) > 0:
                req.context['result'] = {'errors': errors}
            else:
                req.context['result'] = {}
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()
4,537 | 0356b408624988100c10b20facecef14f1552203 | import numpy as np
import pandas as pd
import nltk
from collections import defaultdict
import os.path
# Tokens removed before vectorization: English function words plus
# punctuation characters.
stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours',
              'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers',
              'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
              'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are',
              'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does',
              'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until',
              'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into',
              'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
              'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here',
              'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',
              'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so',
              'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now', '\n', 'the',
              '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=',
              '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~']
# stop words source: https://github.com/uclmr/stat-nlp-book/blob/python/chapters/doc_classify.ipynb


def tokenizer_nltk(input):
    # NOTE(review): parameter shadows the builtin `input`; kept for callers.
    return nltk.word_tokenize(input)


# sklearn models
def statement_to_dict(statement):
    """Bag-of-words: map each token in *statement* to its float count."""
    counts = defaultdict(float)
    for word in statement:
        counts[word] = counts[word] + 1.0
    return counts
def build_statements_features(df, vectorizer, train=True, tokenizer=tokenizer_nltk):
    """Tokenize df['statement'], drop stop words, and vectorize the counts.

    fit_transform is used for training data and transform otherwise, so the
    test set reuses the vocabulary learned during training.
    (Python 2 code: statements are byte strings decoded to unicode here.)
    """
    filtered_statements_dic = {}
    for index, row in df.iterrows():
        filtered_statement = []
        tokenized_statement = tokenizer(row['statement'].lower().decode('utf-8'))
        for token in tokenized_statement:
            if token not in stop_words:
                filtered_statement.append(token)
        filtered_statements_dic[index] = filtered_statement
    filtered_statements = filtered_statements_dic.values()
    if train:
        statements_features = vectorizer.fit_transform([statement_to_dict(statement) for statement in filtered_statements])
    else:
        statements_features = vectorizer.transform([statement_to_dict(statement) for statement in filtered_statements])
    return statements_features
# tensorflow models
def extract_vocab(df, embeddings, tokenizer=tokenizer_nltk):
    """Build (or load a cached) token -> index vocabulary from df['statement'].

    Tokens are lowercased for embeddings trained on lowercase corpora.
    Index 0 is PAD and index 1 is OOV.
    """
    path = '../saved_data/embeddings/'
    lowercase = True if embeddings in ['glove_100d_6b', 'glove_300d_6b', 'facebook'] else False
    if os.path.isfile(path + 'vocab.txt') and lowercase:
        df_vocab = pd.read_table(path + 'vocab.txt', sep=' ', header=None, index_col=0)
        print('full vocab already exists - vocab loaded, tokens found: {:.0f}'.format(len(df_vocab) - 2))
    elif os.path.isfile(path + 'vocab_upper.txt') and not lowercase:
        df_vocab = pd.read_table(path + 'vocab_upper.txt', sep=' ', header=None, index_col=0)
        print('full upper vocab already exists - upper vocab loaded, tokens found: {:.0f}'.format(len(df_vocab) - 2))
    else:
        df_vocab = pd.DataFrame(columns=['1'])
        # Reserve the special tokens first.
        df_vocab.loc['PAD'] = 0
        df_vocab.loc['OOV'] = 1
        if lowercase:
            combined_statements = ' '.join(df['statement']).lower().decode('utf-8')
        else:
            combined_statements = ' '.join(df['statement']).decode('utf-8')
        tokenized_combined_statements = tokenizer(combined_statements)
        for token in tokenized_combined_statements:
            token = token.encode('utf-8')
            if token not in df_vocab.index:
                df_vocab.loc[token] = len(df_vocab)
        df_vocab = df_vocab.astype(int)
        # Bug fixes: DataFrame.to_csv takes no index_col argument (the old
        # call raised TypeError), and the uppercase vocab must be cached
        # under vocab_upper.txt so the load branch above can ever find it.
        vocab_file = 'vocab.txt' if lowercase else 'vocab_upper.txt'
        df_vocab.to_csv(path + vocab_file, sep=' ', header=False)
        print('full vocab built and saved, tokens found: {:.0f}'.format(len(df_vocab) - 2))
    return df_vocab
def load_embeddings(df, df_vocab, embeddings):
    """Load pre-trained word embeddings restricted to the corpus vocabulary.

    For the chosen embedding set, either load the previously reduced
    embedding/vocab caches from disk, or read the full embedding table, keep
    only the vectors for tokens present in ``df_vocab`` (row 0 = PAD, row 1 =
    OOV; the OOV vector is the mean of all kept vectors), renumber the vocab
    to match the reduced table, and cache both results.

    The five original copy-pasted branches are collapsed into one driven by a
    per-embedding configuration table.  An unknown ``embeddings`` value now
    raises ValueError instead of the original NameError at the return line.

    :param df: unused; kept for signature compatibility with callers.
    :param df_vocab: DataFrame mapping token -> index (from extract_vocab).
    :param embeddings: one of the keys of ``configs`` below.
    :return: (reduced embeddings DataFrame, reduced vocab DataFrame)
    """
    # subdir, raw file, reduced cache, vocab cache (upper for case-preserving
    # sets), log label, extra read_table kwargs for the raw file.
    configs = {
        'glove_300d_6b': ('glove_300d_6b/', 'glove_300d_6b.txt',
                          'glove_300d_6b_reduced.txt', 'vocab_reduced.txt',
                          'glove_300d_6b', {}),
        'glove_100d_6b': ('glove_100d_6b/', 'glove_100d_6b.txt',
                          'glove_100d_6b_reduced.txt', 'vocab_reduced.txt',
                          'glove_100d_6b', {}),
        'glove_300d_84b': ('glove_300d_84b/', 'glove_300d_84b.txt',
                           'glove_300d_84b_reduced.txt', 'vocab_upper_reduced.txt',
                           'glove_300d_84b', {}),
        'google': ('google/', 'google_word2vec_300d.txt',
                   'google_word2vec_300d_reduced.txt', 'vocab_upper_reduced.txt',
                   'google_word2vec_300d', {'nrows': 1000000}),
        'facebook': ('facebook/', 'wiki.en.vec',
                     'facebook_fastText_300d_reduced.txt', 'vocab_reduced.txt',
                     'facebook_fastText_300d', {'skiprows': 1, 'usecols': range(301)}),
    }
    if embeddings not in configs:
        raise ValueError('unknown embeddings option: {!r}'.format(embeddings))
    subdir, raw_file, reduced_file, vocab_file, label, read_kwargs = configs[embeddings]
    path = '../saved_data/embeddings/' + subdir
    # check if reduced embeddings exist already and if so return them
    if os.path.isfile(path + reduced_file) and os.path.isfile(path + vocab_file):
        df_embeddings_reduced = pd.read_table(path + reduced_file, sep=' ', header=None, index_col=0)
        df_vocab_reduced = pd.read_table(path + vocab_file, sep=' ', header=None, index_col=0)
        print('{} reduced embeddings and reduced vocab already exist - {} reduced embeddings loaded'.format(label, label))
    # otherwise reduce the full embeddings against the corpus vocabulary
    else:
        df_embeddings = pd.read_table(path + raw_file, sep=' ', header=None, index_col=0, **read_kwargs)
        df_embeddings_reduced = pd.DataFrame(columns=df_embeddings.columns)
        df_embeddings_reduced.loc['PAD'] = 0
        df_embeddings_reduced.loc['OOV'] = 1
        print('full {} embeddings loaded'.format(label))
        # keep only vectors for tokens present in the corpus vocabulary
        for token in df_vocab.index:
            if token in df_embeddings.index:
                df_embeddings_reduced = df_embeddings_reduced.append(df_embeddings.loc[token], ignore_index=False)
        # OOV vector = mean of all kept vectors (rows 2..end)
        df_embeddings_reduced.loc['OOV'] = df_embeddings_reduced.iloc[2:].mean(axis=0)
        # BUG FIX: to_csv() has no 'index_col' parameter; it was removed here.
        df_embeddings_reduced.to_csv(path + reduced_file, sep=' ', header=None)
        print('{} reduced embeddings built and saved, tokens found: {:.0f}, out of: {:.0f}'.format(
            label, len(df_embeddings_reduced) - 2, len(df_vocab) - 2))
        # renumber the reduced vocab so indices match the reduced table rows
        df_vocab_reduced = pd.DataFrame(columns=['1'])
        for i, token in enumerate(df_embeddings_reduced.index):
            df_vocab_reduced.loc[token] = i
        df_vocab_reduced = df_vocab_reduced.astype(int)
        df_vocab_reduced.to_csv(path + vocab_file, sep=' ', header=None)
        print('vocab reduced built and saved')
    return df_embeddings_reduced, df_vocab_reduced
def build_W_embeddings(df_embeddings_reduced):
    """Return the reduced embedding table as a dense numpy matrix.

    One row per vocabulary entry (PAD, OOV, then corpus tokens), one column
    per embedding dimension.  Uses ``.values`` because ``DataFrame.as_matrix``
    was deprecated in pandas 0.23 and removed in pandas 1.0.
    """
    return df_embeddings_reduced.values
def build_statements_embeddings(df, df_vocab, tokenizer=tokenizer_nltk, max_statement_len=None):
    """Map each statement to a padded matrix of vocabulary indices.

    Each statement is lowercased, tokenized, and converted to vocab indices
    (OOV index for unknown tokens), then padded with the PAD index (or
    truncated) to a common length.

    :param df: DataFrame with a 'statement' column of byte strings (Python 2).
    :param df_vocab: DataFrame mapping token -> index.
    :param tokenizer: callable splitting a unicode string into tokens.
    :param max_statement_len: fixed output length; defaults to the longest
        statement observed in ``df``.
    :return: (matrix of shape (n_statements, length), length used)
    """
    # BUG FIX: the original collected rows in a dict and built the matrix from
    # dict.values(), whose ordering on Python 2 is arbitrary and need not match
    # df row order; rows are now accumulated in a list, preserving df order.
    embedded_rows = []
    max_statement_len_ = -1
    for index, row in df.iterrows():
        embedded_statement = []
        tokenized_statement = tokenizer(row['statement'].lower().decode('utf-8'))
        for token in tokenized_statement:
            if token in df_vocab.index:
                embedded_statement.append(np.array(df_vocab.loc[token]))
            else:
                embedded_statement.append(np.array(df_vocab.loc['OOV']))
        embedded_rows.append(np.array(embedded_statement))
        if len(embedded_statement) > max_statement_len_:
            max_statement_len_ = len(embedded_statement)
    if max_statement_len is not None:
        max_statement_len_ = max_statement_len
    padded_rows = []
    for embedded_statement in embedded_rows:
        embedded_statement_len = np.shape(embedded_statement)[0]
        # Start from an all-PAD block, then copy (or truncate) the statement in.
        padded_embedded_statement = np.tile(np.array(df_vocab.loc['PAD']), (max_statement_len_, 1))
        if max_statement_len_ >= embedded_statement_len:
            padded_embedded_statement[:embedded_statement_len, :] = embedded_statement
        else:
            padded_embedded_statement[:, :] = embedded_statement[:max_statement_len_, :]
        padded_rows.append(padded_embedded_statement)
    embedded_statements_matrix = np.squeeze(np.asarray(padded_rows))
    return embedded_statements_matrix, max_statement_len_
|
4,538 | e3f180d4309ade39ac42a895f7f73469fd20724f | #coding=utf-8
from selenium import webdriver
from selenium.webdriver import ActionChains
import time

# Demo of common mouse interactions on the Baidu homepage.
driver = webdriver.Chrome()
driver.get('https://www.baidu.com')
driver.maximize_window()

# Double-click the "News" link.
element = driver.find_element_by_link_text(u"新闻")
ActionChains(driver).double_click(element).perform()
time.sleep(2)

# BUG FIX: the original called driver.quit() here and then kept using the
# closed session, so the next find_element call would fail; the session is
# now closed once, at the end.

# Right-click (context-click) the "Maps" link.
element = driver.find_element_by_link_text('地图')
ActionChains(driver).context_click(element).perform()
time.sleep(2)
driver.quit()
|
4,539 | d85c0929b22f57367c0e707bac78e56027113417 | import time
import numpy as np
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
from utils import *
g = 9.8      # gravitational acceleration [m/s^2]
t_start = 0  # wall-clock time of the last kick; set by the mouse handler
def init():
    """Set up GL state: white background, red 2px points, 500x500 ortho view."""
    glClearColor(1.0, 1.0, 1.0, 1.0)
    glClear(GL_COLOR_BUFFER_BIT)
    glColor3f(1.0, 0.0, 0.0)
    glPointSize(2)
    gluOrtho2D(0.0, 500.0, 0.0, 500.0)
def disp():
    """GLUT display callback: draw the ball at its starting position (50, 50)."""
    draw_circle(50, 50, 10)
def mouse(btn, state, x, y):
    """GLUT mouse callback: launch the projectile on a button event.

    btn == 0 matches GLUT_LEFT_BUTTON and state == 1 matches GLUT_UP
    (release) -- presumably the intent, since the raw integers are used
    instead of the GLUT constants; confirm against the GLUT headers.
    """
    global t_start
    if btn == 0 and state == 1:
        t_start = time.time()
        # Launch from (50, 50) at 45 degrees with speed 20.
        kick(50, 50, 45, 20)
def kick(x, y, theta, u):
    """Animate projectile motion from (x, y) with speed u at angle theta (degrees).

    Draws a small square at the projectile position every ~0.1 s until the
    total flight time 2*u*sin(theta)/g has elapsed.
    """
    theta *= np.pi / 180  # degrees -> radians
    tot_time = 2 * u * np.sin(theta) / g
    print(tot_time)
    t0 = time.time()
    t = 0
    while t < tot_time:
        t = time.time() - t0
        # BUG FIX: the original computed "u*cos(theta) + t + x" (adding t
        # instead of multiplying by it) and "u*sin(theta) - g*t**2 + y"
        # (missing the t factor and the 1/2).  Correct kinematics:
        #   x(t) = x0 + u*cos(theta)*t
        #   y(t) = y0 + u*sin(theta)*t - g*t^2/2
        x_inc = u * np.cos(theta) * t + x
        y_inc = u * np.sin(theta) * t - 0.5 * g * t ** 2 + y
        print(x_inc, y_inc)
        poly(get_square_vertices(x_inc, y_inc))
        time.sleep(0.1)
def main():
    """Create the 500x500 GLUT window, register the callbacks and start the loop."""
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
    glutInitWindowSize(500, 500)
    glutInitWindowPosition(0, 0)
    glutCreateWindow(b'Projectile Motion')
    init()
    glutDisplayFunc(disp)
    glutMouseFunc(mouse)
    glutMainLoop()
# Runs at import time; glutMainLoop() does not return.
main()
|
4,540 | 139d06497a44031f6414980ad54454477e3d0b2c | import numpy as np
import matplotlib.pyplot as plt
import math
filename = '/home/kolan/mycode/python/dektak/data/t10_1_1_normal.csv'
#filename = '/home/kolan/mycode/python/dektak/t10_1_3_normal.csv'
#filename = '/home/kolan/mycode/python/dektak/t10_1_6_normal.csv'
#filename = '/home/kolan/mycode/python/dektak/t10_1_7_normal.csv'
#filename = '/home/kolan/mycode/python/dektak/t10_1_3_parallel.csv'
def FindHeaderLength():
    """
    Find the line number of the 'Lateral um' column-header row and add 4,
    giving the number of lines for np.loadtxt to skip before the data.
    (The original docstring said 'Scan Data' and +2; the code actually
    searches for 'Lateral um' and adds 4.)
    NOTE(review): if the lookup string is missing, the last enumerated line
    number (plus 4) is returned -- confirm the input files always contain
    the header row.
    """
    lookup = 'Lateral um'
    with open(filename) as myFile:
        for FoundPosition, line in enumerate(myFile, 1):
            if lookup in line:
                print 'Scan Data found at line:', FoundPosition
                break
    return FoundPosition+4
# Load the lateral-position (col 0) and height (col 1) columns, skipping the header.
x=np.loadtxt(filename,dtype=float,delimiter=',',skiprows=FindHeaderLength(),usecols=(0,))
y=np.loadtxt(filename,dtype=float,delimiter=',',skiprows=FindHeaderLength(),usecols=(1,))
# Fit a straight line through the profile and subtract it to remove sample tilt.
coefficients = np.polyfit(x, y, 1)
polynomial = np.poly1d(coefficients)
ys = polynomial(x)
print coefficients
print polynomial
yLevelled=y-ys
# Figure 1: raw profile together with the fitted levelling line.
plt.figure(1)
plt.plot(x,y)
plt.plot(x,ys)
plt.title('Raw data plot')
plt.xlabel('Lateral [um]')
plt.ylabel('Raw Micrometer [um]')
plt.grid(True)
# Figure 2: height distribution of the raw profile.
plt.figure(2)
plt.title('Histogram of y')
n, bins, patches = plt.hist(y, 256, normed=1, facecolor='g', alpha=0.75)
plt.grid(True)
# Figure 3: point-to-point differences (discrete derivative) of the raw profile.
plt.figure(3)
d = np.diff(y)
plt.plot(d)
plt.title('Derivative of y')
plt.xlabel('Point []')
plt.ylabel('Raw Micrometer [um]')
plt.grid(True)
# Figure 4: the levelled (tilt-removed) profile.
plt.figure(4)
plt.plot(x,yLevelled)
plt.title('Levelled data plot')
plt.xlabel('Lateral [um]')
plt.ylabel('Micrometer [um]')
plt.grid(True)
# Figure 5: height distribution after levelling.
plt.figure(5)
plt.title('Histogram of yLevelled')
n, bins, patches = plt.hist(yLevelled, 256, normed=1, facecolor='g', alpha=0.75)
plt.grid(True)
dataLenght = len(yLevelled)
xDiff = np.delete(x,dataLenght-1) #diff consumes one last element from the array
# Figure 6: derivative again, but plotted against lateral position.
plt.figure(6)
d = np.diff(y)
plt.plot(xDiff,d)
plt.title('Derivative of y')
plt.xlabel('Lateral [um]')
plt.ylabel('Raw Micrometer [um]')
plt.grid(True)
# Shift the levelled profile so its minimum sits at zero.
yLevelledMin = np.min(yLevelled)
yLevelledZeroShift = yLevelled - yLevelledMin
plt.figure(7)
plt.plot(x,yLevelledZeroShift)
plt.title('Levelled and shifted data plot')
plt.xlabel('Lateral [um]')
plt.ylabel('Micrometer [um]')
plt.grid(True)
##FFT###########################################################################
# Python 2 integer division: dataLenghtFFT is len/2 rounded down.
dataLenghtFFT = len(yLevelled)/2 #divide by 2 to satify rfft
# scale by the number of points so that
# the magnitude does not depend on the length
# of the signal or on its sampling frequency
calculatedFFT = np.fft.rfft(yLevelled)
#calculatedFFT = np.fft.rfft(yLevelledZeroShift)
amplitudeFFT = np.abs(calculatedFFT) #calculates FFT amplitude from
#complex calculatedFFT output
phaseFFT = np.angle(calculatedFFT) #calculates FFT phase from
#complex calculatedFFT output
phaseDegreesFFT = np.rad2deg(phaseFFT) #convert to degrees
amplitudeScaledFFT = amplitudeFFT/float(dataLenghtFFT)
# scale by the number of points so that
# the magnitude does not depend on the length
# of the signal
amplitudeScaledRMSFFT = amplitudeFFT/float(dataLenghtFFT)/math.sqrt(2)
# Scaling to Root mean square amplitude (dataLenghtFFT/sqrt{2}),
#############################################################################
# Plot the results
#############################################################################
xFFT = np.linspace(0,dataLenghtFFT+1,dataLenghtFFT+1)
#the range is two times smaller +1 for RFFT
#sinus signal without noise used for fit
plt.figure("FFT amplitude and phase coefficients")
plt.subplot(2,1,1)
plt.vlines(xFFT,0,amplitudeScaledFFT)
plt.title("FFT amplitude coefficients")
plt.xlabel("Harmonics")
plt.ylabel("Amplitude [V]")
plt.xlim(0,dataLenghtFFT/2+1) #adjuts the x axis to maximum of numberOfPoints
plt.grid(True)
plt.subplot(2,1,2)
plt.vlines(xFFT,0,phaseDegreesFFT)
plt.title("FFT phase coefficients")
plt.xlabel("Harmonics")
plt.ylabel("Phase [deg]")
plt.tight_layout() #removes the overlapping of the labels in subplots
plt.xlim(0,dataLenghtFFT+1)
plt.grid(True)
##############################################################################
##Moving average
##############################################################################
# 10-point moving average via convolution with a box kernel.
# NOTE(review): np.convolve in default 'full' mode lengthens the output by 9
# samples, and plt.hold() was removed in matplotlib 3.0 -- this script targets
# an old matplotlib / Python 2 environment.
plt.figure('LevelledData with moving average ')
yLevelledMA = np.convolve(yLevelled, np.ones(10)/10)
plt.plot(yLevelled)
plt.hold(True)
plt.plot(yLevelledMA)
plt.title('Filtered levelled data plot')
plt.xlabel('Sample []')
plt.ylabel('Micrometer [um]')
plt.grid(True)
##orizontal line
diffMA = np.convolve(d, np.ones(10)/10)
dataLenghtDiff = len(d)
dataLenghtDiffMA = len(diffMA)
xLine = np.linspace(0,dataLenghtDiffMA,dataLenghtDiffMA)
yLine = np.linspace(0.05,0.05,dataLenghtDiffMA)  # 0.05 um guide line for the step threshold
plt.figure('Derivative with moving average')
plt.plot(d)
plt.hold(True)
plt.plot(diffMA)
plt.plot(yLine)
plt.title('Derivative with moving average')
plt.xlabel('Sample []')
plt.ylabel('Micrometer [um]')
plt.grid(True)
print dataLenghtDiff
print dataLenghtDiffMA
#thresholded = np.array(diffMA)
#x = np.where(thresholded == 0.05)[0]
#print x
#plt.figure('Derivative with moving average thresholded')
#plt.plot(thresholded)
#plt.title('Derivative with moving average')
#plt.xlabel('Sample []')
#plt.ylabel('Micrometer [um]')
#plt.grid(True)
#
#itemindex = np.where(diffMA > 0.05 and diffMA < 0.051)
plt.show()
4,541 | 67509ce426fd572b22d5059d98e5439e87cdc591 | '''
@author: Victor Barrera Burgos
Created on 09 Feb 2014
Description: This script obtains the methylation profile of a
CpGRegion, indicating the methylation status of each CpG
dinucleotide.
Addition on 02 March 2014
Description: it can also obtain the methylation profile of the
whole genome using methylationMap.
'''
# Imports
import sys
import pysam
import re
# Defining types
# Structures from the first level of abstraction
class CpGRegion:
    """A genomic region holding its CpG dinucleotides and methylation summary."""

    def __init__(self, id, chrom, start, end, sequence):
        # Region identity and genomic coordinates.
        self.id = id
        self.chrom = chrom
        self.start = start
        self.end = end
        self.sequence = sequence
        # -1 is the "not yet computed" sentinel; filled by the profiling functions.
        self.methCoef = -1
        # Running CpG count and the CpGdinucleotide objects found so far.
        self.nCG = 0
        self.cpgList = []
# Structures from the second level of abstraction
class CpGdinucleotide:
    """One CpG dinucleotide: the genomic positions of its C and G bases."""

    def __init__(self, chrom, firstPos, secondPos):
        self.chrom = chrom
        self.firstPos = firstPos
        self.secondPos = secondPos
        # -1.0 marks "not informative"; set by methCG from the pileup data.
        self.firstPosMethCoef = -1.0
        self.secondPosMethCoef = -1.0
        self.meanMethCoef = -1.0
# Defining functions
# Functions from the first level of abstraction
def methylationMap(cgR,filter):
    """Print one 'position;status' line per CpG dinucleotide in the region.

    Status code: 0 = unmethylated (mean coef <= 0.2), 1 = methylated
    (mean coef >= 0.8), X = intermediate, N = not informative (no position
    passed the read filter).
    """
    # Pre: The function receives an object from the class CpGRegion and a filter value
    upSequence=(cgR.sequence).upper()
    # Look for CG positions (offsets within the region sequence)
    starts = [match.start() for match in re.finditer('CG',upSequence)]
    for i in starts:
        # Convert the region-relative offset into genomic coordinates.
        cpgDin=CpGdinucleotide(cgR.chrom,int(i)+cgR.start-1,int(i)+cgR.start)
        # Call the methCG function to fill in the methylation coefficients
        methCG(cpgDin,filter)
        cgR.nCG=cgR.nCG+1
        (cgR.cpgList).append(cpgDin)
    cgRPositions=""
    for j in cgR.cpgList:
        if (j.meanMethCoef>=0):
            if (j.meanMethCoef<=0.2):
                cgRPositions=cgRPositions+str(j.firstPos)+";"+"0"+"\n"
            elif (j.meanMethCoef>=0.8):
                cgRPositions=cgRPositions+str(j.firstPos)+";"+"1"+"\n"
            else:
                cgRPositions=cgRPositions+str(j.firstPos)+";"+"X"+"\n"
        else:
            # Negative mean coefficient: no position had enough informative reads.
            cgRPositions=cgRPositions+str(j.firstPos)+";"+"N"+"\n"
    print "%s" % (cgRPositions)
    # Post: one 'position;status' line per CpG in the region has been printed
def methylationProfile(cgR,filter):
    """Print a one-line summary of the region plus its ternary CpG profile.

    Output fields (';' separated): id, chrom, start, end, total CpG count,
    informative CpG count, mean methylation coefficient, and a profile string
    with one character per CpG (0/1/X/N, see below).
    """
    # Pre: The function receives an object from the class CpGRegion and a filter value
    upSequence=(cgR.sequence).upper()
    # Look for CG positions (offsets within the region sequence)
    starts = [match.start() for match in re.finditer('CG',upSequence)]
    for i in starts:
        # Convert the region-relative offset into genomic coordinates.
        cpgDin=CpGdinucleotide(cgR.chrom,int(i)+cgR.start-1,int(i)+cgR.start)
        # Call the methCG function to fill in the methylation coefficients
        methCG(cpgDin,filter)
        cgR.nCG=cgR.nCG+1
        (cgR.cpgList).append(cpgDin)
    # Generate the profile using the code (0,1,X,N)
    # 0 For unmethylated, 1 for methylated
    # X for intermediate methylation, N for not informative
    cgRProf=""
    infCpG=0
    cgRAcumMethCoef=0
    for j in cgR.cpgList:
        if (j.meanMethCoef>=0):
            infCpG=infCpG+1
            cgRAcumMethCoef=cgRAcumMethCoef+j.meanMethCoef
            if (j.meanMethCoef<=0.2):
                cgRProf=cgRProf+"0"
            elif (j.meanMethCoef>=0.8):
                cgRProf=cgRProf+"1"
            else:
                cgRProf=cgRProf+"X"
        else:
            cgRProf=cgRProf+"N"
    # The region mean only covers informative CpGs; otherwise the -1 sentinel stays.
    if (infCpG>0):
        cgR.methCoef=cgRAcumMethCoef/infCpG
    print "%s;%s;%i;%i;%i;%i;%f;%s" % (cgR.id,cgR.chrom,cgR.start,cgR.end,cgR.nCG,infCpG,cgR.methCoef,cgRProf)
    # Post: The id, chrom, start, end, total number of CG, number of informative CpG
    # and a ternary profile for each of its CpG corresponding to the CpGRegion object
    # have been printed
# Functions from the second level of abstraction
def methCG(cpgDin,filter):
    """Fill in the methylation coefficients of a CpG dinucleotide in place.

    For each of the two positions, pile up the aligned reads from the global
    'samfile' and, when at least 'filter' C+T reads cover the exact position,
    set its coefficient to C/(C+T).  (Bisulfite convention: C = methylated,
    T = converted/unmethylated -- presumably; confirm against the protocol.)
    The mean coefficient uses whichever positions are informative; if neither
    is, the -1 sentinel is preserved.
    """
    # Pre: The function receives an object from the class CpGdinucleotide and a filter value
    seq=""
    # pileup() yields every column overlapping the window; keep only the exact
    # position, and only when its raw depth passes the filter.
    for pileupcolumn in samfile.pileup(cpgDin.chrom,cpgDin.firstPos,cpgDin.firstPos+1):
        if not (pileupcolumn.pos==cpgDin.firstPos and pileupcolumn.n>=filter):
            continue
        for pileupread in pileupcolumn.pileups:
            seq+=pileupread.alignment.seq[pileupread.qpos]
    seq=seq.upper()
    numA=int(seq.count("A"))
    numT=int(seq.count("T"))
    numC=int(seq.count("C"))
    numG=int(seq.count("G"))
    reads=numA+numT+numC+numG  # total ACGT depth (currently unused)
    if ((numT+numC)>=filter):
        cpgDin.firstPosMethCoef=(float(numC)/float(numC+numT))
    # Same computation for the second (G-strand) position.
    seq=""
    for pileupcolumn in samfile.pileup(cpgDin.chrom,cpgDin.secondPos,cpgDin.secondPos+1):
        if not (pileupcolumn.pos==cpgDin.secondPos and pileupcolumn.n>=filter):
            continue
        for pileupread in pileupcolumn.pileups:
            seq+=pileupread.alignment.seq[pileupread.qpos]
    seq=seq.upper()
    numA=int(seq.count("A"))
    numT=int(seq.count("T"))
    numC=int(seq.count("C"))
    numG=int(seq.count("G"))
    reads=numA+numT+numC+numG
    # '&' works here because both operands are booleans; -1 means "not informative".
    # When both positions are -1, the else branch yields (-1 + -1)/2 = -1,
    # preserving the sentinel.
    if (((cpgDin.firstPosMethCoef)!=-1) & ((cpgDin.secondPosMethCoef)==-1)):
        cpgDin.meanMethCoef=cpgDin.firstPosMethCoef
    elif (((cpgDin.firstPosMethCoef)==-1) & ((cpgDin.secondPosMethCoef)!=-1)):
        cpgDin.meanMethCoef=cpgDin.secondPosMethCoef
    else:
        cpgDin.meanMethCoef=float(cpgDin.firstPosMethCoef+cpgDin.secondPosMethCoef)/2.0
    # Post: The object is returned with its methylation Coefficients recalculated according
    # to the data present in the alignment file and using a minimum read filter.
####################
### Main ###########
####################
# Obtain the files
# argv: 1 = CpG-region table, 2 = indexed BAM file, 3 = minimum read filter
cpgr_sec_path=sys.argv[1]
sam_path=sys.argv[2]
filter=int(sys.argv[3])
# Load the files ('samfile' is read as a global by methCG)
cpgr_sec_file=open(cpgr_sec_path,'r')
samfile = pysam.Samfile(sam_path, "rb" )
# One region per whitespace-separated line: id chrom start end sequence
for cpgr in cpgr_sec_file:
    cgRTuple=cpgr.split()
    cgR=CpGRegion(cgRTuple[0],str(cgRTuple[1]),int(cgRTuple[2]),int(cgRTuple[3]),str(cgRTuple[4]))
    # We can use methylationMap or methylationProfile
    methylationMap(cgR,filter)
|
4,542 | f7c6990b4ddbe5ef9d79ef2326e60cdf1f761db3 | #python -m marbles test_clean_rangos.py
import unittest
from marbles.mixins import mixins
import pandas as pd
import requests
from pyspark.sql import SparkSession
import psycopg2 as pg
import pandas as pd
from pyspark.sql.types import StructType, StructField, StringType
from src.features.build_features import get_clean_data
class Test_Ranges_Case(unittest.TestCase, mixins.CategoricalMixins):
    '''
    Verify that the values of the rangoatrasohoras (delay-range)
    column are exactly the expected categories.
    '''
    def test_that_all_ranges_are_present(self):
        # get_clean_data() returns a Spark DataFrame; collect it to pandas and
        # compare its distinct category levels against the expected set:
        # cancelled flights plus the delay buckets in hours.
        df = get_clean_data()
        RANGOS=['cancelled', '0-1.5', '1.5-3.5' ,'3.5-']
        self.assertCategoricalLevelsEqual(list(df.toPandas()["rangoatrasohoras"].unique()), RANGOS)
|
4,543 | da2c615b8fab8de6bd63864508da254a46e65bb8 | import proactive
import unittest
import numbers
import os
import pytest
class RestApiTestSuite(unittest.TestCase):
    """Integration tests for the ProActive REST API resource-manager models."""
    # Shared connection state, populated by the autouse fixture below.
    gateway = None
    username = ""
    password = ""
    @pytest.fixture(autouse=True)
    def setup_gateway(self, metadata):
        # 'metadata' is an externally provided pytest fixture carrying the
        # server URL and credentials.
        self.gateway = proactive.ProActiveGateway(metadata['proactive_url'], debug=True)
        self.username = metadata['username']
        self.password = metadata['password']
    def test_rm_model_hosts(self):
        # Each test opens its own session and disconnects when done.
        self.gateway.connect(self.username, self.password)
        restapi = self.gateway.getProactiveRestApi()
        hosts = restapi.get_rm_model_hosts()
        self.assertIsNotNone(hosts)
        self.assertTrue(isinstance(hosts, list))
        self.gateway.disconnect()
    def test_rm_model_nodesources(self):
        self.gateway.connect(self.username, self.password)
        restapi = self.gateway.getProactiveRestApi()
        nodesources = restapi.get_rm_model_nodesources()
        self.assertIsNotNone(nodesources)
        self.assertTrue(isinstance(nodesources, list))
        self.gateway.disconnect()
    def test_rm_model_tokens(self):
        self.gateway.connect(self.username, self.password)
        restapi = self.gateway.getProactiveRestApi()
        tokens = restapi.get_rm_model_tokens()
        self.assertIsNotNone(tokens)
        self.assertTrue(isinstance(tokens, list))
        self.gateway.disconnect()
# Entry point for direct execution; the pytest 'metadata' fixture is still required.
if __name__ == '__main__':
    unittest.main()
|
4,544 | f86d01c4b980ac44dcdb1b0008493e1dbda25971 | from bacalhau.tei_document import TEIDocument
import nltk
import unittest
class TestDocument(unittest.TestCase):
    """Tests for TEIDocument against the sample corpus file tests/corpus/a.xml."""
    def setUp(self):
        # Build a TEIDocument over the sample file: word/punct tokenizer,
        # English stopwords, and an XPath selecting <div type="dummy"> texts
        # from the TEI body.
        self.filepath = 'tests/corpus/a.xml'
        self.doc = TEIDocument(self.filepath,
                nltk.tokenize.regexp.WordPunctTokenizer(),
                nltk.corpus.stopwords.words('english'),
                '//tei:body/tei:div[@type = "dummy"]')
    def test_get_text_count(self):
        # The sample document contains exactly two matching texts.
        self.assertEqual(2, self.doc.get_text_count())
    def test_get_texts(self):
        texts = self.doc.get_texts()
        self.assertEqual(2, len(texts))
    def test_get_term_data(self):
        term_data = self.doc.get_term_data()
        self.assertIsNotNone(term_data)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
4,545 | 8bd918896fb72c89a622ba4e18666bb90755cafd | import abc
import hashlib
import hmac
from typing import Any, Dict, Optional
from urllib.parse import urlencode
class IceCubedClientABC(abc.ABC):
    """Interface every ICE3X API client must implement."""

    # abc.abstractproperty has been deprecated since Python 3.3; the
    # documented replacement is stacking @property and @abc.abstractmethod.
    @property
    @abc.abstractmethod
    def _has_auth_details(self) -> bool:
        """Whether both an API key and a secret have been configured."""

    @abc.abstractmethod
    def sign(self, params: Dict[str, Any]) -> str:
        """Sign a dict of query params for a private API call."""
class IceCubedClientBase(IceCubedClientABC):
    """Shared behaviour for ICE3X REST clients: credentials and request signing."""

    BASE_URI = "https://ice3x.com/api/v1/"

    # BUG FIX: 'api_key: str = None' relies on implicit Optional, which
    # PEP 484 disallows; the annotations now state Optional[str] explicitly.
    def __init__(self, api_key: Optional[str] = None, secret: Optional[str] = None) -> None:
        """Instantiate the client
        Args:
            api_key: An ICE3X public API key
            secret: An ICE3X private API key
        """
        self.api_key = api_key
        self.secret = secret

    @property
    def _has_auth_details(self) -> bool:
        """Internal helper function which checks that an API key and secret have been provided"""
        return all([self.secret is not None, self.api_key is not None])

    def sign(self, params: Dict[str, Any]) -> str:
        """Sign a dict of query params for private API calls
        Args:
            params: A dict of query params
        Returns:
            A sha512 signed payload
        """
        # NOTE(review): 'assert' is stripped under python -O; kept so callers
        # that catch AssertionError keep working, but a ValueError would be safer.
        assert self.secret is not None, "A client secret is required to sign requests."
        query = urlencode(params)
        # HMAC-SHA512 over the urlencoded query string, keyed with the secret.
        signature = hmac.new(self.secret.encode(), query.encode(), hashlib.sha512)
        return signature.hexdigest()
|
4,546 | 63360ec9693a916375b49d0881008b1d7d4ec953 | from function import *
from .propogation import optimize
from .initialize import initialize_with_zeros
def predict(weight, intercept, x_vector):
    """
    Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
    Arguments:
    weight -- weights, a numpy array of size (num_px * num_px * 3, 1)
    intercept -- bias, a scalar
    x_vector -- data of size (num_px * num_px * 3, number of examples)
    Returns:
    y_prediction -- a numpy array (vector) of shape (1, m) containing all
    predictions (0/1) for the examples in x_vector
    """
    m = x_vector.shape[1]
    weight = weight.reshape(x_vector.shape[0], 1)
    # Probability of the positive class for each example.
    yhat = sigmoid(np.dot(weight.T, x_vector) + intercept)
    # Vectorized 0.5 threshold replaces the original per-column Python loop:
    # same float 0/1 output (yhat == 0.5 maps to 0, as before), but computed
    # in C instead of an O(m) Python loop.
    y_prediction = (yhat > 0.5).astype(float)
    assert (y_prediction.shape == (1, m))
    return y_prediction
class Logistic(object):
    """
    Logistic-regression trainer wrapping initialize_with_zeros / optimize / predict.
    """
    def __init__(self, *args, **kwargs):
        """
        Initializing the model parameters.
        :param kwargs:
            X_train, Y_train, X_test, Y_test -- column-major data matrices
            (one column per example) and their label row vectors
            num_iteration -- gradient-descent iterations
            learning_rate -- gradient-descent step size
        """
        # Initializing the test & training sets
        self._x_train = kwargs['X_train']
        self._y_train = kwargs['Y_train']
        self._x_test = kwargs['X_test']
        self._y_test = kwargs['Y_test']
        self.num_iteration = kwargs['num_iteration']
        self.learning_rate = kwargs['learning_rate']

    def fit(self):
        """
        Fit the model with the initialized parameters and print accuracies.
        :return: dict with keys costs, Y_prediction_test, Y_prediction_train,
                 w, b, learning_rate, num_iterations
        """
        # Start gradient descent from all-zero weights and intercept.
        weight, intercept = initialize_with_zeros(self._x_train.shape[0])
        parameters, grads, costs = optimize(weight,
                                            intercept,
                                            self._x_train,
                                            self._y_train,
                                            self.num_iteration,
                                            self.learning_rate
                                            )
        # Retrieve the learned parameters.
        weight = parameters["w"]
        intercept = parameters["b"]
        # Predict on the test and train sets.
        y_prediction_test = predict(weight, intercept, self._x_test)
        y_prediction_train = predict(weight, intercept, self._x_train)
        # Print train/test errors.
        print("train accuracy: {} %".format(100 - np.mean(np.abs(y_prediction_train - self._y_train)) * 100))
        # BUG FIX: test accuracy previously compared the predictions against
        # the test *features* (self._x_test) instead of the test labels.
        print("test accuracy: {} %".format(100 - np.mean(np.abs(y_prediction_test - self._y_test)) * 100))
        return {"costs": costs,
                "Y_prediction_test": y_prediction_test,
                "Y_prediction_train": y_prediction_train,
                "w": weight,
                "b": intercept,
                "learning_rate": self.learning_rate,
                "num_iterations": self.num_iteration}
|
4,547 | 897075810912e8360aa5cdedda3f12ce7c868263 | from PIL import Image, ImageStat
import os
import shutil
# full white photo - 255.0
# full black photo - 0.0
class ImageSelection:
    """Sort the images of a folder into 'bright'/'dark' subfolders by a
    darkness score relative to the folder average."""

    def __init__(self, path):
        self.path = path

    def brightness_check(self, image):
        '''Return a darkness score for the image: 0 for a fully white
        (bright) photo, 100 for a fully black one.
        (The original docstring had the scale inverted; the formula maps
        mean 255 -> 0 and mean 0 -> 100.)'''
        with Image.open(image).convert("L") as img:
            z = ImageStat.Stat(img)
            stat = 100*(255-z.mean[0])/255
        return int(stat)

    def averange_threshold(self, dictionary, img_list):
        '''Return the arithmetic mean of the scores (not an RMS, despite the
        original docstring).
        BUG FIX: the sum was divided by len(img_list), which also counts
        non-image files in the folder; it now divides by the number of
        scored images.  img_list is kept for signature compatibility.
        Assumes at least one image was scored.'''
        total = 0
        for value in dictionary.values():
            total += value
        return int(total / len(dictionary))

    def image_analysis(self):
        '''Score every image in self.path and copy each into a 'bright' or
        'dark' subfolder; the score is appended to the copied file name.'''
        img_list = os.listdir(self.path)
        img_list = [os.path.join(self.path, elem) for elem in img_list]
        extend_set = {".png", ".jpeg", ".jpg"}
        # BUG FIX: the original tested 'ext in img' (substring anywhere in the
        # path) and looped once per extension, recomputing scores for names
        # matching several patterns; match the real file extension instead.
        dictionary = {os.path.basename(img): self.brightness_check(img)
                      for img in img_list
                      if os.path.splitext(img)[1].lower() in extend_set}
        threshold = self.averange_threshold(dictionary, img_list)
        for key, value in dictionary.items():
            # Below-average darkness -> 'bright', otherwise 'dark'.
            folder = "bright" if value < threshold else "dark"
            os.makedirs(os.path.join(self.path, folder), exist_ok=True)
            dot = key.index(".")
            tagged = key[:dot] + "_" + str(value) + key[dot:]
            shutil.copy(os.path.join(self.path, key),
                        os.path.join(self.path, folder, tagged))
# Example invocation: runs at import time on a hard-coded local folder.
path = r"D:\Programy\z.programowanie\learning\to be sorted"
a = ImageSelection(path)
a.image_analysis()
|
4,548 | 235fce2615e2a5879f455aac9bcecbc2d152679b | from collections import Counter
class Solution:
    """LeetCode 1700 'Number of Students Unable to Eat Lunch' -- two takes."""

    def countStudents(self, students, sandwiches) -> int:
        """Queue simulation: rotate the first interested student to the front,
        serve, and stop as soon as nobody wants the top sandwich.  Returns the
        number of students left unserved."""
        if not students or not sandwiches:
            return 0
        queue = list(students)
        stack = list(sandwiches)
        while queue:
            want = stack[0]
            if queue[0] == want:
                # Front student takes the top sandwich.
                queue = queue[1:]
                stack = stack[1:]
            elif want in queue:
                # Rotate the queue so an interested student is at the front.
                pivot = queue.index(want)
                queue = queue[pivot:] + queue[:pivot]
            else:
                # Nobody wants the top sandwich: everyone left goes hungry.
                break
        return len(queue)

    def countStudents_2(self, students, sandwiches) -> int:
        """Counting take: serve sandwiches in order while some student still
        prefers the current one; order within the queue never matters."""
        remaining = Counter(students)
        total = len(students)
        served = 0
        while served < total and remaining[sandwiches[served]]:
            remaining[sandwiches[served]] -= 1
            served += 1
        return total - served
# Quick smoke run; the return value (0 hungry students) is discarded.
s = Solution()
s.countStudents([1,1,0,0], [0,1,0,1])
|
4,549 | 10c8316aee2107dc84ce7c1427dd62f52a2ce697 | import os
import numpy as np
import scipy as sp
import sys
from sure import that
from itertools import combinations, permutations
# Code Jam-style solver: for each case, decide how many backspaces to press
# (or give up and retype the line) to minimise the expected keystroke count.
#
# Ported to Python 3: `map` now returns an iterator (so it must be wrapped in
# list() before indexing) and `reduce` is no longer a builtin; files are also
# closed deterministically via context managers.
with open('input1.txt', 'r') as input_file, open('output1.txt', 'w') as output_file:
    T = int(input_file.readline().rstrip('\n'))
    for case_num in range(1, T + 1):
        # Parse data: number of characters already typed, target length,
        # then the per-character probabilities of having typed correctly.
        data = list(map(int, input_file.readline().rstrip('\n').split(' ')))
        typed = data[0]
        length = data[1]
        probs = list(map(float, input_file.readline().rstrip('\n').split(' ')))
        assert that(len(probs)).equals(typed)
        enter = 1

        def product(values):
            # Plain loop replaces Python 2's builtin `reduce`;
            # returns 1 for an empty sequence, as before.
            result = 1
            for v in values:
                result *= v
            return result

        def expected_strokes(typed, length):
            # Expected keystrokes from the current position: either everything
            # typed so far is correct (just finish + enter), or we press enter
            # and retype the whole line.
            finish = length - typed + enter
            retype = finish + length + enter
            correct = product(probs[:typed])
            return correct * finish + (1 - correct) * retype

        def get_min_backspace_stroke_count(typed, length):
            # Try every possible number of backspaces; keep the cheapest.
            return min(backspaces + expected_strokes(typed - backspaces, length)
                       for backspaces in range(typed + 1))

        # `length + 2` is the "give up immediately and retype" option.
        result = min(length + 2, get_min_backspace_stroke_count(typed, length))
        # Write result
        output_file.write('Case #{}: {}\n'.format(case_num, result))
|
4,550 | a8190c7c8926df18ee9439922ce8e3241e9a6140 | n=int(input("enter a number"))
cp=n
rev=0
sum=0
while(n>0):
rev=n%10
sum+=rev**3
n=n//10
if(cp==sum):
print("the given no is amstrong ")
else:
print("the given no is not amstrong ") |
4,551 | a4b61a5a79e314e56ba25c6e2e735bd2ee4ef0d3 | # Generated by Django 2.2.3 on 2019-07-14 13:34
from django.db import migrations, models
def forwards_func(apps, schema_editor):
    """ Add Theater Rooms """
    # Use the historical model state (apps.get_model) rather than a direct
    # import, so the migration stays valid if the model changes later.
    TheaterRoom = apps.get_model("main", "TheaterRoom")
    db_alias = schema_editor.connection.alias
    TheaterRoom.objects.using(db_alias).bulk_create([
        TheaterRoom(name="Red Room", rows_count=10, seats_per_row_count=15),
        TheaterRoom(name="Blue Room", rows_count=20, seats_per_row_count=30),
    ])
def reverse_func(apps, schema_editor):
    """ No need to do anything since the table is dropped completely """
    # Intentional no-op: reversing the CreateModel removes the seeded rows.
    pass
class Migration(migrations.Migration):
    # Initial migration: creates the TheaterRoom table and seeds it with two
    # default rooms via forwards_func.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='TheaterRoom',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('rows_count', models.IntegerField()),
                ('seats_per_row_count', models.IntegerField()),
            ],
        ),
        # Seed data; the reverse is a no-op because dropping the table undoes it.
        migrations.RunPython(forwards_func, reverse_func),
    ]
|
4,552 | 3630f83e7e6a10f42e96f8bd6fa9714232d9176b | import os
import time
import pickle
from configparser import ConfigParser
from slackbot import bot
from slackbot.bot import Bot
from slackbot.bot import listen_to
from elasticsearch_dsl.connections import connections
from okcom_tokenizer.tokenizers import CCEmojiJieba, UniGram
from marginalbear_elastic.query import post_multifield_query
from marginalbear_elastic.utils import concat_tokens
from marginalbear_elastic.ranking import avg_pmi
# Ranking parameters: candidate titles retrieved per query / answers shown.
top_title = 100
top_response = 15
# NOTE(review): realpath(__name__) resolves the *module name* as a relative
# path, not this file's location — presumably __file__ was intended; confirm.
package_dir = os.path.dirname(os.path.realpath(__name__))
config = ConfigParser()
config.read(package_dir + '/chatbot_apps/config.ini')
bot.settings.API_TOKEN = config.get('slack', 'slack_token')
SLACK_CHANNEL = config.get('slack', 'slack_channel')
@listen_to(r'(.*)')
def receive_question(message, question_string):
    """Answer any message posted in the configured Slack channel.

    Tokenizes the question two ways (CCEmojiJieba and unigram), queries the
    'post' Elasticsearch index on both fields, re-ranks candidates by average
    PMI, and replies with the top responses.
    """
    if message._body['channel'] == SLACK_CHANNEL:
        try:
            query_ccjieba = ccjieba.cut(question_string.strip())
            query_unigram = unigram.cut(question_string.strip())
            results = post_multifield_query(client,
                                            index='post',
                                            query_ccjieba=concat_tokens(query_ccjieba, pos=False),
                                            query_unigram=concat_tokens(query_unigram, pos=False),
                                            top=top_title)
            # avg_pmi yields (score, comment, title) tuples, best first.
            ans = avg_pmi(query_unigram, results, pairs_cnt, total_pairs_cnt, tokenizer='unigram')
            ans_string = '\n'.join(['<{:.3f}> <title:{}> comment: {}'.format(score, title, comment) for score, comment, title in ans[:top_response]])
            message.send(ans_string)
        except Exception as err:
            # Deliberate catch-all: one bad query must not kill the bot loop.
            print(err)
def main():
    """Start the Slack bot's listen loop (blocks until interrupted)."""
    Bot().run()
if __name__ == '__main__':
    # Set up the globals used by receive_question before starting the bot.
    client = connections.create_connection()
    ccjieba = CCEmojiJieba()
    unigram = UniGram()
    t = time.time()
    print('Loading unigram pmi pickle')
    # Pair-count statistics used by the PMI ranking.
    with open(package_dir + '/data/pmi_pickle/pmi_unigram.pickle', 'rb') as f:
        pairs_cnt = dict(pickle.load(f))
        total_pairs_cnt = sum(pairs_cnt.values())
    print('Pickle loaded in {:.5f}s'.format(time.time() - t))
    main()
|
4,553 | b1a808e76008edec02d37ec596461e3a00a1d349 | from flask_wtf import FlaskForm
from wtforms import StringField, DateField, DecimalField
class HoursForm(FlaskForm):
    """Form capturing one day's worked hours (date plus begin/end times)."""
    # NOTE(review): declared as StringField although DateField is imported —
    # confirm whether a typed date field was intended here.
    date = StringField("Date")
    begins = DecimalField("Begins")
    ends = DecimalField("Ends")

    class Meta:
        # CSRF protection disabled — presumably posted from a trusted
        # context; verify before exposing this form publicly.
        csrf = False
|
4,554 | 6420d1b9da7ff205e1e138f72b194f63d1011012 | import unittest
from .context import *
class BasicTestSuite(unittest.TestCase):
    """Basic test cases."""

    def test_hello_world(self):
        # hello_world comes from the star-import of .context above.
        self.assertEqual(hello_world(), 'hello world')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
4,555 | 13451352e8dcdfe64771f9fc188b13a31b8109f5 | import giraffe.configuration.common_testing_artifactrs as commons
from giraffe.business_logic.ingestion_manger import IngestionManager
from redis import Redis
def test_parse_redis_key(config_helper, ingestion_manager):
    """parse_redis_key must round-trip a key built as job<sep>op<sep>labels."""
    sep = config_helper.key_separator
    job_name = config_helper.nodes_ingestion_operation
    operation = config_helper.nodes_ingestion_operation
    labels = config_helper.test_labels
    key = sep.join((job_name, operation, ','.join(labels)))
    parsed: IngestionManager.key_elements_type = ingestion_manager.parse_redis_key(key=key)
    assert parsed.job_name == job_name
    assert parsed.operation == operation
    assert set(parsed.arguments) == set(labels)
def test_publish_job(config_helper, redis_driver, ingestion_manager, nodes, edges, logger, redis_db):
    """publish_job must create one Redis set per operation holding every item."""
    r: Redis = redis_driver
    im: IngestionManager = ingestion_manager
    # Start from a clean database so key counts are deterministic.
    commons.purge_redis_database(redis_db=redis_db, log=logger)
    # Populate nodes
    im.publish_job(job_name=config_helper.test_job_name,
                   operation=config_helper.nodes_ingestion_operation,
                   operation_arguments=','.join(config_helper.test_labels),
                   items=[str(value) for value in nodes])
    # Populate edges
    im.publish_job(job_name=config_helper.test_job_name,
                   operation=config_helper.edges_ingestion_operation,
                   operation_arguments=f'{config_helper.test_edge_type},{config_helper.test_labels[0]}',
                   items=[str(value) for value in edges])
    # Exactly one key per published operation.
    keys = r.keys(pattern=f'{config_helper.test_job_name}*')
    assert len(keys) == 2
    node_keys = r.keys(pattern=f'{config_helper.test_job_name}{config_helper.key_separator}{config_helper.nodes_ingestion_operation}{config_helper.key_separator}*')
    assert len(node_keys) == 1
    edges_keys = r.keys(pattern=f'{config_helper.test_job_name}{config_helper.key_separator}{config_helper.edges_ingestion_operation}{config_helper.key_separator}*')
    assert len(edges_keys) == 1
    nodes_key = node_keys[0]
    edges_key = edges_keys[0]
    # Every published item must land in its set (scard = set cardinality).
    num_stored_nodes = r.scard(name=nodes_key)
    assert num_stored_nodes == len(nodes)
    num_stored_edges = r.scard(name=edges_key)
    assert num_stored_edges == len(edges)
def test_process_job(config_helper, ingestion_manager, redis_db, logger, neo):
    """End-to-end: Redis-staged content must materialise as Neo4j nodes/edges."""
    # Reset both stores, then stage the canonical test data in Redis.
    commons.purge_redis_database(redis_db=redis_db, log=logger)
    commons.purge_neo4j_database(log=logger, neo=neo)
    commons.init_redis_test_data(im=ingestion_manager)
    im = ingestion_manager
    im.process_redis_content(translation_id=config_helper.test_job_name, request_id='unit-testing')
    # Node count in Neo4j must match what was staged.
    query = f'MATCH (:{config_helper.test_labels[0]}) RETURN COUNT(*) AS count'
    count = neo.pull_query(query=query).value()[0]
    assert count == config_helper.number_of_test_nodes
    # Same for relationships of the test edge type.
    query = f'MATCH ()-[:{config_helper.test_edge_type}]->() RETURN COUNT(*) AS count'
    count = neo.pull_query(query=query).value()[0]
    assert count == config_helper.number_of_test_edges
|
4,556 | 7ac53779a98b6e4b236b1e81742163d2c610a274 | __author__ = 'samar'
import mv_details
import product
|
4,557 | 0457ac2ecd0a951b0088c887539ab696797d68bc | import os
from datetime import datetime, timedelta
from django.shortcuts import render
from django.utils.decorators import method_decorator
from rest_framework.viewsets import GenericViewSet, mixins
from common.jwt_util import generate_jwt
from .serializers import ApiUser, ApiUserSerializer, UserSerializer
from common.myresponse import StatusResponse
from rest_framework.generics import GenericAPIView
from .models import User
from common.utils.login_util import login_decorator
# Create your views here.
# login_decorator is applied to list() so only authenticated callers may read.
@method_decorator(login_decorator,name="list")
class UsersOptionsView(GenericViewSet, mixins.ListModelMixin):
    """
    list:
    Return the list of active, non-superuser users.
    """
    serializer_class = ApiUserSerializer
    queryset = User.objects.filter(is_superuser=0,is_active=1).all()

    def list(self, request, *args, **kwargs):
        # Wrap the standard DRF list payload in the project's envelope.
        return StatusResponse(data=super().list(request).data)
class UserLoginView(GenericAPIView):
    """Login endpoint: POST issues tokens, PUT refreshes an access token."""

    def _generate_tokens(self, user_id, with_refresh_token=True):
        """Issue a JWT access token (and optionally a refresh token).

        Expiry windows come from the JWT_EXPIRY_HOURS / JWT_REFRESH_DAYS
        environment variables. Returns (token, refresh_token); the latter
        is None when with_refresh_token is False.
        """
        now = datetime.utcnow()
        access_expiry = now + timedelta(hours=float(os.environ['JWT_EXPIRY_HOURS']))
        token = generate_jwt({'user_id': user_id, 'refresh': False}, access_expiry)
        if not with_refresh_token:
            return token, None
        refresh_expiry = now + timedelta(days=float(os.environ['JWT_REFRESH_DAYS']))
        refresh_token = generate_jwt({'user_id': user_id, 'refresh': True}, refresh_expiry)
        return token, refresh_token

    def post(self, request):
        """Authenticate by username/password and return fresh tokens."""
        username = request.data.get("username")
        password = request.data.get("password")
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            return StatusResponse(http_code=400, data={"tip": "用户不存在"})
        # Guard clause: bad credentials end the request early.
        if user is None or not user.check_password(password):
            return StatusResponse(http_code=400, data={"tip": "登录失败"})
        token, refresh_token = self._generate_tokens(user.id, with_refresh_token=True)
        payload = {
            "token": token,
            "refresh_token": refresh_token,
            "id": user.id,
            "username": user.username,
        }
        return StatusResponse(data=payload, http_code=201)

    def put(self, request):
        """Exchange a valid refresh token for a new access token."""
        if not (request.user_id and request.refresh):
            return StatusResponse(http_code=401, data={"tip": "token刷新失败"})
        token, _ = self._generate_tokens(request.user_id, with_refresh_token=False)
        return StatusResponse(data={"id": request.user_id, "token": token}, http_code=201)
class UserRegistView(GenericAPIView):
    """User registration endpoint."""
    serializer_class = UserSerializer

    def post(self, request):
        # Validate the payload (raises a 400 on bad data), create the user,
        # then echo the serialized representation back to the caller.
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return StatusResponse(http_code=200,data=serializer.data)
|
4,558 | 4d05e65dce9f689ae533a57466bc75fa24db7b4d | from tkinter import *
import re
class Molecule:
    """A named molecule with a weight in grams and a DNA sequence."""

    def __init__(self, nom, poids, adn):
        self.nom = nom
        self.poids = poids
        self.adn = adn

    def __repr__(self):
        return f"{self.nom} : {self.poids} g"
class Menu:
    """Main Tk window: molecule management buttons plus a novelty canvas.

    Holds the molecule data as {name: {'poids': float, 'ADN': str}} and
    opens a Toplevel dialog per action. The constructor blocks in
    Tk's mainloop until the window is closed.
    """

    def __init__(self):
        self.data = dict()
        self.main = Tk()
        self.main.title("Molécules")
        self.main.config(bg="black")
        self.main.minsize(210, 220)
        # NOTE(review): stores the *type* float as a placeholder; calc_mean
        # later replaces it with an actual number.
        self.mean = float
        Button(self.main, width=14, bg="black", fg="white", text='Ajouter molécule', command=self.add_molecule).grid(
            pady=10)
        Button(self.main, width=14, bg="black", fg="white", text='Poids maximum', command=self.get_max).grid()
        Button(self.main, width=14, bg="black", fg="white", text='Poids moyen', command=self.get_mean).grid(pady=10)
        Button(self.main, bg="black", fg="white", text='Molécules au poids supérieur\nà la moyenne',
               command=self.greater_than_mean).grid(padx=10)
        # Import/export row.
        self.io = Frame(self.main, bg="black")
        Button(self.io, bg="black", fg="white", text='Importer', command=self.import_data).grid(row=1, column=1, padx=5)
        Button(self.io, bg="black", fg="white", text='Exporter', command=self.export_data).grid(row=1, column=2, padx=5)
        self.io.grid(pady=10)
        # Novelty drawing; y/y2 drive the animation in grow_penis.
        self.dessein = Canvas(self.main, width=500, height=500)
        self.y = 45
        self.y2 = 50
        self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155, fill="deeppink2", outline="")
        self.right = self.dessein.create_oval(225, self.y, 300, self.y + 155, fill="deeppink2", outline="")
        self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2, 300, 400, 200, 400, fill="salmon1")
        self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.y2, 300, 400, 275, 400, fill="salmon2")
        self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.y2, 300, 400, 290, 400, fill="salmon3")
        self.giggle = True
        self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill="salmon1", outline="")
        self.ball_right = self.dessein.create_oval(225, 345, 400, 445, fill="salmon1", outline="")
        self.main.bind("<Down>", self.grow_penis)
        self.dessein.grid(pady=10)
        Button(self.main, width=14, bg="black", fg="white", text='Enlarge your penis !!!',
               command=self.grow_penis).grid()
        self.main.mainloop()

    def grow_penis(self, event=None):
        # Each call nudges the drawing: top moves up (y) while the body top
        # moves down (y2), and the two lower ovals wobble via `giggle`.
        if self.y >= 0:
            self.y -= 2
        if self.y2 <= 75:
            self.y2 += 1
        self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)
        self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)
        self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, 400, 200, 400)
        self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, 400, 275, 400)
        self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, 400, 290, 400)
        if self.giggle:
            self.giggle = False
            self.dessein.coords(self.ball_left, 275, 350, 100, 450)
            self.dessein.coords(self.ball_right, 225, 350, 400, 450)
        else:
            self.giggle = True
            self.dessein.coords(self.ball_left, 275, 345, 100, 445)
            self.dessein.coords(self.ball_right, 225, 345, 400, 445)

    def add_molecule(self):
        # Each action opens its own modal-ish Toplevel dialog.
        GUIAdd(self)

    def get_max(self):
        GUIMax(self)

    def get_mean(self):
        GUIMean(self)

    def greater_than_mean(self):
        GUIGtm(self)

    def calc_mean(self):
        # NOTE(review): raises ZeroDivisionError when no molecule has been
        # added yet — confirm whether the buttons should guard against this.
        self.mean = sum([x['poids'] for x in self.data.values()]) / len(self.data.values())

    def import_data(self):
        # File format: three lines — names, weights, DNA — each space-separated.
        with open('mols.txt', 'r') as input_file:
            input_txt = input_file.readlines()
        liste_name = input_txt[0].split()
        liste_weight = [float(x) for x in input_txt[1].split()]
        liste_adn = input_txt[2].split()
        for i in range(len(liste_name)):
            self.data[liste_name[i]] = {'poids': liste_weight[i], 'ADN': liste_adn[i]}

    def export_data(self):
        # Mirror of import_data; silently does nothing when there is no data.
        if len(self.data) > 0:
            with open('mols.txt', 'w') as output:
                valeurs = self.data.values()
                liste_weight = [x['poids'] for x in valeurs]
                liste_adn = [x['ADN'] for x in valeurs]
                output.write(' '.join(self.data.keys()) + '\n')
                output.write(' '.join([str(x) for x in liste_weight]) + '\n')
                output.write(' '.join(liste_adn))
class GUIAdd:
    """Dialog for adding a molecule (name, weight, DNA) to the Menu's data."""

    def __init__(self, menu: Menu):
        self.root = menu
        self.gui = Toplevel(menu.main)
        self.gui.title('Ajout de molécule')
        self.gui.minsize(210, 100)
        Label(self.gui, text='Nom de la molécule').pack()
        self.mole_nom = Entry(self.gui)
        self.mole_nom.pack()
        Label(self.gui, text='Poids de la molécule').pack()
        self.mole_poids = Entry(self.gui)
        self.mole_poids.pack()
        Label(self.gui, text='ADN de la molécule').pack()
        self.mole_adn = Entry(self.gui)
        self.mole_adn.pack()
        Button(self.gui, text='Ajouter', command=self.close_gui).pack()
        # Inline error label, filled by close_gui on invalid input.
        self.error = Label(self.gui, text="")
        self.error.pack()
        self.gui.mainloop()

    def close_gui(self):
        """Validate the three fields; add the molecule and close on success.

        On any validation failure the dialog stays open and the error
        label explains the problem. The float() conversion sits inside the
        try so a non-numeric weight is reported rather than raised.
        """
        try:
            if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()) > 0 and len(self.mole_adn.get()) > 0:
                if self.mole_nom.get() not in self.root.data.keys():
                    # DNA may only contain the bases A, C, G, T.
                    if not re.search(r'[^ACGT]', self.mole_adn.get()):
                        self.root.data[self.mole_nom.get()] = {'poids': float(self.mole_poids.get()),
                                                               'ADN': self.mole_adn.get()}
                    else:
                        self.error['text'] = "Séquence d'ADN non réglementaire"
                        return
                else:
                    self.error['text'] = "Molecule déjà existante dans les données"
                    return
            else:
                self.error['text'] = "Tous les champs ne sont pas remplis"
                return
        except ValueError:
            self.error['text'] = "Poids doit être un float ou un int"
            return
        # Only reached when the molecule was added successfully.
        self.gui.destroy()
class GUIMax:
    """Popup listing the molecule(s) with the maximal weight (ties included)."""

    def __init__(self, menu: Menu):
        # NOTE(review): maxi starts at 0, so molecules with negative weights
        # would never be listed — confirm weights are always positive.
        maxi = 0
        max_list = []
        self.gui = Toplevel(menu.main)
        self.gui.title('Molécule au poids maximal')
        self.gui.minsize(210, 100)
        # Single pass, keeping every molecule tied for the current maximum.
        for mol in menu.data:
            if menu.data[mol]['poids'] > maxi:
                maxi = menu.data[mol]['poids']
                max_list = [mol]
            elif menu.data[mol]['poids'] == maxi:
                max_list.append(mol)
        for mol in max_list:
            Label(self.gui, text="{} : {} g".format(mol, menu.data[mol]["poids"])).pack()
        self.gui.mainloop()
class GUIMean:
    """Popup showing the average weight of all molecules."""

    def __init__(self, menu: Menu):
        self.gui = Toplevel(menu.main)
        self.gui.title('Poids moyen')
        self.gui.minsize(210, 100)
        # Recompute and cache the mean on the Menu before displaying it.
        menu.calc_mean()
        Label(self.gui, text="Poids moyen des molécules").pack()
        Label(self.gui, text=menu.mean).pack()
        self.gui.mainloop()
class GUIGtm:
    """Popup listing molecules whose weight is at or above the mean."""

    def __init__(self, menu: Menu):
        menu.calc_mean()
        self.gui = Toplevel(menu.main)
        self.gui.title('Molécule au poids supérieur à la moyenne')
        self.gui.minsize(210, 100)
        # Uses >=, so molecules exactly at the mean are included too.
        for mol in menu.data.keys():
            if menu.data[mol]['poids'] >= menu.mean:
                Label(self.gui, text="{} : {} g".format(mol, menu.data[mol]["poids"])).pack()
        self.gui.mainloop()
def pascal(n: int):
    """Print rows 0 through n of Pascal's triangle, one per line."""
    row = [1]
    for _ in range(n + 1):
        print(' '.join(str(v) for v in row))
        # Next row: 1, pairwise sums of the current row, 1.
        row = [1] + [row[j] + row[j + 1] for j in range(len(row) - 1)] + [1]
# Launch the GUI (blocks in Tk's mainloop until the window is closed).
Menu()
# pascal(50)
|
4,559 | 8afce5b47c7c9c67a8be493f7f4de1510352b1c7 | from django.db import models
class TestModel(models.Model):
    """Minimal model used for experimentation."""
    name = models.CharField(max_length=15)
    surname = models.CharField(max_length=10)
    age = models.IntegerField()
class Example(models.Model):
    """Showcase of the common Django model field types."""
    integer_field = models.IntegerField()
    positive_field = models.PositiveIntegerField()
    positive_small_field = models.PositiveSmallIntegerField()
    big_integer_field = models.BigIntegerField()
    float_field = models.FloatField()
    binary_field = models.BinaryField()
    boolean_field = models.BooleanField()
    char_field = models.CharField(max_length=5)
    text_field = models.TextField(max_length=20)
    date_field = models.DateField(auto_now=False, auto_now_add=False)
    date_time_field = models.DateTimeField(auto_now_add=False)
    decimal_field = models.DecimalField(max_digits=8, decimal_places=2) #222222.22
    email = models.EmailField()
    file_field = models.FileField(upload_to='file')
    image_field = models.ImageField(upload_to='images')
class Author(models.Model):
    """Book author; verbose_name strings are shown in the Django admin."""
    name = models.CharField(max_length=50, verbose_name="Имя", blank=True)
    surname = models.CharField(max_length=50, verbose_name="Фамилия")
    date_birth = models.DateField(auto_now=False, verbose_name="Дата рождения")

    def __str__(self):
        # "First Last" display form.
        return self.name + ' ' + self.surname
class Book(models.Model):
    """A book written by exactly one Author, with a constrained genre."""
    # (stored value, human-readable label) pairs for the genre choices.
    CHOISE_GENRE = (
        ('comedy', "Comedy"),
        ('tragedy', "Tragedy"),
        ('drama', "Drama"),
    )
    # Deleting the author also deletes their books (CASCADE).
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    title = models.CharField(max_length=50)
    text = models.TextField(max_length=1000)
    genre = models.CharField(max_length=50, choices=CHOISE_GENRE)
class Place(models.Model):
    """A physical location that may host a Restaurant (one-to-one)."""
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=80)

    def __str__(self):
        return self.name
class Restaurant(models.Model):
    """A restaurant occupying exactly one Place (shares its primary key)."""
    place = models.OneToOneField(Place, on_delete=models.CASCADE, primary_key=True)
    serves_hot_dogs = models.BooleanField(default=False)
    serves_pizza = models.BooleanField(default=False)

    def __str__(self):
        # Bug fix: __str__ must return a str; returning the related Place
        # instance itself raised TypeError. Delegate to Place.__str__.
        return str(self.place)
class Publication(models.Model):
    """Publication side of the many-to-many example with Article."""
    title = models.CharField(max_length=30)

    # def __str__(self):
    #     return self.title
    #
    # class Meta:
    #     ordering = ('title', )
class Article(models.Model):
    """Article appearing in zero or more Publications (many-to-many)."""
    headline = models.CharField(max_length=100)
    publications = models.ManyToManyField(Publication)

    # def __str__(self):
    #     return self.headline
    #
    class Meta:
        # Default queryset ordering by headline.
        ordering = ('headline', )
|
4,560 | 9c653719ea511d78de9ddcc19442d9f9f7dc11dc | # -*- coding: utf-8 -*-
import pickle
import pathlib
from pathlib import Path
from typing import List, Tuple, Dict
import numpy as np
import torch
import torch.nn as nn
from torch.optim import SGD, Adam
from torch.utils.data import Dataset, DataLoader
from torchtext.data import get_tokenizer
from matplotlib import pyplot as plt
"""### **Preprocesare**"""
def read_data(directory):
    """Collect article ids, raw texts and parsed labels from *directory*.

    For each ``articleNNN.txt`` the id is the numeric part of the name and
    the labels come from the matching ``.labels.tsv`` (empty when missing).
    """
    ids, texts, labels = [], [], []
    for article_file in directory.glob('*.txt'):
        article_id = article_file.name.replace('article', '').replace('.txt', '')
        ids.append(article_id)
        texts.append(article_file.read_text('utf8'))
        labels.append(parse_label(article_file.as_posix().replace('.txt', '.labels.tsv')))
    # labels entries can be empty lists
    return ids, texts, labels
def parse_label(label_path):
    """Parse a .labels.tsv file into [start, end, technique, overlap, 0] rows.

    Rows are sorted by span; a row whose span overlaps an earlier accepted
    span is flagged (index 3 set to 1) and dropped from the result.
    Returns [] when the file is missing or empty.
    """
    labels = []
    f = Path(label_path)
    if not f.exists():
        return labels
    # Bug fix: open via a context manager so the file handle is closed
    # (the original iterated a bare open() and leaked it).
    with open(label_path) as fh:
        for line in fh:
            parts = line.strip().split('\t')
            labels.append([int(parts[2]), int(parts[3]), parts[1], 0, 0])
    labels = sorted(labels)
    if not labels:
        return labels
    length = max(label[1] for label in labels)
    # visit[i] == 1 once character i is covered by an accepted span.
    visit = np.zeros(length)
    res = []
    for label in labels:
        if sum(visit[label[0]:label[1]]):
            # Overlaps a previously accepted span: flag but do not keep.
            label[3] = 1
        else:
            visit[label[0]:label[1]] = 1
            res.append(label)
    return res
def clean_text(articles, ids):
    """Split each article into non-empty lines annotated with char offsets.

    Returns one list per article; each entry is [article_id, sentence,
    start, end], where the offsets account for the newline separators of
    the original text.
    """
    cleaned = []
    for article, article_id in zip(articles, ids):
        rows = []
        end = -1
        for sentence in article.split('\n'):
            start = end + 1  # +1 skips the newline character
            end = start + len(sentence)
            if sentence:  # drop blank lines but keep the offset bookkeeping
                rows.append([article_id, sentence, start, end])
        cleaned.append(rows)
    return cleaned
def make_dataset(texts, lbls):
    """Flatten per-article rows into parallel sentence/label lists.

    A sentence gets label 1 when either boundary of any propaganda span
    falls strictly inside its character range, else 0.
    """
    sentences, flags = [], []
    for rows, spans in zip(texts, lbls):
        for row in rows:
            sentences.append(row[1])
            hit = any(row[2] < span[0] < row[3] or row[2] < span[1] < row[3]
                      for span in spans)
            flags.append(1 if hit else 0)
    return sentences, flags
# --- Build the datasets ------------------------------------------------------
# Read the train/test/dev splits, split the articles into offset-annotated
# sentences, pair them with binary propaganda labels, and cache the results
# as pickles so later runs can skip the preprocessing.
directory = pathlib.Path('data/protechn_corpus_eval/train')
ids, texts,lbl = read_data(directory)
ids_train = ids
texts_train = texts
lbl_train = lbl
directory = pathlib.Path('data/protechn_corpus_eval/test')
ids_test, texts_test,lbl_test = read_data(directory)
directory = pathlib.Path('data/protechn_corpus_eval/dev')
ids_dev, texts_dev,lbl_dev = read_data(directory)
txt_train = clean_text(texts_train, ids_train)
txt_test = clean_text(texts_test, ids_test)
txt_dev =clean_text(texts_dev, ids_dev)
train_txt, train_lbl = make_dataset(txt_train, lbl_train)
test_txt, test_lbl = make_dataset(txt_test, lbl_test)
dev_txt, dev_lbl = make_dataset(txt_dev, lbl_dev)
# Cache to disk, then immediately reload (keeps later cells reproducible).
pickle.dump([dev_txt,dev_lbl], open("savedata/dev.txt", "wb"))
pickle.dump([test_txt,test_lbl], open("savedata/test.txt", "wb"))
pickle.dump([train_txt,train_lbl], open("savedata/train.txt", "wb"))
train_txt, train_lbl = pickle.load(open("savedata/train.txt", "rb"))
test_txt, test_lbl = pickle.load(open("savedata/test.txt", "rb"))
dev_txt, dev_lbl = pickle.load(open("savedata/dev.txt", "rb"))
"""### **Dataset+ data_loader**"""
class Vocabulary:
    """
    Bidirectional mapping between tokens and integer indices.

    Index 0 is reserved for the '<PAD>' token; the provided tokens are
    numbered from 1 in the order given.
    """

    def __init__(self, tokens: List[str]):
        # token -> index, with the padding token fixed at 0
        self.word_to_idx = {'<PAD>': 0}
        self.word_to_idx.update({tok: idx for idx, tok in enumerate(tokens, 1)})
        # index -> token (inverse mapping)
        self.idx_to_word = {idx: tok for tok, idx in self.word_to_idx.items()}

    def get_token_at_index(self, idx: int):
        return self.idx_to_word[idx]

    def get_index_of_token(self, token: str):
        return self.word_to_idx[token]

    def size(self):
        return len(self.word_to_idx)
class PropagandaDataset(Dataset):
    def __init__(self,
                 fold: str,
                 examples: List[str],
                 labels: List[int],
                 vocab: Vocabulary):
        """
        :type vocab: object
        :param fold: 'train'/'eval'/'test'
        :param examples: List of sentences/paragraphs
        :param labels: List of labels (1 if propaganda, 0 otherwise)
        """
        self.fold = fold
        self.examples = examples
        self.labels = labels
        self.vocab = vocab
        # Perf fix: build the tokenizer once instead of once per __getitem__.
        self._tokenizer = get_tokenizer("spacy")

    def __getitem__(self, index: int) -> (torch.Tensor, torch.Tensor):
        """
        Return (token_indices, label) tensors for the example at *index*.
        """
        example, label = self.examples[index], self.labels[index]
        # tokenize and map tokens to vocabulary indices
        tokens = self._tokenizer(example)
        token_indices = [self.vocab.get_index_of_token(tok) for tok in tokens]
        # Bug fix: torch.LongTensor(label) with an int allocates an
        # *uninitialised* tensor of size `label`; wrap the value instead.
        return torch.LongTensor(token_indices), torch.tensor(label, dtype=torch.long)

    def __len__(self):
        """
        Return the size of this dataset (the number of sentences).
        """
        return len(self.examples)
def collate_sentences(batch: List[Tuple]):
    """
    Pad a batch of (token_indices, label) pairs into a batch_size x max_len
    LongTensor plus a batch_size label tensor. Padding uses index 0 (<PAD>).

    Fixes over the original: leftover debug prints removed (they flooded
    stdout on every batch) and an empty batch no longer indexes past the
    data (max(..., default=0)).
    """
    max_len = max((len(seq) for seq, _ in batch), default=0)
    batch_size = len(batch)
    # zero-filled == padded with the <PAD> index
    batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)
    batch_labels = []
    for row, (seq, label) in enumerate(batch):
        batch_sentences[row, :len(seq)] = seq
        batch_labels.append(label)
    return batch_sentences, torch.tensor(batch_labels)
def fill_vocab(txt: List[Tuple]):
    """Build a Vocabulary from the unique tokens of all sentences in *txt*.

    Tokens keep first-seen order (dicts preserve insertion order), matching
    the original list-based behaviour.
    """
    tokenizer = get_tokenizer("spacy")
    # Perf fix: the original called list.count() per token (O(n^2) overall);
    # an insertion-ordered dict used as a set gives the same order in O(n).
    unique_tokens = {}
    for sentence in txt:
        for tok in tokenizer(sentence):
            unique_tokens.setdefault(tok, None)
    return Vocabulary(tokens=list(unique_tokens))
# Build vocabularies and DataLoaders, caching each stage to disk.
# NOTE(review): each split gets its *own* vocabulary, so the same word can
# map to different indices across train/test/dev — confirm this is intended
# (the combined `vocab` is only used for the model's embedding size below).
full_text = train_txt + dev_txt
vocab = fill_vocab(full_text)
test_vocab = fill_vocab(test_txt)
train_vocab = fill_vocab(train_txt)
dev_vocab = fill_vocab(dev_txt)
pickle.dump(dev_vocab, open("savedata/dev_vocab.txt", "wb"))
pickle.dump(test_vocab, open("savedata/test_vocab.txt", "wb"))
pickle.dump(train_vocab, open("savedata/train_vocab.txt", "wb"))
pickle.dump(vocab, open("savedata/vocab.txt", "wb"))
dev_vocab = pickle.load(open("savedata/dev_vocab.txt","rb"))
test_vocab = pickle.load(open("savedata/test_vocab.txt","rb"))
train_vocab = pickle.load(open("savedata/train_vocab.txt","rb"))
vocab = pickle.load(open("savedata/vocab.txt", "rb"))
dataset_train = PropagandaDataset('train', train_txt, train_lbl, train_vocab)
train_loader = DataLoader(dataset_train, batch_size=16, collate_fn=collate_sentences)
dataset_test = PropagandaDataset('train', test_txt, test_lbl, test_vocab)
test_loader = DataLoader(dataset_test, batch_size=16, collate_fn=collate_sentences)
dataset_dev = PropagandaDataset('train', dev_txt, dev_lbl, dev_vocab)
dev_loader = DataLoader(dataset_dev, batch_size=16, collate_fn=collate_sentences)
pickle.dump(train_loader, open("savedata/train_loaded.txt", "wb"))
pickle.dump(test_loader, open("savedata/test_loaded.txt", "wb"))
pickle.dump(dev_loader, open("savedata/dev_loaded.txt", "wb"))
train_loader = pickle.load(open("savedata/train_loaded.txt", "rb"))
test_loader = pickle.load(open("savedata/test_loaded.txt", "rb"))
dev_loader = pickle.load(open("savedata/dev_loaded.txt", "rb"))
"""### model"""
############################## PARAMETERS ######################################
############################## PARAMETERS ######################################
# Training hyperparameters consumed by the Trainer below.
# NOTE(review): batch_size here is 64 while the DataLoaders above use 16 —
# confirm which value is authoritative.
_hyperparameters_dict = {
    "batch_size": 64,
    "num_epochs": 10,  # 10,
    "max_len": 250,
    "embedding_size": 128,  # 256,
    "rnn_size": 256,  # 1024,
    "learning_algo": "adam",
    "learning_rate": 0.001,
    "max_grad_norm": 5.0
}
class RNN(nn.Module):
    """Bidirectional LSTM sentence classifier.

    Pipeline: embedding -> dropout -> BiLSTM -> max-pool over time ->
    linear layer producing 2 logits (propaganda / not propaganda).
    """

    def __init__(self, vocab_size: int, char_embedding_size: int,
                 rnn_size: int):
        super().__init__()
        self.vocab_size = vocab_size
        self.char_embedding_size = char_embedding_size
        self.rnn_size = rnn_size
        self.dropout = nn.Dropout(p=0.3)
        # instantiate Modules with the correct arguments
        self.embedding = nn.Embedding(num_embeddings=vocab_size,
                                      embedding_dim=char_embedding_size)
        self.rnn = nn.LSTM(input_size=char_embedding_size,
                           hidden_size=rnn_size, bidirectional=True)
        # self.rnn_cell = nn.GRUCell(input_size = char_embedding_size,
        #                   hidden_size = rnn_size)
        # 2 * rnn_size because the LSTM is bidirectional.
        self.logits = nn.Linear(in_features=2 * rnn_size, out_features=2)
        # self.softmax = nn.Softmax(dim = 2)
        self.loss = nn.CrossEntropyLoss()

    def get_loss(self, logits: torch.FloatTensor, y: torch.FloatTensor):
        """
        Computes loss for a batch of sequences. The sequence loss is the
        average of the individual losses at each timestep. The batch loss is
        the average of sequence losses across all batches.
        :param logits: unnormalized probabilities for T timesteps, size
        batch_size x max_timesteps x vocab_size
        :param y: ground truth values (index of correct characters), size
        batch_size x max_timesteps
        :returns: loss as a scalar
        """
        #
        # logits: B x T x vocab_size
        #     B x T
        # cross entropy: B x vocab_size x T
        #   B x T
        # vision: B x num_classes
        #   B
        return self.loss(logits, y)

    def get_logits(self, hidden_states: torch.FloatTensor,
                   temperature: float = 1.0):
        """
        Computes the unnormalized probabilities from hidden states. Optionally
        divide logits by a temperature, in order to influence predictions at
        test time (https://www.quora.com/What-is-Temperature-in-LSTM)
        :param hidden_states: tensor of size batch_size x timesteps x rnn_size
        :param temperature: coefficient that scales outputs before turning them
        to probabilities. A low temperature (0.1) results in more conservative
        predictions, while a higher temperature (0.9) results in more diverse
        predictions
        :return: tensor of size batch_size x timesteps x vocab_size
        """
        return self.logits(hidden_states) / temperature

    def forward(self, batch: torch.LongTensor,
                hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:
        """
        Computes the hidden states for the current batch (x, y).
        :param x: input of size batch_size x max_len
        :param hidden_start: hidden state at time step t = 0,
        size batch_size x rnn_size
        :return: hidden states at all timesteps,
        size batch_size x timesteps x rnn_size
        """
        # max_len = x.size(1)
        # x,label = batch

        # batch_size x max_len x embedding_dim
        x_embedded = self.embedding(batch)
        # x_drop = self.dropout
        x_drop = self.dropout(x_embedded)
        # compute hidden states and logits for each time step
        # hidden_states_list = []
        # prev_hidden = hidden_start
        hidden_state = self.rnn(x_drop)[0]
        # print(hidden_state)
        # print(hidden_state[0].shape)
        # print(hidden_state[1].shape)
        # hidden_state = hidden_state.permute(2,1,0)
        # hidden_state_maxPooled = F.max_pool1d(hidden_state,hidden_state.shape[2])
        # hidden_state_maxPooled = hidden_state.permute(2,1,0)
        # Max-pool over the time dimension to get one vector per sentence.
        # NOTE(review): nn.LSTM defaults to batch_first=False, so dim=1 pools
        # over the *batch* axis unless inputs are batch-first — verify the
        # intended layout against how the DataLoader shapes batches.
        hidden_state_pooled, _ = torch.max(hidden_state, dim=1)
        output = self.get_logits(hidden_state_pooled)
        # Loss = self.loss(output, y)
        # hidden_state = softmax(logits(hidden_state))
        # batch_size x max_len x rnn_size
        # hidden_states = torch.stack(hidden_states_list, dim=1)
        return output
# instantiate the RNNLM module
# (embedding table sized from the combined train+dev vocabulary)
network = RNN(vocab.size(),
              _hyperparameters_dict['embedding_size'],
              _hyperparameters_dict['rnn_size'])

# if torch.cuda.is_available():
#   device = torch.device('cuda:0')
# else:
#   device = torch.device('cpu')

# move network to GPU if available
# network = network.to(device)
# device = torch.device('cpu')
# network = network.to(device)

optimizer = Adam(params=network.parameters(), lr=0.001)

# CHECKPOINT: make sure you understand each parameter size
print("Neural network parameters: ")
for param_name, param in network.named_parameters():
    print("\t" + param_name, " size: ", param.size())
"""# Training/evaluation loop"""
# Commented out IPython magic to ensure Python compatibility.
class Trainer:
    """
    Training/evaluation driver for the batched RNN classifier.

    Iterates over pre-batched (x, y) tuples, optimizing with Adam or SGD,
    and reports per-epoch training loss, validation loss and validation
    accuracy.
    """
    def __init__(self, model: nn.Module,
                 train_data: torch.LongTensor,
                 dev_data: torch.LongTensor,
                 vocab: Vocabulary,
                 hyperparams: Dict):
        self.model = model
        self.train_data = train_data
        self.dev_data = dev_data
        self.vocab = vocab
        # choose the optimizer from the hyperparameter dict; anything
        # other than 'adam' falls back to plain SGD
        if hyperparams['learning_algo'] == 'adam':
            self.optimizer = Adam(params=self.model.parameters(),
                                  lr=hyperparams['learning_rate'])
        else:
            self.optimizer = SGD(params=self.model.parameters(),
                                 lr=hyperparams['learning_rate'])
        self.num_epochs = hyperparams['num_epochs']
        self.max_len = hyperparams['max_len']
        self.batch_size = hyperparams['batch_size']
        self.rnn_size = hyperparams['rnn_size']
        self.max_grad_norm = hyperparams['max_grad_norm']
        # number of characters in training/dev data
        self.train_size = len(train_data)
        self.dev_size = len(dev_data)
        # number of sequences (X, Y) used for training
        self.num_train_examples = \
            self.train_size // (self.batch_size * self.max_len) * self.batch_size
    def train_epoch(self, epoch_num: int) -> float:
        """
        Run one optimization pass over the training set.

        :param epoch_num: number of the current epoch (logging only)
        :return: mean batch loss over the epoch
        """
        self.model.train()
        epoch_loss = 0.0
        for batch_num, (x, y) in enumerate(self.train_data):
            # reset gradients accumulated from the previous batch
            self.optimizer.zero_grad()
            output = self.model(x)
            batch_loss = self.model.get_loss(output, y)
            epoch_loss += batch_loss.item()
            # backpropagation (gradient of loss wrt parameters)
            batch_loss.backward()
            # clip gradients so one bad batch cannot blow up the update
            torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),
                                           self.max_grad_norm)
            # update parameters
            self.optimizer.step()
            if batch_num % 100 == 0:
                print("epoch %d, %d/%d examples, batch loss = %f"
                      % (epoch_num, (batch_num + 1) * self.batch_size,
                         self.num_train_examples, batch_loss.item()))
        epoch_loss /= (batch_num + 1)
        return epoch_loss
    def eval_epoch(self, epoch_num: int) -> float:
        """
        Compute loss and number of correct predictions on the dev set.

        :param epoch_num: number of the current epoch (unused; kept for
            symmetry with train_epoch)
        :return: tuple of (mean dev loss, number of correct predictions)
        """
        # disable dropout / batch-norm updates for evaluation
        self.model.eval()
        epoch_loss = 0.0
        acc = 0
        with torch.no_grad():
            # BUG FIX: this loop previously iterated self.train_data, so
            # "dev" loss and accuracy were computed on the training set.
            for batch_num, (x, y) in enumerate(self.dev_data):
                output = self.model(x)
                batch_loss = self.model.get_loss(output, y)
                epoch_loss += batch_loss.item()
                predictions = torch.argmax(output, dim=1)
                acc += sum(predictions == y).item()
        epoch_loss /= (batch_num + 1)
        return epoch_loss, acc
    def train(self) -> Dict:
        """
        Run the full training loop.

        :return: dict with per-epoch 'train_losses', 'dev_losses' and
            'dev_acc' lists
        """
        train_losses, dev_losses, dev_acc = [], [], []
        for epoch in range(self.num_epochs):
            epoch_train_loss = self.train_epoch(epoch)
            epoch_dev_loss, epoch_dev_acc = self.eval_epoch(epoch)
            train_losses.append(epoch_train_loss)
            dev_losses.append(epoch_dev_loss)
            dev_acc.append(epoch_dev_acc)
        # BUG FIX: 'dev_acc' used to hold only the final epoch's scalar
        # value; return the full history like the two loss lists.
        return {"train_losses": train_losses,
                "dev_losses": dev_losses,
                "dev_acc": dev_acc}
def plot_losses(metrics: Dict):
    """
    Plots training/validation losses.
    :param metrics: dictionary with 'train_losses' and 'dev_losses'
        lists, as returned by Trainer.train()
    """
    plt.figure()
    plt.plot(metrics['train_losses'], c='b', label='Train')
    plt.plot(metrics['dev_losses'], c='g', label='Valid')
    plt.ylabel('Loss')
    plt.xlabel('Iteration')
    plt.legend()
    plt.show()
# op= torch.rand(4)
# thx = torch.rand(4)
# thx[0] = op[0]
# t = thx==op
# print(t)
# print(sum(t).item())
# train network for some epoch
trainer = Trainer(network, train_loader, dev_loader, vocab, _hyperparameters_dict)
metrics = trainer.train()
# plot training and validations losses each epoch
plot_losses(metrics)
# for i in train_loader:
# print(len(i[0][0]))
# print(len(i[0]))
# print(i[0])
# x = 1
# while (True)
# x = 0
|
4,561 | 0e2b4e8e8c5a728e5123dfa704007b0f6adaf1e1 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
n=int(input("Enter the number of votes : "))
print()
path="C:\\Program Files\\chromedriver.exe"
driver=webdriver.Chrome(path)
driver.get("https://strawpoll.com/jhzd6qwjw")
for i in range(0,n+1):
driver.delete_all_cookies()
try:
button=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,"//input[@value='9c1zz2ugv55r']")))
driver.execute_script("arguments[0].click();", button)
buttons=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,"//button[@class='button is-primary is-fullwidth']")))
driver.execute_script("arguments[0].click();", buttons)
except:
print()
try:
c=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,"//h1[@class='title']")))
driver.back()
print("Vote Successful")
print()
except:
print()
if i==n-1:
driver.quit() |
4,562 | 0abba9fdd98d6bb5c706b82a01a267dbcefbba28 | import re
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)" (\d{3}) (\S+)'
pattern = re.compile(APACHE_ACCESS_LOG_PATTERN)
print re.match('ix-sac6-20.ix.netcom.com - - [08/Aug/1995:14:43:39 -0400] "GET / HTTP/1.0 " 200 7131', 0)
|
4,563 | a2c62091b14929942b49853c4a30b851ede0004b | #!/usr/bin/env python3.4
from flask import Flask, render_template, request, jsonify
from time import time
application = Flask(__name__)
@application.route("/chutesnladders")
@application.route("/cnl")
@application.route("/snakesnladders")
@application.route("/snl")
def chutesnladders():
    """Serve the Chutes-n-Ladders page under all of its alias routes."""
    page = render_template('chutesnladders.min.html')
    return application.make_response(page)
@application.route("/report", methods=['GET', 'POST'])
def reportBug():
    """Accept a bug/feedback form POST and optionally e-mail it onward.

    Returns a JSON body with a single 'result' string: 'Received' on
    success, 'Error in receiving' if any form field is missing/invalid,
    and 'Bad request' for non-POST requests.
    """
    d = {}
    if request.method == 'POST':
        try:
            # .get(..., type=str) returns None when the field is absent,
            # so .lower() below raises and lands in the except branch
            email = request.form.get("email", type=str).lower()
            kind = request.form.get("kind", type=str).lower()
            title = kind.upper() + ": " + request.form.get("title", type=str)
            details = request.form.get("details", type=str)
            # microsecond timestamp used as a report id
            ts = int(time()*(10**6))
            report = request.form.get("reportingEnabled", type=bool)
            if report:
                # NOTE(review): `sendmail` is not imported in this file's
                # visible imports — presumably provided elsewhere; confirm.
                sendmail.sendMeResponse({
                    'ts': ts,
                    'feed_email': email,
                    'feed_name': email.split('@')[0],
                    'feed_message': details,
                    'feed_subject': title,
                })
            d = {"result": "Received"}
        except Exception:
            d = {"result": "Error in receiving"}
    else:
        d = {"result": "Bad request"}
    response = application.make_response(jsonify(**d))
    return response
# EVERY FUNCTION FOR CHUTES n LADDERS END HERE
# Dev-server entry point: listen on all interfaces with debug disabled.
if __name__ == "__main__":
    application.run(
        host='0.0.0.0',
        debug=False
    )
4,564 | e3afaabc1f7f64b9189fc88dd478ed75e81f35e1 | import json
import sys
from os import listdir
from os.path import isfile, join
import params
def encodeText(tweet_text):
    """Flatten a tweet's text onto one line (newlines become spaces)."""
    tweet_text = tweet_text.replace('\n', ' ')
    return str(tweet_text)
def parse_file(file_in, file_out):
    """
    Reduce a file of raw JSON tweets (one per line) to a compact schema.

    Each output record keeps id, date, flattened text, user id/handle, an
    optional place, and — for retweets — the retweeted status' text and
    author.  The cleaned records are written to *file_out* as one JSON
    array.  Malformed lines are reported and skipped.

    :param file_in: path to the raw twitter stream dump
    :param file_out: path of the cleaned JSON file to create
    """
    cleanLines = []
    # BUG FIX: both files are now opened via context managers (the input
    # handle was previously never closed) and read explicitly as UTF-8.
    with open(file_in, "r", encoding="utf-8") as ptrFile_in:
        for line in ptrFile_in:
            cleanLine = {}
            line = line.rstrip()
            if line != "":
                try:
                    decoded = json.loads(line)
                    cleanLine.update({"id": decoded['id']})
                    cleanLine.update({"date": decoded['created_at']})
                    # extended tweets (>140 chars) carry the full text in a
                    # nested object; fall back to the classic field otherwise
                    if decoded.get('extended_tweet') is not None:
                        cleanLine.update({"text": encodeText(decoded['extended_tweet']['full_text'])})
                    else:
                        cleanLine.update({"text": encodeText(decoded['text'])})
                    cleanLine.update({"user_id": decoded['user']['id']})
                    cleanLine.update({"user_name": '@' + decoded['user']['screen_name']})
                    if decoded.get('place') is not None:
                        cleanLine.update({"location": {"country": decoded['place']['country'], "city": decoded['place']['name']}})
                    else:
                        cleanLine.update({"location": {}})
                    if decoded.get('retweeted_status') is not None:
                        cleanLine.update({"retweeted": True})
                        if decoded.get('retweeted_status').get('extended_tweet') is not None:
                            cleanLine.update({"RT_text": encodeText(decoded['retweeted_status']['extended_tweet']['full_text'])})
                        else:
                            cleanLine.update({"RT_text": encodeText(decoded['retweeted_status']['text'])})
                        cleanLine.update({"RT_user_id": decoded['retweeted_status']['user']['id']})
                        cleanLine.update({"RT_user_name": '@' + decoded['retweeted_status']['user']['screen_name']})
                    else:
                        cleanLine.update({"retweeted": False})
                    cleanLines.append(cleanLine)
                except Exception as e:
                    # report and skip the bad line, keep processing the file
                    print(e, " :: ", line)
    with open(file_out, "w", encoding="utf-8") as ptrFile_out:
        ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))
if __name__ == '__main__':
    # Clean every file in the configured raw-dump folder, writing each
    # cleaned version under the same name in the output folder.
    path_in = params.folder_path
    path_out = params.clean_path
    for f in listdir(path_in):
        file_in = join(path_in, f)
        file_out = join(path_out, f)
        if isfile(file_in):
            parse_file(file_in, file_out)
|
4,565 | 2a062f0c2836850320cdd39eee6a354032ba5c33 | # coding=utf8
from __future__ import unicode_literals, absolute_import, division, print_function
"""This is a method to read files, online and local, and cache them"""
import os
from .Read import read as botread
from .Database import db as botdb
class BotNotes():
    """
    Tracks release-notes .txt files shipped with the bot.

    Lines already stored in the database live under each file's "old"
    list; lines seen for the first time this run are collected under
    "new" so they can be announced, then persisted as "old" next save.
    """

    def __init__(self):
        # previously-announced notes, persisted in the bot database
        self.notes = botdb.get_plugin_value('SpiceBot_Release_Notes', 'notes') or dict()
        self.dir_to_scan = botread.get_config_dirs("SpiceBot_Release_Notes")
        self.load_txt_files(self.dir_to_scan)
        self.save_notes()

    def save_notes(self):
        """Persist only the 'old' (already announced) lines per file."""
        savenotes = {}
        for notefile in self.notes:
            savenotes[notefile] = {"old": self.notes[notefile]["old"]}
        botdb.set_plugin_value('SpiceBot_Release_Notes', 'notes', savenotes)

    def load_txt_files(self, dir_to_scan):
        """
        Scan the given directories for .txt files and sort their lines
        into 'new' (first time seen) and 'old' (everything seen so far).

        :param dir_to_scan: iterable of directory paths to inspect
        """
        for directory in dir_to_scan:
            for file in os.listdir(directory):
                # BUG FIX: this join was previously duplicated on two lines
                filepath = os.path.join(directory, file)
                if os.path.isfile(filepath) and filepath.endswith('.txt'):
                    # notes are keyed by the lower-cased name sans extension
                    filename = str(filepath).split("/")[-1]
                    filename_base = str(os.path.basename(filename).rsplit('.', 1)[0]).lower()
                    entry = self.notes.setdefault(filename_base, {})
                    entry.setdefault("old", [])
                    entry.setdefault("new", [])
                    # context manager replaces the manual open()/close() pair
                    with open(filepath, 'r') as text_file:
                        for line in text_file.readlines():
                            if str(line) not in entry["old"]:
                                entry["new"].append(str(line))
                                entry["old"].append(str(line))
# Module-level singleton: scans and persists release notes at import time.
releasenotes = BotNotes()
|
4,566 | f7a335db0ddf8a871e98eac54b59c41a40622153 | # Generated by Django 3.2.4 on 2021-08-09 03:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated scaffold whose operations were emptied out; applying
    # it only records the migration and its dependency ordering.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('employee', '0013_auto_20210808_1242'),
    ]
    operations = [
    ]
|
4,567 | 79f945694f853e5886b590020bb661ecd418510d | import os
import sqlite3
from typing import Any
from direct_geocoder import get_table_columns
from reverse_geocoder import is_point_in_polygon
from utils import zip_table_columns_with_table_rows, get_average_point
def get_organizations_by_address_border(
        city: str,
        nodes: list[tuple[float, float]]) -> list[dict[str, Any]]:
    """
    Return every organization located inside the polygon *nodes*.

    A coarse bounding box (+/- `radius` degrees around the polygon's mean
    point) narrows the SQL scan of the per-city SQLite extract; the exact
    point-in-polygon test is then applied in Python to every candidate
    row from both the `nodes` and `ways` tables.
    """
    radius = 0.0025
    query = f"SELECT * FROM nodes WHERE " \
            f"(lat BETWEEN ? AND ?) AND " \
            f"(lon BETWEEN ? AND ?) AND " \
            f"(highway IS NULL) AND" \
            f"(NOT(name IS NULL) OR " \
            f"NOT(shop IS NULL) OR " \
            f"NOT(amenity IS NULL))"
    with sqlite3.connect(os.path.join('db', f'{city}.db')) as connection:
        cursor = connection.cursor()
        center_lat, center_lon = get_average_point(nodes)
        bounds = (center_lat - radius, center_lat + radius,
                  center_lon - radius, center_lon + radius)
        candidates = []
        # run the same filter against both geometry tables
        for table in ('nodes', 'ways'):
            columns = get_table_columns(cursor, table)
            cursor.execute(query.replace('nodes', table), bounds)
            candidates += zip_table_columns_with_table_rows(
                columns, cursor.fetchall())
        # exact membership test against the polygon border
        return [org for org in candidates
                if is_point_in_polygon((org['lat'], org['lon']), nodes)]
|
4,568 | 15821bb33c2949f5a3e72e23cf7b5d8766dfce70 | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, lumisToProcess = cms.untracked.VLuminosityBlockRange(*('1:11169', '1:11699', '1:16592', '1:23934', '1:17699', '1:22722', '1:23672', '1:23971', '1:16186', '1:24286', '1:23038', '1:11494', '1:22331', '1:13850', '1:12164', '1:12639', '1:17861', '1:12137', '1:18160', '1:16336', '1:17383', '1:17265', '1:24278', '1:19156', '1:15205', '1:19339', '1:19999', '1:15849', '1:14177', '1:15012', '1:15134', '1:15207', '1:15136', '1:21300', '1:21465', '1:21473', '1:23292', '1:13149', '1:13596', ))
)
readFiles.extend( ['/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/222ADB5A-A0FB-E911-A01F-002590E3A286.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/D0B3C949-C110-EA11-9996-E0071B889698.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/100692F6-5812-EA11-90F1-0CC47AFCC646.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/B85503FC-5812-EA11-880B-509A4C63A055.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/B07EA5EF-3D12-EA11-BC8E-44A842CFD5D8.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/88A10EAD-5812-EA11-A4EF-20040FE94288.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/E2F9F049-5912-EA11-80CE-0017A4771048.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/34CB6C5F-5912-EA11-B919-0425C5DE7BF4.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/60672E4E-5912-EA11-ACA2-0026B9278678.root', 
'/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/C81FA8C8-5812-EA11-B5F6-90E6BA3BD795.root']); |
4,569 | aa913fd40a710cfd7288fd59c4039c4b6a5745cc | import pandas as pd
import random
import string
import names
def generatetest(n=100, filename="test_data"):
    """
    Write a CSV of *n* fake records (random 9-char ids plus random full
    names) to tmp/<filename>.csv.

    :param n: number of rows to generate
    :param filename: output file stem (no extension)
    """
    import os  # local import: this file's header does not import os
    ids = []
    names_list = []
    for _ in range(n):
        ids.append(''.join(random.choices(
            string.ascii_letters + string.digits, k=9)))
        names_list.append(names.get_full_name())
    df = pd.DataFrame({
        'id': ids,
        'names': names_list,
    })
    # BUG FIX: create the output directory on demand; to_csv used to fail
    # with FileNotFoundError when tmp/ did not exist.
    os.makedirs('tmp', exist_ok=True)
    df.to_csv('tmp/{}.csv'.format(filename), index=False)
if __name__ == "__main__":
    # CLI entry point: generate the default 100-row test file.
    generatetest()
    print("test set generated!")
4,570 | 001d2ae89a2d008fdf6621a1be73de94c766c65f | SOURCE_FILE = "D:\\temp\\twitter\\tweet.js"
TWITTER_USERNAME = 'roytang'
# tags automatically applied to every imported tweet
auto_tags = ["mtg"]
# tweet "source" apps indicating the tweet was cross-posted from elsewhere
syndicated_sources = ["IFTTT", "Tumblr", "instagram.com", "Mailchimp", "Twitter Web", "TweetDeck", "mtgstorm"]
# set to a tweet id string to process only that tweet (debugging aid)
debug_id = None
# debug_id = "11143081155"
import frontmatter
import json
import requests
import urllib.request
from urllib.parse import urlparse, parse_qs, urldefrag
from urllib.error import HTTPError
import sys
from pathlib import Path
import os, shutil
import inspect
from datetime import datetime
import re
from utils import loadurlmap, load_map_from_json, URLResolver, PostBuilder
cwd = Path.cwd()
contentdir = cwd / "content"
blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])
mediadir = Path("D:\\temp\\roy_mtg-twitter\\tweet_media")
retweetscache = load_map_from_json("d:\\temp\\twitter\\retweets.json")
resolver = URLResolver()
def loadurlmap(cleanupdupes=False):
blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])
urlmapfile = blogdir / "urlmap.json"
urlmap = {}
urlmapdupes = {}
with urlmapfile.open(encoding="UTF-8") as f:
tempurlmap = json.loads(f.read())
for u in tempurlmap:
u1 = tempurlmap[u]
if "syndicated" in u1:
for s in u1['syndicated']:
if 'url' in s:
su = s['url']
if su in urlmap:
# we expect syndicated urls to be unique,
# so if it's already in the map,
# it must be a dupe
# (This is really just to clean up my own mess!)
if su not in urlmapdupes:
urlmapdupes[su] = [u1, urlmap[su]]
else:
urlmapdupes[su].append(u1)
else:
urlmap[su] = u1
urlmap[u] = u1
title = u1.get("title", "").strip()
if len(title) > 0:
urlmap[title] = u1
if cleanupdupes:
# clean up any found dupes by syndicated url
for su in urlmapdupes:
dupes = urlmapdupes[su]
canonical = None
for_deletion = []
for d in dupes:
if d["source_path"].startswith("post") or d["source_path"].startswith("links") or len(d['syndicated']) > 2:
if canonical is not None:
print("\n\r##### WTH. More than one canonical urls were detected for %s" % (su))
print(json.dumps(dupes, indent=4))
canonical = d
else:
for_deletion.append(d)
if canonical is None:
print("##### Dupes were detected for %s but no canonical url found!" % (su))
print(dupes)
else:
urlmap[su] = canonical
for d in for_deletion:
source_path = Path(d['source_path'])
full_path = contentdir / source_path
if full_path.exists():
os.remove(str(full_path))
return urlmap
urlmap = loadurlmap(False)
def is_number(s):
    """Return True when *s* can be parsed as a float, else False."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def add_syndication(mdfile, url, stype):
    """Record a syndication (type + url) in a markdown post's frontmatter.

    No-ops when the same (stype, url) pair is already present, or when the
    file's frontmatter cannot be parsed.
    """
    with mdfile.open(encoding="UTF-8") as f:
        try:
            post = frontmatter.load(f)
        except:
            print("Error parsing file")
            return
    if post.get('syndicated') == None:
        post['syndicated'] = []
    else:
        for s in post['syndicated']:
            if s["type"] == stype and s["url"] == url:
                # dont add a duplicate!
                return
    post['syndicated'].append({
        'type': stype,
        'url': url
    })
    # rewrite the whole file with the updated frontmatter
    newfile = frontmatter.dumps(post)
    with mdfile.open("w", encoding="UTF-8") as w:
        w.write(newfile)
def get_content(t):
    """Convert a tweet's full_text to markdown-ish content.

    Rewrites @mentions as markdown links and replaces every t.co / raw URL
    with its resolved final destination.  Order matters: entity URLs are
    expanded first and remembered so raw URLs aren't resolved twice.
    """
    content = t['full_text']
    if "entities" in t:
        # get raw urls in the text
        raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', content)
        # replace mentions with link
        for m in t["entities"]["user_mentions"]:
            screen_name = m["screen_name"]
            # replace with markdown link
            mdlink = "[@%s](https://twitter.com/%s/)" % (screen_name, screen_name)
            content = content.replace("@"+screen_name, mdlink)
        processed_urls = []
        # clean urls
        for u in t["entities"]["urls"]:
            url = u["url"]
            processed_urls.append(url)
            expanded_url = u["expanded_url"]
            processed_urls.append(expanded_url)
            # print("##### A URL!!! %s" % expanded_url)
            # follow redirects to the final landing URL
            expanded_url, no_errors = resolver.get_final_url(expanded_url)
            processed_urls.append(expanded_url)
            content = content.replace(url, expanded_url)
        # find urls that were not in the entities
        for raw_url in raw_urls:
            if raw_url not in processed_urls:
                expanded_url, no_errors = resolver.get_final_url(raw_url)
                content = content.replace(raw_url, expanded_url)
    return content
def create_post(t):
    """Write one archived tweet out as a Hugo content file.

    The post kind is derived from the tweet: 'replies' for replies,
    'reposts' for resolvable retweets, 'photos' for standalone media
    tweets, else 'notes'.  Media tweets become page bundles with their
    images copied in.  Always returns True (the tweet was handled).
    """
    id = t['id_str']
    d = datetime.strptime(t['created_at'], "%a %b %d %H:%M:%S %z %Y")
    content = get_content(t)
    post = frontmatter.Post(content)
    post['date'] = d
    post['syndicated'] = [
        {
            "type": "twitter",
            "url": "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, t['id'])
        }
    ]
    kind = "notes"
    if "in_reply_to_status_id_str" in t and "in_reply_to_screen_name" in t:
        kind = "replies"
        post["reply_to"] = {
            "type": "twitter",
            "url": "https://twitter.com/%s/statuses/%s/" % (t['in_reply_to_screen_name'], t['in_reply_to_status_id_str']),
            "name": t["in_reply_to_screen_name"],
            "label": "%s's tweet" % (t["in_reply_to_screen_name"])
        }
    elif t["full_text"].startswith("RT @"):
        rc = retweetscache.get(id)
        if rc is None:
            # RTed status is inaccessible, we'll just render it as an ordinary note
            pass
        else:
            if "retweeted_user" in rc:
                kind = "reposts"
                post['repost_source'] = {
                    "type": "twitter",
                    "name": rc["retweeted_user"],
                    "url": "https://twitter.com/%s/statuses/%s/" % (rc['retweeted_user'], rc['retweeted_id'])
                }
            else:
                # 785744070027030528 fails this
                # RTed status is inaccessible, we'll just render it as an ordinary note
                pass
    media = []
    for m in t.get("extended_entities", {}).get("media", []):
        media.append(m["media_url_https"])
    if len(media) > 0:
        # media only upgrades plain notes to photo posts
        if kind != "reposts" and kind != "replies":
            kind = "photos"
    tags = []
    # BUG FIX: the key was misspelled 'entites', so hashtags supplied by
    # the twitter entity metadata were never picked up.
    for tag in t.get('entities', {}).get('hashtags', []):
        tags.append(tag['text'].lower())
    parsed_tags = re.findall(r"\s#(\w+)", " " + content)
    for tag in parsed_tags:
        # BUG FIX: dedupe on the lower-cased form; the old check compared
        # the raw tag but appended the lower-cased one, allowing dupes.
        tag = tag.lower()
        if tag not in tags:
            tags.append(tag)
    for tag in auto_tags:
        if tag not in tags:
            tags.append(tag)
    if len(tags) > 0:
        post["tags"] = tags
    post["source"] = "twitter"
    # posts are bucketed by kind/year/month; media posts get a bundle dir
    outdir = contentdir / kind / d.strftime("%Y") / d.strftime("%m")
    if len(media) > 0:
        outdir = outdir / (id)
    if not outdir.exists():
        outdir.mkdir(parents=True)
    if len(media) > 0:
        outfile = outdir / ( "index.md" )
        # copy this tweet's media files into the bundle
        for imgfile in mediadir.glob(id + "*.*"):
            to_file = outdir / imgfile.name
            shutil.copy(str(imgfile), str(to_file))
    else:
        outfile = outdir / ( id + ".md" )
    newfile = frontmatter.dumps(post)
    with outfile.open("w", encoding="UTF-8") as w:
        w.write(newfile)
    return True
def process_syn_url(d1, raw_url, url):
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
url, no_errors = resolver.get_final_url(url)
if not no_errors:
print(d1["full_text"])
url = url.replace("www.instagram.com", "instagram.com")
url = url.replace("/roytang0400", "")
url = urldefrag(url)[0]
if url.find("instagram.com") >= 0 and url.find("?") >= 0:
# remove utm and other misc query params from insta urls
url = url.split("?")[0]
if url in urlmap:
u = urlmap[url]
source_path = Path(u['source_path'])
full_path = contentdir / source_path
add_syndication(full_path, orig_tweet_url, "twitter")
return True
if url.find("://roytang.net") >= 0 or url.find("://mtgstorm.com") >= 0:
link_url = urlparse(url)
u = urlmap.get(link_url.path, None)
if u is None:
# try matching by title
title_search_term = d1["full_text"]
title_search_term = title_search_term.replace("New blog post: ", "")
title_search_term = title_search_term.replace("New post: ", "")
title_search_term = title_search_term.replace(raw_url, "")
title_search_term = title_search_term.strip()
u = urlmap.get(title_search_term, None)
if u is not None:
source_path = Path(u['source_path'])
full_path = contentdir / source_path
add_syndication(full_path, orig_tweet_url, "twitter")
return True
else:
print("######## Unmatched roytang url: %s" % (url))
print(d1["full_text"])
return True
return False
def process_tweet(d1):
    """Route one archived tweet: link it to existing content or create a post.

    Returns True when the tweet was matched to an existing post (or was
    already mapped); otherwise falls through to create_post().
    """
    orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
    if orig_tweet_url in urlmap:
        og = urlmap.get(orig_tweet_url)
        if og['source_path'].startswith('post\\') or og['source_path'].startswith('photos\\'):
            # no need to process further any tweets that are already mapped to a post
            return True
    tweet_source = d1["source"]
    # print("#### %s: %s" % (tweet_source, orig_tweet_url))
    # detect content syndicated from elsewhere
    # instagram, tumblr, roytang.net
    for s in syndicated_sources:
        if tweet_source.find(s) >= 0:
            # first try the URLs twitter extracted into the entities
            for u in d1.get('entities', {}).get("urls", []):
                raw_url = u["url"]
                url = u["expanded_url"]
                if process_syn_url(d1, raw_url, url):
                    return True
            # print("######## URL = %s" % (url))
            # also process raw urls
            raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', d1["full_text"])
            for raw_url in raw_urls:
                if process_syn_url(d1, raw_url, raw_url):
                    return True
            # NOTE: only the first matching syndication source is tried
            break
    return create_post(d1)
def import_all():
countbysource = {}
replies = 0
retweets = 0
withmedia = 0
raw = 0
with Path(SOURCE_FILE).open(encoding='utf-8') as f:
d = json.load(f)
idx = 0
for d1 in d:
if debug_id is not None and d1["id_str"] != debug_id:
continue
if process_tweet(d1):
continue
tweet_source = d1["source"]
if tweet_source not in countbysource:
countbysource[tweet_source] = 1
else:
countbysource[tweet_source] = countbysource[tweet_source] + 1
is_reply = False
if "in_reply_to_status_id_str" in d1 and "in_reply_to_screen_name" in d1:
replies = replies + 1
is_reply = True
# handle retweet
is_retweet = False
content = d1["full_text"]
if content.startswith("RT @"):
retweets = retweets + 1
is_retweet = True
media = []
if "extended_entities" in d1:
for m in d1["extended_entities"]["media"]:
media.append(m["media_url_https"])
if len(media) > 0:
withmedia = withmedia + 1
if not is_reply and not is_retweet and len(media) == 0:
raw = raw + 1
idx = idx + 1
# if idx > 100:
# break
# save the url cache for future use
resolver.save_cache()
for source in countbysource:
print("countbysource: %s = %s" % (source, countbysource[source]))
print("replies: %s" % (replies))
print("retweets: %s" % (retweets))
print("withmedia: %s" % (withmedia))
print("raw: %s" % (raw))
print("total: %s" % (idx))
def thread_replies():
with Path(SOURCE_FILE).open(encoding='utf-8') as f:
d = json.load(f)
idx = 0
# process in reverse order so tweet sequences are in order
d = reversed(d)
for d1 in d:
is_reply = False
if "in_reply_to_status_id_str" in d1 and "in_reply_to_screen_name" in d1:
is_reply = True
if not is_reply:
continue
id_str = d1['id_str']
# if id_str != "602009895437737984" and id_str != "602009747294924802":
# continue
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, id_str)
# dont bother if already syndicated
if orig_tweet_url in urlmap:
continue
date = datetime.strptime(d1['created_at'], "%a %b %d %H:%M:%S %z %Y")
# process replies to myself
if d1["in_reply_to_screen_name"] == TWITTER_USERNAME:
replied_to_url = "https://twitter.com/%s/statuses/%s/" % (d1['in_reply_to_screen_name'], d1['in_reply_to_status_id_str'])
info = urlmap[replied_to_url]
source_path = Path(info['source_path'])
full_path = contentdir / source_path
# welp, we might as well move them to bundles
if full_path.name == "index.md":
parentdir = full_path.parent
else:
parentdir = full_path.parent / full_path.stem
if not parentdir.exists():
parentdir.mkdir(parents=True)
oldfile = full_path
full_path = parentdir / "index.md"
shutil.move(str(oldfile), str(full_path))
# also update the urlmap!
urlmap[replied_to_url]['source_path'] = str(full_path.relative_to(contentdir))
# append the reply to the original post, and add it as a syndication as well
with full_path.open(encoding="UTF-8") as f:
try:
post = frontmatter.load(f)
except:
print("Error parsing file")
return
post['syndicated'].append({
'type': 'twitter',
'url': orig_tweet_url
})
content = get_content(d1)
post.content = post.content + "\n\r" + content
newfile = frontmatter.dumps(post)
with full_path.open("w", encoding="UTF-8") as w:
w.write(newfile)
# copy over any media from the reply as well
media = []
for m in d1.get("extended_entities", {}).get("media", []):
media.append(m["media_url_https"])
for imgfile in mediadir.glob(d1["id_str"] + "*.*"):
to_file = parentdir / imgfile.name
shutil.copy(str(imgfile), str(to_file))
# delete any existing file created for this reply
oldfile = contentdir / "replies" / date.strftime("%Y") / date.strftime("%m") / (id_str + ".md")
if oldfile.exists():
os.remove(str(oldfile))
oldfolder = contentdir / "replies" / date.strftime("%Y") / date.strftime("%m") / (id_str)
if oldfolder.exists():
shutil.rmtree(str(oldfolder))
# replace this entry in the urlmap! this is so that succeeding replies can find the correct root tweet to attach to
urlmap[orig_tweet_url] = info
else:
continue
idx = idx + 1
print(idx)
from utils import urlmap_to_mdfile
def cleanup_videos():
with Path(SOURCE_FILE).open(encoding='utf-8') as f:
d = json.load(f)
idx = 0
for d1 in d:
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1["id_str"])
info = urlmap.get(orig_tweet_url)
if info is None:
continue
for m in d1.get("extended_entities", {}).get("media", []):
if "video_info" in m:
videos = []
lowest_bitrate = 1000000000000
lowest_video = ""
for vi in m["video_info"]["variants"]:
if 'bitrate' in vi:
videos.append(vi["url"])
bitrate = int(vi['bitrate'])
if bitrate < lowest_bitrate:
lowest_video = vi["url"]
lowest_bitrate = bitrate
mdfile = urlmap_to_mdfile(info)
if str(mdfile).find("\\photos\\") >= 0:
print(mdfile)
# move it to notes, since it's not a photo
p = PostBuilder.from_mdfile(mdfile)
p.kind = "notes"
p.save()
# delete the old files
container = mdfile.parent
for f in container.iterdir():
os.remove(str(f))
container.rmdir()
continue
# delete all the video files except for the one with the lowest bitrate
for v in videos:
if v == lowest_video:
continue
name = Path(v).name
if name.find("?") >= 0:
name = name.split("?")[0]
vfilename = d1["id_str"] + "-" + name
vfile = container / vfilename
print(vfile)
os.remove(str(vfile))
def stats():
    """Print a per-year tweet count for the archive as indented JSON."""
    # (removed five accumulator locals and an idx counter that were
    # initialized here but never used)
    count_by_year = {}
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        # tally tweets by the year of their creation timestamp
        for t in d:
            dt = datetime.strptime(t['created_at'], "%a %b %d %H:%M:%S %z %Y")
            count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1
    print(json.dumps(count_by_year, indent=2))
# Entry point: only the year-count report runs by default; the heavier
# one-off migrations above are kept commented for manual use.
# thread_replies()
# import_all()
# cleanup_videos()
stats()
|
4,571 | 5024db0538f0022b84c203882df9c35979ba978a | # Example solution for HW 5
# %%
# Import the modules we will use
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %%
# ** MODIFY **
# Set the file name and path to where you have stored the data
filename = 'streamflow_week5.txt' #modified filename
filepath = os.path.join('../data', filename) #modified path to look one directory up
print(os.getcwd())
print(filepath)
#filepath = '../Assignments/Solutions/data/streamflow_week5.txt'
# %%
#Read the data into a pandas dataframe
data=pd.read_table(filepath, sep = '\t', skiprows=30,
names=['agency_cd', 'site_no', 'datetime', 'flow', 'code']
)
# Expand the dates to year month day
data[["year", "month", "day"]] =data["datetime"].str.split("-", expand=True)
data['year'] = data['year'].astype(int)
data['month'] = data['month'].astype(int)
data['day'] = data['day'].astype(int)
# %%
# Sorry no more helpers past here this week, you are on your own now :)
# Hints - you will need the functions: describe, info, groupby, sort, head and tail.
# %% Start of Mekha's code
# 1 and 2 week forecast
# Look at most recent 2 weeks of data ending 9/26
print(data.tail(14))
# Calculate avg of last two week's flow
print(data.tail(14).describe())
# Calculate avg of last week's flow
print(data.tail(7).describe())
# Look at stats for 2019 because from my previous analysis, I know it is a smiliarly dry year
data_2019 = data[data['year']==2019]
print(data_2019['flow'].describe())
# Look at stats for 2019 by month
print(data_2019.groupby(['month'])[['flow']].describe())
# %% 1. Provide a summary of the data frames properties.
# What are the column names?
# What is its index?
# What data types do each of the columns have?
print(data.info())
# %% 2.Provide a summary of the flow column including the min, mean, max, standard
# deviation and quartiles.
print(data['flow'].describe())
# %% 3.Provide the same information but on a monthly basis. (Note: you should be
# able to do this with one or two lines of code)
print(data.groupby(['month'])[['flow']].describe())
# %% 4.Provide a table with the 5 highest and 5 lowest flow values for the period
# of record. Include the date, month and flow values in your summary.
# 5 highest
print(data.sort_values(by="flow",ascending=True).tail())
# 5 lowest
print(data.sort_values(by="flow",ascending=True).head())
# %% 5.Find the highest and lowest flow values for every month of the year (i.e. you
# will find 12 maxes and 12 mins) and report back what year these occurred in.
# highest value for each month
for i in range(1,13):
month_data = data[data['month']==i]
print(month_data.nlargest(1,['flow']))
# lowest value for each month
for i in range(1,13):
month_data = data[data['month']==i]
print(month_data.nsmallest(1,['flow']))
# %% 6.Provide a list of historical dates with flows that are within 10% of your week 1
# forecast value. If there are none than increase the %10 window until you have at
# least one other value and report the date and the new window you used
forecast = 58.4
data_10percent = data[(data['flow'] >= (0.9*forecast)) & (data['flow'] <= (1.1*forecast))]
pd.set_option('display.max_rows', None)
print(data_10percent['datetime'])
# %%
|
4,572 | 790110a8cba960eb19593e816b579080dfc46a4e | from bs4 import BeautifulSoup
import urllib2
def get_begin_data(url):
    """Scrape one page of PAT submission listings (Python 2 / urllib2 code).

    Returns a list of dicts with keys 'link' (submission detail href) and
    'title' (problem title); a second pass adds 'answer' (the CSS class list
    of each result <span>, e.g. [u'submitRes-3']).
    """
    headers = {
        'ser-Agent': '',  # TODO(review): presumably a typo for 'User-Agent' -- confirm
        'Cookie': ''
    }
    request = urllib2.Request(url, headers=headers)
    web_data = urllib2.urlopen(request)
    soup = BeautifulSoup(web_data, 'html.parser')
    results = soup.select('table > tr > td > a')
    answers = soup.select('table > tr > td > span')
    # State machine over the table's <a> cells: position 0 is the detail
    # link, position 1 the problem title, the rest of the row is skipped.
    # Appears to assume four <a> cells per row -- verify against the markup.
    index = 0
    datas = []
    for result in results:
        if index == 0:
            data = {
                'link': result.get('href')
            }
            index += 1
        elif index == 1:
            data['title'] = result.get_text()
            datas.append(data)
            index += 1
        elif index == 2:
            index += 1
        else:
            index = 0
    # Second pass: the Nth classed <span> is assumed to belong to the Nth
    # parsed row. NOTE(review): raises IndexError if the page has more
    # classed spans than parsed rows -- confirm against the live page.
    index = 0
    for answer in answers:
        if answer.get('class') != None:
            datas[index]['answer'] = answer.get('class')
            index += 1
    return datas
def anly_data(datas):
    """Keep only accepted submissions and build their source-page links.

    An entry counts as accepted when its 'answer' class list equals
    [u'submitRes-3'].  Each kept entry becomes a dict with the absolute
    'url' of the submission's source page and the problem 'title'.
    """
    return [
        {
            'url': 'https://www.patest.cn' + entry['link'] + '/source',
            'title': entry['title'],
        }
        for entry in datas
        if entry['answer'] == [u'submitRes-3']
    ]
def save_file(linkList):
    """Download the source code of each accepted submission and save it as
    ``<title>.cpp`` in the current directory.

    linkList: list of dicts with 'url' (submission source page) and
    'title' (problem title, used as the output file name).
    """
    headers = {
        'ser-Agent': '',  # TODO(review): likely a 'User-Agent' typo; kept for consistency with get_begin_data
        'Cookie': ''
    }
    for link in linkList:
        request = urllib2.Request(link['url'], headers=headers)
        web_data = urllib2.urlopen(request)
        soup = BeautifulSoup(web_data, 'html.parser')
        code = soup.select('#sourceCode')
        # BUG FIX: use a context manager so the output handle is closed even
        # if get_text()/write() raises, and stop shadowing the ``file`` builtin.
        with open(link['title'] + '.cpp', 'w') as out_file:
            for fragment in code:
                out_file.write(fragment.get_text().encode('utf-8'))
if __name__ == '__main__':
    # Crawl submission listing pages 1..11, collect accepted submissions,
    # then download each one's source code.
    datas = []
    for page_number in range(1, 12):
        url = 'https://www.patest.cn/contests/pat-b-practise/submissions?page={}&self=true'.format(page_number)
        # BUG FIX: accumulate results across pages; the original rebound
        # ``datas`` on every iteration, so only page 11 was ever processed.
        datas.extend(get_begin_data(url))
    linkList = anly_data(datas)
    save_file(linkList)
|
4,573 | 0aed35827e6579f7a9434d252d0b9150ab24adf9 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-31 07:54
from __future__ import unicode_literals
import codenerix.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Split Brand's translatable text (name/slug/descriptions) into
    per-language BrandTextEN / BrandTextES models, each linked one-to-one
    back to Brand via related_name 'en' / 'es'.

    NOTE(review): RemoveField drops brand.name/brand.slug without a data
    migration -- existing values are lost; confirm that is intended.
    """
    dependencies = [
        ('codenerix_products', '0005_remove_product_name'),
    ]
    operations = [
        migrations.CreateModel(
            name='BrandTextEN',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
                ('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
                ('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
                ('description_short', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description short')),
                ('description_long', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description long')),
                ('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
                ('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name')),
                ('public', models.BooleanField(default=False, verbose_name='Public')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='BrandTextES',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
                ('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
                ('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
                ('description_short', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description short')),
                ('description_long', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description long')),
                ('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
                ('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name')),
                ('public', models.BooleanField(default=False, verbose_name='Public')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.RemoveField(
            model_name='brand',
            name='name',
        ),
        migrations.RemoveField(
            model_name='brand',
            name='slug',
        ),
        migrations.AddField(
            model_name='brandtextes',
            name='brand',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='es', to='codenerix_products.Brand'),
        ),
        migrations.AddField(
            model_name='brandtexten',
            name='brand',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='en', to='codenerix_products.Brand'),
        ),
    ]
|
4,574 | bb1caf4d04c8a42279afa0ac586ced991e0dff84 | import Individual
import Grupal
import matplotlib.pyplot as plt
import pandas as pd
# Render any matplotlib figures produced as a side effect of importing the
# Individual and Grupal modules above (presumably they plot at import time
# -- TODO confirm).
plt.show()
|
4,575 | 8b29c12c294a8614d8be96c312ecffa9d3bcb3f8 | import Bio
import os
import sys
from Bio import PDB
from Bio.PDB import PDBIO
from Bio.PDB.PDBParser import PDBParser
import math
import numpy
from collections import Counter
import random
from Bio.PDB import *
import gzip
def get_center(res_list):
    """Return the geometric center (centroid) of the atoms in *res_list*.

    Parameters
    ----------
    res_list : iterable
        Iterable of atom objects exposing a ``.coord`` sequence of three
        floats (x, y, z), e.g. a Bio.PDB Residue.

    Returns
    -------
    numpy.ndarray
        Array of shape (3,) holding the mean x, y and z coordinates.
    """
    # BUG FIX: the original iterated the *global* ``residue`` instead of the
    # ``res_list`` parameter, so the argument was silently ignored and the
    # function only worked when a global of that name happened to exist.
    coords = [[atom.coord[0], atom.coord[1], atom.coord[2]] for atom in res_list]
    # Per-axis mean over all atoms; replaces the original manual running sums.
    return numpy.array(coords).mean(axis=0)
# --- Batch-download the PDB structures listed in a culled PDB index file. ---
# NOTE(review): the large commented-out section of the original (microfold
# extraction around organic cofactors, 15 A neighbor search, per-cofactor PDB
# output) was removed as dead code; recover it from version control if that
# analysis is resurrected.
pdbl = PDB.PDBList()
Error_out = open("microfolds_out.txt", "w")  # failure log, one marker block per error
cng = 0
# Residue-name vocabularies used by the (currently disabled) microfold
# extraction step; kept so the names remain available at module level.
AA=['PHE','TRP','TYR','ALA','CYS','ASP','GLU','GLY','HIS','ILE','LYS','LEU','MET','ASN','PRO','GLN','ARG','SER','THR','VAL']
CF=[' DA',' DC',' DG',' DT','  A','  C','  G','  U','HOH','UNK','UNX']
Metals=['FE','MN','CU','CD','OS','CO','NI','W','PT','MO','U','TA','V','AU','IR','Y','GD','RU','YB','SM','PD','AG','EU','RH','PR','RE','LU','TB','HF','HO','DY','ZR','CR','LA','CE','ER','AM','CM','TH','PU','SC','PA']
cofactor=['BCB','CLA','CHL','BCL','CL0','PMR','PHO']
idxfile = 'cullpdb_pc90_res10_R1_inclNOTXRAY_inclCA_d161006_chains.txt'
idx = open(idxfile, "r")
idx.readline()  # skip the header row of the culled-PDB list
EC = ""
i = 0
for line in idx:
    i = i + 1
    print(i)  # progress: one counter line per index entry
    try:
        # The first four characters of each index line are the PDB id.
        protein = line[0:4]
        protein = protein.lower()
        parser = PDB.PDBParser(PERMISSIVE=1)
        curdir = os.getcwd()
        pdbl.retrieve_pdb_file(protein, pdir=curdir + '/pdbs/')
    # BUG FIX: was a bare ``except:`` (which also swallows KeyboardInterrupt/
    # SystemExit) and wrote a literal, never-formatted "%s" plus a '/n' typo.
    # Kept deliberately best-effort, but now logs the actual exception.
    except Exception as e:
        Error_out.write('xxx\n')
        Error_out.write("<p>Error: %s</p>\n" % e)
        Error_out.write('xxx\n')
        print('err')
        continue
Error_out.close()
print("end")
|
4,576 | d4c297af395581c6d955eb31a842ab86e599d23c | ##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
import IECoreScene
class TestMotionPrimitive( unittest.TestCase ) :
	"""Unit tests for IECoreScene.MotionPrimitive: dict-like access,
	serialization round-trip, copying, item listing and hashing."""

	def test( self ) :
		"""Type checks, key/value mapping behavior, save/load round-trip,
		copy equality and deletion semantics."""
		m = IECoreScene.MotionPrimitive()
		self.assertTrue( m.isInstanceOf( "MotionPrimitive" ) )
		self.assertTrue( m.isInstanceOf( "VisibleRenderable" ) )
		# Starts empty.
		self.assertEqual( m.keys(), [] )
		self.assertEqual( m.values(), [] )
		self.assertEqual( len( m ), 0 )
		# Keys must be numeric (motion sample times), not strings.
		self.assertRaises( Exception, m.__setitem__, "notAFloat", IECoreScene.PointsPrimitive( 1 ) )
		m[0] = IECoreScene.PointsPrimitive( 1 )
		self.assertEqual( len( m ), 1 )
		self.assertEqual( m.keys(), [ 0 ] )
		self.assertEqual( m.values(), [ IECoreScene.PointsPrimitive( 1 ) ] )
		m[1] = IECoreScene.PointsPrimitive( 1 )
		self.assertEqual( len( m ), 2 )
		self.assertEqual( m.keys(), [ 0, 1 ] )
		self.assertEqual( m.values(), [ IECoreScene.PointsPrimitive( 1 ), IECoreScene.PointsPrimitive( 1 ) ] )
		# Round-trip through an IndexedIO file preserves equality.
		iface = IECore.IndexedIO.create( os.path.join( "test", "motionPrimitive.fio" ), IECore.IndexedIO.OpenMode.Write )
		m.save( iface, "test" )
		mm = IECore.Object.load( iface, "test" )
		self.assertEqual( m, mm )
		mmm = m.copy()
		self.assertEqual( m, mmm )
		del m[0]
		self.assertEqual( len( m ), 1 )
		self.assertEqual( m.keys(), [ 1 ] )
		self.assertEqual( m.values(), [ IECoreScene.PointsPrimitive( 1 ) ] )
		del m[1]
		self.assertEqual( m.keys(), [] )
		self.assertEqual( m.values(), [] )
		self.assertEqual( len( m ), 0 )

	def testItems( self ) :
		"""items() returns (time, primitive) pairs in key order."""
		m = IECoreScene.MotionPrimitive()
		m[0] = IECoreScene.PointsPrimitive( 1 )
		m[1] = IECoreScene.PointsPrimitive( 2 )
		self.assertEqual( m.items(), [ ( 0, IECoreScene.PointsPrimitive( 1 ) ), ( 1, IECoreScene.PointsPrimitive( 2 ) ) ] )

	def testHash( self ) :
		"""hash() matches for equal contents and differs for different keys
		or different numbers of samples."""
		m = IECoreScene.MotionPrimitive()
		m2 = IECoreScene.MotionPrimitive()
		self.assertEqual( m.hash(), m2.hash() )
		m[0] = IECoreScene.SpherePrimitive()
		self.assertNotEqual( m.hash(), m2.hash() )
		m2[0] = IECoreScene.SpherePrimitive()
		self.assertEqual( m.hash(), m2.hash() )
		m[1] = IECoreScene.SpherePrimitive()
		self.assertNotEqual( m.hash(), m2.hash() )
		m2[2] = IECoreScene.SpherePrimitive()
		self.assertNotEqual( m.hash(), m2.hash() )

	def tearDown( self ) :
		"""Remove the serialization scratch file created by test()."""
		if os.path.isfile( os.path.join( "test", "motionPrimitive.fio" ) ):
			os.remove( os.path.join( "test", "motionPrimitive.fio" ) )
if __name__ == "__main__":
	# Run the test suite when this file is executed directly.
	unittest.main()
|
4,577 | 4e383130b185c6147315517d166ffe66be1be40d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the members app: Member, Subscription and the
    MemberSubscription through-table wiring up Member.subscription as a
    many-to-many.

    NOTE(review): ForeignKey without on_delete is Django < 2.0 syntax
    (implicit CASCADE); EmailField's max_length=75 is the old pre-1.8
    default.
    """
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Member',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('email', models.EmailField(max_length=75)),
                ('total_subscription', models.IntegerField(default=0)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MemberSubscription',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('member', models.ForeignKey(to='members.Member')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('subreddit', models.CharField(max_length=200)),
                ('count', models.IntegerField(default=5)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='subscription',
            unique_together=set([('subreddit', 'count')]),
        ),
        migrations.AddField(
            model_name='membersubscription',
            name='subscription',
            field=models.ForeignKey(to='members.Subscription'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='member',
            name='subscription',
            field=models.ManyToManyField(to='members.Subscription', through='members.MemberSubscription'),
            preserve_default=True,
        ),
    ]
|
4,578 | 5fa8ae36c4b4a5bffa64f4c65b74b74b29ba246f | # 0=RED, 1=GREEN, 2=BLUE, 3=ALPHA
#import tkinter as tk
#import tkinter.ttk as ttk
#from tkcolorpicker import askcolor
import time
# Channel buffers; RGB(A) values are 0-255 integers from the user.
c1 = [0,0,0,0] #this color (overlay colour being solved for; currently unused placeholder)
c2 = [0,0,0] #over this color (opaque underlay)
c3 = [0,0,0] #result (the composited colour we want to see)
cont='y'
#--------------------------------
while cont=='y':
    print('--enter underlay color in r,g,b--')
    c2[0]=int(input('red: '))
    c2[1]=int(input('green: '))
    c2[2]=int(input('blue: '))
    print('')
    print('--enter desired color in r,g,b--')
    c3[0]=int(input('red: '))
    c3[1]=int(input('green: '))
    c3[2]=int(input('blue: '))
    print('')
    #--------------------------------
    # Search, in 1/256 alpha steps, for the smallest alpha at which an
    # overlay colour exists with all channels inside 0-255.
    alpha = 0
    r = -1
    g = -1
    b = -1
    # BUG FIX: the original condition read
    #     while alpha < 1 and r < 0 or g < 0 or ...
    # which Python parses as (alpha < 1 and r < 0) or g < 0 or ..., so the
    # alpha bound only guarded the red channel.  Parenthesized so it bounds
    # the whole out-of-range test.
    while alpha < 1 and (r < 0 or g < 0 or b < 0 or r > 255 or g > 255 or b > 255):
        alpha+= 1/256
        inv = 1 / alpha
        # Invert the compositing equation c3 = c1*alpha + c2*(1-alpha)
        # for c1:  c1 = c3*inv + c2*(1-inv)  with inv = 1/alpha.
        r = c3[0] * inv + c2[0] * (1 - inv)
        g = c3[1] * inv + c2[1] * (1 - inv)
        b = c3[2] * inv + c2[2] * (1 - inv)
    print('---result---')
    print('red:', round(r))
    print('green:', round(g))
    print('blue:', round(b))
    print('alpha:', round(alpha*256))
    print('------------')
    print('')
    cont=input('again? y/n')
    print('')
|
4,579 | d7db617131bf6e72c7aa808030f7286ddb609cc2 | from abc import ABC
# This is base class
# NOTE(review): Vehicle subclasses ABC but declares no @abstractmethod, so
# it is still instantiable -- confirm whether that is intended.
class Vehicle(ABC):
    """Abstract root of the vehicle hierarchy."""
    pass
# GroundVehicle inherits from Vehicle
class GroundVehicle(Vehicle):
    """A vehicle that travels on the ground."""
    pass
# Car inherits from GroundVehicle
class Car(GroundVehicle):
    """A ground vehicle: car."""
    pass
# Motorcycle inherits from GroundVehicle
class Motorcycle(GroundVehicle):
    """A ground vehicle: motorcycle."""
    pass
# FlightVehicle inherits from Vehicle
class FlightVehicle(Vehicle):
    """A vehicle that flies."""
    pass
# Starship inherits from FlightVehicle
class Starship(FlightVehicle):
    """A flight vehicle intended for space travel."""
    pass
# Airplane inherits from FlightVehicle
class Airplane(FlightVehicle):
    """A flight vehicle for atmospheric travel."""
    pass
|
4,580 | 9e28fa1f221df13f9cc8e6b71586da961ebdc0e0 | # 上传文件
import os
from selenium import webdriver
# Resolve the "files" folder under the current working directory.
file_path = os.path.abspath("./files//")
# Open that folder's upfile.html page in a Firefox browser.
driver = webdriver.Firefox()
upload_page = "file:///" + file_path + "/upfile.html"
driver.get(upload_page)
# Locate the upload control and attach the local file.
# NOTE(review): the "\\" separator assumes this runs on Windows -- confirm.
driver.find_element_by_id("inputfile").send_keys(file_path + "\\test.txt")
|
4,581 | e642054dad8a2de5b01f2994348e10e9c7574ee0 | from django.apps import AppConfig
class BooksaleConfig(AppConfig):
    """Django application configuration for the ``booksale`` app."""
    name = 'booksale'
|
4,582 | a430b4629ee06dbfb267f839599383624e37451e | # -*- coding: utf-8 -*-
"""Test custom node separator."""
import six
from helper import assert_raises, eq_
import anytree as at
class MyNode(at.Node):
    """anytree node whose path separator is '|' instead of the default '/'."""
    separator = "|"
def test_render():
    """Render string cast: RenderTree output shows '|'-separated paths."""
    root = MyNode("root")
    s0 = MyNode("sub0", parent=root)
    MyNode("sub0B", parent=s0)
    MyNode("sub0A", parent=s0)
    MyNode("sub1", parent=root)
    r = at.RenderTree(root)
    # Expected ASCII-art tree with the custom '|' separator in every repr.
    expected = "\n".join(
        [
            "MyNode('|root')",
            "├── MyNode('|root|sub0')",
            "│   ├── MyNode('|root|sub0|sub0B')",
            "│   └── MyNode('|root|sub0|sub0A')",
            "└── MyNode('|root|sub1')",
        ]
    )
    # Python 2 str() returns bytes; decode before comparing.
    if six.PY2:
        eq_(str(r).decode("utf-8"), expected)
    else:
        eq_(str(r), expected)
def test_get():
    """Resolver.get with the custom '|' separator: relative paths, '..'
    and '.' navigation, absolute paths, and the error cases."""
    top = MyNode("top", parent=None)
    sub0 = MyNode("sub0", parent=top)
    sub0sub0 = MyNode("sub0sub0", parent=sub0)
    sub0sub1 = MyNode("sub0sub1", parent=sub0)
    sub1 = MyNode("sub1", parent=top)
    r = at.Resolver("name")
    eq_(r.get(top, "sub0|sub0sub0"), sub0sub0)
    eq_(r.get(sub1, ".."), top)
    eq_(r.get(sub1, "..|sub0|sub0sub1"), sub0sub1)
    eq_(r.get(sub1, "."), sub1)
    eq_(r.get(sub1, ""), sub1)
    # Unknown child name raises with a message listing the real children.
    with assert_raises(at.ChildResolverError, "MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'."):
        r.get(top, "sub2")
    # Absolute lookups start from the tree root.
    eq_(r.get(sub0sub0, "|top"), top)
    eq_(r.get(sub0sub0, "|top|sub0"), sub0)
    with assert_raises(at.ResolverError, "root node missing. root is '|top'."):
        r.get(sub0sub0, "|")
    with assert_raises(at.ResolverError, "unknown root node '|bar'. root is '|top'."):
        r.get(sub0sub0, "|bar")
def test_glob():
    """Resolver.glob wildcard matching ('*' and '?') with the '|' separator."""
    top = MyNode("top", parent=None)
    sub0 = MyNode("sub0", parent=top)
    sub0sub0 = MyNode("sub0", parent=sub0)
    sub0sub1 = MyNode("sub1", parent=sub0)
    sub0sub1sub0 = MyNode("sub0", parent=sub0sub1)
    MyNode("sub1", parent=sub0sub1)
    sub1 = MyNode("sub1", parent=top)
    sub1sub0 = MyNode("sub0", parent=sub1)
    r = at.Resolver()
    eq_(r.glob(top, "*|*|sub0"), [sub0sub1sub0])
    eq_(r.glob(top, "sub0|sub?"), [sub0sub0, sub0sub1])
    # '..' and '.' combine with wildcards.
    eq_(r.glob(sub1, "..|.|*"), [sub0, sub1])
    eq_(r.glob(top, "*|*"), [sub0sub0, sub0sub1, sub1sub0])
    eq_(r.glob(top, "*|sub0"), [sub0sub0, sub1sub0])
    # A non-wildcard miss still raises ChildResolverError.
    with assert_raises(at.ChildResolverError, "MyNode('|top|sub1') has no child sub1. Children are: 'sub0'."):
        r.glob(top, "sub1|sub1")
|
4,583 | bd0cc8cf059440f8fd7ad135894d82c9b18ebc80 | aax=int(input("enter aa-x"))
# Read the remaining x/y coordinates of points aa, bb, cc and dd
# (aa's x coordinate is read on the line just above).
aay=int(input("enter aa-y"))
bbx=int(input("enter bb-x"))
bby=int(input("enter bb-y"))
ccx=int(input("enter cc-x"))
ccy=int(input("enter cc-y"))
ddx=int(input("enter dd-x"))
ddy=int(input("enter dd-y"))
# NOTE(review): this equality chain (aax==aay, aay==bbx, bby==ccx, ccx==ccy,
# ccy==ddx, ddy==aax) does not correspond to any obvious square/rectangle
# test -- confirm the intended geometry with the author before relying on it.
if aax==aay and aay==bbx and bby==ccx and ccx==ccy and ccy==ddx and ddy==aax:
    print("yes")
else:
    print("no")
|
4,584 | 4ba0affd3cbdc2652274213a8d410b541fb3edb4 | ## n.b. uses python 3 wordseg virtualenv (wordseg needs Py3)
# e.g. $ source ~/venvs/Py3/wordseg/bin/activate
## wordseg: see https://wordseg.readthedocs.io
from __future__ import division
import io, collections, os, glob, csv, re
from scipy.stats import entropy
from copy import deepcopy
# get username
import getpass
uname = getpass.getuser()
## get corpus stats
def process_corpus(lcount, text, language, corpus, child, utts, owus, pdict, bdict):
    """Summarize a (partial) corpus and fit a Zipf-Mandelbrot model.

    Writes the ranked diphone table to a per-corpus file, shells out to
    lnre.R for the ZM fit, and returns a list of summary statistics
    (language, corpus, child, utterance/type/token counts, TTR, boundary
    and diphone entropies, ZM fit values).

    NOTE(review): ``text`` is currently unused here -- confirm whether it
    can be dropped from the signature.  ``pdict`` maps diphone -> count,
    ``bdict`` maps diphone -> count of word-boundary occurrences.
    """
    owu = owus/utts
    lineout1 = [language, corpus, child, utts, owu]
    # corpus types, tokens
    ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)
    tokencount = sum(pdict.values())
    lineout1.append(tokencount)
    typecount = len(ordered)
    lineout1.append(typecount)
    ttr = typecount / tokencount
    lineout1.append(ttr)
    # diphone distributions
    boundarydist = []
    diphonedist = []
    k=0
    diphfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_diphone-system.txt'
    with io.open(diphfile, 'w', encoding='utf8') as writefile:
        writefile.write('k\tf\ttype\trel.freq\tboundary.prob\n') # only columns 1-3 are used by lnre.R
        for diph, denom in ordered:
            k+=1
            # Fraction of this diphone's occurrences that straddle a word boundary.
            if bdict[diph]:
                num = bdict[diph]
            else:
                num = 0
            boundprob = num / denom # boundary prob
            boundarydist.append(boundprob)
            relfreq = denom / tokencount # diphone prob
            diphonedist.append(relfreq)
            writefile.write('%i\t%i\t%s\t%.6f\t%.6f\n' % (k, denom, diph, relfreq, boundprob))
    writefile.close()
    # entropy calcs (Shannon entropy, base 2, via scipy.stats.entropy)
    boundaryH = entropy(boundarydist, qk=None, base=2)
    lineout1.append(boundaryH)
    diphoneH = entropy(diphonedist, qk=None, base=2)
    lineout1.append(diphoneH)
    # run Zipf LNRE fit (clear old file first)
    tmplnre = '/Users/' + uname + '/tmp/lnre.txt'
    cmd1 = 'rm '+ tmplnre
    os.system(cmd1)
    cmd2 = 'Rscript lnre.R '+ diphfile
    os.system(cmd2)
    # lnre.R writes one fit value per line; missing file means the fit failed.
    if os.path.exists(tmplnre):
        with open(tmplnre, 'r') as lnre:
            for line in lnre:
                lineout1.append(line.rstrip())
        lnre.close()
    else: # else 3 zeros
        lineout1.append(0)
        lineout1.append(0)
        lineout1.append(0)
    # get C_WALS stat (not in use)
    #langcode = langcodes[lang]
    return lineout1
## run wordseg
def word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount, wcount):
    """Run one wordseg algorithm over the corpus-so-far and collect scores.

    Writes ``text`` to a temp file, prepares gold/input files with
    wordseg-prep, runs the segmenter named by ``algo`` via os.system, then
    parses the wordseg-eval output.  Returns ``lineout1`` (copied) extended
    with word/phone counts, the algorithm name and its evaluation scores.
    """
    # start point is output of process_corpus()
    lineout2 = deepcopy(lineout1)
    meanlength = round(pcount/wcount, 6) # phones per word
    pboundary = round(wcount/pcount, 6) # words per phone
    lineout2.append(wcount)
    lineout2.append(pcount)
    lineout2.append(meanlength)
    lineout2.append(pboundary)
    # prepare filenames
    tmpfile = '/Users/' + uname + '/tmp/tmp.txt'
    goldfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_gold-for-wordseg.txt'
    prepfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_prepared-for-wordseg.txt'
    segfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_segmented-by_' + algo + '.txt'
    evalfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_segmented-by_' + algo + '_eval.txt'
    # write text so far to temporary file
    tmp = open(tmpfile, 'w')
    tmp.write(text)
    tmp.close()
    # prepare gold and input files for wordseg
    os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' % (tmpfile, goldfile, prepfile)) # ignore punctuation
    lineout2.append(algo)
    # run wordseg command; each branch maps an ``algo`` name onto the
    # corresponding wordseg CLI invocation
    if algo=='dibs': # DIBS-phrasal uses phrases (utterances) as chunks
        os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile, algo, tmpfile, segfile))
    elif algo=='utt_baseline': # utterance baseline
        os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))
    elif algo=='rand_baseline': # random baseline
        os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile))
    elif algo=='unit_baseline': # basic unit baseline
        os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))
    elif algo=='oracle': # oracle baseline: P(word|phone)
        os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile, pboundary, segfile))
    elif algo=='tp_ftp': # transitional prob: forwards
        os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile, segfile))
    elif algo=='tp_btp': # transitional prob: backwards
        os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile, segfile))
    elif algo=='tp_mi': # transitional prob: mutual information
        os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile, segfile))
    else:
        os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))
    # evaluate
    os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))
    with open(evalfile, 'r') as eval:
        for line in eval:
            lineout2.append(re.sub('^[^\d]*', '', line.rstrip())) # strip from the start until first number encountered
    eval.close()
    print(lineout2)
    return lineout2
## open results file
# --- Main driver: stream each phonemized corpus, accumulate diphone/boundary
# counts, and score every segmentation algorithm at each 1000-utterance mark. ---
statsfile = '/Users/' + uname + '/Corpora/CHILDES/segmentation_experiment_stats.csv'
statsopen = open(statsfile,'wt')
statscsv = csv.writer(statsopen)
statscsv.writerow(('language', 'corpus', 'child', 'n.utterances', 'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy', 'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones', 'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR', 'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P', 'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P', 'boundary.noedge.R', 'boundary.noedge.F'))
## input directory (the phonemized files)
thousand = re.compile('000$')
algos = ['utt_baseline', 'rand_baseline', 'unit_baseline', 'oracle', 'tp_ftp', 'tp_btp', 'tp_mi', 'dibs', 'puddle']
directory = '/Users/' + uname + '/Corpora/CHILDES/phonemized/'
for filein in glob.glob(directory+'*_phonemes.txt', recursive=True):
    print(filein)
    # parse filename: files are named <language>_<corpus>_<child>_..._phonemes.txt
    (language, corpus, child) = filein.split('/')[-1].split('_')[0:3]
    # read corpus
    phondict = collections.Counter()
    boundaries = collections.Counter()
    phonecount = 0
    wordcount = 0
    with io.open(filein, 'r', encoding='utf8') as myfile:
        linecount = 0
        owucount = 0
        inputsofar = ''
        for line in myfile:
            inputsofar += line
            linecount += 1
            # ';eword' marks word ends, so its count is the word count.
            ewords = line.count(';eword')
            wordcount += ewords
            if ewords==1:
                owucount += 1
            #print('utterance: %s' % (line.rstrip()))
            phones = line.split() # split on whitespace
            nphones = len(phones) - ewords
            phonecount += nphones
            for (i, phone) in enumerate(phones):
                if i==0 or phones[i]==';eword' or phones[i-1]==';eword':
                    pass # ignore phone 1 in utterance or word and word delimiters
                else:
                    diphone = phones[i-1] + phones[i]
                    phondict[diphone] += 1
                    # NOTE(review): phones[i+1] assumes every line ends with
                    # ';eword'; a line ending in a bare phone would raise
                    # IndexError here -- confirm the input format.
                    if i==1 or phones[i+1]==';eword' or phones[i-2]==';eword':
                        #print('boundary diphone: %s' % (diphone))
                        boundaries[diphone] += 1
                        #print('count: %i' % (boundaries[diphone]))
            # reached iteration point? (round 1000)
            if thousand.search(str(linecount)):
                csvline1 = process_corpus(linecount, inputsofar, language, corpus, child, linecount, owucount, phondict, boundaries)
                for a in algos:
                    csvline2 = word_seg(linecount, inputsofar, a, csvline1, language, corpus, child, phonecount, wordcount)
                    statscsv.writerow((csvline2))
        # run again at end of file, if not round 1000 line count
        if not thousand.search(str(linecount)):
            csvline1 = process_corpus(linecount, inputsofar, language, corpus, child, linecount, owucount, phondict, boundaries)
            for a in algos:
                csvline2 = word_seg(linecount, inputsofar, a, csvline1, language, corpus, child, phonecount, wordcount)
                statscsv.writerow((csvline2))
    myfile.close()
print('FINISHED')
print('see '+ statsfile)
|
4,585 | 6434e427c9015544985a38104cffeaa10866b9ea | import os
import string
filenames = os.listdir('data/SENTIMENT_test')
filenames.sort()
outfile = open('sentiment_test.txt', 'w')
remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)
for filename in filenames:
infile = open('data/SENTIMENT_test/' + filename, errors='ignore')
infiletext = infile.read()
infiletext = infiletext.replace('\n', ' ')
infiletext = infiletext.translate(remove_punctuation_map)
outfile.write(infiletext + '\n')
infile.close()
outfile.close()
|
4,586 | d80cb5ea57faa0f9e3a8dd5d40c9852c2f7f83e4 | # coding: utf-8
import logging
from flask import request
from flask.ext.admin import expose
from cores.actions import action
from cores.adminweb import BaseHandler
from dao.bannerdao import banner
from extends import csrf
from libs.flask_login import login_required
from utils.function_data_flow import flow_tools
from utils.helpers import utf8
from utils.numbering import numbers
__author__ = 'bin wen'
_log = logging.getLogger("ADMIN")
_handler_log = logging.getLogger("HANDLER")
class BannerHandler(BaseHandler):
"""
轮播焦点图列表
"""
column_list = ("banner_code", "name", "banner_type", "target", "image", 'validity',
"updated_time", "remark")
column_labels = {
"banner_code": u"编号",
"name": u"名称",
"banner_type": u"类型",
"target": u"跳转目标",
"image": u"图片",
"validity": u"状态",
"updated_time": u"变更时间",
"remark": u"备注"
}
column_widget_args = {
"image": {'class': "hidden-480"},
"remark": {'class': "hidden-480"}
}
tabs_list = (
{"query_type": -1, "name": u"全部"},
{"query_type": 1, "name": u"有效的"},
{"query_type": 0, "name": u"已作废"}
)
@expose('/')
@expose('/banner/list.html')
@login_required
def list_view(self):
page = request.args.get('page', 0, type=int)
name = request.args.get('name', "")
query_type = request.args.get('query_type', -1, type=int)
query_kwargs = dict(name=name, query_type=query_type)
def pager_url(p):
if p is None:
p = 0
return self._get_url('.list_view', p, **query_kwargs)
count = banner.get_total_count(**query_kwargs)
results = []
num_pages = 0
if count > 0:
num_pages = self.gen_total_pages(count)
if num_pages - 1 < page:
page -= 1
offset_value = page * self.page_size
results = banner.query_list(
query_type=query_type,
name=name,
limit=self.page_size,
offset=offset_value
)
actions, actions_confirmation = self.get_actions_list()
return_url = self.gen_return_url(".list_view", page=page, **query_kwargs)
return self.render(
template="banner/list.html",
actions=actions,
actions_confirmation=actions_confirmation,
count=count,
page=page,
num_pages=num_pages,
pager_url=pager_url,
data=results,
query_kwargs=query_kwargs,
return_url=return_url,
column_list=self.column_list,
column_labels=self.column_labels,
column_widget_args=self.column_widget_args,
tabs_list=self.tabs_list,
banner_types=flow_tools.gen_banner_type()
)
@expose('/banner/action.html', methods=('POST',))
@login_required
def action_view(self):
return_url = request.form.get("return_url", "")
return self.handle_action(return_view=return_url)
@action('disable', u"注销(下架)所选", u"你确定要注销(下架)所选的记录?")
def action_disable(self, ids):
try:
result = banner.set_validity(ids, validity=0)
_handler_log.info(u"[BannerListHandler] batch disable, id:{}, operator: {}".format(
utf8(ids), self.current_operator)
)
return result
except Exception as e:
_log.exception(u"[BannerListHandler] batch disable error")
@action('activate', u"激活(上架)选择", u"你确定要激活所选的记录?")
def action_activate(self, ids):
try:
result = banner.set_validity(ids, validity=1)
_handler_log.info(u"[BannerListHandler] batch disable, id:{}, operator: {}".format(
utf8(ids), self.current_operator)
)
return result
except Exception as e:
_log.exception(u"[BannerListHandler] batch disable error")
@action('delete', u"删除所选", u"你确定要删除所选的记录?")
def action_delete(self, ids):
try:
result = banner.delete(ids)
_handler_log.info(u"[BannerListHandler] batch delete, id:{}, operator: {}".format(
utf8(ids), self.current_operator)
)
return result
except Exception as e:
_log.exception(u"[BannerListHandler] batch delete error")
@expose('/banner/create.html', methods=('GET', 'POST'))
@login_required
def create_view(self):
if request.method == "GET":
select_content_list = flow_tools.gen_bind_products()
result = {
"select_content_list": select_content_list,
"banner_types": flow_tools.gen_banner_type()
}
return self.render(template="banner/create.html", data=result)
else:
req_data = self.gen_arguments
name = req_data.get("name")
banner_type = int(req_data.get("banner_type", 0))
url_target = req_data.get("url_target", "") # 外部url
select_target = req_data.get("select_target", "") # 下拉内容
remark = req_data.get("remark", "")
picture_url_list = req_data.getlist("picture_url") # 图片url
if not picture_url_list:
return self.make_write(result_code=4002)
if banner_type == 2:
target = url_target
else:
target = select_target
result = banner.save(
banner_code=numbers.gen_banner_code(),
name=name,
banner_type=banner_type,
target=target,
image_url=picture_url_list[0],
remark=remark
)
return self.make_write(result_code=0, result_data=self.reverse_url(".list_view"))
@expose('/banner/edit.html', methods=('GET', 'POST'))
@login_required
def edit_view(self):
    """GET: render the banner edit form; POST: persist the edits.

    NOTE(review): the GET branch implies banner_type 0=product, 1=tweet,
    3=group (dropdown content) and 2=external URL -- confirm against
    flow_tools.gen_banner_type().
    """
    if request.method == "GET":
        _id = request.args.get("id", "")
        return_url = request.args.get("return_url", "")
        result = banner.get_detail(_id)
        banner_type = result.banner_type
        select_content_list = []
        # Populate the dropdown with content matching the banner's type.
        if banner_type == 0:
            select_content_list = flow_tools.gen_bind_products()
        elif banner_type == 1:
            select_content_list = flow_tools.gen_bind_tweets()
        elif banner_type == 3:
            select_content_list = flow_tools.gen_bind_groups()
        result["banner_types"] = flow_tools.gen_banner_type()
        result["select_content_list"] = select_content_list
        return self.render(
            template="banner/edit.html",
            data=result,
            return_url=return_url
        )
    else:
        req_data = self.gen_arguments
        return_url = req_data.get("return_url", "")
        _id = req_data.get("id")
        name = req_data.get("name")
        banner_type = int(req_data.get("banner_type", 0))
        url_target = req_data.get("url_target", "")  # external URL
        select_target = req_data.get("select_target", "")  # dropdown-selected content
        remark = req_data.get("remark", "")
        picture_url_list = req_data.getlist("picture_url")  # image URLs
        # 4002: request rejected because no image was uploaded.
        if not picture_url_list:
            return self.make_write(result_code=4002)
        if banner_type == 2:
            target = url_target
        else:
            target = select_target
        result = banner.update(
            _id=_id,
            name=name,
            banner_type=banner_type,
            target=target,
            image_url=picture_url_list[0],
            remark=remark
        )
        return self.make_write(result_code=0, result_data=self.decode_return_url(return_url))
@expose('/banner/delete.html', methods=('POST',))
@login_required
def delete_view(self):
    """Delete a single banner and redirect back to the caller's page."""
    req_data = self.gen_arguments
    return_url = req_data.get("return_url", "")
    _id = req_data.get("id")
    result = banner.delete([_id])
    # Fixed: was _handler_log.exception(...) with an "[AdminDeleteHandler]"
    # tag -- .exception() belongs inside an except block, and the tag was
    # copy/pasted from another handler.  Log a normal audit line consistent
    # with the batch actions above.
    _handler_log.info(u"[BannerListHandler] delete, id:{}, operator: {}".format(
        utf8(_id), self.current_operator))
    return self.make_write(result_code=0, result_data=self.decode_return_url(return_url))
@expose('/banner/detail.html', methods=('GET',))
@login_required
def detail_view(self):
    # Placeholder: banner detail page not implemented yet.
    pass
@csrf.exempt
@expose('/banner/ajax/check.html', methods=('POST',))
def check_view(self):
    # Placeholder: AJAX validation endpoint not implemented yet
    # (CSRF-exempt because it is called from scripts, not forms).
    pass
|
4,587 | 23bd2ed783ab117bee321d97aa1c70698bdeb387 | ../../3.1.1/_downloads/b19d86251aea30061514e17fba258dab/nan_test.py |
4,588 | 98c2fdf0dfc9a660a3eb9a359aa9ca14d83c60ce | import numpy as np
import sympy as sp
# (index: int, cos: bool)
# 0 1 1 2 2 3 3 4 4 5 5 ...
# {0, cos}, {1, cos}, {1, sen}, {2, cos}, {2, sen}, ...
alternatingRange = lambda m : [{'index': j, 'cos': True if k == 0 else False} for j in range(m + 1) for k in range(2 if j != 0 else 1)]
# data: "dict"
# data = {'x': [x-points], 'y': [y-points]}
def trigLSQ(data):
    """Trigonometric least-squares fit of (x, y) samples.

    Args:
        data: dict with equal-length sequences 'x' (sample points) and
            'y' (sample values).

    Returns:
        A sympy expression F(x): a truncated Fourier series fitted to the data.
        NOTE(review): the normal equations use cos/sin(k*x) but F is built
        with cos/sin(k * 2*pi/12 * x) -- presumably x is pre-scaled (see the
        trailing 'x = 2kpi/N' note in this file); confirm with callers.
    """
    noPoints = len(data['x']) # N
    # Highest harmonic m: N/2 - 1 for even N, (N-1)/2 for odd N.
    order = int(noPoints/2) if int(noPoints/2) < noPoints/2 else int(noPoints/2)-1 # m
    # Column vectors of cos(a*x_i) / sin(a*x_i) evaluated at every sample.
    c = lambda a : np.array([np.cos(a * float(data['x'][i])) for i in range(noPoints)])
    s = lambda a : np.array([np.sin(a * float(data['x'][i])) for i in range(noPoints)])
    y = np.array([data['y'][i] for i in range(noPoints)])
    # Normal equations: matrix * sol = res, where matrix is the Gram matrix
    # of the basis functions and res their inner products with y.
    matrix = np.array(
        [[np.dot(c(i['index']) if i['cos'] else s(i['index']), c(j['index']) if j['cos'] else s(j['index'])) for i in alternatingRange(order)] for j in alternatingRange(order)]
    )
    res = [[np.dot(y, c(i['index']) if i['cos'] else s(i['index']))] for i in alternatingRange(order)]
    sol = np.linalg.solve(matrix, res)
    # F is the function approximation
    F = 0
    for j, i in enumerate(alternatingRange(order)): F += sol[j][0] * sp.sympify(('cos(' if i['cos'] else 'sin(') + str(i['index']) + ' * 2*pi/12 * x)')
    return F
# x = 2kpi/N --> k = xN/2pi |
4,589 | 5850be6aef6e4adb36a122cb8e5ffe044b1c9009 | __author__ = 'cromox'
from time import sleep
import inspect
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from Forex_CFD.features.main_page import FxMainPage
class FxBuySell(FxMainPage):
    """Page object driving the trade (buy/sell) dialog of the Forex web app."""

    def __init__(self, driver):
        super().__init__(driver)
        self.driver = driver

    def buy(self, amount):
        """Enter `amount` in the open trade dialog and confirm a buy order.

        BUG FIX: the original guarded presence with find_element_by_xpath(),
        which *raises* NoSuchElementException instead of returning something
        falsy, so the "Market closed" elif branch was unreachable.  The
        plural find_elements_by_xpath() returns an empty (falsy) list when
        nothing matches.
        """
        self.log.info("--> " + inspect.stack()[0][3] + " started")
        amount_inputs = self.driver.find_elements_by_xpath(
            "//div[@class='visible-input']//input[contains(@id, 'uniqName')]")
        if amount_inputs:
            element = amount_inputs[0]
            element.clear()
            # Type digit by digit: the widget re-formats after every keystroke.
            for character in str(amount):
                element.send_keys(character)
                sleep(0.5)
            # Confirm Button
            confirm = self.driver.find_elements_by_xpath("//div[contains(@class,'confirm-button')]")
            if confirm:
                confirm[0].click()
            elif self.driver.find_elements_by_xpath("//*[contains(text(),'Market closed')]"):
                print('Market closed')
                self.driver.find_elements_by_xpath("//*[@class='header']//*[@class='close-icon']")[0].click()

    def sell(self, amount):
        """Switch the dialog to SELL, then submit exactly like a buy."""
        self.log.info("--> " + inspect.stack()[0][3] + " started")
        # Switching to sell
        self.driver.find_elements_by_xpath("//div[@data-dojo-attach-event='click: setDirectionSell']")[0].click()
        # From there on it's exactly like the buy
        self.buy(amount)

    def script_click_xpath(self, xpath):
        """Click the first node matching `xpath` via injected JavaScript
        (for elements Selenium cannot click directly)."""
        self.log.info("--> " + inspect.stack()[0][3] + " started")
        self.driver.execute_script(f"document.evaluate(\"{xpath}\", document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue.click()")

    def open_stock_dialog(self, stock):
        """Search `stock` by name and open its trade dialog."""
        self.log.info("--> " + inspect.stack()[0][3] + " started")
        WebDriverWait(self.driver, 5).until(EC.visibility_of_any_elements_located((By.XPATH, "//span[contains(@data-dojo-attach-event, 'onOpenDialogClick')]")))
        elem = self.driver.find_elements_by_xpath("//span[contains(@data-dojo-attach-event, 'onOpenDialogClick')]")
        # Try both candidate elements (bare `except:` narrowed to Exception).
        try:
            elem[0].click()
        except Exception:
            elem[1].click()
        # Search the stock
        elem = self.driver.find_element_by_xpath("//input[@placeholder=\"Instrument search\"]")
        # Setting the max length to 100 so the API'll be able to enter long stocks names
        self.driver.execute_script("arguments[0].setAttribute('maxlength',arguments[1])", elem, 100)
        elem.send_keys(stock)
        # Open its dialog with JS. Selenium couldn't open the dialog itself.
        self.script_click_xpath(f"//*[@id='list-results-instruments']//span[contains(@class, 'instrument-name') and .='{stock}']")
        sleep(1)

    def buy_stock(self, stock, amount):
        """Open `stock`'s dialog and buy `amount`."""
        self.log.info("--> " + inspect.stack()[0][3] + " started")
        self.open_stock_dialog(stock)
        self.buy(amount)
        sleep(0.5)

    def sell_stock(self, stock, amount):
        """Open `stock`'s dialog and sell `amount`."""
        self.log.info("--> " + inspect.stack()[0][3] + " started")
        # It's just opening a stock and selling it
        self.open_stock_dialog(stock)
        self.sell(amount)
        sleep(0.5)
4,590 | 2f6d51d5c14ddc1f6cd60ab9f3b5d4a879d14af0 | from django import forms
# (value, label) pairs for the bet outcome: rise / stay / fall.
BET_CHOICES = (
    ('1', 'Will rise'),
    ('x', 'Will stay'),
    ('2', 'Will fall'),
)
class NormalBetForm(forms.Form):
    """Bet form: a free-text song name plus a rise/stay/fall prediction
    chosen from BET_CHOICES."""
    song = forms.CharField()
    data = forms.ChoiceField(BET_CHOICES)
|
4,591 | 08408cf096bbe23f9a832cc0cf2e017abdbd359f | import sys, os
import cv2
# Set up the video reader: an int selects a camera device index, a str is a
# video file path.
video_path = 0 # camera number index
# video_path = "/home/pacific/Documents/Work/Projects/Workflows/server/PycharmProjects/Pacific_AvatarGame_Host/humanpose_2d/LiveCamera/test.mp4" # real video file
# isinstance() replaces the `type(x).__name__ == "str"` anti-idiom.
if isinstance(video_path, str):
    videoReader = cv2.VideoCapture(video_path)
    print("Load live video from file...")
elif isinstance(video_path, int):
    videoReader = cv2.VideoCapture(video_path)
    print("Get live video from camera...")
if videoReader.isOpened():
    print("Camera staus ready...")
else:
    print("Camera status fault...")
    exit()
video_fps = videoReader.get(cv2.CAP_PROP_FPS)
print("Live Video FPS: ", video_fps)
video_width = videoReader.get(cv2.CAP_PROP_FRAME_WIDTH)
video_height = videoReader.get(cv2.CAP_PROP_FRAME_HEIGHT)
video_size = (int(video_width), int(video_height))
print("Live Video Size: ", video_size)
# Set up the video writer mirroring the input's FPS and frame size.
videoWriter = cv2.VideoWriter('./save.avi', cv2.VideoWriter_fourcc('M', 'P', '4', '2'), int(video_fps), video_size)
# Read, annotate, display and persist frames until 'q' is pressed.
while videoReader.isOpened():
    success, frame = videoReader.read()
    if success:
        # show the video frame
        print("Live Video Frame Shape: {}".format(frame.shape))
        cv2.putText(frame, "Live Camera", (470,30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,255), 2)
        cv2.namedWindow("Live Video", 0)
        cv2.imshow("Live Video", frame)
        # save the video frame
        videoWriter.write(frame)
        # BUG FIX: the original called waitKey(20) for pacing and then
        # waitKey(1) again for the quit test, blocking twice per frame and
        # often missing the key press.  One waitKey(20) does both.
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break
    else:
        # Transient read failure: keep polling while the device stays open.
        continue
videoReader.release()
videoWriter.release()
cv2.destroyAllWindows()
print("Live Video Done.")
|
4,592 | a484272ace089008e27f4e00d2e641118432665e | from PIL import Image
from random import randrange
class PileMosaic:
    """Renders a mosaic of coloured tiles where no two orthogonally adjacent
    tiles share a colour, and saves it as a PNG."""

    def __init__(self):
        # Canvas size in pixels and output file name.
        self.width, self.height = 2380, 2800
        self.filename = "pile_mosaic.png"
        # Named tile colours (RGB).
        self.crema = (240, 233, 227)
        self.choco = (89, 62, 53)
        self.luna = (43, 97, 123)
        self.latte = (195, 175, 148)
        self.piscina = (170, 200, 211)
        self.lavanda = (189, 192, 209)
        self.viola = (133, 108, 140)
        self.morado = (121, 69, 92)
        self.rosa = (222, 179, 172)
        self.flamingo = (238, 157, 140)
        # Active palette (swap for the commented alternative set).
        self.color_tuple = (self.crema, self.choco, self.luna, self.latte, self.piscina)
        # self.color_tuple = (self.lavanda, self.viola, self.rosa, self.morado, self.flamingo)
        self.tile_width = 300
        self.tile_height = 100

    def create_new_image(self):
        """Start a fresh white canvas."""
        self.image = Image.new("RGB", (self.width, self.height), "white")
        self.data = [(255, 255, 255)] * (self.width * self.height)

    def write_image(self):
        """Save the canvas as a PNG file."""
        self.image.save(self.filename, "PNG")

    @staticmethod
    def hex_to_rgb(value):
        """'#rrggbb' -> (r, g, b).  (Was defined without self/@staticmethod,
        so it crashed when called on an instance.)"""
        value = value.lstrip('#')
        lv = len(value)
        return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))

    @staticmethod
    def rgb_to_hex(rgb):
        """(r, g, b) -> '#rrggbb'.  (Same @staticmethod fix as hex_to_rgb.)"""
        return '#%02x%02x%02x' % rgb

    def place_pile(self, color, x=0, y=0):
        """Paint one tile_width x tile_height tile with its top-left at (x, y)."""
        for i in range(self.tile_width):
            for j in range(self.tile_height):
                self.image.im.putpixel((x + i, y + j), color)

    def fill_random(self):
        """Fill the canvas with uniformly random tiles (adjacent may repeat).

        BUG FIX: Python-2 era `/` gave floats under Python 3, which range()
        rejects -- floor division `//` restores the intent.  The hard-coded
        randrange(5) is generalized to the palette length.
        """
        for x in range(self.width // self.tile_width):
            for y in range(self.height // self.tile_height):
                current_color = randrange(len(self.color_tuple))
                self.place_pile(self.color_tuple[current_color], x=x*self.tile_width, y=y*self.tile_height)

    def create_random_pattern(self):
        """Return a column-major grid of colours with no two orthogonally
        adjacent cells equal (same `//` fix as fill_random)."""
        initial_pattern = []
        for x in range(self.width // self.tile_width):
            initial_pattern.append([])
            for y in range(self.height // self.tile_height):
                candidates = list(self.color_tuple)
                # Exclude the colours already chosen for the left and upper
                # neighbours; ValueError means the colour was already removed.
                if x - 1 >= 0:
                    try:
                        candidates.remove(initial_pattern[x - 1][y])
                    except ValueError:
                        pass
                if y - 1 >= 0:
                    try:
                        candidates.remove(initial_pattern[x][y - 1])
                    except ValueError:
                        pass
                initial_pattern[x].append(candidates[randrange(len(candidates))])
        return initial_pattern

    def fill(self, pattern):
        """Render `pattern`, leaving a 4-pixel grout line between tiles
        (same `//` fix as fill_random)."""
        for x in range(self.width // (self.tile_width + 4)):
            for y in range(self.height // (self.tile_height + 4)):
                self.place_pile(pattern[x][y], x=x*(self.tile_width+4), y=y*(self.tile_height+4))
# Build a mosaic: generate a random no-repeat-neighbour pattern, render it
# onto a fresh canvas, and save the PNG.
pile = PileMosaic()
pile.create_new_image()
pile.fill(pile.create_random_pattern())
pile.write_image()
|
4,593 | d0adbcd60727c2c68e06dc5e796f2676f927c45a |
# coding: utf-8
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time
import random
# ---- Load the recorded trajectory ------------------------------------------
csvfilename = 'data/0901/exp1/xiaoxiong.csv'
df = pd.read_csv(csvfilename, header=None,
                 names=['abstime','posx','posy','posz','roty','rotx','anim'])
# skiprows=1, skipfooter=1)
df.head()
# Ground-truth positions per axis.
Xr=df['posx'].values
Yr=df['posy'].values
Zr=df['posz'].values
m=len(Xr)
print(m)
deltaTime = 0.0
totalTime = 0.0
# ---- Kalman filter setup: 9-dimensional state ------------------------------
P = 1.0*np.eye(9)
# Measurement matrix: only the first three state components are observed.
H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
               [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
               [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
rp = 0.01  # Noise of Position Measurement
R = np.matrix([[rp, 0.0, 0.0],
               [0.0, rp, 0.0],
               [0.0, 0.0, rp]])
sa = 0.05
u = 0.0
# Control matrix (no control input is applied: u stays 0).
B = np.matrix([[0.0],
               [0.0],
               [0.0],
               [0.0],
               [0.0],
               [0.0],
               [0.0],
               [0.0],
               [0.0]])
I = np.eye(9)
sp= 0.01 # Sigma for position noise
# Simulated noisy measurements of the true track.
Xm = Xr + sp * (np.random.randn(m))
Ym = Yr + sp * (np.random.randn(m))
Zm = Zr + sp * (np.random.randn(m))
measurements = np.vstack((Xm,Ym,Zm))
# Initial state: first measured position, remaining components zero.
x = np.matrix([measurements[0][0], measurements[1][0],measurements[2][0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T
# Preallocation for Plotting
xt = []
yt = []
zt = []
# Candidate mean frame intervals (seconds) to sweep over.
mean = [i*0.01 for i in range(1,21)]
print(mean)
# For each candidate frame interval, run the filter over all m samples and
# report (interval, compute time, RMS error against the ground truth).
for i in mean:
    random.seed(1)
    # Per-frame sleep times: i +/- 0.005 s, to emulate jittery frame timing.
    randomFactor = [random.random() * 0.01 + (i - 0.005) for _ in range(m)]
    for idx,step in enumerate(range(m)):
        frameBegin = time.time()
        time.sleep(randomFactor[idx])
        computeBegin = time.time()
        # Update the time-varying matrices (translated from Chinese).
        dt = i if idx == 0 else deltaTime # Time Step between Filter Steps
        # Constant-acceleration transition: pos += v*dt + a*dt^2/2, v += a*dt.
        A = np.matrix([[1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0, 0.0],
                       [0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0],
                       [0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2],
                       [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt],
                       [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
        G = np.matrix([[1 / 2.0 * dt ** 2],
                       [1 / 2.0 * dt ** 2],
                       [1 / 2.0 * dt ** 2],
                       [dt],
                       [dt],
                       [dt],
                       [1.0],
                       [1.0],
                       [1.0]])
        # Process noise from acceleration uncertainty sa.
        Q = G * G.T * sa ** 2
        # Time Update (Prediction)
        # ========================
        # Project the state ahead
        x = A*x + B*u
        # Project the error covariance ahead
        P = A*P*A.T + Q
        # Measurement Update (Correction)
        # ===============================
        # Compute the Kalman Gain
        S = H*P*H.T + R
        K = (P*H.T) * np.linalg.pinv(S)
        # Update the estimate via z
        Z = measurements[:,step].reshape(H.shape[0],1)
        y = Z - (H*x) # Innovation or Residual
        x = x + (K*y)
        # Update the error covariance
        P = (I - (K*H))*P
        # Save states for Plotting
        xt.append(float(x[0]))
        yt.append(float(x[1]))
        zt.append(float(x[2]))
        frameEnd = time.time()
        deltaTime = frameEnd - frameBegin
        totalTime += (frameEnd - computeBegin)
    # distance calculate: RMS error between estimate and ground truth
    dist = np.sqrt(((Xr-xt)**2 + (Yr-yt)**2 + (Zr-zt)**2).mean())
    print('%.3f,%.8f,%.3f' % (i, totalTime, dist))
    # Reset the initial settings for the next interval (translated from Chinese).
    totalTime = 0.0
    P = 1.0 * np.eye(9)
    H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
    rp = 0.01  # Noise of Position Measurement
    R = np.matrix([[rp, 0.0, 0.0],
                   [0.0, rp, 0.0],
                   [0.0, 0.0, rp]])
    sa = 0.05
    u = 0.0
    B = np.matrix([[0.0],
                   [0.0],
                   [0.0],
                   [0.0],
                   [0.0],
                   [0.0],
                   [0.0],
                   [0.0],
                   [0.0]])
    I = np.eye(9)
    sp = 0.01  # Sigma for position noise
    # Fresh noise realisation for the next sweep iteration.
    Xm = Xr + sp * (np.random.randn(m))
    Ym = Yr + sp * (np.random.randn(m))
    Zm = Zr + sp * (np.random.randn(m))
    measurements = np.vstack((Xm, Ym, Zm))
    x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T
    # Preallocation for Plotting
    xt = []
    yt = []
    zt = []
|
4,594 | 493dbf85069f2115896a5f5f5d593c8d95b85cff | #
# Wrappers for model evaluation
#
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from modules import Classifier
from typing import Generator, NamedTuple, Optional, Union
from utils import expand_generator
class Evaluator(object):
    """Abstract base for evaluation strategies producing accuracy/log-loss."""

    class Result(NamedTuple):
        # Aggregated metrics over one evaluation run.
        accuracy: float
        log_loss: float

    def evaluate(self, *args, **kwargs):
        """Run the evaluation; concrete subclasses must override.

        The original `return NotImplemented` handed callers a sentinel that
        is only meant for binary dunder methods; an abstract stub should
        raise NotImplementedError instead.
        """
        raise NotImplementedError
class ModelEvaluator(Evaluator):
    """Evaluates a Classifier over an entire Dataset via a DataLoader."""

    def __init__(self, dataset: Dataset, batch_size: int, num_workers: int, mixed_precision: bool = True):
        # mixed_precision toggles torch.cuda.amp.autocast during evaluation.
        self.dataset = dataset
        self.mixed_precision = mixed_precision
        self.loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=num_workers, drop_last=False)

    @property
    def num_batches(self):
        # Number of mini-batches one evaluation pass will consume.
        return len(self.loader)

    def evaluate(self, model: Classifier, device: Optional[Union[torch.device, str]] = None) -> Evaluator.Result:
        """Synchronously drain evaluate_iter and return its final Result."""
        return expand_generator(self.evaluate_iter(model, device), return_only=True)

    def evaluate_iter(
        self,
        model: Classifier,
        device: Optional[Union[torch.device, str]] = None) -> Generator[dict, None, Evaluator.Result]:
        """Yield {'batch': i} progress dicts per batch; return the aggregate Result.

        Accuracy and log-loss are accumulated as per-sample sums divided by
        len(dataset), so a partially-filled final batch is weighted correctly.
        """
        with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled=self.mixed_precision):
            mean_accuracy = 0.
            mean_log_loss = 0.
            for i, (x, y) in enumerate(self.loader):
                x = x.to(device)
                y = y.to(device)
                logits = model(x)
                correct = torch.sum(logits.argmax(-1) == y).item()
                log_loss = F.cross_entropy(logits, y, reduction='sum').item()
                mean_accuracy += correct / len(self.dataset)
                mean_log_loss += log_loss / len(self.dataset)
                yield dict(batch=i)
            return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)
|
4,595 | b8fa36ed3587511e0c64f0ffc87ea6e7857725d7 | from django.utils.html import strip_tags
from rest_framework import serializers
from home.models import *
class SliderSerializer(serializers.ModelSerializer):
    """Serializes all fields of the Slider model."""
    class Meta:
        model = Slider
        fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
    """Serializes all fields of the Category model."""
    class Meta:
        model = Category
        fields = "__all__"
class ImageSerializer(serializers.ModelSerializer):
    """Serializes all fields of the Images model."""
    class Meta:
        model = Images
        fields = "__all__"
class ProductSerializer(serializers.ModelSerializer):
    """Serializes all Product fields, stripping HTML from `detail`."""
    class Meta:
        model = Product
        fields = "__all__"
    def to_representation(self, instance):
        data = super().to_representation(instance)
        # Expose `detail` as plain text: the stored value may contain HTML.
        data['detail'] = strip_tags(instance.detail)
        return data
class PropertySerializer(serializers.ModelSerializer):
    """Serializes all fields of the Property model."""
    class Meta:
        model = Property
        fields = "__all__"
class RatingSerializer(serializers.ModelSerializer):
    """Serializes all fields of the Rating model."""
    class Meta:
        model = Rating
        fields = "__all__"
class PriceSerializer(serializers.ModelSerializer):
    """Serializes all fields of the Price model."""
    class Meta:
        model = Price
        fields = "__all__"
|
4,596 | fb9ae5b3cdeac0c254669e214779ad43a02bff6d | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization import WALSMatrixFactorization
tf.logging.set_verbosity(tf.logging.INFO)
import os
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from tensorflow.contrib.factorization import WALSMatrixFactorization
import os
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from tensorflow.contrib.factorization import WALSMatrixFactorization
def read_dataset(mode, args):
    """Build an input_fn feeding WALS with sparse row/column data.

    Args:
        mode: tf.estimator.ModeKeys; TRAIN repeats indefinitely, any other
            mode produces a single epoch.
        args: dict with "input_path", "batch_size", "nusers" and "nitems".

    Returns:
        A zero-argument input_fn yielding (features, None).

    Note: an unreachable `def input_cols(): ...` / duplicate `return
    _input_fn` that followed the final return has been removed (dead code).
    """
    def decode_example(protos, vocab_size):
        # Parse one serialized Example into a SparseTensor of ratings and
        # append a sentinel entry at [key, 0] so the row id survives batching.
        features = {
            "key": tf.FixedLenFeature(shape = [1], dtype = tf.int64),
            "indices": tf.VarLenFeature(dtype = tf.int64),
            "values": tf.VarLenFeature(dtype = tf.float32)}
        parsed_features = tf.parse_single_example(serialized = protos, features = features)
        values = tf.sparse_merge(sp_ids = parsed_features["indices"], sp_values = parsed_features["values"], vocab_size = vocab_size)
        # Save key to remap after batching
        # This is a temporary workaround to assign correct row numbers in each batch.
        # You can ignore details of this part and remap_keys().
        key = parsed_features["key"]
        decoded_sparse_tensor = tf.SparseTensor(
            indices = tf.concat(values = [values.indices, [key]], axis = 0),
            values = tf.concat(values = [values.values, [0.0]], axis = 0),
            dense_shape = values.dense_shape)
        return decoded_sparse_tensor

    def remap_keys(sparse_tensor):
        # Rewrite batch-local row indices back to the original user/item ids
        # carried by the sentinel entries added in decode_example.
        # Current indices of our SparseTensor that we need to fix
        bad_indices = sparse_tensor.indices # shape = (current_batch_size * (number_of_items/users[i] + 1), 2)
        # Current values of our SparseTensor that we need to fix
        bad_values = sparse_tensor.values # shape = (current_batch_size * (number_of_items/users[i] + 1),)
        # Since batch is ordered, the last value for a batch index is the user
        # Find where the batch index chages to extract the user rows
        # 1 where user, else 0
        user_mask = tf.concat(values = [bad_indices[1:,0] - bad_indices[:-1,0], tf.constant(value = [1], dtype = tf.int64)], axis = 0) # shape = (current_batch_size * (number_of_items/users[i] + 1), 2)
        # Mask out the user rows from the values
        good_values = tf.boolean_mask(tensor = bad_values, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],)
        item_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],)
        user_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 1))[:, 1] # shape = (current_batch_size,)
        good_user_indices = tf.gather(params = user_indices, indices = item_indices[:,0]) # shape = (current_batch_size * number_of_items/users[i],)
        # User and item indices are rank 1, need to make rank 1 to concat
        good_user_indices_expanded = tf.expand_dims(input = good_user_indices, axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1)
        good_item_indices_expanded = tf.expand_dims(input = item_indices[:, 1], axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1)
        good_indices = tf.concat(values = [good_user_indices_expanded, good_item_indices_expanded], axis = 1) # shape = (current_batch_size * number_of_items/users[i], 2)
        remapped_sparse_tensor = tf.SparseTensor(indices = good_indices, values = good_values, dense_shape = sparse_tensor.dense_shape)
        return remapped_sparse_tensor

    def parse_tfrecords(filename, vocab_size):
        if mode == tf.estimator.ModeKeys.TRAIN:
            num_epochs = None # indefinitely
        else:
            num_epochs = 1 # end-of-input after this
        files = tf.gfile.Glob(filename = os.path.join(args["input_path"], filename))
        # Create dataset from file list
        dataset = tf.data.TFRecordDataset(files)
        dataset = dataset.map(map_func = lambda x: decode_example(x, vocab_size))
        dataset = dataset.repeat(count = num_epochs)
        dataset = dataset.batch(batch_size = args["batch_size"])
        dataset = dataset.map(map_func = lambda x: remap_keys(x))
        return dataset.make_one_shot_iterator().get_next()

    def _input_fn():
        features = {
            WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords("items_for_user", args["nitems"]),
            WALSMatrixFactorization.INPUT_COLS: parse_tfrecords("users_for_item", args["nusers"]),
            WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)
        }
        return features, None

    return _input_fn
def find_top_k(user, item_factors, k):
    """Return the indices (int64, shape [1, k]) of the k items whose factor
    vectors have the largest dot product with `user`'s factor vector."""
    all_items = tf.matmul(a = tf.expand_dims(input = user, axis = 0), b = tf.transpose(a = item_factors))
    topk = tf.nn.top_k(input = all_items, k = k)
    return tf.cast(x = topk.indices, dtype = tf.int64)
def batch_predict(args):
    """Compute top-K recommendations for every user and write them to
    batch_pred.txt in output_dir (one comma-separated line of item ids
    per user)."""
    import numpy as np
    with tf.Session() as sess:
        estimator = tf.contrib.factorization.WALSMatrixFactorization(
            num_rows = args["nusers"],
            num_cols = args["nitems"],
            embedding_dimension = args["n_embeds"],
            model_dir = args["output_dir"])
        # This is how you would get the row factors for out-of-vocab user data
        # row_factors = list(estimator.get_projections(input_fn=read_dataset(tf.estimator.ModeKeys.EVAL, args)))
        # user_factors = tf.convert_to_tensor(np.array(row_factors))
        # But for in-vocab data, the row factors are already in the checkpoint
        user_factors = tf.convert_to_tensor(value = estimator.get_row_factors()[0]) # (nusers, nembeds)
        # In either case, we have to assume catalog doesn"t change, so col_factors are read in
        item_factors = tf.convert_to_tensor(value = estimator.get_col_factors()[0])# (nitems, nembeds)
        # For each user, find the top K items
        topk = tf.squeeze(input = tf.map_fn(fn = lambda user: find_top_k(user, item_factors, args["topk"]), elems = user_factors, dtype = tf.int64))
        with file_io.FileIO(os.path.join(args["output_dir"], "batch_pred.txt"), mode = 'w') as f:
            for best_items_for_user in topk.eval():
                f.write(",".join(str(x) for x in best_items_for_user) + '\n')
def train_and_evaluate(args):
    """Train the WALS model via a tf.contrib.learn Experiment, then run
    batch_predict.

    Step counts are derived so that `num_epochs` passes over `nusers` rows
    are made, evaluating once per epoch.
    """
    train_steps = int(0.5 + (1.0 * args["num_epochs"] * args["nusers"]) / args["batch_size"])
    steps_in_epoch = int(0.5 + args["nusers"] / args["batch_size"])
    print("Will train for {} steps, evaluating once every {} steps".format(train_steps, steps_in_epoch))
    def experiment_fn(output_dir):
        # Bundle the estimator with its train/eval input functions.
        return tf.contrib.learn.Experiment(
            tf.contrib.factorization.WALSMatrixFactorization(
                num_rows = args["nusers"],
                num_cols = args["nitems"],
                embedding_dimension = args["n_embeds"],
                model_dir = args["output_dir"]),
            train_input_fn = read_dataset(tf.estimator.ModeKeys.TRAIN, args),
            eval_input_fn = read_dataset(tf.estimator.ModeKeys.EVAL, args),
            train_steps = train_steps,
            eval_steps = 1,
            min_eval_frequency = steps_in_epoch
        )
    from tensorflow.contrib.learn.python.learn import learn_runner
    learn_runner.run(experiment_fn = experiment_fn, output_dir = args["output_dir"])
    batch_predict(args)
4,597 | 0efac7d9d1a9180eafa8c9c4e3a42b4c68e718a2 | ##Linear Queue Data Structure
#Main Queue Class
class LinearQueue():
    """Fixed-capacity FIFO queue backed by a plain list.

    Dequeued slots are never reclaimed: once the tail reaches the last
    slot the queue reports full forever, even after dequeues -- classic
    linear-queue (as opposed to circular-queue) behaviour.
    """

    def __init__(self, length):
        # Head and tail start at -1, i.e. pointing at nothing (0-based indices).
        self._length = length
        self._head = self._tail = -1
        self._queue = [None] * self._length

    def enqueue(self, *args):
        """Append the given values in order, silently dropping the rest
        once the queue is full."""
        for value in args:
            if self.isFull():
                break
            self._tail += 1
            self._queue[self._tail] = value

    def dequeue(self):
        """Return the oldest unread value, or None when nothing is left.

        Values are not physically removed; the head pointer just moves past
        them, emulating a hardware-style linear queue.
        """
        if self.isEmpty() or self._tail == self._head:
            return None
        self._head += 1
        self._dequeueValue = self._queue[self._head]
        return self._dequeueValue

    def isFull(self):
        # Full once the tail occupies the final slot.
        return self._tail == (self._length - 1)

    def isEmpty(self):
        # Empty only while nothing has ever been enqueued.
        return self._tail == self._head == -1
# Smoke test with a queue of length 5 named 'q': fill, drain, refill past
# capacity, and show that a linear queue stays "full" after draining.
print("Creating Linear Queue of 5 with No Values")
q = LinearQueue(5)
print("empty",q.isEmpty())
print("Enqueuing 1, 2, 3")
q.enqueue(1, 2, 3)
print("full",q.isFull())
print("empty",q.isEmpty())
print("dequeuing 1, 2, 3")
for i in range(0,3):
    print("dequeuing",q.dequeue())
print("empty",q.isEmpty())
print("Enqueuing 4, 5")
q.enqueue(4, 5)
print("full",q.isFull())
print("empty",q.isEmpty())
print("dequeuing all")
for i in range(0,2):
    print("dequeuing",q.dequeue())
print("full",q.isFull())
print("empty",q.isEmpty())
print("dequeuing extra value (should return None)")
print("dequeuing",q.dequeue())
|
4,598 | 80d49b24a2233569a340cee918393b1663c3d55d | import inspect
import threading
from monitor.mutex import Mutex, mutex_hooks
from monitor.condition import Condition, condition_hooks
from monitor.shared_variables import SharedList, SharedDict, shared_auto, \
variable_hooks
# Merge the per-module message-handler hook tables into one registry,
# consumed by the event loop in __main__.
hooks = {}
for h in [mutex_hooks, condition_hooks, variable_hooks]:
    hooks.update(h)
def method_decorator(method):
    """Wrap a monitor method so it runs under the monitor's mutex.

    Before the call, pending remote changes are applied to every registered
    shared variable; after it, local changes are synced back out.

    BUG FIX: the original never released the mutex if `method` raised,
    deadlocking every other user of the monitor -- the body now runs in a
    try/finally.  functools.wraps preserves the wrapped method's metadata.
    """
    from functools import wraps  # local import keeps the decorator self-contained

    @wraps(method)
    def wrapped(self, *args, **kwargs):
        self._mutex.acquire()
        try:
            for var in self._variables:
                var.apply_pending_changes()
            value = method(self, *args, **kwargs)
            for var in self._variables:
                var.sync()
        finally:
            self._mutex.release()
        return value
    return wrapped
class MonitorMeta(type):
    """Metaclass wrapping every user-defined monitor method with
    method_decorator so it executes under the monitor's mutex."""
    def __init__(cls, name, bases, attrs):
        super(MonitorMeta, cls).__init__(name, bases, attrs)
        for name, method in inspect.getmembers(cls, predicate=inspect.isfunction):
            # The monitor-infrastructure entry points stay unwrapped --
            # presumably because wrapping them would acquire the mutex a
            # second time (TODO confirm the mutex is not re-entrant).
            if name not in ['wait', 'signal', 'register', 'shared',
                            'condition', '__init__', '__new__']:
                setattr(cls, name, method_decorator(method))
class ConditionWrapper:
    """Pairs a raw Condition with its monitor so waiting keeps the
    monitor's shared variables consistent across the block."""

    def __init__(self, condition, monitor):
        self.condition = condition
        self.monitor = monitor

    def wait(self):
        """Block on the condition, publishing local shared-variable state
        before sleeping and absorbing pending remote changes after waking."""
        for shared in self.monitor._variables:
            shared.sync()
        self.condition.wait()
        for shared in self.monitor._variables:
            shared.apply_pending_changes()

    def signal(self):
        """Wake a waiter on the underlying condition."""
        self.condition.signal()
class MonitorBase(object, metaclass=MonitorMeta):
    """Base class for monitors: owns a named mutex plus the shared variables
    and conditions registered against it."""
    # Class-level counters used to mint unique names for mutexes, shared
    # variables and conditions.
    _monitor_counter = 0
    _variable_counter = 0
    _condition_counter = 0
    def __new__(cls, *args, **kwargs):
        # Allocate the per-instance mutex before __init__ runs, so wrapped
        # methods can rely on it.
        # NOTE(review): object.__new__ rejects extra positional args in
        # Python 3 when __init__ is overridden -- passing *args/**kwargs
        # through may TypeError for subclasses with constructor args; confirm.
        obj = super(MonitorBase, cls).__new__(cls, *args, **kwargs)
        cls._monitor_counter += 1
        mutex_name = 'mutex-{}-{}'.format(cls.__name__, cls._monitor_counter)
        obj._mutex = Mutex(mutex_name)
        obj._variables = []
        return obj
    def wait(self, condition):
        # Delegate to the (wrapped) condition; left unwrapped by MonitorMeta.
        condition.wait()
    def signal(self, condition):
        condition.signal()
    def register(self, variables):
        """Attach externally created shared variables to this monitor."""
        self._variables.extend(variables)
    def shared(self, data):
        """Create, register and return a shared variable holding `data`."""
        self.__class__._variable_counter += 1
        name = 'variable-{}-{}'.format(self.__class__.__name__, self.__class__._variable_counter)
        var = shared_auto(name, data)
        self._variables.append(var)
        return var
    def condition(self):
        """Create a new ConditionWrapper bound to this monitor's mutex."""
        self.__class__._condition_counter += 1
        name = 'condition-{}-{}'.format(self.__class__.__name__, self.__class__._condition_counter)
        c = ConditionWrapper(Condition(self._mutex, name), self)
        return c
class Monitor(MonitorBase):
    """Example monitor: one shared list and one condition variable."""
    def __init__(self):
        # self.s1 = SharedList('s1', [1,2,3])
        # self.register([self.s1])
        self.s1 = self.shared([1,2,3])
        self.c = self.condition()
    def test(self):
        # NOTE(review): wait()/signal() are called with the string "aaa"
        # rather than a condition object -- this would fail if executed;
        # presumably demo/debug leftovers.
        self.wait("aaa")
        print("test")
        self.signal("aaa")
        return 1
    def abc(self):
        print("abc")
        return 2
    def seq(self):
        # NOTE(review): `rank` is not defined anywhere in this module --
        # calling seq() raises NameError (looks like an MPI-style leftover).
        for i in range(10):
            print(rank, i)
    def list_append(self, elem):
        self.s1.append(elem)
    def list_print(self):
        print(self.s1)
if __name__ == '__main__':
    import time
    from monitor.main import event_loop, send_exit
    m = Monitor()
    # Run the message-handling event loop in the background while the main
    # thread exercises the monitor.
    event_loop_thread = threading.Thread(target=event_loop, args=(hooks,))
    event_loop_thread.start()
    # print(m._mutex)
    # while True:
    #     m.seq()
    m.list_append(5)
    time.sleep(1)
    m.list_print()
    # Ask the event loop to stop, then wait for it to finish.
    send_exit()
    event_loop_thread.join()
|
4,599 | 9d37d1618fb9d00d63b7ed58290c5ba1b8f106cd | import numpy
#calculate field of simple
def dipole(x, y, z, dx, dy, dz, mx, my, mz):
    """Field components (Bx, By, Bz) of a point dipole.

    (x, y, z) is the observation point, (dx, dy, dz) the dipole position and
    (mx, my, mz) its moment.  Uses the standard 3r(m.r)/|r|^5 - m/|r|^3 form
    (unit prefactor).
    """
    rx, ry, rz = x - dx, y - dy, z - dz
    R = rx**2 + ry**2 + rz**2          # squared distance |r|^2
    proj = rx*mx + ry*my + rz*mz       # m . r
    return (3.0*rx*proj / R**2.5 - mx/R**1.5,
            3.0*ry*proj / R**2.5 - my/R**1.5,
            3.0*rz*proj / R**2.5 - mz/R**1.5)
#calculaion only one component of dipole
def dipoleX(x, y, z, dx, dy, dz, mx, my, mz):
    """X component of the point-dipole field at (x, y, z)."""
    rx, ry, rz = x - dx, y - dy, z - dz
    R = rx**2 + ry**2 + rz**2
    return 3.0*rx * (rx*mx + ry*my + rz*mz) / R**2.5 - mx/R**1.5
def dipoleY(x, y, z, dx, dy, dz, mx, my, mz):
    """Y component of the point-dipole field at (x, y, z)."""
    rx, ry, rz = x - dx, y - dy, z - dz
    R = rx**2 + ry**2 + rz**2
    return 3.0*ry * (rx*mx + ry*my + rz*mz) / R**2.5 - my/R**1.5
def dipoleZ(x, y, z, dx, dy, dz, mx, my, mz):
    """Z component of the point-dipole field at (x, y, z)."""
    rx, ry, rz = x - dx, y - dy, z - dz
    R = rx**2 + ry**2 + rz**2
    return 3.0*rz * (rx*mx + ry*my + rz*mz) / R**2.5 - mz/R**1.5
#calculate field caused by crack from array of coordinates and magntization of crack parts
def crack(x, y, z, coordinates, magnetization):
    """Total field at (x, y, z) from a crack modelled as a chain of dipoles.

    `coordinates` and `magnetization` are parallel sequences of 3-component
    positions and moments; contributions are summed into a numpy array.
    """
    total = numpy.zeros(3)
    for it in range(len(coordinates)):
        pos = coordinates[it]
        mag = magnetization[it]
        total += numpy.array(dipole(x, y, z, pos[0], pos[1], pos[2],
                                    mag[0], mag[1], mag[2]))
    return total
#generator of crack parts coordinates and magntization
def crackGenerator(funcCoord, funcMagn, crackLen=30, paramBouns=None):
    """Sample crack-segment positions and magnetizations along a curve.

    Args:
        funcCoord: t -> (x, y, z) position of the crack at parameter t.
        funcMagn: t -> (mx, my, mz) magnetization at parameter t.
        crackLen: number of samples along the parameter interval.
        paramBouns: [start, stop] parameter bounds; defaults to [0, 1].
            (Was a mutable default argument `[0, 1]` -- replaced by the
            None-sentinel idiom; behaviour is unchanged.)

    Returns:
        (coordinates, magnetization): two parallel lists of 3-tuples.
    """
    if paramBouns is None:
        paramBouns = [0, 1]
    coordinates = []
    magnetization = []
    for t in numpy.arange(paramBouns[0], paramBouns[1], (paramBouns[1]-paramBouns[0])/crackLen):
        coordinates.append(funcCoord(t))
        magnetization.append(funcMagn(t))
    return coordinates, magnetization
#generates one random crack in volume vol
def randomCrackExampleLinearModel(vol):
    """Generate one random straight-line crack inside the volume `vol`.

    Assumes vol = (x0, y0, z0, size_x, size_y, size_z) -- origin plus
    extents; NOTE(review): inferred from the index usage below, confirm
    with callers.  Crack extent is capped at a fifth of the volume per axis,
    and the magnetization grows randomly along the crack.
    """
    sizeMax = (vol[3]/5,vol[4]/5,vol[5]/5)
    # Per-axis random (offset, direction) parameters.
    coordParams = numpy.random.rand(3,2)
    return crackGenerator(lambda t:(coordParams[0][0]*vol[3]+vol[0]+t*coordParams[0][1]*sizeMax[0],
                                    coordParams[1][0]*vol[4]+vol[1]+t*coordParams[1][1]*sizeMax[1],
                                    coordParams[2][0]*vol[5]+vol[2]+t*coordParams[2][1]*sizeMax[2]),
                          lambda t: (0,0,10+numpy.random.rand()*t))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.