index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
12,000 | bb488d6eeceb0f3ce048fccf4b5a2773b11f51b1 | import numpy as np
a=np.array([1,2,3,4])
print(a)
print("aaaaa")
print("bbbbb")
print("aaaaaaaaa") |
12,001 | c795ecb6183938e93399af9520fa75bc71bef8f5 | import numpy as np
import abc
class Env:
def __init__(self):
pass
def __str__(self):
pass
def reset(self):
pass
|
12,002 | a37c363cce26ca64fea280c8252b02da6f388491 | import pymysql
db = pymysql.connect("192.168.80.134", "sczhan", "Sczhan@1998.", "ceshi", 3306)
cursor = db.cursor()
# 创建删除语句
sql = "delete from ceshibiao where INCOME > '%f'"%(9999)
try:
cursor.execute(sql)
db.commit()
print("执行成功")
except Exception as e:
db.rollback()
print("执行失败:" + e)
db.close() |
12,003 | ed14fdc37c79c16ad66de498eea604b32c65871f | from . import hetero, image, sequence
from .utils import get_embedding, get_dense_block, bi2uni_dir_rnn_hidden |
12,004 | 561ca542be0c6b735e385f2a3c7481d26340c196 | #!user/bin/env python
"""
Created on 2015/7/21
Author:LiQing
QQ:626924971
Tel:*********
Function:get spider conf
"""
import os
import ConfigParser
import xml.dom.minidom
from logger import logger
class get_conf():
@classmethod
def find(cls, method):
if method[0] == "http_header":
header = {}
try:
DOMTree = xml.dom.minidom.parse("../../../configures/http_header/http_header.xml")
Data = DOMTree.documentElement
http_headers = Data.getElementsByTagName("image")
if method[1] == "baidu":
for http_header in http_headers:
if http_header.getAttribute("name") == "baidu":
header["Cache-Control"] = http_header.getElementsByTagName('Cache-Control')[0].childNodes[0].data.encode("utf-8")
header["Host"] = http_header.getElementsByTagName('Host')[0].childNodes[0].data.encode("utf-8")
header["Referer"] = http_header.getElementsByTagName('Referer')[0].childNodes[0].data.encode("utf-8")
header["Upgrade-Insecure-Requests"] = http_header.getElementsByTagName('Upgrade-Insecure-Requests')[0].childNodes[0].data.encode("utf-8")
header["Cookie"] = http_header.getElementsByTagName('Cookie')[0].childNodes[0].data.encode("utf-8")
elif method[1] == "sougou":
for http_header in http_headers:
if http_header.getAttribute("name") == "sougou":
header["Accept"] = http_header.getElementsByTagName('Accept')[0].childNodes[0].data.encode("utf-8")
header["Accept-Language"] = http_header.getElementsByTagName('Accept-Language')[0].childNodes[0].data.encode("utf-8")
header["Host"] = http_header.getElementsByTagName('Host')[0].childNodes[0].data.encode("utf-8")
header["Referer"] = http_header.getElementsByTagName('Referer')[0].childNodes[0].data.encode("utf-8")
header["User-Agent"] = http_header.getElementsByTagName('User-Agent')[0].childNodes[0].data.encode("utf-8")
header["X-Requested-With"] = http_header.getElementsByTagName('X-Requested-With')[0].childNodes[0].data.encode("utf-8")
header["Cookie"] = http_header.getElementsByTagName('Cookie')[0].childNodes[0].data.encode("utf-8")
elif method[1] == "360":
for http_header in http_headers:
if http_header.getAttribute("name") == "360":
header["Accept"] = http_header.getElementsByTagName('Accept')[0].childNodes[0].data.encode("utf-8")
header["Accept-Language"] = http_header.getElementsByTagName('Accept-Language')[0].childNodes[0].data.encode("utf-8")
header["Host"] = http_header.getElementsByTagName('Host')[0].childNodes[0].data.encode("utf-8")
header["Referer"] = http_header.getElementsByTagName('Referer')[0].childNodes[0].data.encode("utf-8")
header["User-Agent"] = http_header.getElementsByTagName('User-Agent')[0].childNodes[0].data.encode("utf-8")
header["X-Requested-With"] = http_header.getElementsByTagName('X-Requested-With')[0].childNodes[0].data.encode("utf-8")
header["Cookie"] = http_header.getElementsByTagName('Cookie')[0].childNodes[0].data.encode("utf-8")
elif method[1] == "common":
for http_header in http_headers:
if http_header.getAttribute("name") == "common":
header["Accept"] = http_header.getElementsByTagName('Accept')[0].childNodes[0].data.encode("utf-8")
header["Accept-Encoding"] = http_header.getElementsByTagName('Accept-Encoding')[0].childNodes[0].data.encode("utf-8")
header["Accept-Language"] = http_header.getElementsByTagName('Accept-Language')[0].childNodes[0].data.encode("utf-8")
header["Connection"] = http_header.getElementsByTagName('Connection')[0].childNodes[0].data.encode("utf-8")
header["User-Agent"] = http_header.getElementsByTagName('User-Agent')[0].childNodes[0].data.encode("utf-8")
return header
except Exception as e:
logger.error(e)
elif method[0] == "image":
image = {}
try:
config = "../../../configures/image_conf/image.conf"
cf = ConfigParser.ConfigParser()
cf.read(config)
image["timeout"] = cf.get("spider", "timeout")
image["time_wait"] = cf.get("spider", "time_wait")
image["try_cnt"] = cf.get("spider", "try_cnt")
image["image_cnt"] = cf.get("spider", "image_cnt")
image["spider_source"] = cf.get("spider", "spider_source")
image["download_image_process_cnt"] = cf.get("spider", "download_image_process_cnt")
image["max_query_process_cnt"] = cf.get("spider", "max_query_process_cnt")
return image
except Exception as e:
logger.error(e)
elif method[0] == "url":
url = {}
try:
config = "../../../configures/image_conf/image.conf"
cf = ConfigParser.ConfigParser()
cf.read(config)
if method[1] == "baidu":
url["url"] = cf.get("baidu", "url")
elif method[1] == "sougou":
url["url"] = cf.get("sougou", "url")
elif method[1] == "360":
url["url"] = cf.get("360", "url")
return url
except Exception as e:
logger.error(e)
elif method[0] == "file":
file_home = {}
try:
config = "../../../configures/image_conf/image.conf"
cf = ConfigParser.ConfigParser()
cf.read(config)
conf_path_abspath = os.path.join(os.getcwd(), config)
file_home["image_path"] = os.path.join(os.path.dirname(conf_path_abspath), cf.get("file_home", "image_path"))
file_home["query_home"] = os.path.join(os.path.dirname(conf_path_abspath), cf.get("file_home", "query_home"))
file_home["finished_home"] = os.path.join(os.path.dirname(conf_path_abspath), cf.get("file_home", "finished_home"))
file_home["error_home"] = os.path.join(os.path.dirname(conf_path_abspath), cf.get("file_home", "error_home"))
file_home["download_record_home"] = os.path.join(os.path.dirname(conf_path_abspath), cf.get("file_home", "download_record_home"))
return file_home
except Exception as e:
logger.error(e)
else:
raise NoMethod(method)
def NoMethod(method):
logger.error("[find]get_conf::find Not have %s method" % str(method))
if __name__ == "__main__":
get_conf.find(("http_header", "common")) |
12,005 | d967c5c52caf5573130646c3caf0d544945cfad4 | from airtravel import *
aircraft = Aircraft("G-EUT", "Airbus A319", num_rows=22, num_seats_per_row=6)
f = Flight("BA758", aircraft)
f.allocate_seat('12A', "Ankita singh")
f.allocate_seat('12A', "Ankit Pandey")
f.allocate_seat('15A', "Andres Chao")
f.allocate_seat('15E', "Killi Sahng")
f.allocate_seat('E27', "Amy Lanistor")
f.allocate_seat('1C', "Richa Sharama")
f.allocate_seat('1D', "Ricky Bhat")
|
12,006 | d8d0bda355a60507a7e811c2b0ee57ea0dfd6c9a | """
Some scaffolding to help with testing
context = the unit text class (self)
"""
from rest_framework.authtoken.models import Token
from rest_framework.test import (
APIClient,
ForceAuthClientHandler
)
from django.contrib.auth.models import User
from ..models.author import (
Author,
CachedAuthor)
from ..models.content import Post, Comment
from api_settings import settings
import uuid
import json
import base64
import os
import shutil
ACL_DEFAULT = "PUBLIC"
# Post attributes
TEXT = "Some post text"
TEST_FOLDER_RELATIVE = "/../tests"
class SocialAPIClient(APIClient):
"""
Create APIClient with token credentials for given author.
token (boolean) representing to creat token auth credentials.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(APIClient, self).__init__(**defaults)
self.handler = ForceAuthClientHandler(enforce_csrf_checks)
self._credentials = {}
def token_credentials(self, author):
token, created = Token.objects.get_or_create(user = author.user)
return self.credentials(HTTP_AUTHORIZATION=("Token %s" % token))
def basic_credentials(self, username, password):
basic = base64.b64encode('%s:%s' %(username, password))
return self.credentials(HTTP_AUTHORIZATION=("Basic %s" % basic))
def bad_credentials(self, token = True):
"""Authorization header credentials are not valid"""
# Should not match up with anything in the database
bad_creds = str(uuid.uuid4())
if token:
return self.credentials(HTTP_AUTHORIZATION=("Token %s" % bad_creds))
else:
return self.credentials(HTTP_AUTHORIZATION=("Basic %s" % bad_creds))
def get_image_base64(path):
"""
Returns a base64 encoded image
"""
with open(path, 'r') as img:
return base64.b64encode(img.read())
def get_test_image():
return get_image_base64(
os.path.dirname(__file__) + TEST_FOLDER_RELATIVE + '/fixtures/images/s.jpg'
)
def pretty_print(data):
"""Pretty prints a dictionary object"""
print json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
def create_author(user_dict, author_dict):
"""Create and return an author for testing
Takes:
A defined author dictionary holding all author parameters,
Access control list (or defaults to ACL_DEFAULT)
Returns: User and Author tuple
"""
user = User.objects.create_user(**user_dict)
user.save()
author_dict['user'] = user
author = Author.objects.create(**author_dict)
author.save()
return (user, author)
def create_authenticated_author(user_dict, author_dict):
user, author = create_author(user_dict, author_dict)
client = SocialAPIClient(HTTP_ORIGIN=settings.FRONTEND_HOST)
client.token_credentials(author)
return (user, author, client)
def create_multiple_posts(author, num, ptext = TEXT, visibility = ACL_DEFAULT):
"""
Returns a list of created posts for the given author
num: number of posts to create
ptext: The text that each post will contain
"""
posts = []
for i in range(num):
posts.append(Post.objects.create(content = ptext, author = author, visibility=visibility))
return posts
def authors_in_relation(context, data, authors):
"""
Test to ensure that all authors added to relationship are in the returned data
Called after a retrieve relationship test has passed
authors: a list of authors expected to be in the relation
data: a list of guids returned from the get relationship
"""
guids = [a.id for a in authors]
guids = map( lambda x: str(x).replace('-', ''), guids)
for guid in guids:
context.assertTrue(unicode(guid) in data)
#
# Need to be redone TODO
#
# def create_requestors(_requestee, requestors):
# for r in requestors:
# FriendRequest.objects.create(requestor = r, requestee = _requestee)
# def create_followers(_followee, followers):
# for f in followers:
# FollowerRelationship.objects.create(follower = f, followee = _followee)
def create_friends(friend, friendors, create_post = True, visibility = ACL_DEFAULT):
"""
Create Friends and Friends of Friends and associated posts
Friendors: A list of author objects that will friend.
Friend: An author object to be friended.
Create_posts: If you want to create a post for each friend
visibility: acl type for each post created
"""
for friendor in friendors:
friend.add_friend(friendor)
friendor.add_friend(friend)
# FriendRelationship.objects.create(friendor = friendor, friend = friend)
if create_post:
Post.objects.create(content = TEXT, author = friendor, visibility = visibility)
def create_post_with_comment(pauthor, cauthor, visibility, ptext, ctext):
"""Takes post author, comment author and creates a post and associated comment"""
post = Post.objects.create(content = ptext, author = pauthor, visibility=visibility)
comment = Comment.objects.create(comment = ctext, post = post, author = cauthor)
return (post, comment)
def assertNoRepeatGuids(context, posts):
"""Takes response.data and confirms no repeated guids (No repeated posts)"""
guids = [p['guid'] for p in posts]
context.assertTrue(len(set(guids)) == len(posts), "Some guids repeated")
def cross_check(context, authors, poscom):
"""
Compares a list of authors against a list of displaynames
Takes:
poscom: list of posts or comments
"""
displaynames = [x['author']['displayname'] for x in poscom]
for author in authors:
if author.user.username not in displaynames:
context.assertFalse(True, "%s not in list" %author.user.username)
def assertAuthorsInPosts(context, authors, posts):
"""Cross checks a list of authors against post"""
cross_check(context, authors, posts)
def assertAuthorsInComments(context, authors, comments):
"""Cross checks a list of authors against comments"""
cross_check(context, authors, comments)
def assertNumberPosts(context, posts, expected):
context.assertEquals(len(posts["posts"]), expected,
"expected %s, got %s posts" %(expected, len(posts)))
def assertNumberComments(context, post, expected):
context.assertEquals(len(post['comments']), expected,
"expected %s, got %s comments" %(expected, len(post['comments'])))
def assertPostAuthor(context, post, author):
context.assertEquals(post["author"]["displayname"], author.user.username,
'Post author incorrect')
def assertCommentAuthor(context, comment, author):
context.assertEquals(comment["author"]["displayname"], author.user.username,
'Comment author incorrect')
def assertPostContent(context, post, content):
context.assertEquals(post["content"], content, "Post text does not match")
def assertACLPermission(context, post, permission):
context.assertEquals(post['visibility'], str(permission),
"expected %s, got %s" %(permission, post['visibility']))
def assertSharedUser(context, post, author):
context.assertTrue(unicode(author.id) in post["acl"]["shared_users"],
"author not in shared users list")
def assertPostTitle(context, post, title):
context.assertEquals(post['title'], title, "Title did not match")
def assertUserNotExist(context, name):
try:
user = User.objects.get(username = name)
context.assertFalse(True, "User should not exist")
except:
pass
def assertUserExists(context, name):
try:
user = User.objects.get(username = name)
except:
context.assertFalse(True, "User should exist")
def assertCachedAuthorExists(context, guid):
try:
author = CachedAuthor.objects.get(id = guid)
except:
context.assertFalse(True, "CachedAuthor should exist")
def assertFollower(context, follower, guid):
context.assertEquals(follower.id, guid)
def assertNumberFollowers(context, followers, expected):
context.assertEquals(len(followers['followers']), expected,
"expected %s, got %s comments" %(expected, len(followers['followers'])))
def create_cached_author_followers(author, followers):
"""Takes a list of cachedauthors and adds them to the author follower list"""
for f in followers:
author.followers.add(f)
def create_multiple_cached_authors(amount, host, username):
authors = []
for i in range(amount):
cached = CachedAuthor(id = uuid.uuid4(), host = host, displayname = username)
cached.save()
authors.append(cached)
return authors
def enable_author(username):
author = Author.objects.get(user__username=username)
author.enabled = True
author.save()
|
12,007 | 684b75a27d8acd214e2b31bfdbb9e97ea7a22111 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
In here we put all things util (methods, code snipets) that are often useful, but not yet in AiiDA
itself.
So far it contains:
export_extras
import_extras
delete_nodes
delete_trash
create_group
"""
# TODO import, export of descriptions, and labels...?
import json
from aiida import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
load_dbenv()
from aiida.orm import Code, DataFactory, load_node
from aiida.orm.querybuilder import QueryBuilder, Node
from aiida.orm import Group
__copyright__ = (u"Copyright (c), 2016, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.27"
__contributors__ = "Jens Broeder"
RemoteData = DataFactory('remote')
ParameterData = DataFactory('parameter')
FleurInpData = DataFactory('fleur.fleurinp')
def export_extras(nodes, filename='node_extras.txt'):
"""
writes uuids and extras of given nodes to a file (json).
This is useful for import/export because currently extras are lost.
Therefore this can be used to save and restore the extras on the nodes.
:param: nodes: list of AiiDA nodes, pks, or uuids
:param: filename, string where to store the file and its name
example use:
node_list = [120,121,123,46]
export_extras(node_list)
"""
#outstring = ''#' node uuid | extras \n'
outdict = {}
for node in nodes:
if isinstance(node, int): #pk
node = load_node(node)
elif isinstance(node, basestring): #uuid
node = load_node(node)
if not isinstance(node, Node):
print('skiped node {}, is not an AiiDA node, did not know what to do.'.format(node))
continue
uuid = node.uuid
extras_dict = node.get_extras()
outdict[uuid] = extras_dict
#line = '{} | {}\n'.format(uuid, extras_dict)
#outstring = outstring + line
#outfile = open(filename, 'w')
#outfile.write(outstring)
#outfile.close()
json.dump(outdict, open(filename,'w'))
return
def import_extras(filename):
"""
reads in nodes uuids and extras from a file and aplies them to nodes in the DB.
This is useful for import/export because currently extras are lost.
Therefore this can be used to save and restore the extras on the nodes.
:param: filename, string what file to read from (has to be json format)
example use:
import_extras('node_extras.txt')
"""
all_extras = {}
# read file
#inputfile = open(filename, 'r')
#lines = inputfile.readlines()
#for line in lines[1:]:
# splitted = line.split(' | ')
# uuid = splitted[0].rstrip(' ')
# extras = splitted[1].rstrip(' ')
# #extras = dict(extras)
# print(extras)
# all_extras[uuid] = extras
#inputfile.close()
try:
all_extras = json.load(open(filename))
except:
print('The file has to be loadabel by json. i.e json format (which it is not).')
for uuid, extras in all_extras.iteritems():
try:
node = load_node(uuid)
except:
# Does not exists
print('node with uuid {} does not exist in DB'.format(uuid))
node = None
continue
if isinstance(node, Node):
node.set_extras(extras)
else:
print('node is not instance of an AiiDA node')
#print(extras)
return
def delete_nodes(pks_to_delete):
"""
Delete a set of nodes. (From AiiDA cockbook)
Note: TODO this has to be improved for workfchain removal. (checkpoints and co)
Also you will be backchecked.
BE VERY CAREFUL!
:note: The script will also delete
all children calculations generated from the specified nodes.
:param pks_to_delete: a list of the PKs of the nodes to delete
"""
from django.db import transaction
from django.db.models import Q
from aiida.backends.djsite.db import models
from aiida.orm import load_node
# Delete also all children of the given calculations
# Here I get a set of all pks to actually delete, including
# all children nodes.
all_pks_to_delete = set(pks_to_delete)
for pk in pks_to_delete:
all_pks_to_delete.update(models.DbNode.objects.filter(
parents__in=pks_to_delete).values_list('pk', flat=True))
print "I am going to delete {} nodes, including ALL THE CHILDREN".format(
len(all_pks_to_delete))
print "of the nodes you specified. Do you want to continue? [y/N]"
answer = raw_input()
if answer.strip().lower() == 'y':
# Recover the list of folders to delete before actually deleting
# the nodes. I will delete the folders only later, so that if
# there is a problem during the deletion of the nodes in
# the DB, I don't delete the folders
folders = [load_node(pk).folder for pk in all_pks_to_delete]
with transaction.atomic():
# Delete all links pointing to or from a given node
models.DbLink.objects.filter(
Q(input__in=all_pks_to_delete) |
Q(output__in=all_pks_to_delete)).delete()
# now delete nodes
models.DbNode.objects.filter(pk__in=all_pks_to_delete).delete()
# If we are here, we managed to delete the entries from the DB.
# I can now delete the folders
for f in folders:
f.erase()
def delete_trash():
"""
This method deletes all AiiDA nodes in the DB, which have a extra trash=True
And all their children. Could be advanced to a garbage collector.
Be careful to use it.
"""
#query db for marked trash
q = QueryBuilder()
nodes_to_delete_pks = []
q.append(Node,
filters = {'extras.trash': {'==' : True}
}
)
res = q.all()
for node in res:
nodes_to_delete_pks.append(node[0].dbnode.pk)
print('pk {}, extras {}'.format(node[0].dbnode.pk, node[0].get_extras()))
#Delete the trash nodes
print('deleting nodes {}'.format(nodes_to_delete_pks))
delete_nodes(nodes_to_delete_pks)
return
def create_group(name, nodes, description=None):
"""
Creates a group for a given node list.
So far this is only an AiiDA verdi command.
:param name: string name for the group
:param nodes: list of AiiDA nodes, pks, or uuids
:param description, optional string that will be stored as description for the group
:return: the group, AiiDa group
usage example:
group_name = 'delta_structures_gustav'
nodes_to_goup_pks =[2142, 2084]
create_group(group_name, nodes_to_group_pks, description='delta structures added by hand. from Gustavs inpgen files')
"""
group, created = Group.get_or_create(name=name)
if created:
print('Group created with PK={} and name {}'.format(group.pk, group.name))
else:
print('Group with name {} and pk {} already exists. Do you want to add nodes?[y/n]'.format(group.name, group.pk))
answer = raw_input()
if answer.strip().lower() == 'y':
pass
else:
return
nodes2 = []
nodes2_pks = []
for node in nodes:
try:
node = int(node)
except ValueError:
pass
nodes2_pks.append(node)
try:
nodes2.append(load_node(node))
except:# NotExistentError:
pass
group.add_nodes(nodes2)
print('added nodes: {} to group {} {}'.format(nodes2_pks, group.name, group.pk))
if description:
group.description = description
return group
def get_nodes_from_group(group, return_format='uuid'):
"""
returns a list of node uuids for a given group as, name, pk, uuid or group object
"""
from aiida.orm import Group
from aiida.common.exceptions import NotExistent
nodes = []
g_nodes = []
try:
group_pk = int(group)
except ValueError:
group_pk = None
group_name = group
if group_pk is not None:
try:
str_group = Group(dbgroup=group_pk)
except NotExistent:
str_group = None
message = ('You have to provide a valid pk for a Group '
'or a Group name. Reference key: "group".'
'given pk= {} is not a valid group'
'(or is your group name integer?)'.format(group_pk))
print(message)
elif group_name is not None:
try:
str_group = Group.get_from_string(group_name)
except NotExistent:
str_group = None
message = ('You have to provide a valid pk for a Group or a Group name.'
'given group name= {} is not a valid group'
'(or is your group name integer?)'.format(group_name))
print(message)
elif isinstance(group, Group):
str_group = group
else:
str_group = None
print('I could not handle given input, either Group, pk, or group name please.')
return nodes
g_nodes = str_group.nodes
for node in g_nodes:
if return_format == 'uuid':
nodes.append(node.uuid)
elif return_format == 'pk':
nodes.append(node.pk)
return nodes
|
12,008 | 036614a0164117e2f9a02944ab808f9aab804420 | # creates lists from user input and converts the input into an int as well as a list
X = [int(x) for x in input().split()]
Y = [int(y) for y in input().split()]
# zips the 2 lists, but first squares 'x' value in list 'X' & uses that to zip with Y
squared_map = zip((x ** 2 for x in X), Y)
for x in squared_map:
print(x[0], x[1])
|
12,009 | 20e95f7300fae788ee769493e0c6e1f02bcead02 | from os import abort
from flask import Flask, request, redirect, make_response, jsonify
import json
from flask_cors import CORS
import psycopg2
app = Flask(__name__, static_folder="./build/static", template_folder="./build")
CORS(app) #Cross Origin Resource Sharing
def get_connection():
localhost = 'localhost'
port = '5432'
users = 'postgres'
dbnames = 'fr_test'
passwords = 'mwmw1225zwzw'
return psycopg2.connect(
"host=" + localhost + " port=" + port + " user=" + users + " dbname=" + dbnames + " password=" + passwords)
@app.route("/", methods=['GET', 'POST'])
def index():
with get_connection() as conn:
with conn.cursor() as cur:
if request.method == 'POST':
print(request.get_json()) #params{post_text: title}
data = request.get_json()
if data != '':
sql = "INSERT INTO todo (title) VALUES (%s)"
cur.execute(sql, (data['post_text'],))
conn.commit()
cur.execute('SELECT max(id) FROM todo')
result2 = cur.fetchall()
print(result2) # 追加されたタスクのidを取得
print(type(result2))
def returnAddId():
for i in result2:
print(str(i[0]))
return str(i[0]) #最新のidを取得
conn.commit()
redirect('/')
return returnAddId()
elif request.method == 'GET': #初期表示
cur.execute('SELECT * FROM todo') # *からidに変更
result = cur.fetchall()
print(result)
# tup_result = tuple(result)
d_result = dict(result) #selectでidのみ指定のため、dic型にできないためコメントアウトへ
print(d_result)
return d_result
else:
return abort(400)
# return returnAddId() #d_resultからresultに変更
@app.route("/delete", methods=['GET', 'POST'])
def delete():
with get_connection() as conn:
with conn.cursor() as cur:
if request.method == 'POST':
print(request.get_json()) #params{post_id: dbkey}
data = request.get_json()
if data != '':
sql = "DELETE FROM todo WHERE id = %s"
cur.execute(sql, (data['post_id'],))
conn.commit()
redirect('/')
cur.execute('SELECT * FROM todo')
result = cur.fetchall()
d_result = dict(result)
print(d_result)
return jsonify(d_result)
@app.route("/update/<int:Id>", methods=['GET','POST'])
def update(Id):
# print(Id)
with get_connection() as conn:
with conn.cursor() as cur:
if request.method == 'POST':
print(request.get_json())
data = request.get_json()
if data !='':
sql = "UPDATE todo SET title = %s WHERE id = %s"
cur.execute(sql, (data['post_text'],Id,))
conn.commit()
return redirect('/')
if __name__ == "__main__":
app.debug = True
app.run(host='127.0.0.1', port=5000) |
12,010 | d16fe708d8c0a4c424da2edd0bc3e8430c5207b7 | """
"""
from link.common import APIResponse
from link.wrappers import APIRequestWrapper, APIResponseWrapper
from requests.auth import AuthBase
import json
import requests
class SpringAuth(AuthBase):
"""
Does the authentication for Spring requests.
"""
def __init__(self, token):
# setup any auth-related data here
self.token = token
def __call__(self, r):
# modify and return the request
r.headers['Authorization'] = self.token
return r
class SpringServeAPIResponseWrapper(APIResponseWrapper):
"""
Wraps a response from the springserve api
"""
def __init__(self, wrap_name = None, response = None):
super(SpringServeAPIResponseWrapper, self).__init__(response = response,
wrap_name = wrap_name)
@property
def xml(self):
"""
This api does not return xml
"""
raise NotImplementedError('This api does not return xml')
@property
def error(self):
"""
Spring error is either an error in the wrapper response or
an error returned by the api in the json
"""
error = self._wrapped.error
if error:
return error
return self.json['response'].get('error')
@property
def error_code(self):
"""
return the error code
"""
return self.json['response'].get('error_code')
@property
def error_id(self):
"""
return the error_id
"""
return self.json['response'].get('error_code')
def noauth(self):
"""
Returns whether erorr is NOAUTH
"""
try:
# some endpoints dont return json
return self.json['response'].get('error_id') == 'NOAUTH'
except:
return False
class SpringServeAPI(APIRequestWrapper):
"""
Wrap the Spring API
"""
headers = { "Content-Type": "application/json" }
def __init__(self, wrap_name=None, base_url=None, user=None, password=None):
self._token = None
super(SpringServeAPI, self).__init__(wrap_name = wrap_name,
base_url=base_url,
user=user,
password=password,
response_wrapper =
SpringServeAPIResponseWrapper)
def authenticate(self):
"""
Write a custom auth property where we grab the auth token and put it in
the headers
"""
#it's weird i have to do this here, but the code makes this not simple
auth_json={'email':self.user, 'password':self.password}
#send a post with no auth. prevents an infinite loop
auth_response = self.post('/auth', data = json.dumps(auth_json), auth =
None)
_token = auth_response.json['token']
self._token = _token
self._wrapped.auth = SpringAuth(_token)
@property
def token(self):
"""
Returns the token from the api to tell us that we have been logged in
"""
if not self._token:
self._token = self.authenicate().token
return self._token
|
12,011 | f6b006ae9ae46f0ee0504b10773f16eea965b326 | import torch
from torch import nn, optim
from torch.optim import lr_scheduler
from torch.utils.data.dataloader import DataLoader
from torchvision import datasets
from torchvision.transforms import transforms, Normalize
class Unit(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(Unit, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, (1,3), (1, stride), (0,1)),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(out_channels, out_channels, (3,1), (stride, 1), (1,0)),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(inplace=True)
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class CNN(nn.Module):
def __init__(self, num_classes, drop_rate):
super(CNN, self).__init__()
self.layer1 = Unit(3, 16) #16 64 64
self.layer2 = Unit(16, 32, 2) #32 32 32
self.layer3 = nn.Sequential(
Unit(32, 64, 2),
nn.Dropout(drop_rate)
) #64 16 16
self.layer4 = nn.Sequential(
Unit(64, 64, 2),
nn.Dropout(drop_rate)
) #128 8 8
self.layer5 = nn.Sequential(
Unit(64, 128, 2),
nn.Dropout(drop_rate)
) #256 4 4
self.gap = nn.AdaptiveAvgPool2d((1,1))
self.linear = nn.Linear(128, num_classes)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = self.gap(x)
x = x.view(x.size(0), -1)
return self.linear(x)
if __name__ == '__main__':
epochs = 2000
num_classes = 6
best_performance = 0.8
batch_size = 200
save_path = 'model'
drop_rate= 0.3
train_data = datasets.ImageFolder('out/train', transform=transforms.Compose([
transforms.ToTensor(),
Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
]))
test_data = datasets.ImageFolder('out/test', transform=transforms.Compose([
transforms.ToTensor(),
Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
]))
train_data = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_data = DataLoader(test_data, batch_size=batch_size, shuffle=True)
device = torch.device('cuda:0')
net = CNN(num_classes, drop_rate)
net.to(device)
criterion = nn.CrossEntropyLoss()
# modify...
# optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
optimizer = optim.SGD(net.parameters(), lr=0.1)
lr_decay = lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
for epoch in range(epochs):
train_loss = 0
train_acc = 0
net = net.train()
for img, label in train_data:
img = img.to(device)
label = label.to(device)
out = net(img)
loss = criterion(out, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predict = out.max(1)
num_correct = (predict==label).sum().item()
acc = num_correct/img.shape[0]
train_acc += acc
train_loss /= len(train_data)
train_acc /= len(train_data)
test_acc = 0
net.eval()
for img, label in test_data:
img = img.to(device)
label = label.to(device)
out = net(img)
_, predict = out.max(1)
num_correct = (predict == label).sum().item()
acc = num_correct / img.shape[0]
test_acc += acc
test_acc /= len(test_data)
if test_acc > best_performance:
best_performance = test_acc
torch.save(net.state_dict(), save_path)
print('Epoch {}, training loss: {}, training accuracy: {}, testing accuracy: {}'.format(epoch+1, train_loss, train_acc, test_acc))
lr_decay.step()
|
12,012 | 589d11afc791fbf055dccf85292609f3411acfdf | def leianum(msg):
while True:
if x == 0:
try:
i = int(input(msg))
except (ValueError, TypeError):
print(f'\033[31mERRO! Digite um nº {tipo[x]}\033[m')
except (KeyboardInterrupt):
i = 0
print('Usuario preferiu não informar um nº')
return i
else:
return i
else:
try:
r = float(input(msg))
except (ValueError, TypeError):
print(f'\033[31mERRO! Digite um nº {tipo[x]}\033[m')
except (KeyboardInterrupt):
print('Usuário preferiu não informar um nº')
r = 0
return r
else:
return r
#MAIN
tipo = ['inteiro', 'real']
n = list()
for x in range(0, 2):
n.append(leianum(f'Digite um nº {tipo[x]}: '))
print(f'O nº {tipo[0]} digitado foi {n[0]}, enquanto o nº {tipo[1]} foi {n[1]}') |
12,013 | 3420e7e0be98a28192f3c88403b561fd1e7fbfce | from flask import render_template, url_for, flash, redirect, request, abort
from library import app, db, bcrypt
from library.forms import RegistrationForm, LoginForm, BookForm
from library.models import User, Book
from flask_login import login_user, current_user, logout_user, login_required
'''books = [
{
'genre': 'Fiction',
'title': 'Sun also rises',
'author': 'john dewy'
},
{
'genre': 'Mystery',
'title': 'Discover Haunted car',
'author': 'mary rose'
}
]'''
@app.route('/')
@login_required
def home():
    """Landing page: list every book in the library (login required)."""
    return render_template('home.html', books=Book.query.all())
@app.route('/about')
@login_required
def about():
    """Static 'About' page (login required)."""
    return render_template('about.html', title='About')
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Sign-up page; creates the account and redirects home on success."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RegistrationForm()
    if not form.validate_on_submit():
        # First visit or validation failure: (re)render the form.
        return render_template('register.html', title="Register", form=form)
    pw_hash = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
    new_user = User(
        username=form.username.data,
        email=form.email.data,
        password=pw_hash,
        usertype=form.usertype.data,
    )
    db.session.add(new_user)
    db.session.commit()
    flash('Your Account has been created. You are now able to login', 'success')
    return redirect(url_for('home'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log an existing user in; honors a ?next= redirect after auth."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            login_user(user, remember=form.remember.data)
            # NOTE(review): next_page comes straight from the query string and is
            # redirected to unvalidated -- a classic open-redirect risk.
            next_page = request.args.get('next')
            return redirect(next_page) if next_page else redirect(url_for('home'))
        else:
            flash('Login Unsuccessful, Please check email and password', 'danger')
    #else:
    #    flash('Login Unsuccessful, Please check email and password', 'danger')
    return render_template('login.html' ,title="Login", form=form)
@app.route('/logout')
def logout():
    """End the current session and return to the home page.

    NOTE(review): unlike the other views this route has no @login_required;
    calling it anonymously is harmless but inconsistent.
    """
    logout_user()
    return redirect(url_for('home'))
@app.route('/account')
@login_required
def account():
    """Show the logged-in user's account page."""
    return render_template('account.html' ,title="Account")
@app.route('/book/new' , methods=['GET', 'POST'])
@login_required
def new_book():
    """Create a new book owned by the current user."""
    form = BookForm()
    if form.validate_on_submit():
        # NOTE(review): the user_id field actually stores the *username*; the
        # ownership checks in update/delete compare against username too.
        book = Book(genre=form.genre.data, title=form.title.data, author=form.author.data, user_id=current_user.username)
        db.session.add(book)
        db.session.commit()
        flash('Your book has been added', 'success')
        return redirect(url_for('home'))
    return render_template('create_book.html' ,title="New Book", form=form, legend="New Book")
@app.route('/book/<int:book_id>')
def book(book_id):
    """Detail page for a single book; 404 if the id is unknown."""
    book = Book.query.get_or_404(book_id)
    return render_template('book.html', title=book.title, book=book)
@app.route('/book/<int:book_id>/update', methods=['GET', 'POST'])
@login_required
def update_book(book_id):
    """Edit a book; only its owner (matched by username) may update it."""
    book = Book.query.get_or_404(book_id)
    if book.user_id != current_user.username:
        #abort(403)
        # NOTE(review): a denial message is flashed with the 'success' category.
        flash('There is no rights to update the record', 'success')
        return redirect(url_for('home'))
    form = BookForm()
    if form.validate_on_submit():
        book.genre = form.genre.data
        book.title = form.title.data
        book.author = form.author.data
        db.session.commit()
        flash('Book updated successfully!', 'success')
        return redirect(url_for('book', book_id=book.id))
    elif request.method == 'GET':
        # Pre-fill the form with the book's current values.
        form.genre.data = book.genre
        form.title.data = book.title
        form.author.data = book.author
    return render_template('create_book.html', title='Update Book', form=form, legend="Update Book")
@app.route('/book/<int:book_id>/delete', methods=['POST'])  # bug fix: '@' was missing, so this route was never registered
@login_required
def delete_book(book_id):
    """Delete a book; only its owner (matched by username) may do so."""
    book = Book.query.get_or_404(book_id)
    if book.user_id != current_user.username:
        # Bug fix: previously this flashed "Your book has been deleted" and fell
        # through, letting any logged-in user delete someone else's book.
        # Message wording matches the denial in update_book.
        flash('There is no rights to delete the record', 'success')
        return redirect(url_for('home'))
    db.session.delete(book)
    db.session.commit()
    flash('Your book has been deleted', 'success')
    return redirect(url_for('home'))
12,014 | c612d118f13e9f35033c391ef9ecd7ebaf956642 | from workingmemory import IWorkingMemory
from knowledgebase import IKnowledgeBase
from inferencemachine import InferenceMachine
import tkinter as tk
from tkinter import messagebox
class MainWindow():
    """Tk front-end for the expert system: shows one question at a time and
    feeds the answers to the inference machine.

    NOTE(review): relies on the module-level ``root`` Tk instance created in
    the ``__main__`` block below; the flow branches by matching substrings of
    the question text, which is fragile if the wording changes.
    """
    def __init__(self):
        super().__init__()  # plain object base; kept for cooperative init
        # Wire up the three classic expert-system parts.
        self.working_memory = IWorkingMemory()
        self.knowledge_base = IKnowledgeBase(self.working_memory)
        self.inference_machine = InferenceMachine(self.working_memory, self.knowledge_base)
        self.initUI()
    def initUI(self):
        # Build the initial "start diagnosis?" screen and the Repeat menu.
        root.minsize(width=600, height=300)
        root.title('Экспертная система "На костылях"')
        self.answer_frame = tk.Frame(root)
        self.answer_frame.pack()
        self.start_label = tk.Label(self.answer_frame, text="Начать диагностику?")
        self.start_label.pack()
        self.start_button = tk.Button(self.answer_frame, text="Начать", command=lambda: self.ask_question_get_answer())
        self.start_button.pack()
        menu = tk.Menu(root)
        root.config(menu=menu)
        exe_menu = tk.Menu(menu)
        exe_menu.add_command(label="Repeat", command=lambda: self.restart())
        menu.add_cascade(label="Repeat", menu=exe_menu)
    def restart(self):
        # Hide every widget currently in the frame, then rebuild the start screen.
        for key in self.answer_frame.children:
            self.answer_frame.children[key].pack_forget()
        self.start_label = tk.Label(self.answer_frame, text="Начать диагностику?")
        self.start_label.pack()
        self.start_button = tk.Button(self.answer_frame, text="Начать", command=lambda: self.ask_question_get_answer())
        self.start_button.pack()
    def ask_question_get_answer(self):
        # Pull the next question from the inference machine and show the proper
        # widgets for it (entry+button for questions, label only for verdicts).
        self.start_label.pack_forget()
        self.start_button.pack_forget()
        question = self.inference_machine.start(self)
        self.question_label = tk.Label(self.answer_frame, text=question)
        self.question_label.pack()
        if "Так как" not in str(question):
            self.answer_entry = tk.Entry(self.answer_frame, width=40)
            self.answer_entry.pack()
            self.answer_button = tk.Button(self.answer_frame, text="Ответить", command=lambda: self.get_answer())
            self.answer_button.pack()
            if "Мы" in str(question):
                # Final statement: no answer expected.
                self.answer_entry.pack_forget()
                self.answer_button.pack_forget()
        else:
            if "диагноз" in str(question):
                # A diagnosis was reached: no answer expected.
                self.answer_entry.pack_forget()
                self.answer_button.pack_forget()
            else:
                # An intermediate rule fired: advance without user input.
                self.get_rule_worked()
                pass
    def get_rule_worked(self):
        # Acknowledge the fired rule and continue with the next question.
        self.inference_machine.set_answer()
        self.ask_question_get_answer()
    def get_answer(self):
        # Validate the typed answer ("да"/"нет", or free-form for the
        # heart-rate question), echo it, and continue the dialogue.
        answer = self.answer_entry.get()
        if str(answer) == "да" or str(answer) == "нет" or "частота сердечного ритма" in str(self.question_label.cget('text')):
            self.inference_machine.set_answer(answer)
            self.answer_entry.pack_forget()
            self.answer_button.pack_forget()
            label = tk.Label(self.answer_frame, text=answer)
            label.pack()
            self.ask_question_get_answer()
        else:
            messagebox.showinfo("Ошибка", "Введите корректный ответ")
            self.answer_entry.delete(0, 'end')
if __name__ == '__main__':
    # root must exist before MainWindow.initUI touches it.
    root = tk.Tk()
    main_window = MainWindow()
    root.mainloop()
12,015 | bef4029ccd54d9074b766cc890be17ab26c3f109 | import tensorflow as tf
import numpy as np
import pandas as pd
tf.logging.set_verbosity(tf.logging.INFO)
# Model definition
def cnn_model_fn(features, labels, mode):
    """TF1 Estimator model_fn: 1-D CNN binary classifier over 111 features.

    Returns an EstimatorSpec for PREDICT / TRAIN / EVAL modes.
    """
    # Input layer: reshape flat features to (batch, 111, 1).
    # NOTE(review): the original comment claimed "3x23, channel 1", which does
    # not match the reshape below -- confirm the intended feature layout.
    input_layer = tf.reshape(features['x'], [-1, 111, 1])
    # input_layer = features['x']
    # Conv layer 1: 64 filters, kernel size 5. (The original comment said
    # sigmoid; the code actually uses tanh.)
    conv1 = tf.layers.conv1d(
        inputs=input_layer,
        filters=64,
        kernel_size=5,
        padding='SAME',
        activation=tf.nn.tanh
    )
    # Conv layer 2: 128 filters, kernel size 3. (Original comment said 5x5/ReLU.)
    conv2 = tf.layers.conv1d(
        inputs=conv1,
        filters=128,
        kernel_size=3,
        padding='SAME',
        activation=tf.nn.tanh
    )
    # Max-pooling: window 3, stride 1 -> length shrinks from 111 to 109.
    pool2 = tf.layers.max_pooling1d(
        inputs=conv2,
        pool_size=3,
        strides=1
    )
    # Flatten (batch, 109, 128) for the dense layers.
    pool3_flat = tf.reshape(pool2, [-1, 1 * 109 * 128])
    # Fully connected layer FC1
    dense1 = tf.layers.dense(pool3_flat, units=128, activation=tf.nn.tanh)
    # dropout1
    # dropout1 = tf.layers.dropout(
    #     inputs=dense1, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Fully connected layer FC2 (disabled)
    # dense2 = tf.layers.dense(dense1, units=1024, activation=tf.nn.tanh)
    # dropout2
    # dropout2 = tf.layers.dropout(
    #     inputs=dense2, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Output layer: 2-way logits.
    logits = tf.layers.dense(inputs=dense1, units=2)
    # Predicted class index.
    predictions = tf.argmax(input=logits, axis=1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.00001)
        train_op = optimizer.minimize(
            loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(
            mode=mode, loss=loss, train_op=train_op)
    # EVAL mode: report precision/recall (F1 is computed by the caller).
    precision = tf.metrics.precision(
        labels=labels, predictions=predictions)
    recall = tf.metrics.recall(
        labels=labels, predictions=predictions)
    eval_metrics_ops = {
        'precision': precision,
        'recall': recall,
        # 'f1-score': 2 * precision * recall / (precision + recall)
    }
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metrics_ops)
# Model training
def train():
    """Train the CNN estimator on train.csv, checkpointing to model_dir."""
    train_xy = pd.read_csv('G:/dataset/a3d6_chusai_a_train/dealt_data/train_test/train.csv')
    # print(train_xy.columns)
    train_data = train_xy.drop(['user_id', 'label'], axis=1)
    # Missing labels are treated as positive (active).
    train_xy['label'].fillna(1, inplace=True)
    train_labels = train_xy['label']
    print(len(train_data.columns))
    train_data = np.array(train_data, dtype=np.float32)
    train_labels = np.array(train_labels, dtype=np.int32)
    cnn_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir='G:/models/ksci_bdc_model/feature111_cnn')
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': train_data},
        y=train_labels,
        batch_size=50,
        num_epochs=1,
        shuffle=False)
    cnn_classifier.train(
        input_fn=train_input_fn,
        steps=20000,
        # hooks=[logging_hook]
    )
    # eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    #     x={'x': train_data},
    #     y=train_labels,
    #     num_epochs=1,
    #     shuffle=False)
    # ret = cnn_classifier.evaluate(input_fn=eval_input_fn)
    # print(ret)
    # f1_score = 2 * ret['precision'] * ret['recall'] / (ret['precision'] + ret['recall'])
    # print('F1-Score: ', f1_score)
    # return f1_score
# Model evaluation
def evaluate():
    """Evaluate the checkpointed model on validate.csv and return the F1 score.

    NOTE(review): largely duplicates the data-loading code in train(); a shared
    helper would keep the two in sync.
    """
    eval_xy = pd.read_csv('G:/dataset/a3d6_chusai_a_train/dealt_data/train_test/validate.csv')
    # print(train_xy.columns)
    eval_data = eval_xy.drop(['user_id', 'label'], axis=1)
    # Missing labels are treated as positive, mirroring train().
    eval_xy['label'].fillna(1, inplace=True)
    eval_labels = eval_xy['label']
    print(len(eval_data.columns))
    eval_data = np.array(eval_data, dtype=np.float32)
    eval_labels = np.array(eval_labels, dtype=np.int32)
    cnn_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir='G:/models/ksci_bdc_model/feature111_cnn')
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=False)
    ret = cnn_classifier.evaluate(input_fn=eval_input_fn)
    print(ret)
    f1_score = 2 * ret['precision'] * ret['recall'] / (ret['precision'] + ret['recall'])
    print('F1-Score: ', f1_score)
    return f1_score
# Prediction
def predict(fname):
    """Predict on test.csv and write the user_ids classified as active to a
    text file whose name embeds *fname* (typically the F1 score)."""
    classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir='G:/models/ksci_bdc_model/feature111_cnn')
    test_x = pd.read_csv('G:/dataset/a3d6_chusai_a_train/dealt_data/train_test/test.csv')
    test_data = test_x.drop(['user_id'], axis=1)
    test_data = np.array(test_data, dtype=np.float32)
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': test_data},
        num_epochs=1,
        shuffle=False)
    predictions = classifier.predict(input_fn=test_input_fn)
    # Materialize the generator of class indices.
    pre = [k for k in predictions]
    pred = pd.DataFrame()
    pred['user_id'] = test_x['user_id']
    pred['active'] = pre
    # Keep only users predicted as class 1 (active).
    active_users = pred[(pred['active'] > 0)]['user_id']
    active_users.to_csv('G:/dataset/a3d6_chusai_a_train/prediction/predict_feature111cnn_' + fname + '.txt',
                        index=False, header=None)
def main(argv):
    """tf.app entry point: train, evaluate, then predict; the prediction file
    is named after the F1 score."""
    # train()
    # predict1('0.80')
    # evaluate()
    train()
    score = evaluate()
    # while score < 0.82:
    #     score = train()
    predict(str(score))
if __name__ == '__main__':
    tf.app.run()
12,016 | 3ad509a3ebf2441522210663da1e79d8197b4201 | from __future__ import absolute_import
from . import deffile
from .util import type_str, load_type
def get_all_msg_types(msg, skip_this=False, type_set=None):
    """Recursively collect *msg* plus every message class its fields use.

    :param msg: a ROS message class (not an instance)
    :param skip_this: when True, only the nested field types are collected
    :param type_set: accumulator set, created on the first call
    :returns: set of message classes
    """
    if type_set is None:
        type_set = set()
    if not skip_this:
        if msg in type_set:
            return type_set  # already visited: stop the recursion
        type_set.add(msg)
    for slot_type in msg._slot_types:
        if '/' not in slot_type:
            continue  # builtin field types have no nested message definition
        type_set = get_all_msg_types(load_type(slot_type), type_set=type_set)
    return type_set
def get_msg_definitions(msg, skip_this=False):
    """Build a ROSStyleDefinition for *msg* and every message type it nests.

    :param skip_this: forwarded to get_all_msg_types (exclude *msg* itself)
    :returns: list of deffile.ROSStyleDefinition objects
    """
    type_set = get_all_msg_types(msg, skip_this=skip_this)
    msg_dfns = []
    for msg_type in type_set:
        dfn = deffile.ROSStyleDefinition('msg', type_str(msg_type), ['msg'])
        # One (name, type) pair per message field.
        for field_name, field_type in zip(msg_type.__slots__, msg_type._slot_types):
            dfn.segment(0).append((field_name, field_type))
        msg_dfns.append(dfn)
    return msg_dfns
def get_definitions(services=None, topics=None, actions=None):
    """Build ROS-style definitions for the given services, topics and actions,
    plus every message type they transitively reference.

    NOTE(review): uses dict.iteritems(), so this module is Python 2 only.

    :returns: list of ROSStyleDefinition (messages first, then srv, then action)
    """
    if services is None:
        services = []
    if topics is None:
        topics = []
    if actions is None:
        actions = []
    service_dfns = []
    action_dfns = []
    msg_dfns = []
    type_set = set()
    for service in services:
        dfn = deffile.ROSStyleDefinition('srv', service.get('rostype_name'), ['request', 'response'])
        service_type = load_type(service.get('rostype_name'))
        # srvtype is a (request_fields, response_fields) pair of dicts.
        for field_name, field_type in service.get('srvtype',[None, None])[0].iteritems():
            dfn.segment(0).append((field_name, field_type))
        type_set = get_all_msg_types(service_type._request_class, skip_this=True, type_set=type_set)
        for field_name, field_type in service.get('srvtype',[None, None])[1].iteritems():
            dfn.segment(1).append((field_name, field_type))
        type_set = get_all_msg_types(service_type._response_class, skip_this=True, type_set=type_set)
        service_dfns.append(dfn)
    for action in actions:
        # Actions have three segments: goal, result, feedback.
        dfn = deffile.ROSStyleDefinition('action', action.rostype_name, ['goal', 'result', 'feedback'])
        for field_name, field_type in zip(action.rostype_goal.__slots__, action.rostype_goal._slot_types):
            dfn.segment(0).append((field_name, field_type))
        type_set = get_all_msg_types(action.rostype_goal, skip_this=True, type_set=type_set)
        for field_name, field_type in zip(action.rostype_result.__slots__, action.rostype_result._slot_types):
            dfn.segment(1).append((field_name, field_type))
        type_set = get_all_msg_types(action.rostype_result, skip_this=True, type_set=type_set)
        for field_name, field_type in zip(action.rostype_feedback.__slots__, action.rostype_feedback._slot_types):
            dfn.segment(2).append((field_name, field_type))
        type_set = get_all_msg_types(action.rostype_feedback, skip_this=True, type_set=type_set)
        action_dfns.append(dfn)
    for topic in topics:
        topic_type = load_type(topic.get('rostype_name'))
        type_set = get_all_msg_types(topic_type, type_set=type_set)
    # Emit one message definition per collected type.
    for msg_type in type_set:
        dfn = deffile.ROSStyleDefinition('msg', type_str(msg_type), ['msg'])
        for field_name, field_type in zip(msg_type.__slots__, msg_type._slot_types):
            dfn.segment(0).append((field_name, field_type))
        msg_dfns.append(dfn)
    return msg_dfns + service_dfns + action_dfns
def manifest(services, topics, actions, full=False):
    """Build a Node manifest DefFile listing services, actions and topics.

    NOTE(review): uses dict.iteritems()/itervalues(), so Python 2 only.

    :param full: when True, embed the full type definitions as well
    :returns: deffile.DefFile
    """
    dfile = deffile.DefFile()
    dfile.manifest.def_type = 'Node'
    if services:
        service_section = deffile.INISection('Services')
        for service_name, service in services.iteritems():
            service_section.fields[service_name] = service.rostype_name
        dfile.add_section(service_section)
    if actions:
        action_section = deffile.INISection('Actions')
        for action_name, action in actions.iteritems():
            action_section.fields[action_name] = action.rostype_name
        dfile.add_section(action_section)
    topics_section = deffile.INISection('Topics')
    publish_section = deffile.INISection('Publishes')
    subscribe_section = deffile.INISection('Subscribes')
    for topic_name, topic in topics.iteritems():
        # allow_sub means the node can be subscribed to -> it Publishes, and
        # vice versa for allow_pub.
        if topic.allow_sub:
            publish_section.fields[topic_name] = topic.rostype_name
        if topic.allow_pub:
            subscribe_section.fields[topic_name] = topic.rostype_name
    if topics_section.fields:
        dfile.add_section(topics_section)
    if publish_section.fields:
        dfile.add_section(publish_section)
    if subscribe_section.fields:
        dfile.add_section(subscribe_section)
    if full:
        dfns = get_definitions(services=services.itervalues(), topics=topics.itervalues(), actions=actions.itervalues())
        [dfile.add_definition(dfn) for dfn in dfns]
    return dfile
def describe_service(service_name, service, full=False):
    """Build a DefFile describing one service.

    :param full: when True, embed the service's type definitions as well
    """
    dfile = deffile.DefFile()
    dfile.manifest.def_type = 'Service'
    dfile.manifest['Name'] = service_name
    dfile.manifest['Type'] = service.get('rostype_name', 'Unknown')
    if full:
        dfns = get_definitions(services=[service])
        [dfile.add_definition(dfn) for dfn in dfns]
    return dfile
# Interestingly this is used from rostful, to make sense of the data returned
# by the pyros client, not from pyros itself...
# TODO : maybe need to move it ?
# TODO : check, maybe same with some other methods here...
def describe_topic(topic_name, topic, full=False):
    """Build a DefFile describing one topic.

    :param full: when True, embed the topic's message definitions as well
    """
    dfile = deffile.DefFile()
    dfile.manifest.def_type = 'Topic'
    dfile.manifest['Name'] = topic_name
    dfile.manifest['Type'] = topic.get('rostype_name', 'Unknown')
    # this is obsolete, now each Topic instance does both...
    #dfile.manifest['Publishes'] = get_json_bool(topic.allow_sub)
    #dfile.manifest['Subscribes'] = get_json_bool(topic.allow_pub)
    if full:
        dfns = get_definitions(topics=[topic])
        [dfile.add_definition(dfn) for dfn in dfns]
    return dfile
|
12,017 | 0af71c7c7ce9d5f953090638eced815648fff571 | # -*- coding: utf-8 -*-
# @Time : 2021/3/27 16:27
# @Author : kjleo
# @Software: PyCharm
# @E-mail :2491461491@qq.com
import re
import time
from concurrent.futures.thread import *
from PyQt5.QtCore import *
import PyQt5.QtCore
import requests
results = []
length = 0
class Down(QThread):
    """Worker thread that downloads one pixiv image via the pixiv.cat proxy.

    Emits ``process(percent, row, speed_kb_s)`` when done and ``success(msg)``
    with a user-facing message. Cooperates with the module-level ``results``
    and ``length`` globals filled by DownSplit().
    """
    process = pyqtSignal(int, int, float)
    success = pyqtSignal(str)
    def __init__(self, Id, row, url = None, sigle_url=False,Name=None):
        super().__init__()
        if sigle_url:
            # Direct image URL: swap the pximg host for the proxy host.
            self.url = url.replace("i.pximg.net","i.pixiv.cat")
        else:
            # Illustration id: build a pixiv.cat URL from the template.
            self.url = "https://pixiv.cat/82939788.png".replace("82939788", str(Id))
        self.Id = str(Id)
        self.FileObj = None
        self.row = row
        # NOTE(review): this streamed response is never closed, leaking a
        # connection per Down instance.
        self.le = requests.get(self.url, stream=True).headers["Content-Length"]
        if Name:
            self.FileObj = open(Name, "wb")
        else:
            self.prepare()
    def run(self):
        global length
        start = time.time()
        # Small files are fetched in one request; large ones are split.
        # NOTE(review): '2024' in the threshold looks like a typo for 1024 -- confirm.
        if float(self.le) < 0.5 * 2024 * 1024:
            resp = requests.get(self.url)
            self.FileObj.write(resp.content)
            self.FileObj.close()
        else:
            l = 0  # NOTE(review): unused
            DownSplit(self.url)
            # Reassemble: each future yields (Content-Range header, bytes);
            # the range start tells us where to seek before writing.
            for i in results:
                data = i.result()
                string = data[0]
                data = data[1]
                p = "bytes (.*?)-"
                t = int(re.findall(p, string=string)[0])
                self.FileObj.seek(t, 0)
                self.FileObj.write(data)
            self.FileObj.close()
        end = time.time()
        s = "id:" + self.Id + " 下载成功,是否打开文件夹"
        self.process.emit(100, self.row, int(length/(end-start)/1024))
        self.success.emit(s)
    def prepare(self):
        """Derive the output filename from the Content-Disposition header."""
        try:
            des = requests.get(self.url, stream=True).headers["Content-Disposition"]
        except:
            print(self.url)
            print(requests.get(self.url, stream=True).headers)
        p = 'filename="(.*?)"'
        res = re.findall(p,des)
        if len(res) != 0:
            self.FileObj = open(res[0],"wb")
def down(downStart, length, no, url):
    """Fetch one inclusive byte range [downStart, downStart+length] of *url*.

    :param no: chunk index (unused inside; kept for the submitting caller)
    :returns: (Content-Range header value, body bytes) so the caller can seek
              to the right offset before writing.
    """
    global resps  # NOTE(review): 'resps' is never defined or used in this module
    data = b''
    headers = {
        # HTTP byte ranges are inclusive on both ends.
        'Range': 'bytes=%s-%s' % (downStart, downStart + length),
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.43'
                     '89.90 Safari/537.36 Edg/89.0.774.63',
        'Host':'pixiv.cat'
    }
    response = requests.get(url, headers=headers)
    data=response.content
    return response.headers["Content-Range"], data
def DownSplit(url):
    """Download *url* in 12 parallel byte-range chunks.

    The submitted futures are collected (in order) in the module-global
    ``results`` list and the total size is published via the module-global
    ``length``; Down.run() consumes both.
    """
    global results, length
    # Bug fix: stale futures from a previous download used to remain in the
    # list and get written into the next file.
    results.clear()
    length = int(requests.get(url, stream=True).headers['Content-Length'])
    part = length // 12
    with ThreadPoolExecutor(max_workers=8) as pool:
        for i in range(12):
            start = i * part
            # Bug fix: the last chunk must reach the final byte; with the old
            # fixed span, files whose size is not a multiple of 12 were
            # silently truncated (ranges are inclusive, see down()).
            span = length - 1 - start if i == 11 else part
            results.append(pool.submit(down, start, span, i, url))
        # leaving the 'with' block joins all workers (implicit shutdown)
|
12,018 | af34bb34e4bbe71dd183ac3f9553f9290ef7b6ac | #%%
from ImgManager import ImgManager
from CNNModel import CNNModel
from Analyser import Analyser
import warnings
import pandas as pd
import time
import numpy as np
# Notebook-style (#%% cells) driver: grid-search a CNN, retrain with the best
# hyper-parameters, evaluate on test + validation sets, and persist everything
# under output_Q2/.
output2='output_Q2/'
warnings.filterwarnings('ignore')
#%%
# Load and preprocess the image dataset (20% held out for testing).
imgMan=ImgManager(testSize=0.2)
imgMan.readImages()
# %%
imgMan.procesImages()
# %%
# imgMan.displayProcessImages()
#%%
X_Train,y_train,X_Test,y_test,X_Val,y_Val=imgMan.get_Train_Test_Val_Data()
print('X_train shape: '+str(X_Train.shape))
print('y_train shape: '+str(y_train.shape))
print('X_test shape: '+str(X_Test.shape))
print('y_test shape: '+str(y_test.shape))
print('X_Val shape: '+str(X_Val.shape))
print('y_Val shape: '+str(y_Val.shape))
# %%
cnnMod=CNNModel(imgMan.getSideDimension(),X_Train,y_train,X_Test,y_test)
#%%
# Find the best hyper-parameters via cross-validated grid search.
epoch=[5,10]
batSize=[25,20]
optimizers=['rmsprop','adam']
outAct=['softmax','sigmoid']
hiddenUnit=[256,128]
dictParam={'epochs':epoch,'batch_size':batSize,'anOptimizer':optimizers,'outActivation':outAct,'hidUnit':hiddenUnit}
start=time.time()
df_full,df_result,bestParam,bestScore,model=cnnMod.findOptimizeParamCV(dictParam,fold=2)
end=time.time()
# %%
print('Time taken for grid search is '+str(end-start)+" seconds")
# %%
# Print full results to output_Q2/DF_Full_Result.xlsx
df_full.to_excel(output2+"DF_Full_Result.xlsx")
# %%
# Print partial results to output_Q2/DF_Partial_Result.xlsx
df_result.to_excel(output2+"DF_Partial_Result.xlsx")
df_result.head()
# %%
# Show the best parameters found by the grid search.
bestParam
df_param=pd.DataFrame([bestParam])
df_param
# %%
# Show the best score after grid search
print('Best accuracy after grid search on training data: '+str(bestScore))
# %%
# Evaluate the best grid-search model on the test data.
res=model.score(X_Test,y_test)
print('Accuracy of grid search model on test data: '+str(res))
#%%
# Train a fresh model with the best parameters on the full training set.
start=time.time()
df_metrics,network,hist=cnnMod.trainModel(bestParam,X_Train,y_train)
end=time.time()
#%%
print('Time taken for training model is '+str(end-start)+" seconds")
# %%
# Show metrics after training with the best parameters.
df_metrics
#%%
# Evaluate trained network with test data
param= network.evaluate(X_Test, y_test,batch_size=bestParam.get('batch_size'))
#%%
# Print results of test data (order matches the model's compiled metrics).
print('Test loss:', param[0])
print('Test accuracy:', param[1]*100)
print('Test precision:', param[2]*100)
print('Test recall:', param[3]*100)
print('Test false negative:', param[4])
print('Test false positive:', param[5])
print('Test true negative:', param[6])
print('Test true positive:', param[7])
#%%
# Display loss vs epoch graph for test data. See \output_Q2\Loss.png
analyser=Analyser(output2)
analyser.plot_loss(hist,'Loss')
#%%
# Display accuracy vs epoch graph for test data. See \output_Q2\Accuracy.png
analyser.plot_accuracy(hist,'Accuracy')
#%%
# Evaluate trained network with validation data
param= network.evaluate(X_Val, y_Val,batch_size=bestParam.get('batch_size'))
#%%
# Print results of validation data
print('Validation loss:', param[0])
print('Validation accuracy:', param[1]*100)
print('Validation precision:', param[2]*100)
print('Validation recall:', param[3]*100)
print('Validation false negative:', param[4])
print('Validationt false positive:', param[5])
print('Validation true negative:', param[6])
print('Validation true positive:', param[7])
#%%
# Spot-check the validation images: show every misclassified one.
# %matplotlib inline
import matplotlib.pyplot as plt
wrong,right=0,0
for index,anImage in enumerate(X_Val):
    actual = np.argmax(y_Val[index])
    actualStr=imgMan.getStrKeyFromVal(actual)
    prob = network.predict(np.expand_dims(anImage, axis=0))
    predictIndx = np.argmax(prob)
    predictStr=imgMan.getStrKeyFromVal(predictIndx)
    if actualStr !=predictStr:
        # print(actual)
        # print(predictIndx)
        plt.imshow(anImage)
        plt.show()
        print('Actual img is: '+ actualStr)
        print('Predicted img is '+predictStr +'\n')
        wrong+=1
    if actualStr ==predictStr:
        right+=1
    index+=1  # NOTE(review): redundant -- enumerate already advances index
print('Wrong '+str(wrong))
print('Right '+str(right))
#%%
import pickle
# Save the datasets, image manager and network model for later reuse.
data = [X_Test,y_test,X_Train,y_train,X_Val,y_Val]
with open(output2+'Q2_Data.pickle', 'wb+') as out_file:
    pickle.dump(data, out_file)
with open(output2+'Q2_ImageManager.pickle', 'wb+') as out_file:
    pickle.dump(imgMan, out_file)
network.save(output2+"Q2_CNN_model.h5")
# %%
12,019 | 82218b213bce34dcf4e24d470542bbf6b79580b0 | from translate import Translator
try:
    # Read the source text and translate it into four languages.
    # NOTE(review): only the Japanese translation is actually written to the
    # output file; the other writes are commented out below.
    with open('./test.txt', mode='r') as file:
        text = file.read()
        print(text)
        translator = Translator(to_lang="ja")
        translated_text = translator.translate(text)
        print(translated_text)
        translator = Translator(to_lang="hi")
        hindi = translator.translate(text)
        print(hindi)
        translator = Translator(to_lang="ta")
        tamil = translator.translate(text)
        print(tamil)
        translator = Translator(to_lang="ar")
        arabic = translator.translate(text)
        print(arabic)
    with open('./test-translated.txt', mode='w') as file2:
        # file2.write('Japanese\n')
        file2.write(translated_text)
        # file2.write('Hindi\n')
        # file2.write(hindi)
        # file2.write('Tamil\n')
        # file2.write(tamil)
        # file2.write('Arabic\n')
        # file2.write(arabic)
        # unicodeData = 'Japanese\n' + translated_text + '\nHindi\n' + hindi + '\nTamil\n' + tamil + '\nArabic\n' + arabic
        # print(unicodeData.encode('utf-8', 'ignore'))
        # file2.write(unicodeData)
except FileNotFoundError as e:
    print('File is missing', e)
|
12,020 | 1664b4f0793644e57847540e98fcb5a8bfa61739 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Przerabia pojedynczy plik, wyciągnięty wcześniej z .dsk na .tap.
Użycie, np.
./dsk2tap.py PACKMAKE packmake.tap
"""
import array
import sys
def save_data(f, data):
    """Append a .tap data block to *f* and close it.

    Layout: 2-byte little-endian block length, 0xFF flag byte, payload,
    XOR checksum over flag+payload.
    """
    block = array.array('B', [255])  # 0xFF marks a standard data block
    block.extend(data)
    checksum = 0
    for byte in block:
        checksum ^= byte
    block.append(checksum)
    size = len(block)
    prefix = array.array('B')
    prefix.append(size & 255)
    # Bug fix: '/' floors only under Python 2; '//' keeps an int everywhere
    # (array.append raises TypeError on a float under Python 3).
    prefix.append(size // 256)
    prefix.tofile(f)
    block.tofile(f)
    f.close()
def header_prefix(name, typ, length, start):
    """Build the common .tap header prefix.

    Layout: flag 0x00, type byte, 10-char blank-padded lowercase filename,
    16-bit LE data length, then the two raw bytes in *start* (autostart line /
    load address / variable info, depending on the block type).
    """
    header = array.array('B', [0, typ])
    # Keep only the basename and drop a leading dot (files extracted from .dsk).
    name = name.split('/')[-1]
    if name[0] == '.':
        name = name[1:]
    # Pad/truncate to exactly 10 characters in one step.
    name = name[:10].ljust(10).lower()
    # Portability fix: array.fromstring was removed in Python 3.9; iterating a
    # bytearray of the encoded name works on both Python 2 and 3.
    header.extend(bytearray(name.encode('ascii')))
    header.append(length & 255)
    header.append(length // 256)  # '//' keeps an int under Python 3 as well
    header.extend(start)
    return header
def save_program(name, autostart, variables, program, data):
    """Write a BASIC program block (.tap type 0) to sys.argv[2].

    NOTE(review): written for Python 2 -- xrange and the '/' floor divisions
    below fail or change meaning under Python 3.
    """
    # Blank out '*' in loader lines of the form token-239 '*' '"'
    # (presumably LOAD "*" -- confirm token 239 against the character set).
    for i in xrange(1, len(data) - 1):
        if data[i] == ord('*') and data[i - 1] == 239 and data[i + 1] == ord('"'):
            data[i] = 32
    header = header_prefix(name, 0, len(data), autostart)
    # NOTE(review): 'variables' is accepted but never used; 'program' supplies
    # the remaining header bytes -- verify against the .tap header layout.
    header.extend(program)
    checksum = 0
    for i in header:
        checksum ^= i
    header.append(checksum)
    #print "header", header
    l = len(header)
    hh = array.array('B')
    hh.append(l & 255)
    hh.append(l / 256)
    f = open(sys.argv[2], "wb")
    hh.tofile(f)
    header.tofile(f)
    save_data(f, data)
def save_array(name, typ, array_length, array_address, data):
    """Write a numeric/alphanumeric array block (.tap type 1 or 2) to sys.argv[2].

    NOTE(review): 'array_length' is accepted but never used; the header length
    comes from len(data). The '/' divisions are Python 2 floor division.
    """
    header = header_prefix(name, typ, len(data), array_address)
    # Trailing 0x00 0x80 bytes complete the 17-byte header.
    header.append(0)
    header.append(128)
    checksum = 0
    for i in header:
        checksum ^= i
    header.append(checksum)
    #print "header", header
    l = len(header)
    hh = array.array('B')
    hh.append(l & 255)
    hh.append(l / 256)
    f = open(sys.argv[2], "wb")
    hh.tofile(f)
    header.tofile(f)
    save_data(f, data)
def save_code(name, l, address, data):
    """Write a machine-code block (.tap type 3) to sys.argv[2].

    NOTE(review): parameter 'l' is never used and is shadowed by the header
    length below. The '/' divisions are Python 2 floor division.
    """
    header = header_prefix(name, 3, len(data), address)
    # Trailing 0x00 0x80 bytes complete the 17-byte header.
    header.append(0)
    header.append(128)
    checksum = 0
    for i in header:
        checksum ^= i
    header.append(checksum)
    #print "header", header
    l = len(header)
    hh = array.array('B')
    hh.append(l & 255)
    hh.append(l / 256)
    f = open(sys.argv[2], "wb")
    hh.tofile(f)
    header.tofile(f)
    save_data(f, data)
if __name__ == "__main__":
#print sys.argv
inp = array.array('B')
inp.fromstring(open(sys.argv[1], "rb").read())
#print "inp", inp
if inp[0] == 0: #program
save_program(sys.argv[1], inp[1:3], inp[3:5], inp[5:7], inp[7:])
elif inp[0] == 1: # numeric array
save_array(sys.argv[1], 1, inp[1:3], inp[3:5], inp[5:])
elif inp[0] == 2: # alphanumeric array
save_array(sys.argv[1], 2, inp[1:3], inp[3:5], inp[5:])
elif inp[0] == 3: # code
save_code(sys.argv[1], inp[1:3], inp[3:5], inp[5:])
else:
raise Exception, "Unknown type " + str(inp[0])
|
12,021 | aea70eb2f6b09c1f3071d6a5fa8b038cb4c79a50 | # Generated by Django 3.2.8 on 2021-11-10 16:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional ``sanitized_name`` CharField
    to the College, Department, Division and Organization models."""

    dependencies = [
        ('units', '0003_alter_employee_prefix'),
    ]

    operations = [
        migrations.AddField(
            model_name='college',
            name='sanitized_name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='department',
            name='sanitized_name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='division',
            name='sanitized_name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='organization',
            name='sanitized_name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
|
12,022 | 2173d827c99fb01ab01ff72845b7c7c1d9642936 | #056-2.py
a = ["tree", "lake", "park"]
idx = a.index("lake")
a.pop(idx)
print(a)
|
12,023 | 11e66860ee17d6deeabb5fcf2801a33429ad7421 | import logging
import asyncio
import pickle
import sys
from aiocoap import *
def get_args():
    """Return the single required CLI argument (the server IP); exit otherwise."""
    if len(sys.argv) == 2:
        return sys.argv[1]
    print("ERROR: Incorrect number of arguments")
    sys.exit()
logging.basicConfig(level=logging.INFO)
async def main():
    """One round of the CoAP block game: GET the current block state, and if it
    is this player's turn (token == 3) PUT the next block; exit on token 4."""
    ip = get_args()
    protocol = await Context.create_client_context()
    request = Message(code=GET, uri='coap://' + ip + '/other/block')
    try:
        response = await protocol.request(request).response
    except Exception as e:
        print('Failed to fetch resource:')
        print(e)
    else:
        global token
        # NOTE(review): pickle over the network is unsafe for untrusted peers.
        receive_data = pickle.loads(response.payload)
        x = receive_data[0]
        y = receive_data[1]
        z = receive_data[2]
        token = receive_data[3]
        print('Result: %s\n%r %r %r %r'%(response.code, x, y, z, token))
        #Making code for player A
        #sends block type 1
        if token == 3:
            context = await Context.create_client_context()
            await asyncio.sleep(2)
            # Place the next block one step along x; 41 is the block type sent.
            payload = (token, x + 1, y, z, 41)
            send_data = pickle.dumps(payload)
            request = Message(code=PUT, payload=send_data)
            request.opt.uri_host = ip
            request.opt.uri_path = ("other", "block")
            response = await context.request(request).response
            #print('Result: %s\n%r'%(response.code, response.payload))
        if token == 4:
            print("Wall is complete")
            sys.exit()
if __name__ == "__main__":
while 1:
asyncio.get_event_loop().run_until_complete(main())
|
12,024 | 01071df38bb4ee55ee2b478bedd176e188b8d645 | import re
# Read N lines of CSS and print every hex color *value* found.
N = int(input())
if N > 0 and N < 50:
    for _ in range(N):
        css = input()
        # The leading '.' requires a character before '#', so a code at the very
        # start of a line (i.e. used as a selector) is deliberately skipped;
        # 6-digit form is tried before 3-digit so full codes are not split.
        col_codes = re.findall(r".(#[0-9A-Fa-f]{6}|#[0-9A-Fa-f]{3})",css) #return a list with all the matches in the css code
        if col_codes:
            print(*col_codes,sep="\n")
12,025 | 542ff78d2a1d1181b04f939da5ae667cdd95d906 | import json as j
import pandas as pd
import re
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest, chi2
# Train a TF-IDF + chi2 + LinearSVC pipeline to predict Yelp star ratings
# from review text, then report top keywords per class and test accuracy.
json_data = None
with open('yelp_academic_dataset_review.json') as data_file:
    '''
    lines = []
    for x in range(50000): # I didn't choose whole dataset as it didn't fit into memory.
        lines.append(data_file.readline())
    '''
    lines = data_file.readlines() #to train only on subset of whole data comment this line and uncomment above 3 lines.
    # The file is JSON-lines; wrap it into a single JSON array.
    joined_lines = "["+ ",".join(lines) + "]"
    json_data = j.loads(joined_lines)
data = pd.DataFrame(json_data)
nltk.download('stopwords')
stemmer = SnowballStemmer('english')
words = stopwords.words('english')
# Clean each review: keep letters only, drop stopwords, stem, lowercase.
data['cleaned'] = data['text'].apply(lambda x: " ".join([stemmer.stem(i) for i in re.sub("[^a-zA-Z]"," ",x).split() if i not in words]).lower())
X_train,X_test,y_train,y_test = train_test_split(data['cleaned'], data.stars, test_size = 0.2)
# vect: uni+bigram TF-IDF; chi: keep the 10k most class-correlated features;
# clf: L1-regularized linear SVM.
pipeline = Pipeline([('vect',TfidfVectorizer(ngram_range = (1,2), stop_words = "english", sublinear_tf = True)),
                     ('chi',SelectKBest(chi2, k = 10000)),
                     ('clf',LinearSVC(C = 1.0, penalty = 'l1', max_iter = 3000, dual = False))])
model = pipeline.fit(X_train,y_train)
vectorizer = model.named_steps['vect']
chi = model.named_steps['chi']
clf = model.named_steps['clf']
# Map the selected feature indices back to their n-gram names.
feature_names = vectorizer.get_feature_names()
feature_names = [feature_names[i] for i in chi.get_support(indices = True)]
feature_names = np.asarray(feature_names)
target_names = ['1','2','3','4','5']
print("top 10 keywords per class : ")
for i, label in enumerate(target_names):
    # Highest-weighted features for this star rating.
    top10 = np.argsort(clf.coef_[i])[-10:]
    print("%s %s"%(label," ".join(feature_names[top10])))
print("accuracy score :" + str(model.score(X_test,y_test)))
print(model.predict(['That was an awesome place. great food']))
12,026 | 714968ccd71136f633925e51b38b054573d5ac49 | import datetime
import sqlalchemy
from .db_session import SqlAlchemyBase
from sqlalchemy_serializer import SerializerMixin
class Trainer(SqlAlchemyBase, SerializerMixin):
    """ORM model for a gym trainer; SerializerMixin adds to_dict() support."""
    __tablename__ = 'trainers'
    # Surrogate primary key.
    id = sqlalchemy.Column(sqlalchemy.Integer,
                           primary_key=True, autoincrement=True)
    surname = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    name = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    age = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
    # Avatar URL; defaults to a stock photo.
    img_trainer = sqlalchemy.Column(sqlalchemy.String, nullable=False,
                                    default='https://sun9-16.userapi.com/c856028/v856028901/202200/Epy2s3_x81I.jpg')
    # NOTE(review): presumably a comma-separated list of client counters/ids stored as text -- confirm against callers.
    clientele = sqlalchemy.Column(sqlalchemy.String, nullable=True, default='0, 0, 0')
    telefon = sqlalchemy.Column(sqlalchemy.String, nullable=True, default='99999999999')
    email = sqlalchemy.Column(sqlalchemy.String, unique=True, nullable=False)
    # Set at insert time only; there is no onupdate=, so edits do not refresh it.
    modified_date = sqlalchemy.Column(sqlalchemy.DateTime, default=datetime.datetime.now)
|
12,027 | 7291736f147c5c39199a00c687ae9e96f4f3afb9 | import csv
import os
import re
import sys
import sqlparse
META_FILE = "./files/metadata.txt"  # table/column definitions consumed by metadata()
AGGREGATE = ["min", "max", "sum", "avg"]  # aggregate keywords recognized in SELECT
schema = {}  # table name -> ordered list of column names (filled by metadata())
table_contents = {}  # table name -> list of integer rows (filled by read_data())
# create table schema
# -------------------
def metadata():
    """Parse META_FILE into the global ``schema`` dict.

    The metadata file lists each table as::

        <begin_table>
        table_name
        col1
        ...
        <end_table>

    Exits the process with a message on any read failure.
    """
    try:
        with open(META_FILE, 'r') as f:
            # Strip blank lines and surrounding whitespace up front.
            tokens = [line.strip() for line in f if line.strip()]
        table_name = None
        for token in tokens:
            if token == "<begin_table>":
                table_name = None  # the next token names a new table
            elif token == "<end_table>":
                pass  # nothing to finalize
            elif not table_name:
                table_name, schema[token] = token, []  # first token after <begin_table>
            else:
                schema[table_name].append(token)  # a column of the current table
    except (OSError, ValueError):
        # OSError: metadata file missing/unreadable.  The original only caught
        # ValueError, which let a missing file escape as an unhandled traceback.
        print("error in reading data.")
        sys.exit()
# ---------------------------------------------------------------------------------------------------------------------
# populate table contents
# -----------------------
def read_data(dictionary):
    """Load ./files/<table>.csv for every table key in *dictionary* into table_contents.

    Cells may be single-quoted; all values are coerced to int.
    Exits the process on a missing file or a non-integer cell.
    """
    try:
        for table in dictionary:
            table_contents[table] = []
            with open('./files/' + table + '.csv') as csvfile:
                for row in csv.reader(csvfile):
                    # Strip stray single quotes, then coerce each cell to int.
                    table_contents[table].append(
                        [int(cell.replace("'", '')) for cell in row])
    except (OSError, ValueError):
        # OSError: csv file missing (previously uncaught); ValueError: non-integer cell.
        print("error populating data.")
        sys.exit()
# ---------------------------------------------------------------------------------------------------------------------
# joining two tables
# ------------------
def join_tables(table1, table2):
    """Return the cross product of two row lists.

    Each output row is ``row1 + row2`` for every pair, preserving the
    original nesting order (table1 outer, table2 inner).  The original
    version carried a dead ``try: pass`` block which has been removed.
    """
    return [left + right for left in table1 for right in table2]
# ---------------------------------------------------------------------------------------------------------------------
# select query on joint tables
# ----------------------------
def select_part(tables,column_args):
    """Project *column_args* out of the (joined) contents of *tables*.

    Returns (qualified column list, projected rows, full joined rows,
    qualified header list).  WARNING: mutates the caller's *tables* list
    while joining -- callers that need the names afterwards must copy first
    (see where()).  Exits the process on an unknown column.
    """
    try:
        # Build the fully-qualified header list for every table in order.
        dictionary_headers = []
        for j in tables:
            for i in schema[j]:
                dictionary_headers.append(j+'.'+i)
        if "*" in column_args:
            column_args = dictionary_headers
        else:
            column_args = convert_argsv(column_args,tables)
            # NOTE(review): convert_argsv() can only return None via a branch
            # that is never taken (it tests for an 'ok' value that is never
            # produced), so this early return is effectively dead -- confirm.
            if column_args is None:
                return
            else:
                pass
        if len(tables)>1:
            # Cross-join the first two tables, then fold in the rest.
            p = join_tables(table_contents[tables[0]],table_contents[tables[1]])
            tables.remove(tables[0])
            tables.remove(tables[0])
            for i in tables:
                p = join_tables(p,table_contents[i])
        else:
            p = table_contents[tables[0]]
        # Map each requested column to its index in the joined header list.
        indexes_printed = []
        for i in column_args:
            index = dictionary_headers.index(i)
            indexes_printed.append(index)
        # Project only the requested columns out of every joined row.
        fin_table = []
        for j in p:
            temp = []
            for i in indexes_printed:
                temp.append(j[i])
            fin_table.append(temp)
    except ValueError:
        # .index() failed: requested column not present in the joined headers.
        print("error")
        sys.exit()
    return column_args, fin_table, p, dictionary_headers
# ---------------------------------------------------------------------------------------------------------------------
# print required output
# ---------------------
def print_particulars(cols_arg, fin_table):
    """Print a comma-joined header of column names, then each result row."""
    print(','.join(cols_arg))
    for record in fin_table:
        print(','.join(str(field) for field in record))
# ---------------------------------------------------------------------------------------------------------------------
# create header dictionary
# -----------------------
def create_table_dictionary(table):
    """Return the fully-qualified header list ('table.column') for *table*'s schema."""
    return [table + '.' + column for column in schema[table]]
# ---------------------------------------------------------------------------------------------------------------------
# select distinct columns
# -----------------------
def select_distinct(columns,table):
    """Execute SELECT DISTINCT <columns> FROM <table> (single table only).

    Qualifies bare column names against the table schema, deduplicates the
    projected rows, and prints a header line followed by the unique rows.
    Exits the process on multiple tables or an unknown column.
    """
    columns = re.split(r'[\ \t,]+',columns)
    temp = re.split(r'[\ \t,]+',table)
    if len(temp)>=2:
        print("multiple table distinct pairs not handled")
        sys.exit()
    p = create_table_dictionary(table)
    # Qualify every requested column and validate it against the schema.
    new_column = []
    for i in columns:
        if '.' not in i:
            if i in schema[table]:
                new_column.append(table+'.'+i)
            else:
                print("column "+i+" doesn't exist in the table.")
                sys.exit()
        else:
            if i not in p:
                print("column "+i+" doesn't exist in the table.")
                sys.exit()
            else:
                new_column.append(i)
    indices_needed = []
    print(','.join(new_column))
    for i in new_column:
        indices_needed.append(p.index(i))
    # Project the requested columns out of every row.
    col_array = []
    for i in table_contents[table]:
        temp = []
        for j in indices_needed:
            temp.append(i[j])
        col_array.append(temp)
    # Dedupe via tuples (lists are unhashable); note set() does not preserve row order.
    col_array = [list(x) for x in set(tuple(x) for x in col_array)]
    for i in col_array:
        print(*i,sep=',')
# ---------------------------------------------------------------------------------------------------------------------
# check if a column exists in one or more tables with same name
# -------------------------------------------------------------
def find_multible_occurances(column,tables):
    """Return the owning table name if *column* exists in exactly one of *tables*.

    Returns False when the name is ambiguous (found in several tables) and
    exits the process when it is found nowhere.  NOTE(review): the caller
    convert_argsv() also tests for an 'ok' return value, which this function
    never produces -- that caller branch is dead code.
    """
    p = 0
    table_name = ""
    for i in tables:
        if column in schema[i]:
            p = p+1
            table_name = i
    if p==1:
        return table_name
    elif p>1:
        return False
    else:
        print("column not found!")
        sys.exit()
# ---------------------------------------------------------------------------------------------------------------------
# convert table names to (table + '.' + col) format
# -------------------------------------------------
def convert_argsv(column_args,tables):
    """Qualify each bare column name in *column_args* with its owning table.

    Already-qualified 'table.col' names pass through unchanged.  Ambiguous
    names abort the process.  NOTE(review): find_multible_occurances() never
    returns 'ok', so the None-returning branch below is dead code (as is the
    matching check in select_part()) -- confirm before removing.
    """
    new_col = []
    for i in column_args:
        if '.' not in i:
            check = find_multible_occurances(i,tables)
            if check=='ok':
                return None
            elif check==False:
                print("ambiguity!")
                sys.exit()
            else:
                new_col.append(check+'.'+i)
        else:
            new_col.append(i)
    return new_col
# ---------------------------------------------------------------------------------------------------------------------
# check aggregate functions
# -------------------------
def aggregate(col,dictionary,table,aggr):
    """Print an aggregate (min/max/sum/avg) of one column over *table*.

    col        -- fully qualified column name ('table.column')
    dictionary -- list of fully qualified headers, parallel to row positions
    table      -- list of integer rows
    aggr       -- one of 'min', 'max', 'sum', 'avg'
    Exits the process on an unknown column, empty input, or bad keyword.
    """
    try:
        col_idx = dictionary.index(col)
        values = [row[col_idx] for row in table]
        # Header line first, exactly as typed in the query: e.g. "max(t.a)".
        print(aggr+'('+col+')')
        operations = {
            "max": max,
            "min": min,
            "avg": lambda vals: sum(vals) / float(len(vals)),
            "sum": sum,
        }
        if aggr not in operations:
            print("error in aggregate format.")
            sys.exit()
        print(operations[aggr](values))
    except ValueError:
        # Unknown column (.index failed) or empty column for min/max.
        print("error in format")
        sys.exit()
# ---------------------------------------------------------------------------------------------------------------------
# check AND condition
# -------------------
def checkAnd(left,right,table_content,table_headers,tables):
    """Return rows satisfying both conditions (list intersection).

    Result order follows the rows matching the *right* condition, mirroring
    the original implementation.
    """
    matches_left = checkCondOps(left,table_content,table_headers,tables)
    matches_right = checkCondOps(right,table_content,table_headers,tables)
    return [row for row in matches_right if row in matches_left]
# ---------------------------------------------------------------------------------------------------------------------
# check OR condition
# ------------------
def checkOr(left,right,table_content,table_headers,tables):
    """Return rows satisfying either condition (union with one-shot dedup).

    Rows matched by both sides appear once: for each shared row, the first
    occurrence in the left+right concatenation is removed, exactly as the
    original version did.
    """
    matches_left = checkCondOps(left,table_content,table_headers,tables)
    matches_right = checkCondOps(right,table_content,table_headers,tables)
    shared = [row for row in matches_right if row in matches_left]
    union = matches_left + matches_right
    for row in shared:
        union.remove(row)  # drops the first (left-side) copy of each shared row
    return union
# ---------------------------------------------------------------------------------------------------------------------
# check if the condition has integer on one side
# ----------------------------------------------
def RepresentsInt(s):
    """Return True when *s* parses as a base-10 integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
# ---------------------------------------------------------------------------------------------------------------------
# resolve condition if RHS and LLHS are table arguments
# -----------------------------------------------------
def both_table_vals(col1,col2,operator,table_headers,table_content):
    """Filter rows where ``row[col1] <operator> row[col2]`` holds.

    col1, col2    -- fully qualified column names, looked up in table_headers
    operator      -- one of '<=', '>=', '=', '>', '<'
    Returns the matching rows in input order.  An unrecognized operator
    yields an empty result (preserving the original if/elif behaviour);
    an unknown column prints an error and exits.
    """
    comparators = {
        "<=": lambda a, b: a <= b,
        ">=": lambda a, b: a >= b,
        "=": lambda a, b: a == b,
        ">": lambda a, b: a > b,
        "<": lambda a, b: a < b,
    }
    try:
        idx1 = table_headers.index(col1)
        idx2 = table_headers.index(col2)
        compare = comparators.get(operator)
        if compare is None:
            return []  # unknown operator: original chain matched no rows
        return [row for row in table_content if compare(row[idx1], row[idx2])]
    except ValueError:
        # .index() raised: column name not present in the joined header list.
        print("error")
        sys.exit()
# ---------------------------------------------------------------------------------------------------------------------
# resolve condition for int and table parts
# -----------------------------------------
def single_int_handler(arg,col_name,operator,table_headers,table_content):
    """Filter rows where ``row[col_name] <operator> int(arg)`` holds.

    arg       -- RHS literal, parsed as int (exits on parse failure)
    col_name  -- fully qualified column name looked up in table_headers
    operator  -- one of '<=', '>=', '=', '>', '<'
    Returns the matching rows in input order.  An unrecognized operator
    yields an empty result (preserving the original if/elif behaviour).
    """
    comparators = {
        "<=": lambda a, b: a <= b,
        ">=": lambda a, b: a >= b,
        "=": lambda a, b: a == b,
        ">": lambda a, b: a > b,
        "<": lambda a, b: a < b,
    }
    try:
        threshold = int(arg)
        idx = table_headers.index(col_name)
        compare = comparators.get(operator)
        if compare is None:
            return []  # unknown operator: original chain matched no rows
        return [row for row in table_content if compare(row[idx], threshold)]
    except ValueError:
        # int() failed on arg, or the column is not in the header list.
        print("error")
        sys.exit()
# ---------------------------------------------------------------------------------------------------------------------
# check condition operators
# -------------------------
def checkCondOps(condition,table_content,table_headers,tables):
    """Split a condition string on its comparison operator and dispatch.

    The two-character operators must be tested before their one-character
    prefixes ('<=' before '<', '>=' before '>'), which the chain below
    guarantees.  Integer RHS goes to single_int_handler(); column-vs-column
    comparisons go to both_table_vals().  Bare column names are qualified
    with their owning table first.  Exits on a condition with no operator.
    """
    try:
        operator = None
        if "<=" in condition:
            condition = condition.split("<=")
            operator = "<="
        elif ">=" in condition:
            condition = condition.split(">=")
            operator = ">="
        elif "=" in condition:
            condition = condition.split("=")
            operator = "="
        elif ">" in condition:
            condition = condition.split(">")
            operator = ">"
        elif "<" in condition:
            condition = condition.split("<")
            operator = "<"
        else:
            print("error in condition")
            sys.exit()
        if RepresentsInt(condition[1]):
            # column-vs-integer comparison
            if "." not in condition[0]:
                condition[0] = convert_arg_condition(condition[0],tables)
            return single_int_handler(condition[1],condition[0],operator,table_headers,table_content)
        else:
            # column-vs-column comparison: qualify both sides if needed
            if "." not in condition[0]:
                condition[0] = convert_arg_condition(condition[0],tables)
            if "." not in condition[1]:
                condition[1] = convert_arg_condition(condition[1],tables)
            return both_table_vals(condition[0],condition[1],operator,table_headers,table_content)
    except ValueError:
        print("error")
        sys.exit()
# ---------------------------------------------------------------------------------------------------------------------
# conversion
# ----------
def convert_arg_condition(name,tables):
    """Qualify the bare column *name* against *tables* ('table.name').

    Exits the process when the name is ambiguous (owned by several tables)
    or not found in any table's schema.
    """
    try:
        owners = [t for t in tables if name in schema[t]]
        if len(owners) > 1:
            print("ambiguity")
            sys.exit()
        if not owners:
            print("column not exist")
            sys.exit()
        return owners[0] + "." + name
    except ValueError:
        print("error")
        sys.exit()
# ---------------------------------------------------------------------------------------------------------------------
# check where condition
# ---------------------
def where(commands,columns,tables):
    """Evaluate a WHERE clause (single condition or one AND/OR pair) and print rows.

    commands -- normalized condition list from command_analyzer()
    columns  -- requested projection columns (already split)
    tables   -- table names from the FROM clause
    """
    try:
        if len(tables)>=2:
            # select_part() mutates the tables list while joining, so keep a
            # copy of the names for qualifying bare columns afterwards.
            table_names = []
            for i in tables:
                table_names.append(i)
            temp = select_part(tables,columns)
            joint_table, joint_table_dict = temp[2],temp[3]
            columns_needed = temp[0]
            if "AND" in commands:
                new_table = checkAnd(commands[0],commands[-1],joint_table,joint_table_dict,table_names)
                return print_new_table(new_table,columns_needed,joint_table_dict)
            elif "OR" in commands:
                new_table = checkOr(commands[0],commands[-1],joint_table,joint_table_dict,table_names)
                return print_new_table(new_table,columns_needed,joint_table_dict)
            else:
                new_table = checkCondOps(commands[0],joint_table,joint_table_dict,table_names)
                return print_new_table(new_table,columns_needed,joint_table_dict)
        elif len(tables)==1:
            # Single table: no join needed, filter the raw contents directly.
            columns_needed = convert_argsv(columns,tables)
            joint_table_dict = create_table_dictionary(tables[0])
            new_table = table_contents[tables[0]]
            if "AND" in commands:
                new_table = checkAnd(commands[0],commands[-1],new_table,joint_table_dict,tables)
                return print_new_table(new_table,columns_needed,joint_table_dict)
            elif "OR" in commands:
                new_table = checkOr(commands[0],commands[-1],new_table,joint_table_dict,tables)
                return print_new_table(new_table,columns_needed,joint_table_dict)
            else:
                new_table = checkCondOps(commands[0],new_table,joint_table_dict,tables)
                return print_new_table(new_table,columns_needed,joint_table_dict)
    except ValueError:
        print("error")
        sys.exit()
# ---------------------------------------------------------------------------------------------------------------------
# print table
# -----------
def print_new_table(table,column,joint_table_dict):
    """Print the selected *column*s of the filtered rows: header line, then rows."""
    print(','.join(column))
    positions = [joint_table_dict.index(name) for name in column]
    for row in table:
        print(','.join(str(row[pos]) for pos in positions))
    return
# ---------------------------------------------------------------------------------------------------------------------
# check commands for whitespace characters
# ----------------------------------------
def command_analyzer(commands):
    """Normalize a whitespace-split WHERE clause.

    Re-joins the tokens on either side of an AND/OR keyword so the result is
    either ``[cond]`` or ``[cond1, keyword, cond2]`` regardless of how the
    user spaced the comparison operators.  The original duplicated the
    AND and OR branches verbatim; they are unified here.
    """
    for keyword in ("AND", "OR"):
        if keyword in commands:
            if len(commands) == 3:
                return commands  # already [cond, keyword, cond]
            pivot = commands.index(keyword)
            return ["".join(commands[:pivot]),
                    keyword,
                    "".join(commands[pivot + 1:])]
    # No logical keyword: the whole token list is one condition.
    return ["".join(commands)]
# ---------------------------------------------------------------------------------------------------------------------
# select what query needs to be executed
# --------------------------------------
def select_process(commands):
    """Dispatch a tokenized SELECT statement to the right executor.

    Handles, in order: WHERE queries, SELECT DISTINCT, aggregate projections
    (min/max/sum/avg) and plain projections.  Exits on unsupported forms.
    """
    try:
        if "where" in commands[-1]:
            strip_commands = commands[-1].replace(";","").split(" ")
            strip_commands = command_analyzer(strip_commands[1:])
            tables = re.split(r'[\ \t,]+',commands[3])
            cols_arg = re.split(r'[\ \t,]+',commands[1])
            if "*" in cols_arg:
                if len(cols_arg)==1:
                    # Expand * to every qualified column of every table.
                    dictionary = []
                    for i in tables:
                        for j in schema[i]:
                            dictionary.append(i+"."+j)
                    cols_arg = dictionary
                else:
                    print("column error")
                    sys.exit()
            where(strip_commands,cols_arg,tables)
        elif commands[1].lower()=="distinct":
            select_distinct(commands[2],commands[4])
            return
        elif len(commands)==6 and "where" not in commands[-1]:
            print("Query not supported.")
            sys.exit()
        else:
            tables, cols_arg = commands[3], commands[1]
            # All aggregate keywords are exactly 3 characters ('min','max','sum','avg').
            check_aggr = cols_arg[:3]
            if check_aggr in AGGREGATE and cols_arg[len(cols_arg)-1]==')':
                tables = re.split(r'[\ \t,]+',tables)
                dictionary = []
                for i in tables:
                    for j in schema[i]:
                        dictionary.append(i+"."+j)
                cols_arg = cols_arg.replace(check_aggr,"").replace("(","").replace(")","")
                if "." not in cols_arg:
                    # FIX: convert_argsv expects a LIST of names; the original
                    # passed the bare string, which iterated it character by
                    # character and broke every multi-letter column name.
                    cols_arg = convert_argsv([cols_arg],tables)[0]
                if len(tables)==1:
                    new_table = (table_contents[tables[0]])
                elif len(tables)>=2:
                    # Cross-join the first two tables, then fold in the rest.
                    new_table = join_tables(table_contents[tables[0]],table_contents[tables[1]])
                    tables.remove(tables[0])
                    tables.remove(tables[0])
                    for i in tables:
                        new_table = join_tables(new_table,table_contents[i])
                else:
                    print("input error")
                    sys.exit()
                if cols_arg in dictionary:
                    aggregate(cols_arg,dictionary,new_table,check_aggr)
                else:
                    print("unknown error")
                    sys.exit()
            elif commands[2]=="from":
                # Plain projection: SELECT cols FROM tables
                cols_arg = re.split(r'[\ \t,]+',cols_arg)
                tables = re.split(r'[\ \t,]+',tables)
                reverted = select_part(tables,cols_arg)
                print_particulars(reverted[0],reverted[1])
            else:
                print("This query is not supported by slq-engine.")
                sys.exit()
        return
    except ValueError:
        print("error")
        sys.exit()
# ---------------------------------------------------------------------------------------------------------------------
# process query and parse
# -----------------------
def queryProcessor(query):
    """Parse *query* with sqlparse and route SELECT statements to select_process().

    Only SELECT is supported; the query must end with ';'.  Exits with a
    message on a missing semicolon or an unsupported statement type.
    """
    if query[-1]!=';':
        print("Syntax Err: Expected ';' in the end")
        sys.exit()
    else:
        query_convert = sqlparse.parse(query)[0].tokens
        commands = []
        # Flatten the sqlparse token stream into plain strings.
        lst = sqlparse.sql.IdentifierList(query_convert).get_identifiers()
        for command in lst:
            commands.append(str(command))
        if commands[0].lower() == 'select':
            select_process(commands)
        else:
            print("This query is not supported by slq-engine.")
            sys.exit()
# ---------------------------------------------------------------------------------------------------------------------
# run main
#---------
def main():
    """Entry point: validate usage, load schema + data, run the query in argv[1]."""
    # Validate argv BEFORE the comparatively expensive metadata/csv load
    # (the original loaded everything and only then checked the arguments).
    if len(sys.argv) != 2:
        print("ERROR : invalid args")
        print("USAGE : python {} '<sql query>'".format(sys.argv[0]))
        exit(-1)
    metadata()
    read_data(schema)
    queryProcessor(sys.argv[1])
# ---------------------------------------------------------------------------------------------------------------------
#run
#---
if __name__ == "__main__":
main()
# --------------------------------------------------------------------------------------------------------------------- |
12,028 | 2964a96b45bbb6a84f57374aaf1fd79b1933c197 | import sys
# Letter scores: a..z -> 1..26, A..Z -> 27..52.  Generated instead of the
# original 52-entry hand-written literal; the mapping is identical.
LetterValues = {}
for _offset in range(26):
    LetterValues[chr(ord('a') + _offset)] = _offset + 1
    LetterValues[chr(ord('A') + _offset)] = _offset + 27
# Generate prime table via a sieve of Eratosthenes.  Upper bound 20*53 covers
# every possible word score (up to 20 letters, max letter value 52, headroom).
# Produces the same PrimeTable (list of bools) and PrimeList as the original
# sieve, without its redundant even-number pre-pass and dead `else: continue`.
_LIMIT = 20 * 53
PrimeTable = [False, False] + [True] * (_LIMIT - 2)
PrimeList = []
for _n in range(2, _LIMIT):
    if PrimeTable[_n]:
        PrimeList.append(_n)
        # Multiples below _n*_n were already crossed out by smaller primes.
        for _multiple in range(_n * _n, _LIMIT, _n):
            PrimeTable[_multiple] = False
# Score each word from stdin and report whether its letter-value sum is prime.
for InputString in sys.stdin:
    Letters = list( InputString.rstrip() )
    #print( Letters )
    TotalValue = 0
    for L in Letters:
        TotalValue += LetterValues[ L ]  # KeyError on non-letter chars -- assumes pure a-zA-Z words
    #print( TotalValue )
    # NOTE(review): a sum of 1 (the one-letter word "a") is reported as prime
    # even though 1 is not mathematically prime -- presumably matching the
    # target judge's specification; confirm before changing.
    if TotalValue == 1:
        print( 'It is a prime word.' )
        continue
    if PrimeTable[ TotalValue ] == True:
        print( 'It is a prime word.' )
    else:
        print( 'It is not a prime word.' )
|
12,029 | fb70609254fdba9dc014af4eb004929b0e6d7bbc | from pathlib import Path
import shutil
def copy_paster(settings):
    """
    Note:
        This functions does a direct copy/paste of model input and output files into the post-proc run results folder.
    Parameters:
        settings: The SetInputs class.
    Return:
        Nothing is returned but files are copied and pasted.
    """
    print('Doing a copy/paste of model run inputs and outputs for this run ID.')
    # make a model runs folder within the postproc runs runid folder
    # NOTE(review): exist_ok=False means rerunning for the same run ID raises
    # FileExistsError -- presumably intentional to avoid clobbering; confirm.
    path_paste_model_runs = settings.path_postproc_runs_runid / 'CAFE_model_runs'
    path_paste_model_runs.mkdir(exist_ok=False)
    for idx, model_run in enumerate(settings.model_runs):  # idx is unused
        # Primary runs and sensitivity runs live under different source trees.
        if settings.run_primary_runs:
            path_copy_input = settings.path_project / f'CAFE_model_runs/input/{model_run}'
            path_copy_output = settings.path_project / f'CAFE_model_runs/output/{model_run}/reports-csv'
        else:
            path_copy_input = settings.path_project / f'CAFE_model_runs/sensitivities/input/{model_run}'
            path_copy_output = settings.path_project / f'CAFE_model_runs/sensitivities/output/{model_run}/reports-csv'
        # create the subfolder paths we want: <paste_root>/<input|output>/<model_run>
        path_paste_model_runs_folder = dict()
        for folder in ['input', 'output']:
            path_paste_model_runs_folder[folder] = path_paste_model_runs / folder
            path_paste_model_runs_folder[folder].mkdir(exist_ok=True)
            path_paste_model_runs_folder[folder] = path_paste_model_runs_folder[folder] / f'{model_run}'
            path_paste_model_runs_folder[folder].mkdir(exist_ok=True)
        # create generator of files in copy paths (files only, no subdirectories)
        files_in_path_copy_input = (entry for entry in path_copy_input.iterdir() if entry.is_file())
        files_in_path_copy_output = (entry for entry in path_copy_output.iterdir() if entry.is_file())
        # copy/paste input files (copy2 preserves metadata/timestamps)
        for file in files_in_path_copy_input:
            shutil.copy2(file, path_paste_model_runs_folder['input'] / file.name)
        # copy/paste output files
        for file in files_in_path_copy_output:
            shutil.copy2(file, path_paste_model_runs_folder['output'] / file.name)
    return
if __name__ == '__main__':
    # Allow running this module standalone using the shared post-proc settings.
    from postproc_setup import SetInputs as settings
    copy_paster(settings)
    print('Copy/Paste complete')
|
12,030 | fb3abeafd05fa9f733668aed3059e7dbc925851e | """
Examples to show available string methods in python
"""
# Replace Method: the optional count argument limits how many occurrences change.
a = "1abc2abc3abc4abc"
print(a.replace('abc', 'ABC', 1))  # only the first occurrence
print(a.replace('abc', 'ABC', 2))  # the first two occurrences
# Sub-Strings
# starting index is inclusive
# Ending index is exclusive
b = a[1]        # single character
print(b)
c = a[1:6]      # slice [start:stop)
print(c)
d = a[1:6:2]    # slice with step
print(d)
e = 'This is a string'
print(e)
print(e[:])         # full copy
print(e[1:])        # drop first char
print(e[:6])        # first six chars
print(e[-1:])       # last char, as a slice
print(e[-1])        # last char, by index
print(e[:-1])       # drop last char
print(e[:len(e)])   # explicit full length
print(e[::1])       # step 1 == full copy
print(e[::2])       # every other char
# reverse string
print(e[::-1])
# cannot change the original string: strings are immutable, so item
# assignment raises TypeError.  The original script crashed here; catch it
# instead so the demonstration completes.
try:
    e[1] = 'j'
except TypeError as err:
    print('strings are immutable:', err)
# strings are immutable. You cannot change the memory allocation of a string. All changes are done at runtime.
|
12,031 | 9e445d22ea9609e65ed9da29cb5e989598d0a7cf | import matplotlib.pyplot as plt
import numpy as np
def to_one_hot_encoding(s, obs_n):
    """Return a float32 one-hot vector of length *obs_n* with index *s* set to 1."""
    one_hot = np.zeros(obs_n, dtype=np.float32)
    one_hot[s] = 1.0
    return one_hot
def plot_learning(x, scores, epsilons, filename, lines=None):
    """Plot epsilon decay and a 20-episode running average of scores, then save.

    x        -- episode indices (shared x data for both axes)
    scores   -- per-episode scores; smoothed with a trailing 20-point mean
    epsilons -- exploration rate per episode (left axis, colour C0)
    filename -- path the figure is written to via plt.savefig
    lines    -- optional x positions to mark with vertical lines
    """
    fig = plt.figure()
    # Two overlapping axes in the same plot area: left = epsilon, right = score.
    ax = fig.add_subplot(111, label="1")
    ax2 = fig.add_subplot(111, label="2", frame_on=False)
    ax.plot(x, epsilons, color="C0")
    ax.set_xlabel("Game", color="C0")
    ax.set_ylabel("Epsilon", color="C0")
    ax.tick_params(axis='x', colors="C0")
    ax.tick_params(axis='y', colors="C0")
    # Trailing running average over (up to) the last 20 games.
    N = len(scores)
    running_avg = np.empty(N)
    for t in range(N):
        running_avg[t] = np.mean(scores[max(0, t-20):(t+1)])
    ax2.scatter(x, running_avg, color="C1")
    # Hide the second x-axis and move the score axis to the right edge.
    ax2.axes.get_xaxis().set_visible(False)
    ax2.yaxis.tick_right()
    ax2.set_ylabel('Score', color="C1")
    ax2.yaxis.set_label_position('right')
    ax2.tick_params(axis='y', colors="C1")
    if lines is not None:
        for line in lines:
            plt.axvline(x=line)
    plt.savefig(filename)
|
12,032 | 54bc7d12f43b2fbc2fe1497887c02b002d33b441 | # This program prints Hello, world!
print("Hello World")
12,033 | bf1b9f9203aea6e6d93a6efb0ecbfd49b978cf08 | from django.contrib import admin
from .models import Question,Choice
# Register your models here.
# Branding for the Django admin site.
admin.site.site_header = 'Pollon Admin'
admin.site.site_title = 'Pollon Admin'
admin.site.index_title = 'Welcome To Pollon Admin'
class ChoiceInline(admin.TabularInline):
    """Edit Choices inline on the Question change page."""
    model = Choice
    extra =2  # number of blank choice forms shown by default
class QuestionAdmin(admin.ModelAdmin):
    """Admin layout for Question: grouped field sections plus inline choices."""
    fieldsets=[(None,{'fields':['question_text']}),
    ('Date Information', {'fields':['pub_date'],'classes':['collapse']}),
    ('Open For All', {'fields':['open_for_all']}),
    ('Voted By', {'fields':['voted_by']}),
    ('Login Required', {'fields':['login_required']})
    ]
    inlines=[ChoiceInline]
admin.site.register(Question,QuestionAdmin)
admin.site.register(Choice)
12,034 | c7f922f0c056e2b45c8855efbc0755aa2a3bc680 | #!/usr/bin/python
# vim: set fileencoding=utf8
#
# Copyright (C) 2012 - Matt Brown
#
# All rights reserved.
#
# Example RRD commands for use with this script:
# rrdtool graph /tmp/test.png --start end-48h --lower-limit 0 \
# --title "Electrical Power Usage" DEF:power=/tmp/power.rrd:node1_kWh:AVERAGE \
# --vertical-label Watts --width 800 --height 600 \
# CDEF:watts=power,21600,\* LINE1:watts#ff0000:Power && eog /tmp/test.png
# rrdtool graph /tmp/test.png --start end-48h --lower-limit 0 \
# --title "Battery voltage" DEF:bat=/tmp/bat.rrd:node1_bat:AVERAGE \
# --vertical-label mV --width 800 --height 600 CDEF:mv=bat,50,\+,20,\* \
# LINE1:mv#ff0000:Voltage && eog /tmp/test.png
#
# Reads logger.py output and generates rrd updates.
import common
import optparse
import os
import rrdtool
import sys
START_TS = 1351378113  # RRD start epoch; samples before this are rejected by rrdtool
# Assuming 60s step size.
RRA_LAST = 'RRA:LAST:0.9:1:2628000' # 5 years of exact measurements.
RRA_5 = 'RRA:AVERAGE:0.9:5:1051200' # 10 years of 5min averages.
RRA_60 = 'RRA:AVERAGE:0.9:60:87600' # 10 years of 1hr averages.
RRAS = (RRA_LAST, RRA_5, RRA_60)  # archives attached to every newly created RRD
class RRDUpdater(common.Updater):
  """Updates RRDs based on a directory of logfiles.

  Receives metric samples through common.Updater's ReportMetric callback,
  batches all samples sharing a timestamp, and flushes each batch into
  per-datasource RRD files under state_dir, creating RRDs on demand.
  (Python 2 module -- note the old except/print syntax throughout.)
  """
  def __init__(self, state_dir, dry_run, debug=False):
    # self.history must be defined first to avoid infinite loop in setattr.
    self.rrds = []           # RRD file paths known to exist
    self.update_ts = None    # timestamp of the currently queued batch
    self.update_queue = {}   # ds name -> value, flushed when the timestamp changes
    super(RRDUpdater, self).__init__(state_dir, 'rrd-history.pickle', dry_run, debug)
  def CheckOrCreateRRD(self, ds):
    """Ensure the RRD backing datasource *ds* exists, creating it if needed."""
    rrd = self.RRDForDs(ds)
    if rrd in self.rrds:
      return
    if not os.path.exists(self.RRDForDs(ds)):
      self.CreateRRD(ds)
    else:
      self.rrds.append(rrd)
  def CreateRRD(self, ds):
    """Create a new RRD file for *ds* (no-op write under --dry_run).

    Battery/temperature datasources become GAUGEs; everything else is a
    COUNTER (e.g. cumulative energy readings).
    """
    if ds.endswith('bat') or ds.endswith('temp'):
      ds_type = 'GAUGE:3600:-50:255'
    else:
      ds_type = 'COUNTER:300:U:U'
    rrdfile = self.RRDForDs(ds)
    if not self.dry_run:
      try:
        rrdtool.create(rrdfile,
            '--start', str(START_TS), '--step', '60',
            ['DS:%s:%s' % (ds, ds_type)],
            *RRAS)
      except rrdtool.error, e:
        sys.stderr.write('ERROR: Could not create rrd %s for %s: %s\n' %
            (rrdfile, ds, e))
        sys.exit(1)
    self.rrds.append(rrdfile)
    print 'Created new RRD %s' % rrdfile
  def RRDForDs(self, ds):
    """Return the RRD file path for datasource *ds* inside state_dir."""
    return os.path.join(self.state_dir, '%s.rrd' % ds)
  def UpdateRRD(self, ts, updates):
    """Queue *updates* for timestamp *ts*, flushing the previous batch first."""
    if self.update_ts and self.update_ts != ts:
      self.FlushUpdateQueue()
    # Queue requested updates for insertion.
    self.update_queue.update(updates)
    self.update_ts = ts
  def FlushUpdateQueue(self):
    """Write every queued sample to its RRD, skipping samples older than the RRD's last update."""
    # Group queued datasources by target RRD file.
    files = {}
    for ds, val in self.update_queue.iteritems():
      self.CheckOrCreateRRD(ds)
      rrd = self.RRDForDs(ds)
      files.setdefault(rrd, {})
      files[rrd][ds] = val
    for rrd, data in files.iteritems():
      # rrdtool rejects updates at or before the last stored timestamp.
      if self.update_ts < self.LastUpdateFor(rrd):
        if self.debug:
          print 'ignoring update for %s, too old' % rrd, data
        continue
      keys = data.keys()
      datastr = ':'.join(['%s' % data[k] for k in keys])
      try:
        if not self.dry_run:
          rrdtool.update(rrd, '-t', ':'.join(keys),
              '%s:%s' % (int(self.update_ts), datastr))
        elif self.debug:
          print ('rrdtool update -t', ':'.join(keys),
              '%s:%s' % (int(self.update_ts), datastr))
      except rrdtool.error, e:
        # self.current_line is maintained by common.Updater while parsing.
        print e, 'from', self.update_queue, 'at', self.current_line
    self.update_queue = {}
  def LastUpdateFor(self, rrd):
    """Return (and memoize in self.latest_update) the last update time of *rrd*."""
    if self.dry_run and not os.path.exists(rrd):
      return 0
    if rrd not in self.latest_update:
      self.latest_update[rrd] = rrdtool.last(rrd)
    return self.latest_update[rrd]
  def FinishedProcessing(self):
    # Make sure the last report gets flushed.
    self.FlushUpdateQueue()
    # and whatever else our parent does.
    super(RRDUpdater, self).FinishedProcessing()
  def ReportMetric(self, node_id, metric, ts, value):
    """common.Updater callback: map one sample to ds name 'node<N>_<metric>' and queue it."""
    data = {'node%d_%s' % (node_id, metric): value}
    self.UpdateRRD(ts, data)
self.UpdateRRD(ts, data)
def main():
  """Parse flags and feed each logfile on the command line through RRDUpdater."""
  parser = optparse.OptionParser()
  parser.add_option('--dry_run', action='store_true', dest='dry_run')
  parser.add_option('--debug', action='store_true', dest='debug')
  parser.add_option('--state_dir', action='store', dest='state_dir')
  options, args = parser.parse_args()
  # FIX: optparse's args does NOT include the program name, so requiring
  # len(args) >= 2 wrongly rejected a single-logfile invocation even though
  # the usage string advertises 'logfile1 [logfile2, ...]'.
  if len(args) < 1:
    sys.stderr.write('Usage: %s [--dry_run] [--debug] [--state_dir foo] '
                     'logfile1 [logfile2, ...]\n' % sys.argv[0])
    sys.exit(1)
  updater = RRDUpdater(options.state_dir, options.dry_run, options.debug)
  updater.ProcessFiles(args)
  updater.PrintMeterSummary()
if __name__ == "__main__":
main()
# Vim modeline
# vim: set ts=2 sw=2 sts=2 et:
|
12,035 | 95c472d64bf11fb7b323e7ae12d0047d0f073203 | """
**************************************************************************
Theano Logistic Regression
**************************************************************************
This version was just for local testing (Vee ran his version for our SBEL batch jobs)
@author: Jason Feriante <feriante@cs.wisc.edu>
@date: 10 July 2015
**************************************************************************
logistic regression using Theano and stochastic gradient descent. Logistic regression is a
probabilistic, linear classifier. It is parametrized by a weight matrix :math:`W` and a bias vector :math:`b`.
Classification is done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability. Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of the vector whose i'th element is P(Y=i|x).
.. math:: y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method suitable for large datasets.
"""
import cPickle, time, os, sys, numpy, theano
from sklearn import metrics
import theano.tensor as T
from lib.theano import helpers
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W` and bias vector :math:`b`.
Classification is done by projecting data points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in which the labels lie
"""
# start-snippet-1
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True )
# initialize the baises b as a vector of n_out 0s
self.b = theano.shared(value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True )
# symbolic expression for computing the matrix of class-membership probabilities where:
# W is a matrix where column-k represent the separation hyper plain for class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of hyper plane-k
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size
"""
# start-snippet-2
# y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]
# T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class
# LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and
# T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch.
#print "y.ndim = ",y.ndim
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
    def errors(self, y):
        """Return the zero-one loss over the minibatch: the fraction of
        examples whose predicted class differs from the true label in ``y``.

        :type y: theano.tensor.TensorType
        :param y: vector giving the correct class label for each example
        """
        # check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError( 'y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type) )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
def sgd_optimization(data_type, target, model_dir, learning_rate=0.1, n_epochs=10, batch_size=100):
    """
    Demonstrate stochastic gradient descent optimization of a log-linear model.

    Trains a LogisticRegression classifier on one fold of the named dataset,
    early-stops on a validation set (here aliased to the training set),
    pickles the best model to ``model_dir`` and writes per-fold accuracy/AUC
    to a predictions file.

    :type data_type: str
    :param data_type: dataset name understood by ``helpers`` (e.g. 'Tox21')
    :type target: str
    :param target: assay/target name within the dataset
    :type model_dir: str
    :param model_dir: directory for the pickled model and predictions file
    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic gradient)
    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer
    :type batch_size: int
    :param batch_size: minibatch size (examples per gradient step)
    """
    test_fold = 1 #xxxxxxxxxxxx TEMP XXXXXXXXXXXXXXXX
    write_model_file = model_dir + '/model.' + target + '.' + str(test_fold) +'.pkl'
    fold_path = helpers.get_fold_path(data_type)
    targets = helpers.build_targets(fold_path, data_type)
    fnames = targets[target]
    fold_accuracies = {}
    did_something = False
    # pct_ct = []
    # roc_auc = []
    # run 4 folds vs 1 fold with each possible scenario
    # for curr_fl in range(5):
    #     print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
    # loop through all folds, for now just do 1!
    datasets, test_set_labels = helpers.th_load_data(data_type, fold_path, target, fnames, 0, test_fold)
    train_set_x, train_set_y = datasets[0]
    test_set_x, test_set_y = datasets[1]
    # NOTE(review): validation set aliases the training set, so early
    # stopping here monitors training error, not held-out error.
    valid_set_x = train_set_x
    valid_set_y = train_set_y
    # compute number of rows for training, validation and testing
    rows_train = train_set_x.get_value(borrow=True).shape[0]
    rows_valid = valid_set_x.get_value(borrow=True).shape[0]
    rows_test = test_set_x.get_value(borrow=True).shape[0]
    # compute number of minibatches for training, validation and testing
    # (Python 2 integer division: any ragged final partial batch is dropped)
    n_train_batches = rows_train / batch_size
    n_valid_batches = rows_valid / batch_size
    n_test_batches = rows_test / batch_size
    ####################### BUILD ACTUAL MODEL #######################
    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    # generate symbolic variables for input (x and y represent a minibatch)
    x = T.matrix('x')  # data, presented as rasterized images
    y = T.ivector('y')  # labels, presented as 1D vector of [int] labels
    # construct the logistic regression class
    # n_in: Each MNIST image has size 32*32 = 1024
    # n_out: 10 different digits - multi-task LR
    classifier = LogisticRegression(input=x, n_in=32 * 32, n_out=2)
    # the cost we minimize during training is the negative log likelihood of the model in symbolic format
    cost = classifier.negative_log_likelihood(y)
    # compiling a Theano function that computes the mistakes that are made by the model on a minibatch
    test_model = theano.function( inputs=[index], outputs=classifier.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # compute the gradient of cost with respect to theta = (W,b)
    g_W = T.grad(cost=cost, wrt=classifier.W)
    g_b = T.grad(cost=cost, wrt=classifier.b)
    # start-snippet-3
    # specify how to update the parameters of the model as a list of
    # (variable, update expression) pairs.
    updates = [(classifier.W, classifier.W - learning_rate * g_W),
               (classifier.b, classifier.b - learning_rate * g_b)]
    # compiling a Theano function `train_model` that returns the cost, but in
    # the same time updates the parameter of the model based on the rules
    # defined in `updates`
    train_model = theano.function( inputs=[index], outputs=cost, updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-3
    ################ TRAIN MODEL ################
    # early-stopping parameters
    patience = 5000  # look as this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is found
    improvement_threshold = 0.995  # a relative improvement of this much is considered significant
    validation_frequency = min(n_train_batches, patience / 2)
    # go through this many minibatches before checking the network on the validation set; in this case we check every epoch
    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = time.clock()
    done_looping = False
    epoch = 0
    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_model(minibatch_index)
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index
            if (iter + 1) % validation_frequency == 0:
                # compute zero-one loss on validation set
                validation_losses = [validate_model(i)
                                     for i in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                # print( 'epoch %i, minibatch %i/%i, validation error %f %%' %
                #     (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.) )
                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    #improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)
                    best_validation_loss = this_validation_loss
                    # test it on the test set
                    test_losses = [test_model(i)
                                   for i in xrange(n_test_batches)]
                    test_score = numpy.mean(test_losses)
                    # print( (' epoch %i, minibatch %i/%i, test error of best model %f %%' ) %
                    #     ( epoch, minibatch_index + 1, n_train_batches, test_score * 100. ) )
                    # save the best model
                    # NOTE(review): pickling to a text-mode 'w' handle; 'wb'
                    # is the safe mode for pickle — TODO confirm.
                    with open(write_model_file, 'w') as f:
                        cPickle.dump(classifier, f)
            if patience <= iter:
                done_looping = True
                break
    end_time = time.clock()
    print( ('Optimization complete for %d with best validation score of %f %% with test performance %f %%')
           % (test_fold, best_validation_loss * 100., test_score * 100.) )
    print 'The code ran for %d epochs, with %f epochs/sec' % (epoch, 1. * epoch / (end_time - start_time))
    # print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.1fs' % ((end_time - start_time)))
    # end-snippet-4
    # Now we do the predictions
    # load the saved best model for this fold
    classifier = cPickle.load(open(write_model_file))
    # compile a predictor function
    predict_model = theano.function(inputs=[classifier.input], outputs=[classifier.y_pred,classifier.p_y_given_x])
    # compile a confidence predictor function
    # predict_conf_model = theano.function( inputs=[classifier.input], outputs=classifier.p_y_given_x)
    # We can test it on some examples from test test
    """ *************** build AUC curve *************** """
    # get the probability of our predictions
    test_set = test_set_x.get_value()
    predicted_values, conf_preds = predict_model(test_set[:(rows_test)])
    conf_predictions = []
    for i in range(len(conf_preds)):
        # ignore the first column; this gives a lower score that seems wrong.
        conf_predictions.append(conf_preds[i][1])
    # determine ROC / AUC (thresholds returned by roc_curve are unused)
    fpr, tpr, thresholds = metrics.roc_curve(test_set_labels, conf_predictions)
    auc = metrics.auc(fpr, tpr)  # e.g. 0.855
    """ *********************************************** """
    num_correct = 0
    num_false = 0
    for i in range(len(predicted_values)):
        if predicted_values[i] == test_set_labels[i]:
            num_correct += 1
        else:
            num_false += 1
    total = len(predicted_values)
    percent_correct = num_correct / float(total)
    fold_results = ''
    fold_results += '#################### Results for ' + data_type + ' ####################' + '\n'
    fold_results += 'target:' + target + ' fold:' + str(test_fold) + ' predicted: ' + \
                    str(total) + ' wrong: ' + \
                    str(num_false) + ' pct correct: ' + str(percent_correct) + ', auc: ' + str(auc)
    print fold_results
    write_predictions_file = model_dir + '/predictions.' + target + '.' + str(test_fold) +'.txt'
    with open(write_predictions_file, 'w') as f:
        f.write(fold_results + "\n")
# def run_predictions(data_type, curr_target):
# fold_path = get_fold_path(data_type)
# targets = build_targets(fold_path, data_type)
# # print "Found " + str(len(targets)) + " targets for " + data_type
# fold_accuracies = {}
# did_something = False
# for target, fnames in targets.iteritems():
# if (target != curr_target):
# continue
# else:
# did_something = True
# # retrieve our stratified folds
# folds = get_folds(data_type, fold_path, target, fnames)
# pct_ct = []
# roc_auc = []
# # run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# # folds 1-4
# temp_data = []
# for i in range(len(folds)):
# if(i == curr_fl):
# # don't include the test fold
# continue
# else:
# temp_data += folds[i]
# # vs current 5th test fold
# test_data = folds[curr_fl]
# """ Turning 1024 bits into features is a slow process """
# # build training data
# X = []
# Y = []
# for i in range(len(temp_data)):
# row = []
# for bit in temp_data[i][0]:
# row.append(int(bit))
# X.append(row)
# Y.append(int(temp_data[i][1]))
# X = np.array(X)
# Y = np.array(Y)
# # build test data
# X_test = []
# Y_test = []
# for i in range(len(test_data)):
# row = []
# for bit in test_data[i][0]:
# row.append(int(bit))
# X_test.append(row)
# Y_test.append(int(test_data[i][1]))
# X_test = np.array(X_test)
# Y_test = np.array(Y_test)
# percent_correct, auc = random_forest(target, X, Y, X_test, Y_test, curr_fl)
# pct_ct.append(percent_correct)
# roc_auc.append(auc)
# # now get the average fold results for this target
# accuracy = sum(pct_ct) / float(len(pct_ct))
# all_auc = sum(roc_auc) / float(len(roc_auc))
# print 'Results for '+ target + ': accuracy: ' + str(accuracy) + ', auc: ' + str(all_auc)
# # update fold accuracies
# fold_accuracies[target] = (accuracy, all_auc)
    # NOTE(review): the block below looks like a leftover tail of the
    # commented-out run_predictions() above; because that def line is now a
    # comment, these lines still parse as part of sgd_optimization().
    # did_something is False here (nothing in this function sets it True) and
    # curr_target is never defined, so reaching this code raises NameError —
    # TODO confirm and remove.
    if(did_something == False):
        print curr_target + ' not found in ' + data_type + '!'
        exit(0)
    print '#################### Results for ' + data_type + ' ####################'
    # output results
    accuracies = 0.00
    aucs = 0.00
    num_targets = 0.00
    for target, obj in fold_accuracies.iteritems():
        acc = obj[0]
        auc = obj[1]
        print target + ' accuracy: ' + str(acc) + ', auc:' + str(auc)
        accuracies += acc
        aucs += auc
        num_targets += 1
    # overall_acc = accuracies / num_targets
    # overall_auc = aucs / num_targets
    # print ' overall accuracy: ' + str(overall_acc) + ', overall auc: ' + str(overall_auc)
    print '############################################################'
def main(args):
    """CLI entry point.

    ``args`` is ``sys.argv``: args[1] is the dataset key (tox21, dud_e, muv,
    pcba; 'dude' tolerated as a typo), args[2] the target name or a numeric
    index into the dataset's target list.
    """
    if(len(args) < 3 or len(args[2]) < 1):
        print 'usage: <tox21, dud_e, muv, or pcba> <target> '
        return
    dataset = args[1]
    target = args[2]
    # in case of typos
    if(dataset == 'dude'):
        dataset = 'dud_e'
    print "Running Theano Logistic Regression for " \
          + dataset + "........."
    is_numeric = helpers.is_numeric(target)
    if(is_numeric):
        # a numeric target is an index into the dataset's target list
        target_list = helpers.get_target_list(dataset)
        target = target_list[int(target)]
    model_dir = 'theano_saved/logistic_regression'
    if(dataset == 'tox21'):
        sgd_optimization('Tox21', target, model_dir)
    elif(dataset == 'dud_e'):
        sgd_optimization('DUD-E', target, model_dir)
    elif(dataset == 'muv'):
        sgd_optimization('MUV', target, model_dir)
    elif(dataset == 'pcba'):
        sgd_optimization('PCBA', target, model_dir)
    else:
        print 'dataset param not found. options: tox21, dud_e, muv, or pcba'
if __name__ == '__main__':
    # time the whole run (time.clock() is Python-2 era; removed in Py 3.8)
    start_time = time.clock()
    main(sys.argv)
    end_time = time.clock()
    print 'runtime: %.2f secs.' % (end_time - start_time)
|
12,036 | b4536be5fbabc4091845cfd8e15d08846001ab9b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Nekozilla is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Nekozilla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nekozilla. If not, see <https://www.gnu.org/licenses/>.
"""
Application entry point. This loads any modules, DMing the owner if there is
any error, and then starts the bot.
If a path is provided as the first argument, we look in this path directory
for any configuration files, otherwise, we assume ../neko2config.
"""
import asyncio
import logging
import os
import sys
import warnings
from neko3 import bot as client
from neko3 import configuration_files
from neko3 import logging_utils
from neko3 import module_detection
LOGGERS_TO_SUPPRESS = ["discord.http"]
SUPPRESS_TO_LEVEL = "FATAL"
def cli():
    """Configure logging, load bot modules, and run the bot until it stops.

    Reads NEKO3_TOKEN, NEKO3_CLIENT_ID and NEKO3_OWNER_ID from the
    environment (KeyError if any is missing); NEKO3_PREFIX defaults to "n.".
    """
    NEKO3_TOKEN = os.environ["NEKO3_TOKEN"]
    NEKO3_CLIENT_ID = os.environ["NEKO3_CLIENT_ID"]
    NEKO3_OWNER_ID = os.environ["NEKO3_OWNER_ID"]
    NEKO3_PREFIX = os.getenv("NEKO3_PREFIX", "n.")
    config = dict(
        bot=dict(
            command_prefix=NEKO3_PREFIX,
            owner_id=int(NEKO3_OWNER_ID),
        ),
        auth=dict(
            client_id=NEKO3_CLIENT_ID,
            token=NEKO3_TOKEN,
        ),
        debug=False,
    )
    logging_kwargs = {
        "level": os.getenv("LOGGER_LEVEL", "INFO"),
        "format": "%(asctime)s.%(msecs)03d L:%(levelname)s M:%(module)s F:%(funcName)s: %(message)s",
        "datefmt": "%Y-%m-%d %H:%M:%S",
    }
    logging.basicConfig(**logging_kwargs)
    logger = logging.getLogger("neko3")
    for noisy_logger_name in LOGGERS_TO_SUPPRESS:
        logging.getLogger(noisy_logger_name).setLevel(SUPPRESS_TO_LEVEL)
    # uvloop is an optional dependency; fall back to the default event loop
    # when it is absent.  FIX: this was a bare `except:`, which also
    # swallowed KeyboardInterrupt/SystemExit and masked real errors raised
    # by uvloop.install() itself.
    try:
        import uvloop
        uvloop.install()
        logging.info("Using uvloop for asyncio event loop")
    except ImportError:
        logging.info("Using default asyncio event loop")
    # Shuts up BeautifulSoup about every first world problem
    warnings.filterwarnings(action="ignore", category=UserWarning)
    loop = asyncio.get_event_loop()
    try:
        with client.Bot(loop, config) as bot:
            module_detection.ModuleDetectionService().auto_load_modules(bot)
            try:
                loop.run_until_complete(bot.run(bot.token))
            except client.BotInterrupt as ex:
                logger.critical(f"Received interrupt {ex}")
            except Exception as ex:
                logger.exception("An unrecoverable error occurred.", exc_info=ex)
            else:
                logger.info("The bot stopped executing as expected")
            # best-effort logout; a second interrupt here just gets logged
            try:
                if bot.logged_in:
                    loop.run_until_complete(bot.logout())
            except client.BotInterrupt:
                bot.logger.critical("Asked to shut down AGAIN, guess I will die")
            except Exception as ex:
                bot.logger.exception("Giving up all hope of a safe exit", exc_info=ex)
    finally:
        # NOTE(review): if client.Bot(...) itself raises, `bot` is unbound
        # here and this line raises NameError — TODO confirm intended.
        bot.logger.critical("Process is terminating NOW.")
|
12,037 | bc179e54f21b970c9d226380b1c7307512496910 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The VMware API utility module.
"""
def build_selection_spec(client_factory, name):
    """Build a SelectionSpec carrying the given traversal-spec *name*."""
    spec = client_factory.create('ns0:SelectionSpec')
    spec.name = name
    return spec
def build_traversal_spec(client_factory, name, spec_type, path, skip,
                         select_set):
    """Build a TraversalSpec describing one hop through the inventory.

    ``spec_type``/``path`` name the managed-object type and property to walk,
    ``skip`` controls whether the traversed object itself is collected, and
    ``select_set`` lists the specs applied at the destination.
    """
    spec = client_factory.create('ns0:TraversalSpec')
    spec.name = name
    spec.type = spec_type
    spec.path = path
    spec.skip = skip
    spec.selectSet = select_set
    return spec
def build_recursive_traversal_spec(client_factory):
    """
    Builds the Recursive Traversal Spec to traverse the object managed
    object hierarchy.

    :param client_factory: suds-style factory used to create the spec objects
    :returns: the root "visitFolders" TraversalSpec, wired so that folders,
        datacenters, compute resources, resource pools, hosts and VMs are all
        reachable recursively from the root folder
    """
    visit_folders_select_spec = build_selection_spec(client_factory,
                                                     "visitFolders")
    # For getting to hostFolder from datacenter
    dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter",
                                    "hostFolder", False,
                                    [visit_folders_select_spec])
    # For getting to vmFolder from datacenter
    dc_to_vmf = build_traversal_spec(client_factory, "dc_to_vmf", "Datacenter",
                                     "vmFolder", False,
                                     [visit_folders_select_spec])
    # For getting Host System to virtual machine
    h_to_vm = build_traversal_spec(client_factory, "h_to_vm", "HostSystem",
                                   "vm", False,
                                   [visit_folders_select_spec])
    # For getting to Host System from Compute Resource
    cr_to_h = build_traversal_spec(client_factory, "cr_to_h",
                                   "ComputeResource", "host", False, [])
    # For getting to datastore from Compute Resource
    cr_to_ds = build_traversal_spec(client_factory, "cr_to_ds",
                                    "ComputeResource", "datastore", False, [])
    # selection specs referring (by name) to the rp_to_rp / rp_to_vm
    # traversal specs defined below, enabling the recursion
    rp_to_rp_select_spec = build_selection_spec(client_factory, "rp_to_rp")
    rp_to_vm_select_spec = build_selection_spec(client_factory, "rp_to_vm")
    # For getting to resource pool from Compute Resource
    cr_to_rp = build_traversal_spec(client_factory, "cr_to_rp",
                                    "ComputeResource", "resourcePool", False,
                                    [rp_to_rp_select_spec, rp_to_vm_select_spec])
    # For getting to child res pool from the parent res pool
    rp_to_rp = build_traversal_spec(client_factory, "rp_to_rp", "ResourcePool",
                                    "resourcePool", False,
                                    [rp_to_rp_select_spec, rp_to_vm_select_spec])
    # For getting to Virtual Machine from the Resource Pool
    rp_to_vm = build_traversal_spec(client_factory, "rp_to_vm", "ResourcePool",
                                    "vm", False,
                                    [rp_to_rp_select_spec, rp_to_vm_select_spec])
    # Get the assorted traversal spec which takes care of the objects to
    # be searched for from the root folder
    traversal_spec = build_traversal_spec(client_factory, "visitFolders",
                                          "Folder", "childEntity", False,
                                          [visit_folders_select_spec, dc_to_hf,
                                           dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp,
                                           rp_to_rp, h_to_vm, rp_to_vm])
    return traversal_spec
def build_property_spec(client_factory, type="VirtualMachine",
                        properties_to_collect=None,
                        all_properties=False):
    """Builds the Property Spec.

    ``properties_to_collect`` defaults to ``["name"]``.  FIX: the original
    used a mutable list default which was aliased into ``pathSet``; a caller
    mutating the returned spec's pathSet would have corrupted the default
    for every later call.
    """
    if properties_to_collect is None:
        properties_to_collect = ["name"]
    property_spec = client_factory.create('ns0:PropertySpec')
    property_spec.all = all_properties
    property_spec.pathSet = properties_to_collect
    property_spec.type = type
    return property_spec
def build_object_spec(client_factory, root_folder, traversal_specs):
    """Build an ObjectSpec rooted at *root_folder* with the given traversals."""
    spec = client_factory.create('ns0:ObjectSpec')
    spec.obj = root_folder
    spec.skip = False
    spec.selectSet = traversal_specs
    return spec
def build_property_filter_spec(client_factory, property_specs, object_specs):
    """Combine property specs and object specs into one PropertyFilterSpec."""
    filter_spec = client_factory.create('ns0:PropertyFilterSpec')
    filter_spec.propSet = property_specs
    filter_spec.objectSet = object_specs
    return filter_spec
def get_object_properties(vim, collector, mobj, type, properties):
    """Retrieve the named *properties* of one managed object *mobj*.

    Returns None when *mobj* is None.  When *collector* is None the service
    content's default property collector is used.  An empty/None *properties*
    list requests all properties.
    """
    factory = vim.client.factory
    if mobj is None:
        return None
    prop_collector = collector
    if prop_collector is None:
        prop_collector = vim.get_service_content().propertyCollector
    prop_spec = factory.create('ns0:PropertySpec')
    prop_spec.all = (properties is None or len(properties) == 0)
    prop_spec.pathSet = properties
    prop_spec.type = type
    obj_spec = factory.create('ns0:ObjectSpec')
    obj_spec.obj = mobj
    obj_spec.skip = False
    filter_spec = factory.create('ns0:PropertyFilterSpec')
    filter_spec.propSet = [prop_spec]
    filter_spec.objectSet = [obj_spec]
    return vim.RetrieveProperties(prop_collector, specSet=[filter_spec])
def get_dynamic_property(vim, mobj, type, property_name):
    """Return one property value of *mobj*, or None when it is absent."""
    contents = get_object_properties(vim, None, mobj, type, [property_name])
    if not contents:
        return None
    prop_set = contents[0].propSet
    if not prop_set:
        return None
    return prop_set[0].val
def get_objects(vim, type, properties_to_collect=None, all=False):
    """Gets the list of objects of the type specified.

    ``properties_to_collect`` defaults to ``["name"]``.  FIX: the original
    used a shared mutable list default — mutating the collected pathSet on a
    returned spec would have changed the default for all later calls.
    """
    if properties_to_collect is None:
        properties_to_collect = ["name"]
    client_factory = vim.client.factory
    # one object spec rooted at the inventory root, walked recursively
    object_spec = build_object_spec(client_factory,
                                    vim.get_service_content().rootFolder,
                                    [build_recursive_traversal_spec(client_factory)])
    property_spec = build_property_spec(client_factory, type=type,
                                        properties_to_collect=properties_to_collect,
                                        all_properties=all)
    property_filter_spec = build_property_filter_spec(client_factory,
                                                      [property_spec],
                                                      [object_spec])
    return vim.RetrieveProperties(vim.get_service_content().propertyCollector,
                                  specSet=[property_filter_spec])
def get_prop_spec(client_factory, spec_type, properties):
    """Build a PropertySpec for *spec_type* collecting *properties*."""
    spec = client_factory.create('ns0:PropertySpec')
    spec.type = spec_type
    spec.pathSet = properties
    return spec
def get_obj_spec(client_factory, obj, select_set=None):
    """Build an ObjectSpec for *obj*; selectSet is only set when provided."""
    spec = client_factory.create('ns0:ObjectSpec')
    spec.obj = obj
    spec.skip = False
    if select_set is not None:
        spec.selectSet = select_set
    return spec
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
    """Build a PropertyFilterSpec from object specs and property specs."""
    filter_spec = client_factory.create('ns0:PropertyFilterSpec')
    filter_spec.propSet = prop_spec
    filter_spec.objectSet = obj_spec
    return filter_spec
def get_properties_for_a_collection_of_objects(vim, type,
                                               obj_list, properties):
    """
    Gets the list of properties for the collection of
    objects of the type specified.  Returns [] for an empty collection.
    """
    client_factory = vim.client.factory
    if not obj_list:
        return []
    prop_spec = get_prop_spec(client_factory, type, properties)
    obj_specs = [get_obj_spec(client_factory, obj) for obj in obj_list]
    filter_spec = get_prop_filter_spec(client_factory,
                                       obj_specs, [prop_spec])
    return vim.RetrieveProperties(vim.get_service_content().propertyCollector,
                                  specSet=[filter_spec])
|
12,038 | aad1ba7b2573b4b702d6e0e2d6e71c78a0647c5d | employee=[(80, "rajat", 414),(84, "rithik", 261),(82, "nipun", 9052),(83, "harshil", 369)]
print("print the employee list----------->")
# each record is an (id, name, value) tuple
for i in employee:
    print(i)
print()
# tuples are immutable, so replacing a record means assigning a new tuple
employee[0]=(82,"rakshit",463)
print("after modification---------->")
for i in employee:
    print(i)
|
12,039 | e4bf7b14138fe801091b2d1dae9db900778890fc | import urllib.request
def read_text():
quotes=open("profanity.txt")
contents=quotes.read()
print(contents)
quotes.close()
check_profanity(contents)
def check_profanity(text_to_check):
connection=urllib.request.urlopen("http://www.wdyl.com/profanity?q="+text_to_check)
## 404
output=connection.read()
print(output)
read_text() |
12,040 | 767c6bc033e64fb519c09d04985bc8ff5dd98d2d | rows = int(input())
matrix = []
diagonal_sum = 0
# read `rows` lines of space-separated ints; accumulate the main diagonal
# element of each row as it is read
for row in range(rows):
    matrix.append([int(el) for el in input().split()])
    diagonal_sum += matrix[row][row]
print(diagonal_sum)
12,041 | 5f7e3fe83a4849d7fbb946d9450ab5db196e9243 | import numpy as np
def preprocess(text):
    """Tokenise *text* into a word-id corpus.

    Lower-cases the text, separates the final period into its own token,
    and assigns ids in order of first appearance.

    Returns (corpus, word_to_id, id_to_word) where corpus is a numpy array
    of word ids, one per token.
    """
    words = text.lower().replace('.', ' .').split(' ')
    word_to_id = {}
    id_to_word = {}
    for word in words:
        if word not in word_to_id:
            next_id = len(word_to_id)
            word_to_id[word] = next_id
            id_to_word[next_id] = word
    corpus = np.array([word_to_id[w] for w in words])
    return corpus, word_to_id, id_to_word
# demo: build and show the corpus for a small sentence
text = 'You say goodbye and I say hello.'
corpus, word_to_id, id_to_word = preprocess(text)
print(corpus)
print(word_to_id)
print(id_to_word)
# expected output:
'''
[0 1 2 3 4 1 5 6]
{'you': 0, 'say': 1, 'goodbye': 2, 'and': 3, 'i': 4, 'hello': 5, '.': 6}
{0: 'you', 1: 'say', 2: 'goodbye', 3: 'and', 4: 'i', 5: 'hello', 6: '.'}
'''
def sort_dict(dict1, sb):
    """Return *dict1* ordered by 'key' or 'value' (selector is
    case-insensitive); any other selector returns the dict unchanged."""
    mode = sb.lower()
    if mode == 'value':
        return dict(sorted(dict1.items(), key=lambda item: item[1]))
    if mode == 'key':
        return dict(sorted(dict1.items()))
    return dict1
def main():
    """Prompt for a sort mode and print a sample dict sorted that way."""
    count_dict = {'a': 4, 'b': 3, 'c': 2, 'd': 1}
    sb_in = input("Enter if sorting should be based on 'key' or 'value': ")
    print(sort_dict(count_dict, sb_in))
if __name__ == '__main__':
    main()
12,043 | cad003637271b6c4ac5419f70e12a73669dce5d7 | #!/usr/bin/python
import sys
import codecs
import re
def calculate_g0_dice(ngram_frequency_distr,unigram_frequency_distr):
    """Rank n-grams by the Dice coefficient:
    n * f(ngram) / sum(f(word) for word in ngram).

    Relies on the module globals ``ngram_length`` and ``min_frequency``.
    Returns (score, ngram) tuples sorted best-first.
    """
    result=[]
    for ngram in ngram_frequency_distr:
        if ngram_frequency_distr[ngram]>=min_frequency: #minimum frequency
            result.append((float(ngram_length)*ngram_frequency_distr[ngram]/sum([unigram_frequency_distr[e] for e in ngram]),ngram))
    return sorted(result,reverse=True)
def calculate_g0_mi(ngram_frequency_distr,unigram_frequency_distr):
    """Rank n-grams by pointwise mutual information:
    log(P(ngram) / prod(P(word))).

    Uses the module globals ``min_frequency`` and the ``product`` helper.
    Returns (score, ngram) tuples sorted best-first.
    """
    result=[]
    ngram_probability=relativize_distr(ngram_frequency_distr)
    unigram_probability=relativize_distr(unigram_frequency_distr)
    from math import log
    for ngram in ngram_probability:
        if ngram_frequency_distr[ngram]>=min_frequency: # minimum frequency
            #result.append((log(ngram_frequency_distr[ngram]*ngram_probability[ngram]/product([unigram_probability[e] for e in ngram])),ngram))
            result.append(((log(ngram_probability[ngram]/product([unigram_probability[e] for e in ngram]))),ngram))
    return sorted(result,reverse=True)
def calculate_g0_ll(ngram_frequency):
    """Rank n-grams by the log-likelihood ratio statistic (G^2) computed
    from the full observed/expected contingency table.

    NOTE(review): iterates the global ``lemma_token_frequency_distr`` rather
    than the ``ngram_frequency`` argument — presumably the same
    distribution; confirm.  Also uses the globals ``min_frequency`` and
    ``ngram_length`` (2..4 selects observed2..4 / expected2..4).
    """
    result=[]
    from math import log
    observed_functions=[observed2,observed3,observed4]
    expected_functions=[expected2,expected3,expected4]
    for ngram in lemma_token_frequency_distr:
        if ngram_frequency[ngram]>=min_frequency:
            marginals=[ngram_frequency[e] for e in create_ngram_star(ngram)]
            observed=observed_functions[ngram_length-2](marginals)
            expected=expected_functions[ngram_length-2](observed)
            ll=0.0
            for o,e in zip(observed,expected):
                try:
                    ll+=o*log(o/e)
                except:
                    # zero cells (log of 0 or division by 0) contribute nothing
                    pass
            result.append((2*ll,ngram))
    return sorted(result,reverse=True)
def calculate_g0_chisq(ngram_frequency):
    """Rank n-grams by Pearson's chi-square statistic over the full
    observed/expected contingency table.

    Same global-variable caveats as calculate_g0_ll (iterates
    ``lemma_token_frequency_distr``; uses ``min_frequency``/``ngram_length``).
    """
    result=[]
    observed_functions=[observed2,observed3,observed4]
    expected_functions=[expected2,expected3,expected4]
    for ngram in lemma_token_frequency_distr:
        if ngram_frequency[ngram]>=min_frequency:
            marginals=[ngram_frequency[e] for e in create_ngram_star(ngram)]
            observed=observed_functions[ngram_length-2](marginals)
            expected=expected_functions[ngram_length-2](observed)
            chisq=0.0
            for o,e in zip(observed,expected):
                try:
                    chisq+=(o-e)**2/e
                except:
                    # expected count of 0 → skip the cell
                    pass
            result.append((chisq,ngram))
    return sorted(result,reverse=True)
def calculate_g0_tscore(ngram_frequency):
    """Rank n-grams by the t-score: (O - E) / sqrt(E) for the all-present
    cell of the contingency table.

    Same global-variable caveats as calculate_g0_ll (iterates
    ``lemma_token_frequency_distr``; uses ``min_frequency``/``ngram_length``).
    """
    from math import sqrt
    result=[]
    observed_functions=[observed2,observed3,observed4]
    expected_functions=[expected2,expected3,expected4]
    for ngram in lemma_token_frequency_distr:
        if ngram_frequency[ngram]>=min_frequency:
            marginals=[ngram_frequency[e] for e in create_ngram_star(ngram)]
            observed=observed_functions[ngram_length-2](marginals)
            expected=expected_functions[ngram_length-2](observed)
            # observed[0]/expected[0] is the cell where all words co-occur
            tscore=float(observed[0]-expected[0])/sqrt(expected[0])
            result.append((tscore,ngram))
    return sorted(result,reverse=True)
def observed2(marginals):
    """Recover the four observed 2x2 contingency cells from bigram marginal
    counts (n_ii, n_ix, n_xi, n_xx); 'x' means summed out."""
    n_ii, n_ix, n_xi, n_xx = marginals
    n_oi = n_xi - n_ii
    n_io = n_ix - n_ii
    n_oo = n_xx - n_ii - n_oi - n_io
    return (n_ii, n_io, n_oi, n_oo)
def expected2(observed):
    """Expected 2x2 cell counts under independence, from the observed cells
    (n_ii, n_io, n_oi, n_oo): row total * column total / grand total."""
    n_ii, n_io, n_oi, n_oo = observed
    total = float(n_ii + n_oi + n_io + n_oo)
    row_i = n_ii + n_io
    row_o = n_oi + n_oo
    col_i = n_ii + n_oi
    col_o = n_io + n_oo
    return (row_i * col_i / total,
            row_i * col_o / total,
            row_o * col_i / total,
            row_o * col_o / total)
def expected_nltk(contingency):
    """Yield NLTK-style expected counts for a flat 2x2 contingency list.

    For flat index i, i^1 is the row partner and i^2 the column partner,
    so each expected value is row-total * column-total / grand-total.
    """
    total = float(sum(contingency))
    for i in range(4):
        yield (contingency[i] + contingency[i ^ 1]) * (contingency[i] + contingency[i ^ 2]) / total
def observed3(marginals):
    """Recover the eight observed 2x2x2 contingency cells from trigram
    marginal counts (order: iii, iix, ixi, ixx, xii, xix, xxi, xxx)."""
    (n_iii, n_iix, n_ixi, n_ixx,
     n_xii, n_xix, n_xxi, n_xxx) = marginals
    # one-'o' cells: subtract the all-present cell from each 2-fixed marginal
    n_oii = n_xii - n_iii
    n_ioi = n_ixi - n_iii
    n_iio = n_iix - n_iii
    # two-'o' cells
    n_ooi = n_xxi - n_iii - n_oii - n_ioi
    n_oio = n_xix - n_iii - n_oii - n_iio
    n_ioo = n_ixx - n_iii - n_ioi - n_iio
    # remainder
    n_ooo = (n_xxx - n_iii - n_oii - n_ioi - n_iio
             - n_ooi - n_oio - n_ioo)
    return (n_iii, n_iio, n_ioi, n_oii, n_ioo, n_ooi, n_oio, n_ooo)
def expected3(observed):
    """Expected 2x2x2 cell counts under full independence: for each cell,
    the product of its three axis-marginals divided by the grand total
    (note: not total**2; this mirrors the original formulation)."""
    o_iii, o_iio, o_ioi, o_oii, o_ioo, o_ooi, o_oio, o_ooo=observed
    n_xxx=float(o_iii+o_iio+o_ioi+o_oii+o_ioo+o_ooi+o_oio+o_ooo)
    e_iii=(o_iii+o_oii)*(o_iii+o_ioi)*(o_iii+o_iio)/n_xxx
    e_iio=(o_iio+o_oio)*(o_iio+o_ioi)*(o_iio+o_iii)/n_xxx
    e_ioi=(o_ioi+o_ooi)*(o_ioi+o_iii)*(o_ioi+o_ioo)/n_xxx
    e_oii=(o_oii+o_iii)*(o_oii+o_ooi)*(o_oii+o_oio)/n_xxx
    e_ioo=(o_ioo+o_ooo)*(o_ioo+o_iio)*(o_ioo+o_ioi)/n_xxx
    e_ooi=(o_ooi+o_ioi)*(o_ooi+o_oii)*(o_ooi+o_ooo)/n_xxx
    e_oio=(o_oio+o_iio)*(o_oio+o_ooo)*(o_oio+o_oii)/n_xxx
    e_ooo=(o_ooo+o_ioo)*(o_ooo+o_oio)*(o_ooo+o_ooi)/n_xxx
    return (e_iii,e_iio,e_ioi,e_oii,e_ioo,e_ooi,e_oio,e_ooo)
def observed4(marginals):
    """Reconstruct the 16 observed 2x2x2x2 contingency cells from 4-gram
    marginal counts.

    ``marginals`` order: (n_iiii, n_xiii, n_ixii, n_iixi, n_iiix, n_xxii,
    n_ixxi, n_iixx, n_xixi, n_xiix, n_ixix, n_xxxi, n_xxix, n_xixx,
    n_ixxx, n_xxxx), where 'i' marks a fixed position and 'x' a summed-out
    one.  Returns the cells in the order expected by expected4().
    """
    n_iiii,n_xiii,n_ixii,n_iixi,n_iiix,n_xxii,n_ixxi,n_iixx,n_xixi,n_xiix,n_ixix,n_xxxi,n_xxix,n_xixx,n_ixxx,n_xxxx=marginals
    o_iiii=n_iiii
    # single-'o' cells: subtract the all-present cell from each 3-fixed marginal
    o_oiii=n_xiii-n_iiii
    o_ioii=n_ixii-n_iiii
    o_iioi=n_iixi-n_iiii
    o_iiio=n_iiix-n_iiii
    # double-'o' cells
    o_ooii=n_xxii-n_iiii-o_oiii-o_ioii
    o_iooi=n_ixxi-n_iiii-o_ioii-o_iioi
    o_iioo=n_iixx-n_iiii-o_iioi-o_iiio
    o_oioi=n_xixi-n_iiii-o_oiii-o_iioi
    o_ioio=n_ixix-n_iiii-o_ioii-o_iiio
    # FIX: o_oiio must be derived from n_xiix (positions 2 and 3 fixed);
    # the original line was a copy of the o_iooi computation (n_ixxi).
    o_oiio=n_xiix-n_iiii-o_oiii-o_iiio
    # triple-'o' cells
    o_oooi=n_xxxi-n_iiii-o_oiii-o_ioii-o_iioi-o_ooii-o_oioi-o_iooi
    o_ooio=n_xxix-n_iiii-o_oiii-o_ioii-o_iiio-o_ooii-o_oiio-o_ioio
    o_oioo=n_xixx-n_iiii-o_oiii-o_iioi-o_iiio-o_oioi-o_iioo-o_oiio
    o_iooo=n_ixxx-n_iiii-o_ioii-o_iioi-o_iiio-o_iooi-o_iioo-o_ioio
    # remainder
    o_oooo=n_xxxx-n_iiii-o_oiii-o_ioii-o_iioi-o_iiio-o_ooii-o_iooi-o_iioo-o_oioi-o_ioio-o_oiio-o_oooi-o_ooio-o_oioo-o_iooo
    return (o_iiii,o_oiii,o_ioii,o_iioi,o_iiio,o_ooii,o_iooi,o_iioo,o_oioi,o_ioio,o_oiio,o_oooi,o_ooio,o_oioo,o_iooo,o_oooo)
def expected4(observed):
    """Expected 2x2x2x2 cell counts: for each cell, the product of its four
    axis-marginals (cell + the cell with that one position flipped) divided
    by the grand total (mirrors the original normalisation)."""
    o_iiii,o_oiii,o_ioii,o_iioi,o_iiio,o_ooii,o_iooi,o_iioo,o_oioi,o_ioio,o_oiio,o_oooi,o_ooio,o_oioo,o_iooo,o_oooo=observed
    n_xxxx=float(o_iiii+o_oiii+o_ioii+o_iioi+o_iiio+o_ooii+o_iooi+o_iioo+o_oioi+o_ioio+o_oiio+o_oooi+o_ooio+o_oioo+o_iooo+o_oooo)
    e_iiii=(o_iiii+o_oiii)*(o_iiii+o_ioii)*(o_iiii+o_iioi)*(o_iiii+o_iiio)/n_xxxx
    e_oiii=(o_oiii+o_iiii)*(o_oiii+o_ooii)*(o_oiii+o_oioi)*(o_oiii+o_oiio)/n_xxxx
    e_ioii=(o_ioii+o_ooii)*(o_ioii+o_iiii)*(o_ioii+o_iooi)*(o_ioii+o_ioio)/n_xxxx
    e_iioi=(o_iioi+o_oioi)*(o_iioi+o_iooi)*(o_iioi+o_iiii)*(o_iioi+o_iioo)/n_xxxx
    e_iiio=(o_iiio+o_oiio)*(o_iiio+o_ioio)*(o_iiio+o_iioo)*(o_iiio+o_iiii)/n_xxxx
    e_ooii=(o_ooii+o_ioii)*(o_ooii+o_oiii)*(o_ooii+o_oooi)*(o_ooii+o_ooio)/n_xxxx
    e_oioi=(o_oioi+o_iioi)*(o_oioi+o_oooi)*(o_oioi+o_oiii)*(o_oioi+o_oioo)/n_xxxx
    e_oiio=(o_oiio+o_iiio)*(o_oiio+o_ooio)*(o_oiio+o_oioo)*(o_oiio+o_oiii)/n_xxxx
    e_iooi=(o_iooi+o_oooi)*(o_iooi+o_iioi)*(o_iooi+o_ioii)*(o_iooi+o_iooo)/n_xxxx
    e_ioio=(o_ioio+o_ooio)*(o_ioio+o_iiio)*(o_ioio+o_iooo)*(o_ioio+o_ioii)/n_xxxx
    e_iioo=(o_iioo+o_oioo)*(o_iioo+o_iooo)*(o_iioo+o_iiio)*(o_iioo+o_iioi)/n_xxxx
    e_oooi=(o_oooi+o_iooi)*(o_oooi+o_oioi)*(o_oooi+o_ooii)*(o_oooi+o_oooo)/n_xxxx
    e_ooio=(o_ooio+o_ioio)*(o_ooio+o_oiio)*(o_ooio+o_oooo)*(o_ooio+o_ooii)/n_xxxx
    e_oioo=(o_oioo+o_iioo)*(o_oioo+o_oooo)*(o_oioo+o_oiio)*(o_oioo+o_oioi)/n_xxxx
    e_iooo=(o_iooo+o_oooo)*(o_iooo+o_iioo)*(o_iooo+o_ioio)*(o_iooo+o_iooi)/n_xxxx
    e_oooo=(o_oooo+o_iooo)*(o_oooo+o_oioo)*(o_oooo+o_ooio)*(o_oooo+o_oooi)/n_xxxx
    return (e_iiii,e_oiii,e_ioii,e_iioi,e_iiio,e_ooii,e_iooi,e_iioo,e_oioi,e_ioio,e_oiio,e_oooi,e_ooio,e_oioo,e_iooo,e_oooo)
def create_ngram_star(lemmas):
    """Return all 2**ngram_length variants of *lemmas* where any subset of
    positions is replaced by the wildcard '*'.

    Variant order follows itertools.product((True, False), ...), i.e. the
    fully specified n-gram first and the all-wildcard tuple last; relies on
    the module global ``ngram_length``.
    """
    from itertools import product
    variants = []
    for keep_mask in product((True, False), repeat=ngram_length):
        variants.append(tuple(lemma if keep else '*'
                              for keep, lemma in zip(keep_mask, lemmas)))
    return variants
def product(s):
    """Return the product of the numbers in iterable *s* (1 for empty input).

    FIX: the original was ``lambda s: reduce(lambda x, y: x * y, s)``,
    which depends on the Python-2-only builtin ``reduce`` and raised
    TypeError on an empty sequence; this explicit loop works on both
    Python 2 and 3.
    """
    result = 1
    for value in s:
        result *= value
    return result
def avg_idf(ngram):
    """Average idf of the lemmas in *ngram*.

    Lemmas absent from the module-global ``idf`` dict are scored with the
    maximum idf observed (i.e. treated as maximally rare).
    """
    max_idf=max(idf.values())
    avg_idf=0.0
    for lemma in ngram:
        avg_idf+=idf.get(lemma,max_idf)
    return avg_idf/len(ngram)
def calculate_tfidf(ngram_frequency_distr):
    """Rank n-grams by tf-idf: relative frequency times average idf.

    Only n-grams occurring at least ``min_frequency`` times are scored.
    Returns (score, ngram) tuples sorted best-first.

    FIX: uses ``.items()`` instead of the Python-2-only ``.iteritems()``
    (equivalent on Python 2, required on Python 3) and drops the unused
    ``idf_avg`` local the original left behind.
    """
    result = []
    ngram_probability = relativize_distr(ngram_frequency_distr)
    for ngram, tf in ngram_probability.items():
        if ngram_frequency_distr[ngram] >= min_frequency:
            result.append((tf * avg_idf(ngram), ngram))
    return sorted(result, reverse=True)
def relativize_distr(distr):
 """Return a copy of *distr* with values rescaled to sum to 1.

 An empty distribution yields an empty dict (no division occurs).
 """
 # fixed: the original bound a local named `all`, shadowing the builtin,
 # and used Py2-only .iteritems()
 total = float(sum(distr.values()))
 return dict((key, value / total) for key, value in distr.items())
# Default minimum absolute frequency for an n-gram to be scored.
min_frequency=3
# Command-line switches that may be omitted.
optional_properties=set(['h','s','n','t','o','pos','prop','min','idf','seq','norm','terms'])
# Command-line switches that must be supplied (input, phrase file, method, length).
mandatory_properties=set(['i','p','m','l'])
definable_properties=optional_properties.union(mandatory_properties)
# Parsed command-line / property-file arguments: switch name -> list of values.
properties={}
# Dispatch table from -m argument to the scoring function.
ranking_methods={'dice':calculate_g0_dice,'mi':calculate_g0_mi,'ll':calculate_g0_ll,'chisq':calculate_g0_chisq,'tscore':calculate_g0_tscore,'tfidf':calculate_tfidf}
def parse_arguments():
 """Parse sys.argv into the global ``properties`` dict.

 Arguments of the form ``-key`` open a new (possibly empty) value list;
 every following bare argument is appended to the most recent key.
 ``-h`` prints usage and exits; unknown identifiers produce warnings.
 """
 key=None
 for argument in sys.argv[1:]:
  if argument.startswith('-'):
   key=argument[1:]
   if key=='h':
    print_help()
   if key not in definable_properties:
    sys.stderr.write('Warning: argument identifier '+argument+' unknown\n')
   else:
    properties[key]=[]
  elif key is None:
   # a value appeared before any -key switch
   sys.stderr.write('Warning: argument '+argument+' without argument identifier'+'\n')
  elif key in definable_properties:
   properties[key].append(argument)
  else:
   sys.stderr.write('Error: unknown argument identifier '+key+'\n')
   print_help()
def print_help():
 """Write the command-line usage text to stderr and exit with status 1."""
 sys.stderr.write('Syntax:\n')
 sys.stderr.write('python CollTerm.py -i input_file -p phrase_configuration_file -m ngram_ranking_method -l length_of_ngrams\n')
 sys.stderr.write('Arguments:\n')
 sys.stderr.write('-i\ttab separated input file\n')
 sys.stderr.write('-p\tphrase configuration file\n')
 sys.stderr.write('\t\t- symbols for defining stop word phrases:\n')
 sys.stderr.write('\t\t\t- !STOP - non-stop word\n')
 sys.stderr.write('\t\t\t- STOP - stop word\n')
 sys.stderr.write('\t\t\t- * - any word\n')
 sys.stderr.write('\t\t- bigram example: STOP *\n')
 sys.stderr.write('-s\tstop word file\n')
 sys.stderr.write('-n\tmaximum number of top ranked terms\n')
 sys.stderr.write('-t\tthreshold (minimum score)\n')
 sys.stderr.write('-m\tn-gram ranking method\n')
 sys.stderr.write('\t\tdice - Dice coefficient\n')
 sys.stderr.write('\t\tmi - modified mutual information\n')
 sys.stderr.write('\t\tchisq - chi-square statistic\n')
 sys.stderr.write('\t\tll - log-likelihood ratio\n')
 sys.stderr.write('\t\ttscore - t-score\n')
 sys.stderr.write('\t\ttfidf - tf-idf score (with a mandatory idf file)\n')
 sys.stderr.write('-l\tlength of n-grams to be extracted (0-4, 0 for all length n-grams)\n')
 sys.stderr.write('-o\textracted term list output file (if not given, stdout used)\n')
 sys.stderr.write('-pos\tpositions of tokens, POS tags and lemmas (zero-based indices)\n')
 sys.stderr.write('-min\tminimum frequency of n-grams taken in consideration\n')
 sys.stderr.write('-prop\tproperty file\n')
 sys.stderr.write('-idf\tidf file (mandatory for tfidf, in case of other ranking methods a linear combination is computed)\n')
 sys.stderr.write('-seq\toutput n-grams as a sequence of 0 - lemmata n-grams, 1 - most frequent token n-grams, 2 - lemmata n-grams + most frequent token n-grams, 3 - all token n-grams with their frequencies\n')
 sys.stderr.write('-norm\tnormalize output to a [0,x] range\n')
 sys.stderr.write('-terms\toutput terms as\n')
 sys.stderr.write('\t\t0 - terms and weights (+ frequency if "-seq 2")\n')
 sys.stderr.write('\t\t1 - terms only\n')
 sys.exit(1)
def check_properties():
 """Validate the parsed ``properties`` and publish them as typed globals.

 Reads an optional ``-prop`` property file first (its values overwrite
 command-line ones), then checks every switch, converting values to the
 right type.  Any problem prints usage and exits via print_help().
 Globals set: term_number, threshold, ranking_method, ngram_length,
 output_file, position, min_frequency, seq, norm, terms.
 """
 global term_number
 term_number=None
 global threshold
 threshold=None
 global ranking_method
 global ngram_length
 global output_file
 global position
 global min_frequency
 global seq
 seq=0
 global norm
 norm=0
 global terms
 terms=0
 # -prop
 if 'prop' in properties:
  if properties['prop']==[]:
   sys.stderr.write('Error: no argument given for property file (-prop)\n')
   print_help()
  import re
  # property-file lines look like key=value (values may be "quoted")
  line_re=re.compile('(.+?)=(.+)')
  try:
   properties_file=open(properties['prop'][-1])
  except:
   sys.stderr.write('Error: can not open property file (-prop): '+properties['prop'][-1]+'\n')
   print_help()
  i=0
  for line in properties_file:
   i+=1
   if line.strip().startswith('#'):
    continue
   result=line_re.search(line)
   if result is None:
    sys.stderr.write('Error in property file line '+str(i)+'\n'+repr(line)+'\n')
    print_help()
   key=result.group(1).strip()
   if '"' in line:
    values=re.findall(r'"(.+?)"',result.group(2).strip())
   else:
    values=re.split(r'\s+',result.group(2).strip())
   if key not in definable_properties:
    sys.stderr.write('Error: unknown argument identifier '+key+' (property file)\n')
    print_help()
   properties[key]=values
 missing_properties=mandatory_properties.difference(mandatory_properties.intersection(properties))
 if len(missing_properties)>0:
  sys.stderr.write('Error: these mandatory properties are missing: '+', '.join(missing_properties)+'\n')
  print_help()
 # -n
 if 'n' in properties:
  if properties['n']==[]:
   sys.stderr.write('Error: no argument given for number of terms (-n)\n')
   print_help()
  try:
   term_number=int(properties['n'][-1])
  except:
   sys.stderr.write('Error: wrong argument for number of terms (-n): '+properties['n'][-1]+'\n')
   print_help()
 # -t
 if 't' in properties:
  if properties['t']==[]:
   sys.stderr.write('Error: no argument given for threshold (-t)\n')
   print_help()
  try:
   threshold=float(properties['t'][-1])
  except:
   sys.stderr.write('Error: wrong argument for threshold (-t): '+properties['t'][-1]+'\n')
   print_help()
 # -m
 if properties['m']==[]:
  sys.stderr.write('Error: no argument given for n-gram ranking method (-m)\n')
  print_help()
 ranking_method=properties['m'][-1]
 if ranking_method not in ranking_methods:
  sys.stderr.write('Error: wrong argument for n-gram ranking method (-m): '+ranking_method+'\n')
  print_help()
 # -l
 if properties['l']==[]:
  sys.stderr.write('Error: no argument given for length of n-grams (-l)\n')
  print_help()
 try:
  ngram_length=int(properties['l'][-1])
  if ngram_length<1 or ngram_length>4:
   raise Exception
 except:
  sys.stderr.write('Error: wrong argument for length of n-grams (-l): '+properties['l'][-1]+'\n')
  print_help()
 #if ranking_method=='tfidf' and ngram_length!=1:
 # sys.stderr.write('Error: tfidf ranking method applicable only to unigrams (-l 1)\n')
 # print_help()
 if ngram_length==1 and 'idf' not in properties:
  sys.stderr.write('Error: idf file has to be defined for length of n-grams 1 (-l 1)\n')
  print_help()
 # -o
 if 'o' in properties:
  if properties['o']==[]:
   sys.stderr.write('Error: no argument given output file (-o)\n')
   print_help()
  try:
   output_file=open(properties['o'][-1],'w')
  except:
   sys.stderr.write('Error: can not write to output file '+properties['o'][-1]+'\n')
   print_help()
 else:
  output_file=sys.stdout
 # -pos
 if 'pos' in properties:
  if len(properties['pos'])<3:
   sys.stderr.write('Error: wrong number of arguments given for position (-pos)\n')
   print_help()
  try:
   # three distinct zero-based column indices: token, POS, lemma
   position=[int(e) for e in properties['pos'][-3:]]
   if len(set(position))!=3:
    raise Exception
  except:
   sys.stderr.write('Error: wrong arguments given for position (-pos): '+', '.join(properties['pos'][-3:])+'\n')
   print_help()
 else:
  position=range(3)
 # -min
 if 'min' in properties:
  if properties['min']==[]:
   sys.stderr.write('Error: no argument given for minimum frequency (-min)\n')
   print_help()
  try:
   min_frequency=int(properties['min'][-1])
  except:
   sys.stderr.write('Error: wrong argument given for minimum frequency (-min): '+properties['min'][-1]+'\n')
   print_help()
 # -idf
 if 'idf' in properties:
  if properties['idf']==[]:
   sys.stderr.write('Error: no argument given for idf file (-idf)\n')
   print_help()
 # -seq
 if 'seq' in properties:
  if properties['seq']==[]:
   sys.stderr.write('Error: no argument given for sequence output (-seq)\n')
   print_help()
  if properties['seq'][-1]!='0' and properties['seq'][-1]!='1' and properties['seq'][-1]!='2':
   sys.stderr.write('Error: wrong argument given for sequence output (-seq): '+properties['seq'][-1]+'\n')
   print_help()
  seq=int(properties['seq'][-1])
 # -norm
 if 'norm' in properties:
  if properties['norm']==[]:
   sys.stderr.write('Error: no argument given for normalization (-norm)\n')
   print_help()
  try:
   norm=float(properties['norm'][-1])
  except:
   sys.stderr.write('Error: wrong argument given for normalization (-norm): '+properties['norm'][-1]+'\n')
   print_help()
 # -terms
 if 'terms' in properties:
  if properties['terms']==[]:
   sys.stderr.write('Error: no argument given for term output (-terms)\n')
   print_help()
  if properties['terms'][-1]!='0' and properties['terms'][-1]!='1':
   sys.stderr.write('Error: wrong argument given for term output (-terms): '+properties['terms'][-1]+'\n')
   print_help()
  terms=int(properties['terms'][-1])
def read_phrase_configuration_file():
 """Parse the phrase configuration file (-p).

 Fills the globals ``valid_phrases`` (lists of compiled POS regexes of
 length ``ngram_length``) and ``stop_phrase`` (a single pattern made of
 '!STOP'/'STOP'/'*' markers; at most one such line is allowed).
 Lines starting with '#', blank lines and lines of the wrong length
 are skipped; any other problem prints usage and exits.
 """
 # local import: in this module `re` is otherwise only imported inside
 # check_properties(), so re.compile below could raise NameError
 import re
 global valid_phrases
 valid_phrases=[]
 global stop_phrase
 stop_phrase=[]
 global ngram_length
 if properties['p']==[]:
  sys.stderr.write('Error: no argument given for phrase configuration file (-p)\n')
  print_help()
 try:
  phrase_configuration_file=open(properties['p'][-1],'r')
 except:
  sys.stderr.write('Error: can not read phrase configuration file '+properties['p'][-1]+'\n')
  print_help()
 i=0
 for line in phrase_configuration_file:
  i+=1
  if line.strip().startswith('#'):
   continue
  if line.strip()=='':
   continue
  valid_phrase=line.strip().split('\t')
  try:
   # only phrases of the configured n-gram length apply
   if ngram_length!=len(valid_phrase):
    continue
   if 'STOP' in line:
    # at most one stop-word phrase may be defined
    if stop_phrase!=[]:
     raise Exception
    for stop in valid_phrase:
     if stop not in ('!STOP','STOP','*'):
      raise Exception
    stop_phrase=valid_phrase
    continue
   valid_phrase=[re.compile(e) for e in valid_phrase]
  except:
   sys.stderr.write('Error in phrase configuration file line '+str(i)+'\n'+repr(line)+'\n')
   print_help()
  valid_phrases.append(valid_phrase)
def read_stopword_file():
 """Load the optional stop-word file (-s) into the global ``stopwords`` set.

 When -s is absent the set stays empty.  Each line of the UTF-8 file is
 stripped and becomes one stop word.  Read errors print usage and exit.
 """
 global stopwords
 stopwords=set()
 if 's' not in properties:
  return
 if not properties['s']:
  sys.stderr.write('Error: no argument given for stop-word file (-s)\n')
  print_help()
 path=properties['s'][-1]
 try:
  stopwords=set(line.strip() for line in codecs.open(path,'r','utf-8'))
 except:
  sys.stderr.write('Error: can not read stop-word file '+path+'\n')
  print_help()
def read_idf_file():
 """Load the optional idf file (-idf) into the global ``idf`` dict.

 Each UTF-8 line must be ``lemma<TAB>value``; values are parsed as
 floats.  When -idf is absent, nothing happens.  Malformed files print
 usage and exit.
 """
 if 'idf' not in properties:
  return
 global idf
 idf={}
 if not properties['idf']:
  sys.stderr.write('Error: no argument given for idf file (-idf)\n')
  print_help()
 path=properties['idf'][-1]
 try:
  parsed={}
  for line in codecs.open(path,'r','utf-8'):
   lemma,value=line.split('\t')
   parsed[lemma]=float(value)
  idf=parsed
 except:
  sys.stderr.write('Error: can not read idf file '+path+'\n')
  print_help()
def read_input_text():
 """Read the tab-separated corpus (-i) and build the frequency globals.

 Tracks <ENAMEX>/<NUMEX>/<TIMEX> bracket nesting and skips tokens inside
 any named-entity span.  For every token line it maintains a sliding
 n-gram window of (token, pos, lemma) triples and updates, depending on
 ``ranking_method``: unigram_frequency_distr, ngram_frequency_distr
 (with wildcard variants for ll/chisq/tscore), lemma_phrase_map and
 lemma_token_frequency_distr.  Finally sets the global ``results`` by
 dispatching to ranking_methods[ranking_method].
 """
 enamex=0
 numex=0
 timex=0
 if properties['i']==[]:
  sys.stderr.write('Error: no argument given for input file (-i)\n')
  print_help()
 path=properties['i'][-1]
 try:
  input_file=codecs.open(path,'r','utf-8')
 except:
  sys.stderr.write('Error: can not read input file '+path+'\n')
  print_help()
 i=0
 # sliding window of the last ngram_length (token, pos, lemma) triples
 ngram=[None for e in range(ngram_length)]
 global ngram_frequency_distr
 ngram_frequency_distr={}
 global unigram_frequency_distr
 unigram_frequency_distr={}
 global lemma_token_frequency_distr
 lemma_token_frequency_distr={}
 global lemma_phrase_map
 lemma_phrase_map={}
 for line in input_file:
  if i==0:
   # drop a UTF-8 BOM on the very first line
   if line.startswith(u'\ufeff'):
    line=line[1:]
  i+=1
  split_line=line.strip().split('\t')
  if split_line==['']:
   continue
  if split_line[0][0]=='<':
   # named-entity bracket bookkeeping; any other tag is ignored
   if split_line[0].startswith('<NUMEX'):
    numex+=1
   elif split_line[0].startswith('<ENAMEX'):
    enamex+=1
   elif split_line[0].startswith('<TIMEX'):
    timex+=1
   elif split_line[0].startswith('</NUMEX'):
    numex-=1
   elif split_line[0].startswith('</ENAMEX'):
    enamex-=1
   elif split_line[0].startswith('</TIMEX'):
    timex-=1
   else:
    continue
   # a bracket breaks n-gram continuity: reset the window
   ngram=[None for e in range(ngram_length)]
   if enamex<0 or numex<0 or timex<0:
    sys.stderr.write('Error: closing bracket without open on line '+str(i)+'\n'+repr(line)+'\n')
    sys.exit(1)
   continue
  if enamex!=0 or numex!=0 or timex!=0:
   continue
  try:
   token,pos,lemma=split_line[position[0]],split_line[position[1]],split_line[position[2]]
   token=token.lower()
   lemma=lemma.lower()
  except:
   sys.stderr.write('Error in input file line '+str(i)+'\n'+repr(line)+'\n')
   print_help()
  # actual stuff
  if ranking_method in ('dice','mi'):
   unigram_frequency_distr[lemma]=unigram_frequency_distr.get(lemma,0)+1
  ngram=ngram[1:]
  ngram.append((token,pos,lemma))
  try:
   # fails (TypeError on None) until the window is full
   tokens=tuple([e[0] for e in ngram])
   poses=tuple([e[1] for e in ngram])
   lemmas=tuple([e[2] for e in ngram])
  except:
   continue
  if ranking_method in ('ll','chisq','tscore'):
   for star_ngram in create_ngram_star(lemmas):
    ngram_frequency_distr[star_ngram]=ngram_frequency_distr.get(star_ngram,0)+1
  phrase=check_ngram_for_phrases(poses)
  if phrase!=None and check_ngram_for_stops(tokens):
   if lemmas not in lemma_phrase_map:
    lemma_phrase_map[lemmas]={}
   lemma_phrase_map[lemmas][phrase]=lemma_phrase_map[lemmas].get(phrase,0)+1
   if ranking_method in ('dice','mi','tfidf'):
    ngram_frequency_distr[lemmas]=ngram_frequency_distr.get(lemmas,0)+1
   if lemmas not in lemma_token_frequency_distr:
    lemma_token_frequency_distr[lemmas]={}
   lemma_token_frequency_distr[lemmas][tokens]=lemma_token_frequency_distr[lemmas].get(tokens,0)+1
 global results
 if ranking_method in ('dice','mi'):
  results=ranking_methods[ranking_method](ngram_frequency_distr,unigram_frequency_distr)
 elif ranking_method in ('ll','chisq','tscore','tfidf'):
  results=ranking_methods[ranking_method](ngram_frequency_distr)
def check_ngram_for_phrases(pos_ngram):
 """Match *pos_ngram* against the global ``valid_phrases``.

 Returns the space-joined pattern string of the first phrase whose
 compiled regexes all match the corresponding POS tags, or None when no
 phrase matches (a None POS never matches).
 """
 for phrase in valid_phrases:
  matches=all(
   pos is not None and regex.match(pos) is not None
   for pos,regex in zip(pos_ngram,phrase)
  )
  if matches:
   return ' '.join(regex.pattern for regex in phrase)
def check_ngram_for_stops(token_ngram):
 """Check *token_ngram* against the global ``stop_phrase`` pattern.

 'STOP' positions must hold a stop word, '!STOP' positions must not,
 '*' positions are unconstrained.  An empty pattern accepts everything.
 """
 if not stop_phrase:
  return True
 for token,marker in zip(token_ngram,stop_phrase):
  is_stop=token in stopwords
  if marker=='STOP' and not is_stop:
   return False
  if marker=='!STOP' and is_stop:
   return False
 return True
def normalize_result():
 """Linearly rescale the scores in the global ``results`` to [0, norm].

 If the global ``norm`` is 0 (unset) it is promoted to 1 first.  When
 every score is identical (zero spread) each entry is mapped to ``norm``.
 An empty result list is left untouched.
 """
 global results
 global norm
 if norm==0:
  norm=1
 values=[score for score,_ in results]
 if not values:
  return
 min_value=min(values)
 max_value=max(values)
 # fixed: this local was named `range`, shadowing the builtin
 spread=float(max_value-min_value)
 for index,(value,ngram) in enumerate(results):
  try:
   results[index]=(norm*(value-min_value)/spread,ngram)
  except ZeroDivisionError:
   # all scores identical: map everything to the top of the range
   results[index]=(norm,ngram)
def linearly_combine_with_idf():
 """Blend the (normalized) ranking scores 50/50 with average idf.

 Normalizes the global ``results`` first, then replaces each score by
 the equally-weighted sum of the score and avg_idf of the n-gram, and
 re-sorts best-first.
 """
 normalize_result()
 global results
 combined=[(value*0.5+avg_idf(ngram)*0.5,ngram) for value,ngram in results]
 results=sorted(combined,reverse=True)
def output_result():
 """Write the ranked terms to the global ``output_file``.

 Stops early when a score drops below ``threshold`` or after
 ``term_number`` terms.  Output shape depends on the -seq mode:
 0 lemma n-gram, 1 most frequent token n-gram, 2 both, otherwise every
 token variant with its frequency.  Each line also carries the matched
 phrase pattern(s) and the n-gram frequency; -terms 0 appends the score.
 """
 i=0
 for value,ngram in results:
  if threshold is not None:
   if value<threshold:
    break
  if seq==0:
   # lemma form only
   output=' '.join(ngram).encode('utf-8')+'\t('+'|'.join([e[0] for e in sorted(lemma_phrase_map[ngram].items(),key=lambda x:-x[1])])+', '+str(ngram_frequency_distr[ngram])+')'
   if terms==0:
    output+='\t'+str(round(value,5))
   output_file.write(output+'\n')
  elif seq==1:
   # most frequent surface (token) form only
   output=' '.join(sorted(lemma_token_frequency_distr[ngram].items(),key=lambda x:x[1])[-1][0]).encode('utf-8')+'\t('+'|'.join([e[0] for e in sorted(lemma_phrase_map[ngram].items(),key=lambda x:-x[1])])+', '+str(ngram_frequency_distr[ngram])+')'
   if terms==0:
    output+='\t'+str(round(value,5))
   output_file.write(output+'\n')
  elif seq==2:
   # lemma form followed by the most frequent surface form
   output=' '.join(ngram).encode('utf-8')+'\t'+' '.join(sorted(lemma_token_frequency_distr[ngram].items(),key=lambda x:x[1])[-1][0]).encode('utf-8')+'\t('+'|'.join([e[0] for e in sorted(lemma_phrase_map[ngram].items(),key=lambda x:-x[1])])+', '+str(ngram_frequency_distr[ngram])+')'
   if terms==0:
    output+='\t'+str(round(value,5))
   output_file.write(output+'\n')
  else:
   # every observed surface form with its own frequency
   for token_sequence,frequency in lemma_token_frequency_distr[ngram].iteritems():
    output=' '.join(token_sequence).encode('utf-8')+'\t('+'|'.join([e[0] for e in sorted(lemma_phrase_map[ngram].items(),key=lambda x:-x[1])])+', '+str(ngram_frequency_distr[ngram])+')'
    if terms==0:
     output+='\t'+str(frequency)+'\t'+str(round(value,5))
    output_file.write(output+'\n')
  i+=1
  if term_number is not None:
   if i>=term_number:
    break
if __name__=='__main__':
 # Full pipeline: parse CLI, validate, load configuration files,
 # read the corpus (which also computes the ranking), post-process, print.
 parse_arguments()
 check_properties()
 read_phrase_configuration_file()
 read_stopword_file()
 read_idf_file()
 read_input_text()
 # vars() at module level is the module's globals, so this tests whether
 # read_idf_file() actually created the global `idf`
 if ranking_method!='tfidf' and 'idf' in vars():
  linearly_combine_with_idf()
 if norm!=0:
  normalize_result()
 output_result()
|
12,044 | 598d0ec549f238c602eeeb442678cc3b5f1a8408 | from __future__ import annotations
from typing import Iterator
def _split(string: str, split_size: int) -> Iterator[str]:
for i in range(0, len(string), split_size):
yield string[i : i + split_size]
def _create_layer(string: str, width: int, height: int) -> list[list[int]]:
return [
[(int(c)) for c in string[r * width : (r + 1) * width]] for r in range(height)
]
def generate_input() -> Iterator[list[list[int]]]:
    """Yield each 25x6 digit layer parsed from the puzzle input file."""
    with open("d8/input.txt", "r") as source:
        for chunk in _split(source.read(), 25 * 6):
            yield _create_layer(chunk, 25, 6)
def count(layer: list[list[int]], pixel: int) -> int:
    """Return how many cells of *layer* are equal to *pixel*."""
    return sum(row.count(pixel) for row in layer)
if __name__ == "__main__":
layer, _ = min(
((layer, count(layer, 0)) for layer in generate_input()), key=lambda i: i[1]
)
print(count(layer, 1) * count(layer, 2))
|
12,045 | a31cf08a3993aba9094ea3a063919319514c7454 | # -*- coding: utf-8
import urllib.request
# URL to fetch
url = "https://raw.githubusercontent.com/nishizumi-lab/sample/master/python/scraping/00_sample_data/sample01/index.html"
# Configure a browser-like User-Agent header
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ja; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 ( .NET CLR 3.5.30729)')]
# Open the HTML resource
data = opener.open(url)
# Download the HTML and decode it as UTF-8
html = data.read()
html = html.decode('utf-8')
# Print it
print(html)
# Close the connection
data.close()
"""
<!doctype html>
<html lang="ja">
<head>
<meta charset="UTF-8">
<title>ページタイトル</title>
</head>
<body>
<div class="header">ヘッダーです</div>
<div class="main">
<h1>H1の見出しです</h1>
<p>文章1です</p>
</div>
<div class="footer">
<p>フッターです</p>
<a href="#">リンク1</a>
<a href="#">リンク2</a>
</div>
</body>
</html>
"""
12,046 | dc32c573ac2a820cfdb27f2a335f7eb43e8bb811 | from recoapp import app, db
import recoapp.models
import pandas as pd
import re
import urllib
import zipfile
def extract_year(x):
    """Split a movie title like "Toy Story (1995)" into (title, year).

    Returns (title_without_parenthesised_year, "YYYY") when a 4-digit year
    in 1000-2999 occurs anywhere in *x*, otherwise (x, None).
    """
    match = re.search("[12][0-9][0-9][0-9]", x)
    if not match:
        return x, None
    stripped_title = re.sub("[ \t]+\(.+\)", "", x)
    return stripped_title, match.group(0)
## Download MovieLens data (Python 2 script: urllib.URLopener, print statements)
print "Downloading MovieLens data"
urlopener = urllib.URLopener()
urlopener.retrieve('http://files.grouplens.org/datasets/movielens/ml-20m.zip', './data/ml-20m.zip')
zip_ref = zipfile.ZipFile('./data/ml-20m.zip', 'r')
zip_ref.extractall('./data/')
zip_ref.close()
## Input into database
movies = pd.read_csv("./data/ml-20m/movies.csv", index_col='movieId')
ratings = pd.read_csv("./data/ml-20m/ratings.csv")
# a rating above 3.5 counts as "liked"; the per-movie mean of this boolean
# column is therefore the fraction of positive ratings
ratings['liked'] = ratings.rating > 3.5
avg_rating = ratings.groupby(['movieId']).mean()
avg_rating = avg_rating[['liked']]
movies = movies.join(avg_rating)
movies = movies[['title', 'liked']]
# rebuild the schema from scratch before loading
db.drop_all()
db.create_all()
for movie_id, x, liked in movies.itertuples():
    title, year = extract_year(x)
    movie = recoapp.models.Movie(movie_id, title, year, liked)
    try:
        db.session.add(movie)
        db.session.commit()
    except:
        # best effort: report the offending row and keep loading
        print "A problem with: ",
        print movie
        db.session.rollback()
    if movie_id % 100 == 0:
        print "%i movies added to database" % movie_id
|
12,047 | 98f113687ea6c5493a2a384f1489004e94f22de3 | from Day6.Point import Point
def is_within_region(pos, pts, max_distance=10000):
    """Return True if the total Manhattan distance from *pos* to every point
    in *pts* is strictly below *max_distance*.

    *pos* and each element of *pts* only need ``.x`` and ``.y`` attributes.
    The threshold was hard-coded at 10000; it is now a backward-compatible
    keyword parameter with the same default.
    """
    total = sum(abs(pos.x - pt.x) + abs(pos.y - pt.y) for pt in pts)
    return total < max_distance
start_id = 65 # >= 2 for later array initialization
# NOTE(review): non_id_char and contested_id are never used below —
# they look like leftovers from the part-1 solution; confirm before removing.
non_id_char = chr(start_id - 1)
contested_id = chr(start_id - 2)
# NOTE(review): file_read is never closed; consider a `with` block.
file_read = open("input.txt", "r")
line = file_read.readline()
points = []
point_id = start_id
# Get points: one "x, y" pair per input line, labelled 'A', 'B', ...
while line:
    coordinates = line.split(', ')
    points.append(Point(chr(point_id), int(coordinates[0]), int(coordinates[1])))
    point_id += 1
    line = file_read.readline()
# Find boundaries (the bounding box of all input points)
max_x = min_x = points[0].x
max_y = min_y = points[0].y
for point in points:
    if max_x < point.x:
        max_x = point.x
    if min_x > point.x:
        min_x = point.x
    if max_y < point.y:
        max_y = point.y
    if min_y > point.y:
        min_y = point.y
# Count region area: cells inside the bounding box whose summed
# Manhattan distance to all points is below the threshold
region_area = 0
for coord_x in range(min_x, max_x + 1):
    for coord_y in range(min_y, max_y + 1):
        position = Point('', coord_x, coord_y)
        if is_within_region(position, points):
            region_area += 1
print(f'The area of this region equals {region_area}.')
|
12,048 | 35df847bb6c8316ddbd69b900e2e3c5c9fe58f7c | import logging
import sanic.exceptions
import sanic.response
from .repl_manager import ReplManager
# Root logger shared by all handlers in this module.
log = logging.getLogger()
class Handlers:
    """Handlers for various routes in the app, plus attributes for tracking the
    REPL process.
    """
    def __init__(self):
        # The single active ReplManager, or None when no REPL exists.
        self.repl_mgr = None
    def close(self):
        """Destroy the current REPL (if any); always clear the reference."""
        try:
            if self.repl_mgr is not None:
                log.info('destroying REPL')
                self.repl_mgr.close()
        finally:
            self.repl_mgr = None
    async def create_repl_handler(self, request):
        """Create the REPL from the request's JSON config.

        Returns 201 on success, 409 if a REPL already exists.
        """
        if self.repl_mgr is not None:
            log.info('request to create REPL while one already exists')
            return sanic.response.HTTPResponse(status=409)
        log.info('creating REPL')
        self.repl_mgr = ReplManager(request.app.loop,
                                    request.json)
        return sanic.response.HTTPResponse(status=201) # created
    async def delete_repl_handler(self, request):
        """Tear down the REPL: 200 on success, 404 when none exists."""
        # If there's no extant REPL, slap 'em with a 404
        if self.repl_mgr is None:
            return sanic.response.HTTPResponse(status=404)
        self.close()
        return sanic.response.HTTPResponse(status=200) # OK
    async def websocket_handler(self, request, ws):
        """Create a new websocket and connect its input and output to the subprocess
        with the specified PID.
        """
        if self.repl_mgr is None:
            return sanic.response.HTTPResponse(status=404)
        log.info('initiating websocket')
        await self.repl_mgr.process_websocket(ws)
        log.info('terminating websocket')
|
12,049 | 1965ba6bd15ff7c1c0a378a3c7cd46a72646dd89 | import def_Patch_Extract_
import glob
import os
import numpy as np
CT_DATA_Path = "data"
DAT_DATA_Path = "pre_process_data"
if not os.path.exists(DAT_DATA_Path):
os.makedirs(DAT_DATA_Path)
CT_scans = sorted(glob.glob(CT_DATA_Path + '/*.mhd'))
# def_Patch_Extract_.slide_extraction(DAT_DATA_Path, CT_scans)
if not os.path.exists(DAT_DATA_Path + "/" + "inform.npy"):
# lbl = def_Patch_Extract_.nodule_label_extraction(DAT_DATA_Path, CT_scans)
# btm, mid, top = def_Patch_Extract_.total_patch_extraction(DAT_DATA_Path, CT_scans)
# np.save(DAT_DATA_Path + "/" + "origin_inform.npy", btm, mid, top)
btm, mid, top = def_Patch_Extract_.nodule_patch_extraction(DAT_DATA_Path, CT_scans)
np.save(DAT_DATA_Path + "/" + "inform.npy", btm, mid, top)
else:
dataSize = np.load(DAT_DATA_Path + "/" + "inform.npy")
originSize = np.load(DAT_DATA_Path + "/" + "origin_inform.npy")
k_splits = 5
training, validation, test = def_Patch_Extract_.k_fold_cross_validation(CT_scans, k_splits)
# def_Patch_Extract_.run_train_3D(CT_scans, training, validation, test, dataSize)
def_Patch_Extract_.run_train_3D(CT_scans, training, validation, test, dataSize, originSize)
# def_Patc-h_Extract_.run_train(training, validation, test, dataSize)
|
12,050 | 68dc9b1e49f90ad970f0bc7db58c7d5ecf61f591 | test_num = int(input("Input a number: "))
# Multiply every integer from 1 up to test_num to obtain test_num!
# (an empty range for test_num <= 0 leaves the result at 1).
result = 1
for factor in range(1, test_num + 1):
    result *= factor
print(result)
|
12,051 | f6f7e1a7a04ff6da82666e6cea9782f417a81632 | ### Model Training and Evaluation ###
# Author: Oliver Giesecke
from IPython import get_ipython
get_ipython().magic('reset -sf')
import os, shutil
import re
import csv
from utils import bigrams, trigram, replace_collocation
import timeit
import pandas as pd
import string
from nltk.stem import PorterStemmer
import numpy as np
import pickle
import random
from scipy import sparse
import itertools
from scipy.io import savemat, loadmat
import string
from sklearn.feature_extraction.text import CountVectorizer
from gensim.test.utils import datapath
from gensim.models import Word2Vec
from data_concatenate import *
import gensim.downloader
import pprint
from manetm import etm
pp = pprint.PrettyPrinter()
# =============================================================================
DATAPATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/etm/")
OVERLEAF = os.path.expanduser("~/Dropbox/Apps/Overleaf/FOMC_Summer2019/files")
if not os.path.exists(f"{DATAPATH}/full_results"):
os.makedirs(f"{DATAPATH}/full_results")
# =============================================================================
# #0 Set Parameters
# =============================================================================
# Dataset parameters
embphrase_itera = 2 # Number of phrase iterations
embthreshold = "inf" # Threshold value for collocations. If "inf": no collocations
emb_max_df = 1.0 # in a maximum of # % of documents if # is float.
emb_min_df = 1 # choose desired value for min_df // in a minimum of # documents
EMBDATASET = f"BBTSST_min{emb_min_df}_max{emb_max_df}_iter{embphrase_itera}_th{embthreshold}"
meetphrase_itera = 2
meetthreshold = "inf"
meetmax_df = 1.0
meetmin_df = 10
MEEETDATA = f"MEET_min{meetmin_df}_max{meetmax_df}_iter{meetphrase_itera}_th{meetthreshold}"
sta_phrase_itera = 2
sta_threshold = "inf"
sta_max_df = 1.0
sta_min_df = 5
STADATASET = f"STATEMENT_min{sta_min_df}_max{sta_max_df}_iter{sta_phrase_itera}_th{sta_threshold}"
# Skipgram parameters
mincount = 2
d_sg = 1
vectorsize = 300
iters = 100
cpus = 16
neg_samples = 10
windowsize = 4
# Activate code
d_construct = False
d_estemb = False
d_train = False
# =============================================================================
# #1 Data Preparation
# =============================================================================
if d_construct:
print("*" * 80)
print("Build datasets")
build_embdata(emb_max_df,emb_min_df,embphrase_itera,embthreshold,EMBDATASET)
build_meeting(meetmax_df,meetmin_df,meetphrase_itera,meetthreshold,MEEETDATA)
build_statement_data(sta_max_df,sta_min_df,sta_phrase_itera,sta_threshold,STADATASET)
print("*" * 80)
print("Datasets Construction Completed")
print("*" * 80)
print("\n")
# =============================================================================
# #2 Train Word Embeddings
# =============================================================================
if d_estemb:
# Run Skipgram
print(f"Run model: {EMBDATASET}\n")
sentences = pd.read_pickle(f"{DATAPATH}/data/{EMBDATASET}/corpus.pkl")
model = gensim.models.Word2Vec(sentences, min_count = mincount, sg = d_sg, vector_size = vectorsize, epochs = iters, workers = cpus, negative = neg_samples, window = windowsize)
model.save(f"{DATAPATH}/word2vecmodels/{EMBDATASET}")
# Write the embeddings to a file
with open(f"{DATAPATH}/embeddings/{EMBDATASET}_emb", 'w') as f:
for v in model.wv.index_to_key:
vec = list(model.wv[v])
f.write(v + ' ')
vec_str = ['%.9f' % val for val in vec]
vec_str = " ".join(vec_str)
f.write(vec_str + '\n')
print("*" * 80)
print(f"Embedding Training Completed")
print("*" * 80)
print("\n\n")
# =============================================================================
## #4 TRAIN TOPIC MODELS
# =============================================================================
# =============================================================================
## SPEAKERDATA - Pre-Trained Emb.
# speaker_ckpt = etm(f"{SPEAKERDATA}",data_path=f"{DATAPATH}/data/{SPEAKERDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# batch_size = 1000, epochs = 150, num_topics = 10, rho_size = 300,
# emb_size = 300, t_hidden_size = 800, theta_act = 'relu',
# train_embeddings = 0, lr = 0.005, lr_factor=4.0,
# mode = 'train', optimizer = 'adam',
# seed = 2019, enc_drop = 0.0, clip = 0.0,
# nonmono = 10, wdecay = 1.2e-6, anneal_lr = 0, bow_norm = 1,
# num_words =10, log_interval = 2, visualize_every = 10, eval_batch_size = 1000,
# load_from = "", tc = 1, td = 1)
#
# print(f"Evaluate model: {speaker_ckpt}")
# etm(f"{SPEAKERDATA}",data_path=f"{DATAPATH}/data/{SPEAKERDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# mode = 'eval', load_from = f"{speaker_ckpt}", train_embeddings = 0 ,tc = 1, td = 1)
#
# print(f"Output the topic distribution: {speaker_ckpt}")
# etm(f"{SPEAKERDATA}",data_path=f"{DATAPATH}/data/{SPEAKERDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# mode = 'retrieve',load_from = f"{speaker_ckpt}", train_embeddings = 0)
#
# =============================================================================
## MEETINGS - Pre-Trained Emb.
if d_train:
meeting_ckpt = etm(f"{MEEETDATA}",data_path=f"{DATAPATH}/data/{MEEETDATA}",
emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
batch_size = 1000, epochs = 2000, num_topics = 10, rho_size = 300,
emb_size = 300, t_hidden_size = 800, theta_act = 'relu',
train_embeddings = 0, lr = 0.005, lr_factor=4.0,
mode = 'train', optimizer = 'adam',
seed = 2019, enc_drop = 0.0, clip = 0.0,
nonmono = 10, wdecay = 1.2e-6, anneal_lr = 0, bow_norm = 1,
num_words =10, log_interval = 2, visualize_every = 10, eval_batch_size = 1000,
load_from = "", tc = 1, td = 1)
print(f"Evaluate model: {meeting_ckpt}")
etm(f"{MEEETDATA}",data_path=f"{DATAPATH}/data/{MEEETDATA}",
emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
mode = 'eval', load_from = f"{meeting_ckpt}", train_embeddings = 0 ,tc = 1, td = 1)
print(f"Output the topic distribution: {meeting_ckpt}")
etm(f"{MEEETDATA}",data_path=f"{DATAPATH}/data/{MEEETDATA}",
emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
mode = 'retrieve',load_from = f"{meeting_ckpt}", train_embeddings = 0)
# =============================================================================
## #5 OUTPUT DATA
# =============================================================================
# =============================================================================
# ## SPEAKERDATA
# raw_df = pd.read_pickle(f"raw_data/{SPEAKERDATA}.pkl")
#
# idx_df = pd.read_pickle(f'{OUTPATH}/{SPEAKERDATA}/original_indices.pkl')
# idx_df = idx_df.set_index(0)
# idx_df["d"] = 1
#
# data = pd.concat([idx_df,raw_df],axis=1)
# data_clean = data[data["d"]==1].reset_index()
# dist_df = pd.read_pickle(f'{speaker_ckpt}tpdist.pkl')
#
# full_data = pd.concat([data_clean,dist_df],axis=1)
# full_data.drop(columns=["content","d"],inplace=True)
# full_data.rename(columns=dict(zip([i for i in range(10)],[f"topic_{i}" for i in range(10)])),inplace=True)
# full_data["start_date"] = pd.to_datetime(full_data["start_date"])
# full_data.to_stata(f"{DATAPATH}/full_results/{SPEAKERDATA}.dta",convert_dates={"start_date":"td"})
#
# =============================================================================
### MEETING ###
# Retrieve raw data
raw_df = pd.read_pickle(f"raw_data/{MEEETDATA}.pkl")
idx_df = pd.read_pickle(f'{OUTPATH}/{MEEETDATA}/original_indices.pkl')
idx_df = idx_df.set_index(0)
idx_df["d"] = 1
data = pd.concat([idx_df,raw_df],axis=1)
data_clean = data[data["d"]==1].reset_index()
dist_df = pd.read_pickle(f'{meeting_ckpt}tpdist.pkl')
full_data = pd.concat([data_clean,dist_df],axis=1)
full_data.drop(columns=["content"],inplace=True)
full_data.rename(columns=dict(zip([i for i in range(10)],[f"topic_{i}" for i in range(10)])),inplace=True)
full_data["date"] = full_data["start_date"]
full_data.to_stata(f"{DATAPATH}/full_results/{MEEETDATA}.dta",convert_dates={"date":"td"})
full_data.to_pickle(f"{DATAPATH}/full_results/{MEEETDATA}.pkl")
### MEETING SAMPLED ###
# Retrieve raw data
raw_df = pd.read_pickle(f"raw_data/{MEETDATASAMPLE}.pkl")
idx_df = pd.read_pickle(f'{OUTPATH}/{MEETDATASAMPLE}/original_indices.pkl')
idx_df = idx_df.set_index(0)
idx_df["d"] = 1
data = pd.concat([idx_df,raw_df],axis=1)
data_clean = data[data["d"]==1].reset_index()
dist_df = pd.read_pickle(f'{meeting_ckptsampled}tpdist.pkl')
full_data = pd.concat([data_clean,dist_df],axis=1)
full_data.drop(columns=["content"],inplace=True)
full_data.rename(columns=dict(zip([i for i in range(10)],[f"topic_{i}" for i in range(10)])),inplace=True)
full_data["date"] = pd.to_datetime(full_data["date"])
full_data.to_stata(f"{DATAPATH}/full_results/{MEETDATASAMPLE}.dta",convert_dates={"date":"td"})
full_data.to_pickle(f"{DATAPATH}/full_results/{MEETDATASAMPLE}.pkl")
# =============================================================================
# ## 6 Visualize
# Scatter-plot each topic's share over meeting dates, split by FOMC
# transcript Section (1 vs 2), for the full and the sampled corpora.
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
# Re-derive the dataset identifier used for the full-meeting results.
meetphrase_itera = 2 # Number of phrase iterations
meetthreshold = "inf" # Threshold value for collocations. If "inf": no collocations
meetmax_df=1.0
meetmin_df=10
MEEETDATA = f"MEET_min{meetmin_df}_max{meetmax_df}_iter{meetphrase_itera}_th{meetthreshold}"
# Load data
full_data = pd.read_pickle(f"{DATAPATH}/full_results/{MEEETDATA}.pkl")
# Shift topic column labels from 0-based (topic_0..topic_9) to 1-based
# (topic_1..topic_10) for presentation.
full_data.rename(columns=dict(zip([f"topic_{k}" for k in range(10)],[f"topic_{k+1}" for k in range(10)] )),inplace=True)
meeting_ckpt = f"{DATAPATH}/results/etm_MEET_min10_max1.0_iter2_thinf_K_10_Htheta_800_Optim_adam_Clip_0.0_ThetaAct_relu_Lr_0.005_Bsz_1000_RhoSize_300_trainEmbeddings_0"
# Retrieve topics
with open(f'{meeting_ckpt}topics.pkl', 'rb') as f:
    meet_topics = pickle.load(f)
# Map 0-based topic id -> comma-joined list of its top words.
top_dic = dict(zip([item[0] for item in meet_topics ],[", ".join(item[1]) for item in meet_topics ] ))
# Check topics
for item in meet_topics:
    print(f'{item[0]+1}: {", ".join(item[1])}')
section1 = full_data[full_data["Section"]==1].copy()
section2 = full_data[full_data["Section"]==2].copy()
k = 0
# One figure per topic (1..10); note top_dic is 0-based here, hence k-1.
for k in range(1,11):
    fig = plt.figure(figsize=(20,9))
    axs = fig.add_subplot(1,1,1)
    plt.subplots_adjust(.1,.20,1,.95)
    section1.plot.scatter('start_date',f'topic_{k}',color="dodgerblue",ax=axs,label="Section 1")
    section2.plot.scatter('start_date',f'topic_{k}',color="red",ax=axs,label="Section 2")
    plt.figtext(0.10, 0.05, f"Topic {k} words: {top_dic[k-1]}", ha="left", fontsize=20)
    axs.set_xlabel("Meeting Day",fontsize=20)
    axs.set_ylabel(f"Topic {k}",fontsize=20)
    axs.yaxis.set_major_formatter(tkr.FuncFormatter(lambda x, p: f"{x:.2f}"))
    axs.grid(linestyle=':')
    axs.tick_params(which='both',labelsize=20,axis="y")
    axs.tick_params(which='both',labelsize=20,axis="x")
    axs.legend( prop={'size': 20})
    plt.savefig(f'output/transcript_topic_{k}.pdf')
    # Best-effort export to the Overleaf folder; tolerate a missing path.
    # NOTE(review): bare `except:` also swallows KeyboardInterrupt etc.
    try:
        #plt.savefig(f'{OVERLEAF}/files/transcript_topic_{k}.eps', format='eps')
        plt.savefig(f'{OVERLEAF}/transcript_topic_{k}.pdf')
    except:
        print("Invalid Overleaf Path")
# Meetings Sampled
# Retrieve topics
full_data = pd.read_pickle(f"{DATAPATH}/full_results/{MEETDATASAMPLE}.pkl")
# NOTE(review): range(12) here vs range(10) above — presumably harmless if
# only 10 topic columns exist, but confirm it matches the sampled model's K.
full_data.rename(columns=dict(zip([f"topic_{k}" for k in range(12)],[f"topic_{k+1}" for k in range(12)] )),inplace=True)
with open(f'{meeting_ckptsampled}topics.pkl', 'rb') as f:
    meet_topics = pickle.load(f)
# Unlike above, top_dic is keyed 1-based here (item[0] + 1), so the plot
# loop below indexes it with k directly.
top_dic = dict(zip([item[0] + 1 for item in meet_topics ],[", ".join(item[1]) for item in meet_topics ] ))
# Check topics
for item in top_dic.keys():
    print(f'{item}: {top_dic[item]}')
section1 = full_data[full_data["Section"]==1].copy()
section2 = full_data[full_data["Section"]==2].copy()
k = 0
for k in range(1,11):
    fig = plt.figure(figsize=(20,9))
    axs = fig.add_subplot(1,1,1)
    plt.subplots_adjust(.1,.20,1,.95)
    section1.plot.scatter('date',f'topic_{k}',color="dodgerblue",ax=axs,label="Section 1")
    section2.plot.scatter('date',f'topic_{k}',color="red",ax=axs,label="Section 2")
    plt.figtext(0.10, 0.05, f"Topic {k} words: {top_dic[k]}", ha="left", fontsize=20)
    axs.set_xlabel("Meeting Day",fontsize=20)
    axs.set_ylabel(f"Topic {k}",fontsize=20)
    axs.yaxis.set_major_formatter(tkr.FuncFormatter(lambda x, p: f"{x:.2f}"))
    axs.grid(linestyle=':')
    axs.tick_params(which='both',labelsize=20,axis="y")
    axs.tick_params(which='both',labelsize=20,axis="x")
    axs.legend( prop={'size': 20})
    plt.savefig(f'output/transcriptsampled_topic_{k}.pdf')
    try:
        #plt.savefig(f'{OVERLEAF}/files/transcript_topic_{k}.eps', format='eps')
        plt.savefig(f'{OVERLEAF}/transcriptsampled_topic_{k}.pdf')
    except:
        print("Invalid Overleaf Path")
# =============================================================================
# ## 7 MN Logit
# Multinomial-logit-style analysis: regress log topic-share ratios
# (relative to base topic 5) on macro/market covariates, then export the
# average marginal effects per topic to LaTeX, separately for Section 1
# and Section 2 of the transcripts.
import statsmodels.api as sm
import pandas as pd
import numpy as np
DATAPATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/etm/")
OVERLEAF = os.path.expanduser("~/Dropbox/Apps/Overleaf/FOMC_Summer2019/files")
topics = pd.read_stata("full_results/MEET_min10_max1.0_iter2_thinf.dta")
# Relabel topic columns 0-based -> 1-based and drop join artifacts.
topics.rename(columns=dict(zip([f"topic_{k}" for k in range(10)],[f"topic_{k+1}" for k in range(10)] )),inplace=True)
topics.drop(columns=["level_0","index", "d"],inplace=True)
# NOTE(review): one specific meeting date is excluded here — presumably a
# known data problem; confirm why 2009-09-16 is dropped.
topics = topics[topics["start_date"]!="2009-09-16"]
econdata = pd.read_pickle("../economic_data/final_data/econmarketdata.pkl")
data = topics.merge(econdata,left_on="start_date",right_on="date",how="inner")
# Log share ratios ln(s_k / s_5): the dependent variables of the MN logit
# in its linear (log-odds) form, with topic 5 as the base category.
for k in range(1,11):
    data[f"lns{k}s5"] = np.log(data[f"topic_{k}"]) - np.log(data[f"topic_5"])
# Raw share ratios s_k / s_5, used to scale the marginal effects below.
for k in range(1,11):
    data[f"s{k}s5"] = data[f"topic_{k}"] / data[f"topic_5"]
data["constant"] = 1
# Covariates: lagged unemployment / PCE inflation / industrial production
# changes, equity returns, Treasury yields and credit spreads.
covs = "l1d_UNRATE l2d_UNRATE l1dln_PCEPI l2dln_PCEPI l1dln_INDPRO l2dln_INDPRO d14ln_spindx d28_SVENY01 d28_SVENY10 TEDRATE SVENY01 SVENY10 BAA10Y AAA10Y"
covs_list = covs.split(" ")
### Section 1 ###
est_section1 = data.loc[data["Section"] == 1,[f"s{k}s5" for k in range(1,11) ]]
res_df = pd.DataFrame([])
# Topic 5 is the base category, hence it is skipped.
for k in [1,2,3,4,6,7,8,9,10]:
    print(f"\n ************** Topic: {k} ***********************\n")
    model = sm.OLS(data.loc[data["Section"] == 1,f"lns{k}s5"], data.loc[data["Section"] == 1,["constant"]+covs_list])
    results = model.fit()
    print(results.summary())
    # Average marginal effect of each covariate: coefficient scaled by the
    # observed share ratio, averaged over the sample.
    ef_dict = []
    for var in covs_list:
        est_section1[f"mr_{var}"] = est_section1[f"s{k}s5"] * results.params[var]
        ef_dict.append(est_section1[f"mr_{var}"].mean())
    aux_df = pd.DataFrame(data=ef_dict,columns=[f"AMX T{k}"],index=list(results.params.keys())[1:])
    res_df = pd.concat([res_df,aux_df],axis=1)
# Human-readable row labels for the LaTeX table.
labels = {"l1d_UNRATE":"Lag 1 dURate","l2d_UNRATE": "Lag 2 dURate","l1dln_PCEPI": "Lag 1 dlnPCEPI",
          "l2dln_PCEPI": "Lag 2 dlnPCEPI","l1dln_INDPRO":"Lag 1 dlnIP","l2dln_INDPRO": "Lag 2 dlnIP",
          "d7ln_spindx": "7 day Return S\&P500","d14ln_spindx":"14 day Return S\&P500",
          "EQUITYCAPE": "Equity Cape","TEDRATE": "Ted Spread","SVENY01": "Tr. 1yr Yield",
          "d28_SVENY01": "$\Delta$ Tr. 1yr Yield","d28_SVENY10": "$\Delta$ Tr. 10yr Yield",
          "SVENY10": "Tr. 10yr Yield", "BAA10Y": "BAA Credit Spread","AAA10Y": "AAA Credit Spread"}
res_df = res_df.reset_index().replace({"index":labels}).set_index("index")
print(res_df.to_latex(escape=False))
res_df.to_latex(f"{OVERLEAF}/section1_avgmr.tex",escape=False,float_format="{:0.3f}".format )
### Section 2 ###
# NOTE(review): the accumulator below is still *named* est_section1 but
# holds Section 2 data — misleading name, though the computation is
# consistent within this pass.
est_section1 = data.loc[data["Section"] == 2,[f"s{k}s5" for k in range(1,11) ]]
res_df = pd.DataFrame([])
for k in [1,2,3,4,6,7,8,9,10]:
    print(f"\n ************** Topic: {k} ***********************\n")
    model = sm.OLS(data.loc[data["Section"] == 2,f"lns{k}s5"], data.loc[data["Section"] == 2,["constant"]+covs_list])
    results = model.fit()
    print(results.summary())
    ef_dict = []
    for var in covs_list:
        est_section1[f"mr_{var}"] = est_section1[f"s{k}s5"] * results.params[var]
        ef_dict.append(est_section1[f"mr_{var}"].mean())
    aux_df = pd.DataFrame(data=ef_dict,columns=[f"AMX T{k}"],index=list(results.params.keys())[1:])
    res_df = pd.concat([res_df,aux_df],axis=1)
labels = {"l1d_UNRATE":"Lag 1 dURate","l2d_UNRATE": "Lag 2 dURate","l1dln_PCEPI": "Lag 1 dlnPCEPI",
          "l2dln_PCEPI": "Lag 2 dlnPCEPI","l1dln_INDPRO":"Lag 1 dlnIP","l2dln_INDPRO": "Lag 2 dlnIP",
          "d7ln_spindx": "7 day Return S\&P500","d14ln_spindx":"14 day Return S\&P500",
          "EQUITYCAPE": "Equity Cape","TEDRATE": "Ted Spread","SVENY01": "Tr. 1yr Yield",
          "d28_SVENY01": "$\Delta$ Tr. 1yr Yield","d28_SVENY10": "$\Delta$ Tr. 10yr Yield",
          "SVENY10": "Tr. 10yr Yield", "BAA10Y": "BAA Credit Spread","AAA10Y": "AAA Credit Spread"}
res_df = res_df.reset_index().replace({"index":labels}).set_index("index")
print(res_df.to_latex(escape=False))
res_df.to_latex(f"{OVERLEAF}/section2_avgmr.tex",escape=False,float_format="{:0.3f}".format )
#
# =============================================================================
# =============================================================================
#
# # Glove pre-trained embeddings
# os.system(f'python main.py --mode train --dataset fomc_impemb --data_path {DATAPATH}/data/SPEAKERS_10_iter2_thinf --emb_path {DATAPATH}/embeddings/preSPEAKERS_10_iter2_thinf --num_topics 10 --train_embeddings 0 --epochs 100')
# model = "etm_fomc_impemb_K_10_Htheta_800_Optim_adam_Clip_0.0_ThetaAct_relu_Lr_0.005_Bsz_1000_RhoSize_300_trainEmbeddings_0"
# print(f"Evaluate model: {model}")
# os.system(f'python main.py --mode eval --dataset fomc_impemb --data_path {DATAPATH}/data/SPEAKERS_10_iter2_thinf --num_topics 10 --emb_path {DATAPATH}/embeddings/preSPEAKERS_10_iter2_thinf --train_embeddings 0 --tc 1 --td 1 --load_from {DATAPATH}/results/{model}')
#
# # Pre-trained embeddings -- meetings
# os.system(f'python main.py --mode train --dataset meeting_pre --data_path {DATAPATH}/data/MEETING_1_iter2_thinf --emb_path {DATAPATH}/embeddings/preMEETING_1_iter2_thinf --num_topics 10 --train_embeddings 0 --epochs 100')
# model = "etm_meeting_pre_K_10_Htheta_800_Optim_adam_Clip_0.0_ThetaAct_relu_Lr_0.005_Bsz_1000_RhoSize_300_trainEmbeddings_0"
# print(f"Evaluate model: {model}")
# os.system(f'python main.py --mode eval --dataset meeting_pre --data_path {DATAPATH}/data/MEETING_1_iter2_thinf --num_topics 10 --emb_path {DATAPATH}/embeddings/preMEETING_1_iter2_thinf --train_embeddings 0 --tc 1 --td 1 --load_from {DATAPATH}/results/{model}')
#
# =============================================================================
# =============================================================================
# ## # DO MODEL EVALUATION OF EMBEDDINGS
# # Load models
# models = []
# for mod in man_models:
# models.append(gensim.models.Word2Vec.load(f"{DATAPATH}/word2vecmodels/{mod}.model").wv)
#
# # All models
# model_title = man_models + [sel_mod]
# models = models + [glove_vectors]
# print("Use following models:")
# pp.pprint(model_title)
#
# pp = pprint.PrettyPrinter(width=80, compact=True)
# keywords = ['inflation','employment','interest','price','growth','output']
# for idx,model in enumerate(models):
# print("*"*80)
# print(f"{model_title[idx]} Word Vectors")
# print("*"*80)
# for key in keywords:
# msw = [v[0] for v in model.most_similar(key)]
# print(f"{key}:")
# pp.pprint(msw)
# print("\n")
#
# # Latex Export of results
# for idx,model in enumerate(models):
# fulldata =pd.DataFrame([])
# for key in keywords:
# msw = [v[0] for v in model.most_similar(key)]
# data = pd.DataFrame(msw,columns=[key])
# fulldata = pd.concat([data,fulldata],axis=1)
#
# #print(fulldata.to_latex())
# fulldata.to_latex(f"{OVERLEAF}/emb_{model_title[idx]}.tex")
#
# =============================================================================
# =============================================================================
#
# # Joint training of embeddings
# os.system(f'python main.py --mode train --dataset fomc_joint --data_path {DATAPATH}/data/SPEAKERS_10_iter2_th80 --num_topics 10 --train_embeddings 1 --epochs 100')
# model = "etm_fomc_joint_K_10_Htheta_800_Optim_adam_Clip_0.0_ThetaAct_relu_Lr_0.005_Bsz_1000_RhoSize_300_trainEmbeddings_1"
# print(f"Evaluate model: {model}")
# os.system(f'python main.py --mode eval --dataset fomc_joint --data_path {DATAPATH}/data/SPEAKERS_10_iter2_th80 --num_topics 10 --train_embeddings 1 --tc 1 --td 1 --load_from {DATAPATH}/results/{model}')
# # Joint training of embeddings
# =============================================================================
# =============================================================================
# # =============================================================================
# # ## #3 Get Pre-Trained Word Embeddings
#
# sel_mod = "glove-wiki-gigaword-300"
# glove_vectors = gensim.downloader.load(sel_mod)
#
# with open(f'{DATAPATH}/data/{SPEAKERDATA}/vocab.pkl', 'rb') as f:
# vocab = pickle.load(f)
#
# # Write the embeddings to a file
# with open(f"{DATAPATH}/embeddings/{EMBDATASET}_pre", 'w') as f:
# for v in glove_vectors.index_to_key:
# if v in vocab:
# vec = list(glove_vectors[v])
# f.write(v + ' ')
# vec_str = ['%.9f' % val for val in vec]
# vec_str = " ".join(vec_str)
# f.write(vec_str + '\n')
#
# with open(f'{DATAPATH}/data/{MEEETDATA}/vocab.pkl', 'rb') as f:
# vocab = pickle.load(f)
#
# # Write the embeddings to a file
# with open(f"{DATAPATH}/embeddings/{MEEETDATA}_pre", 'w') as f:
# for v in glove_vectors.index_to_key:
# if v in vocab:
# vec = list(glove_vectors[v])
# f.write(v + ' ')
# vec_str = ['%.9f' % val for val in vec]
# vec_str = " ".join(vec_str)
# f.write(vec_str + '\n')
# print("*" * 80)
# print(f"Embeddings Extracted")
# print("*" * 80)
# print("\n\n")
# =============================================================================
# =============================================================================
# ## MEETINGS - Pre-Trained Emb. - Sample
# MEETDATASAMPLE = f"{MEEETDATA}sampled"
# nr_topics = 10
# meeting_ckptsampled = etm(f"{MEETDATASAMPLE}",data_path=f"{DATAPATH}/data/{MEETDATASAMPLE}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# batch_size = 1000, epochs = 1000, num_topics = nr_topics, rho_size = 300,
# emb_size = 300, t_hidden_size = 800, theta_act = 'relu',
# train_embeddings = 0, lr = 0.005, lr_factor=4.0,
# mode = 'train', optimizer = 'adam',
# seed = 2019, enc_drop = 0.0, clip = 0.0,
# nonmono = 10, wdecay = 1.2e-6, anneal_lr = 0, bow_norm = 1,
# num_words = 15, log_interval = 2, visualize_every = 100, eval_batch_size = 1000,
# load_from = "", tc = 1, td = 1)
#
# print(f"Evaluate model: {meeting_ckptsampled}")
# etm(f"{MEETDATASAMPLE}",data_path=f"{DATAPATH}/data/{MEETDATASAMPLE}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# mode = 'eval', load_from = f"{meeting_ckptsampled}", train_embeddings = 0 ,tc = 1, td = 1,num_topics = nr_topics)
#
# print(f"Output the topic distribution: {meeting_ckptsampled}")
# etm(f"{MEETDATASAMPLE}",data_path=f"{DATAPATH}/data/{MEETDATASAMPLE}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# mode = 'retrieve',load_from = f"{meeting_ckptsampled}", train_embeddings = 0,num_topics = nr_topics)
#
# =============================================================================
# =============================================================================
# ## TRANSCRIPTS - Pre-Trained Emb.
#
# ts_ckpt = etm(f"{TSDATA}",data_path=f"{DATAPATH}/data/{TSDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# batch_size = 1000, epochs = 1000, num_topics = 10, rho_size = 300,
# emb_size = 300, t_hidden_size = 800, theta_act = 'relu',
# train_embeddings = 0, lr = 0.005, lr_factor=4.0,
# mode = 'train', optimizer = 'adam',
# seed = 2019, enc_drop = 0.0, clip = 0.0,
# nonmono = 10, wdecay = 1.2e-6, anneal_lr = 0, bow_norm = 1,
# num_words =10, log_interval = 2, visualize_every = 10, eval_batch_size = 1000,
# load_from = "", tc = 1, td = 1)
#
# print(f"Evaluate model: {ts_ckpt}")
# etm(f"{TSDATA}",data_path=f"{DATAPATH}/data/{TSDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# mode = 'eval', load_from = f"{ts_ckpt}", train_embeddings = 0 ,num_topics = 10,tc = 1, td = 1)
#
# print(f"Output the topic distribution: {ts_ckpt}")
# etm(f"{TSDATA}",data_path=f"{DATAPATH}/data/{TSDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# mode = 'retrieve',load_from = f"{ts_ckpt}", train_embeddings = 0,num_topics = 10)
# =============================================================================
# =============================================================================
# ts_phrase_itera = 2
# ts_threshold = "inf"
# ts_max_df= 1.0
# ts_min_df = 10
# TSDATA = f"TS_min{meetmin_df}_max{meetmax_df}_iter{meetphrase_itera}_th{meetthreshold}"
# build_transcriptdata(ts_max_df,ts_min_df,ts_phrase_itera,ts_threshold,TSDATA)
# =============================================================================
# =============================================================================
# speakerphrase_itera = 2 # Number o fphrase iterations
# speakerthreshold = "inf" # Threshold value for collocations. If "inf": no collocations
# speakermax_df = 0.7
# speakermin_df = 10
# SPEAKERDATA = f"SPEAKERS_min{speakermin_df}_max{speakermax_df}_iter{speakerphrase_itera}_th{speakerthreshold}"
# build_speakerdata(speakermax_df,speakermin_df,speakerphrase_itera,speakerthreshold,SPEAKERDATA)
# =============================================================================
|
12,052 | 2bd605b0331c2f1bacbb5667544c0c4342768367 | %%file mapper_client.py
#!/usr/bin/env python
import sys
import traceback
import datetime
from datetime import datetime as dt
import calendar
import time
import numpy as np
# Emit one "value,1" record per input line (streaming map step).
for line in sys.stdin:
    # Bug fix: lines from sys.stdin keep their trailing newline, so the
    # original emitted "value\n,1" split across two lines. Strip it so
    # each record is a single "value,1" line.
    print("{},{}".format(line.rstrip("\n"), 1))
12,053 | bad559c08376692855aea404089b3276f9f4480b | from .cer import cer
from .wer import wer
from .metrics import Metrics
|
12,054 | b1459a336cfb0948c72f3346170d2b96571c7338 | # BSD-3-Clause License
#
# Copyright 2017 Orange
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import random
from collections import deque

import networkx as nx
class Node(object):
    """A generic node of a bipartite graph.

    Wraps a content object (which must expose a ``name`` attribute) and
    keeps a list of neighbor nodes. When ``node_type`` is set, two nodes
    carrying the same type may never be connected, which enforces the
    bipartite structure.
    """

    def __init__(self, content, node_type=None):
        """
        :param content: wrapped object; its ``name`` attribute is used as
            the node name (and id, so all names in a graph must differ)
        :param node_type: optional type tag used for bipartite checks
        """
        self.content = content
        self.type = node_type
        self.neighbors = []

    @property
    def name(self):
        """Name of the wrapped content object."""
        return self.content.name

    def add_neighbors(self, node, directed=False):
        """Link ``node`` to this node (both ways unless ``directed``).

        Raises ValueError when both nodes carry the same non-None type.
        """
        if node.type is not None and node.type == self.type:
            raise ValueError(
                "In a bipartite graph two nodes with the same "
                "type cannot be connected : {} - {}".format(node, self)
            )
        self.neighbors.append(node)
        if not directed:
            node.add_neighbors(self, directed=True)
def as_bipartite_graph(variables, relations):
    """Build a bipartite graph of Node objects.

    Each variable becomes a "VARIABLE" node, each relation a "CONSTRAINT"
    node, and every relation node is linked to the nodes of the variables
    in its dimensions (at most once per relation/variable pair).

    :param variables: list of Variable objects (must expose ``name``)
    :param relations: list of Relation objects (``name`` and ``dimensions``)
    :return: a view of all created Node objects
    """
    nodes = {}
    for v in variables:
        nodes[v.name] = Node(v, "VARIABLE")
    for r in relations:
        n = Node(r, "CONSTRAINT")
        nodes[r.name] = n
        for v in r.dimensions:
            current_var_neighbors = [nb.content for nb in nodes[v.name].neighbors]
            # Bug fix: the original tested ``v not in current_var_neighbors``,
            # which is always true (a variable node only ever has constraint
            # neighbors), so a relation listing the same variable twice in
            # its dimensions created duplicate edges. Checking for the
            # relation makes the de-duplication effective.
            if r not in current_var_neighbors:
                n.add_neighbors(nodes[v.name])
    return nodes.values()
def calc_diameter(nodes):
    """Compute the diameter of a tree made of variable/relation nodes.

    Warning: this only works on tree graphs! For arbitrary graphs the
    diameter is the longest of all shortest paths between any two
    vertices, which this double-sweep shortcut does not compute.

    :param nodes: list of Node objects forming a tree
    :return: the diameter (an int)
    """
    # Double sweep: start from an arbitrary node and find the furthest
    # node; the furthest node from *that* node lies at distance equal to
    # the tree's diameter.
    start = random.choice(nodes)
    extremity, _ = find_furthest_node(start, nodes)
    _, diameter = find_furthest_node(extremity, nodes)
    return diameter
def find_furthest_node(root_node, nodes):
    """Return the node furthest from ``root_node`` and its distance.

    Performs a true breadth-first traversal of the component reachable
    from ``root_node``, so distances are shortest-path distances.
    (Bug fix: the original used ``list.pop()``, a LIFO — i.e. depth-first
    order — which only yields correct distances on trees; a FIFO queue
    preserves the tree results and also generalizes to arbitrary graphs.)

    :param root_node: Node to start the search from
    :param nodes: unused; kept for backward compatibility with callers
    :return: (furthest_node, distance) tuple
    """
    queue = deque([root_node])
    distances = {root_node.name: 0}
    max_distance = 0
    furthest_node = root_node
    while queue:
        current = queue.popleft()
        for neighbor in current.neighbors:
            # Visit each node only once.
            if neighbor.name not in distances:
                d = distances[current.name] + 1
                distances[neighbor.name] = d
                queue.append(neighbor)
                if d > max_distance:
                    max_distance = d
                    furthest_node = neighbor
    return furthest_node, max_distance
def as_networkx_graph(variables, relations):
    """Build a networkx graph object from variables and relations.

    Each variable becomes a node; every relation contributes an edge
    between each pair of variables it involves.

    Parameters
    ----------
    variables: list
        a list of Variable objets
    relations: list
        a list of Relation objects

    Returns
    -------
    a networkx graph object
    """
    g = nx.Graph()
    # One node for each variable.
    g.add_nodes_from(v.name for v in variables)
    for relation in relations:
        involved = [dim.name for dim in relation.dimensions]
        for pair in all_pairs(involved):
            g.add_edge(*pair)
    return g
def as_networkx_bipartite_graph(variables, relations):
    """Build a bipartite networkx graph from variables and relations.

    Variable nodes carry ``bipartite=0`` and relation nodes
    ``bipartite=1``; each relation is linked to every variable it
    depends on.

    Parameters
    ----------
    variables: list
        a list of Variable objets
    relations: list
        a list of Relation objects

    Returns
    -------
    a networkx graph object
    """
    g = nx.Graph()
    g.add_nodes_from((v.name for v in variables), bipartite=0)
    g.add_nodes_from((r.name for r in relations), bipartite=1)
    g.add_edges_from(
        (relation.name, dim.name)
        for relation in relations
        for dim in relation.dimensions
    )
    return g
def display_graph(variables, relations):
    """Draw the constraint graph with networkx and matplotlib.

    Prints an error (instead of crashing) when matplotlib is not
    installed.

    Parameters
    ----------
    variables: list
        a list of Variable objets
    relations: list
        a list of Relation objects
    """
    graph = as_networkx_graph(variables, relations)
    # Do not crash if matplotlib is not installed
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        print("ERROR: cannot display graph, matplotlib is not installed")
        return
    nx.draw_networkx(graph, with_labels=True)
    plt.show()
def display_bipartite_graph(variables, relations):
    """Draw the variable/factor bipartite graph with networkx + matplotlib.

    Variables are drawn as blue circles, factors as red squares; labels
    and edges are drawn separately.

    Parameters
    ----------
    variables: list
        a list of Variable objets
    relations: list
        a list of Relation objects
    """
    graph = as_networkx_bipartite_graph(variables, relations)
    # Do not crash if matplotlib is not installed
    try:
        import matplotlib.pyplot as plt

        pos = nx.drawing.spring_layout(graph)
        # Renamed from `variables` to avoid shadowing the parameter.
        variable_nodes = set(
            n for n, d in graph.nodes(data=True) if d["bipartite"] == 0
        )
        factors = set(graph) - variable_nodes
        # Bug fix: `with_labels` is not a valid draw_networkx_nodes()
        # keyword (it belongs to draw_networkx()); recent networkx
        # versions raise on unexpected kwargs. Labels are drawn below
        # with draw_networkx_labels() anyway.
        nx.draw_networkx_nodes(
            graph,
            pos=pos,
            nodelist=variable_nodes,
            node_shape="o",
            node_color="b",
            label="variables",
            alpha=0.5,
        )
        nx.draw_networkx_nodes(
            graph,
            pos=pos,
            nodelist=factors,
            node_shape="s",
            node_color="r",
            label="factors",
            alpha=0.5,
        )
        nx.draw_networkx_labels(graph, pos=pos)
        nx.draw_networkx_edges(graph, pos=pos)
        plt.show()
    except ImportError:
        print("ERROR: cannot display graph, matplotlib is not installed")
def cycles_count(variables, relations):
    """Return the number of independent cycles in the constraint graph.

    :param variables: a list of Variable objects
    :param relations: a list of Relation objects
    :return: the size of the graph's cycle basis
    """
    basis = nx.cycle_basis(as_networkx_graph(variables, relations))
    return len(basis)
def graph_diameter(variables, relations):
    """
    Compute the graph diameter(s).

    If the graph contains several independent sub graphs, returns a list
    with the diameter of each of the subgraphs.

    :param variables:
    :param relations:
    :return: list of diameters, one per connected component
    """
    g = as_networkx_graph(variables, relations)
    return [
        nx.diameter(g.subgraph(component).copy())
        for component in nx.connected_components(g)
    ]
def all_pairs(elements):
    """Generate every unordered pair of the given elements.

    Pairs have no order: (a, b) is the same as (b, a), so only one
    orientation of each pair appears in the result.

    :param elements: a sequence of elements
    :return: a list of pairs, for example [('a', 'b')]
    """
    count = len(elements)
    pairs = []
    # Walk the first index backwards to reproduce the exact ordering of
    # the original recursive formulation (pairs of later elements first).
    for i in range(count - 2, -1, -1):
        for j in range(i + 1, count):
            pairs.append((elements[i], elements[j]))
    return pairs
|
12,055 | 1579ef63299274e7ba389f1df36ddbbc0303fbca | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
# Base policy name shared by every os-services API rule.
BASE_POLICY_NAME = 'os_compute_api:os-services'

# A single admin-only rule (RULE_ADMIN_API) governs all os-services
# operations: listing services, enabling/disabling scheduling, logging a
# disable reason, forcing a service down (incl. the microversion-2.53 PUT
# variant) and deleting a service.
services_policies = [
    policy.DocumentedRuleDefault(
        BASE_POLICY_NAME,
        base.RULE_ADMIN_API,
        "List all running Compute services in a region, enables or disable "
        "scheduling for a Compute service, logs disabled Compute service "
        "information, set or unset forced_down flag for the compute service "
        "and delete a Compute service",
        [
            {
                'method': 'GET',
                'path': '/os-services'
            },
            {
                'method': 'PUT',
                'path': '/os-services/enable'
            },
            {
                'method': 'PUT',
                'path': '/os-services/disable'
            },
            {
                'method': 'PUT',
                'path': '/os-services/disable-log-reason'
            },
            {
                'method': 'PUT',
                'path': '/os-services/force-down'
            },
            {
                # Added in microversion 2.53.
                'method': 'PUT',
                'path': '/os-services/{service_id}'
            },
            {
                'method': 'DELETE',
                'path': '/os-services/{service_id}'
            }
        ]),
]


def list_rules():
    """Return the policy rules defined by this module (nova entry point)."""
    return services_policies
12,056 | 8f8c87d8ff5360f8df94413b189ca26b62a703ce | #coding=utf-8
#Version:python3.6.0
#Tools:Pycharm 2017.3.2
# Author:LIKUNHONG
# Module authorship metadata (informational only).
__date__ = '2019/2/27 21:41'
__author__ = 'likunkun'
12,057 | f8d81eba8af36b5c9ff2d866e4412e314f301770 | # -*- coding: utf-8 -*-
import numpy as np
import random
import cPickle
import gzip
import matplotlib.cm as cm
import matplotlib.pyplot as plt
class Data(object):
    """Container for the MNIST train/validation/test splits (Python 2).

    Images are stored as flat (784, 1) float column vectors and labels as
    one-hot (10, 1) vectors. A per-split index array ``s`` allows
    shuffled-order access without moving the data itself.
    """

    def __init__(self, verb=False):
        # verb: verbosity flag (stored, not otherwise used in this class).
        self.verb = verb
        # Per-split containers keyed by 'trn'/'vld'/'tst':
        #   x = images, y = labels, s = access-order indices, n = sizes.
        self.x = {'trn': None, 'vld': None, 'tst': None}
        self.y = {'trn': None, 'vld': None, 'tst': None}
        self.s = {'trn': None, 'vld': None, 'tst': None}
        self.n = {'trn': 0, 'vld': 0, 'tst': 0}
        # sh: 2-D image shape; sz: flattened size (both set by load_mnist).
        self.sh, self.sz = None, None

    def load_mnist(self, file_path):
        # Load the classic gzip/cPickle mnist.pkl.gz archive (three
        # (images, labels) tuples) and reshape into column vectors.
        def vectorized_result(j):
            # One-hot encode digit j as a (10, 1) column vector.
            e = np.zeros((10, 1))
            e[j] = 1.0
            return e
        f = gzip.open(file_path, 'rb')
        trn_d, vld_d, tst_d = cPickle.load(f)
        f.close()
        self.x['trn'] = [np.reshape(x, (784, 1)) for x in trn_d[0]]
        self.y['trn'] = [vectorized_result(y) for y in trn_d[1]]
        self.x['vld'] = [np.reshape(x, (784, 1)) for x in vld_d[0]]
        self.y['vld'] = [vectorized_result(y) for y in vld_d[1]]
        self.x['tst'] = [np.reshape(x, (784, 1)) for x in tst_d[0]]
        self.y['tst'] = [vectorized_result(y) for y in tst_d[1]]
        self.sh = (28, 28)
        self.sz = 784
        # if len(self.sh)==1:
        #    self.sh = tuple([int(np.sqrt(self.sz))]*2)
        # if np.prod(self.sh) != self.sz:
        #    raise ValueError('Incorrect image unforlding.')
        # Initialize per-split sizes and identity access order.
        for dt in self.x.iterkeys():
            self.n[dt] = len(self.x[dt])
            self.s[dt] = np.arange(0, self.n[dt], 1, dtype=int)

    def random_shuffle(self, dt):
        # Shuffle only the access-order indices of split dt, in place.
        random.shuffle(self.s[dt])

    def get(self, i, dt, y2int=False):
        # Return the i-th example of split dt in the current (possibly
        # shuffled) order. y2int=True converts a one-hot label to its
        # integer class; y2int=False converts an int label to one-hot.
        x, y = self.x[dt][self.s[dt][i]], self.y[dt][self.s[dt][i]]
        if isinstance(y, int) and y2int==False:
            # NOTE(review): self.szy is never defined in this class, so
            # this branch would raise AttributeError if labels were ever
            # stored as ints. After load_mnist labels are one-hot
            # vectors, so the branch is currently dead — confirm intent.
            e = np.zeros((self.szy, 1), dtype=int); e[y] = 1.; y = e
        if not isinstance(y, int) and y2int==True:
            y = int(np.argmax(y))
        return x, y

    def iterate(self, dt, y2int=False):
        # Generator over all examples of split dt in the current order.
        for i in range(self.n[dt]):
            yield self.get(i, dt, y2int)

    def present(self, i, dt='trn'):
        # Print the label and display the image in greyscale (Python 2
        # print statements; blocks until the matplotlib window closes).
        x, y = self.get(i, dt, True)
        if len(x.shape)==1 or x.shape[1]==1:
            x = x.reshape(self.sh)
        print 'Image: %s #%-d'%(dt, i),
        print '| Content: "%s"'%(unicode(y))
        plt.imshow(x, cmap=cm.Greys_r)
        plt.show()
12,058 | f2c1c28ab3d2b6b00f39a9d8ed5200855954af28 | import math, datetime, os
from FCN import *
from voxnet import VoxNet
from fmri_data import fMRI_data
from config import cfg
import time
from evaluation import *
from sklearn import svm
def main(data_index=None,cut_shape=None,data_type=['MCIc','MCInc'],pre_dir='/home/anzeng/rhb/fmri_data',
num_batches = 256*5,voxnet_point=None,test_size = 6,brain_map=[217]):
# fr = open(cfg.output, 'w')
tf.reset_default_graph()
time_dim = 80 # 挑选时间片个数
batch_size = 8
dataset = fMRI_data(data_type,data_index=data_index,varbass=False,dir=pre_dir)
#SVM index
#########################
svm_index = {}
train_len = 0
test_len = 0
for d_type in data_type:
t_dir = os.path.join(pre_dir,d_type)
t_len = os.listdir(t_dir)
t_len = len(t_len)
train_index = list(range(t_len))
test_index = data_index[d_type]['test']
for x in test_index:
train_index.remove(x)
_index = {'train':train_index,'test':test_index}
train_len += len(train_index)
test_len += len(test_index)
svm_index[d_type] = _index
print(train_len)
print(test_len)
print(svm_index)
svm_dataset = fMRI_data(data_type,data_index = svm_index,varbass=False,dir=pre_dir)
##########################
xyz = 32
input_shape = [None,xyz,xyz,xyz,1]
# for i in range(3):
# input_shape.append(cut_shape[2 * i + 1] + 1 - cut_shape[2 * i])
# input_shape.append(1)
# print(input_shape)
voxnet = VoxNet(input_shape=input_shape, voxnet_type='cut')
FCNs = Classifier_FCN(tf.placeholder(tf.float32,[None,time_dim,50]),nb_classes=2)
data_value = [[1], [1]]
# 创建数据
p = dict() # placeholders
p['labels'] = tf.placeholder(tf.float32, [None, 2])
p['data_value'] = tf.placeholder(tf.float32, [2, 1])
p['Weight'] = tf.matmul(p['labels'], p['data_value'])
p['cross_loss'] = tf.nn.softmax_cross_entropy_with_logits(logits=FCNs[-2], labels=p['labels'])
p['Weight'] = tf.reshape(p['Weight'], [-1])
p['x_loss'] = tf.multiply(p['Weight'], p['cross_loss'])
p['loss'] = tf.reduce_mean(p['x_loss'])
p['l2_loss'] = tf.add_n([tf.nn.l2_loss(w) for w in FCNs.kernels])
p['prediction'] = tf.argmax(FCNs[-1],1)
p['y_true'] = tf.argmax(p['labels'],1)
p['correct_prediction'] = tf.equal(p['prediction'], p['y_true'])
p['accuracy'] = tf.reduce_mean(tf.cast(p['correct_prediction'], tf.float32))
p['learning_rate'] = tf.placeholder(tf.float32)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
p['train'] = tf.train.AdamOptimizer(p['learning_rate'], epsilon=1e-3).minimize(p['loss'])
p['weights_decay'] = tf.train.GradientDescentOptimizer(p['learning_rate']).minimize(p['l2_loss'])
# p['test_error'] = tf.placeholder(tf.float32)
# 超参数设置
initial_learning_rate = 0.01
min_learning_rate = 0.000001
learning_rate_decay_limit = 0.0001
num_batches_per_epoch = len(dataset.train) / float(batch_size)
learning_decay = 10 * num_batches_per_epoch
weights_decay_after = 5 * num_batches_per_epoch
checkpoint_num = 0
learning_step = 0
min_loss = 1e308
if voxnet_point:
cfg.voxnet_checkpoint = voxnet_point
accuracy_filename = os.path.join(cfg.fcn_checkpoint_dir, 'accuracies.txt')
if not os.path.isdir(cfg.fcn_checkpoint_dir):
os.mkdir(cfg.fcn_checkpoint_dir)
if not os.path.exists(accuracy_filename):
with open(accuracy_filename, 'a') as f:
f.write('')
with open(accuracy_filename,'a') as f:
f.write(str(brain_map)+'\n')
#返回值
test_evaluation = evaluation()
with tf.Session() as session:
session.run(tf.global_variables_initializer())
voxnet.npz_saver.restore(session, cfg.voxnet_checkpoint)
#voxnet赋值
input_shape[0]=1
voxnet_data = np.ones(input_shape,np.float32)
input_shape[0]=-1
for batch_index in range(num_batches):
start = time.time()
# learning_rate = max(min_learning_rate,
# initial_learning_rate * 0.5 ** (learning_step / learning_decay))
learning_rate = 0.0001
learning_step += 1
if batch_index > weights_decay_after and batch_index % 256 == 0:
session.run(p['weights_decay'], feed_dict=feed_dict)
voxs, labels = dataset.train.oversampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs,voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:0.7, p['labels']: labels,
p['learning_rate']: learning_rate, FCNs.training: True,p['data_value']:data_value}
session.run(p['train'], feed_dict=feed_dict)
if batch_index and batch_index % 32 == 0:
print("{} batch: {}".format(datetime.datetime.now(), batch_index))
print('learning rate: {}'.format(learning_rate))
# fr.write("{} batch: {}".format(datetime.datetime.now(), batch_index))
# fr.write('learning rate: {}'.format(learning_rate))
feed_dict[FCNs.training] = False
loss = session.run(p['loss'], feed_dict=feed_dict)
print('loss: {}'.format(loss))
if (batch_index and loss > 1.5 * min_loss and
learning_rate > learning_rate_decay_limit):
min_loss = loss
learning_step *= 1.2
print("decreasing learning rate...")
min_loss = min(loss, min_loss)
if batch_index and batch_index % 16 == 0:
num_accuracy_batches = 20
train_evaluation = evaluation()
for x in range(num_accuracy_batches):
voxs, labels = dataset.train.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs, voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0,p['labels']: labels, FCNs.training: False}
predictions, y_true = session.run([p['prediction'], p['y_true']], feed_dict=feed_dict)
train_evaluation += evaluation(y_true=y_true, y_predict=predictions)
print('training accuracy \n' + str(train_evaluation))
num_accuracy_batches = test_size
test_evaluation = evaluation()
for x in range(num_accuracy_batches):
voxs, labels = dataset.test.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs,voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
predictions,y_true = session.run([p['prediction'],p['y_true']], feed_dict=feed_dict)
test_evaluation += evaluation(y_true=y_true, y_predict=predictions)
print(test_evaluation)
print('test accuracy \n'+str(test_evaluation))
with open(accuracy_filename, 'a') as f:
f.write('checkpoint_num:' + str(checkpoint_num) + ':\n')
f.write('train:\n' + str(train_evaluation) + '\n')
f.write('test:\n' + str(test_evaluation) + '\n')
if batch_index % 64 or train_evaluation.ACC >= 0.8 == 0:
######SVM分类器####################
svm_feature = np.zeros((train_len+test_len,128))
svm_label = np.zeros(train_len+test_len)
for x in range(train_len):
voxs, labels = svm_dataset.train.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size = 1)
feed_dict = {FCNs[0]: voxs, voxnet[0]: voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
feature,y_true = session.run([FCNs['gap'],p['y_true']],feed_dict = feed_dict)
feature = np.reshape(feature,[1,128])
svm_feature[x] = feature
# print(svm_feature[x])
svm_label[x] = y_true
for x in range(test_len):
voxs, labels = svm_dataset.test.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size = 1)
feed_dict = {FCNs[0]: voxs, voxnet[0]: voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
feature,y_true = session.run([FCNs['gap'],p['y_true']],feed_dict = feed_dict)
feature = np.reshape(feature,[1, 128])
svm_feature[train_len + x] = feature
svm_label[train_len + x] = y_true
# print(svm_feature[0:train_len])
# print(svm_label[0:train_len])
clf = svm.SVC(C=1.0,kernel='rbf',gamma='auto')
clf.fit(svm_feature[0:train_len],svm_label[0:train_len])
predictions = clf.predict(svm_feature)
svm_train_evaluation = evaluation(y_true=svm_label[:train_len],y_predict=predictions[:train_len])
svm_test_evaluation = evaluation(y_true=svm_label[train_len:],y_predict=predictions[train_len:])
print('svm_train:\n'+str(svm_train_evaluation))
print('svm_test:\n' + str(svm_test_evaluation))
with open(accuracy_filename,'a') as f:
f.write('svm_train:\n' + str(svm_train_evaluation) + '\n')
f.write('svm_test:\n' + str(svm_test_evaluation) + '\n')
#################################################
# fr.write('test accuracy: {}'.format(test_accuracy))
if batch_index % 128 == 0 or train_evaluation.ACC >= 0.85:
print('saving checkpoint {}...'.format(checkpoint_num))
filename = 'cx-{}.npz'.format(checkpoint_num)
filename = os.path.join(cfg.fcn_checkpoint_dir, filename)
FCNs.npz_saver.save(session, filename)
print('checkpoint saved!')
checkpoint_num += 1
if train_evaluation.ACC >= 0.85:
break
end = time.time()
print('time:',(end-start)/60)
return test_evaluation
if __name__ == '__main__':
tf.app.run()
|
12,059 | e5c12f9631ec0f5d6baa13ba34e4b071d9345b9d | #!/usr/bin/python
# @author: Viviana Castillo, APL UW
##################################################################################
#print pressure sensor output
##################################################################################
#standard imports
import time
#third party imports
import ms5837
#-------------------------------------------------------------------------------------------------
#pressure sensor
#-------------------------------------------------------------------------------------------------
sensor=ms5837.MS5837_30BA() #default i2c bus is 1
#initialize sensor before reading it
#Pressure sensor checked if iniitialized
if not sensor.init():
print ("Sensor could not be initialized")
exit(1)
#================================================================================
#Loop Begins
#================================================================================
while True:
#read sensor
if sensor.read():
print("P: %0.1f mbar %0.3f psi\tT: %0.2f C %0.2f F") % (sensor.pressure(),
sensor.pressure(ms5837.UNITS_psi),
sensor.temperature(),
sensor.temperature(ms5837.UNITS_Farenheit))
else:
print ("Sensor failed")
exit(1)
#------------------------------------------------------------
#calculate current depth from pressure sensor
#------------------------------------------------------------
#set variable to pressure sensors current psiS readings
psi = sensor.pressure(ms5837.UNITS_psi)
#standard psi for water
water=1/1.4233
#pressure at sea level
p = 14.7
#calculate the current depth
currentDepth = (psi-p)*water
print ("Current Depth: %s" % currentDepth)
time.sleep(0.5)
#================================================================================
#Loop Begins
#================================================================================SS |
12,060 | 36ae5230e168fb95c705ea3b20b80b4fd3ee6458 | import arcade
from arcade import Sprite, Texture, SpriteList
import math
from bounce_dot_sprite import BounceDotSprite
SCREEN_WIDTH = 600
DOT_SIDE_LENGTH = 20
SCREEN_HEIGHT = 600
SPRITE_SCALING_FUNKY = 0.9
MOVEMENT_SPEED = 5
class MyGame(arcade.Window):
def __init__(self, width, height, name):
super().__init__(width, height, name)
def setup(self):
self.the_funky_blue = arcade.SpriteList()
for i in range(1, 11):
funky_blue = arcade.Sprite("assets/funky_blue_colour_{}.jpg".format(i), SPRITE_SCALING_FUNKY)
self.the_funky_blue.append(funky_blue)
self.add_lifes()
self.current_Funky = self.the_funky_blue[-1]
placeSprite(self.current_Funky, SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2)
self.dots = []
blueTopDot = BounceDotSprite(DOT_SIDE_LENGTH, DOT_SIDE_LENGTH, "#0000ff", self)
placeSprite(blueTopDot, SCREEN_WIDTH // 2, SCREEN_HEIGHT - DOT_SIDE_LENGTH)
blueTopDot.rand_velocity(5, math.pi + 0.5 , 2 * (math.pi) - 0.5)
self.dots.append(blueTopDot)
redBottomDot = BounceDotSprite(DOT_SIDE_LENGTH, DOT_SIDE_LENGTH, "#ff0000", self)
placeSprite(redBottomDot, SCREEN_WIDTH // 2, DOT_SIDE_LENGTH)
redBottomDot.rand_velocity(5, 0.5, math.pi - 0.5)
self.dots.append(redBottomDot)
self.physics_engine = arcade.PhysicsEngineSimple(self.current_Funky, arcade.SpriteList())
self.score = 0
self.level = 1
self.next_score = 10
def add_lifes(self):
offset = 10
self.life_icons = arcade.SpriteList()
for i in range(0, 3):
heart = arcade.Sprite("assets/heart_life.png", 0.05)
placeSprite(heart, 40 + (offset + heart.width) * i, SCREEN_HEIGHT - heart.height * 2.5)
self.life_icons.append(heart)
self.lives = 3
def on_draw(self):
arcade.start_render()
if self.lives <= 0:
gameOverSign = f"GAME OVER SCORE: {self.score}"
arcade.draw_text(gameOverSign, self.width // 2, self.height // 2, arcade.color.NAPIER_GREEN, 30, bold=True,
align = "center",
anchor_x = "center", anchor_y = "center")
newGameSign = f"HIT RETURN TO START A NEW GAME"
arcade.draw_text(newGameSign, self.width // 2, self.height // 4, arcade.color.NAPIER_GREEN, 20, bold=True,
align="center",
anchor_x="center", anchor_y="center")
else:
self.current_Funky.draw()
for sprite in self.dots:
sprite.draw()
output = f"Score: {self.score}"
arcade.draw_text(output, self.width - 100, self.height - 40, arcade.color.AIR_FORCE_BLUE, 20, bold=True)
self.life_icons.draw()
def update(self, delta_time: float):
if self.lives > 0:
self.physics_engine.update()
if self.current_Funky.left < 1 or self.current_Funky.right >= self.width:
self.current_Funky.change_x = 0
for sprite in self.dots:
sprite.update()
if self.current_Funky.left - sprite.width <= sprite.left <= self.current_Funky.right:
if sprite.change_y < 0 and abs(sprite.bottom - self.current_Funky.top) <= 2:
self._reflect(sprite)
elif sprite.change_y > 0 and abs(sprite.top - self.current_Funky.bottom) <= 2:
self._reflect(sprite)
if self.current_Funky.bottom <= sprite.center_y <= self.current_Funky.top:
if abs(sprite.right - self.current_Funky.left) <= 2:
sprite.change_x = - sprite.change_x
elif abs(sprite.left - self.current_Funky.right) <= 2:
sprite.change_x = - sprite.change_x
self.dots = [self._swap_color(s) for s in self.dots]
if self.score >= self.next_score and self.level < len(self.the_funky_blue):
self.level += 1
self.next_score += 1
smallerFunky = self.the_funky_blue[- self.level]
placeSprite(smallerFunky, self.current_Funky.center_x, self.current_Funky.center_y)
self.current_Funky = smallerFunky
self.physics_engine = arcade.PhysicsEngineSimple(self.current_Funky, arcade.SpriteList())
def on_key_press(self, symbol: int, modifiers: int):
if symbol == arcade.key.RETURN and self.lives == 0:
self.setup()
if symbol == arcade.key.LEFT and self.current_Funky.left > 0:
self.current_Funky.change_x = -MOVEMENT_SPEED if self.current_Funky.left > MOVEMENT_SPEED else - self.current_Funky.left
elif symbol == arcade.key.RIGHT and self.current_Funky.right < self.width:
self.current_Funky.change_x = MOVEMENT_SPEED if self.current_Funky.right + MOVEMENT_SPEED <= self.width else self.width - self.current_Funky.right
def on_key_release(self, symbol: int, modifiers: int):
if symbol == arcade.key.LEFT or symbol == arcade.key.RIGHT:
self.current_Funky.change_x = 0
def _reflect(self, sprite):
sprite.change_x = - sprite.change_x
sprite.change_y = - sprite.change_y
self.score += 1
def _swap_color(self, bounceDotSprite: BounceDotSprite):
if bounceDotSprite.center_y > bounceDotSprite.window.height / 2 and bounceDotSprite.solidColor != "#0000ff":
self.lives -= 1
self.life_icons.pop()
return bounceDotSprite.change_solid_color("#0000ff")
elif bounceDotSprite.center_y < bounceDotSprite.window.height / 2 and bounceDotSprite.solidColor != "#ff0000":
self.lives -= 1
self.life_icons.pop()
return bounceDotSprite.change_solid_color("#ff0000")
return bounceDotSprite
def placeSprite(sprite: Sprite, center_x: float, center_y: float):
sprite.center_x = center_x
sprite.center_y = center_y
def placeSpriteList(spriteList: SpriteList, centersList):
for i in range(len(spriteList)):
placeSprite(spriteList[i], centersList[i][0], centersList[i][1])
def init():
return MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, "Funky Blue")
def render():
arcade.start_render()
arcade.set_background_color(arcade.color.WARM_BLACK)
arcade.draw_circle_filled(200, 300, 50, arcade.color.AERO_BLUE)
arcade.finish_render()
def run():
arcade.run()
def main():
funky_blue = init()
funky_blue.setup()
arcade.run()
if __name__ == '__main__':
main() |
12,061 | bee4d5b186d325da5868d5030524e68c0734cad5 |
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urlparse
import budget
import json
import datetime
BASE_WEB_DIR = "webserver/"
class BudgetHTTPRequestHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
self.manager = budget.AccountManager()
def do_GET(self):
parsed_url = urlparse.urlparse(self.path)
url_path = parsed_url.path.split("/")
query_params = urlparse.parse_qs(parsed_url.query)
if url_path[0] != "":
self.error("Invalid URL")
if url_path[1] == "":
# Root
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
with open(BASE_WEB_DIR + "index.html", "r") as f:
self.wfile.write(f.read())
return
if url_path[1][-3:] == ".js":
# JS file
self.send_response(200)
self.send_header('Content-type', 'text/javascript')
self.end_headers()
with open(BASE_WEB_DIR + url_path[1], "r") as f:
self.wfile.write(f.read())
return
if url_path[1][-4:] == ".css":
# CSS file
self.send_response(200)
self.send_header('Content-type', 'text/css')
self.end_headers()
with open(BASE_WEB_DIR + url_path[1], "r") as f:
self.wfile.write(f.read())
return
if url_path[1] == "accounts":
# Accounts query
self.manager = budget.AccountManager()
accounts = self.manager.list_accounts()
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps({"accounts": accounts})))
return
if url_path[1] == "history":
# History query
# Get filter from params
filter_accounts = None
filter_from_to = None
filter_charge_begin = None
filter_charge_end = None
filter_date_begin = None
filter_date_end = None
filter_notes_contain = None
if "accounts" in query_params:
filter_accounts = [x.strip() for x in query_params["accounts"][0].split(",")]
if "from_to" in query_params:
filter_from_to = query_params["from_to"][0]
if "charge_begin" in query_params:
filter_charge_begin = query_params["charge_begin"][0]
if "charge_end" in query_params:
filter_charge_end = query_params["charge_end"][0]
if "date_begin" in query_params:
filter_date_begin = datetime.datetime.strptime(query_params["date_begin"][0], "%Y-%m-%d")
if "date_end" in query_params:
filter_date_end = datetime.datetime.strptime(query_params["date_end"][0], "%Y-%m-%d")
if "notes_contain" in query_params:
filter_notes_contain = query_params["notes_contain"][0]
self.manager = budget.AccountManager()
history = self.manager.list_history_filter(accounts=filter_accounts, from_to=filter_from_to,
charge_begin=filter_charge_begin,
charge_end=filter_charge_end,
date_begin=filter_date_begin,
date_end=filter_date_end,
notes_contains=filter_notes_contain)
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps({"history": [x.as_dict() for x in history]})))
return
self.send_error(404, "That resource can't be found")
return
def do_POST(self):
parsed_url = urlparse.urlparse(self.path)
url_path = parsed_url.path.split("/")
length = int(self.headers.getheader('content-length'))
field_data = self.rfile.read(length)
params = urlparse.parse_qs(field_data)
if url_path[1] == "transaction":
user_charge = 0
user_date = ""
user_account_from = ""
user_account_to = ""
user_notes = ""
user_files = []
user_file_data = []
if "charge" in params:
user_charge = float(params["charge"][0])
if "date" in params:
user_date = datetime.datetime.strptime(params["date"][0], "%Y-%m-%d")
if "account_from" in params:
user_account_from = params["account_from"][0]
if "account_to" in params:
user_account_to = params["account_to"][0]
if "notes" in params:
user_notes = params["notes"][0]
try:
new_transaction = budget.Transaction()
new_transaction.from_new(user_charge, user_date, user_account_from, user_account_to,
user_notes, ",".join(user_files))
self.manager = budget.AccountManager()
self.manager.make_transaction(new_transaction, user_file_data)
except Exception as e:
print("Failed to commit transaction: " + e.message)
return
self.send_response(200)
def error(self, msg=""):
self.send_error(500, msg)
if __name__ == "__main__":
print("Starting server...")
server_address = ("0.0.0.0", 80)
httpd = HTTPServer(server_address, BudgetHTTPRequestHandler)
print("Server running")
httpd.serve_forever()
|
12,062 | 42a3207244ea94553b01df3696a691d3fd127ab6 | class ExampleCtrl(object):
"""Mealy transducer.
Internal states are integers, the current state
is stored in the attribute "state".
To take a transition, call method "move".
The names of input variables are stored in the
attribute "input_vars".
Automatically generated by tulip.dumpsmach on 2015-08-13 05:18:57 UTC
To learn more about TuLiP, visit http://tulip-control.org
"""
def __init__(self):
self.state = 52
self.input_vars = ['env2']
def move(self, env2):
"""Given inputs, take move and return outputs.
@rtype: dict
@return: dictionary with keys of the output variable names:
['loc', 'stage']
"""
output = dict()
if self.state == 0:
if (env2 == 0):
self.state = 0
output["loc"] = 16
output["stage"] = 1
elif (env2 == 1):
self.state = 33
output["loc"] = 17
output["stage"] = 1
elif (env2 == 2):
self.state = 34
output["loc"] = 17
output["stage"] = 1
else:
self._error(env2)
elif self.state == 1:
if (env2 == 0):
self.state = 0
output["loc"] = 16
output["stage"] = 1
elif (env2 == 1):
self.state = 33
output["loc"] = 17
output["stage"] = 1
elif (env2 == 2):
self.state = 34
output["loc"] = 17
output["stage"] = 1
elif (env2 == 3):
self.state = 38
output["loc"] = 16
output["stage"] = 1
else:
self._error(env2)
elif self.state == 2:
if (env2 == 0):
self.state = 0
output["loc"] = 16
output["stage"] = 1
elif (env2 == 4):
self.state = 41
output["loc"] = 16
output["stage"] = 1
elif (env2 == 2):
self.state = 34
output["loc"] = 17
output["stage"] = 1
elif (env2 == 3):
self.state = 38
output["loc"] = 16
output["stage"] = 1
elif (env2 == 1):
self.state = 33
output["loc"] = 17
output["stage"] = 1
else:
self._error(env2)
elif self.state == 3:
if (env2 == 4):
self.state = 41
output["loc"] = 16
output["stage"] = 1
elif (env2 == 2):
self.state = 2
output["loc"] = 16
output["stage"] = 1
elif (env2 == 1):
self.state = 1
output["loc"] = 16
output["stage"] = 1
elif (env2 == 3):
self.state = 38
output["loc"] = 16
output["stage"] = 1
elif (env2 == 5):
self.state = 39
output["loc"] = 16
output["stage"] = 1
else:
self._error(env2)
elif self.state == 4:
if (env2 == 6):
self.state = 40
output["loc"] = 16
output["stage"] = 1
elif (env2 == 4):
self.state = 41
output["loc"] = 16
output["stage"] = 1
elif (env2 == 2):
self.state = 2
output["loc"] = 16
output["stage"] = 1
elif (env2 == 3):
self.state = 38
output["loc"] = 16
output["stage"] = 1
elif (env2 == 5):
self.state = 39
output["loc"] = 16
output["stage"] = 1
else:
self._error(env2)
elif self.state == 5:
if (env2 == 6):
self.state = 40
output["loc"] = 16
output["stage"] = 1
elif (env2 == 7):
self.state = 5
output["loc"] = 16
output["stage"] = 1
elif (env2 == 5):
self.state = 39
output["loc"] = 16
output["stage"] = 1
else:
self._error(env2)
elif self.state == 6:
if (env2 == 0):
self.state = 8
output["loc"] = 20
output["stage"] = 1
elif (env2 == 1):
self.state = 18
output["loc"] = 21
output["stage"] = 1
elif (env2 == 2):
self.state = 19
output["loc"] = 21
output["stage"] = 1
elif (env2 == 3):
self.state = 21
output["loc"] = 20
output["stage"] = 1
else:
self._error(env2)
elif self.state == 7:
if (env2 == 0):
self.state = 8
output["loc"] = 20
output["stage"] = 1
elif (env2 == 4):
self.state = 24
output["loc"] = 20
output["stage"] = 1
elif (env2 == 1):
self.state = 18
output["loc"] = 21
output["stage"] = 1
elif (env2 == 2):
self.state = 19
output["loc"] = 21
output["stage"] = 1
elif (env2 == 3):
self.state = 21
output["loc"] = 20
output["stage"] = 1
else:
self._error(env2)
elif self.state == 8:
if (env2 == 0):
self.state = 8
output["loc"] = 20
output["stage"] = 1
elif (env2 == 1):
self.state = 18
output["loc"] = 21
output["stage"] = 1
elif (env2 == 2):
self.state = 19
output["loc"] = 21
output["stage"] = 1
else:
self._error(env2)
elif self.state == 9:
if (env2 == 4):
self.state = 24
output["loc"] = 20
output["stage"] = 1
elif (env2 == 7):
self.state = 20
output["loc"] = 20
output["stage"] = 1
elif (env2 == 5):
self.state = 22
output["loc"] = 20
output["stage"] = 1
elif (env2 == 6):
self.state = 23
output["loc"] = 20
output["stage"] = 1
else:
self._error(env2)
elif self.state == 10:
if (env2 == 7):
self.state = 10
output["loc"] = 20
output["stage"] = 2
elif (env2 == 6):
self.state = 11
output["loc"] = 20
output["stage"] = 2
elif (env2 == 5):
self.state = 12
output["loc"] = 20
output["stage"] = 2
else:
self._error(env2)
elif self.state == 11:
if (env2 == 7):
self.state = 10
output["loc"] = 20
output["stage"] = 2
elif (env2 == 6):
self.state = 11
output["loc"] = 20
output["stage"] = 2
elif (env2 == 5):
self.state = 12
output["loc"] = 20
output["stage"] = 2
elif (env2 == 4):
self.state = 13
output["loc"] = 20
output["stage"] = 2
else:
self._error(env2)
elif self.state == 12:
if (env2 == 7):
self.state = 10
output["loc"] = 20
output["stage"] = 2
elif (env2 == 6):
self.state = 11
output["loc"] = 20
output["stage"] = 2
elif (env2 == 5):
self.state = 12
output["loc"] = 20
output["stage"] = 2
elif (env2 == 4):
self.state = 13
output["loc"] = 20
output["stage"] = 2
elif (env2 == 3):
self.state = 14
output["loc"] = 20
output["stage"] = 2
else:
self._error(env2)
elif self.state == 13:
if (env2 == 2):
self.state = 16
output["loc"] = 20
output["stage"] = 2
elif (env2 == 6):
self.state = 11
output["loc"] = 20
output["stage"] = 2
elif (env2 == 5):
self.state = 12
output["loc"] = 20
output["stage"] = 2
elif (env2 == 4):
self.state = 13
output["loc"] = 20
output["stage"] = 2
elif (env2 == 3):
self.state = 14
output["loc"] = 20
output["stage"] = 2
else:
self._error(env2)
elif self.state == 14:
if (env2 == 2):
self.state = 16
output["loc"] = 20
output["stage"] = 2
elif (env2 == 5):
self.state = 12
output["loc"] = 20
output["stage"] = 2
elif (env2 == 4):
self.state = 13
output["loc"] = 20
output["stage"] = 2
elif (env2 == 3):
self.state = 14
output["loc"] = 20
output["stage"] = 2
elif (env2 == 1):
self.state = 15
output["loc"] = 20
output["stage"] = 2
else:
self._error(env2)
elif self.state == 15:
if (env2 == 2):
self.state = 16
output["loc"] = 20
output["stage"] = 2
elif (env2 == 0):
self.state = 17
output["loc"] = 20
output["stage"] = 2
elif (env2 == 3):
self.state = 14
output["loc"] = 20
output["stage"] = 2
elif (env2 == 1):
self.state = 15
output["loc"] = 20
output["stage"] = 2
else:
self._error(env2)
elif self.state == 16:
if (env2 == 2):
self.state = 16
output["loc"] = 20
output["stage"] = 2
elif (env2 == 0):
self.state = 17
output["loc"] = 20
output["stage"] = 2
elif (env2 == 4):
self.state = 13
output["loc"] = 20
output["stage"] = 2
elif (env2 == 3):
self.state = 14
output["loc"] = 20
output["stage"] = 2
elif (env2 == 1):
self.state = 15
output["loc"] = 20
output["stage"] = 2
else:
self._error(env2)
elif self.state == 17:
if (env2 == 2):
self.state = 16
output["loc"] = 20
output["stage"] = 2
elif (env2 == 0):
self.state = 17
output["loc"] = 20
output["stage"] = 2
elif (env2 == 1):
self.state = 15
output["loc"] = 20
output["stage"] = 2
else:
self._error(env2)
elif self.state == 18:
if (env2 == 2):
self.state = 16
output["loc"] = 20
output["stage"] = 2
elif (env2 == 0):
self.state = 17
output["loc"] = 20
output["stage"] = 2
elif (env2 == 3):
self.state = 14
output["loc"] = 20
output["stage"] = 2
elif (env2 == 1):
self.state = 15
output["loc"] = 20
output["stage"] = 2
else:
self._error(env2)
elif self.state == 19:
if (env2 == 2):
self.state = 16
output["loc"] = 20
output["stage"] = 2
elif (env2 == 0):
self.state = 17
output["loc"] = 20
output["stage"] = 2
elif (env2 == 4):
self.state = 13
output["loc"] = 20
output["stage"] = 2
elif (env2 == 3):
self.state = 14
output["loc"] = 20
output["stage"] = 2
elif (env2 == 1):
self.state = 15
output["loc"] = 20
output["stage"] = 2
else:
self._error(env2)
elif self.state == 20:
if (env2 == 7):
self.state = 20
output["loc"] = 20
output["stage"] = 1
elif (env2 == 5):
self.state = 22
output["loc"] = 20
output["stage"] = 1
elif (env2 == 6):
self.state = 23
output["loc"] = 20
output["stage"] = 1
else:
self._error(env2)
elif self.state == 21:
if (env2 == 4):
self.state = 24
output["loc"] = 20
output["stage"] = 1
elif (env2 == 1):
self.state = 18
output["loc"] = 21
output["stage"] = 1
elif (env2 == 2):
self.state = 19
output["loc"] = 21
output["stage"] = 1
elif (env2 == 3):
self.state = 21
output["loc"] = 20
output["stage"] = 1
elif (env2 == 5):
self.state = 22
output["loc"] = 20
output["stage"] = 1
else:
self._error(env2)
elif self.state == 22:
if (env2 == 4):
self.state = 24
output["loc"] = 20
output["stage"] = 1
elif (env2 == 7):
self.state = 20
output["loc"] = 20
output["stage"] = 1
elif (env2 == 3):
self.state = 21
output["loc"] = 20
output["stage"] = 1
elif (env2 == 5):
self.state = 22
output["loc"] = 20
output["stage"] = 1
elif (env2 == 6):
self.state = 23
output["loc"] = 20
output["stage"] = 1
else:
self._error(env2)
elif self.state == 23:
if (env2 == 4):
self.state = 24
output["loc"] = 20
output["stage"] = 1
elif (env2 == 7):
self.state = 20
output["loc"] = 20
output["stage"] = 1
elif (env2 == 5):
self.state = 22
output["loc"] = 20
output["stage"] = 1
elif (env2 == 6):
self.state = 23
output["loc"] = 20
output["stage"] = 1
else:
self._error(env2)
elif self.state == 24:
if (env2 == 4):
self.state = 24
output["loc"] = 20
output["stage"] = 1
elif (env2 == 2):
self.state = 19
output["loc"] = 21
output["stage"] = 1
elif (env2 == 3):
self.state = 21
output["loc"] = 20
output["stage"] = 1
elif (env2 == 5):
self.state = 22
output["loc"] = 20
output["stage"] = 1
elif (env2 == 6):
self.state = 23
output["loc"] = 20
output["stage"] = 1
else:
self._error(env2)
elif self.state == 25:
if (env2 == 4):
self.state = 24
output["loc"] = 20
output["stage"] = 1
elif (env2 == 7):
self.state = 20
output["loc"] = 20
output["stage"] = 1
elif (env2 == 3):
self.state = 21
output["loc"] = 20
output["stage"] = 1
elif (env2 == 5):
self.state = 22
output["loc"] = 20
output["stage"] = 1
elif (env2 == 6):
self.state = 23
output["loc"] = 20
output["stage"] = 1
else:
self._error(env2)
elif self.state == 26:
if (env2 == 6):
self.state = 9
output["loc"] = 19
output["stage"] = 1
elif (env2 == 4):
self.state = 26
output["loc"] = 18
output["stage"] = 1
elif (env2 == 3):
self.state = 27
output["loc"] = 18
output["stage"] = 1
elif (env2 == 2):
self.state = 29
output["loc"] = 19
output["stage"] = 1
elif (env2 == 5):
self.state = 25
output["loc"] = 19
output["stage"] = 1
else:
self._error(env2)
elif self.state == 27:
if (env2 == 5):
self.state = 25
output["loc"] = 19
output["stage"] = 1
elif (env2 == 4):
self.state = 26
output["loc"] = 18
output["stage"] = 1
elif (env2 == 3):
self.state = 27
output["loc"] = 18
output["stage"] = 1
elif (env2 == 1):
self.state = 28
output["loc"] = 19
output["stage"] = 1
elif (env2 == 2):
self.state = 29
output["loc"] = 19
output["stage"] = 1
else:
self._error(env2)
elif self.state == 28:
if (env2 == 0):
self.state = 8
output["loc"] = 20
output["stage"] = 1
elif (env2 == 3):
self.state = 21
output["loc"] = 20
output["stage"] = 1
elif (env2 == 1):
self.state = 6
output["loc"] = 20
output["stage"] = 1
elif (env2 == 2):
self.state = 7
output["loc"] = 20
output["stage"] = 1
else:
self._error(env2)
elif self.state == 29:
if (env2 == 0):
self.state = 8
output["loc"] = 20
output["stage"] = 1
elif (env2 == 4):
self.state = 24
output["loc"] = 20
output["stage"] = 1
elif (env2 == 3):
self.state = 21
output["loc"] = 20
output["stage"] = 1
elif (env2 == 1):
self.state = 6
output["loc"] = 20
output["stage"] = 1
elif (env2 == 2):
self.state = 7
output["loc"] = 20
output["stage"] = 1
else:
self._error(env2)
elif self.state == 30:
if (env2 == 0):
self.state = 32
output["loc"] = 18
output["stage"] = 1
elif (env2 == 3):
self.state = 27
output["loc"] = 18
output["stage"] = 1
elif (env2 == 1):
self.state = 28
output["loc"] = 19
output["stage"] = 1
elif (env2 == 2):
self.state = 29
output["loc"] = 19
output["stage"] = 1
else:
self._error(env2)
elif self.state == 31:
if (env2 == 0):
self.state = 32
output["loc"] = 18
output["stage"] = 1
elif (env2 == 4):
self.state = 26
output["loc"] = 18
output["stage"] = 1
elif (env2 == 3):
self.state = 27
output["loc"] = 18
output["stage"] = 1
elif (env2 == 1):
self.state = 28
output["loc"] = 19
output["stage"] = 1
elif (env2 == 2):
self.state = 29
output["loc"] = 19
output["stage"] = 1
else:
self._error(env2)
elif self.state == 32:
if (env2 == 0):
self.state = 32
output["loc"] = 18
output["stage"] = 1
elif (env2 == 1):
self.state = 28
output["loc"] = 19
output["stage"] = 1
elif (env2 == 2):
self.state = 29
output["loc"] = 19
output["stage"] = 1
else:
self._error(env2)
elif self.state == 33:
if (env2 == 0):
self.state = 32
output["loc"] = 18
output["stage"] = 1
elif (env2 == 1):
self.state = 30
output["loc"] = 18
output["stage"] = 1
elif (env2 == 3):
self.state = 38
output["loc"] = 16
output["stage"] = 1
elif (env2 == 2):
self.state = 31
output["loc"] = 18
output["stage"] = 1
else:
self._error(env2)
elif self.state == 34:
if (env2 == 0):
self.state = 32
output["loc"] = 18
output["stage"] = 1
elif (env2 == 4):
self.state = 41
output["loc"] = 16
output["stage"] = 1
elif (env2 == 1):
self.state = 30
output["loc"] = 18
output["stage"] = 1
elif (env2 == 3):
self.state = 38
output["loc"] = 16
output["stage"] = 1
elif (env2 == 2):
self.state = 31
output["loc"] = 18
output["stage"] = 1
else:
self._error(env2)
elif self.state == 35:
if (env2 == 6):
self.state = 40
output["loc"] = 16
output["stage"] = 1
elif (env2 == 4):
self.state = 41
output["loc"] = 16
output["stage"] = 1
elif (env2 == 7):
self.state = 37
output["loc"] = 0
output["stage"] = 1
elif (env2 == 3):
self.state = 38
output["loc"] = 16
output["stage"] = 1
elif (env2 == 5):
self.state = 39
output["loc"] = 16
output["stage"] = 1
else:
self._error(env2)
elif self.state == 36:
if (env2 == 6):
self.state = 40
output["loc"] = 16
output["stage"] = 1
elif (env2 == 4):
self.state = 41
output["loc"] = 16
output["stage"] = 1
elif (env2 == 7):
self.state = 37
output["loc"] = 0
output["stage"] = 1
elif (env2 == 5):
self.state = 39
output["loc"] = 16
output["stage"] = 1
else:
self._error(env2)
elif self.state == 37:
if (env2 == 5):
self.state = 35
output["loc"] = 8
output["stage"] = 1
elif (env2 == 6):
self.state = 36
output["loc"] = 8
output["stage"] = 1
elif (env2 == 7):
self.state = 37
output["loc"] = 0
output["stage"] = 1
else:
self._error(env2)
elif self.state == 38:
if (env2 == 4):
self.state = 41
output["loc"] = 16
output["stage"] = 1
elif (env2 == 2):
self.state = 34
output["loc"] = 17
output["stage"] = 1
elif (env2 == 1):
self.state = 33
output["loc"] = 17
output["stage"] = 1
elif (env2 == 3):
self.state = 38
output["loc"] = 16
output["stage"] = 1
elif (env2 == 5):
self.state = 39
output["loc"] = 16
output["stage"] = 1
else:
self._error(env2)
elif self.state == 39:
if (env2 == 6):
self.state = 40
output["loc"] = 16
output["stage"] = 1
elif (env2 == 4):
self.state = 41
output["loc"] = 16
output["stage"] = 1
elif (env2 == 7):
self.state = 5
output["loc"] = 16
output["stage"] = 1
elif (env2 == 3):
self.state = 38
output["loc"] = 16
output["stage"] = 1
elif (env2 == 5):
self.state = 39
output["loc"] = 16
output["stage"] = 1
else:
self._error(env2)
elif self.state == 40:
if (env2 == 6):
self.state = 40
output["loc"] = 16
output["stage"] = 1
elif (env2 == 4):
self.state = 41
output["loc"] = 16
output["stage"] = 1
elif (env2 == 7):
self.state = 5
output["loc"] = 16
output["stage"] = 1
elif (env2 == 5):
self.state = 39
output["loc"] = 16
output["stage"] = 1
else:
self._error(env2)
elif self.state == 41:
if (env2 == 6):
self.state = 40
output["loc"] = 16
output["stage"] = 1
elif (env2 == 4):
self.state = 41
output["loc"] = 16
output["stage"] = 1
elif (env2 == 2):
self.state = 34
output["loc"] = 17
output["stage"] = 1
elif (env2 == 3):
self.state = 38
output["loc"] = 16
output["stage"] = 1
elif (env2 == 5):
self.state = 39
output["loc"] = 16
output["stage"] = 1
else:
self._error(env2)
elif self.state == 42:
if (env2 == 6):
self.state = 40
output["loc"] = 16
output["stage"] = 1
elif (env2 == 4):
self.state = 41
output["loc"] = 16
output["stage"] = 1
elif (env2 == 7):
self.state = 37
output["loc"] = 0
output["stage"] = 1
elif (env2 == 3):
self.state = 38
output["loc"] = 16
output["stage"] = 1
elif (env2 == 5):
self.state = 39
output["loc"] = 16
output["stage"] = 1
else:
self._error(env2)
elif self.state == 43:
if (env2 == 6):
self.state = 40
output["loc"] = 16
output["stage"] = 1
elif (env2 == 4):
self.state = 41
output["loc"] = 16
output["stage"] = 1
elif (env2 == 7):
self.state = 37
output["loc"] = 0
output["stage"] = 1
elif (env2 == 5):
self.state = 39
output["loc"] = 16
output["stage"] = 1
else:
self._error(env2)
elif self.state == 44:
if (env2 == 1):
self.state = 48
output["loc"] = 0
output["stage"] = 0
elif (env2 == 0):
self.state = 44
output["loc"] = 0
output["stage"] = 0
elif (env2 == 2):
self.state = 46
output["loc"] = 0
output["stage"] = 0
else:
self._error(env2)
elif self.state == 45:
if (env2 == 6):
self.state = 43
output["loc"] = 8
output["stage"] = 0
elif (env2 == 5):
self.state = 42
output["loc"] = 8
output["stage"] = 0
elif (env2 == 3):
self.state = 3
output["loc"] = 8
output["stage"] = 0
elif (env2 == 4):
self.state = 4
output["loc"] = 8
output["stage"] = 0
elif (env2 == 2):
self.state = 46
output["loc"] = 0
output["stage"] = 0
else:
self._error(env2)
elif self.state == 46:
if (env2 == 1):
self.state = 48
output["loc"] = 0
output["stage"] = 0
elif (env2 == 0):
self.state = 44
output["loc"] = 0
output["stage"] = 0
elif (env2 == 3):
self.state = 3
output["loc"] = 8
output["stage"] = 0
elif (env2 == 4):
self.state = 4
output["loc"] = 8
output["stage"] = 0
elif (env2 == 2):
self.state = 46
output["loc"] = 0
output["stage"] = 0
else:
self._error(env2)
elif self.state == 47:
if (env2 == 6):
self.state = 43
output["loc"] = 8
output["stage"] = 0
elif (env2 == 5):
self.state = 42
output["loc"] = 8
output["stage"] = 0
elif (env2 == 7):
self.state = 51
output["loc"] = 0
output["stage"] = 0
elif (env2 == 4):
self.state = 4
output["loc"] = 8
output["stage"] = 0
else:
self._error(env2)
elif self.state == 48:
if (env2 == 1):
self.state = 48
output["loc"] = 0
output["stage"] = 0
elif (env2 == 3):
self.state = 3
output["loc"] = 8
output["stage"] = 0
elif (env2 == 0):
self.state = 44
output["loc"] = 0
output["stage"] = 0
elif (env2 == 2):
self.state = 46
output["loc"] = 0
output["stage"] = 0
else:
self._error(env2)
elif self.state == 49:
if (env2 == 7):
self.state = 51
output["loc"] = 0
output["stage"] = 0
elif (env2 == 5):
self.state = 42
output["loc"] = 8
output["stage"] = 0
elif (env2 == 3):
self.state = 3
output["loc"] = 8
output["stage"] = 0
elif (env2 == 4):
self.state = 4
output["loc"] = 8
output["stage"] = 0
elif (env2 == 6):
self.state = 43
output["loc"] = 8
output["stage"] = 0
else:
self._error(env2)
elif self.state == 50:
if (env2 == 1):
self.state = 48
output["loc"] = 0
output["stage"] = 0
elif (env2 == 5):
self.state = 42
output["loc"] = 8
output["stage"] = 0
elif (env2 == 3):
self.state = 3
output["loc"] = 8
output["stage"] = 0
elif (env2 == 4):
self.state = 4
output["loc"] = 8
output["stage"] = 0
elif (env2 == 2):
self.state = 46
output["loc"] = 0
output["stage"] = 0
else:
self._error(env2)
elif self.state == 51:
if (env2 == 6):
self.state = 43
output["loc"] = 8
output["stage"] = 0
elif (env2 == 5):
self.state = 42
output["loc"] = 8
output["stage"] = 0
elif (env2 == 7):
self.state = 51
output["loc"] = 0
output["stage"] = 0
else:
self._error(env2)
elif self.state == 52:
if (env2 == 0):
self.state = 44
output["loc"] = 0
output["stage"] = 0
elif (env2 == 4):
self.state = 45
output["loc"] = 0
output["stage"] = 0
elif (env2 == 2):
self.state = 46
output["loc"] = 0
output["stage"] = 0
elif (env2 == 6):
self.state = 47
output["loc"] = 0
output["stage"] = 0
elif (env2 == 1):
self.state = 48
output["loc"] = 0
output["stage"] = 0
elif (env2 == 5):
self.state = 49
output["loc"] = 0
output["stage"] = 0
elif (env2 == 3):
self.state = 50
output["loc"] = 0
output["stage"] = 0
elif (env2 == 7):
self.state = 51
output["loc"] = 0
output["stage"] = 0
else:
self._error(env2)
else:
raise Exception("Unrecognized internal state: " + str(self.state))
return output
def _error(self, env2):
raise ValueError("Unrecognized input: " + (
"env2 = {env2}; ").format(
env2=env2))
|
12,063 | 3d762649594dcd13085f6af8e541d9b9b8ce186a | """Программа принимает действительное положительное число x и целое отрицательное число y.
Необходимо выполнить возведение числа x в степень y.
Задание необходимо реализовать в виде функции my_func(x, y).
При решении задания необходимо обойтись без встроенной функции возведения числа в степень."""
"""
Программа возведения в степень чисел, вводимых пользовтелем
"""
"""
Функция возведения числа в степень.
При возведении числа в степень, равную отрицательному числу,
необходимо возвести число в степень, взятую по модулю,
а затем взять обратое от полученного результата - поделить единицу на результат
"""
def my_func(x, y):
    """Raise positive real `x` to the negative integer power `y` without `**`.

    The original squared `x` on every pass (`x *= x`), computing x**(2**k)
    instead of x**|y|, and its `<=` loop bound ran one iteration too many.
    This version multiplies by `x` exactly |y| times and returns the
    reciprocal, which equals x**y for negative y.
    """
    result = 1.0
    for _ in range(abs(y)):
        result *= x
    return 1 / result
"""
Функция ввода основания степени с проверкой (является ли оно действительным положительым числом)
"""
def number_input():
    """Prompt repeatedly until a positive real number is entered; return it."""
    while True:
        base_number = float(input("Введите действительное положительное число, которое нужно возвести в степень>>> "))
        if base_number > 0:
            return base_number
        print("Ошибка! Вы ввели число меньше 0. Введите действительное положительное число")
"""
Функция ввода степени числа с проверкой (является ли она отрицательным целым числом
"""
def power_input():
    """Prompt repeatedly until a negative integer is entered; return it."""
    while True:
        power = int(input("Введите целое отрицательное число - показатель степени>>> "))
        if power < 0:
            return power
        print("Ошибка! Вы ввели число больше 0. Введите целое отрицательное число")
# Read base and exponent interactively, compute x**y and print the result.
powering = my_func(number_input(), power_input())
print(powering)
|
12,064 | a225d0d9f8e9eebfd76f30f44f9fde2f42384ed7 | from fbchat import Client
from fbchat.models import Message, MessageReaction
# facebook user credentials
# NOTE(review): credentials are hard-coded in source — move to environment
# variables or a config file before using this script.
username = "username.or.email"
password = "password"
# login
client = Client(username, password)
# get 20 users you most recently talked to
users = client.fetchThreadList()
print(users)
# get the detailed informations about these users
detailed_users = [ list(client.fetchThreadInfo(user.uid).values())[0] for user in users ]
# sort by number of messages
sorted_detailed_users = sorted(detailed_users, key=lambda u: u.message_count, reverse=True)
# print the best friend!
# NOTE(review): this assumes the thread list is non-empty — IndexError otherwise.
best_friend = sorted_detailed_users[0]
print("Best friend:", best_friend.name, "with a message count of", best_friend.message_count)
# message the best friend!
client.send(Message(
    text=f"Congratulations {best_friend.name}, you are my best friend with {best_friend.message_count} messages!"
),
    thread_id=best_friend.uid)
# get all users you talked to in messenger in your account
all_users = client.fetchAllUsers()
print("You talked with a total of", len(all_users), "users!")
# let's logout
client.logout() |
def chose(a):
    """Return the substring of `a` between the first pair of '~' delimiters.

    Returns '' when `a` contains no '~'.  If only an opening '~' exists,
    the remainder of the string is returned (the original manual scan
    crashed with IndexError when the closing '~' was missing).
    """
    opening = a.find('~')
    if opening == -1:
        return ''
    closing = a.find('~', opening + 1)
    if closing == -1:
        return a[opening + 1:]
    return a[opening + 1:closing]
# Collect accession ids (between '~' markers) and full descriptions
# from a local FASTA file.
id_list = []
description_list = []
from Bio import SeqIO
file = open('Final_file_protein', 'w')
for record in SeqIO.parse('Final.fasta', "fasta"):
    id_list.append(chose(record.description))
    description_list.append(record.description)
print(len(description_list))
# Fetch the protein sequences from NCBI Entrez in batches of 1000 ids.
from Bio import Entrez
import time
Entrez.email = 'vorosviktoriya@yandex.ru'
# NOTE(review): id_list1 and s are assigned but never used.
id_list1 = []
seq_list = []
s = ''
p = 0
lenth = len(id_list)
print (lenth)
f = open('ex.txt', 'w')
# NOTE(review): `while p <= lenth` performs one extra pass with an empty
# id batch when lenth is a multiple of 1000 — probably should be `<`.
while p <= lenth:
    n = []
    j = p
    while (j>= p) and (j<lenth) and (j<(p+1000)):
        n.append(id_list[j])
        j+=1
    p+=1000
    handle = Entrez.efetch( db = "protein", id = n, rettype = "fasta",retmode = "text")
    time.sleep(1)  # throttle requests to stay within NCBI rate limits
    f.write(handle.read())
f.close()
# Re-read the fetched records and write descriptions + sequences out together.
for record in SeqIO.parse("ex.txt", "fasta" ):
    seq_list.append(record.seq)
print(len(seq_list))
a = open('Final_file_protein.fasta', 'w')
for j in range(len(description_list)):
    a.write(">"+description_list[j]+"\n")
    a.write(str(seq_list[j])+"\n\n")
a.close()
|
12,066 | 9e7e07d4f82af6395338f1b28b7a518b3fd575cd | from typing import *
from ..arg_check import *
from ..tensor import Tensor, Module
from ..typing_ import *
from .contextual import *
from .core import *
from .gated import *
__all__ = [
'ResBlock1d', 'ResBlock2d', 'ResBlock3d',
'ResBlockTranspose1d', 'ResBlockTranspose2d', 'ResBlockTranspose3d',
]
class ResBlockNd(BaseLayer):
"""
A general implementation of ResNet block.
The architecture of this ResNet implementation follows the work
"Wide residual networks" (Zagoruyko & Komodakis, 2016). It basically does
the following things:
.. code-block:: python
shortcut = input
if strides != 1 or (kernel_size != 1 and padding != 'half') or \
in_channels != out_channels or use_shortcut:
shortcut_layer = shortcut(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=stride,
padding=padding,
dilation=dilation,
)
shortcut = shortcut_layer(shortcut)
residual = input
if resize_at_exit:
conv0_layer = conv0(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=kernel_size,
stride=1,
padding='half',
output_padding=0, # for deconvolutional layers only
dilation=dilation,
)
conv1_layer = conv1(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding, # for deconvolutional layers only
dilation=dilation,
)
else:
conv0_layer = conv0(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding, # for deconvolutional layers only
dilation=dilation,
)
conv1_layer = conv1(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=1,
                padding='half',
output_padding=0, # for deconvolutional layers only
dilation=dilation,
)
residual = normalizer0(residual)
residual = activation0(residual)
residual = conv0_layer(residual)
if merge_context0 is not None:
residual = merge_context0(residual, context)
residual = dropout(residual)
residual = normalizer1(residual)
residual = activation1(residual)
residual = conv1_layer(residual)
if merge_context1 is not None:
residual = merge_context1(residual, context)
output = shortcut + residual
"""
shortcut: Module
pre_conv0: Module
merge_context0: Module
conv0: Module
pre_conv1: Module
merge_context1: Module
conv1: Module
post_conv1: Module
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 stride: Union[int, Sequence[int]] = 1,
                 padding: PaddingArgType = PaddingMode.DEFAULT,
                 output_padding: Union[int, Sequence[int]] = 0,
                 dilation: Union[int, Sequence[int]] = 1,
                 resize_at_exit: bool = False,
                 use_shortcut: Optional[bool] = None,
                 shortcut: Optional[LayerOrLayerFactory] = None,
                 conv0: Optional[LayerOrLayerFactory] = None,
                 conv1: Optional[LayerOrLayerFactory] = None,
                 merge_context0: Optional[Module] = None,
                 merge_context1: Optional[Module] = None,
                 activation: Optional[LayerFactory] = None,
                 normalizer: Optional[NormalizerFactory] = None,
                 dropout: Optional[Union[float, LayerOrLayerFactory]] = None,
                 weight_norm: WeightNormArgType = False,
                 gated: bool = False,
                 gate_bias: float = DEFAULT_GATE_BIAS,
                 use_bias: Optional[bool] = None,
                 weight_init: TensorInitArgType = DEFAULT_WEIGHT_INIT,
                 bias_init: TensorInitArgType = DEFAULT_BIAS_INIT,
                 data_init: Optional[DataInitArgType] = None,
                 device: Optional[str] = None,
                 ):
        """
        Construct a new resnet block.

        Args:
            in_channels: The number of channels of the input.
            out_channels: The number of channels of the output.
            kernel_size: The kernel size over spatial dimensions.
            stride: The stride over spatial dimensions.
            padding: The padding over spatial dimensions.
            output_padding: The output padding for de-convolutional resnet
                blocks.  Must not be specified for convolutional resnet blocks.
            dilation: The dilation over spatial dimensions.
            resize_at_exit: If :obj:`True`, resize the spatial dimensions at
                the "conv1" convolutional layer.
                If :obj:`False`, resize at the "conv0" convolutional layer.
                (see above)
            use_shortcut: If :obj:`True`, always applies a linear
                convolution transformation on the shortcut path.
                Defaults to :obj:`None`, only use shortcut if necessary.
            shortcut: The "shortcut" layer, or the factory to construct the layer.
            conv0: The "conv0" layer, or the factory to construct the layer.
            conv1: The "conv1" layer, or the factory to construct the layer.
            merge_context0: Layer after "conv0" to merge the `context`
                argument with the output of "conv0". (See above)
            merge_context1: Layer after "conv1" to merge the `context`
                argument with the output of "conv1". (See above)
            activation: The factory of the activation layers.
                It should expect no argument.
            normalizer: The factory of the normalizer layers.  It should accept
                one positional argument, the output channel size.
            dropout: A float, a layer or a factory.
                If it is a float, it will be used as the `p` argument to
                construct an instance of :class:`tensorkit.layers.Dropout`.
                If it is a factory, it should expect no argument.
            weight_norm: The weight norm mode for the convolutional layers.
                If :obj:`True`, will use "full" weight norm for "conv1" and
                "shortcut".  For "conv0", will use "full" if `normalizer`
                is :obj:`None` or `dropout` is not :obj:`None`.
                If :obj:`False`, will not use weight norm for all layers.
            gated: Whether or not to use gate on the output of "conv1"?
                `conv1 = activation(conv1) * sigmoid(gate + gate_bias)`.
            gate_bias: The bias added to `gate` before applying the `sigmoid`
                activation.
            use_bias: Whether or not to use bias in "conv0", "conv1"
                and "shortcut"?  If :obj:`True`, will always use bias.
                If :obj:`False`, will never use bias.
                Defaults to :obj:`None`, where "use_bias" of "shortcut",
                "conv0" and "conv1" is set according to the following rules:

                * "shortcut": `use_bias` is :obj:`True` if `gated` is True.
                * "conv0": `use_bias` is :obj:`True` if `normalizer` is None,
                  or `dropout` is not None.
                * "conv1": `use_bias` is always :obj:`True`.
            weight_init: The weight initializer for the convolutional layers.
            bias_init: The bias initializer for the convolutional layers.
            data_init: The data-dependent initializer for the convolutional layers.
            device: The device where to place new tensors and variables.
        """
        def use_bias_or_else(default_val: bool):
            # Resolve the tri-state `use_bias` argument: explicit True/False
            # wins, None falls back to the per-layer default.
            if use_bias is None:
                return default_val
            return use_bias

        def compile_layer_list(layers: List[Module]) -> Module:
            # Collapse 0/1/N layers into Identity / the layer itself / Sequential.
            if len(layers) == 0:
                return Identity()
            elif len(layers) == 1:
                return layers[0]
            else:
                return Sequential(layers)

        spatial_ndims = self._get_spatial_ndims()
        is_deconv = self._is_deconv()

        # validate arguments
        in_channels = int(in_channels)
        out_channels = int(out_channels)
        kernel_size = validate_conv_size('kernel_size', kernel_size, spatial_ndims)
        stride = validate_conv_size('strides', stride, spatial_ndims)
        dilation = validate_conv_size('dilation', dilation, spatial_ndims)
        padding = validate_padding(padding, kernel_size, dilation, spatial_ndims)

        # `output_padding` only makes sense for transposed (de-)convolutions.
        if output_padding != 0 and not is_deconv:
            raise ValueError(f'The `output_padding` argument is not allowed '
                             f'by {self.__class__.__qualname__}.')
        output_padding = validate_output_padding(
            output_padding, stride, dilation, spatial_ndims)

        if conv0 is None:
            conv0 = self._default_conv_factory()
        if conv1 is None:
            conv1 = self._default_conv_factory()

        # Remember whether the caller supplied merge_context0: it influences
        # the default `use_bias` decision for "conv0" below.
        orig_merge_context0 = merge_context0
        if merge_context0 is None:
            merge_context0 = IgnoreContext()
        else:
            merge_context0 = validate_layer('merge_context0', merge_context0)
        if merge_context1 is None:
            merge_context1 = IgnoreContext()
        else:
            merge_context1 = validate_layer('merge_context1', merge_context1)

        if shortcut is not None:
            use_shortcut = True
        if use_shortcut is None:
            # A convolutional shortcut is required whenever the residual
            # branch changes the spatial shape or the channel count.
            use_shortcut = (
                any(s != 1 for s in stride) or
                any(p[0] + p[1] != (k - 1) * d
                    for p, k, d in zip(padding, kernel_size, dilation)) or
                in_channels != out_channels)

        if activation is not None:
            activation_factory = validate_layer_factory('activation', activation)
        else:
            activation_factory = None
        if normalizer is not None:
            normalizer_factory = validate_layer_factory('normalizer', normalizer)
        else:
            normalizer_factory = None
        if isinstance(dropout, float):
            dropout = Dropout(p=dropout)
        elif dropout is not None:
            dropout = get_layer_from_layer_or_factory('dropout', dropout)

        conv0_weight_norm = weight_norm
        if conv0_weight_norm is True:
            conv0_weight_norm = (
                WeightNormMode.FULL if normalizer is None or dropout is not None
                else WeightNormMode.NO_SCALE
            )

        # Keyword arguments shared by all convolution factories below.
        kwargs = {'weight_init': weight_init, 'bias_init': bias_init,
                  'data_init': data_init, 'device': device}

        # build the shortcut path
        if use_shortcut:
            if shortcut is None:
                shortcut = self._default_conv_factory()
            if not isinstance(shortcut, Module):
                shortcut = get_layer_from_layer_or_factory(
                    'shortcut', shortcut, kwargs=dict(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_size=kernel_size,
                        stride=stride,
                        padding=padding,
                        dilation=dilation,
                        use_bias=use_bias_or_else(gated),
                        weight_norm=weight_norm,
                        **self._add_output_padding_to_kwargs(output_padding, kwargs)
                    )
                )
        else:
            shortcut = Identity()

        # prepare the arguments for the residual path
        # (which of conv0/conv1 performs the resize depends on resize_at_exit)
        if resize_at_exit:
            conv0_out_channels = in_channels
            conv0_stride = 1
            conv0_padding = PaddingMode.HALF  # such that it can keep the output shape
            conv0_kwargs = kwargs
            conv1_stride = stride
            conv1_padding = padding
            conv1_kwargs = self._add_output_padding_to_kwargs(output_padding, kwargs)
        else:
            conv0_out_channels = out_channels
            conv0_stride = stride
            conv0_padding = padding
            conv0_kwargs = self._add_output_padding_to_kwargs(output_padding, kwargs)
            conv1_stride = 1
            conv1_padding = PaddingMode.HALF  # such that it can keep the output shape
            conv1_kwargs = kwargs
        conv1_out_channels = out_channels
        if gated:
            # Gated output needs twice the channels: value half and gate half.
            conv1_out_channels *= 2

        # pre_conv0
        pre_conv0 = []
        if normalizer_factory is not None:
            pre_conv0.append(normalizer_factory(in_channels))
        if activation_factory is not None:
            pre_conv0.append(activation_factory())
        pre_conv0 = compile_layer_list(pre_conv0)

        # conv0
        conv0 = get_layer_from_layer_or_factory(  # conv0
            'conv0', conv0, kwargs=dict(
                in_channels=in_channels,
                out_channels=conv0_out_channels,
                kernel_size=kernel_size,
                stride=conv0_stride,
                padding=conv0_padding,
                dilation=dilation,
                use_bias=use_bias_or_else(normalizer_factory is None or
                                          dropout is not None or
                                          orig_merge_context0 is not None),
                weight_norm=conv0_weight_norm,
                **conv0_kwargs,
            )
        )

        # pre_conv1
        pre_conv1 = []
        if dropout is not None:
            pre_conv1.append(dropout)
        if normalizer_factory is not None:
            pre_conv1.append(normalizer_factory(conv0_out_channels))
        if activation_factory is not None:
            pre_conv1.append(activation_factory())
        pre_conv1 = compile_layer_list(pre_conv1)

        # conv1
        conv1 = get_layer_from_layer_or_factory(
            'conv1', conv1, kwargs=dict(
                in_channels=conv0_out_channels,
                out_channels=conv1_out_channels,
                kernel_size=kernel_size,
                stride=conv1_stride,
                padding=conv1_padding,
                dilation=dilation,
                use_bias=use_bias_or_else(True),
                weight_norm=weight_norm,
                **conv1_kwargs,
            )
        )

        # post_conv1
        if gated:
            post_conv1 = Gated(
                feature_axis=-(spatial_ndims + 1),
                num_features=out_channels,
                gate_bias=gate_bias,
            )
        else:
            post_conv1 = Identity()

        # construct the layer
        super().__init__()
        self.shortcut = shortcut
        self.pre_conv0 = pre_conv0
        self.merge_context0 = merge_context0
        self.conv0 = conv0
        self.pre_conv1 = pre_conv1
        self.merge_context1 = merge_context1
        self.conv1 = conv1
        self.post_conv1 = post_conv1
    def _get_spatial_ndims(self) -> int:
        # Subclass hook: the number of spatial dimensions (1, 2 or 3).
        raise NotImplementedError()

    def _default_conv_factory(self) -> LayerFactory:
        # Subclass hook: default convolution factory for conv0/conv1/shortcut.
        raise NotImplementedError()

    def _is_deconv(self) -> bool:
        # Subclass hook: True for transposed (de-)convolutional blocks.
        raise NotImplementedError()

    def _add_output_padding_to_kwargs(self, output_padding, kwargs):
        # Subclass hook: inject `output_padding` into conv kwargs when applicable.
        raise NotImplementedError()
def forward(self,
input: Tensor,
context: Optional[List[Tensor]] = None) -> Tensor:
if context is None:
context = []
# compute the residual path
residual = self.pre_conv0(input)
residual = self.conv0(residual)
residual = self.merge_context0(residual, context)
residual = self.pre_conv1(residual)
residual = self.conv1(residual)
residual = self.merge_context1(residual, context)
residual = self.post_conv1(residual)
# sum up the shortcut path and the residual path as the final output
return self.shortcut(input) + residual
class ResBlockConvNd(ResBlockNd):
    """Base class of the forward (non-transposed) convolutional ResNet blocks."""

    def _add_output_padding_to_kwargs(self, output_padding, kwargs):
        # Forward convolutions take no `output_padding`; pass kwargs through.
        return kwargs

    def _is_deconv(self) -> bool:
        return False
class ResBlock1d(ResBlockConvNd):
    """1D ResNet convolution block."""

    def _get_spatial_ndims(self) -> int:
        return 1

    def _default_conv_factory(self) -> LayerFactory:
        # plain (linear) 1d convolution is the default conv/shortcut layer
        return LinearConv1d


class ResBlock2d(ResBlockConvNd):
    """2D ResNet convolution block."""

    def _get_spatial_ndims(self) -> int:
        return 2

    def _default_conv_factory(self) -> LayerFactory:
        # plain (linear) 2d convolution is the default conv/shortcut layer
        return LinearConv2d


class ResBlock3d(ResBlockConvNd):
    """3D ResNet convolution block."""

    def _get_spatial_ndims(self) -> int:
        return 3

    def _default_conv_factory(self) -> LayerFactory:
        # plain (linear) 3d convolution is the default conv/shortcut layer
        return LinearConv3d
class ResBlockTransposeNd(ResBlockNd):
    """Base class of the transposed (de-)convolutional ResNet blocks."""

    def _add_output_padding_to_kwargs(self, output_padding, kwargs=None):
        # De-convolutions need `output_padding` forwarded to the conv factory;
        # copy the dict so the shared kwargs are not mutated.
        kwargs = dict(kwargs or {})
        kwargs['output_padding'] = output_padding
        return kwargs

    def _is_deconv(self) -> bool:
        return True
class ResBlockTranspose1d(ResBlockTransposeNd):
    """1D ResNet de-convolution block."""

    def _get_spatial_ndims(self) -> int:
        return 1

    def _default_conv_factory(self) -> LayerFactory:
        # plain (linear) 1d transposed convolution is the default layer
        return LinearConvTranspose1d


class ResBlockTranspose2d(ResBlockTransposeNd):
    """2D ResNet de-convolution block."""

    def _get_spatial_ndims(self) -> int:
        return 2

    def _default_conv_factory(self) -> LayerFactory:
        # plain (linear) 2d transposed convolution is the default layer
        return LinearConvTranspose2d


class ResBlockTranspose3d(ResBlockTransposeNd):
    """3D ResNet de-convolution block."""

    def _get_spatial_ndims(self) -> int:
        return 3

    def _default_conv_factory(self) -> LayerFactory:
        # plain (linear) 3d transposed convolution is the default layer
        return LinearConvTranspose3d
|
12,067 | a60a484a6ecdb651d8676a57abfe9caa23ef0c2f | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'sources': [
'egl/attrib_map.cc',
'egl/attrib_map.h',
'egl/config.cc',
'egl/config.h',
'egl/display.cc',
'egl/display.h',
'egl/display_impl.h',
'egl/display_registry.cc',
'egl/display_registry.h',
'egl/error.cc',
'egl/error.h',
'egl/get_proc_address_impl.h',
'egl/scoped_egl_lock.cc',
'egl/scoped_egl_lock.h',
'egl/surface.cc',
'egl/surface.h',
'egl/surface_impl.h',
'entry_points/egl.cc',
'entry_points/egl_ext.cc',
'entry_points/gles_2_0.cc',
'entry_points/gles_2_0_ext.cc',
'entry_points/gles_3_0.cc',
'gles/blend_state.h',
'gles/buffer.cc',
'gles/buffer.h',
'gles/buffer_impl.h',
'gles/context.cc',
'gles/context.h',
'gles/context_impl.h',
'gles/convert_pixel_data.cc',
'gles/convert_pixel_data.h',
'gles/cull_face_state.h',
'gles/draw_mode.h',
'gles/draw_state.cc',
'gles/draw_state.h',
'gles/framebuffer.cc',
'gles/framebuffer.h',
'gles/index_data_type.h',
'gles/pixel_format.cc',
'gles/pixel_format.h',
'gles/program.cc',
'gles/program.h',
'gles/program_impl.h',
'gles/ref_counted_resource_map.h',
'gles/renderbuffer.cc',
'gles/renderbuffer.h',
'gles/resource_manager.cc',
'gles/resource_manager.h',
'gles/sampler.h',
'gles/shader.cc',
'gles/shader.h',
'gles/shader_impl.h',
'gles/texture.cc',
'gles/texture.h',
'gles/texture_impl.h',
'gles/uniform_info.h',
'gles/unique_id_generator.cc',
'gles/unique_id_generator.h',
'gles/vertex_attribute.h',
'shaders/glsl_shader_map_helpers.h',
'shaders/hash_glsl_source.cc',
'shaders/hash_glsl_source.h',
],
'dependencies': [
'<(DEPTH)/glimp/tracing/tracing.gyp:tracing',
'<(DEPTH)/nb/nb.gyp:nb',
'<(DEPTH)/starboard/starboard.gyp:starboard',
],
'includes': [
'glimp_settings.gypi',
],
}
|
class Card:
    """A playing card with a numeric value (2-14) and a suit symbol."""

    # face-card values mapped to their display letters; other values print as-is
    _FACE_NAMES = {14: 'A', 13: 'K', 12: 'Q', 11: 'J'}

    def __init__(self, value, symbol):
        self.symbol = symbol
        self.value = value

    def print_card(self):
        """Return the card rendered as e.g. '[A♠]' or '[10♥]'."""
        rank = self._FACE_NAMES.get(self.value, self.value)
        return f'[{rank}{self.symbol}]'
|
12,069 | 09ef4ff094486037a29f9420b776f3400bbe1b09 | import sys
import math
# NOTE(review): `math` is imported but never used.
# adjacency list: node x -> list of its direct successor nodes y
graph = {}
n = int(input())
# read n directed edges "x y" from stdin
for i in range(n):
    x, y = [int(j) for j in input().split()]
    graph.setdefault(x,[]).append(y)
print(graph, file=sys.stderr)
def find_length(start):
    """Number of nodes on the longest directed path starting at `start`.

    Recursive depth-first walk over the module-level `graph` adjacency list;
    a node without successors contributes a path of length 1.
    """
    children = graph.get(start)
    if not children:
        return 1
    return max(find_length(child) + 1 for child in children)
# The longest path overall is the best result over every possible start node.
longest = 1
for node in graph:
    l = find_length(node)
    if longest <= l:
        longest = l
print(longest)
|
12,070 | fd7141cdc8f919b2e916e8cdb349b34e88878d27 | __author__ = 'diegoguaman'
'''
QUITO
==============
'''
# Python 2 script: reads tweets from a CouchDB view, scores each with
# TextBlob sentiment, appends labelled lines to a text file, and plots a
# pie chart of the positive/negative/neutral counts.
import couchdb
import sys
import urllib2
import json
import textblob
from pylab import *
from couchdb import view
URL = 'localhost'
db_name = 'rusia'
'''========couchdb'=========='''
server = couchdb.Server('http://'+URL+':5984/') #('http://245.106.43.184:5984/') put the URL of your database here
try:
    print db_name
    db = server[db_name]
    print 'success'
except:
    # NOTE(review): bare except swallows every error, not just a missing DB.
    sys.stderr.write("Error: DB not found. Closing...\n")
    sys.exit()
# fetch all rows of the design-document view as JSON
url = 'http://localhost:5984/rusia/_design/rusia15JulHashtag/_view/rusia15JulHashtag'
req = urllib2.Request(url)
f = urllib2.urlopen(req)
d = json.loads(f.read())
archivo = open("/home/usrkap/Downloads/ResultadoRusiaFinalHashtag.txt","a") #opens the results file in append mode
cont_positives = 0
cont_negatives = 0
cont_neutrals = 0
cont_total = 0
for x in d['rows']:
    a = x['value']
    texto_tweet = textblob.TextBlob(a)
    # NOTE(review): `auc` looks like a typo for `aux`; it is never read.
    auc = ''
    if texto_tweet.sentiment.polarity > 0:
        aux = a + ';positive'
        cont_positives = cont_positives + 1
    elif texto_tweet.sentiment.polarity < 0:
        aux = a + ';negative'
        cont_negatives = cont_negatives + 1
    else:
        aux = a + ';neutral'
        cont_neutrals = cont_neutrals + 1
    archivo.write(str((aux.encode("utf-8") + "\n")))
    cont_total = cont_total + 1
archivo.close()
print ("total: " + str(cont_total))
print ("positives: " + str(cont_positives))
print ("negatives: " + str(cont_negatives))
print ("neutrals: " + str(cont_neutrals))
# make a square figure and axes; figure(1, figsize=(8, 8)) would set the figure size
ax = axes([0, 0, 0.9, 0.9])# position/size of the axes within the figure
#----------------------------------------------------------------------
labels = 'Positivos ', 'Negativos', 'Neutrales '#names of the data series
fracs = [cont_positives,cont_negatives,cont_neutrals]#data to plot
#----------------------------------------------------------------------
explode=(0, 0.1, 0)#offset of one wedge from the pie centre
#pie chart: (data, wedge offsets, labels, percentage format, shadow on/off)
pie(fracs, explode=explode,labels=labels, autopct='%10.0f%%', shadow=True)
legend()
title('Evaluacion de Sentimientos Tweets RusiaFinalHashtag', bbox={'facecolor':'0.8', 'pad':5})
savefig("tweets_sentiments_rusiafinal2.png")
show()#display the chart
f.close()
|
def nextGreatest(arr):
    """Replace each element of `arr`, in place, with the greatest element
    to its right; the last element becomes -1.

    Single right-to-left pass carrying the running maximum.
    """
    size = len(arr)
    # seed the running maximum with the last element, which itself becomes -1
    running_max, arr[size-1] = arr[size-1], -1
    for idx in reversed(range(size - 1)):
        current = arr[idx]
        arr[idx] = running_max
        if running_max < current:
            running_max = current
def printArray(arr):
    """Print each element of `arr` on its own line.

    The original used the Python-2-only `print` statement and an index
    loop; this single-argument `print(...)` call behaves identically on
    Python 2 and makes the module importable on Python 3.
    """
    for item in arr:
        print(item)
|
12,072 | 4e490b24eda2ea227824e118edc55d28252a4ad4 | #!/usr/bin/python
import logging
import os
from pathlib import Path
# Symlink every file under ./dotfiles into $HOME, preserving the relative
# layout.  Existing correct links are kept; wrong links are replaced;
# non-link files are reported and left alone.
logging.basicConfig(level=logging.INFO)
base = (Path(__file__).parent / "dotfiles").absolute()
assert base.is_dir()
home = Path(os.environ["HOME"])
for path in (
    Path(dir) / name
    for dir, _, names in os.walk(base)
    for name in names
):
    # destination inside $HOME mirrors the path relative to ./dotfiles
    link_path = home / path.relative_to(base)
    # logging.info(f"path={path} link_path={link_path}")
    if link_path.is_symlink():
        # Path.readlink requires Python 3.9+
        target = link_path.readlink()
        if target == path:
            logging.debug(f"ok: {link_path}")
            continue
        else:
            # symlink points somewhere else: remove it and recreate below
            logging.warning(f"wrong link: {link_path} → {target}")
            link_path.unlink()
    elif link_path.exists():
        # a real file/directory is in the way — never overwrite it
        logging.error(f"exists: {link_path}")
        continue
    logging.info(f"creating: {link_path} → {path}")
    link_path.parent.mkdir(parents=True, exist_ok=True)
    link_path.symlink_to(path)
|
class Prototype(object):
    """Minimal prototype: spawn a fresh instance and overlay given attributes."""

    value = 'default'

    def clone(self, **attr):
        """Return a new instance of this class with `attr` set on it."""
        duplicate = type(self)()
        for key, val in attr.items():
            setattr(duplicate, key, val)
        return duplicate
class PrototypeDispatcher(object):
    """A simple name -> prototype registry."""

    def __init__(self):
        self._objects = {}

    def getObject(self, obj_name):
        """Return the object registered under `obj_name` (KeyError if absent)."""
        print(f'Getting object {obj_name}')
        return self._objects[obj_name]

    def setObject(self, name, obj):
        """Register `obj` under `name`, replacing any previous entry."""
        self._objects[name] = obj

    def delObject(self, obj_name):
        """Remove the entry for `obj_name` (KeyError if absent)."""
        self._objects.pop(obj_name)
def main():
    """Demonstrate cloning prototypes and looking them up via a dispatcher."""
    registry = PrototypeDispatcher()
    template = Prototype()
    origin_proto = template.clone(value='origin', category='Double')
    conn_proto = template.clone(value='conn', is_valid=True)
    registry.setObject('proto1', origin_proto)
    registry.setObject('proto2', conn_proto)
    print(registry.getObject('proto1').category)
    print(registry.getObject('proto2').value)


if __name__ == '__main__':
    main()
|
12,074 | 0b3781c11084af103544b895cceccf16206ea035 | # Generated by Django 2.1.3 on 2018-11-15 15:58
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the `game`, `province` and
    # `role` fields from the `member.UserProfile` model.

    dependencies = [
        ('member', '0007_auto_20181115_2249'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='userprofile',
            name='game',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='province',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='role',
        ),
    ]
|
12,075 | 14a97efe05a3208a7e4b122f0567a641ea141e08 | #!/usr/bin/python
from sys import argv
from datetime import datetime
import numpy as np
from itertools import groupby
from sklearn.metrics import confusion_matrix
from scipy import stats
from scipy import spatial
def separateFiles(trainningFile, testFile):
    """Split each dataset file into *_labels.txt and *_characteristics.txt.

    Both inputs are whitespace-delimited numeric matrices whose LAST column
    is the class label; the label column and the remaining feature columns
    are written to separate files next to the originals.
    """
    with open(trainningFile, 'r') as file:
        data = np.loadtxt(file)
        # last column = labels; everything else = feature matrix
        labels = data[:, [len(data[0])-1]]
        characteristics = np.delete(data, -1, 1)
        trainningLabelsFile = trainningFile+'_labels.txt'
        trainningCharacteristicsFile = trainningFile+'_characteristics.txt'
        np.savetxt(trainningLabelsFile, labels)
        np.savetxt(trainningCharacteristicsFile, characteristics)
    with open(testFile, 'r') as file:
        data = np.loadtxt(file)
        labels = data[:, [len(data[0])-1]]
        characteristics = np.delete(data, -1, 1)
        testLabelsFile = testFile+'_labels.txt'
        testCharacteristicsFile = testFile+'_characteristics.txt'
        np.savetxt(testLabelsFile, labels)
        np.savetxt(testCharacteristicsFile, characteristics)
def extract(fileName):
    """Load a whitespace-delimited numeric matrix from `fileName` via NumPy."""
    with open(fileName) as handle:
        return np.loadtxt(handle)
def euclideanDistance(trainning_characteristics, trainning_labels, testFile):
    """Load the pre-split test set and compute Euclidean distances.

    Returns (test_labels, distance_list) where distance_list has, for every
    test instance, the distance to each training instance.
    """
    test_labels = extract(testFile + '_labels.txt')
    test_characteristics = extract(testFile + '_characteristics.txt')
    distance_list = [
        [np.linalg.norm(np.array(row) - neighbour)
         for neighbour in trainning_characteristics]
        for row in test_characteristics
    ]
    return test_labels, distance_list
def manhattanDistance(trainning_characteristics, trainning_labels, testFile):
    """Return the test labels plus, per test instance, its Manhattan
    (cityblock) distance to every training instance.

    Python 2 code (uses print statements). ``trainning_labels`` is accepted
    for signature compatibility with euclideanDistance but is not used.
    """
    testLabelsFile = testFile+'_labels.txt'
    testCharacteristicsFile = testFile+'_characteristics.txt'
    test_labels = extract(testLabelsFile)
    test_characteristics = extract(testCharacteristicsFile)
    distance_list = []
    # Calculates the distance of the unknown instance for every neighbour
    for instance in test_characteristics:
        distance_list.append([spatial.distance.cityblock(instance, i) for i in trainning_characteristics])
    # Debug output: number of test rows / number of training neighbours.
    print len(distance_list)
    print len(distance_list[0])
    return test_labels, distance_list
def get_k_closests(distance_list, k, labels=None):
    """For each test instance, return the labels of its k nearest training instances.

    :param distance_list: one row per test instance, holding the distances to
        every training instance.
    :param k: number of neighbours to keep (must be < number of training rows,
        a constraint inherited from np.argpartition).
    :param labels: training labels; defaults to the script-level
        ``trainning_labels`` global for backward compatibility with the
        original implementation, which read the global directly.
    """
    if labels is None:
        labels = trainning_labels  # legacy fallback on the script-level global
    k_closest_classes = []
    for distances in distance_list:
        arr = np.array(distances)
        # argpartition selects the k smallest in O(n); their order is unspecified.
        nearest = np.argpartition(arr, k)[:k]
        k_closest_classes.append([labels[j] for j in nearest])
    return k_closest_classes
def classify(k_closest_classes):
    """Reduce each group of neighbour labels to its majority-vote class."""
    return [majority_vote(neighbour_labels) for neighbour_labels in k_closest_classes]
def majority_vote(L):
    """Return the most frequent element of list L; ties go to the element
    appearing earliest in L.

    Fix: the original used a Python-2-only tuple-unpacking lambda
    (``lambda(x, v): ...``), which is a syntax error on Python 3. The
    rewrite runs on both interpreters with identical tie-breaking.
    """
    # groupby over sorted input yields one (value, run) pair per distinct value;
    # the key prefers larger runs, then smaller first-occurrence index in L.
    return max(groupby(sorted(L)),
               key=lambda kv: (len(list(kv[1])), -L.index(kv[0])))[0]
def normalizeMinMax(trainning_characteristics):
    """Min-max normalize each instance (row) into the [0, 1] range.

    Bug fix: the original collected the *indices* of each row's min/max
    (via ``list.index``) and then indexed those per-row lists with *feature*
    positions, mixing rows and columns; this version uses the actual min/max
    values of each row. A constant row (max == min) maps to all zeros instead
    of dividing by zero.
    """
    normalized_list = []
    for characteristics in trainning_characteristics:
        low = float(min(characteristics))
        high = float(max(characteristics))
        span = high - low
        if span == 0:
            normalized_list.append([0.0 for _ in characteristics])
        else:
            normalized_list.append([(value - low) / span for value in characteristics])
    return normalized_list
def normalize_z_score(trainning_characteristics):
    """Z-score normalize each instance (row): (x - mean) / std, computed per row."""
    return [stats.zscore(characteristics) for characteristics in trainning_characteristics]
def buildConfusionMatrix(classified, trainning_labels, test_labels):
    """Build a confusion matrix (rows = true classes, columns = predictions).

    Delegates to sklearn's confusion_matrix. The locally computed
    n_rows / n_columns / distinctLabels values are vestigial and unused.
    (Python 2 code: uses a print statement.)
    """
    n_rows = len(trainning_labels)      # unused
    n_columns = len(trainning_labels)   # unused
    # Creates a list with unique labels names from the trainning labels
    used = set()
    distinctLabels = [x for x in trainning_labels if x not in used and (used.add(x) or True)]  # unused
    confusionMatrix = confusion_matrix(test_labels, classified)
    print 'Linhas -> classes\nColunas -> classificadas'
    return confusionMatrix
def printStat(confusionMatrix):
    """Print the overall accuracy ('taxa de acerto') of a confusion matrix.

    Bug fixes versus the original:
    * the denominator is now the total number of samples (sum of every cell)
      instead of ``correct + number_of_diagonal_cells``;
    * the trailing ``'%%'`` literal (which printed two percent signs, since no
      %-formatting was applied) is now a single ``'%'``.
    """
    correct = 0
    total = 0
    for i in range(len(confusionMatrix)):
        for j in range(len(confusionMatrix[0])):
            total += confusionMatrix[i][j]
            if i == j:
                correct += confusionMatrix[i][j]  # diagonal = correct predictions
    print('Taxa de acerto: ' + str((float(correct) / total) * 100) + '%')
if __name__ == '__main__':
startTime = datetime.now()
if(len(argv) == 6 and argv[4] == 'euclidean' and argv[5] == 'minmax'):
separateFiles(argv[1], argv[2])
trainningLabelsFile = argv[1]+'_labels.txt'
trainningCharacteristicsFile = argv[1]+'_characteristics.txt'
trainning_labels = extract(trainningLabelsFile)
trainning_characteristics = extract(trainningCharacteristicsFile)
normalized_characteristics = normalizeMinMax(trainning_characteristics)
test_labels, distance_list = euclideanDistance(normalized_characteristics, trainning_labels, argv[2])
k_closest_classes = get_k_closests(distance_list, int(argv[3]))
classified = classify(k_closest_classes)
confusionMatrix = buildConfusionMatrix(classified, trainning_labels, test_labels)
print confusionMatrix
printStat(confusionMatrix)
elif(len(argv) == 6 and argv[4] == 'euclidean' and argv[5] == 'zscore'):
separateFiles(argv[1], argv[2])
trainningLabelsFile = argv[1]+'_labels.txt'
trainningCharacteristicsFile = argv[1]+'_characteristics.txt'
trainning_labels = extract(trainningLabelsFile)
trainning_characteristics = extract(trainningCharacteristicsFile)
normalized_characteristics = normalize_z_score(trainning_characteristics)
test_labels, distance_list = euclideanDistance(normalized_characteristics, trainning_labels, argv[2])
k_closest_classes = get_k_closests(distance_list, int(argv[3]))
classified = classify(k_closest_classes)
confusionMatrix = buildConfusionMatrix(classified, trainning_labels, test_labels)
print confusionMatrix
printStat(confusionMatrix)
elif(len(argv) == 6 and argv[4] == 'manhattan' and argv[5] == 'minmax'):
separateFiles(argv[1], argv[2])
trainningLabelsFile = argv[1]+'_labels.txt'
trainningCharacteristicsFile = argv[1]+'_characteristics.txt'
trainning_labels = extract(trainningLabelsFile)
trainning_characteristics = extract(trainningCharacteristicsFile)
normalized_characteristics = normalizeMinMax(trainning_characteristics)
test_labels, distance_list = manhattanDistance(normalized_characteristics, trainning_labels, argv[2])
k_closest_classes = get_k_closests(distance_list, int(argv[3]))
classified = classify(k_closest_classes)
confusionMatrix = buildConfusionMatrix(classified, trainning_labels, test_labels)
print confusionMatrix
printStat(confusionMatrix)
elif(len(argv) == 6 and argv[4] == 'manhattan' and argv[5] == 'zscore'):
separateFiles(argv[1], argv[2])
trainningLabelsFile = argv[1]+'_labels.txt'
trainningCharacteristicsFile = argv[1]+'_characteristics.txt'
trainning_labels = extract(trainningLabelsFile)
trainning_characteristics = extract(trainningCharacteristicsFile)
normalized_characteristics = normalize_z_score(trainning_characteristics)
test_labels, distance_list = manhattanDistance(normalized_characteristics, trainning_labels, argv[2])
k_closest_classes = get_k_closests(distance_list, int(argv[3]))
classified = classify(k_closest_classes)
confusionMatrix = buildConfusionMatrix(classified, trainning_labels, test_labels)
print confusionMatrix
printStat(confusionMatrix)
else:
print 'Erro ao executar o script. Verifique os parametros entrados.'
print datetime.now() - startTime,'segundos' |
12,076 | 1506dd0aa3e958a73c04e7237f1bf758323f5d0a | from django.shortcuts import render, redirect
from django.contrib.auth import get_user_model
from ..filters import PatientFilter
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django_filters.views import FilterView
from django.core.mail import EmailMessage
from django.template.loader import get_template
import sweetify
from django.views.generic import CreateView, ListView, DetailView, UpdateView, DeleteView
from ..forms import MedicalHistoryForm, ContactForm
from ..models import MedicalHistory
User = get_user_model()
def index(request):
    """Render the public home page."""
    context = {}
    return render(request, 'home.html', context)
def contact(request):
    """Contact-form view: emails the site owner on a valid POST.

    Bug fixes versus the original:
    * on GET a fresh form *instance* is rendered (the original passed the
      form class itself as 'form');
    * after an invalid POST the *bound* form is re-rendered so validation
      errors reach the template;
    * the EmailMessage object no longer shadows the submitted address
      (the original rebound ``email`` mid-function).
    """
    form_class = ContactForm
    form = form_class(data=request.POST) if request.method == 'POST' else form_class()
    if request.method == 'POST' and form.is_valid():
        name = request.POST.get('name', '')
        sender_address = request.POST.get('email', '')
        message = request.POST.get('message', '')
        # Email the profile with the contact information, rendered from a
        # plain-text template.
        template = get_template('contact_template.txt')
        context = {
            'name': name,
            'email': sender_address,
            'message': message,
        }
        content = template.render(context)
        notification = EmailMessage(
            "New contact form submission",
            content,
            "TIPSTAR",
            ['tipstar3@gmail.com'],
            headers={'Reply-To': sender_address}
        )
        notification.send()
        return redirect('home')
    return render(request, 'contact.html', {'form': form})
class DashboardView(LoginRequiredMixin, FilterView):
    """Paginated, filterable dashboard of MedicalHistory records.

    Bug fix: ``LoginRequiredMixin`` must come *before* the view class in the
    base list, otherwise its dispatch() (the login check) is bypassed by the
    view's own dispatch in the MRO.
    """
    model = MedicalHistory
    template_name = 'dashboard.html'
    filterset_class = PatientFilter
    paginate_by = 5
    ordering = ['-id']
    strict = False

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['filter'] = PatientFilter(self.request.GET, queryset=self.get_queryset())
        # Preserve the active filter parameters (minus 'page') so pagination
        # links keep the current filters applied.
        query = self.request.GET.copy()
        if 'page' in query:
            del query['page']
        context['queries'] = query
        return context
# post a Medical record
class CreateMedicalRecordView(LoginRequiredMixin, CreateView):
    """Create a MedicalHistory record owned by the logged-in user."""
    model = MedicalHistory
    form_class = MedicalHistoryForm
    template_name = 'createrecords.html'
    success_url = reverse_lazy('home')
    context_object_name = 'medicalhistory'

    def form_valid(self, form):
        # Attach the current user as the record owner before saving.
        form.instance.user = self.request.user
        form.save()
        return super(CreateMedicalRecordView, self).form_valid(form)

    def post(self, request, *args, **kwargs):
        # Overridden only to surface a sweetify toast on success/failure.
        form = self.get_form()
        if form.is_valid():
            # NOTE(review): toast title says "job" but this creates a medical
            # record; 'sucsess' icon name also looks like a typo -- confirm.
            sweetify.success(self.request, title='Successfully created job!', text='You have successfully created Medical record', icon='sucsess', button="OK", timer=3000)
            return self.form_valid(form)
        else:
            sweetify.error(self.request, title='Error', text='Unsuccessful. Kindly try again', icon='error', button='Close', timer=5000)
            return self.form_invalid(form)
# List view of records for a specific user
class UserRecordListView(LoginRequiredMixin, ListView):
    """List the logged-in user's own MedicalHistory records, newest first."""
    model = MedicalHistory
    template_name = 'user_record.html'
    context_object_name = 'records'
    ordering = ['-created_at']
    paginate_by = 3

    def get_queryset(self):
        # Restrict the listing to records owned by the requesting user.
        user = self.request.user
        return MedicalHistory.objects.filter(user=user).distinct().order_by('-created_at')
class RecordDetailView(DetailView):
    """Detail page for a single MedicalHistory record, looked up via the 'id' URL kwarg."""
    model = MedicalHistory
    template_name = 'record_detail.html'
    context_object_name = 'record'
    pk_url_kwarg = 'id'
# Update a Medical Record
class RecordUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit a MedicalHistory record; only its owner passes test_func.

    Bug fix: form_valid set ``form.instance.author``, but every other view in
    this module (and test_func below) uses the model's ``user`` field, so the
    assignment was a no-op attribute; it now sets ``user`` consistently.
    """
    model = MedicalHistory
    # fields = ['illness', 'symptoms', 'additional_info', 'disability', 'medications']
    form_class = MedicalHistoryForm
    template_name = 'update_form.html'
    pk_url_kwarg = 'id'
    success_url = reverse_lazy('medical-record-list')

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super().form_valid(form)

    def test_func(self):
        # Only the record's owner may update it.
        record = self.get_object()
        return self.request.user == record.user
# Delete a Medical History
class RecordDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a MedicalHistory record; only its owner passes test_func."""
    model = MedicalHistory
    template_name = 'record_confirm_delete.html'
    success_url = reverse_lazy('medical-record-list')

    def test_func(self):
        # Only users that created the record are permitted to delete it.
        # Fix: return an explicit boolean instead of implicitly returning
        # None on failure (consistent with RecordUpdateView.test_func).
        record = self.get_object()
        return self.request.user == record.user
def pie_chart(request):
    """Render a chart of illness frequencies across all MedicalHistory rows.

    Bug fix: the original raised KeyError for any illness value not in the
    preset dictionary; unknown illnesses are now counted as extra entries.
    """
    count = {'Anxiety': 0, 'Arthritis': 0, 'Asthma': 0, 'Anemia': 0, 'Cancer': 0,
             'Corona_virus': 0, 'Diabetes': 0, 'Ebola': 0, 'HIV': 0
             }
    queryset = MedicalHistory.objects.values('illness')
    for entry in queryset:
        for illness in entry.values():
            count[illness] = count.get(illness, 0) + 1
    labels = [*count.keys()]
    data = [*count.values()]
    return render(request, 'chart.html', {
        'labels': labels,
        'data': data,
    })
def error_404(request, exception):
    """Custom 404 handler; ``exception`` is required by Django's handler signature."""
    return render(request, 'error_404.html', {})
def error_500(request):
    """Custom 500 handler (no exception argument, per Django's convention)."""
    return render(request, 'error_500.html', {})
|
12,077 | 9833c6937f89a024601f5e59142131320ff15eae | #!usr/bin/python
# -*- coding:utf-8 -*-
import multiprocessing
import time
##实例 第一种方式
# def process(num):
# time.sleep(num)
# print 'Process:', num
#
#
# if __name__ == '__main__':
# for i in range(5):
# p = multiprocessing.Process(target=process, args=(i,))
# p.start()
#
# print 'CPU number:' + str(multiprocessing.cpu_count())
# for p in multiprocessing.active_children():
# print 'Child process name: ' + p.name + ' id: ' + str(p.pid)
#
# print 'Process End!'
##实例 自定义类 第二种方式
from multiprocessing import Process
class MyProcess(Process):
    """Process subclass that prints its pid once per second for ``loop`` iterations.

    Python 2 code (print statements in run()).
    """
    def __init__(self, loop):
        Process.__init__(self)
        self.loop = loop  # number of one-second iterations run() performs

    def run(self):
        for count in range(self.loop):
            time.sleep(1)
            print 'Pid: ' + str(self.pid) + ' LoopCount: ' + str(count)
# if __name__ == '__main__':
# for i in range(2, 5):
# p = MyProcess(i)
# p.start()
##deamon属性 如果设置为True,当父进程结束后,子进程会自动被终止。
# if __name__ == '__main__':
# for i in range(2, 5):
# p = MyProcess(i)
# p.daemon =True
# p.start()
#
# print 'Main process end!'
## 输出结果为 Main process end!
if __name__ == '__main__':
    for i in range(2, 5):
        p = MyProcess(i)
        p.daemon =True  # daemon children are terminated when the parent exits
        p.start()
        p.join()  # joining inside the loop serialises the child processes
    print 'Main process end!'
# Each child process had join() called on it, so the parent (main) process
# waits for the children to finish before printing the final line.
12,078 | 3bc5e18eef1bc17c64e9f654e6a1f7f5e16c44a2 | from django.contrib import admin
from .models import Post
from django.contrib.auth.admin import UserAdmin
# Register your models here.
class PostAdmin(admin.ModelAdmin):
    """Admin config for Post: 'approval' is editable directly in the change list."""
    list_display = ['name','image','hash', 'approval']
    list_editable = ['approval']


admin.site.register(Post, PostAdmin)
|
12,079 | 3d2a1d0774ed280a1f0c18bb268d606dfc3cf7e0 | from django.apps import AppConfig
class EbcAppConfig(AppConfig):
    """Django application configuration for the ebc_app package."""
    name = 'ebc_app'
|
12,080 | cf1c00259fd3c0ff8888a05b2ced3a327625f611 |
from xai.brain.wordbase.nouns._crank import _CRANK
# class header
class _CRANKED(_CRANK, ):
    """Auto-generated noun entry 'CRANKED', an inflected form of base word 'crank'."""
    def __init__(self,):
        _CRANK.__init__(self)
        self.name = "CRANKED"
        self.specie = 'nouns'   # part-of-speech bucket this entry belongs to
        self.basic = "crank"    # base (dictionary) form of the word
        self.jsondata = {}      # extra payload, empty for this entry
|
12,081 | 17e42765380dbd6cf7d8bd7a461fc0159d4300f2 | # -*- coding: utf-8 -*-
# Third Party Stuff
import factory
from django.conf import settings
class Factory(factory.DjangoModelFactory):
    """Abstract base factory; concrete subclasses must set ``Meta.model``."""
    class Meta:
        strategy = factory.CREATE_STRATEGY
        model = None
        abstract = True
class UserFactory(Factory):
    """Factory for the project's AUTH_USER_MODEL: unique sequential email,
    password set to '123123' after creation."""
    class Meta:
        model = settings.AUTH_USER_MODEL

    email = factory.Sequence(lambda n: 'user%04d@email.com' % n)
    password = factory.PostGeneration(lambda obj, *args, **kwargs: obj.set_password('123123'))
def create_user(**kwargs):
    """Create a user along with their dependencies."""
    return UserFactory.create(**kwargs)
|
12,082 | 0548096e8092d01c072aadbdc0110e6690dcf1a2 | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.views.decorators.cache import cache_page
from django.views.generic.base import TemplateView
admin.autodiscover()

# Legacy (pre-1.8) Django URLconf using patterns(); static pages are cached
# for 15 minutes, everything else falls through to the mindmap app.
urlpatterns = patterns('',
    url(r'^$', cache_page(60 * 15)(TemplateView.as_view(template_name='startpage.html')), name='startpage'),
    url(r'^legal_information/$', cache_page(60 * 15)(TemplateView.as_view(template_name='legal_information.html')), name='legal_information'),

    # Uncomment the admin/doc line below to enable admin documentation:
    #url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    #url(r'^admin/', include(admin.site.urls)),

    url(r'^', include('beautifulmind.mindmap.urls')),
)

# Serve media and admin media straight from Django only in development.
if settings.ENVIRONMENT.IS_FOR_DEVELOPMENT:
    urlpatterns += patterns('',
        (r'^assets/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
        (r'^admin_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT+'../admin_media/', 'show_indexes': True}),
    )
12,083 | acb85c82d84af90c40bb133863a42a6dc9f0f3e9 | # coding: utf-8
from __future__ import absolute_import
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestServiceSettingsController(BaseTestCase):
    """ ServiceSettingsController integration test stubs """

    def test_get_liveness(self):
        """
        Test case for get_liveness

        Get job service liveness
        """
        # NOTE(review): the '//' in the path looks like an empty basePath in the
        # generated URL -- confirm the server actually routes double slashes.
        response = self.client.open('/api/v1//liveness',
                                    method='GET',
                                    content_type='application/json')
        self.assert200(response, "Response body is : " + response.data.decode('utf-8'))

    def test_get_readiness(self):
        """
        Test case for get_readiness

        Get job service readiness
        """
        response = self.client.open('/api/v1//readiness',
                                    method='GET',
                                    content_type='application/json')
        self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    import unittest
    unittest.main()
|
12,084 | 44c5132507b6d49d70593db00f8174b8c48d9024 | """Installation script."""
from setuptools import find_packages, setup
setup(
    name='blocks_mindlab',
    description='Tools for build and launch experiments with Blocks',
    author='Mindlab Group',
    packages=find_packages(),
    install_requires=['blocks', 'blocks_extras'],
    extras_require={},
    # Command-line entry points shipped with the package.
    scripts=['bin/run_all', 'bin/job'],
    zip_safe=False,
)
|
12,085 | 194dc522d18bcb47cef4b22b510bbf4798d841d8 | #!/Users/kyoungrok/Library/Enthought/Canopy_64bit/User/bin/python
from org.apache.pig.scripting import *
# Jython/Pig embedded script: iterates PageRank K times, then extracts the
# top pages joined with their titles.
# Bug fix: ``raise 'failed'`` raises a *string*, which is a TypeError on any
# modern interpreter (string exceptions were removed in Python 2.6); real
# exceptions are raised instead.

# One PageRank iteration: each page distributes its rank evenly over its
# outbound links; $d is the damping factor.
P1 = Pig.compile("""
previous_pagerank = load '$docs_in' as (page_id:int,
links:{link:(link_id:int)},
pagerank:float);
outbound_pagerank = foreach previous_pagerank generate
((float)pagerank / COUNT(links)) as pagerank,
flatten(links) as to_link;
cogrpd = cogroup outbound_pagerank by to_link,
previous_pagerank by page_id;
p_groupd = group previous_pagerank all;
N = foreach p_groupd generate COUNT(previous_pagerank) as count;
new_pagerank = foreach cogrpd generate group as page_id,
flatten(previous_pagerank.links) as links,
((float) (1 - $d)/N.count + $d * SUM(outbound_pagerank.pagerank)) as pagerank:float;
store new_pagerank into '$docs_out';
--explain -out explain/pagerank_$iteration.dot -dot new_pagerank;
""")

# Final pass: sort by rank, keep the top 300 and join in the page titles.
P2 = Pig.compile("""
pagerank_temp = load '$docs_in' as (page_id:int, links:{link:(link_id:int)}, pagerank:float);
pagerank = foreach pagerank_temp generate page_id, pagerank;
sorted = order pagerank by pagerank desc;
top = limit sorted 300;
id_title_temp = load 'output/id_title' as (page_id:int, title:chararray);
id_title = distinct id_title_temp;
top_title_temp = join top by page_id, id_title by page_id using 'replicated';
top_title = foreach top_title_temp generate id_title::title as title, top::pagerank as pagerank;
store top_title into '$docs_out';
--explain -out explain/result.dot -dot top_title;
""")

# Calculate PageRank
params = { 'd': '0.85', 'docs_in': 'output/graph' }
K = 20

for i in range(K):
    out = "output/pagerank_" + str(i + 1)
    params["docs_out"] = out
    params["iteration"] = str(i + 1)
    # Pig.fs("rmr " + out)
    bound = P1.bind(params)
    stats = bound.runSingle()
    if not stats.isSuccessful():
        raise RuntimeError('PageRank iteration %d failed' % (i + 1))
    # Each iteration's output feeds the next one.
    params["docs_in"] = out

# Sort and Save Result
params = {'docs_in': 'output/pagerank_' + str(K), 'docs_out': 'output/result'}
bound = P2.bind(params)
stats = bound.runSingle()
if not stats.isSuccessful():
    raise RuntimeError('result extraction failed')
12,086 | eb510686c4bf25b1b70629197cdb8e86fc73618f | # Copyright (c) 2013 phrack. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import pyaudio
import pyttsx
from threading import Thread
import wave
# This class hold shootoff functions that should be exposed to training protocol
# plugins. Each instance of a plugin has its own instance of this class.
class ProtocolOperations():
    """ShootOFF functions exposed to training-protocol plugins.

    Each plugin instance gets its own instance of this class. TTS and sound
    playback run on worker threads so the UI does not freeze while speaking
    or playing. (Python 2-era code: pyttsx / pyaudio.)
    """
    def __init__(self, canvas, shootoff):
        self._canvas = canvas
        # Every canvas item a plugin creates is tracked here so clear_canvas()
        # can remove them all at once.
        self._plugin_canvas_artifacts = []
        self._shootoff = shootoff

        # Text shown in the top-left corner of the webcam feed.
        self._feed_text = self._canvas.create_text(1, 1, anchor="nw", fill="white")
        self._plugin_canvas_artifacts.append(self._feed_text)

        self._tts_engine = pyttsx.init()
        # slow down the wpm rate otherwise they speak too fast
        self._tts_engine.setProperty("rate", 150)
        self._tts_engine.startLoop(False)

    def destroy(self):
        """Tear down the TTS loop and remove plugin canvas artifacts."""
        if self._tts_engine._inLoop:
            self._tts_engine.endLoop()

        self.clear_canvas()

    def clear_shots(self):
        """Delegate clearing recorded shots to the main ShootOFF instance."""
        self._shootoff.clear_shots()

    # Use text-to-speech to say message outloud
    def say(self, message):
        # if we don't do this on another thread we have to wait until
        # the message has finished being communicated to do anything
        # (i.e. shootoff freezes)
        self._say_thread = Thread(target=self._say, args=(message,),
            name="say_thread")
        self._say_thread.start()

    def _say(self, *args):
        # Worker: feed the message to the externally-driven TTS loop.
        self._tts_engine.say(args[0])
        self._tts_engine.iterate()

    # Show message as text on the top left corner of the webcam feed. The
    # new message will over-write whatever was shown before
    def show_text_on_feed(self, message):
        self._canvas.itemconfig(self._feed_text, text=message)

    # Remove anything added by the plugin from the canvas
    def clear_canvas(self):
        for artifact in self._plugin_canvas_artifacts:
            self._canvas.delete(artifact)

    # Play the sound in sound_file
    def play_sound(self, sound_file):
        # if we don't do this on another thread we have to wait until
        # the message has finished being communicated to do anything
        # (i.e. shootoff freezes)
        self._play_sound_thread = Thread(target=self._play_sound,
            args=(sound_file,), name="play_sound_thread")
        self._play_sound_thread.start()

    def _play_sound(self, *args):
        """Worker: stream a WAV file to the default audio device in chunks."""
        chunk = 1024

        # initialize the sound file and stream
        f = wave.open(args[0],"rb")
        p = pyaudio.PyAudio()
        stream = p.open(format = p.get_format_from_width(f.getsampwidth()),
            channels = f.getnchannels(),
            rate = f.getframerate(),
            output = True)

        # play the sound file
        data = f.readframes(chunk)

        # NOTE(review): on Python 3 readframes returns bytes, so ``data != ''``
        # would never become false; this loop assumes Python 2 str semantics.
        while data != '':
            stream.write(data)
            data = f.readframes(chunk)

        # clean up
        stream.stop_stream()
        stream.close()
        p.terminate()
|
12,087 | 3e3790b3db3733c7f5849e8bcf882718a153dbdd | __author__ = 'benjamin'
import numpy as np
from dc2D import dual_contour
from dcSample import sample_data, sphere_f, doubletorus_f, torus_f
from dcPlotting import plot_edges, plot_vertices, plot_non_manifold_vertices, plot_surface
from qtPlotting import plot_qt
from dcManifolds import detectManifolds2d
from quadtree import Quadtree
from Vertex import Vertex2
from Edge import Edge2
def transform_into_object_sets(dc_verts, dc_edges):
    """Convert raw dual-contouring output into Vertex2/Edge2 object sets.

    Replaces each coordinate pair in ``dc_verts`` (in place) with its Vertex2
    object, then builds Edge2 objects from the endpoint keys listed in
    ``dc_edges``. Returns (vertex_set, edge_set).
    """
    vertex_set = set()
    for key, coords in dc_verts.items():
        vertex = Vertex2(coords[0], coords[1])
        vertex_set.add(vertex)
        dc_verts[key] = vertex  # keep the key -> object mapping for edge lookup
    edge_set = {Edge2(dc_verts[edge[0]], dc_verts[edge[1]]) for edge in dc_edges}
    return vertex_set, edge_set
# 2D dual-contouring demo pipeline: sample an implicit function, contour it,
# convert to objects, index them in a quadtree, coarsen and plot both stages.
# (Python 2 script: uses print statements.)
print "creating sample data..."
dimensions = {'xmin': 0.0, 'xmax': 8.0, 'ymin': 0.0, 'ymax': 8.0}
resolution = 1.0/32.0
data = sample_data(doubletorus_f, resolution, dimensions)
print "done"

print "dual contouring..."
[dc_verts, dc_edges] = dual_contour(data, resolution, dimensions)
print "done."

#non_manifold_verts = detectManifolds2d(dc_edges)

print "transforming into objects..."
vertex_set, edge_set = transform_into_object_sets(dc_verts, dc_edges)
print "done."

print "building quadtree..."
qt = Quadtree(8.0, np.array([0,0]))
qt.add_dataset(vertex_set)
print "quadtree of depth "+str(qt.get_depth())+" constructed."
print "done."

print "plotting..."
import matplotlib.pyplot as plt
#fig = plt.figure()
#plot_qt(qt)
#plot_vertices(vertex_set)
#plot_edges(edge_set)
#plot_non_manifold_vertices(dc_verts, non_manifold_verts)
# Original (fine) data in blue.
plot_qt(qt, 'b--')
plot_vertices(vertex_set,'b.')
#plot_surface(vertex_set,'b.')

# Coarsen the quadtree and overlay the reduced dataset in red for comparison.
qt.do_coarsening(3)
print vertex_set.__len__()
vertex_set = qt.get_dataset()
print vertex_set.__len__()
plot_qt(qt, 'r')
#plot_vertices(vertex_set,'ro')
plot_surface(vertex_set,'ro')
#plot_edges(edge_set)
plt.xlim(xmin = 0, xmax = 8)
plt.ylim(ymin = 0, ymax = 8)
plt.axis('equal')
plt.show()
|
12,088 | 23a96489658248cbcffcc9a87701fae84f36f646 | import MakeInclude as mk
# Generate the include file(s) via the MakeInclude helper module.
mk.MakeInclude()
|
12,089 | 29b181fc33e3d9896a894af514cb49f5e39c05e3 | from django.db import models
from django.contrib.auth.models import BaseUserManager
from rest_framework_simplejwt.tokens import RefreshToken
# Create your models here.
class UserManager(BaseUserManager):
    """Manager providing a convenience constructor for InfoUser rows."""

    def create_user(self, username, email, telefono, programa):
        """Create, save and return an InfoUser.

        Raises TypeError when username or email is missing.
        NOTE(review): no password handling here -- InfoUser appears to hold
        contact info only; confirm authentication happens elsewhere.
        """
        if username is None:
            raise TypeError('El usuario no se ha ingresado')
        if email is None:
            raise TypeError('El correo no se ha ingresado')
        user = self.model(username=username, email=self.normalize_email(email),programa=programa, telefono=telefono)
        user.save()
        return user
class InfoUser(models.Model):
    """Contact/profile information record, uniquely keyed by username and email."""
    username = models.CharField(max_length=255, unique=True, db_index=True)
    email = models.EmailField(max_length=255, unique=True, db_index=True)
    telefono = models.IntegerField()  # phone number
    # NOTE(review): unique=True here means no two rows may share a programa --
    # confirm that is intended for a study-program field.
    programa = models.CharField(max_length=255, unique=True, db_index=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    objects = UserManager()

    def __str__(self):
        return self.email

    def tokens(self):
        """Return a fresh JWT refresh/access token pair for this record."""
        jwt_token = RefreshToken.for_user(self)
        return {
            'refresh': str(jwt_token),
            'access': str(jwt_token.access_token),
        }
|
12,090 | 2b2ae91c9e92adf33d4c1b69cb90c3a643af975a | from django.db import models
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from wagtail.admin.edit_handlers import FieldPanel, PageChooserPanel
from wagtail.core.fields import RichTextField
from wagtail.core.models import Page
from common.models import SFIPage
from common.utils import paginate
DEFAULT_PAGINATION = 20 # the number should be even (two column view).
class PostIndex(SFIPage):
    """Wagtail listing page for Post children, paginated newest-first."""
    subpage_types = ['Post']

    # Number of posts per listing page (kept even for the two-column layout).
    POSTS_PER_PAGE = DEFAULT_PAGINATION

    def get_context(self, request, *args, **kwargs):
        context = super().get_context(request, *args, **kwargs)
        # Only live, public descendant posts, newest first.
        context['posts'] = paginate(
            Post.objects.live().public().descendant_of(self).order_by('-date'),
            request, PostIndex.POSTS_PER_PAGE)
        return context

    class Meta:
        verbose_name = _('post index')
        verbose_name_plural = _('post indexes')
class Post(SFIPage):
    """A single dated post; may redirect to another page instead of rendering."""
    content = RichTextField(verbose_name=_('content'))
    date = models.DateTimeField(default=timezone.now, verbose_name=_('post date'))
    redirect_to = models.ForeignKey(
        "wagtailcore.Page",
        null=True,
        blank=True,
        related_name="+",
        on_delete=models.PROTECT,
        verbose_name=_('redirect to page'),
        help_text=_(
            'Redirect to a specified page instead of showing content. '
            'You should still add content or description to show in previews.')
    )

    content_panels = SFIPage.content_panels + [
        FieldPanel('date'),
        FieldPanel('content'),
    ]

    settings_panels = SFIPage.settings_panels + [
        PageChooserPanel('redirect_to'),
    ]

    parent_page_types = ['PostIndex']
    subpage_types = []

    def serve(self, request, *args, **kwargs):
        # When a redirect target is configured, bypass normal rendering.
        if self.redirect_to:
            return redirect(self.redirect_to.url)
        return super().serve(request, *args, **kwargs)

    class Meta:
        verbose_name = _('post')
        verbose_name_plural = _('posts')
|
12,091 | 74939c5a6385990dbfb108df5b6157f6196b5406 | ###############################################################################
# For copyright and license notices, see __manifest__.py file in root directory
###############################################################################
from odoo import fields
from odoo.tests.common import TransactionCase
class TestContractCumulativeDiscount(TransactionCase):
    """Tests for cumulative ('multiple') discounts on contract lines and on
    the invoices / sale orders generated from them."""

    def setUp(self):
        super().setUp()
        # Minimal service product attached to a contract template so that
        # confirming a sale order spawns a contract.
        self.contract_template = self.env['contract.template'].create({
            'name': 'Contract template',
            'contract_type': 'sale',
        })
        self.product = self.env['product.product'].create({
            'type': 'service',
            'company_id': False,
            'name': 'Service product',
            'standard_price': 10,
            'list_price': 100,
            'is_contract': True,
            'contract_template_id': self.contract_template.id,
        })
        self.partner = self.env['res.partner'].create({
            'name': 'Partner test',
        })

    def test_contract_line_without_discount(self):
        """A line without multiple_discount keeps the full price, and the
        generated invoice line mirrors the empty discount fields."""
        contract = self.env['contract.contract'].create({
            'name': 'Test contract',
            'partner_id': self.partner.id,
        })
        line_obj = self.env['contract.line']
        line = line_obj.new({
            'name': 'Test contract',
            'contract_id': contract.id,
            'product_id': self.product.id,
            'multiple_discount': False,
            'discount_name': False,
            'recurring_next_date': fields.Date.today(),
            'quantity': 1,
        })
        line._onchange_product_id()
        line.price_unit = 100
        line.multiple_discount = False
        line = line_obj.create(line_obj._convert_to_write(line._cache))
        self.assertEquals(line.price_subtotal, 100)
        contract.recurring_create_invoice()
        inv_line = self.env['account.invoice.line'].search([
            ('contract_line_id', 'in', line.ids),
        ])
        self.assertEquals(len(inv_line), 1)
        self.assertEquals(inv_line.multiple_discount, line.multiple_discount)
        self.assertEquals(inv_line.discount_name, line.discount_name)
        self.assertEquals(inv_line.price_subtotal, 100)

    def test_contract_line_discount(self):
        """'10+10' compounds to 19% off (100 -> 81) and the invoice line
        inherits both the discount expression and its name."""
        contract = self.env['contract.contract'].create({
            'name': 'Test contract',
            'partner_id': self.partner.id,
        })
        line_obj = self.env['contract.line']
        line = line_obj.new({
            'name': 'Test contract',
            'contract_id': contract.id,
            'product_id': self.product.id,
            'multiple_discount': '10+10',
            'discount_name': 'Two discount',
            'recurring_next_date': fields.Date.today(),
            'quantity': 1,
        })
        line._onchange_product_id()
        line.price_unit = 100
        line = line_obj.create(line_obj._convert_to_write(line._cache))
        self.assertEquals(line.price_subtotal, 81.)
        contract.recurring_create_invoice()
        inv_line = self.env['account.invoice.line'].search([
            ('contract_line_id', 'in', line.ids),
        ])
        self.assertEquals(len(inv_line), 1)
        self.assertEquals(inv_line.multiple_discount, line.multiple_discount)
        self.assertEquals(inv_line.discount_name, line.discount_name)
        self.assertEquals(inv_line.price_subtotal, 81.)

    def test_contract_line_discount_from_sale(self):
        """Confirming a sale order with a discounted line propagates the
        discount fields onto the generated contract line."""
        sale = self.env['sale.order'].create({
            'partner_id': self.partner.id,
            'order_line': [
                (0, 0, {
                    'product_id': self.product.id,
                    'multiple_discount': '10+10',
                    'discount_name': 'Two discount',
                    'price_unit': 100,
                    'product_uom_qty': 1}),
            ]
        })
        sale.order_line[0].onchange_multiple_discount()
        self.assertEquals(sale.order_line[0].price_subtotal, 81.)
        sale.action_confirm()
        contract = sale.order_line.mapped('contract_id')
        self.assertEquals(len(contract), 1)
        line = contract.mapped('contract_line_ids')
        self.assertEquals(len(line), 1)
        self.assertEquals('10+10', sale.order_line[0].multiple_discount)
        self.assertEquals(
            line.multiple_discount, sale.order_line[0].multiple_discount)
        self.assertEquals(
            line.discount_name, sale.order_line[0].discount_name)

    def test_contract_line_onchange(self):
        """Toggling multiple_discount on an in-memory line recomputes the
        subtotal in both directions."""
        contract = self.env['contract.contract'].create({
            'name': 'Test contract',
            'partner_id': self.partner.id,
        })
        line_obj = self.env['contract.line']
        line = line_obj.new({
            'name': 'Test contract',
            'contract_id': contract.id,
            'product_id': self.product.id,
            'multiple_discount': '10+10',
            'discount_name': 'Two discount',
            'recurring_next_date': fields.Date.today(),
            'price_unit': 100,
            'quantity': 1,
        })
        self.assertEquals(line.price_subtotal, 81.)
        line.multiple_discount = False
        self.assertEquals(line.price_subtotal, 100.)
        line.multiple_discount = '10+10'
        self.assertEquals(line.price_subtotal, 81.)
|
12,092 | d3697e3976236b6fda796fb3bd8b175574c60b9d | # -*- coding: cp1254 -*-
# Python 2 print-statement demo, including a cp1254-encoded (Turkish)
# triple-quoted multi-line string.
print "6/2"
print "selam"
print """bu üç tırnaklı satırların ilki
bu da ikincisi"""
|
12,093 | 723f0f97ef930ecb04d1f88dafcb45603e5e9675 | import warnings
import os
import numpy as np
from itertools import zip_longest
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from itertools import chain
from models.config import Config
from models.video import Video
from models.preprocessor import Preprocessor
warnings.filterwarnings('ignore')
class Visualisation:
"""
Visualisation class to be able to use the matplotlib.animation.FuncAnimation with global variables and keep it
reasonably structured.
"""
    def __init__(self):
        """Set up the skeleton topology and empty matplotlib artists."""
        # Keypoint-index pairs defining the skeleton's limb segments.
        # NOTE(review): layout assumed to match the pose model that produced
        # the coordinates -- confirm against the extraction pipeline.
        self.connections = [
            (1, 2), (1, 5), (2, 3), (3, 4), (5, 6), (6, 7), (1, 8),
            (8, 9), (9, 10), (1, 11), (11, 12), (12, 13), (1, 0),
            (0, 14), (14, 16), (0, 15), (15, 17), (2, 16), (5, 17)
        ]

        # Preset figure and axis
        self.fig, self.ax = plt.subplots(figsize=(14, 10))

        # Instantiate points, lines and annotation for the plot
        self.points, = self.ax.plot([], [], 'o')
        self.lines = [self.ax.plot([], [])[0] for i in range(len(self.connections))]
        self.annotation = self.ax.annotate('', xy=(0.02, 0.95), xycoords='axes fraction',
                                           bbox=dict(facecolor='red', alpha=0.5), fontsize=12)
    def process_data(self, clip_name) -> Preprocessor:
        """
        Process data and return preprocessor instance

        Looks up the clip's JSON sidecar (same basename, .json extension) in
        the configured video_data folder and wraps it in a Preprocessor.

        :param clip_name: Clip name with at the end .mp4 which will be fetched from the video_data folder
        :return preprocessor: Instance of the Preprocessor class from which attributes can be directly retrieved
        """
        config: Config = Config.get_config()
        folder_name = config.video_data
        # Replace the final extension with .json to find the sidecar file.
        video_data_file = ''.join(clip_name.split('.')[:-1]) + '.json'

        video = Video.from_json(os.path.join(folder_name, video_data_file))

        # Convert to usable data type period_running_person division, alle fragment soorten
        preprocessor = Preprocessor(video)

        return preprocessor
def get_plottables(self, period_person_division, running_person_identifiers, running_fragments, turning_fragments):
"""
Function to construct all plottable files. In principle to be used for visualisation.
:param period_person_division: Dictionary within dictionary of the coordinates for each person for each frame
:param running_person_identifiers: Set of integers, indicating the running people
:param running_fragments: List of tuples with a start and end frame for each running fragment
:param turning_fragments: List of tuples with a start and end frame for each turning fragment
:return:
"""
period_running_person_division = {period: {person: coords for person,
coords in period_dictionary.items() if
person in running_person_identifiers}
for period, period_dictionary in period_person_division.items()}
running_plottables = {
period: {person: coords for person, coords in period_dictionary.items() if person in running_person_identifiers}
for period, period_dictionary in period_person_division.items() if
any(lower <= period <= upper for (lower, upper) in running_fragments)}
turning_plottables = {
period: {person: coords for person, coords in period_dictionary.items() if person in running_person_identifiers}
for period, period_dictionary in period_person_division.items() if
any(lower <= period <= upper for (lower, upper) in turning_fragments)}
period_running_person_division = dict(filter(lambda x: x[1] != {}, period_running_person_division.items()))
running_plottables = dict(filter(lambda x: x[1] != {}, running_plottables.items()))
turning_plottables = dict(filter(lambda x: x[1] != {}, turning_plottables.items()))
return period_running_person_division, running_plottables, turning_plottables
def func_init(self):
    """
    Initialize function which will be called to create the base frame upon which the animation takes place.
    This is used for blitting to create smoother animations
    :return: Tuple with all plottable objects
    """
    # Clear the keypoint scatter, every skeleton connection line, and the
    # frame-number annotation so the animation starts from a blank canvas.
    self.points.set_data([], [])
    for line in self.lines:
        line.set_data([],[])
    self.annotation.set_text('')
    # FuncAnimation's blitting requires returning every artist that will change.
    return tuple(self.lines) + (self.points, self.annotation)
def set_axis_limits(self, plottables, image_h, image_w, zoom, pad):
    """Set the axes limits, either tightly around the plotted people (zoom)
    or to the full video frame.

    :param plottables: dict of frame -> {person: coords array}
    :param image_h: video height (y coords are shifted by this elsewhere)
    :param image_w: video width
    :param zoom: if True, center the y-range on the mean y coordinate
    :param pad: number of y standard deviations kept on each side when zoomed
    """
    if zoom:
        # Collect all non-zero y coordinates across every frame and person;
        # rows equal to 0 are OpenPose "keypoint missing" markers.
        y_coords = np.array([coords[~(coords == 0).any(axis=1)][:, 1]
                             for period_dictionary in plottables.values() for coords in period_dictionary.values()])
        y_coords = np.array(list(chain.from_iterable(y_coords))) + image_h
        cy = np.mean(y_coords)  # y center
        stdy = np.std(y_coords)  # y standard deviation
        self.ydiff = stdy * pad * 2  # total range of y (reused by plot_person for the aspect ratio)
        self.ax.set_ylim(cy - stdy * pad, cy + stdy * pad)  # set y-limits by padding around the average center of y
        # self.ax.set_xticks([])
        # self.ax.set_yticks([])
    else:
        self.ax.set_ylim([0, image_h])
    self.ax.set_xlim([0, image_w])
def plot_person(self, frame, plottables, image_h, image_w, zoom=True, pad=3):
    """
    Function that is used by matplotlib.animation.FuncAnimation to iteratively plot given a frame
    :param frame: Frame to plot
    :param plottables: Dictionary within dictionary of the coordinates for a person for each frame
    :param image_h: Video height
    :param image_w: Video width
    :param zoom: Boolean indicating whether or not the animation is zoomed in
    :param pad: Float/integer indicating what the padded region around the animated person should be
    :return: Tuple with all plottable objects
    """
    for person in plottables[frame].keys():
        plot_coords = plottables[frame][person]
        # NOTE(review): this adds image_h to the stored array *in place*, so
        # processing the same frame twice would shift the coordinates again —
        # confirm FuncAnimation visits each frame exactly once per run.
        plot_coords[:, 1] = plot_coords[:, 1] + image_h
        # Keep only keypoints whose (x, y) are both non-zero (0 = missing).
        coord_dict = {key: value for key, value in dict(enumerate(plot_coords[:, :2])).items() if
                      not (value == 0).any()}
        present_keypoints = set(coord_dict.keys())
        # A skeleton connection is drawable only if both endpoints are present.
        present_connections = [connection for connection in self.connections if
                               len(present_keypoints & set(connection)) == 2]
        plot_lines = [np.transpose([coord_dict[a], coord_dict[b]]) for a, b in present_connections]
        # zip_longest pads with None so stale line artists get cleared.
        for coords, line in zip_longest(plot_lines, self.lines):
            if isinstance(coords, np.ndarray):
                line.set_data(coords[0],coords[1])
            else:
                line.set_data([],[])
        plot_coords = plot_coords[~(plot_coords == 0).any(axis=1)]
        self.points.set_data(plot_coords[:, 0], plot_coords[:, 1])
        self.annotation.set_text('Frame: {}'.format(frame))
        self.ax.set_xlabel('X coordinate')
        self.ax.set_ylabel('Y coordinate')
        if zoom:
            # Pad the x-limits so the zoomed view keeps the video aspect ratio
            # (self.ydiff was precomputed in set_axis_limits).
            aspect = image_w / image_h
            xlow, xhigh = plot_coords[:, 0].min(), plot_coords[:, 0].max()  # get x higher and lower limit
            xdiff = xhigh - xlow  # calculate the total range of x
            xpad = ((self.ydiff * aspect) - xdiff) / 2  # calculate how much the xlimits should be padded on either side to set aspect ratio correctly
            self.ax.set_xlim(xlow - xpad, xhigh + xpad)  # set new limits
        # NOTE(review): only the first person in the frame is drawn — presumably
        # intended because only the runner remains after filtering; verify.
        break
    return tuple(self.lines) + (self.points, self.annotation)
def run_animation(self, clip_name, fragment, zoom=True, pad=3, interval=100):
    """
    Complete pipeline to process and plot data.
    :param clip_name: Clip name with at the end .mp4 which will be fetched from the video_data folder
    :param fragment: String indicating what part should be visualised (run, turn or all)
    :param zoom: Whether the animation follows/zooms in on the person
    :param pad: Padding (in y standard deviations) around the person when zoomed
    :param interval: Delay between animation frames in milliseconds
    :return: None
    """
    preprocessor = self.process_data(clip_name)
    period_person_division = preprocessor.period_person_division
    running_person_identifiers = preprocessor.get_running_person_identifiers()
    running_fragments = preprocessor.get_running_fragments()
    turning_fragments = preprocessor.get_turning_fragments()
    period_running_person_division, running_plottables, turning_plottables = self.get_plottables(period_person_division, running_person_identifiers, running_fragments, turning_fragments)
    # Select which subset of frames to animate.
    if fragment == 'run':
        plottables = running_plottables
    elif fragment == 'turn':
        plottables = turning_plottables
    else:
        plottables = period_running_person_division
    self.set_axis_limits(plottables, preprocessor.height, preprocessor.width, zoom=zoom, pad=pad)
    # The returned object must stay referenced until plt.show() returns,
    # otherwise the animation is garbage-collected and never plays.
    animate = animation.FuncAnimation(fig=self.fig, func=self.plot_person, frames=plottables.keys(), fargs=(plottables,
                                      preprocessor.height, preprocessor.width, zoom, pad), interval=interval, init_func=self.func_init, blit=False, repeat=False)
    plt.show()
# Demo entry point: animate the running fragments of one sample clip.
if __name__ == '__main__':
    visualisation = Visualisation()
    visualisation.run_animation(clip_name='jeroenkrol_28121995_184_80_16500.mp4', fragment='run', zoom=True, pad=3, interval=100)
|
12,094 | b011dfd708d7c57f1476edeeb4088617fbab0ef2 | import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch
class PositionEncoding(nn.Module):
    """Positional embedding lookup with index 0 reserved for padding.

    NOTE(review): despite the sinusoid helper below, the layer is currently a
    *trainable* normally-initialised nn.Embedding — the from_pretrained
    sinusoid construction is commented out, and the stored `freeze` flag has
    no effect in this configuration. Confirm which variant is intended.
    """
    def __init__(self, word_dim, max_seq_len, padding_idx=0, freeze=True):
        super(PositionEncoding, self).__init__()
        # +1 so positions can run 1..max_seq_len with 0 kept for padding.
        n_position = max_seq_len + 1
        self.max_seq_len = max_seq_len
        self.freeze = freeze
        # self.position_enc = nn.Embedding.from_pretrained(
        #     self.get_sinusoid_encoding_table(
        #         n_position, word_dim, padding_idx=padding_idx
        #     ),
        #     freeze=freeze,
        # )
        self.position_enc = nn.Embedding(n_position, word_dim, padding_idx=padding_idx)
        nn.init.normal_(self.position_enc.weight)
    def forward(self, pos_idxs):
        # pos_idxs: LongTensor of position indices -> their embeddings.
        return self.position_enc(pos_idxs)
    @staticmethod
    def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
        """ Sinusoid position encoding table """
        def cal_angle(position, hid_idx):
            return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
        def get_posi_angle_vec(position):
            return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
        sinusoid_table = np.array(
            [get_posi_angle_vec(pos_i) for pos_i in range(n_position)]
        )
        sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
        sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
        if padding_idx is not None:
            # zero vector for padding dimension
            sinusoid_table[padding_idx] = 0.0
        return torch.FloatTensor(sinusoid_table)
class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention

    Computes softmax(q k^T / temperature) v over batched inputs of shape
    (batch, len, dim), optionally masking out padded key positions.
    """
    def __init__(self, temperature, attn_dropout=0.1):
        super(ScaledDotProductAttention, self).__init__()
        self.temperature = temperature  # usually sqrt(d_k)
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)
    def forward(self, q, k, v, mask=None):
        """Return (attended values, attention weights).

        :param mask: optional (batch, len_k) tensor; positions where mask == 0
            are excluded from attention.
        """
        attn = torch.bmm(q, k.transpose(1, 2))
        attn = attn / self.temperature
        if mask is not None:
            # BUG FIX: masked_fill only accepts boolean masks in modern
            # PyTorch; the original passed the integer tensor (1 - mask)
            # directly, which raises on current releases.
            attn = attn.masked_fill((1 - mask).unsqueeze(1).bool(), -np.inf)
        attn = self.softmax(attn)
        attn = self.dropout(attn)
        output = torch.bmm(attn, v)
        return output, attn
class MultiHeadAttention(nn.Module):
    """Multi-head attention with an optional 'partition' mode.

    In partition mode the model dimension is split in two halves, each with
    its own q/k/v projections and output projection; the halves are
    concatenated back together before the shared scaled-dot-product
    attention. (Presumably the halves carry content vs. position features —
    TODO confirm against the calling model.)
    """
    def __init__(self, n_head, d_model, d_k, d_v, attention_drop=0.1, residual_drop=0.1, partition=False):
        super(MultiHeadAttention, self).__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        self.d_model = d_model
        self.partition = partition
        if not partition:
            # Single projection per role covering all heads at once.
            self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
            self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
            self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
            nn.init.xavier_normal_(self.w_qs.weight)
            nn.init.xavier_normal_(self.w_ks.weight)
            nn.init.xavier_normal_(self.w_vs.weight)
        else:
            # Separate projections for each half of the feature dimension.
            self.w_qs_c = nn.Linear(d_model//2, n_head * d_k //2, bias=False)
            self.w_ks_c = nn.Linear(d_model//2, n_head * d_k //2, bias=False)
            self.w_vs_c = nn.Linear(d_model//2, n_head * d_v //2, bias=False)
            self.w_qs_p = nn.Linear(d_model//2, n_head * d_k //2, bias=False)
            self.w_ks_p = nn.Linear(d_model//2, n_head * d_k //2, bias=False)
            self.w_vs_p = nn.Linear(d_model//2, n_head * d_v //2, bias=False)
            nn.init.xavier_normal_(self.w_qs_c.weight)
            nn.init.xavier_normal_(self.w_ks_c.weight)
            nn.init.xavier_normal_(self.w_vs_c.weight)
            nn.init.xavier_normal_(self.w_qs_p.weight)
            nn.init.xavier_normal_(self.w_ks_p.weight)
            nn.init.xavier_normal_(self.w_vs_p.weight)
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5), attn_dropout=attention_drop)
        self.layer_norm = nn.LayerNorm(d_model)
        if not partition:
            self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
        else:
            self.fc1 = nn.Linear(n_head * d_v // 2, d_model // 2, bias=False)
            self.fc2 = nn.Linear(n_head * d_v // 2, d_model // 2, bias=False)
        self.dropout = nn.Dropout(residual_drop)
    def forward(self, q, k, v, mask=None):
        """Attend q over (k, v); returns (output, attention weights).

        NOTE(review): mask.repeat() below is called unconditionally, so
        mask=None raises despite the default value — callers must always
        supply a mask.
        """
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()
        residual = q
        if not self.partition:
            q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
            k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
            v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
        else:
            # Project each half independently, then re-concatenate per head.
            q_c, q_p = q.chunk(2, dim=-1)
            k_c, k_p = k.chunk(2, dim=-1)
            v_c, v_p = v.chunk(2, dim=-1)
            q_c, q_p = self.w_qs_c(q_c).view(sz_b, len_q, n_head, d_k//2), self.w_qs_p(q_p).view(sz_b, len_q, n_head, d_k//2)
            k_c, k_p = self.w_ks_c(k_c).view(sz_b, len_k, n_head, d_k//2), self.w_ks_p(k_p).view(sz_b, len_k, n_head, d_k//2)
            v_c, v_p = self.w_vs_c(v_c).view(sz_b, len_v, n_head, d_v//2), self.w_vs_p(v_p).view(sz_b, len_v, n_head, d_v//2)
            q = torch.cat((q_c, q_p), dim=-1)
            k = torch.cat((k_c, k_p), dim=-1)
            v = torch.cat((v_c, v_p), dim=-1)
        # Fold the head dimension into the batch dimension for bmm.
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k)  # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k)  # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v)  # (n*b) x lv x dv
        mask = mask.repeat(n_head, 1)  # (n*b) x .. x ..
        output, attn = self.attention(q, k, v, mask=mask)
        output = output.view(n_head, sz_b, len_q, d_v)
        if not self.partition:
            output = (
                output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
            )  # b x lq x (n*dv)
            output = self.dropout(self.fc(output))
        else:
            # Split again so each half goes through its own output projection.
            output_c, output_p = output.chunk(2, dim=-1)
            output_c = output_c.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
            output_p = output_p.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
            output_c, output_p = self.dropout(self.fc1(output_c)), self.dropout(self.fc2(output_p))
            output = torch.cat((output_c, output_p), dim=-1)
        # Residual connection followed by layer normalisation.
        output = self.layer_norm(output + residual)
        return output, attn
class PositionwiseFeedForward(nn.Module):
    """Position-wise two-layer feed-forward block with residual + LayerNorm.

    With ``partition=True`` the feature dimension is split in half and each
    half is transformed by its own pair of linear layers before being
    concatenated back together.
    """
    def __init__(self, d_in, d_hid, relu_drop=0.1, residual_drop=0.1, partition=False):
        super(PositionwiseFeedForward, self).__init__()
        self.partition = partition
        if not partition:
            self.w_1 = nn.Linear(d_in, d_hid)  # position-wise
            self.w_2 = nn.Linear(d_hid, d_in)  # position-wise
        else:
            self.w_1c = nn.Linear(d_in//2, d_hid//2)  # position-wise
            self.w_2c = nn.Linear(d_hid//2, d_in//2)  # position-wise
            self.w_1p = nn.Linear(d_in//2, d_hid//2)  # position-wise
            self.w_2p = nn.Linear(d_hid//2, d_in//2)  # position-wise
        self.layer_norm = nn.LayerNorm(d_in)
        self.relu_dropout = nn.Dropout(relu_drop)
        self.residual_dropout = nn.Dropout(residual_drop)
    def _ffn(self, inp, first, second):
        # One expand -> ReLU -> dropout -> contract -> dropout pass.
        hidden = self.relu_dropout(F.relu(first(inp)))
        return self.residual_dropout(second(hidden))
    def forward(self, x):
        residual = x
        if self.partition:
            x_c, x_p = x.chunk(2, dim=-1)
            transformed = torch.cat(
                (self._ffn(x_c, self.w_1c, self.w_2c),
                 self._ffn(x_p, self.w_1p, self.w_2p)),
                dim=-1,
            )
        else:
            transformed = self._ffn(x, self.w_1, self.w_2)
        return self.layer_norm(transformed + residual)
class EncoderLayer(nn.Module):
    """One Transformer encoder layer: masked self-attention + position-wise FFN.

    Padded positions (mask == 0) are zeroed out after each sub-layer so they
    cannot leak into subsequent computation.
    """
    def __init__(self, d_model, d_inner, n_head, d_k, d_v, relu_drop=0.1, attention_drop=0.1, residual_drop=0.1, partition=False):
        super(EncoderLayer, self).__init__()
        self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, attention_drop=attention_drop, residual_drop=residual_drop, partition=partition)
        self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, relu_drop=relu_drop, residual_drop=residual_drop, partition=partition)
    def forward(self, enc_input, mask):
        """Return (encoded output, self-attention weights).

        :param mask: (batch, seq_len) tensor, 1 for real tokens, 0 for padding.
        """
        enc_output, enc_slf_attn = self.slf_attn(
            enc_input, enc_input, enc_input, mask=mask
        )
        # BUG FIX: masked_fill_ requires a boolean mask in modern PyTorch;
        # the original passed the integer tensor (1 - mask) directly.
        pad_mask = (1 - mask).unsqueeze(-1).bool()
        enc_output.masked_fill_(pad_mask, 0)
        enc_output = self.pos_ffn(enc_output)
        enc_output.masked_fill_(pad_mask, 0)
        return enc_output, enc_slf_attn
|
12,095 | 834bc7fc97211ba99b8b3e8b7b4b59da3edceccd | # Leo colorizer control file for erlang mode.
# This file is in the public domain.
# Properties for erlang mode.
properties = {
    "lineComment": "%",  # Erlang line comments start with '%'
}
# Attributes dict for erlang_main ruleset.
# (Generated table: lexer options for the main ruleset.)
erlang_main_attributes_dict = {
    "default": "null",
    "digit_re": "",
    "escape": "\\",
    "highlight_digits": "true",
    "ignore_case": "false",
    "no_word_sep": "",
}
# Dictionary of attributes dictionaries for erlang mode.
attributesDictDict = {
    "erlang_main": erlang_main_attributes_dict,
}
# Keywords dict for erlang_main ruleset.
erlang_main_keywords_dict = {
"-behaviour": "keyword3",
"-compile": "keyword3",
"-define": "keyword3",
"-else": "keyword3",
"-endif": "keyword3",
"-export": "keyword3",
"-file": "keyword3",
"-ifdef": "keyword3",
"-ifndef": "keyword3",
"-import": "keyword3",
"-include": "keyword3",
"-include_lib": "keyword3",
"-module": "keyword3",
"-record": "keyword3",
"-undef": "keyword3",
"abs": "keyword2",
"acos": "keyword2",
"after": "keyword1",
"alive": "keyword2",
"apply": "keyword2",
"asin": "keyword2",
"atan": "keyword2",
"atan2": "keyword2",
"atom": "keyword2",
"atom_to_list": "keyword2",
"begin": "keyword1",
"binary": "keyword2",
"binary_to_list": "keyword2",
"binary_to_term": "keyword2",
"case": "keyword1",
"catch": "keyword1",
"check_process_code": "keyword2",
"concat_binary": "keyword2",
"cond": "keyword1",
"constant": "keyword2",
"cos": "keyword2",
"cosh": "keyword2",
"date": "keyword2",
"delete_module": "keyword2",
"disconnect_node": "keyword2",
"element": "keyword2",
"end": "keyword1",
"erase": "keyword2",
"exit": "keyword2",
"exp": "keyword2",
"float": "keyword2",
"float_to_list": "keyword2",
"fun": "keyword1",
"function": "keyword2",
"get": "keyword2",
"get_cookie": "keyword2",
"get_keys": "keyword2",
"group_leader": "keyword2",
"halt": "keyword2",
"hash": "keyword2",
"hd": "keyword2",
"if": "keyword1",
"integer": "keyword2",
"integer_to_list": "keyword2",
"is_alive": "keyword2",
"length": "keyword2",
"let": "keyword1",
"link": "keyword2",
"list": "keyword2",
"list_to_atom": "keyword2",
"list_to_binary": "keyword2",
"list_to_float": "keyword2",
"list_to_integer": "keyword2",
"list_to_pid": "keyword2",
"list_to_tuple": "keyword2",
"load_module": "keyword2",
"log": "keyword2",
"log10": "keyword2",
"make_ref": "keyword2",
"math": "keyword2",
"module_loaded": "keyword2",
"monitor_node": "keyword2",
"node": "keyword2",
"nodes": "keyword2",
"now": "keyword2",
"number": "keyword2",
"of": "keyword1",
"open_port": "keyword2",
"pi": "keyword2",
"pid": "keyword2",
"pid_to_list": "keyword2",
"port_close": "keyword2",
"port_info": "keyword2",
"ports": "keyword2",
"pow": "keyword2",
"power": "keyword2",
"preloaded": "keyword2",
"process": "keyword2",
"process_flag": "keyword2",
"process_info": "keyword2",
"processes": "keyword2",
"purge_module": "keyword2",
"put": "keyword2",
"query": "keyword1",
"receive": "keyword1",
"record": "keyword2",
"reference": "keyword2",
"register": "keyword2",
"registered": "keyword2",
"round": "keyword2",
"self": "keyword2",
"set_cookie": "keyword2",
"set_node": "keyword2",
"setelement": "keyword2",
"sin": "keyword2",
"sinh": "keyword2",
"size": "keyword2",
"spawn": "keyword2",
"spawn_link": "keyword2",
"split_binary": "keyword2",
"sqrt": "keyword2",
"statistics": "keyword2",
"tan": "keyword2",
"tanh": "keyword2",
"term_to_binary": "keyword2",
"throw": "keyword2",
"time": "keyword2",
"tl": "keyword2",
"trunc": "keyword2",
"tuple_to_list": "keyword2",
"unlink": "keyword2",
"unregister": "keyword2",
"when": "keyword1",
"whereis": "keyword2",
}
# Dictionary of keywords dictionaries for erlang mode.
keywordsDictDict = {
"erlang_main": erlang_main_keywords_dict,
}
# Rules for erlang_main ruleset.
# Rules for erlang_main ruleset.
# Each rule is a generated thin wrapper binding one lexer pattern (comment,
# string, operator, or keyword regexp) to a highlight kind; the colorer calls
# them via rulesDict1, dispatching on the first character at position i.
def erlang_rule0(colorer, s, i):
    # '%' starts a line comment.
    return colorer.match_eol_span(s, i, kind="comment1", seq="%")
def erlang_rule1(colorer, s, i):
    return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"")
def erlang_rule2(colorer, s, i):
    return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
        no_line_break=True)
def erlang_rule3(colorer, s, i):
    # Token preceding '(' is highlighted as a function name.
    return colorer.match_mark_previous(s, i, kind="function", pattern="(",
        exclude_match=True)
def erlang_rule4(colorer, s, i):
    return colorer.match_mark_previous(s, i, kind="literal2", pattern=":",
        exclude_match=True)
def erlang_rule5(colorer, s, i):
    # Character literals like $a.
    return colorer.match_seq_regexp(s, i, kind="literal3", regexp="\\$.\\w*")
def erlang_rule6(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="literal3", seq="badarg")
def erlang_rule7(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="literal3", seq="nocookie")
def erlang_rule8(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="literal3", seq="false")
def erlang_rule9(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="literal3", seq="true")
def erlang_rule10(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="->")
def erlang_rule11(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="<-")
def erlang_rule12(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq=".")
def erlang_rule13(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq=";")
def erlang_rule14(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="=")
def erlang_rule15(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="/")
def erlang_rule16(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="|")
def erlang_rule17(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="#")
def erlang_rule18(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="+")
def erlang_rule19(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="*")
def erlang_rule20(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq=":")
def erlang_rule21(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="{")
def erlang_rule22(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="}")
def erlang_rule23(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="[")
def erlang_rule24(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="]")
def erlang_rule25(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq=",")
def erlang_rule26(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="?")
def erlang_rule27(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="!")
# Word operators are matched with \b boundaries so they only fire on whole words.
def erlang_rule28(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind="operator", regexp="\\bdiv\\b")
def erlang_rule29(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind="operator", regexp="\\brem\\b")
def erlang_rule30(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind="operator", regexp="\\bor\\b")
def erlang_rule31(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind="operator", regexp="\\bxor\\b")
def erlang_rule32(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind="operator", regexp="\\bbor\\b")
def erlang_rule33(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind="operator", regexp="\\bbxor\\b")
def erlang_rule34(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind="operator", regexp="\\bbsl\\b")
def erlang_rule35(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind="operator", regexp="\\bbsr\\b")
def erlang_rule36(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind="operator", regexp="\\band\\b")
def erlang_rule37(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind="operator", regexp="\\bband\\b")
def erlang_rule38(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind="operator", regexp="\\bnot\\b")
def erlang_rule39(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind="operator", regexp="\\bbnot\\b")
def erlang_rule40(colorer, s, i):
    # Fallback: look the current word up in the keyword table.
    return colorer.match_keywords(s, i)
# Rules dict for erlang_main ruleset.
rulesDict1 = {
"!": [erlang_rule27,],
"\"": [erlang_rule1,],
"#": [erlang_rule17,],
"$": [erlang_rule5,],
"%": [erlang_rule0,],
"'": [erlang_rule2,],
"(": [erlang_rule3,],
"*": [erlang_rule19,],
"+": [erlang_rule18,],
",": [erlang_rule25,],
"-": [erlang_rule10, erlang_rule40,],
".": [erlang_rule12,],
"/": [erlang_rule15,],
"0": [erlang_rule40,],
"1": [erlang_rule40,],
"2": [erlang_rule40,],
"3": [erlang_rule40,],
"4": [erlang_rule40,],
"5": [erlang_rule40,],
"6": [erlang_rule40,],
"7": [erlang_rule40,],
"8": [erlang_rule40,],
"9": [erlang_rule40,],
":": [erlang_rule4, erlang_rule20,],
";": [erlang_rule13,],
"<": [erlang_rule11,],
"=": [erlang_rule14,],
"?": [erlang_rule26,],
"@": [erlang_rule40,],
"A": [erlang_rule40,],
"B": [erlang_rule40,],
"C": [erlang_rule40,],
"D": [erlang_rule40,],
"E": [erlang_rule40,],
"F": [erlang_rule40,],
"G": [erlang_rule40,],
"H": [erlang_rule40,],
"I": [erlang_rule40,],
"J": [erlang_rule40,],
"K": [erlang_rule40,],
"L": [erlang_rule40,],
"M": [erlang_rule40,],
"N": [erlang_rule40,],
"O": [erlang_rule40,],
"P": [erlang_rule40,],
"Q": [erlang_rule40,],
"R": [erlang_rule40,],
"S": [erlang_rule40,],
"T": [erlang_rule40,],
"U": [erlang_rule40,],
"V": [erlang_rule40,],
"W": [erlang_rule40,],
"X": [erlang_rule40,],
"Y": [erlang_rule40,],
"Z": [erlang_rule40,],
"[": [erlang_rule23,],
"]": [erlang_rule24,],
"_": [erlang_rule40,],
"a": [erlang_rule36, erlang_rule40,],
"b": [erlang_rule6, erlang_rule32, erlang_rule33, erlang_rule34, erlang_rule35, erlang_rule37, erlang_rule39, erlang_rule40,],
"c": [erlang_rule40,],
"d": [erlang_rule28, erlang_rule40,],
"e": [erlang_rule40,],
"f": [erlang_rule8, erlang_rule40,],
"g": [erlang_rule40,],
"h": [erlang_rule40,],
"i": [erlang_rule40,],
"j": [erlang_rule40,],
"k": [erlang_rule40,],
"l": [erlang_rule40,],
"m": [erlang_rule40,],
"n": [erlang_rule7, erlang_rule38, erlang_rule40,],
"o": [erlang_rule30, erlang_rule40,],
"p": [erlang_rule40,],
"q": [erlang_rule40,],
"r": [erlang_rule29, erlang_rule40,],
"s": [erlang_rule40,],
"t": [erlang_rule9, erlang_rule40,],
"u": [erlang_rule40,],
"v": [erlang_rule40,],
"w": [erlang_rule40,],
"x": [erlang_rule31, erlang_rule40,],
"y": [erlang_rule40,],
"z": [erlang_rule40,],
"{": [erlang_rule21,],
"|": [erlang_rule16,],
"}": [erlang_rule22,],
}
# x.rulesDictDict for erlang mode.
# Maps each ruleset name to its first-character dispatch table.
rulesDictDict = {
    "erlang_main": rulesDict1,
}
# Import dict for erlang mode (no delegated rulesets).
importDict = {}
|
12,096 | f7ce62b2b4546e3c4528ac6bba54dc6500319699 | class Cita:
def __init__(self,id,paciente,fecha,hora,motivo,estado,usermedico,medico):
    """Medical appointment record (Spanish field names).

    :param id: appointment identifier
    :param paciente: patient
    :param fecha: date
    :param hora: time
    :param motivo: reason for the visit
    :param estado: status
    :param usermedico: doctor's user name
    :param medico: doctor
    """
    self.id=id
    self.paciente=paciente
    self.fecha=fecha
    self.hora=hora
    self.motivo=motivo
    self.estado=estado
    self.usermedico=usermedico
    self.medico=medico
|
12,097 | 52ec9af768accf106a1bfabd2c0f81c744ae9f4f | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import svm
from stemming.porter2 import stem
import re
def readFromCSV(names):
    """Load each "<name>.csv" file as a float64 numpy array.

    :param names: iterable of file paths without the ".csv" suffix
    :return: list of 2-D numpy arrays, one per input name
    """
    # BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0;
    # to_numpy() is the supported replacement with the same result.
    return [np.array(pd.read_csv(name + ".csv", dtype='float64', header=None).to_numpy())
            for name in names]
def plot(X, y):
    """Scatter-plot 2-D samples on the current axes, styled by binary label.

    :param X: array of shape (n, 2) with the sample coordinates
    :param y: array of shape (n, 1) with labels 0 or 1
    """
    marker = {0: 'o', 1: '^'}
    color = {0: 'b', 1: 'r'}
    # BUG FIX: the original loop reused the parameter name `y` as its loop
    # variable, shadowing the label array; use distinct names instead.
    for point, label in zip(X, y):
        cls = label[0]
        plt.scatter(point[0], point[1], marker=marker[cls], color=color[cls])
def plotGaussianBoundary(X, clf):
    """Draw the decision boundary of a fitted classifier over the range of X.

    Evaluates clf.predict on a 0.02-spaced mesh covering X's bounding box and
    draws the resulting contour on the current matplotlib axes.
    """
    h = 0.02  # mesh step size
    x_min, x_max = X[:, 0].min(), X[:, 0].max()
    y_min, y_max = X[:, 1].min(), X[:, 1].max()
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # Predict a class for every mesh point, then reshape back to the grid.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contour(xx, yy, Z, cmap=plt.cm.Paired)
def predictionAccuracy(actualValues, predictions):
    """Return the fraction of predictions matching the actual 0/1 labels."""
    total = predictions.shape[0]
    mismatches = np.sum(abs(actualValues - predictions))
    return (total - mismatches) / total
def bestCoefficients(X, y, Xval, yval):
    """Grid-search (C, sigma) for an RBF-kernel SVM using a validation set.

    Trains on (X, y), scores each candidate pair on (Xval, yval), and returns
    the pair with the highest validation accuracy.

    :return: tuple (C, sigma)
    """
    proposedValues = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
    # Hoisted out of the inner loop (it is invariant).
    sigmaToGamma = lambda x: 1 / (x ** 2 * 2)
    # BUG FIX: the original started at accuracy = 0 and only assigned C/sigma
    # on a strict improvement, so an all-zero accuracy grid raised
    # UnboundLocalError. Seed with the first candidates and accuracy = -1 so
    # a result is always returned.
    accuracy = -1.0
    C, sigma = proposedValues[0], proposedValues[0]
    for sigmaTest in proposedValues:
        for CTest in proposedValues:
            clf = svm.SVC(kernel='rbf', gamma=sigmaToGamma(sigmaTest), C=CTest)
            clf.fit(X, y.flatten())
            predictions = clf.predict(Xval)
            accuracyTMP = predictionAccuracy(yval.flatten(), predictions)
            if accuracyTMP > accuracy:
                accuracy = accuracyTMP
                sigma = sigmaTest
                C = CTest
    return C, sigma
def processEmail(data):
    """Normalize raw e-mail text into a list of stemmed word tokens.

    Lower-cases the text, strips HTML tags, collapses numbers, URLs, e-mail
    addresses and dollar signs into placeholder tokens, removes punctuation,
    and Porter2-stems each remaining word.
    """
    # some text formatting #
    data = data.lower()
    data = re.sub(r'<[^<>]+>', r' ', data)                     # strip HTML tags
    data = re.sub(r'[0-9]+', 'number', data)                   # digits -> 'number'
    data = re.sub(r'(http|https)://[^\s]*', 'httpaddr', data)  # URLs -> 'httpaddr'
    data = re.sub(r'[^\s]+@[^\s]+', 'emailaddr', data)         # addresses -> 'emailaddr'
    data = re.sub(r'[$]+', 'dollar', data)                     # '$' runs -> 'dollar'
    data = re.sub('[^a-zA-Z0-9]', ' ', data)                   # drop punctuation
    data = data.split()
    data = [stem(word) for word in data]                       # Porter2 stemming
    return data
def main():
    """Run the three SVM exercises: a linear kernel demo, a gaussian kernel
    with grid-searched parameters, and spam classification on e-mail text."""
    names = ["X1", "X2", "X3", "XspamTest", "XspamTrain", "y1", "y2", "y3", "yspamTest", "yspamTrain", "X3val", "y3val"]
    X1, X2, X3, XspamTest, XspamTrain, y1, y2, y3, yspamTest, yspamTrain, X3val, y3val = readFromCSV(names)
    sigmaToGamma = lambda x: 1/(x**2 * 2)  # sklearn's gamma = 1 / (2 * sigma^2)
    # playing with linear kernel #
    clf = svm.SVC(C=1, kernel='linear')
    clf.fit(X1, y1.flatten())
    # Convert the separating hyperplane to slope/intercept form for plotting.
    coef = clf.coef_[0]
    a = -coef[0] / coef[1]
    xx = np.linspace(0, 4)
    yy = a * xx - (clf.intercept_[0]) / coef[1]
    plt.plot(xx, yy, 'k-')
    plot(X1, y1)
    plt.show()
    # playing with gaussian kernel #
    clf = svm.SVC(kernel='rbf', gamma=sigmaToGamma(0.1), C=1)
    clf.fit(X2, y2.flatten())
    plotGaussianBoundary(X2, clf)
    plot(X2, y2)
    plt.show()
    # more playing with gaussian kernel, testing different parameters #
    C, sigma = bestCoefficients(X3, y3, X3val, y3val)
    clf = svm.SVC(kernel='rbf', gamma=sigmaToGamma(sigma), C=C)
    clf.fit(X3, y3.flatten())
    plotGaussianBoundary(X3, clf)
    plot(X3, y3)
    plt.show()
    # spam classification #
    with open('emailSample1.txt', 'r') as myfile:
        data = myfile.read().replace('\n', ' ')
    with open('vocab.txt', 'r') as myfile:
        vocab = myfile.read().replace('\n', ' ')
    vocab = vocab.split()
    # vocab.txt alternates "<index> <word>"; build word -> zero-based index.
    vocab = {vocab[x+1] : (int(vocab[x]) - 1) for x in range(0, len(vocab),2)}
    data = processEmail(data)
    data = [vocab[word] for word in data if word in vocab]
    # Bag-of-words indicator vector over the vocabulary.
    # NOTE(review): `vector` is built but never used below — presumably it was
    # meant to be classified with the trained spam model; confirm.
    vector = np.zeros((len(vocab), 1))
    for word in data:
        vector[word] = 1
    C, sigma = bestCoefficients(XspamTrain, yspamTrain, XspamTest, yspamTest)
    clf = svm.SVC(kernel='rbf', gamma=sigmaToGamma(sigma), C=C)
    clf.fit(XspamTrain, yspamTrain.flatten())
    trainAccuracy = predictionAccuracy(yspamTrain.flatten(), clf.predict(XspamTrain))
    testAccuracy = predictionAccuracy(yspamTest.flatten(), clf.predict(XspamTest))
    print(trainAccuracy, testAccuracy, C, sigma)
# Script entry point.
if __name__ == "__main__":
    main()
12,098 | 0c393693cc660134a1609e9e3882911c5e5292d6 | import cv2
import numpy as np
from PIL import Image
import torchvision.transforms as transforms
import os
import json
import torch
from tqdm import tqdm
# image transform
# Training-time image transform: random horizontal flip augmentation, then
# normalization of each RGB channel from [0, 1] to [-1, 1].
train_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Evaluation-time transform: deterministic (no augmentation).
test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
def makedir(root):
    """Create directory *root* (including parents) if it does not exist.

    Uses ``exist_ok=True`` so the operation is idempotent and free of the
    check-then-create race the original `exists()` guard had under
    concurrent callers.
    """
    os.makedirs(root, exist_ok=True)
def write_json(data, path):
    """Serialize *data* as JSON and write it to the file at *path*."""
    serialized = json.dumps(data)
    with open(path, 'w') as f:
        f.write(serialized)
def collate_fn(data, max_length):
    """Collate a list of samples into padded batch tensors.

    Each sample is a tuple (image, input_ids, token_type_ids,
    attention_mask, label) where the image tensor is 4-D with a leading
    singleton dimension and the id/type/mask sequences have length
    *max_length*.

    :return: (images, input_ids, token_type_ids, attention_masks, labels)
    """
    batch_size = len(data)
    _, image_c, image_h, image_w = data[0][0].shape
    images = torch.zeros(batch_size, image_c, image_h, image_w).float()
    input_ids = torch.zeros(batch_size, max_length).long()
    token_type_ids = torch.zeros(batch_size, max_length).long()
    attention_masks = torch.zeros(batch_size, max_length).long()
    labels = torch.zeros(batch_size).long()
    for idx, (image, ids, types, mask, label) in enumerate(data):
        images[idx] = image
        input_ids[idx] = torch.LongTensor(ids)
        token_type_ids[idx] = torch.LongTensor(types)
        attention_masks[idx] = torch.LongTensor(mask)
        labels[idx] = label
    return images, input_ids, token_type_ids, attention_masks, labels
def compute_topk(scoring_i2t, scoring_t2i, images_embeddings, text_embeddings, labels, k=[1, 10]):
    """Compute image->text and text->image top-k retrieval accuracies.

    :param scoring_i2t: module scoring image-to-text similarity vectors
    :param scoring_t2i: module scoring text-to-image similarity vectors
    :param images_embeddings: 3-D tensor (batch, regions, dim) — TODO confirm shape
    :param text_embeddings: 2-D tensor (batch, dim)
    :param labels: identity labels aligned with both embedding batches
    :return: [i2t@1, i2t@10, t2i@1, t2i@10] accuracies (percent)

    NOTE(review): the `k` parameter is ignored — `topk` below is always
    called with k=[1, 10]; confirm intent.
    NOTE(review): i2t pairs *raw* image embeddings with *normalized* text,
    while t2i pairs *normalized* image embeddings with *raw* text — verify
    this asymmetry is intended.
    """
    # L2-normalize along the feature dimension.
    images_embeddings_norm = images_embeddings/images_embeddings.norm(dim=2)[:, :, None]
    text_embeddings_norm = text_embeddings/text_embeddings.norm(dim=1)[:, None]
    batch_size = images_embeddings.shape[0]
    i2t = []
    t2i = []
    # Score one image (all its regions) against every text, one at a time,
    # to keep peak memory bounded.
    for i in tqdm(range(batch_size)):
        item_i2t = torch.matmul(images_embeddings[i, :, :]. unsqueeze(0), text_embeddings_norm.transpose(0, 1))
        item_t2i = torch.matmul(images_embeddings_norm[i, :, :].unsqueeze(0), text_embeddings.transpose(0, 1))
        item_i2t, item_t2i = item_i2t.transpose(1, 2), item_t2i.transpose(1, 2)
        item_i2t = scoring_i2t(item_i2t).squeeze().unsqueeze(0)
        item_t2i = scoring_t2i(item_t2i).squeeze(-1)
        i2t.append(item_i2t)
        t2i.append(item_t2i)
    # i2t = torch.matmul(images_embeddings, text_embeddings_norm.transpose(0, 1))
    # t2i = torch.matmul(images_embeddings_norm, text_embeddings.transpose(0, 1))
    i2t = torch.cat(i2t, dim=0)
    t2i = torch.cat(t2i, dim=0)
    t2i = t2i.transpose(0, 1)
    # i2t, t2i = i2t.transpose(1, 2), t2i.transpose(1, 2)
    # i2t = scoring_i2t(i2t).squeeze()
    # t2i = scoring_t2i(t2i).squeeze().transpose(0, 1)
    result = []
    result.extend(topk(i2t, labels, k=[1, 10]))
    result.extend(topk(t2i, labels, k=[1, 10]))
    return result
def topk(sim, labels, k=[1, 10]):
    """Compute top-k retrieval accuracy (percent) for each cutoff in *k*.

    :param sim: similarity matrix of shape (num_gallery, num_query); ranking
        is taken along dim 0
    :param labels: 1-D tensor; labels[i] labels both gallery row i and
        query column i
    :param k: cutoffs to evaluate (never mutated, so the shared mutable
        default is safe here)
    :return: list of scalar float tensors, one accuracy per cutoff
    """
    result = []
    maxk = max(k)
    size_total = len(labels)
    # Indices of the maxk most similar gallery items for each query column.
    _, pred_index = sim.topk(maxk, 0, True, True)
    pred_labels = labels[pred_index]
    correct = pred_labels.eq(labels.view(1, -1).expand_as(pred_labels))
    # FIX: removed leftover debug print() calls and renamed the loop variable,
    # which previously shadowed the function name `topk`.
    for cutoff in k:
        correct_k = torch.sum(correct[:cutoff], dim=0)
        correct_k = torch.sum(correct_k > 0).float()
        result.append(correct_k * 100 / size_total)
    return result
|
12,099 | 91c989a6931568ea399dac663fea5e243cd129f1 | #!/usr/bin/python3.4
# -*-coding:Utf-8 -*
from lib.inherit_docstring import inherit_docstring
from lib.choices import choices
from lib.gauss_int import gauss_int
from random import randint
from src.meta.ABCInheritableDocstringsMeta import ABCInheritableDocstringsMeta
from mario.bridge.events.action_events import Jump, Left, Right, Down, Fire
from src.EvolutiveGenerator.GeneticElementFactory import GeneticElementFactory
from src.entities.ActionEventData import ActionEventData
class ActionEventDataFactory(GeneticElementFactory, metaclass=ABCInheritableDocstringsMeta):
    """ActionEventData factory"""

    @property
    @inherit_docstring
    def genetic_element_class(self):
        return ActionEventData

    # Candidate action event classes a genome may trigger.
    ACTION_CLASSES = (Jump, Left, Right, Down, Fire)

    @classmethod
    @inherit_docstring
    def create(cls):
        # Pair a randomly drawn action with a duration suited to its type.
        action_class = cls.createActionClass()
        return ActionEventData(action_class, cls.createDuration(action_class))

    @classmethod
    @inherit_docstring
    def mutate(cls, element):
        # Mutate either the action type or its duration, with equal probability.
        if randint(0, 1):
            element.action_class = cls.createActionClass()
        else:
            element.duration = cls.createDuration(element.action_class)

    @classmethod
    def hydrate(cls, data):
        """Rebuild an ActionEventData from its serialized dict form.

        :raises ValueError: if data['action_class'] names no known action
        """
        for action_class in cls.ACTION_CLASSES:
            if action_class.__name__ == data['action_class']:
                return ActionEventData(action_class, data['duration'])
        # BUG FIX: the original *returned* the ValueError instance instead of
        # raising it, silently handing callers an exception object.
        raise ValueError("Action class {} doesn't exist.".format(data['action_class']))

    @classmethod
    def createActionClass(cls):
        """Draw a random action class, weighted toward Jump and Right."""
        return choices(cls.ACTION_CLASSES, weights=[35, 10, 35, 10, 10])[0]

    @staticmethod
    def createDuration(action_class):
        """Draw a random duration appropriate for *action_class*."""
        if action_class == Jump:
            return gauss_int(2, 38)
        else:
            return randint(0, 25)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.