text stringlengths 38 1.54M |
|---|
from binascii import hexlify, unhexlify
from io import BytesIO
from .. import hashes, compact, ec, bip32, script
from ..networks import NETWORKS
from .errors import DescriptorError
from .base import DescriptorBase
from .miniscript import Miniscript
from .arguments import Key
class Descriptor(DescriptorBase):
    """Bitcoin output-script descriptor.

    Wraps either a single ``key`` (pkh / wpkh / tr descriptors) or a
    ``miniscript`` (sh / wsh / sh-wsh descriptors).  The ``sh``/``wsh``/
    ``wpkh``/``taproot`` flags select how the inner key or script is wrapped
    into the final scriptPubKey.
    """

    def __init__(self, miniscript=None, sh=False, wsh=True, key=None, wpkh=True, taproot=False):
        # TODO: add support for taproot scripts
        if key is None and miniscript is None:
            raise DescriptorError("Provide either miniscript or a key")
        if miniscript is not None:
            # will raise if can't verify
            miniscript.verify()
            if miniscript.type != "B":
                raise DescriptorError("Top level miniscript should be 'B'")
            # multipath keys must all agree on the number of branches,
            # otherwise branch selection would be ambiguous
            branches = [k.branches for k in miniscript.keys]
            branch = None
            for b in branches:
                if b is not None:
                    if branch is None:
                        branch = b
                    else:
                        if len(branch) != len(b):
                            raise DescriptorError(
                                "All branches should have the same length"
                            )
        self.sh = sh
        self.wsh = wsh
        self.key = key
        self.miniscript = miniscript
        self.wpkh = wpkh
        self.taproot = taproot
        # make sure all keys are either taproot or not
        for k in self.keys:
            k.taproot = taproot

    @property
    def script_len(self):
        # Byte length of the resulting script.
        if self.taproot:
            return 34  # OP_1 <32:xonly>
        if self.miniscript:
            return len(self.miniscript)
        if self.wpkh:
            return 22  # 00 <20:pkh>
        return 25  # OP_DUP OP_HASH160 <20:pkh> OP_EQUALVERIFY OP_CHECKSIG

    @property
    def num_branches(self):
        # Maximum number of multipath branches across all keys.
        return max([k.num_branches for k in self.keys])

    def branch(self, branch_index=None):
        """Return a copy of the descriptor restricted to one multipath branch."""
        if self.miniscript:
            return type(self)(
                self.miniscript.branch(branch_index),
                self.sh,
                self.wsh,
                None,
                self.wpkh,
                self.taproot,
            )
        else:
            return type(self)(
                None, self.sh, self.wsh, self.key.branch(branch_index), self.wpkh, self.taproot
            )

    @property
    def is_wildcard(self):
        # True if any key contains a ranged (wildcard) derivation.
        return any([key.is_wildcard for key in self.keys])

    @property
    def is_wrapped(self):
        # segwit nested in p2sh (sh-wpkh / sh-wsh)
        return self.sh and self.is_segwit

    @property
    def is_legacy(self):
        return not (self.is_segwit or self.is_taproot)

    @property
    def is_segwit(self):
        # TODO: is taproot segwit?
        return (self.wsh and self.miniscript) or (self.wpkh and self.key) or self.taproot

    @property
    def is_pkh(self):
        # single-key, non-taproot descriptor (pkh or wpkh)
        return self.key is not None and not self.taproot

    @property
    def is_taproot(self):
        return self.taproot

    @property
    def is_basic_multisig(self):
        return self.miniscript and self.miniscript.NAME in ["multi", "sortedmulti"]

    @property
    def is_sorted(self):
        return self.is_basic_multisig and self.miniscript.NAME == "sortedmulti"

    def scriptpubkey_type(self):
        """Return the scriptPubKey type: p2tr, p2sh, p2pkh, p2wpkh or p2wsh."""
        if self.is_taproot:
            return "p2tr"
        if self.sh:
            return "p2sh"
        if self.is_pkh:
            if self.is_legacy:
                return "p2pkh"
            if self.is_segwit:
                return "p2wpkh"
        else:
            return "p2wsh"

    @property
    def brief_policy(self):
        # Short human-readable summary of the spending policy.
        if self.key:
            return "single key"
        if self.is_basic_multisig:
            return (
                str(self.miniscript.args[0])
                + " of "
                + str(len(self.keys))
                + " multisig"
                + (" (sorted)" if self.is_sorted else "")
            )
        return "miniscript"

    @property
    def full_policy(self):
        # Like brief_policy, but generic miniscript is shown with its keys
        # abbreviated to single-letter placeholders A, B, C, ...
        if self.key or self.is_basic_multisig:
            return self.brief_policy
        s = str(self.miniscript)
        for i, k in enumerate(self.keys):
            s = s.replace(str(k), chr(65 + i))
        return s

    def derive(self, idx, branch_index=None):
        """Return a copy with all keys derived at index ``idx``."""
        if self.miniscript:
            return type(self)(
                self.miniscript.derive(idx, branch_index),
                self.sh,
                self.wsh,
                None,
                self.wpkh,
                self.taproot,
            )
        else:
            return type(self)(
                None, self.sh, self.wsh, self.key.derive(idx, branch_index), self.wpkh, self.taproot
            )

    def to_public(self):
        """Return a copy with private keys converted to public keys."""
        if self.miniscript:
            return type(self)(
                self.miniscript.to_public(),
                self.sh,
                self.wsh,
                None,
                self.wpkh,
                self.taproot,
            )
        else:
            return type(self)(
                None, self.sh, self.wsh, self.key.to_public(), self.wpkh, self.taproot
            )

    def owns(self, psbt_scope):
        """Checks if psbt input or output belongs to this descriptor"""
        # we can't check if we don't know script_pubkey
        if psbt_scope.script_pubkey is None:
            return False
        # quick check of script_pubkey type
        if psbt_scope.script_pubkey.script_type() != self.scriptpubkey_type():
            return False
        for pub, der in psbt_scope.bip32_derivations.items():
            # check of the fingerprints
            for k in self.keys:
                if not k.is_extended:
                    continue
                res = k.check_derivation(der)
                if res:
                    idx, branch_idx = res
                    sc = self.derive(idx, branch_index=branch_idx).script_pubkey()
                    # if derivation is found but scriptpubkey doesn't match - fail
                    return (sc == psbt_scope.script_pubkey)
        # same check for taproot key derivations
        for pub, (leafs, der) in psbt_scope.taproot_bip32_derivations.items():
            # check of the fingerprints
            for k in self.keys:
                if not k.is_extended:
                    continue
                res = k.check_derivation(der)
                if res:
                    idx, branch_idx = res
                    sc = self.derive(idx, branch_index=branch_idx).script_pubkey()
                    # if derivation is found but scriptpubkey doesn't match - fail
                    return (sc == psbt_scope.script_pubkey)
        return False

    def check_derivation(self, derivation_path):
        """Return the first key's match for derivation_path, or None."""
        for k in self.keys:
            # returns a tuple branch_idx, idx
            der = k.check_derivation(derivation_path)
            if der is not None:
                return der
        return None

    def witness_script(self):
        # Only wsh descriptors have a witness script (returns None otherwise).
        if self.wsh and self.miniscript is not None:
            return script.Script(self.miniscript.compile())

    def redeem_script(self):
        # Only sh-wrapped descriptors have a redeem script.
        if not self.sh:
            return None
        if self.miniscript:
            if not self.wsh:
                return script.Script(self.miniscript.compile())
            else:
                return script.p2wsh(script.Script(self.miniscript.compile()))
        else:
            return script.p2wpkh(self.key)

    def script_pubkey(self):
        """Compile the descriptor to its output script."""
        # covers sh-wpkh, sh and sh-wsh
        if self.taproot:
            return script.p2tr(self.key)
        if self.sh:
            return script.p2sh(self.redeem_script())
        if self.wsh:
            return script.p2wsh(self.witness_script())
        if self.miniscript:
            return script.Script(self.miniscript.compile())
        if self.wpkh:
            return script.p2wpkh(self.key)
        return script.p2pkh(self.key)

    def address(self, network=NETWORKS["main"]):
        """Encode the output script as an address for the given network."""
        return self.script_pubkey().address(network)

    @property
    def keys(self):
        # All keys referenced by the descriptor (single key or miniscript keys).
        if self.key:
            return [self.key]
        return self.miniscript.keys

    @classmethod
    def from_string(cls, desc):
        """Parse a descriptor string; trailing '#checksum' is tolerated."""
        s = BytesIO(desc.encode())
        res = cls.read_from(s)
        left = s.read()
        if len(left) > 0 and not left.startswith(b"#"):
            raise DescriptorError("Unexpected characters after descriptor: %r" % left)
        return res

    @classmethod
    def read_from(cls, s):
        """Parse a descriptor from a byte stream."""
        # starts with sh(wsh()), sh() or wsh()
        start = s.read(7)
        sh = False
        wsh = False
        wpkh = False
        is_miniscript = True
        taproot = False
        # After detecting the prefix, each relative seek re-positions the
        # stream just past the opening '(' of the detected wrapper.
        if start.startswith(b"tr("):
            taproot = True
            is_miniscript = False
            s.seek(-4, 1)
        elif start.startswith(b"sh(wsh("):
            sh = True
            wsh = True
        elif start.startswith(b"wsh("):
            sh = False
            wsh = True
            s.seek(-3, 1)
        elif start.startswith(b"sh(wpkh"):
            is_miniscript = False
            sh = True
            wpkh = True
            assert s.read(1) == b"("
        elif start.startswith(b"wpkh("):
            is_miniscript = False
            wpkh = True
            s.seek(-2, 1)
        elif start.startswith(b"pkh("):
            is_miniscript = False
            s.seek(-3, 1)
        elif start.startswith(b"sh("):
            sh = True
            wsh = False
            s.seek(-4, 1)
        else:
            raise ValueError("Invalid descriptor")
        if is_miniscript:
            miniscript = Miniscript.read_from(s)
            key = None
            nbrackets = int(sh) + int(wsh)
        else:
            miniscript = None
            key = Key.read_from(s, taproot=taproot)
            nbrackets = 1 + int(sh)
        # consume the closing parentheses of every wrapper we entered
        end = s.read(nbrackets)
        if end != b")" * nbrackets:
            raise ValueError("Invalid descriptor")
        return cls(miniscript, sh=sh, wsh=wsh, key=key, wpkh=wpkh, taproot=taproot)

    def to_string(self):
        """Serialize back to the canonical descriptor string (no checksum)."""
        if self.taproot:
            return "tr(%s)" % self.key
        if self.miniscript is not None:
            res = str(self.miniscript)
            if self.wsh:
                res = "wsh(%s)" % res
        else:
            if self.wpkh:
                res = "wpkh(%s)" % self.key
            else:
                res = "pkh(%s)" % self.key
        if self.sh:
            res = "sh(%s)" % res
        return res
|
from django.db.models.functions import Coalesce, Lower
from products.models import Product
class Manager:
    """Callable that returns all products (highest rate first) as plain dicts
    ready for JSON serialization."""

    def __init__(self):
        # The original stored these as *class* attributes, so every Manager
        # instance shared one __products_output list (and two instances
        # running concurrently would clobber each other's results). Keeping
        # them as instance attributes removes that cross-instance leakage.
        self.__query = None
        self.__products = None
        self.__products_output = []

    def __call__(self, query):
        """Run the pipeline for `query` and return the formatted product list."""
        self.__initialize(query)
        self.__obtain_products()
        self.__format_product_output()
        return self.__products_output

    def __initialize(self, query):
        # The query is currently only stored; filtering by
        # query.get_user_id() is presumably intended but not implemented —
        # TODO confirm with callers.
        self.__query = query

    def __obtain_products(self):
        # order_by('rate') ascending, then reverse() -> highest rate first.
        self.__products = Product.objects.order_by('rate').all().reverse()

    def __format_product_output(self):
        # Rebuild the output list from scratch on every call.
        self.__products_output.clear()
        for product in self.__products:
            icon, image = self.__obtain_medias_url(product)
            formatted_product = {
                'id': product.id,
                'title': product.title,
                'url': product.url,
                'summary': product.summary(),
                'icon': icon,
                'image': image,
                'rate': product.rate,
                'hunter': product.user.username,
                'published': product.published_pretty(),
            }
            self.__products_output.append(formatted_product)

    def __obtain_medias_url(self, product):
        """Return (icon_url, image_url), falling back to '' for either field
        when it is unset or has no file attached."""
        try:
            icon = ''
            if product.icon is not None:
                icon = product.icon.url
            image = ''
            if product.image is not None:
                image = product.image.url
        except ValueError:
            # Django's FileField.url raises ValueError when no file is
            # associated with the field.
            icon = ''
            image = ''
        return icon, image
class Query:
    """Immutable holder for an optional user-id filter."""

    def __init__(self, user_id=None):
        # Name-mangled to keep the id effectively read-only from outside.
        self.__user_id = user_id

    def get_user_id(self):
        """Return the user id this query was built with (None if unset)."""
        return self.__user_id
|
from tree import TreeNode
# Test inputs: mixed-case sample words (Trie.implementation lower-cases them).
lst = ['apple', 'ape', 'array', 'argon', 'advanced', 'Barry', 'Bee', 'Bat', 'Ball']
class Trie:
    """Ad-hoc trie construction / lookup over a list of words.

    NOTE(review): the insertion logic tracks a flat ``letters_in_tree`` list
    and only ever descends into children[0] / children[-1], so it does not
    build a correct general trie — it appears tailored to the sample input;
    verify before reusing on other word lists.
    """

    def implementation(self, lst):
        """Build the tree from ``lst`` (words are lower-cased first)."""
        # Root node stores the word count rather than a letter.
        self.tree = TreeNode(len(lst))
        head_node = self.tree
        # Flat record of every letter ever inserted (not per-path).
        letters_in_tree = []
        for word in lst:
            word = word.lower()
            # Restart from the root for each word.
            self.tree = head_node
            counter = 0
            for letter in word:
                if len(letters_in_tree) == 0:
                    # Very first letter of the very first word.
                    self.tree.add_child(TreeNode(letter))
                    letters_in_tree.append(letter)
                elif word[0] == head_node.children[0].value and len(letters_in_tree) != 0 and counter == 0:
                    # Word shares its first letter with the root's first child:
                    # reuse that child instead of adding a duplicate.
                    counter += 1
                    self.tree = self.tree.children[0]
                    letters_in_tree.append(word[0])
                    continue
                elif letter == letters_in_tree[-1]:
                    # Same letter as the last one inserted: append and descend.
                    self.tree.add_child(TreeNode(letter))
                    letters_in_tree.append(letter)
                    self.tree = self.tree.children[0]
                    counter += 1
                    continue
                else:
                    possible_letter = TreeNode(letter)
                    # Only add a child if this letter isn't already a child.
                    current_tree_lists = [i.value for i in self.tree.children]
                    if not letter in current_tree_lists:
                        self.tree.add_child(possible_letter)
                        letters_in_tree.append(letter)
                    if len(self.tree.children) == 2:
                        # Two-way branch: follow whichever side matches.
                        left_side = self.tree.children[0]
                        right_side = self.tree.children[-1]
                        if left_side.value == letter:
                            self.tree = left_side
                            continue
                        elif right_side.value == letter:
                            self.tree = right_side
                            continue
                    else:
                        self.tree = self.tree.children[0]
                    counter += 1
            self.tree = head_node

    def traverse(self):
        """Interactively search for a word; returns True on a full match.

        NOTE(review): reads from stdin via input(), so this blocks; the DFS
        pops from the end of ``nodes``, matching letters in order.
        """
        user_prompt = input("Please enter a word you are trying to find in a trie: ")
        tree = self.tree
        user_prompt = user_prompt.lower()
        user_letters = [i for i in user_prompt]
        returned_path = []
        nodes = [self.tree]
        while len(nodes) > 0 and len(user_letters) > 0:
            current_node = nodes.pop()
            if current_node.value == user_letters[0]:
                returned_path.append(current_node)
                user_letters.pop(0)
                nodes += current_node.children
        returned_path = [i.value for i in returned_path]
        user_letters = [i for i in user_prompt]
        # Success only if every letter of the prompt was matched in order.
        if returned_path == user_letters:
            return True
        else:
            return False
if __name__ == "__main__":
    # Guarded so importing this module no longer builds the trie and blocks
    # on the interactive input() inside traverse().
    test = Trie()
    test.implementation(lst)
    print(test.traverse())
|
import cv2
from PIL import Image
import numpy as np
def pil2cv(image):
    """Convert a PIL image (grayscale / RGB / RGBA) to an OpenCV array.

    Color channels are reordered RGB->BGR (or RGBA->BGRA); grayscale and any
    other channel count pass through unchanged.
    """
    converted = np.array(image, dtype=np.uint8)
    if converted.ndim == 2:
        # grayscale: single channel, nothing to reorder
        return converted
    channels = converted.shape[2]
    if channels == 3:
        converted = cv2.cvtColor(converted, cv2.COLOR_RGB2BGR)
    elif channels == 4:
        converted = cv2.cvtColor(converted, cv2.COLOR_RGBA2BGRA)
    return converted
# Load the same image via OpenCV (BGR ndarray) and via PIL, then compare the
# resulting array shapes.
image_path = "kitten_small.jpg"
cv_image = cv2.imread(image_path)
print("opencv image shape:", cv_image.shape)
pil_image = Image.open(image_path)
# Convert the PIL image to a numpy array so .shape is available.
pil_image = np.asarray(pil_image)
print("pil image shape:", pil_image.shape)
|
import numpy as np
import math
from data_loader import loader
import argparse
import time
def Merge_sort(A, p, r):
    """Recursively sort A[p..r] in place (inclusive bounds)."""
    if p < r:
        mid = (p + r) // 2
        Merge_sort(A, p, mid)
        Merge_sort(A, mid + 1, r)
        Merge(A, p, mid, r)


def Merge(A, p, q, r):
    """Merge the already-sorted runs A[p..q] and A[q+1..r] back into A."""
    left = A[p:q + 1].copy()
    right = A[q + 1:r + 1].copy()
    i = j = 0
    k = p
    # Take the smaller head element until one run is exhausted (<= keeps
    # the merge stable).
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            A[k] = left[i]
            i += 1
        else:
            A[k] = right[j]
            j += 1
        k += 1
    # Copy whichever run still has elements.
    while i < len(left):
        A[k] = left[i]
        i += 1
        k += 1
    while j < len(right):
        A[k] = right[j]
        j += 1
        k += 1
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Merge_sort')
    parser.add_argument('-d', '--data', type=str, nargs='?', default='unknown_data',
                        help='Please chose the dataset: -d almostsorted_10k')
    args = parser.parse_args()
    almostsorted_10k, random_10k, almostsorted_50k, random_50k = loader()
    # Map the dataset *name* to the loaded array. The original code called
    # .copy() on the name string itself (args.data), which always raised
    # AttributeError instead of sorting anything.
    datasets = {
        'almostsorted_10k': almostsorted_10k,
        'random_10k': random_10k,
        'almostsorted_50k': almostsorted_50k,
        'random_50k': random_50k,
    }
    name = args.data
    while name not in datasets:
        name = input('Enter data type from almostsorted_10k, random_10k, almostsorted_50k, random_50k : ')
    data = datasets[name]
    Time = np.zeros(100)
    print('The input array is:')
    print(data)
    for n in range(100):
        # Sort a fresh copy each run so every timing starts from the same
        # input (also avoids shadowing the builtin `sorted`).
        arr = data.copy()
        start = time.time()
        Merge_sort(arr, 0, np.size(arr) - 1)
        end = time.time()
        Time[n] = end - start
    print('The sorted array is:')
    print(arr)
    print('The average time is: %.8f' % np.mean(Time))
    print('The minimum time is: %.8f' % np.min(Time))
    print('The maximum time is: %.8f' % np.max(Time))
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unittests for the projectexport servlet."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from mock import Mock, patch
from framework import permissions
from project import projectexport
from proto import tracker_pb2
from services import service_manager
from services.template_svc import TemplateService
from testing import fake
from testing import testing_helpers
class ProjectExportTest(unittest.TestCase):
  """Permission checks for the project-export servlet."""

  def setUp(self):
    self.services = service_manager.Services()
    self.servlet = projectexport.ProjectExport(
        'req', 'res', services=self.services)

  def testAssertBasePermission(self):
    mr = testing_helpers.MakeMonorailRequest(
        perms=permissions.OWNER_ACTIVE_PERMISSIONSET)
    # A plain project owner must be rejected...
    with self.assertRaises(permissions.PermissionException):
      self.servlet.AssertBasePermission(mr)
    # ...while a site admin passes the same check.
    mr.auth.user_pb.is_site_admin = True
    self.servlet.AssertBasePermission(mr)
class ProjectExportJSONTest(unittest.TestCase):
  """Tests ProjectExportJSON.HandleRequest against fully faked services."""

  def setUp(self):
    # All backing services are fakes/mocks; only the template service needs
    # a spec'd Mock because its return value is stubbed per-test.
    self.services = service_manager.Services(
        config=fake.ConfigService(),
        project=fake.ProjectService(),
        user=fake.UserService(),
        template=Mock(spec=TemplateService))
    self.services.user.TestAddUser('user1@example.com', 111)
    self.servlet = projectexport.ProjectExportJSON(
        'req', 'res', services=self.services)
    self.project = fake.Project(project_id=789)
    self.mr = testing_helpers.MakeMonorailRequest(
        perms=permissions.OWNER_ACTIVE_PERMISSIONSET)
    # Export is restricted to site admins.
    self.mr.auth.user_pb.is_site_admin = True
    self.mr.project = self.project

  @patch('time.time')
  def testHandleRequest_Normal(self, mockTime):
    # Freeze time so the 'when' metadata field is deterministic.
    mockTime.return_value = 123456789
    self.services.project.GetProject = Mock(return_value=self.project)
    test_config = fake.MakeTestConfig(project_id=789, labels=[], statuses=[])
    self.services.config.GetProjectConfig = Mock(return_value=test_config)
    test_templates = testing_helpers.DefaultTemplates()
    self.services.template.GetProjectTemplates = Mock(
        return_value=test_templates)
    self.services.config.UsersInvolvedInConfig = Mock(return_value=[111])
    json_data = self.servlet.HandleRequest(self.mr)
    # Full golden snapshot of the exported project/config/metadata JSON.
    expected = {
        'project': {
            'committers': [],
            'owners': [],
            'recent_activity': 0,
            'name': 'proj',
            'contributors': [],
            'perms': [],
            'attachment_quota': None,
            'process_inbound_email': False,
            'revision_url_format': None,
            'summary': '',
            'access': 'ANYONE',
            'state': 'LIVE',
            'read_only_reason': None,
            'only_owners_remove_restrictions': False,
            'only_owners_see_contributors': False,
            'attachment_bytes': 0,
            'issue_notify_address': None,
            'description': ''
        },
        'config': {
            'templates': [{
                'status': 'Accepted',
                'members_only': True,
                'labels': [],
                'summary_must_be_edited': True,
                'owner': None,
                'owner_defaults_to_member': True,
                'component_required': False,
                'name': 'Defect report from developer',
                'summary': 'Enter one-line summary',
                'content': 'What steps will reproduce the problem?\n1. \n2. \n3. \n'
                '\n'
                'What is the expected output?\n\n\nWhat do you see instead?\n'
                '\n\n'
                'Please use labels and text to provide additional information.\n',
                'admins': []
            }, {
                'status': 'New',
                'members_only': False,
                'labels': [],
                'summary_must_be_edited': True,
                'owner': None,
                'owner_defaults_to_member': True,
                'component_required': False,
                'name': 'Defect report from user',
                'summary': 'Enter one-line summary', 'content': 'What steps will '
                'reproduce the problem?\n1. \n2. \n3. \n\nWhat is the expected '
                'output?\n\n\nWhat do you see instead?\n\n\nWhat version of the '
                'product are you using? On what operating system?\n\n\nPlease '
                'provide any additional information below.\n',
                'admins': []
            }],
            'labels': [],
            'statuses_offer_merge': ['Duplicate'],
            'exclusive_label_prefixes': ['Type', 'Priority', 'Milestone'],
            'only_known_values': False,
            'statuses': [],
            'list_spec': '',
            'developer_template': 0,
            'user_template': 0,
            'grid_y': '',
            'grid_x': '',
            'components': [],
            'list_cols': 'ID Type Status Priority Milestone Owner Summary'
        },
        'emails': ['user1@example.com'],
        'metadata': {
            'version': 1,
            'when': 123456789,
            'who': None,
        }
    }
    self.assertDictEqual(expected, json_data)
    # Verify the servlet hit the template and config services exactly once
    # with the expected arguments.
    self.services.template.GetProjectTemplates.assert_called_once_with(
        self.mr.cnxn, 789)
    self.services.config.UsersInvolvedInConfig.assert_called_once_with(
        test_config, test_templates)
|
# -*- coding: cp1252 -*-
"""
Description:
Challenge CAMELYON16.
Script for pixel classification.
Authors: Vaïa Machairas, Etienne Decencière, Peter Naylor, Thomas Walter.
Creation date: 2016-02-24
"""
from optparse import OptionParser
import sys
import timeit
import pdb
import os
from getpass import getuser
import smilPython as sp
import numpy as np
import folder_functions as ff
import segmentation_by_classification as sc
from evaluation import my_metrics
import segm_db_access as sdba
import cPickle as pickle
##---------------------------------------------------------------------------------------------------------------------------------------
##---------------------------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
from cluster_parameters import *
parser = OptionParser()
parser.add_option("-s", "--source", dest="folder_source",
help="Where to find Tumor files", metavar="FILE")
parser.add_option("-t", "--type", dest="type",
help="Normal, Tumor or Test", metavar="str")
parser.add_option("-n", "--number", dest="n",
help="Number of the type of slide", metavar="int")
parser.add_option("-x", "--x_axis", dest="x",
help="x", metavar="int")
parser.add_option("-y", "--y_axis", dest="y",
help="y", metavar="int")
parser.add_option("-w", "--width", dest="w",
help="width of the square", metavar="int")
parser.add_option("-a", "--height", dest="h",
help="height of the square", metavar="int")
parser.add_option("-r", "--resolution", dest="res",
help="resolution", metavar="int")
parser.add_option("-o", "--output", dest="out",
help="Output folder", metavar="folder")
parser.add_option("--subsample_folder", dest="subsample_folder",
help="Subsample folder", metavar="folder")
(options, args) = parser.parse_args()
para_ = [int(options.x),int(options.y),int(options.w),int(options.h),int(options.res)]
dico_input = {'para':para_}
#pdb.set_trace()
#slide_to_do = options.type + "_" + (3-len(str(options.n)))*"0" + options.n + '.tif'
try:
slide_to_do = '%s_%03i.tif' % (options.type, int(options.n))
except:
print 'incoherent inputs: '
print 'type (-t): ', options.type
print 'number (-n): ', options.number
raise ValueError("aborted due to incoherent inputs to Pred_.py")
db_server = sdba.SegmChallengeCamelyon16(options.folder_source,
slide_to_do = slide_to_do,
type_method = "pred",
dico_ROI = dico_input)
classif = sc.PixelClassification(db_server, options.out, pixel_features_list)#, nb_samples=input_3)
print "starting " + slide_to_do
start = timeit.default_timer()
if options.type in ['Normal', 'Tumor']:
iter_type = 'train'
else:
iter_type = 'prediction'
alll =[]
print iter_type
for el in db_server.iter_final_prediction(iter_type):
alll.append(el)
if len(alll)>2:
print "wrong inputs"
#pdb.set_trace()
original_image = alll[0][0]
original_image_name = alll[0][1]
folder_sauv_path = options.out
#image_sauv_path = folder_sauv_path+"/"+original_image_name.split('_')[0] + "_" + original_image_name.split('_')[1]
image_sauv_path = os.path.join(folder_sauv_path, '_'.join(original_image_name.split('_')[:2]))
print image_sauv_path
X = classif.get_X_per_image_with_save_3(original_image, original_image_name,
folder_sauv_path, image_sauv_path, save=True)
#X = classif.deal_with_missing_values_2(X)
# we can deal with missing value later.
#if not options.subsample_folder is None:
stop = timeit.default_timer()
print "time for "+slide_to_do+" "+str(stop-start)
print "ending " + slide_to_do
|
'''
Created on 18 Feb 2016
@author: Maxim Scheremetjew
'''
import mysql.connector
from mysql.connector import errorcode
class MySQLDBConnection:
"""Context manager class for oracle DB connection"""
def __init__(self, **config):
self.config = config
def __enter__(self):
try:
print 'Connecting to MySQL datavbase...'
self.connection = mysql.connector.connect(**self.config)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
print "Connection successfully established."
return self.connection
def __exit__(self, ext_type, exc_value, traceback):
self.connection.close()
self.connection = None
print "Connection closed and cleaned up."
if __name__ == '__main__':
    # Module is import-only; no standalone behaviour.
    pass
|
import requests
import random
from bs4 import BeautifulSoup
from selenium import webdriver
from fake_useragent import UserAgent
# Run Chrome without a visible window.
options = webdriver.ChromeOptions()
options.add_argument('headless')
# change useragent
useragent = UserAgent()
options.add_argument(f'user-agent={useragent.random}')
url = 'https://coinmarketcap.com/'
# NOTE(review): hard-coded absolute chromedriver path — machine-specific.
driver = webdriver.Chrome(executable_path='F:\\PythonForGit\\CoinMarketCap_Scrapper_Selenium\\chromedriver'
                          '\\chromedriver.exe', options=options)
try:
    driver.get(url)
    # click the most growing coins for the 7 days period
    # NOTE(review): the XPath and the 'fZIJcI' class below are generated by
    # the site's build and will break when the page is redeployed.
    week_gainers_button = driver.find_element_by_xpath('//*[@id="__next"]/div[1]/div[1]/div[2]/div/div[1]/div['
                                                      '2]/table/thead/tr/th[6]/div/div/div')
    week_gainers_button.click()
    # get the html source of the page
    html = driver.page_source
    # create soup object
    soup = BeautifulSoup(html, 'lxml')
    table = soup.findChildren('tbody')
    rows = table[0].findChildren('tr')
    count = 0
    # now we print the 10 coins that have the biggest increase on the week
    for row in rows:
        coin_name = row.find_all('td')[2].find('p').text
        symbol = row.find_all('td')[2].find('div', class_='fZIJcI').find('p').text
        weekly_percents = row.find_all('td')[5].text
        price = row.find_all('td')[3].text
        if count < 10:
            print(f'{coin_name} ({symbol}): {price}')
            print(f'The weekly increase is {weekly_percents}')
            print()
            count += 1
        else:
            break
except Exception as ex:
    # Best-effort script: report and fall through to browser cleanup.
    print(ex)
finally:
    driver.close()
    driver.quit()
|
import pygame
from sprites.Ball import Ball
from sprites.Brick import Brick
from sprites.Paddle import Paddle
from threads.RankInput import RankInput
from threads.Webcam import Webcam
try:
from cv2 import cv2
except ImportError:
pass
import numpy
import sqlite3
import random
import time
# Adjustments
SCREEN_SIZE = (1280, 960)
# Playfield margins (top, right, bottom, left) — matches the crop order used
# in resize_frame: rows PADDING[0]:-PADDING[2], cols PADDING[3]:-PADDING[1].
PADDING = (50, 25, 25, 25)
# 8% of width, 3.5% of height is Fine
BRICK_SIZE = (100, 35)
# (columns, rows) of the brick grid.
BRICK_COUNT = (10, 5)
BRICK_PADDING = 10
BALL_SIZE = (24, 24)
BALL_SPEED = 10
PADDLE_SIZE = (75, 20)
PADDLE_SPEED = 10
# Paddle center's distance from the bottom edge of the screen.
PADDLE_BOTTOM = 200
# Color-tracking thresholds passed to the Webcam thread — presumably
# HSV lower/upper bounds; TODO confirm against the Webcam implementation.
low = (52, 72, 126)
high = (70, 154, 253)
# Vertical (y) band of the camera image used as the tracking region.
roi = (450, 650)
# States
STATE_BALL_IN_PADDLE = 0
STATE_PLAYING = 1
STATE_WON = 2
STATE_GAME_OVER = 3
def resize_frame(frame):
    """Scale a webcam frame to the screen size, then crop off the padding
    margins.

    Returns (True, frame) to satisfy the Webcam.onFrameRead callback shape.
    """
    scaled = cv2.resize(frame, SCREEN_SIZE)
    cropped = scaled[PADDING[0]:-PADDING[2], PADDING[3]:-PADDING[1]]
    return True, cropped
class Game:
    """Webcam-controlled brick-breaker built on pygame + OpenCV.

    The paddle follows a colour-tracked object seen by the webcam (or arrow
    keys as fallback); finished games are persisted to SQLite through a
    background RankInput thread.
    """

    def __init__(self):
        self.init_pygame()
        self.init_opencv()
        self.init_game()
        self.init_sqlite()

    def init_pygame(self):
        """Set up the display, background surface, clock and HUD font."""
        pygame.mixer.pre_init(44100, -16, 2, 512)
        pygame.init()
        self.screen = pygame.display.set_mode(SCREEN_SIZE, pygame.HWSURFACE | pygame.DOUBLEBUF)  # type: pygame.Surface
        self.background = pygame.Surface(self.screen.get_size())
        self.clock = pygame.time.Clock()
        self.font = pygame.font.Font('fonts/Noto_Sans_CJK_KR/Medium.otf', 32)

    def init_opencv(self):
        """Open the capture device and start the webcam reader thread."""
        self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        # self.cap = cv2.VideoCapture("C:/Users/maxsw/OneDrive/바탕 화면/blank.mp4")
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, SCREEN_SIZE[0])
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, SCREEN_SIZE[1])
        max_width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        max_height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # Warn (but keep going) when the camera cannot match the screen size.
        if (max_width != SCREEN_SIZE[0]
                or max_height != SCREEN_SIZE[1]):
            print("Screen Too Big for Webcam")
            print("Max Resolution : {0}x{1}".format(max_width, max_height))
            # exit(1)
        frame_size = (SCREEN_SIZE[0] - PADDING[1] - PADDING[3], SCREEN_SIZE[1] - PADDING[0] - PADDING[2])
        self.cam_thread = Webcam(self.cap, SCREEN_SIZE, PADDING, low, high, roi)
        self.cam_thread.onFrameRead.append(resize_frame)
        self.cam_thread.start()
        print('Waiting For First Frame to Loaded')
        # Busy-wait until the reader thread has produced its first frame.
        while not self.cam_thread.is_updated():
            pass
        print('Loaded First Frame')
        print()
        while self.cam_thread.get_frame() is None:
            pass

    def init_game(self):
        """(Re)set game state, paddle, ball and bricks; redraw the HUD."""
        self.state = STATE_BALL_IN_PADDLE
        self.rank_saved = False
        self.score = 0
        # Last HUD values drawn, used to skip re-rendering unchanged text.
        self.prev = {}
        self.prev['score'] = 0
        self.prev['time'] = 0
        # Create Paddle
        image = pygame.image.load('images/Bar.png').convert_alpha()
        image = pygame.transform.scale(image, PADDLE_SIZE)
        self.paddle = Paddle(image)
        self.paddle.rect.center = (SCREEN_SIZE[0] / 2, SCREEN_SIZE[1] - PADDLE_BOTTOM)
        # Create Ball
        image = pygame.image.load('images/Ball.png').convert_alpha()
        image = pygame.transform.scale(image, BALL_SIZE)
        self.ball = Ball(image, self.paddle.rect.centerx, self.paddle.rect.top - 12)
        self.ball.speed = BALL_SPEED
        # Confine the ball to the playfield inside the padding margins.
        self.ball.boundRect(pygame.Rect(PADDING[3], PADDING[0], SCREEN_SIZE[0] - (PADDING[1] + PADDING[3]),
                                        SCREEN_SIZE[1] - (PADDING[0] + PADDING[2])))
        self.create_bricks()
        self.background.fill((0, 0, 0))
        t = self.font.render('Score : {0:04d}'.format(0), True, (255, 255, 255))
        self.background.blit(t,
                             (SCREEN_SIZE[0] - t.get_size()[0] - PADDING[1] - 15,
                              int((PADDING[0] - t.get_size()[1]) / 2)))
        t = self.font.render('Time : {0:02d}:{1:02d}'.format(0, 0), True, (255, 255, 255))
        self.background.blit(t,
                             (PADDING[3] + 15,
                              int((PADDING[0] - t.get_size()[1]) / 2)))

    def init_sqlite(self):
        """Open the rank database and start the name-entry thread."""
        self.db = sqlite3.connect('data/rank.db')
        self.rank_thread = RankInput(self.db)
        self.rank_token = None
        self.rank_thread.start()

    def create_bricks(self):
        """Lay out the brick grid, horizontally centered below the HUD."""
        self.bricks = pygame.sprite.Group()
        total_brick_width = -BRICK_PADDING + (BRICK_SIZE[0] + BRICK_PADDING) * BRICK_COUNT[0]
        side_pad = SCREEN_SIZE[0] - total_brick_width
        top_pad = PADDING[0] + BRICK_SIZE[1] * 2
        for i in range(0, BRICK_COUNT[1]):
            for j in range(0, BRICK_COUNT[0]):
                # Each brick gets a random score/strength value 1..9.
                brick = Brick(random.randrange(1, 10), BRICK_SIZE)
                brick.rect.x = int(side_pad / 2) + j * (BRICK_SIZE[0] + BRICK_PADDING)
                brick.rect.y = top_pad + i * (BRICK_SIZE[1] + BRICK_PADDING)
                self.bricks.add(brick)

    def check_input(self, cam_input=False):
        """Move the paddle (webcam tracking or keyboard) and handle the
        space / return / h / r state-transition keys."""
        pos = list(self.paddle.rect.center)
        width = self.paddle.rect.width
        max_x = SCREEN_SIZE[0] - (PADDING[1] + PADDING[3]) - width / 2
        min_x = width
        keys = pygame.key.get_pressed()
        if cam_input:
            # cam_thread.center == -1 means the tracked object wasn't found.
            if self.cam_thread.center != -1:
                pos[0] = SCREEN_SIZE[0] - self.cam_thread.center
        else:
            if keys[pygame.K_LEFT]:
                pos[0] -= PADDLE_SPEED
                if pos[0] < min_x:
                    pos[0] = min_x
            if keys[pygame.K_RIGHT]:
                pos[0] += PADDLE_SPEED
                if pos[0] > max_x:
                    pos[0] = max_x
        if keys[pygame.K_SPACE] and self.state == STATE_BALL_IN_PADDLE:
            # Launch the ball at a random angle (either side of vertical).
            self.state = STATE_PLAYING
            self.time_start = time.time()
            start_angle = random.randrange(20, 60)
            if random.random() < 0.5:
                start_angle = -start_angle
            self.ball.start(start_angle + 180)
        elif keys[pygame.K_RETURN] and (self.state in [STATE_GAME_OVER, STATE_WON]) and self.rank_token is None:
            self.init_game()
            return
        elif keys[pygame.K_h] and (self.state in [STATE_GAME_OVER, STATE_WON, STATE_BALL_IN_PADDLE]):
            # NOTE(review): draw_help()/draw_rank() are not defined in this
            # file — presumably implemented elsewhere; verify before shipping.
            self.draw_help()
            return
        elif keys[pygame.K_r] and (self.state in [STATE_GAME_OVER, STATE_WON]):
            self.draw_rank()
        if self.state == STATE_BALL_IN_PADDLE:
            # The ball rides on top of the paddle until launch.
            self.paddle.move(tuple(pos))
            self.ball.move(pos=(pos[0], self.paddle.rect.top - 12))
        elif self.state == STATE_PLAYING:
            self.paddle.move(tuple(pos))

    def update_header(self):
        """Redraw the score/time HUD strip whenever either value changed."""
        if self.state == STATE_PLAYING:
            time_elapsed = int(time.time() - self.time_start)
            if self.prev['score'] != self.score or time_elapsed != self.prev['time']:
                # Blank the strip, then re-render both HUD texts.
                black = pygame.Surface((SCREEN_SIZE[0], PADDING[0]))
                self.background.blit(black, (0, 0))
                t = self.font.render('Score : {0:04d}'.format(self.score), True, (255, 255, 255))
                self.background.blit(t,
                                     (SCREEN_SIZE[0] - t.get_size()[0] - PADDING[1] - 15,
                                      int((PADDING[0] - t.get_size()[1]) / 2)))
                self.prev['score'] = self.score
                t = self.font.render('Time : {0:02d}:{1:02d}'.format(int(time_elapsed / 60), time_elapsed % 60), True,
                                     (255, 255, 255))
                self.background.blit(t,
                                     (PADDING[3] + 15,
                                      int((PADDING[0] - t.get_size()[1]) / 2)))
                self.prev['time'] = time_elapsed

    def run(self):
        """Main loop at 30 FPS until the window is closed."""
        done = False
        while not done:
            if self.cam_thread.is_updated():
                # Convert the BGR frame to a pygame surface (RGB, rotated).
                self.frame = self.cam_thread.get_frame()
                self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
                self.frame = numpy.rot90(self.frame)
                self.frame = pygame.surfarray.make_surface(self.frame)
                self.background.blit(self.frame, (PADDING[3], PADDING[0]))
            self.update_header()
            self.bricks.draw(self.background)
            self.paddle.draw(self.background)
            self.ball.draw(self.background)
            # Show the tracking ROI band while waiting for launch.
            if roi[0] != -1 and roi[1] != -1 and self.state == STATE_BALL_IN_PADDLE:
                pygame.draw.line(self.background, (255, 0, 0), (PADDING[3], roi[0]),
                                 (SCREEN_SIZE[0] - PADDING[1], roi[0]), 2)
                pygame.draw.line(self.background, (255, 0, 0), (PADDING[3], roi[1]),
                                 (SCREEN_SIZE[0] - PADDING[1], roi[1]), 2)
            if self.state in [STATE_WON, STATE_GAME_OVER] and not self.ball.go:
                self.add_rank()
            self.check_input(False)
            self.screen.blit(self.background, (0, 0))
            pygame.display.update()
            self.clock.tick(30)
            self.ball.move(self.bricks, self.paddle)
            if self.state == STATE_PLAYING:
                hit_list = pygame.sprite.spritecollide(self.ball, self.bricks, True, pygame.sprite.collide_mask)
                for h in hit_list:
                    self.score += h.score
                    # Bounce vertically on top/bottom hits, horizontally otherwise.
                    if h.rect.left <= self.ball.rect.centerx <= h.rect.right:
                        self.ball.direction[1] *= -1
                    else:
                        self.ball.direction[0] *= -1
                    if h.hit() == False:
                        self.bricks.remove(h)
                if len(hit_list) != 0:
                    sfx1 = pygame.mixer.Sound('sounds/pop.ogg')
                    sfx1.set_volume(0.5)
                    sfx1.play()
                if pygame.sprite.collide_mask(self.ball, self.paddle):
                    self.ball.colideBar(self.paddle.rect.center[0])
                # Ball fell below the paddle -> game over.
                if self.ball.rect.centery > self.paddle.rect.top:
                    self.state = STATE_GAME_OVER
                    # self.ball.stop()
                if len(self.bricks) == 0:
                    self.state = STATE_WON
            ev = pygame.event.get()
            for event in ev:
                if event.type == pygame.QUIT:
                    self.clean()
                    done = True

    def add_rank(self):
        """Queue the finished game for name entry; once the player's info is
        available, persist it to the rank table."""
        if not self.rank_saved:
            time_elapsed = time.time() - self.time_start
            self.rank_token = self.rank_thread.queue_info(self.score, time_elapsed, self.state)
            self.rank_saved = True
        status = self.rank_thread.get_status(self.rank_token)
        if status is None:
            # Still waiting for input: show a translucent "Game Over" banner.
            s = pygame.Surface((800, 150))
            s.set_alpha(128)
            s.fill((255, 255, 255))
            f = pygame.font.Font('fonts/Noto_Sans_CJK_KR/Medium.otf', 72)
            t = f.render('Game Over', True, (255, 0, 0))
            s.blit(t, ((800 - t.get_size()[0]) / 2, (150 - t.get_size()[1]) / 2))
            self.background.blit(s, ((SCREEN_SIZE[0] - 800) / 2, (SCREEN_SIZE[1] - 150) / 2))
        else:
            # (uid, score, time, name, phone)
            cur = self.db.cursor()
            cur.execute('INSERT INTO rank(name, phone, score, "time") VALUES (?, ?, ?, ?)',
                        (status[3], status[4], status[1], status[2]))
            self.db.commit()
            self.rank_token = None

    def clean(self):
        """Stop worker threads and release the capture device."""
        print('Closing Game...')
        print()
        self.cam_thread.stop()
        print('Waiting for Camera Thread to Stop')
        # Busy-wait until each thread acknowledges the stop flag.
        while self.cam_thread.running:
            pass
        print('Camera Thread Stopped')
        print()
        self.rank_thread.stop()
        print('Waiting for Rank Thread to Stop')
        while self.rank_thread.running:
            pass
        print('Rank Thread Stopped')
        print()
        self.cap.release()
        cv2.destroyAllWindows()
def main(debug=False):
    """Create the game and run its main loop until the window is closed.

    Args:
        debug: accepted for backward compatibility with existing callers;
            currently unused.
    """
    game = Game()
    game.run()
    # NOTE: Game.run() already calls clean() when it handles the QUIT event,
    # so the original unconditional second clean() re-ran the thread shutdown
    # waits and released the capture device twice; it is omitted here.


if __name__ == '__main__':
    main(True)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-24 16:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration joining the 0037 and 0034 branches of mainApp.

    It carries no operations of its own; it exists only so that both
    dependency chains converge on a single leaf migration.
    """

    dependencies = [
        ('mainApp', '0037_auto_20171124_1823'),
        ('mainApp', '0034_review'),
    ]

    operations = [
    ]
|
#!/sw/bin/python3
"""Tool for marking newick-format-tree branches."""
import argparse
import sys
import re
__author__ = "Bogdan Kirilenko, 2018."
def eprint(msg, end="\n"):
    """Write *msg* followed by *end* to standard error."""
    stream = sys.stderr
    stream.write(msg + end)
def die(msg, rc=1):
    """Report *msg* on stderr and terminate the process with code *rc*."""
    # Inlined eprint(msg) with its default newline terminator.
    sys.stderr.write(msg + "\n")
    sys.exit(rc)
def parse_args():
    """Parse and validate the command-line arguments.

    Returns an argparse.Namespace with:
      tree_file     -- path to the newick tree to edit
      branches      -- comma-separated branch names to mark ("" = none)
      label         -- label text appended to each marked branch
      show_branches -- if True, only print the available branches

    Prints the help text and exits when called with no arguments.
    """
    app = argparse.ArgumentParser()
    app.add_argument("tree_file", type=str, help="Tree file for manipulations.")
    # nargs="?" makes this positional optional; without it argparse
    # treats it as required and the default of "" can never apply,
    # making the "just list the branches" path unreachable by omission.
    app.add_argument("branches", type=str, nargs="?", default="",
                     help="Comma-separated list of branches to mark.")
    app.add_argument("--label", "-l", type=str, default="Foreground", help="Label to put to branch.")
    app.add_argument("--show_branches", "-s", action="store_true", dest="show_branches",
                     help="Show the possible branches.")
    # print help if there are no args
    if len(sys.argv) < 2:
        app.print_help()
        sys.exit(0)
    args = app.parse_args()
    return args
def get_branches(tree):
    """Extract candidate branch names from a newick string.

    Every non-word character is treated as a separator; purely numeric
    tokens (distances / support values) are discarded.
    """
    cleaned = re.sub(r'[^\w]', ' ', tree)
    return list(filter(lambda tok: not tok.isdigit(), cleaned.split()))
def main():
    """Entry point: mark the requested branches and print the tree."""
    args = parse_args()
    # Read the whole newick file into one string.
    with open(args.tree_file, "r") as f:
        tree_content = f.read()
    # Collect every token that could be a branch name.
    all_branches = get_branches(tree_content)
    # With -s, or when no branches were requested, just list the options.
    if args.show_branches or args.branches == "":
        sys.stdout.write(" ".join(sorted(all_branches)) + "\n")
        sys.exit(0)
    # get list of branches to mark
    req_branches = [x for x in args.branches.split(",") if x != ""]
    label = "{" + args.label + "}"
    for branch in req_branches:
        # if - or _ in branch name: simple way
        if "_" in branch or "-" in branch:  # composite name, e.g. turTru2_balAcu1
            # NOTE(review): plain substring replace — if one composite
            # name is a substring of another, both get relabeled;
            # confirm names are unique enough for this.
            tree_content = tree_content.replace(branch, branch + label)
        else:  # more complicated case, for example myoLuc2
            # it might be myoLuc2 as-is as well as inside myoLuc2_myoDav1,
            # so only the (myoLuc2: and ,myoLuc2: occurrences are labeled
            braced = "({}:".format(branch)
            commed = ",{}:".format(branch)
            tree_content = tree_content.replace(braced, "(" + branch + label + ":")
            tree_content = tree_content.replace(commed, "," + branch + label + ":")
    sys.stdout.write(tree_content)
    sys.exit(0)


if __name__ == "__main__":
    main()
|
import sys
import pandas as pd
import numpy as np
import multiprocessing as mp
import time
import datetime as dt
import adv_finance.sampling as sampling
from adv_finance.multiprocess import process_jobs_, process_jobs
# from adv_finance.sampling import get_ind_matrix, get_avg_uniqueness
def get_rnd_t1(num_obs, num_bars, max_h):
    """Generate a random t1 series (event start bar -> event end bar).

    Draws up to *num_obs* events: each picks a random start bar in
    [0, num_bars) and an end bar 1 .. max_h-1 bars later.  Duplicate
    start bars overwrite earlier draws, so the result may contain fewer
    than *num_obs* entries.

    Returns the series sorted by start-bar index.
    """
    # An explicit dtype avoids the deprecated object-dtype inference of
    # pd.Series() with no data in newer pandas.
    t1 = pd.Series(dtype="int64")
    for i in np.arange(num_obs):
        ix = np.random.randint(0, num_bars)
        val = ix + np.random.randint(1, max_h)
        t1.loc[ix] = val
    return t1.sort_index()
def auxMC(num_obs, num_bars, max_h):
    """Run one Monte-Carlo trial comparing sequential vs standard bootstrap.

    Returns a dict with the mean average-uniqueness of a sequentially
    bootstrapped sample ('seq_u') and of a uniform random sample
    ('std_u') drawn from the same indicator matrix.
    """
    t1 = get_rnd_t1(num_obs, num_bars, max_h)
    bar_idx = range(t1.max() + 1)
    ind_m = sampling.get_ind_matrix(bar_idx, t1)
    # Sequential bootstrap sample and its uniqueness.
    seq_sample = sampling.seq_bootstrap(ind_m)
    seq_u = sampling.get_avg_uniqueness(ind_m[:, seq_sample], None).mean()
    # Uniform random sample (with replacement) of the same size.
    rnd_sample = np.random.choice(np.arange(ind_m.shape[1]), size=ind_m.shape[1])
    std_u = sampling.get_avg_uniqueness(ind_m[:, rnd_sample], None).mean()
    return {'std_u': std_u, 'seq_u': seq_u}
def mainMC(num_obs=10, num_bars=100, max_h=5, num_iters=5, num_threads=1):
    """Dispatch *num_iters* auxMC trials and gather results in a DataFrame.

    Runs sequentially when num_threads == 1 (handy for debugging),
    otherwise through the multiprocessing job runner.
    """
    jobs = [
        {'func': auxMC, 'num_obs': num_obs, 'num_bars': num_bars, 'max_h': max_h}
        for _ in np.arange(num_iters)
    ]
    if num_threads == 1:
        results = process_jobs_(jobs)
    else:
        results = process_jobs(jobs, num_threads)
    return pd.DataFrame(results)
if __name__ == "__main__":
    # Small smoke run exercising the parallel (num_threads > 1) path.
    # df = mainMC(numObs=10, numBars=10, numIters=20, numThreads=1)
    df = mainMC(num_obs=10, num_bars=10, num_iters=5, num_threads=2)
    print('finished')
### APPENDIX ###
# 원본 소스
# def seq_bootstrap_(ind_m, s_length=None):
# if s_length is None:
# s_length = ind_m.shape[1]
#
# phi = []
# while len(phi) < s_length:
# c = ind_m[phi].sum(axis=1) + 1
# avg_u = get_avg_uniqueness(ind_m, c)
# prob = (avg_u / avg_u.sum()).values
# phi += [np.random.choice(ind_m.columns, p=prob)]
# return phi
#
#
# # Sparse Matrix 버전
# def seq_bootstrap(ind_m, s_length=None):
# if s_length is None:
# s_length = ind_m.shape[1]
#
# phi = []
# m = ind_m.todense()
# while len(phi) < s_length:
# m_ = m[:, phi]
# c = m_.sum(axis=1) + 1
# avg_u = sampling.get_avg_uniqueness(m, c)
# prob = (avg_u / avg_u.sum())
# prob = np.asarray(prob).reshape(-1)
# phi += [np.random.choice(np.arange(ind_m.shape[1]), p=prob)]
# return phi
# def expandCall(kargs):
# # Expand the arguments of a callback function, kargs['func'] func= kargs['func']
# func = kargs['func']
# del kargs['func']
# out= func (**kargs)
# return out
#
#
# # single-thread execution for debugging [20.8]
# def processJobs_(jobs):
# # Run jobs sequentially, for debugging
# out=[]
# for job in jobs:
# out_= expandCall(job)
# out.append(out_)
# return out
#
# def report_progress(job_num, num_jobs, time0, task):
# """
# Snippet 20.9.1, pg 312, Example of Asynchrounous call to python multiprocessing library
#
# :param job_num:
# :param num_jobs:
# :param time0:
# :param task:
# :return:
# """
#
# # Report progress as asynch jobs are completed
# msg = [float(job_num) / num_jobs, (time.time() - time0) / 60.0]
# msg.append(msg[1] * (1 / msg[0] - 1))
# time_stamp = str(dt.datetime.fromtimestamp(time.time()))
#
# msg = time_stamp + ' ' + str(round(msg[0] * 100, 2)) + '%' + task + ' done after ' + \
# str(round(msg[1], 2)) + ' minutes. Remaining ' + str(round(msg[2], 2)) + ' minutes.'
#
# if job_num < num_jobs:
# sys.stderr.write(msg + '\r')
# else:
# sys.stderr.write(msg + '\n')
# def processJobs(jobs,task =None, numThreads=24):
# # Run in parallel.
# # jobs must contain a 'func' callback, for expandCall
# if task is None:task = jobs [0]['func'].__name__
# pool=mp. Pool(processes=numThreads)
# outputs,out, time0 =pool . imap_unordered(expandCall,jobs) ,[], time. time()
# # Process asyn output, report progress
# for i,out_ in enumerate(outputs,1):
# out.append(out_)
# report_progress(i,len( jobs),time0, task)
# pool.close()
# pool.join() # this is needed to prevent memory leaks
# return out
# def auxMC_(numObs, numBars, maxH):
# num_obs = numObs
# num_bars = numBars
# max_h = maxH
#
# t1 = get_rnd_t1(num_obs, num_bars, max_h)
# bar_idx = range(t1.max() + 1)
# ind_m = get_ind_matrix(bar_idx, t1)
# phi = np.random.choice(ind_m.columns, size=ind_m.shape[1])
# std_u = get_avg_uniqueness(ind_m[phi]).mean()
# phi = seq_bootstrap_(ind_m)
# seq_u = get_avg_uniqueness(ind_m[phi]).mean()
# return {'std_u': std_u, 'seq_u': seq_u}
#
# def mainMC(numObs=10, numBars=1000, maxH=5, numIters=1E3, numThreads=8):
# # Monte Carlo experiments
# jobs = []
# for i in np.arange(numIters):
# job = {'func':auxMC, 'numObs': numObs, 'numBars': numBars, 'maxH': maxH}
# jobs.append(job)
#
# if numThreads == 1:
# out = processJobs_(jobs)
# else:
# out = processJobs(jobs, numThreads=numThreads)
#
# return pd.DataFrame(out)
# def get_ind_matrix(bar_idx, t1):
# ind_m = pd.DataFrame(0, index=bar_idx,
# columns=range(t1.shape[0]))
# for i, (t0_, t1_) in enumerate(t1.iteritems()):
# ind_m.loc[t0_:t1_, i] = 1
# return ind_m
#
#
# def get_avg_uniqueness(ind_m, c=None):
# if c is None:
# c = ind_m.sum(axis=1)
#
# ind_m = ind_m.loc[c > 0]
# c = c.loc[c > 0]
# u = ind_m.div(c, axis=0)
# avg_u = u[u>0].mean()
# avg_u = avg_u.fillna(0)
# return avg_u
|
"""
Project Euler 29: count the distinct terms of a**b
for 2 <= a <= 100 and 2 <= b <= 100.
"""
import time

start_time = time.time()
# A set de-duplicates for us and gives O(1) membership, replacing the
# original list + linear `in` scan; the Python-2 `print len(...)`
# statement is also replaced by the call form used two lines below.
distinct_powers = {a ** b for a in range(2, 101) for b in range(2, 101)}
print(len(distinct_powers))
print("--- %s ms ---" % int(round((time.time() - start_time) * 1000)))
|
from .defaults import update_with_defaults
from .log_prob import get_log_prob
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from starry_process import StarryProcess
from starry_process.latitude import beta2gauss, gauss2beta
import starry
from starry_process.compat import theano, tt
import numpy as np
from scipy.stats import norm as Normal
import dynesty.plotting as dyplot
from dynesty import utils as dyfunc
from corner import corner as _corner
from tqdm.auto import tqdm
import glob
import os
import json
import pickle
def corner(*args, **kwargs):
    """
    Override `corner.corner` by making some appearance tweaks.

    Shrinks tick labels, switches to non-offset scalar tick formatting,
    and widens axis limits so that any `truths` values always fall
    inside the plotted range.  All arguments are forwarded to
    `corner.corner`; `corner_label_size` (default 16) sets the axis
    label font size.
    """
    # Get the usual corner plot
    figure = _corner(*args, **kwargs)

    # The figure holds ndim*ndim axes in row-major order.
    ndim = int(np.sqrt(len(figure.axes)))
    axes = np.array(figure.axes).reshape((ndim, ndim))

    # Smaller tick labels.
    # NOTE(review): `Tick.label` was removed in Matplotlib 3.8 (use
    # tick.label1) — confirm against the pinned matplotlib version.
    for ax in axes[1:, 0]:
        for tick in ax.yaxis.get_major_ticks():
            tick.label.set_fontsize(10)
        formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
        ax.yaxis.set_major_formatter(formatter)
        ax.set_ylabel(
            ax.get_ylabel(), fontsize=kwargs.get("corner_label_size", 16)
        )
    for ax in axes[-1, :]:
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(10)
        formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
        ax.xaxis.set_major_formatter(formatter)
        ax.set_xlabel(
            ax.get_xlabel(), fontsize=kwargs.get("corner_label_size", 16)
        )

    # Pad the axes to always include the truths
    truths = kwargs.get("truths", None)
    if truths is not None:
        for row in range(1, ndim):
            for col in range(row):
                # x limits of the off-diagonal panel (and the matching
                # diagonal histogram in the same column).
                lo, hi = np.array(axes[row, col].get_xlim())
                if truths[col] < lo:
                    lo = truths[col] - 0.1 * (hi - truths[col])
                    axes[row, col].set_xlim(lo, hi)
                    axes[col, col].set_xlim(lo, hi)
                elif truths[col] > hi:
                    hi = truths[col] - 0.1 * (hi - truths[col])
                    axes[row, col].set_xlim(lo, hi)
                    axes[col, col].set_xlim(lo, hi)

                # y limits of the panel; the diagonal panel of the same
                # row is a histogram, so its *x* axis carries that
                # variable and gets the same limits.
                lo, hi = np.array(axes[row, col].get_ylim())
                if truths[row] < lo:
                    lo = truths[row] - 0.1 * (hi - truths[row])
                    axes[row, col].set_ylim(lo, hi)
                    axes[row, row].set_xlim(lo, hi)
                elif truths[row] > hi:
                    hi = truths[row] - 0.1 * (hi - truths[row])
                    axes[row, col].set_ylim(lo, hi)
                    axes[row, row].set_xlim(lo, hi)

    return figure
def lat2y(lat):
    """
    Return the fractional y position (in [0, 1])
    corresponding to a given latitude on a Mollweide grid.

    Solves the Mollweide auxiliary-angle equation
    2*theta + sin(2*theta) = pi * sin(lat) by Newton iteration.
    NOTE: the Newton denominator vanishes at lat = +/-90 degrees.
    """
    lat_rad = lat * np.pi / 180
    theta = lat_rad
    # Fixed iteration count; Newton converges long before 100 steps.
    for _ in range(100):
        residual = 2 * theta + np.sin(2 * theta) - np.pi * np.sin(lat_rad)
        theta = theta - residual / (2 + 2 * np.cos(2 * theta))
    return np.sin(theta)
def plot_data(data, ncols=10, clip=False, **kwargs):
    """
    Plot a synthetic dataset.

    Draws one column per light curve: a Mollweide map render on top and
    the corresponding flux series (noisy dots + noiseless line) below.

    Parameters
    ----------
    data : dict with keys "t", "incs", "flux", "flux0", "y"
        (time grid, inclinations, observed flux, noiseless flux,
        spherical-harmonic coefficients per light curve).
    ncols : maximum number of columns in the grid.
    clip : if True, drop trailing light curves so the count is an exact
        multiple of *ncols*.
    kwargs : merged with the project defaults; the "plot" and
        "generate" sections are used here.
    """
    # Get kwargs
    kwargs = update_with_defaults(**kwargs)
    plot_kwargs = kwargs["plot"]
    vmin = plot_kwargs["vmin"]
    vmax = plot_kwargs["vmax"]

    # Get data
    t = data["t"]
    incs = data["incs"]
    flux = data["flux"]
    flux0 = data["flux0"]
    y = data["y"]

    # Plot the synthetic dataset
    nlc = len(flux)
    if clip:
        nlc = divmod(nlc, ncols)[0] * ncols
    if nlc > ncols:
        nrows = int(np.ceil(nlc / ncols))
    else:
        nrows = 1
    # Last column is slightly wider to make room for the colorbar.
    wr = np.ones(min(nlc, ncols))
    wr[-1] = 1.17
    gridspec = {"width_ratios": wr}
    fig, ax = plt.subplots(
        2 * nrows,
        min(nlc, ncols),
        figsize=(min(nlc, ncols) + 2, 2 * nrows),
        gridspec_kw=gridspec,
    )
    fig.subplots_adjust(hspace=0.4)
    # Column-major flattening: maps on even positions, fluxes on odd.
    axtop = ax.transpose().flatten()[::2]
    axbot = ax.transpose().flatten()[1::2]
    # Shared flux scale (ppt) across all light curves.
    yrng = 1.1 * np.max(
        np.abs(1e3 * (flux0 - np.median(flux0, axis=1).reshape(-1, 1)))
    )
    ymin = -yrng
    ymax = yrng
    # Outline of the Mollweide projection (slightly shrunk ellipse).
    xe = 2 * np.linspace(-1, 1, 1000)
    ye = np.sqrt(1 - (0.5 * xe) ** 2)
    eps = 0.01
    xe = (1 - eps) * xe
    ye = (1 - 0.5 * eps) * ye
    map = starry.Map(kwargs["generate"]["ydeg"], lazy=False)
    for k in tqdm(range(nlc), disable=bool(int(os.getenv("NOTQDM", "0")))):
        # Render the surface map for light curve k.
        map[:, :] = y[k]
        image = 1.0 + map.render(projection="moll", res=300)
        im = axtop[k].imshow(
            image,
            origin="lower",
            extent=(-2, 2, -1, 1),
            cmap="plasma",
            vmin=vmin,
            vmax=vmax,
        )
        axtop[k].plot(xe, ye, "k-", lw=1, clip_on=False)
        axtop[k].plot(xe, -ye, "k-", lw=1, clip_on=False)
        # Mark the sub-observer point implied by the inclination.
        axtop[k].plot(0, lat2y(90 - incs[k]), "kx", ms=3)
        axtop[k].axis("off")
        axtop[k].set_ylim(-1.01, 2.0)
        axbot[k].plot(
            t, 1e3 * (flux[k] - np.median(flux[k])), "k.", alpha=0.3, ms=1
        )
        axbot[k].plot(t, 1e3 * (flux0[k] - np.median(flux0[k])), "C0-", lw=1)
        axbot[k].set_ylim(ymin, ymax)
        if k < nrows:
            # First column of each row (column-major order) gets the
            # axis styling; the colorbar inset is attached to the
            # mirrored panel at the far end of the grid.
            axins = axtop[nlc - k - 1].inset_axes([0, 0, 1, 0.67])
            axins.axis("off")
            div = make_axes_locatable(axins)
            cax = div.append_axes("right", size="7%", pad="50%")
            cbar = fig.colorbar(im, cax=cax, orientation="vertical")
            cbar.set_label("intensity", fontsize=8)
            cbar.set_ticks([0.75, 1])
            cbar.ax.tick_params(labelsize=6)
            axbot[k].spines["top"].set_visible(False)
            axbot[k].spines["right"].set_visible(False)
            axbot[k].set_xlabel("rotations", fontsize=8)
            axbot[k].set_ylabel("flux [ppt]", fontsize=8)
            axbot[k].set_xticks([0, 1, 2, 3, 4])
            # NOTE(review): `tick.label` was removed in Matplotlib 3.8
            # (use tick.label1) — confirm the pinned version.
            for tick in (
                axbot[k].xaxis.get_major_ticks()
                + axbot[k].yaxis.get_major_ticks()
            ):
                tick.label.set_fontsize(6)
            axbot[k].tick_params(direction="in")
        else:
            axbot[k].axis("off")
    # Hide any unused grid cells.
    for k in range(nlc, len(axtop)):
        axtop[k].axis("off")
        axbot[k].axis("off")
    for axis in ax.flatten():
        axis.set_rasterization_zorder(99)
    return fig
def plot_latitude_pdf(results, **kwargs):
    """
    Plot posterior draws from the latitude hyperdistribution.

    Resamples the nested-sampling chain to equal weights, draws
    `nlat_samples` (a, b) pairs, and overplots the implied latitude pdf
    of each draw against the true generating distribution.
    """
    # Get kwargs
    kwargs = update_with_defaults(**kwargs)
    plot_kwargs = kwargs["plot"]
    gen_kwargs = kwargs["generate"]
    mu_true = gen_kwargs["latitude"]["mu"]
    sigma_true = gen_kwargs["latitude"]["sigma"]
    nlat_pts = plot_kwargs["nlat_pts"]
    nlat_samples = plot_kwargs["nlat_samples"]

    # Resample to equal weight
    samples = np.array(results.samples)
    try:
        weights = np.exp(results["logwt"] - results["logz"][-1])
    except:
        # NOTE(review): bare except — presumably covers results objects
        # that expose "weights" directly; narrow it to the expected
        # exception type(s).
        weights = results["weights"]
    samples = dyfunc.resample_equal(samples, weights)

    # Function to compute the pdf for a draw; compiled once below.
    _draw_pdf = lambda x, a, b: StarryProcess(a=a, b=b).latitude.pdf(x)
    _x = tt.dvector()
    _a = tt.dscalar()
    _b = tt.dscalar()
    draw_pdf = theano.function([_x, _a, _b], _draw_pdf(_x, _a, _b))
    x = np.linspace(-89.9, 89.9, nlat_pts)
    # The true pdf: a normal mirrored across the equator.
    if np.isfinite(sigma_true):
        pdf_true = 0.5 * (
            Normal.pdf(x, mu_true, sigma_true)
            + Normal.pdf(x, -mu_true, sigma_true)
        )
    else:
        # Isotropic (special case)
        pdf_true = 0.5 * np.cos(x * np.pi / 180) * np.pi / 180

    # Draw sample pdfs; columns 1 and 2 of the chain hold (a, b).
    pdf = np.empty((nlat_samples, nlat_pts))
    for k in range(nlat_samples):
        idx = np.random.randint(len(samples))
        pdf[k] = draw_pdf(x, samples[idx, 1], samples[idx, 2])

    # Plot
    fig, ax = plt.subplots(1)
    for k in range(nlat_samples):
        ax.plot(x, pdf[k], "C0-", lw=1, alpha=0.05, zorder=-1)
    ax.plot(x, pdf_true, "C1-", label="truth")
    # Invisible line purely to get a "samples" legend entry.
    ax.plot(x, np.nan * x, "C0-", label="samples")
    ax.legend(loc="upper right")
    ax.set_xlim(-90, 90)
    xticks = [-90, -75, -60, -45, -30, -15, 0, 15, 30, 45, 60, 75, 90]
    ax.set_xticks(xticks)
    ax.set_xticklabels(["{:d}$^\circ$".format(xt) for xt in xticks])
    ax.set_xlabel("latitude", fontsize=16)
    ax.set_ylabel("probability", fontsize=16)

    # Constrain y lims, ignoring the most extreme sample pdfs
    # (90th percentile of all plotted values).
    mx1 = np.max(pdf_true)
    mx2 = np.sort(pdf.flatten())[int(0.9 * len(pdf.flatten()))]
    mx = max(2.0 * mx1, 1.2 * mx2)
    ax.set_ylim(-0.1 * mx, mx)
    ax.set_rasterization_zorder(1)
    return fig
def plot_trace(results, **kwargs):
    """
    Plot the nested sampling trace.

    Parameters
    ----------
    results : dynesty results object (samples + weights).
    kwargs : merged with the project defaults; the "generate" section
        supplies the true parameter values overplotted on the trace.
    """
    # Get kwargs
    kwargs = update_with_defaults(**kwargs)
    gen_kwargs = kwargs["generate"]
    labels = ["r", "a", "b", "c", "n", "bm", "blv"]

    # Get truths
    try:
        a, b = StarryProcess().latitude._transform.transform(
            gen_kwargs["latitude"]["mu"], gen_kwargs["latitude"]["sigma"]
        )
    except Exception:
        # Was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; only a failing transform
        # (e.g. isotropic, non-finite sigma) is expected here.
        a = np.nan
        b = np.nan
    truths = [
        gen_kwargs["radius"]["mu"],
        a,
        b,
        gen_kwargs["contrast"]["mu"],
        gen_kwargs["nspots"]["mu"],
        np.nan,
        np.nan,
    ]
    ndim = results.samples.shape[-1]
    fig, _ = dyplot.traceplot(
        results, truths=truths[:ndim], labels=labels[:ndim]
    )
    return fig
def plot_corner(results, transform_beta=False, **kwargs):
    """
    Plot the posterior corner plot.

    With ``transform_beta=True`` the latitude shape parameters (a, b)
    in columns 1 and 2 are converted to the equivalent Gaussian
    (mode, std) via ``beta2gauss`` before plotting.
    """
    # Get kwargs
    kwargs = update_with_defaults(**kwargs)
    gen_kwargs = kwargs["generate"]
    sample_kwargs = kwargs["sample"]
    plot_kwargs = kwargs["plot"]
    # Per-dimension plot ranges; the bare floats are interpreted by
    # `corner` as the fraction of samples to include.
    span = [
        (sample_kwargs["rmin"], sample_kwargs["rmax"]),
        (sample_kwargs["amin"], sample_kwargs["amax"]),
        (sample_kwargs["bmin"], sample_kwargs["bmax"]),
        (sample_kwargs["cmin"], sample_kwargs["cmax"]),
        (sample_kwargs["nmin"], sample_kwargs["nmax"]),
        0.995,
        0.995,
    ]
    labels = [
        r"$r$",
        r"$a$",
        r"$b$",
        r"$c$",
        r"$n$",
        r"$\mu_b$",
        r"$\ln\sigma^2_b$",
    ]

    # Get truths
    # NOTE(review): `sp` is never used below — gauss2beta is called
    # directly; consider removing it.
    sp = StarryProcess()
    try:
        a, b = gauss2beta(
            gen_kwargs["latitude"]["mu"], gen_kwargs["latitude"]["sigma"]
        )
    except:
        # NOTE(review): bare except — narrow to the expected failure
        # (isotropic / non-finite sigma).
        a = np.nan
        b = np.nan
    truths = [
        gen_kwargs["radius"]["mu"],
        a,
        b,
        gen_kwargs["contrast"]["mu"],
        gen_kwargs["nspots"]["mu"],
        np.nan,
        np.nan,
    ]
    samples = np.array(results.samples)
    ndim = samples.shape[-1]
    if transform_beta:
        # Transform from `a, b` to `mode, std`
        a = samples[:, 1]
        b = samples[:, 2]
        mu, sigma = beta2gauss(a, b)
        samples[:, 1] = mu
        samples[:, 2] = sigma
        labels[1] = r"$\mu_\phi$"
        labels[2] = r"$\sigma_\phi$"
        if np.isfinite(gen_kwargs["latitude"]["sigma"]):
            truths[1] = gen_kwargs["latitude"]["mu"]
            truths[2] = gen_kwargs["latitude"]["sigma"]
        else:
            truths[1] = np.nan
            truths[2] = np.nan
        span[1] = (0, 90)
        span[2] = (0, 45)

    # Get sample weights
    try:
        weights = np.exp(results["logwt"] - results["logz"][-1])
    except:
        # NOTE(review): bare except — narrow to the expected exception.
        weights = results["weights"]
    fig = corner(
        samples[:, :ndim],
        plot_datapoints=False,
        plot_density=False,
        truths=truths[:ndim],
        labels=labels[:ndim],
        range=span[:ndim],
        fill_contours=True,
        weights=weights,
        smooth=2.0,
        smooth1d=2.0,
        bins=100,
        hist_kwargs=dict(lw=1),
        truth_color="#ff7f0e",
        **plot_kwargs
    )
    return fig
def plot_inclination_pdf(data, inc_results, **kwargs):
    """
    Plot the per-light-curve inclination posteriors.

    One panel per light curve: every posterior-sample probability curve
    (each normalized to its own peak) plus a vertical line at the true
    inclination taken from *data*.
    """
    # Get the arrays
    inc = inc_results["inc"]
    lp = inc_results["lp"]

    # Plot, at most 10 panels per row.
    nlc = lp.shape[0]
    if nlc > 10:
        nrows = int(np.ceil(nlc / 10))
    else:
        nrows = 1
    fig, ax = plt.subplots(
        nrows,
        min(nlc, 10),
        figsize=(min(nlc, 10) + 2, nrows),
        sharex=True,
        sharey=True,
    )
    ax = ax.flatten()
    for n in range(lp.shape[0]):
        for j in range(lp.shape[1]):
            ax[n].plot(
                inc, np.exp(lp[n, j] - lp[n, j].max()), "C0-", lw=1, alpha=0.25
            )
        ax[n].axvline(data["incs"][n], color="C1")
        ax[n].margins(0.1, 0.1)
        if n == 40:
            # NOTE(review): hard-coded panel index — only subplot 40
            # gets spines/labels/ticks while every other panel is
            # switched off in the else branch below; this looks like a
            # leftover from one specific batch size. Confirm the
            # intended condition.
            ax[n].spines["top"].set_visible(False)
            ax[n].spines["right"].set_visible(False)
            ax[n].set_xlabel("inclination", fontsize=10)
            ax[n].set_ylabel("probability", fontsize=10)
            xticks = [0, 30, 60, 90]
            ax[n].set_xticks(xticks)
            ax[n].set_xticklabels([r"{}$^\circ$".format(xt) for xt in xticks])
            ax[n].set_yticks([])
            # NOTE(review): `tick.label` was removed in Matplotlib 3.8.
            for tick in ax[n].xaxis.get_major_ticks():
                tick.label.set_fontsize(8)
        else:
            ax[n].axis("off")
    return fig
def plot_batch(path):
    """
    Plot the results of a batch run.

    Reads every run directory under *path* and writes:
      - calibration_bias.pdf: histograms of the posterior means and of
        the normalized posterior errors for the five main parameters,
      - calibration_corner.png: every per-run posterior plus the
        cumulative posterior on a single corner plot,
      - inclinations.pdf (only when inclination results exist): the
        normalized inclination posterior-error histogram.
    """
    # Get the posterior means and covariances (w/o baseline mean and var)
    files = glob.glob(os.path.join(path, "*", "mean_and_cov.npz"))
    mean = np.empty((len(files), 5))
    cov = np.empty((len(files), 5, 5))
    for k, file in enumerate(files):
        data = np.load(file)
        mean[k] = data["mean"][:5]
        cov[k] = data["cov"][:5, :5]

    # Get the true values (all runs share the same generating kwargs,
    # so the first run's kwargs.json is representative).
    kwargs = update_with_defaults(
        **json.load(
            open(files[0].replace("mean_and_cov.npz", "kwargs.json"), "r")
        )
    )
    truths = [
        kwargs["generate"]["radius"]["mu"],
        kwargs["generate"]["latitude"]["mu"],
        kwargs["generate"]["latitude"]["sigma"],
        kwargs["generate"]["contrast"]["mu"],
        kwargs["generate"]["nspots"]["mu"],
    ]
    labels = [r"$r$", r"$\mu_\phi$", r"$\sigma_\phi$", r"$c$", r"$n$"]

    # Misc plotting kwargs
    batch_bins = kwargs["plot"]["batch_bins"]
    batch_alpha = kwargs["plot"]["batch_alpha"]
    batch_nsig = kwargs["plot"]["batch_nsig"]

    # Plot the distribution of posterior means & variances
    fig, ax = plt.subplots(
        2,
        len(truths) + 1,
        figsize=(16, 6),
        gridspec_kw=dict(width_ratios=[1, 1, 1, 1, 1, 0.01]),
    )
    fig.subplots_adjust(hspace=0.4)
    for n in range(len(truths)):
        # Distribution of means
        ax[0, n].hist(
            mean[:, n], histtype="step", bins=batch_bins, lw=2, density=True
        )
        ax[0, n].axvline(np.mean(mean[:, n]), color="C0", ls="--")
        ax[0, n].axvline(truths[n], color="C1")

        # Distribution of errors (should be ~ std normal)
        deltas = (mean[:, n] - truths[n]) / np.sqrt(cov[:, n, n])
        ax[1, n].hist(
            deltas,
            density=True,
            histtype="step",
            range=(-4, 4),
            bins=batch_bins,
            lw=2,
        )
        # Reference standard-normal histogram for visual comparison.
        ax[1, n].hist(
            np.random.randn(10000),
            density=True,
            range=(-4, 4),
            bins=batch_bins,
            histtype="step",
            lw=2,
        )
        ax[0, n].set_title(labels[n], fontsize=16)
        ax[0, n].set_xlabel("posterior mean")
        ax[1, n].set_xlabel("posterior error")
        ax[0, n].set_yticks([])
        ax[1, n].set_yticks([])

    # Tweak appearance: the extra-thin final column only hosts legends.
    ax[0, -1].axis("off")
    ax[0, -1].plot(0, 0, "C0", ls="--", label="mean")
    ax[0, -1].plot(0, 0, "C1", ls="-", label="truth")
    ax[0, -1].legend(loc="center left")
    ax[1, -1].axis("off")
    ax[1, -1].plot(0, 0, "C0", ls="-", lw=2, label="measured")
    ax[1, -1].plot(0, 0, "C1", ls="-", lw=2, label=r"$\mathcal{N}(0, 1)$")
    ax[1, -1].legend(loc="center left")
    fig.savefig(
        os.path.join(path, "calibration_bias.pdf"), bbox_inches="tight"
    )
    plt.close()

    # Now let's plot all of the posteriors on a corner plot
    files = glob.glob(os.path.join(path, "*", "results.pkl"))
    samples = [None for k in range(len(files))]
    # Seed for the running minimum sample count across runs.
    # NOTE(review): stays a float (1e9) if the file list is empty.
    nsamp = 1e9
    ranges = [None for k in range(len(files))]
    for k in tqdm(
        range(len(files)), disable=bool(int(os.getenv("NOTQDM", "0")))
    ):

        # Get the samples (w/o baseline mean and var)
        with open(files[k], "rb") as f:
            results = pickle.load(f)
        samples[k] = np.array(results.samples)[:, :5]
        # Convert the latitude (a, b) columns to (mode, std) in place.
        samples[k][:, 1], samples[k][:, 2] = beta2gauss(
            samples[k][:, 1], samples[k][:, 2]
        )
        try:
            weights = np.exp(results["logwt"] - results["logz"][-1])
        except:
            # NOTE(review): bare except — narrow to the expected
            # exception for results exposing "weights" directly.
            weights = results["weights"]
        samples[k] = dyfunc.resample_equal(samples[k], weights)
        np.random.shuffle(samples[k])
        if len(samples[k]) < nsamp:
            nsamp = len(samples[k])

        # Get the 4-sigma ranges
        mu = np.mean(samples[k], axis=0)
        std = np.std(samples[k], axis=0)
        ranges[k] = np.array([mu - batch_nsig * std, mu + batch_nsig * std]).T

    # We need all runs to have the same number of samples
    # so our normalizations are correct in the histograms
    print("Keeping {} samples from each run.".format(nsamp))

    # Set plot limits to the maximum of the ranges
    ranges = np.array(ranges)
    ranges = np.array(
        [np.min(ranges[:, :, 0], axis=0), np.max(ranges[:, :, 1], axis=0)]
    ).T
    span = [
        (kwargs["sample"]["rmin"], kwargs["sample"]["rmax"]),
        (0, 90),
        (0, 45),
        (kwargs["sample"]["cmin"], kwargs["sample"]["cmax"]),
        (kwargs["sample"]["nmin"], kwargs["sample"]["nmax"]),
    ]

    # Go! color(i, alpha) -> matplotlib cycle color Ci as "#RRGGBBAA".
    color = lambda i, alpha: "{}{}".format(
        colors.to_hex("C{}".format(i)),
        ("0" + hex(int(alpha * 256)).split("0x")[-1])[-2:],
    )
    fig = None
    cum_samples = np.empty((0, 5))
    for k in tqdm(
        range(len(mean)), disable=bool(int(os.getenv("NOTQDM", "0")))
    ):

        # Plot the 2d hist
        fig = corner(
            samples[k][:nsamp, :5],
            fig=fig,
            labels=labels,
            plot_datapoints=False,
            plot_density=False,
            fill_contours=True,
            no_fill_contours=True,
            color=color(k, 0.1 * batch_alpha),
            contourf_kwargs=dict(),
            contour_kwargs=dict(alpha=0),
            bins=20,
            hist_bin_factor=5,
            smooth=2.0,
            hist_kwargs=dict(alpha=0),
            levels=(1.0 - np.exp(-0.5 * np.array([1.0]) ** 2)),
            range=ranges,
        )

        # Plot the 1d hist; truths are drawn only once, on the last run.
        if k == len(mean) - 1:
            truths_ = truths
        else:
            truths_ = None
        fig = corner(
            samples[k][:nsamp, :5],
            fig=fig,
            labels=labels,
            plot_datapoints=False,
            plot_density=False,
            plot_contours=False,
            fill_contours=False,
            no_fill_contours=True,
            color=color(k, batch_alpha),
            bins=500,
            smooth1d=10.0,
            hist_kwargs=dict(alpha=0.5 * batch_alpha),
            range=ranges,
            truths=truths_,
            truth_color="#ff7f0e",
        )

        # Running list
        cum_samples = np.vstack((cum_samples, samples[k][:nsamp, :5]))

    # Plot the cumulative posterior
    np.random.shuffle(cum_samples)
    fig = corner(
        cum_samples,
        fig=fig,
        labels=labels,
        plot_datapoints=False,
        plot_density=False,
        fill_contours=False,
        no_fill_contours=True,
        color="k",
        contourf_kwargs=dict(),
        contour_kwargs=dict(linewidths=1),
        bins=100,
        hist_bin_factor=5,
        smooth=1.0,
        hist_kwargs=dict(alpha=0),
        levels=(1.0 - np.exp(-0.5 * np.array([1.0]) ** 2)),
        range=ranges,
    )
    fig = corner(
        cum_samples[:nsamp],
        fig=fig,
        labels=labels,
        plot_datapoints=False,
        plot_density=False,
        plot_contours=False,
        fill_contours=False,
        no_fill_contours=True,
        color="k",
        bins=500,
        smooth1d=10.0,
        hist_kwargs=dict(lw=1),
        range=ranges,
    )

    # Fix the axis limits
    ax = np.array(fig.axes).reshape(5, 5)
    for k in range(5):
        axis = ax[k, k]
        # NOTE(review): `line._y` is private Matplotlib API — prefer
        # line.get_ydata(); confirm against the pinned version.
        ymax = np.max([line._y.max() for line in axis.lines])
        axis.set_ylim(0, 1.1 * ymax)
        for axis in ax[:, k]:
            axis.set_xlim(*span[k])
        for axis in ax[k, :k]:
            axis.set_ylim(*span[k])

    # We're done
    fig.savefig(
        os.path.join(path, "calibration_corner.png"),
        bbox_inches="tight",
        dpi=300,
    )

    # Get the inclination posterior means and covariances (if present)
    inc_files = glob.glob(os.path.join(path, "*", "inclinations.npz"))
    if len(inc_files):
        data_files = [
            file.replace("inclinations", "data") for file in inc_files
        ]
        # NOTE(review): `x` is never used below; consider removing.
        x = np.linspace(-90, 90, 1000)
        deltas = []

        # Compute the "posterior error" histogram
        for k in tqdm(
            range(len(inc_files)), disable=bool(int(os.getenv("NOTQDM", "0")))
        ):
            data = np.load(data_files[k])
            results = np.load(inc_files[k])
            truths = data["incs"]
            inc = results["inc"]
            lp = results["lp"]
            nlc = len(truths)
            nsamples = lp.shape[1]
            for n in range(nlc):
                for j in range(nsamples):
                    # Normalize each log-probability curve into a pdf,
                    # then take its first two moments.
                    pdf = np.exp(lp[n, j] - np.max(lp[n, j]))
                    pdf /= np.trapz(pdf)
                    mean = np.trapz(inc * pdf)
                    var = np.trapz(inc ** 2 * pdf) - mean ** 2
                    deltas.append((mean - truths[n]) / np.sqrt(var))

        # Plot it
        fig, ax = plt.subplots(1, figsize=(6, 3))
        ax.hist(deltas, bins=50, range=(-5, 5), density=True, label="measured")
        ax.hist(
            Normal.rvs(size=len(deltas)),
            bins=50,
            range=(-5, 5),
            histtype="step",
            color="C1",
            density=True,
            label=r"$\mathcal{N}(0, 1)$",
        )
        ax.legend(loc="upper right", fontsize=10)
        ax.set_xlabel("inclination posterior error")
        ax.set_ylabel("density")
        fig.savefig(
            os.path.join(path, "inclinations.pdf"), bbox_inches="tight"
        )
|
"""
Copyright (c) 2016 Keitaro AB
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .companies import Companies
from .contacts import Contacts
from .deals import Deals
from .engagements import Engagements
class HubSpotClient(object):
    """Facade bundling the individual HubSpot API clients.

    Exposes `contacts`, `companies`, `deals` and `engagements`
    sub-clients configured with the same credentials, plus the portal
    id and the deal pipeline resolved through the Deals client.
    """

    def __init__(self, api_key, api_url=None, portal_id=None, pipeline=None):
        """Instantiate every sub-client with the shared connection info.

        NOTE(review): the `pipeline` argument is immediately overwritten
        by `get_app_pipeline()` below — confirm whether a caller-supplied
        pipeline should take precedence.
        """
        # Register contacts API client
        self.contacts = Contacts(api_key, api_url, portal_id, pipeline)
        # Register Companies API client
        self.companies = Companies(api_key, api_url, portal_id, pipeline)
        # Register Deals API client
        self.deals = Deals(api_key, api_url, portal_id, pipeline)
        # Register Engagements API client
        self.engagements = Engagements(api_key, api_url, portal_id, pipeline)
        self.portal_id = portal_id
        # Resolve the pipeline via the Deals client (presumably an API
        # call — verify before using this class offline).
        self.pipeline = self.deals.get_app_pipeline()
#imports
from flask import Flask, request, session, redirect, url_for, abort, render_template, flash
from models import Blog, User
from werkzeug.security import generate_password_hash, check_password_hash
from google.appengine.ext import db
import cgi
# configuration
DEBUG = True
app = Flask(__name__)
# NOTE(review): hard-coded session secret checked into source control —
# rotate it and load from configuration/environment instead.
app.secret_key = 'J\x89\x04\x7f\xc7\x13<\x02\xf7\xb1O\x80\xebdQ\x85o5\x18<;J\x1b\x9a'
# app.username = 'des'
# app.password = 'enter'
@app.route('/')
def show_entries():
    """Render the front page listing every blog entry."""
    return render_template('show_entries.html',
                           entries=db.GqlQuery("SELECT * FROM Blog"))
@app.route('/add', methods=['POST'])
def add_entry():
    """Persist a new blog post; requires a logged-in session."""
    if not session.get('logged_in'):
        abort(401)
    blog = Blog()
    # HTML-escape the user input before storage.
    # NOTE(review): cgi.escape is deprecated (removed in Python 3.8);
    # html.escape is the modern equivalent.
    blog.title = cgi.escape(request.form['title'])
    blog.text = cgi.escape(request.form['text'])
    blog.put()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and start a session.

    Looks up the account matching the submitted username and verifies
    the password hash.  On success marks the session as logged in and
    redirects to the entry list; otherwise re-renders the login form
    with an error message.
    """
    error = None
    if request.method == 'POST':
        uname = request.form['username']
        passwd = request.form['password']
        # Look up the account for the *submitted* username.  The old
        # query ("SELECT * FROM User LIMIT 1") fetched an arbitrary
        # first row, letting any username authenticate against that
        # row's password.
        user = db.GqlQuery("SELECT * FROM User WHERE username = :username",
                           username=uname).get()
        if user is None:
            error = "Invalid username"
        elif check_password(user.password, passwd):
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
        else:
            # Previously a wrong password fell through silently with
            # error left as None.
            error = "Invalid password"
    return render_template('login.html', error=error)
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account after validating the submitted form.

    Rejects empty usernames, duplicate usernames, empty passwords and
    mismatched password confirmation; on success stores the hashed
    password and redirects to the login page.
    """
    error = None
    if request.method == 'POST':
        uname = request.form['username']
        passwd = request.form['password']
        spasswd = request.form['spassword']
        usercheck = db.GqlQuery("SELECT * FROM User WHERE username = :username",
                                username=uname).get()
        if len(uname.strip()) == 0:
            error = "Username field cannot be empty"
        elif usercheck is not None:
            error = "Username is already in use. Choose another"
        elif len(passwd) == 0 or len(spasswd) == 0:
            # The original `len(passwd) == 0 | len(spasswd) == 0` parsed
            # as the chained comparison len(passwd) == len(spasswd) == 0
            # (`|` binds tighter than `==`), so it only fired when BOTH
            # fields were empty; either field being empty is an error.
            error = 'Password field cannot be empty'
        elif spasswd != passwd:
            error = 'Passwords do not match'
        else:
            u = User()
            u.username = uname
            # Store only the salted hash, never the plain password.
            u.password = get_hash(passwd)
            u.put()
            flash("Registration was successful")
            return redirect(url_for("login"))
    return render_template('register.html', error=error)
@app.route('/logout')
def logout():
    """End the current session and return to the entry list."""
    # pop() with a default never raises, even when not logged in.
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
#if __name__ == '__main__':
# app.run(debug=True)
def get_hash(password):
    """Return a salted hash of *password* via werkzeug."""
    return generate_password_hash(password)
def check_password(hash, password):
    """Return True when *password* matches the stored werkzeug *hash*."""
    return check_password_hash(hash,password)
|
'''
Inefficient implementation of the HackerRank "Standardize Mobile Number
Using Decorators" problem:
https://www.hackerrank.com/challenges/standardize-mobile-number-using-decorators/problem
'''
def wrapper(f):
    """Decorator that normalizes phone numbers to '+91 xxxxx xxxxx'.

    The wrapped function receives the reformatted list in place of the
    raw one.  Numbers may arrive as 10 bare digits or with a leading
    '0', '91' or '+91' prefix (11-13 characters); in every case the
    significant part is the last 10 characters, so the four duplicated
    length branches of the original collapse to a single slice.
    Entries of any other length are dropped, as before.
    """
    def fun(l):
        num_list = []
        for number in l:
            if 10 <= len(number) <= 13:
                # The last ten characters are the subscriber digits,
                # regardless of which prefix (if any) was supplied.
                digits = number[-10:]
                num_list.append("+91" + " " + digits[0:5] + " " + digits[5:10])
        return f(num_list)
    return fun
#fun = wrapper(fun)
@wrapper
def sort_phone(l):
    """Print the normalized numbers in ascending order, one per line."""
    print('\n'.join(sorted(l)))
if __name__ == '__main__':
    # First input line gives the count N; then N phone numbers follow,
    # one per line.
    l = [input() for _ in range(int(input()))]
    sort_phone(l)
|
import os
import json
import xmlrpclib
import SentimentAnalyzer as sentiment
from xml.dom.minidom import parseString
# Resume fields whose absence should be reported and penalized.
allComponents = [
    'name',
    'firstname',
    'surname',
    'email',
    'address',
    'phone',
]

# Target number of work-experience entries on a resume.
reccomended_WorkExp = 4

# Ordinal ranking of education levels (higher = more advanced).
eduLevels = {
    'hs': 1,
    'college': 2,
    'masters': 3,
    'doctorate': 4,
}
def getResumeJSON(inputPath):
    """Run the external resume parser on *inputPath* and analyze its JSON.

    Builds the platform-specific command line (PowerShell on Windows,
    sh elsewhere), invokes the ResumeParser transducer, loads the JSON
    it writes, runs every analysis pass and writes the combined
    analysis file.  Returns the path of the generated JSON.

    NOTE(review): the command is assembled by string concatenation and
    run through os.system — a path containing spaces or shell
    metacharacters breaks it (and is a shell-injection risk if the
    filename is user-controlled); subprocess with an argument list
    would be safer.
    """
    home = os.getcwd()
    if isWindows():
        # NOTE(review): os.path.join with a single pre-concatenated
        # argument performs no joining; the backslashes here survive
        # only because none form a recognized escape sequence.
        outfile = os.path.join(home + '\static\ResumeJSON\out.json')
        # Everything after the "uploads" directory is the file name.
        temp = inputPath.rfind("uploads")
        filename = inputPath[temp+7:]
        print filename
        inputPath = os.path.join(home + '\static\uploads\\' + filename)
        command = 'powershell ' + home + '\..\..\ResumeParser\ResumeTransducer\createJSON_Win.ps1 ' + inputPath + ' ' + outfile
    else:
        inputPath = home + '/' + inputPath
        outfile = os.path.join(home + '/static/ResumeJSON/out.json')
        command = 'sh ' + home + '/../../ResumeParser/ResumeTransducer/CreateJSON.sh ' + inputPath + ' ' + outfile
    os.system(command)
    with open(outfile) as data_file:
        data = json.load(data_file)
    # Run each analysis pass over the parsed resume.
    missing, contribution = getMissingComponents(data)
    countToModify = getWorkExpCount(data)
    allSentiments = getSentiments(data)
    allSkills = getKeySkills(data)
    eduLevel = getEducationLevel(data)
    writeAnalysis(missingComps=missing,contribution=contribution,workExp=countToModify, sentiments=allSentiments, skills=allSkills,education=eduLevel)
    return outfile
def getMissingComponents(jsonInput):
    """Check the parsed resume for the expected contact fields.

    Performs a case-insensitive substring search over the stringified
    JSON for every entry in the module-level `allComponents` list.

    Returns (missing, contribution): the list of absent components and
    a score contribution of -1 per missing component.

    Uses the parenthesized single-argument print form, which behaves
    identically under Python 2 and 3 (the bare print statements were
    Py2-only).
    """
    print(jsonInput)
    missingComponents = []
    contribution = 0
    rawJson = str(jsonInput).lower()
    for component in allComponents:
        if component in rawJson:
            print('Found ' + component)
        else:
            print('Could not find ' + component)
            missingComponents.append(component)
            contribution -= 1
    return (missingComponents, contribution)
def getKeySkills(jsonInput):
    """Collect the individual skills listed in the resume JSON.

    Each entry under "skills" maps a heading to a comma-separated
    string; every comma-separated token is collected (the full string
    is trimmed first, individual tokens are kept as-is).  Returns []
    when no skills section exists.
    """
    allSkills = []
    if 'skills' in jsonInput:
        for item in jsonInput['skills']:
            # .items() works on both Python 2 and 3; iteritems() was
            # removed in Python 3.
            for k, v in item.items():
                skillList = v.strip()
                for skill in skillList.split(','):
                    allSkills.append(skill)
                    print('SKILL: ' + skill)
    return allSkills
def getSentiments(jsonInput):
    """Collect all work-experience text, dump it to ResumeText.txt, and run
    the external `sentiment` module over it.

    Returns sentiment.sentiments -- shape determined by that module, not
    visible here.  Python 2 (str/bytes handling).
    """
    allSentences = []  # collected but never used below
    alltext = ''
    for item in jsonInput['work_experience']:
        for entry in item:
            if 'text' in entry:  # NOTE(review): tests the KEY string, not the value -- confirm intent
                allSentences.append(item[entry].encode('utf-8').split('.'))
                alltext += item[entry].encode('utf-8')
    outFile = os.path.join('ResumeText.txt')
    open(outFile, 'w').close()  # truncate (redundant: mode 'w' below truncates anyway)
    with open (outFile, 'w') as write:
        write.write(alltext)
        write.close()  # redundant inside `with`
    sentiment.getSentiments(alltext)
    sent = sentiment.sentiments
    return sent
def getWorkExpCount(jsonInput):
    """Return recommended-minus-actual work-experience entry count.

    Positive: fewer entries than recommended; negative: more.  Note the
    zero case prints 'too few entries' (original behavior, kept).
    Prints parenthesized for Py2/Py3 compatibility.
    """
    workExperienceCount = len(jsonInput['work_experience'])
    difference = reccomended_WorkExp - workExperienceCount
    print(difference)
    if difference < 0:
        print('too many entries')
    else:
        print('too few entries')
    return difference
def typoCheck(jsonInput):
    # TODO: typo checking was never implemented; returns None.
    return
def getEducationLevel(jsonInput):
    """Scan education entries and return the highest education level found.

    Levels come from the module-level eduLevels map (hs=1 .. doctorate=4);
    0 means nothing was recognized.  Uses .items() so the loop runs on
    Python 2 and 3, and lowercases each value once instead of per check.

    Bug fix: the original searched for the misspelling 'highscool', which
    could never match 'highschool' in a resume.
    """
    level = 0
    for item in jsonInput['education_and_training']:
        for k, v in item.items():
            text = v.lower()
            if (' hs ' in text or 'highschool' in text) and level <= 0:
                level = eduLevels['hs']
            if (' bs ' in text or 'bachelor' in text) and level <= 1:
                level = eduLevels['college']
            if (' ms ' in text or 'master' in text) and level <= 2:
                level = eduLevels['masters']
            if (' phd ' in text or 'doctorate' in text) and level <= 3:
                level = eduLevels['doctorate']
    return level
def getLicenses():
    # TODO: license extraction was never implemented; returns None.
    return
def writeAnalysis(missingComps, contribution, workExp, sentiments, skills, education):
    """Write the resume analysis summary to static/ResumeJSON/ResumeAnalysis.txt.

    One 'key:value' line per datum; `sentiments` is written verbatim on its
    own line (assumed to be a string -- TODO confirm against getSentiments).
    Returns None.

    The original's separate truncation pass (open(...,'w').close()) and the
    close() inside the `with` block were redundant and are removed: mode 'w'
    truncates and the context manager closes the file.
    """
    home = os.getcwd()
    home += '/static/ResumeJSON/'
    outFile = home + 'ResumeAnalysis.txt'
    with open(outFile, 'w') as write:
        for missing in missingComps:
            write.write('missing:' + missing + '\n')
        write.write('missing_points:' + str(contribution) + '\n')
        write.write(sentiments + '\n')
        for skill in skills:
            write.write('skill:' + skill.strip() + '\n')
        write.write('work_experience:' + str(workExp) + '\n')
        write.write('education_level:' + str(education) + '\n')
    return
def isWindows():
    """Return True when os.name indicates a Windows ('nt') platform."""
    return 'nt' in os.name
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
from scrapy.exceptions import DropItem
class FilterWordsPipeline(object):
    """A pipeline for filtering out items which contain certain words in their
    description."""

    # Items whose description mentions any of these are dropped.
    words_to_filter = ['company', 'Date']

    def process_item(self, item, spider):
        """Drop the item when a filtered word occurs in its description,
        otherwise pass it through unchanged.

        Bug fix: the original returned the item on the first loop iteration
        without ever testing for the filtered words, so nothing was filtered.
        """
        for word in self.words_to_filter:
            if word in str(item.get('description', '')):
                raise DropItem("Contains forbidden word: %s" % word)
        return item
#!/usr/bin/env python
#Written by PJ
import string
import flask
from flask import request, flash, jsonify, Flask, redirect, current_app
import requests
import json
from functools import wraps
from flask.ext.restful import Resource, Api
# import relevance
import pandas as pd
import numpy as np
import re
import math
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
import html2text
OVERVIEW = True  # legacy toggle between overview/tag relevance (see commented code below)
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator factory that adds CORS headers to a Flask view.

    origin: allowed origin string, or an iterable that is joined with ', '.
    methods/headers: explicit Access-Control values; methods default to
    whatever the app's automatic OPTIONS response advertises.
    max_age: preflight cache lifetime (seconds or timedelta).
    Python 2 only (relies on `basestring`).
    """
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, basestring):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, basestring):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()
    def get_methods():
        # Fall back to the default OPTIONS response's Allow header.
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']
    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp
            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp
        # Let this wrapper (not Flask) handle OPTIONS requests.
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
application = flask.Flask(__name__)
app = application  # alias used throughout the module
app.secret_key = 'some_secret'  # NOTE(review): hard-coded secret; move to config
app.debug = True  # NOTE(review): debug mode enabled; disable in production
api = Api(app)
#api.decorators=[cors.crossdomain(origin='*')]
api_key = 'ndcq6rwvpenbagu7p9rkxpw6'  # CrunchBase API key (hard-coded)
#user_key = '41bf8e5a3861db3fd954d9b31ca64e36'
numResults = 50  # result budget; pages of 10 are derived from it in getCompaniesList
relevance_threshhold = 0
#returned_json = {}
idfs_ov = {}   # idf table over overviews; rebuilt per search in searchAPI()
idfs_tag = {}  # idf table over tag lists; rebuilt per search in searchAPI()
fn = 'data/master_all.csv'
data = pd.read_csv(fn,error_bad_lines=False)
# Whitespace-only cells -> NaN -> 'a' (Python 2: basestring).
data = data.applymap(lambda x: np.nan if isinstance(x, basestring) and x.isspace() else x)
data = data.fillna('a')
def support_jsonp(f):
    """Wrap a JSON view so that ?callback=fn returns a JSONP response."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        callback = request.args.get('callback', False)
        if not callback:
            return f(*args, **kwargs)
        content = str(callback) + '(' + str(f(*args,**kwargs).data) + ')'
        return current_app.response_class(content, mimetype='application/javascript')
    return wrapper
class SearchAPI(Resource):
    """REST resource: POST a 'restQuery' form field, get ranked companies back."""

    def post(self):
        query = request.form['restQuery']
        return searchAPI(query)
#api.add_resource(TodoSimple, '/<string:todo_id>')
#api.add_resource(SearchAPI, '/api')
api.add_resource(SearchAPI, '/searchAPI')  # REST endpoint; same path is also routed to searchAPI() below
@app.route('/searchAPI', methods=['POST','GET'])
@crossdomain(origin='*')
@support_jsonp
def searchAPI(restQuery):
    """Run a company search and return the ranked results as JSON.

    Rebuilds the module-level idf tables for this result set before scoring.
    """
    global idfs_ov
    global idfs_tag
    category_code = request.form.get('category_code', 'all')
    companiesList = getCompaniesList(restQuery, numResults, category_code)
    idfs_ov = find_idfs_overview(companiesList)
    idfs_tag = find_idfs(companiesList)
    ranked = getListSortedByRelevance(companiesList, restQuery)
    return json.dumps(ranked, ensure_ascii=False)
def getCategory_codes():
    """Distinct category_code values present in the cleansed dataset."""
    groups = data_all_cleansed.groupby('category_code').groups
    return groups.keys()
@app.route('/', methods=['GET'])
def home():
    """Landing page listing all category codes."""
    return flask.render_template('home.html', list=getCategory_codes())
@app.route('/home', methods=['GET'])
def stuff():
    """Alias route: /home renders the same page as /."""
    return home()
@app.route('/search', methods=['POST','GET'])
def search():
    """HTML search endpoint: look up companies and render results.html.

    Bug fix: the original referenced an undefined name `restQuery` when the
    form had no 'searchTerm' field, raising NameError; it now defaults to ''.
    """
    if 'searchTerm' in request.form:
        print("rest API")
        searchTerm = request.form['searchTerm']
    else:
        searchTerm = ''
    category_code = request.form.get('category_code', 'all')
    # Fetch, stamp synthetic founded_year values, and annotate relevance
    # scores (getListSortedByRelevance mutates the dicts in place).
    # Note the template receives `results`, not the sorted list.
    companiesList = getCompaniesList(searchTerm, numResults, category_code)
    results = getResults(companiesList)
    sortedResults = getListSortedByRelevance(results, searchTerm)
    return flask.render_template(
        'results.html', searchTerm=searchTerm, results=results)
def getCompaniesList(query, num, category_code):
    """Page through the CrunchBase search API and collect company records.

    query: search term (inserted straight into the request URL).
    num: requested result budget; pages 1 .. num//10 - 1 of 10 results each
        are fetched ('//' keeps integer pages on both Python 2 and 3).
    category_code: accepted for interface compatibility; not used here.
    Returns a list of dicts with name, image/logo data, description,
    country/state, permalink, plus tag_list and overview pulled from the
    local CSV data.
    """
    combined_results = []
    for page in range(1, num // 10):
        searchAPI = 'http://api.crunchbase.com/v/1/search.js?query=' + query + '&page=' + str(page) + '&api_key=' + api_key
        r = requests.get(searchAPI)
        if (r.status_code != 200) or r.text is None:
            continue  # skip failed pages silently, as before
        returned_json = json.loads(r.text, strict=False)
        # Bug fix: the original iterated range(0, len(results)-1), always
        # dropping the last result of every page.
        for result in returned_json["results"]:
            if result.get("namespace") == "company":
                combined_results.append(_build_company_record(result))
    return combined_results

def _build_company_record(company):
    """Flatten one raw API 'company' result into the record dict used above."""
    record = {"name": company["name"]}
    # Logo: available_sizes[0] is (dimensions, url); missing/empty -> None.
    # (The truthiness check also guards against an empty list, which would
    # have crashed the original's [0] indexing.)
    record["image"] = None
    record["logoHeight"] = None
    record["logoWidth"] = None
    image = company.get("image")
    if image is not None and image.get("available_sizes"):
        logoDimensions = image["available_sizes"][0][0]
        record["image"] = image["available_sizes"][0][1]
        record["logoHeight"] = logoDimensions[1]
        record["logoWidth"] = logoDimensions[0]
    # Missing keys and explicit nulls both map to None.
    record["description"] = company.get("description")
    record["permalink"] = company.get("permalink")
    offices = company.get("offices")
    if offices:
        record["country"] = offices[0]["country_code"]
        record["state"] = offices[0]["state_code"]
    else:
        record["country"] = None
        record["state"] = None
    # Tag list and overview come from the local CSV, not the API response.
    record["tag_list"] = getTagListFromPandas(record["name"])
    record["overview"] = getOverviewFromPandas(record["name"])
    return record
# Second, pre-filtered dataset used for tag/overview lookups.
fn_rel = 'data/relevant_master_all.csv'
data_rel = pd.read_csv(fn_rel, error_bad_lines=False, header=0)
# Whitespace-only cells -> NaN -> 'a' (Python 2: basestring).
d = data_rel.applymap(lambda x: np.nan if isinstance(x, basestring) and x.isspace() else x)
data_all_cleansed = d.fillna('a')
DATA_PATH = './'
def load_lines(path):
    """Read DATA_PATH/path and return its lines without trailing CR/LF."""
    full_path = "%s/%s" % (DATA_PATH, path)
    with open(full_path) as handle:
        return [raw.rstrip('\r\n') for raw in handle]
STOPWORDS_PATH = "stopwords.txt"
stopwords = set(load_lines(STOPWORDS_PATH))  # loaded once at import time
split_regex = r'\W+'  # token boundary: runs of non-word characters
def simple_tokenize(string):
    """Lowercase *string*, split on non-word runs, and drop empty tokens."""
    pieces = re.split(split_regex, string.lower())
    return [tok for tok in pieces if len(tok)]
# print simple_tokenize(
# print simple_tokenize(test_query)
def tokenize(string):
    """simple_tokenize() with stopwords removed."""
    return [tok for tok in simple_tokenize(string) if tok not in stopwords]
# print tokenize(tl_string)
# print tokenize(test_query)
def tf(tokens):
    """Relative term frequency: token -> count / len(tokens) (floats)."""
    counts = {}
    for tok in tokens:
        counts[tok] = counts.get(tok, 0.0) + 1
    return {tok: cnt / len(tokens) for tok, cnt in counts.items()}
def find_idfs(companiesList):
    """Inverse document frequency over the companies' tag lists.

    Each token is counted at most once per company; the weight is
    len(companiesList) / document_count (floats).
    """
    doc_counts = {}
    total = float(len(companiesList))
    for company in companiesList:
        tag_string = ' '.join(map(str, company['tag_list']))
        for tok in set(tokenize(tag_string)):
            doc_counts[tok] = doc_counts.get(tok, 0.0) + 1
    return {tok: total / doc_counts[tok] for tok in doc_counts}
def find_idfs_name(companyName):
    """IDF-style weights for the tokens of a single company name.

    Each distinct token is counted once, so every weight equals
    len(tokens) / 1.0 -- kept structurally for parity with the other idf
    builders.
    """
    tokens = tokenize(companyName)
    counts = {}
    for tok in set(tokens):
        counts[tok] = counts.get(tok, 0.0) + 1
    total = float(len(tokens))
    return {tok: total / counts[tok] for tok in counts}
def find_idfs_overview(companiesList):
    """Inverse document frequency over the companies' overview texts.

    Same scheme as find_idfs(), but fed from the 'overview' field.
    """
    doc_counts = {}
    total = float(len(companiesList))
    for company in companiesList:
        overview_string = ' '.join(map(str, company['overview']))
        for tok in set(tokenize(overview_string)):
            doc_counts[tok] = doc_counts.get(tok, 0.0) + 1
    return {tok: total / doc_counts[tok] for tok in doc_counts}
def tfidf(tokens, idfs):
    """tf-idf weight per token; 0 when the token has no idf entry."""
    tfs = tf(tokens)
    weights = {}
    for tok in tfs:
        weights[tok] = tfs[tok] * idfs[tok] if tok in idfs else 0
    return weights
# test_weights = tfidf(tokenize(test_query), idfs)
# print test_weights
def dotprod(a, b):
    """Sum of products over the keys common to both sparse vectors."""
    total = 0
    for key, value in a.items():
        if key in b:
            total += value * b[key]
    return total
def norm(a):
    """Euclidean norm of sparse vector *a*."""
    squared = dotprod(a, a)
    return math.sqrt(squared)
def cossim(a, b):
    """Cosine similarity; 0 when either vector has zero norm."""
    norm_a = norm(a)
    norm_b = norm(b)
    if norm_a == 0 or norm_b == 0:
        return 0
    return dotprod(a, b) / norm_a / norm_b
def cosine_similarity(string1, string2, idfs):
    """Cosine similarity between the tf-idf vectors of two strings."""
    weights1 = tfidf(tokenize(string1), idfs)
    weights2 = tfidf(tokenize(string2), idfs)
    return cossim(weights1, weights2)
def getRelevance(companiesList,searchQuery):
    """Score every company against *searchQuery*; return the list sorted by
    total relevance, descending.

    Mutates each company dict in place, adding 'relevanceByTag',
    'relevanceByOverview' and 'relevance'.  Prints a verbose trace and
    appends non-zero score rows to scores.txt.  Relies on the module-level
    idfs_ov / idfs_tag tables built by searchAPI().  Python 2 (print
    statements).
    """
    relevances = {}  # unused leftover from an earlier version
    for company in companiesList:
        company_name = company['name']
        company_name_string = str(company_name)
        company_tags = company['tag_list']
        company_tags_string = ' '.join(map(str, company_tags))
        company_ov = company['overview']
        company_ov_string = ' '.join(map(str, company_ov))
        global idfs_ov
        global idfs_tag
        # print company_tags_string
        #relevances[company_name] = 100.0 * cosine_similarity(searchQuery, company_tags_string, idfs)
        #return relevances
        rel_by_tag = 100.0 * cosine_similarity(searchQuery, company_tags_string, idfs_tag)
        rel_by_ov = 100.0 * cosine_similarity(searchQuery, company_ov_string, idfs_ov)
        idfs_name = find_idfs_name(company_name)
        # Name bonus only when the query shares at least one token with the name.
        any_in = [i for i in tokenize(searchQuery) if i in tokenize(company_name_string)]
        if any_in:
            rel_by_name = len(tokenize(company_name_string)) / 2.0 + 20.0 * cosine_similarity(searchQuery, company_name_string, idfs_name)
        else:
            rel_by_name = 0.0
        company["relevanceByTag"] = rel_by_tag
        company["relevanceByOverview"] = rel_by_ov
        # Blend: 80/20 overview/tag when both fire; otherwise whichever is non-zero.
        if rel_by_ov != 0 and rel_by_tag != 0:
            company['relevance'] = 0.8 * rel_by_ov + 0.2 * rel_by_tag
        elif rel_by_tag == 0 and rel_by_ov == 0:
            company['relevance'] = 0.0
        else:
            company['relevance'] = max(rel_by_ov, rel_by_tag)
        company['relevance'] += rel_by_name
    newList = sorted(companiesList, key=lambda k: k['relevance'], reverse=True)
    print '--------------------------------------------SPITTING OUT COMPANY RELEVANCES ---------------------------------------'
    with open("scores.txt", "a") as f:
        for i, l in enumerate(newList):
            print '--------------------NEXT COMPANY -------------------------'
            print 'Company Number ', i
            print 'Company Name: ' + l['name']
            print 'Overview Relevance ', l['relevanceByOverview']
            print 'Tag Relevance ', l['relevanceByTag']
            # print 'Description: ' + str(l['description'])
            print 'Tag List ' + str(l['tag_list'])
            print 'Overview ' + str(l['overview'])
            print 'Total Relevance ', l['relevance']
            if l['relevanceByTag'] != 0 or l['relevanceByOverview'] != 0:
                f.write(str(l['relevanceByTag']) + ', ' + str(l['relevanceByOverview']) + ', ' + str(l['relevance']) + '\n')
    return newList
def getRelevanceByOverview(companiesList,searchQuery):
    """Legacy scorer by overview only; superseded by getRelevance().

    NOTE(review): references a module-level `idfs` that is never defined in
    this file -- calling this function would raise NameError.  Kept as-is.
    """
    relevances = {}  # unused leftover
    for company in companiesList:
        company_name = company['name']
        company_ov = company['overview']
        company_ov_string = ' '.join(map(str, company_ov))
        global idfs
        # print company_tags_string
        #relevances[company_name] = 100.0 * cosine_similarity(searchQuery, company_tags_string, idfs)
        #return relevances
        company["relevanceByOverview"] = 100.0 * cosine_similarity(searchQuery, company_ov_string, idfs)
    newList = sorted(companiesList, key=lambda k: k['relevanceByOverview'], reverse=True)
    for l in newList:
        print l
    return newList
def getTagListFromPandas(companyName):
    """Look up a company's tag_list in the local CSV data.

    Returns a single-element list holding the space-joined tag string.
    """
    row = data_all_cleansed[data_all_cleansed['name'] == companyName]
    joined = ' '.join(map(str, row['tag_list'].tolist()))
    return [str(joined)]
def getOverviewFromPandas(companyName):
    """Fetch a company's overview text from the local CSV, stripped of HTML.

    Returns a single-element list containing the text produced by
    html2text.  Python 2 only (`unicode`).
    """
    locate_row = data_all_cleansed[data_all_cleansed['name'] == companyName]
    ov = locate_row['overview'].tolist()
    ov_string = ' '.join(map(str, ov))
    # ov = unicode(ov, errors='ignore')
    # print ov_string
    ov_string = unicode(ov_string,errors='ignore')  # drop undecodable bytes
    # return 'happy sad'
    return [str(html2text.html2text(ov_string))]
def getListSortedByRelevance(companiesList, searchQuery):
    """Delegate to getRelevance (blended overview+tag scoring)."""
    return getRelevance(companiesList, searchQuery)
def getResults(companiesList):
    """Stamp each company with a synthetic founded_year (1960, 1970, ...).

    Mutates the dicts in place and returns the same list.
    """
    for position, company in enumerate(companiesList, start=1):
        company['founded_year'] = 1950 + 10 * position
    return companiesList
def error():
    # Debug helper used by getTEST when the CrunchBase API call fails (Py2 print).
    print "OH SHI ERROR"
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page."""
    body = flask.render_template('404.html')
    return body, 404
def getTEST(company):
    """Fetch a single company from the CrunchBase API (debug helper).

    NOTE(review): the parsed JSON is assigned but never returned or used.
    """
    companyAPI = 'http://api.crunchbase.com/v/1/company/'+company+'.js?api_key='+api_key
    r = requests.get(companyAPI)
    if (r.status_code!= 200) or r.text is None:
        error()
    returned_json = json.loads(r.text, strict= False)
if __name__ == "__main__":
    # Dev server on all interfaces (default port 5000).
    app.run('0.0.0.0')
    #app.run(port=int(63046))
    #app.run(port=int(environ['FLASK_PORT']))
    #app.run()
|
@pytest.fixture
def ff_pair():
    """Two copies of the default forcefield; the second gets +1.0 on every
    charge parameter so the pair defines distinct endstates."""
    reference = Forcefield.load_from_file(DEFAULT_FF)
    perturbed = Forcefield.load_from_file(DEFAULT_FF)
    perturbed.q_handle.params += 1.0
    return reference, perturbed
def test_get_solvent_phase_system_parameter_changes(ff_pair):
    """The dual-forcefield solvent system must reproduce both single-ff endstates."""
    ff_a, ff_b = ff_pair
    mol = fetch_freesolv()[0]

    potentials, params, masses, conf, box = enhanced.get_solvent_phase_system_parameter_changes(mol, ff0=ff_a, ff1=ff_b)
    energy_fn = construct_differentiable_interface_fast(potentials, params)

    potentials_a, params_a, masses_a, conf_a, box_a = enhanced.get_solvent_phase_system(mol, ff_a, minimize_energy=False)
    energy_a = construct_differentiable_interface_fast(potentials_a, params_a)

    potentials_b, params_b, masses_b, conf_b, box_b = enhanced.get_solvent_phase_system(mol, ff_b, minimize_energy=False)
    energy_b = construct_differentiable_interface_fast(potentials_b, params_b)

    # lam=0 reproduces the ff_a endstate, lam=1 the ff_b endstate.
    assert pytest.approx(energy_fn(conf, params, box, lam=0)) == energy_a(conf_a, params_a, box_a, lam=0)
    assert pytest.approx(energy_fn(conf, params, box, lam=1)) == energy_b(conf_b, params_b, box_b, lam=0)

    # Masses, box and coordinates must agree with both endstate systems.
    assert pytest.approx(masses) == masses_a
    assert pytest.approx(masses) == masses_b
    assert pytest.approx(box) == box_a
    assert pytest.approx(box) == box_b
    assert pytest.approx(conf) == conf_a
    assert pytest.approx(conf) == conf_b
def test_get_vacuum_phase_system_parameter_changes(ff_pair):
    """Vacuum analogue of the solvent endstate-consistency check."""
    ff_a, ff_b = ff_pair
    build_system = functools.partial(enhanced.get_vacuum_phase_system_parameter_changes, ff0=ff_a, ff1=ff_b)
    mol = fetch_freesolv()[0]

    potentials, params, masses, conf = build_system(mol)
    energy_fn = construct_differentiable_interface_fast(potentials, params)
    energy_a = enhanced.VacuumState(mol, ff_a).U_full
    energy_b = enhanced.VacuumState(mol, ff_b).U_full

    box = np.eye(3) * 1000.0  # huge box used for the periodic interface

    # lam endstates match the plain single-forcefield energies.
    assert pytest.approx(energy_fn(conf, params, box, lam=0)) == energy_a(conf)
    assert pytest.approx(energy_fn(conf, params, box, lam=1)) == energy_b(conf)

    # Masses and coordinates are taken straight from the molecule.
    assert pytest.approx(masses) == utils.get_mol_masses(mol)
    assert pytest.approx(conf) == utils.get_romol_conf(mol)
|
# import time
#Insertion Sort
import random
import matplotlib as mpl
mpl.use('Agg')  # headless backend; must be selected before importing pyplot
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# start_time=time.time()
tim=[]
bst=[]
wrt=[]
avg=[]
for n in range(10,101,10):
tim.append(n)
avgarr=[random.randint(1,100) for i in range(n)]
bstarr=sorted(avgarr)
wrtarr=sorted(avgarr)[::-1]
# ========================= AVERAGE CASE ==================== #
c=0
for i in range(1,n):
c+=1
key=avgarr[i]
c+=1
j=i-1
c+=1
while j>=0 and key<avgarr[j]:
c+=1
avgarr[j+1]=avgarr[j]
c+=1
j-=1
c+=1
c+=1 #checking condition
avgarr[j+1]=key
c+=1
c+=1 #checking condition
avg.append(c)
# ========================= BEST CASE ==================== #
c=0
for i in range(1,n):
c+=1
key=bstarr[i]
c+=1
j=i-1
c+=1
while j>=0 and key<bstarr[j]:
c+=1
bstarr[j+1]=bstarr[j]
c+=1
j-=1
c+=1
c+=1 #checking condition
bstarr[j+1]=key
c+=1
c+=1 #checking condition
bst.append(c)
# ======================== WORST CASE ===================== #
c=0
for i in range(1,n):
c+=1
key=wrtarr[i]
c+=1
j=i-1
c+=1
while j>=0 and key<wrtarr[j]:
c+=1
wrtarr[j+1]=wrtarr[j]
c+=1
j-=1
c+=1
c+=1 #checking condition
wrtarr[j+1]=key
c+=1
c+=1 #checking condition
wrt.append(c)
# print(time.time()-start_time)
# ================== PLOTTING GRAPHS ================ #
# Scatter + line for each case, then save to ins_sort.png (Agg backend).
plt.scatter(tim,wrt)
plt.plot(tim,wrt, label="worst time")
plt.scatter(tim,bst)
plt.plot(tim,bst, label="best time")
plt.scatter(tim,avg)
plt.plot(tim,avg, label="average time")
# ====================== LEGEND ====================== #
plt.legend()
# ============= printing cordinates too ============== #
# for i in range(len(tim)):
# 	plt.text(tim[i],avg[i],str(tim[i])+","+str(avg[i]))
# for i in range(len(tim)):
# 	plt.text(tim[i],bst[i],str(tim[i])+","+str(bst[i]))
# for i in range(len(tim)):
# 	plt.text(tim[i],wrt[i],str(tim[i])+","+str(wrt[i]))
plt.xlabel('test cases')
plt.ylabel('number of steps')
plt.title('insertion sort time complexity')
plt.savefig('ins_sort.png')
|
# coding=utf-
import logging
import pika
from pymongo import MongoClient
import time
import datetime
from settings import REQUESTS_QUEUE, MONGO_COLLECTION, MONGO_DB
def callback(ch, method, properties, body):
    """Persist one consumed message into MongoDB and log it.

    Opens a fresh connection per message (as the original did) and closes
    it before returning.
    """
    client = MongoClient('mongodb', 27017)
    collection = client[MONGO_DB][MONGO_COLLECTION]
    document = {
        'resource': repr(body),
        'datetime': datetime.datetime.utcnow()
    }
    collection.insert_one(document)
    logging.log(logging.DEBUG, " [x] Received %r" % body)
    client.close()
if __name__ == '__main__':
    # Retry once per second until the 'rabbitmq' host accepts a connection.
    wait = True
    while wait:
        try:
            connection = pika.BlockingConnection(
                pika.ConnectionParameters('rabbitmq'))
            wait = False
        except Exception as exc:
            time.sleep(1)
            logging.log(logging.ERROR, str(exc))
    channel = connection.channel()
    channel.queue_declare(queue=REQUESTS_QUEUE)  # idempotent declare
    channel.basic_consume(
        queue=REQUESTS_QUEUE,
        on_message_callback=callback,
        auto_ack=True)  # NOTE(review): auto_ack drops messages on consumer crash
    logging.log(logging.DEBUG,
                ' [*] Waiting for messages. To exit press CTRL+C')
    channel.start_consuming()
|
import os
import config
from base import *
class RapidXML(Base):
    """Build recipe for the header-only RapidXML XML parser."""

    def __init__(self):
        self.name = "rapidxml"
        self.version = "1.13"
        self.compilers = [config.COMPILER_MAC_GCC, config.COMPILER_MAC_CLANG, config.COMPILER_UNIX_GCC]
        self.arch = [config.ARCH_M32, config.ARCH_M64]
        self.dependencies = []

    def download(self):
        """Fetch and unpack the release zip from SourceForge."""
        archive = "rapidxml-" + self.version + ".zip"
        folder = "rapidxml-" + self.version
        url = ("https://sourceforge.net/projects/rapidxml/files/rapidxml/rapidxml%20"
               + self.version + "/" + archive + "/download")
        rb_download_and_extract(self, url, archive, folder)

    def build(self):
        # Header-only library: nothing to compile.
        return True

    def is_build(self):
        return rb_install_include_file_exists("rapidxml.hpp")

    def deploy(self):
        """Copy the four public headers into the install include dir."""
        for header in ("rapidxml.hpp", "rapidxml_iterators.hpp",
                       "rapidxml_print.hpp", "rapidxml_utils.hpp"):
            rb_deploy_header(rb_download_get_file(self, header))
|
class Solution:
    """LeetCode 1408: find every word that is a substring of another word."""

    def stringMatching(self, words: List[str]) -> List[str]:
        """Return the words that occur inside a longer word of the list.

        Words are sorted by length so each word only needs to be searched
        inside the (no shorter) words after it; matching uses KMP.
        """
        words.sort(key=len)
        res = []
        # Failure tables for every potential substring; the longest word can
        # never be a proper substring, so it is skipped.
        lps = [self._compute_lps(w) for w in words[:-1]]
        for i in range(len(words)):
            for j in range(i + 1, len(words)):
                if self._kmp(words[i], words[j], lps[i]):
                    res.append(words[i])
                    break
        return res

    def _kmp(self, w, s, lps):
        """True when pattern *w* occurs in text *s* (lps: failure table of w).

        (The original kept an unused step counter `c`; removed.)
        """
        i = j = 0
        while i < len(s):
            if s[i] == w[j]:
                i += 1
                j += 1
                if j == len(w):
                    return True
            elif j != 0:
                j = lps[j - 1]  # fall back along the failure links
            else:
                i += 1
        return False

    def _compute_lps(self, w):
        """Longest-proper-prefix-that-is-also-suffix table for KMP."""
        n = len(w)
        lps = [0] * n
        i, p = 1, 0
        while i < n:
            if w[i] == w[p]:
                p += 1
                lps[i] = p
                i += 1
            elif p != 0:
                p = lps[p - 1]
            else:
                p = 0
                i += 1
        return lps
|
from flask import Flask, render_template, flash, request, redirect, url_for
from flask_cors import CORS
from flask_wtf import FlaskForm
from flask_bootstrap import Bootstrap
from flask_wtf.recaptcha import RecaptchaField, Recaptcha
from flask_wtf.csrf import CSRFProtect
from wtforms import TextField, TextAreaField, StringField, SubmitField, SelectField, IntegerField, FieldList, FormField, FloatField
from wtforms.validators import DataRequired, Length, NumberRange
from wtforms import validators
import pandas as pd
from math import ceil
import json
from MovieAlgo.movie_info import MovieInfo, get_movie_name_year
from MovieAlgo.knn import KNN
'''
CS 598 - Practical Statistical Learning Project 3
UIUC, Fall 2020
@authors:
- Pranav Velamakanni (pranavv2@illinois.edu)
- Tarik Koric (koric1@illinois.edu)
'''
# App config.
app = Flask(__name__)
CORS(app)
CSRFProtect(app)
Bootstrap(app)
app.config.from_object(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'  # NOTE(review): hard-coded secret; move to env/config
# Load metadata
with open('data/movie_data.json') as f:
    movie_names_id = json.loads(f.read())  # movie name -> id mapping (per usage below)
# Pre-load model and data for System 2
model = KNN()
class MovieRecommendation1(FlaskForm):
    """System 1 request form: genre, rating algorithm, recommendation count.

    NOTE: WTForms renders fields in definition order -- do not reorder them.
    """
    # (sp.) 'genre_coices' kept as-is; referenced by the genre field below.
    genre_coices = sorted(['Animation',
    'Romance',
    'Action',
    'Crime',
    'Film-Noir',
    'War',
    'Fantasy',
    'Drama',
    'Musical',
    'Comedy',
    'Mystery',
    'Documentary',
    'Thriller',
    'Western',
    'Adventure',
    'Horror',
    "Children's",
    'Sci-Fi'])
    algo_choice = ['Number of reviews', 'Mean rating', 'Weighted rating']
    genre = SelectField('Genre', choices = genre_coices, description = "Pick a genre to receive recommendations for.", validators = [DataRequired()])
    algo = SelectField('Rated by', choices = algo_choice, description = "Pick a rating algorithm.", validators = [DataRequired()])
    number = IntegerField('Recommendations', default = 5, description = "Number of recommendations, limited to 20.", validators = [DataRequired(), NumberRange(min=1, max=20, message = 'Must be within 1 and 20, default is 5.')])
    submit = SubmitField()
@app.route('/', methods = ('GET', 'POST'))
def index():
    """Landing page."""
    return render_template('index.html')
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page."""
    return render_template('confused.html'), 404
@app.route('/system1', methods = ('GET', 'POST'))
def system1():
    """Render the System 1 form; on a valid POST redirect to the results page."""
    form = MovieRecommendation1()
    if request.method == 'POST' and form.validate_on_submit():
        return redirect(url_for(
            'results1',
            genre=request.form.get('genre'),
            algo=request.form.get('algo'),
            number=request.form.get('number'),
        ))
    return render_template('system1.html', form = form)
@app.route('/results/<genre>/<algo>/<number>', methods = ['GET'])
def results1(genre, algo, number):
    '''
    Processes requests for System 1.
    NOTE: This end point is not intended to be used like an API to avoid maxing out resource limits.
    '''
    algo_map = {'Number of reviews': 'size', 'Mean rating': 'mean_ratings', 'Weighted rating': 'weighted_rating'}
    data = pd.read_csv('data/final_ratings.csv')
    # A movie matches when any of its six genre columns equals the genre.
    genre_cols = ['genre 1', 'genre 2', 'genre 3', 'genre 4', 'genre 5', 'genre 6']
    mask = data[genre_cols[0]] == genre
    for col in genre_cols[1:]:
        mask = mask | (data[col] == genre)
    results = data[mask].sort_values(by=[algo_map.get(algo)], ascending=False).head(int(number))
    movie_year_list = [get_movie_name_year(m) for m in results.movie]
    MovieStats = MovieInfo()
    final_data = MovieStats.prepare_movie_list(movie_year_list)
    return render_template('success.html', data = final_data)
class MovieForm(FlaskForm):
    """One (movie, rating) entry of the System 2 form.

    Choices come from the module-level movie_names_id mapping loaded at
    import time.  Field order matters to WTForms rendering.
    """
    movie_name = SelectField(
        'Movie',
        description = 'Pick a movie to rate',
        choices = sorted(movie_names_id.keys()),
        validators = [DataRequired()]
    )
    rating = FloatField(
        'Rating',
        description = 'Rate the movie from 1.0 to 5.0',
        validators=[NumberRange(min = 1.0, max = 5.0, message='Value must be between 1 and 5')]
    )
class System2(FlaskForm):
    '''
    System 2 parent form.
    '''
    # 1-10 repeated (movie, rating) sub-forms.
    movies = FieldList(
        FormField(MovieForm),
        min_entries=1,
        max_entries=10
    )
    # Number of recommendations to return (1-20, default 5).
    number = IntegerField(
        'Recommendations',
        default = 5,
        description = "Number of recommendations to display, limited to 20.",
        validators = [NumberRange(min=1, max=20, message = 'Must be within 1 and 20, default is 5.')]
    )
class RequestFormResponse2:
    """Parse the raw System 2 POST data into a list of movie-rating dicts."""

    def __init__(self, form):
        self.raw_data = form
        self.recommendations = int(form['number'])
        self.form_data = self.prepare_data(form)

    def prepare_data(self, data):
        """Turn 'movies-N-movie_name'/'movies-N-rating' fields into dicts.

        # assumes exactly 3 non-movie fields in the form (number, csrf,
        # submit), so (len - 3) / 2 pairs -- TODO confirm against template.
        """
        entries = []
        pair_count = ceil((len(data) - 3) / 2)
        for i in range(pair_count):
            name = data['movies-{}-movie_name'.format(i)]
            entries.append({
                'movie_name': name,
                'movie_id': movie_names_id.get(name),
                'rating': data['movies-{}-rating'.format(i)],
            })
        return entries
@app.route('/system2', methods=['POST', 'GET'])
def system2():
    """Render the System 2 form; on a valid POST compute and show recommendations."""
    form = System2()
    template_form = MovieForm(prefix='movies-_-')
    if request.method == 'POST' and form.validate_on_submit():
        parsed = RequestFormResponse2(request.form)
        ranked = model.predict(parsed.form_data)
        movie_year_list = [get_movie_name_year(m) for m in ranked[:parsed.recommendations]]
        final_data = MovieInfo().prepare_movie_list(movie_year_list)
        return render_template('success.html', data = final_data)
    return render_template('system2.html', form=form, _template = template_form)
if __name__ == '__main__':
    # Dev server entry point; listens on all interfaces, port 5000.
    app.run(host = '0.0.0.0', threaded = True, port = 5000)
|
import os
import math
import csv
import datetime
from dateutil import rrule
import numpy as np
import pandas as pd
from numpy import random
from scipy.fftpack import fft, ifft
from federated import t_product
# Load the txt data into a list of triples [src, des, sec]
def TxtFileLoad(filepath):
    """Load a space-separated edge list; each line is "src dst seconds".

    Returns (triples, node_count, max_day) where triples is the list of
    [src, dst, seconds] records, node_count is max id + 1, and max_day is
    the largest ceil(seconds / 86400) over all lines.

    Uses `with open` so the file handle is closed even on parse errors
    (the original closed it manually).
    """
    triples = []
    max_id = -1
    max_T = -1
    with open(filepath, "r") as fh:
        for line in fh:
            record = list(map(int, line.split(' ')))
            max_id = max(max_id, record[0], record[1])
            day = math.ceil(record[2] / (3600 * 24))  # round up to whole days
            max_T = max(max_T, day)
            triples.append(record)
    return triples, max_id + 1, max_T
# Load the csv data: encode the user ids first, then extract triples and features
def CsvFileLoad(filepath,user_num,user_code_file):
    """Load the claims CSV, encode user ids, and return the feature slice.

    filepath: claims CSV with DESYNPUF_ID etc.; user_num: how many users to
    keep; user_code_file: CSV cache of the user-id encoding table.
    Returns (features, max_T, min_date_str): rows of [user, date, amount,
    physician NPIs, diagnosis codes] with the user column re-encoded and the
    numeric columns normalized in place.
    """
    data = pd.read_csv(filepath, low_memory=False)
    basic_info = np.array(data[['DESYNPUF_ID']])
    user_info = StrEncodeNum(basic_info[:,0], user_code_file,user_num)
    user_list = []
    for user in user_info:
        user_list.append(user[0])
    data_cut = DataframeCut(user_list,data)
    basic_info_cut = np.array(data_cut[['DESYNPUF_ID', 'CLM_FROM_DT',
                        'CLM_PMT_AMT', 'AT_PHYSN_NPI', 'OP_PHYSN_NPI',
                        'OT_PHYSN_NPI', 'ICD9_DGNS_CD_1', 'ICD9_DGNS_CD_2',
                        'ICD9_DGNS_CD_3', 'ICD9_DGNS_CD_4', 'ICD9_DGNS_CD_5']])
    basic_info_cut[:,0] = StrEncodeCache(basic_info_cut[:,0])  # temp-encode the selected users
    max_T,basic_info_cut[:,1],min_date_str = GetT_ByMonth(basic_info_cut[:,1])  # max month span
    U_Num, F_Num = np.shape(basic_info_cut)
    basic_info_cut[:,2:F_Num] = Normalize(basic_info_cut[:,2:F_Num])  # min-max normalize numeric columns
    return basic_info_cut,max_T,min_date_str
# Derive the time span from the difference in months
def GetT_ByMonth(data):
    """Convert a column of numeric YYYYMMDD dates to strings and measure its span.

    NaNs become the string 'nan'; everything else becomes str(int(value)).
    Returns (span_in_months, data_as_strings, min_date_str).
    """
    numeric = []
    for idx in range(len(data)):
        if math.isnan(data[idx]):
            data[idx] = 'nan'
        else:
            numeric.append(int(data[idx]))
            data[idx] = str(int(data[idx]))
    latest = str(max(numeric))
    earliest = str(min(numeric))
    return BtwMonth(earliest, latest), data, earliest
def BtwMonth(start_str, end_str):
    """Months between two YYYYMMDD strings, rounded up.

    Whole years count 12 months each; the day difference is weighted 1/30
    of a month, exactly as in the original formula.
    """
    start_year, start_month, start_day = int(start_str[0:4]), int(start_str[4:6]), int(start_str[6:8])
    end_year, end_month, end_day = int(end_str[0:4]), int(end_str[4:6]), int(end_str[6:8])
    span = ((end_year - start_year) * 12
            + (end_month - start_month)
            + (end_day - start_day) * (1/30))
    return math.ceil(span)
# Select the rows that match the given user names
def DataframeCut(username_list, df):
    """Return the rows of *df* whose DESYNPUF_ID is in username_list."""
    selected = df['DESYNPUF_ID'].isin(username_list)
    return df.loc[selected]
# Save the encoding table to a file so it can be decoded later
def CsvEncodeSave(DataDict, filepath):
    """Persist the encoding table to CSV and return it sorted.

    DataDict maps name -> [Name, EncodeID, OccurTimes]; rows are written
    with gbk encoding, then re-sorted via Array2Dict_Sort.
    """
    datalist = []
    for value in DataDict.values():
        datalist.append(value)
    head = ['Name', 'EncodeID', 'OccurTimes']
    savadata = pd.DataFrame(columns=head, data=datalist)
    savadata.to_csv(filepath, encoding='gbk')
    encode_dict_sort = Array2Dict_Sort(datalist)
    return encode_dict_sort
# Normalize each column of the matrix independently (np.array)
def Normalize(matrix):
U_Num, F_Num = np.shape(matrix)
for i in range(F_Num):
matrix[:, i],nan_cache,data_info = NormalPreDeal(matrix[:, i])
m = np.mean(data_info)
mx = max(data_info)
mn = min(data_info)
for j in range(U_Num):
if j not in nan_cache:
matrix[j, i] = ((float(matrix[j, i]) - mn) / (mx - mn))*10
return matrix
# Normalisation pre-pass for one column: strings are parsed, NaN is flagged.
def NormalPreDeal(data):
    """Prepare one column for Normalize.

    Returns (data, nan_cache, data_info): nan_cache holds indices of cells
    that cannot be normalised (NaN, or strings containing 'OTHER');
    data_info collects every numeric value found.  String cells are parsed
    to float in place; on failure, one leading non-numeric character (e.g.
    a code prefix) is stripped before the retry.
    """
    nan_cache = []  # positions to skip during normalisation
    data_info = []  # numeric values, for min/max
    for i in range(len(data)):
        cell = data[i]
        # isinstance replaces the original `type(cell) == type('str')`,
        # which missed str subclasses (e.g. numpy str_).
        if isinstance(cell, str):
            try:
                data[i] = float(cell)
                data_info.append(data[i])
            except ValueError:  # was a bare except; float() raises ValueError
                if 'OTHER' in cell:
                    nan_cache.append(i)
                else:
                    data[i] = float(cell[1:])
                    data_info.append(data[i])
        elif math.isnan(cell):
            nan_cache.append(i)
            data[i] = 'nan'
        else:
            data_info.append(cell)
    return data, nan_cache, data_info
# Re-encode the already-selected users with temporary dense ids.
def StrEncodeCache(datalist):
    """Replace each value in ``datalist`` (in place) with a dense id 0..k-1.

    Ids are assigned in order of first appearance.  A dict replaces the
    original ``list.index`` scan, turning the O(n^2) pass into O(n).
    """
    codes = {}
    for i, value in enumerate(datalist):
        if value not in codes:
            codes[value] = len(codes)
        datalist[i] = codes[value]
    return datalist
# Encode user-id strings to integers, recording occurrence counts to disk.
def StrEncodeNum(datalist, filename, usernum):
    """Encode the user-id column and return the most frequent users.

    If ``filename`` exists the persisted encode table is reloaded from it;
    otherwise the table is built from ``datalist`` (which is mutated in
    place to the integer codes) and saved via CsvEncodeSave.  At most
    ``usernum`` entries, sorted by occurrence count, are returned.
    """
    if os.path.exists(filename):
        print(filename, 'exist')
        data = pd.read_csv(filename, low_memory=False)
        encode_info = np.array(data[['Name', 'EncodeID', 'OccurTimes']])
        encode_dict_sort = Array2Dict_Sort(encode_info)
    else:
        codes = {}  # value -> dense id; replaces the O(n^2) list.index scan
        encode_record = {}
        for i, data in enumerate(datalist):
            if data not in codes:
                codes[data] = len(codes)
                # Count starts at 0: the increment below covers this first
                # occurrence too.  (The original started at 1 and then
                # incremented, over-counting every user by one.)
                encode_record[data] = [data, codes[data], 0]
            encode_record[data][2] += 1
            datalist[i] = codes[data]
            print(data, 'encode to:', datalist[i])
        encode_dict_sort = CsvEncodeSave(encode_record, filename)
    if usernum <= len(encode_dict_sort):
        return encode_dict_sort[0:usernum]
    return encode_dict_sort
def DictCut(data, num):
    """Return a dict with the first ``num`` entries of ``data`` (insertion order).

    The original never incremented its counter, so it always copied every
    entry regardless of ``num``; enumerate fixes that.
    """
    res = {}
    for i, key in enumerate(data):
        if i >= num:
            break
        res[key] = data[key]
    return res
def Array2Dict_Sort(data):
    """Build {name: [encode_id, occur_times]} from rows and sort the items
    by occurrence count, descending.  Returns a list of (name, [id, count])."""
    table = {row[0]: [row[1], row[2]] for row in data}
    return sorted(table.items(), key=lambda entry: entry[1][1], reverse=True)
# Build a low-rank tensor via the t-product: slice-wise matmul in Fourier domain.
def create_tensor(A, B):
    """Return the tensor t-product of 3-way arrays A and B.

    Both operands are transformed along axis 0, the frontal slices are
    matrix-multiplied, and the result is transformed back.  ``fft``/``ifft``
    are expected to be in scope from this module's imports (presumably
    numpy.fft or scipy — not visible here; verify).
    """
    [a3, a1, a2] = A.shape
    [b3, b1, b2] = B.shape
    A = fft(A, axis=0)
    B = fft(B, axis=0)
    C = np.zeros((b3, a1, b2), dtype=complex)
    for i in range(b3):
        C[i, :, :] = np.dot(A[i, :, :], B[i, :, :])
    C = ifft(C, axis=0)
    return C
# Build the CSV low-rank tensor: cells are filled only for rows with a date.
def create_tensor2(LoadData, A, B, min_date_str):
    """Seed C = A t-product B, then overwrite cells present in ``LoadData``.

    Each row is (user, date_str, feature, ...).  The month offset from
    min_date_str selects the frontal slice; 'nan'/'OTHER' features zero
    the cell instead of writing a value.
    """
    C = create_tensor(A, B)
    for data in LoadData:
        user = data[0]
        date_str = data[1]
        if date_str != 'nan':
            now_T = BtwMonth(min_date_str, date_str)
            for index in range(2, len(data)):
                if data[index] == 'nan' or data[index] == 'OTHER':
                    C[now_T - 1, user, index] = 0
                else:
                    C[now_T - 1, user, index] = data[index]
    return C
def product_tensor(A, B):
    """Thin wrapper around ``t_product`` (defined elsewhere in this module)."""
    return t_product(A, B)
# Build per-period adjacency slices from timestamped edge records.
def create_graph_by_time(LoadData, NodeNum, T_Num):
    """Return a (T_Num, NodeNum, NodeNum) complex tensor with a 1 at
    (period, src, des) for each (src, des, seconds) record; the period is
    the day index of ``seconds``, rounded up."""
    graph = np.zeros((T_Num, NodeNum, NodeNum), dtype=complex)
    for src, des, sec in LoadData:
        period = math.ceil(sec / (3600 * 24))
        graph[period - 1, src, des] = 1
    return graph
# Build the per-period graph for the CSV data (self-loops mark activity).
def create_graph2_by_time(LoadData, NodeNum, T_Num, min_date_str):
    """Return a (T_Num, NodeNum, NodeNum) tensor with graph[t, u, u] = 1
    whenever user ``u`` has a dated record in month-offset ``t`` relative
    to min_date_str.  Rows whose date is the string 'nan' are skipped."""
    graph = np.zeros((T_Num, NodeNum, NodeNum), dtype=complex)
    for data in LoadData:
        user = data[0]
        date_str = data[1]
        if date_str != 'nan':
            now_T = BtwMonth(min_date_str, date_str)
            graph[now_T - 1, user, user] = 1
    return graph
# Build a random, symmetric adjacency tensor.
def create_adjacent(nNode, k, densityRate, unique):
    """Create a (k, nNode, nNode) complex adjacency tensor.

    Undirected edges are sampled with probability ``densityRate``; only
    rows 0..floor((nNode+1)/2)-1 ever originate edges (NOTE(review):
    presumably intentional sparsity — verify).  With unique=True all k
    slices share the slice-0 pattern; otherwise each slice is sampled
    independently.  ``random.rand`` suggests ``random`` is bound to
    numpy.random in this module's imports — confirm.
    """
    adjacent = np.zeros((k, nNode, nNode), dtype=complex)
    if unique:
        for i in range(k):
            if i == 0:
                for iNode in range(math.floor((nNode + 1) / 2)):
                    for jNode in range(iNode + 1, nNode):
                        if random.rand(1) < densityRate:
                            adjacent[i, iNode, jNode] = 1
                            adjacent[i, jNode, iNode] = 1
            else:
                # Copy the pattern drawn for the first slice.
                adjacent[i, :, :] = adjacent[0, :, :]
    else:
        for i in range(k):
            for iNode in range(int(math.floor((nNode + 1) / 2))):
                for jNode in range(iNode + 1, nNode):
                    if random.rand(1) < densityRate:
                        adjacent[i, iNode, jNode] = 1
                        adjacent[i, jNode, iNode] = 1
    return adjacent
|
# coding=utf-8
#
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Most of this work is copyright (C) 2013-2019 David R. MacIver
# (david@drmaciver.com), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER
from __future__ import absolute_import, division, print_function
import math
import hypothesis.internal.conjecture.floats as flt
import hypothesis.internal.conjecture.utils as d
from hypothesis.control import assume, reject
from hypothesis.internal.conjecture.utils import calc_label_from_name
from hypothesis.internal.floats import float_of
from hypothesis.strategies._internal.strategies import SearchStrategy
class WideRangeIntStrategy(SearchStrategy):
    """Strategy drawing signed integers across widely varying bit-sizes.

    A size in bits is sampled from ``sizes`` with the relative weights in
    ``distribution``, biasing towards small magnitudes while still
    covering 128-bit values.
    """

    distribution = d.Sampler([4.0, 8.0, 1.0, 1.0, 0.5])
    sizes = [8, 16, 32, 64, 128]

    def __repr__(self):
        return "WideRangeIntStrategy()"

    def do_draw(self, data):
        size = self.sizes[self.distribution.sample(data)]
        r = data.draw_bits(size)
        # The lowest bit encodes the sign; the remaining bits the magnitude.
        sign = r & 1
        r >>= 1
        if sign:
            r = -r
        return int(r)
class BoundedIntStrategy(SearchStrategy):
    """A strategy for providing integers in some interval with inclusive
    endpoints."""

    def __init__(self, start, end):
        SearchStrategy.__init__(self)
        self.start = start  # inclusive lower bound
        self.end = end      # inclusive upper bound

    def __repr__(self):
        return "BoundedIntStrategy(%d, %d)" % (self.start, self.end)

    def do_draw(self, data):
        # Delegate the draw to conjecture's shared integer_range helper.
        return d.integer_range(data, self.start, self.end)
# Hand-curated "nasty" floats that tend to expose edge-case bugs: values
# around 1, extreme magnitudes and epsilons (presumably the float32/float64
# boundary values — verify), the largest exactly-representable integer, and
# repeated inf/nan entries.  Sorted by the lexicographic float encoding so
# simpler values shrink first.
NASTY_FLOATS = sorted(
    [
        0.0,
        0.5,
        1.1,
        1.5,
        1.9,
        1.0 / 3,
        10e6,
        10e-6,
        1.175494351e-38,
        2.2250738585072014e-308,
        1.7976931348623157e308,
        3.402823466e38,
        9007199254740992,
        1 - 10e-6,
        2 + 10e-6,
        1.192092896e-07,
        2.2204460492503131e-016,
    ]
    + [float("inf"), float("nan")] * 5,
    key=flt.float_to_lex,
)
# Coerce everything (e.g. the int above) to float, then mirror with negatives.
NASTY_FLOATS = list(map(float, NASTY_FLOATS))
NASTY_FLOATS.extend([-x for x in NASTY_FLOATS])
# Label marking each float-draw example in the conjecture data stream.
FLOAT_STRATEGY_DO_DRAW_LABEL = calc_label_from_name(
    "getting another float in FloatStrategy"
)
class FloatStrategy(SearchStrategy):
    """Generic superclass for strategies which produce floats.

    allow_infinity / allow_nan gate which special values may be emitted;
    width (16/32/64) selects the IEEE format the result is coerced to.
    """

    def __init__(self, allow_infinity, allow_nan, width):
        SearchStrategy.__init__(self)
        assert isinstance(allow_infinity, bool)
        assert isinstance(allow_nan, bool)
        assert width in (16, 32, 64)
        self.allow_infinity = allow_infinity
        self.allow_nan = allow_nan
        self.width = width
        # Pre-filter the shared NASTY_FLOATS list to the values this
        # strategy may produce at its width.
        self.nasty_floats = [
            float_of(f, self.width) for f in NASTY_FLOATS if self.permitted(f)
        ]
        # Sampler index 0 means "draw a fresh random float"; the remaining
        # weights give each nasty float an equal chance.
        weights = [0.2 * len(self.nasty_floats)] + [0.8] * len(self.nasty_floats)
        self.sampler = d.Sampler(weights)

    def __repr__(self):
        return "{}(allow_infinity={}, allow_nan={}, width={})".format(
            self.__class__.__name__, self.allow_infinity, self.allow_nan, self.width
        )

    def permitted(self, f):
        """Return True if ``f`` may be produced under this strategy's flags."""
        assert isinstance(f, float)
        if not self.allow_infinity and math.isinf(f):
            return False
        if not self.allow_nan and math.isnan(f):
            return False
        if self.width < 64:
            try:
                float_of(f, self.width)
                return True
            except OverflowError:  # pragma: no cover
                # Out of range for the narrower float format.
                return False
        return True

    def do_draw(self, data):
        while True:
            data.start_example(FLOAT_STRATEGY_DO_DRAW_LABEL)
            i = self.sampler.sample(data)
            if i == 0:
                result = flt.draw_float(data)
            else:
                # Replay a nasty float, writing it back into the data
                # stream so shrinking remains consistent.
                result = self.nasty_floats[i - 1]
                flt.write_float(data, result)
            if self.permitted(result):
                data.stop_example()
                if self.width < 64:
                    return float_of(result, self.width)
                return result
            # Rejected draw: discard this example and try again.
            data.stop_example(discard=True)
class FixedBoundedFloatStrategy(SearchStrategy):
    """A strategy for floats distributed between two endpoints.

    The conditional distribution tries to produce values clustered
    closer to one of the ends.
    """

    def __init__(self, lower_bound, upper_bound, width):
        SearchStrategy.__init__(self)
        assert isinstance(lower_bound, float)
        assert isinstance(upper_bound, float)
        assert 0 <= lower_bound < upper_bound
        assert math.copysign(1, lower_bound) == 1, "lower bound may not be -0.0"
        assert width in (16, 32, 64)
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.width = width

    def __repr__(self):
        return "FixedBoundedFloatStrategy(%s, %s, %s)" % (
            self.lower_bound,
            self.upper_bound,
            self.width,
        )

    def do_draw(self, data):
        # Linear interpolation of a fractional draw across the interval.
        f = self.lower_bound + (
            self.upper_bound - self.lower_bound
        ) * d.fractional_float(data)
        if self.width < 64:
            try:
                f = float_of(f, self.width)
            except OverflowError:  # pragma: no cover
                # Not representable at this width: reject the example.
                reject()
        # Rounding at narrower widths can push f just outside the bounds.
        assume(self.lower_bound <= f <= self.upper_bound)
        return f
|
from django import template
from django.utils.safestring import mark_safe
register = template.Library()  # the name 'register' is fixed by Django and must not change
# simple_tag accepts any number of arguments; a filter takes at most two.
# Only filters may be used inside {% if %} expressions.
@register.simple_tag
def my_add100_tag(value):
    """Template tag: return value + 100."""
    return value + 100
@register.filter
def my_add100_filter(value1, value2):
    """Template filter: return value1 + 100 + value2."""
    return value1 + 100 + value2
import argparse
import socket
HOST = '127.0.0.1'  # The server's hostname or IP address
PORT = 1993         # The port used by the server

def main(key, value, wait=True):
    """Send a ``STORE key=value`` command to the server and print its reply.

    ``wait`` is accepted for interface compatibility but is currently unused.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((HOST, PORT))
        # Renamed from 'input', which shadowed the builtin.
        request = 'STORE ' + key + '=' + value
        s.sendall(request.encode())
        data = s.recv(4096)
        print('AT CLIENT_ALICE: STORE ', data.decode())
if __name__ == '__main__':
    # CLI entry point: forward --key/--value to main().
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--key', default='username', help='Contains value of the key that needs to be stored')
    parser.add_argument('--value', default='foo', help='Contains value of particular key')
    args = parser.parse_args()
    main(args.key, args.value)
from django.db import models
from django.utils.translation import ugettext as _
from mptt.models import MPTTModel, TreeForeignKey
from django.utils.safestring import mark_safe
from core.abstract import (
ActiveModel,
DescriptionModel,
GeoModel,
HookModel,
NameModel,
SeoModel,
SlugModel,
)
class CategoryLink(DescriptionModel, NameModel, HookModel):
    """Category grouping for Link entries."""

    # Was lowercase 'class meta' — Django only honours 'Meta', so these
    # verbose names were silently ignored.
    class Meta:
        verbose_name = _('category link')
        verbose_name_plural = _('category links')
class Link(MPTTModel, DescriptionModel, HookModel, NameModel):
    """Hierarchical navigation link, rendered as a Foundation drilldown menu."""

    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True, on_delete=models.CASCADE, default='')
    icon = models.CharField(_('icon'), blank=True, null=True, max_length=40)
    url = models.CharField(_('url'), blank=True, null=True, max_length=120)
    color = models.CharField(_('color'), blank=True, null=True, max_length=40)
    category = models.OneToOneField(
        CategoryLink,
        verbose_name=_('category'),
        related_name='link',
        on_delete=models.CASCADE,
        blank=True,
        null=True
    )

    def show(self, title=False):
        """Render this link and its descendants as nested <ul> menu markup.

        NOTE(review): the generated <li>/<a> tags are left unclosed and the
        'color' format argument is unused by the template string — verify
        the intended markup before relying on this output.
        """
        descendants = self.get_descendants()
        if title and self.name:
            menu = '<ul class="menu vertical medium-horizontal expanded medium-text-center" data-responsive-menu="drilldown medium-dropdown">'
            menu += '<li class="has-submenu"><a href="{url}">{name}</a></li>'.format(url=self.url, name=self.name)
        else:
            menu = '<ul class="menu vertical medium-horizontal expanded medium-text-center" data-responsive-menu="drilldown medium-dropdown">'
        for item in descendants:
            menu += '<ul class="submenu menu vertical" data-submenu>'
            menu += "<li class='has-submenu'><a href='{url}'>{name}</a>".format(
                color=item.color,
                url=item.url,
                name=item.name.capitalize()
            )
            menu += '</ul>'
        menu += '</ul>'
        return mark_safe(menu)

    @property
    def full_display_admin(self):
        """Admin label: name plus optional [hook] and (description)."""
        text = self.name
        if self.hook:
            text += ' [ {} ]'.format(self.hook)
        if self.description:
            text += ' ( {} )'.format(self.description)
        return text

    class MPTTMeta:
        order_insertion_by = ['name']

    # Was lowercase 'class meta', which Django ignores.
    class Meta:
        verbose_name = _('link')
        verbose_name_plural = _('links')
class Contact_us(NameModel, GeoModel):
    """Site contact details (email / phone / address)."""
    email = models.CharField(_('email'), blank=True, null=True, max_length=40)
    phone = models.CharField(_('phone'), blank=True, null=True, max_length=40)
    address = models.CharField(_('address'), blank=True, null=True, max_length=40)
class Social(ActiveModel):
    """A social-network profile link shown on the site; the stored value is
    the Font Awesome icon class for the network."""

    FACEBOOK = 'fa fa-facebook'
    INSTAGRAM = 'fa fa-instagram'
    LINKEDIN = 'fa fa-linkedin'
    TWITTER = 'fa fa-twitter'
    PINTEREST = 'fa fa-pinterest-square'
    YOUTUBE = 'fa fa-youtube'
    GOOGLE = 'fa fa-google-plus'
    # NOTE(review): YOUTUBE is defined but not offered as a choice — confirm
    # whether that is intentional.
    SOCIAL_CHOICES = [
        (FACEBOOK, 'Facebook'),  # fixed user-visible typo: was 'Facebbok'
        (INSTAGRAM, 'Instagram'),
        (TWITTER, 'Twitter'),
        (PINTEREST, 'Pinterest'),
        (GOOGLE, 'Google'),
        (LINKEDIN, 'Linkedin'),
    ]
    name = models.CharField(_('network'), max_length=160, blank=True, choices=SOCIAL_CHOICES)
    url = models.CharField(_('url'), max_length=160, blank=True)

    # Was lowercase 'class meta', which Django ignores.
    class Meta:
        verbose_name = _('Social')
        verbose_name_plural = _('Social')
|
import os
from time import sleep
from textwrap import wrap
from room import Room
from player import Player
# from minimap.minimap import MiniMap # Importing from subdirectories
from item import Item
from item import Treasure
from item import LightSource
# Declare Global Items
items = {
    'dandelions': Item('dandelions', 'A handful of dandelions. Can be found at the entrance of the cave.'),
    'sword': Item('sword', 'A masterfully crafted sword, suited for a champion!'),
    'lamp': LightSource('lamp', 'An old and rusty lamp, looks like it still has oil in it'),
    'amulet': Item('amulet', 'A glowing amulet, it vibrates as you get closer to treasures'),
    'backpack': Item('backpack', 'A very large backpack. This can come in handy!'),
    'laptop': Item('laptop', 'A personal laptop, too bad the batteries have run out'),
    'treasure': Treasure('treasure', 'A cache of gems and artifacts, neatly packed in a old leather bag', 100),
}
# Declare all the rooms
room = {
    #outside: { name: "Outside Cave Entrance", description: "North of you, the cave mount beckons", n_to: {POINTS TO FOYER}}
    'outside': Room("Outside Cave Entrance", "Dandelions grow on the outside of the cave entrance. North of you, the cave mount beckons", is_lit=True),
    'foyer': Room("Foyer","Dim light filters in from the south. Dusty passages run north and east.", is_lit=True),
    'overlook': Room("Grand Overlook","A steep cliff appears before you, falling into the darkness. Ahead to the north, a light flickers in the distance, but there is no way across the chasm."),
    'narrow': Room("Narrow Passage","The narrow passage bends here from west to north. The smell of gold permeates the air."),
    'treasure': Room("Treasure Chamber","You've found the long-lost treasure chamber! Sadly, it has already been completely emptied by earlier adventurers. The only exit is to the south."),
    # Add your own room here
    'corridor': Room("Main Corridor", "You are located at a narrow corridor. West of you a dim light can be seen. East of you is the foyer"),
    'gallery': Room(
        "Grand Gallery",
        "The passage opens up to a very large gallery with rocks hanging from above, and a piercing hole in the center where light rays seep into the darkness of the caverns. At first the room appeared as a dead end because of the blinding light hiding the shadowy corners. To the South lies the same narrow corridor. But as your eyes adjust to the darkness of the caverns you notice on the far East of the room there seems to be a shining golden light coming from a hole in the ground leading into another cavity."
    ),
}
# Add Inventory to Rooms
room['outside'].inventory.append(items['dandelions'])
room['foyer'].inventory.append(items['lamp'])
room['gallery'].inventory.append(items['sword'])
room['treasure'].inventory.extend([items['laptop'], items['treasure']])
# Link rooms together: each *_to attribute points at the neighbouring Room.
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['foyer'].w_to = room['corridor']
room['corridor'].e_to = room['foyer']
room['corridor'].n_to = room['gallery']
room['gallery'].s_to = room['corridor']
room['gallery'].e_to = room['treasure']
room['overlook'].s_to = room['foyer']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['treasure'].s_to = room['narrow']
# Make a new player object that is currently in the 'outside' room.
# { name: 'Moises', location: SOME OBJECT REFERENCE }
player = Player('Moises', room['outside'])
player.inventory.append(items['backpack'])
player.inventory.append(items['amulet'])
# Define the game controls: movement/quit keys and info commands.
player_movement = ['s', 'n', 'e', 'w', 'q']
player_inventory = ['i', 'inventory']
player_score = ['p', 'score']
# Parser: maps typed verbs ("take sword") to the matching player method.
input_parser = {
    'take': player.loot,
    'get': player.loot,
    'loot': player.loot,
    'drop': player.drop,
    'discard': player.drop,
}
def print_items():
    """Print the items at the player's current location, if visible.

    Visibility: the room itself is lit, or the player carries a working
    light source (player.check_visibility() refreshes player.visibility).
    The large commented-out light-scanning block from the original was
    dead code superseded by check_visibility() and has been removed.
    """
    player.check_visibility()
    if player.location.is_lit or player.visibility:
        items_info = [item.info() for item in player.location.inventory]
        print('\nThe following items are available:')
        print('-'*50)
        print('\n'.join(items_info))
    else:
        print('\nWithout a light source, it is hard to see:')
        print('-'*50)
        print('\n')
# Main game loop: redraw the screen, describe the room, read one command.
while(True):
    # Clear the console so it is more immersive
    os.system('cls' if os.name == 'nt' else 'clear')
    print('\n'*50)
    command = None
    # Printing current location and description
    print(
        '\nYou are located at:','\n'.join(wrap(player.location.name, width=50)),
        '\nDescription:', '\n'.join(wrap(player.location.description, width=50))
    )
    # Printing items at current location
    print_items()
    command = input("\nIn what direction do you wish to proceed\n(N,E,W,S) or (Q: for quitting) pick one: ").lower()
    # Handle movement and quit commands inside player
    if command in player_movement:
        player.move_to(command)
    # Handle Displaying Player Inventory
    elif command in player_inventory:
        player.display_inventory()
    elif command in player_score:
        player.show_score()
    else:
        # Two-word verb commands ("take sword") dispatch through input_parser.
        try:
            # turn commands into an array
            argz = command.split(' ')
            if len(argz) == 2:
                input_parser[argz[0]](argz[1])
        except:
            # NOTE(review): bare except silently ignores unknown verbs and
            # any error raised by the handler — confirm this is intended.
            pass
        # print('Command not available')
        # input()
|
import speech_recognition as sp

# Capture one utterance from the microphone and transcribe it with Google's
# recognizer, expecting Traditional Chinese (zh-tw).
recog = sp.Recognizer()
with sp.Microphone() as source:
    audioData = recog.listen(source)  # store the audio captured by listen()
try:
    question = recog.recognize_google(audioData, language='zh-tw')
    print(question)
except:
    # NOTE(review): bare except also hides network/API errors — consider
    # catching sp.UnknownValueError / sp.RequestError separately.
    print("聽不懂...")
|
# Tweepy 3.10.0
import os
from dotenv import load_dotenv
import tweepy as tw
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
from textblob import TextBlob
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import re
# Authentication: credentials are read from .env via python-dotenv.
load_dotenv('.env')
consumer_key = os.getenv("consumer_key")
consumer_secret = os.getenv("consumer_secret")
access_token = os.getenv("access_token")
access_token_secret = os.getenv("access_token_secret")
# def printenvironment():
#     print(f'The client id is: {consumer_key}.')
#     print(f'The secret id is: {consumer_secret}.')
#     print(f'The access token is: {access_token}.')
#     print(f'The access secret token is: {access_token_secret}.')
# printenvironment()
# Tweepy supports both OAuth 1a (application-user) and OAuth 2 (application-only) authentication.
auth = tw.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tw.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)  # use smaller batches during api calls
try:
    api.verify_credentials()
    print("Authentication OK")
except:
    # NOTE(review): bare except — consider catching tweepy's error type.
    print("Error during authentication")
# Input GUI: the star import is expected to provide `keyword`, `start_date`
# and `ntweets` (presumably — verify against inputGUI).
from inputGUI import *
tweets = tw.Cursor(api.search, q="%s" % keyword, lang="en", since=start_date, tweet_mode='extended').items(ntweets)  # from:user #-filter:retweets
# Front-end
# st.title("Twitter Sentiment Analysis")
# st.markdown("The dashboard analyzes tweets based on user input and shows polarity of the tweet.")
# Preprocessed
def percentage(part, whole):
    """Return ``part`` as a percentage of ``whole``."""
    scaled = 100 * float(part)
    return scaled / float(whole)
# First sentiment pass: tally VADER scores over the raw (uncleaned) tweets.
positive = 0
negative = 0
neutral = 0
polarity = 0
tweet_list = []
neutral_list = []
negative_list = []
positive_list = []
for tweet in tweets:
    tweet_list.append(tweet.full_text)
    analysis = TextBlob(tweet.full_text)
    score = SentimentIntensityAnalyzer().polarity_scores(tweet.full_text)
    neg = score['neg']
    neu = score['neu']
    pos = score['pos']
    comp = score['compound']
    polarity += analysis.sentiment.polarity
    # Classify by whichever of the pos/neg components dominates.
    if neg > pos:
        negative_list.append(tweet.full_text)
        negative += 1
    elif pos > neg:
        positive_list.append(tweet.full_text)
        positive += 1
    elif pos == neg:
        neutral_list.append(tweet.full_text)
        neutral += 1
# Convert the raw counts to percentages of the requested tweet count.
positive = percentage(positive, ntweets)
negative = percentage(negative, ntweets)
neutral = percentage(neutral, ntweets)
polarity = percentage(polarity, ntweets)
positive = format(positive, '.1f')
negative = format(negative, '.1f')
neutral = format(neutral, '.1f')
print("Positive : " + positive + " %" + " includes " + str(len(positive_list)))
print("Negative : " + negative + " %" + " includes " + str(len(negative_list)))
print("Neutral : " + neutral + " %" + " includes " + str(len(neutral_list)))
# print(polarity)
tw_list = pd.DataFrame(tweet_list)
tw_list["text"] = tw_list[0]
# Data Cleaning
# Removing RT, Punctuation
remove_rt = lambda x: re.sub(r'RT @\w+: '," " ,x)  # strip the retweet prefix
rt = lambda x: re.sub(r'[^\w\s]', '', x)           # drop punctuation
nl = lambda x: re.sub(r'[\n]',' ',x)               # flatten newlines
tw_list["text"] = tw_list.text.map(remove_rt)
tw_list["text"] = tw_list.text.map(rt)
tw_list["text"] = tw_list.text.map(nl)
tw_list["text"] = tw_list.text.str.lower()
tw_list["text"] = tw_list.text.str.strip()
# print(tw_list.head(10))
# print(tw_list['text'])
# Second sentiment pass, this time over the cleaned text.
positive = 0
negative = 0
neutral = 0
polarity = 0
tweet_list = []
neutral_list = []
negative_list = []
positive_list = []
print("-------------------")
for twInL in tw_list["text"]:
    tweet_list.append(twInL)
    analysis = TextBlob(twInL)
    score = SentimentIntensityAnalyzer().polarity_scores(twInL)
    neg = score['neg']
    neu = score['neu']
    pos = score['pos']
    comp = score['compound']
    polarity += analysis.sentiment.polarity
    if neg > pos:
        negative_list.append(twInL)
        negative += 1
    elif pos > neg:
        positive_list.append(twInL)
        positive += 1
    elif pos == neg:
        neutral_list.append(twInL)
        neutral += 1
positive = percentage(positive, ntweets)
negative = percentage(negative, ntweets)
neutral = percentage(neutral, ntweets)
polarity = percentage(polarity, ntweets)
positive = format(positive, '.1f')
negative = format(negative, '.1f')
neutral = format(neutral, '.1f')
print("Positive : " + positive + " %" + " includes " + str(len(positive_list)))
print("Negative : " + negative + " %" + " includes " + str(len(negative_list)))
print("Neutral : " + neutral + " %" + " includes " + str(len(neutral_list)))
# # Processing GUI
# from prepostGUI import *
# Pie Chart of the (cleaned) sentiment distribution.
labels = ['Positive ['+str(positive)+'%]' , 'Neutral ['+str(neutral)+'%]','Negative ['+str(negative)+'%]']
sizes = [positive, neutral, negative]
colors = ['yellowgreen', 'blue','red']
patches, texts = plt.pie(sizes,colors=colors, startangle=90,explode = (0.01, 0.01, 0.01))
plt.style.use('default')
plt.legend(labels)
plt.title("Sentiment Analysis Result for keyword= "+keyword+"" )
plt.axis('equal')
plt.show()
# Bar Chart
# Sentiment = ['Positive','Neutral','Negative']
# notweets = [positive,neutral,negative]
# plt.bar(Sentiment, notweets)
# plt.title('Sentiment Vs %. of tweets')
# plt.xlabel('Sentiment')
# plt.ylabel('No. of tweets')
# plt.show()
# notweets = [positive, neutral, negative]
# bars = ('Positive', 'Neutral', 'Negative')
# y_pos = np.arange(len(bars))
# plt.bar(y_pos, notweets, color=(0.2, 0.4, 0.6, 0.6))
# plt.xlabel('%. of tweets v/s sentiment', fontweight='bold', color = 'orange', fontsize='17', horizontalalignment='center')
# plt.show()
#!/usr/bin/env python
import os
from fabric.api import (
task,
local,
)
from datetime import datetime
from new7day import settings
DATABASES = settings.DATABASES['default']
@task
def manage(cmd):
    """Run a Django management command through the local shell."""
    local('python manage.py %s' % cmd)
@task
def migrate():
    '''
    Database migration: generate and then apply Django migrations.
    '''
    manage('makemigrations')
    manage('migrate')
@task
def create_db():
    '''
    Create the project database if it does not already exist.
    '''
    command = (
        'mysql -uroot -p{db_password} -e '
        '"CREATE DATABASE IF NOT EXISTS {db_name};"'
    ).format(
        db_name=DATABASES['NAME'],
        db_password=DATABASES['PASSWORD'],
    )
    local(command)
@task
def drop_db():
    '''
    Drop the project database.
    '''
    command = (
        'mysql -uroot -p{db_password} -e '
        '"DROP DATABASE {db_name};"'
    ).format(
        db_name=DATABASES['NAME'],
        db_password=DATABASES['PASSWORD'],
    )
    local(command)
@task
def backup():
    '''
    Back up the database: dump to a timestamped file under ./backup and
    refresh ./backup/data.sql (the default file used by recovery()).
    '''
    if not os.path.exists('./backup'):
        local('mkdir backup')
    now = datetime.now().strftime('%Y%m%d%H%M')
    kwargs = dict(
        now=now,
        db_name=DATABASES['NAME'],
        db_password=DATABASES['PASSWORD'],
    )
    local(
        'mysqldump -uroot -p{db_password} '
        '{db_name} > ./backup/common_data_{now}.sql;'
        'cp ./backup/common_data_{now}.sql ./backup/data.sql'.format(**kwargs)
    )
@task
def recovery(sql_file=None):
    '''
    Import data: drop and recreate the database, then load a SQL dump.
    Defaults to ./backup/data.sql when no dump file is given.
    '''
    filename = './backup/data.sql' if not sql_file else sql_file
    drop_db()
    create_db()
    kwargs = dict(
        filename=filename,
        db_name=DATABASES['NAME'],
        db_password=DATABASES['PASSWORD'],
    )
    local(
        'mysql -uroot -p{db_password} '
        # '{filename}' restored: the original format string had a corrupted
        # literal here, so the computed dump path was never used.
        '{db_name} < {filename}'.format(**kwargs)
    )
|
import os
from lib.conf.config import settings #导入配置路径(实现自定义配置+默认配置的整合)
class Nic(object):
    """Collect host NIC (network interface) information."""

    def __init__(self):
        pass

    # Hook-style factory kept for symmetry with other collector plugins:
    # extra setup could be inserted here before construction if needed.
    @classmethod
    def initial(cls):
        return cls()

    def process(self, command_func, debug):
        """Return NIC info.

        NOTE(review): the real collection logic is commented out below and
        a stub string is returned — confirm whether this is intentional.
        """
        # if debug:
        #     output = open(os.path.join(settings.BASEDIR,'files/nic.out'),'r',encoding='utf-8').read()
        #     interfaces_info = self._interfaces_ip(output)
        # else:
        #     interfaces_info = self.linux_interfaces(command_func)
        # self.standard(interfaces_info)
        return "nic11"
# Integer (int) and float types.  (Comments translated from Korean.)
x = 2
y = 4
p = 10
q = 3
A = x*y          # A = 8, not 8.0: int * int stays int
C = y/x          # true division always yields a float, so C is 2.0
D = x ** y ** x  # ** is right-associative: x ** (y ** x) = 2 ** 16 = 65536
print(C)         # division is not closed over the integers, hence 2.0
print("D=",D)
print('17을 3으로 나누면 몫이', 17//3 ,'이고','나머지가', 17%3,"이다")
def wow(x, y):
    """Print the quotient and remainder of x / y, in two formats."""
    quotient = int(x // y)
    remainder = int(x % y)
    print(x, "를", y, "로 나누면 몫이", quotient, "이고 나머지가", remainder, "이다")
    print("%d를 %d로 나누면 몫이 %d이고 나머지가 %d이다" % (x, y, quotient, remainder))
|
from django.db import models
from django.utils.text import slugify
from django.shortcuts import reverse
from django.db.models import Q
from user_account.models import UserAccount
class Profile(models.Model):
    """A user-owned profile with contacts, invitations and a post timeline."""

    user = models.ForeignKey(UserAccount,
                             on_delete=models.CASCADE,
                             related_name='profiles')
    contacts = models.ManyToManyField('self', blank=True)
    name = models.CharField(max_length=100, unique=True)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    slug = models.CharField(max_length=255, blank=False)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Keep the slug in sync with the (unique) name on every save.
        self.slug = slugify(self.name)
        super().save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse("core:profile", kwargs={"profile_slug": self.slug})

    @property
    def invitations(self):
        # Invitations in either direction (sent or received).
        return Invitation.objects.filter(Q(user_to=self) | Q(user_from=self))

    @property
    def connections(self):
        """Profiles connected through an accepted invitation, either way."""
        accepted = self.invitations.filter(Q(status="accepted"))
        contacts = []
        for contact in accepted:
            if contact.user_to != self:
                contacts.append(contact.user_to)
            if contact.user_from != self:
                contacts.append(contact.user_from)
        return set(contacts)

    @property
    def posts(self):
        return Post.objects.filter(profile=self)

    @property
    def timeline(self):
        """Own posts plus every connection's posts, newest first.

        Fixes two bugs in the original: the queryset union now accumulates
        across all connections (previously each iteration discarded the
        previous union, keeping only the last contact's posts), and a
        profile with no connections no longer raises UnboundLocalError.
        """
        timeline_posts = Post.objects.filter(profile=self)
        for contact in self.connections:
            timeline_posts = timeline_posts | Post.objects.filter(profile=contact)
        return timeline_posts.order_by('-created')
class Invitation(models.Model):
    """A contact request from one Profile to another."""

    STATUS_CHOICES = [
        ('accepted', 'Accepted'),
        ('rejected', 'Rejected'),
        ('waiting', 'Waiting'),
    ]
    user_from = models.ForeignKey(Profile,
                                  related_name='invite_from',
                                  on_delete=models.CASCADE)
    user_to = models.ForeignKey(Profile,
                                related_name='invite_to',
                                on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    status = models.CharField(max_length=10,
                              choices=STATUS_CHOICES,
                              default='waiting')

    class Meta:
        ordering = ('-created',)  # newest invitations first

    def __str__(self):
        # NOTE(review): the doubled quotes ('') after each name look like a
        # typo, but the string is user-visible so it is left untouched here.
        return f"Invitation of '{self.user_from.name}'' to '{self.user_to.name}''"

    def get_absolute_url(self):
        return reverse("core:accept_invite", kwargs={"id": self.id})
class Post(models.Model):
    """A text post belonging to a Profile."""

    profile = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name="posts")
    body = models.TextField(null=True)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    def __str__(self):
        return f"{self.profile.name}'s post from {self.created}"
class Comment(models.Model):
    """A Profile's comment on a Post."""

    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
    body = models.TextField()
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    def __str__(self):
        return f"{self.profile.name}'s comment in {self.post.profile.name}'s post"
class Reaction(models.Model):
    """A reaction type (e.g. like) with a signed weight."""

    name = models.CharField(max_length=10)
    # NOTE(review): 'weigth' is a typo for 'weight', but it is a database
    # column — renaming would require a migration, so it is left as-is.
    weigth = models.SmallIntegerField()

    def __str__(self):
        return self.name
class PostReaction(models.Model):
    """Join model: which Profile reacted with which Reaction on which Post."""

    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
    reaction = models.ForeignKey(Reaction, on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f"{self.profile.name}'s reaction in {self.post.profile.name}'s post"
|
from django.contrib import admin
from df_goods.models import TypeInfo, GoodsInfo, Comment
class TypeInfoAdmin(admin.ModelAdmin):
    """Admin list view for goods types: id and title columns."""
    list_display = ['id', 'ttitle']
class GoodsInfoAdmin(admin.ModelAdmin):
    """Admin list view for goods, 15 rows per page."""
    list_per_page = 15
    list_display = ['id', 'gtitle', 'gprice', 'gunit', 'gkucun', 'gcontent', 'gtype']
class CommentAdmin(admin.ModelAdmin):
    """Admin list view for comments, 15 rows per page."""
    list_per_page = 15
    list_display = ['id', 'content', 'uid', 'gid']
# Attach the admin configurations to the default admin site.
admin.site.register(TypeInfo, TypeInfoAdmin)
admin.site.register(GoodsInfo, GoodsInfoAdmin)
admin.site.register(Comment, CommentAdmin)
|
# Basic arithmetic demo: precedence, true division, exponentiation.
print(2 + 2)
print(50 - 5 * 6)
print((50 - 5 * 6) / 4)
print(8 / 5)
print(5 ** 2)  # 5 squared
width = 20
height = 5 * 9
area = width * height
print(area)
import os

# SConscript build file: `Import`/`Glob` and the injected `env` come from
# the SCons build framework, not this file.
Import('env')
env = env.Clone()
env.Append(LIBPATH = ['#../build/log'])  # presumably the kulog build output dir — verify
env.Append(LIBS = ['kulog', 'rt'])
env.Program('simple_log', Glob('simple_log.cpp'))
|
from flask import Flask, render_template
from bs4 import BeautifulSoup
import requests
# Scrape IT job listings in Ethiopia from ngojobsinafrica.com and print one
# summary dict per listing.
base_url = "https://ngojobsinafrica.com/?post_type=noo_job&s=&location[]=ethiopia&category[]=information-technology"
source = requests.get(base_url).text
soup = BeautifulSoup(source, 'lxml')
all_information_technology_jobs = soup.find_all('div', class_="loop-item-wrap")
for item in all_information_technology_jobs:
    # Title and link live on the same anchor inside the featured block
    # (the original looked the same block up twice).
    featured = item.find('div', class_='item-featured')
    job_name = featured.a["title"]
    job_link = featured.a["href"]
    company = item.find('span', class_='job-company').a.text
    job_type = item.find('span', class_='job-type').a.text
    job_location = item.find('span', class_='job-location').a.text
    job_dates = item.find('time', class_='entry-date')
    job_date = job_dates.find_all('span')[1].text
    # Renamed from 'dict', which shadowed the builtin.
    job_info = {'JobName': job_name, 'Organisation': company, 'JobType': job_type, 'JobLocation': job_location,
                'JobExpiry': job_date[3:], 'JobLink': job_link}
    print(job_info)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 09 11:28:29 2016
@author: utente
Pattern Analysis 2016
"""
import pandas as pd
import numpy as np
from functions_for_PA2016 import *
import matplotlib.pyplot as plt
import statsmodels.api
# NOTE(review): this is Python 2 code (statement-form `print` below); run
# it under Python 2 or port the print statements before reuse.
data = pd.read_excel("C:/Users/utente/Documents/PUN/Anno 2016_08.xlsx", sheetname = 1)
data = np.array(data["PUN"])
for sig in range(10):
    print freq_greater_than(data, sig, False)
# Standardise the series and clip negative z-scores to zero.
stdata = (data - np.mean(data))/np.std(data)
i = np.where(stdata < 0)
stdata[i] = 0
plt.figure()
plt.plot(stdata)
stdiff = np.diff(stdata,n=1)
plt.figure()
plt.plot(stdiff)
# Hourly index over the year, then daily means.
rng = pd.date_range(start = '2016-01-01', periods = data.size, freq ='H')
D = pd.DataFrame(data).set_index(rng)
dm = pd.DataFrame(D.resample('D').mean())
# NOTE(review): 'fraq' below is presumably a typo for 'freq' — verify; as
# written it is not a recognised date_range parameter.
dm = dm.set_index(pd.date_range(start = '2016-01-01', periods = dm.shape[0], fraq = 'D'))
dm.index = pd.DatetimeIndex(pd.date_range(start = '2016-01-01', periods = dm.shape[0], fraq = 'D'))
pd.infer_freq(dm)
dmdiff = np.diff(dm[dm.columns[0]])
dm.plot()
plt.figure()
plt.plot(dmdiff)
np.mean(dmdiff)
#dm.to_csv('daily_means_2016.csv', sep=',')
#pd.DataFrame(dmdiff).to_csv('diff_daily_means_2016.csv', sep=',')
#### decompose dm and dmdiff ####
# NOTE(review): the loop recomputes the same decomposition 245 times and
# keeps only the last result — confirm intent.
for i in range(245):
    dec_dm = statsmodels.api.tsa.seasonal_decompose(dm, freq = 7)
import json
import pymysql
class OptionMysql(object):
    """Thin pymysql wrapper exposing simple CRUD helpers.

    Keeps one connection with two cursors: a tuple cursor (`cur`) and a
    dict cursor (`dict_cur`) for dictionary-shaped query results.
    """

    def __init__(self, options):
        """Connect using a dict with keys HOST, USERNAME, PASSWORD, DATABASE, PORT."""
        host = options['HOST']
        user = options['USERNAME']
        password = options['PASSWORD']
        database = options['DATABASE']
        port = options['PORT']
        charset = 'utf8'
        # Connect to the database.
        self.conn = pymysql.connect(host=host, port=port, user=user, password=password, database=database, charset=charset)
        # Plain (tuple) cursor.
        self.cur = self.conn.cursor()
        # Dict cursor used by get_data_dict().
        self.dict_cur = self.conn.cursor(cursor=pymysql.cursors.DictCursor)

    def __del__(self):
        self.cur.close()
        self.dict_cur.close()
        self.conn.close()

    def insert_data(self, sql, params=None):
        """Execute an INSERT; commit on success, rollback and re-raise on error.

        NOTE: mutable default [] replaced with None (shared-default pitfall);
        falsy params behave exactly as before.
        """
        try:
            if not params:
                self.cur.execute(sql)
            else:
                self.cur.execute(sql, params)
            self.conn.commit()
        except Exception as e:
            self.conn.rollback()
            raise e
        return True

    def get_data_dict(self, sql, params=None):
        """Run a SELECT and return the rows as dicts."""
        try:
            if params:
                self.dict_cur.execute(sql, params)
            else:
                self.dict_cur.execute(sql)
            data = self.dict_cur.fetchall()
            return data
        except Exception as e:
            self.conn.rollback()
            raise e

    def get_data(self, sql, params=None):
        """Run a SELECT and return the rows as tuples."""
        self.cur.execute(sql, params)
        data = self.cur.fetchall()
        return data

    def update_data(self, sql, params=None):
        """Execute an UPDATE; commit on success, rollback and re-raise on error."""
        try:
            if not params:
                self.cur.execute(sql)
            else:
                self.cur.execute(sql, params)
            self.conn.commit()
        except Exception as e:
            self.conn.rollback()
            raise e
        return True

    def delete_data(self, sql, params=None):
        """Execute a DELETE; commit on success, rollback and re-raise on error."""
        try:
            self.cur.execute(sql, params)
            self.conn.commit()
            return True
        except Exception as e:
            self.conn.rollback()
            raise e

    def findOne(self, table, col=None, params=None):
        """Check whether a matching, non-deleted record exists.

        Args:
            table: table name (trusted identifier — not escaped).
            col: list of column names to select and filter on.
            params: values bound 1:1 against `col`.

        BUG FIXES: the caller's `col` list is no longer mutated in place,
        and WHERE conditions are joined with " AND " — the original joined
        them with ",", which is invalid SQL for more than one column.
        """
        col = list(col) if col else []
        sql = "SELECT " + ",".join(col) + " FROM " + table + " WHERE deleted_at = '0000-01-01 00:00:00'"
        conditions = [c + " = %s" for c in col]
        if conditions:
            sql += " AND " + " AND ".join(conditions)
        self.cur.execute(sql, params)
        data = self.cur.fetchall()
        return data

    def update(self, table, where, args: object):
        """UPDATE `table` SET <columns from args> WHERE <where>.

        dict values in `args` are JSON-serialised before binding.
        `table` and `where` are trusted strings (not parameterised).
        """
        column = []
        values = []
        # Collect the columns and values to update.
        for key in args.keys():
            column.append("`" + key + "`" + " = %s")
            if isinstance(args[key], dict):
                args[key] = str(json.dumps(args[key]))
            values.append(args[key])
        sql = "UPDATE " + table + " SET " + ",".join(column) + " WHERE " + where
        try:
            self.cur.execute(sql, values)
            self.conn.commit()
            return True
        except Exception as e:
            self.conn.rollback()
            raise e
|
# -*- coding: utf-8 -*-
# Martínez García Mariana Yasmin
# Projectile height y = v0*t - (1/2)*g*t^2 at several times (Python 2 script).
print (34*3)-(1/2)*(9.81)*(3**2)#1/2 is integer division in Python 2, so the gravity term vanishes
print (34*3)-(1/2.0)*(9.81)*(3**2)#1.0/2 or 1/2.0 forces floating-point division
print (34*1)-(1/2.0)*(9.81)*(1*2)
print (34*1.5)-(1/2.0)*(9.81)*(1.5**2)
print (34*5)-(1/2.0)*(9.81)*(5**2)
v0 = 34  # initial velocity
g = 9.81  # gravitational acceleration
t = 5  # time
y = v0*t - 1.0/2*g*t**2
print y
|
import scrapy
import json
from scrapy.http import Request
class movieSpider( scrapy.Spider ) :
    """Crawl Rotten Tomatoes review pages for the movies listed in
    movie_name_list.json and yield one record per (movie, review)."""
    name = "movit"
    movie_name = "the_martian"
    allowed_domains = [ "www.rottentomatoes.com" ]
    #http://www.rottentomatoes.com/m/the_martian/reviews/
    start_urls = [
        "http://www.rottentomatoes.com/m/the_martian/reviews/"
    ]

    def parse( self, response ) :
        """Extract every review row on a review-list page and request the
        movie detail page for each, carrying the review data in meta.

        BUG FIX: the original mutated the single shared meta['item'] dict on
        every loop iteration, so every Request yielded from this page ended
        up carrying the data of the LAST review row.  A per-row copy keeps
        each request's data independent.
        """
        # CSS anatomy: .review_table_row > .review_date / .the_review /
        # .review_desc .subtle (score text).
        for sel in response.css( '.review_table_row' ):
            item = dict( response.request.meta[ 'item' ] )
            item[ 'movie_name' ] = item[ 'name' ]
            item[ 'date' ] = sel.css( '.review_date::text' ).extract()[0]
            item[ 'content' ] = sel.css( '.the_review::text' ).extract()[0]
            item[ 'score' ] = sel.css( '.review_desc .subtle::text' ).extract()[0]
            yield Request( "http://www.rottentomatoes.com" + item[ "url" ],
                           meta={ 'item': item }, callback=self.parse_movie )

    def parse_movie( self, response ) :
        """Combine the review carried in meta with the movie detail page."""
        item = response.request.meta[ 'item' ]
        yield {
            "movie_name" : item[ 'movie_name' ],
            "movie_url" : item[ 'url' ],
            "movie_imgs" : item[ 'imgs' ],
            "movie_actors" : item[ 'actors' ],
            "movie_id" : item[ 'id' ],
            "movie_content" : response.css( '#movieSynopsis::text' ).extract()[0],
            "movie_genre" : response.css( 'span[itemprop="genre"]::text' ).extract(),
            "date" : item[ 'date' ],
            "content" : item[ 'content' ],
            "score" : item[ 'score' ]
        }

    def start_requests(self):
        """Seed one request per movie per review page (pages 1..9)."""
        with open('movie_name_list.json') as data_file:
            data = json.load(data_file)
        for item in data:
            for i in range( 1, 10 ):
                yield Request( "http://www.rottentomatoes.com" + item["url"] + "reviews/?page=%d" % i,
                               meta={'item': item}, callback=self.parse )
|
from django.conf.urls import url
from finder import views

# URL routes for the finder app.
urlpatterns = [
    # Function-based view for inter-region lookups.
    url(r'^inter-region/$', views.inter_region_lookup_view, name='inter_region_lookup_view'),
    # Class-based view for single-type lookups.
    url(r'^single-type/$', views.SingleTypeLookupView.as_view(), name='single_type_lookup_view'),
]
|
import json
import array as arr
import sys
import glob
import errno
from time import sleep
import os
#import keyboard
nameofsong = ''
path = '/Users/alejandrasandoval/Desktop/usb/'
def getsong(songnum, count):
    """Load song number `songnum` from json_arr and return a flat
    [time, name, time, name, ...] note list.

    `count` is the running note total; the magic numbers below are the
    known note counts of the bundled test songs and select which filename
    label is printed.
    """
    # BUG FIX: the original appended into the module-level `notes_array`
    # without ever clearing it, so replaying or switching songs replayed
    # every previously loaded note too.  Build a fresh list per call.
    notes = []
    with open(json_arr[songnum]) as data_file:
        data = json.load(data_file)
    for p in data['tracks']:
        for f in p['notes']:
            notes.append(f['time'])
            notes.append(f['name'])
            count = count + 1  # keeps count of number of notes
    print("Filename: ")
    if count == 1862:
        print("test4")
    elif count == 26:
        print("test5")
    elif count == 1954:
        print("test2")
    elif count == 17:
        print("test3")
    else:
        print("test")
    return notes
# Get path, then build the list of all files and the subset of .json songs.
dirs = os.listdir(path)
files_arr = []
json_arr = []
# Creates array of files
for file in dirs:
    files_arr.append(file)
# This would print all the files and directories for testing.
print("List of files:")
for f in files_arr:
    print(f)
# Keep only .json files (these are the playable songs).
for g in files_arr:
    if g[-4:] == 'json':
        json_arr.append(g)
print("List of jsons:")
print(json_arr)

song = 0
notes_array = []
# BUG FIX: `onplay` used to be undefined until the first P/N/X command, so
# any other first input crashed with NameError at the print loop below.
onplay = ['']
play = True
while play:
    x = input('Play/Replay (P), Next(N) or Pause(X) \n')
    count = 0
    if x == 'P' or x == 'p':  # play / replay the current song
        if song < len(json_arr):
            onplay = getsong(song, count)
        else:
            print("No more songs.")
    if x == 'N' or x == 'n':  # advance to the next song
        if (song + 1) < len(json_arr):
            song = song + 1
            onplay = getsong(song, count)
        else:
            onplay = ['']
            print("No more songs.")
    if x == 'X' or x == 'x':  # pause
        onplay = ['']
        print('Pause')
        sleep(1)
    for a in onplay:
        print(a)
    # Checks if array is with correct note
    for a in onplay:
        print(a)
    #pprint(data) #prints all json file
    #print(count) #print total number of notes
# Generated by Django 2.2.1 on 2019-05-28 08:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: mark Board.created_date as auto_created."""
    dependencies = [
        ('app', '0004_auto_20190528_1720'),
    ]
    operations = [
        migrations.AlterField(
            model_name='board',
            name='created_date',
            field=models.DateTimeField(auto_created=True),
        ),
    ]
|
import random
import generate_private_public
from elsig_hash import egGen
import RSA

# Module-level state shared between the protocol steps below.
Bob_id = 0          # Bob's id as read from file (a string after verify_certificate)
Alice_key = tuple() # Alice's key material; replaced by key_id_Generation()
Alice_id = 0        # Alice's randomly generated id
bob_n = 0           # Bob's RSA modulus (set after certificate verification)
bob_e = 0           # Bob's RSA public exponent (set after certificate verification)
signed_m = ""       # last signed message, formatted "message s1 s2"
def key_id_Generation():
    """Generate Alice's id and her ElGamal + RSA key material.

    NOTE(review): `Alice_key[1] = Alice_key[1] + ...` requires the value
    returned by generate_User_public_private_key() to be a mutable sequence
    (a list) — TODO confirm; a tuple here would raise TypeError.
    """
    global Alice_id
    Alice_id = random.randint(1000001, 10000000)
    # to be from other ranges generated in cert (to be unique)
    # generate private/public pair of Alice
    global Alice_key
    Alice_key = generate_private_public.generate_User_public_private_key()
    print(generate_private_public.generate_rsa_keys())
    # Append the RSA components to the public part of the key.
    Alice_key[1] = Alice_key[1] + generate_private_public.generate_rsa_keys()
    print(Alice_key)
    return
def send_id_key_toAuthority():
    """Write Alice's id and public-key components to AliceKey_ToAuthority.txt.

    File format (space separated): id, q, a, ya, n, e.
    """
    # `with` guarantees the handle is closed even if a write fails.
    with open("AliceKey_ToAuthority.txt", "w") as alice_key_file:
        # Alice sends her public key and her id to the authority.
        alice_key_file.write(str(Alice_id) + ' ' + str(Alice_key[1][0]) + ' ' + str(Alice_key[1][1]) + ' ' + str(
            Alice_key[1][2]) + ' ' + str(Alice_key[1][3]) + ' ' + str(Alice_key[1][4]))
    return
def send_id_toBob():
    """Write Alice's id to AliceKey_ToBob.txt."""
    # `with` guarantees the handle is closed even if the write fails.
    with open("AliceKey_ToBob.txt", "w") as alice_key_file:
        alice_key_file.write(str(Alice_id))
    return
def verify_certificate():
    """Verify Bob's certificate against his claimed id.

    Reads Bob's id, his certificate and the authority's RSA public key from
    files, decrypts the certificate with the authority's key and compares
    the embedded id with Bob's claimed id.  On success stores Bob's RSA
    (n, e) in the module globals and returns True, otherwise False.

    BUG FIX: two of the three file handles were never closed; all files are
    now opened with `with`.
    """
    global Bob_id, bob_n, bob_e
    # Read Bob's claimed id.  .strip() guards against a trailing newline
    # differing between writer and reader.
    with open("BobKey_ToAlice.txt", "r") as bob_key_file:
        Bob_id = bob_key_file.readline().strip()
    # Read Bob's certificate.
    with open("send_cert_ToAlice.txt", "r") as cert_file:
        bob_certificate = cert_file.readline()
    # Authority's RSA public key (n on the first line, e on the second).
    with open("KeyAuthority.txt", "r") as authority_file:
        authority_n = int(authority_file.readline())
        authority_e = int(authority_file.readline())
    # Decrypt the certificate; fields are: id, q, a, ya, n, e.
    de_key = RSA.decrypt(int(bob_certificate), authority_n, authority_e).split(' ', 6)
    print(de_key)
    # Compare the decrypted id with Bob's claimed id.
    if de_key[0] == Bob_id:
        print("certificate verified")
        bob_n = int(de_key[4])
        bob_e = int(de_key[5])
        return True
    else:
        print("wrong certificate")
        return False
def signature(message):
    """ElGamal-sign *message* with Alice's key and store "m s1 s2" in signed_m.

    Key layout: Alice_key[1] = (q, a, ya, n, e); Alice_key[0] = xa (private).
    """
    # 1 0 = q 1 1 =a 1 2= ya 1 3=n 1 4=e , 0=xa
    s1, s2 = egGen(Alice_key[1][0], Alice_key[1][1], Alice_key[0], message)
    global signed_m
    signed_m = str(message) + ' ' + str(s1) + ' ' + str(s2)
    return
def encrypt_message():
    """RSA-encrypt the signed message with Bob's public key and write the
    ciphertext to message.txt."""
    print("signed message", signed_m)
    encrypted_message = RSA.encrypt(signed_m, bob_e, bob_n)
    # `with` guarantees the file is closed even if the write fails.
    with open("message.txt", "w") as message_file:
        message_file.write(str(encrypted_message))
    return
#key_id_Generation()
########################################################################################################
# print("Alice_id : ",Alice_id)
# print("Alice_key : ",Alice_key)
|
# -*- coding:utf-8 -*-
import itertools
import operator
import operator
import string
from functools import reduce  # py2/py3-compatible reduce for get_lemmas/build_ngrams

import snowballstemmer
from textblob import TextBlob, Word
LOWER_MAP = {"tr": {ord("I"): u"ı"}}
STEMMERS = {
"en": snowballstemmer.stemmer("english"),
"tr": snowballstemmer.stemmer("turkish"),
}
def noun_phrases(text):
    """Tokenize *text* with TextBlob and return the resulting token list."""
    return TextBlob(text).tokenize()
def get_synsets(text):
    """Return the WordNet synsets of *text* (spaces converted to lemma form)."""
    word = Word(to_lemma(text))
    return word.synsets
def get_lemmas(text):
    """Return surface forms of all lemma names across *text*'s synsets."""
    word = Word(to_lemma(text))
    sets = map(set, [synset.lemma_names() for synset in word.synsets])
    # BUG FIX: reduce() over an empty sequence raises TypeError when the
    # word has no synsets; seed the union with an empty set.
    merged = reduce(operator.or_, sets, set())
    return map(from_lemma, merged)
def to_lemma(text):
    """Convert a surface phrase to WordNet lemma form (spaces -> underscores)."""
    return "_".join(text.split(" "))
def from_lemma(text):
    """Convert a WordNet lemma back to a surface phrase (underscores -> spaces)."""
    return " ".join(text.split("_"))
def stem_word(word, language):
    """Stem *word* with the language's snowball stemmer and strip punctuation.

    Unknown languages pass through unchanged.
    """
    stemmer = STEMMERS.get(language)
    if stemmer is None:
        return word
    return stemmer.stemWord(word).strip(string.punctuation)
def tokenize(wordlist, language, stem=True):
    """Join the words with spaces, optionally stemming each via stem_word()."""
    if stem:
        words = (stem_word(word, language) for word in wordlist)
    else:
        words = wordlist
    return " ".join(words)
def lower(text, language):
    """Locale-aware lowercase: apply the language's character map first.

    For Turkish this maps "I" to dotless "ı" before the generic lower().
    NOTE(review): assumes py3 str.translate / py2 unicode.translate dict
    semantics — confirm the runtime if py2 byte strings can reach here.
    """
    if language in LOWER_MAP:
        text = text.translate(LOWER_MAP[language])
    return text.lower()
def build_ngrams(text, language="en"):
    """Yield stemmed and unstemmed 3/2/1-grams of the lowercased text.

    NOTE(review): relies on a module-level `reduce` binding (builtin in
    py2); the two generator expressions each iterate `wordlists`, which
    works because reduce(operator.add, ...) produces a concrete list.
    """
    blob = TextBlob(lower(text, language))
    ngrams = [blob.ngrams(n=n) for n in (3, 2, 1)]
    wordlists = reduce(operator.add, ngrams)
    tokenized = (tokenize(wordlist, language, stem=True) for wordlist in wordlists)
    pure = (tokenize(wordlist, language, stem=False) for wordlist in wordlists)
    return itertools.chain(tokenized, pure)
def is_subsequence(sequence, parent):
    """Return True iff `sequence` occurs as a contiguous run inside `parent`."""
    # BUG FIX: xrange is Python-2-only; range behaves identically here on
    # both Python 2 and Python 3.
    for i in range(1 + len(parent) - len(sequence)):
        if sequence == parent[i : i + len(sequence)]:
            return True
    return False
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 3 15:43:36 2021
@author: Nikita
"""
"""
First solution i've written was very similar to this; the major
difference was that it used slices and sum(slice), leading to
a complexity of O(n^3) likely.
Essentially,
for array_size in range (2,size):
for start_index in range (0, size - array_size + 1):
current_sum = sum(nums[start_index : start_index + array_size])
In this version, I've cut down on the complexity by using an array. I
think this should be O(n^2) and O(n) memory.
It took me about 1 hour to come up with this.
The solution fails for a very large input because of the time limit;
as far as i'm concerned, this is the absolute limit of good enough
and maintainability.
"""
class Solution:
    def checkSubarraySum(self, nums: list, k: int) -> bool:
        """Return True iff nums contains a contiguous subarray of length >= 2
        whose sum is a multiple of k.

        Uses the prefix-sum-modulo trick: if two prefix sums are congruent
        mod k, the elements strictly between them sum to a multiple of k.
        O(n) time and O(min(n, k)) space — the original O(n^2) sliding
        accumulator timed out on large inputs (as its own comment noted),
        and divided by zero when k == 0.
        """
        # first_seen maps (prefix sum % k) -> earliest index it occurred.
        # Seeding with {0: -1} handles subarrays starting at index 0.
        first_seen = {0: -1}
        prefix = 0
        for i, num in enumerate(nums):
            prefix += num
            # Guard k == 0: no modulo, require an exact zero-sum subarray.
            key = prefix % k if k else prefix
            if key in first_seen:
                # Length >= 2 means the two prefixes are at least 2 apart.
                if i - first_seen[key] >= 2:
                    return True
            else:
                first_seen[key] = i
        return False
|
import sys
sys.path.append(".")
import argparse

from model.utils import str2bool, str2list

# Root directory for run logs.
LOGGING_PATH = './logging'

# Command-line options for training/evaluating the model.  Every `dest`,
# flag, type, default and choice list is kept identical to the original
# interface; only help-text typos are corrected and arguments are grouped.
parser = argparse.ArgumentParser(description="Model Options")

# --- run identification -------------------------------------------------
parser.add_argument("--run-identifier", "-id", dest="run_id", type=str, required=True,
                    help="Add an identifier that will be used to store the run in tensorboard.")

# --- chunker configuration ----------------------------------------------
parser.add_argument("--similarity-threshold", "-thr", dest="sim_threshold", type=float, default=None, required=False,
                    help="Similarity threshold used for NNsim chunking in the embedding space.")
parser.add_argument("--dist-threshold", "-distthr", dest="dist_threshold", type=float, default=None, required=False,
                    help="Distance threshold used for agglomerative chunking in the embedding space.")
parser.add_argument("--agg-layer", "-agglayer", dest="agg_layer", type=int, default=None, required=False,
                    help="Layer of features to use for the agglomerative. If empty, the same trf-out-layer is used.")
parser.add_argument("--log-threshold", "-logthr", dest="log_threshold", type=float, default=None, required=False,
                    help="Log likelihood threshold for the frequency based bracketing.")
parser.add_argument("--hard-span", "-span", dest="span", type=int, default=None, required=False,
                    help="Hard span used for chunking naively.")
parser.add_argument("--max-skip", "-skip", dest="max_skip", type=int, default=None, required=False,
                    help="Max skip for Agglomerative Clustering.")
parser.add_argument("--out-num", "-out", dest="out_num", type=int, default=None, required=False,
                    help="Number of fixed output size for the fixed out size chunker.")
parser.add_argument("--chunker", dest="chunker", type=str, required=False, default=None,
                    choices=["NNSimilarity", "agglomerative", "hard", "fixed", "freq", "rand"],
                    help="Specify the chunker part of the net")
parser.add_argument("--pooling", dest="pooling", type=str, default=None, required=False,
                    choices=["abs_max_pooling", "mean_pooling", "freq_pooling", "rnd_pooling", "conv_att", "lstm"],
                    help="function to do the generation")
parser.add_argument("--trf-out-layer", "-layer", dest="trf_out_layer", type=int, required=True,
                    help="Layer used from the transformer as embeddings.")

# --- logging / evaluation -----------------------------------------------
# Typo fix: "rood" -> "root".
parser.add_argument("--tensorboard-dir", "-tbdir", dest="log_dir", type=str, required=False, default="./tensorboard",
                    help="root directory where tensorboard logs are stored. ./tensorboard by default")
parser.add_argument("--eval-periodicity", "-evalperiod", type=int, required=False, default=50, dest="evalperiod",
                    help="How often in iterations the model is evaluated")

# --- checkpointing ------------------------------------------------------
parser.add_argument("--checkpoint-id", "-checkid", dest='checkpoint_id', default=None, type=str, required=False,
                    help="ID of the checkpoint to load.")
parser.add_argument("--load-modules", "-load", dest="modules_to_load", default='[]', required=False, type=str2list,
                    help="What modules need to be loaded from checkpoint?")
parser.add_argument("--save-modules", "-save", dest="modules_to_save", default="[multitasknet]", type=str2list, required=False,
                    help="What modules need to be saved in checkpoint?")
parser.add_argument("--train-modules", "-train", dest="modules_to_train", default="[multitasknet]", type=str2list, required=False,
                    help="What modules need to be trained by the optimizer?")

# --- training limits ----------------------------------------------------
parser.add_argument("--wall-time", "-wt", dest="walltime", required=False, type=int, default=3600,
                    help="Walltime for training")
parser.add_argument("--wall-steps", "-ws", dest="wallsteps", required=False, type=int, default=500000,
                    help="Wall steps for training")

# --- compression & evaluation flags -------------------------------------
parser.add_argument("--train-compression", "-tc", required=False, default='False', type=str2bool, dest="train_comp",
                    help="set if compression happens during training, True or False")
parser.add_argument("--eval-compression", "-ec", required=False, default='False', type=str2bool, dest="eval_comp",
                    help="set if compression happens during evaluation, True or False")
parser.add_argument("--full-test-eval", "-fev", required=False, default='False', type=str2bool, dest="full_test_eval",
                    help="Set if an evaluation on the full test set is made at the end.")
parser.add_argument("--datasets", "-dts", required=False, default=None, type=str2list, dest="datasets",
                    help="Set the datasets to train on.")

# --- config file names (copy-pasted help texts corrected) ----------------
parser.add_argument("--model-config", "-mconfig", required=False, default='model', type=str, dest="model_config",
                    help="Name of the model config within the config folder without extension")
parser.add_argument("--datasets-config", "-dconfig", required=False, default='datasets', type=str, dest="datasets_config",
                    help="Name of the datasets config within the config folder without extension")
parser.add_argument("--optimizer-config", "-oconfig", required=False, default='optimizer', type=str, dest="optimizer_config",
                    help="Name of the optimizer config within the config folder without extension")

# --- misc ---------------------------------------------------------------
parser.add_argument("--write-google-sheet", "-GS", required=False, action='store_true', dest="write_google_sheet",
                    help="Flag to write on Google Sheet")
parser.add_argument("--log-level", "-log", default='info', type=str, choices=['debug', 'info', 'warning'], required=False,
                    help="Level used for debugging.")

args = parser.parse_args()
|
#!python
import os
import numpy as np
import pandas as pd
import general_functions as gf
import argparse
import pdb

# Clump conditionally-significant GWAS variants for one trait using LD (R2 > 0.8).
parser = argparse.ArgumentParser()
parser.add_argument('--trait', type=str, help='trait', default=None)
args = parser.parse_args()
OUT_DIR = os.environ['OUT_DIR']
## load trait condsigs
condsig = pd.read_csv(gf.out_dir+"/condout/results/condsig_"+args.trait+"_gwas_normalised.tsv", sep="\t")
condsig = gf.decompose_variantid(condsig['VARIANT'])
def get_partners(ld, var):
    """Return the variants linked to *var* in the LD table with R2 > 0.8."""
    high_ld = ld[ld['R2'] > 0.8]
    as_a = high_ld.loc[high_ld['SNP_A'] == var, "SNP_B"].tolist()
    as_b = high_ld.loc[high_ld['SNP_B'] == var, "SNP_A"].tolist()
    # De-duplicate partners found on either side of the pair.
    return list(set(as_a + as_b))
def clump_function(partners, clumps, var):
    """Fold *var* and its LD *partners* into the clump table.

    Args:
        partners: variants in LD with var (from get_partners).
        clumps: DataFrame with columns VARIANT and clump_id, or None before
            the first call.
        var: the variant being clumped.

    Partners already present in exactly one clump pull var into that clump;
    partners spread across several clumps cause those clumps to be merged
    under a fresh id.
    """
    # First variant seen: start clump 1.
    if clumps is None:
        clumps = pd.DataFrame({'VARIANT': partners + [var]})
        clumps['clump_id'] = 1
        return clumps
    # Which existing clumps already contain one of the partners?
    matching_clumps = []
    if len(partners) != 0:
        clumps_s = clumps.loc[clumps['VARIANT'].isin(partners), :]
        matching_clumps = clumps_s['clump_id'].unique().tolist()
    clumps_add = pd.DataFrame({'VARIANT': partners + [var]})
    if len(matching_clumps) == 0:
        # No overlap: open a brand-new clump.
        clumps_add['clump_id'] = clumps['clump_id'].max() + 1
    elif len(matching_clumps) == 1:
        # Exactly one overlap: join that clump.
        clumps_add['clump_id'] = matching_clumps[0]
    else:
        # Several overlapping clumps: merge them all under a fresh id.
        new_id = clumps['clump_id'].max() + 1
        clumps_add['clump_id'] = new_id
        clumps.loc[clumps['clump_id'].isin(matching_clumps), "clump_id"] = new_id
    # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.x;
    # pd.concat is the supported equivalent.
    clumps = pd.concat([clumps, clumps_add], ignore_index=True)
    return clumps
# Plink encodes X/XY as chromosome 23.
condsig.loc[condsig['chromosome'].isin(['X', 'XY']),"chromosome"] = 23
clumps = None
# Clump chromosome by chromosome using the per-chromosome LD tables.
for chrom in condsig['chromosome'].unique():
    ld = pd.read_csv(gf.out_dir+"/condout/ldclump_dosage/dosage_"+str(chrom)+".ld")
    # Keep only LD pairs where both sides are condsig variants (and drop self-pairs).
    ld = ld.loc[ld['SNP_A'].isin(condsig['VARIANT']),:]
    ld = ld.loc[ld['SNP_B'].isin(condsig['VARIANT']),:]
    ld = ld.loc[ld['SNP_A'] != ld['SNP_B'],:]
    condsig_s = condsig.loc[condsig['chromosome'] == chrom,:]
    for index, row in condsig_s.iterrows():
        partners = get_partners(ld, row['VARIANT'])
        clumps = clump_function(partners, clumps, row['VARIANT'])
# Variants can enter the table once as a partner and once directly.
clumps = clumps.drop_duplicates()
clumps.to_csv(OUT_DIR+"/tao_clump/in_trait_clumps_"+args.trait+".csv", index=None)
|
import time
class LightControl:
    """Minimal light-controller stub; on()/off() are placeholders."""

    def __init__(self, channel, pin):
        self.channel = channel
        # BUG FIX: `pin` was accepted but silently discarded; store it.
        self.pin = pin

    def on(self):
        # Placeholder — real GPIO control would go here.
        return True

    def off(self):
        return True

    def flash(self, time, count):
        """Blink the light `count` times, `time` seconds on and off each.

        BUG FIX: the parameter name `time` shadowed the `time` module, so
        the original `time.sleep(...)` always raised AttributeError.  The
        parameter name is kept for interface compatibility; sleep is
        imported locally instead.  The original also multiplied the delay
        by 100 — presumably a broken unit conversion; `time` is now taken
        as seconds directly (TODO confirm intended units).
        """
        from time import sleep
        for _ in range(count):
            self.on()
            sleep(time)
            self.off()
            sleep(time)
        # Leave the light off when done.
        self.off()
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import sys
import os
from random import randint
import datetime
import time
from multiprocessing import Pool, TimeoutError
from collections import defaultdict
from scipy.stats import chisquare
from mmgroup import MM0, MMV
from mmgroup.mm_space import MMSpace
from mmgroup.mm import INT_BITS
################################################################
# Class and character for the monster information taken from GAP
################################################################
#The following information has been obtained from the GAP package:
GAP_INFO = """
gap> t := CharacterTable("M"); #! The character table of the Monster group
CharacterTable( "M" )
gap> ClassNames(t, "ATLAS"); #! Classes of the Monster in ATLAS notatation
[ "1A", "2A", "2B", "3A", "3B", "3C", "4A", "4B", "4C", "4D", "5A", "5B", "6A", "6B", "6C", "6D", "6E", "6F", "7A",
"7B", "8A", "8B", "8C", "8D", "8E", "8F", "9A", "9B", "10A", "10B", "10C", "10D", "10E", "11A", "12A", "12B",
"12C", "12D", "12E", "12F", "12G", "12H", "12I", "12J", "13A", "13B", "14A", "14B", "14C", "15A", "15B", "15C",
"15D", "16A", "16B", "16C", "17A", "18A", "18B", "18C", "18D", "18E", "19A", "20A", "20B", "20C", "20D", "20E",
"20F", "21A", "21B", "21C", "21D", "22A", "22B", "23A", "23B", "24A", "24B", "24C", "24D", "24E", "24F", "24G",
"24H", "24I", "24J", "25A", "26A", "26B", "27A", "27B", "28A", "28B", "28C", "28D", "29A", "30A", "30B", "30C",
"30D", "30E", "30F", "30G", "31A", "31B", "32A", "32B", "33A", "33B", "34A", "35A", "35B", "36A", "36B", "36C",
"36D", "38A", "39A", "39B", "39C", "39D", "40A", "40B", "40C", "40D", "41A", "42A", "42B", "42C", "42D", "44A",
"44B", "45A", "46A", "46B", "46C", "46D", "47A", "47B", "48A", "50A", "51A", "52A", "52B", "54A", "55A", "56A",
"56B", "56C", "57A", "59A", "59B", "60A", "60B", "60C", "60D", "60E", "60F", "62A", "62B", "66A", "66B", "68A",
"69A", "69B", "70A", "70B", "71A", "71B", "78A", "78B", "78C", "84A", "84B", "84C", "87A", "87B", "88A", "88B",
"92A", "92B", "93A", "93B", "94A", "94B", "95A", "95B", "104A", "104B", "105A", "110A", "119A", "119B" ]
gap> Irr(t)[2]; #! Character of degree 196883
Character( CharacterTable( "M" ), [ 196883, 4371, 275, 782, 53, -1, 275, 51, 19, -13, 133, 8, 78, 77, 14, -3, 5, -1,
50, 1, 35, 11, -1, -5, 3, -1, 26, -1, 21, 5, -4, 20, 0, 16, 14, 5, 6, -1, -2, 5, -3, 13, 1, -1, 11, -2, 10, 2, 9,
7, -2, 8, -1, 3, -1, 7, 6, -3, 6, 2, -1, 5, 5, 5, 1, 0, -3, 2, 4, 5, -2, -1, 4, 4, 0, 3, 3, 2, 2, -1, -2, -1, -1,
-1, 1, 3, -1, 3, 3, 2, 2, 2, 2, 2, -2, 1, 2, 2, 3, -1, 2, -1, 2, 0, 2, 2, 1, 1, -2, 1, 2, 0, 1, 2, -1, 0, 1,
1, 2, -1, 1, 1, -1, 1, 0, 0, 1, 1, 0, -1, 0, 0, 0, 1, -1, -1, 1, 1, 0, 0, 0, 1, 0, -1, 0, 0, 1, 0, -1, -1, -1, 0,
0, 1, -1, 0, -2, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, -1, -1, -2, -1, -1, -1, 0, 0, -1, -1, -1, -1, 0,
0, 0, 0, -1, -1, 0, -1, -1, -1 ] )
gap> SizesCentralizers(t); #! Sizes of the centralizers of the classes
[ 808017424794512875886459904961710757005754368000000000, 8309562962452852382355161088000000,
139511839126336328171520000, 3765617127571985163878400, 1429615077540249600, 272237831663616000,
8317584273309696000, 26489012826931200, 48704929136640, 8244323942400, 1365154560000000, 94500000000,
774741019852800, 2690072985600, 481579499520, 130606940160, 1612431360, 278691840, 28212710400, 84707280,
792723456, 778567680, 143769600, 23592960, 12582912, 3096576, 56687040, 2834352, 887040000, 18432000, 12000000,
6048000, 480000, 1045440, 119439360, 22394880, 17418240, 1161216, 884736, 483840, 373248, 276480, 82944, 23040,
73008, 52728, 1128960, 150528, 35280, 2721600, 145800, 10800, 9000, 12288, 8192, 8192, 2856, 34992, 23328, 15552,
3888, 3888, 1140, 76800, 28800, 24000, 19200, 1200, 960, 52920, 6174, 3528, 504, 2640, 2112, 552, 552, 6912, 4608,
3456, 2304, 1152, 864, 864, 576, 384, 288, 250, 624, 312, 486, 243, 4704, 2688, 896, 168, 87, 10800, 7200, 2880,
1800, 360, 240, 240, 186, 186, 128, 128, 594, 396, 136, 2100, 70, 1296, 648, 216, 72, 76, 702, 117, 78, 78, 400,
320, 80, 80, 41, 504, 504, 168, 126, 352, 352, 135, 184, 184, 92, 92, 94, 94, 96, 50, 51, 104, 52, 54, 110, 112,
56, 56, 57, 59, 59, 360, 240, 120, 120, 60, 60, 62, 62, 132, 66, 68, 69, 69, 140, 70, 71, 71, 78, 78, 78, 84, 84,
84, 87, 87, 88, 88, 92, 92, 93, 93, 94, 94, 95, 95, 104, 104, 105, 110, 119, 119 ]
"""
def find_table(name):
    """Return table in GAP_INFO after the comment starting with 'name'"""
    # Locate the "#! <name>" marker, then eval the first [...] literal
    # following it.  eval is safe here only because GAP_INFO is a trusted
    # constant embedded in this file.
    s = GAP_INFO[GAP_INFO.find("#! " + name):]
    copen, cclose = s.find("["), s.find("]")
    return eval(s[copen:cclose+1])
# Tables extracted from the embedded GAP output above.
ClassNames = find_table("Classes")
# Element order of each class: the ATLAS name minus its trailing letter(s),
# e.g. "12A" -> 12.
ClassOrders = [int(s[:-1]) for s in ClassNames]
CharacterValues = find_table("Character")
SizesCentralizers = find_table("Sizes of the centralizers")
assert len(ClassNames) == len(CharacterValues) == len(SizesCentralizers)
################################################################
# Check that monster group elements have correct orders
################################################################
p = 3             # characteristic of the representation
space = MMV(3)    # representation space mod 3
group = MM0       # monster group implementation
good_mm_orders = set(ClassOrders)
max_mmm_order = max(good_mm_orders)
def one_test_mm_order(v, m, verbose = 0):
    """Return the order of group element m acting on vector v, or None if
    it exceeds the largest order occurring in the monster."""
    v = v.copy()
    current, exponent = v.copy(), 0
    while exponent <= max_mmm_order:
        current, exponent = current * m, exponent + 1
        if current == v:
            return exponent
    return None
def rand_v():
    # Uniform random vector of the representation space ('R' tag).
    return space('R')
def rand_m(n_entries = 4):
    # Random monster element built as a word of `n_entries` generators.
    return group('r', n_entries)
def random_test_order(n_entries = 4, display = True):
    """Compute the order of one random monster element on a random vector.

    Returns (ok, order, report): ok is True iff the order occurs among the
    monster's class orders; report is a printable summary string.
    """
    v, m = rand_v(), rand_m()
    order = one_test_mm_order(v, m, display)
    ok = order in good_mm_orders
    st = "ok" if ok else "error"
    if display:
        print("\rorder is", order, ",", st)
    s = "\nm = " + str(m)
    s += "\norder = " + str(order) + ", " + st + "\n"
    return ok, order, s
def check_mm_orders(ntests, display = True):
    """Run `ntests` random order tests, print timing statistics and raise
    ValueError if any computed order is not a valid monster class order."""
    print("\nTesting orders of elements of the monster group")
    nerrors = 0
    order_sum = 0  # total of valid orders, used for per-operation timing
    start_time = datetime.datetime.now()
    print(start_time)
    t_start = time.process_time()
    for i in range(ntests):
        t = time.process_time()
        if display:
            print("Test %d, CPU time = %.3f s" % (i+1, t) )
        ok, order, _ = random_test_order(display = display)
        nerrors += not ok
        if ok:
            order_sum += order
    t = time.process_time() - t_start
    print("started: ", start_time)
    print("finished:", datetime.datetime.now())
    print("CPU time = %.3f s, per test: %.3f ms" % (t, 1000*t/ntests))
    print("CPU time per standard operation: %.5f ms" % (1000.0*t/order_sum))
    print("%d tests, %d errors, " % (ntests, nerrors))
    if nerrors:
        raise ValueError("Error in orders of monster group elements")
################################################################
# Chisquare test of orders of monster group elements
################################################################
MM_WORD_SIZE = 20 # Number of elementary operations used to construct
                  # a random element of the monster
MIN_CHISQU = 560 # Minimum number of samples before the chisquare cut applies
class ChisquareOrder:
    """Chi-square test of observed orders of random monster elements
    against the probabilities implied by the class centralizer sizes."""
    # Probability that a uniform random element has a given order:
    # sum over classes of that order of 1/|centralizer|.
    probabilities = defaultdict(float)
    orders = set(ClassOrders)
    good_orders = set()
    for order, csize in zip(ClassOrders, SizesCentralizers):
        probabilities[order] += 1.0/csize
        if probabilities[order] >= 1.0/111:
            good_orders.add(order)
    # Orders up to the largest "rare" order are pooled into a catch-all
    # bucket keyed 0 (its probability is the remaining mass).
    max_small = max(orders - good_orders)
    for x in orders:
        if x <= max_small:
            del probabilities[x]
    min_order = min(probabilities)
    probabilities[0] = 1.0 - sum(probabilities.values())
    # Keep a reference to scipy's chisquare; the method below reuses the name.
    chisquare_ = chisquare
    def __init__(self, p = p):
        # p: characteristic of the representation (module-level default).
        self.obtained = defaultdict(int)
        self.p = p
        self.total = 0
        self.order_sum = 0
        self.errors = 0
        self.word_size = MM_WORD_SIZE
    def add(self, ok, order):
        """Record one (ok, order) result; small orders pool into key 0."""
        ok = ok and order in self.orders
        if ok:
            key = order if order >= self.min_order else 0
            self.obtained[key] += 1
            self.total += 1
            self.order_sum += order
        self.errors += not ok
    def chisquare(self):
        """Return (chi2 statistic, p-value) of observed vs expected counts."""
        f_obt = [self.obtained[key] for key in self.probabilities]
        sum_obt = sum(f_obt)
        f_exp = [sum_obt * self.probabilities[key]
            for key in self.probabilities]
        # `chisquare` resolves to the scipy function imported at module
        # level, not to this method.
        chisq, p = chisquare(f_obt, f_exp = f_exp)
        return chisq, p
    def is_ok(self):
        """True while error-free; applies the chi-square cut once at least
        MIN_CHISQU samples have accumulated."""
        if self.errors:
            return False
        if self.total < MIN_CHISQU:
            return True
        _, prob = self.chisquare()
        return prob > 1.0e-6
    def show_result(self):
        """Format a human-readable summary of the accumulated statistics."""
        description = (
"""Chisquare test of distribution of orders >= %d in the monster M,
%d degrees of freedom, characteristic p = %d, %d-bit C
random element of MM built from %d factors,
%d tests, %d MM operations, %d errors.
""" )
        s = description % (
            self.min_order,
            len(self.probabilities) - 1,
            self.p, INT_BITS, self.word_size,
            self.total, self.order_sum, self.errors
        )
        if self.errors == 0 and self.total >= MIN_CHISQU:
            st = "\nChisquare test statistics = %.3f, p = %.4f\n"
            chisq, p = self.chisquare()
            s += st % (chisq, p)
        return s
def one_test_order(args):
    """Pool.map worker: args is (v, m); returns (ok, order)."""
    v, m = args
    order = one_test_mm_order(v, m)
    ok = order in good_mm_orders
    return ok, order
def get_test_values(ntests):
    """Yield `ntests` random (vector, group element) test pairs."""
    for i in range(ntests):
        yield rand_v(), rand_m(MM_WORD_SIZE)
def statistics_chisqu_orders(results, start_time = None):
    """Accumulate (ok, order) results into a ChisquareOrder, print the
    report (with timing when start_time is given) and return overall ok."""
    if not start_time is None:
        end_time = datetime.datetime.now()
    chisq = ChisquareOrder()
    # CLEANUP: the original loop also built an unused "ok"/"error" string
    # and an unused enumerate index per result.
    for ok, order in results:
        chisq.add(ok, order)
    print("\n" + chisq.show_result())
    if not start_time is None:
        ntests, order_sum = chisq.total, chisq.order_sum
        diff_time = end_time - start_time
        t = diff_time.total_seconds()
        print("started: ", start_time)
        print("finished:", end_time)
        print("time = %.3f s, per test: %.3f ms" % (t, 1000*t/ntests))
        print("time per standard operation: %.5f ms" % (1000.0*t/order_sum))
    return chisq.is_ok()
def check_chisqu_orders(ntests, nprocesses = 1, verbose = False):
    """Run `ntests` random order tests (in `nprocesses` workers when > 1)
    and chi-square the distribution of the observed orders.

    BUG FIXES: `verbose` was unconditionally overwritten with 1, making the
    parameter dead; and pool.join() was called on a still-running pool,
    which raises ValueError — Pool.map is synchronous and the `with` block
    terminates the pool on exit, so no join is needed.
    """
    start_time = datetime.datetime.now()
    header = "\nChisquare test of distribution of orders in the monster M,"
    print(header)
    print("%d tests, %d processes" % (ntests, nprocesses))
    print("started: ", start_time)
    testvalues = get_test_values(ntests)
    if nprocesses > 1:
        with Pool(processes = nprocesses) as pool:
            results = pool.map(one_test_order, testvalues)
    else:
        results_ = map(one_test_order, testvalues)
        results = []
        for i, x in enumerate(results_):
            ok, order = x
            if verbose:
                print("Test %d, order = %3d, %s" % (i+1, order, ok) )
            else:
                print("\r %d " % i, end = "")
            results.append(x)
    return statistics_chisqu_orders(results, start_time)
|
from django import forms
from .models import Vote
class VoteForm(forms.ModelForm):
    """Vote form rendering the `positive` field as Ja/Nej radio buttons."""
    # String choices '1'/'0' map onto the model's boolean field.
    BOOLEAN_CHOICES = (('1', 'Ja'), ('0', 'Nej'))
    positive = forms.ChoiceField(choices=BOOLEAN_CHOICES,
                                 widget=forms.RadioSelect)
    class Meta:
        model = Vote
        fields = ['positive']
from django.test import TestCase
# Create your tests here.
from .views import get_location_from_zip, search_location_in_sheet, get_response_for_help
from data.gsheets import get_gsheet
class ViewsTestCase(TestCase):
    """Integration tests for zip lookup, sheet search and help responses.

    NOTE(review): these tests call get_gsheet()/get_location_from_zip,
    which appear to hit live external services -- confirm they are meant
    to run against real data rather than fixtures.
    """
    def setUp(self):
        # shared handle to the Google Sheet used by the views
        self.gsheet = get_gsheet()

    def test_get_location_from_zip_invalid_inputs(self):
        """Tests get location function from zipcode
        """
        # malformed command ("ZIP X <code>") should be rejected
        zipcode = '4220089'
        response, location, _ = get_location_from_zip(f'ZIP X {zipcode}')
        self.assertTrue('Incorrect ZIP information entered.' in response)
        self.assertEqual(location, None)
        # well-formed command but invalid (7-digit) zip code
        zipcode = '4220089'
        response, location, _ = get_location_from_zip(f'ZIP {zipcode}')
        self.assertTrue('Incorrect ZIP code entered' in response)
        self.assertEqual(location, None)

    def test_get_location_from_zip_valid_input(self):
        """Tests get location function from zipcode
        """
        zipcode = '422008'
        response, location, _ = get_location_from_zip(f'ZIP {zipcode}')
        latitude = location.latitude
        longitude = location.longitude
        # coordinates are compared as strings -- TODO(review): confirm the
        # geocoder's return type is stable enough for exact comparison
        self.assertEqual(latitude, '20.02405519230769')
        self.assertEqual(longitude, '73.76155313076923')

    def test_search_location_in_sheet(self):
        """Tests search location in sheet
        """
        zipcode = '400012'
        response, location, _ = get_location_from_zip(f'ZIP {zipcode}')
        latitude = location.latitude
        longitude = location.longitude
        response = search_location_in_sheet(zipcode, location)
        self.assertTrue('Mr. Rakesh' in response)

    def test_get_response_for_help(self):
        """Tests get_response_for_help
        """
        # NOTE(review): no assertion -- this only checks the call doesn't raise
        incoming_msg = 'Help\nCity: Mumbai\nReq: Oxygen'
        response = get_response_for_help(incoming_msg)
        print(response)
|
from google.cloud import storage as gcs
import csv
from numpy import genfromtxt
import requests
import tensorflow as tf
# get the bucket and blob containing the .csv data file
# NotFound lives in google.cloud.exceptions; the original referenced the
# unimported name 'google' in its except clauses (NameError when triggered)
from google.cloud import exceptions as gcs_exceptions

client = gcs.Client()
try:
    bucket = client.get_bucket('parquery-sandbox')
except gcs_exceptions.NotFound:
    print('Sorry, that bucket does not exist!')
#uncomment to display bucket properties
#print(bucket.client)
#print(bucket)
#uncomment to debug blobs
#iterator=bucket.list_blobs()
#for i in iterator:
#    print(i)
# Bucket.get_blob returns None for a missing blob instead of raising
# NotFound, so a try/except here (as in the original) can never fire
blob = bucket.get_blob('treedom/query_result.csv')
if blob is None:
    print('Sorry, that blob does not exist!')
#uncomment to display blob properties
#print(blob)
#print(blob.client)
blob.download_to_filename('data.csv')

# csv.reader requires a text-mode file in Python 3 (was opened 'rb')
with open('data.csv', 'r', newline='') as csvfile:
    readCSV = csv.reader(csvfile, delimiter=',')
    labels = []
    links = []
    for row in readCSV:
        label = row[5]   # assumes column 5 holds the label -- TODO confirm
        link = row[7]    # assumes column 7 holds the image URL -- TODO confirm
        if link != "NULL":
            labels.append(label)
            links.append(link)

# remove first element from each list - the header row
labels.pop(0)
links.pop(0)
# list(...) so the result is indexable (map() is lazy in Python 3)
labels = list(map(int, labels))
print(labels[0])
print(links[0].split('/'))
print(links[0].split('/')[5])

correct_folder = 'correct/'
incorrect_folder = 'incorrect/'
# downloading images for training
for i in range(10000):
    folder = correct_folder if labels[i] == 1 else incorrect_folder
    image = requests.get(links[i]).content
    with open('images/training/' + folder + links[i].split('/')[5] + '.jpg', 'wb') as handler:
        handler.write(image)
# downloading images for testing
for i in range(10000, 12000):
    folder = correct_folder if labels[i] == 1 else incorrect_folder
    image = requests.get(links[i]).content
    with open('images/testing/' + folder + links[i].split('/')[5] + '.jpg', 'wb') as handler:
        handler.write(image)
#/usr/bin/env python
from ctypes import *
###############################################################################
# Legal values for the a_type field of an ELF auxiliary vector entry
# (mirrors the AT_* constants from glibc's <elf.h>).
AT_NULL = 0       # End of vector
AT_IGNORE = 1     # Entry should be ignored
AT_EXECFD = 2     # File descriptor of program
AT_PHDR = 3       # Program headers for program
AT_PHENT = 4      # Size of program header entry
AT_PHNUM = 5      # Number of program headers
AT_PAGESZ = 6     # System page size
AT_BASE = 7       # Base address of interpreter
AT_FLAGS = 8      # Flags
AT_ENTRY = 9      # Entry point of program
AT_NOTELF = 10    # Program is not ELF
AT_UID = 11       # Real uid
AT_EUID = 12      # Effective uid
AT_GID = 13       # Real gid
AT_EGID = 14      # Effective gid
AT_CLKTCK = 17    # Frequency of times()
# Some more special a_type values describing the hardware.
AT_PLATFORM = 15  # String identifying platform.
AT_HWCAP = 16     # Machine dependent hints about
                  # processor capabilities.
# This entry gives some information about the FPU initialization
# performed by the kernel.
AT_FPUCW = 18     # Used FPU control word.
# Cache block sizes.
AT_DCACHEBSIZE = 19  # Data cache block size.
AT_ICACHEBSIZE = 20  # Instruction cache block size.
AT_UCACHEBSIZE = 21  # Unified cache block size.
# A special ignored value for PPC, used by the kernel to control the
# interpretation of the AUXV. Must be > 16.
AT_IGNOREPPC = 22  # Entry should be ignored.
AT_SECURE = 23     # Boolean, was exec setuid-like?
# Pointer to the global system page used for system calls and other
# nice things.
AT_SYSINFO = 32
AT_SYSINFO_EHDR = 33
###############################################################################
class Elf32_auxv(Structure):
    """32-bit ELF auxiliary vector entry (Elf32_auxv_t)."""
    class a_union(Union):
        # value / pointer / function-pointer views of the same machine word
        _fields_ = [("a_val", c_long),
                    ("a_ptr", c_void_p),
                    ("a_fcn", c_void_p)]  # XXX: CFUNCTYPE(None,None)?
    _fields_ = [("a_type", c_int),
                ("a_un", a_union)]
###############################################################################
class Elf64_auxv(Structure):
    """64-bit ELF auxiliary vector entry (Elf64_auxv_t).

    Must derive from Structure directly: ctypes *appends* a subclass's
    _fields_ to the base class's fields, so the original definition
    (subclassing Elf32_auxv) produced a struct containing both the 32-bit
    and the 64-bit layouts back to back, with duplicated field names.
    """
    class a_union(Union):
        _fields_ = [("a_val", c_long),
                    ("a_ptr", c_void_p),
                    ("a_fcn", c_void_p)]
    _fields_ = [("a_type", c_long),
                ("a_un", a_union)]
|
from itertools import chain, combinations
# A class to encapsulate a Functional Dependency, and some helper functions
class FD:
    """A functional dependency lhs -> rhs over frozensets of attributes."""
    def __init__(self, lhs, rhs):
        # frozensets: hashable and order-insensitive, so FD('AB','C') == FD('BA','C')
        self.lhs = frozenset(lhs)
        self.rhs = frozenset(rhs)

    def __str__(self):
        return ''.join(self.lhs) + " -> " + ''.join(self.rhs)

    def __eq__(self, other):
        # logical 'and' instead of bitwise '&' on the two comparisons
        return self.lhs == other.lhs and self.rhs == other.rhs

    def __hash__(self):
        # hash the (lhs, rhs) pair; the original multiplied the two hashes,
        # which collides whenever either side hashes to 0 and is symmetric
        # in lhs/rhs
        return hash((self.lhs, self.rhs))

    def isTrivial(self):
        """A functional dependency is trivial if the right hand side is a subset of the left h.s."""
        return self.lhs >= self.rhs
# The following is not really needed for normalization, but may be useful to get intuitions about FDs
# The following is not really needed for normalization, but may be useful to get intuitions about FDs
class Relation:
    """A toy relation instance used to check FDs against concrete tuples."""
    def __init__(self, schema):
        self.tuples = list()
        self.schema = schema

    def add(self, t):
        """Append tuple t if its arity matches the schema; warn otherwise."""
        if len(t) == len(self.schema):
            self.tuples.append(t)
        else:
            # print() call form works in both Python 2 and 3
            # (the original used the Python-2-only print statement)
            print("Added tuple does not match the length of the schema")

    def checkIfMatch(self, t1, t2, attr_set):
        """True when t1 and t2 agree on every attribute in attr_set."""
        return all(t1[self.schema.index(attr)] == t2[self.schema.index(attr)] for attr in attr_set)

    def checkFDHolds(self, fd):
        """Go over all pairs of tuples and see if the FD is violated"""
        for t1 in self.tuples:
            for t2 in self.tuples:
                # t1 < t2 visits each unordered pair once
                if t1 < t2 and self.checkIfMatch(t1, t2, fd.lhs) and not self.checkIfMatch(t1, t2, fd.rhs):
                    print("Tuples " + str(t1) + " and " + str(t2) + " violate the FD " + str(fd))
# quick demo: two tuples that differ on A while agreeing on B,C;
# ABC -> A holds trivially (no pair agrees on all of A,B,C)
r = Relation(['A', 'B', 'C'])
r.add([1, 2, 3])
r.add([2, 2, 3])
r.checkFDHolds(FD('ABC', 'A'))
def powerset(S):
    """Returns the powerset of a set, except for the empty set, i.e., if S = {A, B, C}, returns {{A}, {B}, {C}, {A,B}, {B,C}, {A,C}, {A,B,C}"""
    subsets = []
    for size in range(1, len(S) + 1):
        subsets.extend(combinations(S, size))
    return subsets
def applyreflexivity(R):
    """Generates all trivial dependencies, i.e., of the type X -> subset(X)"""
    trivial = set()
    for lhs in powerset(R):
        for rhs in powerset(lhs):
            trivial.add(FD(lhs, rhs))
    return trivial
def applyaugmentation(F, PW, printflag):
    """Augmentation: if X --> Y, then XZ --> YZ
    PW is powerset of the schema
    """
    N = {FD(x.lhs.union(y), x.rhs.union(y)) for x in F for y in PW}
    for fd in N - F:
        # the original printed the comprehension variables x and y here,
        # which are not in scope outside the set comprehension (NameError
        # whenever printflag was True); report only the new FD instead
        if printflag: print(" Adding " + str(fd) + " by Augmentation")
    return F.union(N)
def applytransitivity(F, printflag):
    """Transitivity: if X --> Y, and Y --> Z, then X --> Z"""
    N = { FD(x.lhs, y.rhs) for x in F for y in F if x.rhs == y.lhs }
    for fd in N - F:
        if printflag:
            # the original printed the comprehension variables x and y,
            # which are out of scope here (NameError when printflag was
            # True); report only the derived FD
            print(" Adding " + str(fd) + " using Transitivity")
    return F.union(N)
def findClosure(R, F, printflag = False):
    """Finds closure by repeatedly applying the three Armstrong Axioms, until there is no change"""
    # Start with adding all trivial dependencies generated by using Reflexivity
    F = F.union(applyreflexivity(R))
    # compute the powerset of R once and reuse it every iteration
    # (the original computed it here but then re-built it each loop pass)
    powersetR = powerset(R)
    # Repeat application of the other two rules until no change
    done = False
    while not done:
        if printflag: print("Trying to find new FDs using Transitivity")
        F2 = applytransitivity(F, printflag)
        if printflag: print("Trying to find new FDs using Augmentation")
        F2 = applyaugmentation(F2, powersetR, printflag)
        # fixed point reached when no new FDs were added
        done = len(F2) == len(F)
        F = F2
    if printflag: print("Finished")
    return F
def findKeys(R, FClosure):
    """Keys are those where there is an FD with rhs = R"""
    return { fd.lhs for fd in FClosure if len(fd.rhs) == len(list(R)) }

def findCandidateKeys(R, FClosure):
    """Candidate keys are minimal -- go over the keys in increasing order by size, and add if no subset is present"""
    keys = findKeys(R, FClosure)
    ckeys = set()
    # key=len replaces the Python-2-only cmp-style comparator
    # sorted(keys, lambda x, y: cmp(len(x), len(y)))
    for k in sorted(keys, key=len):
        # keep k only if no already-accepted candidate key is a subset of it
        if not any(ck <= k for ck in ckeys):
            ckeys.add(k)
    return ckeys
def isInBCNF(R, FClosure, keys):
    """Find if there is a FD alpha --> beta s.t. alpha is not a key"""
    # the original called the undefined name Keys() here (NameError)
    if keys is None: keys = findKeys(R, FClosure)
    for fd in FClosure:
        if (not fd.isTrivial()) and (fd.lhs not in keys):
            return False
    return True
def listAllBCNFViolations(R, FClosure, keys):
    """Same as above, but finds all violations and prints them"""
    for fd in FClosure:
        if (not fd.isTrivial()) and (fd.lhs not in keys):
            # print() call form replaces the Python-2-only print statement
            print(str(fd) + " is an FD whose LHS is not a key")
def findSmallestViolatingFD(R, FClosure, keys):
    """Same as above, but finds a small FD that violates"""
    # key= replaces the Python-2-only cmp comparator
    # sorted(FClosure, lambda x, y: cmp(len(x.lhs), len(y.lhs)))
    for fd in sorted(FClosure, key=lambda fd: len(fd.lhs)):
        if (not fd.isTrivial()) and (fd.lhs not in keys):
            return fd
def DecomposeUsingFD(R, FClosure, fd):
    """Uses the given FD to decompose the schema -- returns the resulting schemas and their closures
    Let the fd be X --> Y
    Then we create two relations: R1 = X UNION Y, and R2 = X UNION (R - Y)
    Then, for R1, we find all FDs from FClosure that apply to R1 (i.e., that only contain attributes from R1)
    And do the same for R2
    """
    R1 = fd.lhs | fd.rhs
    R2 = (set(R) - R1) | fd.lhs
    # keep only dependencies fully contained in each fragment
    F1Closure = set()
    F2Closure = set()
    for dep in FClosure:
        if dep.lhs <= R1 and dep.rhs <= R1:
            F1Closure.add(dep)
        if dep.lhs <= R2 and dep.rhs <= R2:
            F2Closure.add(dep)
    return (R1, R2, F1Closure, F2Closure)
# Do a recursive BCNF Decomposition, and print out the results
def BCNFDecomposition(R, FClosure):
    """Recursively split R on violating FDs until every fragment is in BCNF."""
    keys = findKeys(R, FClosure)
    if not isInBCNF(R, FClosure, keys):
        # print() call form replaces the Python-2-only print statements
        print("".join(R) + " is not in BCNF")
        # pick a small violating FD and split the schema on it
        fd = findSmallestViolatingFD(R, FClosure, keys)
        # Decompose using that FD
        (R1, R2, F1Closure, F2Closure) = DecomposeUsingFD(R, FClosure, fd)
        print("Decomposing " + "".join(R) + " using " + str(fd) + " into relations " + "".join(R1) + " and " + "".join(R2))
        # Recurse
        BCNFDecomposition(R1, F1Closure)
        BCNFDecomposition(R2, F2Closure)
    else:
        print("".join(R) + " is in BCNF")
# Demo: schema ABCD with F = {A -> B, BC -> D}
R = "ABCD"
F = {FD('A', 'B'), FD('BC', 'D')}

# compute the closure once and reuse it below
Fclosure = findClosure(R, F)
for i in Fclosure:
    if not i.isTrivial(): print(i)

keys = findKeys(R, Fclosure)
print("Keys are:")
for i in keys:
    print("".join(i))

candidatekeys = findCandidateKeys(R, Fclosure)
print("Candidate Keys are:")
for i in candidatekeys:
    print("".join(i))

print("Checking if the schema is in BCNF")
if isInBCNF(R, Fclosure, keys):
    print("The schema is in BCNF")

(R1, R2, F1Closure, F2Closure) = DecomposeUsingFD(R, Fclosure, FD('B', 'C'))
print("Decomposing using " + str(FD('B', 'C')) + " into relations " + "".join(R1) + " and " + "".join(R2))

print("-------------- Doing a full BCNF Decompisition -------")
BCNFDecomposition(R, Fclosure)
print("------------------------------------------------------")
|
import random
import string
import tkinter as tk
from tkinter import ttk
import pyperclip
def parameter_password():
    """Read the UI settings and generate a password accordingly."""
    entry.delete(0, 'end')  # clear the previously generated password from the entry field
    length = var_1.get()  # password length chosen in the combobox
    count = count_checkmarks()  # how many character classes are ticked
    password = generator_pass(length, count)
    return password
def count_checkmarks():
    """Return how many of the four character-class checkboxes are ticked."""
    flags = (checkbox_upper, checkbox_lower, checkbox_digits, checkbox_symbols)
    return sum(1 for flag in flags if flag.get())
def generator_pass(length, count):  # main function: builds the password from the chosen settings
    """Generate a random password of ``length`` characters.

    ``count`` is the number of ticked character-class checkboxes (as computed
    by count_checkmarks).  The password is guaranteed to contain at least one
    character from every selected class, with the remaining positions drawn
    uniformly from the union of the selected alphabets.

    Returns None when no checkbox is ticked, matching the original's
    fall-through behaviour.

    This replaces the original's 15 near-identical branches (one per
    checkbox combination) with a single data-driven path.  The only
    behavioural difference is that the guaranteed characters now always
    appear in the fixed order upper/lower/digits/symbols, whereas one of
    the original branches used lower-then-upper.
    """
    # collect the selected alphabets in a fixed order
    pools = []
    if checkbox_upper.get():
        pools.append(string.ascii_uppercase)
    if checkbox_lower.get():
        pools.append(string.ascii_lowercase)
    if checkbox_digits.get():
        pools.append(string.digits)
    if checkbox_symbols.get():
        pools.append(string.punctuation)
    if not pools:
        return None  # nothing selected

    # one guaranteed character from each selected class ...
    password = ''.join(random.choice(pool) for pool in pools)
    # ... then fill the remaining length - count positions from the union
    combined = ''.join(pools)
    password += ''.join(random.choice(combined) for _ in range(length - count))
    return password
def button_generate_password():
    """Generate a new password and insert it into the entry field."""
    password_1 = parameter_password()
    entry.insert(0, password_1)
def copy_your_password():
    """Copy the current entry contents to the system clipboard."""
    copy_password = entry.get()
    pyperclip.copy(copy_password)
# --- build the window ---
root = tk.Tk()
var_1 = tk.IntVar()
# Window title.  The original did `root.title = (...)`, which silently
# replaces the title *method* with a string instead of setting the title;
# it must be called.
root.title('Генератор случайных паролей')
# label and entry field that displays the generated password
Password = tk.Label(root, text = 'Пароль:')
Password.grid(row = 0, column = 0)
entry = tk.Entry(root)
entry.grid(row = 0, column = 1)
# label for the password-length selector
label = tk.Label(root, text = 'Длина пароля:')
label.grid(row = 1, column = 0)
# copy-to-clipboard button
copy_button = tk.Button(root, text = 'Скопировать пароль', command = copy_your_password)
copy_button.grid(row = 0, column= 3)
# generate button
generate_button = tk.Button(root, text = 'Сгенерировать пароль', command = button_generate_password)
generate_button.grid(row = 0, column = 2)
# combobox for choosing the password length
combo_length_of_password = ttk.Combobox(root, textvariable = var_1)
combo_length_of_password['values'] = (8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
                                      20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
                                      30, 'Введите длину')
combo_length_of_password.current(0)
combo_length_of_password.grid(row = 1, column = 1)
# Checkboxes selecting the character classes.  Checkbutton's option is
# named 'variable'; the original passed the invalid keyword 'var', which
# tkinter forwards to Tk as the unknown option "-var" (TclError).
# uppercase letters
checkbox_upper = tk.BooleanVar()
check_upper = tk.Checkbutton(root, text ='Использовать заглавные буквы', variable = checkbox_upper)
check_upper.grid(row = 2, column = 0)
# lowercase letters
checkbox_lower = tk.BooleanVar()
check_lower = tk.Checkbutton(root, text ='Использовать строчные буквы', variable = checkbox_lower)
check_lower.grid(row = 3, column = 0)
# digits
checkbox_digits = tk.BooleanVar()
check_digits = tk.Checkbutton(root, text ='Использовать цифры ', variable = checkbox_digits)
check_digits.grid(row = 2, column = 1)
# special symbols
checkbox_symbols = tk.BooleanVar()
check_symbols = tk.Checkbutton(root, text ='Использовать спец.символы', variable = checkbox_symbols)
check_symbols.grid(row = 3, column = 1)
# start the event loop
root.mainloop()
|
import FWCore.ParameterSet.Config as cms
from PhysicsTools.NanoAOD.nano_eras_cff import *
from PhysicsTools.NanoAOD.common_cff import *
from PhysicsTools.NanoAOD.simpleCandidateFlatTableProducer_cfi import simpleCandidateFlatTableProducer
import PhysicsTools.PatAlgos.producersLayer1.muonProducer_cfi
# this below is used only in some eras
# updates slimmedMuons (dxy sign fix, basic selectors); only used in some eras
slimmedMuonsUpdated = cms.EDProducer("PATMuonUpdater",
    src = cms.InputTag("slimmedMuons"),
    vertices = cms.InputTag("offlineSlimmedPrimaryVertices"),
    computeMiniIso = cms.bool(False),
    fixDxySign = cms.bool(True),
    pfCandsForMiniIso = cms.InputTag("packedPFCandidates"),
    miniIsoParams = PhysicsTools.PatAlgos.producersLayer1.muonProducer_cfi.patMuons.miniIsoParams, # so they're in sync
    recomputeMuonBasicSelectors = cms.bool(False),
)
# mini-isolation value maps (absolute, rho*EA PU-corrected)
isoForMu = cms.EDProducer("MuonIsoValueMapProducer",
    src = cms.InputTag("slimmedMuonsUpdated"),
    relative = cms.bool(False),
    rho_MiniIso = cms.InputTag("fixedGridRhoFastjetAll"),
    EAFile_MiniIso = cms.FileInPath("PhysicsTools/NanoAOD/data/effAreaMuons_cone03_pfNeuHadronsAndPhotons_94X.txt"),
)
# muon-vs-closest-jet variables (ptRatio, ptRel, matched-jet pointer, ...)
ptRatioRelForMu = cms.EDProducer("MuonJetVarProducer",
    srcJet = cms.InputTag("updatedJetsPuppi"),
    srcLep = cms.InputTag("slimmedMuonsUpdated"),
    srcVtx = cms.InputTag("offlineSlimmedPrimaryVertices"),
)
# ONNX-based muon MVA ID; emits the raw score plus medium/tight working points.
# NOTE(review): variablesOrder lists Muon_norm_chi2_extended and
# Muon_n_Valid_hits_extended, which are not defined in the variables PSet
# below -- presumably computed internally by the producer; confirm.
muonMVAID = cms.EDProducer("EvaluateMuonMVAID",
    src = cms.InputTag("slimmedMuonsUpdated"),
    weightFile =  cms.FileInPath("RecoMuon/MuonIdentification/data/mvaID.onnx"),
    isClassifier = cms.bool(False),
    backend = cms.string('ONNX'),
    name = cms.string("muonMVAID"),
    outputTensorName= cms.string("probabilities"),
    inputTensorName= cms.string("float_input"),
    outputNames = cms.vstring(["probGOOD", "wpMedium", "wpTight"]),
    batch_eval =cms.bool(True),
    # WPs cut on the probGOOD output: medium > 0.08, tight > 0.20
    outputFormulas = cms.vstring(["at(1)", "? at(1) > 0.08 ? 1 : 0", "? at(1) > 0.20 ? 1 : 0"]),
    variablesOrder = cms.vstring(["LepGood_global_muon","LepGood_validFraction","Muon_norm_chi2_extended","LepGood_local_chi2","LepGood_kink","LepGood_segmentComp","Muon_n_Valid_hits_extended","LepGood_n_MatchedStations","LepGood_Valid_pixel","LepGood_tracker_layers","LepGood_pt","LepGood_eta"]),
    variables = cms.PSet(
        LepGood_global_muon = cms.string("isGlobalMuon"),
        LepGood_validFraction = cms.string("?innerTrack.isNonnull?innerTrack().validFraction:-99"),
        LepGood_local_chi2 = cms.string("combinedQuality().chi2LocalPosition"),
        LepGood_kink = cms.string("combinedQuality().trkKink"),
        LepGood_segmentComp = cms.string("segmentCompatibility"),
        LepGood_n_MatchedStations = cms.string("numberOfMatchedStations()"),
        LepGood_Valid_pixel = cms.string("?innerTrack.isNonnull()?innerTrack().hitPattern().numberOfValidPixelHits():-99"),
        LepGood_tracker_layers = cms.string("?innerTrack.isNonnull()?innerTrack().hitPattern().trackerLayersWithMeasurement():-99"),
        LepGood_pt = cms.string("pt"),
        LepGood_eta = cms.string("eta"),
    )
)
# embed the iso / jet-relation / MVA-ID value maps computed above into the muons
slimmedMuonsWithUserData = cms.EDProducer("PATMuonUserDataEmbedder",
    src = cms.InputTag("slimmedMuonsUpdated"),
    userFloats = cms.PSet(
        miniIsoChg = cms.InputTag("isoForMu:miniIsoChg"),
        miniIsoAll = cms.InputTag("isoForMu:miniIsoAll"),
        ptRatio = cms.InputTag("ptRatioRelForMu:ptRatio"),
        ptRel = cms.InputTag("ptRatioRelForMu:ptRel"),
        jetNDauChargedMVASel = cms.InputTag("ptRatioRelForMu:jetNDauChargedMVASel"),
        mvaIDMuon_wpMedium = cms.InputTag("muonMVAID:wpMedium"),
        mvaIDMuon_wpTight = cms.InputTag("muonMVAID:wpTight")
    ),
    userCands = cms.PSet(
        jetForLepJetVar = cms.InputTag("ptRatioRelForMu:jetForLepJetVar") # warning: Ptr is null if no match is found
    ),
)
# for these eras also embed the raw MVA-ID probGOOD score
(run2_nanoAOD_106Xv2 | run3_nanoAOD_122 ).toModify(slimmedMuonsWithUserData.userFloats,
    mvaIDMuon = cms.InputTag("muonMVAID:probGOOD"))
# muons kept in the NanoAOD table: pt > 15, or soft/low-pt muons passing some ID
finalMuons = cms.EDFilter("PATMuonRefSelector",
    src = cms.InputTag("slimmedMuonsWithUserData"),
    cut = cms.string("pt > 15 || (pt > 3 && (passed('CutBasedIdLoose') || passed('SoftCutBasedId') || passed('SoftMvaId') || passed('CutBasedIdGlobalHighPt') || passed('CutBasedIdTrkHighPt')))")
)
# looser selection used only for isolated-track cleaning
finalLooseMuons = cms.EDFilter("PATMuonRefSelector", # for isotrack cleaning
    src = cms.InputTag("slimmedMuonsWithUserData"),
    cut = cms.string("pt > 3 && track.isNonnull && isLooseMuon")
)
# ttH-style prompt-lepton BDT score for the final muon collection
muonMVATTH= cms.EDProducer("MuonBaseMVAValueMapProducer",
    src = cms.InputTag("linkedObjects","muons"),
    weightFile =  cms.FileInPath("PhysicsTools/NanoAOD/data/mu_BDTG_2017.weights.xml"),
    name = cms.string("muonMVATTH"),
    isClassifier = cms.bool(True),
    variablesOrder = cms.vstring(["LepGood_pt","LepGood_eta","LepGood_jetNDauChargedMVASel","LepGood_miniRelIsoCharged","LepGood_miniRelIsoNeutral","LepGood_jetPtRelv2","LepGood_jetDF","LepGood_jetPtRatio","LepGood_dxy","LepGood_sip3d","LepGood_dz","LepGood_segmentComp"]),
    variables = cms.PSet(
        LepGood_pt = cms.string("pt"),
        LepGood_eta = cms.string("eta"),
        LepGood_jetNDauChargedMVASel = cms.string("?userCand('jetForLepJetVar').isNonnull()?userFloat('jetNDauChargedMVASel'):0"),
        LepGood_miniRelIsoCharged = cms.string("userFloat('miniIsoChg')/pt"),
        LepGood_miniRelIsoNeutral = cms.string("(userFloat('miniIsoAll')-userFloat('miniIsoChg'))/pt"),
        LepGood_jetPtRelv2 = cms.string("?userCand('jetForLepJetVar').isNonnull()?userFloat('ptRel'):0"),
        LepGood_jetDF = cms.string("?userCand('jetForLepJetVar').isNonnull()?max(userCand('jetForLepJetVar').bDiscriminator('pfDeepFlavourJetTags:probbb')+userCand('jetForLepJetVar').bDiscriminator('pfDeepFlavourJetTags:probb')+userCand('jetForLepJetVar').bDiscriminator('pfDeepFlavourJetTags:problepb'),0.0):0.0"),
        LepGood_jetPtRatio = cms.string("?userCand('jetForLepJetVar').isNonnull()?min(userFloat('ptRatio'),1.5):1.0/(1.0+(pfIsolationR04().sumChargedHadronPt + max(pfIsolationR04().sumNeutralHadronEt + pfIsolationR04().sumPhotonEt - pfIsolationR04().sumPUPt/2,0.0))/pt)"),
        LepGood_dxy = cms.string("log(abs(dB('PV2D')))"),
        LepGood_sip3d = cms.string("abs(dB('PV3D')/edB('PV3D'))"),
        LepGood_dz = cms.string("log(abs(dB('PVDZ')))"),
        LepGood_segmentComp = cms.string("segmentCompatibility"),
    )
)
# same BDT inputs, low-pt training
muonMVALowPt = muonMVATTH.clone(
    weightFile = cms.FileInPath("PhysicsTools/NanoAOD/data/mu_BDTG_lowpt.weights.xml"),
    name = cms.string("muonMVALowPt"),
)
# 2016 data/MC use the 2016 training
run2_muon_2016.toModify(
    muonMVATTH,
    weightFile = "PhysicsTools/NanoAOD/data/mu_BDTG_2016.weights.xml",
)
# flat NanoAOD "Muon" table: one branch per Var below, for muons passing finalMuons
muonTable = simpleCandidateFlatTableProducer.clone(
    src = cms.InputTag("linkedObjects","muons"),
    name = cms.string("Muon"),
    doc  = cms.string("slimmedMuons after basic selection (" + finalMuons.cut.value()+")"),
    variables = cms.PSet(CandVars,
        ptErr   = Var("bestTrack().ptError()", float, doc = "ptError of the muon track", precision=6),
        tunepRelPt = Var("tunePMuonBestTrack().pt/pt",float,doc="TuneP relative pt, tunePpt/pt",precision=6),
        dz = Var("dB('PVDZ')",float,doc="dz (with sign) wrt first PV, in cm",precision=10),
        dzErr = Var("abs(edB('PVDZ'))",float,doc="dz uncertainty, in cm",precision=6),
        dxybs = Var("dB('BS2D')",float,doc="dxy (with sign) wrt the beam spot, in cm",precision=10),
        dxy = Var("dB('PV2D')",float,doc="dxy (with sign) wrt first PV, in cm",precision=10),
        dxyErr = Var("edB('PV2D')",float,doc="dxy uncertainty, in cm",precision=6),
        ip3d = Var("abs(dB('PV3D'))",float,doc="3D impact parameter wrt first PV, in cm",precision=10),
        sip3d = Var("abs(dB('PV3D')/edB('PV3D'))",float,doc="3D impact parameter significance wrt first PV",precision=10),
        segmentComp   = Var("segmentCompatibility()", float, doc = "muon segment compatibility", precision=14), # keep higher precision since people have cuts with 3 digits on this
        nStations = Var("numberOfMatchedStations", "uint8", doc = "number of matched stations with default arbitration (segment & track)"),
        nTrackerLayers = Var("?track.isNonnull?innerTrack().hitPattern().trackerLayersWithMeasurement():0", "uint8", doc = "number of layers in the tracker"),
        highPurity = Var("?track.isNonnull?innerTrack().quality('highPurity'):0", bool, doc = "inner track is high purity"),
        jetIdx = Var("?hasUserCand('jet')?userCand('jet').key():-1", "int16", doc="index of the associated jet (-1 if none)"),
        svIdx = Var("?hasUserCand('vertex')?userCand('vertex').key():-1", "int16", doc="index of matching secondary vertex"),
        tkRelIso = Var("isolationR03().sumPt/tunePMuonBestTrack().pt",float,doc="Tracker-based relative isolation dR=0.3 for highPt, trkIso/tunePpt",precision=6),
        miniPFRelIso_chg = Var("userFloat('miniIsoChg')/pt",float,doc="mini PF relative isolation, charged component"),
        miniPFRelIso_all = Var("userFloat('miniIsoAll')/pt",float,doc="mini PF relative isolation, total (with scaled rho*EA PU corrections)"),
        pfRelIso03_chg = Var("pfIsolationR03().sumChargedHadronPt/pt",float,doc="PF relative isolation dR=0.3, charged component"),
        pfRelIso03_all = Var("(pfIsolationR03().sumChargedHadronPt + max(pfIsolationR03().sumNeutralHadronEt + pfIsolationR03().sumPhotonEt - pfIsolationR03().sumPUPt/2,0.0))/pt",float,doc="PF relative isolation dR=0.3, total (deltaBeta corrections)"),
        pfRelIso04_all = Var("(pfIsolationR04().sumChargedHadronPt + max(pfIsolationR04().sumNeutralHadronEt + pfIsolationR04().sumPhotonEt - pfIsolationR04().sumPUPt/2,0.0))/pt",float,doc="PF relative isolation dR=0.4, total (deltaBeta corrections)"),
        jetRelIso = Var("?userCand('jetForLepJetVar').isNonnull()?(1./userFloat('ptRatio'))-1.:(pfIsolationR04().sumChargedHadronPt + max(pfIsolationR04().sumNeutralHadronEt + pfIsolationR04().sumPhotonEt - pfIsolationR04().sumPUPt/2,0.0))/pt",float,doc="Relative isolation in matched jet (1/ptRatio-1, pfRelIso04_all if no matched jet)",precision=8),
        jetPtRelv2 = Var("?userCand('jetForLepJetVar').isNonnull()?userFloat('ptRel'):0",float,doc="Relative momentum of the lepton with respect to the closest jet after subtracting the lepton",precision=8),
        tightCharge = Var("?(muonBestTrack().ptError()/muonBestTrack().pt() < 0.2)?2:0", "uint8", doc="Tight charge criterion using pterr/pt of muonBestTrack (0:fail, 2:pass)"),
        looseId  = Var("passed('CutBasedIdLoose')",bool, doc="muon is loose muon"),
        isPFcand = Var("isPFMuon",bool,doc="muon is PF candidate"),
        isGlobal = Var("isGlobalMuon",bool,doc="muon is global muon"),
        isTracker = Var("isTrackerMuon",bool,doc="muon is tracker muon"),
        isStandalone = Var("isStandAloneMuon",bool,doc="muon is a standalone muon"),
        mediumId = Var("passed('CutBasedIdMedium')",bool,doc="cut-based ID, medium WP"),
        mediumPromptId = Var("passed('CutBasedIdMediumPrompt')",bool,doc="cut-based ID, medium prompt WP"),
        tightId = Var("passed('CutBasedIdTight')",bool,doc="cut-based ID, tight WP"),
        softId = Var("passed('SoftCutBasedId')",bool,doc="soft cut-based ID"),
        softMvaId = Var("passed('SoftMvaId')",bool,doc="soft MVA ID"),
        softMva = Var("softMvaValue()",float,doc="soft MVA ID score",precision=6),
        highPtId = Var("?passed('CutBasedIdGlobalHighPt')?2:passed('CutBasedIdTrkHighPt')","uint8",doc="high-pT cut-based ID (1 = tracker high pT, 2 = global high pT, which includes tracker high pT)"),
        pfIsoId = Var("passed('PFIsoVeryLoose')+passed('PFIsoLoose')+passed('PFIsoMedium')+passed('PFIsoTight')+passed('PFIsoVeryTight')+passed('PFIsoVeryVeryTight')","uint8",doc="PFIso ID from miniAOD selector (1=PFIsoVeryLoose, 2=PFIsoLoose, 3=PFIsoMedium, 4=PFIsoTight, 5=PFIsoVeryTight, 6=PFIsoVeryVeryTight)"),
        tkIsoId = Var("?passed('TkIsoTight')?2:passed('TkIsoLoose')","uint8",doc="TkIso ID (1=TkIsoLoose, 2=TkIsoTight)"),
        miniIsoId = Var("passed('MiniIsoLoose')+passed('MiniIsoMedium')+passed('MiniIsoTight')+passed('MiniIsoVeryTight')","uint8",doc="MiniIso ID from miniAOD selector (1=MiniIsoLoose, 2=MiniIsoMedium, 3=MiniIsoTight, 4=MiniIsoVeryTight)"),
        mvaMuID = Var("mvaIDValue()",float,doc="MVA-based ID score ",precision=6),
        mvaMuID_WP = Var("userFloat('mvaIDMuon_wpMedium') + userFloat('mvaIDMuon_wpTight')","uint8",doc="MVA-based ID selector WPs (1=MVAIDwpMedium,2=MVAIDwpTight)"),
        multiIsoId = Var("?passed('MultiIsoMedium')?2:passed('MultiIsoLoose')","uint8",doc="MultiIsoId from miniAOD selector (1=MultiIsoLoose, 2=MultiIsoMedium)"),
        puppiIsoId = Var("passed('PuppiIsoLoose')+passed('PuppiIsoMedium')+passed('PuppiIsoTight')", "uint8", doc="PuppiIsoId from miniAOD selector (1=Loose, 2=Medium, 3=Tight)"),
        triggerIdLoose = Var("passed('TriggerIdLoose')",bool,doc="TriggerIdLoose ID"),
        inTimeMuon = Var("passed('InTimeMuon')",bool,doc="inTimeMuon ID"),
        jetNDauCharged = Var("?userCand('jetForLepJetVar').isNonnull()?userFloat('jetNDauChargedMVASel'):0", "uint8", doc="number of charged daughters of the closest jet"),
    ),
    # branches filled from separate value maps rather than the muon itself
    externalVariables = cms.PSet(
        mvaTTH = ExtVar(cms.InputTag("muonMVATTH"),float, doc="TTH MVA lepton ID score",precision=14),
        mvaLowPt = ExtVar(cms.InputTag("muonMVALowPt"),float, doc="Low pt muon ID score",precision=14),
        fsrPhotonIdx = ExtVar(cms.InputTag("leptonFSRphotons:muFsrIndex"), "int16", doc="Index of the lowest-dR/ET2 among associated FSR photons"),
    ),
)
# for these eras, take the MVA-ID score from the embedded userFloat instead
(run2_nanoAOD_106Xv2 | run3_nanoAOD_122).toModify(muonTable.variables,mvaMuID=None).toModify(
    muonTable.variables, mvaMuID = Var("userFloat('mvaIDMuon')", float, doc="MVA-based ID score",precision=6))
# Revert back to AK4 CHS jets for Run 2
run2_nanoAOD_ANY.toModify(
    ptRatioRelForMu,srcJet="updatedJets"
)
muonsMCMatchForTable = cms.EDProducer("MCMatcher",       # cut on deltaR, deltaPt/Pt; pick best by deltaR
    src         = muonTable.src,                         # final reco collection
    matched     = cms.InputTag("finalGenParticles"),     # final mc-truth particle collection
    mcPdgId     = cms.vint32(13),               # one or more PDG ID (13 = mu); absolute values (see below)
    checkCharge = cms.bool(False),              # True = require RECO and MC objects to have the same charge
    mcStatus    = cms.vint32(1),                # PYTHIA status code (1 = stable, 2 = shower, 3 = hard scattering)
    maxDeltaR   = cms.double(0.3),              # Minimum deltaR for the match
    maxDPtRel   = cms.double(0.5),              # Minimum deltaPt/Pt for the match
    resolveAmbiguities    = cms.bool(True),     # Forbid two RECO objects to match to the same GEN object
    resolveByMatchQuality = cms.bool(True),     # False = just match input in order; True = pick lowest deltaR pair first
)
# gen-match table branch (Muon_genPartIdx / genPartFlav)
muonMCTable = cms.EDProducer("CandMCMatchTableProducer",
    src     = muonTable.src,
    mcMap   = cms.InputTag("muonsMCMatchForTable"),
    objName = muonTable.name,
    objType = muonTable.name, #cms.string("Muon"),
    branchName = cms.string("genPart"),
    docString = cms.string("MC matching to status==1 muons"),
)
# task wiring: reco chain, MC matching, and table production
muonTask = cms.Task(slimmedMuonsUpdated,isoForMu,ptRatioRelForMu,slimmedMuonsWithUserData,finalMuons,finalLooseMuons )
muonMCTask = cms.Task(muonsMCMatchForTable,muonMCTable)
muonTablesTask = cms.Task(muonMVATTH,muonMVALowPt,muonTable,muonMVAID)
|
"""
Machine Learning(기계 학습) -> Deep Learning(심층 학습)
training data set(학습 세트) / test data set(검증 세트)
신경망 층을 지나갈 때 사용되는 가중치(weight) 행렬, 편항(bias) 행렬을 찾는 게 목적
오차를 최소화하는 가중치 행렬을 찾아야 한다
손실(loss) 함수 / 비용(cost) 함수의 값을 최소화하는 가중치 행렬 찾기
손실 함수:
- 평균 제곱 오차(MSE: Mean Squared Error)
- 교차 엔트로피(Cross-Entropy)
"""
import numpy as np
from dataset.mnist import load_mnist
if __name__ == '__main__':
(X_train, y_train), (X_test, y_true) = load_mnist()
# 10개 테스트 이미지의 실제 값
print('y_true =', y_true[:10])
# 10개 테스트 이미지의 예측 값
y_pred = np.array([7, 2, 1, 6, 4, 1, 4, 9, 6, 9])
print('y_pred =', y_pred)
# 오차
error = y_pred - y_true[:10]
print('error =', error)
# 오차 제곱(squared error)
sq_err = error ** 2
print('squared error =', sq_err)
# 평균 제곱 오차(mean squared error)
mse = np.mean(sq_err)
print('MSE =', mse)
# RMSE(Root Mean Squared Error)
print('RMSE =', np.sqrt(mse))
|
# Console demo: read values of several built-in types from standard input.
integer = int(input("enter an integer?\n"))
decimal = float(input("enter a float?\n"))
# input() already returns str; the original wrapped it in a redundant str().
string = input("enter a string?\n")
# WARNING: eval() executes arbitrary code typed by the user.  Acceptable only
# in a throwaway demo; use ast.literal_eval() for anything untrusted.
string2 = eval(input("enter any type?\n"))
print("krishanth likes " + string)
|
# Blueprint package initialization.  The import order is load-bearing:
# `bp` must exist before `routes` and `handlers` are imported, because those
# modules register themselves against it at import time.
from .. import init_blueprint, schedule_daily
bp = init_blueprint(__name__)
from . import routes, handlers
from .jobs import update_users_data
# Refresh the user data once per day.
schedule_daily(update_users_data)
|
################################################################
# Reads in World Bank tariff data
################################################################
from pulp import *
import math
import json
import numpy as np
import re
from sys import exit
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# Pick data/figure/saved-variable directories for whichever machine this runs
# on: probe the Mac paths first and fall back to the Windows paths.
# NOTE(review): the probe file handle `f` is never closed, and the bare
# `except` also hides unrelated I/O errors (e.g. permissions) -- confirm intent.
try:
	wddata='/Users/lilllianpetersen/iiasa/data/supply_chain/'
	wdfigs='/Users/lilllianpetersen/iiasa/figs/'
	wdvars='/Users/lilllianpetersen/iiasa/saved_vars/'
	f=open(wddata+'trading_across_borders2017.csv','r')
except:
	wddata='C:/Users/garyk/Documents/code/riskAssessmentFromPovertyEstimations/supply_chain/data/'
	wdfigs='C:/Users/garyk/Documents/code/riskAssessmentFromPovertyEstimations/supply_chain/figs/'
	wdvars='C:/Users/garyk/Documents/code/riskAssessmentFromPovertyEstimations/supply_chain/vars/'
# Country-name arrays saved by earlier pipeline steps; normalize the two
# Congo spellings so they match the tariff CSV.
subsaharancountry = np.load(wdvars+'supply_chain/subsaharancountry.npy')
subsaharancountry[subsaharancountry=='Congo']='Congo (DRC)'
subsaharancountry[subsaharancountry=='Congo (Republic of the)']='Congo'
countrycosted=np.load(wdvars+'supply_chain/countrycosted.npy')
countrycosted[countrycosted=='Congo']='Congo (DRC)'
countrycosted[countrycosted=='Congo (Republic of the)']='Congo'
# Parse the World Bank tariff CSV: one row per country, yearly columns.
# NOTE(review): this handle is also never closed.
f = open(wddata+'tariff/world_bank_tariff/tariff.csv')
i=-2
countries=[]
tariff10=np.zeros(shape=(len(subsaharancountry),10)) # 10 yearly tariff values per country (original comment said 2008-2018; only 10 years are read)
tariff=np.zeros(shape=(len(subsaharancountry))) # per-country mean tariff over those years
for line in f:
	i+=1
	if i==-1:
		# i starts at -2, so the first CSV row (the header) is skipped here.
		continue
	line=line[:-2]
	line=line.replace('"','')
	tmp=np.array(line.split(','))
	country=tmp[0]
	# Keep only rows whose country appears in the sub-Saharan list.
	if np.amax(country==np.array(subsaharancountry[:]))==0:
		continue
	countries.append(country)
	icountry=np.where(subsaharancountry==country)
	j=55 # column offset: column 56 is presumably the first year read (2008) -- TODO confirm against the CSV header
	for y in range(10):
		j+=1
		try:
			tariff10[icountry,y]=float(tmp[j])
		except:
			# Non-numeric cell (missing data): leave this year as 0.
			continue
	# Average over non-zero years only (zeros mean "no data").
	tariff[icountry] = np.mean(tariff10[icountry][tariff10[icountry]!=0])
tariff[np.isnan(tariff)] = 0 # countries with no data at all yield NaN means
#tariff[:24] = tariff[:24]-np.mean(tariff[:24])
#tariff[:24] = tariff[:24]+7.0
#np.save(wdvars+'supply_chain/tariff_by_country.npy',tariff)
# Re-index the per-country tariffs onto the 43-entry africanCountries array,
# mapping the alternate country spellings used there.
africanCountries = np.load(wdvars+'country_correlates/africanCountries.npy')
tariff1 = np.zeros(shape=(43))
for i in range(len(subsaharancountry)):
	country = subsaharancountry[i]
	if country[:2]=='I_': continue
	if country=='DRC': country='Congo (DRC)'
	if country=='Ivory Coast': country="Cote d'Ivoire"
	p = np.where(country==africanCountries)[0][0]
	tariff1[p] = tariff[i]
# Broadcast the static per-country tariff across 22 time steps and save.
tariff1Full = np.zeros(shape=(43,22))
for y in range(22):
	tariff1Full[:,y] = tariff1
np.save(wdvars+'country_correlates/tariff.npy',tariff1Full)
|
from IPython.utils.tokenutil import line_at_cursor
from ipykernel.ipkernel import IPythonKernel
from jupytervvp.vvpsession import VvpSession, SessionException
from jupytervvp.flinksql import complete_sql
from IPython.core.magic_arguments import parse_argstring
from jupytervvp import VvpMagics
import json
def _do_flink_completion(code, cursor_pos):
    """Build a Jupyter completion reply for a %%flink_sql cell.

    Returns None when no VVP session can be resolved from the magic line or
    when VVP offers no suggestions, so the caller can fall back to the
    default IPython completer.
    """
    first_line = code.split('\n')[0]
    session = load_session(first_line)
    if session is None:
        return None
    position = len(code) if cursor_pos is None else cursor_pos
    matches = fetch_vvp_suggestions(session, code, position, len(first_line) + 1)
    if matches is None:
        return None
    word_len = calculate_text_length(code, position)
    return {
        'matches': matches,
        'cursor_end': position,
        'cursor_start': position - word_len,
        'metadata': {},
        'status': 'ok',
    }
def calculate_text_length(code, cursor_pos):
    """Return the length of the word immediately before *cursor_pos*.

    Used to compute 'cursor_start' in the completion reply.  Returns 0 when
    the cursor follows whitespace or sits at the very start of a line.
    """
    line, offset = line_at_cursor(code, cursor_pos)
    line_up_to_cursor = line[:cursor_pos - offset]
    words = line_up_to_cursor.split()
    # Bug fix: the original did `split()[-1]`, which raises IndexError when
    # the prefix is empty (cursor at column 0) or whitespace-only; it also
    # only recognized a trailing space, not other trailing whitespace.
    if not words or line_up_to_cursor[-1:].isspace():
        return 0
    return len(words[-1])
def fetch_vvp_suggestions(session, code, cursor_pos, command_line_length):
    """Ask VVP for SQL completions for the body of a %%flink_sql cell.

    The first (magic) line is stripped so only SQL is sent, and *cursor_pos*
    is translated into the SQL body accordingly.  Returns a list of
    completion strings, or None when the cursor is still on the magic line
    or VVP returned no completions.
    """
    sql = code[command_line_length:]
    sql_pos = cursor_pos - command_line_length
    if sql_pos < 0:
        # Cursor is on the %%flink_sql line itself; let the normal
        # auto-completion handle that.
        return None
    # Request suggestions from VVP.
    suggestions = complete_sql(sql, sql_pos, session)
    suggest_json = json.loads(suggestions.text)
    if 'completions' not in suggest_json:
        return None
    # Idiom: list comprehension instead of a manual append loop.
    return [suggestion["text"] for suggestion in suggest_json['completions']]
def load_session(command_line):
    """Resolve the VvpSession named on a %%flink_sql magic line.

    Falls back to the default session name when none is given; returns None
    if no name can be determined or the session does not exist.
    """
    arg_string = command_line.replace('%%flink_sql', '')
    args = parse_argstring(VvpMagics.flink_sql, arg_string)
    session_name = args.session or VvpSession.default_session_name
    if session_name is None:
        return None
    try:
        return VvpSession.get_session(session_name)
    except SessionException:
        return None
class FlinkSqlKernel(IPythonKernel):
    """IPython kernel that adds VVP-backed completion for %%flink_sql cells."""

    def __init__(self, **kwargs):
        super(FlinkSqlKernel, self).__init__(**kwargs)

    def do_complete(self, code, cursor_pos):
        """Try VVP completion for %%flink_sql cells; otherwise defer to IPython."""
        if not code.startswith('%%flink_sql'):
            return super(FlinkSqlKernel, self).do_complete(code, cursor_pos)
        completions = _do_flink_completion(code, cursor_pos)
        if completions is not None:
            return completions
        return super(FlinkSqlKernel, self).do_complete(code, cursor_pos)
if __name__ == '__main__':
    # Run this module as a standalone Jupyter kernel process.
    from ipykernel.kernelapp import IPKernelApp
    IPKernelApp.launch_instance(kernel_class=FlinkSqlKernel)
import sys
import ctypes
import re
import collections
import io
import json
def printf(fmt, *args, **kwargs):
    """Format *fmt* with the given arguments and write it without a newline."""
    print(fmt.format(*args, **kwargs), end='')

def printf_line(fmt, *args, **kwargs):
    """Format *fmt* with the given arguments and write it with a newline."""
    print(fmt.format(*args, **kwargs))

def printf_error(fmt, *args, **kwargs):
    """Print a formatted message prefixed with 'error:'."""
    print("error:", fmt.format(*args, **kwargs))

def printf_compile_error(file_name, lnum, fmt, *args, line='', **kwargs):
    """Print a compiler diagnostic as '<file>:<lnum>: <message>' followed by
    the offending source line, indented with a tab.

    Bug fix: the original printed a module-global ``line`` that is never
    defined, raising NameError on every call.  The source line is now a
    keyword-only argument (default '') so existing positional callers keep
    working.
    """
    printf_line('{}:{}: {}', file_name, lnum, fmt.format(*args, **kwargs))
    printf('\t{}', line)
# Legal variable names: a letter or underscore, then letters/digits/underscores.
VARIABLE_NAME_PATTERN = '[a-zA-Z_][a-zA-Z_0-9]*'
def is_valid_variable_name(string):
    """Return True if *string* is a legal variable name, else False.

    Improvement: returns a plain bool rather than a Match-object-or-None,
    which is safer for callers that compare the result against True/False.
    Truthiness is unchanged for existing callers.
    """
    return re.fullmatch(VARIABLE_NAME_PATTERN, string) is not None
# Process exit codes used by main().
EXIT_NORMAL = 0
EXIT_ERROR = 1
# Win32 console bindings for enabling VT escape-sequence processing.
# NOTE(review): Windows-only -- ctypes.windll raises AttributeError at import
# time on other platforms.
DWORD = ctypes.c_ulong
STD_OUTPUT_HANDLE = -11  # GetStdHandle() id for the standard output handle
INVALID_HANDLE_VALUE = -1
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004  # console-mode flag for VT escapes
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
GetConsoleMode = ctypes.windll.kernel32.GetConsoleMode
SetConsoleMode = ctypes.windll.kernel32.SetConsoleMode
GetLastError = ctypes.windll.kernel32.GetLastError
def set_vt_mode(enable):
    """Enable or disable VT100 escape-sequence processing on the Windows console.

    Returns True on success, False when the console handle cannot be obtained
    or either console-mode call fails.  Windows-only: relies on the kernel32
    bindings defined above.
    """
    handle = GetStdHandle(STD_OUTPUT_HANDLE)
    # Bug fix: identity comparison for None (the original used `== None`).
    if handle == INVALID_HANDLE_VALUE or handle is None:
        return False
    mode = DWORD()
    # byref() is the idiomatic way to pass an output parameter to a
    # windll call (equivalent to the original addressof()).
    if not GetConsoleMode(handle, ctypes.byref(mode)):
        return False
    if enable:
        mode.value |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
    else:
        mode.value &= ~ENABLE_VIRTUAL_TERMINAL_PROCESSING
    return bool(SetConsoleMode(handle, mode))
PROCESS_BYTE_CODE_FILE_MAGIC = b'PBC.'
# One byte per opcode; each opcode is followed by its operands in the stream.
BYTE_CODE_WRITE = b'\x00'        # operand: string (literal text to emit)
BYTE_CODE_WRITE_EVAL = b'\x01'   # operand: string (expression; emit its value)
BYTE_CODE_EXEC = b'\x02'         # operand: string (statement to exec)
BYTE_CODE_LOAD = b'\x03'         # operand: name (json file of variables)
BYTE_CODE_RUN = b'\x04'          # operand: name (reserved, never emitted yet)
BYTE_CODE_PLACE = b'\x05'        # operand: name (reserved, never emitted yet)
BYTE_CODE_EXEC_FILE = b'\x06'    # operand: name (python file to exec)
# Bug fix: DEFINE/JUMP_IF/JUMP_IF_NOT previously reused \x03/\x04/\x05,
# colliding with LOAD/RUN/PLACE.  Because the interpreter dispatches on the
# opcode byte, the DEFINE branch was unreachable: a compiled `define` was
# mis-read as LOAD.  Opcodes must be unique, so these are renumbered.
# (Safe here: the byte-code only lives in a temp file within one run.)
BYTE_CODE_DEFINE = b'\x07'       # operands: name, value-expression
BYTE_CODE_JUMP_IF = b'\x08'      # operands: condition, relative address
BYTE_CODE_JUMP_IF_NOT = b'\x09'  # operands: condition, relative address
BYTE_CODE_SIZE = 1               # every opcode is exactly one byte
# Diagnostic message templates used with CompilerError.
MACRO_REQUIRES = '{} macro requires {}'
INVALID_VARIABLE_NAME = 'invalid variable name: {}'
UNKNOWN_MACRO = 'unknown macro: {}'
WITHOUT_PRECEDING = '{} macro without preceding {}'
UNTERMINATED_MACRO = 'unterminated {} macro'
class CompilerError(Exception):
    """Raised by compile_file() when a macro line is malformed.

    Carries the 1-based line number and the offending source line so callers
    can print a useful diagnostic.
    """

    def __init__(self, lnum, line, fmt, *args, **kwargs):
        message = fmt.format(*args, **kwargs)
        super().__init__(message)
        self.lnum = lnum
        self.line = line
ADDRESS_SIZE = 8                       # jump offsets: 8-byte signed big-endian
NULL_ADDRESS = b'\x00' * ADDRESS_SIZE  # placeholder, back-patched later
STRING_LENGTH_SIZE = 4                 # string length prefix: 4-byte unsigned

def write_string(string, outfile):
    """Serialize *string* as a 4-byte big-endian length prefix plus ASCII bytes."""
    encoded = string.encode('ascii')
    prefix = len(encoded).to_bytes(STRING_LENGTH_SIZE, 'big', signed=False)
    outfile.write(prefix)
    outfile.write(encoded)

def read_string(infile):
    """Inverse of write_string(); raises EOFError on a truncated length prefix."""
    prefix = infile.read(STRING_LENGTH_SIZE)
    if len(prefix) != STRING_LENGTH_SIZE:
        raise EOFError('while reading string')
    length = int.from_bytes(prefix, 'big', signed=False)
    return infile.read(length).decode('ascii')

def read_address(infile):
    """Read an 8-byte signed big-endian jump offset; raises EOFError if short."""
    raw = infile.read(ADDRESS_SIZE)
    if len(raw) != ADDRESS_SIZE:
        raise EOFError('while reading address')
    return int.from_bytes(raw, 'big', signed=True)
def compile_file(
        infile,   # open in "rt" mode
        outfile,  # open in "wb" mode; must be seekable for jump back-patching
        macro_prefix = '//#',
        macro_suffix = '',
        statement_prefix = '//@',
        statement_suffix = '',
        comment_prefix = '//!',
        comment_suffix = '',
        variable_prefix = "#{",
        variable_suffix = "}",
        evaluation_prefix = "@{",
        evaluation_suffix = "}@",
        ):
    """Translate a template text file into the byte-code stream consumed by
    process_byte_code().

    Literal text becomes WRITE instructions, `#{name}` / `@{expr}@` become
    WRITE_EVAL, `//@ stmt` lines become EXEC, and `//# macro` lines compile
    to DEFINE / LOAD / EXEC_FILE / conditional-jump instructions.  `if` and
    `while` write a placeholder jump offset that is back-patched at `end`.

    Raises CompilerError on a malformed or unterminated macro.
    """
    variable_evaluation_re = re.compile('{}({}){}|{}(.*){}'.format(
        re.escape(variable_prefix),
        VARIABLE_NAME_PATTERN,
        re.escape(variable_suffix),
        re.escape(evaluation_prefix),
        re.escape(evaluation_suffix),
    ))
    outfile.write(PROCESS_BYTE_CODE_FILE_MAGIC)
    macro_stack = collections.deque()
    write_buffer = bytearray()

    def flush_text():
        # Emit any buffered literal text as a single WRITE instruction.
        if write_buffer:
            outfile.write(BYTE_CODE_WRITE)
            outfile.write(len(write_buffer).to_bytes(
                STRING_LENGTH_SIZE, 'big', signed = False))
            outfile.write(write_buffer)
            write_buffer.clear()

    line = infile.readline()
    lnum = 1
    while line:
        striped_line = line.strip(' \t\n')
        if striped_line.startswith(macro_prefix) and \
                striped_line.endswith(macro_suffix):
            flush_text()
            macro, *args = \
                striped_line[len(macro_prefix):
                             len(striped_line) - len(macro_suffix)] \
                .strip(' \t').split(maxsplit = 1)
            args = args[0] if args else ''
            if macro == 'define':
                args = args.split(maxsplit = 1)
                if len(args) != 2:
                    raise CompilerError(lnum, line,
                        MACRO_REQUIRES, 'define', 'a name and a definition')
                if not is_valid_variable_name(args[0]):
                    raise CompilerError(lnum, line,
                        INVALID_VARIABLE_NAME, args[0])
                outfile.write(BYTE_CODE_DEFINE)
                write_string(args[0], outfile)
                write_string(args[1], outfile)
            elif macro == 'load':
                if not args:
                    raise CompilerError(lnum, line,
                        MACRO_REQUIRES, 'load', 'a file name')
                outfile.write(BYTE_CODE_LOAD)
                write_string(args, outfile)
            elif macro == 'exec':
                if not args:
                    raise CompilerError(lnum, line,
                        MACRO_REQUIRES, 'exec', 'a file name')
                outfile.write(BYTE_CODE_EXEC_FILE)
                write_string(args, outfile)
            elif macro == 'end':
                try:
                    last_macro = macro_stack.pop()
                except IndexError:
                    raise CompilerError(lnum, line,
                        WITHOUT_PRECEDING, 'end', 'if or for or while or ...')
                if args and args != last_macro[0]:
                    # Bug fix: this branch previously executed `0 / 0`,
                    # raising ZeroDivisionError instead of a diagnostic.
                    raise CompilerError(lnum, line,
                        'end {} does not match preceding {}',
                        args, last_macro[0])
                if last_macro[0] == 'if':
                    # Back-patch the placeholder offset written at `if`.
                    address = outfile.tell()
                    outfile.seek(last_macro[2], 0)
                    outfile.write((address - last_macro[3]).to_bytes(
                        ADDRESS_SIZE, 'big', signed = True))
                    outfile.seek(address)
                elif last_macro[0] == 'while':
                    # Jump back to re-test the loop condition...
                    outfile.write(BYTE_CODE_JUMP_IF)
                    write_string(last_macro[4], outfile)
                    outfile.write(
                        (last_macro[3] - outfile.tell() - ADDRESS_SIZE) \
                        .to_bytes(ADDRESS_SIZE, 'big', signed = True))
                    # ...then back-patch the forward exit jump.
                    address = outfile.tell()
                    outfile.seek(last_macro[2], 0)
                    outfile.write((address - last_macro[3]).to_bytes(
                        ADDRESS_SIZE, 'big', signed = True))
                    outfile.seek(address)
            elif macro == 'if':
                if not args:
                    raise CompilerError(lnum, line,
                        MACRO_REQUIRES, 'if', 'a condition')
                # JUMP_IF_NOT <cond> <offset>; offset is patched at `end`.
                outfile.write(BYTE_CODE_JUMP_IF_NOT)
                write_string(args, outfile)
                address = outfile.tell()
                outfile.write(NULL_ADDRESS)
                macro_stack.append(
                    ('if', (lnum, line), address, outfile.tell()) )
            elif macro == 'while':
                if not args:
                    raise CompilerError(lnum, line,
                        MACRO_REQUIRES, 'while', 'a condition')
                outfile.write(BYTE_CODE_JUMP_IF_NOT)
                write_string(args, outfile)
                address = outfile.tell()
                outfile.write(NULL_ADDRESS)
                # Keep the condition text for the back-jump written at `end`.
                macro_stack.append(
                    ('while', (lnum, line), address, outfile.tell(), args) )
            else:
                raise CompilerError(lnum, line,
                    UNKNOWN_MACRO, macro)
        elif striped_line.startswith(statement_prefix) and \
                striped_line.endswith(statement_suffix):
            # Bug fix: this slice previously used the *macro* prefix/suffix
            # lengths, which is only correct while both prefixes happen to
            # have equal lengths (true for the defaults, wrong otherwise).
            exec_string = \
                striped_line[len(statement_prefix):
                             len(striped_line) - len(statement_suffix)] \
                .strip(' \t')
            if exec_string:
                flush_text()
                outfile.write(BYTE_CODE_EXEC)
                write_string(exec_string, outfile)
        elif striped_line.startswith(comment_prefix) and \
                striped_line.endswith(comment_suffix):
            pass  # template comment: emit nothing
        else:
            m = re.search(variable_evaluation_re, line)
            while m:
                span = m.span()
                if span[0] != 0:
                    write_buffer += line[:span[0]].encode('ascii')
                flush_text()
                expr = m.group(1)  # renamed from `eval`, which shadowed the builtin
                if expr is None:
                    expr = m.group(2)
                outfile.write(BYTE_CODE_WRITE_EVAL)
                write_string(expr, outfile)
                line = line[span[1]:]
                # Bug fix: the original compared span[1] against the length of
                # the *already sliced* line, which could spuriously break and
                # drop trailing literal text.  Stop only when nothing remains.
                if not line:
                    break
                m = re.search(variable_evaluation_re, line)
            else:
                write_buffer += line.encode('ascii')
        line = infile.readline()
        lnum += 1
    flush_text()
    if macro_stack:
        last_macro = macro_stack.pop()
        raise CompilerError(*last_macro[1],
            UNTERMINATED_MACRO, last_macro[0])
class FileStructureError(Exception):
    """Raised when a byte-code stream is malformed (bad magic or unknown opcode)."""

    def __init__(self, fmt, *args, **kwargs):
        super().__init__(fmt.format(*args, **kwargs))
class ProcessError(Exception):
    """Raised when interpreting byte-code fails at the macro level
    (missing files, bad macro arguments)."""

    def __init__(self, fmt, *args, **kwargs):
        super().__init__(fmt.format(*args, **kwargs))
def process_byte_code(
        infile,                            # compiled stream, open in "rb" mode
        outfile,                           # text sink for WRITE/WRITE_EVAL output
        variables = None,                  # eval/exec namespace (mutated in place)
        outfile_variable_name = '__outfile__',
        ):
    """Interpret a byte-code stream produced by compile_file(), writing the
    expanded template text to *outfile*.

    SECURITY: WRITE_EVAL/EXEC/DEFINE/LOAD/EXEC_FILE run eval()/exec() on
    strings embedded in the stream -- never process untrusted input.

    Raises FileStructureError on a bad magic number or unknown opcode, and
    ProcessError for macro-level failures (missing files, bad arguments).
    """
    buffer = infile.read(len(PROCESS_BYTE_CODE_FILE_MAGIC))
    if buffer != PROCESS_BYTE_CODE_FILE_MAGIC:
        raise FileStructureError('file magic number mismatch: {}', buffer)
    if variables is None:
        variables = {}
    if outfile_variable_name not in variables:
        # Bug fix: this branch was an empty `pass`; expose the output stream
        # to template code under the configured name, as the parameter's
        # existence implies was intended.
        variables[outfile_variable_name] = outfile
    buffer = infile.read(BYTE_CODE_SIZE)
    while buffer:
        if buffer == BYTE_CODE_WRITE:
            outfile.write(read_string(infile))
        elif buffer == BYTE_CODE_WRITE_EVAL:
            outfile.write(str(eval(read_string(infile), variables)))
        elif buffer == BYTE_CODE_EXEC:
            exec(read_string(infile), variables)
        elif buffer == BYTE_CODE_LOAD:
            name = eval(read_string(infile), variables)
            if not isinstance(name, str):
                raise ProcessError('load macro argument must be type of str')
            try:
                with open(name) as json_file:
                    try:
                        variables.update(json.load(json_file))
                    except json.JSONDecodeError as e:
                        # Bug fix: the format string has two placeholders but
                        # only one argument was supplied, so reporting the
                        # error itself raised IndexError.
                        raise ProcessError(
                            'decoding json file \'{}\' raise error: {}',
                            name, e)
            except FileNotFoundError:
                raise ProcessError('json file \'{}\' not found', name)
        elif buffer == BYTE_CODE_EXEC_FILE:
            name = eval(read_string(infile), variables)
            if not isinstance(name, str):
                raise ProcessError('exec macro argument must be type of str')
            try:
                with open(name) as exec_file:
                    try:
                        # Bug fix: run in the template namespace, consistent
                        # with the EXEC opcode; the original exec'd in this
                        # function's own scope.
                        exec(exec_file.read(), variables)
                    except Exception as e:
                        # Bug fix: missing format argument, as above.
                        raise ProcessError(
                            'python file \'{}\' raise exception: {}',
                            name, e)
            except FileNotFoundError:
                raise ProcessError('python file \'{}\' not found', name)
        elif buffer == BYTE_CODE_DEFINE:
            name = read_string(infile)
            value = read_string(infile)
            variables[name] = eval(value, variables)
        elif buffer == BYTE_CODE_JUMP_IF:
            test = read_string(infile)
            address = read_address(infile)
            if bool(eval(test, variables)):
                infile.seek(address, 1)  # relative seek: jump by signed offset
        elif buffer == BYTE_CODE_JUMP_IF_NOT:
            test = read_string(infile)
            address = read_address(infile)
            if not bool(eval(test, variables)):
                infile.seek(address, 1)
        else:
            raise FileStructureError('unknown byte code: {}', buffer)
        buffer = infile.read(BYTE_CODE_SIZE)
def main(argv):
    """Compile each FILE argument and interpret the result to stdout.

    Returns EXIT_NORMAL on success (or when usage is printed), EXIT_ERROR
    when an input file cannot be opened.
    """
    program_name = argv[0] if argv else "frocess.py"
    if len(argv) < 2:
        printf_line('usage: python3 {} [OPTION]... [FILE]...', program_name)
        return EXIT_NORMAL
    for name in argv[1:]:
        try:
            infile = open(name, "rt")
        except FileNotFoundError as e:
            printf_error("can't open '{}': {}", name, e.strerror)
            return EXIT_ERROR
        # Bug fix: the original never closed the input file handle.
        with infile:
            with open('temp', 'wb') as temp_file:
                compile_file(infile, temp_file)
        with open('temp', 'rb') as temp_file:
            process_byte_code(temp_file, sys.stdout)
    return EXIT_NORMAL

# design:
# //@ define <name> <value>
# //@ load <filename>
# //@ place <filename>
# //@ include <filename>
# //@ function <name>( <args> ... )
# //@ end
# //@ call <name> [ <args> ... ]
# //@ for <name> <iterable>
# //@ end
# //@ while <cond>
# //@ end
# //@ break
# //@ continue
# //@ if <cond>
# //@ elif <cond>
# //@ else
# //@ end [ if | for | while ]
if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
import pygame
class Paddle(pygame.sprite.Sprite):
    """A vertically moving paddle clamped to its surface's height."""

    surface: pygame.Surface   # render target; also defines the playfield bounds
    velocity: pygame.Vector2  # per-frame movement (only y is ever clamped)
    paddle_width: int
    paddle_height: int
    rect: pygame.Rect         # current position and size
    oldrect: pygame.Rect      # position before the most recent update() move

    def __init__(self, surface: pygame.Surface, x: int, y: int) -> None:
        super().__init__()
        self.surface = surface
        self.velocity = pygame.Vector2(0, 0)
        self.paddle_width, self.paddle_height = 30, 150
        self.rect = pygame.Rect(
            x, y, self.paddle_width, self.paddle_height)
        self.oldrect = self.rect.copy()

    def check_edges(self) -> None:
        """Clamp the paddle inside the vertical bounds of the surface."""
        if self.rect.top < 0:
            self.velocity.y = 0
            self.rect.top = 0
        if self.rect.bottom > self.surface.get_height():
            self.rect.bottom = self.surface.get_height()
            self.velocity.y = 0

    def update(self) -> None:
        pygame.sprite.Sprite.update(self)
        self.check_edges()
        # Bug fix: the original stored a *reference* (`self.oldrect = self.rect`),
        # so moving rect silently "moved" oldrect too and the previous position
        # was lost; snapshot an independent copy instead.
        self.oldrect = self.rect.copy()
        self.rect.topleft += self.velocity

    def draw(self) -> None:
        pygame.draw.rect(self.surface, pygame.Color(255, 255, 255), self.rect)
|
from os import close
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import BeW
import sys
class Root(Tk):
    """Main window: one button opens a file dialog; after the main loop exits,
    the chosen path is handed to BeW.teste() via showimage()."""

    def __init__(self):
        super(Root, self).__init__()
        self.title("Test")
        self.minsize(800,600)
        self.labelFrame = ttk.LabelFrame(self,text = "abrir")
        self.labelFrame.grid(column = 0,row = 1,pady = 20)
        self.button()

    def button(self):
        # NOTE(review): this assignment shadows the method `button` with the
        # widget after the first call.  It works because the method is only
        # invoked once, but renaming the attribute would be cleaner.
        self.button = ttk.Button(self.labelFrame, text ="Arquivo", command = self.fileDialog)
        self.button.grid(column = 1,row = 1)

    def fileDialog(self):
        """Ask the user for a file, remember its path, and leave mainloop()."""
        global caminho
        caminho = filedialog.askopenfilename()
        # Bug fix: quit this window via self instead of the module-level
        # `root` global, which only exists when run as a script.
        self.quit()

    def showimage(self):
        # `caminho` is set by fileDialog(); calling this first raises NameError.
        BeW.teste(caminho)
if __name__=='__main__':
    root = Root()
    root.mainloop()
    # mainloop() returns once fileDialog() calls quit(); then hand the
    # selected file path to the image routine.
    root.showimage()
|
'''
Created on 15-Apr-2017
@author: rmaduri
'''
import matplotlib, sys
matplotlib.use('TkAgg')  # select the Tk backend before any figure is created
from numpy import arange, sin, pi
# NOTE(review): NavigationToolbar2TkAgg was renamed/removed in newer matplotlib
# (now NavigationToolbar2Tk); this import fails on modern versions -- confirm
# the pinned matplotlib release.  The name is also never used below.
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from tkinter import *
master = Tk()
master.title("Hello World!")
#-------------------------------------------------------------------------------
# Plot a sine wave over t in [0, 3) on a 5x4-inch, 100-dpi figure.
f = Figure(figsize=(5,4), dpi=100)
a = f.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
a.plot(t,s)
# Embed the figure's canvas in the Tk window.
dataPlot = FigureCanvasTkAgg(f, master=master)
dataPlot.show()  # NOTE(review): show() is a deprecated alias of draw() in newer matplotlib
dataPlot.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
#-------------------------------------------------------------------------------
master.mainloop()
#encoding:utf-8
__authors__ = ['"Wei Keke" <keke.wei@cs2c.com.cn>']
__version__ = "V0.1"
'''
# ChangeLog:
#---------------------------------------------------------------------------------
# Version Date Desc Author
#---------------------------------------------------------------------------------
# V0.1 2014/10/09 初始版本 Wei Keke
#---------------------------------------------------------------------------------
'''
import unittest
from BaseTestCase import BaseTestCase
from TestAPIs.DiskAPIs import DiskAPIs
from TestAPIs.ProfilesAPIs import ProfilesAPIs
from Utils.PrintLog import LogPrint
from Utils.Util import DictCompare,wait_until
#from Utils.HTMLTestRunner import HTMLTestRunner
from TestAPIs.DataCenterAPIs import DataCenterAPIs,smart_attach_storage_domain,smart_deactive_storage_domain,\
smart_detach_storage_domain
from TestAPIs.ClusterAPIs import ClusterAPIs
from TestAPIs.VirtualMachineAPIs import VirtualMachineAPIs,VmDiskAPIs
from TestAPIs.TemplatesAPIs import TemplatesAPIs, TemplateDisksAPIs,\
TemplateNicsAPIs,smart_create_template,smart_create_tempnic,smart_delete_template
from TestAPIs.HostAPIs import smart_create_host,smart_del_host
from TestAPIs.StorageDomainAPIs import smart_create_storage_domain,smart_del_storage_domain,\
StorageDomainAPIs
from TestAPIs.NetworkAPIs import NetworkAPIs
from TestData.Template import ITC07_SetUp as ModuleData
import xmltodict
class ITC07_SetUp(BaseTestCase):
    '''
    @summary: Template-management module-level case: initialize the module test environment.
    @note: (1) Create an NFS-type data center;
    @note: (2) Create a cluster;
    @note: (3) Create a host and wait until it reaches the UP state;
    @note: (4) Create 3 storage domains (data1/data2/Export);
    @note: (5) Attach data1 to the data center;
    @note: (6) Create a virtual machine;
    @note: (7) Create a disk;
    @note: (8) Attach the disk to the virtual machine.
    '''
    def setUp(self):
        # Load the data-driven test data module for this case.
        self.dm = super(self.__class__, self).setUp()
    def test_CreateModuleTestEnv(self):
        dcapi = DataCenterAPIs()
        capi = ClusterAPIs()
        # Create one data center (NFS type).
        LogPrint().info("Pre-Module-Test-1: Create DataCenter '%s'." % self.dm.dc_nfs_name)
        self.assertTrue(dcapi.createDataCenter(self.dm.xml_dc_info)['status_code']==self.dm.expected_status_code_create_dc)
        # Create one cluster.
        LogPrint().info("Pre-Module-Test-2: Create Cluster '%s' in DataCenter '%s'." % (self.dm.cluster_nfs_name, self.dm.dc_nfs_name))
        self.assertTrue(capi.createCluster(self.dm.xml_cluster_info)['status_code']==self.dm.expected_status_code_create_cluster)
        # Create a host in the NFS data center and wait for it to come UP.
        LogPrint().info("Pre-Module-Test-3: Create Host '%s' in Cluster '%s'." % (self.dm.host1_name, self.dm.cluster_nfs_name))
        self.assertTrue(smart_create_host(self.dm.host1_name, self.dm.xml_host_info))
        # Create the data storage domains (data1/data2/export) for the NFS DC.
        @BaseTestCase.drive_data(self, self.dm.xml_storage_info)
        def create_storage_domains(xml_storage_domain_info):
            sd_name = xmltodict.parse(xml_storage_domain_info)['storage_domain']['name']
            LogPrint().info("Pre-Module-Test-4: Create Data Storage '%s'." % sd_name)
            self.assertTrue(smart_create_storage_domain(sd_name, xml_storage_domain_info))
        create_storage_domains()
        # Attach the created data1 and data2 domains to the NFS data center.
        LogPrint().info("Pre-Module-Test-5: Attach the data storages to data centers.")
        self.assertTrue(smart_attach_storage_domain(self.dm.dc_nfs_name, self.dm.data1_nfs_name))
        self.assertTrue(smart_attach_storage_domain(self.dm.dc_nfs_name, self.dm.data2_nfs_name))
        #self.assertTrue(smart_attach_storage_domain(self.dm.dc_nfs_name, self.dm.export1_name))
        # Create a virtual machine.
        self.vmapi = VirtualMachineAPIs()
        r = self.vmapi.createVm(self.dm.vm_info)
        if r['status_code'] == 201:
            self.vm_name = r['result']['vm']['name']
        else:
            LogPrint().error("Create vm failed.Status-code is WRONG.")
            self.assertTrue(False)
        # Create a disk on the data1 storage domain.
        self.diskapi = DiskAPIs()
        sd_id = StorageDomainAPIs().getStorageDomainIdByName(ModuleData.data1_nfs_name)
        r = self.diskapi.createDisk(self.dm.disk_info, sd_id)
        def is_disk_ok():
            # Poll helper: True once the new disk reaches the 'ok' state.
            return self.diskapi.getDiskStatus(self.disk_id)=='ok'
        if r['status_code'] == 202:
            self.disk_id = r['result']['disk']['@id']
            if wait_until(is_disk_ok, 200, 5):
                LogPrint().info("Create disk ok.")
        else:
            LogPrint().error("Create disk failed.Status-code is WRONG.")
            self.assertTrue(False)
        # Attach the disk to the virtual machine.
        self.vmdiskapi = VmDiskAPIs()
        r=self.vmdiskapi.attachDiskToVm(self.vm_name, self.disk_id)
        if r['status_code'] == 200:
            LogPrint().info("Attach Disk to vm SUCCESS.")
        else:
            LogPrint().error("Attach Disk to vm fail.Status-code is WRONG.")
            self.assertTrue(False)
class ITC070101_GetTemplateList(BaseTestCase):
    '''
    @summary: 07 Template management - 01 Basic operations - 01 Get template list.
    '''
    def setUp(self):
        # Load the data-driven test data module for this case.
        self.dm = super(self.__class__, self).setUp()
    def test_GetTemplateList(self):
        '''
        @summary: Fetch the template list.
        @note: On success, validate the returned status code.
        '''
        temp_api = TemplatesAPIs()
        LogPrint().info("Test: Get template list.")
        r = temp_api.getTemplatesList()
        success = (r['status_code'] == 200)
        if success:
            LogPrint().info("PASS: Get TemplateList SUCCESS.")
        else:
            LogPrint().error("FAIL: Returned status code is WRONG.")
        self.assertTrue(success)
class ITC070102_GetTemplateInfo(BaseTestCase):
    '''
    @summary: 07 Template management - 01 Basic operations - 02 Get template details.
    '''
    def setUp(self):
        # Load test data and create a throwaway template the test can query.
        self.dm = super(self.__class__, self).setUp()
        self.temp_api = TemplatesAPIs()
        LogPrint().info("Pre-Test: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
    def test_GetTemplateInfo(self):
        '''
        @summary: Get the template's details.
        @note: On success, validate the returned status code and payload.
        '''
        self.flag=True
        LogPrint().info("Test: Get info of template %s."%self.dm.temp_name)
        r = self.temp_api.getTemplateInfo(self.dm.temp_name)
        if r['status_code'] == self.dm.expected_status_code:
            LogPrint().info("PASS: Get TemplateInfo SUCCESS.")
        else:
            # Bug fix: the failure message contained the typo "WRONOG".
            LogPrint().error("FAIL: Get TemplateInfo fail.The Template info is WRONG.")
            self.flag=False
        self.assertTrue(self.flag)
    def tearDown(self):
        # Clean up the template created in setUp().
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC0701030101_CreateTemplate(BaseTestCase):
    '''
    @summary: 07 Template management - 01 Basic operations - 03 Create template - 01 success - 01 minimal test set.
    '''
    def setUp(self):
        # Load the data-driven test data module for this case.
        self.dm = super(self.__class__, self).setUp()
    def test_CreateTemplate(self):
        '''
        @summary: Create templates (data-driven over several XML payloads).
        @note: On success, validate the returned status code and payload.
        '''
        self.tempapi = TemplatesAPIs()
        self.expected_result_index = 0
        @BaseTestCase.drive_data(self, self.dm.temp_info)
        def do_test(xml_info):
            self.flag=True
            LogPrint().info("Test: Create template %s."%self.dm.temp_name[self.expected_result_index])
            r = self.tempapi.createTemplate(xml_info)
            def is_temp_ok():
                # Poll helper: True once the template reaches the 'ok' state.
                return self.tempapi.getTemplateInfo(temp_name=self.dm.temp_name[self.expected_result_index])['result']['template']['status']['state']=='ok'
            if r['status_code'] == self.dm.expected_status_code:
                if wait_until(is_temp_ok, 600, 10):
                    LogPrint().info("PASS: Create Template '%s'ok."%self.dm.temp_name[self.expected_result_index])
                else:
                    LogPrint().error("FAIL: Create Template '%s'overtime"%self.dm.temp_name[self.expected_result_index])
                    self.flag=False
            else:
                LogPrint().error("FAIL: Create Template '%s'failed.Status-code is WRONG."%self.dm.temp_name[self.expected_result_index])
                self.flag=False
            self.assertTrue(self.flag)
            self.expected_result_index += 1
        do_test()
    def tearDown(self):
        # NOTE(review): assumes exactly 5 data-driven templates were created
        # (range(0,5)) -- confirm this matches len(self.dm.temp_name).
        for index in range(0,5):
            LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name[index])
            self.assertTrue(smart_delete_template(self.dm.temp_name[index]))
class ITC0701030102_CreateTemplate_SD(BaseTestCase):
    '''
    @summary: 07 Template management - 01 Basic operations - 03 Create template - 01 success - 02 on a specific storage domain.
    '''
    def setUp(self):
        # Load the data-driven test data module for this case.
        self.dm = super(self.__class__, self).setUp()
    def test_CreateTemplate_SD(self):
        '''
        @summary: Create a template, specifying the storage domain.
        @note: On success, validate the returned status code and payload.
        '''
        self.tempapi = TemplatesAPIs()
        LogPrint().info("Test: Create template %s."%self.dm.temp_name)
        r = self.tempapi.createTemplate(self.dm.temp_info)
        # Bug fix: `print r` is Python-2-only statement syntax; the
        # parenthesized call behaves identically on Python 2 and 3.
        print(r)
        def is_temp_ok():
            # Poll helper: True once the template reaches the 'ok' state.
            return self.tempapi.getTemplateInfo(temp_name=self.dm.temp_name)['result']['template']['status']['state']=='ok'
        if r['status_code'] == self.dm.expected_status_code:
            if wait_until(is_temp_ok, 600, 10):
                LogPrint().info("PASS: Create Template ok.")
            else:
                LogPrint().error("FAIL: Create Template overtime")
                self.assertTrue(False)
        else:
            LogPrint().error("FAIL: Create Template failed.Status-code is WRONG.")
            self.assertTrue(False)
    def tearDown(self):
        # Clean up the template created by the test.
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC0701030201_CreateTemplate_DupName(BaseTestCase):
    '''
    @summary: 07 Template management - 01 Basic operations - 03 Create template - 02 failure - 01 duplicate name.
    '''
    def setUp(self):
        # Load test data and pre-create the template whose name will collide.
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test: Create a template %s for TC."%self.dm.temp_name)
        smart_create_template(self.dm.temp_name, self.dm.temp_info)
    def test_CreateTemplate_DupName(self):
        '''
        @summary: Create a template whose name already exists.
        @note: The operation must fail; validate status code and error payload.
        '''
        # Bug fix: self.flag was assigned on the payload-mismatch path but was
        # never initialized nor asserted, so that failure silently passed.
        self.flag = True
        self.tempapi = TemplatesAPIs()
        LogPrint().info("Test: Create dupname template %s."%self.dm.temp_name)
        r = self.tempapi.createTemplate(self.dm.temp_info)
        if r['status_code'] == self.dm.expected_status_code:
            dictCompare = DictCompare()
            d1 = xmltodict.parse(self.dm.expected_info)
            if dictCompare.isSubsetDict(d1, r['result']):
                LogPrint().info("PASS: Returned status code and messages are CORRECT when create host with dup name.")
            else:
                LogPrint().error("FAIL: Returned messages are incorrectly.")
                self.flag = False
        else:
            LogPrint().error("FAIL: Status-code is WRONG.")
            self.flag = False
        self.assertTrue(self.flag)
    def tearDown(self):
        # Clean up the template created in setUp().
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC0701030202_CreateTemplate_VerifyName(BaseTestCase):
    '''
    @summary: 07 Template management - 01 Basic operations - 03 Create template - 02 failure - 02 illegal name.
    '''
    def setUp(self):
        # Load the data-driven test data module for this case.
        self.dm = super(self.__class__, self).setUp()
    def test_CreateTemplate_VerifyName(self):
        '''
        @summary: Create a template with an illegal name.
        @note: The operation must fail; validate status code and error payload.
        '''
        # Bug fix: self.flag was assigned on the payload-mismatch path but was
        # never initialized nor asserted, so that failure silently passed.
        self.flag = True
        self.tempapi = TemplatesAPIs()
        LogPrint().info("Test: Create template %s."%self.dm.temp_name)
        r = self.tempapi.createTemplate(self.dm.temp_info)
        if r['status_code'] == self.dm.expected_status_code:
            dictCompare = DictCompare()
            d1 = xmltodict.parse(self.dm.expected_info)
            if dictCompare.isSubsetDict(d1, r['result']):
                LogPrint().info("PASS: Returned status code and messages are CORRECT when create host with dup name.")
            else:
                LogPrint().error("FAIL: Returned messages are incorrectly.")
                self.flag = False
        else:
            LogPrint().error("FAIL: Status-code is WRONG.")
            self.flag = False
        self.assertTrue(self.flag)
class ITC0701030203_CreateTemplate_NoRequired(BaseTestCase):
    '''
    @summary: 07 Template management - 01 Basic operations - 03 Create template - 02 failure - 03 missing required parameters.
    '''
    def setUp(self):
        # Load the data-driven test data module for this case.
        self.dm = super(self.__class__, self).setUp()
    def test_CreateTemplate_NoRequired(self):
        '''
        @summary: Create templates with required fields missing (data-driven).
        @note: Each attempt must fail; validate status code and error payload.
        '''
        self.tempapi = TemplatesAPIs()
        self.expected_result_index = 0
        @BaseTestCase.drive_data(self, self.dm.temp_info)
        def do_test(xml_info):
            self.flag = True
            r = self.tempapi.createTemplate(xml_info)
            if r['status_code'] == self.dm.expected_status_code:
                dictCompare = DictCompare()
                if dictCompare.isSubsetDict(xmltodict.parse(self.dm.expected_info_list[self.expected_result_index]), r['result']):
                    LogPrint().info("PASS: The returned status code and messages are CORRECT.")
                else:
                    LogPrint().error("FAIL: The returned messages are INCORRECT.")
                    self.flag = False
            else:
                LogPrint().error("FAIL: The returned status code is '%s' while it should be '%s'." % (r['status_code'], self.dm.expected_status_code))
                self.flag = False
            self.assertTrue(self.flag)
            self.expected_result_index += 1
        do_test()
class ITC070105_DeleteTemplate(BaseTestCase):
    '''
    @summary: 07 Template management - 01 Basic operations - 05 Delete template.
    '''
    def setUp(self):
        # Load test data and create the template that the test will delete.
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
    def test_DeleteTemplate(self):
        '''
        @summary: Delete the template.
        @note: On success, validate the returned status code and that the
        template really disappears.
        '''
        self.flag=True
        self.tempapi = TemplatesAPIs()
        LogPrint().info("Test: Delete template %s."%self.dm.temp_name)
        r = self.tempapi.delTemplate(self.dm.temp_name)
        def temp_not_exist():
            # Poll helper: True once the template no longer shows up in search.
            # Idiom fix: identity comparison with None instead of `==None`.
            return self.tempapi.searchTemplateByName(self.dm.temp_name)['result']['templates'] is None
        if r['status_code'] == self.dm.expected_status_code:
            if wait_until(temp_not_exist, 300, 5):
                LogPrint().info("PASS: Delete Template SUCCESS.")
            else:
                # Consistency fix: FAIL messages now go through error(), matching
                # the sibling test cases, instead of info().
                LogPrint().error("FAIL: Delete Template failed.The Template still exist")
                self.flag=False
        else:
            LogPrint().error("FAIL: Delete Template failed.The status_code is WRONG")
            self.flag=False
        self.assertTrue(self.flag)
# class ITC07010601_ExportTemplate_sync(BaseTestCase):
# '''
# @summary: 07模板管理-01基本操作-06导出模板-01同步
# @bug: 该功能目前在web界面上失败,暂时只能通过返回状态码来判断
# '''
# def setUp(self):
# self.dm = super(self.__class__, self).setUp()
# self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
# def test_exportTemplate_sync(self):
# self.flag=True
# self.tempapi = TemplatesAPIs()
# r = self.tempapi.exportTemplate(self.dm.temp_name, self.dm.action)
# if r['status_code'] == self.dm.expected_status_code:
# LogPrint().info("Export template SUCCESS.")
# else:
# LogPrint().error("Export template failed.The status_code is WRONG.")
# self.flag=False
# self.assertTrue(self.flag)
# def tearDown(self):
# self.assertTrue(smart_delete_template(self.dm.temp_name))
#
# class ITC07010602_ExportTemplate_async(BaseTestCase):
# '''
# @summary: 07模板管理-01基本操作-06导出模板-02异步
# @bug: 该功能目前在web界面上失败,暂时只能通过返回状态码来判断
# '''
# def setUp(self):
# self.dm = super(self.__class__, self).setUp()
# self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
# def test_exportTemplate_sync(self):
# self.flag=True
# self.tempapi = TemplatesAPIs()
# r = self.tempapi.exportTemplate(self.dm.temp_name, self.dm.action)
# if r['status_code'] == self.dm.expected_status_code:
# LogPrint().info("Export template SUCCESS.")
# else:
# LogPrint().error("Export template failed.The status_code is WRONG.")
# self.flag=False
# self.assertTrue(self.flag)
class ITC070201_GetTemplateDiskList(BaseTestCase):
    '''
    @summary: 07 Template management - 02 template disk management - 01 get template disk list
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
    def test_GetTemplateDiskList(self):
        '''
        @summary: Get the disk list of a template.
        @note: The operation must succeed; verify the returned status code.
        '''
        self.flag = True
        tempdisk_api = TemplateDisksAPIs()
        LogPrint().info("Test: Get disk list of template %s."%self.dm.temp_name)
        r = tempdisk_api.getTemplateDiskList(self.dm.temp_name)
        if r['status_code'] == self.dm.expected_status_code:
            LogPrint().info("PASS: Get disk list of template %s SUCCESS."%self.dm.temp_name)
        else:
            LogPrint().error("FAIL: The status_code is WRONG")
            self.flag = False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC070202_GetTemplateDiskInfo(BaseTestCase):
    '''
    @summary: 07 Template management - 02 template disk management - 02 get template disk details
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
    def test_GetTemplateDiskInfo(self):
        '''
        @summary: Get the details of a template disk.
        @note: The operation must succeed; verify the returned status code and disk info.
        '''
        self.flag = True
        tempdisk_api = TemplateDisksAPIs()
        LogPrint().info("Test: Get disk info of template %s."%self.dm.temp_name)
        r = tempdisk_api.getTemplateDiskInfo(self.dm.temp_name,self.dm.disk_name)
        if r['status_code'] == self.dm.expected_status_code:
            dictCompare = DictCompare()
            # dm.disk_info is an XML template parameterized by the storage domain id.
            sd_id = StorageDomainAPIs().getStorageDomainIdByName(ModuleData.data1_nfs_name)
            expected_result = xmltodict.parse(self.dm.disk_info %sd_id)
            actual_result = r['result']
            if dictCompare.isSubsetDict(expected_result,actual_result):
                LogPrint().info("PASS: Get disk info of template %s SUCCESS."%self.dm.temp_name)
            else:
                LogPrint().error("FAIL: The disk_info is WRONG")
                self.flag = False
        else:
            LogPrint().error("FAIL: The status_code is WRONG")
            self.flag = False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC07020301_CopyTemplateDisk_sync(BaseTestCase):
    '''
    @summary: 07 Template management - 02 template disk management - 03 copy template disk - 01 sync
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
    def test_CopyTemplateDisk_sync(self):
        '''
        @summary: Copy a template disk synchronously.
        @note: The operation must succeed; verify the status code and that the
               disk shows up on the destination storage domain.
        '''
        self.flag = True
        tempdisk_api = TemplateDisksAPIs()
        LogPrint().info("Test: Copy disk of template %s sync."%self.dm.temp_name)
        r = tempdisk_api.copyTemplateDisk(self.dm.temp_name, self.dm.disk_name, self.dm.copy_data)
        def is_tempdisk_ok():
            # Copying is finished once the disk status returns to 'ok'.
            return tempdisk_api.getTemplateDiskStatus(self.dm.temp_name, self.dm.disk_name)=='ok'
        def check_tempdisk_sd(temp_name,disk_name,sd_id):
            '''
            @summary: Check whether the storage domains holding the template disk include the given one.
            @param temp_name: template name
            @param disk_name: disk name
            @param sd_id: storage domain id
            @return: True or False
            '''
            sd_list = tempdisk_api.getTemplateDiskSdList(temp_name, disk_name)
            flag = False
            # After a copy the disk may live on several domains; scan them all.
            for index in range(len(sd_list)):
                if sd_list[index]['@id'] == sd_id:
                    flag = True
            return flag
        if r['status_code'] == self.dm.expected_status_code:
            # Wait up to 300 seconds, polling every 10 seconds, for the copy to finish.
            if wait_until(is_tempdisk_ok, 300, 10):
                if check_tempdisk_sd(self.dm.temp_name, self.dm.disk_name, self.dm.des_sd_id):
                    LogPrint().info("PASS: Copy Template Disk sync SUCCESS.")
                else:
                    LogPrint().error("FAIL: The des sd is not %s."%self.dm.des_sd_name)
                    self.flag= False
            else:
                LogPrint().error("FAIL: CopyTemplateDisk overtime")
                self.flag= False
        else:
            LogPrint().error("FAIL: The status_code is WRONG")
            self.flag= False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC07020302_CopyTemplateDisk_async(BaseTestCase):
    '''
    @summary: 07 Template management - 02 template disk management - 03 copy template disk - 02 async
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        # Fixed misleading log: setUp only creates the template for the test,
        # it does not copy any disk.
        LogPrint().info("Pre-Test: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
    def test_CopyTemplateDisk_async(self):
        '''
        @summary: Copy a template disk asynchronously.
        @note: The operation must succeed; verify the status code and that the
               disk shows up on the destination storage domain.
        '''
        LogPrint().info("Test: Copy disk of template %s async."%self.dm.temp_name)
        self.flag = True
        tempdisk_api = TemplateDisksAPIs()
        r = tempdisk_api.copyTemplateDisk(self.dm.temp_name, self.dm.disk_name, self.dm.copy_data)
        # (removed leftover Python 2 debug statement "print r")
        def is_tempdisk_ok():
            # Copying is finished once the disk status returns to 'ok'.
            return tempdisk_api.getTemplateDiskStatus(self.dm.temp_name, self.dm.disk_name)=='ok'
        def check_tempdisk_sd(temp_name,disk_name,sd_id):
            '''
            @summary: Check whether the storage domains holding the template disk include the given one.
            @param temp_name: template name
            @param disk_name: disk name
            @param sd_id: storage domain id
            @return: True or False
            '''
            sd_list = tempdisk_api.getTemplateDiskSdList(temp_name, disk_name)
            flag = False
            for index in range(len(sd_list)):
                if sd_list[index]['@id'] == sd_id:
                    flag = True
            return flag
        if r['status_code'] == self.dm.expected_status_code:
            if wait_until(is_tempdisk_ok, 300, 10):
                if check_tempdisk_sd(self.dm.temp_name, self.dm.disk_name, self.dm.des_sd_id):
                    # Fixed copy/paste error: this is the async case, not sync.
                    LogPrint().info("PASS: Copy Template Disk async SUCCESS")
                else:
                    LogPrint().error("FAIL: The des sd is not %s."%self.dm.des_sd_name)
                    self.flag= False
            else:
                LogPrint().error("FAIL: CopyTemplateDisk overtime")
                self.flag= False
        else:
            LogPrint().error("FAIL: The status_code is WRONG")
            self.flag= False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC07020303_CopyTemplateDisk_nosd(BaseTestCase):
    '''
    @summary: 07 Template management - 02 template disk management - 03 copy template disk - 03 missing storage domain
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
    def test_CopyTemplateDisk_nosd(self):
        '''
        @summary: Copy a template disk without specifying a storage domain.
        @note: The operation must fail; verify the returned status code and message.
        '''
        self.flag = True
        tempdisk_api = TemplateDisksAPIs()
        LogPrint().info("Test: Copy disk of template %s without SD."%self.dm.temp_name)
        r = tempdisk_api.copyTemplateDisk(self.dm.temp_name, self.dm.disk_name, self.dm.copy_data)
        if r['status_code'] == self.dm.expected_status_code:
            dictCompare = DictCompare()
            # The expected error message must be a subset of the actual response.
            if dictCompare.isSubsetDict(xmltodict.parse(self.dm.expected_info), r['result']):
                # Fixed typo in the log message: "ans" -> "and".
                LogPrint().info("PASS: Returned status code and messages are CORRECT.")
            else:
                LogPrint().error("FAIL: The error_log is WRONG.")
                self.flag = False
        else:
            LogPrint().error("FAIL: The status_code is WRONG.")
            self.flag = False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC070301_GetTemplateNicList(BaseTestCase):
    '''
    @summary: 07 Template management - 03 template NICs - 01 get NIC list
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
    def test_GetTemplateNicList(self):
        '''
        @summary: Get the NIC list of a template.
        @note: The operation must succeed; verify the returned status code.
        '''
        # Fixed: self.flag was never initialized here (sibling tests set it
        # explicitly), so the success path hit an uninitialized attribute at
        # the final assertTrue unless BaseTestCase happens to provide it.
        self.flag = True
        tempnic_api = TemplateNicsAPIs()
        LogPrint().info("Test: Get nic list of template %s."%self.dm.temp_name)
        r=tempnic_api.getTemplateNicList(self.dm.temp_name)
        if r['status_code'] == self.dm.expected_status_code:
            LogPrint().info("PASS: GetTemplateNicList SUCCESS.")
        else:
            LogPrint().error("FAIL: GetTemplateNicList fail.The status_code is WRONG")
            self.flag = False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC070302_GetTemplateNicInfo(BaseTestCase):
    '''
    @summary: 07 Template management - 03 template NICs - 02 get NIC details
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test-1: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
        LogPrint().info("Pre-Test-2: Create a nic for template %s."%self.dm.temp_name)
        self.assertTrue(smart_create_tempnic(self.dm.temp_name, self.dm.nic_data))
    def test_GetTemplateNicInfo(self):
        '''
        @summary: Get the details of a template NIC.
        @note: The operation must succeed; verify the returned status code and NIC info.
        '''
        # Fixed: self.flag was never initialized (sibling tests set it
        # explicitly), so the success path hit an uninitialized attribute.
        self.flag = True
        tempnic_api = TemplateNicsAPIs()
        LogPrint().info("Test: Get nic %s info of template %s."%(self.dm.nic_name, self.dm.temp_name))
        r = tempnic_api.getTemplateNicInfo(self.dm.temp_name, self.dm.nic_name)
        if r['status_code'] == self.dm.expected_status_code:
            dictCompare = DictCompare()
            # The NIC data used for creation must be a subset of what is returned.
            expected_result = xmltodict.parse(self.dm.nic_data)
            actual_result = r['result']
            if dictCompare.isSubsetDict(expected_result,actual_result):
                LogPrint().info("PASS: Get nic %s info of template %s SUCCESS."%(self.dm.nic_name, self.dm.temp_name))
            else:
                LogPrint().error("FAIL: Returned nic info is WRONG")
                self.flag = False
        else:
            LogPrint().error("FAIL: The status_code is WRONG")
            self.flag = False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC0703030101_CreateTemplateNic(BaseTestCase):
    '''
    @summary: 07 Template management - 03 template NICs - 03 create NIC - 01 success - 01 minimal parameter set
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test-1: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
    def test_CreateTemplateNic(self):
        '''
        @summary: Create NICs for a template (data-driven).
        @note: The operation must succeed; verify the returned status code and NIC info.
        '''
        tempnic_api = TemplateNicsAPIs()
        self.expected_result_index = 0
        # drive_data runs do_test once for each XML fragment in dm.nic_data.
        @BaseTestCase.drive_data(self, self.dm.nic_data)
        def do_test(xml_info):
            # Fixed: self.flag was never set to True, so the success path hit
            # an uninitialized attribute at the final assertTrue.
            self.flag = True
            LogPrint().info("Test: Create nic %s for template %s."%(self.dm.nic_name[self.expected_result_index], self.dm.temp_name))
            r = tempnic_api.createTemplateNic(self.dm.temp_name, xml_info)
            if r['status_code'] == self.dm.expected_status_code:
                dictCompare = DictCompare()
                # (removed leftover Python 2 debug statement "print xml_info")
                expected_result = xmltodict.parse(xml_info)
                actual_result = r['result']
                if dictCompare.isSubsetDict(expected_result,actual_result):
                    LogPrint().info("PASS: Create Nic %s SUCCESS."%self.dm.nic_name[self.expected_result_index])
                else:
                    LogPrint().error("FAIL: The nic %s info is WRONG"%self.dm.nic_name[self.expected_result_index])
                    self.flag = False
            else:
                LogPrint().error("FAIL: The status_code is WRONG")
                self.flag = False
            self.assertTrue(self.flag)
            self.expected_result_index += 1
        do_test()
    def tearDown(self):
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC0703030102_CreateTemplateNic_proid(BaseTestCase):
    '''
    @summary: 07 Template management - 03 template NICs - 03 create NIC - 01 success - 02 with a vnic profile
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test-1: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
        # Create a vnic profile on the data center's ovirtmgmt network.
        LogPrint().info("Pre-Test-2: Create a profile %s for ovirtmgmt."%self.dm.profile_name)
        self.nw_id = NetworkAPIs().getNetworkIdByName('ovirtmgmt', self.dm.dc_name)
        # (removed leftover Python 2 debug statement "print self.nw_id")
        r =ProfilesAPIs().createProfiles(self.dm.profile_info, self.nw_id)
        if r['status_code'] == 201:
            self.proid = r['result']['vnic_profile']['@id']
            LogPrint().info("Create Profile SUCCESS.")
        else:
            # NOTE(review): on failure self.proid stays unset and the test body
            # will fail with AttributeError — consider self.fail() here.
            LogPrint().error("Create Profile fail.The status_code is WRONG.")
    def test_CreateTemplateNic_proid(self):
        '''
        @summary: Create a NIC for a template with an explicit vnic profile.
        @note: The operation must succeed; verify the returned status code and NIC info.
        '''
        # Fixed: self.flag was never initialized (sibling tests set it
        # explicitly), so the success path hit an uninitialized attribute.
        self.flag = True
        tempnic_api = TemplateNicsAPIs()
        LogPrint().info("Test-: Create a nic %s with profile %s for template %s."%(self.dm.nic_name, self.dm.profile_name, self.dm.temp_name))
        r = tempnic_api.createTemplateNic(self.dm.temp_name, self.dm.nic_data,self.proid)
        if r['status_code'] == self.dm.expected_status_code:
            dictCompare = DictCompare()
            # dm.nic_data is an XML template parameterized by the profile id.
            expected_result = xmltodict.parse((self.dm.nic_data %self.proid))
            actual_result = r['result']
            if dictCompare.isSubsetDict(expected_result,actual_result):
                LogPrint().info("PASS: Create a nic %s with profile %s for template %s SUCCESS."%(self.dm.nic_name, self.dm.profile_name, self.dm.temp_name))
            else:
                LogPrint().error("FAIL: The nic_info is WRONG")
                self.flag = False
        else:
            LogPrint().error("FAIL: The status_code is WRONG")
            self.flag = False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test-1: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
        LogPrint().info("Post-Test-2: Delete profile %s."%self.dm.profile_name)
        ProfilesAPIs().delProfile(self.dm.profile_name, self.nw_id)
class ITC0703030201_CreateTemplateNic_DupName(BaseTestCase):
    '''
    @summary: 07 Template management - 03 template NICs - 03 create NIC - 01 creation fails - 01 duplicate name
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test-1: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
        LogPrint().info("Pre-Test-2: Create a nic %s for this template."%self.dm.nic_name)
        self.assertTrue(smart_create_tempnic(self.dm.temp_name, self.dm.nic_data))
    def test_CreateTemplateNic_DupName(self):
        '''
        @summary: Create a template NIC whose name already exists.
        @note: The operation must fail; verify the returned status code and message.
        '''
        # Fixed: self.flag was never initialized (sibling tests set it
        # explicitly), so the success path hit an uninitialized attribute.
        self.flag = True
        tempnic_api = TemplateNicsAPIs()
        LogPrint().info("Test: Create dupname nic %s for this template."%self.dm.nic_name)
        r = tempnic_api.createTemplateNic(self.dm.temp_name, self.dm.nic_data)
        if r['status_code'] == self.dm.expected_status_code:
            dictCompare = DictCompare()
            expected_result = xmltodict.parse(self.dm.expected_info)
            actual_result = r['result']
            if dictCompare.isSubsetDict(expected_result,actual_result):
                LogPrint().info("PASS: The returned status code and messages are CORRECT.")
            else:
                LogPrint().error("FAIL: Returned messages are incorrectly.")
                self.flag = False
        else:
            LogPrint().error("FAIL: The status_code is WRONG")
            self.flag = False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC0703030202_CreateTemplateNic_VerifyName(BaseTestCase):
    '''
    @summary: 07 Template management - 03 template NICs - 03 create NIC - 01 creation fails - 02 validate name legality
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
    def test_CreateTemplateNic_VerifyName(self):
        '''
        @summary: Create a template NIC with an illegal name.
        @note: The operation must fail; verify the returned status code and message.
        '''
        # Fixed: self.flag was never initialized (sibling tests set it
        # explicitly), so the success path hit an uninitialized attribute.
        self.flag = True
        tempnic_api = TemplateNicsAPIs()
        LogPrint().info("Test: Create nic %s for this template."%self.dm.nic_name)
        r = tempnic_api.createTemplateNic(self.dm.temp_name, self.dm.nic_data)
        if r['status_code'] == self.dm.expected_status_code:
            dictCompare = DictCompare()
            expected_result = xmltodict.parse(self.dm.expected_info)
            actual_result = r['result']
            if dictCompare.isSubsetDict(expected_result,actual_result):
                LogPrint().info("PASS: The returned status code and messages are CORRECT.")
            else:
                LogPrint().error("FAIL: Returned messages are incorrectly.")
                self.flag = False
        else:
            LogPrint().error("FAIL: The status_code is WRONG")
            self.flag = False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC0703030203_CreateTemplateNic_NoRequired(BaseTestCase):
    '''
    @summary: 07 Template management - 03 template NICs - 03 create NIC - 01 creation fails - 03 missing required fields
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
    def test_CreateTemplateNic_NoRequired(self):
        '''
        @summary: Create a template NIC with required fields missing.
        @note: The operation must fail; verify the returned status code and message.
        '''
        # Fixed: self.flag was never initialized (sibling tests set it
        # explicitly), so the success path hit an uninitialized attribute.
        self.flag = True
        tempnic_api = TemplateNicsAPIs()
        LogPrint().info("Test: Create nic for this template.")
        r = tempnic_api.createTemplateNic(self.dm.temp_name, self.dm.nic_data)
        if r['status_code'] == self.dm.expected_status_code:
            dictCompare = DictCompare()
            expected_result = xmltodict.parse(self.dm.expected_info)
            actual_result = r['result']
            if dictCompare.isSubsetDict(expected_result,actual_result):
                LogPrint().info("PASS: The returned status code and messages are CORRECT.")
            else:
                LogPrint().error("FAIL: Returned messages are incorrectly.")
                self.flag = False
        else:
            LogPrint().error("FAIL: The status_code is WRONG")
            self.flag = False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC07030401_UpdateTemplateNic(BaseTestCase):
    '''
    @summary: 07 Template management - 03 template NICs - 04 edit NIC - 01 success
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test-1: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
        LogPrint().info("Pre-Test-2: Create a nic %s for this template."%self.dm.nic_name)
        self.assertTrue(smart_create_tempnic(self.dm.temp_name, self.dm.nic_data))
        # Create a vnic profile on the data center's ovirtmgmt network.
        self.nw_id = NetworkAPIs().getNetworkIdByName('ovirtmgmt', self.dm.dc_name)
        r =ProfilesAPIs().createProfiles(self.dm.profile_info, self.nw_id)
        if r['status_code'] == 201:
            self.proid = r['result']['vnic_profile']['@id']
            LogPrint().info("Create Profile SUCCESS.")
        else:
            # NOTE(review): on failure self.proid stays unset and the test body
            # will fail with AttributeError — confirm this is acceptable.
            LogPrint().error("Create Profile fail.The status_code is WRONG.")
    def test_UpdateTemplateNic(self):
        '''
        @summary: Edit a template NIC.
        @note: The operation must succeed; verify the returned status code and NIC info.
        '''
        self.flag = True
        tempnic_api = TemplateNicsAPIs()
        LogPrint().info("Test: Update nic %s for this template."%self.dm.nic_name)
        r = tempnic_api.updateTemplateNic(self.dm.temp_name, self.dm.nic_name,self.dm.update_info,self.proid)
        if r['status_code'] == self.dm.expected_status_code:
            dictCompare = DictCompare()
            # dm.update_info is an XML template parameterized by the profile id.
            expected_result = xmltodict.parse((self.dm.update_info %self.proid))
            actual_result = r['result']
            if dictCompare.isSubsetDict(expected_result,actual_result):
                LogPrint().info("PASS: UpdateTemplateNic SUCCESS.")
            else:
                LogPrint().error("FAIL: UpdateTemplateNic fail.The nic_info is WRONG")
                self.flag = False
        else:
            LogPrint().error("FAIL: UpdateTemplateNic fail.The status_code is WRONG")
            self.flag = False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test-1: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
        LogPrint().info("Post-Test-2: Delete profile %s."%self.dm.profile_name)
        ProfilesAPIs().delProfile(self.dm.profile_name, self.nw_id)
class ITC070305_DeleteTemplateNic(BaseTestCase):
    '''
    @summary: 07 Template management - 03 template NICs - 05 delete NIC
    '''
    def setUp(self):
        self.dm = super(self.__class__, self).setUp()
        LogPrint().info("Pre-Test-1: Create a template %s for TC."%self.dm.temp_name)
        self.assertTrue(smart_create_template(self.dm.temp_name, self.dm.temp_info))
        LogPrint().info("Pre-Test-2: Create a nic %s for this template."%self.dm.nic_name)
        self.assertTrue(smart_create_tempnic(self.dm.temp_name, self.dm.nic_data))
    def test_DeleteTemplateNic(self):
        '''
        @summary: Delete a template NIC.
        @note: The operation must succeed; verify the status code and that the NIC is gone.
        '''
        # Fixed: self.flag was never initialized (sibling tests set it
        # explicitly), so the success path hit an uninitialized attribute.
        self.flag = True
        tempnic_api = TemplateNicsAPIs()
        LogPrint().info("Test: Delete nic %s for this template %s."%(self.dm.nic_name, self.dm.temp_name))
        r = tempnic_api.deleteTemplateNic(self.dm.temp_name, self.dm.nic_name)
        if r['status_code'] == self.dm.expected_status_code:
            # The NIC id must no longer resolve after deletion.
            if not tempnic_api.getNicIdByName(self.dm.temp_name, self.dm.nic_name):
                LogPrint().info("PASS: Delete nic %s for this template %s SUCCESS."%(self.dm.nic_name, self.dm.temp_name))
            else:
                LogPrint().error("FAIL: The nic %s is still exist."%self.dm.nic_name)
                self.flag = False
        else:
            LogPrint().error("FAIL: The status_code is WRONG")
            self.flag = False
        self.assertTrue(self.flag)
    def tearDown(self):
        LogPrint().info("Post-Test: Delete template %s."%self.dm.temp_name)
        self.assertTrue(smart_delete_template(self.dm.temp_name))
class ITC07_TearDown(BaseTestCase):
    '''
    @summary: Environment cleanup for the "template management" module
              (run after all test cases of this module have executed).
    @note: (1) Delete the VM (including its disks);
    @note: (2) Move the export and data (data2) domains to Maintenance; detach the export domain;
    @note: (3) Move the data center's Data domain (data1) to Maintenance;
    @note: (4) Delete the data center (non-forced);
    @note: (5) Delete all storage domains in Unattached state (data1/data2);
    @note: (6) Delete host1;
    @note: (7) Delete cluster1.
    '''
    def setUp(self):
        '''
        @summary: Module test environment initialization (load test data).
        '''
        # Reuse the module-level setup data set rather than per-case data.
        self.dm = self.initData('ITC07_SetUp')
    def test_TearDown(self):
        vmapi=VirtualMachineAPIs()
        # Step 1: delete the VM.
        vmapi.delVm(self.dm.vm_name)
        dcapi = DataCenterAPIs()
        capi = ClusterAPIs()
        # Step 2: move export/data2 domains to Maintenance, then detach them from the DC.
        # LogPrint().info("Post-Module-Test-1: Deactivate storage domains '%s'." % self.dm.export1_name)
        # self.assertTrue(smart_deactive_storage_domain(self.dm.dc_nfs_name, self.dm.export1_name))
        # LogPrint().info("Post-Module-Test-2: Detach storage domains '%s'." % self.dm.export1_name)
        # self.assertTrue(smart_detach_storage_domain(self.dm.dc_nfs_name, self.dm.export1_name))
        LogPrint().info("Post-Module-Test-3: Deactivate data storage domains '%s'." % self.dm.data2_nfs_name)
        self.assertTrue(smart_deactive_storage_domain(self.dm.dc_nfs_name, self.dm.data2_nfs_name))
        LogPrint().info("Post-Module-Test-4: Detach data storage domains '%s'." % self.dm.data2_nfs_name)
        self.assertTrue(smart_detach_storage_domain(self.dm.dc_nfs_name, self.dm.data2_nfs_name))
        # Step 3: move data1 to Maintenance.
        LogPrint().info("Post-Module-Test-5: Deactivate data storage domains '%s'." % self.dm.data1_nfs_name)
        self.assertTrue(smart_deactive_storage_domain(self.dm.dc_nfs_name, self.dm.data1_nfs_name))
        # Step 4: delete the data center non-forced (its domains become Unattached).
        if dcapi.searchDataCenterByName(self.dm.dc_nfs_name)['result']['data_centers']:
            LogPrint().info("Post-Module-Test-6: Delete DataCenter '%s'." % self.dm.dc_nfs_name)
            self.assertTrue(dcapi.delDataCenter(self.dm.dc_nfs_name)['status_code']==self.dm.expected_status_code_del_dc)
        # Step 5: delete the now-Unattached storage domains (data1/data2).
        LogPrint().info("Post-Module-Test-7: Delete all unattached storage domains.")
        dict_sd_to_host = [self.dm.data1_nfs_name, self.dm.data2_nfs_name]
        for sd in dict_sd_to_host:
            smart_del_storage_domain(sd, self.dm.xml_del_sd_option, host_name=self.dm.host1_name)
        # Step 6: delete host1.
        LogPrint().info("Post-Module-Test-8: Delete host '%s'." % self.dm.host1_name)
        self.assertTrue(smart_del_host(self.dm.host1_name, self.dm.xml_del_host_option))
        # Step 7: delete cluster1.
        if capi.searchClusterByName(self.dm.cluster_nfs_name)['result']['clusters']:
            LogPrint().info("Post-Module-Test-9: Delete Cluster '%s'." % self.dm.cluster_nfs_name)
            # NOTE(review): reuses expected_status_code_del_dc for cluster deletion — confirm intended.
            self.assertTrue(capi.delCluster(self.dm.cluster_nfs_name)['status_code']==self.dm.expected_status_code_del_dc)
if __name__ == "__main__":
    # Run only the module tear-down case when this file is executed directly.
    suite = unittest.TestSuite()
    suite.addTests(unittest.TestLoader().loadTestsFromNames(["Template.ITC07_TearDown"]))
    unittest.TextTestRunner(verbosity=2).run(suite)
import requests
from bs4 import BeautifulSoup
import csv
# Landing page of the Bilibili ranking board.
url = 'https://www.bilibili.com/ranking'
# Issue the HTTP request (network side effect; no error handling — assumes 200 OK).
response = requests.get(url)
html_text = response.text
print(html_text)
soup = BeautifulSoup(html_text, 'html.parser')
# Plain record class holding one entry of the ranking board.
# (Name "Vidoe" [sic] is kept — it is referenced elsewhere in the script.)
class Vidoe:
    def __init__(self, rank, title, score, visit, up, up_id, url):
        self.rank, self.title, self.score = rank, title, score
        self.visit, self.up = visit, up
        self.up_id, self.url = up_id, url

    def to_csv(self):
        """Return this record as a row (list) in CSV column order."""
        return [self.rank, self.title, self.score, self.visit,
                self.up, self.up_id, self.url]

    @staticmethod
    def CSV_title():
        """Header row matching the column order produced by to_csv()."""
        return ['排名', '标题', '分数', '播放量', 'Up主', 'Up ID', 'URL']
# Extract the ranked entries from the parsed page.
items = soup.findAll('li', {'class': 'rank-item'})
vidoes = []  # collected Vidoe records
for item in items:
    title = item.find('a', {'class': 'title'}).text  # video title
    score = item.find('div', {'class': 'pts'}).find('div').text  # composite score
    rank = item.find('div', {'class': 'num'}).text  # rank position
    visit = item.find('span', {'class': 'data-box'}).text  # first data-box; presumably the view count — verify against page layout
    up = item.find_all('a')[2].text  # uploader name (assumes the 3rd <a> — TODO confirm)
    space = item.find_all('a')[2].get('href')
    up_id = space[len('//space.bilibili.com/'):]  # uploader id parsed from the space URL
    url = item.find('a', {'class': 'title'}).get('href')
    v = Vidoe(rank, title, score, visit, up, up_id, url)
    vidoes.append(v)
    #print(f'{url}')
# Dump the collected records to a UTF-8 CSV file, header row first.
file_name = 'top100.csv'
with open(file_name, 'w', newline='',encoding='utf-8') as f:
    pen = csv.writer(f)
    pen.writerow(Vidoe.CSV_title())
    for v in vidoes:
        pen.writerow(v.to_csv())
|
# Population generation helpers (NSGA-II)
import random
from NSGA2.Filtpop import filtpop # 约束条件过滤
from NSGA2.Decodechrom import binary2decimal
def genepop(pop_size, genes_num, gene_length):
    """
    Generate a binary population that satisfies the problem constraints.

    Random individuals are created in batches and filtered through
    ``filtpop`` (the constraint filter) until ``pop_size`` valid
    individuals have been collected.

    :param pop_size: number of individuals required in the population
    :param genes_num: number of genes (decision variables) per individual
    :param gene_length: number of bits per gene
    :return: list of individuals; each individual is a list of
             ``genes_num`` bit-lists of length ``gene_length``
    """
    pop = []
    while len(pop) < pop_size:
        # Top up with as many random candidates as are still missing.
        # (Fixed the original shadowed loop variable `j` used for both loops.)
        for _ in range(pop_size - len(pop)):
            individual = [
                [random.randint(0, 1) for _ in range(gene_length)]
                for _ in range(genes_num)
            ]
            pop.append(individual)
        # Drop candidates violating the constraints.
        # NOTE: if filtpop rejects everything forever this loops endlessly
        # (unchanged from the original behaviour).
        pop = filtpop(pop)
    return pop
def pop2to10(pop, min_value, max_value):
    """
    Decode a binary population into its decimal representation.

    :param pop: list of individuals, each a list of bit-lists
    :param min_value: lower bound of the decoded value range
    :param max_value: upper bound of the decoded value range
    :return: list of individuals with each gene decoded via binary2decimal
    """
    return [
        [binary2decimal(gene, min_value, max_value) for gene in individual]
        for individual in pop
    ]
|
import os
import json
from flask import Flask, request, url_for, redirect
from twilio.util import TwilioCapability
import twilio.twiml
from twilio.rest import TwilioRestClient
# Account Sid and Auth Token can be found in your account dashboard
# (placeholders — real values are read from the environment at runtime).
ACCOUNT_SID = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
AUTH_TOKEN = 'YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'
# TwiML app outgoing connections will use
APP_SID = 'APZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
CALLER_ID = '+12345678901'
CLIENT = 'jenny'
app = Flask(__name__)
@app.route('/token')
def token():
    """Issue a Twilio capability token for the browser client.

    Grants outgoing connections to the TwiML app unless the request passes
    allowOutgoing=false, and incoming connections for the optional 'client'
    request parameter. Environment variables override the module constants.
    """
    account_sid = os.environ.get("ACCOUNT_SID", ACCOUNT_SID)
    auth_token = os.environ.get("AUTH_TOKEN", AUTH_TOKEN)
    app_sid = os.environ.get("APP_SID", APP_SID)
    capability = TwilioCapability(account_sid, auth_token)
    # This allows outgoing connections to the TwiML application.
    if request.values.get('allowOutgoing') != 'false':
        capability.allow_client_outgoing(app_sid)
    # This allows incoming connections to the client (if specified).
    client = request.values.get('client')
    if client is not None:  # fixed: identity comparison with None, not !=
        capability.allow_client_incoming(client)
    # Token string encoding the capabilities defined above.
    return capability.generate()
# Method for request for a call is made
@app.route('/call', methods=['GET', 'POST'])
def call():
    """Place an outbound call via the Twilio REST API.

    The callee is taken from the 'To' parameter, else from key '1' of a
    JSON 'Numbers' parameter, else a hardcoded fallback number.
    Returns an (empty) TwiML response string in every path.
    """
    resp = twilio.twiml.Response()
    # from_value = request.values.get('From')
    from_value = "+12018174217"  # hardcoded caller id — TODO: restore reading the From parameter
    to = request.values.get('To')
    if to:
        print("method call() Phone: Will call "+to)
    elif request.values.get('Numbers'):
        # 'Numbers' is a JSON object; key '1' holds the first number to dial.
        numbers = request.values.get('Numbers')
        parsed_numbers = json.loads(numbers)
        to = parsed_numbers['1']
        print("method call() JSON: Will call "+to)
    else:
        to = "+17875430767"
        print("method call() Hardcoded: Will call "+to)
    account_sid = os.environ.get("ACCOUNT_SID", ACCOUNT_SID)
    auth_token = os.environ.get("AUTH_TOKEN", AUTH_TOKEN)
    # app_sid is assigned but not used below — presumably leftover; verify.
    app_sid = os.environ.get("APP_SID", APP_SID)
    try:
        twilio_client = TwilioRestClient(account_sid, auth_token)
        # resp.say("created client")
        print("method call(): created client")
    except Exception, e:
        msg = 'Missing configuration variable: {0}'.format(e)
        # resp.say(msg)
        print("method call():" +msg)
        return str(resp)
    try:
        # status_callback posts lifecycle events to /status; /outbound
        # supplies the TwiML once the call connects.
        twilio_client.calls.create(from_=from_value,to=to,status_callback=url_for('.status',_external=True),status_callback_method="POST",
        status_events=["initiated", "ringing", "answered", "completed"],if_machine='Continue',url=url_for('.outbound',_external=True))
        # resp.say("created call")
        print("method call(): created call")
    except Exception, e:
        msg = str(e)
        # resp.say(msg)
        print(msg)
        return str(resp)
    return str(resp)
# method called when a call is made
@app.route('/outbound', methods=['POST'])
def outbound():
    """TwiML for an answered outbound call.

    Answering machines get a spoken notice (Spanish); humans get a one-digit
    keypad menu whose input is posted to /menu.
    """
    resp = twilio.twiml.Response()
    if request.values.get('AnsweredBy') == "machine":
        print("method outbound(): AnsweredBy by machine")
        resp.say("Le llamaba para avisarle sobre emergencia.", voice="alice",language="es-ES")
    else:
        print("method outbound(): The status of the call: "+request.values.get('CallStatus'))
        # Gather exactly one digit and post it to /menu.
        with resp.gather(numDigits=1, action=url_for('menu'), method="POST") as g:
            # resp.dial(to, callerId=caller_id, action=url_for("outbound"))
            g.say("Por favor presione el numero 1 si me escucha, o undir 2 si esta ocupado", voice="alice",language="es-ES")
    return str(resp)
# method for the status of the current call
@app.route('/status', methods=['POST'])
def status():
try:
resp = twilio.twiml.Response()
st = request.values.get('CallStatus')
if st == "completed":
print("method status(): The call was completed")
elif st in ('failed', 'no-answer', 'canceled'):
print("method status(): The call was "+st)
else:
print("method status(): The status is "+st)
except Exception, e:
msg = 'Exception: {0}'.format(e)
# resp.say(msg)
print("method status(): "+ msg)
return str(resp)
return str(resp)
# method that will take an action depending on the key pressed by the caregiver
@app.route('/menu', methods=['POST'])
def menu():
    """Dispatch on the digit gathered by /outbound.

    '1' speaks the speaker-activation message, '2' plays the busy message
    and hangs up; any other digit redirects back to the menu.
    """
    option = request.form['Digits']
    actions = {'1': _activate_Speaker,
               '2': _not_Available}
    # Fixed: `in` replaces dict.has_key(), which was removed in Python 3
    # (and `in` is the idiomatic form in Python 2 as well).
    if option in actions:
        response = twilio.twiml.Response()
        actions[option](response)
        return str(response)
    return _redirect()
@app.route('/', methods=['GET', 'POST'])
def welcome():
    """Greeting TwiML served on the root route."""
    response = twilio.twiml.Response()
    response.say("Welcome to the MAK Solutions Testing ground")
    return str(response)
# Private helpers used by the /menu dispatcher.
def _activate_Speaker(twiml_response):
    """Append the speaker-activation message; returns the same response object."""
    twiml_response.say("Activate speaker")
    return twiml_response
def _not_Available(twiml_response):
    """Append the not-available message and hang up; returns the response object."""
    twiml_response.say("Sorry not available")
    twiml_response.hangup()
    return twiml_response
def _redirect():
    """Build TwiML that announces the return and redirects back to /outbound."""
    twiml = twilio.twiml.Response()
    twiml.say("Returning to the menu")
    twiml.redirect(url_for('outbound'))
    return str(twiml)
if __name__ == "__main__":
    # Bind to all interfaces; port comes from the environment (default 5000).
    app.run(host='0.0.0.0', port=int(os.environ.get("PORT", 5000)), debug=True)
|
from collections import OrderedDict
import copy
import numpy
import torch
"""
`state_dict` or `nested_dict`?
When `torch.nn.Module` loads from state_dict, the keys in state_dict
contains prefix and parameter/buffer name. The prefix determines the
specific module to load the parameter/buffer. Meanwhile the parameter/buffer
name prevents mismatch among parameters/buffers.
With a nested dict, we don't need to care about the specific module
name (prefix) or the complicated structure (e.g. model.stem.conv1).
We ONLY care about the order: the order of the modules determines which
module to load.
Use cases:
1. When some of submodule names of `torch.nn.Module` have been changed,
or some submodules are wrapped into one big abstract module,
use TorchNestedLoader to save/load between previous and new module
2. Convert between darknet weights and torch weights
.cfg ------> DarknetParser torch.nn.Module
| |
.weights <-> DarknetNestedLoader <-> nested_dict <-> TorchNestedLoader <-> state_dict <-> .pth file
"""
__all__ = ["nested_dict_counter", "nested_dict_tensor2ndarray", "nested_dict_ndarray2tensor"]
def nested_dict_counter(nested_dict: OrderedDict) -> int:
    """Count the total number of scalar parameters in *nested_dict*.

    Args:
        nested_dict: mapping of block name -> (param name -> tensor/ndarray).

    Returns:
        Total element count across every tensor/ndarray value.

    Raises:
        TypeError: if any value is neither torch.Tensor nor numpy.ndarray.
    """
    counter = 0
    for state_dict_block in nested_dict.values():
        for params in state_dict_block.values():
            if isinstance(params, torch.Tensor):
                counter += params.numel()
            elif isinstance(params, numpy.ndarray):
                counter += params.size
            else:
                # Fixed typo in the original message ("ndarry").
                raise TypeError(
                    "params inside nested_dict should be "
                    "torch.Tensor or numpy.ndarray"
                )
    return counter
def nested_dict_tensor2ndarray(nested_dict: OrderedDict) -> OrderedDict:
    """Convert OrderedDict[str, OrderedDict[str, torch.Tensor]]
    to OrderedDict[str, OrderedDict[str, numpy.ndarray]].

    The input is left untouched: the original used copy.copy (a *shallow*
    copy) and then mutated the inner blocks, which clobbered the caller's
    nested_dict. We build fresh inner OrderedDicts instead.
    """
    converted = OrderedDict()
    for block_name, state_dict_block in nested_dict.items():
        new_block = OrderedDict()
        for name, params in state_dict_block.items():
            assert isinstance(params, torch.Tensor)
            # detach() so tensors that require grad convert cleanly.
            new_block[name] = params.detach().numpy()
        converted[block_name] = new_block
    return converted
def nested_dict_ndarray2tensor(nested_dict: OrderedDict) -> OrderedDict:
    """Convert OrderedDict[str, OrderedDict[str, numpy.ndarray]]
    to OrderedDict[str, OrderedDict[str, torch.Tensor]].

    The input is left untouched: the original used copy.copy (a *shallow*
    copy) and then mutated the inner blocks, which clobbered the caller's
    nested_dict. We build fresh inner OrderedDicts instead.
    """
    converted = OrderedDict()
    for block_name, state_dict_block in nested_dict.items():
        new_block = OrderedDict()
        for name, params in state_dict_block.items():
            assert isinstance(params, numpy.ndarray)
            # from_numpy shares memory with the source array (no copy).
            new_block[name] = torch.from_numpy(params)
        converted[block_name] = new_block
    return converted
import flask

# Tiny personal-CV site: one route per section. Content for /pic, /hobbies
# and /skills is still placeholder text.
cv = flask.Flask(__name__)

@cv.route('/name')
def name():
    """Return the CV owner's name as an HTML heading."""
    return '<h1>Goldie Perlmann</h1>'

@cv.route('/pic')
def pic():
    """Placeholder for a picture section."""
    return 'picture'

@cv.route('/hobbies')
def hobbies():
    """Placeholder for the hobbies section."""
    return '<p></p>'

@cv.route('/skills')
def skills():
    """Placeholder for the skills section."""
    return '<p></p>'

# Run the Flask development server when executed directly.
if __name__ == "__main__":
    cv.run()
# APPROACH 1 : OPTIMAL SOLUTION
# Time Complexity : O(n*m), n: number of rows of the matrix, m: number of columns of the matrix
# Space Complexity : O(1), not considering the space of the matrix (else, O(n*m) - result holds all the elements of the matrix)
# Did this code successfully run on Leetcode : Yes
# Any problem you faced while coding this : None
#
#
# Your code here along with comments explaining your approach
# 1. Traversal is split into - up and down directions
# 2. Up direction   -> Normally, go to the upper-right neighbor
#                   -> if in the first row, go to the right neighbor and change direction
#                   -> if in the last column, go to the neighbor below and change direction
# 3. Down direction -> Normally, go to the lower-left neighbor
#                   -> if in the last row, go to the right neighbor and change direction
#                   -> if in the first column, go to the neighbor below and change direction
# 4. Keep track of the count of elements traversed to exit the loop
class Solution:
def findDiagonalOrder(self, matrix: List[List[int]]) -> List[int]:
if not matrix:
return []
result = []
isUp, isDown, count, total_elements, row, column = True, False, 0, len(matrix) * len(matrix[0]), 0, 0
while count < total_elements:
result.append(matrix[row][column])
count += 1
if isUp:
if column == len(matrix[0]) - 1:
row, isUp, isDown = row + 1, False, True
elif row == 0:
column, isUp, isDown = column + 1, False, True
else:
row, column = row - 1, column + 1
elif isDown:
if row == len(matrix) - 1:
column, isUp, isDown = column + 1, True, False
elif column == 0:
row, isUp, isDown = row + 1, True, False
else:
row, column = row + 1, column - 1
return result
|
import requests
from bs4 import BeautifulSoup
class C1maps:
    """Scraper for COVID-19 statistics tables (Russian regions and world).

    Parses <td> streams as a small state machine: `mprint` flags that the
    anchor row was seen, `dos` counts cells within a row, `s` accumulates
    "name,count" for the current row.
    """

    def __init__(self):
        self.mprint = 0  # 1 once the anchor cell has been seen
        self.dos = 0     # cell index within the current row
        self.s = ''      # accumulated "name,count" for the current row

    def _reset_state(self):
        """Reset parser state so repeated calls (or a call to the other
        scraper) do not leak mprint/dos/s between runs — the original
        never reset these, corrupting any second call."""
        self.mprint = 0
        self.dos = 0
        self.s = ''

    def getRusData(self):
        """Scrape per-region Russian stats; return [(region, cases)] sorted desc."""
        self._reset_state()
        vgm_url = 'https://1maps.ru/statistika-koronavirusa-v-rossii-i-mire-na-19-maya-2020-na-segodnyashnij-den/'
        html_text = requests.get(vgm_url).text
        soup = BeautifulSoup(html_text, 'html.parser')
        contries = {}
        for td in soup.find_all('td'):
            if td.text == 'Москва':
                self.mprint = 1
            if self.mprint:
                if self.dos == 0:
                    self.s = td.text
                if self.dos == 1:
                    # td.text[1:] drops the leading character of the count cell
                    self.s += ',' + td.text[1:] + '\n'
                self.dos += 1
                if self.dos == 4:  # 4 cells per row in this table
                    contries[self.s.split(',')[0]] = int(self.s.split(',')[1])
                    self.dos = 0
                    self.s = ''
        sorted_contries = sorted(contries.items(), key=lambda x: x[1], reverse=True)
        return sorted_contries

    def getWorldData(self):
        """Scrape world stats; return [(country, cases)] sorted desc."""
        self._reset_state()
        vgm_url = 'https://koronavirus-ncov.ru/'
        html_text = requests.get(vgm_url).text
        soup = BeautifulSoup(html_text, 'html.parser')
        contries = {}
        for td in soup.find_all('td'):
            if td.text == 'Великобритания' or td.text == 'Россия':
                self.mprint = 1
            if self.mprint:
                if self.dos == 0:
                    self.s = td.text
                if self.dos == 1:
                    if td.text.find('+') != -1:
                        # strip thousands separators and the trailing "+delta"
                        self.s += ',' + td.text.replace(',', '')[:(td.text.find('+') - 2)]
                    else:
                        self.s += ',' + td.text.replace(',', '')
                self.dos += 1
                if self.dos == 6:  # 6 cells per row in this table
                    # Original used `&` here; bitwise & binds tighter than ==,
                    # so the condition compared against a bitwise result.
                    # `and` expresses the intended "neither summary row" test.
                    if self.s.find('Итого') == -1 and self.s.find('Европа') == -1:
                        contries[self.s.split(',')[0]] = int(self.s.split(',')[1])
                    self.dos = 0
                    self.s = ''
        sorted_contries = sorted(contries.items(), key=lambda x: x[1], reverse=True)
        return sorted_contries
#c = C1maps()
#c.getRusData()
#l = c.getRusData()
#for key, value in l:
# print(key + '->' + str(value)+ '\n') |
class ContactInfo:
    """A single address-book entry: name, phone number and e-mail."""

    def __init__(self, name, phone, email):
        self.name = name
        self.phone = phone
        self.email = email

    def print_info(self):
        """Print the entry as "name:phone:email"."""
        print('{0}:{1}:{2}'.format(self.name, self.phone, self.email))
if __name__ == '__main__':
    # ContactInfo takes (name, phone, email); the original calls omitted
    # the phone argument and crashed with TypeError. No phone is known for
    # these entries, so pass an empty string.
    sanghyun = ContactInfo('박상현', '', 'seenlab@gmail.com')
    hanbit = ContactInfo('hanbit', '', 'noreply@hanb.co.kr')
    sanghyun.print_info()
    hanbit.print_info()
|
import random
class ListGenerator:
    """Helpers for picking random elements from a sequence, optionally seeded."""

    @staticmethod
    def listselect(data):
        """Pick a single random element from *data*."""
        return random.choice(data)

    @staticmethod
    def listseed(seed, data):
        """Seed the module RNG, then pick one element (reproducible)."""
        random.seed(seed)
        return ListGenerator.listselect(data)

    @staticmethod
    def listselected(numbers, data):
        """Pick *numbers* elements from *data*, with replacement."""
        return [ListGenerator.listselect(data) for _ in range(numbers)]

    @staticmethod
    def listseeded(seed, numbers, data):
        """Seed the module RNG, then pick *numbers* elements (reproducible)."""
        random.seed(seed)
        return ListGenerator.listselected(numbers, data)
|
#!/usr/bin/env python3
"""
:mod:`strain` -- title
========================================
.. module strain
:platform: Unix, Windows, Mac, Linux
:synopsis: doc
.. moduleauthor:: Qi Zhang <qz2280@columbia.edu>
"""
class EulerianStrain:
    """Eulerian finite strain for an isotropic volume change from v0 to v."""

    def __init__(self, v0: float, v: float):
        self.v0 = v0  # reference volume
        self.v = v    # deformed volume

    @property
    def isotropic_compression(self) -> float:
        """Eulerian strain measure: ((v0/v)^(2/3) - 1) / 2."""
        return 1 / 2 * ((self.v0 / self.v) ** (2 / 3) - 1)

    def is_compression(self) -> bool:
        """True when the volume shrank (positive Eulerian strain).

        Returns the comparison directly instead of the original
        `if ...: return True else: return False` branch.
        """
        return self.isotropic_compression > 0
class LagrangianStrain:
    """Lagrangian finite strain for an isotropic volume change from v0 to v."""

    def __init__(self, v0: float, v: float):
        self.v0 = v0  # reference volume
        self.v = v    # deformed volume

    @property
    def isotropic_compression(self) -> float:
        """Lagrangian strain measure: ((v/v0)^(2/3) - 1) / 2."""
        return 1 / 2 * ((self.v / self.v0) ** (2 / 3) - 1)

    def is_compression(self) -> bool:
        """True when the volume shrank (negative Lagrangian strain).

        Returns the comparison directly instead of the original
        `if ...: return True else: return False` branch.
        """
        return self.isotropic_compression < 0
|
import nuke
import nukeSearcher

# Register the "Nuke API Searcher" tool under a custom "PP Tools" menu
# in Nuke's node toolbar.
toolbar = nuke.menu('Nodes')
c = toolbar.addMenu('PP Tools', 'pptool.png')
c.addCommand('Nuke API Searcher', lambda: nukeSearcher.nukeSearcher(), '', icon='pptool.png')
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.http import require_GET
from .models import Grade
@require_GET
def get_all_grades_by_category_and_kind_id(request, category_id, kind_id):
    """Return every Grade belonging to the given category/kind pair."""
    matching = Grade.objects.filter(
        kind__category_id=category_id, kind_id=kind_id
    )
    return HttpResponse(matching)
@require_GET
def get_grade_by_id(request, category_id, kind_id, grade_id):
    """Return the single Grade identified by grade_id within category/kind."""
    grade = Grade.objects.filter(
        kind__category_id=category_id, kind_id=kind_id, id=grade_id
    )
    return HttpResponse(grade)
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Author : SUN FEIFEI
from selenium.webdriver.common.by import By
from app.honor.teacher.home.vanclass.object_page.home_page import ThomePage
from app.honor.teacher.home.vanclass.object_page.vanclass_paper_page import VanclassPaperPage
from app.honor.teacher.home.vanclass.test_data.vanclass_data import GetVariable as ge
from conf.decorator_vue import teststep, teststeps
from conf.base_page import BasePage
from utils.assert_package import MyAssert
from utils.wait_element_vue import WaitElement
class DynamicPaperPage(BasePage):
    """Page object for the app home page's recent-papers activity view.

    Wraps element locators and high-level operations for the paper list,
    covering both the native Android wrapper and the embedded vue web view.
    """
    # Assertion-failure messages (runtime strings used by MyAssert; kept verbatim).
    dynamic_tips = '★★★ Error- 未进入近期卷子界面'
    dynamic_vue_tips = '★★★ Error- 未进入近期卷子vue界面'
    dynamic_list_tips = '★★★ Error- 近期卷子列表未加载成功'
    dynamic_empty_tips = '★★★ Error- 近期卷子列表为空'
    def __init__(self):
        self.home = ThomePage()  # home page object
        self.wait = WaitElement()  # explicit-wait helper
        self.paper = VanclassPaperPage()  # paper-detail page object
        self.screen = self.get_window_size()  # (width, height) used by swipes
        self.my_assert = MyAssert()  # soft-assert helper
    @teststeps
    def wait_check_app_page(self):
        """Check the native page, anchored on the view titled '卷子' (papers)."""
        locator = (By.XPATH, '//android.view.View[@text="卷子"]')
        return self.wait.wait_check_element(locator)
    @teststeps
    def wait_check_page(self):
        """Check the vue page, anchored on the nav-bar title '卷子' (papers)."""
        locator = (By.XPATH, '//div[@class="van-nav-bar__title van-ellipsis" and text()="卷子"]')
        return self.wait.wait_check_element(locator)
    @teststeps
    def wait_check_list_page(self):
        """Check that the homework list container has loaded."""
        locator = (By.XPATH, "//div[@id='homework-list']")
        return self.wait.wait_check_element(locator)
    @teststeps
    def wait_check_no_hw_page(self, var=10):
        """Empty-list check after all homework is deleted, anchored on the hint text."""
        locator = (By.XPATH, "//div[text()='学生练得不够?给学生布置个作业吧!']")
        return self.wait.wait_check_element(locator, var)
    @teststep
    def back_up_button(self):
        """Back button."""
        locator = (By.XPATH, '//img[@class="vt-page-left-img-Android"]')
        self.wait \
            .wait_find_element(locator).click()
    @teststep
    def help_button(self):
        """Help (tips) button in the nav bar."""
        locator = (By.XPATH, '//i[@class="nav-right-icon van-icon van-icon-question-o"]')
        self.wait \
            .wait_find_element(locator).click()
    # List-item elements
    @teststep
    def hw_create_time(self):
        """Creation time / completion subtitle elements.

        NOTE(review): same locator as hw_status() — presumably the subtitle
        carries both pieces of text; confirm against the page markup.
        """
        locator = (By.XPATH, '//div[@class="homework-list-content-subtitle-text"]')
        return self.wait.wait_find_elements(locator)
    @teststep
    def remind_button(self):
        """Remind buttons for each list item."""
        locator = (By.XPATH, '//img[@class="homework-list-content-icon-img"]')
        return self.wait.wait_find_elements(locator)
    @teststep
    def hw_name(self):
        """Homework item title elements."""
        locator = (By.XPATH, '//div[@class="homework-list-content-title-text"]')
        return self.wait.wait_find_elements(locator)
    @teststep
    def hw_vanclass(self):
        """Class-name elements for each list item."""
        locator = (By.XPATH, '//div[@class="homework-list-content-icon-text"]')
        return self.wait.wait_find_elements(locator)
    @teststep
    def hw_status(self):
        """Completion-status elements for each list item."""
        locator = (By.XPATH, '//div[@class="homework-list-content-subtitle-text"]')
        return self.wait.wait_find_elements(locator)
    @teststeps
    def help_operation(self):
        """Open the nav-bar help button, then confirm the tips dialog."""
        self.help_button()  # help button in the top-right corner
        self.tips_content_commit()
    # "Friendly tips" dialog
    @teststeps
    def wait_check_tips_page(self, var=3):
        """Check for the tips dialog, anchored on its '温馨提示' header."""
        locator = (By.XPATH, '//div[@class="van-dialog__header" and text()="温馨提示"]')
        return self.wait.wait_check_element(locator, var)
    @teststep
    def tips_title(self):
        """Read and print the tips dialog title."""
        locator = (By.XPATH, '//div[text()="温馨提示"]')
        item = self.wait.wait_find_element(locator).text
        print(item)
        return item
    @teststep
    def tips_content(self):
        """Read and print the tips dialog body text."""
        locator = (By.XPATH, '//div[@class="van-dialog__message van-dialog__message--has-title van-dialog__message--left"]')
        item = self.wait.wait_find_element(locator).text
        print(item)
        return item
    @teststep
    def commit_button(self):
        """Confirm button of the tips dialog."""
        locator = (By.XPATH, '//span[text()="确认"]/parent::button')
        self.wait.wait_find_element(locator).click()
    @teststeps
    def into_hw(self):
        """Open the first homework/paper item whose class is editable.

        Returns (item name, class name), or prints an error if no item
        outside the protected class was found.
        """
        # var = self.home.brackets_text_out(var)
        self.my_assert.assertTrue_new(self.wait_check_page(), self.dynamic_tips)  # page checkpoint
        self.my_assert.assertFalse(self.wait_check_no_hw_page(), self.dynamic_list_tips)  # page checkpoint
        hw = self.hw_name()  # homework items
        van = self.hw_vanclass()  # class names
        count = 0
        van_name = []
        name = []
        for i in range(len(hw)):
            if van[i].text != ge.VANCLASS:  # skip the class that must not be edited
                print("进入作业/试卷:", hw[i].text)
                name.append(hw[i].text)
                van_name.append(van[i].text)
                van[i].click()  # open the item
                count += 1
                break
        if count == 0:
            print('★★★ Error- 没有可测试的数据')
        else:
            return name[0], van_name[0]
    @teststeps
    def hw_list_operation(self):
        """Print the recent-papers list (name, creation time, class, status)."""
        name = self.hw_name()
        create = self.hw_create_time()
        status = self.hw_status()
        van = self.hw_vanclass()  # class names
        for i in range(len(name)):
            print(name[i].text, '\n',
                  create[i].text, ' ', van[i].text, ' ', status[i].text)
            print('----------------------')
    @teststep
    def delete_recent_hw_operation(self):
        """Delete every item in the recent homework list, one per pass."""
        while True:
            self.my_assert.assertTrue_new(self.wait_check_page(), self.dynamic_tips)  # page checkpoint
            self.swipe_vertical_web(0.5, 0.2, 0.9)
            if self.wait_check_no_hw_page():
                print('作业已清空完毕')
                ThomePage().back_up_button()
                break
            else:
                van_class = self.hw_vanclass()  # class names
                for i in range(len(van_class)):
                    if self.wait_check_page():
                        name = self.hw_name()  # item names
                        date = self.hw_create_time()  # creation times
                        status = self.hw_status()  # completion statuses
                        print(name[0].text, date[0].text, status[0].text, van_class[0].text)
                        name[0].click()
                        if self.paper.wait_check_page():
                            self.paper.delete_commit_operation()  # perform the delete
                            print('-' * 20)
    @teststeps
    def tips_content_commit(self, var=5):
        """Read the tips dialog (title + body), then confirm it."""
        if self.wait_check_tips_page(var):  # tips dialog present
            print('--------------------------')
            self.tips_title()
            self.tips_content()
            self.commit_button()  # confirm button
            print('--------------------------')
    @teststeps
    def tips_commit(self):
        """Confirm the tips dialog if present (without reading its text)."""
        if self.wait_check_tips_page():  # tips dialog present
            self.commit_button()  # confirm button
    @teststeps
    def swipe_vertical_web(self, ratio_x, start_y, end_y, steps=1000):
        """
        Vertical swipe (x stays constant).
        :param ratio_x: x coordinate as a fraction of screen width
        :param start_y: swipe start y as a fraction of screen height
        :param end_y: swipe end y as a fraction of screen height
        :param steps: duration in ms
        :return: None
        """
        x = int(self.screen[0] * ratio_x)
        y1 = int(self.screen[1] * start_y)
        y2 = int(self.screen[1] * end_y)
        self.driver.swipe(x, y1, x, y2, steps)
|
from django.shortcuts import render,redirect,get_object_or_404
from django.contrib import messages
from django.core.paginator import EmptyPage,Paginator
from .models import *
from .forms import *
def List(request):
    """Render all reports, paginated ten per page."""
    items = Report.objects.all()
    print(items)  # kept: original debug output
    paginator = Paginator(items, 10)
    page_number = request.GET.get('page')
    context = {
        'items': paginator.get_page(page_number)
    }
    return render(request, 'reports.html', context)
def Submit(request):
    """Handle the report-submission form: GET shows it, POST validates and saves."""
    if request.method == 'POST':
        form = ReportForm(request.POST, request.FILES)
        if form.is_valid():
            # ModelForm.save() persists every cleaned field; the original
            # also copied each cleaned_data value into ten unused locals.
            form.save()
            messages.success(request, 'Entry Submitted')
            return redirect('List')
        # Re-render the *bound* form so the user sees validation errors and
        # keeps their input (the original replaced it with an empty form).
        messages.error(request, 'Error! Retry')
        return render(request, "submit.html", {'form': form})
    form = ReportForm()
    return render(request, "submit.html", {'form': form})
"""
A PySpark script that converts raster images to COGs.
Edit the settings at the top to tune how many concurrent images to process per machine.
Edit get_input_and_output_paths to return the input rasters mapped to their desired COG locations.
Edit gdal_cog_commands to modify any GDAL settings to make the COGs you want.
"""
import os
import tempfile
import shutil
from urlparse import urlparse
from subprocess import Popen, PIPE
NUM_PARTITIONS = 50
# Fill this out for your particular job.
def get_input_and_output_paths():
    """
    Return a list of tuples of (input_uri, output_uri).
    URIs can be local paths or S3 paths.
    """
    ##
    # This is an example that reads some images from SpaceNet on S3 and creates
    # COGs in another bucket.
    ##
    import boto3
    bucket = 'spacenet-dataset'
    prefix = 'mvs_dataset/WV3/MSI/'
    def target_from_source(uri):
        # Map e.g. ".../scene.NTF" to "<dest prefix>/scene.TIF".
        base_name = os.path.splitext(os.path.basename(uri))[0]
        return "s3://azavea-research-emr/cog-creator/spacenet/test/{}.TIF".format(base_name)
    s3 = boto3.client('s3')
    # NOTE(review): list_objects returns at most 1000 keys and is not
    # paginated here — fine for this example, confirm for larger prefixes.
    list_result = s3.list_objects(Bucket=bucket, Prefix=prefix, Delimiter='/')
    result = []
    for o in list_result['Contents']:
        key = o['Key']
        if key.endswith('NTF'):
            result.append(("s3://{}/{}".format(bucket, key),
                           target_from_source(key)))
    # Cap the example run at 15 images.
    return result[:15]
# Edit this if you want control of how the COGs are created.
def gdal_cog_commands(input_path, tmp_dir):
"""
GDAL commands to create a COG from an input file.
Modify here if you want diffent options or processes.
Returns a tuple (commands, output_path)
"""
def get_output_path(command):
fname = os.path.splitext(os.path.basename(input_path))[0]
return os.path.join(tmp_dir, "{}-{}.tif".format(fname, command))
## Step 1: Translate to a GeoTiff.
translate_path = get_output_path("translate")
translate = ["gdal_translate",
"-of", "GTiff",
"-co", "tiled=YES",
input_path,
translate_path]
## Step 2: Add overviews
overviews = [2, 4, 8, 16, 32]
add_overviews = ["gdaladdo",
"-r", "bilinear",
translate_path] + list(map(lambda x: str(x), overviews))
## Step 3: Translate to COG
output_path = get_output_path("cog")
create_cog = ["gdal_translate",
"-co", "TILED=YES",
"-co", "COMPRESS=deflate",
"-co", "COPY_SRC_OVERVIEWS=YES",
"-co", "BLOCKXSIZE=512",
"-co", "BLOCKYSIZE=512",
"--config", "GDAL_TIFF_OVR_BLOCKSIZE", "512",
translate_path,
output_path]
return ([translate,
add_overviews,
create_cog], output_path)
## Utility Methods and the run command ##
def do_run(cmd):
p = Popen(cmd)
(out,err) = p.communicate(input)
if p.returncode != 0:
s = "Command failed:\n"
s += ' '.join(cmd) + "\n\n"
if out:
s += out + "\n\n"
if err:
s += err
raise Exception(s)
def target_partition_count(number_of_images):
    """One partition per image, capped at the configured maximum."""
    if number_of_images < NUM_PARTITIONS:
        return number_of_images
    return NUM_PARTITIONS
def makedirs_p(d):
    """mkdir -p: create *d* (and parents) if missing; return *d*.

    Uses EAFP instead of the original exists()/makedirs() pair, which
    raced if another process created the directory in between.
    """
    try:
        os.makedirs(d)
    except OSError:
        # Already exists (or lost a creation race) — only re-raise if the
        # path is genuinely not a directory.
        if not os.path.isdir(d):
            raise
    return d
def create_tmp_directory(prefix):
    """Create and return a unique temp directory under $PWD/cog-temp.

    Uses tempfile.mkdtemp, which creates the directory atomically — the
    original used the deprecated tempfile.mktemp (name-only, TOCTOU race)
    followed by a separate makedirs.
    """
    base = os.path.join(os.environ['PWD'], "cog-temp")
    if not os.path.isdir(base):
        try:
            os.makedirs(base)
        except OSError:
            if not os.path.isdir(base):
                raise
    return tempfile.mkdtemp(prefix=prefix, dir=base)
def get_local_copy(uri, local_dir):
    """Fetch *uri* (s3/http/local) into *local_dir*; return the local path."""
    scheme = urlparse(uri).scheme
    local_path = tempfile.mktemp(dir=local_dir)
    if scheme == "s3":
        cmd = ["aws", "s3", "cp", uri, local_path]
    elif scheme == "http":
        cmd = ["wget", "-O", local_path, uri]
    else:
        cmd = ["cp", uri, local_path]
    do_run(cmd)
    return local_path
def upload_to_dest(local_src, dest):
    """Copy/upload *local_src* to *dest* (S3 or local path); return *dest*."""
    if urlparse(dest).scheme == "s3":
        cmd = ["aws", "s3", "cp",
               "--content-type", "image/tiff",
               local_src, dest]
    else:
        dest_dir = os.path.dirname(dest)
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        cmd = ["cp", local_src, dest]
    do_run(cmd)
    return dest
def create_cog(source_uri, dest, local_dir):
    """Download one raster, run the GDAL COG pipeline on it, upload the result."""
    local_path = get_local_copy(source_uri, local_dir)
    commands, output_path = gdal_cog_commands(local_path, local_dir)
    for command in commands:
        do_run(command)
    upload_to_dest(output_path, dest)
def create_cogs(partition):
    """Process one Spark partition, expected to hold exactly one image pair.

    Raises if the partitioning produced an empty or overfull partition
    (each machine is meant to work on a single image at a time).
    """
    partition = list(partition)
    if not partition:
        raise Exception("EMPTY PARTITION")
    if len(partition) > 1:
        raise Exception("TOO MANY IN PARTITION {}".format(len(partition)))
    local_dir = create_tmp_directory("cog-creator")
    try:
        for (source_uri, dest) in partition:
            create_cog(source_uri, dest, local_dir)
    finally:
        # Single cleanup path; the original removed the directory on the
        # success path and then again in finally.
        shutil.rmtree(local_dir, ignore_errors=True)
def run_spark_job():
    """Distribute COG creation across the cluster, one image per partition."""
    from pyspark import SparkConf, SparkContext
    image_uris = get_input_and_output_paths()
    conf = SparkConf().setAppName("Spark COG Generator")
    sc = SparkContext(conf=conf)
    # Index the (i, v) pair instead of the Python-2-only tuple-unpacking
    # lambda (`lambda (i, v): v`), and use print() rather than the
    # Python-2-only print statement, so the driver runs on 2 and 3.
    sc.parallelize(enumerate(image_uris)) \
      .partitionBy(target_partition_count(len(image_uris))) \
      .map(lambda pair: pair[1]) \
      .foreachPartition(create_cogs)
    print("Done.")
# Entry point when submitted via spark-submit.
if __name__ == "__main__":
    run_spark_job()
|
# -*- coding : utf-8 -*-
from __future__ import absolute_import
import requests
from celery_app import app
from celery.utils.log import get_task_logger
from env import SERVER_WEBHOOK_URL
from pprint import pprint
logger = get_task_logger(__name__)
@app.task
def trigger_webhook(data):
    """Celery task: POST *data* as JSON to the server webhook, log the reply."""
    response = requests.post(SERVER_WEBHOOK_URL, json=data)
    logger.info(response.json())
|
"""
Contains methods for performing validation of learning models
"""
import time
from sklearn.model_selection import StratifiedKFold
from src.common import LABELS, FOLDS_COUNT
from src.common import SENTENCES
from src.data import dataset
from src.features.word_embeddings.word2vec_embedding import Word2VecEmbedding
from src.features.sentence_embeddings import sentence_embeddings
from src.features.build_features import FeatureBuilder
from src.models.algorithms.svm_algorithm import SvmAlgorithm
def single_fold_validation(training_features, training_labels, test_sentences, test_labels,
                           classifier_class, sentence_embedding, **kwargs):
    """Train one classifier on a pre-built fold and score its predictions.

    Args:
        training_features, training_labels: data passed to classifier.fit.
        test_sentences, test_labels: evaluation fold.
        classifier_class: class with fit(X, y) and predict(sentence).
        sentence_embedding: forwarded to the classifier constructor.
        **kwargs: extra constructor args; the special key
            ``include_wrong_sentences`` additionally requests the list
            of misclassified sentences.

    Returns:
        Accuracy ratio, or (ratio, wrong_sentences) when requested.
    """
    # pop() both reads the flag and removes it so it is not forwarded to
    # the classifier constructor (replaces the original `in` + `del` pair).
    include_wrong_sentences = bool(kwargs.pop("include_wrong_sentences", False))
    # test accuracy on a single fold with already built embeddings
    classifier = classifier_class(sentence_embedding, **kwargs)
    classifier.fit(training_features, training_labels)
    successes = 0
    wrong_sentences = []
    for i, label in enumerate(test_labels):
        prediction = classifier.predict(test_sentences[i])
        if int(prediction) == int(label):
            successes += 1
        elif include_wrong_sentences:
            wrong_sentences.append((' '.join(test_sentences[i]), label, prediction))
    ratio = float(successes) / len(test_labels)
    if include_wrong_sentences:
        return ratio, wrong_sentences
    return ratio
def test_cross_validation(labels, sentences, word_embedding, sentence_embedding,
                          classifier_class, folds_count, verbose=False, measure_times=False, **kwargs):
    """Stratified k-fold cross-validation of a classifier over sentence embeddings.

    Builds the word embedding once; builds the sentence embedding/features
    once globally, or once per fold when the embedding uses PCA (since PCA
    must be fit on training data only). Optionally accumulates wall-clock
    training/testing time.

    Returns:
        validation_results, or (validation_results, training_time,
        testing_time) when measure_times is true.
    """
    # test accuracy for all folds combinations
    skf = StratifiedKFold(n_splits=folds_count)
    validation_results = []
    # Read-and-strip the flag so it is not forwarded to the classifier.
    include_wrong_sentences = "include_wrong_sentences" in kwargs and kwargs["include_wrong_sentences"]
    if "include_wrong_sentences" in kwargs: del kwargs["include_wrong_sentences"]
    if verbose:
        print("Building word embedding...")
    word_embedding.build()
    fb = FeatureBuilder()
    training_time = 0
    testing_time = 0
    start_time = None
    if not sentence_embedding.use_pca:
        if measure_times:  # measure training time -------------------
            start_time = time.time()
        if verbose:
            print("Building sentence embedding...")
        sentence_embedding.build(word_embedding)
        if verbose:
            print("Building features...")
        fb.build(sentence_embedding, labels, sentences)
        if measure_times:  # measure training time -------------------
            # to be fair, multiply time with number of folds, because with PCA we will
            # have to build features folds_count times for cross-validation
            training_time += folds_count * (time.time() - start_time)
    for fold, (train_index, test_index) in enumerate(skf.split(sentences, labels)):
        if measure_times:  # measure training time -------------------
            start_time = time.time()
        if verbose:
            print("Testing fold {0}/{1}...".format(fold + 1, folds_count))
        if sentence_embedding.use_pca:
            # With PCA the embedding must be rebuilt on this fold's
            # training sentences only.
            if verbose:
                print("Building sentence embedding...")
            sentence_embedding.build(word_embedding, sentences[train_index])
            if verbose:
                print("Building features...")
            fb = FeatureBuilder()
            fb.build(sentence_embedding, labels, sentences)
        if verbose:
            print("Building classifier model and testing predictions...")
        classifier = classifier_class(sentence_embedding, **kwargs)
        classifier.fit(fb.features[train_index], fb.labels[train_index])
        if measure_times:  # measure training time -------------------
            training_time += time.time() - start_time
        if measure_times:  # measure testing time -------------------
            start_time = time.time()
        success_rate = classifier.clf.score(fb.features[test_index], fb.labels[test_index])
        if measure_times:  # measure testing time -------------------
            testing_time += time.time() - start_time
        if verbose:
            # NOTE(review): clf.score returns a float, so success_rate[0]
            # would raise if include_wrong_sentences were true here —
            # confirm the flag is never combined with verbose on this path.
            rate = success_rate[0] if include_wrong_sentences else success_rate
            print("Result in fold {:d}: {:4.2f}%".format(fold + 1, rate * 100))
        validation_results.append(success_rate)
    if measure_times:
        return validation_results, training_time, testing_time
    return validation_results
if __name__ == "__main__":
    # Example of how cross validation works.
    word_embedding = Word2VecEmbedding('google/GoogleNews-vectors-negative300.bin', 300)
    sentence_embedding = sentence_embeddings.ConcatenationEmbedding()
    classifier = SvmAlgorithm
    c = 100
    gamma = 0.1
    print("Testing parameter C={0}, gamma={1}...".format(c, gamma))
    print("." * 20)
    results = test_cross_validation(LABELS, SENTENCES, word_embedding, sentence_embedding, classifier,
                                    FOLDS_COUNT, True, C=c, gamma=gamma)
    # The original used the Python-2-only statement form `print results`.
    print(results)
|
class SmoothProp(Optimizer):
    """Gradient-smoothing optimizer (Keras-style): scales the learning rate by
    an exponentially filtered gradient divided by a filtered absolute gradient.
    """
    def __init__(self, lr=1e-4, beta=0.9, l2=1e-5, epsilon=1e-6, *args, **kwargs):
        super(SmoothProp, self).__init__(**kwargs)
        # Stashes every constructor local (including epsilon) as an attribute;
        # lr/beta are then re-bound as backend variables below.
        self.__dict__.update(locals())
        self.iterations = K.variable(0.0)
        self.lr = K.variable(lr)
        # NOTE(review): self.l2 stays a plain float here, yet get_config()
        # calls K.get_value(self.l2) — confirm the backend accepts floats.
        self.l2 = l2
        self.beta = K.variable(beta)
    def get_updates(self, params, constraints, loss):
        """Build the symbolic update list: (variable, new_value) pairs."""
        grads = self.get_gradients(loss, params)
        self.updates = [(self.iterations, self.iterations+1)]
        t = self.iterations + 1
        # Bias-correction-like compensation term for the filtered |grad|.
        ag_comp = 1.-K.exp(-t*(1.0-self.beta)/self.beta)
        # NOTE(review): assumes `constraints` iterates one callable per
        # parameter (each c is applied as c(p_t)) — confirm against the
        # Keras version this targets.
        for p, g, c in zip(params, grads, constraints):
            # filtered gradient state (sg) and filtered |gradient| state (ag)
            sg = K.variable(np.zeros(K.get_value(p).shape))
            ag = K.variable(np.zeros(K.get_value(p).shape))
            # l2 penalty
            if self.l2 > 0.:
                g = g + self.l2*p
            # update gradient filter (exponential moving average);
            # note: `filter` shadows the builtin of the same name.
            filter = lambda beta, x0, x1: beta*x0 + (1.-beta)*x1
            sg_t = filter(self.beta, sg, g)
            ag_t = filter(self.beta, ag, K.abs(g))
            # normalized step: filtered grad / filtered |grad|, compensated
            step = sg_t * (ag_comp / (ag_t + self.epsilon))
            # update parameter
            p_t = p - self.lr*step
            self.updates.append((sg, sg_t))
            self.updates.append((ag, ag_t))
            self.updates.append((p, c(p_t)))  # apply constraints
        return self.updates
    def get_config(self):
        """Serializable hyperparameter snapshot (Keras optimizer convention)."""
        return {"name": self.__class__.__name__,
                "lr": float(K.get_value(self.lr)),
                "l2": float(K.get_value(self.l2)),
                "beta": float(K.get_value(self.beta)),
                "epsilon": self.epsilon}
#!/usr/bin/python
################################################################################
from test import *
from diamond.collector import Collector
from nagios import NagiosStatsCollector
################################################################################
class TestNagiosStatsCollector(CollectorTestCase):
    """Unit test for NagiosStatsCollector using a canned nagiostat fixture."""
    def setUp(self):
        # 'bin': 'true' points the collector at a no-op binary; the real
        # output is injected below by patching Popen.communicate.
        config = get_collector_config('NagiosStatsCollector', {
            'interval': 10,
            'bin' : 'true',
            'use_sudo' : False
        })
        self.collector = NagiosStatsCollector(config, None)
    @patch('os.access', Mock(return_value=True))
    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        """Feed captured nagiostat output and check every published metric."""
        with patch('subprocess.Popen.communicate', Mock(return_value =
                ( self.getFixture('nagiostat').getvalue() , '')
                )):
            self.collector.collect()
        # Expected metric values as they appear in the 'nagiostat' fixture.
        metrics = {
            'AVGACTHSTLAT' : 196,
            'AVGACTSVCLAT' : 242,
            'AVGACTHSTEXT' : 4037,
            'AVGACTSVCEXT' : 340,
            'NUMHSTUP' : 63,
            'NUMHSTDOWN' : 0,
            'NUMHSTUNR' : 0,
            'NUMSVCOK' : 1409,
            'NUMSVCWARN' : 3,
            'NUMSVCUNKN' : 0,
            'NUMSVCCRIT' : 7,
            'NUMHSTACTCHK5M' : 56,
            'NUMHSTPSVCHK5M' : 0,
            'NUMSVCACTCHK5M' : 541,
            'NUMSVCPSVCHK5M' : 0,
            'NUMACTHSTCHECKS5M' : 56,
            'NUMOACTHSTCHECKS5M' : 1,
            'NUMCACHEDHSTCHECKS5M' : 1,
            'NUMSACTHSTCHECKS5M' : 55,
            'NUMPARHSTCHECKS5M' : 55,
            'NUMSERHSTCHECKS5M' : 0,
            'NUMPSVHSTCHECKS5M' : 0,
            'NUMACTSVCCHECKS5M' : 1101,
            'NUMOACTSVCCHECKS5M' : 0,
            'NUMCACHEDSVCCHECKS5M' : 0,
            'NUMSACTSVCCHECKS5M' : 1101,
            'NUMPSVSVCCHECKS5M' : 0,
        }
        self.setDocExample(self.collector.__class__.__name__, metrics)
        self.assertPublishedMany(publish_mock, metrics)
################################################################################
# Run the collector test standalone (unittest comes in via `from test import *`).
if __name__ == "__main__":
    unittest.main()
|
# -*- coding: utf-8 -*-
# Author: Peng Jing
# Created: 11:35 AM
# IDE: PyCharm
# List elements may have mixed types: numbers, strings, even nested lists.
nameList = ['小张','小王','小李']
length = len(nameList)

# --- Add elements: append / extend / insert ---
a = [1,2,]
b = [3,4]
a.append(b)  # append adds b as one (nested) element at the end
print(a)
a.extend(b)  # extend appends each element of b individually
print(a)
a.insert(1,3)  # insert(index, element)
print(a)

# --- Remove elements: del / pop / remove ---
print(a)
del a[0]  # delete the element at a given position
print(a)
a.pop()  # drop the last element
print(a)
a.remove(3)  # remove the first element equal to 3
print(a)

# --- Update an element in place ---
a[0] = 9
print(a)

# --- Search: in / index / count ---
# input() returns a str in Python 3, so convert to int before the
# membership test — the original compared '3' against ints and always
# reported "not found".
num = int(input('请输入要查找的数字:'))
if num in a :
    print('找到了')
else:
    print('没有找到')
print(a.index(3,0,3))  # search within the half-open index range [0, 3)
print(a.count(3))  # number of occurrences of 3
c = [1,2,3,4]
print(c)
c.reverse()
print(c)
print(c) |
# Minimal linked-list node; a `next` attribute is attached at the use sites.
class Node(object):
    pass
def loop_size(node):
    """Return the length of the cycle reachable from *node*.

    Assumes the chain eventually enters a cycle (node.next is never None).
    Records each node's first-visit position in a dict, making the whole
    walk O(n); the original scanned a list for membership on every step,
    which was O(n^2). (The original also had an unused `tail_node` local.)
    """
    first_seen = {}
    position = 0
    while node not in first_seen:
        first_seen[node] = position
        node = node.next
        position += 1
    return position - first_seen[node]
# Demo runs; expected loop sizes are noted in the trailing comments.
# Converted to Python 3: print() calls and range() instead of the
# Python-2-only print statements and xrange().
node1 = Node()
node1.next = node1
print(loop_size(node1))  # 1

node1 = Node()
node2 = Node()
node1.next = node2
node2.next = node1
print(loop_size(node1))  # 2

node1 = Node()
node2 = Node()
node3 = Node()
node1.next = node2
node2.next = node3
node3.next = node2
print(loop_size(node1))  # 2

# Make a short chain with a loop of 3
node1 = Node()
node2 = Node()
node3 = Node()
node4 = Node()
node1.next = node2
node2.next = node3
node3.next = node4
node4.next = node2
print(loop_size(node1))  # 3

nodes = [Node() for _ in range(50)]
for node, next_node in zip(nodes, nodes[1:]):
    node.next = next_node
nodes[49].next = nodes[21]
print(loop_size(nodes[0]))  # 29

nodes = [Node() for _ in range(3904)]
for node, next_node in zip(nodes, nodes[1:]):
    node.next = next_node
nodes[3903].next = nodes[2817]
print(loop_size(nodes[0]))  # 1087
# -*- coding: utf-8 -*-
""" Custom Sizer for Cryptocurrencies, allowing for fractional orders.
"""
from common import *
import backtrader as bt
from decimal import Decimal, ROUND_DOWN
class CryptoSizer(bt.Sizer):
    """Crypto sizer allowing fractional orders: buys spend all available
    cash (truncated to 0.01 units); sells close the open position.
    """
    params = (
        ('stake', 0.1),
    )

    def _getsizing(self, comminfo, cash, data, isbuy):
        """Return the order size for a buy, or the open position size otherwise."""
        if isbuy:
            raw = Decimal(cash / data.close[0])
            # Truncate (never round up) to two decimal places so the order
            # can always be covered by the available cash.
            return float(raw.quantize(Decimal('0.01'), rounding=ROUND_DOWN))
        position = self.broker.getposition(data)
        return position.size if position.size else 0
|
from flask import Flask

### Creates a WSGI application
# WSGI is a standard protocol we follow while communication between our web server
# and web application takes place
app = Flask(__name__)
# Initializing the Flask object will tell the Flask app to follow the
# WSGI protocol while communicating with the server

@app.route('/')  # This decorator usually takes two parameters, rule and options
# The rule parameter takes a string which will specify the URL that I am going to visit
# in that specific webpage
def welcome():
    """Home page handler for '/'."""
    return "Welcome to the FLASK"

@app.route('/members')
def members():
    """Handler for the '/members' page."""
    return "Welcome to the FLASK members page"

# When we define a function beneath a decorator then that function will automatically
# get triggered whenever we are visiting the URL mentioned in the 'rule' parameter
# of the decorator
if __name__ == '__main__':
    app.run(debug=True)
#!/usr/bin/python3
import os, sys
sys.path.append(os.getcwd())
from Utilities import python_helpers
import fractions
def main():
    """Solve Project Euler 33 (digit-cancelling fractions).

    Finds the non-trivial two-digit fractions n/d < 1 where naively
    "cancelling" a shared digit happens to give the correctly reduced value
    (e.g. 49/98 -> 4/8), and returns the denominator of their product in
    lowest terms.
    """
    import math  # local import so this fix is self-contained

    curious = []
    # Loop structure guarantees numerator < denominator, i.e. fraction < 1,
    # so the dead `numerator == denominator` check was dropped.
    for denominator in range(10, 100):
        for numerator in range(10, denominator):
            if numerator % 10 == 0:
                # skip trivial cases such as 30/50
                continue
            str_n, str_d = str(numerator), str(denominator)
            # digits shared by numerator and denominator (at most one here)
            shared = [digit for digit in str_d if digit in str_n]
            for digit in shared:
                naive_n = str_n.replace(digit, '')
                naive_d = str_d.replace(digit, '')
                if len(naive_n) == 1 and len(naive_d) == 1 and naive_d != '0' and naive_n != '0':
                    naive_fraction = fractions.Fraction(int(naive_n), int(naive_d))
                    true_fraction = fractions.Fraction(numerator, denominator)
                    if naive_fraction == true_fraction:
                        curious.append(true_fraction)
    # math.prod keeps exact Fraction arithmetic; Fraction auto-reduces, so
    # the public .denominator attribute (not the private _denominator) is
    # already the lowest-terms denominator.
    return math.prod(curious, start=fractions.Fraction(1)).denominator
if __name__=='__main__':
    # Script entry point: print the puzzle answer.
    print(main())
|
import argparse
import numpy as np
from PIL import Image
from scipy import fftpack
import cv2
import os
import collections
from dahuffman import HuffmanCodec
import ast
def largest_N_value_DCT(img_dct, num_of_coefficients):
    """Zero out all but the `num_of_coefficients` largest-magnitude DCT
    coefficients in every block (threshold coding).

    img_dct is indexed as (row, col, block).  Ties at the current maximum are
    all kept in the same step, so slightly more than N coefficients may
    survive when magnitudes repeat.
    """
    rows, cols, n_blocks = img_dct.shape[0], img_dct.shape[1], img_dct.shape[2]
    magnitudes = np.abs(img_dct)
    kept = np.zeros((rows, cols, n_blocks))
    for b in range(n_blocks):
        for _ in range(num_of_coefficients):
            peak = np.amax(magnitudes[:, :, b])
            if peak == 0:
                # only zeros (or already-taken entries) remain in this block
                continue
            peak_rows, peak_cols = np.where(magnitudes[:, :, b] == peak)
            for r, c in zip(peak_rows, peak_cols):
                kept[r, c, b] = img_dct[r, c, b]
                magnitudes[r, c, b] = -100  # sentinel: never selected again
    return kept
def check_and_zero_pad_img(img):
    """Zero-pad a 2-D image so both dimensions are multiples of 8.

    The original implementation padded by `rows % 8` (on both sides, via a
    scalar pad width), which does NOT produce a multiple of 8 - e.g. 9 rows
    became 11.  The correct amount is `(-rows) % 8`, appended on the
    bottom/right so existing pixel coordinates are preserved.

    Returns the input unchanged when no padding is needed.
    """
    rows, cols = img.shape[0], img.shape[1]
    pad_rows = (-rows) % 8   # 0 when rows is already a multiple of 8
    pad_cols = (-cols) % 8
    if pad_rows == 0 and pad_cols == 0:
        # No zero padding required.
        return img
    # Pad only the trailing edge of each axis with zeros.
    return np.pad(img, ((0, pad_rows), (0, pad_cols)), 'constant', constant_values=0)
def partitions_in_8X8(zero_pad_image, block_size = 8):
    """Split a 2-D image into square blocks stacked along axis 2.

    Blocks are taken in row-major order.  The `block_size` parameter was
    previously declared but ignored (loops and slices hard-coded 8, so any
    other size silently returned all zeros); it is now honoured, with the
    default of 8 preserving the original behaviour exactly.
    """
    rows, cols = zero_pad_image.shape[0], zero_pad_image.shape[1]
    n_blocks = int(rows * cols / (block_size * block_size))
    blocks = np.zeros((block_size, block_size, n_blocks))
    count = 0
    for i in range(rows // block_size):
        for j in range(cols // block_size):
            blocks[:, :, count] = zero_pad_image[block_size * i:block_size * (i + 1),
                                                 block_size * j:block_size * (j + 1)]
            count += 1
    return blocks
def convert_back_to_original_image(img_subblock, size):
    """Reassemble 8x8 blocks (stacked along axis 2, row-major order) into a
    single 2-D image of shape `size` = (rows, cols).

    Inverse of partitions_in_8X8 for the default block size.
    """
    out_rows, out_cols = size[0], size[1]
    image = np.zeros((out_rows, out_cols))
    block_id = 0
    for r in range(out_rows // 8):
        for c in range(out_cols // 8):
            image[8 * r:8 * (r + 1), 8 * c:8 * (c + 1)] = img_subblock[:, :, block_id]
            block_id += 1
    return image
def DCT_of_each_subblock(img_subblock):
    """Forward 2-D DCT of every block, after scaling pixels to [0, 1].

    The coefficients are rescaled by 255 and truncated to int32 so later
    stages work on integer data.
    """
    scaled = np.float32(img_subblock) / 255.0
    rows, cols, n_blocks = img_subblock.shape[0], img_subblock.shape[1], img_subblock.shape[2]
    coeffs = np.zeros((rows, cols, n_blocks), dtype='float32')
    for b in range(n_blocks):
        coeffs[:, :, b] = cv2.dct(scaled[:, :, b])
    coeffs = coeffs * 255
    return coeffs.astype(dtype='int32')
def IDCT_of_each_subblock(img_subblock_dct_N):
    """Apply the inverse DCT to every block.

    The result is written into an int32 array, so cv2.idct's float output is
    truncated on assignment (matches the encoder's integer pipeline).
    """
    rows, cols, n_blocks = img_subblock_dct_N.shape[0], img_subblock_dct_N.shape[1], img_subblock_dct_N.shape[2]
    restored = np.zeros((rows, cols, n_blocks), dtype='int32')
    for b in range(n_blocks):
        restored[:, :, b] = cv2.idct(img_subblock_dct_N[:, :, b])
    return restored
def Quantization(img_dct, q_matrix):
    """Divide every DCT block elementwise by the quantisation matrix and
    truncate the result to int32 (truncation toward zero, as before)."""
    # Broadcasting q_matrix over the block axis replaces the per-block loop.
    scaled = np.divide(img_dct, q_matrix[:, :, np.newaxis])
    return scaled.astype(dtype='int32')
def denorm_Quantization(revert_zig_zag, q_matrix):
    """Undo Quantization: multiply every block elementwise by the
    quantisation matrix.  Returns a float array, as before."""
    # Broadcast the matrix across the block axis; astype(float) keeps the
    # original float64 output dtype regardless of the input dtypes.
    return np.multiply(revert_zig_zag, q_matrix[:, :, np.newaxis]).astype(float)
def zig_zag_index(k, n):
    """Map linear position `k` to (row, col) along the zig-zag scan of an
    n x n grid.

    Based on the closed-form derivation by Tomas Bouda (Coells): the
    lower-right triangle is handled by mirroring through the centre.
    """
    upper_triangle = n * (n + 1) // 2
    if k >= upper_triangle:
        # mirror: solve for the complementary position, then reflect
        r, c = zig_zag_index(n * n - 1 - k, n)
        return n - 1 - r, n - 1 - c
    diag = int((np.sqrt(1 + 8 * k) - 1) / 2)      # anti-diagonal index
    offset = k - diag * (diag + 1) // 2           # position on that diagonal
    if diag & 1:
        return offset, diag - offset              # odd diagonals run downward
    return diag - offset, offset                  # even diagonals run upward
def zig_zag_scanning(img_dct_q):
    """Flatten every quantised block into a 1-D vector following the
    zig-zag scan order (low frequencies first)."""
    rows, cols, n_blocks = img_dct_q.shape[0], img_dct_q.shape[1], img_dct_q.shape[2]
    flattened = np.zeros((rows * cols, n_blocks))
    for b in range(n_blocks):
        for pos in range(rows * cols):
            r, c = zig_zag_index(pos, rows)
            flattened[pos, b] = img_dct_q[r, c, b]
    return flattened
def revert_zig_zag_scanning(RLE_total_decode):
    """Inverse of zig_zag_scanning: rebuild square blocks from the flat
    zig-zag vectors (one column per block)."""
    n_elements, n_blocks = RLE_total_decode.shape[0], RLE_total_decode.shape[1]
    side = int(np.sqrt(n_elements))
    blocks = np.zeros((side, side, n_blocks))
    for b in range(n_blocks):
        for pos in range(n_elements):
            r, c = zig_zag_index(pos, side)
            blocks[r, c, b] = RLE_total_decode[pos, b]
    return blocks
def run_length_encoding(img_zig_zag):
    """Run-length encode each zig-zag vector (one column per block).

    Each non-zero value becomes a (preceding_zero_count, value) pair; the
    final element of a block is always emitted, even when it is zero, so the
    decoder knows where the block ends.
    """
    length, n_blocks = img_zig_zag.shape[0], img_zig_zag.shape[1]
    encoded = []
    for b in range(n_blocks):
        pairs = []
        run = 0
        for pos in range(length):
            value = img_zig_zag[pos, b]
            if value == 0 and pos < length - 1:
                run += 1
            else:
                pairs.append((run, value))
                run = 0
        encoded.append(pairs)
    return encoded
def run_length_decoding(RLE_total):
    """Expand run-length pairs back into flat 64-element zig-zag vectors.

    RLE_total is a list (one entry per block) of (zero_run, value) pairs.
    Block length is fixed at 64, matching the 8x8 blocks of this pipeline.
    """
    n_blocks = len(RLE_total)
    BLOCK_LEN = 64
    decoded = np.zeros((BLOCK_LEN, n_blocks))
    for b, pairs in enumerate(RLE_total):
        pos = 0
        for zero_run, value in pairs:
            pos += zero_run          # skip the encoded run of zeros
            decoded[pos, b] = value
            pos += 1
    return decoded
def huffman_coding_decoding(RLE_total):
    """Huffman-encode the *string* form of the RLE data and decode it back.

    dahuffman's HuffmanCodec works on symbol sequences, so the RLE list is
    first serialised with str().  Returns (codec, coded_string,
    decoded_string); decoded_string should equal str(RLE_total), proving the
    round trip is lossless.  Also prints the generated code table.
    """
    RLE_total_new = str(RLE_total)
    codec = HuffmanCodec.from_data(RLE_total_new)
    print("Huffman Code Table: \n")
    codec.print_code_table()
    coded_string = codec.encode(RLE_total_new)
    decoded_string = codec.decode(coded_string)
    return codec, coded_string, decoded_string
def main(args):
    """Run one JPEG-style compression/decompression cycle on a grayscale image.

    args keys:
      "input"             - path to the input image,
      "no_of_coefficient" - number of DCT coefficients to keep (threshold coding),
      "method"            - 'zonal_coding' or 'threshold_coding'.

    Saves the reconstructed image under results/jpeg/ and prints the RMSE
    against the original.
    """
    PATH = args["input"]
    num_of_coefficients = args["no_of_coefficient"]
    # Standard JPEG luminance quantisation table.
    q_matrix = np.array( [[16, 11, 10, 16, 24, 40, 51, 61],
                          [12, 12, 14, 19, 26, 58, 60, 55],
                          [14, 13, 16, 24, 40, 57, 69, 56],
                          [14, 17, 22, 29, 51, 87, 80, 62],
                          [18, 22, 37, 56, 68, 109, 103, 77],
                          [24, 35, 55, 64, 81, 104, 113, 92],
                          [49, 64, 78, 87, 103, 121, 120, 101],
                          [72, 92, 95, 98, 112, 100, 103, 99]])
    # Reading the image as a single-channel grayscale array.
    img = cv2.imread(PATH, cv2.IMREAD_GRAYSCALE)
    rows, cols = img.shape[0], img.shape[1]
    # Pad so the image can be partitioned into 8 x 8 blocks.
    zero_pad_image = check_and_zero_pad_img(img)
    zero_pad_image = zero_pad_image.astype('int32')
    # Level shift: centre pixel values around zero before the DCT.
    zero_pad_image = zero_pad_image - 128
    # Partition into 8 x 8 blocks.
    img_subblock = partitions_in_8X8(zero_pad_image)
    # Forward DCT per block.
    img_dct = DCT_of_each_subblock(img_subblock)
    if (args["method"] == 'threshold_coding'):
        # Threshold coding: keep only the N largest-magnitude coefficients
        # of each subblock; no quantisation/entropy-coding stages.
        img_subblock_dct_N = largest_N_value_DCT(img_dct, num_of_coefficients)
        denorm_q = img_subblock_dct_N
    else:
        # Zonal coding path: quantise, entropy-code, then invert each stage.
        img_dct_q = Quantization(img_dct, q_matrix)
        # Zig-zag scanning (low frequencies first)
        img_zig_zag = zig_zag_scanning(img_dct_q)
        # Run-length encoding
        RLE_total = run_length_encoding(img_zig_zag)
        # Huffman coding round trip (demonstrates losslessness)
        codec, coded_string, decoded_string = huffman_coding_decoding(RLE_total)
        decoded_list = ast.literal_eval(decoded_string)
        # Run-length decoding
        RLE_total_decode = run_length_decoding(decoded_list)
        # Invert the zig-zag operation
        revert_zig_zag = revert_zig_zag_scanning(RLE_total_decode)
        # Denormalisation by the quantisation matrix
        denorm_q = denorm_Quantization(revert_zig_zag, q_matrix)
    # Inverse DCT per block.
    img_subblock_reconstructed = IDCT_of_each_subblock(denorm_q)
    # Convert the 3-D block stack back to a 2-D image.
    img_reconstructed = convert_back_to_original_image(img_subblock_reconstructed, (rows, cols))
    # Undo the level shift.
    img_reconstructed = img_reconstructed + 128
    img_reconstructed = img_reconstructed.astype(dtype ='uint8')
    # Root-mean-square error against the original.
    # NOTE(review): both operands are uint8, so the subtraction can wrap
    # around before squaring - confirm this is the intended error metric.
    rmse = np.sqrt(np.sum((img - img_reconstructed)** 2) / (rows * cols))
    print("RMSE : ", rmse)
    # Save the reconstructed image.
    img_name = os.path.basename(PATH)
    img_name = img_name.replace('.png', '.jpg')
    #save_img = '{}_'.format(N) + img_name
    results_jpeg = f'results/jpeg'
    if not os.path.isdir(f'{results_jpeg}'):
        os.makedirs(results_jpeg)
    save_img = f'{results_jpeg}/{num_of_coefficients}_{args["method"]}_{img_name}'
    cv2.imwrite( save_img, img_reconstructed)
if __name__ == "__main__":
    # Command-line interface: parse the arguments into a dict and run the
    # compression pipeline.
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--input", required=True, help="path to input image for jpeg compression", type =str)
    ap.add_argument("-n", "--no_of_coefficient", required=True, help = "number of coefficients between 1 to 64", type =int)
    ap.add_argument("-m", "--method", required=True, help = "enter zonal_coding or threshold_coding", type = str)
    args = vars(ap.parse_args())
    main(args)
|
__author__ = 'vladimir'

from re import match

from pymorphy2 import MorphAnalyzer

# Corpus statistics over the "Dictionary" file (one "word count" per line).
total_words = 0          # renamed from `all`, which shadowed the builtin
nonnum = 0               # entries that are purely alphabetic
all_postings = 0
alpha_postings = 0
no_stops_postings = 0
low_reg = dict()         # lowercased word -> summed posting count
lemmatized = dict()      # lemma -> summed posting count

with open("Dictionary") as f:
    # iterate the file lazily instead of loading it all via readlines()
    for line in f:
        total_words += 1
        word = line.split(" ")[0]
        cnt = int(line.split(" ")[1])
        all_postings += cnt
        # keep alphabetic-only tokens (no digits, no punctuation);
        # raw string avoids the invalid-escape warning for \W and \d
        if match(r"^[^\W\d]+$", word):
            nonnum += 1
            alpha_postings += cnt
            lo = word.lower()
            if lo in low_reg:
                low_reg[lo] += cnt
            else:
                low_reg[lo] = cnt

# Cyrillic-only entries (code points U+0400..U+04FF).
just_ru = {k: v for (k, v) in low_reg.items() if match(u"^[\u0400-\u0500]+$", k)}
ru_postings = sum(just_ru.values())

morph = MorphAnalyzer()
c = 0
for k, v in just_ru.items():
    if c % 100000 == 0:
        print(c)  # progress indicator for the slow lemmatisation pass
    c += 1
    lem = morph.parse(k)[0].normal_form
    if lem in lemmatized:
        lemmatized[lem] += int(v)
    else:
        lemmatized[lem] = int(v)

with open("stopwords", "r") as st:
    stops = set(st.read().split('\n'))
for k, v in just_ru.items():
    if k not in stops:
        no_stops_postings += v

print("Raw dictionary size = {0}\n"
      "Without numbers = {1}\n"
      "Lowered = {2}\n"
      "Just russian = {3}\n".format(total_words, nonnum, len(low_reg), len(just_ru)))
print("Lemmatized = {0}\n\n".format(len(lemmatized)))
print("All postings = {0}\n"
      "Just alpha = {1}\n"
      "Just russian = {2}\n"
      "No stops = {3}".format(all_postings, alpha_postings, ru_postings, no_stops_postings))

# Persist lemma counts, most frequent first.
with open("lem_dict", "w") as f:
    for k, v in sorted(lemmatized.items(), reverse=True, key=lambda pair: pair[1]):
        f.write("{0} {1}\n".format(k, v))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.