text stringlengths 38 1.54M |
|---|
#! /usr/bin/python3
import time
import iota.harness.api as api
import iota.protos.pygen.topo_svc_pb2 as topo_svc_pb2
import iota.test.iris.testcases.penctl.penctldefs as penctldefs
import iota.test.iris.testcases.penctl.common as common
def Setup(tc):
    """Collect testbed state for the penctl controller-IP test.

    Attaches to *tc*: the Naples node hostnames, two alternative sets of
    Venice controller IPs (pushed one after the other by Trigger via DHCP),
    and an empty list for the controller IPs reported by penctl.
    """
    tc.Nodes = api.GetNaplesHostnames()
    tc.venice_ips = [["1.1.1.1", "2.2.2.2", "3.3.3.3"], ["4.4.4.4", "5.5.5.5", "6.6.6.6"]]
    #tc.venice_ips = ["1.1.1.1", "2.2.2.2"]
    tc.controller_ip_penctl = []
    return api.types.status.SUCCESS
def Trigger(tc):
    """Push two successive DHCP controller configs to the first Naples node.

    Applies the first set of venice IPs via DHCP, waits for the handshake,
    swaps in the second set, reboots the host and records the controller IP
    that penctl reports afterwards.
    """
    if not tc.Nodes:
        return api.types.status.SUCCESS
    node = tc.Nodes[0]
    # First controller set, delivered over DHCP in inband-dynamic mode.
    common.PrepareDhcpConfig(tc.venice_ips[0])
    common.SetupRemoteDhcp(node)
    common.SetNaplesModeInband_Dynamic(node)
    api.Logger.info("DHCP Server updated. Waiting 5 seconds for DHCP handshake between Naples and DHCP Server.")
    time.sleep(5)
    # Second controller set takes effect after the host reboot.
    common.PrepareDhcpConfig(tc.venice_ips[1])
    common.SetupRemoteDhcp(node)
    common.RebootHost(node)
    tc.controller_ip_penctl.append(common.PenctlGetControllersStatus(node)[0])
    return api.types.status.SUCCESS
def Verify(tc):
    """Placeholder verification step; always reports success."""
    api.Logger.info("Verification pending.")
    return api.types.status.SUCCESS
def Teardown(tc):
    """No teardown required for this testcase."""
    return api.types.status.SUCCESS
|
#!/usr/bin/env python3
import util
import time
class DeviceInfo:
    """Parsed view of the contents of an IDXH section.

    Header layout (32-bit little-endian words, decoded via ``util.getInt``):
    word 0 = device id, word 1 = device version, word 4 = backup timestamp
    (epoch seconds).
    """

    def __init__(self, data, sectionName):
        self.data = data
        self.dataSize = len(data)
        self.sectionName = sectionName
        self.device = None            # word 0
        self.deviceVersion = None     # word 1, raw value
        self.deviceVersionStr = None  # human-readable version
        self.backupDate = None        # word 4, epoch seconds
        self.backupDateStr = None     # formatted local time
        self.analyze()

    def analyze(self):
        """Decode the raw section bytes into the instance attributes."""
        self.device = util.getInt(self.data, 0)
        self.deviceVersion = util.getInt(self.data, 1 * util.intSize)
        # The upper bits hold the version; in hex it reads e.g. '2810000',
        # so the dot is inserted after the first digit -> '2.810000'.
        verStr = "{:x}".format(int(self.deviceVersion / 0x10000))
        self.deviceVersionStr = '.'.join((verStr[:1], verStr[1:]))
        self.backupDate = util.getInt(self.data, 4 * util.intSize)
        self.backupDateStr = time.asctime(time.localtime(self.backupDate))

    def printInfo(self):
        """Print the decoded fields to stdout."""
        print("Device: {}".format(self.device))
        print("Version: {}".format(self.deviceVersionStr))
        print("Backup Date: {}".format(self.backupDateStr))
|
#import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
# Hand-entered snapshot of COVID-19 numbers per Indian state/UT.
# X: state names; Y: confirmed cases; Z: deaths (aligned by index).
X= ["KERELA","MAHARASHTRA","TAMIL NADU","J&K", "UP","KARNATAKA","BIHAR","RAJASTHAN","PUNJAB","GUJURAT","AP","ASSAM","TELENGANA","WEST BENGAL","GOA","MP","ODISHA","JHARKHAND","HARYANA","MANIPUR","CHATTISHGARH","UTTARAKHAND","HIMACHAL PRADESH","NAGALAND","DELHI","TRIPURA","MEGHALAYA","ARUNACHAL","MIZORAM","LADAKH","PUDUCHERRY"]
Y= [11,116,26,11,38,51,5,36,29,38,10,0,39,10,3,18,2,0,21,1,7,4,4,0,30,0,0,0,1,13,1]
Z= [0,3,1,0,0,1,1,0,1,2,0,0,0,1,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0]
# Overlaid bar chart: deaths (red) drawn on top of cases (yellow).
plt.bar( X,Y,label = "Affected" , color ='y')
plt.bar( X,Z,label = "Died",color = 'r')
# Rotate and shrink the x labels so all state names fit.
plt.xticks(fontsize = 6, rotation=45)
plt.legend()
plt.xlabel('Corona hua')
plt.ylabel('mar gye')
plt.title('Corona Statistics in India')
plt.show()
|
# -*- coding: utf-8 -*-
from dp_tornado.engine.helper import Helper as dpHelper
from dp_tornado.engine.handler import Handler as dpHandler
class SessionHelper(dpHelper):
    """Session management helpers built on top of controller sessions."""

    def key(self, identifier):
        """Session key for *identifier* (dashes become underscores)."""
        return 'session_%s' % (identifier.replace('-', '_'))

    def is_authorized(self, controller, identifier):
        """Boolean wrapper around :meth:`authorized`."""
        return bool(self.authorized(controller=controller, identifier=identifier))

    def authorize(self, controller, identifier, payload, after=None, before=None):
        """Store *payload* (a dict) as the session for *identifier*.

        ``before(payload)`` can veto by returning False; ``after(payload)``
        runs once the session is stored and readable.
        """
        if not isinstance(payload, dict):
            return False
        if before and before(payload) is False:
            return False
        controller.session(
            name=self.key(identifier),
            value=self.helper.serialization.json.stringify(payload),
            expire_in=self.ini.session.expire_in)
        if after and self.is_authorized(identifier=identifier, controller=controller):
            after(payload)
        return True

    def unauthorize(self, controller, identifier):
        """Expire the stored session for *identifier*."""
        controller.session(self.key(identifier), '', expire_in=0)
        return True

    def authorized(self, controller, identifier):
        """Return the session payload for *identifier*, or False if absent."""
        if not isinstance(controller, dpHandler):
            controller = controller.parent
        cache_key = self.key(identifier)
        # Serve the per-request cached copy when one exists.
        if hasattr(controller, cache_key):
            return getattr(controller, cache_key)
        payload = controller.session(
            name=cache_key, expire_in=self.ini.session.expire_in)
        if not payload:
            return False
        payload = self.helper.serialization.json.parse(payload)
        # Cache the parsed payload on the handler for this request.
        if payload:
            setattr(controller, cache_key, payload)
        return payload
|
import requests
import json
def getIPCity(ip):
    """Return "country/regionName/city" for *ip* using ip-api.com.

    Macao, Taiwan and Hong Kong are reported as regions of China, in which
    case the result is "CHINA/<country>/<regionName>".
    """
    response = requests.get("http://ip-api.com/json/" + ip)
    info = json.loads(response.text)
    if info["country"] in ["MACAO", "TAIWAN", "HONG KONG"]:
        return "CHINA/" + info["country"] + "/" + info["regionName"]
    return info["country"] + "/" + info["regionName"] + "/" + info["city"]
# getIPCity("145.253.172.234")
import sqlite3
from flask_restful import Resource , reqparse
class User:
    """In-memory representation of a row in the ``users`` table."""

    def __init__(self, _id, username, password):
        self.id = _id
        self.username = username
        self.password = password

    @classmethod
    def _find_by(cls, column, value):
        """Look up a single user where *column* equals *value*.

        *column* must be a trusted identifier ('username' or 'id' — it
        cannot be bound as a parameter); *value* is bound safely by the
        driver. Returns a ``User`` or ``None``. The connection is closed
        even if the query raises (the original leaked it on error); no
        commit is needed for a read-only query.
        """
        connection = sqlite3.connect('data.db')
        try:
            cursor = connection.cursor()
            row = cursor.execute(
                "SELECT * FROM users WHERE {}=?".format(column), (value,)
            ).fetchone()
            # Row order matches __init__: (id, username, password).
            return cls(*row) if row else None
        finally:
            connection.close()

    @classmethod
    def find_by_username(cls, username):
        """Return the user with *username*, or None if not found."""
        return cls._find_by('username', username)

    @classmethod
    def find_by_id(cls, _id):
        """Return the user with primary key *_id*, or None if not found."""
        return cls._find_by('id', _id)
class UserRegister(Resource):
    """Resource that registers a new user from a JSON payload."""

    # Parses and validates the request body: both fields are mandatory.
    parser = reqparse.RequestParser()
    parser.add_argument('username',
                        type=str,
                        required=True,
                        help="this field cannot be blank."
                        )
    parser.add_argument('password',
                        type=str,
                        required=True,
                        help="this field cannot be blank."
                        )

    def post(self):
        """Create a new user.

        Returns 400 when the username is already taken (the original
        returned the error payload with an implicit 200), 201 on success.
        The NULL id lets sqlite auto-increment the primary key.
        """
        data = UserRegister.parser.parse_args()
        if User.find_by_username(data['username']):
            return {'message': 'A user with that username already exists'}, 400
        connection = sqlite3.connect('data.db')
        try:
            cursor = connection.cursor()
            cursor.execute("INSERT INTO users VALUES (NULL , ? , ?)",
                           (data['username'], data['password']))
            connection.commit()
        finally:
            # Close even if the insert fails (original leaked on error).
            connection.close()
        return {"message": "user created successfully"}, 201
|
#!/usr/bin/env python
# Assembles overlapping DNA short reads from the files named on the command
# line into "CompleteDNAsequence.txt", removing merged reads from
# "shortread.txt" and splicing the merged sequence back in for further
# merging.
#
# NOTE(review): the original file had lost its indentation; the nesting
# below was reconstructed from the comments and data flow — verify against
# the original script before relying on it.
import sys
# sys.argv[0] is the program name; the remaining entries are input files.
FilesToRead = sys.argv
listoffiles = FilesToRead[1:]
print(listoffiles)
# Length of the overlap (suffix of one read / prefix of another) searched for.
number = 13
for files in listoffiles:
    file = open(files, 'r')
    firstline = file.readline()
    # The first read without its trailing overlap region.
    first = firstline[:-number]
    print(first)
    # The trailing `number` characters of the first read: the overlap key.
    Match_seq = []
    Match_nuc = firstline[-number:]
    Match_seq.append(Match_nuc)
    Match_seq[:] = [line.rstrip('\n') for line in Match_seq]
    print(Match_seq)
    # Compare every remaining read's prefix with the overlap key.
    for restoflines in file.readlines():
        otherlineStart = []
        otherline = restoflines[:number-1]
        otherlineStart.append(otherline)
        print(otherlineStart)
        # Overlap found: write both reads to the output file, drop them
        # from shortread.txt, and splice the merged sequence back in.
        if Match_seq == otherlineStart:
            print(restoflines)
            MatchedLine = restoflines
            OutFileName = "CompleteDNAsequence.txt"
            outfile = open(OutFileName,'a')
            outfile.write("%s \n" %first)
            outfile.write("%s \n" %MatchedLine)
            outfile.close()
            # Remove the first read from the short-read pool.
            removefile = open("shortread.txt", "r+")
            d = removefile.readlines()
            removefile.seek(0)
            for i in d:
                if i != firstline:
                    removefile.write(i)
            removefile.truncate()
            removefile.close()
            # Remove the read that matched.
            removefile = open("shortread.txt", "r+")
            d = removefile.readlines()
            removefile.seek(0)
            for i in d:
                if i != MatchedLine:
                    removefile.write(i)
            removefile.truncate()
            removefile.close()
            # Join the two written reads into one contiguous sequence.
            OutFileName = "CompleteDNAsequence.txt"
            outfile = open(OutFileName,'r')
            filecontains = outfile.readlines()
            print(filecontains)
            filecontains[:] = [line.rstrip('\n') for line in filecontains]
            filecontains = filecontains[:-1]
            print(filecontains)
            joins = ''.join(filecontains)
            complete = joins.replace(" ","")
            print(complete)
            outfile.close()
            # Rewrite the output with the merged sequence and prepend it
            # to shortread.txt so it can be merged again.
            outfilew = open(OutFileName,'w')
            outfilew.write("%s \n" %complete)
            outoriginalfile = "shortread.txt"
            originalfile = open(outoriginalfile, 'r+')
            lin = originalfile.readlines()
            originalfile.seek(0)
            originalfile.write("%s \n" %complete)
            for lined in lin:
                originalfile.write(lined)
            originalfile.close()
        # No overlap of length `number`: shrink the overlap until one fits.
        else:
            while Match_seq != otherlineStart:
                # Try a shorter overlap on the next pass.
                number = number - 1
                print(number)
                file = open('shortread.txt', 'r')
                firstline = file.readline()
                print(firstline)
                # Recompute the key and candidate prefixes with the new
                # overlap length.
                first = firstline[:-number]
                Match_seq.clear()
                Match_nuc = firstline[-number:]
                Match_seq.append(Match_nuc)
                Match_seq[:] = [line.rstrip('\n') for line in Match_seq]
                # NOTE(review): this iterates over Match_nuc (a string),
                # rebuilding Match_seq from single characters before
                # re-joining them — looks suspicious; confirm intent.
                Match_seq = [x.strip(' ') for x in Match_nuc]
                Match_seq.pop()
                what = ''.join(Match_seq)
                Match_seq.clear()
                Match_seq.append(what)
                print(Match_seq)
                for restoflines in file.readlines():
                    otherlineStart.clear()
                    otherline = restoflines[:number-2]
                    otherlineStart.append(otherline)
                    otherlineStart[:] = [line.rstrip('\n') for line in otherlineStart]
                    print(otherlineStart)
                file.close()
            # A shorter overlap matched: same merge/cleanup sequence as the
            # match branch above.
            # TODO(review): this duplicated block should become a function.
            print(restoflines)
            MatchedLine = restoflines
            OutFileName = "CompleteDNAsequence.txt"
            outfile = open(OutFileName,'a')
            outfile.write("%s \n" %first)
            outfile.write("%s \n" %MatchedLine)
            outfile.close()
            removefile = open("shortread.txt", "r+")
            d = removefile.readlines()
            removefile.seek(0)
            for i in d:
                if i != firstline:
                    removefile.write(i)
            removefile.truncate()
            removefile.close()
            removefile = open("shortread.txt", "r+")
            d = removefile.readlines()
            removefile.seek(0)
            for i in d:
                if i != MatchedLine:
                    removefile.write(i)
            removefile.truncate()
            removefile.close()
            OutFileName = "CompleteDNAsequence.txt"
            outfile = open(OutFileName,'r')
            filecontains = outfile.readlines()
            print(filecontains)
            filecontains[:] = [line.rstrip('\n') for line in filecontains]
            filecontains = filecontains[:-1]
            print(filecontains)
            joins = ''.join(filecontains)
            complete = joins.replace(" ","")
            print(complete)
            outfile.close()
            outfilew = open(OutFileName,'w')
            outfilew.write("%s \n" %complete)
            outoriginalfile = "shortread.txt"
            originalfile = open(outoriginalfile, 'r+')
            lin = originalfile.readlines()
            originalfile.seek(0)
            originalfile.write("%s \n" %complete)
            for lined in lin:
                originalfile.write(lined)
            originalfile.close()
|
import utils
import requests
import json
import pymysql
import traceback
from datetime import datetime
from pandas.io.json import json_normalize
from sqlalchemy import create_engine
import pandas as pd

# Load the dumped weather observations (a JSON array of dicts) and insert
# each record into the `weather` table.
with open(r"weatherload.json", "r") as f:
    records = json.loads(f.read())

# Column order shared by the INSERT statement and the value tuples.
COLUMNS = (
    "weather_main", "weather_description", "temp", "feels_like", "temp_min",
    "temp_max", "pressure", "humidity", "visibility", "wind_speed",
    "wind_deg", "clouds_all", "rain_1h", "rain_3h", "snow_1h", "snow_3h",
    "dt", "sunrise", "sunset",
)

# Parameterized statement: values are bound by the driver, which fixes the
# SQL-injection / quoting problems of the original %-interpolated query.
SQL_INSERT = "insert into weather({}) values ({});".format(
    ", ".join(COLUMNS), ", ".join(["%s"] * len(COLUMNS))
)

conn, cur = utils.get_conn_cur()
for record in records:
    value = tuple(record[col] for col in COLUMNS)
    print(SQL_INSERT, value)
    try:
        cur.execute(SQL_INSERT, value)
        conn.commit()
    except Exception as e:
        # Best-effort load: log the failing row and continue with the rest.
        print(e)
cur.close()
conn.close()
import scrapy
from scrapy.selector import Selector
import re
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor as selink
from scrapy.contrib.spiders import CrawlSpider,Rule
from huxiuspider.items import BookItem
class BookSpider(CrawlSpider):
    """Crawl huxiu.com book listings and article pages for book items."""

    name = "books"
    # Bug fix: scrapy reads `allowed_domains`; the misspelled
    # `allow_domains` attribute was silently ignored, disabling offsite
    # filtering.
    allowed_domains = ["huxiu.com"]
    start_urls = ["http://www.huxiu.com/books"]
    rules = (
        # Follow book listing pages (no callback: link extraction only).
        Rule(selink(allow=(r"books/([0-9]+)\.html",))),
        # Parse article pages for the book details.
        Rule(selink(allow=(r"article/([0-9]+)/1\.html",)),
             callback='parse_3'),
    )

    def parse_2(self, response):
        """Extract a book item from a listing page.

        NOTE(review): no Rule references this callback, so scrapy never
        invokes it; the `return` inside the loop also yields only the
        first match. Kept unchanged for compatibility.
        """
        for sel in response.xpath("//div[@class='clearfix mod-b mod-list-book']"):
            item = BookItem()
            item["title"] = sel.xpath("//ul[@class='clearfix ul-list']/li[1]/i/text()").extract()
            item["name"] = sel.xpath("div[@class='b-info-list']/h3/a/@href").extract()
            return item

    def parse_3(self, response):
        """Extract a book item from an article page."""
        sel = Selector(response)
        item = BookItem()
        item["title"] = sel.xpath("//h1[@class='t-h1']/text()").extract()
        item["name"] = sel.xpath("//ul[@class='clearfix ul-list']/li[1]/i/text()").extract()
        yield item
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 8 02:20:41 2018
@author: Hager - Lab
"""
from prepareLearning import prepare
from GenderModule.genderExtract import GenderDetect
from AgeModule.AgeExtract import AgeDetect
from EmotionModule.EmotionExtract import EmotionDetect
from TextRecognation.TextRecognation import TextRecognation
from EnvironmentModule.EnvironmentExtract import EnvironmentDetect
# Turn a play text file into character / sentence / environment metadata
# files for the downstream pipeline. Candidate inputs below; exactly one
# is left uncommented.
#s="TextRecognation/Kinds/blue_christmas.txt"
s="TextRecognation/Kinds/family_2_0.txt"
#s="TextRecognation/Kinds/while_the_auto_waits.txt"
#s="TextRecognation/Kinds/traces_of_memory.txt"
#s="TextRecognation/Kinds/the_death_of_the_hired_man.txt"
#s="TextRecognation/Kinds/10,000 CIGARETTES.txt"
# NOTE(review): the indentation of this script was reconstructed; there is
# no executable `else` branch for non-.txt inputs (the original one sits
# inside the commented-out block at the bottom).
if s.lower().endswith('.txt'):  # check this file is a text document
    # Read the whole play and split it into lines.
    with open(s, "r") as file:
        Data = file.read()
    s = Data.splitlines()
    # Structural decomposition of the play.
    Fname, Body, Characters, Time, Place, Caution, Scene, Note, Reqs, props = TextRecognation.init(s)
    Names = []
    Gender = []
    Age = []
    Said = []
    Sentence = []
    BetweenBraces = []       # stage directions, used for environment sounds
    BetweenBracesLines = []  # line index where each stage direction appears
    audio_files = []
    continuous_or_not = []
    audio_index = []
    Names, Gender, Age = GenderDetect.init(Characters)
    Said, Sentence, SemiSentence, BetweenBraces, BetweenBracesLines = prepare.__init__(Body, Names, Fname)
    print('BetweenBraces')
    print(BetweenBraces)
    print('BetweenBracesLines')
    print(BetweenBracesLines)
    i = 0
    for line in BetweenBracesLines:
        print(str(BetweenBracesLines[i]) + ' : ' + BetweenBraces[i])
        i += 1
    Age = AgeDetect.__init__(Names, Age, Said, SemiSentence)
    Emotion = EmotionDetect.init(SemiSentence)
    # Characters file: name / age / gender triplets.
    path = 'E:/GP/Graduation-Project-master (1)/Graduation-Project-master/Final/Characters.txt'
    Characters = open(path, 'w')
    k = 0
    while k < len(Names):
        Characters.write(Names[k]+'\n'+str(Age[k])+'\n'+Gender[k]+'\n')
        k += 1
    Characters.close()
    # Sentences file: speaker / emotion / sentence triplets.
    path = 'E:/GP/Graduation-Project-master (1)/Graduation-Project-master/Final/Sentences.txt'
    Sentences = open(path, 'w')
    l = 0
    while l < len(Said):
        # Bug fix: the emotion must be indexed with the loop variable `l`;
        # the original used the stale index `i` left over from the
        # BetweenBracesLines loop above, writing the same (out-of-range or
        # constant) emotion for every sentence.
        Sentences.write(Said[l]+'\n'+str(Emotion[l])+'\n'+Sentence[l]+'\n')
        l += 1
    Sentences.close()
    audio_files, continuous_or_not, audio_index = EnvironmentDetect.__init__(Scene, Place, BetweenBraces, BetweenBracesLines)
    # Environment file: audio file / continuous flag / line index triplets.
    path = 'E:/GP/Graduation-Project-master (1)/Graduation-Project-master/Final/Environment.txt'
    Environment = open(path, 'w')
    m = 0
    while m < len(audio_files):
        Environment.write(audio_files[m]+'\n'+str(continuous_or_not[m])+'\n'+str(audio_index[m])+'\n')
        m += 1
    Environment.close()
    # Commented-out debugging output, kept verbatim as inert strings.
    '''
    print(audio_files)
    print(continuous_or_not)
    print(audio_index)
    '''
    '''
    i=0
    print(Emotion)
    # emotions 1--> angry 2-->fear 3-->Happy 4-->neutral 5-->sad
    for line in SemiSentence:
    print(str(Emotion[i]) + ' : ' + SemiSentence[i])
    i+=1
    print()
    print()
    print(Names)
    print()
    print()
    print(Gender)
    print()
    print()
    print(Age)
    # print(Body)
    # print()
    # print()
    # print(Caution)
    # print()
    # print()
    # print(Characters)
    # print()
    # print()
    # print(Time)
    # print()
    # print()
    # print(Place)
    # print()
    # print()
    # print(Scene)
    # print()
    # print()
    # print(Note)
    # print()
    # print()
    # print(Reqs)
    # print()
    # print()
    # print(props)
    # print()
    # print()
    else: #any other Documents
    print( "you must insert txt file")#error
    '''
# -*- coding: utf-8 -*-
"""
Importance nested sampler.
"""
import datetime
import logging
import os
from typing import Any, List, Literal, Optional, Union
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import logsumexp
from .base import BaseNestedSampler
from .. import config
from ..evidence import _INSIntegralState
from ..model import Model
from ..posterior import draw_posterior_samples
from ..proposal.importance import ImportanceFlowProposal
from ..plot import nessai_style, plot_1d_comparison
from ..livepoint import (
add_extra_parameters_to_live_points,
get_dtype,
numpy_array_to_live_points,
)
from ..utils.hist import auto_bins
from ..utils.information import differential_entropy
from ..utils.optimise import optimise_meta_proposal_weights
from ..utils.stats import (
effective_sample_size,
weighted_quantile,
)
from ..utils.structures import get_subset_arrays, get_inverse_indices
logger = logging.getLogger(__name__)
class ImportanceNestedSampler(BaseNestedSampler):
"""
Parameters
----------
model
User-defined model.
nlive
Number of live points.
tolerance
Tolerance for determining when to stop the sampler.
stopping_criterion
Choice of stopping criterion to use.
check_criteria
If using multiple stopping criteria determines whether any or all
criteria must be met.
threshold_method
Method for determining new likelihood threshold.
threshold_kwargs
Keyword arguments for function that determines the likelihood
threshold.
draw_constant
If specified the sampler will always add a constant number of samples
from each proposal whilst removing a variable amount. If False, the
the number will depend on the level method chosen. Note that this will
override the choice of live points. The number of points draw is set
by the live points.
min_samples
Minimum number of samples that are used for training the next
normalising flow.
min_remove
Minimum number of samples that can be removed when creating the next
level. If less than one, the sampler will stop if the level method
determines no samples should be removed.
plot_likelihood_levels
Enable or disable plotting the likelihood levels.
trace_plot_kwargs
Keyword arguments for the trace plot.
strict_threshold : bool
If true, when drawing new samples, only those with likelihoods above
the current threshold will be added to the live points. If false, all
new samples are added to the live points.
"""
stopping_criterion_aliases = dict(
ratio=["ratio", "ratio_all"],
ratio_ns=["ratio_ns"],
Z_err=["Z_err", "evidence_error"],
log_dZ=["log_dZ", "log_evidence"],
ess=[
"ess",
],
)
"""Dictionary of available stopping criteria and their aliases."""
    def __init__(
        self,
        model: Model,
        nlive: int = 5000,
        n_initial: Optional[int] = None,
        output: Optional[str] = None,
        seed: Optional[int] = None,
        checkpointing: bool = True,
        checkpoint_interval: int = 600,
        checkpoint_on_iteration: bool = False,
        save_existing_checkpoint: bool = False,
        logging_interval: int = None,
        log_on_iteration: bool = True,
        resume_file: Optional[str] = None,
        plot: bool = True,
        plotting_frequency: int = 5,
        min_iteration: Optional[int] = None,
        max_iteration: Optional[int] = None,
        min_samples: int = 500,
        min_remove: int = 1,
        stopping_criterion: str = "ratio",
        tolerance: float = 0.0,
        n_update: Optional[int] = None,
        plot_pool: bool = False,
        plot_level_cdf: bool = False,
        plot_trace: bool = True,
        plot_likelihood_levels: bool = True,
        plot_training_data: bool = False,
        plot_extra_state: bool = False,
        trace_plot_kwargs: Optional[dict] = None,
        replace_all: bool = False,
        threshold_method: Literal["entropy", "quantile"] = "entropy",
        threshold_kwargs: Optional[dict] = None,
        n_pool: Optional[int] = None,
        pool: Optional[Any] = None,
        check_criteria: Literal["any", "all"] = "any",
        weighted_kl: bool = False,
        draw_constant: bool = True,
        train_final_flow: bool = False,
        bootstrap: bool = False,
        close_pool: bool = False,
        strict_threshold: bool = False,
        **kwargs: Any,
    ):
        """Initialise the importance nested sampler.

        See the class docstring for descriptions of the main arguments;
        remaining keyword arguments are forwarded to the proposal via
        :meth:`get_proposal`.
        """
        # Extra live-point fields (logW, logQ) must exist before any
        # samples are created.
        self.add_fields()
        super().__init__(
            model,
            nlive,
            output=output,
            seed=seed,
            checkpointing=checkpointing,
            checkpoint_interval=checkpoint_interval,
            checkpoint_on_iteration=checkpoint_on_iteration,
            logging_interval=logging_interval,
            log_on_iteration=log_on_iteration,
            resume_file=resume_file,
            plot=plot,
            n_pool=n_pool,
            pool=pool,
        )
        # --- Internal state ---
        self._posterior_samples = None
        self.initialised = False
        self.finalised = False
        self.history = None
        self.live_points_ess = np.nan
        self.tolerance = None
        self.criterion = None
        self._stop_any = None
        self._current_proposal_entropy = None
        self.importance = dict(total=None, posterior=None, evidence=None)
        # Default to nlive initial points when n_initial is not given.
        self.n_initial = self.nlive if n_initial is None else n_initial
        self.min_samples = min_samples
        self.min_remove = min_remove
        self.n_update = n_update
        # --- Plotting configuration ---
        self.plot_pool = plot_pool
        self._plot_level_cdf = plot_level_cdf
        self._plot_trace = plot_trace
        self._plot_likelihood_levels = plot_likelihood_levels
        self._plot_extra_state = plot_extra_state
        self.trace_plot_kwargs = (
            {} if trace_plot_kwargs is None else trace_plot_kwargs
        )
        self.plot_training_data = plot_training_data
        self.plotting_frequency = plotting_frequency
        # --- Level / threshold configuration ---
        self.replace_all = replace_all
        self.threshold_method = threshold_method
        self.threshold_kwargs = (
            {} if threshold_kwargs is None else threshold_kwargs
        )
        self.strict_threshold = strict_threshold
        # Running log-volume and likelihood thresholds.
        self.logX = 0.0
        self.logL_threshold = -np.inf
        self.logL_pre = -np.inf
        self.logL = -np.inf
        self.draw_constant = draw_constant
        self._train_final_flow = train_final_flow
        self.bootstrap = bootstrap
        self.bootstrap_log_evidence = None
        self.bootstrap_log_evidence_error = None
        self.weighted_kl = weighted_kl
        self.save_existing_checkpoint = save_existing_checkpoint
        # Stopping-criterion values start at their "never stop" extremes.
        self.log_dZ = np.inf
        self.ratio = np.inf
        self.ratio_ns = np.inf
        self.ess = 0.0
        self.Z_err = np.inf
        # Evidence integral state and final (re-sampled) results.
        self.state = _INSIntegralState()
        self.final_state = None
        self.final_samples = None
        self.proposal = self.get_proposal(**kwargs)
        self.configure_iterations(min_iteration, max_iteration)
        self.configure_stopping_criterion(
            stopping_criterion,
            tolerance,
            check_criteria,
        )
        # All samples live in one array; live/nested status is tracked via
        # index arrays into it.
        self.samples = np.empty(0, dtype=get_dtype(self.model.names))
        self.log_q = None
        self.live_points_indices = None
        self.nested_samples_indices = np.empty(0, dtype=int)
        # Timing accumulators.
        self.training_time = datetime.timedelta()
        self.draw_samples_time = datetime.timedelta()
        self.add_and_update_samples_time = datetime.timedelta()
        self.draw_final_samples_time = datetime.timedelta()
        if self.replace_all:
            logger.warning("Replace all is experimental")
        if close_pool:
            logger.critical(
                "ImportanceNestedSampler will NOT close the multiprocessing "
                "pool automatically. This must be done manually."
            )
        self.check_configuration()
@property
def log_evidence(self) -> float:
return self.state.logZ
@property
def log_evidence_error(self) -> float:
return self.state.compute_uncertainty()
@property
def final_log_evidence(self) -> float:
if self.final_state:
return self.final_state.log_evidence
else:
return None
@property
def final_log_evidence_error(self) -> float:
if self.final_state:
return self.final_state.log_evidence_error
else:
return None
@property
def posterior_effective_sample_size(self) -> float:
"""The effective sample size of the posterior distribution.
Returns the value for the posterior samples from the resampling step if
they are available, otherwise falls back to the samples from the
initial sampling.
"""
if self.final_state:
return self.final_state.effective_n_posterior_samples
else:
return self.state.effective_n_posterior_samples
    @property
    def samples_entropy(self) -> float:
        """Differential entropy of all of the samples (nested + live).

        Notes
        -----
        Compute the Monte Carlo approximation of

        .. math::
            -\\int W(x) \\log W(x) dx

        where :math:`W(x) = \\pi(x)/Q(x)`.
        """
        return differential_entropy(self.samples["logW"])

    @property
    def current_proposal_entropy(self) -> float:
        """Differential entropy of the current proposal."""
        return self._current_proposal_entropy
@property
def live_points(self) -> np.ndarray:
"""The current set of live points"""
if self.live_points_indices is None:
return None
else:
return self.samples[self.live_points_indices]
@live_points.setter
def live_points(self, value):
if value is not None:
raise ValueError("Can only set live points to None!")
self.live_points_indices = None
@property
def nested_samples(self) -> np.ndarray:
"""The current set of discarded points"""
if self.nested_samples_indices is None:
return None
else:
return self.samples[self.nested_samples_indices]
@property
def reached_tolerance(self) -> bool:
"""Indicates if tolerance has been reached.
Checks if any or all of the criteria have been met, this depends on the
value of :code:`check_criteria`.
"""
if self._stop_any:
return any(
[c <= t for c, t in zip(self.criterion, self.tolerance)]
)
else:
return all(
[c <= t for c, t in zip(self.criterion, self.tolerance)]
)
    @staticmethod
    def add_fields():
        """Add the extra live-point fields logW and logQ to the dtype."""
        add_extra_parameters_to_live_points(["logW", "logQ"])
    def configure_stopping_criterion(
        self,
        stopping_criterion: Union[str, List[str]],
        tolerance: Union[float, List[float]],
        check_criteria: Literal["any", "all"],
    ) -> None:
        """Configure the stopping criterion.

        Parameters
        ----------
        stopping_criterion
            Criterion name(s); aliases listed in
            :attr:`stopping_criterion_aliases` are accepted.
        tolerance
            Tolerance, or one tolerance per criterion.
        check_criteria
            Whether "any" or "all" criteria must be met to stop.

        Raises
        ------
        ValueError
            If a criterion is unknown, the number of tolerances does not
            match the number of criteria, or ``check_criteria`` is invalid.
        """
        if isinstance(stopping_criterion, str):
            stopping_criterion = [stopping_criterion]
        if isinstance(tolerance, list):
            self.tolerance = [float(t) for t in tolerance]
        else:
            self.tolerance = [float(tolerance)]
        self.stopping_criterion = []
        # Map each requested criterion (possibly an alias) to its canonical
        # name.
        for c in stopping_criterion:
            for criterion, aliases in self.stopping_criterion_aliases.items():
                if c in aliases:
                    self.stopping_criterion.append(criterion)
        if not self.stopping_criterion:
            raise ValueError(
                f"Unknown stopping criterion: {stopping_criterion}"
            )
        for c, c_use in zip(stopping_criterion, self.stopping_criterion):
            if c != c_use:
                logger.info(
                    f"Stopping criterion specified ({c}) is "
                    f"an alias for {c_use}. Using {c_use}."
                )
        if len(self.stopping_criterion) != len(self.tolerance):
            raise ValueError(
                "Number of stopping criteria must match tolerances"
            )
        # Criteria start at +inf so the sampler never stops immediately.
        self.criterion = len(self.tolerance) * [np.inf]
        logger.info(f"Stopping criteria: {self.stopping_criterion}")
        logger.info(f"Tolerance: {self.tolerance}")
        if check_criteria not in {"any", "all"}:
            raise ValueError("check_criteria must be any or all")
        if check_criteria == "any":
            self._stop_any = True
        else:
            self._stop_any = False
def get_proposal(self, subdir: str = "levels", **kwargs):
"""Configure the proposal."""
output = os.path.join(self.output, subdir, "")
proposal = ImportanceFlowProposal(
self.model, output, self.n_initial, **kwargs
)
return proposal
def configure_iterations(
self,
min_iteration: Optional[int] = None,
max_iteration: Optional[int] = None,
) -> None:
"""Configure the minimum and maximum iterations.
Note: will override any existing values when called.
"""
if min_iteration is None:
self.min_iteration = -1
else:
self.min_iteration = int(min_iteration)
if max_iteration is None:
self.max_iteration = np.inf
else:
self.max_iteration = int(max_iteration)
def check_configuration(self) -> bool:
"""Check sampler configuration is valid.
Returns true if all checks pass.
"""
if self.min_samples > self.nlive:
raise ValueError("`min_samples` must be less than `nlive`")
if self.min_remove > self.nlive:
raise ValueError("`min_remove` must be less than `nlive`")
return True
def sort_points(self, x: np.ndarray, *args) -> np.ndarray:
"""Correctly sort new live points.
Parameters
----------
x
Array to sort
args
Any extra iterables to sort in the same way as x.
"""
idx = np.argsort(x, order="logL")
if len(args):
return get_subset_arrays(idx, x, *args)
else:
return x[idx]
    def populate_live_points(self) -> None:
        """Draw the initial live points from the prior.

        The live points are automatically sorted and assigned the iteration
        number -1.
        """
        live_points = np.empty(
            self.n_initial, dtype=get_dtype(self.model.names)
        )
        n = 0
        logger.debug(f"Drawing {self.n_initial} initial points")
        # Rejection-sample until n_initial points with finite log-prior
        # have been collected.
        while n < self.n_initial:
            points = self.model.from_unit_hypercube(
                numpy_array_to_live_points(
                    np.random.rand(self.n_initial, self.model.dims),
                    self.model.names,
                )
            )
            points["logP"] = self.model.log_prior(points)
            accept = np.isfinite(points["logP"])
            n_it = accept.sum()
            # Keep at most the number of points still needed.
            m = min(n_it, self.n_initial - n)
            live_points[n : (n + m)] = points[accept][:m]
            n += m
        live_points["logL"] = self.model.batch_evaluate_log_likelihood(
            live_points
        )
        if not np.isfinite(live_points["logL"]).all():
            logger.warning("Found infinite values in the log-likelihood")
        # +inf log-likelihoods cannot be handled downstream; fail early.
        if np.any(live_points["logL"] == np.inf):
            raise RuntimeError("Live points contain +inf log-likelihoods")
        # Iteration -1 marks points drawn directly from the prior.
        live_points["it"] = -np.ones(live_points.size)
        # Since log_Q is computed in the unit-cube
        live_points["logQ"] = np.zeros(live_points.size)
        live_points["logW"] = -live_points["logQ"]
        self.samples = self.sort_points(live_points)
        self.live_points_indices = np.arange(live_points.size, dtype=int)
        self.log_q = np.zeros([live_points.size, 1])
    def initialise(self) -> None:
        """Initialise the nested sampler.

        Draws live points, initialises the proposal.
        """
        if self.initialised:
            logger.warning("Nested sampler has already initialised!")
        # Only draw new points when none exist (e.g. not when resuming).
        if self.live_points is None:
            self.populate_live_points()
        self.initialise_history()
        self.proposal.initialise()
        self.initialised = True
def initialise_history(self) -> None:
"""Initialise the dictionary to store history"""
if self.history is None:
logger.debug("Initialising history dictionary")
self.history = dict(
min_logL=[],
max_logL=[],
logL_threshold=[],
logX=[],
gradients=[],
median_logL=[],
leakage_live_points=[],
leakage_new_points=[],
logZ=[],
n_live=[],
n_added=[],
n_removed=[],
n_post=[],
live_points_ess=[],
pool_entropy=[],
samples_entropy=[],
proposal_entropy=[],
likelihood_evaluations=[],
stopping_criteria={
k: [] for k in self.stopping_criterion_aliases.keys()
},
)
else:
logger.debug("History dictionary already initialised")
    def update_history(self) -> None:
        """Append the current iteration's statistics to the history dict."""
        self.history["min_logL"].append(np.min(self.live_points["logL"]))
        self.history["max_logL"].append(np.max(self.live_points["logL"]))
        self.history["median_logL"].append(np.median(self.live_points["logL"]))
        self.history["logL_threshold"].append(self.logL_threshold)
        self.history["logX"].append(self.logX)
        self.history["gradients"].append(self.gradient)
        self.history["logZ"].append(self.state.logZ)
        self.history["n_post"].append(self.state.effective_n_posterior_samples)
        self.history["samples_entropy"].append(self.samples_entropy)
        self.history["proposal_entropy"].append(self.current_proposal_entropy)
        self.history["live_points_ess"].append(self.live_points_ess)
        self.history["likelihood_evaluations"].append(
            self.model.likelihood_evaluations
        )
        # Criteria that have not been computed yet are recorded as NaN.
        for k in self.stopping_criterion_aliases.keys():
            self.history["stopping_criteria"][k].append(
                getattr(self, k, np.nan)
            )
    def determine_threshold_quantile(
        self, q: float = 0.8, include_likelihood: bool = False
    ) -> int:
        """Determine where the next likelihood threshold should be located.

        Computes the q'th quantile based on log-likelihood and log-weights.

        Parameters
        ----------
        q : float
            Quantile to use. Defaults to 0.8
        include_likelihood : bool
            If True, the likelihood is included in the weights.

        Returns
        -------
        int
            The number of live points to discard.

        Raises
        ------
        RuntimeError
            If the computed quantile is not finite.
        """
        logger.debug(f"Determining {q:.3f} quantile")
        a = self.live_points["logL"]
        if include_likelihood:
            # Posterior-style weights: logW + logL.
            log_weights = self.live_points["logW"] + self.live_points["logL"]
        else:
            log_weights = self.live_points["logW"].copy()
        # values_sorted=True: assumes the live points are sorted by logL
        # (see the sort in add_samples / populate_live_points).
        cutoff = weighted_quantile(
            a, q, log_weights=log_weights, values_sorted=True
        )
        if not np.isfinite(cutoff):
            raise RuntimeError("Could not determine valid quantile")
        # argmax on the boolean array gives the first index at or above the
        # cutoff, i.e. the number of points that fall below it.
        n = np.argmax(a >= cutoff)
        logger.debug(f"{q:.3} quantile is logL ={cutoff}")
        return int(n)
    def determine_threshold_entropy(
        self,
        q: float = 0.5,
        include_likelihood: bool = False,
        use_log_weights: bool = True,
    ) -> int:
        """Determine where the next likelihood threshold should be located
        using the entropy method.

        Parameters
        ----------
        q
            Fraction by which to shrink the current level.
        include_likelihood
            Boolean to indicate whether the likelihood is included in the
            weights for each samples.
        use_log_weights
            Boolean to determine if the CDF is computed using the weights or
            log-weights.

        Returns
        -------
        int
            The number of live points to discard.
        """
        if include_likelihood:
            log_weights = self.live_points["logW"] + self.live_points["logL"]
        else:
            log_weights = self.live_points["logW"]
        if use_log_weights:
            p = log_weights
        else:
            p = np.exp(log_weights)
        cdf = np.cumsum(p)
        # Degenerate case: all entries zero — fall back to a uniform CDF so
        # the normalisation below does not divide by zero.
        if cdf.sum() == 0:
            cdf = np.arange(len(p), dtype=float)
        cdf /= cdf[-1]
        # First index at which the normalised CDF reaches q.
        n = np.argmax(cdf >= q)
        if self.plot and self._plot_level_cdf:
            output = os.path.join(
                self.output, "levels", f"level_{self.iteration}"
            )
            os.makedirs(output, exist_ok=True)
            self.plot_level_cdf(
                cdf,
                threshold=self.live_points["logL"][n],
                q=q,
                filename=os.path.join(output, "cdf.png"),
            )
        return int(n)
    @nessai_style
    def plot_level_cdf(
        self,
        cdf: np.ndarray,
        threshold: float,
        q: float,
        filename: Optional[str] = None,
    ) -> Union[matplotlib.figure.Figure, None]:
        """Plot the CDF of the log-likelihood

        Parameters
        ----------
        cdf : np.ndarray
            The CDF to plot
        threshold : float
            Log-likelihood threshold, drawn as a vertical line.
        q : float
            Quantile, drawn as a horizontal line.
        filename : Optional[str]
            Filename for saving the figure. If not specified the figure will
            be returned instead.

        Returns
        -------
        matplotlib.figure.Figure
            Level CDF figure. Only returned when the filename is not
            specified.
        """
        fig = plt.figure()
        plt.plot(self.live_points["logL"], cdf)
        plt.xlabel("Log-likelihood")
        plt.title("CDF")
        plt.axhline(q, c="C1")
        plt.axvline(threshold, c="C1")
        if filename is not None:
            fig.savefig(filename)
            plt.close()
        else:
            return fig
    def determine_likelihood_threshold(
        self, method="entropy", **kwargs
    ) -> int:
        """Determine the next likelihood threshold

        Parameters
        ----------
        method : str
            Method to use: either 'entropy' or 'quantile'.
        kwargs
            Keyword arguments passed to the chosen method.

        Returns
        -------
        int :
            The number of samples to remove from the current live points.

        Raises
        ------
        ValueError
            If the method is not recognised.
        """
        if method == "quantile":
            n = self.determine_threshold_quantile(**kwargs)
        elif method == "entropy":
            n = self.determine_threshold_entropy(**kwargs)
        else:
            raise ValueError(method)
        logger.debug(f"Next iteration should remove {n} points")
        # Enforce the configured bounds: at least min_remove points must be
        # removed and at least min_samples must remain alive.
        if n == 0:
            if self.min_remove < 1:
                return 0
            else:
                n = 1
        if (self.live_points.size - n) < self.min_samples:
            logger.warning(
                f"Cannot remove {n} from {self.live_points.size}, "
                f"min_samples={self.min_samples}"
            )
            # Remove as many as possible whilst keeping min_samples alive.
            n = max(0, self.live_points.size - self.min_samples)
        elif n < self.min_remove:
            logger.warning(
                f"Cannot remove less than {self.min_remove} samples"
            )
            n = self.min_remove
        logger.info(
            f"Removing {n}/{self.live_points.size} samples to train next "
            "proposal"
        )
        # Live points are sorted by logL, so the n-th point defines the new
        # threshold.
        self.logL_threshold = self.live_points[n]["logL"].copy()
        logger.info(f"Log-likelihood threshold: {self.logL_threshold}")
        return n
    def add_new_proposal(self):
        """Add a new proposal to the meta proposal.

        Trains the next proposal on the samples at or above the current
        log-likelihood threshold and accumulates the training time.
        """
        st = datetime.datetime.now()
        # Implicitly includes all samples
        # (argmax of an all-False array is 0, so n_train = 0 in that case).
        n_train = np.argmax(self.samples["logL"] >= self.logL_threshold)
        self.training_samples = self.samples[n_train:].copy()
        self.training_log_q = self.log_q[n_train:, :].copy()
        logger.info(
            f"Training next proposal with {len(self.training_samples)} samples"
        )
        logger.debug("Updating the contour")
        logger.debug(
            "Training data ESS: "
            f"{effective_sample_size(self.training_samples['logW'])}"
        )
        if self.replace_all:
            # NOTE(review): these weights are negative (−exp of the last
            # proposal's log-prob) — presumably handled by proposal.train;
            # confirm.
            weights = -np.exp(self.training_log_q[:, -1])
        elif self.weighted_kl:
            # Normalised weights derived from logW.
            log_w = self.training_samples["logW"].copy()
            log_w -= logsumexp(log_w)
            weights = np.exp(log_w)
        else:
            weights = None
        self.proposal.train(
            self.training_samples,
            plot=self.plot_training_data,
            weights=weights,
        )
        self.training_time += datetime.datetime.now() - st
    def draw_n_samples(self, n: int):
        """Draw n samples from the current proposal

        Includes computing the log-likelihood of the samples.

        Parameters
        ----------
        n : int
            Number of samples to draw.

        Returns
        -------
        The new samples and the corresponding array of log-probabilities
        (log_q) under each proposal.
        """
        st = datetime.datetime.now()
        logger.info(f"Drawing {n} samples from the new proposal")
        new_points, log_q = self.proposal.draw(n)
        logger.debug("Evaluating likelihood for new points")
        new_points["logL"] = self.model.batch_evaluate_log_likelihood(
            new_points
        )
        logger.debug(
            "Min. log-likelihood of new samples: "
            f"{np.min(new_points['logL'])}"
        )
        # Non-finite likelihoods are tolerated but flagged.
        if not np.isfinite(new_points["logL"]).all():
            logger.warning("Log-likelihood contains infs")
        if np.any(new_points["logL"] == -np.inf):
            logger.warning("New points contain points with zero likelihood")
        # Record the fraction of new points below the current threshold.
        self.history["leakage_new_points"].append(
            self.compute_leakage(new_points)
        )
        self.draw_samples_time += datetime.datetime.now() - st
        return new_points, log_q
def compute_leakage(
self, samples: np.ndarray, weights: bool = True
) -> float:
"""Compute the leakage for a number of samples.
Parameters
----------
samples : numpy.ndarray
Array of samples.
weights : bool
If True, the weight of each sample is accounted for in the
calculation.
Returns
-------
float
The leakage as a fraction of the total number of samples
(or effective sample size if weights is True).
"""
if weights:
return (
np.sum(samples["logW"][samples["logL"] < self.logL_threshold])
/ samples["logW"].sum()
)
else:
return (samples["logL"] < self.logL_threshold).sum() / samples.size
def compute_importance(self, G: float = 0.5):
"""Compute the importance
Parameters
----------
G :
relative importance of the posterior versus the evidence. G=1 is
only the posterior and G=0 is only the evidence,
Returns
-------
dict
Dictionary containing the total, posterior and evidence importance
as a function of iteration.
"""
log_imp_post = np.empty(self.log_q.shape[1])
log_imp_z = np.empty(self.log_q.shape[1])
for i, it in enumerate(range(-1, self.log_q.shape[-1] - 1)):
sidx = np.where(self.samples["it"] == it)[0]
zidx = np.where(self.samples["it"] >= it)[0]
log_imp_post[i] = logsumexp(
self.samples["logL"][sidx] + self.samples["logW"][sidx]
) - np.log(len(sidx))
log_imp_z[i] = logsumexp(
self.samples["logL"][zidx] + self.samples["logW"][zidx]
) - np.log(len(zidx))
imp_z = np.exp(log_imp_z - logsumexp(log_imp_z))
imp_post = np.exp(log_imp_post - logsumexp(log_imp_post))
imp = (1 - G) * imp_z + G * imp_post
return {"total": imp, "posterior": imp_post, "evidence": imp_z}
    def add_samples(self, samples: np.ndarray, log_q: np.ndarray) -> None:
        """Add samples the existing samples

        Samples MUST be sorted by logL.

        Updates :code:`self.samples`, :code:`self.log_q` and the index
        arrays that partition the samples into nested samples and live
        points.
        """
        # Insert samples into existing samples
        indices = np.searchsorted(self.samples["logL"], samples["logL"])
        self.samples = np.insert(self.samples, indices, samples)
        self.log_q = np.insert(self.log_q, indices, log_q, axis=0)
        if self.strict_threshold:
            # Strict mode: everything below the threshold is a nested
            # sample, everything at or above stays live.
            n = np.argmax(self.samples["logL"] >= self.logL_threshold)
            indices = np.arange(len(self.samples))
            self.nested_samples_indices = indices[:n]
            self.live_points_indices = indices[n:]
        else:
            # Indices after insertion are indices + n before
            new_indices = indices + np.arange(len(indices))
            # Indices of all previous samples
            old_indices = get_inverse_indices(self.samples.size, new_indices)
            if len(old_indices) != (self.samples.size - samples.size):
                raise RuntimeError("Mismatch in updated_indices!")
            # Updated indices of nested samples
            self.nested_samples_indices = old_indices[
                self.nested_samples_indices
            ]
            if self.live_points_indices is None:
                self.live_points_indices = new_indices
            else:
                # Remap the existing live-point indices to their post-insert
                # positions, then merge in the new indices keeping the
                # array sorted.
                self.live_points_indices = old_indices[
                    self.live_points_indices
                ]
                insert_indices = np.searchsorted(
                    self.live_points_indices, new_indices
                )
                self.live_points_indices = np.insert(
                    self.live_points_indices,
                    insert_indices,
                    new_indices,
                )
    def add_and_update_points(self, n: int):
        """Add new points to the current set of live points.

        Draws new samples, updates the meta-proposal terms (logQ, logW)
        for all existing samples and records the relevant history.

        Parameters
        ----------
        n : int
            The number of points to add.
        """
        st = datetime.datetime.now()
        logger.debug(f"Adding {n} points")
        new_samples, log_q = self.draw_n_samples(n)
        new_samples, log_q = self.sort_points(new_samples, log_q)
        # Entropy of the newest proposal's log-probabilities (last column).
        self._current_proposal_entropy = differential_entropy(-log_q[:, -1])
        new_samples["it"] = self.iteration
        logger.debug(
            "New samples ESS: " f"{effective_sample_size(new_samples['logW'])}"
        )
        if self.plot and self.plot_pool:
            plot_1d_comparison(
                self.training_samples,
                new_samples,
                filename=os.path.join(
                    self.output, "levels", f"pool_{self.iteration}.png"
                ),
            )
        # The meta proposal has changed, so logQ and logW must be
        # recomputed for the existing samples before the new ones are
        # inserted.
        self.log_q = self.proposal.update_log_q(self.samples, self.log_q)
        self.samples["logQ"] = self.proposal.compute_meta_proposal_from_log_q(
            self.log_q
        )
        self.samples["logW"] = -self.samples["logQ"]
        self.history["n_added"].append(new_samples.size)
        self.add_samples(new_samples, log_q)
        live_points = self.live_points
        self.history["n_live"].append(live_points.size)
        self.live_points_ess = effective_sample_size(live_points["logW"])
        self.history["leakage_live_points"].append(
            self.compute_leakage(live_points)
        )
        logger.debug(f"Current live points ESS: {self.live_points_ess:.2f}")
        self.add_and_update_samples_time += datetime.datetime.now() - st
def add_to_nested_samples(self, indices: np.ndarray) -> None:
"""Add an array of samples to the nested samples."""
sort_indices = np.searchsorted(self.nested_samples_indices, indices)
self.nested_samples_indices = np.insert(
self.nested_samples_indices,
sort_indices,
indices,
)
    def remove_samples(self, n: int) -> None:
        """Remove samples from the current set of live points.

        Removed samples are moved to the nested samples; only the index
        arrays change, the samples themselves are kept.

        Parameters
        ----------
        n : int
            The number of samples to remove. Ignored when
            :code:`replace_all` is enabled, in which case all live points
            are removed.
        """
        if self.replace_all:
            self.history["n_removed"].append(self.live_points.size)
        else:
            self.history["n_removed"].append(n)
        logger.debug(f"Removing {n} points")
        if self.replace_all:
            self.add_to_nested_samples(self.live_points_indices)
            self.live_points_indices = None
        else:
            # Live points are sorted by logL, so the first n entries are
            # those with the lowest likelihoods.
            self.add_to_nested_samples(self.live_points_indices[:n])
            self.live_points_indices = np.delete(
                self.live_points_indices, np.s_[:n]
            )
    def adjust_final_samples(self, n_batches=5):
        """Adjust the final samples

        Bootstraps the log-evidence: for each batch, resamples the number
        of samples per proposal (multinomial), draws any missing samples,
        recomputes the evidence, and finally stores the mean and standard
        error in :code:`bootstrap_log_evidence` and
        :code:`bootstrap_log_evidence_error`.

        Parameters
        ----------
        n_batches : int
            Number of bootstrap batches.
        """
        orig_n_total = self.samples.size
        its, counts = np.unique(self.samples["it"], return_counts=True)
        assert counts.sum() == orig_n_total
        weights = counts / orig_n_total
        original_unnorm_weight = counts.copy()
        norm_weight = original_unnorm_weight / original_unnorm_weight.sum()
        logger.debug(f"Final counts: {counts}")
        logger.debug(f"Final weights: {weights}")
        logger.debug(f"Final its: {list(self.proposal.n_requested.keys())}")
        # Work on copies sorted by iteration so samples from the same
        # proposal are contiguous.
        sort_idx = np.argsort(self.samples, order="it")
        samples = self.samples[sort_idx].copy()
        log_q = self.log_q[sort_idx].copy()
        n_total = samples.size
        # This changes the proposal because the number of samples changes
        log_evidences = np.empty(n_batches)
        log_evidence_errors = np.empty(n_batches)
        proposal = self.proposal
        for i in range(n_batches):
            # Resample how many samples each proposal contributes.
            new_counts = np.random.multinomial(
                orig_n_total,
                weights,
            )
            logger.debug(f"New counts: {new_counts}")
            logger.debug(new_counts.sum())
            # Draw missing samples
            for it, c, nc in zip(its, counts, new_counts):
                if nc > c:
                    logger.debug(f"Drawing {nc - c} samples from {it}")
                    # it == -1 labels samples drawn from the prior.
                    if it == -1:
                        new_samples, new_log_q = proposal.draw_from_prior(
                            nc - c
                        )
                    else:
                        new_samples, new_log_q = proposal.draw(
                            n=(nc - c),
                            flow_number=it,
                            update_counts=False,
                        )
                    new_samples["it"] = it
                    new_samples[
                        "logL"
                    ] = self.model.batch_evaluate_log_likelihood(new_samples)
                    # Keep the working arrays sorted by iteration.
                    new_loc = np.searchsorted(samples["it"], new_samples["it"])
                    samples = np.insert(samples, new_loc, new_samples)
                    log_q = np.insert(log_q, new_loc, new_log_q, axis=0)
                    n_total = samples.size
                    counts = np.unique(samples["it"], return_counts=True)[1]
                    logger.debug(f"Updated counts: {counts}")
            # Randomly keep exactly new_counts samples from each proposal.
            idx_keep = np.zeros(n_total, dtype=bool)
            cc = 0
            for it, c, nc in zip(its, counts, new_counts):
                assert c >= nc
                idx = np.random.choice(
                    np.arange(cc, cc + c), size=nc, replace=False
                )
                idx_keep[idx] = True
                assert np.all(samples[idx]["it"] == it)
                cc += c
            batch_samples = samples[idx_keep]
            batch_log_q = log_q[idx_keep]
            assert batch_samples.size == orig_n_total
            log_Q = logsumexp(batch_log_q, b=norm_weight, axis=1)
            # Weights are normalised because the total number of samples is the
            # same.
            batch_samples["logQ"] = log_Q
            batch_samples["logW"] = -log_Q
            state = _INSIntegralState()
            state.log_meta_constant = 0.0
            state.update_evidence(batch_samples)
            log_evidences[i] = state.log_evidence
            log_evidence_errors[i] = state.log_evidence_error
            logger.debug(f"Log-evidence batch {i} = {log_evidences[i]:.3f}")
        mean_log_evidence = np.mean(log_evidences)
        standard_error = np.std(log_evidences, ddof=1)
        logger.info(f"Mean log evidence: {mean_log_evidence:.3f}")
        logger.info(f"SE log evidence: {standard_error:.3f}")
        self.bootstrap_log_evidence = mean_log_evidence
        self.bootstrap_log_evidence_error = standard_error
    def finalise(self) -> None:
        """Finalise the sampling process.

        Moves all remaining live points into the nested samples, updates
        the evidence, optionally bootstraps the final samples, then
        checkpoints and produces the final plots. Does nothing if the
        sampler is already finalised.
        """
        if self.finalised:
            logger.warning("Sampler already finalised")
            return
        logger.info("Finalising")
        if self._train_final_flow:
            self.train_final_flow()
        self.add_to_nested_samples(self.live_points_indices)
        self.live_points = None
        self.state.update_evidence(self.samples)
        if self.bootstrap:
            self.adjust_final_samples()
        final_kl = self.kl_divergence(self.samples)
        logger.info(
            f"Final log Z: {self.state.logZ:.3f} "
            f"+/- {self.state.compute_uncertainty():.3f}"
        )
        logger.info(f"Final KL divergence: {final_kl:.3f}")
        logger.info(
            f"Final ESS: {self.state.effective_n_posterior_samples:.3f}"
        )
        self.finalised = True
        self.checkpoint(periodic=True, force=True)
        self.produce_plots()
    def add_level_post_sampling(self, samples: np.ndarray, n: int) -> None:
        """Add a level to the nested sampler after initial sampling has \
        completed.

        Trains the proposal on :code:`samples`, draws :code:`n` new
        samples and updates the evidence.
        """
        self.proposal.train(samples)
        new_samples, log_q = self.draw_n_samples(n)
        log_q = self.update_live_points(new_samples, log_q)
        self.update_nested_samples()
        # NOTE(review): elsewhere `add_to_nested_samples` is passed index
        # arrays, but here it receives the samples themselves — confirm
        # this is intentional.
        self.add_to_nested_samples(new_samples)
        self.state.update_evidence(self.nested_samples)
    def compute_stopping_criterion(self) -> List[float]:
        """Compute the stopping criterion.

        The method used will depend on how the sampler was configured.

        Returns
        -------
        List[float]
            The current value of each configured stopping criterion.
        """
        # Change in the log-evidence since the previous iteration.
        if self.iteration > 0:
            self.log_dZ = np.abs(self.log_evidence - self.history["logZ"][-1])
        else:
            self.log_dZ = np.inf
        self.ratio = self.state.compute_evidence_ratio(ns_only=False)
        self.ratio_ns = self.state.compute_evidence_ratio(ns_only=True)
        self.kl = self.kl_divergence(self.samples)
        self.ess = self.state.effective_n_posterior_samples
        self.Z_err = np.exp(self.log_evidence_error)
        # Look up each configured criterion by attribute name.
        cond = [getattr(self, sc) for sc in self.stopping_criterion]
        logger.info(
            f"Stopping criteria ({self.stopping_criterion}): {cond} "
            f"- Tolerance: {self.tolerance}"
        )
        return cond
    def checkpoint(self, periodic: bool = False, force: bool = False):
        """Checkpoint the sampler.

        Parameters
        ----------
        periodic : bool
            Must be True; this sampler only supports checkpointing between
            iterations, so non-periodic requests are ignored with a
            warning.
        force : bool
            Passed through to the parent class checkpoint method.
        """
        if periodic is False:
            logger.warning(
                "Importance Sampler cannot checkpoint mid iteration"
            )
            return
        super().checkpoint(
            periodic=periodic,
            force=force,
            save_existing=self.save_existing_checkpoint,
        )
def _compute_gradient(self) -> None:
self.logX_pre = self.logX
self.logX = logsumexp(self.live_points["logW"])
self.logL_pre = self.logL
self.logL = logsumexp(
self.live_points["logL"] - self.live_points["logQ"]
)
self.dlogX = self.logX - self.logX_pre
self.dlogL = self.logL - self.logL_pre
self.gradient = self.dlogL / self.dlogX
    def log_state(self):
        """Log the state of the sampler"""
        # One-line summary per iteration: evidence, ESS and the spread of
        # log-likelihoods across the current live points.
        logger.info(
            f"Update {self.iteration} - "
            f"log Z: {self.state.logZ:.3f} +/- "
            f"{self.state.compute_uncertainty():.3f} "
            f"ESS: {self.ess:.1f} "
            f"logL min: {self.live_points['logL'].min():.3f} "
            f"logL median: {np.nanmedian(self.live_points['logL']):.3f} "
            f"logL max: {self.live_points['logL'].max():.3f}"
        )
    def nested_sampling_loop(self):
        """Main nested sampling loop.

        Returns
        -------
        The log-evidence and the array of nested samples.
        """
        if self.finalised:
            logger.warning("Sampler has already finished sampling! Aborting")
            return self.log_evidence, self.nested_samples
        self.initialise()
        logger.info("Starting the nested sampling loop")
        while True:
            # Stop once the tolerance is reached, but only after the
            # minimum number of iterations.
            if self.reached_tolerance and self.iteration >= self.min_iteration:
                break
            self._compute_gradient()
            if self.n_update is None:
                # Adaptive: choose how many points to remove this iteration.
                n_remove = self.determine_likelihood_threshold(
                    method=self.threshold_method, **self.threshold_kwargs
                )
                if n_remove == 0:
                    logger.warning("No points to remove")
                    logger.warning("Stopping")
                    break
            else:
                # Fixed number of points removed per iteration.
                n_remove = self.n_update
            self.remove_samples(n_remove)
            self.add_new_proposal()
            if self.draw_constant or self.replace_all:
                n_add = self.nlive
            else:
                n_add = n_remove
            self.add_and_update_points(n_add)
            self.importance = self.compute_importance(G=0.5)
            self.state.update_evidence(self.nested_samples, self.live_points)
            self.criterion = self.compute_stopping_criterion()
            self.log_state()
            self.update_history()
            self.iteration += 1
            if not self.iteration % self.plotting_frequency:
                self.produce_plots()
            if self.checkpointing:
                self.checkpoint(periodic=True)
            # Hard cap on the number of iterations.
            if self.iteration >= self.max_iteration:
                break
        logger.info(
            f"Finished nested sampling loop after {self.iteration} iterations "
            f"with {self.stopping_criterion} = {self.criterion}"
        )
        self.finalise()
        logger.info(f"Training time: {self.training_time}")
        logger.info(f"Draw samples time: {self.draw_samples_time}")
        logger.info(
            f"Add and update samples time: {self.add_and_update_samples_time}"
        )
        logger.info(f"Log-likelihood time: {self.likelihood_evaluation_time}")
        return self.log_evidence, self.nested_samples
    def draw_posterior_samples(
        self,
        sampling_method: str = "rejection_sampling",
        n: Optional[int] = None,
        use_final_samples: bool = True,
    ) -> np.ndarray:
        """Draw posterior samples from the current nested samples.

        Parameters
        ----------
        sampling_method : str
            Method passed to the module-level sampling function.
        n : Optional[int]
            Number of posterior samples to draw.
        use_final_samples : bool
            If True and final (re-drawn) samples are available, use those
            instead of the nested samples.

        Returns
        -------
        np.ndarray
            Array of posterior samples.
        """
        if use_final_samples and self.final_samples is not None:
            samples = self.final_samples
            log_w = self.final_state.log_posterior_weights
        else:
            samples = self.nested_samples
            log_w = self.state.log_posterior_weights
        # Calls the module-level function of the same name (the method
        # shadows it only within the class namespace).
        posterior_samples, indices = draw_posterior_samples(
            samples,
            log_w=log_w,
            method=sampling_method,
            n=n,
            return_indices=True,
        )
        # TODO: check this is correct
        log_p = log_w[indices] - log_w[indices].max()
        h = differential_entropy(log_p)
        logger.debug(f"Information in the posterior: {h:.3f} nats")
        logger.info(f"Produced {posterior_samples.size} posterior samples.")
        return posterior_samples
@staticmethod
def kl_divergence(samples: np.ndarray) -> float:
"""Compute the KL divergence between the meta-proposal and posterior.
Uses all samples drawn from the meta-proposal
"""
if not len(samples):
return np.inf
# logQ is computed on the unit hyper-cube where the prior is 1/1^n
# so logP = 0
return np.mean(
2 * samples["logQ"]
+ samples["logP"]
+ np.log(samples.size)
- samples["logL"]
)
def draw_more_nested_samples(self, n: int) -> np.ndarray:
"""Draw more nested samples from g"""
samples = self.proposal.draw_from_flows(n)
samples["logL"] = self.model.batch_evaluate_log_likelihood(samples)
state = _INSIntegralState()
state.update_evidence(samples)
logger.info(
"Evidence in new nested samples: "
f"{state.logZ:3f} +/- {state.compute_uncertainty():.3f}"
)
logger.info(
"Effective number of posterior samples: "
f"{state.effective_n_posterior_samples:3f}"
)
return samples
def draw_final_samples(
self,
n_post: Optional[int] = None,
n_draw: Optional[int] = None,
max_its: int = 1000,
max_batch_size: int = 20_000,
max_samples_ratio: Optional[float] = 1.0,
use_counts: bool = False,
optimise_weights: bool = False,
optimise_kwargs: Optional[dict] = None,
optimisation_method: Literal["evidence", "kl"] = "kl",
):
"""Draw final unbiased samples until a desired ESS is reached.
The number of samples drawn is based on the efficiency of the existing
nested samples up to a maximum size determined by
:code:`max_batch_size` or on the value of :code:`n_draw. The number
is increased by 1% to account for samples being rejected.
Returns nested samples, NOT posterior samples.
Restarts the multiprocessing pool for evaluations the likelihood.
Parameters
----------
n_post
Target effective sample size for the posterior distribution. May
not be reached if max_its is reached first. If not specified then
the number of samples drawn will match the nested samples.
n_draw
Number of samples to draw from the meta proposal. Should only be
specified if not specifying :code:`n_post`.
max_its
Maximum number of iterations before stopping.
max_batch_size
Maximum number of samples to draw in a single batch.
max_samples_ratio
Maximum number of samples in terms of the number of samples drawn
during sampling. For example if :code:`max_samples=1`, up to half
the initial number of samples will be drawn. If None, no limit is
set.
optimise_weights
If True, the weights for each proposal are optimised before
redrawing the samples.
optimise_kwargs
Keyword arguments passed to the optimiser function.
use_counts
Use the exact counts for each proposal rather than the weights.
Not recommended. Ignored if :code:`optimise_weights` is True.
Returns
-------
log_evidence
The log evidence for the new samples
samples
Structured array with the new nested samples.
"""
logger.info("Drawing final samples")
if n_post and n_draw:
raise RuntimeError("Specify either `n_post` or `n_draw`")
start_time = datetime.datetime.now()
if self.final_state:
logger.warning("Existing final state will be overridden")
self.final_state = _INSIntegralState()
eff = (
self.state.effective_n_posterior_samples / self.nested_samples.size
)
max_samples = int(max_samples_ratio * self.nested_samples.size)
max_logL = np.max(self.nested_samples["logL"])
logger.debug(f"Expected efficiency: {eff:.3f}")
if not any([n_post, n_draw]):
n_draw = self.nested_samples.size
if n_post:
n_draw = int(n_post / eff)
logger.info(f"Redrawing samples with target ESS: {n_post:.1f}")
logger.info(f"Expect to draw approximately {n_draw:.0f} samples")
if n_draw > max_samples:
logger.warning(
f"Expected number of samples ({n_draw}) is greater than "
f"the maximum number of samples ({max_samples}). Final "
"ESS will most likely be less than the specified value."
)
else:
logger.info(f"Drawing at least {n_draw} final samples")
batch_size = int(1.05 * n_draw)
while batch_size > max_batch_size:
if batch_size <= 1:
raise RuntimeError(
"Could not determine a valid batch size. "
"Consider changing the maximum batch size."
)
batch_size //= 2
logger.debug(f"Batch size: {batch_size}")
if optimise_weights:
weights = self.imp_post
if optimisation_method == "evidence":
pass
elif optimisation_method == "kl":
if optimise_kwargs is None:
optimise_kwargs = {}
weights = optimise_meta_proposal_weights(
self.nested_samples,
self._log_q_ns,
initial_weights=weights,
**optimise_kwargs,
)
else:
raise ValueError(optimisation_method)
target_counts = None
elif use_counts:
logger.warning("Using counts is not recommended!")
target_counts = np.array(
np.fromiter(self.proposal.unnormalised_weights.values(), int)
* (batch_size / self.proposal.normalisation_constant),
dtype=int,
)
batch_size = target_counts.sum()
weights = target_counts / target_counts.sum()
else:
weights = np.fromiter(
self.proposal.unnormalised_weights.values(), float
)
weights /= weights.sum()
target_counts = None
n_models = self.proposal.n_proposals
samples = np.empty([0], dtype=self.proposal.dtype)
log_q = np.empty([0, n_models])
counts = np.zeros(n_models)
it = 0
ess = 0
while True:
if n_post and (ess > n_post):
break
if it >= max_its:
logger.warning("Reached maximum number of iterations.")
logger.warning("Stopping drawing final samples.")
break
if n_post is None and (samples.size > n_draw):
break
if max_samples_ratio and (len(samples) > max_samples):
logger.warning(
f"Reached maximum number of samples: {max_samples}"
)
logger.warning("Stopping")
break
it_samples = np.empty([0], dtype=self.proposal.dtype)
# Target counts will be None if use_counts is False
it_samples, new_log_q, new_counts = self.proposal.draw_from_flows(
batch_size,
counts=target_counts,
weights=weights,
)
log_q = np.concatenate([log_q, new_log_q], axis=0)
counts += new_counts
it_samples["logL"] = self.model.batch_evaluate_log_likelihood(
it_samples
)
if np.any(it_samples["logL"] > max_logL):
logger.warning(
f"Max logL increased from {max_logL:.3f} to "
f"{it_samples['logL'].max():.3f}"
)
samples = np.concatenate([samples, it_samples])
log_Q = logsumexp(log_q, b=weights, axis=1)
if np.isposinf(log_Q).any():
logger.warning("Log meta proposal contains +inf")
samples["logQ"] = log_Q
samples["logW"] = -log_Q
self.final_state.update_evidence(samples)
ess = self.final_state.effective_n_posterior_samples
logger.debug(f"Sample count: {samples.size}")
logger.debug(f"Current ESS: {ess}")
it += 1
logger.info(f"Drawn {samples.size} - ESS: {ess:2f}")
logger.debug(f"Original weights: {self.proposal.unnormalised_weights}")
logger.debug(f"New weights: {counts}")
logger.info(f"Drew {samples.size} final samples")
logger.info(
f"Final log-evidence: {self.final_state.logZ:.3f} "
f"+/- {self.final_state.compute_uncertainty():.3f}"
)
logger.info(f"Final ESS: {ess:.1f}")
self.final_samples = samples
self.draw_final_samples_time += datetime.datetime.now() - start_time
return self.final_state.logZ, samples
    def train_final_flow(self):
        """Train a final flow using all of the nested samples

        The flow is trained with posterior weights, then used to draw
        samples and compute an evidence estimate.

        NOTE(review): the resulting :code:`state` is neither stored nor
        returned, so this currently only has side effects via the flow
        output directory — confirm this is intentional.
        """
        logger.warning("Training final flow")
        from ..flowmodel import FlowModel
        # Posterior weights for the training samples.
        weights = np.exp(self.state.log_posterior_weights)
        weights /= weights.sum()
        samples, log_j = self.proposal.rescale(self.nested_samples)
        flow = FlowModel(
            output=os.path.join(self.output, "levels", "final_level", ""),
            config=self.proposal.flow_config,
        )
        flow.initialise()
        flow.train(samples, weights=weights)
        x_p_out, log_prob = flow.sample_and_log_prob(self.nested_samples.size)
        x_out, log_j_out = self.proposal.inverse_rescale(x_p_out)
        # logQ is the flow log-prob corrected by the rescaling Jacobian.
        x_out["logQ"] = log_prob - log_j_out
        x_out["logW"] = -x_out["logQ"]
        x_out["logL"] = self.model.batch_evaluate_log_likelihood(x_out)
        state = _INSIntegralState(normalised=False)
        state.log_meta_constant = 0.0
        state.update_evidence(x_out)
    @nessai_style()
    def plot_state(
        self, filename: Optional[str] = None
    ) -> Optional[matplotlib.figure.Figure]:
        """
        Produce plots with the current state of the nested sampling run.
        Plots are saved to the output directory specified at initialisation.

        Parameters
        ----------
        filename
            If specified the figure will be saved, otherwise the figure is
            returned.

        Returns
        -------
        matplotlib.figure.Figure
            Only returned when the filename is not specified.
        """
        n_subplots = 8
        fig, ax = plt.subplots(n_subplots, 1, sharex=True, figsize=(15, 15))
        ax = ax.ravel()
        its = np.arange(self.iteration)
        # Mark checkpoint iterations on every panel.
        for a in ax:
            a.vlines(self.checkpoint_iterations, 0, 1, color="C2")
        # Counter for each plot
        m = 0
        # Panel 1: spread of live-point log-likelihoods.
        ax[m].plot(
            its,
            self.history["min_logL"],
            label="Min. Log L",
        )
        ax[m].plot(
            its,
            self.history["max_logL"],
            label="Max. Log L",
        )
        ax[m].plot(
            its,
            self.history["median_logL"],
            label="Median Log L",
        )
        ax[m].set_ylabel("Log-likelihood")
        ax[m].legend()
        m += 1
        # Panel 2: likelihood threshold.
        ax[m].plot(
            its,
            self.history["logL_threshold"],
        )
        ax[m].set_ylabel(r"$\log L_t$")
        m += 1
        # Panel 3: log-evidence with dZ on a twin log-scale axis.
        ax[m].plot(
            its,
            self.history["logZ"],
            label="Log Z",
        )
        ax[m].set_ylabel("Log-evidence")
        ax[m].legend()
        ax_dz = plt.twinx(ax[m])
        ax_dz.plot(
            its,
            self.history["stopping_criteria"]["log_dZ"],
            label="log dZ",
            c="C1",
            ls=config.plotting.line_styles[1],
        )
        ax_dz.set_ylabel("log dZ")
        ax_dz.set_yscale("log")
        # Combine the legends from both y-axes.
        handles, labels = ax[m].get_legend_handles_labels()
        handles_dz, labels_dz = ax_dz.get_legend_handles_labels()
        ax[m].legend(handles + handles_dz, labels + labels_dz)
        m += 1
        # Panel 4: cumulative likelihood evaluations.
        ax[m].plot(its, self.history["likelihood_evaluations"])
        ax[m].set_ylabel("# likelihood \n evaluations")
        m += 1
        # Panel 5: effective sample sizes.
        ax[m].plot(
            its,
            self.history["n_post"],
            label="Posterior",
        )
        ax[m].plot(
            its,
            self.history["live_points_ess"],
            label="Live points",
        )
        ax[m].set_ylabel("ESS")
        ax[m].legend()
        m += 1
        # Panel 6: level importance (first entry is the prior, hence [1:]).
        ax[m].plot(its, self.importance["total"][1:], label="Total")
        ax[m].plot(its, self.importance["posterior"][1:], label="Posterior")
        ax[m].plot(its, self.importance["evidence"][1:], label="Evidence")
        ax[m].legend()
        ax[m].set_ylabel("Level importance")
        m += 1
        # Panel 7: samples added/removed per iteration.
        ax[m].plot(
            its,
            self.history["n_removed"],
            label="Removed",
        )
        ax[m].plot(its, self.history["n_added"], label="Added")
        ax[m].plot(its, self.history["n_live"], label="Total")
        ax[m].set_ylabel("# samples")
        ax[m].legend()
        # NOTE(review): duplicate legend() call — harmless but redundant.
        ax[m].legend()
        m += 1
        # Panel 8: stopping criteria with their tolerances.
        for (i, sc), tol in zip(
            enumerate(self.stopping_criterion), self.tolerance
        ):
            ax[m].plot(
                its,
                self.history["stopping_criteria"][sc],
                label=sc,
                c=f"C{i}",
                ls=config.plotting.line_styles[i],
            )
            ax[m].axhline(tol, ls=":", c=f"C{i}")
        ax[m].legend()
        ax[m].set_ylabel("Stopping criterion")
        ax[-1].set_xlabel("Iteration")
        fig.suptitle(
            f"Sampling time: {self.current_sampling_time}", fontsize=16
        )
        fig.tight_layout()
        fig.subplots_adjust(top=0.95)
        if filename is not None:
            fig.savefig(filename)
            plt.close(fig)
        else:
            return fig
    @nessai_style
    def plot_extra_state(
        self,
        filename: Optional[str] = None,
    ) -> Union[matplotlib.figure.Figure, None]:
        """Produce a state plot that contains extra tracked statistics.

        Parameters
        ----------
        filename : Optional[str]
            Filename name for the plot when saved. If specified the figure will
            be saved, otherwise the figure is returned.

        Returns
        -------
        Union[matplotlib.figure.Figure, None]
            Returns the figure if a filename name is not given.
        """
        n_subplots = 5
        fig, ax = plt.subplots(n_subplots, 1, sharex=True, figsize=(15, 15))
        ax = ax.ravel()
        its = np.arange(self.iteration)
        # Mark checkpoint iterations on every panel.
        for a in ax:
            a.vlines(self.checkpoint_iterations, 0, 1, color="C2")
        # Counter for each plot
        m = 0
        # Panel 1: log prior volume.
        ax[m].plot(its, self.history["logX"])
        ax[m].set_ylabel("Log X")
        m += 1
        # Panel 2: gradient dlogL/dlogX.
        ax[m].plot(its, self.history["gradients"])
        ax[m].set_ylabel("dlogL/dlogX")
        m += 1
        # Panel 3: leakage of live and new points below the threshold.
        ax[m].plot(
            its,
            self.history["leakage_live_points"],
            label="Total leakage",
        )
        ax[m].plot(
            its,
            self.history["leakage_new_points"],
            label="New leakage",
        )
        ax[m].set_ylabel("Leakage")
        ax[m].legend()
        m += 1
        # Panel 4: differential entropies of samples and current proposal.
        ax[m].plot(
            its,
            self.history["samples_entropy"],
            label="Overall",
        )
        ax[m].plot(
            its,
            self.history["proposal_entropy"],
            label="Current",
        )
        ax[m].set_ylabel("Differential\n entropy")
        ax[m].legend()
        m += 1
        # Panel 5: KL stopping criterion.
        ax[m].plot(its, self.history["stopping_criteria"]["kl"])
        ax[m].set_ylabel("KL(Q||posterior)")
        m += 1
        ax[-1].set_xlabel("Iteration")
        fig.suptitle(
            f"Sampling time: {self.current_sampling_time}", fontsize=16
        )
        fig.tight_layout()
        fig.subplots_adjust(top=0.95)
        if filename is not None:
            fig.savefig(filename)
            plt.close(fig)
        else:
            return fig
    @nessai_style()
    def plot_trace(
        self,
        enable_colours: bool = True,
        filename: Optional[str] = None,
    ) -> Union[matplotlib.figure.Figure, None]:
        """Produce a trace-like plot of the nested samples.

        Parameters
        ----------
        enable_colours : bool
            If True, the iteration will be plotted on the colour axis. If
            False, the points will be plotted with a single colour.
        filename : Optional[str]
            Filename for saving the figure. If not specified the figure will
            be returned instead.

        Returns
        -------
        matplotlib.figure.Figure
            Trace plot figure. Only returned when the filename is not
            specified.
        """
        parameters = list(self.samples.dtype.names)
        # logW is the shared x-axis, so exclude it from the y-axes.
        for p in ["logW"]:
            parameters.remove(p)
        n = len(parameters)
        fig, axs = plt.subplots(n, 1, sharex=True, figsize=(5, 2 * n))
        if enable_colours:
            # Colour by the iteration each sample was drawn in (-1 is the
            # prior).
            colour_kwargs = dict(
                c=self.samples["it"],
                vmin=-1,
                vmax=self.samples["it"].max(),
            )
        else:
            colour_kwargs = {}
        for ax, p in zip(axs, parameters):
            ax.scatter(
                self.samples["logW"],
                self.samples[p],
                s=1.0,
                **colour_kwargs,
            )
            ax.set_ylabel(p)
        axs[-1].set_xlabel("Log W")
        fig.tight_layout()
        if filename is not None:
            fig.savefig(filename)
            plt.close(fig)
        else:
            return fig
    @nessai_style(line_styles=False)
    def plot_likelihood_levels(
        self,
        filename: Optional[str] = None,
        cmap: str = "viridis",
        max_bins: int = 50,
    ) -> Optional[matplotlib.figure.Figure]:
        """Plot the distribution of the likelihood at each level.

        Parameters
        ----------
        filename
            Name of the file for saving the figure. If not specified, then
            the figure is returned.
        cmap
            Name of colourmap to use. Must be a valid colourmap in matplotlib.
        max_bins
            The maximum number of bins allowed.

        Returns
        -------
        matplotlib.figure.Figure
            Only returned when the filename is not specified.
        """
        its = np.unique(self.samples["it"])
        colours = plt.get_cmap(cmap)(np.linspace(0, 1, len(its)))
        vmax = np.max(self.samples["logL"])
        # Lower x-limit for the zoomed panel: minimum finite logL of the
        # most recent level.
        vmin = np.ma.masked_invalid(
            self.samples["logL"][self.samples["it"] == its[-1]]
        ).min()
        fig, axs = plt.subplots(1, 2)
        for it, c in zip(its, colours):
            data = self.samples["logL"][self.samples["it"] == it]
            data = data[np.isfinite(data)]
            # Skip levels with no finite log-likelihoods.
            if not len(data):
                continue
            bins = auto_bins(data, max_bins=max_bins)
            for ax in axs:
                ax.hist(
                    data,
                    bins,
                    histtype="step",
                    color=c,
                    density=True,
                )
                ax.set_xlabel("Log-likelihood")
        axs[0].set_ylabel("Density")
        # Right-hand panel is zoomed to the most recent level's range.
        axs[1].set_xlim(vmin, vmax)
        plt.tight_layout()
        if filename is not None:
            fig.savefig(filename)
            plt.close(fig)
        else:
            return fig
    def produce_plots(self, override: bool = False) -> None:
        """Produce all of the relevant plots.

        Checks if plotting is enabled.

        Parameters
        ----------
        override : bool
            Override the plotting setting and force the plots to be produced.
        """
        if self.plot or override:
            logger.debug("Producing plots")
            # The state plot is always produced; the others are gated by
            # their individual flags.
            self.plot_state(os.path.join(self.output, "state.png"))
            if self._plot_trace:
                self.plot_trace(
                    filename=os.path.join(self.output, "trace.png"),
                    **self.trace_plot_kwargs,
                )
            if self._plot_likelihood_levels:
                self.plot_likelihood_levels(
                    os.path.join(self.output, "likelihood_levels.png")
                )
            if self._plot_extra_state:
                self.plot_extra_state(
                    os.path.join(self.output, "state_extra.png")
                )
        else:
            logger.debug("Skipping plots")
def get_result_dictionary(self):
"""Get a dictionary contain the main results from the sampler."""
d = super().get_result_dictionary()
d["history"] = self.history
d["initial_samples"] = self.samples
d["initial_log_evidence"] = self.log_evidence
d["initial_log_evidence_error"] = self.log_evidence_error
# Will all be None if the final samples haven't been drawn
d["bootstrap_log_evidence"] = self.bootstrap_log_evidence
d["bootstrap_log_evidence_error"] = self.bootstrap_log_evidence_error
d["samples"] = self.final_samples
d["log_evidence"] = self.final_log_evidence
d["log_evidence_error"] = self.final_log_evidence_error
d["training_time"] = self.training_time.total_seconds()
d["draw_samples_time"] = self.draw_samples_time.total_seconds()
d[
"add_and_update_samples_time"
] = self.add_and_update_samples_time.total_seconds()
d[
"draw_final_samples_time"
] = self.draw_final_samples_time.total_seconds()
d["proposal_importance"] = self.importance
return d
@classmethod
def resume(cls, filename, model, flow_config={}, weights_path=None):
"""
Resumes the interrupted state from a checkpoint pickle file.
Parameters
----------
filename : str
Pickle pickle to resume from
model : :obj:`nessai.model.Model`
User-defined model
flow_config : dict, optional
Dictionary for configuring the flow
weights_path : str, optional
Path to the weights files that will override the value stored in
the proposal.
Returns
-------
obj
Instance of ImportanceNestedSampler
"""
cls.add_fields()
obj = super().resume(filename, model)
obj.proposal.resume(model, flow_config, weights_path=weights_path)
logger.info(f"Resuming sampler at iteration {obj.iteration}")
logger.info(f"Current number of samples: {len(obj.nested_samples)}")
logger.info(
f"Current logZ: {obj.log_evidence:3f} "
f"+/- {obj.log_evidence_error:.3f}"
)
return obj
def __getstate__(self):
d = self.__dict__
exclude = {"model", "proposal"}
state = {k: d[k] for k in d.keys() - exclude}
state["_previous_likelihood_evaluations"] = d[
"model"
].likelihood_evaluations
state["_previous_likelihood_evaluation_time"] = d[
"model"
].likelihood_evaluation_time.total_seconds()
return state, self.proposal
def __setstate__(self, state):
self.__dict__.update(state[0])
self.proposal = state[1]
|
def search(item, loof):
    """Return the accumulated count for ``item`` given ``loof`` high digits.

    Behaviour-preserving rewrite of the original accumulation: for each
    power of ten from 10**loof down to 10**1, contributions are added for
    every complete leading digit, for the current leading digit, and for
    the full blocks (digit sum 0..9 == 45 per position); the remainder is
    finished off with the triangular-number formula.
    """
    remainder = item
    total = 0
    for power in range(loof, 0, -1):
        base = 10 ** power
        lead = remainder // base
        # All complete leading digits below the current one.
        total += sum(digit * base for digit in range(1, lead))
        # The current leading digit over the partial block.
        total += lead * (remainder % base + 1)
        # Full blocks of lower digits contribute 45 per position.
        total += power * lead * (base // 10) * 45
        remainder %= base
    total += (remainder * (remainder + 1)) // 2
    return total
# For each test case read a range [b, c] and print search(c) - search(b-1),
# i.e. a prefix-sum difference. `search` appears to accumulate a
# digit-based running total over 1..n — TODO confirm against the original
# problem statement.
for a in range(int(input())) :
    b,c = list(map(int,input().split()))
    # i and j count how many extra decimal digits b and c have beyond one.
    i = j = 0
    t1 = b
    t2 = c
    # NOTE(review): l1 and l2 are never used after this point.
    l1 = []
    l2 = []
    while True :
        if t1 < 10 and t2 < 10 :
            break
        if t1 > 9 :
            t1 //= 10
            i += 1
        if t2 > 9 :
            t2 //= 10
            j += 1
    #print(search(b,i))
    # Guard so that search(b-1, i) is never called with a negative value.
    if b == 0 :
        b=1
    print(f'#{a+1} {search(c,j)-search(b-1,i)}')
# The digit position just below the most significant digit:
#b//10**i * 10**(i-1)
# t1 = b
# result = 0
# for k in range(i,0,-1) :
# temp = t1//10**k
# for p in range(1,temp) :
# result += p*10**k
# result += temp*(t1%10**k+1)
# print(result)
# result += k*t1//10**k * 10**(k-1)*45
# print(k,k*t1//10**k * 10**(k-1)*45)
# t1 = t1%10**k
# result += (t1*(t1+1))//2
# print(result)
|
import unittest
import unittest.mock as mock
import json
import math
import sys
sys.path.append('.')
from sunlight.sunlight_calculator import SunlightCalculator
class SunlightCalculatorTest(unittest.TestCase):
    """Unit tests for SunlightCalculator.

    The fixture describes two neighborhoods (N1 and N2), each with three
    buildings ordered east to west by their `distance` value.
    """

    def setUp(self):
        self.calculator = SunlightCalculator()
        self.s = """[
        {"neighborhood":"N1",
        "apartments_height":2,
        "buildings" : [
        {"name":"N1_B1", "apartments_count":2, "distance":0},
        {"name":"N1_B2", "apartments_count":4, "distance":4},
        {"name":"N1_B3", "apartments_count":2, "distance":8}
        ]},
        {"neighborhood":"N2",
        "apartments_height":3,
        "buildings" : [
        {"name":"N2_B1", "apartments_count":4, "distance":0},
        {"name":"N2_B2", "apartments_count":2, "distance":20},
        {"name":"N2_B3", "apartments_count":4, "distance":40}
        ]}
        ]"""
        self.d = json.loads(self.s)

    def test_init(self):
        # Every apartment described in the JSON must be retrievable and
        # numbered consecutively from zero.
        self.calculator.init(self.s)
        for n in self.d:
            for b in n["buildings"]:
                for a in range(b["apartments_count"]):
                    self.assertEqual(a, self.calculator.city.get_apartment(n["neighborhood"], b["name"], a).get_number())

    def test_compute_sunlight_angles(self):
        self.calculator.init(self.s)
        # start angles of eastern buildings
        self.assertEqual(0.0, self.calculator.get_sunlight_angles("N1", "N1_B1", 0)[0])
        self.assertEqual(0.0, self.calculator.get_sunlight_angles("N2", "N2_B1", 3)[0])
        # end angles of western buildings
        self.assertEqual(math.pi, self.calculator.get_sunlight_angles("N1", "N1_B3", 0)[1])
        self.assertEqual(math.pi, self.calculator.get_sunlight_angles("N2", "N2_B3", 0)[1])
        # N1 -> 2 - 4 - 2
        self.assertEqual(math.pi/4, self.calculator.get_sunlight_angles("N1","N1_B2", 0)[0])
        # Bug fix: the original asserted `x - atan(0.5) < 1e-5`, which is
        # vacuously true for any x smaller than atan(0.5); assert real
        # closeness instead.
        self.assertAlmostEqual(math.atan(0.5), self.calculator.get_sunlight_angles("N1","N1_B2", 1)[0], delta=1e-5)
        self.assertEqual(0.0, self.calculator.get_sunlight_angles("N1","N1_B2", 2)[0])

    def test_get_sunlight_hours(self):
        self.calculator.init(self.s)
        self.assertEqual("10:31:45-15:07:15", self.calculator.get_sunlight_hours("N1","N1_B2", 0))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
from orm import ORM
from computer import Computer
class User(ORM):
    """ORM model for a row of the ``users`` table."""

    tablename = "users"
    fields = ["name", "phone", "email", "credit_card"]

    def __init__(self, **kwargs):
        # Populate the primary key plus every declared field from the
        # keyword arguments; anything missing defaults to None.
        for attribute in ["pk"] + self.fields:
            setattr(self, attribute, kwargs.get(attribute))

    def computers_for(self):
        """Return every Computer record owned by this user."""
        return Computer.all_from_where_clause("WHERE user_pk=?", (self.pk,))
# Manual smoke test: expects the backing database to contain a user pk=1.
if __name__=="__main__":
    test = User(name="Greg", pk=1)
    print(test.computers_for())
from django.db import models
class Kouka2(models.Model):
    """Django model for a product quotation entry."""

    product = models.CharField(max_length=1000)
    area = models.CharField(max_length=1000)
    delivery = models.CharField(max_length=1000)
    price = models.IntegerField(default=0)
    attachment = models.CharField(max_length=1000)

    def __str__(self):
        # Human-readable label (e.g. shown in the Django admin).
        return f"{self.product} / {self.area}"
from threading import Thread
from time import sleep
import pygame
import pygame.gfxdraw
class Game(object):
    """Runs a ~30 FPS game loop on a background thread, drawing the current
    model onto an off-screen surface and asking the canvas to refresh."""

    def __init__(self):
        self.__canvas = None
        self.__model = None
        self.__running = False
        # Bug fix: previously __thread was only created in start()/restart(),
        # so it did not exist on a freshly constructed instance.
        self.__thread = None
        self.__surface = pygame.Surface((400, 400), pygame.SRCALPHA, 32)

    def get_surface(self):
        """Return the off-screen surface the game draws onto."""
        return self.__surface

    def resize_surface(self, w, h):
        """Replace the drawing surface with a new transparent one of (w, h)."""
        self.__surface = pygame.Surface((w, h), pygame.SRCALPHA, 32)

    def start(self, canvas):
        """Start the game thread, rendering updates to ``canvas``."""
        if self.__running:
            print("game thread is already running")
            return
        self.__canvas = canvas
        self.__launch_thread()

    def restart(self):
        """Restart the game thread, reusing the previously set canvas."""
        if self.__running:
            print("game thread is already running")
            return
        self.__launch_thread()

    def __launch_thread(self):
        # Shared helper for start()/restart(): mark the loop as running and
        # spawn the loop thread (removes the duplicated launch code).
        self.__running = True
        self.__thread = Thread(target=self.game_loop)
        self.__thread.start()

    def game_loop(self):
        """Main loop: clear the surface, draw the model, update the canvas."""
        time = 0
        while self.__running:
            sleep(1/30)
            # NOTE(review): time advances by 8/30 per 1/30 s frame, i.e. 8x
            # wall time — presumably an intentional animation speed-up.
            time += 8/30
            self.__surface.fill((0, 0, 0, 0))
            if self.__model is not None:
                self.__model.draw(self.__surface, time)
            if self.__canvas is None:
                print("missing canvas to update")
            else:
                self.__canvas.update()

    def stop(self):
        """Stop the game loop and wait for the worker thread to exit."""
        if not self.__running:
            print("game thread is not running")
            return
        self.__running = False
        self.__thread.join()

    def set_model(self, model):
        """Set the model object that will be drawn each frame."""
        self.__model = model
|
# -*- coding: utf-8 -*-
"""The compressed stream file entry implementation."""
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.vfs import root_only_file_entry
from dfvfs.vfs import vfs_stat
class CompressedStreamFileEntry(root_only_file_entry.RootOnlyFileEntry):
  """Class that implements a compressed stream file entry object."""

  TYPE_INDICATOR = definitions.TYPE_INDICATOR_COMPRESSED_STREAM

  def _GetStat(self):
    """Retrieves the stat object.

    Returns:
      The stat object (instance of vfs.VFSStat).

    Raises:
      BackEndError: when the compressed stream is missing.
    """
    compressed_stream = self.GetFileObject()
    if not compressed_stream:
      raise errors.BackEndError(
          u'Unable to open compressed stream: {0:s}.'.format(
              self.path_spec.comparable))

    try:
      stat_object = vfs_stat.VFSStat()
      # Only the size and entry type are available; a compressed stream
      # carries no date, ownership or permission information.
      stat_object.size = compressed_stream.get_size()
      stat_object.type = stat_object.TYPE_FILE
    finally:
      # Always release the underlying stream, even if get_size() fails.
      compressed_stream.close()

    return stat_object
|
import sqlite3

# One-shot setup of the credential store: a single `entries` table holding
# a unique id, user/email, password, url, storage and creation date.
connection = sqlite3.connect("tresor_sql.db")  # create database
try:
    cursor = connection.cursor()  # set cursor
    # NOTE: intentionally fails if the table already exists, matching the
    # original one-shot behaviour (no IF NOT EXISTS).
    sql_command = """
    CREATE TABLE entries (
    unique_id INTEGER PRIMARY KEY,
    user_email VARCHAR(50),
    password VARCHAR(255),
    url VARCHAR(255),
    storage VARCHAR(255),
    created VARCHAR(10));"""
    cursor.execute(sql_command)  # execute
    connection.commit()
finally:
    # Bug fix: guarantee the connection is released even when the CREATE
    # TABLE statement raises (previously it leaked on error).
    connection.close()
|
def get_alipay_user():
    """Return the Alipay sandbox OAuth authorization URL for user login."""
    endpoint = "https://openauth.alipaydev.com/oauth2/publicAppAuthorize.htm"
    query = "app_id=2016091700530193&scope=auth_user&redirect_uri=http://neverqaz.cn/ali_login/"
    return endpoint + "?" + query
|
#Under MIT License, see LICENSE.txt
import unittest
from ai.STP.Play.pQueueLeuLeu import *
class TestPQueueLeuLeu(unittest.TestCase):
    """Tests for the pQueueLeuLeu play and its tactic sequence."""

    def setUp(self):
        # Fresh play instance for each test.
        self.pTestQueueLeuLeu = pQueueLeuLeu()

    def test_getTactics_with_no_args(self):
        # Without an index the full tactic sequence is returned.
        self.assertEqual(SEQUENCE_QUEUELEULEU, self.pTestQueueLeuLeu.getTactics())

    def test_getTactics_with_index(self):
        # A valid index also yields the sequence.
        self.assertEqual(SEQUENCE_QUEUELEULEU, self.pTestQueueLeuLeu.getTactics(0))

    def test_get_Tactics_with_invalid_index(self):
        # An out-of-range index must raise IndexError.
        self.assertRaises(IndexError, self.pTestQueueLeuLeu.getTactics, 6)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
""" Contains the non-database models for our app.
Purpose: contains the models for unsaved data and read-only data in json format
Author: Tom W. Hartung
Date: Winter, 2017.
Copyright: (c) 2017 Tom W. Hartung, Groja.com, and JooMoo Websites LLC.
Reference:
(none, yet)
"""
import json
import os
from django.contrib import messages
from .database import Questionnaire
DJANGO_DEBUG = os.environ.get('DJANGO_DEBUG')
class Score:
    """ Class to calculate, contain, and display the score for the quiz """

    def __init__(self):
        # Completion state and raw per-letter scores.
        self.score_is_complete = False
        self.unanswered_question_count = -1
        self.e_score = 0
        self.i_score = 0
        self.n_score = 0
        self.s_score = 0
        self.f_score = 0
        self.t_score = 0
        self.j_score = 0
        self.p_score = 0
        # Maps each letter to the opposite end of its dimension.
        self.opposite_type = {
            "E": "I", "I": "E",
            "N": "S", "S": "N",
            "F": "T", "T": "F",
            "J": "P", "P": "J",
        }
        # Percentages are computed lazily by calculate_percentages().
        self.e_pct = None
        self.i_pct = None
        self.n_pct = None
        self.s_pct = None
        self.f_pct = None
        self.t_pct = None
        self.j_pct = None
        self.p_pct = None

    def score_quiz(self, quiz_size_slug, cleaned_data):
        """ Process the data from the form and set the scores.

        question_list is 0 based, the form questions are 1-based.
        """
        # self.print_cleaned_data(cleaned_data)
        questions = Questions()
        questions_in_form = Questionnaire.get_question_count_for_slug(quiz_size_slug)
        questions_answered = 0
        for form_question_str in sorted(cleaned_data):
            # Only "question_NN" form fields contribute to the score.
            if not form_question_str.startswith("question_"):
                continue
            question_int = int(form_question_str.replace("question_", ""))
            answer_123_type = questions.get_answer_123_type(question_int)
            answer_str = cleaned_data[form_question_str]
            if len(answer_str) > 0:
                answer_int = int(answer_str)
                answer_weight_str = questions.get_answer_weight(question_int, answer_str)
                answer_weight_int = int(answer_weight_str)
                self.tally_answer(answer_123_type, answer_int, answer_weight_int)
                questions_answered += 1
                if DJANGO_DEBUG:
                    answer_text = questions.get_answer_text(question_int, answer_str)
                    question_text = questions.get_question_text(question_int)
                    print('Score.score_quiz -',
                          str(question_int) + ' (' + answer_123_type + ')', '/',
                          str(answer_int) + ' (' + answer_weight_str + ')',
                          question_text, '/',
                          answer_text)
        print('Score - score_quiz: questions_answered/questions_in_form',
              str(questions_answered) + '/' + str(questions_in_form))
        self.unanswered_question_count = questions_in_form - questions_answered
        if self.unanswered_question_count == 0:
            self.score_is_complete = True
        return self

    def save_questionnaire(self, cleaned_data, quiz_size_slug):
        """ Persist the quiz, but only when an email address was given """
        email = cleaned_data["email"]
        if email == '':
            print( 'views.quiz: No email given, not saving quiz')
        else:
            print( 'views.quiz: saving quiz for "' + email + '"')
            quiz_db = Questionnaire()
            quiz_db.save_questionnaire(cleaned_data, quiz_size_slug)

    def print_cleaned_data(self, cleaned_data):
        """ print out the cleaned data, in order by question number """
        print('Score.print_cleaned_data - cleaned_data:')
        for question_xx in sorted(cleaned_data):
            print('\tanswer for ' + question_xx + ': ' + cleaned_data[question_xx])

    def tally_answer(self, answer_123_type, answer_int, answer_weight_int):
        """ Add the answer_weight to the appropriate score data member.

        Answers 1-3 count toward the question's type; answers 4 and above
        count toward the opposite type.
        """
        if answer_int <= 3:
            type_for_answer = answer_123_type
        else:
            type_for_answer = self.opposite_type[answer_123_type]
        # Bug fix: the original compared strings with `is` (e.g.
        # `type_for_answer is "E"`), which only worked because of CPython
        # string interning. Update the matching score attribute directly.
        score_attr = type_for_answer.lower() + '_score'
        setattr(self, score_attr, getattr(self, score_attr) + answer_weight_int)
        if DJANGO_DEBUG:
            print('Score.tally_answer - added',
                  str(answer_weight_int) + ' to '+ type_for_answer + ': ',
                  self.__str__())
        return True

    def is_complete(self):
        """ Return True once every question in the form was answered """
        return self.score_is_complete

    def set_incomplete_message(self, request):
        """ Add an error message reporting the unanswered question count """
        if self.unanswered_question_count == 1:
            incomplete_msg = 'There is ' + \
                str(self.unanswered_question_count) + ' unanswered question'
        else:
            incomplete_msg = 'There are ' + \
                str(self.unanswered_question_count) + ' unanswered questions'
        messages.add_message(request, messages.ERROR, incomplete_msg)
        return True

    def set_quiz_results_messages(self, request):
        """ Set the messages we display on the results page """
        four_letter_type = "Type: " + self.as_four_letter_type()
        pcts_and_counts_html = self.get_pcts_and_counts_html()
        messages.add_message(request, messages.INFO, four_letter_type)
        messages.add_message(request, messages.INFO, pcts_and_counts_html)
        return True

    def as_four_letter_type(self):
        """ Return a string containing the four letter type.

        Each dimension yields the letter with the higher score, or 'X'
        when the two scores are tied.
        """
        four_letter_type = ''
        dimensions = [
            (self.i_score, self.e_score, 'E', 'I'),
            (self.s_score, self.n_score, 'N', 'S'),
            (self.t_score, self.f_score, 'F', 'T'),
            (self.p_score, self.j_score, 'J', 'P'),
        ]
        for second_score, first_score, first_letter, second_letter in dimensions:
            if second_score < first_score:
                four_letter_type += first_letter
            elif second_score == first_score:
                four_letter_type += 'X'
            else:
                four_letter_type += second_letter
        return four_letter_type

    def get_pcts_and_counts_html(self):
        """ Return an html string containing the score's percents and counts """
        score_list = self.as_list_of_pcts_and_counts()
        pcts_and_counts_html = '<ul>'
        for score_pair in score_list:
            pcts_and_counts_html += '<li>'
            for single_score in score_pair:
                pcts_and_counts_html += single_score + ' '
            pcts_and_counts_html += '</li>'
        pcts_and_counts_html += '</ul>'
        return pcts_and_counts_html

    @staticmethod
    def _pair_percentages(score_a, score_b):
        # Return (pct_a, pct_b) for one dimension; (0, 0) when both are 0.
        total = score_a + score_b
        if total > 0:
            return round(100 * score_a / total), round(100 * score_b / total)
        return 0, 0

    def calculate_percentages(self):
        """ Calculate the percentages for each of the four dimensions """
        self.e_pct, self.i_pct = self._pair_percentages(self.e_score, self.i_score)
        self.n_pct, self.s_pct = self._pair_percentages(self.n_score, self.s_score)
        self.f_pct, self.t_pct = self._pair_percentages(self.f_score, self.t_score)
        self.j_pct, self.p_pct = self._pair_percentages(self.j_score, self.p_score)

    def as_list_of_pcts_and_counts(self):
        """ Return a list containing both percentages and counts """
        if self.e_pct is None:
            self.calculate_percentages()
        score_list = [
            ['E: ' + str(self.e_pct) + '% (' + str(self.e_score) + ')',
             'I: ' + str(self.i_pct) + '% (' + str(self.i_score) + ')'],
            ['N: ' + str(self.n_pct) + '% (' + str(self.n_score) + ')',
             'S: ' + str(self.s_pct) + '% (' + str(self.s_score) + ')'],
            ['F: ' + str(self.f_pct) + '% (' + str(self.f_score) + ')',
             'T: ' + str(self.t_pct) + '% (' + str(self.t_score) + ')'],
            ['J: ' + str(self.j_pct) + '% (' + str(self.j_score) + ')',
             'P: ' + str(self.p_pct) + '% (' + str(self.p_score) + ')']
        ]
        return score_list

    def to_kv_pairs(self):
        """ Returns the current score as a dict of letter/score pairs """
        score = {
            "E": self.e_score,
            "I": self.i_score,
            "N": self.n_score,
            "S": self.s_score,
            "F": self.f_score,
            "T": self.t_score,
            "J": self.j_score,
            "P": self.p_score,
        }
        return score

    #
    # Reference for purpose of __str__() and __repr__():
    # http://stackoverflow.com/questions/3691101/what-is-the-purpose-of-str-and-repr-in-python
    #
    def __repr__(self):
        return str(self.to_kv_pairs())

    # Backwards-compatible alias: the original method was misspelled
    # '__repl__' and was therefore never invoked by Python itself.
    __repl__ = __repr__

    def __str__(self):
        score_str = 'E/I: ' + str(self.e_score) + '/' + str(self.i_score) + '; '
        score_str += 'N/S: ' + str(self.n_score) + '/' + str(self.s_score) + '; '
        score_str += 'F/T: ' + str(self.f_score) + '/' + str(self.t_score) + '; '
        score_str += 'J/P: ' + str(self.j_score) + '/' + str(self.p_score)
        return score_str
class Questions:
    """ Read in and work with all the questions in the entire quiz """

    def __init__(self):
        """ Populate the question_list with questions from the json file """
        self.question_list = self.read_quiz_json()

    def read_quiz_json(self):
        """ Read the quiz questions and answers from the json file.

        Returns the list of question dictionaries.
        """
        site_content_dir = os.path.abspath(os.path.dirname(__file__))
        quiz_file_path = os.path.join(
            site_content_dir, 'static', 'content', 'json', 'quiz',
            'seeourminds_quiz.json')
        # Bug fix: use a context manager so the file is closed even when
        # JSON decoding raises (the original leaked the handle on error).
        with open(quiz_file_path) as quiz_json_file:
            question_list = json.load(quiz_json_file)
        return question_list

    def get_quiz_question(self, question_int):
        """ Return the entire quiz question (answers, weights, etc.)"""
        return self.question_list[question_int]

    def get_question_text(self, question_int):
        """ Get and return the question_text for the question """
        quiz_question = self.get_quiz_question(question_int)
        return quiz_question['question_text']

    def get_choices(self, question_int):
        """ Return the answer choices for the given question.

        Answers 1-6 are included only when they have non-empty text and a
        positive weight; answer 7, when present, is always included.
        """
        quiz_question = self.get_quiz_question(question_int)
        choices = []
        # Collapse the original six copy-pasted blocks into one loop. The
        # text is checked first so the weight is only converted when a text
        # is present (mirrors the original short-circuit evaluation).
        for answer_number in range(1, 7):
            text_key = 'answer_{0}_text'.format(answer_number)
            weight_key = 'answer_{0}_weight'.format(answer_number)
            if len(quiz_question[text_key]) > 0 and \
                    int(quiz_question[weight_key]) > 0:
                choices.append([str(answer_number), quiz_question[text_key]])
        # Answer 7 is optional and has no weight requirement.
        answer_7_text = quiz_question.get('answer_7_text')
        if answer_7_text is not None:
            choices.append(['7', answer_7_text])
        return choices

    def get_answer_123_type(self, question_int):
        """ Get and return the answer_123_type (e.g., "E") for the question """
        quiz_question = self.get_quiz_question(question_int)
        return quiz_question['answer_123_type']

    def get_answer_text(self, question_int, answer_str):
        """ Get and return the answer_X_text for the selected answer 'X' """
        quiz_question = self.get_quiz_question(question_int)
        return quiz_question["answer_" + answer_str + "_text"]

    def get_answer_weight(self, question_int, answer_str):
        """ Get and return the answer_X_weight for the selected answer 'X' """
        quiz_question = self.get_quiz_question(question_int)
        return quiz_question["answer_" + answer_str + "_weight"]
|
# coding: utf-8
import os, json, random, nltk
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Seed both NumPy and the stdlib RNG so the sampling-based train/dev/test
# split below is reproducible across runs.
np.random.seed(0)
random.seed(0)
# Substring keyword sets used to map free-text verdicts to coarse labels.
misinfo = {
    # Snopes verdicts.
    "unproven", "unconfirmed", "undetermined", "probably", "maybe",
    "mixture", "incomplete", "partly", "outdate",
    "legend", "ficti", "satir", "superstition",
    "fals", "hoax", "scam", "fraud",
    "incorrect", "inaccurate", "miscaption", "misattribut",
    # For verdict only.
    "sort of", "not quite", "not likely", "in progress",
    "not any", "no longer", "was true", "true but",
}

info = {
    # Snopes verdicts.
    "true", "real", "correct", "accurate",
}

domain_knowledge = {
    # Snopes verdicts.
    "unproven", "outdate", "legend", "satir", "scam", "miscaption", "misattribut",
    # Fake news. It’s complicated.
    "fabricat", "manipulat", "imposter", "mislead", "parody",
}

masks = {
    "false", "true", "claim", "stat", "quot",
    "origin", "story", "article", "rumor", "evidence", "proof"
}


def process_verdict(verdict):
    """Map a raw verdict string to "misinfo"/"info", or NaN if unmatched.

    Misinformation keywords are checked first and take precedence.
    """
    for label, keywords in (("misinfo", misinfo), ("info", info)):
        if any(keyword in verdict for keyword in keywords):
            return label
    return np.nan
def process_tokens(c):
    """Tokenize content ``c``; return a Series of (joined tokens, length).

    "...TOKEN" placeholders become "<NAME>" symbols, texts longer than
    1000 tokens are truncated around a "<MORE>" marker, and tokens that
    contain an overly signaling word are replaced by "<MASK>".
    """
    raw_tokens = tokenizer.tokenize(c)
    tokens = []
    for token in raw_tokens:
        if token.endswith("TOKEN"):
            tokens.append("<" + token[:-5] + ">")
        else:
            tokens.append(token.lower())
    if len(tokens) > 1000:
        # Keep the head and tail, marking the elision in the middle.
        tokens = tokens[:500] + ["<MORE>"] + tokens[-500:]
    masked = [
        "<MASK>" if any(word in token for word in masks) else token
        for token in tokens
    ]
    return pd.Series([" ".join(masked), len(masked)])
def process_domain(r):
    """Return a space-joined 0/1 string flagging domain-knowledge tokens.

    One flag per token in the row's pre-joined "tokens" column.
    """
    flags = []
    for token in r["tokens"].split(" "):
        has_domain_word = any(word in token for word in domain_knowledge)
        flags.append("1" if has_domain_word else "0")
    return " ".join(flags)
class DataCleaner(object):
    """
    Dataset cleaner for fact-checks.
    """

    def __init__(self, data_dir="raw"):
        """
        Inputs:
            data_dir -- the directory of the dataset.
        """
        self.data_dir = data_dir

    def clean(self):
        """Load the Snopes dump, label and tokenize it, then write an
        80/10/10 train/dev/test split as TSV files."""
        # Load factchecks.
        factcheck_path = os.path.join(self.data_dir, "snopes.tsv")
        factcheck = pd.read_csv(factcheck_path, delimiter="\t")
        factcheck = factcheck.drop_duplicates()
        # _ = factcheck.groupby("verdict").count()["url"].sort_values(ascending=False).index.tolist()
        # Process verdict: rows whose verdict matches no keyword become NaN
        # and are removed by the dropna below.
        factcheck["label"] = factcheck["verdict"].apply(process_verdict)
        factcheck = factcheck.dropna()
        # Process tokens.
        factcheck[["tokens", "len"]] = factcheck["content"].apply(process_tokens)
        factcheck = factcheck.dropna()
        # Print symbols (the special <...> placeholders) as a sanity check.
        tokens = set(" ".join(factcheck["tokens"].tolist()).split(" "))
        symbols = {t for t in tokens if t.startswith("<") and t.endswith(">")}
        print(symbols)
        # Print stats.
        labels = factcheck.groupby("label").count()["url"]
        print(labels)
        print(factcheck["len"].median())
        # Fraction of documents fitting common sequence-length limits.
        for lim in [512, 1024, 2048, 4096]:
            percentage = len(factcheck[factcheck["len"] <= lim]) / len(factcheck)
            print(lim, "\t", percentage)
        # Process domain knowledge.
        factcheck["domain_knowledge"] = factcheck.apply(process_domain, axis=1)
        # Other information: placeholder columns expected downstream.
        factcheck["rationale_annotation"] = " "
        factcheck["linear_signal"] = " "
        # Split and save: 80% train, then half of the remainder each for
        # dev and test (splits are reproducible via the seeds set above).
        selected_cols = ["label", "tokens", "rationale_annotation",
                        "linear_signal", "domain_knowledge", "date"]
        train = factcheck.sample(frac=0.8)
        train[selected_cols].to_csv("train.tsv", sep="\t", index=False)
        factcheck = factcheck.drop(train.index)
        dev = factcheck.sample(frac=0.5)
        dev[selected_cols].to_csv("dev.tsv", sep="\t", index=False)
        test = factcheck.drop(dev.index)
        test[selected_cols].to_csv("test.tsv", sep="\t", index=False)
if __name__ == "__main__":
    # The tokenizer is deliberately module-global so process_tokens can
    # reach it when called through pandas' apply.
    tokenizer = nltk.tokenize.WordPunctTokenizer()
    DataCleaner().clean()
|
import numpy as np
import matplotlib.pyplot as plt
import sys
"""
Top-5 accuracy
Trained the database with original dataset, query with rotated from all 3 axes.
"""
# With all rotated
# Class : new_partial_exp/rot-z-y-x-Brackets_slices4_fanout10_minsig5.txt
# Total queries : 52
# Accuracy (naive) : 0.85
# Accuracy (neighborhoods) : 0.65
# rot 30
# Total accuracy (naive) : 0.885
# Total accuracy (neighborhoods) : 0.808
# 167.06s user 34.11s system 224% cpu 1:29.41 total
# 1.90 sec per file
# rot 20
# Total accuracy (naive) : 0.942
# Total accuracy (neighborhoods) : 0.962
# 219.62s user 45.22s system 235% cpu 1:52.24 total
# 2.15 sec per file
# rot 10
# Total accuracy (naive) : 1.0
# Total accuracy (neighborhoods) : 1.0
# 381.31s user 75.39s system 242% cpu 3:08.61 total
# 3.6153846153846154
# Figure configuration: 7x3 inches rendered at 300 dpi.
mydpi = 300
pltsize = (7, 3)
# Accuracy and per-model query-time values transcribed from the benchmark
# runs logged in the comment block above (0, 12, 18 and 36 slices).
naive_accuracy = [ 0.85, 0.885, 0.942, 1 ]
neighborhoods_accuracy = [ 0.65, 0.808, 0.962, 1 ]
Time = [ 0.51 , 1.9, 2.15, 3.61 ]
N = len(naive_accuracy)
index = np.arange(N) # the x locations for the groups
fig, ax = plt.subplots(figsize=pltsize)
ax2 = ax.twinx() # instantiate a second axes that shares the same x-axis
ax.set_xticks(index)
ax.set_xlabel('Number of slices using star rotation', fontsize=13)
ax.set_xticklabels(['$0$', '$12$', '$18$', '$36$'], fontsize=11)
ax.tick_params(axis='both', which='major', labelsize=11)
ax.tick_params(axis='both', which='minor', labelsize=11)
# Accuracy curves share the left axis; markers distinguish the methods.
l2 = ax.plot(index, neighborhoods_accuracy, linestyle='solid', color='black', markerfacecolor='#14b4ff', marker='D', linewidth=1, markersize=8)
l1 = ax.plot(index, naive_accuracy, linestyle='solid', color='black', markerfacecolor='#076794', marker='^', linewidth=1, markersize=8)
ax.set_ylim([0.58, 1.02])
ax.set_ylabel('Accuracy', fontsize=13)
# Per-model query time is drawn against the right-hand axis.
l3 = ax2.plot(index, Time, linestyle='solid', color='black', markerfacecolor='xkcd:grey', marker='s', linewidth=1, markersize=8)
ax2.set_ylabel('Time (sec.)', fontsize=13)
# ax2.set_yscale('log')
ax2.tick_params(axis='both', which='major', labelsize=11)
ax2.tick_params(axis='both', which='minor', labelsize=11)
ax2.set_ylim([0, 4])
ax.legend((l1[0], l2[0], l3[0]), ['Fine-grained', 'Neighborhoods', 'Average time per model'], fontsize=11, ncol=1, loc='lower right')
plt.tight_layout()
plt.savefig('../images/star_rotation_exp.png', dpi=mydpi, bbox_inches='tight', pad_inches=0.03)
|
"""Implementation of InvertedPendulum System."""
from .ode_system import ODESystem
import numpy as np
from scipy import signal
from .linear_system import LinearSystem
import os
from gym.envs.classic_control import rendering
class InvertedPendulum(ODESystem):
    """Inverted Pendulum system.

    Continuous-time pendulum dynamics integrated with a fixed step size;
    the state is (angle, angular velocity) and the action is a torque.

    Parameters
    ----------
    mass : float
        Mass of the pendulum.
    length : float
        Length of the pendulum.
    friction : float
        Viscous friction coefficient.
    gravity: float, optional
        Gravitational acceleration; defaults to 9.81.
    step_size : float, optional
        The duration of each time step.
    """

    def __init__(self, mass, length, friction, gravity=9.81, step_size=0.01):
        """Initialization; see `InvertedPendulum`."""
        self.mass = mass
        self.length = length
        self.friction = friction
        self.gravity = gravity
        super().__init__(
            func=self._ode, step_size=step_size, dim_action=1, dim_state=2,
        )
        # Rendering state; the viewer is created lazily on first render().
        self.viewer = None
        self.last_action = None

    @property
    def inertia(self):
        """Return the inertia of the pendulum."""
        return self.mass * self.length ** 2

    def linearize(self):
        """Return the linearized system.

        Linearizes the continuous dynamics (sin(angle) ~ angle near the
        upright equilibrium) and discretizes with the configured step size.

        Returns
        -------
        a : ndarray
            The state matrix.
        b : ndarray
            The action matrix.
        """
        gravity = self.gravity
        length = self.length
        friction = self.friction
        inertia = self.inertia
        a = np.array([[0, 1], [gravity / length, -friction / inertia]])
        b = np.array([[0], [1 / inertia]])
        # Full-state observation, no feed-through.
        sys = signal.StateSpace(a, b, np.eye(2), np.zeros((2, 1)))
        sysd = sys.to_discrete(self.step_size)
        return LinearSystem(sysd.A, sysd.B)

    def render(self, mode="human"):
        """Render pendulum."""
        if self.viewer is None:
            # Lazily build the viewer and geometries; this mirrors gym's
            # classic-control pendulum rendering.
            self.viewer = rendering.Viewer(500, 500)
            self.viewer.set_bounds(-2.2, 2.2, -2.2, 2.2)
            rod = rendering.make_capsule(1, 0.2)
            rod.set_color(0.8, 0.3, 0.3)
            self.pole_transform = rendering.Transform()
            rod.add_attr(self.pole_transform)
            self.viewer.add_geom(rod)
            axle = rendering.make_circle(0.05)
            axle.set_color(0, 0, 0)
            self.viewer.add_geom(axle)
            fname = os.path.dirname(rendering.__file__) + "/assets/clockwise.png"
            self.img = rendering.Image(fname, 1.0, 1.0)
            self.imgtrans = rendering.Transform()
            self.img.add_attr(self.imgtrans)

        self.viewer.add_onetime(self.img)
        self.pole_transform.set_rotation(self.state[0] + np.pi / 2)
        if self.last_action:
            # Scale the torque arrow with the most recent action.
            # NOTE(review): a last action of exactly 0 skips this branch.
            self.imgtrans.scale = (-self.last_action / 2, np.abs(self.last_action) / 2)
        return self.viewer.render(return_rgb_array=mode == "rgb_array")

    def _ode(self, _, state, action):
        """Compute the state time-derivative.

        Parameters
        ----------
        state : ndarray
            State (angle, angular_velocity).
        action : ndarray
            Torque input; assumed shape (1,) — TODO confirm; only
            action[0] is stored for rendering.

        Returns
        -------
        x_dot : Tensor
            The derivative of the state.
        """
        # Physical dynamics
        gravity = self.gravity
        length = self.length
        friction = self.friction
        inertia = self.inertia

        self.last_action = action[0]
        angle, angular_velocity = state
        x_ddot = (
            gravity / length * np.sin(angle)
            + action / inertia
            - friction / inertia * angular_velocity
        )
        return np.array((angular_velocity, x_ddot))
|
import pytest
from app import create_app, db
from app.models import Host
"""
These tests are for testing the db model classes outside of the api.
"""
@pytest.fixture
def app():
    """
    Temporarily rename the host table while the tests run. This is done
    to make dropping the table at the end of the tests a bit safer.
    """
    temp_table_name_suffix = "__unit_tests__"
    # Guard against double-suffixing when the fixture runs more than once.
    if temp_table_name_suffix not in Host.__table__.name:
        Host.__table__.name = Host.__table__.name + temp_table_name_suffix
    if temp_table_name_suffix not in Host.__table__.fullname:
        Host.__table__.fullname = Host.__table__.fullname + temp_table_name_suffix

    app = create_app(config_name="testing")

    # binds the app to the current context
    with app.app_context() as ctx:
        # create all tables
        db.create_all()
        ctx.push()
        yield app
        # Bug fix: `ctx.pop` was referenced without being called, so the
        # pushed context was never actually popped.
        ctx.pop()
        db.session.remove()
        db.drop_all()
def _create_host(fqdn=None, display_name=None):
    """Insert and commit a Host row, keyed by fqdn when one is given."""
    facts = {'fqdn': fqdn} if fqdn is not None else {}
    host = Host(facts, display_name=display_name, account="00102")
    db.session.add(host)
    db.session.commit()
    return host
def test_create_host_with_canonical_facts_as_None(app):
    """Canonical facts that are None or '' must not be stored on the host."""
    invalid_facts = {"fqdn": None, "insights_id": ''}
    valid_facts = {"bios_uuid": "1234"}
    host = Host.from_json({**invalid_facts, **valid_facts})
    assert valid_facts == host.canonical_facts
def test_create_host_with_fqdn_and_display_name_as_empty_str(app):
    """An empty display_name should be backfilled from the fqdn."""
    expected_fqdn = "spacely_space_sprockets.orbitcity.com"
    host = _create_host(fqdn=expected_fqdn, display_name="")
    assert host.display_name == expected_fqdn
def test_create_host_with_display_name_and_fqdn_as_empty_str(app):
    """With neither display_name nor fqdn, fall back to the host id."""
    host = _create_host(fqdn="", display_name="")
    assert host.display_name == str(host.id)
def test_update_existing_host_fix_display_name_using_existing_fqdn(app):
    """update() should repopulate display_name from the stored fqdn."""
    expected_fqdn = 'host1.domain1.com'
    existing = _create_host(fqdn=expected_fqdn, display_name=None)
    existing.display_name = None  # simulate a host that lost its display name
    db.session.commit()
    assert existing.display_name is None
    # Updating with an empty display_name triggers the backfill.
    existing.update(Host({}, display_name=''))
    assert existing.display_name == expected_fqdn
def test_update_existing_host_fix_display_name_using_input_fqdn(app):
    """update() should prefer the *incoming* host's fqdn for display_name."""
    existing = _create_host(fqdn='host1.domain1.com', display_name=None)
    existing.display_name = None  # simulate a host that lost its display name
    db.session.commit()
    assert existing.display_name is None
    expected_fqdn = "different.domain1.com"
    existing.update(Host({"fqdn": expected_fqdn}, display_name=''))
    assert existing.display_name == expected_fqdn
def test_update_existing_host_fix_display_name_using_id(app):
    """With no fqdn on either side, update() falls back to the host id."""
    existing = _create_host(fqdn=None, display_name=None)
    existing.display_name = None  # simulate a host that lost its display name
    db.session.commit()
    assert existing.display_name is None
    existing.update(Host({}, display_name=''))
    assert existing.display_name == existing.id
|
from youtube_searcher import extract_videos
import pafy
class EuroNewsLiveStream:
    """Resolve the EuroNews live-stream URL on YouTube for a language."""

    # Supported primary language subtag -> EuroNews YouTube channel.
    lang2url = {
        "en": "https://www.youtube.com/user/Euronews",
        "ru": "https://www.youtube.com/user/euronewsru",
        "pt": "https://www.youtube.com/user/euronewspt",
        "it": "https://www.youtube.com/user/euronewsit",
        "fr": "https://www.youtube.com/user/euronewsfr",
        "de": "https://www.youtube.com/user/euronewsde",
        "es": "https://www.youtube.com/user/euronewses"
    }

    def __init__(self, lang="en-us"):
        """Accept a bare code ("en") or a full locale tag ("en-US")."""
        code = lang.lower()
        if code not in self.lang2url:
            # Fall back to the primary subtag, e.g. "en-us" -> "en".
            code = code[:2]
        self.lang = code
        if self.lang not in self.lang2url:
            raise ValueError("Unsupported language")
        self._stream = None  # lazily resolved by the `stream` property

    @property
    def url(self):
        """Channel page URL for the configured language."""
        return EuroNewsLiveStream.lang2url[self.lang]

    @property
    def stream(self):
        """Direct stream URL, resolved once and then cached."""
        if self._stream is None:
            self._stream = self.get_stream(self.lang)
        return self._stream

    @staticmethod
    def get_stream(lang):
        """Return the best-quality URL of the channel's live video, if any."""
        if lang not in EuroNewsLiveStream.lang2url:
            raise ValueError("Unsupported language")
        channel = EuroNewsLiveStream.lang2url[lang]
        for entry in extract_videos(channel):
            if entry["is_live"]:
                return pafy.new(entry["videoId"]).getbest().url
# Demo: print the resolved language, channel URL, and live-stream URL.
# NOTE(review): `stream` queries YouTube via youtube_searcher/pafy — network required.
e = EuroNewsLiveStream()
print(e.lang)
print(e.url)
print(e.stream)
import urllib.request
import csv
from bs4 import BeautifulSoup
# Roster page per team; the team abbreviation is appended as the query value.
BASE_URL = "http://www.nfl.com/teams/roster?team="
# All 32 team abbreviations as used by nfl.com roster URLs.
teams = ["NE", "BUF", "NYJ", "MIA", "BAL", "CLE", "PIT", "CIN", "IND", "HOU", "JAX", "TEN", "KC", "OAK", "DEN", "LAC", "DAL", "PHI", "NYG", "WAS", "GB", "MIN", "CHI", "DET", "NO", "CAR", "TB", "ATL", "SF", "SEA", "LA", "ARI"]
def remove_td(s):
    """Strip a literal '<td>'/'</td>' wrapper from a cell's HTML string."""
    # Assumes the input is exactly of the form '<td>...</td>'.
    return s[4:-5]
def bday(s):
    """Split an 'MM/DD/YYYY' string into its three components."""
    fields = s.split(r"/")
    return (fields[0], fields[1], fields[2])
def split_name(s):
    """Split a 'Last, First' string into a (last, first) tuple, trimmed."""
    parts = s.split(",")
    return (parts[0].strip(), parts[1].strip())
print(len(teams))
# Scrape every team roster page and write active players to a CSV file.
with open("nfl-data.csv", 'w') as csv_file:
    writer = csv.writer(csv_file)
    for name in teams:
        team = name
        url = BASE_URL + team
        page = urllib.request.urlopen(url)
        soup = BeautifulSoup(page, 'html.parser')
        # Roster rows live in the table with id="result".
        players = soup.find(id="result").find("tbody").find_all("tr")
        for player in players:
            player_soup = player.find_all("td")
            # Column layout: 0=number, 1=name, 3=status, 6=birthday — per the
            # nfl.com roster table at the time this was written; confirm.
            number = remove_td(str(player_soup[0]))
            (last_name, first_name) = split_name(player_soup[1].find("a").string)
            active_status = remove_td(str(player_soup[3]))
            (month, day, year) = bday(remove_td(str(player_soup[6])))
            # Only keep players on the active roster.
            if active_status == "ACT":
                writer.writerow([team, number, first_name, last_name, month, day, year])
# with open('nfl-data.csv', 'a') as csv_file:
# writer = csv.writer(csv_file)
# writer.writerow([name, price, birthday])
"""Custom exceptions used by Annif"""
from click import ClickException
class AnnifException(ClickException):
    """Base Annif exception. We define this as a subclass of ClickException so
    that the CLI can automatically handle exceptions. This exception cannot be
    instantiated directly - subclasses should be used instead."""

    # subclasses should set this to a descriptive prefix
    prefix = None

    def __init__(self, message, project_id=None, backend_id=None):
        super().__init__(message)
        self.project_id = project_id
        self.backend_id = backend_id
        if self.prefix is None:
            raise TypeError("Cannot instantiate exception without a prefix.")

    def format_message(self):
        """Render the message, scoped to a project or backend when known."""
        if self.project_id is not None:
            return "{} project '{}': {}".format(
                self.prefix, self.project_id, self.message)
        if self.backend_id is not None:
            return "{} backend '{}': {}".format(
                self.prefix, self.backend_id, self.message)
        return "{}: {}".format(self.prefix, self.message)
class NotInitializedException(AnnifException):
    """Exception raised for attempting to use a project or backend that
    cannot be initialized, most likely since it is not yet functional
    because of lack of vocabulary or training."""

    # prefix consumed by AnnifException.format_message
    prefix = "Couldn't initialize"
class ConfigurationException(AnnifException):
    """Exception raised when a project or backend is misconfigured."""

    # prefix consumed by AnnifException.format_message
    prefix = "Misconfigured"
class NotSupportedException(AnnifException):
    """Exception raised when an operation is not supported by a project or
    backend."""

    # prefix consumed by AnnifException.format_message
    prefix = "Not supported"
class OperationFailedException(AnnifException):
    """Exception raised when an operation fails for some unknown reason."""

    # prefix consumed by AnnifException.format_message
    prefix = "Operation failed"
|
from utils import *
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import log_loss
from sklearn.cross_validation import cross_val_score, StratifiedKFold
# NOTE(review): Python 2 only (`xrange`, sklearn.cross_validation) — will not run on py3.
train_features, train_labels, test_features, ids, outfile = read_data()
# Entropy-criterion forest; hyperparameters look hand-tuned — confirm with the author.
model = RandomForestClassifier(n_estimators=1000, criterion= "entropy",n_jobs=-1, max_depth = 35, min_samples_split=4, min_samples_leaf = 2)
# Save 5 rounds of out-of-fold train / test probabilities for later stacking.
for i in xrange(5):
    probs, tprobs = cross_val_model(model, train_features, train_labels, test_features, return_trainprobs=True)
    np.savez(outfile+"_feature"+str(i), train=tprobs, test=probs)
#save_submission(outfile, ids=ids, probs=probs)
#print "saved model, validating now"
#print cross_val_score(model, train_features, train_labels, scoring= "log_loss")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Minos Daemon :
Minos is the name of one of the judges of the underworld in Greek mythology.
The role of the daemon is to take as input the list of PFNs declared bad
and to classify them into 2 categories :
- The ones temporarily available
- The ones that have a real problem and that will need to be recovered by the necromancer
'''
import argparse
import signal
from rucio.daemons.badreplicas.minos import run, stop
def get_parser():
    """
    Returns the argparse parser.
    """
    minos_parser = argparse.ArgumentParser(description="The role of the daemon is get as input the list of PFNs declared bad and to classify them into 2 categories: the temporary available ones, and the ones that have a real problem and that will need to be recovered by the necromancer.")  # noqa E501
    # --run-once limits the daemon to a single loop iteration.
    minos_parser.add_argument("--run-once", action="store_true", default=False, help='Runs one loop iteration')
    minos_parser.add_argument("--threads", action="store", default=1, type=int, help='Concurrency control: number of threads')
    minos_parser.add_argument("--bulk", action="store", default=1000, type=int, help='Bulk control: number of requests per cycle')
    minos_parser.add_argument('--sleep-time', action="store", default=60, type=int, help='Seconds to sleep if few requests')
    return minos_parser
if __name__ == "__main__":
    # Bind our callback to the SIGTERM signal and run the daemon:
    signal.signal(signal.SIGTERM, stop)
    parser = get_parser()
    args = parser.parse_args()
    try:
        # Blocks until the daemon stops (or one iteration with --run-once).
        run(threads=args.threads, bulk=args.bulk, once=args.run_once, sleep_time=args.sleep_time)
    except KeyboardInterrupt:
        # Ctrl-C: shut the daemon down cleanly, same as SIGTERM.
        stop()
|
#!/usr/bin/env python3
"""PWM Led using RPi.GPIO."""
import RPi.GPIO as GPIO
import time
import sys
def do_led_things(pin_led):
    """Fade an LED up and down forever via PWM until interrupted (Ctrl-C)."""
    GPIO.setup(pin_led, GPIO.OUT)   # configure the pin as an output
    pwm = GPIO.PWM(pin_led, 50)     # 50 Hz PWM carrier
    pwm.start(0)
    try:
        while True:
            # One breathing cycle: duty 0 -> 95 then 100 -> 5, in 5% steps.
            for duty in list(range(0, 100, 5)) + list(range(100, 0, -5)):
                pwm.ChangeDutyCycle(duty)
                time.sleep(0.1)
    except KeyboardInterrupt:
        pwm.stop()
if __name__ == "__main__":
    # Require exactly one argument: the BCM pin number driving the LED.
    if len(sys.argv) != 2:
        print("uso: " + sys.argv[0] + " <led_BCM_pin>")
        # BUG FIX: use sys.exit with a nonzero status instead of the
        # site-provided exit() builtin, so a usage error is reported as
        # failure to the shell (exit() also breaks under `python -S`).
        sys.exit(1)
    # init GPIO mode
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    # send pwm signals to the led
    do_led_things(int(sys.argv[1]))
    # cleanup GPIO pin's
    GPIO.cleanup()
    print("\nBye!!")
|
from flask import Flask, render_template, request, url_for, jsonify
from numpy import matrix
from math import *
# globals() returns a dictionary of globals variables. You can create a new variable by simply writing globals()['a'] = 1, and that would lead to creation of a variable a = 1
# `ans` holds the last evaluated result so users can reference it in queries.
globals()['ans'] = 0 # Default value for answer
# Initialize the Flask application
app = Flask(__name__)
# Route default URL, changes as per the HTTP request is GET or POST
@app.route("/", methods = ['GET','POST'])
def index():
    """Evaluate a calculator query POSTed from the form and render the result.

    SECURITY NOTE(review): the query string is passed straight to eval(),
    which executes arbitrary Python from the request — never expose this
    endpoint to untrusted users.
    NOTE(review): `print e` statements make this file Python 2 only.
    """
    if request.method == 'POST':
        query = request.form['query'] # Query received from the form's textarea - from whatever form that makes a POST request to this URL
        assign_flag = False # Keep a flag in case user tries to assign something to a variable
        # Allowance for assignments in input query
        if "=" in query:
            assign_flag = True # Set flag true - user trying to assign a variable
            equation = [el.strip() for el in query.split("=")]
            query = equation[1] # The expression to be evaluated
            # globals()[temp[0]] = eval(query) # The variable to assign the result of that expression
        try:
            if not '.' in query:
                query = query.lower() # Get rid of case-sensitivity only if matrix methods aren't accessed
            result = eval(query) # eval() evaluates a string which contains a method - built-in python function
            try: # Handle matrices
                if(type(result)==type(list())):
                    result = matrix(result)
            except Exception as e:
                print e
                print 'Calculations: Error parsing matrix' # Log error to server console
            globals()['ans'] = result # Stores the last result in the variable answer
        except Exception as e:
            print e
            result = "Invalid expression"
        if assign_flag: # If user intended to create a variable
            globals()[equation[0]] = result # Store evaluated expression in the variable
    else: # If request method is GET or something else
        result = ''
    return render_template('index.html', result = result)
if __name__ == "__main__":
    # Development server only — not for production deployment.
    app.run()
|
import twitter
import time
from xml.sax.saxutils import unescape
'''
Basic Twitter corpus script that gathers tweets based on smileys,
tags them correspondingly as negative or positive, and puts them in our
corpus so they can later be used by our machine learning algorithm.
'''
# NOTE(review): Python 2 only (`print` statement below); twitter.Api() is
# unauthenticated, so the very low anonymous rate limit applies.
CORPUS="corpusnew3"
api=twitter.Api()
# determines sleep time based on how many calls (to not exceed Twitter's call limit)
# max amount for unauthorized twitter api gathering is 1500 tweets per hour
# or 125 calls
hour=3600
calls_per_hour=6
timetosleep=hour/calls_per_hour
# NOTE(review): file handle is never closed; relies on process exit to flush.
sentimentcorpus = open(CORPUS, 'a')
page=1
# the loop which runs indefinitely to gather data
while 1:
    posstatuses=api.GetSearch(term=":-)",per_page=100,lang="en",page=page)
    negstatuses=api.GetSearch(term=":-(",per_page=100,lang="en",page=page)
    page=page+1
    # smiley tweets are labeled positive, frowny tweets negative
    for s in posstatuses:
        par=(unescape(s.text),"positive");
        sentimentcorpus.write(repr(par))
        sentimentcorpus.write('\n')
    for s in negstatuses:
        par=(unescape(s.text),"negative");
        sentimentcorpus.write(repr(par))
        sentimentcorpus.write('\n')
    time.sleep(timetosleep)
    print "sleeping for some time cause of twitter max api calls"
|
from playwright.sync_api import sync_playwright
# Launch a visible Chromium instance routed through a local proxy, fetch
# httpbin's echo endpoint, and dump the rendered page HTML.
with sync_playwright() as p:
    browser = p.chromium.launch(headless=False, proxy={
        # assumes a proxy is listening on 7890 (e.g. Clash default) — confirm
        'server': 'http://127.0.0.1:7890'
    })
    page = browser.new_page()
    page.goto('https://httpbin.org/get')
    print(page.content())
    browser.close()
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Header
from sensor_msgs.msg import PointCloud,ChannelFloat32
from geometry_msgs.msg import Point32
import random
def generate_simulated_points(stage):
    """Build and publish a randomized, color-tagged PointCloud of 16 blocks.

    NOTE(review): Python 2 only (`print` statements). The publishing code at
    the bottom uses the local `points`/`channels`, so it is treated as part of
    this function — confirm against the original file's indentation.
    """
    # Per-stage offsets added to every point; values look like map-frame
    # origins for stages A/B/C — TODO confirm units against the robot config.
    x_mod, y_mod, z_mod = 0,0,0
    if stage == 'A':
        x_mod, y_mod, z_mod = 85,0,-10
    elif stage == 'B':
        x_mod, y_mod, z_mod = 9,0,0
    elif stage == 'C':
        x_mod, y_mod, z_mod = -67,0,-6
    # Four blocks of each color and 16 candidate slots (coordinates in cm).
    colors = ["blue","blue","blue","blue","red","red","red","red","yellow","yellow","yellow","yellow","green","green","green","green"]
    coordinates = [(0,215.3,25),(0,215.3,28.81),(6.35,215.3,25),(6.35,215.3,28.81),(6.35*2,215.3,25),(6.35*2,215.3,28.81),(6.35*3,215.3,25),(6.35*3,215.3,28.81),(6.35*4,215.3,25),(6.35*4,215.3,28.81),(6.35*5,215.3,25),(6.35*5,215.3,28.81),(6.35*6,215.3,25),(6.35*6,215.3,28.81),(6.35*7,215.3,25),(6.35*7,215.3,28.81)]
    found_points = []
    # Pair each color with a distinct slot: sample both without replacement.
    for p in range(16):
        color = random.randint(0,len(colors)-1)
        coor = random.randint(0,len(coordinates)-1)
        found_points.append([colors[color],coordinates[coor]])
        del colors[color]
        del coordinates[coor]
    print found_points
    points = []
    channels = [[],[],[]]  # parallel R/G/B channel value lists
    for p in found_points:
        if p[0] == "blue":
            channels[0].append(0) #R
            channels[1].append(0) #G
            channels[2].append(1) #B
        elif p[0] == "red":
            channels[0].append(1) #R
            channels[1].append(0) #G
            channels[2].append(0) #B
        elif p[0] == "green":
            channels[0].append(0) #R
            channels[1].append(1) #G
            channels[2].append(0) #B
        elif p[0] == "yellow":
            channels[0].append(1) #R
            channels[1].append(1) #G
            channels[2].append(0) #B
        # cm -> m, +/-5 mm positional jitter, plus the stage offset.
        points.append(Point32(
            x=p[1][0]/100.0 + random.uniform(-.005, .005) + x_mod,
            y=p[1][1]/100.0 + random.uniform(-.005, .005) + y_mod,
            z=p[1][2]/100.0 + random.uniform(-.005, .005) + z_mod
            )
        )
    rospy.init_node('temp')
    point_pub = rospy.Publisher("/camera/block_point_cloud", PointCloud, queue_size=1)
    rgb_channels = [ChannelFloat32(name="r", values=channels[0]),
                    ChannelFloat32(name="g", values=channels[1]),
                    ChannelFloat32(name="b", values=channels[2])]
    print "Publishing..."
    point_pub.publish(PointCloud(
        header=Header(
            stamp=rospy.Time.now(),
            frame_id="map"
        ),
        points=points,
        channels=rgb_channels
        )
    )
# Flow control
##############################################################################################
# if / else
mood = input("How are you felling today? >> ")
"""
if mood == "happy":
    print("It is great to see you happy!")
else:
    print("Cheer up, mate!")
"""
# Map each known mood to its reply; anything else gets the default answer.
replies = {
    "happy": "It is great to see you happy!",
    "nervous": "Take a deep breath 3 times.",
    "angry": "Don't be mad.",
}
print(replies.get(mood, "Cheer up, mate!"))
|
# Read whitespace-separated tokens and report "yes" if all are distinct.
tokens = input().split()
tokens.sort()
verdict = "yes"
# After sorting, any duplicate must appear in adjacent positions.
for left, right in zip(tokens, tokens[1:]):
    if left == right:
        verdict = "no"
        break
print(verdict)
|
# To reverse any inputted non-negative number digit by digit.
number = int(input("Enter the number:\n"))
reverse = 0
while number > 0:
    # Peel the last digit off `number` and push it onto `reverse`.
    number, digit = divmod(number, 10)
    reverse = reverse * 10 + digit
print("The number when reversed is:", reverse)
import numpy as np
import scipy.io as sio
from sklearn.feature_selection import VarianceThreshold as VarThresh
# for feature selection i have a few ideas. 1) run feature selection over the whole matrix of features.
#2) remove some of the recordings and do it a few times (so manually k-folding), because that way if the same features are removed
#then we know that for real those are the features not helpful
# NOTE(review): Python 2 only (`print` statements below).
xtrain_aud = sio.loadmat('xtrain_all_aud.mat')
xtrain_aud = xtrain_aud['xtrain']
ytrain_aud = sio.loadmat('ytrain_all_aud.mat')
ytrain_aud = ytrain_aud['ytrain']
# method 1: variance threshold
Var_selector = VarThresh(.5)
# without any parameters passed to varthresh it defaults to anything with all features the exact same
# am going to start with .1
Var_selector.fit(xtrain_aud)
which_feats = Var_selector.get_support()  # boolean mask of retained features
x_aud_fitted = Var_selector.transform(xtrain_aud)
print x_aud_fitted.shape
xtrunclength = sio.loadmat('xtrunclength.mat')
xtrunclength = xtrunclength['xtrunclength']
xtesting = sio.loadmat('xtesting.mat')
xtesting = xtesting['xtesting']
# Drop any rows containing NaN/inf before classification.
xtesting = xtesting[~np.isnan(xtesting).any(axis=1),:]
xtesting = xtesting[~np.isinf(xtesting).any(axis=1),:]
from CurrentThingsNeededtoRun import FinalClassifier
# NOTE(review): the four lines below reload and re-clean xtesting — an exact
# duplicate of the block above; harmless but redundant.
xtesting = sio.loadmat('xtesting.mat')
xtesting = xtesting['xtesting']
xtesting = xtesting[~np.isnan(xtesting).any(axis=1),:]
xtesting = xtesting[~np.isinf(xtesting).any(axis=1),:]
# Keep only the variance-selected columns for the second run.
xtesting_new = xtesting[:,which_feats]
#xtesting_new = xtesting[:,0:1]
#x_aud_fitted = xtrain_aud[:,0:1]
print x_aud_fitted.shape
print xtesting_new.shape
#xtesting_shortened = xtesting[:,0:6]
print 'getting ready to test'
# Run the classifier once on all features, once on the selected subset.
pow_class_target_string = FinalClassifier.myclassify_practice_set(numfiers=3, xtrain=xtrain_aud, ytrain=ytrain_aud, xtest=xtesting)
print 'power classifier results'
print pow_class_target_string
pow_class_target_string1 = FinalClassifier.myclassify_practice_set(numfiers=3, xtrain=x_aud_fitted, ytrain=ytrain_aud, xtest=xtesting_new)
print 'power classifier results'
print pow_class_target_string1
# Generated by Django 2.1 on 2018-08-07 09:56
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename Muse_Device.MAC_address to mac_address."""

    dependencies = [
        ('mpcontroller', '0007_auto_20180807_0902'),
    ]

    operations = [
        migrations.RenameField(
            model_name='muse_device',
            old_name='MAC_address',
            new_name='mac_address',
        ),
    ]
|
import contextlib
import random
import socket
import warnings
import eventlet
from eventlet import greenio
from eventlet.green import socket
try:
    from eventlet.green import ssl
except ImportError:
    # No SSL support in this build: tell the test runner to skip this module.
    __test__ = False
import six
import tests
def listen_ssl_socket(address=('localhost', 0), **kwargs):
    """Return a listening server-side SSL socket bound to *address*.

    Uses the test-suite key/cert pair; extra kwargs pass to wrap_socket.
    """
    sock = ssl.wrap_socket(
        socket.socket(),
        tests.private_key_file,
        tests.certificate_file,
        server_side=True,
        **kwargs
    )
    sock.bind(address)
    # Backlog of 50 pending connections.
    sock.listen(50)
    return sock
class SSLTest(tests.LimitedTestCase):
    """Tests for eventlet's green ssl module: handshakes, close/unwrap,
    buffered reads, sendall behavior, and server-side accept paths."""

    def setUp(self):
        # disabling socket.ssl warnings because we're testing it here
        warnings.filterwarnings(
            action='ignore',
            message='.*socket.ssl.*',
            category=DeprecationWarning)
        super(SSLTest, self).setUp()

    def test_duplex_response(self):
        """Client sends, then reads the server's response on the same socket."""
        def serve(listener):
            sock, addr = listener.accept()
            sock.recv(8192)
            sock.sendall(b'response')
        sock = listen_ssl_socket()
        server_coro = eventlet.spawn(serve, sock)
        client = ssl.wrap_socket(eventlet.connect(sock.getsockname()))
        client.sendall(b'line 1\r\nline 2\r\n\r\n')
        self.assertEqual(client.recv(8192), b'response')
        server_coro.wait()

    def test_ssl_context(self):
        """Client connects through an SSLContext with hostname verification."""
        def serve(listener):
            sock, addr = listener.accept()
            sock.recv(8192)
            sock.sendall(b'response')
        sock = listen_ssl_socket()
        server_coro = eventlet.spawn(serve, sock)
        context = ssl.create_default_context()
        context.verify_mode = ssl.CERT_REQUIRED
        context.check_hostname = True
        context.load_verify_locations(tests.certificate_file)
        client = context.wrap_socket(
            eventlet.connect(sock.getsockname()),
            server_hostname='Test')
        client.sendall(b'line 1\r\nline 2\r\n\r\n')
        self.assertEqual(client.recv(8192), b'response')
        server_coro.wait()

    def test_ssl_close(self):
        """Server sees EOF (or ZeroReturnError) after the client closes."""
        def serve(listener):
            sock, addr = listener.accept()
            sock.recv(8192)
            try:
                self.assertEqual(b'', sock.recv(8192))
            except greenio.SSL.ZeroReturnError:
                pass
        sock = listen_ssl_socket()
        server_coro = eventlet.spawn(serve, sock)
        raw_client = eventlet.connect(sock.getsockname())
        client = ssl.wrap_socket(raw_client)
        client.sendall(b'X')
        greenio.shutdown_safe(client)
        client.close()
        server_coro.wait()

    def test_ssl_connect(self):
        """connect() on an already-wrapped plain socket works."""
        def serve(listener):
            sock, addr = listener.accept()
            sock.recv(8192)
        sock = listen_ssl_socket()
        server_coro = eventlet.spawn(serve, sock)
        raw_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ssl_client = ssl.wrap_socket(raw_client)
        ssl_client.connect(sock.getsockname())
        ssl_client.sendall(b'abc')
        greenio.shutdown_safe(ssl_client)
        ssl_client.close()
        server_coro.wait()

    def test_recv_after_ssl_connect(self):
        def serve(listener):
            sock, addr = listener.accept()
            sock.sendall(b'hjk')
        sock = listen_ssl_socket()
        server_coro = eventlet.spawn(serve, sock)
        raw_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ssl_client = ssl.wrap_socket(raw_client)
        # Important: We need to call connect() on an SSL socket, not a plain one.
        # The bug was affecting that particular combination (create plain socket,
        # wrap, call connect() on the SSL socket and try to recv) on Python 3.5.
        ssl_client.connect(sock.getsockname())
        # The call to recv used to fail with:
        # Traceback (most recent call last):
        #   File "tests/ssl_test.py", line 99, in test_recv_after_ssl_connect
        #     self.assertEqual(ssl_client.recv(3), b'hjk')
        #   File "eventlet/green/ssl.py", line 194, in recv
        #     return self._base_recv(buflen, flags, into=False)
        #   File "eventlet/green/ssl.py", line 227, in _base_recv
        #     read = self.read(nbytes)
        #   File "eventlet/green/ssl.py", line 139, in read
        #     super(GreenSSLSocket, self).read, *args, **kwargs)
        #   File "eventlet/green/ssl.py", line 113, in _call_trampolining
        #     return func(*a, **kw)
        #   File "PYTHONLIB/python3.5/ssl.py", line 791, in read
        #     return self._sslobj.read(len, buffer)
        # TypeError: read() argument 2 must be read-write bytes-like object, not None
        self.assertEqual(ssl_client.recv(3), b'hjk')
        greenio.shutdown_safe(ssl_client)
        ssl_client.close()
        server_coro.wait()

    def test_ssl_unwrap(self):
        """STARTTLS-style flow: plain -> TLS -> back to plain on one socket."""
        def serve():
            sock, addr = listener.accept()
            self.assertEqual(sock.recv(6), b'before')
            sock_ssl = ssl.wrap_socket(sock, tests.private_key_file, tests.certificate_file,
                                       server_side=True)
            sock_ssl.do_handshake()
            self.assertEqual(sock_ssl.recv(6), b'during')
            sock2 = sock_ssl.unwrap()
            self.assertEqual(sock2.recv(5), b'after')
            sock2.close()
        listener = eventlet.listen(('127.0.0.1', 0))
        server_coro = eventlet.spawn(serve)
        client = eventlet.connect(listener.getsockname())
        client.sendall(b'before')
        client_ssl = ssl.wrap_socket(client)
        client_ssl.do_handshake()
        client_ssl.sendall(b'during')
        client2 = client_ssl.unwrap()
        client2.sendall(b'after')
        server_coro.wait()

    def test_sendall_cpu_usage(self):
        """SSL socket.sendall() busy loop

        https://bitbucket.org/eventlet/eventlet/issue/134/greenssl-performance-issues

        Idea of this test is to check that GreenSSLSocket.sendall() does not busy loop
        retrying .send() calls, but instead trampolines until socket is writeable.

        BUFFER_SIZE and SENDALL_SIZE are magic numbers inferred through trial and error.
        """
        # Time limit resistant to busy loops
        self.set_alarm(1)
        stage_1 = eventlet.event.Event()
        BUFFER_SIZE = 1000
        SENDALL_SIZE = 100000

        def serve(listener):
            conn, _ = listener.accept()
            conn.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, BUFFER_SIZE)
            self.assertEqual(conn.recv(8), b'request')
            conn.sendall(b'response')
            stage_1.wait()
            conn.sendall(b'x' * SENDALL_SIZE)
        server_sock = listen_ssl_socket()
        server_coro = eventlet.spawn(serve, server_sock)
        client_sock = eventlet.connect(server_sock.getsockname())
        client_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, BUFFER_SIZE)
        client = ssl.wrap_socket(client_sock)
        client.sendall(b'request')
        self.assertEqual(client.recv(8), b'response')
        stage_1.send()
        # Fail if the hub burns CPU while sendall is waiting for a writable socket.
        tests.check_idle_cpu_usage(0.2, 0.1)
        server_coro.kill()

    def test_greensslobject(self):
        def serve(listener):
            sock, addr = listener.accept()
            sock.sendall(b'content')
            greenio.shutdown_safe(sock)
            sock.close()
        listener = listen_ssl_socket()
        eventlet.spawn(serve, listener)
        client = ssl.wrap_socket(eventlet.connect(listener.getsockname()))
        self.assertEqual(client.recv(1024), b'content')
        self.assertEqual(client.recv(1024), b'')

    def test_regression_gh_17(self):
        # https://github.com/eventlet/eventlet/issues/17
        # ssl wrapped but unconnected socket methods go special code path
        # test that path at least for syntax/typo errors
        sock = ssl.wrap_socket(socket.socket())
        sock.settimeout(0.01)
        try:
            sock.sendall(b'')
        except ssl.SSLError as e:
            assert 'timed out' in str(e)

    def test_no_handshake_block_accept_loop(self):
        """A client that never handshakes must not stall the accept loop."""
        listener = listen_ssl_socket()
        listener.settimeout(0.3)

        def serve(sock):
            try:
                name = sock.recv(8)
                sock.sendall(b'hello ' + name)
            except Exception:
                # ignore evil clients
                pass
            finally:
                greenio.shutdown_safe(sock)
                sock.close()

        def accept_loop():
            while True:
                try:
                    sock, _ = listener.accept()
                except socket.error:
                    return
                eventlet.spawn(serve, sock)

        loopt = eventlet.spawn(accept_loop)
        # evil no handshake
        evil = eventlet.connect(listener.getsockname())
        good = ssl.wrap_socket(eventlet.connect(listener.getsockname()))
        good.sendall(b'good')
        response = good.recv(16)
        good.close()
        assert response == b'hello good'
        evil.close()
        listener.close()
        loopt.wait()
        eventlet.sleep(0)

    def test_receiving_doesnt_block_if_there_is_already_decrypted_buffered_data(self):
        # Here's what could (and would) happen before the relevant bug was fixed (assuming method
        # M was trampolining unconditionally before actually reading):
        # 1. One side sends n bytes, leaves connection open (important)
        # 2. The other side uses method M to read m (where m < n) bytes, the underlying SSL
        #    implementation reads everything from the underlying socket, decrypts all n bytes,
        #    returns m of them and buffers n-m to be read later.
        # 3. The other side tries to read the remainder of the data (n-m bytes), this blocks
        #    because M trampolines uncoditionally and trampoline will hang because reading from
        #    the underlying socket would block. It would block because there's no data to be read
        #    and the connection is still open; leaving the connection open /mentioned in 1./ is
        #    important because otherwise trampoline would return immediately and the test would pass
        #    even with the bug still present in the code).
        #
        # The solution is to first request data from the underlying SSL implementation and only
        # trampoline if we actually need to read some data from the underlying socket.
        #
        # GreenSSLSocket.recv() wasn't broken but I've added code to test it as well for
        # completeness.
        content = b'xy'

        def recv(sock, expected):
            assert sock.recv(len(expected)) == expected

        def recv_into(sock, expected):
            buf = bytearray(len(expected))
            assert sock.recv_into(buf, len(expected)) == len(expected)
            assert buf == expected

        for read_function in [recv, recv_into]:
            print('Trying %s...' % (read_function,))
            listener = listen_ssl_socket()

            def accept(listener):
                sock, addr = listener.accept()
                sock.sendall(content)
                return sock

            accepter = eventlet.spawn(accept, listener)
            client_to_server = None
            try:
                client_to_server = ssl.wrap_socket(eventlet.connect(listener.getsockname()))
                for character in six.iterbytes(content):
                    character = six.int2byte(character)
                    print('We have %d already decrypted bytes pending, expecting: %s' % (
                        client_to_server.pending(), character))
                    read_function(client_to_server, character)
            finally:
                if client_to_server is not None:
                    client_to_server.close()
                server_to_client = accepter.wait()
                # Very important: we only want to close the socket *after* the other side has
                # read the data it wanted already, otherwise this would defeat the purpose of the
                # test (see the comment at the top of this test).
                server_to_client.close()
                listener.close()

    def test_context_wrapped_accept(self):
        """accept() on a socket wrapped via SSLContext.wrap_socket works."""
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.load_cert_chain(tests.certificate_file, tests.private_key_file)
        expected = "success:{}".format(random.random()).encode()

        def client(addr):
            client_tls = ssl.wrap_socket(
                eventlet.connect(addr),
                cert_reqs=ssl.CERT_REQUIRED,
                ca_certs=tests.certificate_file,
            )
            client_tls.send(expected)

        server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_sock.bind(('localhost', 0))
        server_sock.listen(1)
        eventlet.spawn(client, server_sock.getsockname())
        server_tls = context.wrap_socket(server_sock, server_side=True)
        peer, _ = server_tls.accept()
        assert peer.recv(64) == expected
        peer.close()

    def test_explicit_keys_accept(self):
        """accept() on a socket wrapped with explicit keyfile/certfile works."""
        expected = "success:{}".format(random.random()).encode()

        def client(addr):
            client_tls = ssl.wrap_socket(
                eventlet.connect(addr),
                cert_reqs=ssl.CERT_REQUIRED,
                ca_certs=tests.certificate_file,
            )
            client_tls.send(expected)

        server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_sock.bind(('localhost', 0))
        server_sock.listen(1)
        eventlet.spawn(client, server_sock.getsockname())
        server_tls = ssl.wrap_socket(
            server_sock, server_side=True,
            keyfile=tests.private_key_file, certfile=tests.certificate_file,
        )
        peer, _ = server_tls.accept()
        assert peer.recv(64) == expected
        peer.close()
|
import sys
import winreg
from argparse import ArgumentParser
def search(needle):
    """Print every installed-MSI file whose path contains *needle*.

    Walks HKLM\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Installer\\UserData
    (forced 64-bit registry view) over all install users, all components, and
    all component values; matching is case-insensitive via casefold().
    """
    found = False
    with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Installer\\UserData", access=winreg.KEY_READ | winreg.KEY_WOW64_64KEY) as userDataParentHandle:
        for userDataIndex in range(0, winreg.QueryInfoKey(userDataParentHandle)[0]):
            user = winreg.EnumKey(userDataParentHandle, userDataIndex)
            with winreg.OpenKey(userDataParentHandle, user) as userDataHandle:
                with winreg.OpenKey(userDataHandle, "Components") as componentsParentHandle:
                    for componentIndex in range(0, winreg.QueryInfoKey(componentsParentHandle)[0]):
                        with winreg.OpenKey(componentsParentHandle, winreg.EnumKey(componentsParentHandle, componentIndex)) as componentHandle:
                            for valueIndex in range(0, winreg.QueryInfoKey(componentHandle)[1]):
                                # Value name is the product code; value data is the file path.
                                valueName, valueData = winreg.EnumValue(componentHandle, valueIndex)[0:2]
                                if needle.casefold() in valueData.casefold():
                                    with winreg.OpenKey(userDataHandle, "Products\\" + valueName + "\\InstallProperties") as propertiesHandle:
                                        # Blank line between hits, but not before the first one.
                                        if not found:
                                            found = True
                                        else:
                                            print()
                                        print("File: " + valueData)
                                        print("Product: " + winreg.QueryValueEx(propertiesHandle, "DisplayName")[0])
                                        print("Install user: " + user)
                                        print("Cached installer: " + winreg.QueryValueEx(propertiesHandle, "LocalPackage")[0])
    if not found:
        print('No file path containing "{}" was found in any installed package.'.format(needle))
def search_command(opts):
    """CLI adapter: run `search` with the parsed pattern argument."""
    return search(opts.pattern)
def packages_command(opts):
    """Placeholder handler for the `packages` subcommand."""
    print('{} - Not yet implemented'.format(opts.command))
def components_command(opts):
    """CLI handler for `components` -- placeholder until implemented."""
    message = '{} - Not yet implemented'.format(opts.command)
    print(message)
def create_parser(prog_name):
    """Build the CLI parser with the search/packages/components subcommands."""
    parser = ArgumentParser(prog=prog_name)
    sp = parser.add_subparsers(title='commands', dest='command', description='valid commands:')
    # locals renamed (*_p) so the module-level search() function is not
    # shadowed inside this scope
    search_p = sp.add_parser('search', help='Search for a file within an installed component')
    search_p.add_argument('pattern', help='Name of the file')
    search_p.set_defaults(func=search_command)
    packages_p = sp.add_parser('packages', help='Inventory packages on this system')
    packages_p.set_defaults(func=packages_command)
    components_p = sp.add_parser('components', help='Show components of a package')
    components_p.add_argument('pattern', help='Name of the package')
    components_p.set_defaults(func=components_command)
    return parser
def main():
    """Parse command-line arguments and dispatch to the chosen subcommand."""
    parser = create_parser(sys.argv[0])
    opts = parser.parse_args(sys.argv[1:])
    if hasattr(opts, 'func'):
        opts.func(opts)
    else:
        # no subcommand supplied: show usage instead
        parser.print_help()
if __name__ == '__main__':
    main()
|
input = """
a(2).
b(1,3).
e(2).
s(1).
c(X) :- not a(X), not e(Y), b(X,Y), not #count{V:s(V)} = 1.
"""
output = """
{a(2), b(1,3), e(2), s(1)}
"""
|
#!/usr/bin/env python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import json
from io import open
from training import preprocessing
from keras.models import Model, load_model
from keras.layers import merge, concatenate, multiply
from keras import backend as K
from training import constants
from training import fast_qa
from training import glove_embeddings
from training import preprocessing
import numpy as np
from io import open
from keras.utils import to_categorical
# Load the trained QA model once at import time; custom_objects supplies
# the names referenced inside the saved Keras graph.
model = load_model('models/model-4.hdf5', custom_objects={
    'backend': K,
    'concatenate': concatenate,
    'multiply': multiply,
    'wiq_b': fast_qa.wiq_b,
    'wiq_w': fast_qa.wiq_w,
    'get_question_wiq': fast_qa.get_question_wiq
})
# index -> word lookup, inverted from the embedding vocabulary.
# Fix: .items() instead of the Python-2-only .iteritems() -- identical
# result on Python 2 and keeps this line valid on Python 3 as well.
inverted = {v: k for k, v in glove_embeddings.word2idx.items()}
print('Model loaded.')
class S(BaseHTTPRequestHandler):
    """HTTP handler: GET serves index.html, POST answers a QA request
    using the module-level `model` (question answering over a paragraph)."""
    def _set_headers(self):
        # every response is a 200 text/html reply
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
    def do_GET(self):
        # serve the static front page
        # NOTE(review): written as Python-2 style str output; on Python 3
        # wfile.write expects bytes -- confirm target interpreter
        self._set_headers()
        self.wfile.write(open('index.html', 'r').read())
    def do_HEAD(self):
        self._set_headers()
    def do_POST(self):
        # body is JSON with "paragraph" and "question" keys
        content_length = int(self.headers['Content-Length'])  # <--- Gets the size of data
        post_data = json.loads(self.rfile.read(content_length))
        e_paragraph, e_question = preprocessing.get_encoded_input(post_data["paragraph"], post_data["question"])
        paragraph = np.array(e_paragraph)
        question = np.array(e_question)
        # pad/truncate in place to the fixed model input lengths
        paragraph.resize(constants.MAX_PARAGRAPH_LENGTH, refcheck=False)
        question.resize(constants.MAX_QUESTION_LENGTH, refcheck=False)
        # pad the batch to 40 rows with zero rows (model expects batch_size=40)
        paragraphs = np.concatenate([[paragraph], np.zeros((39, constants.MAX_PARAGRAPH_LENGTH))], axis=0)
        questions = np.concatenate([[question], np.zeros((39, constants.MAX_QUESTION_LENGTH))], axis=0)
        predictions = model.predict([paragraphs, questions], verbose=1, batch_size=40)
        # predictions[0]/[1] hold start/end position scores for row 0 (our input)
        start = np.argmax(predictions[0][0], axis=0)
        end = np.argmax(predictions[1][0], axis=0)
        # answer span is taken from the encoded paragraph, then mapped back to words
        answer = e_paragraph[start:end+1]
        result = [inverted[index] for index in answer]
        self._set_headers()
        self.wfile.write(" ".join(result))
def run(server_class=HTTPServer, handler_class=S, port=80):
    """Start the HTTP server on *port* and serve until interrupted."""
    server_address = ('', port)
    httpd = server_class(server_address, handler_class)
    # Fix: print() call form instead of the Python-2 print statement --
    # identical output on Python 2 and consistent with the
    # print('Model loaded.') call elsewhere in this file.
    print('Starting httpd...')
    httpd.serve_forever()
if __name__ == "__main__":
from sys import argv
if len(argv) == 2:
run(port=int(argv[1]))
else:
run()
|
# 3_7
# number of islands
def deleteIsland(grid, i, j):
    """Flood-fill: sink the island containing (i, j) by turning every
    4-connected "1" cell into "0"."""
    grid[i][j] = "0"
    # recurse into each in-bounds neighbour that is still land
    for ni, nj in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
        if 0 <= ni < len(grid) and 0 <= nj < len(grid[0]) and grid[ni][nj] == "1":
            deleteIsland(grid, ni, nj)
def numIslands(grid):
    """Count connected components of "1" cells (4-connectivity).

    Each time an unvisited land cell is found, the whole island is sunk
    via deleteIsland so it is counted exactly once.
    """
    if not grid:
        return 0
    count = 0
    for row in range(len(grid)):
        for col in range(len(grid[0])):
            if grid[row][col] == "1":
                count += 1
                deleteIsland(grid, row, col)
    return count
# Demo: a 4x5 grid containing three islands.
# Fix: the functions above compare cells against the *string* "1", so the
# grid must hold strings -- with int cells numIslands returned 0.
grid_new = [["1", "1", "0", "0", "0"],
            ["1", "1", "0", "0", "0"],
            ["0", "0", "1", "0", "0"],
            ["0", "0", "0", "1", "1"]]
print(numIslands(grid_new))  # expected: 3
"""
Classes.py that contains different classes and methods
for model.py for the labyrinth's game
Class : Labyrinth, Item, Character
Ludovic GROS
"""
import pygame
import random
from pygame.locals import *
from constant import *
class Labyrinth:
    """The maze: loads its layout from a text file and draws its tiles."""

    def __init__(self, file):
        self.file = file
        # becomes a list of rows (each row a list of one-character
        # sprites) once generator() has run
        self.structure = 0

    def generator(self):
        """Parse the layout file into self.structure.

        Every character of every line (newlines included) becomes one
        cell, so the structure mirrors the file exactly.
        """
        rows = []
        with open(self.file, 'r') as layout:
            for line in layout:
                rows.append([sprite for sprite in line])
        self.structure = rows

    def show(self, window):
        """Blit the maze tiles (walls, floors, exit, guardian) onto *window*."""
        # load and scale each tile image once per call
        wall = pygame.transform.scale(pygame.image.load(WALL_IMG).convert(), (SPT_SZ, SPT_SZ))
        floor = pygame.transform.scale(pygame.image.load(FLOOR_IMG).convert(), (SPT_SZ, SPT_SZ))
        floor_f = pygame.transform.scale(pygame.image.load(FLOOR_F).convert(), (SPT_SZ, SPT_SZ))
        # smoothscale gives a better rendering for the guardian sprite
        guardian = pygame.transform.smoothscale(pygame.image.load(GUARDIAN_IMG).convert_alpha(), (SPT_SZ, SPT_SZ))
        for num_line, line in enumerate(self.structure):
            for num_tile, sprite in enumerate(line):
                # pixel position of this tile
                x = num_tile * SPT_SZ
                y = num_line * SPT_SZ
                if sprite == 'w':
                    # wall tile
                    window.blit(wall, (x, y))
                elif sprite == 'e':
                    # ending tile
                    window.blit(floor_f, (x, y))
                elif sprite == 'g':
                    # guardian standing on a floor tile
                    window.blit(floor, (x, y))
                    window.blit(guardian, (x, y))
                elif sprite == 'd':
                    # dead guardian: plain floor instead
                    window.blit(floor, (x, y))
                elif sprite == '0':
                    # plain floor
                    window.blit(floor, (x, y))
class Item:
    """An item to pick up in the labyrinth, placed on a random free tile.

    Fix: position1/position2/position3 were three near-identical copies of
    the same placement/drawing logic; they now share one private helper
    parameterised by the structure tag ('o1'/'o2'/'o3').
    """

    def __init__(self, items, lab):
        # load the item's image
        item_ori = pygame.image.load(items).convert_alpha()
        # scale the image with the sprite size
        self.items = pygame.transform.smoothscale(item_ori, (SPT_SZ, SPT_SZ))
        self.lab = lab

    def _place_and_draw(self, window, tag):
        """Place this item on one random free ('0') tile, writing *tag*
        into the labyrinth structure, then blit the item image on every
        tile carrying that tag."""
        placed = 0
        # keep drawing random tiles until exactly one free tile is tagged
        while placed < 1:
            self.tile_x = random.randint(0, 14)
            self.tile_y = random.randint(0, 14)
            if self.lab.structure[self.tile_y][self.tile_x] == '0':
                self.lab.structure[self.tile_y][self.tile_x] = tag
                placed += 1
        # draw every tile carrying the tag (same redraw pass as before)
        for num_line, line in enumerate(self.lab.structure):
            for num_tile, sprite in enumerate(line):
                if sprite == tag:
                    window.blit(self.items, (num_tile * SPT_SZ, num_line * SPT_SZ))

    def position1(self, window):
        """Place and draw the first item (tag 'o1')."""
        self._place_and_draw(window, 'o1')

    def position2(self, window):
        """Place and draw the second item (tag 'o2')."""
        self._place_and_draw(window, 'o2')

    def position3(self, window):
        """Place and draw the third item (tag 'o3')."""
        self._place_and_draw(window, 'o3')
class Character:
    """The player character (MacGyver): grid position, movement, item
    pickup and guardian handling.

    Fix: the item-pickup / guardian logic was duplicated verbatim in all
    four direction branches of movement(); it is now a single private
    helper, _enter_tile().
    """

    def __init__(self, character, lab):
        # load macgyver's character image
        charac_ori = pygame.image.load(character).convert_alpha()
        # scale macgyver's image with the sprite size
        self.character = pygame.transform.smoothscale(charac_ori, (SPT_SZ, SPT_SZ))
        # tile (grid) coordinates of macgyver
        self.tile_x = 0
        self.tile_y = 7
        # pixel coordinates of macgyver
        self.x = 0
        self.y = self.tile_y * SPT_SZ
        self.lab = lab
        # craft counter: the final object is crafted once all 3 items are picked up
        self.craft = 0

    def _enter_tile(self):
        """Apply the effect of the tile macgyver just stepped on: pick up
        items ('oN' -> 'iN', craft counter +1) or kill the guardian
        ('g' -> 'd') once all 3 items have been crafted."""
        tile = self.lab.structure[self.tile_y][self.tile_x]
        if tile in ('o1', 'o2', 'o3'):
            # tag the tile as a picked-up item and count it towards the craft
            self.lab.structure[self.tile_y][self.tile_x] = 'i' + tile[1]
            self.craft += 1
        elif tile == 'g' and self.craft == 3:
            self.lab.structure[self.tile_y][self.tile_x] = 'd'

    def movement(self, character, window):
        """Move macgyver one tile in direction *character* ('right',
        'left', 'up' or 'down') when the target tile is inside the grid
        and not a wall, then redraw inventory and cleared tiles."""
        floor_ori = pygame.image.load(FLOOR_IMG).convert()
        floor = pygame.transform.scale(floor_ori, (SPT_SZ, SPT_SZ))
        item_ori = pygame.image.load(ITEM_IMG).convert_alpha()
        items1 = pygame.transform.smoothscale(item_ori, (SPT_SZ, SPT_SZ))
        item_ori = pygame.image.load(ITEM2_IMG).convert_alpha()
        items2 = pygame.transform.smoothscale(item_ori, (SPT_SZ, SPT_SZ))
        item_ori = pygame.image.load(ITEM3_IMG).convert_alpha()
        items3 = pygame.transform.smoothscale(item_ori, (SPT_SZ, SPT_SZ))
        item_ori = pygame.image.load(ITEM4_IMG).convert_alpha()
        items4 = pygame.transform.smoothscale(item_ori, (SPT_SZ, SPT_SZ))
        inventory_ori = pygame.image.load(INVENTORY).convert_alpha()
        inventory = pygame.transform.scale(inventory_ori, (SPT_SZ, SPT_SZ))
        if character == 'right':
            # stay inside the grid and do not walk through walls
            if self.tile_x < (SPT_NB_SD - 1) and self.lab.structure[self.tile_y][self.tile_x + 1] != 'w':
                self.tile_x += 1
                self.x = self.tile_x * SPT_SZ
                self._enter_tile()
        if character == 'left':
            if self.tile_x > 0 and self.lab.structure[self.tile_y][self.tile_x - 1] != 'w':
                self.tile_x -= 1
                self.x = self.tile_x * SPT_SZ
                self._enter_tile()
        if character == 'up':
            if self.tile_y > 0 and self.lab.structure[self.tile_y - 1][self.tile_x] != 'w':
                self.tile_y -= 1
                self.y = self.tile_y * SPT_SZ
                self._enter_tile()
        if character == 'down':
            if self.tile_y < (SPT_NB_SD - 1) and self.lab.structure[self.tile_y + 1][self.tile_x] != 'w':
                self.tile_y += 1
                self.y = self.tile_y * SPT_SZ
                self._enter_tile()
        # redraw pass: inventory slots for picked items, floor over cleared tiles
        num_line = 0
        for line in self.lab.structure:
            num_tile = 0
            for sprite in line:
                x = num_tile * SPT_SZ
                y = num_line * SPT_SZ
                if sprite == 'i1':
                    # show the item picked up in the inventory bar
                    window.blit(items1, (0, SPT_NB_SD * SPT_SZ))
                    # replace the picked-up item in the lab with the floor
                    window.blit(floor, (x, y))
                elif sprite == 'i2':
                    window.blit(items2, (1 * SPT_SZ, SPT_NB_SD * SPT_SZ))
                    window.blit(floor, (x, y))
                elif sprite == 'i3':
                    window.blit(items3, (2 * SPT_SZ, SPT_NB_SD * SPT_SZ))
                    window.blit(floor, (x, y))
                elif self.craft == 3:
                    # all three items collected: show the crafted 4th item and
                    # blank the three inventory slots.
                    # NOTE(review): this branch fires for every non-'iN' sprite
                    # once craft == 3 (including 'd'); this matches the original
                    # rendering order and is kept for parity.
                    window.blit(items4, (3 * SPT_SZ, SPT_NB_SD * SPT_SZ))
                    window.blit(inventory, (0, SPT_NB_SD * SPT_SZ))
                    window.blit(inventory, (1 * SPT_SZ, SPT_NB_SD * SPT_SZ))
                    window.blit(inventory, (2 * SPT_SZ, SPT_NB_SD * SPT_SZ))
                elif sprite == 'd':
                    # dead guardian: draw floor over its tile
                    window.blit(floor, (x, y))
                num_tile += 1
            num_line += 1
|
class RhinoObjectSelectionEventArgs(EventArgs):
 # Auto-generated .NET interop stub: the property lambdas are
 # documentation placeholders, not a real implementation.
 Document=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Document(self: RhinoObjectSelectionEventArgs) -> RhinoDoc
"""

 RhinoObjects=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: RhinoObjects(self: RhinoObjectSelectionEventArgs) -> Array[RhinoObject]
"""

 Selected=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Returns true if objects are being selected.
Returns false if objects are being deselected.
Get: Selected(self: RhinoObjectSelectionEventArgs) -> bool
"""
|
def letter_found(letter, secret_word, gamer_word):
    """Reveal every occurrence of *letter* in *gamer_word*.

    For each position where *secret_word* holds *letter*, the same slot
    of *gamer_word* is replaced by the uppercased character; the mutated
    list is returned.
    """
    for position, symbol in enumerate(secret_word):
        if symbol == letter:
            gamer_word[position] = symbol.upper()
    return gamer_word
|
import os
import sys
import shutil

# Copy hidden image/video assets out of dont_remove/ subdirectories into
# top-level images/ and videos/ directories.
# Fix: shutil.copy replaces the original os.popen('cp ...') shell calls --
# portable (no Unix cp dependency), no shell involved, and failures raise
# instead of being silently dropped.
path = os.getcwd()+"/dont_remove/"
primaryContents = os.listdir(path)
for entry in primaryContents:
    if os.path.isdir(path + entry):
        image_dir = path + entry + "/.image/"
        for image in os.listdir(image_dir):
            # destination keeps the original naming scheme: <name>.png
            shutil.copy(image_dir + image, os.path.join(os.getcwd(), "images", image + ".png"))
for entry in primaryContents:
    if os.path.isdir(path + entry):
        video_dir = path + entry + "/.video/"
        for video in os.listdir(video_dir):
            shutil.copy(video_dir + video, os.path.join(os.getcwd(), "videos", video + ".mp4"))
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.special as sp
#Create a plot of \theta vs L(\theta)
# grid over theta in [theta_0, theta_end), sampled every `step`
step = 0.01
theta_0 = 0
theta_end = 1
def likelyhood(theta, success, total):
    """Binomial likelihood C(total, success) * theta**success * (1-theta)**failure.

    Fix: the binomial coefficient was computed but dropped from the
    returned product; it is now included so the function returns the
    actual binomial pmf value. (A constant factor in theta, so the
    location of the maximum -- and the MLE/MAP curve shapes -- are
    unchanged.)
    """
    binomial = sp.binom(total, success)
    failure = total - success
    return binomial * (theta)**success * (1-theta)**failure
def prior(theta):
    """Unnormalised quartic prior theta^2 * (1 - theta)^2, scaled by 1/0.0333."""
    return theta ** 2 * (1 - theta) ** 2 * (1. / 0.0333)
# number of grid points between theta_0 and theta_end at the given step
array_length = int((theta_end - theta_0)/step)
theta = theta_0
# column 0 holds theta, column 1 holds the corresponding curve value
likelyhood_arrayMLE = np.zeros([array_length,2])
likelyhood_arrayMAP = np.zeros([array_length,2])
i = 0
# observed data: s successes out of t trials
s = 6
t = 10
# NOTE(review): accumulating floats with `theta += step` is fragile -- if
# rounding leaves theta just below theta_end for one extra pass, `i`
# overruns array_length and indexing raises IndexError. Confirm for the
# chosen step, or iterate over np.linspace instead.
while(theta<theta_end):
    likelyhood_arrayMLE[i,0] = theta
    likelyhood_arrayMAP[i,0] = theta
    likelyhood_arrayMLE[i,1] = likelyhood(theta,s,t)
    likelyhood_arrayMAP[i,1] = likelyhood(theta,s,t)*prior(theta)
    i +=1
    theta += step
# MLE curve: likelihood only; MAP curve: likelihood weighted by the prior
plt.plot(likelyhood_arrayMLE[:,0],likelyhood_arrayMLE[:,1],label="MLE")
plt.plot(likelyhood_arrayMAP[:,0],likelyhood_arrayMAP[:,1],label="MAP")
plt.legend()
plt.xlabel('Theta')
plt.ylabel('L(Theta)')
plt.show()
|
import logging
from abc import ABC
from autoconf import conf
from autofit.mapper.prior_model.collection import CollectionPriorModel
from autofit.non_linear.analysis.multiprocessing import AnalysisPool
from autofit.non_linear.paths.abstract import AbstractPaths
from autofit.non_linear.result import Result
from autofit.non_linear.samples import OptimizerSamples
logger = logging.getLogger(
__name__
)
class Analysis(ABC):
    """
    Protocol for an analysis: the methods a class can or must implement
    to compute the likelihood that some instance fits some data.
    """

    def log_likelihood_function(self, instance):
        raise NotImplementedError()

    def visualize(self, paths: AbstractPaths, instance, during_analysis):
        pass

    def save_attributes_for_aggregator(self, paths: AbstractPaths):
        pass

    def save_results_for_aggregator(self, paths: AbstractPaths, model: CollectionPriorModel,
                                    samples: OptimizerSamples):
        pass

    def make_result(self, samples, model, search):
        return Result(samples=samples, model=model, search=search)

    def __add__(self, other: "Analysis") -> "CombinedAnalysis":
        """
        Analyses can be added together. The resultant log likelihood
        function returns the sum of the underlying log likelihood
        functions.

        Parameters
        ----------
        other
            Another analysis class

        Returns
        -------
        A class that computes log likelihood based on both analyses
        """
        # adding to an existing combination folds this analysis into it
        if isinstance(other, CombinedAnalysis):
            return other + self
        return CombinedAnalysis(self, other)
class CombinedAnalysis(Analysis):
    def __init__(self, *analyses: Analysis):
        """
        Computes the summed log likelihood of multiple analyses
        applied to a single model.
        Either analyses are performed sequentially and summed,
        or they are mapped out to processes.
        If the number of cores is greater than one then the
        analyses are distributed across a number of processes
        equal to the number of cores.
        Parameters
        ----------
        analyses
        """
        self.analyses = analyses
        # worker-process count, read from the general -> analysis config
        n_cores = conf.instance[
            "general"
        ][
            "analysis"
        ][
            "n_cores"
        ]
        # NOTE: this instance attribute shadows the log_likelihood_function
        # method defined at the bottom of the class; the chosen callable
        # (pool or summing lambda) is what callers actually invoke.
        if n_cores > 1:
            self.log_likelihood_function = AnalysisPool(
                analyses,
                n_cores
            )
        else:
            self.log_likelihood_function = lambda instance: sum(
                analysis.log_likelihood_function(
                    instance
                )
                for analysis in analyses
            )
    def _for_each_analysis(self, func, paths):
        """
        Convenience function to call an underlying function for each
        analysis with a paths object with an integer attached to the
        end.
        Parameters
        ----------
        func
            Some function of the analysis class
        paths
            An object describing how data should be saved
        """
        for i, analysis in enumerate(self.analyses):
            # each analysis saves under "<name>_<i>" so outputs don't collide
            child_paths = paths.create_child(
                name=f"{paths.name}_{i}"
            )
            func(child_paths, analysis)
    def save_attributes_for_aggregator(self, paths: AbstractPaths):
        def func(child_paths, analysis):
            analysis.save_attributes_for_aggregator(
                child_paths,
            )
        self._for_each_analysis(
            func,
            paths
        )
    def save_results_for_aggregator(
            self,
            paths: AbstractPaths,
            model: CollectionPriorModel,
            samples: OptimizerSamples
    ):
        def func(child_paths, analysis):
            analysis.save_results_for_aggregator(
                child_paths,
                model,
                samples
            )
        self._for_each_analysis(
            func,
            paths
        )
    def visualize(
            self,
            paths: AbstractPaths,
            instance,
            during_analysis
    ):
        """
        Visualise the instance according to each analysis.
        Visualisation output is distinguished by using an integer suffix
        for each analysis path.
        Parameters
        ----------
        paths
            Paths object for overall fit
        instance
            An instance of the model
        during_analysis
            Is this visualisation during analysis?
        """
        def func(child_paths, analysis):
            analysis.visualize(
                child_paths,
                instance,
                during_analysis
            )
        self._for_each_analysis(
            func,
            paths
        )
    def make_result(
            self, samples, model, search
    ):
        # returns one Result per child analysis (a list), unlike the
        # single Result produced by the base class
        return [analysis.make_result(samples, model, search) for analysis in self.analyses]
    def __len__(self):
        return len(self.analyses)
    def __add__(self, other: Analysis):
        """
        Adding anything to a CombinedAnalysis results in another
        analysis containing all underlying analyses (no combined
        analysis children)
        Parameters
        ----------
        other
            Some analysis
        Returns
        -------
        An overarching analysis
        """
        if isinstance(
                other,
                CombinedAnalysis
        ):
            return CombinedAnalysis(
                *self.analyses,
                *other.analyses
            )
        return CombinedAnalysis(
            *self.analyses,
            other
        )
    # NOTE: this def is shadowed by the attribute assigned in __init__ and
    # serves as interface documentation; its body is only a docstring, so
    # calling it directly on the class would return None.
    def log_likelihood_function(
            self,
            instance
    ) -> float:
        """
        The implementation of this function is decided in the constructor
        based on the number of cores available
        Parameters
        ----------
        instance
            An instance of a model
        Returns
        -------
        The likelihood that model corresponds to the data encapsulated
        by the child analyses
        """
|
from rest_framework import serializers
from .models import FoodItem
class FoodItemSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a fixed subset of FoodItem model fields."""
    class Meta:
        model = FoodItem
        # abbreviated field names come from the FoodItem model definition
        fields = ('name', 'sd', 'group', 'cf', 'ff', 'pf', 'ru')
print("This is just a small survey before login")
print(input("Did you donate the blood before?"))
print(input("Are you ready to donate the blood in future?"))
print("Donate blood save lives")
a= int(input(print(" Enter your profile:\n 1. Admin \n 2. User\n ")))
if a==1:
print('Admin\n')
b=int(input('Please check the following things and enter your required ones\n 1. for adding New Blood Bank\n 2. For Finding donors\n 3. For checking Blood Requests\n 4. Exit\n'))
if b==1:
add=input(' Please enter the name of the hospital in which you want to add:')
print(add+' hospital added\n')
if b==2:
print(' Chippagiri Karthik (B+)\n Raviteja (B-)\n Jagadeesh(O+)\n Mithun chakravarthy (A-)\n Suraj Reddy (B+)\n Steve smith(O-)\n Anand(O+)')
if b==3:
print('Saveera Hospital Blood Bank needs A+,B+,B- blood groups\n')
if b!=4:
print('Warning! Please enter a valid choice ')
if a==2:
print('User')
print(input('Please enter your name:'))
print(int(input('Please enter your age')))
print(int(input('Please enter your phone number')))
print(int(input('Please enter the OTP sent to your phone number')))
print('Now you can proceed')
c=int(input('Please check the following things and enter your required ones\n 1. for asking Blood\n 2. for donating Blood\n 3. for seeing blood Requests\n 4. Exit\n'))
if c==1:
request=input('Please enter your blood group in which you want:')
print('Your request for blood donation of '+request+' type has been sent\n')
if c==2:
donate=input('Please enter your blood group for donating:')
print('Your blood type '+donate+' has beeen registered for blood donations\n')
if c==3:
d=int(input('Raghuveera hospital is in need of O+,O-,A+ blood groups\n 1.ACCEPT\n 2.IGNORE\n'))
if d==1:
print(' We are appreciating you for accepting the blood donation request and you will get all details further through notifications\n')
while c!=4:
c=int(input('Please check the following things and enter your required ones\n 1. for asking Blood\n 2. for donating Blood\n 3. for seeing blood Requests\n 4. Exit\n'))
if c==1:
request=input('Please enter your blood group in which you want:')
print('Your request for blood donation of '+request+' type has been sent\n')
if c==2:
donate=input('Please enter your blood group for donating:')
print('Your blood type '+donate+' has beeen registered for blood donations\n')
if c==3:
d=int(input('Raghuveera hospital is in need of O+,O-,A+ blood groups\n 1.ACCEPT\n 2.IGNORE\n'))
if d==1:
print(' We are appreciating you for accepting the blood donation request and you will get all details further through notifications\n')
|
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import imath
import IECore
import IECoreScene
import IECoreMaya
class ToMayaGroupConverterTest( IECoreMaya.TestCase ) :
    """Tests for IECoreMaya.ToMayaGroupConverter: converting an
    IECoreScene.Group hierarchy into Maya transform/mesh nodes."""
    def testConversion( self ) :
        # build a scaled parent group with two translated box children
        g = IECoreScene.Group()
        g.setTransform( IECoreScene.MatrixTransform( imath.M44f().scale( imath.V3f( 2 ) ) ) )
        c1 = IECoreScene.Group()
        c1.addChild( IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) ) )
        c1.setTransform( IECoreScene.MatrixTransform( imath.M44f().translate( imath.V3f( 2, 0, 0 ) ) ) )
        g.addChild( c1 )
        c2 = IECoreScene.Group()
        c2.addChild( IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) ) )
        c2.setTransform( IECoreScene.MatrixTransform( imath.M44f().translate( imath.V3f( -2, 0, 0 ) ) ) )
        g.addChild( c2 )
        # convert under a fresh transform and verify the Maya hierarchy
        p = maya.cmds.createNode( "transform" )
        IECoreMaya.ToMayaGroupConverter( g ).convert( p )
        # the top group becomes one transform carrying the scale
        mg = maya.cmds.listRelatives( p, fullPath=True )
        self.assertEqual( len( mg ), 1 )
        self.assertEqual( maya.cmds.nodeType( mg[0] ), "transform" )
        self.assertEqual( maya.cmds.getAttr( mg[0] + ".translate" ), [ ( 0, 0, 0 ) ] )
        self.assertEqual( maya.cmds.getAttr( mg[0] + ".rotate" ), [ ( 0, 0, 0 ) ] )
        self.assertEqual( maya.cmds.getAttr( mg[0] + ".scale" ), [ ( 2, 2, 2 ) ] )
        # the two children become transforms carrying their translations
        mgg = maya.cmds.listRelatives( mg[0], fullPath=True )
        self.assertEqual( len( mgg ), 2 )
        self.assertEqual( maya.cmds.nodeType( mgg[0] ), "transform" )
        self.assertEqual( maya.cmds.getAttr( mgg[0] + ".translate" ), [ ( 2, 0, 0 ) ] )
        self.assertEqual( maya.cmds.getAttr( mgg[0] + ".rotate" ), [ ( 0, 0, 0 ) ] )
        self.assertEqual( maya.cmds.getAttr( mgg[0] + ".scale" ), [ ( 1, 1, 1 ) ] )
        self.assertEqual( maya.cmds.nodeType( mgg[1] ), "transform" )
        self.assertEqual( maya.cmds.getAttr( mgg[1] + ".translate" ), [ ( -2, 0, 0 ) ] )
        self.assertEqual( maya.cmds.getAttr( mgg[1] + ".rotate" ), [ ( 0, 0, 0 ) ] )
        self.assertEqual( maya.cmds.getAttr( mgg[1] + ".scale" ), [ ( 1, 1, 1 ) ] )
        # each child transform holds one 6-face mesh (the box)
        m1 = maya.cmds.listRelatives( mgg[0], fullPath=True )
        self.assertEqual( len( m1 ), 1 )
        self.assertEqual( maya.cmds.nodeType( m1[0] ), "mesh" )
        self.assertEqual( maya.cmds.polyEvaluate( m1[0], face=True ), 6 )
        m2 = maya.cmds.listRelatives( mgg[1], fullPath=True )
        self.assertEqual( len( m2 ), 1 )
        self.assertEqual( maya.cmds.nodeType( m2[0] ), "mesh" )
        self.assertEqual( maya.cmds.polyEvaluate( m2[0], face=True ), 6 )
    def testNamedConversion( self ):
        # groups carrying "name" attribute state should produce Maya nodes
        # named after the last path component of the name
        g = IECoreScene.Group()
        g.addState( IECoreScene.AttributeState( { "name" : IECore.StringData( "topLevel" ) } ) )
        c1 = IECoreScene.Group()
        c1.addState( IECoreScene.AttributeState( { "name" : IECore.StringData( "topLevel/child1" ) } ) )
        g.addChild( c1 )
        c2 = IECoreScene.Group()
        c2.addState( IECoreScene.AttributeState( { "name" : IECore.StringData( "child2" ) } ) )
        g.addChild( c2 )
        c3 = IECoreScene.Group()
        c3.addState( IECoreScene.AttributeState( { "name" : IECore.StringData( "topLevel/child1/child3" ) } ) )
        c1.addChild( c3 )
        # nameless group
        g.addChild( IECoreScene.Group() )
        p = maya.cmds.createNode( "transform" )
        IECoreMaya.ToMayaGroupConverter( g ).convert( p )
        # compare the full set of descendant DAG paths against expectations
        mg = maya.cmds.listRelatives( p, fullPath=True, ad=True )
        expectedNames = set( [ "|transform1|topLevel|child1|child3", "|transform1|topLevel|child1", "|transform1|topLevel|child2", "|transform1|topLevel|transform2", "|transform1|topLevel" ] )
        actualNames = set()
        for name in mg:
            actualNames.add( str( name ) )
        self.assertEqual( expectedNames, actualNames )
# run under Maya's Python with the IECoreMaya test harness
if __name__ == "__main__":
    IECoreMaya.TestProgram()
|
import os
import tushare as ts
import requests.exceptions
import pandas as pd
from conf.conf import get_conf
from conf.log import server_logger
def get_stocks():
    """Return [{'code': ..., 'name': ...}] for all listed stocks.

    Tries the tushare API first and refreshes the local CSV cache; on any
    failure it falls back to the cached data/stocks.csv snapshot.
    """
    # build the cache path once (was duplicated inline in both branches)
    csv_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', "data", "stocks.csv")
    try:
        ts.set_token(get_conf("tushare_token"))
        pro = ts.pro_api()
        data = pro.query('stock_basic', exchange='', list_status='L', fields='ts_code,symbol,name')
        server_logger.debug("stock data fetched {}".format(len(data)))
        data.to_csv(csv_path, encoding="utf-8")
    except Exception:
        # deliberate best-effort: any tushare failure (network, token, ...)
        # falls back to the local snapshot so callers still get a list
        data = pd.read_csv(csv_path, encoding="utf-8")
        server_logger.warning("tushare connect failed , data from local storage")
    # normalise the dataframe rows into plain dicts
    return [{"code": row.get("symbol"), "name": row.get("name")} for _, row in data.iterrows()]
if __name__ == "__main__":
print(get_stocks()) |
# Variables
# https://www.youtube.com/watch?v=BJ-VvGyQxho
class Employee:
    """A simple employee record demonstrating class vs instance variables."""

    raise_amount = 1.04   # class-level default raise factor
    num_of_emps = 0       # counter shared by all instances

    def __init__(self, first, last, pay):
        self.first = first
        self.last = last
        self.pay = pay
        # bump the class-wide counter, not an instance attribute
        Employee.num_of_emps += 1

    def fullname(self):
        """Return the employee's full name."""
        return "{} {}".format(self.first, self.last)

    def apply_raise(self):
        """Raise pay by raise_amount (an instance override wins over the class value)."""
        self.pay = int(self.pay * self.raise_amount)
# Demo: create two employees and inspect class vs instance attributes.
emp_1 = Employee("Abdul", "Kalam", 1000)
emp_2 = Employee("Satish", "Dhawan", 2000)
# Instance doesn't have raise_amount attribute but Employee class has.
print(emp_1.__dict__)
print(Employee.__dict__)
# Raise amount of only emp_1 is changed
# (the assignment creates an instance attribute shadowing the class one)
emp_1.raise_amount = 1.05
print(emp_1.raise_amount)
print(emp_2.raise_amount)
print(Employee.raise_amount)
# num_of_emps
# (looked up on the class through either instance: both print the same value)
print(emp_1.num_of_emps)
print(emp_2.num_of_emps)
|
#!/usr/bin/env python
from peyotl.api import APIWrapper
# Demo script against the Open Tree phylesystem API (network calls).
ps = APIWrapper().phylesystem_api
# fetch the list of study ids and show the first one
studies = ps.get_study_list()
print(studies[0])
# download the first study and pull its NexSON body out of the response
blob = ps.get(studies[0])
nexson = blob['data']['nexml']
print(nexson['^ot:studyId'], ':', nexson['^ot:studyPublicationReference'])
|
import os

# Preprocessing for an aligned EN/FR corpus: collect the maximum sentence
# lengths and the word frequencies from the lexicon file.
data_path = '/run/media/ashbylepoc/b79b0a3e-a5b9-41ed-987f-8fa4bdb6b2e6/tmp/data/nlp_dev_2/'
train_y = os.path.join(data_path, 'train.y')
train_en = os.path.join(data_path, 'train.en')
train_fr = os.path.join(data_path, 'train.fr')
lexicon = os.path.join(data_path, 'lexique.en-fr')

# Fix: use context managers so the file handles are closed deterministically
# (the originals were left open for the life of the process).
with open(train_y) as f:
    lines_train_y = f.readlines()
with open(train_en) as f:
    lines_train_en = f.readlines()
with open(train_fr) as f:
    lines_train_fr = f.readlines()
with open(lexicon) as f:
    lines_lexicon = f.readlines()

# get max number of tokens for en and fr
# max_en = 304, max_fr = 402
max_en = max((len(line.split(' ')) for line in lines_train_en), default=0)
max_fr = max((len(line.split(' ')) for line in lines_train_fr), default=0)

# get number of words for french and english, and their frequencies
en_words = {}
fr_words = {}
for i, l in enumerate(lines_lexicon):
    if i % 10000 == 0:
        print(i)  # progress indicator for large lexicons
    en_word, fr_word = l.split('\t')
    fr_word = fr_word[:-1]  # strip the trailing newline
    en_words[en_word] = en_words.get(en_word, 0.) + 1.
    fr_words[fr_word] = fr_words.get(fr_word, 0.) + 1.
# Shuffling sentences
|
from PyPDF2 import PdfFileMerger

# Merge the listed PDFs (in list order) into joined.pdf.
filelist = ["Yakisoba.pdf","Teriyaki.pdf"]
merger = PdfFileMerger()
for fh in filelist:
    #just put files in order
    print(fh)
    merger.append(fh)
# Fix: 'with' guarantees the output handle is closed even on error (the
# original leaked the open file object), and merger.close() releases the
# input file handles the merger keeps open.
with open("./joined.pdf","wb") as output:
    merger.write(output)
merger.close()
"""
merger = PdfFileMerger()
merger.append("Yakisoba.pdf")
merger.append("Teriyaki.pdf")
output = open("./joined.pdf","wb")
merger.write(output)
"""
|
# Linear-regression demo on synthetic data: page-load speed vs. purchase
# amount for an imaginary online shop. The generated data is constructed so
# that faster pages correlate with higher spending — a reminder that
# correlation does not establish causation (are fast pages driving spend, or
# do wealthier users simply have faster connections?). The script fits a
# 4th- and a 10th-degree polynomial and compares r^2 to illustrate that a
# higher degree does not fit this data better (overfitting).
from pylab import *
import numpy as np
import matplotlib.pyplot as plt

# Synthetic data: purchase amount is inversely related to page speed by construction.
np.random.seed(2)
pageSpeeds = np.random.normal(3.0, 1.0, 1000)
purchaseAmount = np.random.normal(50.0, 10.0, 1000) / pageSpeeds
#plt.scatter (pageSpeeds, purchaseAmount)
#plt.show()

# Fit a 4th-degree polynomial y = ax^4 + bx^3 + cx^2 + dx + e via numpy.
x = np.array(pageSpeeds)
y = np.array(purchaseAmount)
poly4 = np.poly1d(np.polyfit(x, y, 4))
grid = np.linspace(0, 7, 100)  # 0..7 seconds of page-load time
plt.scatter(x, y)
plt.plot(grid, poly4(grid), c='r')
plt.show()

# r^2 of the 4th-degree fit.
from sklearn.metrics import r2_score
r2 = r2_score(y, poly4(x))
print('r^2 of our 4th degree polynominal model is: ', r2)

# For comparison, a 10th-degree polynomial on the same data.
poly10 = np.poly1d(np.polyfit(x, y, 10))
plt.scatter(x, y)
plt.plot(grid, poly4(grid), c='r')   # 4th degree again, in red
plt.plot(grid, poly10(grid), c='g')  # 10th degree, in green
plt.show()

r2_of10thdegree = r2_score(y, poly10(x))
print('r^2 of our 10th degree polynominal model is: ', r2_of10thdegree)
|
import numpy as np
import RPi.GPIO as GPIO

# Interactive LED controller: type a command on stdin to drive two GPIO pins.
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
BlueLED = 21
Hexa = 20
GPIO.setup(BlueLED, GPIO.OUT)
GPIO.setup(Hexa, GPIO.OUT)

toggling = True
while toggling:
    user_input = input()
    if user_input == 'Hexagon':
        GPIO.output(Hexa, 1)
    elif user_input == 'Blue LED':
        GPIO.output(BlueLED, 1)
    elif user_input == 'Both':
        # NOTE(review): 'Both' drives both pins LOW while the single commands
        # drive HIGH — confirm the wiring (active-low LEDs?) before changing.
        GPIO.output(Hexa, 0)
        GPIO.output(BlueLED, 0)
    elif user_input == 'Q':
        # Bug fix: was `toggling == False`, a no-op comparison; the loop only
        # exited via the following `break`. Assign so the flag matches intent.
        toggling = False
|
def _is_magic(matrix, i, j, d):
    """Return True if the d x d sub-square with top-left corner (i, j) has
    equal sums over every row, every column, and both diagonals."""
    # Target sum taken from the first row of the sub-square. Using the sum
    # directly avoids the original's `if not target:` sentinel, which broke
    # whenever the first row summed to 0.
    target = sum(matrix[i][j + c] for c in range(d))
    # Remaining rows.
    for r in range(1, d):
        if sum(matrix[i + r][j + c] for c in range(d)) != target:
            return False
    # Columns.
    for c in range(d):
        if sum(matrix[i + r][j + c] for r in range(d)) != target:
            return False
    # Main diagonal.
    if sum(matrix[i + k][j + k] for k in range(d)) != target:
        return False
    # Anti-diagonal. Bug fix: the original summed matrix[i+d-1-k][j+d-1-k],
    # which is the main diagonal traversed in reverse, so the anti-diagonal
    # was never actually checked.
    if sum(matrix[i + k][j + d - 1 - k] for k in range(d)) != target:
        return False
    return True


def magicSquare(matrix):
    """Return the side length of the largest magic sub-square of `matrix`.

    A d x d sub-square is "magic" when all of its row sums, column sums and
    both diagonal sums are equal. Single cells are trivially magic, so the
    minimum result is 1 (matching the original implementation).

    Args:
        matrix: rectangular list of lists of numbers.

    Fixes vs. the original: the anti-diagonal is now genuinely checked, a
    zero first-row sum no longer confuses target detection, and the debug
    prints were removed.
    """
    N = len(matrix)
    M = len(matrix[0])
    maxD = 1
    for i in range(N - 1):
        for j in range(M - 1):
            # Grow d while the sub-square still fits inside the matrix.
            d = 2
            while i + d - 1 <= N - 1 and j + d - 1 <= M - 1:
                if _is_magic(matrix, i, j, d):
                    maxD = max(maxD, d)
                d += 1
    return maxD
# Manual check on a 5x3 grid whose largest magic sub-square is the 3x3 block
# in rows 1-3 (rows/cols/diagonals all sum to 15).
matrix = [
    [7, 2, 4],
    [2, 7, 6],
    [9, 5, 1],
    [4, 3, 8],
    [3, 5, 4],
]
print(magicSquare(matrix))
|
# Python 2 UDP client: sends stdin lines to a local server and prints each
# reply; typing "exit" sends it and then quits. (Python 2 syntax: `print`
# statement and raw_input.)
import socket
host = "localhost"
# Destination port of the UDP server.
server_port = 2333
# Local port this client binds to.
port = 6666
server = (host, server_port)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((host, port))
transport = raw_input("Input your data to server->\t")
while 1:
    # Send first, then break on "exit" so the server also sees the sentinel.
    s.sendto(transport, server)
    if transport == "exit":
        break
    # Blocks until the server answers; 1024-byte receive buffer.
    data,addr = s.recvfrom(1024)
    print "Received data from server: " + str(data)
    transport = raw_input("Input your data to server->\t")
s.close()
|
import sys, os, io, time, base64
from datetime import datetime
platform = sys.platform
if platform == 'win32':
import win32gui, win32console, win32clipboard
from PIL import ImageGrab
cbData= ' ' #global clipboard data: last value seen, used to skip duplicates
flePath= 'help.jpg' #output file the clipboard log is appended to
slpDur = 3 #how often to check clipboard, in seconds
imgQly = 10 #image quality (JPEG quality for clipboard images)
def writeData(data): #write clipboard data to the log file
    """Append `data` (a str) to the log file at the module-level flePath."""
    # The with-statement closes the handle automatically; the original also
    # called fle.close() inside the with-block, which was redundant.
    with open(flePath, 'a') as fle:
        fle.write(data)
def hideWindow():
    """Hide this process's console window (Windows only). Always returns True."""
    hwnd = win32console.GetConsoleWindow()
    # 0 == SW_HIDE
    win32gui.ShowWindow(hwnd, 0)
    return True
def getWinClip():
    """Poll the Windows clipboard and append any new text or image to the log.

    Text is logged as-is; images are grabbed via PIL and logged base64-encoded
    as JPEG. The module-level cbData caches the last seen value so identical
    clipboard content is not logged twice.
    """
    global cbData
    win32clipboard.OpenClipboard()
    try : #get text
        text = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)
        win32clipboard.CloseClipboard()
        if cbData != text:
            cbData = text
            writeData('::t:'+ str(datetime.now())+ '\n' + str(text.decode()) + '\n')
    except Exception:
        # Clipboard is not text — try an image instead.
        win32clipboard.CloseClipboard()
        img = ImageGrab.grabclipboard() #grab image from clipboard
        if img is None:
            # Clipboard holds neither text nor an image; nothing to log.
            return
        # Fixes vs. original: BytesIO must be qualified (only the io module is
        # imported), the format string was misspelled 'jepg', and the quality
        # constant was misspelled imgQLy — each raised at runtime.
        buf = io.BytesIO()
        img.save(buf, format='JPEG', optimize=True, quality=imgQly)
        text = str(base64.b64encode(buf.getvalue()).decode())
        if cbData != text: #new data, write to log file
            cbData = text
            writeData('::i:'+ str(datetime.now())+ '\n' + str(text) + '\n')
if platform=='win32':
    hideWindow()
# Poll the clipboard forever. Fix: slpDur ("how often to check clipboard")
# was defined but never used, so the original loop busy-spun at 100% CPU.
while True:
    if platform == 'win32':
        getWinClip()
    time.sleep(slpDur)
|
"""
Import sample data for E-Commerce Recommendation Engine Template
"""
import predictionio
import argparse
import random
# RNG seed so repeated runs generate the identical sample data set.
SEED = 3


def _register_users(client, user_ids):
    """$set each user entity; return the number of events sent."""
    sent = 0
    for user_id in user_ids:
        print("Set user", user_id)
        client.create_event(
            event="$set",
            entity_type="user",
            entity_id=user_id
        )
        sent += 1
    return sent


def _register_items(client, item_ids, categories):
    """$set each item with 1-4 random categories; return events sent."""
    sent = 0
    for item_id in item_ids:
        print("Set item", item_id)
        client.create_event(
            event="$set",
            entity_type="item",
            entity_id=item_id,
            properties={
                "categories" : random.sample(categories, random.randint(1, 4))
            }
        )
        sent += 1
    return sent


def _simulate_activity(client, user_ids, item_ids):
    """Each user views 10 random items and buys a random subset of them."""
    sent = 0
    for user_id in user_ids:
        for viewed_item in random.sample(item_ids, 10):
            print("User", user_id ,"views item", viewed_item)
            client.create_event(
                event="view",
                entity_type="user",
                entity_id=user_id,
                target_entity_type="item",
                target_entity_id=viewed_item
            )
            sent += 1
            if random.choice([True, False]):
                print("User", user_id ,"buys item", viewed_item)
                client.create_event(
                    event="buy",
                    entity_type="user",
                    entity_id=user_id,
                    target_entity_type="item",
                    target_entity_id=viewed_item
                )
                sent += 1
    return sent


def import_events(client):
    """Send deterministic sample e-commerce events through `client`.

    Creates 10 users (u1..u10), 50 items (i1..i50, each with 1-4 random
    categories from c1..c6), 100 view events and a random subset of buy
    events, then prints the total number of events sent.
    """
    random.seed(SEED)
    print(client.get_status())
    print("Importing data...")
    user_ids = ["u%s" % i for i in range(1, 11)]
    categories = ["c%s" % i for i in range(1, 7)]
    item_ids = ["i%s" % i for i in range(1, 51)]
    count = _register_users(client, user_ids)
    count += _register_items(client, item_ids, categories)
    count += _simulate_activity(client, user_ids, item_ids)
    print("%s events are imported." % count)
if __name__ == '__main__':
    # CLI entry point: --access_key identifies the PredictionIO app,
    # --url points at the event server (defaults to a local install).
    parser = argparse.ArgumentParser(
        description="Import sample data for e-commerce recommendation engine")
    parser.add_argument('--access_key', default='invald_access_key')
    parser.add_argument('--url', default="http://localhost:7070")
    args = parser.parse_args()
    print(args)
    # Async client: 5 worker threads, queue of up to 500 pending events.
    client = predictionio.EventClient(
        access_key=args.access_key,
        url=args.url,
        threads=5,
        qsize=500)
    import_events(client)
|
from django.apps import AppConfig
class CongressopaisAppConfig(AppConfig):
    """Django application configuration for the congressoPais_app app."""
    name = 'congressoPais_app'
|
#Base code from: https://heartbeat.fritz.ai/real-time-object-detection-on-raspberry-pi-using-opencv-dnn-98827255fa60
import cv2
import time
import numpy as np
from intersectionOld import *
from imutils import object_detection
import pdb
import os
import serial
# Pretrained classes in the model
# COCO label map for the TensorFlow SSD models loaded below; the keys match
# the class_id values in the network's detection output (note the gaps —
# some COCO ids are unused).
classNames = {0: 'background',
              1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle', 5: 'airplane', 6: 'bus',
              7: 'train', 8: 'truck', 9: 'boat', 10: 'traffic light', 11: 'fire hydrant',
              13: 'stop sign', 14: 'parking meter', 15: 'bench', 16: 'bird', 17: 'cat',
              18: 'dog', 19: 'horse', 20: 'sheep', 21: 'cow', 22: 'elephant', 23: 'bear',
              24: 'zebra', 25: 'giraffe', 27: 'backpack', 28: 'umbrella', 31: 'handbag',
              32: 'tie', 33: 'suitcase', 34: 'frisbee', 35: 'skis', 36: 'snowboard',
              37: 'sports ball', 38: 'kite', 39: 'baseball bat', 40: 'baseball glove',
              41: 'skateboard', 42: 'surfboard', 43: 'tennis racket', 44: 'bottle',
              46: 'wine glass', 47: 'cup', 48: 'fork', 49: 'knife', 50: 'spoon',
              51: 'bowl', 52: 'banana', 53: 'apple', 54: 'sandwich', 55: 'orange',
              56: 'broccoli', 57: 'carrot', 58: 'hot dog', 59: 'pizza', 60: 'donut',
              61: 'cake', 62: 'chair', 63: 'couch', 64: 'potted plant', 65: 'bed',
              67: 'dining table', 70: 'toilet', 72: 'tv', 73: 'laptop', 74: 'mouse',
              75: 'remote', 76: 'keyboard', 77: 'cell phone', 78: 'microwave', 79: 'oven',
              80: 'toaster', 81: 'sink', 82: 'refrigerator', 84: 'book', 85: 'clock',
              86: 'vase', 87: 'scissors', 88: 'teddy bear', 89: 'hair drier', 90: 'toothbrush'}
#https://www.arduino.cc/en/Tutorial.StringToIntExample
def id_class_name(class_id, classes):
    """Return the label for `class_id` from the `classes` map, or None if absent.

    Replaces the original linear scan over the dict with a direct lookup;
    dict.get returns None for missing ids, matching the original's implicit
    None return. Works for float ids too (the network emits floats, and
    1.0 hashes equal to 1).
    """
    return classes.get(class_id)
#Works well 16fps
model = cv2.dnn.readNetFromTensorflow('models/MobileNet-SSDLite-v2/frozen_inference_graph.pb','models/MobileNet-SSDLite-v2/ssdlite_mobilenet_v2_coco.pbtxt')
#Works well 14fps
# NOTE(review): model1 is loaded but never used below — dead weight unless
# it is meant for A/B comparison; confirm before removing.
model1 = cv2.dnn.readNetFromTensorflow('models/MobileNet-SSD-v2/frozen_inference_graph.pb','models/MobileNet-SSD-v2/ssd_mobilenet_v2_coco_2018_03_29.pbtxt')
# Minimum network confidence for a detection to be kept.
detectionThreshold = 0.43
# NOTE(review): the 8th entry (128,255) has only two components, unlike the
# other 3-tuples — likely a typo for (0,128,255) or similar; verify.
colors = np.array([(255,0,0), (255,128,0), (255,255,0), (128,255,0), (0,255,0), (0,255,128), (0,255,255), (128,255), (0,0,255), (127,0,255), (255,0,255), (255,0,127)])
#           Red  Orange  Yellow  Yellow-Green  Green  Blue-Green  Cyan  Light-Blue  Blue  Violet  Magenta  Pink
cap = cv2.VideoCapture(0)
#out = cv2.VideoWriter(movieOut,cv2.VideoWriter_fourcc('M','J','P','G'), 1, (1280,720))
frameCounter = -1
trackedBoxes = np.empty((0,5))#x1, y1, x2, y2, numDetections
# NOTE(review): presumably the effective horizontal resolution in pixels —
# 1142 scaled by the 1920/1080 aspect; confirm against the camera used.
kImageWidthPx = 1142*1920/1080
kCameraFOVRads = np.pi/2;
def writeServoPos(spd):
    """Send the target servo position (degrees) to the Arduino over serial."""
    message = str(spd) + "\n"
    ser.write(message.encode())
# Serial link to the servo controller (Arduino).
ser = serial.Serial('/dev/cu.usbmodem14111')#Set this to the actual serial port name
#curServoPosDeg = 90
#targetServoPosDeg = 90
# Recording of the annotated frames (1 fps, 720p, MJPG); written below only
# where out.write(image) is uncommented.
out = cv2.VideoWriter("trackerOut.avi",cv2.VideoWriter_fourcc('M','J','P','G'), 1, (1280,720))
# Main loop: grab a frame, detect people with the SSD network, match the new
# boxes against last frame's boxes, draw everything, and steer the servo
# toward the first tracked person. Press 'q' in the window to quit.
while(True):
    frameCounter += 1
    r, image = cap.read()
    if r:
        start_time = time.time()
        image_height, image_width, _ = image.shape
        model.setInput(cv2.dnn.blobFromImage(image, size=(480, 320), swapRB=True))#Blob is 480x320 which gives decent accuracy and a speed of 10 fps
        output = model.forward()#Finds the detections
        #Looks at all detections and adds all the person detection bounding boxes to allCurBoxes
        allCurBoxes = np.empty((0,5))
        for detection in output[0, 0, :, :]:
            confidence = detection[2]
            class_id = detection[1]
            # Keep only confident 'person' detections (COCO class 1).
            if confidence > detectionThreshold and class_id == 1:
                class_name=id_class_name(class_id,classNames)
                #print(str(str(class_id) + " " + str(detection[2]) + " " + class_name))
                # Network outputs are normalized [0,1]; scale to pixels.
                box_x = detection[3] * image_width
                box_y = detection[4] * image_height
                box_width = detection[5] * image_width
                box_height = detection[6] * image_height
                allCurBoxes = np.vstack((allCurBoxes, [int(box_x), int(box_y), int(box_width), int(box_height), confidence]))
        #Uses non-max supression to remove redundant detections, and leave only the detection with the highest network confidence
        nmsOut = np.array(object_detection.non_max_suppression(allCurBoxes[:,0:4],allCurBoxes[:,4]))
        curBoxes = np.empty((0,4))
        if nmsOut.shape[0] != 0:
            curBoxes = nmsOut[:,0:4]
        #Matches previous detections with current detections and returns an intersection Over Union Matrix
        allMatches, IoUMatrix = matchBoxes(trackedBoxes,curBoxes)
        matches = allMatches
        #Deletes unmatched boxes from matches, leaving only pairs of tracked and current boxes
        # NOTE(review): each delete rebuilds `matches` from allMatches, so only
        # the last sentinel hit survives, and indices are not adjusted after a
        # delete — verify behavior when more than one pair must be removed.
        for i in range(allMatches[0].shape[0]):
            if IoUMatrix[allMatches[0][i],allMatches[1][i]] == 1: #Positive Sentinel value because matrix entries are negative if there is an intersection
                matches = (np.delete(allMatches[0],i), np.delete(allMatches[1],i))
        #Loops through trackedBoxes and draws the tracked box with a new color if it has a match
        for i in range(trackedBoxes.shape[0]):
            trackedBox = trackedBoxes[i]
            color = (0, 0, 255) #Red tracked box if no match.
            if i in matches[0]:
                color = colors[i]#Sets a new color for each detection.
            cv2.rectangle(image, (int(trackedBox[0]), int(trackedBox[1])), (int(trackedBox[2]), int(trackedBox[3])), color, thickness=1)
        #Loops through curBoxes and draws the tracked box with a new color if it has a match
        for i in range(curBoxes.shape[0]):
            curBox = curBoxes[i]
            color = (255,0,0)#Blue curBox if no match.
            if i in matches[1]:
                color = colors[i]#Sets a new color for each detection. Same as the tracked one.
            cv2.rectangle(image, (int(curBox[0]), int(curBox[1])), (int(curBox[2]), int(curBox[3])), color, thickness=1)
        trackedBoxes = curBoxes #Saves current boxes to tracked boxes
        #out.write(image)
        #servoTimerStart = time.time()
        if(curBoxes.shape[0] > 0):
            # NOTE(review): trackedBoxes was just set equal to curBoxes above,
            # so this averages a box with itself — presumably meant to smooth
            # between the previous and current frame's box; confirm ordering.
            curBoxCenter = ((curBoxes[0,0] + curBoxes[0,2])/2 + (trackedBoxes[0,0] + trackedBoxes[0,2])/2 )/2
            #pdb.set_trace()
            cv2.circle(img = image, center = (int(curBoxCenter), int(480)) , radius = 20 , color = (0, 255, 0), thickness = 4)
            # Pixel offset from image center -> bearing angle via the pinhole
            # model, using the camera's horizontal FOV.
            servoTargetDeg = int(np.round(-180 / np.pi * np.arctan2( -(curBoxCenter - kImageWidthPx/2) / (kImageWidthPx / (2 * np.tan(kCameraFOVRads/2) ) ), 1)))
            #targetServoPosDeg += servoIncrementDeg
            # Empirical gain/offset mapping bearing to servo command (85 ~= center).
            writeServoPos(int(servoTargetDeg*1.3 + 85))
            #servoDelay = int(0.2/60*abs(servoIncrementDeg)*1.2+1)
            #print("delay ", servoDelay)
            #cv2.waitKey(servoDelay)
            print(curBoxCenter,servoTargetDeg)
        tinyIMG = np.empty((960,540))
        # NOTE(review): cv2.resize's dsize is (width, height); tinyIMG.shape is
        # (rows, cols) = (960, 540), so this yields a 960-wide, 540-tall image —
        # confirm the intended orientation.
        tinyIMG = cv2.resize(src = image, dst = tinyIMG, dsize = tinyIMG.shape)
        cv2.imshow('image', tinyIMG)
        k = cv2.waitKey(1)
        # `&` binds tighter than `==`, so this compares k to ord("q").
        if k == 0xFF & ord("q"):
            break
        end_time = time.time()#Checks the time to label a single frame. Allows for easy comparison of networks.
        #print("Elapsed Time:",end_time-start_time)
#cv2.waitKey(0)
cv2.destroyAllWindows()
|
#coding=utf-8
from collective.constants import AbstractConstant
class TicketsStates(AbstractConstant):
    """Constant set for support-ticket states (open / closed)."""
    def __init__(self):
        super(TicketsStates, self).__init__()
        self.set_ids({
            'open': 0,
            'closed': 1,
        })
    def open(self):
        """Return the id of the 'open' state."""
        return self.ids['open']
    def closed(self):
        """Return the id of the 'closed' state."""
        return self.ids['closed']
# Module-level singleton shared by all importers.
TICKETS_STATES = TicketsStates()
class MessagesTypes(AbstractConstant):
    """Constant set for ticket-message types (client vs. support agent)."""
    def __init__(self):
        super(MessagesTypes, self).__init__()
        self.set_ids({
            'clients_message': 0, # message from the client
            'supports_message': 1, # message from a support agent
        })
    def clients_message(self):
        """Return the id for messages written by the client."""
        return self.ids['clients_message']
    def supports_message(self):
        """Return the id for messages written by a support agent."""
        return self.ids['supports_message']
# Module-level singleton shared by all importers. Fix: removed the stray
# trailing "|" artifact, which was a syntax error.
TICKETS_MESSAGES_TYPES = MessagesTypes()
from flask import Flask
app = Flask(__name__)
# NOTE(review): database credentials are hard-coded in source — move them to
# an environment variable or config file before sharing/deploying.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://tara:mypassword@localhost/todoapp'
# Imported for side effects: views registers its route handlers on `app`.
from views import *
if __name__ == '__main__':
    app.run()
|
############# Final Project ##############
###### Authors: ######
#
import os
import pandas as pd
import numpy as np
from datetime import date
import random
import statsmodels.api as sm
import cvxopt as opt
from cvxopt import matrix
from cvxopt import blas,solvers
from sklearn.linear_model import Lasso
from tqdm import tqdm
from scipy.linalg import sqrtm
import matplotlib.pyplot as plt
from datetime import datetime
#from qpsolvers import solve_qp
######### Header Code - def of Functions #########
#Function to compute Info Ratio for given returns
def calcInfoRatio(ReturnArray):
    """Information ratio: mean of the returns divided by their std (ddof=0)."""
    mean_ret = np.mean(ReturnArray)
    vol = np.std(ReturnArray)
    return mean_ret / vol
#Function to compute Max Drawdown for a given value series
def calcMaxDrawDown(RETvalues):
    """Largest peak-to-trough decline as a (non-positive) fraction of the peak.

    Returns 0 when the series never falls below its running maximum.
    Equivalent to the original loop: track the running maximum, measure each
    value's drop relative to it, and keep the most negative drop.
    """
    values = np.array(RETvalues)
    running_peak = np.maximum.accumulate(values)
    drawdowns = (values - running_peak) / running_peak
    return min(0, drawdowns.min())
#Function to filter out stocks as per part 2
def shortlist_stocks(tempdata):
    """Return the set of SEDOLs forming the investable universe.

    NOTE(review): the `tempdata` argument is immediately overwritten — the
    function actually reads the module-level `mergeddata` with a hard-coded
    Dec-2004 window, so the same universe is returned for every rebalance
    date. The commented-out lines show the originally intended per-month
    filtering (top-4000 by market cap, top-30% ES scores); confirm whether
    the hard-coded window is an intentional simplification.
    """
    #tempdata = tempdata.dropna(subset=['ES'])
    #Filter out top 4000 stocks based on Market-capitalization
    #tempdata = tempdata.sort_values(by=['mcap'],ascending=False)
    #tempdata = tempdata[:4000]
    #Select stocks with ES values falling in top 70 percentile
    #tempdata.ES = tempdata.ES.astype(float)
    #tempdata = tempdata[tempdata.ES >= tempdata.ES.quantile(0.7)]
    startdate = date(2004,12,1)
    enddate = date(2004,12,30)
    tempdata = mergeddata[(mergeddata.DATE >= startdate) & (mergeddata.DATE <= enddate)]
    finallist = set(tempdata['SEDOL'])
    return finallist
#Function to compute the Covariance Matrix of Returns
def CalCovMatrix(stockdata):
    """Return (covariance matrix of returns, surviving SEDOL list).

    Pivots the long-format (DATE, SEDOL, RETURN) frame to wide format (one
    column per SEDOL), drops stocks with any missing month, computes the
    sample covariance, and projects it toward positive-definiteness when any
    eigenvalue is non-positive.
    """
    # Long -> wide: rows indexed by DATE, one RETURN column per SEDOL.
    wide = stockdata[['DATE', 'SEDOL', 'RETURN']].set_index(['DATE', 'SEDOL']).unstack()
    wide.columns = [sedol for _, sedol in wide.columns]
    wide = wide.dropna(axis=1).astype(float)
    covmatrix = wide.cov()
    updatedlist = wide.columns
    # If not positive-definite, replace with Re(sqrtm(C * C^T)) — note the
    # elementwise product, exactly as in the original implementation.
    if not np.all(np.linalg.eigvals(covmatrix) > 0):
        covmatrix = np.real(sqrtm(covmatrix * covmatrix.T))
    return covmatrix, updatedlist
#Function to compute the Expected Returns based on the given factor
#using Regression Model
def Reg_ExpectedReturns(stockdata,factor,i):
    """Estimate expected returns at date `i` via monthly cross-sectional OLS.

    For every month in `stockdata`, regresses RETURN on the 10 factor columns
    (plus a constant), averages the coefficients across months, and applies
    the averaged betas to the factor values observed at date `i`.

    Args:
        stockdata: long-format frame with DATE, SEDOL, RETURN and factor columns.
        factor: label used for the output column; when 'CTEF' the CTEF column
            is zeroed out of the regressors before fitting.
        i: the as-of date whose rows receive the expected-return forecast.

    Returns:
        pandas Series named 'Expected <factor>' indexed by SEDOL.
    """
    ##RegResults = pd.DataFrame(index=['const','EP','BP','CP','SP','REP','RBP','RCP','RSP','CTEF','PM1'])
    RegResults = pd.DataFrame(index=['const','EP1','EP2','RV1','RV2','REP','RBP','RCP','RSP','CTEF','LIT'])
    #Run regression for each month in the dataframe
    for d in set(stockdata['DATE']):
        tempdata = stockdata[stockdata['DATE'] == d]
        Y = tempdata['RETURN']
        Y = Y.astype(float)
        ##X = tempdata[['EP','BP','CP','SP','REP','RBP','RCP','RSP','CTEF','PM1']]
        X = tempdata[['EP1','EP2','RV1','RV2','REP','RBP','RCP','RSP','CTEF','LIT']]
        X = X.astype(float)
        if (factor == 'CTEF'):
            X.CTEF = 0
        X = sm.add_constant(X)
        model = sm.OLS(Y,X)
        reg = model.fit()
        RegResults[d.strftime('%Y-%m')] = reg.params
    RegResults['Final Coeff'] = RegResults.mean(axis=1)
    #Obtain beta values (average of 24 past months' beta)
    beta = RegResults['Final Coeff']
    ERdata = stockdata[stockdata.DATE == pd.to_datetime(i)]
    ##cols = ['EP','BP','CP','SP','REP','RBP','RCP','RSP','CTEF','PM1']
    cols = ['EP1','EP2','RV1','RV2','REP','RBP','RCP','RSP','CTEF','LIT']
    ERdata = ERdata[['SEDOL'] + cols]
    ERdata['CONST'] = 1
    ERdata[cols] = ERdata[cols].astype(float)
    #Compute the Expected Returns using the beta values and given factors
    # NOTE(review): beta[0], beta[1], ... index positionally into the Series;
    # this is only correct while the column list above matches RegResults'
    # index order exactly — keep the two lists in sync.
    ##ERdata['Expected ' + factor] = beta[0]*ERdata.CONST + beta[1]*ERdata.EP + beta[2]*ERdata.BP + beta[3]*ERdata.CP + beta[4]*ERdata.SP + beta[5]*ERdata.REP + beta[6]*ERdata.RBP + beta[7]*ERdata.RCP + beta[8]*ERdata.RSP + beta[9]*ERdata.CTEF + beta[10]*ERdata.PM1
    ERdata['Expected ' + factor] = beta[0]*ERdata.CONST + beta[1]*ERdata.EP1 + beta[2]*ERdata.EP2 + beta[3]*ERdata.RV1 + beta[4]*ERdata.RV2 + beta[5]*ERdata.REP + beta[6]*ERdata.RBP + beta[7]*ERdata.RCP + beta[8]*ERdata.RSP + beta[9]*ERdata.CTEF + beta[10]*ERdata.LIT
    ERdata = ERdata.set_index('SEDOL')
    return ERdata['Expected ' + factor]
#Function to compute the Expected Returns based on the given factor
#using Regression Model (reduced 7-factor specification)
def Reg_ExpectedReturns_newmodel(stockdata,factor,i):
    """Same approach as Reg_ExpectedReturns but with a reduced 7-factor model.

    Runs a cross-sectional OLS per month, averages coefficients, and applies
    them to the factor values at date `i`.

    Returns:
        pandas Series named 'Expected <factor>' indexed by SEDOL.
    """
    #RegResults = pd.DataFrame(index=['const','BP','CP','REP','FEP1','CTEF','PM1','FGR1'])
    RegResults = pd.DataFrame(index=['const','EP2','RV1','REP','FEP1','CTEF','LIT','RPM71'])
    #Run regression for each month in the dataframe
    for d in set(stockdata['DATE']):
        tempdata = stockdata[stockdata['DATE'] == d]
        Y = tempdata['RETURN']
        Y = Y.astype(float)
        ##X = tempdata[['BP','CP','REP','FEP1','CTEF','PM1','FGR1']]
        X = tempdata[['EP2','RV1','REP','FEP1','CTEF','LIT','RPM71']]
        X = X.astype(float)
        if (factor == 'CTEF'):
            X.CTEF = 0
        X = sm.add_constant(X)
        model = sm.OLS(Y,X)
        reg = model.fit()
        RegResults[d.strftime('%Y-%m')] = reg.params
    RegResults['Final Coeff'] = RegResults.mean(axis=1)
    #Obtain beta values (average of 24 past months' beta)
    beta = RegResults['Final Coeff']
    ERdata = stockdata[stockdata.DATE == pd.to_datetime(i)]
    ##cols = ['BP','CP','REP','FEP1','CTEF','PM1','FGR1']
    cols = ['EP2','RV1','REP','FEP1','CTEF','LIT','RPM71']
    ERdata = ERdata[['SEDOL'] + cols]
    ERdata['CONST'] = 1
    ERdata[cols] = ERdata[cols].astype(float)
    #Compute the Expected Returns using the beta values and given factors
    # NOTE(review): positional beta indexing — must stay in sync with the
    # RegResults index order declared above.
    ERdata['Expected ' + factor] = beta[0]*ERdata.CONST + beta[1]*ERdata.EP2 + beta[2]*ERdata.RV1 + beta[3]*ERdata.REP + beta[4]*ERdata.FEP1 + beta[5]*ERdata.CTEF + beta[6]*ERdata.LIT + beta[7]*ERdata.RPM71
    ERdata = ERdata.set_index('SEDOL')
    return ERdata['Expected ' + factor]
def optimal_portfolio(Returns, CovMatrix):
    """Maximize expected return subject to a variance cap, long-only, fully invested.

    Solves, via cvxopt's SOCP solver:
        max  pbar' x
        s.t. ||A x||_2 <= sqrt(varmax)   (portfolio variance cap)
             x_i >= 0                    (no shorting)
             1' x = 1                    (weights sum to one)

    Args:
        Returns: expected-return vector (pandas Series / ndarray).
        CovMatrix: return covariance matrix.

    Returns:
        ndarray of optimal weights (column vector), in Returns' order.
    """
    # Generate mean return vector
    pbar = Returns
    SIGMA = CovMatrix
    numPOS = pbar.size
    # NOTE(review): 0.0064 = 0.08^2, i.e. an 8% volatility cap per period —
    # presumably monthly; confirm against the data frequency.
    varmax = 0.0064
    # Compute A matrix in optimization
    # A is the square root of SIGMA
    U,V = np.linalg.eig(SIGMA)
    # Project onto PSD
    U[U<0] = 0
    Usqrt = np.sqrt(U)
    A = np.dot(np.diag(Usqrt),V.T)
    # Generate G and h matrices
    # First cone: (sqrt(varmax), A x) — encodes x' SIGMA x <= varmax.
    G1temp = np.zeros((A.shape[0]+1,A.shape[1]))
    G1temp[1:,:] = -A
    h1temp = np.zeros((A.shape[0]+1,1))
    h1temp[0] = np.sqrt(varmax)
    # One degenerate cone per asset encoding the long-only constraint x_i >= 0.
    for i in np.arange(numPOS):
        ei = np.zeros((1,numPOS))
        ei[0,i] = 1
        if i == 0:
            G2temp = [matrix(-ei)]
            h2temp = [matrix(np.zeros((1,1)))]
        else:
            G2temp += [matrix(-ei)]
            h2temp += [matrix(np.zeros((1,1)))]
    # Construct list of matrices
    # Equality constraint F x = g, i.e. sum of weights == 1.
    Ftemp = np.ones((1,numPOS))
    F = matrix(Ftemp)
    g = matrix(np.ones((1,1)))
    G = [matrix(G1temp)] + G2temp
    H = [matrix(h1temp)] + h2temp
    # Solve QCQP
    # Passing in -matrix(pbar) since maximizing
    solvers.options['show_progress'] = False
    sol = solvers.socp(-matrix(pbar),Gq=G,hq=H,A=F,b=g)
    xsol = np.array(sol['x'])
    # return answer
    return xsol
#Function to return the returns generated using the Portfolio Strategy
def PortfolioStrategy_returns(mergeddata,factor):
    """Backtest the factor strategy and return its monthly portfolio returns.

    For each rebalance date: shortlist the universe, take the prior two years
    of data, fill gaps, estimate the covariance matrix and expected returns
    (regression model, or the 'new model' when factor == 'ML'), optimize the
    weights, and record the next month's realized weighted return.

    Returns:
        DataFrame with a 'RETURN' column indexed by the post-rebalance dates.

    NOTE(review): several apparent inconsistencies to confirm —
      * the comment says Dec-2004..Nov-2017, but enddate is 2005-11-01;
      * the `else: continue` below skips every non-December rebalance date,
        so only December dates produce a recorded return;
      * `startdate` is reused both for the date range and the 2-year lookback.
    """
    FinalPortfolioRET = pd.DataFrame(columns=['RETURN'])
    #Define daterange from Dec-2004 to Nov-2017 to compute returns from Jan-2005 to Dec-2017
    startdate = date(2004,11,30)
    enddate = date(2005,11,1)
    rangeofdates = set(mergeddata.DATE[(mergeddata.DATE >= startdate) & (mergeddata.DATE <= enddate)])
    #Iterate through each date in the daterange
    for i in tqdm(rangeofdates):
        tempdata = mergeddata[mergeddata['DATE'] == i]
        print(i)
        #Call function to shortlist data based on given conditions in part 2
        stocklist = shortlist_stocks(tempdata)
        finaldata = mergeddata[mergeddata.SEDOL.isin(stocklist)]
        #Define startdate of evaluation data 2 year prior to current month-year
        startdate = i.replace(year=i.year-2)
        finaldata = finaldata[(finaldata['DATE'] > startdate) & (finaldata['DATE'] <= i)]
        #Completing the dataframe using forward and then backward fill
        stockdata = pd.DataFrame()
        for SEDOL in set(finaldata['SEDOL']):
            filterdata = finaldata[finaldata.SEDOL == SEDOL]
            filterdata = filterdata.sort_values(by=['DATE'])
            filterdata = filterdata.fillna(method='ffill')
            filterdata = filterdata.fillna(method='bfill')
            # NOTE(review): DataFrame.append is deprecated in modern pandas;
            # collect frames in a list and pd.concat once if upgrading.
            stockdata = stockdata.append(filterdata)
        #Defining the required factors and dropping NAs if present
        ##factors = ['EP','BP','CP','SP','REP','RBP','RCP','RSP','CTEF','PM1','FEP1','FGR1']
        factors = ['EP1','EP2','RV1','RV2','REP','RBP','RCP','RSP','CTEF','LIT']
        #stockdata = stockdata.dropna(subset=factors)
        #Compute excess return for the given stocksdata
        #stockdata['Excess RET'] = stockdata['RET'] - stockdata['Idx RET']
        stockdata = stockdata.sort_values(by=['SEDOL','DATE'])
        #Call function to generate the Covariance Matrix
        CovMatrix, stocklist = CalCovMatrix(stockdata)
        stockdata = stockdata[stockdata.SEDOL.isin(stocklist)]
        #Calling function to compute Expected Return selectively based on whether it is a ML model or Regression model
        if (factor == 'ML'):
            ExpectedReturn = Reg_ExpectedReturns_newmodel(stockdata,factor,i)
        else:
            ExpectedReturn = Reg_ExpectedReturns(stockdata,factor,i)
        #Call function to generate optimized portfolio weights using Expected return and Covariance matrix computed above
        portfolioweights = pd.DataFrame(optimal_portfolio(ExpectedReturn,CovMatrix), index=ExpectedReturn.index,columns=['weights'])
        #Compute the next month-year combo from the current date in question
        if (i.month == 12):
            try:
                nextdate = i.replace(year=i.year+1,month=1)
            except:
                nextdate = i.replace(month=i.month+1)
        else:
            continue
        #Filter data out for the nextdate and the portfolio stocks
        newdata = mergeddata[(mergeddata.DATE == nextdate) & (mergeddata.SEDOL.isin(portfolioweights.index))]
        newdata = newdata.set_index('SEDOL')
        newdata = newdata['RETURN']
        #Compute final portfolio with optimized weights and returns
        portfolio = pd.concat([portfolioweights,newdata],axis=1).dropna()
        portfolio['weighted RET'] = portfolio['weights']*portfolio['RETURN']
        PortfolioRET = portfolio['weighted RET'].sum()
        #Compute PortfolioReturn for the month-year in question
        final = pd.DataFrame([PortfolioRET],index=[nextdate],columns=['RETURN'])
        #Store Portfolio Return in a dataframe
        FinalPortfolioRET = FinalPortfolioRET.append(final)
    return FinalPortfolioRET
######### Main Code begins #########
#Read in Factor Data as provided by the company
#FactorData = pd.read_csv('Total_Data5.csv', low_memory = False)
FactorData = pd.read_csv('C:/Users/TRANSFORMER/Desktop/QCF CLasswork/Computational finance/Final project/rus1000_stocks_factors.csv',
skiprows=4, low_memory = False, names=['Symbol', 'Company Name', 'DATE', 'SEDOL', 'FS_ID', 'RETURN', 'RCP', 'RBP', 'RSP', 'REP', 'RDP', 'RPM71', 'RSTDEV', 'ROE1', 'ROE3', 'ROE5', 'ROA1', 'ROA3', 'ROIC', 'BR1', 'BR2', 'EP1', 'EP2', 'RV1', 'RV2', 'CTEF', '9MFR', '8MFR', 'LIT', 'extra'])
print(FactorData.columns)
# Repair rows whose columns are shifted right by one — presumably company
# names containing a comma (e.g. "... , INC.") split into an extra field,
# pushing ' INC.'/' INC' into the DATE column. Shift those rows back left.
rightcol = FactorData.columns[2:29]
wrongcol = FactorData.columns[3:30]
FactorData.loc[FactorData['DATE']==' INC.', rightcol]=FactorData.loc[FactorData['DATE']==' INC.', wrongcol].values
FactorData.loc[FactorData['DATE']==' INC', rightcol]=FactorData.loc[FactorData['DATE']==' INC', wrongcol].values
FactorData = FactorData.drop([0])
#FactorData.to_csv('C:/Users/TRANSFORMER/Desktop/QCF CLasswork/Computational finance/Final project/factordata2.csv')
#print(FactorData)
# Normalize DATE strings to 'm-d-Y' so they match RUAData's format below.
FactorData['DATE'] = FactorData['DATE'].apply(lambda x: x.strip())
FactorData['DATE'] = FactorData['DATE'].apply(lambda x: str(datetime.strptime(str(x), '%m/%d/%Y').month)
+ '-' + str(datetime.strptime(str(x), '%m/%d/%Y').day)
+ '-' + str(datetime.strptime(str(x), '%m/%d/%Y').year))
#FactorData.to_csv('C:/Users/TRANSFORMER/Desktop/QCF CLasswork/Computational finance/Final project/factordata.csv')
type(FactorData['DATE'][1])
#FactorData['DATE'] = pd.to_datetime(FactorData.DATE, format='%Y%m%d', errors='ignore')
#print(FactorData)
#del FactorData['Unnamed: 0']
#Read in Russell 3000 index monthly return data
RUAData = pd.read_csv('C:/Users/TRANSFORMER/Desktop/QCF CLasswork/Computational finance/Final project/Benchmark Returns.csv')
#RUAData['Idx RET'] = (RUAData['Adj Close'] / RUAData['Adj Close'].shift(1))-1
RUAData.rename(columns={'Date':'DATE','Russell 1000 Bench Return':'Idx RET'}, inplace=True)
#print(RUAData)
# Benchmark dates arrive as YYYYMMDD ints; reformat to the same 'm-d-Y' strings.
RUAData['DATE'] = RUAData['DATE'].apply(lambda x: str(datetime.strptime(str(x), '%Y%m%d').month)
+ '-' + str(datetime.strptime(str(x), '%Y%m%d').day)
+ '-' + str(datetime.strptime(str(x), '%Y%m%d').year))
#print(RUAData)
#RUAData['DATE'] = pd.to_datetime(RUAData.DATE, format='%Y%m%d', errors='ignore')
#benchmark_data = benchmark_data.rename(columns={'Date':'DATE'})
#RUAData['DATE'] = pd.to_datetime(RUAData.DATE, format='%Y-%m-%d')
#Filter Russell Index Data for required columns
cols = ['DATE','Idx RET']
RUAData = RUAData[cols]
#Convert Return values to numbers from percentages in Factordata
#FactorData['DATE'] = pd.to_datetime(FactorData.DATE, format='%m/%d/%Y')
col_list = list(FactorData.columns)
del col_list[0:5]
#FactorData = FactorData.drop([0])
for col in col_list:
    FactorData[col] = pd.to_numeric(FactorData[col])
FactorData['RETURN'] = FactorData['RETURN']/100
#Merge the two dataframes on Date
mergeddata = FactorData.merge(RUAData, on=['DATE'], how='inner')
mergeddata['DATE'] = pd.to_datetime(mergeddata.DATE, format='%m-%d-%Y')
#Filter and clean merged data
# Bug fix: comparing a datetime64 column against the tuple (2004,1,1) raises
# a TypeError in pandas; compare against a real Timestamp instead.
mergeddata = mergeddata[mergeddata.DATE >= pd.Timestamp(2004, 1, 1)]
fctrs = ['EP1','EP2','RV1','RV2','REP','RBP','RCP','RSP','CTEF','LIT']
# Fix: replace '.' placeholders with NaN BEFORE dropping rows with missing
# factors — the original dropped first, so '.' strings survived the dropna
# and leaked into the factor columns.
mergeddata = mergeddata.replace(to_replace='.',value=np.nan)
mergeddata = mergeddata.dropna(subset=fctrs)
print(mergeddata)
#dropcols = ['CUSIP','TICKER','GVKey','STATPERS','USFIRM','CURCODE','MRV1','MRV2','TOT']
#mergeddata = mergeddata.drop(columns=dropcols)
# Sort and keep one row per (DATE, SEDOL) pair.
mergeddata = mergeddata.sort_values(by=['SEDOL', 'DATE'])
mergeddata = mergeddata.drop_duplicates(subset=['DATE','SEDOL'],keep='first')
mergeddata.to_csv('C:/Users/TRANSFORMER/Desktop/QCF CLasswork/Computational finance/Final project/mergeddatana.csv')
#Define the 3 factors
factor1 = 'RETURN'
factor2 = 'CTEF'
factor3 = 'ML'
#Compute Portfolio Returns using factor = RETURN
#Calculate Info Ratio and Max Drawdown for the portfolio strategy
PortfolioReturns_RET = PortfolioStrategy_returns(mergeddata,factor1)
PortfolioReturns_RET.to_csv('RET Return.csv')
InfoRatio_RET = calcInfoRatio(PortfolioReturns_RET)
Maxdrawdown_RET = calcMaxDrawDown(PortfolioReturns_RET)
#Compute Portfolio Returns using factor = CTEF
#Calculate Info Ratio and Max Drawdown for the portfolio strategy
# NOTE(review): factor2 ('CTEF') is defined but the call computing
# PortfolioReturns_CTEF is missing — only its post-processing is commented
# out below. Restore the PortfolioStrategy_returns(mergeddata,factor2) call
# before uncommenting these lines.
# PortfolioReturns_CTEF.to_csv('CTEF Return.csv')
# InfoRatio_CTEF = calcInfoRatio(PortfolioReturns_CTEF)
# Maxdrawdown_CTEF = calcMaxDrawDown(PortfolioReturns_CTEF)
#Compute Portfolio Returns using LASSO model
#Calculate Info Ratio and Max Drawdown for the portfolio strategy
PortfolioReturns_Lasso = PortfolioStrategy_returns(mergeddata,factor3)
PortfolioReturns_Lasso.to_csv('ML Return_jugaad.csv')
InfoRatio_Lasso = calcInfoRatio(PortfolioReturns_Lasso)
Maxdrawdown_Lasso = calcMaxDrawDown(PortfolioReturns_Lasso)
|
# Advent of Code 2019: https://adventofcode.com/2019/day/11
#
# NOTE(review): the link above says day 11 but the code solves day 13
# (arcade cabinet) — likely a copy-paste leftover from the day-11 solution.
from AoC13_classes import ArcadeCabinet
# Puzzle input: a single comma-separated Intcode program on one line.
infile = open('data/input_13.txt','r')
inputData1 = infile.readline().strip().split(',')
# Part 1
e = ArcadeCabinet(inputData1)
e.RunGame()
# e.PlotPanels()
print("Part 1: ", e.NumberOfBlocks())
# Part 2
# result = w.RunAgain()
# print("Part 2: ", result)
|
# Python 2 exercise script (LPTHW ex15 style): prints a file given on the
# command line, then optionally closes it and opens a second file.
from sys import argv
script, filename = argv
txt = open(filename)
print "Here's your file %r:" % filename
print txt.read()
print "Press y to close the file or N to keep open"
x = raw_input("")
if x == "y":
    txt.close()
elif x == "n":
    # NOTE(review): the file pointer is already at EOF after the read above,
    # so this second read returns an empty string (and its result is
    # discarded anyway) — confirm the intended behavior.
    txt.read()
print "Lets open a second file:"
file_again = raw_input ("> ")
txt_again = open(file_again)
print txt_again.read()
|
"""
This is the ADT for Queue data structure that can be used
Assumption : Rear end of the queue is at index 0 and front is at the end of the list
"""
class QueueADT:
    """FIFO queue backed by a list: rear at index 0, front at the last index."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """True when the queue holds no items."""
        return not self.items

    def enqueue(self, item):
        """Add an item at the rear (index 0)."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the front item (the end of the list)."""
        return self.items.pop()

    def size(self):
        """Number of queued items."""
        return len(self.items)

    def peek_front(self):
        """Front item (next to be dequeued), without removing it."""
        return self.items[-1]

    def peek_rear(self):
        """Most recently enqueued (rear) item, without removing it."""
        return self.items[0]
# Verification
#q = QueueADT()
#q.enqueue(4)
#q.enqueue('dog')
#q.enqueue(True)
#print(q.size())
|
# Generated by Django 2.2.2 on 2019-07-22 08:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: creates Post and drops the Guardian and Student models."""

    dependencies = [
        ('post', '0005_auto_20190719_0917'),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='Name')),
                ('surname', models.CharField(max_length=20, verbose_name='Surname')),
            ],
        ),
        migrations.DeleteModel(
            name='Guardian',
        ),
        migrations.DeleteModel(
            name='Student',
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-22 10:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated schema: BroadcastCompany, LatestUpdate, MovieSchedule."""

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='BroadcastCompany',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bc_name', models.CharField(max_length=100, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='LatestUpdate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('latest_update', models.DateTimeField()),
                ('broadcast_company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scheduler_core.BroadcastCompany')),
            ],
        ),
        migrations.CreateModel(
            name='MovieSchedule',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=300)),
                ('start_time', models.DateTimeField()),
                ('end_time', models.DateTimeField()),
                ('ratings', models.SmallIntegerField(blank=True)),
                ('broadcast_company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scheduler_core.BroadcastCompany')),
            ],
        ),
    ]
|
from django.contrib import admin
from django_mongoengine import mongo_admin as mongo
# Register your models here.
from mongoapp import models
# Expose Profile documents in the django-mongoengine admin site.
mongo.site.register(models.Profile)
|
from controllers.relations.group_course_relation import group_course_controller
from controllers.course.group_project import GroupProjectController
from methods.errors import *
from flask_restful import Resource, reqparse
from flask import jsonify
controller = group_course_controller()
# /courses/<course_code>/groups
class all_group_course(Resource):
    """REST resource for /courses/<course_code>/groups."""

    def get(self, course_code):
        """Return every group project belonging to the given course."""
        try:
            groups = [
                GroupProjectController.get_group(membership["group_id"])
                for membership in controller.get_all_course_groups(course_code)
            ]
        except ErrorHandler as e:
            # Controller failures carry a ready-to-return error payload.
            return e.error
        return {'groups': groups, "status_code": 200}
from collections import defaultdict
class Solution:
def checkIfPrerequisite(self, n: int, prerequisites: List[List[int]], queries: List[List[int]]) -> List[bool]:
graph = defaultdict(set)
for pair in prerequisites:
graph[pair[1]].add(pair[0])
def bfs(start):
visited = set()
q = [start]
while len(q) > 0:
curr = q.pop(0)
graph[start].add(curr)
for dest in graph[curr]:
if dest not in visited:
q.append(dest)
visited.add(dest)
for i in range(n):
bfs(i)
ans = []
for q in queries:
ans.append(q[0] in graph[q[1]] if q[0] != q[1] else False)
return ans
|
import os
from argparse import ArgumentParser
from pytorch_lightning import Trainer, seed_everything
from models.ComposerVAE import InfoVAE
from datasets.collection import *
# Registries mapping CLI-friendly names to the model / dataset classes.
_MODELS = dict(InfoVAE=InfoVAE)
_DATASETS = dict(BigMIDI=BigMIDISet,
                 VideoGameMIDI=VideoGameMIDI)
distributed = False  # NOTE(review): never read anywhere in this file
def main(args):
    """Main training routine specific for this project.

    Builds the model from parsed CLI args and hands it to a
    PyTorch-Lightning Trainer.
    """
    # 1 INIT LIGHTNING MODEL
    # Fixed: `ComposerVAE` was an undefined name (NameError); the class
    # imported above is InfoVAE.
    model = InfoVAE(**vars(args))

    # 2 INIT TRAINER
    trainer = Trainer.from_argparse_args(args)

    # 3 START TRAINING
    trainer.fit(model)
def run_cli():
    """Parse project, model, and Trainer CLI arguments, then launch training."""
    # ------------------------
    # TRAINING ARGUMENTS
    # ------------------------
    # these are project-wide arguments
    root_dir = os.path.dirname(os.path.realpath(__file__))
    parent_parser = ArgumentParser(add_help=False)

    # Each LightningModule defines arguments relevant to it.
    # Fixed: `ComposerVAE` was an undefined name; the imported class is InfoVAE.
    parser = InfoVAE.add_model_specific_args(parent_parser, root_dir)
    parser = Trainer.add_argparse_args(parser)
    parser.set_defaults(gpus=2)
    args = parser.parse_args()

    # ---------------------
    # RUN TRAINING
    # ---------------------
    main(args)


if __name__ == '__main__':
    run_cli()
from django import forms
from django.forms import ModelForm
from ckeditor.widgets import CKEditorWidget
from pages.models import Page
class PageForm(ModelForm):
    """Editor form for Page with a CKEditor-backed content field."""
    content = forms.CharField(widget=CKEditorWidget())

    class Meta:
        model = Page
        # Fixed: since Django 1.8 a ModelForm must declare `fields` or
        # `exclude`, otherwise ImproperlyConfigured is raised at class use.
        fields = '__all__'
|
import pandas as pd
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
from collections import Counter
# Load reviews/labels, count word frequencies, and keep the 10,000 most
# common words as the bag-of-words vocabulary.
reviews = pd.read_csv('reviews.txt', header=None)
labels = pd.read_csv('labels.txt', header=None)
total_counts = Counter()
for idx, row in reviews.iterrows():
    review = row[0]
    for word in review.split(' '):
        total_counts[word] += 1
print("Total words in data set: ", len(total_counts))
vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000]
print(vocab[:60])
word2idx = {word: index for index, word in enumerate(vocab)} ## create the word-to-index dictionary here
def text_to_vector(text):
    """Return a bag-of-words count vector (len(vocab) dims) for one text.

    Words outside the vocabulary are ignored.
    """
    vector = np.zeros(len(vocab))
    for word in text.split(' '):
        index = word2idx.get(word, None)
        # Fixed: `if index:` also skipped index 0, silently dropping the
        # single most frequent vocabulary word from every vector.
        if index is not None:
            vector[index] += 1
    return vector
# Vectorize every review, one-hot the labels, and carve out a shuffled
# train/test split.
word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_)
for ii, (_, text) in enumerate(reviews.iterrows()):
    word_vectors[ii] = text_to_vector(text[0])
Y = (labels == 'positive').astype(np.int_)
records = len(labels)
shuffle = np.arange(records)
np.random.shuffle(shuffle)
test_fraction = 0.9  # NOTE(review): despite the name this is the TRAIN fraction (90% train / 10% test)
train_split, test_split = shuffle[:int(records * test_fraction)], shuffle[int(records * test_fraction):]
trainX, trainY = word_vectors[train_split, :], to_categorical([yv[0] for yv in Y.values[train_split]], 2)
testX, testY = word_vectors[test_split, :], to_categorical([yv[0] for yv in Y.values[test_split]], 2)
# Network building
def build_model(learning_rate=0.1):
    """Construct the two-hidden-layer softmax classifier over the vocab.

    Resets the default TF graph first — required by tflearn between builds.
    """
    tf.reset_default_graph()
    network = tflearn.input_data([None, len(vocab)])
    for width in (5, 5):
        network = tflearn.fully_connected(network, width, activation='ReLU')
    network = tflearn.fully_connected(network, 2, activation='softmax')
    network = tflearn.regression(network, optimizer='sgd',
                                 learning_rate=learning_rate,
                                 loss='categorical_crossentropy')
    return tflearn.DNN(network, tensorboard_verbose=3)
model = build_model()
# Training: 10% of the training data is held out for validation.
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=100)
# Column 0 of the softmax output is thresholded at 0.5 and compared against
# testY[:, 0] to measure accuracy on the held-out split.
predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_)
test_accuracy = np.mean(predictions == testY[:,0], axis=0)
print("Test accuracy: ", test_accuracy)
# Helper function that uses your model to predict sentiment
def test_sentence(sentence):
    """Print the model's positive-class probability and verdict for one sentence."""
    positive_prob = model.predict([text_to_vector(sentence.lower())])[0][1]
    print('Sentence: {}'.format(sentence))
    print('P(positive) = {:.3f} :'.format(positive_prob),
          'Positive' if positive_prob > 0.5 else 'Negative')
# Interactive loop: type a sentence to score it, or 'q' to quit.
while True:
    sentence = input('Test your sentence:')
    # Fixed: check the quit sentinel *before* scoring, so typing 'q' no
    # longer runs the model on the literal string "q" first.
    if sentence == 'q':
        exit(0)
    test_sentence(sentence)
|
#Set is unordered and unindexed .
#so we cannot change set values or access values using index. But we can add values
"""
s={1,21,2,2,"ram","ram"}
print("len :",len(s))
print("type :",type(s))
for x in s:
print("set :",x)
"""
#Set constructor
"""
l=[1,2,3,4,2,"ram",True,30.6]
print("list :",l)
s=set(l)
print("set : ",s)
s1=input("enter a value :")
if s1 in s:
print(f"{s1} is present")
else:
print(f"{s1} is not there")
"""
#Adding items in set
"""
s={1,2,3,"ram",2,3,5}
print("set : ",s)
s.add("krish")
print("new set :",s)
"""
#concatinating two sets
"""
s1={1,2,3,"ram",2,4,5}
print("set 1 :",s1)
s2={1,2,3,4,"krish"}
print("set 2 :",s2)
s1.update(s2)
print("new set :",s1)
"""
#intersecting two sets
"""
s1={1,2,3,"ram",2,4,5}
print("set 1 :",s1)
s2={1,2,3,4,"krish"}
print("set 2 :",s2)
s3=s1.intersection(s2)
print("new set :",s3)
"""
#removing element from set
"""
s1={1,2,3,"ram",2,4,5}
print("set 1 :",s1)
s1.remove(2)#if value is not there remove will raise an error
s1.discard(6)#if value is not there if will go with flow without raising error
print("new set :",s1)
print("removed value from set :",s1.pop())#pop will remove last item in a set
s1.clear()
print("After clearing set :",s1)
del s1
print("after deleting set :",s1)# this will raise error as s1 set is deleted
"""
#union of sets
"""
s1={1,2,3,"ram",2,4,5}
print("set 1 :",s1)
s2={1,2,3,4,"krish"}
print("set 2 :",s2)
s3=s1.union(s2)
print("Union s3 :",s3)
"""
#intersection_update method in set
"""
s1={1,2,3,"ram",2,4,5}
print("set 1 :",s1)
s2={1,2,3,4,"krish"}
print("set 2 :",s2)
s1.intersection_update(s2)
print("intereseting values in s1:",s1)
"""
#symmetric_difference_update keeps the elements present in exactly one set (the opposite of intersection)
s1={1,2,3,"ram",2,4,5}  # duplicate literals collapse on creation
print("set 1 :",s1)
s2={1,2,3,4,"krish"}
print("set 2 :",s2)
# In-place: keep only the elements present in exactly one of the two sets.
s1.symmetric_difference_update(s2)
print("intereseting values in s1:",s1)
#! /usr/bin/env python2.6
# $Author: ee364d02 $
# $Date: 2013-10-30 13:18:33 -0400 (Wed, 30 Oct 2013) $
# $HeadURL: svn+ssh://ece364sv@ecegrid-lnx/home/ecegrid/a/ece364sv/svn/F13/students/ee364d02/Prelab11/bitWorker.py $
# $Revision: 62157 $
import os
import re
import math
import sys
if len(sys.argv) != 2:
sys.stderr.write("usgae: parseTransactions.py <accounts filename> <transactions filename>\n")
sys.exit(1)
if not os.path.isfile(sys.argv[1]) or not os.access(sys.argv[1], os.R_OK):
sys.stderr.write("error: %s is not a readable file.\n" % (sys.argv[1],))
sys.exit(2)
file = sys.argv[1]
good = ".good"
bad = ".bad"
fileOutGood = str(file) + good
fileOutBad = str(file) + bad
#if not os.path.isfile(fileOutGood):
# sys.stderr.write("error: %s already exists.\n" % (fileOutGood,))
# sys.exit(2)
#
#if not os.path.isfile(fileOutBad):
# sys.stderr.write("error: %s already exists.\n" % (fileOutBad,))
# sys.exit(2)
#
infile = open(sys.argv[1], "r")
outfileGood = open(fileOutGood,"w")
outfileBad = open(fileOutBad, "w")
regex1 = "^[0-9]{2}[A-Z][0-9]{4}(\-(NJ|IN|FL)20(0[7-9]|1[0-3]))?(\*)?$"
regex2 = "^[0-9]+$"
for line in infile:
line = line.strip()
sline = line.split(",")
partNum = sline[0]
quantity = sline[2]
if re.match(regex1, partNum):
if re.match(regex2, quantity):
outfileGood.write(line+"\n")
else:
outfileBad.write(line+"\n")
else:
outfileBad.write(line+"\n")
|
# -*- coding:utf-8 -*-
import unittest
import mock
from ...haystack.utils import HaystackActionTask
class HaystackActionTaskTestCase(unittest.TestCase):
    """Verifies HaystackActionTask.run dispatches to the right index method."""

    def _run_and_assert(self, action, acting_method, idle_method,
                        get_instance_from_identifier, get_indexes):
        """Run `task.run(action, ...)` and check exactly one index method fired."""
        # setup
        task = HaystackActionTask()
        identifier = mock.Mock()
        instance = mock.Mock()
        get_instance_from_identifier.return_value = instance
        index = mock.Mock()
        get_indexes.return_value = [index]
        # action
        task.run(action, identifier)
        # assert
        self.assertTupleEqual((identifier,),
                              get_instance_from_identifier.call_args[0])
        self.assertTupleEqual((type(instance),), get_indexes.call_args[0])
        acting = getattr(index, acting_method)
        idle = getattr(index, idle_method)
        self.assertTupleEqual((instance,), acting.call_args[0])
        self.assertEqual(0, idle.call_count)

    @mock.patch('libs.haystack.utils.get_indexes')
    @mock.patch('libs.haystack.utils.get_instance_from_identifier')
    def test_run_should_call_remove_object_for_each_index(
            self, get_instance_from_identifier, get_indexes):
        self._run_and_assert(HaystackActionTask.REMOVE_ACTION,
                             'remove_object', 'update_object',
                             get_instance_from_identifier, get_indexes)

    @mock.patch('libs.haystack.utils.get_indexes')
    @mock.patch('libs.haystack.utils.get_instance_from_identifier')
    def test_run_should_call_update_object_for_each_index(
            self, get_instance_from_identifier, get_indexes):
        self._run_and_assert(HaystackActionTask.UPDATE_ACTION,
                             'update_object', 'remove_object',
                             get_instance_from_identifier, get_indexes)
|
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import paramiko
class Window(QWidget):
    """Login window: collects host IP, device username, stream URL and password."""
    def __init__(self, parent = None):
        QWidget.__init__(self, parent)
        self.setGeometry(100,100,256,128)
        self.loginFrame = QFrame()
        # Input widgets. NOTE(review): `usernameBox` actually holds the
        # stream URL (see the "url" label and Worker.run).
        self.usernameBox = QLineEdit()
        self.hostIpInput = QLineEdit()
        self.hostNameInput = QLineEdit()
        self.pwordBox = QLineEdit()
        # Labels and buttons.
        self.pwordLabel = QLabel("Password")
        self.usernameLabel = QLabel("url")
        self.ipLabel = QLabel("IP")
        self.devveLabel = QLabel("device username")
        self.enterButton = QPushButton("execute")
        self.exitButton = QPushButton("stop video")
        self.loginLayout = QVBoxLayout()
        # Mask password input.
        self.pwordBox.setEchoMode(QLineEdit.Password)
        # Stack label/field pairs vertically, then the two action buttons.
        self.loginLayout.addWidget(self.ipLabel)
        self.loginLayout.addWidget(self.hostIpInput)
        self.loginLayout.addWidget(self.devveLabel)
        self.loginLayout.addWidget(self.hostNameInput)
        self.loginLayout.addWidget(self.usernameLabel)
        self.loginLayout.addWidget(self.usernameBox)
        self.loginLayout.addWidget(self.pwordLabel)
        self.loginLayout.addWidget(self.pwordBox)
        self.loginLayout.addWidget(self.enterButton)
        self.loginLayout.addWidget(self.exitButton)
        # The stop button only appears once streaming starts (executeSSH).
        self.exitButton.hide()
        self.enterButton.clicked.connect(self.executeSSH)
        self.setWindowTitle(self.tr("remote live streamer"))
        self.setLayout(self.loginLayout)

    def executeSSH(self):
        # Launch the SSH work on a background thread so the GUI stays responsive.
        self.thread = Worker(self)
        self.thread.start()
        self.exitButton.show()
        self.exitButton.clicked.connect(self.thread.exit)
class Worker(QThread):
    """Background thread that starts (and can stop) a remote VLC stream over SSH."""

    def __init__(self, window, parent=None):
        QThread.__init__(self, parent)
        # Fixed: the `window` parameter was previously ignored and the
        # methods relied on the module-level `window` global instead.
        self.window = window
        self.exiting = False  # NOTE(review): never read; kept for compatibility

    def _connect(self):
        """Open an SSH connection using the credentials captured from the GUI."""
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(hostname=self.window.hostIp,
                       username=self.window.hostName,
                       password=self.window.pwordPlain)
        return client

    def run(self):
        w = self.window
        # Snapshot the URL/credentials from the UI, then blank the password box.
        w.url = w.usernameBox.text()
        w.pwordPlain = w.pwordBox.text()
        w.hostName = w.hostNameInput.text()
        w.hostIp = w.hostIpInput.text()
        w.pwordBox.setText("")
        client = self._connect()
        # Fixed: the original built "cvlc <url>--caching=60000" with no space,
        # gluing the option onto the URL so VLC never saw it.
        command = "cvlc " + w.url + " --caching=60000"
        stdin, stdout, stderr = client.exec_command(command)
        print(stdout.readlines())
        print(stderr.readlines())

    def exit(self):
        # Stop the remote stream and hide the stop button again.
        client = self._connect()
        client.exec_command("sudo killall vlc")
        self.window.exitButton.hide()
# Build the Qt application, show the login window, and enter the event loop.
app = QApplication(sys.argv)
window = Window()
window.show()
app.setWindowIcon(QIcon('streaming.ico'))
sys.exit(app.exec_())
|
# Remove duplicates from a user-entered list. dict keys are unique and
# preserve insertion order, so dict.fromkeys de-duplicates while keeping order.
lst=[]
n=int(input("enter the number of elements:\n"))
for i in range(n):
    x=input("enter elements:\n")
    lst.append(x)
lst=(dict.fromkeys(lst))  # NOTE(review): this rebinds lst to a *dict*, so the next print shows dict syntax
print(lst)
lst=list(dict.fromkeys(lst))#this prints in list form
print(lst)
#print(set(lst)) this prints in the form of set
|
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import ListView
from django.views.generic.base import View
from django.views.generic.edit import FormView, UpdateView
from .forms import CreateUserForm, AccountSettingsForm, CreatePost
from .models import CustomUser, Post
from .emailConfirmation import send_confirmation
from testing.settings import MAIN_ADR
from PIL import Image
import os
from testing.settings import MEDIA_ROOT
# Site root URL, used as the default redirect target throughout this module.
main_adr = MAIN_ADR


def logout_(request):
    """Log the user out and return them to the site root."""
    logout(request)
    return redirect(MAIN_ADR)
class PostList(ListView):
    """Front page: renders every published post."""
    model = Post
    success_url = main_adr
    template_name = 'index.html'

    def get(self, request, *args, **kwargs):
        published = Post.objects.filter(published=True)
        return render(self.request, 'index.html', {'posts': published})
class CreateUser(FormView):
    """Combined registration/login view (one form, two submit buttons)."""
    form_class = CreateUserForm
    success_url = main_adr
    template_name = 'reg_log.html'

    def get(self, request, *args, **kwargs):
        # Already-logged-in users are bounced straight to the success URL.
        if not request.user.is_anonymous:
            return redirect(self.get_success_url())
        else:
            return render(request, self.template_name, {'form': self.form_class})

    def form_valid(self, form):
        if not self.request.user.is_authenticated:
            email = form.cleaned_data['email']
            passw = form.cleaned_data['password']
            if 'reg' in self.request.POST:
                # Registration branch: only proceed when the email is unused.
                try:
                    CustomUser.objects.get(email=email)
                except ObjectDoesNotExist:
                    # "Not found" means the address is free to register.
                    send_confirmation(email, passw)
                    return redirect(main_adr + 'confirm/')
                else:
                    return render(self.request, 'reg_log.html', {'error_r': 'This profile exists. Log in.', 'form': self.get_form_class()})
            # Login branch (any non-'reg' submit).
            user = authenticate(self.request, username=email, password=passw)
            if user is not None:
                login(self.request, user)
            else:
                return render(self.request, self.template_name, {'form': self.form_class, 'error_l': 'Email or password is incorrect'})
        return redirect(self.get_success_url())
class AccountSettings(FormView):
    """Lets an authenticated user change avatar, password, and/or email."""
    form_class = AccountSettingsForm
    success_url = main_adr
    template_name = 'settings.html'

    def get(self, request, *args, **kwargs):
        if request.user.is_authenticated:
            return render(request, self.template_name, {'form': self.get_form_class()})
        else:
            return redirect(main_adr)

    def form_valid(self, form):
        # The user must re-enter their current password to change anything.
        passw = form.cleaned_data['old_password']
        user = authenticate(self.request, username=self.request.user.email, password=passw)
        if user is not None:
            avatar = form.cleaned_data['avatar']
            n_password = form.cleaned_data['password']
            email = form.cleaned_data['email']
            current_user = CustomUser.objects.get(email=self.request.user.email)
            if avatar is not None:
                current_user.avatar = form.cleaned_data['avatar']
            if n_password != '':
                current_user.set_password(n_password)
                # NOTE(review): authenticate() runs before current_user.save(),
                # so the DB may still hold the old hash here — verify the new
                # password actually authenticates at this point.
                user_with_new_passw = authenticate(self.request, username=self.request.user.email, password=n_password)
                login(self.request, user_with_new_passw)
            if email != '':
                if self.request.user.email == email:
                    return render(self.request, self.template_name, {'form': self.get_form_class(), 'error': "It's your currect email"})
                # A confirmation email is sent; presumably the address is
                # switched only after confirmation elsewhere.
                try:
                    CustomUser.objects.get(email=email)
                except ObjectDoesNotExist:
                    send_confirmation(email, passw, self.request.user.email)
                else:
                    return render(self.request, self.template_name, {'form': self.get_form_class(), 'error': 'Email you entered belong to an another account'})
            current_user.save()
            return redirect(self.get_success_url())
        return render(self.request, self.template_name, {'form': self.get_form_class(), 'error': 'Wrong password'})
class CreatePostView(FormView):
    """Creates a post and downscales its image to at most 950px wide."""
    form_class = CreatePost
    # NOTE(review): other URLs in this module concatenate without a leading
    # '/'; this one may yield a '//' depending on MAIN_ADR — verify.
    success_url = main_adr + '/post-create/prelook/'
    template_name = 'postform.html'

    def form_valid(self, form):
        if not self.request.user.is_authenticated:
            return redirect(self.get_success_url())
        data = form.cleaned_data
        data['author'] = self.request.user
        post = Post(**data)
        post.save()
        # Resize the uploaded image so its width is at most 950px,
        # preserving the aspect ratio (both x and y divided by x/950).
        path = os.path.join(MEDIA_ROOT, str(post.img))
        im = Image.open(path)
        x, y = im.size[0], im.size[1]
        y = y / (x / 950) if x > 950 else y
        x = x / (x / 950) if x > 950 else x
        n_size = (int(x), int(y))
        im = im.resize(n_size)
        im.save(path)
        return redirect(main_adr + 'myposts/')
class UserPosts(UpdateView):
    """Lists the user's unpublished posts; AJAX POST toggles publication."""
    model = Post
    template_name = 'postlist.html'

    def get(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return redirect(main_adr)
        userPosts = Post.objects.filter(author=request.user).filter(published=False)
        context = {'posts': userPosts}
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        # AJAX endpoint: flip `published` on one of the caller's own posts.
        # NOTE(review): several paths return None (missing 'id', post owned
        # by someone else, authenticated non-AJAX request), which Django
        # turns into a server error — confirm whether that is intended.
        if request.is_ajax() and request.user.is_authenticated:
            if request.POST.get('id'):
                post_id = request.POST.get('id')
                p = Post.objects.get(pk=post_id)
                if p.author == request.user:
                    p.published = True if not p.published else False
                    p.save()
                    return HttpResponse()
        else:
            return HttpResponse('There is unknown error')
class PostContent(View):
    """Shows a single post; unpublished posts are visible only to their author."""
    template_name = 'post.html'
    model = Post

    def get(self, request, *args, **kwargs):
        post = get_object_or_404(Post, pk=kwargs['pk'])
        author_viewing = request.user.is_authenticated and request.user == post.author
        if post.published or author_viewing:
            return render(request, 'post.html', {'post': post})
        return redirect(main_adr)
|
#!/usr/bin/python
# encoding: utf-8
"""
@author: dong.lu
@contact: ludong@cetccity.com
@software: PyCharm
@file: infer.py
@time: 2019/04/9 10:30
@desc: 模型推理部分,分为本地载入模型推理或者tensorflow serving grpc 推理
"""
import os
import grpc
import codecs
import pickle
import warnings
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from tensorflow.core.framework import types_pb2
import tokenization
import re
import requests
class SentenceProcessor(object):
    """Splits long Chinese text into sentences and re-packs them under a length limit."""
    def __init__(self):
        # Running index of emitted chunks; reset to 0 after each
        # concat_sentences() call.
        self.sentence_index = 0

    @staticmethod
    def cut_sentence(sentence):
        """
        Split text into sentences.
        :arg
            sentence: string, the text to split
        :return
            a list of split sentences
        """
        # Insert '\n' after Chinese end-of-sentence punctuation (handling
        # closing quotes and ellipses), then split on it.
        sentence = re.sub('([。!?\?])([^”’])', r"\1\n\2", sentence)
        sentence = re.sub('(\.{6})([^”’])', r"\1\n\2", sentence)
        sentence = re.sub('(\…{2})([^”’])', r"\1\n\2", sentence)
        sentence = re.sub('([。!?\?][”’])([^,。!?\?])', r'\1\n\2', sentence)
        sentence = sentence.rstrip()
        return sentence.split("\n")

    def concat_sentences(self, sentences, max_seq_length):
        """
        Re-pack a list of sentences into chunks no longer than max_seq_length;
        whenever the running concatenation would exceed the limit, the chunk
        so far is flushed to the output list.
        :arg
            sentences: list of sentences to re-pack
            max_seq_length: maximum length of each produced chunk
        :return
            (new_sentences, sentences_index): the re-packed chunks, plus a
            2-level index list, e.g. [[0], [1, 2]] — each inner list holds
            the chunk indices that came from one original sentence.
        """
        # Chunks produced after splitting and re-concatenation.
        new_sentences = []
        # 2-level index: inner lists group chunks that share an origin sentence.
        sentences_index = []
        for sentence in sentences:
            sentence = self.clean_sentence(sentence)
            # A sentence already within the limit passes through unchanged.
            if len(sentence) <= max_seq_length:
                new_sentences.append(sentence)
                sentences_index.append([self.sentence_index])
                self.sentence_index += 1
            # Otherwise split it and re-pack the pieces.
            else:
                # Produce chunks (each under the limit) and the index list of
                # the chunks that belong to this one sentence.
                single_sentences, singe_index = self.concat_single_sentence(sentence, max_seq_length)
                new_sentences.extend(single_sentences)
                sentences_index.append(singe_index)
        # Reset so the next call starts counting from 0 again.
        self.sentence_index = 0
        return new_sentences, sentences_index

    def concat_single_sentence(self, sentence, max_seq_length):
        """
        Split one long sentence into pieces, then re-concatenate the pieces
        into chunks shorter than max_seq_length.
        :arg
            sentence: string, the sentence to split
        :return
            (single_sentences, singe_index): the chunks and their indices.
        """
        # Re-packed chunks.
        single_sentences = []
        # Indices of the chunks produced from this one sentence.
        singe_index = []
        tmp = ''
        # After this, `sentence` is a *list* of sentence pieces.
        sentence = self.cut_sentence(sentence)
        for i, sent in enumerate(sentence):
            tmp = tmp + sent
            if len(tmp) > max_seq_length:
                # Flush everything accumulated before `sent` (if长 enough),
                # then start a new chunk from `sent`.
                pre = tmp[0: len(tmp) - len(sent)]
                if len(pre) >= 2:
                    single_sentences.append(pre)
                    singe_index.append(self.sentence_index)
                    self.sentence_index += 1
                tmp = sent
            # On the last piece, flush any non-trivial remainder.
            if i == len(sentence) - 1 and len(tmp) >= 2:
                single_sentences.append(tmp)
                singe_index.append(self.sentence_index)
                self.sentence_index += 1
        return single_sentences, singe_index

    @staticmethod
    def clean_sentence(sentence):
        # Strip surrounding whitespace and remove tabs/full-width spaces.
        sentence = sentence.strip()
        sentence = re.sub('\t| ', '', sentence)
        return sentence
class InputFeatures(object):
    """A single set of features of data."""

    _FIELDS = ('input_ids', 'input_mask', 'segment_ids', 'label_ids')

    def __init__(self, input_ids, input_mask, segment_ids, label_ids):
        # Store each model input under its canonical attribute name.
        values = (input_ids, input_mask, segment_ids, label_ids)
        for name, value in zip(self._FIELDS, values):
            setattr(self, name, value)
class Entity(object):
    """Accumulates one named entity: a begin token plus the tokens that follow it."""

    def __init__(self, types):
        self._begin_token = None
        self.types = types
        self._middle_tokens = []

    @property
    def intermediate(self):
        return self._middle_tokens

    @intermediate.setter
    def intermediate(self, token):
        # Assignment *appends* a continuation token (I- tag) to the entity.
        self._middle_tokens.append(token)

    @property
    def begin(self):
        return self._begin_token

    @begin.setter
    def begin(self, token):
        self._begin_token = token

    def get_entity_types(self):
        """Return (full entity string, entity type)."""
        return self._begin_token + ''.join(self._middle_tokens), self.types
class InferenceBase(object):
    def __init__(self, vocab_file, labels, url=None, model_name=None,
                 signature_name=None, export_dir=None, do_lower_case=True):
        """
        Base inference class supporting two prediction modes:
          a. gRPC requests to a tensorflow-serving endpoint
          b. a locally loaded SavedModel
        :arg
            vocab_file: path to the BERT pretrained vocabulary
                (e.g. 'chinese_L-12_H-768_A-12/vocab.txt')
            labels: str or list. A str is the filename of a pickled
                label->id dict; a list is the label list itself.
            url: 'host:port' of the tensorflow-serving endpoint,
                e.g. '10.0.10.69:8500'
            export_dir: local SavedModel directory, e.g. r'model\1554373222'
            model_name: model name given to tensorflow serving at startup;
                must be set whenever url is set.
            signature_name: tensorflow serving signature name; must be set
                whenever url is set.
            do_lower_case: whether to lowercase input text
        :raise
            ValueError: when neither url nor export_dir is given, or when
                url is given without model_name / signature_name.
        """
        self.url = url
        self.export_dir = export_dir

        if export_dir:
            self.predict_fn = tf.contrib.predictor.from_saved_model(self.export_dir)

        if self.url:
            channel = grpc.insecure_channel(self.url)
            self.stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
            self.request = predict_pb2.PredictRequest()
            self.model_name = model_name
            self.signature_name = signature_name
            self.request.model_spec.name = self.model_name
            self.request.model_spec.signature_name = self.signature_name

            if self.model_name is None or self.signature_name is None:
                raise ValueError('`model_name` and `signature_name` should not NoneType')

        if url is None and export_dir is None:
            raise ValueError('`url` or `export_dir`is at least of one !')

        self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)
        self.id2label = self._load_id_map_label(labels)

    def local_infer(self, examples):
        """Predict with the locally loaded SavedModel (overridden by subclasses)."""
        pass

    def tf_serving_infer(self, examples):
        """Predict through a tensorflow-serving gRPC request (overridden by subclasses)."""
        pass

    def preprocess(self, sentences, max_seq_length):
        pass

    def create_example(self):
        pass

    @staticmethod
    def _load_id_map_label(labels=None):
        """Build the id -> label mapping from a label list or pickled label2id file.

        List labels are numbered from 1; a str is read as a pickled
        label->id dict and inverted.
        """
        id2label = {}
        if isinstance(labels, list):
            for i, label in enumerate(labels, 1):
                # Fixed: previously `id2label[i] = labels` mapped every id to
                # the whole list instead of the individual label.
                id2label[i] = label
        elif isinstance(labels, str):
            with codecs.open(labels, 'rb') as rf:
                label2id = pickle.load(rf)
                id2label = {value: key for key, value in label2id.items()}
        return id2label
class NerInfer(InferenceBase):
    def __init__(self, vocab_file, labels, url=None, model_name=None,
                 signature_name=None, export_dir=None, do_lower_case=True):
        """
        BERT NER inference; see `InferenceBase` for parameter details.
        """
        super(NerInfer, self).__init__(vocab_file, labels, url, model_name, signature_name, export_dir, do_lower_case)
        self.sentenceprocessor = SentenceProcessor()

    def preprocess(self, sentences, max_seq_length):
        """
        Clean `sentences` and build serialized tf.train.Examples.
        :arg
            sentences: list of input sentences; requirements:
                (1) each item may be a paragraph, but individual sentences
                    should ideally stay under 64 characters
                (2) length must not be less than 2
            max_seq_length: maximum length of each model input
        :return
            examples: list of serialized tf.train.Example strings
            new_tokens: 2-D list of tokens from the cleaned sentences
            sentences_index: 2-D list mapping split chunks back to the
                original sentence index, e.g. [[0], [1, 2]...]
        """
        if not sentences or not isinstance(sentences, list):
            raise ValueError('`sentences` must be list object and not a empty list !')
        # Split any sentence longer than max_seq_length, then re-pack the
        # pieces into chunks no longer than max_seq_length.
        new_sentences, sentences_index = self.sentenceprocessor.concat_sentences(sentences, max_seq_length)
        examples, new_tokens = [], []
        for sentence in new_sentences:
            feature, ntokens = self.convert_single_example(sentence, max_seq_length)
            features = dict()
            features['input_ids'] = tf.train.Feature(int64_list=tf.train.Int64List(value=feature.input_ids))
            features['input_mask'] = tf.train.Feature(int64_list=tf.train.Int64List(value=feature.input_mask))
            features['segment_ids'] = tf.train.Feature(int64_list=tf.train.Int64List(value=feature.segment_ids))
            features['label_ids'] = tf.train.Feature(int64_list=tf.train.Int64List(value=feature.label_ids))
            example = tf.train.Example(features=tf.train.Features(feature=features))
            examples.append(example.SerializeToString())
            new_tokens.append(ntokens)
        return examples, new_tokens, sentences_index

    def convert_single_example(self, sentence, max_seq_length):
        """
        Tokenize a single sentence, convert tokens to ids, and pad.
        :arg
            sentence: string, a single sentence
            max_seq_length: maximum sequence length
        :return
            feature: InputFeatures object
            ntokens: processed tokens (without the [CLS]/[SEP] markers)
        """
        tokens = self.tokenizer.tokenize(sentence)
        # Truncate, leaving room for the sentence-start and -end markers
        # (hence -2).
        if len(tokens) >= max_seq_length - 1:
            tokens = tokens[0:(max_seq_length - 2)]
        ntokens = []
        segment_ids = []
        label_ids = []
        ntokens.append("[CLS]")
        segment_ids.append(0)
        label_ids.append(0)
        for token in tokens:
            ntokens.append(token)
            segment_ids.append(0)
            label_ids.append(0)
        # Append the end-of-sentence marker.
        ntokens.append("[SEP]")
        segment_ids.append(0)
        label_ids.append(0)
        input_ids = self.tokenizer.convert_tokens_to_ids(ntokens)
        input_mask = [1] * len(input_ids)
        # Pad every field up to max_seq_length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            label_ids.append(0)
            # ntokens.append("**NULL**")
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length
        # Package the fields into a container object.
        feature = InputFeatures(
            input_ids=input_ids,
            input_mask=input_mask,
            segment_ids=segment_ids,
            label_ids=label_ids
        )
        return feature, ntokens[1: -1]

    def infer(self, sentences, max_seq_length):
        """
        Public inference entry point.
        :arg
            sentences: list of input sentences; requirements:
                (1) each item may be a paragraph, but individual sentences
                    should ideally stay under 64 characters
                (2) length must not be less than 2
            max_seq_length: maximum length of each model input
        :return
            sentences_entities: the entities found in each input sentence
        """
        examples, new_tokens, sentences_index = self.preprocess(sentences, max_seq_length)
        # Dispatch to gRPC serving or the local SavedModel.
        if self.url:
            predictions = self.tf_serving_infer(examples)
        else:
            predictions = self.local_infer(examples)
        result = self.convert_id_to_label(predictions['output'])
        # debug
        # for t, r in zip(new_tokens, result):
        #     if len(r) != len(t):
        #         warnings.warn('Token and tags have different lengths.\ndetails:\n{}\n{}'.format(t, r))
        #     print(list(zip(t, r)))
        sentences_entities = self.get_entity(new_tokens, result, sentences_index)
        return sentences_entities

    def tf_serving_infer(self, examples):
        """
        Predict through tensorflow serving.
        :arg
            examples: serialized tf.train.Example strings
        :return
            dict of output name -> ndarray of predictions
        """
        self.request.inputs['examples'].CopyFrom(tf.make_tensor_proto(examples, dtype=types_pb2.DT_STRING))
        # 5.0 is the gRPC timeout in seconds.
        response = self.stub.Predict(self.request, 5.0)
        predictions = {}
        for key in response.outputs:
            tensor_proto = response.outputs[key]
            nd_array = tf.contrib.util.make_ndarray(tensor_proto)
            predictions[key] = nd_array
        return predictions

    def local_infer(self, examples):
        """
        Predict with the locally loaded SavedModel; same arguments as above.
        """
        predictions = self.predict_fn({'examples': examples})
        return predictions

    def convert_id_to_label(self, predictions):
        """
        Map predicted ids back to label strings.
        :arg
            predictions: 2-D list of predicted ids, e.g. [[1,2,3], [2,3,4]...]
        :return
            result: 2-D list of label strings
        """
        result = []
        for prediction in predictions:
            curr_result = []
            for idx in prediction:
                # 0 is padding: stop at the end of the real sequence.
                if idx == 0:
                    break
                curr_label = self.id2label[idx]
                # Skip the sentence markers.
                if curr_label in ['[CLS]', '[SEP]']:
                    continue
                curr_result.append(curr_label)
            result.append(curr_result)
        return result

    @staticmethod
    def get_entity(tokens, tags, sentences_index):
        """
        Extract entities from BIO-tagged tokens.
        :arg
            tokens: 2-D list of tokens produced during preprocessing
            tags: 2-D list of predicted tags
            sentences_index: 2-D list mapping chunks back to original sentences
        :return
            sentences_entities: 2-D list of entities, e.g. [('昆凌', 'PER')...]
        """
        sentences_entities = []
        for sent in sentences_index:
            entities = []
            for i in sent:
                if len(tokens[i]) != len(tags[i]):
                    warnings.warn('Token and tags have different lengths.\ndetails:\n{}\n{}'.format(tokens[i], tags[i]))
                entity = Entity(None)
                t_zip = zip(tokens[i], tags[i])
                for token, tag in t_zip:
                    if tag == 'O':
                        # Outside tag closes any entity in progress.
                        if entity.types:
                            entities.append(entity.get_entity_types())
                            entity = Entity(None)
                        continue
                    elif tag[0] == 'B':
                        # Begin tag: flush the previous entity, start a new one.
                        if entity.types:
                            entities.append(entity.get_entity_types())
                        entity = Entity(tag[2:])
                        entity.begin = token
                    elif tag[0] == 'I':
                        # Inside tag extends the current entity (assignment
                        # appends — see Entity.intermediate).
                        try:
                            entity.intermediate = token
                        except Exception as e:
                            print(e)
            sentences_entities.append(entities)
        return sentences_entities
if __name__ == '__main__':
    # Resolve model/vocab/label paths relative to this file.
    project_path = os.path.dirname(os.path.abspath(__file__))
    export_dir = project_path + os.sep + 'albert_base_ner_checkpoints' + os.sep + 'export' + os.sep + 'Servo' + os.sep + '1591272288'
    vocab_file = project_path + '{}albert_base_zh{}vocab.txt'.format(os.sep, os.sep)
    labels = project_path + '{}albert_base_ner_checkpoints{}label2id.pkl'.format(os.sep, os.sep)
    # Smoke test against a local tensorflow-serving instance.
    nerinfer = NerInfer(vocab_file, labels, url='localhost:8500', model_name='albert_chinese_ner_model', signature_name='serving_default')
    # sentence = '因有关日寇在京掠夺文物详情,藏界较为重视,也是我们收藏北京史料中的要件之一。'
    sentence = '美国的华莱士,我和他谈笑风生。'
    print(nerinfer.infer([sentence], 128))
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.example.foreign_example
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Foreign library example.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import ctypes as ct
from lantz import Feat, Action, DictFeat
from lantz.foreign import LibraryDriver
from lantz.errors import InstrumentError
class ForeignTemplate(LibraryDriver):
    """Template for Drivers using a library.

    Wraps the C library named by LIBRARY_NAME; non-zero return values are
    converted to InstrumentError by _return_handler.
    """

    LIBRARY_NAME = 'mylibrary.dll'

    #: Library return-code -> human-readable message mapping. Populate for
    #: the concrete device; unknown codes fall back to 'unknown error'.
    _ERRORS = {}

    def _return_handler(self, func_name, ret_value):
        """Raise InstrumentError for non-zero library return values.

        Fix: the original indexed a module-level ``_ERRORS`` that was never
        defined, so every library error raised NameError instead of the
        intended InstrumentError. The mapping is now a class attribute and
        is looked up safely.
        """
        if ret_value != 0:
            raise InstrumentError('{} ({})'.format(
                ret_value, self._ERRORS.get(ret_value, 'unknown error')))
        return ret_value

    @Feat()
    def idn(self):
        """Identification of the instrument (*IDN? query)."""
        return self.query('*IDN?')

    @Feat(units='V', limits=(10,))
    def amplitude(self):
        """Amplitude.
        """
        return float(self.query('?AMP'))

    @amplitude.setter
    def amplitude(self, value):
        self.query('!AMP {:.1f}'.format(value))

    @DictFeat(values={True: '1', False: '0'}, keys=list(range(1,9)))
    def dout(self, key):
        """Digital output state.
        """
        return self.query('?DOU {}'.format(key))

    @dout.setter
    def dout(self, key, value):
        self.query('!DOU {} {}'.format(key, value))

    @Action()
    def do_something(self):
        """Help for do_something
        """
        return self.lib.something()
if __name__ == '__main__':
    import argparse
    import lantz.log

    # Minimal CLI: optionally launch the interactive test GUI.
    parser = argparse.ArgumentParser(description='Test Kentech HRI')
    parser.add_argument('-i', '--interactive', action='store_true',
                        default=False, help='Show interactive GUI')
    args = parser.parse_args()
    lantz.log.log_to_socket(lantz.log.DEBUG)
    with ForeignTemplate() as inst:
        if not args.interactive:
            # Add your test code here
            print('Non interactive mode')
        else:
            from lantz.ui.app import start_test_app
            start_test_app(inst)
|
from typing import Optional, Generic, List, TypeVar
from project_name.exceptions import NotFoundInRepository
from project_name.storage.database import db
from project_name.storage.database.base import CommonQueryBuilderMixin, CommonSerializerMixin
from project_name.storage.database.sessions import Session
# Generic parameters: T is the entity type a repository manages,
# T_ID the type of that entity's primary identifier.
T_ID = TypeVar('T_ID')  # pylint: disable=invalid-name
T = TypeVar('T')  # pylint: disable=invalid-name
class BaseSyncRepository(Generic[T, T_ID], CommonQueryBuilderMixin, CommonSerializerMixin[T, T_ID]):
    """Synchronous CRUD repository base.

    Query text and (de)serialization come from the mixins. A subclass may
    pin a database via ``used_db``; when ``used_db`` is falsy, the per-call
    ``use_db`` argument selects the session instead.
    """

    used_db = 'main'
    not_found_exception_cls = NotFoundInRepository

    def _get_db_for_query(self, db_name: str) -> Session:
        # Class-level used_db, when set, takes precedence over the
        # per-call database name.
        return getattr(db, self.used_db or db_name)

    def _fetchall(self, query, use_db='', **params):
        """Execute *query* and return all rows."""
        with self._get_db_for_query(use_db) as session:
            return session.execute(query, params).fetchall()

    def _fetchone(self, query, use_db='', **params):
        """Execute *query* and return the first row (or None)."""
        with self._get_db_for_query(use_db) as session:
            return session.execute(query, params).fetchone()

    def _execute(self, query, use_db='', **params):
        """Execute *query* and return the raw result object."""
        with self._get_db_for_query(use_db) as session:
            return session.execute(query, params)

    def get_by_id(self, instance_id: T_ID, for_update: bool = False) -> Optional[T]:
        """Load one instance by id; None when no row matches."""
        query = self.get_by_id_for_update_query() if for_update else self.get_by_id_query()
        row = self._fetchone(query, **self.instance_id_as_dict(instance_id))
        return self.get_instance(row) if row else None

    def get_or_raise_by_id(self, instance_id: T_ID, for_update: bool = False) -> T:
        """Load one instance by id or raise ``not_found_exception_cls``."""
        instance = self.get_by_id(instance_id, for_update=for_update)
        if instance:
            return instance
        raise self.not_found_exception_cls()

    def insert(self, instance: T) -> int:
        """Insert *instance*; returns the affected row count."""
        return self._execute(self.insert_query(), **self.instance_to_dict(instance)).rowcount

    def update(self, instance: T) -> int:
        """Update *instance* by its id; returns the affected row count."""
        params = self.instance_to_dict(instance)
        if 'id' in params:
            params['instance_id'] = params['id']
        # Any other cases should be treated directly because a general
        # solution can be very complicated.
        return self._execute(self.update_query(), **params).rowcount

    def delete_by_id(self, instance_id: T_ID) -> None:
        """Delete the row with the given id."""
        self._execute(self.delete_by_id_query(), **self.instance_id_as_dict(instance_id))

    def delete(self, instance: T) -> None:
        """Delete the row corresponding to *instance*."""
        self.delete_by_id(self.get_instance_id(instance))

    def delete_all(self):
        """Delete every row managed by this repository."""
        return self._execute(self.delete_all_query())

    def get_all(self) -> List[T]:
        """Return all rows as instances."""
        return self.get_instances(self._execute(self.get_all_query()))

    def insert_many(self, instances: List[T]) -> None:
        """Bulk-insert *instances* with a single executemany-style call."""
        if not instances:
            return
        rows = [self.instance_to_dict(instance) for instance in instances]
        with self._get_db_for_query(self.used_db) as session:
            session.execute(self.insert_query(), rows)
|
# A Python tuple behaves like a list, except its elements cannot be modified.
info_tuple = ("zhangsan", 18, 1.75)

# Elements are read by index, exactly like a list.
for position in range(3):
    print(info_tuple[position])

# An empty tuple can be created, though it is rarely useful: once defined,
# a tuple can never be changed.
empty_tuple = ()

# A one-element tuple needs a trailing comma;
# single_tuple = (5) would be parsed as the integer 5.
single_tuple = (5,)

# count() returns how many times a value occurs in the tuple.
print(info_tuple.count(18))

# index() returns the position of a known value within the tuple.
print(info_tuple.index("zhangsan"))

# len() returns the number of elements.
print(len(info_tuple))
|
from django.shortcuts import render, redirect
from django.views.generic import CreateView,UpdateView,FormView,DeleteView,DetailView,ListView
from django.contrib.auth.decorators import login_required
from consultant.models import ( User,
PersonalDetails,
OfficialDetails,
ProjectDetails,
TrainingDetails,
FinancialDetails,
InvoicingDetails,
BenchDetails,
ProspectDetails,
)
from consultant.forms import ( SignUpForm,
PersonalDetailsForm,
OfficialDetailsForm,
ProjectDetailsForm,
TrainingDetailsForm,
FinancialDetailsForm,
InvoicingDetailsForm,
BenchDetailsForm,
ProspectDetailsForm,
)
class UserCreateView(CreateView):
    """Sign-up form; on success redirects to the login page."""
    template_name = "connect/signup.html"
    form_class = SignUpForm
    success_url = "/consultant/login/"
class PersonalDetailsView(CreateView):
    """Create the PersonalDetails record for the logged-in user."""
    model = PersonalDetails
    form_class = PersonalDetailsForm
    template_name = "consultant/personal-details.html"
    success_url = '/consultant'

    def get_form_kwargs(self):
        """Pre-fill the form's employee field with the current user."""
        form_kwargs = super().get_form_kwargs()
        current_user = User.objects.get(username=self.request.user)
        form_kwargs.update({'initial': {'employee': current_user}})
        return form_kwargs

    def form_valid(self, form):
        """Attach the requesting user to the record before saving."""
        form.instance.employee = self.request.user
        return super().form_valid(form)
class PersonalDetailsUpdateView(UpdateView):
    """Edit an existing PersonalDetails record."""
    model = PersonalDetails
    form_class = PersonalDetailsForm
    template_name = "consultant/personal-details.html"
    success_url = '/consultant'
class OfficialDetailsView(CreateView):
    """Create the OfficialDetails record for the logged-in user."""
    model = OfficialDetails
    form_class = OfficialDetailsForm
    template_name = "consultant/official-details.html"
    success_url = '/consultant'

    def get_form_kwargs(self):
        """Pre-fill the form's employee field with the current user."""
        form_kwargs = super().get_form_kwargs()
        current_user = User.objects.get(username=self.request.user)
        form_kwargs.update({'initial': {'employee': current_user}})
        return form_kwargs

    def form_valid(self, form):
        """Attach the requesting user to the record before saving."""
        form.instance.employee = self.request.user
        return super().form_valid(form)
class OfficialDetailsUpdateView(UpdateView):
    """Edit an existing OfficialDetails record."""
    model = OfficialDetails
    form_class = OfficialDetailsForm
    template_name = "consultant/official-details.html"
    success_url = '/consultant'
class ProjectDetailsView(CreateView):
    """Create the ProjectDetails record for the logged-in user."""
    model = ProjectDetails
    form_class = ProjectDetailsForm
    template_name = "consultant/project-details.html"
    success_url = '/consultant'

    def get_form_kwargs(self):
        """Pre-fill the form's employee field with the current user."""
        form_kwargs = super().get_form_kwargs()
        current_user = User.objects.get(username=self.request.user)
        form_kwargs.update({'initial': {'employee': current_user}})
        return form_kwargs

    def form_valid(self, form):
        """Attach the requesting user to the record before saving."""
        form.instance.employee = self.request.user
        return super().form_valid(form)
class ProjectDetailsUpdateView(UpdateView):
    """Edit an existing ProjectDetails record."""
    model = ProjectDetails
    form_class = ProjectDetailsForm
    template_name = "consultant/project-details.html"
    success_url = '/consultant'
class TrainingDetailsView(CreateView):
    """Create the TrainingDetails record for the logged-in user."""
    model = TrainingDetails
    form_class = TrainingDetailsForm
    template_name = "consultant/training-details.html"
    success_url = '/consultant'

    def get_form_kwargs(self):
        """Pre-fill the form's employee field with the current user."""
        form_kwargs = super().get_form_kwargs()
        current_user = User.objects.get(username=self.request.user)
        form_kwargs.update({'initial': {'employee': current_user}})
        return form_kwargs

    def form_valid(self, form):
        """Attach the requesting user to the record before saving."""
        form.instance.employee = self.request.user
        return super().form_valid(form)
class TrainingDetailsUpdateView(UpdateView):
    """Edit an existing TrainingDetails record."""
    model = TrainingDetails
    form_class = TrainingDetailsForm
    template_name = "consultant/training-details.html"
    success_url = '/consultant'
class FinancialDetailsView(CreateView):
    """Create the FinancialDetails record for the logged-in user."""
    model = FinancialDetails
    form_class = FinancialDetailsForm
    template_name = "consultant/finance-details.html"
    success_url = '/consultant'

    def get_form_kwargs(self):
        """Pre-fill the form's employee field with the current user."""
        form_kwargs = super().get_form_kwargs()
        current_user = User.objects.get(username=self.request.user)
        form_kwargs.update({'initial': {'employee': current_user}})
        return form_kwargs

    def form_valid(self, form):
        """Attach the requesting user to the record before saving."""
        form.instance.employee = self.request.user
        return super().form_valid(form)
class FinancialDetailsUpdateView(UpdateView):
    """Edit an existing FinancialDetails record."""
    model = FinancialDetails
    form_class = FinancialDetailsForm
    template_name = "consultant/finance-details.html"
    success_url = '/consultant'
class InvoicingDetailsView(CreateView):
    """Create the InvoicingDetails record for the logged-in user."""
    model = InvoicingDetails
    form_class = InvoicingDetailsForm
    template_name = "consultant/invoicing-details.html"
    success_url = '/consultant'

    def get_form_kwargs(self):
        """Pre-fill the form's employee field with the current user."""
        form_kwargs = super().get_form_kwargs()
        current_user = User.objects.get(username=self.request.user)
        form_kwargs.update({'initial': {'employee': current_user}})
        return form_kwargs

    def form_valid(self, form):
        """Attach the requesting user to the record before saving."""
        form.instance.employee = self.request.user
        return super().form_valid(form)
class InvoicingDetailsUpdateView(UpdateView):
    """Edit an existing InvoicingDetails record."""
    model = InvoicingDetails
    form_class = InvoicingDetailsForm
    template_name = "consultant/invoicing-details.html"
    success_url = '/consultant'
class BenchDetailsView(CreateView):
    """Create the BenchDetails record for the logged-in user."""
    model = BenchDetails
    form_class = BenchDetailsForm
    template_name = "consultant/bench-details.html"
    success_url = '/consultant'

    def get_form_kwargs(self):
        """Pre-fill the form's employee field with the current user."""
        form_kwargs = super().get_form_kwargs()
        current_user = User.objects.get(username=self.request.user)
        form_kwargs.update({'initial': {'employee': current_user}})
        return form_kwargs

    def form_valid(self, form):
        """Attach the requesting user to the record before saving."""
        form.instance.employee = self.request.user
        return super().form_valid(form)
class BenchDetailsUpdateView(UpdateView):
    """Edit an existing BenchDetails record."""
    model = BenchDetails
    form_class = BenchDetailsForm
    template_name = "consultant/bench-details.html"
    success_url = '/consultant'
class ProspectDetailsView(CreateView):
    """Create the ProspectDetails record for the logged-in user.

    Fix: ``model`` was previously ``ProjectDetails`` — a copy-paste slip.
    The form, template, and the companion ProspectDetailsUpdateView all
    use ProspectDetails, so this view saved the wrong model.
    """
    model = ProspectDetails
    form_class = ProspectDetailsForm
    template_name = "consultant/prospect-details.html"
    success_url = '/consultant'

    def get_form_kwargs(self):
        """Pre-fill the form's employee field with the current user."""
        kwargs = super().get_form_kwargs()
        employee = User.objects.get(username=self.request.user)
        kwargs.update({'initial': {'employee': employee}})
        return kwargs

    def form_valid(self, form):
        """Attach the requesting user to the record before saving."""
        form.instance.employee = self.request.user
        return super().form_valid(form)
class ProspectDetailsUpdateView(UpdateView):
    """Edit an existing ProspectDetails record."""
    model = ProspectDetails
    form_class = ProspectDetailsForm
    template_name = "consultant/prospect-details.html"
    success_url = '/consultant'
def _detail_or_none(model, user_id):
    """Return *model*'s row for employee_id=user_id, or None when absent.

    Fix: the original used eight bare ``except:`` clauses, which silently
    swallowed every error (database failures included). Only the model's
    DoesNotExist is treated as "not filled in yet"; anything else now
    propagates so real problems surface.
    """
    try:
        return model.objects.get(employee_id=user_id)
    except model.DoesNotExist:
        return None


@login_required(login_url='login')
def employee_details(request, *args, **kwargs):
    """Render the summary page with every detail section for the user.

    Sections the user has not filled in yet are passed as None so the
    template can show placeholders.
    """
    user_id = request.user.id
    context = {
        'emp_personal': _detail_or_none(PersonalDetails, user_id),
        'emp_official': _detail_or_none(OfficialDetails, user_id),
        'emp_training': _detail_or_none(TrainingDetails, user_id),
        'emp_project': _detail_or_none(ProjectDetails, user_id),
        'emp_finance': _detail_or_none(FinancialDetails, user_id),
        'emp_invoice': _detail_or_none(InvoicingDetails, user_id),
        'emp_bench': _detail_or_none(BenchDetails, user_id),
        'emp_prospect': _detail_or_none(ProspectDetails, user_id),
    }
    return render(request, 'consultant/emp-details.html', context)
|
from .lenet import lenet
from .vgg16 import vgg16
from .vgg19 import vgg19
from .alexnet import alexnet
from .mobilenet import mobilenet
from .full_mobilenet import full_mobilenet
from .xception import xception
from .resnet50v2 import resnet50v2
from .resnet152v2 import resnet152v2 |
#######################################################################
##
## CS 101
## Program #7
## Name: Harrison Lara
## Email: hrlwwd@mail.umkc.edu
##
## PROBLEM :
## You’ll have 2 files to work with; all_words.csv and total_counts.csv. All words will contain all the words that
## the user can compare with their counts in literature for each year. total_counts has the counts of all words by
## year, so that we can find the relative frequency.
##
##
## ALGORITHM :
##
## • Need to find the relative frequency and word commonality
## • Use functions
## • Classes/ objected oriented programming not needed
## • Ask user if they want to use ngram viewer or quit
## o 1 or 2
## o Try and except
## • For all_words.csv
## o Each line is how often the word is used and how many books it came out of
## o Last value does not matter (skip)
## • For total_count.csv
## o Each line is the year and how many times a word occurred in the literature
## • Relative frequency formula
## o Amount of times word used in a year/the total words in the year
## o Times by 100 after
## • Get user input
## o Ask for what word needs to be searched for
## What word do you want to get the frequencies from:
## o Ask for starting year
## Must be between 1505 and 2008
## Must be an integer
## • Int(input
## Must have range from only the start year to 2008 (ex. User inputs 1900 so it must be from 1900 – 2008
## or from start-end date)
## Try and except
## o Ask for ending year
## Must be between 1505 and 2008
## Must be an integer
## • Int(input
## (ex. User inputs 1900 so it must be from 1900 – 2008, or from start- end date)
## • Try and except
## • Display results in a table
## • Use Ngram Viewer
## o Table to show output of data to user
## o Three columns
## Year
## Word1 with frequencies
## Word2 with frequencies
## • Import csv module
## • Open file
## o File=open(‘all_words.csv’)
## o Csv_file = csv.reader(file)
## • Get list of strings for each line
## o For line in csv_file:
## o Print(line)
## o File.close()
## Possible functions to use from prior programs (maybe)
## o Relative frequency and word commonality
##
## ERROR HANDLING:
## Value Error, date must be higher than the start date or 1505
##
## OTHER COMMENTS:
## None.
##
########################################################################
# Imports
import csv
#Functions
def input_word(validate):
    '''Prompt until the user enters a word present in *validate*.'''
    while True:
        word = input("Enter a word to get the frequencies of ==> ")
        if word not in validate:
            print(word, "was not found, please input a new word")
        else:
            return word
def start_year():
    '''Prompt for a starting year in 1505..2008; blank input means 1900.'''
    while True:
        raw = input("Enter a start date ==> ")
        if raw == "":
            raw = 1900
        try:
            year = int(raw)
        except ValueError:
            print("You must enter a date")
            continue
        if 1505 <= year <= 2008:
            return year
        print("Please enter a date between 1505 and 2008")
def end_year(year1):
    '''Prompt for an ending year in year1..2008; blank input means 2008.'''
    while True:
        raw = input("Enter the end date ==> ")
        if raw == "":
            raw = 2008
        try:
            year = int(raw)
        except ValueError:
            print("You must enter a date")
            continue
        if int(year1) <= year <= 2008:
            return year
        print("You must enter a date between", year1, " and 2008")
def get_word(file):
    '''Build {word: [[year, count], ...]} from all_words csv rows.'''
    validate = {}
    for row in file:
        word, year, count = row[0], int(row[1]), int(row[2])
        validate.setdefault(word, []).append([year, count])
    return validate
def get_count(file):
    '''Build {year: total word count} from total_counts csv rows.'''
    return {int(row[0]): int(row[1]) for row in file}
def frequency(word, validate, counter):
    '''Return {year: percentage} relative frequency for *word*.'''
    return {year: count / counter[year] * 100
            for year, count in validate[word]}
def table_format(year1, year2, one_word, two_word, one_freq, two_freq):
    '''Print the Ngram frequency table for the two chosen words.'''
    print("{:^}".format("Ngram Table \n"))
    print("{:^8s}{:^12s}{:^12s}".format("Year", one_word, two_word))
    print("=" * 30)
    for year in range(year1, year2 + 1):
        # Years where a word never occurred print as frequency 0.
        print("{:^10d}{:^10.6f}{:^10.6f}".format(
            year, one_freq.get(year, 0), two_freq.get(year, 0)))
    print("")
def print_table():
    '''Load both csv files, prompt for two words and a year range, print.'''
    # Note: file handles are not explicitly closed, matching the original
    # design; they are reclaimed when the readers are garbage-collected.
    total_reader = csv.reader(open("total_counts.csv"))
    words_reader = csv.reader(open("all_words.csv"))
    words = get_word(words_reader)
    counts = get_count(total_reader)
    first_word = input_word(words)
    second_word = input_word(words)
    year1 = start_year()
    year2 = end_year(year1)
    table_format(year1, year2, first_word, second_word,
                 frequency(first_word, words, counts),
                 frequency(second_word, words, counts))
def main():
    '''Top-level menu loop: show the options until the user quits.'''
    choice = ""
    while choice != "q":
        print("Ngram Viewer \n")
        print("1. Ngram Table")
        print("Q. Quit")
        print("")
        choice = input("==> ").lower()
        if choice == "1":
            print_table()
        elif choice != "q":
            print("Please choose a valid option from either '1' or 'Q' \n")
# Program entry point.
main()  # Run the menu loop until the user chooses to quit.
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods,too-many-arguments,no-self-use,too-many-locals,line-too-long,unused-argument
from __future__ import print_function
from collections import OrderedDict
import json
import ssl
import sys
import os
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from knack.log import get_logger
from knack.prompting import prompt, prompt_pass, prompt_t_f, prompt_choice_list, prompt_int, NoTTYException
from knack.util import CLIError
from azure.cli.core.util import get_file_json, shell_safe_json_parse, sdk_no_wait
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType, get_sdk
from sfmergeutility import SFMergeUtility # pylint: disable=E0611,import-error
logger = get_logger(__name__)
def _ssl_context():
if sys.version_info < (3, 4):
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url):
    """Fetch *url* and return the raw response bytes."""
    response = urlopen(url, context=_ssl_context())
    return response.read()
def _process_parameters(template_param_defs, parameter_lists):
    """Normalize CLI-supplied parameters into deployment parameter format.

    Each item in *parameter_lists* may be a path to a JSON file, an inline
    JSON string, or a KEY=VALUE pair; KEY=VALUE values are coerced to the
    type declared in *template_param_defs*. Raises CLIError for unknown
    keys or unparseable items.
    """
    def _try_parse_json_object(value):
        # Returns the parsed 'parameters' object (or the whole parse), or
        # None when *value* is not valid JSON.
        try:
            parsed = shell_safe_json_parse(value)
            return parsed.get('parameters', parsed)
        except CLIError:
            return None
    def _try_load_file_object(value):
        # Returns parameters loaded from a JSON file, or None when *value*
        # is not an existing file path.
        if os.path.isfile(value):
            parsed = get_file_json(value, throw_on_empty=False)
            return parsed.get('parameters', parsed)
        return None
    def _try_parse_key_value_object(template_param_defs, parameters, value):
        # Parses 'KEY=VALUE' into *parameters*; returns False when there
        # is no '=' so the caller can report the item as unparseable.
        try:
            key, value = value.split('=', 1)
        except ValueError:
            return False
        param = template_param_defs.get(key, None)
        if param is None:
            raise CLIError("unrecognized template parameter '{}'. Allowed parameters: {}"
                           .format(key, ', '.join(sorted(template_param_defs.keys()))))
        param_type = param.get('type', None)
        if param_type:
            param_type = param_type.lower()
        # Coerce the raw string according to the declared parameter type.
        if param_type in ['object', 'array']:
            parameters[key] = {'value': shell_safe_json_parse(value)}
        elif param_type in ['string', 'securestring']:
            parameters[key] = {'value': value}
        elif param_type == 'bool':
            parameters[key] = {'value': value.lower() == 'true'}
        elif param_type == 'int':
            parameters[key] = {'value': int(value)}
        else:
            logger.warning("Unrecognized type '%s' for parameter '%s'. Interpretting as string.", param_type, key)
            parameters[key] = {'value': value}
        return True
    parameters = {}
    for params in parameter_lists or []:
        for item in params:
            # Try file, then inline JSON, then KEY=VALUE — first match wins.
            param_obj = _try_load_file_object(item) or _try_parse_json_object(item)
            if param_obj:
                parameters.update(param_obj)
            elif not _try_parse_key_value_object(template_param_defs, parameters, item):
                raise CLIError('Unable to parse parameter: {}'.format(item))
    return parameters
def _find_missing_parameters(parameters, template):
if template is None:
return {}
template_parameters = template.get('parameters', None)
if template_parameters is None:
return {}
missing = OrderedDict()
for parameter_name in template_parameters:
parameter = template_parameters[parameter_name]
if 'defaultValue' in parameter:
continue
if parameters is not None and parameters.get(parameter_name, None) is not None:
continue
missing[parameter_name] = parameter
return missing
def _prompt_for_parameters(missing_parameters, fail_on_no_tty=True):  # pylint: disable=too-many-statements
    """Interactively prompt for each missing template parameter.

    Prompts are typed: choice list for allowedValues, hidden input for
    securestring, int/bool/json prompts for the other declared types.
    When no TTY is available each value falls back to a type-appropriate
    placeholder; NoTTYException is raised at the end if *fail_on_no_tty*.
    """
    # OrderedDict input preserves template order; otherwise sort names.
    prompt_list = missing_parameters.keys() if isinstance(missing_parameters, OrderedDict) \
        else sorted(missing_parameters)
    result = OrderedDict()
    no_tty = False
    for param_name in prompt_list:
        param = missing_parameters[param_name]
        param_type = param.get('type', 'string')
        description = 'Missing description'
        metadata = param.get('metadata', None)
        if metadata is not None:
            description = metadata.get('description', description)
        allowed_values = param.get('allowedValues', None)
        prompt_str = "Please provide {} value for '{}' (? for help): ".format(param_type, param_name)
        while True:
            if allowed_values is not None:
                # Restricted set: present a numbered choice list.
                try:
                    ix = prompt_choice_list(prompt_str, allowed_values, help_string=description)
                    result[param_name] = allowed_values[ix]
                except NoTTYException:
                    result[param_name] = None
                    no_tty = True
                break
            elif param_type == 'securestring':
                try:
                    value = prompt_pass(prompt_str, help_string=description)
                except NoTTYException:
                    value = None
                    no_tty = True
                result[param_name] = value
                break
            elif param_type == 'int':
                try:
                    int_value = prompt_int(prompt_str, help_string=description)
                    result[param_name] = int_value
                except NoTTYException:
                    result[param_name] = 0
                    no_tty = True
                break
            elif param_type == 'bool':
                try:
                    value = prompt_t_f(prompt_str, help_string=description)
                    result[param_name] = value
                except NoTTYException:
                    result[param_name] = False
                    no_tty = True
                break
            elif param_type in ['object', 'array']:
                try:
                    value = prompt(prompt_str, help_string=description)
                except NoTTYException:
                    value = ''
                    no_tty = True
                if value == '':
                    # Empty input means an empty object/array.
                    value = {} if param_type == 'object' else []
                else:
                    try:
                        value = shell_safe_json_parse(value)
                    except Exception as ex:  # pylint: disable=broad-except
                        # Bad JSON: report and re-prompt (stay in the loop).
                        logger.error(ex)
                        continue
                result[param_name] = value
                break
            else:
                # Fallback: treat any other declared type as a string.
                try:
                    result[param_name] = prompt(prompt_str, help_string=description)
                except NoTTYException:
                    result[param_name] = None
                    no_tty = True
                break
    if no_tty and fail_on_no_tty:
        raise NoTTYException
    return result
def _get_missing_parameters(parameters, template, prompt_fn):
    """Fill *parameters* with prompted values for unbound template params."""
    missing = _find_missing_parameters(parameters, template)
    if missing:
        prompted = prompt_fn(missing)
        for param_name in prompted:
            # Wrap each raw value in the ARM {'value': ...} envelope.
            parameters[param_name] = {"value": prompted[param_name]}
    return parameters
def _invoke_mergeutil(input_yaml_files=None, parameters=None):
    """Merge the given yaml input into a single ARM template file.

    *input_yaml_files* is either a directory (all .yaml files beneath it
    are collected recursively) or a comma-separated list of file paths.
    Returns the path of the generated 'merged-arm_rp.json' in the cwd.
    """
    # call merge utility
    file_path_list = []
    prefix = "merged-"
    output_file_path = os.path.join(os.getcwd(), prefix + 'arm_rp.json')
    if os.path.isdir(input_yaml_files):
        for root, _, files in os.walk(input_yaml_files):
            for filename in files:
                if filename.endswith(".yaml"):
                    file_path_list.append(os.path.join(root, filename))
    else:
        file_path_list = input_yaml_files.split(',')
    # Remove any stale output so the utility writes a fresh file.
    if os.path.exists(output_file_path):
        os.remove(output_file_path)
    SFMergeUtility.sf_merge_utility(file_path_list, "SF_SBZ_RP_JSON", parameters=parameters, output_dir=None, prefix=prefix)
    return output_file_path
def _generate_arm_template_core(input_yaml_files=None, parameters=None):
    """Generate an ARM template from yaml input and log its location."""
    output_file_path = _invoke_mergeutil(input_yaml_files, parameters)
    logger.warning("Generated ARM template file at %s.", output_file_path)
def _deploy_arm_template_core(cmd, resource_group_name,  # pylint: disable=too-many-arguments
                              template_file=None, template_uri=None, input_yaml_files=None, deployment_name=None,
                              parameters=None, mode=None, validate_only=False,
                              no_wait=False):
    """Validate or deploy an ARM template to *resource_group_name*.

    The template comes from exactly one of *template_uri*, *template_file*,
    or *input_yaml_files* (merged via _invoke_mergeutil, in which case the
    CLI parameters are consumed by the merge and not sent with the
    deployment). Missing template parameters are prompted for.
    Returns the SDK poller / result from validate or create_or_update.
    """
    DeploymentProperties, TemplateLink, Deployment = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                                             'DeploymentProperties', 'TemplateLink', 'Deployment',
                                                             mod='models')
    template = None
    template_link = None
    template_obj = None
    if template_uri:
        # Remote template: deploy by link, but fetch it once to inspect
        # its declared parameters.
        template_link = TemplateLink(uri=template_uri)
        template_obj = shell_safe_json_parse(_urlretrieve(template_uri).decode('utf-8'), preserve_order=True)
    elif template_file:
        template = get_file_json(template_file, preserve_order=True)
        template_obj = template
    else:
        # Build the template from yaml; parameters were consumed by the
        # merge step, so do not also pass them to the deployment.
        output_file_path = _invoke_mergeutil(input_yaml_files, parameters)
        parameters = None
        template = get_file_json(output_file_path, preserve_order=True)
        template_obj = template
    template_param_defs = template_obj.get('parameters', {})
    template_obj['resources'] = template_obj.get('resources', [])
    # Round-trip through json to normalize the template payload.
    template = json.loads(json.dumps(template))
    if parameters is not None:
        parameters = _process_parameters(template_param_defs, parameters) or {}
        parameters = _get_missing_parameters(parameters, template_obj, _prompt_for_parameters)
        parameters = json.loads(json.dumps(parameters))
    properties = DeploymentProperties(template=template, template_link=template_link,
                                      parameters=parameters, mode=mode)
    # workaround
    properties.mode = 'incremental'
    smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
    deployment = Deployment(properties=properties)
    logger.warning("Deploying . . .")
    logger.warning("You can get the state of the deployment with the cmd")
    logger.warning("az group deployment show --name %s --resource-group %s", deployment_name, resource_group_name)
    if validate_only:
        # Newer API versions expose a long-running begin_validate.
        if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
            from azure.cli.core.commands import LongRunningOperation
            validation_poller = smc.begin_validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        else:
            return sdk_no_wait(no_wait, smc.validate, resource_group_name, deployment_name, deployment)
    return sdk_no_wait(no_wait, smc.begin_create_or_update, resource_group_name, deployment_name, deployment)
def deploy_arm_template(cmd, resource_group_name,
                        template_file=None, template_uri=None, input_yaml_files=None, deployment_name=None,
                        parameters=None, mode=None, no_wait=False):
    """Public command wrapper: deploy (never just validate) a template."""
    return _deploy_arm_template_core(cmd, resource_group_name, template_file, template_uri,
                                     input_yaml_files, deployment_name, parameters, mode, no_wait=no_wait)
def generate_arm_template(cmd, input_yaml_files=None, parameters=None):
    """Public command wrapper: generate an ARM template from yaml input."""
    return _generate_arm_template_core(input_yaml_files, parameters)
def create_volume(client, resource_group_name,
                  name, location,
                  template_file=None, template_uri=None):
    """Create a volume from a template file or template URI."""
    if template_uri:
        volume_properties = shell_safe_json_parse(
            _urlretrieve(template_uri).decode('utf-8'), preserve_order=True)
    elif template_file:
        volume_properties = get_file_json(template_file, preserve_order=True)
        # Round-trip through json to normalize the loaded payload.
        volume_properties = json.loads(json.dumps(volume_properties))
    else:
        raise CLIError('One of --template-file or --template-uri has to be specified')
    volume_properties['location'] = location
    return client.create(resource_group_name, name, volume_properties)
def secret_show(client, resource_group_name, secret_name, secret_value_resource_name, show_value=False):
    """Return secret metadata; include the value when *show_value* is set."""
    secret_data = client.get(resource_group_name, secret_name, secret_value_resource_name)
    if not show_value:
        return secret_data
    secret_value = client.list_value(resource_group_name, secret_name, secret_value_resource_name)
    secret_data.value = secret_value.value
    return secret_data
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-08-12 18:29
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make event.raw_import_json nullable (null=True)."""
    dependencies = [
        ('annotation', '0023_auto_20160810_2244'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='raw_import_json',
            field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
        ),
    ]
|
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.dispatch import receiver
class Problem(models.Model):
    """A coding problem with category, type, and difficulty (1-3)."""
    class Category:
        # Closed set of category labels, stored directly as strings.
        NONE = "None"
        RECURSION = "Recursion"
        DP = "Dynamic Programming"
        CONTROL_FLOW = "Control Flow"
        DIVIDE_AND_CONQUER = "Divide and Conquer"
        TREE = "Tree"
        # (stored value, human-readable label) pairs for field choices.
        OPTIONS = (
            (NONE, "None"),
            (RECURSION, "Recursion"),
            (DP, "Dynamic programming"),
            (CONTROL_FLOW, "Control Flow"),
            (DIVIDE_AND_CONQUER, "Divide and Conquer"),
            (TREE, "Tree")
        )
    class Type:
        # Closed set of problem types, stored directly as strings.
        NONE = "None"
        EXAM = "Exam"
        USER_PROVIDED = "User Provided"
        TUTORIAL = "Tutorial"
        UNASSESSED = "Unassessed Exercise"
        OPTIONS = (
            (NONE, "None"),
            (EXAM, "Exam"),
            (USER_PROVIDED, "User Provided"),
            (TUTORIAL, "Tutorial"),
            (UNASSESSED, "Unassessed Exercise")
        )
    title = models.CharField(max_length=100)
    language = models.CharField(max_length=32)
    spec_path = models.CharField(max_length=300)
    desc = models.TextField(default="")
    year = models.IntegerField()
    # Difficulty constrained to the range 1..3 at validation time.
    difficulty = models.IntegerField(validators=[
        MaxValueValidator(3),
        MinValueValidator(1)
    ])
    category = models.CharField(max_length=50, choices=Category.OPTIONS, default=Category.NONE)
    type = models.CharField(max_length=50, choices=Type.OPTIONS, default=Type.NONE)
    def __str__(self):
        return self.title
class CodeSegment(models.Model):
    """A piece of a problem's code, ordered by *index* within the problem."""
    index = models.IntegerField()
    code = models.TextField()
    problem = models.ForeignKey(Problem, on_delete=models.CASCADE)
    def __str__(self):
        return str(self.id)
class Question(models.Model):
    """A question tied to a problem and one of its code segments."""
    question_desc = models.TextField()
    question_index = models.IntegerField(default=0)
    # NOTE(review): default="" on ForeignKey fields is unusual — confirm it
    # is intentional rather than a placeholder.
    code_segment = models.ForeignKey(CodeSegment, on_delete=models.CASCADE, default="")
    test_script = models.TextField(default="")
    problem = models.ForeignKey(Problem, on_delete=models.CASCADE, default="")
    def __str__(self):
        return self.question_desc
class UserProgress(models.Model):
    """Per-user progress on a problem (stop point plus a progress array)."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    problem = models.ForeignKey(Problem, on_delete=models.CASCADE)
    stopped_at = models.PositiveIntegerField(default=0)
    # Semantics of the integers in *progress* are defined by application
    # logic not visible here.
    progress = ArrayField(models.PositiveIntegerField())
    last_modified = models.DateTimeField(auto_now=True)
class QuestionComment(models.Model):
    """A threaded comment on a question (parent_comment enables replies)."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    # Null parent means a top-level comment.
    parent_comment = models.ForeignKey('self', on_delete=models.CASCADE, null=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=100, default="")
    desc = models.TextField(default="")
    created_at = models.DateTimeField(auto_now_add=True)
    views = models.IntegerField(default=0)
    def __str__(self):
        return self.desc
class UserVotes(models.Model):
    """A user's up/down/neutral vote on a problem."""
    UP = 1
    NO_VOTE = 0
    DOWN = -1
    VOTE_OPTIONS = (
        (UP, 1),
        (NO_VOTE, 0),
        (DOWN, -1)
    )
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    problem = models.ForeignKey(Problem, on_delete=models.CASCADE)
    vote = models.IntegerField(default=NO_VOTE, choices=VOTE_OPTIONS)
class CommentVotes(models.Model):
    """A user's up/down/neutral vote on a question comment."""
    UP = 1
    NO_VOTE = 0
    DOWN = -1
    VOTE_OPTIONS = (
        (UP, 1),
        (NO_VOTE, 0),
        (DOWN, -1)
    )
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    comment = models.ForeignKey(QuestionComment, on_delete=models.CASCADE)
    vote = models.IntegerField(default=NO_VOTE, choices=VOTE_OPTIONS)
class UserEditorSettings(models.Model):
    """Per-user code-editor preferences (font size, theme, key binding)."""
    class Theme:
        # Supported editor color themes.
        DRACULA = "dracula"
        ECLIPSE = "eclipse"
        MONOKAI = "monokai"
        THE_MATRIX = "the-matrix"
        OPTIONS = (
            (DRACULA, "dracula"),
            (ECLIPSE, "eclipse"),
            (MONOKAI, "monokai"),
            (THE_MATRIX, "the-matrix")
        )
    class KeyBinding:
        # Supported editor key-binding schemes.
        SUBLIME = "sublime"
        EMACS = "emacs"
        VIM = "vim"
        OPTIONS = (
            (SUBLIME, "sublime"),
            (EMACS, "emacs"),
            (VIM, "vim")
        )
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    font_size = models.IntegerField(default=13)
    theme = models.CharField(max_length=20, choices=Theme.OPTIONS, default=Theme.DRACULA)
    key_binding = models.CharField(max_length=20, choices=KeyBinding.OPTIONS, default=KeyBinding.SUBLIME)
'''
class ActionLog(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
time = models.DateTimeField(auto_now_add=True)
'''
|
from BaseAI import BaseAI
#from random import randint
# Maximum depth of the decision tree built before minimax runs.
depthLimit = 5
# Global verbosity switch; when True the AI prints detailed traces.
log = False
# If True, terminal nodes are scored with GridFunctions.evalGrid;
# otherwise the grid's raw max tile value is used as utility.
applyStateHeuristic = True
# If True, ply-1 moves are pre-filtered by PlayerAI.ApplyHeuristics.
applyMoveHeuristic = True
# Convenience alias used as the initial alpha/beta bound.
infinity = float('inf')
class PlayerAI(BaseAI):
    """2048 player agent.

    Strategy: optionally pre-filter the first-ply moves with hand-tuned grid
    heuristics (ApplyHeuristics), expand a depth-limited decision tree
    (TreeFunctions.addSuccessors), then choose a move via minimax with
    alpha-beta pruning (Decision_AlphaBeta).
    """
    def __init__(self):
        # Shared tree-building / printing / logging helper.
        self.util = TreeFunctions()
    def getMove(self, grid):
        """Return the chosen move code for `grid`."""
        if log: print(" ")
        # ((1)) Apply Heuristics on first move
        selectedMoves = None
        if applyMoveHeuristic:
            if log: print("*** Applying Move Heurisic ***")
            selectedMoves = self.ApplyHeuristics (grid)
            if log: print(">> Ply1 Move Suggested:",selectedMoves," <<")
        # ((2)) Create Tree with Utility Values
        rootNode = Node (None, grid, None, 0)
        self.util.addSuccessors (rootNode, selectedMoves, 0)
        if log: print("DecisionTree:")
        if log: self.util.printTree(rootNode)
        # ((3)) Get best move with MiniMax & AlphaBeta pruning
        moveNode = self.Decision_AlphaBeta (rootNode)
        if log: print(">> Move sugg with AlphaBeta:", str(moveNode), " <<")
        move = moveNode.getMove()
        # NOTE(review): the conditional below is equivalent to a plain `return move`.
        return move if move!= None else None
        #move = randint(0, len(moves) - 1)
        #return moves[move] if moves else None
    def ApplyHeuristics (self, grid):
        """Score each available move with several grid heuristics and return the
        list of moves tied for the highest score.

        Heuristics (see the numbered comments below): open squares, 2/4 tile
        penalty, high tiles in corners, adjacent equal tiles, row monotonicity,
        smoothness, and distance between similar values.
        """
        moves = grid.getAvailableMoves();
        gridCopy = grid.clone()
        if log: print (">> Heu for Moves :", moves )
        heuTracker = []
        gridFunc = GridFunctions()
        #iterating through all moves for Grid
        for thisMove in moves:
            gridCopy = grid.clone()
            gridCopy.move(thisMove)
            # Per-move heuristic accumulators, reset for every candidate move.
            cntSpaces = 0
            cnt2n4 = 0
            # NOTE(review): cnt4s is initialized but never used.
            cnt4s = 0
            highVal = 1
            highCorner = 0
            cntAdjSimVal = 0
            cntMonotonicity = -1
            diffBtwnAdjCell = 0
            cntSmoothness = 0
            cntDistBetwSimVals = 0
            overallHeu = 0
            bonus = 0
            totGridVal = 0
            gridTranspose = gridFunc.GridTranspose(gridCopy.map)
            gridAndSelfTrsp = [gridCopy.map , gridTranspose]
            # Iterating through each cell for a Grid
            # gridCopy.map enables checking left-right move
            # gridTranspose enables checking up-down move
            for aGrid in gridAndSelfTrsp:
                dist = 0
                lisOfCellVal = []
                for row in aGrid :
                    onCorner = True
                    prevCell = -1
                    monoRowInc = 0
                    monoRowDec = 0
                    for col in row : # iterating a row
                        highVal = max(col, highVal)
                        totGridVal += col
                        if col == 0: cntSpaces += 1
                        if col == 2 or col == 4: cnt2n4 += 1
                        # Checking for adjacent values
                        if onCorner : onCorner = False
                        else:
                            if col == prevCell : cntAdjSimVal += 1 # Adj similar values
                            if col > prevCell :
                                monoRowInc += 1 # Monotonicity increasing
                                diffBtwnAdjCell += (col - prevCell) # Diff between Adj cell value
                            if col < prevCell :
                                monoRowDec += 1 # Monotonicity decreasing
                                diffBtwnAdjCell += (prevCell - col) # Diff between Adj cell value
                        # Checking for dist between similar values greater than 4
                        dist +=1
                        if col in lisOfCellVal :
                            if col > 128 : cntDistBetwSimVals += dist + 2
                            else : cntDistBetwSimVals += dist
                        elif col > 8 : lisOfCellVal.append(col)
                        # update prevCell value for next iteration
                        prevCell = col
                        # NOTE(review): diffBtwnAdjCell is cumulative across the
                        # whole move, so this adds a growing running total on
                        # every cell — confirm this weighting is intended.
                        cntSmoothness += (diffBtwnAdjCell / highVal)
                    #End Loop for col
                    if monoRowInc > 2 : cntMonotonicity +=1
                    if monoRowDec > 2 : cntMonotonicity +=1
                #End Loop for aGrid
            #End Loop for gridAndSelfTrsp
            # Normalization factor: total tile value relative to the largest tile.
            aFactor = totGridVal/highVal
            #Count High Values in Corner
            # Corner bonus scales with the largest tile; tiles are powers of two,
            # so the 65/129/257/... boundaries act like 64/128/256/...
            if highVal < 65: bonus = 1
            elif highVal > 65 and highVal < 129 : bonus = 2
            elif highVal > 129 and highVal < 257: bonus = 3
            elif highVal > 257 and highVal < 513: bonus = 4
            elif highVal > 513 and highVal < 1025: bonus = 3
            else: bonus = 5
            if gridCopy.map[0][0] == highVal : highCorner+= bonus
            if gridCopy.map[0][3] == highVal : highCorner+= bonus
            if gridCopy.map[3][0] == highVal : highCorner+= bonus
            if gridCopy.map[3][3] == highVal : highCorner+= bonus
            # Heuristics 1 : Bonus of 0.25 for each open squares
            overallHeu = cntSpaces * 1
            # Heuristic 2: Penalty of 0.3 for occurance of 2 or 4
            overallHeu -= cnt2n4 * 1
            # Heuristic 3: Bonus of 3 for large values on edge
            overallHeu += highCorner
            # Heuristic 4: Bonus of 2 for adjacent equal values counting number of potential merges
            overallHeu += cntAdjSimVal * 2
            # Heuristic 5: Monotonicity, Bonus of 1 for each row having tiles either increasing or decreasing
            overallHeu += cntMonotonicity * 1
            # Heuristic 6: Smoothness, Penalty of ( total value of diff between neighboring tiles / maxTileValue )
            if highVal < 257: overallHeu -= (cntSmoothness / (aFactor*2))
            else : overallHeu -= (cntSmoothness / (aFactor*3))
            # Heuristic 7: Distance between similar values, penalty for lenght of dist
            if highVal < 257: overallHeu -= (cntDistBetwSimVals / (aFactor*2))
            else: overallHeu -= (cntDistBetwSimVals / (aFactor*3))
            # Heuristic x : something to do with avg and median
            # tbd
            heuTracker.append(overallHeu)
            if log:print (">> [%s]heu [ Sps:%s, 2n4:%s, hCorner:%s, adjSim:%s, Mono:%s, Smooth:%s, DistOfSim:%s aFactor:%s, bonus:%s"%(thisMove, cntSpaces,
                    -1*cnt2n4, highCorner,cntAdjSimVal,cntMonotonicity,(-1*cntSmoothness)/aFactor,
                    (-1*cntDistBetwSimVals)/ aFactor, aFactor, bonus))
        #End iterating through all moves for Grid
        # Collect every move tied for the best heuristic score.
        heuBestMoves = []
        maxHeu = max(heuTracker)
        for index in range (0, len(heuTracker)):
            if heuTracker[index] == maxHeu: heuBestMoves.append(moves[index])
        if log: print (">> Final Heu Score:", heuTracker)
        #if log:print (">> Highest Heu:", maxHeu )
        if log:print (">> Best Move with Heu:", heuBestMoves )
        return heuBestMoves
    def Decision_AlphaBeta (self, starting_state):
        """Top-level maximizer: return the ply-1 child of `starting_state` with
        the highest minimax utility, using alpha-beta pruning below it."""
        self.util.prt("Initiating AlphaBeta.." )
        alpha = -infinity
        beta = infinity
        best_state = None
        for state in starting_state.getSuccessors(): # initial maximizer loop
            (aState, utility) = self.Minimize(state, alpha, beta)
            if utility > alpha:
                alpha = utility
                best_state = state
        # This is alternate logic and does not work
        #(best_state, utility) = self.Maximize(starting_state, alpha, beta)
        self.util.prt(">> AlphaBeta: Max Utility : " + str(alpha) +
            " | Best State is Move:" + str(best_state.getMove()))
        return best_state
    def Maximize (self, state, alpha, beta):
        """MAX step of alpha-beta: return (best child, max utility) for `state`.

        Terminal nodes return (None, utility); the search prunes when the
        running maximum reaches `beta`.
        """
        #self.util.prt("AlphaBeta-->MAX: Visited Node :: " +
        #    str(state.getUtility()) + "/" + str(state.getDepth()))
        if state.terminalTest():
            return (None, state.getUtility())
        maxChild = None
        maxUtility = -infinity
        for child in state.getSuccessors():
            (aChild, utility) = self.Minimize(child, alpha, beta)
            if utility > maxUtility:
                maxUtility = utility
                maxChild = child
            if maxUtility >= beta:
                return (maxChild, maxUtility) # break
            if maxUtility > alpha:
                alpha = maxUtility
        return (maxChild, maxUtility)
    def Minimize (self, state, alpha, beta):
        """MIN step of alpha-beta: return (best child, min utility) for `state`.

        Terminal nodes return (None, utility); the search prunes when the
        running minimum reaches `alpha`.
        """
        #self.util.prt("AlphaBeta-->MIN: Visited Node :: " +
        #    str(state.getUtility()) + "/" + str(state.getDepth()))
        if state.terminalTest():
            return (None, state.getUtility())
        minChild = None
        minUtility = infinity
        for child in state.getSuccessors():
            (aChild, utility) = self.Maximize(child, alpha, beta)
            if utility < minUtility:
                minUtility = utility
                minChild = child
            if minUtility <= alpha:
                return (minChild, minUtility) # break
            if minUtility < beta:
                beta = minUtility
        return (minChild, minUtility)
class GridFunctions:
    """Grid-level helpers: transposition and a static evaluation function."""
    def GridTranspose (self, gridAsList):
        """Return the transpose of a 4x4 grid given as a list of row lists.

        Transposing lets row-oriented scans also cover the columns.
        """
        # zip(*rows) yields the i-th element of every row, i.e. the columns.
        return [list(column) for column in zip(*gridAsList)]
    def evalGrid(self, grid):
        """Heuristic evaluation of a terminal grid state; higher is better.

        Rewards empty cells, mobility, mergeable neighbours, and tiles next
        to a space; mildly rewards all other faces.
        """
        # Heuristic for State
        if log: print (">> evaluating grid: ", grid.map)
        numSpaces = len(grid.getAvailableCells())
        faceAdjtoSpace = 0
        otherFaces = 0
        numMoves = len(grid.getAvailableMoves())
        numAlignedVal = 0
        for row in grid.map:
            onCorner = True
            prevCell = -1
            for thisCell in row :
                if onCorner:
                    prevCell = thisCell
                    onCorner = False
                else:
                    if thisCell == prevCell :
                        numAlignedVal +=1 # Number of aligned values
                    if thisCell > 0 and prevCell == 0:
                        faceAdjtoSpace += 1 # Sum of faces adjacent to a space
                    else:
                        otherFaces += 1 # Sum of other faces
                    # BUGFIX: prevCell was never advanced here, so every cell in
                    # the row was compared against the row's FIRST cell instead of
                    # its left neighbour (the analogous loop in
                    # PlayerAI.ApplyHeuristics does advance it each iteration).
                    prevCell = thisCell
        evalu = 128
        evalu += ( numSpaces * 128)   # reward empty cells
        evalu += ( numMoves * 256)    # reward mobility
        if faceAdjtoSpace > 0 :
            evalu += (4096 / faceAdjtoSpace )  # fewer exposed faces score higher
        if otherFaces > 0 :
            evalu += (otherFaces * 4)
        evalu += (numAlignedVal * 2)  # mergeable neighbours
        if log: print (">> evaluation=",evalu,
                " (%s, %s, %s, %s, %s )"%(numSpaces, numMoves,
                faceAdjtoSpace, otherFaces, numAlignedVal), " <<")
        return evalu
    def getGridAsList (self, grid) :
        """Debug stub: dump the raw grid map; always returns None."""
        print ("grid map", grid.map)
        return None
class TreeFunctions:
    """Helpers for building, printing, and logging the decision tree."""
    def addSuccessors(self, parentNode, ply1Moves=None, parentDepth=1):
        """Recursively expand parentNode one ply at a time.

        When ply1Moves is given, only those moves are expanded at this level
        (each deeper level re-uses the single move that reached it). Nodes at
        the depth limit become terminal leaves carrying a utility value.
        """
        childDepth = parentDepth + 1
        evaluator = GridFunctions()
        if ply1Moves is None:
            candidateMoves = parentNode.getGrid().getAvailableMoves()
        else:
            candidateMoves = ply1Moves
        for candidate in candidateMoves:
            movedGrid = parentNode.getGrid().clone()
            movedGrid.move(candidate)
            if childDepth < (depthLimit - 1):
                # Interior node: keep the grid, defer the utility, recurse.
                child = Node(candidate, movedGrid, None, childDepth)
                parentNode.addChild(child)
                self.addSuccessors(child, [candidate], childDepth)
            else:
                # Terminal node: store a utility instead of a grid.
                if applyStateHeuristic:
                    # Adding Evaluation function to Terminal node
                    if log: print("*** Applying State Heurisic ***")
                    child = Node(candidate, None, evaluator.evalGrid(movedGrid), childDepth)
                else:
                    # Adding grid value to Terminal node
                    child = Node(candidate, None, movedGrid.getMaxTile(), childDepth)
                parentNode.addChild(child)
    def printTree(self, node, depth=0):
        """Pretty-print the subtree rooted at `node`, one line per node."""
        if node is None:
            return
        print (self.getInitChars(depth), node,
            "(",node.getSuccessorCnt(),")",
            "[",node.getUtility(),"/",node.getDepth(),"]",
            ) # print Node with depth
        successors = node.getSuccessors()
        if successors is None:
            return
        for child in successors:
            self.printTree(child, depth + 1)
    def getInitChars(self, depth):
        """Return the indentation prefix used by printTree for `depth`."""
        prefix = ">"
        if depth > 0:
            prefix += " | " * depth
            prefix += " +-"
        return prefix
    def prt(self, logMesg):
        """Print `logMesg` only when the module-level log flag is on."""
        if log:
            print (logMesg)
    def printGrid(self, thisGrid):
        """Dump a 4x4 grid, one pipe-delimited line per row."""
        for r in range(4):
            print("|%d | %d | %d | %d|"%(thisGrid.map[r][0],thisGrid.map[r][1],thisGrid.map[r][2],thisGrid.map[r][3]))
class Node:
    """One decision-tree node: a move code, the resulting grid (None for
    terminal leaves), a utility value, a depth, and a list of children."""
    def __init__(self, move=5, grid=None, value=0, depth=0):
        self.move = move        # move code that produced this state
        self.grid = grid        # resulting grid; None marks a terminal node
        self.value = value      # utility / evaluation score
        self.depth = depth      # ply depth within the tree
        self.children = []      # successor nodes
    def __repr__(self):
        # Nodes display as their move code.
        return str(self.move)
    def getGrid(self):
        return self.grid
    def getUtility(self):
        return self.value
    def addChild(self, childNode):
        self.children.append(childNode)
    def getChild(self, index):
        return self.children[index]
    def removeChild(self, index):
        """Drop the child at `index`; return whether anything was removed."""
        if self.children and index < len(self.children):
            self.children.pop(index)
            return True
        return False
    def getSuccessorCnt(self):
        return len(self.children)
    def getSuccessors(self):
        # Historical contract: an empty child list is reported as None.
        return self.children if self.children else None
    def getDepth(self):
        return self.depth
    def getMove(self):
        return self.move
    def printIt(self):
        """Return a one-line human-readable summary of this node."""
        return "move:" + str(self.move) + " / value:" + str(self.value) + " / depth:" + str(self.depth)
    def terminalTest(self):
        # A node created without a grid is a terminal (leaf) node.
        return self.grid is None
|
from typing import List
from django.core.management.base import BaseCommand, CommandParser
from sok.models import Publication
class Command(BaseCommand):
    """Write a LaTeX \\cite command for the given publication primary keys."""
    def add_arguments(self, parser: CommandParser):
        # Accept one or more integer primary keys as positional arguments.
        parser.add_argument('pk', nargs='+', type=int)
    def handle(self, *args, **options):
        # Resolve each primary key to its BibTeX cite key, preserving the
        # order the keys were supplied on the command line.
        cite_keys = [
            Publication.objects.get(pk=pk).cite_key
            for pk in options['pk']
        ]
        # No trailing newline so the output can be pasted inline into a document.
        self.stdout.write(r"\cite{" + ",".join(cite_keys) + "}", ending='')
|
import datetime
def printTimeStamp(name):
    """Print the program author's name and the current timestamp.

    The output labels are intentionally in Ukrainian ("program author" /
    "compile time").
    """
    author_line = 'Автор програми: ' + name
    stamp_line = 'Час компіляції: ' + str(datetime.datetime.now())
    print(author_line)
    print(stamp_line)
# --- Script body: read a number and report the sum of its digits. ---
n = input("Введите 4-значное число: ")
# Generalized from summing exactly the first four characters (l[0]..l[3]):
# summing every digit gives the same answer for the advertised 4-digit input,
# also handles longer numbers, and no longer raises IndexError when the user
# types fewer than 4 characters. Non-digit input still raises ValueError,
# exactly as before.
digit_sum = sum(int(digit) for digit in n.strip())
print("Сумма цифр числа:", digit_sum)
printTimeStamp("Доброштан і Глигало")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.