text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python
"""Render an S3 browser-upload page.

Base64-encodes policy_doc.json, signs it with HMAC-SHA1 using the AWS
secret key (S3 POST policy, Signature V2), and prints template.index.html
filled with the bucket name, access key, policy and signature.

Requires AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY in the environment.
"""
import base64
import hashlib
import hmac
import os
import sys

from jinja2 import Template

if len(sys.argv) != 2:
    print('usage: python %s <BUCKET_NAME>' % sys.argv[0])
    sys.exit(1)  # non-zero exit: the original exited 0 on a usage error

AWS_ACCESS_KEY = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
BUCKET = sys.argv[1]

# read as bytes: base64/hmac require bytes under Python 3
with open('policy_doc.json', 'rb') as policy_f:
    policy = base64.b64encode(policy_f.read())

signature = base64.b64encode(
    hmac.new(AWS_SECRET_ACCESS_KEY.encode('utf-8'), policy, hashlib.sha1).digest())

with open('template.index.html') as template_f:
    template = Template(template_f.read())

print(template.render(
    bucket=BUCKET,
    signature=signature.decode('ascii'),
    policy=policy.decode('ascii'),
    aws_key=AWS_ACCESS_KEY))
|
###################################################################
def same_site(self, dc=10, in_place=True, verbose=False):
###################################################################
    """
    Check that all gts in the current Sgts are actually the same site. If a given time series is
    found to be of two separate sites, then a new gts is added to the return Sgts instance.

    :param dc: critical distance in km used to decide to split the time series
    :param in_place: if True modify current Sgts, if False return a new Sgts
    :param verbose: verbose mode
    :return: a Sgts instance (self when in_place=True)
    """
    # import
    import numpy as np
    from pyacs.gts.Sgts import Sgts
    from pyacs.gts.Gts import Gts

    if not in_place:
        new_Sgts = Sgts(read=False)

    # start loop on sites
    lcode = self.lcode()
    for site in lcode:
        if verbose:
            print('-- Processing ', site)
        my_ts = self.__dict__[site].copy()

        if my_ts.data_xyz is None:
            # if no data_xyz go to next gts (the original fell through and
            # reused stale/undefined arrays from the previous iteration)
            print("!!! WARNING: data_xyz attribute required for method same_site and not found gts %s" % (site))
            if in_place:
                self.__dict__[site] = my_ts
            else:
                new_Sgts.append(my_ts)
            continue

        data = my_ts.data_xyz[:, 1:4]
        ddata = np.copy(my_ts.data_xyz[:, 1:4])

        # ensure median calculation: odd row count so the median is an
        # actual observation
        if np.mod(data.shape[0], 2) == 0:
            # duplicates the last date
            ddata = np.vstack((ddata, ddata[-1, :]))

        median = np.median(ddata, axis=0)
        dist_data = np.sqrt(np.sum((data - median) ** 2, axis=1))
        # dc is in km, coordinates in m
        lindex = np.where(dist_data > dc * 1.E3)

        # case gts needs to be split
        if len(lindex[0]) > 0:
            # create a new code
            new_code = my_ts.code[:3] + '_'
            if new_code in self.lcode():
                print("!!! ERROR: try to create a new gts with code %s and it already exists." % (new_code))
                new_code = my_ts.code[:2] + '__'
            # len(lindex[0]) (not len(lindex), which is always 1) and the
            # max *distance* (not raw coordinates) in the diagnostic
            print("-- time series for site %s appears to include different sites because there are coordinates at %d dates %.1lf km from the median position" % (site, len(lindex[0]), np.max(dist_data) * 1.E-3))
            print("-- %s time series will be split into code %s and code %s" % (site, site, new_code))
            # create a new gts
            new_gts = Gts(code=new_code, data_xyz=np.copy(my_ts.data_xyz[lindex]))
            new_gts.xyz2neu(corr=True)
            # remove the line from my_ts
            my_ts.data_xyz = np.delete(my_ts.data_xyz, lindex, axis=0)
            my_ts.xyz2neu(corr=True)
            # update the output
            if in_place:
                self.append(new_gts)
            else:
                new_Sgts.append(new_gts)

        if in_place:
            self.__dict__[site] = my_ts
        else:
            new_Sgts.append(my_ts)

    if in_place:
        return self
    else:
        return new_Sgts
|
import requests
import os
import random
import string
import json
from threading import *
def spammer() -> None:
    """Flood the hard-coded credential-collection endpoint with fake logins.

    NOTE(review): abuse-capable code -- this deliberately POSTs bogus
    email/password pairs to a (phishing) URL in a tight loop. Left
    functionally unchanged; bugs are flagged below, not fixed.
    """
    chars = string.ascii_letters + string.digits + '!@#$%^()'
    # BUG(review): this *assigns over* random.seed instead of calling it,
    # so the PRNG is never actually reseeded from os.urandom
    random.seed = (os.urandom(1024))
    url = 'https://garina999.win/k_fac.php'
    # names.json: list of base names used to fabricate addresses
    names = json.loads(open('names.json').read())
    for x in names:
        # NOTE(review): random.choice returns a single digit, so the
        # ''.join() wrapper is a no-op here
        extra = ''.join(random.choice(string.digits))
        username = x.lower() + extra + '@gmail.com'
        password = ''.join(random.choice(chars)for i in range(8))
        requests.post(url, allow_redirects=False, data={
            'email': username,
            'pass': password
        })
        print("Sending Username:" +username ,"and Password:" + password)
# spin up 100 worker threads running spammer()
threads = []
for i in range(100):
    # BUG(review): Thread(target=spammer()) *calls* spammer immediately and
    # passes its None return value as the target -- the work actually runs
    # sequentially right here, and the started threads below do nothing
    t = Thread(target=spammer())
    t.daemon = True
    threads.append(t)
for i in range(100):
    threads[i].start()
for i in range(100):
    threads[i].join()
|
from flask import Flask, request, jsonify, Response
from models import UserModel
import json
app = Flask(__name__)
# single shared model instance; assumes UserModel is safe to share across
# concurrent requests -- TODO confirm
user_model = UserModel()
@app.route("/slack-response", methods=['POST'])
def slack_proxy_response():
    """Record a Slack interactive-message response for the sending user.

    Expects Slack's interactive payload in the 'payload' form field (JSON
    with a user.id). Returns 200 on success, 400 on a missing or
    malformed payload (the original raised and answered 500).

    NOTE(review): no Slack signing-secret verification is performed here;
    anyone who knows the URL can post -- consider verifying
    X-Slack-Signature.
    """
    raw = request.form.get('payload')
    if raw is None:
        return Response(status=400)
    try:
        data = json.loads(raw)
        user_id = data["user"]["id"]
    except (ValueError, KeyError, TypeError):
        return Response(status=400)
    user_model.entered_this_round(user_id)
    return Response(status=200)
if __name__ == "__main__":
    # Flask development server only; use a real WSGI server in production
    app.run()
import falcon
def _default_failed(req, resp, **kwargs):
    """Default auth-failure action: redirect the client to the login page."""
    raise falcon.HTTPFound('/auth/login')
class AuthRequiredMiddleware:
    """Falcon middleware that blocks requests lacking a valid auth token.

    A resource opts in (the default) or out through a boolean class
    attribute (``auth_required`` unless overridden). The token is read
    from ``req.context`` under ``context_key``; an Exception stored
    there (e.g. a JWT parse failure recorded upstream) counts as no
    token. When auth is required and missing, the configured
    ``when_fails`` callable runs (default: redirect to /auth/login).

    Example:
        import falcon
        from falcon_helpers.middlewares.auth_required import AuthRequiredMiddleware

        class Resource:
            auth_required = True

            def on_get(self, req, resp):
                # ...

        def when_fails_auth(req, resp, token_value):
            raise TerribleException(token_value)

        api = falcon.App(
            middleware=[
                AuthRequiredMiddleware(when_fails=when_fails_auth)
            ]
        )

        api.add_route('/', Resource())

    Attributes:
        resource_param: The paramater to pull the boolean from
        context_key: the key the token will be found on the request
        when_fails: (function) A function to execute when the
            authentication fails
    """

    def __init__(self, resource_param='auth_required',
                 context_key='auth_token_contents',
                 when_fails=_default_failed):
        self.resource_param = resource_param
        self.context_key = context_key
        self.failed_action = when_fails

    def process_resource(self, req, resp, resource, params):
        """Runs for every routed request; triggers the failure action if needed."""
        wants_auth = getattr(resource, self.resource_param, True)
        token = req.context.get(self.context_key, None)
        # an Exception in the context means token parsing failed upstream
        if isinstance(token, Exception):
            token = None
        if wants_auth and not token:
            return self.failed_action(req=req, resp=resp, token_value=token)
|
import requests
import simplejson
import csv
""" for the example only"""
import trial_file_reader
from user_talk_vandal_vocab_count import *
from user_talk_vandal_vocab_ratio import *
from user_revision_count import *
from user_talk_revision_count import *
from user_article_to_edit_ratio import *
from user_empty_comment_ratio import *
from user_comment_avg_length import *
from user_edit_to_reversion_ratio import *
#from user_external_link_ratio import *
from edited_article_user_num_edits import *
from user_has_edited_talk_page import *
from user_is_ip_address import *
from user_is_bot import *
from user_uses_editing_tool import *
from username_has_capitals import *
from username_has_numbers import *
from username_ends_with_numbers import *
from editor_started_article import *
from article_is_biography import *
from article_is_protected import *
from is_asia_ip import *
from is_europe_ip import *
from is_north_america_ip import *
from is_australia_ip import *
from is_africa_ip import *
from is_south_america_ip import *
from user_num_groups import *
#from user_wiki_age_dif import *
from user_average_article_newness import *
from user_total_num_edits import *
from user_at_least_2_edits import *
from user_at_least_5_edits import *
from user_at_least_10_edits import *
from user_at_least_30_edits import *
from user_at_least_100_edits import *
from user_at_least_200_edits import *
def join_edits_with_feature_on_user(feature_function, edits):
    """edits are the output from parsing trial.xml

    Returns a list of (feature_value, is_vandalism) pairs where
    is_vandalism is 1/0. Edits whose feature computation fails are
    skipped with a warning instead of aborting the whole run.
    """
    result = []
    for edit in edits:
        try:
            featureval = feature_function(edit)
            result.append((featureval,
                           int(edit['isVandalism'] == 'true')))
        except Exception:
            # narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; the dead `featureval = 0` preset is gone
            print('error requesting for: ', edit['user'], 'moving on...')
    return result
def join_editid_with_feature(feature_function, edits):
    """edits are the output from parsing trial.xml

    Returns a list of (EditID, feature_value) pairs. Edits whose feature
    computation fails are skipped with a warning.
    """
    result = []
    for edit in edits:
        try:
            featureval = feature_function(edit)
            result.append((edit['EditID'], featureval))
        except Exception:
            # narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate
            print('error requesting for: ', edit['user'], 'moving on...')
    return result
def dump_to_csv(tuple_list, file_name):
    """Write each tuple of tuple_list as one CSV row to file_name (overwrites)."""
    # with-statement guarantees the file is closed even on error (the
    # original leaked the handle on exception); newline='' lets the csv
    # module control line endings under Python 3
    with open(file_name, "w", newline="") as f:
        wr = csv.writer(f)
        wr.writerows(tuple_list)
def example():
    """example of how you can use the functions in this file"""
    # parse the trial file, score the first ten edits with one user
    # feature, and dump the (feature, label) pairs to CSV
    trialxmlpath = "../workspace/cluebotng/editsets/D/trial.xml"
    exampleset = trial_file_reader.parse_trial_file(trialxmlpath)[0:10]
    pairs = join_edits_with_feature_on_user(user_talk_revision_count, exampleset)
    dump_to_csv(pairs, "examplefeature.csv")
def feature_to_text(feature_function, number_of_examples=10, trialxmlpath = "../workspace/cluebotng/editsets/D/trial.xml"):
    """use this to get a new feature for the dataset stored into a file"""
    print('reading training set from ' + trialxmlpath)
    trainingset = trial_file_reader.parse_trial_file(trialxmlpath)
    print('done reading training set')
    # a non-positive count means "use the whole training set"
    exampleset = trainingset[:] if number_of_examples <= 0 else trainingset[0:number_of_examples]
    rows = join_edits_with_feature_on_user(feature_function, exampleset)
    dump_to_csv(rows, feature_function.__name__ + '.csv')
def editid_to_featurevalues(feature_function, trialxmlpath = "../workspace/cluebotng/editsets/D/trial.xml"):
    """use this to get a new feature for the dataset stored into a file"""
    print('reading training set from ' + trialxmlpath)
    exampleset = trial_file_reader.parse_trial_file(trialxmlpath)
    print('done reading training set')
    pairs = join_editid_with_feature(feature_function, exampleset)
    dump_to_csv(pairs, feature_function.__name__ + '.dat')
|
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
#Copyright 2008 Steffen Jobbagy-Felso
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, version 3 of the License.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#In the "official" distribution you can find the license in
#agpl-3.0.txt in the docs folder of the package.
#This file contains simple functions for fpdb
#Aiming to eventually remove this module, functions will move to, eg:
#fpdb_db db create/re-create/management/etc
#Hands or related files for saving hands to db, etc
import datetime
import time
import re
import sys
from Exceptions import *
import locale
import Card
# site identifiers (presumably PS = PokerStars, FTP = Full Tilt Poker --
# verify against fpdb_db)
PS = 1
FTP = 2
# TODO: these constants are also used in fpdb_save_to_db and others, is there a way to do like C #define, and #include ?
# answer - yes. These are defined in fpdb_db so are accessible through that class.
# database backend identifiers
MYSQL_INNODB = 2
PGSQL = 3
SQLITE = 4
# character encoding of the current locale, used to decode hand-history bytes
LOCALE_ENCODING = locale.getdefaultlocale()[1]
#returns an array of the total money paid. intending to add rebuys/addons here
def calcPayin(count, buyin, fee):
    """Return a list with one (buyin + fee) entry per entrant."""
    # list repetition instead of the Python-2-only xrange loop
    return [buyin + fee] * count
#end def calcPayin
def checkPositions(positions):
    """ verify positions are valid; raises FpdbError on the first bad one """
    # The original raised using the generator variable `p` outside its
    # scope (NameError on the error path) and concatenated it unconverted
    # (TypeError for ints). Loop explicitly so the offending value is
    # available for the message.
    for p in positions:
        if not (p == "B" or p == "S" or (p >= 0 and p <= 9)):
            raise FpdbError("invalid position '" + str(p) + "' found in checkPositions")
### RHH modified to allow for "position 9" here (pos==9 is when you're a dead hand before the BB
### eric - position 8 could be valid - if only one blind is posted, but there's still 10 people, ie a sitout is present, and the small is dead...
def classifyLines(hand, category, lineTypes, lineStreets):
    """ makes a list of classifications for each line for further processing
    manipulates passed arrays """
    # lineTypes receives one tag per hand line ("header", "action", "cards",
    # "win", "rake", "ignore", ...); lineStreets receives the street each
    # line belongs to (names for flop games, 0-4 for stud streets).
    currentStreet = "predeal"
    done = False #set this to true once we reach the last relevant line (the summary, except rake, is all repeats)
    for i, line in enumerate(hand):
        if done:
            # after the rake line, only mucked-cards lines still matter
            if "[" not in line or "mucked [" not in line:
                lineTypes.append("ignore")
            else:
                lineTypes.append("cards")
        elif line.startswith("Dealt to"):
            lineTypes.append("cards")
        elif i == 0:
            lineTypes.append("header")
        elif line.startswith("Table '"):
            lineTypes.append("table")
        elif line.startswith("Seat ") and ( ("in chips" in line) or "($" in line):
            lineTypes.append("name")
        elif isActionLine(line):
            lineTypes.append("action")
            # the first blind post marks the start of preflop action
            if " posts " in line or " posts the " in line:
                currentStreet="preflop"
        elif " antes " in line or " posts the ante " in line:
            lineTypes.append("ante")
        elif line.startswith("*** FLOP *** ["):
            lineTypes.append("cards")
            currentStreet="flop"
        elif line.startswith("*** TURN *** ["):
            lineTypes.append("cards")
            currentStreet="turn"
        elif line.startswith("*** RIVER *** ["):
            lineTypes.append("cards")
            currentStreet="river"
        # stud streets are numeric: 0-4 for 3rd through 7th street
        elif line.startswith("*** 3"):
            lineTypes.append("ignore")
            currentStreet=0
        elif line.startswith("*** 4"):
            lineTypes.append("ignore")
            currentStreet=1
        elif line.startswith("*** 5"):
            lineTypes.append("ignore")
            currentStreet=2
        elif line.startswith("*** 6"):
            lineTypes.append("ignore")
            currentStreet=3
        elif line.startswith("*** 7") or line == "*** RIVER ***":
            lineTypes.append("ignore")
            currentStreet=4
        elif isWinLine(line):
            lineTypes.append("win")
        elif line.startswith("Total pot ") and "Rake" in line:
            lineTypes.append("rake")
            done=True
        elif "*** SHOW DOWN ***" in line or "*** SUMMARY ***" in line:
            lineTypes.append("ignore")
            #print "in classifyLine, showdown or summary"
        elif " shows [" in line:
            lineTypes.append("cards")
        else:
            raise FpdbError("unrecognised linetype in:"+hand[i])
        lineStreets.append(currentStreet)
def convert3B4B(category, limit_type, actionTypes, actionAmounts):
    """calculates the actual bet amounts in the given amount array and changes it accordingly."""
    # Raw histories record a raise as the total "raised to" amount;
    # subtract the player's previous bet so each entry becomes the
    # increment actually added to the pot.
    for i in xrange(len(actionTypes)):          # streets
        for j in xrange(len(actionTypes[i])):   # players
            bets = []
            for k in xrange(len(actionTypes[i][j])):
                if (actionTypes[i][j][k] == "bet"):
                    bets.append((i,j,k))
            if (len(bets)>=2):
                #print "len(bets) 2 or higher, need to correct it. bets:",bets,"len:",len(bets)
                # walk backwards so the earlier amounts are still raw totals
                for betNo in reversed(xrange (1,len(bets))):
                    amount2 = actionAmounts[bets[betNo][0]][bets[betNo][1]][bets[betNo][2]]
                    amount1 = actionAmounts[bets[betNo-1][0]][bets[betNo-1][1]][bets[betNo-1][2]]
                    actionAmounts[bets[betNo][0]][bets[betNo][1]][bets[betNo][2]] = amount2 - amount1
def convertBlindBet(actionTypes, actionAmounts):
    """ Corrects the bet amount if the player had to pay blinds """
    i = 0#setting street to pre-flop
    for j in xrange(len(actionTypes[i])):#playerloop
        blinds = []
        bets = []
        for k in xrange(len(actionTypes[i][j])):
            if actionTypes[i][j][k] == "blind":
                blinds.append((i,j,k))
            # only bets made after a blind was posted need correcting
            if blinds and actionTypes[i][j][k] == "bet":
                bets.append((i,j,k))
        if len(bets) == 1:
            # the recorded bet is a "raise to" total; deduct the blind
            # this player already paid
            blind_amount=actionAmounts[blinds[0][0]][blinds[0][1]][blinds[0][2]]
            bet_amount=actionAmounts[bets[0][0]][bets[0][1]][bets[0][2]]
            actionAmounts[bets[0][0]][bets[0][1]][bets[0][2]] = bet_amount - blind_amount
#converts the strings in the given array to ints (changes the passed array, no returning). see table design for conversion details
def convertCardValues(arr):
    """Convert every sub-array of card strings to ints, in place."""
    # Was `map(convertCardValuesBoard, arr)`: eager under Python 2 but a
    # silent no-op under Python 3, where map is lazy. An explicit loop
    # behaves identically on both.
    for row in arr:
        convertCardValuesBoard(row)

# a 0-card is one in a stud game that we did not see or was not shown
card_map = { 0: 0, "2": 2, "3" : 3, "4" : 4, "5" : 5, "6" : 6, "7" : 7, "8" : 8,
             "9" : 9, "T" : 10, "J" : 11, "Q" : 12, "K" : 13, "A" : 14}

def convertCardValuesBoard(arr):
    """ converts the strings in the given array to ints
    (changes the passed array, no returning). see table design for
    conversion details """
    # enumerate instead of the Python-2-only xrange index loop
    for i, value in enumerate(arr):
        arr[i] = card_map[value]
def createArrays(category, seats, card_values, card_suits, antes, winnings,
                 rakes, action_types, allIns, action_amounts, actionNos,
                 actionTypeByNo):
    """ this creates the 2D/3D arrays. manipulates the passed arrays instead of returning. """
    for i in xrange(seats):#create second dimension arrays
        card_values.append( [] )
        card_suits.append( [] )
        antes.append(0)
        winnings.append(0)
        rakes.append(0)
    # flop games have 4 betting streets, stud games 5
    streetCount = 4 if (category == "holdem" or category == "omahahi" or
                        category == "omahahilo") else 5
    for i in xrange(streetCount): #build the first dimension array, for streets
        action_types.append([])
        allIns.append([])
        action_amounts.append([])
        actionNos.append([])
        actionTypeByNo.append([])
        for j in xrange(seats): # second dimension arrays: players
            action_types[i].append([])
            allIns[i].append([])
            action_amounts[i].append([])
            actionNos[i].append([])
    # if (category=="holdem" or category=="omahahi" or category=="omahahilo"):
    #     pass
    if category == "razz" or category == "studhi" or category == "studhilo": #need to fill card arrays.
        # stud hands always have 7 card slots; unseen cards stay 0/"x"
        for i in xrange(seats):
            for j in xrange(7):
                card_values[i].append(0)
                card_suits[i].append("x")
    # else:
    #     raise FpdbError("invalid category")
#end def createArrays
def fill_board_cards(board_values, board_suits):
    """Pad the two board-card arrays out to five cards with 0/"x" placeholders."""
    missing = 5 - len(board_values)
    board_values.extend([0] * missing)
    board_suits.extend(["x"] * missing)
def fillCardArrays(player_count, base, category, card_values, card_suits):
    """fills up the two card arrays"""
    # pad every player's hole cards out to the game's full card count,
    # using 0/"x" for unseen cards
    if category == "holdem":
        cardCount = 2
    elif category == "omahahi" or category == "omahahilo":
        cardCount = 4
    elif base == "stud":
        cardCount = 7
    else:
        raise FpdbError("invalid category:", category)
    for i in xrange(player_count):
        while len(card_values[i]) < cardCount:
            card_values[i].append(0)
            card_suits[i].append("x")
#end def fillCardArrays
#filters out a player that folded before paying ante or blinds. This should be called
#before calling the actual hand parser. manipulates hand, no return.
def filterAnteBlindFold(hand):
    """Remove every line of a player who folded/sat out before the deal."""
    #todo: this'll only get rid of one ante folder, not multiple ones
    #todo: in tourneys this should not be removed but
    #print "start of filterAnteBlindFold"
    # pre3rd: all lines before the hole cards / 3rd street marker
    pre3rd = []
    for i, line in enumerate(hand):
        if line.startswith("*** 3") or line.startswith("*** HOLE"):
            pre3rd = hand[0:i]
    foldeeName = None
    for line in pre3rd:
        if line.endswith("folds") or line.endswith("is sitting out") or line.endswith(" stands up"): #found ante fold or timeout
            # try each line shape in turn until the name can be extracted
            pos = line.find(" folds")
            foldeeName = line[0:pos]
            if pos == -1 and " in chips)" not in line:
                pos = line.find(" is sitting out")
                foldeeName = line[0:pos]
            if pos == -1:
                pos = line.find(" stands up")
                foldeeName = line[0:pos]
            if pos == -1:
                # seat line: the name sits between ": " and " ("
                pos1 = line.find(": ") + 2
                pos2 = line.find(" (")
                foldeeName = line[pos1:pos2]
    if foldeeName is not None:
        #print "filterAnteBlindFold, foldeeName:",foldeeName
        # null out every line mentioning the foldee, then drop the nulls
        for i, line in enumerate(hand):
            if foldeeName in line:
                hand[i] = None
    return [line for line in hand if line]
def stripEOLspaces(str):
    """Return the line with trailing whitespace removed."""
    # NOTE(review): the parameter name shadows the builtin `str`
    return str.rstrip()
def filterCrap(hand, isTourney):
    """ removes useless lines as well as trailing spaces """
    #remove trailing spaces at end of line
    hand = [line.rstrip() for line in hand]
    #general variable position word filter/string filter
    # lines set to False here are dropped at the end of the function
    for i in xrange(len(hand)):
        if hand[i].startswith("Board ["):
            hand[i] = False
        elif hand[i].find(" out of hand ")!=-1:
            # keep the action, chop the fixed-length "(moved from...)" tail
            hand[i]=hand[i][:-56]
        elif "($0 in chips)" in hand[i]:
            hand[i] = False
        elif hand[i]=="*** HOLE CARDS ***":
            hand[i] = False
        elif hand[i].endswith("has been disconnected"):
            hand[i] = False
        elif hand[i].endswith("has requested TIME"):
            hand[i] = False
        elif hand[i].endswith("has returned"):
            hand[i] = False
        elif hand[i].endswith("will be allowed to play after the button"):
            hand[i] = False
        elif hand[i].endswith("has timed out"):
            hand[i] = False
        elif hand[i].endswith("has timed out while disconnected"):
            hand[i] = False
        elif hand[i].endswith("has timed out while being disconnected"):
            hand[i] = False
        elif hand[i].endswith("is connected"):
            hand[i] = False
        elif hand[i].endswith("is disconnected"):
            hand[i] = False
        elif hand[i].find(" is low with [")!=-1:
            hand[i] = False
        elif hand[i].endswith(" mucks"):
            hand[i] = False
        elif hand[i].endswith(": mucks hand"):
            hand[i] = False
        elif hand[i] == "No low hand qualified":
            hand[i] = False
        elif hand[i] == "Pair on board - a double bet is allowed":
            hand[i] = False
        elif " shows " in hand[i] and "[" not in hand[i]:
            hand[i] = False
        elif hand[i].startswith("The button is in seat #"):
            hand[i] = False
        #above is alphabetic, reorder below if bored
        elif hand[i].startswith("Time has expired"):
            hand[i] = False
        elif hand[i].endswith("has reconnected"):
            hand[i] = False
        elif hand[i].endswith("seconds left to act"):
            hand[i] = False
        elif hand[i].endswith("seconds to reconnect"):
            hand[i] = False
        elif hand[i].endswith("was removed from the table for failing to post"):
            hand[i] = False
        elif "joins the table at seat " in hand[i]:
            hand[i] = False
        elif (hand[i].endswith("leaves the table")):
            hand[i] = False
        elif "is high with " in hand[i]:
            hand[i] = False
        elif hand[i].endswith("doesn't show hand"):
            hand[i] = False
        elif hand[i].endswith("is being treated as all-in"):
            hand[i] = False
        elif " adds $" in hand[i]:
            hand[i] = False
        elif hand[i] == "Betting is capped":
            hand[i] = False
        elif (hand[i].find(" said, \"")!=-1):
            hand[i] = False
        # sit-out lines: tourneys keep the seat lines (still needed for
        # ante/blind accounting), cash games drop them everywhere
        if isTourney and not hand[i] == False:
            if (hand[i].endswith(" is sitting out") and (not hand[i].startswith("Seat "))):
                hand[i] = False
        elif hand[i]:
            if (hand[i].endswith(": sits out")):
                hand[i] = False
            elif (hand[i].endswith(" is sitting out")):
                hand[i] = False
    # python docs say this is identical to filter(None, list)
    # which removes all false items from the passed list (hand)
    hand = [line for line in hand if line]
    return hand
def float2int(string):
    """ takes a poker float (including , for thousand seperator) and
    converts it to an int (cents) """
    # Note that this automagically assumes US style currency formatters
    # remove ALL thousand separators: the original removed only the first
    # comma, so e.g. "1,234,567.89" crashed int()
    string = string.replace(",", "")
    pos = string.find(".")
    if pos != -1: #remove decimal point (assumes two decimal digits, i.e. cents)
        string = "%s%s" % (string[0:pos], string[pos+1:])
    result = int(string)
    if pos == -1: #no decimal point - was in full dollars - need to multiply with 100
        result *= 100
    return result
# substrings that identify a betting-action line
ActionLines = ( "calls $", ": calls ", "brings in for", "completes it to",
                "posts small blind", "posts the small blind", "posts big blind",
                "posts the big blind", "posts small & big blinds", "posts $",
                "posts a dead", "bets $", ": bets ", " raises")

def isActionLine(line):
    """Return True when the line describes a player action."""
    if line.endswith(("folds", "checks")) or line.startswith("Uncalled bet"):
        return True
    return any(marker in line for marker in ActionLines)
def isAlreadyInDB(db, gametypeID, siteHandNo):
    """Raise DuplicateError if this (gametype, site hand number) is already stored."""
    c = db.get_cursor()
    c.execute(db.sql.query['isAlreadyInDB'], (gametypeID, siteHandNo))
    result = c.fetchall()
    if len(result) >= 1:
        raise DuplicateError ("dupl")
def isRebuyOrAddon(topline):
    """isRebuyOrAddon not implemented yet"""
    # stub: always reports no rebuy/add-on
    return False
#returns whether the passed topline indicates a tournament or not
def isTourney(topline):
    """Return True when the hand's header line is from a tournament."""
    return topline.find("Tournament") != -1
# substrings that identify a pot-win line
WinLines = ( "wins the pot", "ties for the ", "wins side pot", "wins the low main pot", "wins the high main pot",
             "wins the low",
             "wins the high pot", "wins the high side pot", "wins the main pot", "wins the side pot", "collected" )

def isWinLine(line):
    """ returns boolean whether the passed line is a win line """
    for marker in WinLines:
        if marker in line:
            return True
    return False
#returns the amount of cash/chips put into the put in the given action line
def parseActionAmount(line, atype, isTourney):
    """Extract the amount (cents for cash games, chips for tourneys) from an action line."""
    #if (line.endswith(" and is all-in")):
    #    line=line[:-14]
    #elif (line.endswith(", and is all in")):
    #    line=line[:-15]
    #ideally we should recognise this as an all-in if category is capXl
    if line.endswith(", and is capped"):
        line=line[:-15]
    if line.endswith(" and is capped"):
        line=line[:-14]
    if atype == "fold" or atype == "check":
        amount = 0
    elif atype == "unbet":
        # uncalled bet: amount sits inside "($...)" or "(...)"
        pos1 = line.find("$") + 1
        if pos1 == 0:
            pos1 = line.find("(") + 1
        pos2 = line.find(")")
        amount = float2int(line[pos1:pos2])
    elif atype == "bet" and ": raises $" in line and "to $" in line:
        # raises are recorded as the "to $X" total
        pos = line.find("to $")+4
        amount = float2int(line[pos:])
    else:
        if not isTourney:
            pos = line.rfind("$")+1
            #print "parseActionAmount, line:", line, "line[pos:]:", line[pos:]
            amount = float2int(line[pos:])
        else:
            #print "line:"+line+"EOL"
            # tourney amounts are plain chip counts after the last space
            pos = line.rfind(" ")+1
            #print "pos:",pos
            #print "pos of 20:", line.find("20")
            amount = int(line[pos:])
    if atype == "unbet":
        # uncalled bets come back to the player -> negative contribution
        amount *= -1
    return amount
#end def parseActionAmount
#doesnt return anything, simply changes the passed arrays action_types and
# action_amounts. For stud this expects numeric streets (3-7), for
# holdem/omaha it expects predeal, preflop, flop, turn or river
def parseActionLine(base, isTourney, line, street, playerIDs, names, action_types, allIns, action_amounts, actionNos, actionTypeByNo):
    """Parse one action line into the per-street/per-player action arrays."""
    # map flop-game street names to array indices
    if street == "predeal" or street == "preflop":
        street = 0
    elif street == "flop":
        street = 1
    elif street == "turn":
        street = 2
    elif street == "river":
        street = 3
    # actions are numbered globally across players within a street;
    # find the next free number
    nextActionNo = 0
    for player in xrange(len(actionNos[street])):
        for count in xrange(len(actionNos[street][player])):
            if actionNos[street][player][count]>=nextActionNo:
                nextActionNo=actionNos[street][player][count]+1
    (line, allIn) = goesAllInOnThisLine(line)
    atype = parseActionType(line)
    playerno = recognisePlayerNo(line, names, atype)
    for m in names: # Remove the name from the line, just in case the name holds the $ or eur sign
        if line.find(m) > -1:
            _line = line.replace(m,'')
            break
    else:
        # for-else: no name matched, keep the line untouched
        _line = line
    amount = parseActionAmount(_line, atype, isTourney)
    action_types[street][playerno].append(atype)
    allIns[street][playerno].append(allIn)
    action_amounts[street][playerno].append(amount)
    actionNos[street][playerno].append(nextActionNo)
    tmp=(playerIDs[playerno], atype)
    actionTypeByNo[street].append(tmp)
def goesAllInOnThisLine(line):
    """returns whether the player went all-in on this line and removes the all-in text from the line."""
    for suffix in (" and is all-in", ", and is all in"):
        if line.endswith(suffix):
            return (line[:-len(suffix)], True)
    return (line, False)
#returns the action type code (see table design) of the given action line
# marker substring -> action-type code
ActionTypes = { 'brings in for' :"blind",
                ' posts $' :"blind",
                ' posts a dead ' :"blind",
                ' posts the small blind of $' :"blind",
                ': posts big blind ' :"blind",
                ': posts small blind ' :"blind",
                ' posts the big blind of $' :"blind",
                ': posts small & big blinds $' :"blind",
                ': posts small blind $' :"blind",
                'calls' :"call",
                'completes it to' :"bet",
                ' bets' :"bet",
                ' raises' :"bet"
              }

def parseActionType(line):
    """Map an action line to its action-type code ("fold", "check", "blind", ...)."""
    if line.startswith("Uncalled bet"):
        return "unbet"
    if line.endswith(" folds"):
        return "fold"
    if line.endswith(" checks"):
        return "check"
    for marker, code in ActionTypes.items():
        if marker in line:
            return code
    raise FpdbError ("failed to recognise actiontype in parseActionLine in: "+line)
#parses the ante out of the given line and checks which player paid it, updates antes accordingly.
def parseAnteLine(line, isTourney, names, antes):
    """Add the ante posted on this line to the matching player's running total."""
    for i, name in enumerate(names):
        # names are unicode, the raw line is a byte string (Python 2) --
        # encode with the locale to compare
        if line.startswith(name.encode(LOCALE_ENCODING)):
            pos = line.rfind("$") + 1
            if not isTourney:
                antes[i] += float2int(line[pos:])
            else:
                if "all-in" not in line:
                    pos = line.rfind(" ") + 1
                    antes[i] += int(line[pos:])
                else:
                    # "... posts the ante N and is all-in": amount follows "ante"
                    pos1 = line.rfind("ante") + 5
                    pos2 = line.find(" ", pos1)
                    antes[i] += int(line[pos1:pos2])
#returns the buyin of a tourney in cents
def parseBuyin(topline):
    """Parse the tournament buyin (excluding the fee) out of the header line."""
    pos1 = topline.find("$")+1
    if pos1 != 0:
        pos2 = topline.find("+")
    else:
        # no "$": assume a euro buyin; "€" spans several bytes here, hence
        # the +3 offset -- presumably UTF-8 input, TODO confirm
        pos1 = topline.find("€")+3
        pos2 = topline.find("+")
    return float2int(topline[pos1:pos2])
#parses a card line and changes the passed arrays accordingly
#todo: reorganise this messy method
def parseCardLine(category, street, line, names, cardValues, cardSuits, boardValues, boardSuits):
    """Parse hole-card and board-card lines into the card arrays.

    Cards appear as two-character value+suit pairs inside "[...]",
    separated by spaces (so consecutive cards start 3 characters apart).
    """
    if line.startswith("Dealt to") or " shows [" in line or "mucked [" in line:
        playerNo = recognisePlayerNo(line, names, "card") #anything but unbet will be ok for that string
        pos = line.rfind("[")+1
        if category == "holdem":
            # two hole cards at pos and pos+3
            for i in (pos, pos+3):
                cardValues[playerNo].append(line[i:i+1])
                cardSuits[playerNo].append(line[i+1:i+2])
            if len(cardValues[playerNo]) != 2:
                # cards seen twice (dealt + shown): deduplicate if identical
                if (cardValues[playerNo][0] == cardValues[playerNo][2] and
                    cardSuits[playerNo][1] == cardSuits[playerNo][3]):
                    cardValues[playerNo]=cardValues[playerNo][0:2]
                    cardSuits[playerNo]=cardSuits[playerNo][0:2]
                else:
                    print "line:",line,"cardValues[playerNo]:",cardValues[playerNo]
                    raise FpdbError("read too many/too few holecards in parseCardLine")
        elif category == "omahahi" or category == "omahahilo":
            # four hole cards
            for i in (pos, pos+3, pos+6, pos+9):
                cardValues[playerNo].append(line[i:i+1])
                cardSuits[playerNo].append(line[i+1:i+2])
            if (len(cardValues[playerNo])!=4):
                if (cardValues[playerNo][0] == cardValues[playerNo][4] and
                    cardSuits[playerNo][3] == cardSuits[playerNo][7]): #two tests will do
                    cardValues[playerNo] = cardValues[playerNo][0:4]
                    cardSuits[playerNo] = cardSuits[playerNo][0:4]
                else:
                    print "line:",line,"cardValues[playerNo]:",cardValues[playerNo]
                    raise FpdbError("read too many/too few holecards in parseCardLine")
        elif category == "razz" or category == "studhi" or category == "studhilo":
            if "shows" not in line and "mucked" not in line:
                #print "parseCardLine(in stud if), street:", street
                if line[pos+2]=="]": #-> not (hero and 3rd street)
                    # a single upcard for this street
                    cardValues[playerNo][street+2]=line[pos:pos+1]
                    cardSuits[playerNo][street+2]=line[pos+1:pos+2]
                else:
                    #print "hero card1:", line[pos:pos+2], "hero card2:", line[pos+3:pos+5], "hero card3:", line[pos+6:pos+8],
                    # hero on 3rd street: three cards in one line
                    cardValues[playerNo][street]=line[pos:pos+1]
                    cardSuits[playerNo][street]=line[pos+1:pos+2]
                    cardValues[playerNo][street+1]=line[pos+3:pos+4]
                    cardSuits[playerNo][street+1]=line[pos+4:pos+5]
                    cardValues[playerNo][street+2]=line[pos+6:pos+7]
                    cardSuits[playerNo][street+2]=line[pos+7:pos+8]
            else:
                #print "parseCardLine(in stud else), street:", street
                # showdown line: the two downcards, plus the 7th card on street 4
                cardValues[playerNo][0]=line[pos:pos+1]
                cardSuits[playerNo][0]=line[pos+1:pos+2]
                pos+=3
                cardValues[playerNo][1]=line[pos:pos+1]
                cardSuits[playerNo][1]=line[pos+1:pos+2]
                if street==4:
                    pos=pos=line.rfind("]")-2
                    cardValues[playerNo][6]=line[pos:pos+1]
                    cardSuits[playerNo][6]=line[pos+1:pos+2]
                #print "cardValues:", cardValues
                #print "cardSuits:", cardSuits
        else:
            print "line:",line,"street:",street
            raise FpdbError("invalid category")
        #print "end of parseCardLine/playercards, cardValues:",cardValues
    elif (line.startswith("*** FLOP ***")):
        pos=line.find("[")+1
        for i in (pos, pos+3, pos+6):
            boardValues.append(line[i:i+1])
            boardSuits.append(line[i+1:i+2])
        #print boardValues
    elif (line.startswith("*** TURN ***") or line.startswith("*** RIVER ***")):
        # the new card is inside the second "[...]" group
        pos=line.find("[")+1
        pos=line.find("[", pos+1)+1
        boardValues.append(line[pos:pos+1])
        boardSuits.append(line[pos+1:pos+2])
        #print boardValues
    else:
        raise FpdbError ("unrecognised line:"+line)
def parseCashesAndSeatNos(lines):
    """parses the startCashes and seatNos of each player out of the given lines and returns them as a dictionary of two arrays"""
    cashes = []
    seatNos = []
    for i in xrange (len(lines)):
        # "Seat N: name ($X in chips)" -> seat number between "Seat " and ":"
        pos2=lines[i].find(":")
        seatNos.append(int(lines[i][5:pos2]))
        pos1=lines[i].rfind("($")+2
        if pos1==1: #for tourneys - it's 1 instead of -1 due to adding 2 above
            pos1=lines[i].rfind("(")+1
        pos2=lines[i].find(" in chips")
        cashes.append(float2int(lines[i][pos1:pos2]))
    return {'startCashes':cashes, 'seatNos':seatNos}
#returns the buyin of a tourney in cents
def parseFee(topline):
    """Parse the tournament fee (the second currency amount) out of the header line."""
    pos1 = topline.find("$")+1
    if pos1 != 0:
        # fee is the second "$" amount on the line
        pos1 = topline.find("$", pos1)+1
        pos2 = topline.find(" ", pos1)
    else:
        # euro headers: "€" spans several bytes, hence the +3 offsets --
        # presumably UTF-8 input, TODO confirm
        pos1 = topline.find("€")+3
        pos1 = topline.find("€", pos1)+3
        pos2 = topline.find(" ", pos1)
    return float2int(topline[pos1:pos2])
#returns a datetime object with the starttime indicated in the given topline
def parseHandStartTime(topline):
    """Parse the hand's start time out of the hand-history header line.

    Single-digit hours (" 9:33:31") are zero-padded first; times not
    marked UTC are assumed to be US ET and shifted +5 hours.
    """
    # convert e.g. " 9:13:35" to " 09:13:35"
    for digit in range(10):
        pos = topline.find(" %d:" % digit)
        if pos != -1:
            topline = "%s0%s" % (topline[0:pos+1], topline[pos+1:])
            break
    isUTC = "UTC" in topline
    if isUTC:
        tmp = topline[topline.find("-")+2:topline.find("UTC")]
    else:
        tmp = topline
    tmp = tmp[tmp.find("-")+2:]
    #Need to match either
    # 2008/09/07 06:23:14 ET or
    # 2008/08/17 - 01:14:43 (ET) or
    # 2008/11/12 9:33:31 CET [2008/11/12 3:33:31 ET]
    rexx = '(?P<YEAR>[0-9]{4})\/(?P<MON>[0-9]{2})\/(?P<DAY>[0-9]{2})[\- ]+(?P<HR>[0-9]+):(?P<MIN>[0-9]+):(?P<SEC>[0-9]+)'
    m = re.search(rexx, tmp)
    result = datetime.datetime(int(m.group('YEAR')), int(m.group('MON')),
                               int(m.group('DAY')), int(m.group('HR')),
                               int(m.group('MIN')), int(m.group('SEC')))
    if not isUTC: #these use US ET
        result += datetime.timedelta(hours=5)
    return result
#findName parses the player name out of a single seat line; parseNames applies it to each given line and returns the names as an array
def findName(line):
    """extracts the player name from one seat line, e.g. "Seat 2: name ($1 in chips)" """
    nameStart = line.find(":") + 2
    nameEnd = line.rfind("(") - 1
    return unicode(line[nameStart:nameEnd], LOCALE_ENCODING)
def parseNames(lines):
    """returns the player name parsed out of each of the given seat lines as an array"""
    names = []
    for line in lines:
        names.append(findName(line))
    return names
def parsePositions(hand, names):
    """Returns an array parallel to names giving each player's position:
    'S'=small blind, 'B'=big blind, 0=button, 1=cutoff, then increasing
    ints moving away from the button; 9 marks a dead/skipped seat.
    Raises FpdbError if any seat is left unassigned."""
    positions = [-1 for i in names]
    sb, bb = -1, -1
    # find the lines where the LIVE blinds were posted (dead blinds don't count)
    for line in hand:
        if sb == -1 and "small blind" in line and "dead small blind" not in line:
            sb = line
        if bb == -1 and "big blind" in line and "dead big blind" not in line:
            bb = line
    #identify blinds - convert the posting lines into player indices
    #print "parsePositions before recognising sb/bb. names:",names
    sbExists = True
    if sb != -1:
        sb = recognisePlayerNo(sb, names, "bet")
    else:
        sbExists = False
    if bb != -1:
        bb = recognisePlayerNo(bb, names, "bet")
    # print "sb = ", sb, "bb = ", bb
    if bb == sb: # if big and small are same, then don't duplicate the small
        sbExists = False
        sb = -1
    #write blinds into array
    if sbExists:
        positions[sb]="S"
    positions[bb]="B"
    #fill up rest of array: walk right-to-left from the seat before the SB,
    #numbering seats by distance from the button (0=button, 1=cutoff, ...)
    arraypos = sb - 1 if sbExists else bb - 1
    distFromBtn=0
    while arraypos >= 0 and arraypos != bb:
        #print "parsePositions first while, arraypos:",arraypos,"positions:",positions
        positions[arraypos] = distFromBtn
        arraypos -= 1
        distFromBtn += 1
    # eric - this takes into account dead seats between blinds
    if sbExists:
        i = bb - 1
        while positions[i] < 0 and i != sb:
            positions[i] = 9
            i -= 1
    ### RHH - Changed to set the null seats before BB to "9"
    i = sb - 1 if sbExists else bb - 1
    while positions[i] < 0:
        positions[i]=9
        i-=1
    # number the seats after the blinds (wrapping around from the end of the table)
    arraypos=len(names)-1
    if (bb!=0 or (bb==0 and sbExists==False) or (bb == 1 and sb != arraypos) ):
        while (arraypos > bb and arraypos > sb):
            positions[arraypos] = distFromBtn
            arraypos -= 1
            distFromBtn += 1
    if any(p == -1 for p in positions):
        print "parsePositions names:",names
        print "result:",positions
        raise FpdbError ("failed to read positions")
    # print str(positions), "\n"
    return positions
#simply parses the rake amount and returns it as an int
def parseRake(line):
    """simply parses the rake amount out of the summary line and returns it as an int (cents)"""
    amountStart = line.find("Rake") + 6
    return float2int(line[amountStart:])
def parseSiteHandNo(topline):
    """returns the hand no assigned by the poker site (the text between '#' and the first ':')"""
    start = topline.find("#") + 1
    end = topline.find(":")
    return topline[start:end]
def parseTableLine(base, line):
    """returns a dictionary with maxSeats and tableName parsed from a
    "Table 'name' N-max ..." line (base is currently unused here)"""
    nameStart = line.find('\'') + 1
    nameEnd = line.find('\'', nameStart)
    #print "table:",line[nameStart:nameEnd]
    seatsStart = nameEnd + 2
    seatsEnd = line.find("-max")
    #print "seats:",line[seatsStart:seatsEnd]
    return {'maxSeats': int(line[seatsStart:seatsEnd]), 'tableName': line[nameStart:nameEnd]}
#end def parseTableLine
#returns the tourney no assigned by the poker site
def parseTourneyNo(topline):
    """returns the tournament number from "Tournament #NNN," on the given topline"""
    start = topline.find("Tournament #") + 12
    end = topline.find(",", start)
    return topline[start:end]
#parses a win/collect line. manipulates the passed array winnings, no explicit return
def parseWinLine(line, names, winnings, isTourney):
    """parses a win/collect line. manipulates the passed array winnings, no explicit return"""
    for idx, name in enumerate(names):
        encoded = name.encode(LOCALE_ENCODING)
        if not line.startswith(encoded):
            continue
        if isTourney:
            # tourney chips are plain integers following "collected "
            start = line.rfind("collected ") + 10
            end = line.find(" ", start)
            winnings[idx] += int(line[start:end])
        else:
            # ring games show a $ amount
            start = line.rfind("$") + 1
            end = line.find(" ", start)
            winnings[idx] += float2int(line[start:end])
#returns the category (as per database) string for the given line
def recogniseCategory(line):
    """returns the category (as per database) string for the given line"""
    if "Razz" in line:
        return "razz"
    if "Hold'em" in line:
        return "holdem"
    # hi/lo split variants are flagged either way on the topline
    hiLoSplit = "Hi/Lo" in line or "H/L" in line
    if "Omaha" in line:
        return "omahahilo" if hiLoSplit else "omahahi"
    if "Stud" in line:
        return "studhilo" if hiLoSplit else "studhi"
    raise FpdbError("failed to recognise category, line:"+line)
#returns the int for the gametype_id for the given line
def recogniseGametypeID(backend, db, cursor, topline, smallBlindLine, site_id, category, isTourney):#todo: this method is messy
    """Returns the Gametypes.id matching the stakes/limit parsed from topline,
    inserting a new Gametypes row when no match exists yet.
    NOTE: for NL/PL games, small_bet/big_bet actually carry the small/big blind."""
    #if (topline.find("HORSE")!=-1):
    #    raise FpdbError("recogniseGametypeID: HORSE is not yet supported.")
    #note: the below variable names small_bet and big_bet are misleading, in NL/PL they mean small/big blind
    if isTourney:
        type = "tour"
        pos1 = topline.find("(")+1
        # skip a leading game-name abbreviation like "(HORSE)" to reach the stakes parens
        if(topline[pos1] == "H" or topline[pos1] == "O" or
           topline[pos1] == "R" or topline[pos1]=="S" or
           topline[pos1+2] == "C"):
            pos1 = topline.find("(", pos1)+1
        pos2 = topline.find("/", pos1)
        small_bet = int(topline[pos1:pos2])
    else:
        type = "ring"
        pos1 = topline.find("$")+1
        pos2 = topline.find("/$")
        small_bet = float2int(topline[pos1:pos2])
    # advance past "/$" (ring) or "/" (tourney) to the big bet/blind
    pos1 = pos2+2
    if isTourney:
        pos1 -= 1
    pos2 = topline.find(")")
    if pos2 <= pos1:
        pos2 = topline.find(")", pos1)
    if isTourney:
        big_bet = int(topline[pos1:pos2])
    else:
        big_bet = float2int(topline[pos1:pos2])
    # "cn"/"cp" are capped no-limit/pot-limit variants
    if 'No Limit' in topline:
        limit_type = "nl" if 'Cap No' not in topline else "cn"
    elif 'Pot Limit' in topline:
        limit_type = "pl" if 'Cap Pot' not in topline else "cp"
    else:
        limit_type = "fl"
    #print "recogniseGametypeID small_bet/blind:",small_bet,"big bet/blind:", big_bet,"limit type:",limit_type
    if limit_type == "fl":
        cursor.execute(db.sql.query['getGametypeFL'], (site_id, type, category,
                                                       limit_type, small_bet,
                                                       big_bet))
    else:
        cursor.execute(db.sql.query['getGametypeNL'], (site_id, type, category,
                                                       limit_type, small_bet,
                                                       big_bet))
    result = cursor.fetchone()
    #print "recgt1 result=",result
    #ret=result[0]
    #print "recgt1 ret=",ret
    #print "tried SELECTing gametypes.id, result:",result
    # len(None) raises TypeError -> no matching gametype yet, so insert one
    try:
        len(result)
    except TypeError:
        if category=="holdem" or category=="omahahi" or category=="omahahilo":
            base="hold"
        else:
            base="stud"
        if category=="holdem" or category=="omahahi" or category=="studhi":
            hiLo='h'
        elif category=="razz":
            hiLo='l'
        else:
            hiLo='s'
        if (limit_type=="fl"):
            # fixed limit: big blind equals the small bet; small blind comes from the posting line
            big_blind=small_bet
            if base=="hold":
                if smallBlindLine==topline:
                    raise FpdbError("invalid small blind line")
                else:
                    # strip any trailing all-in annotation before reading the amount
                    if (smallBlindLine.endswith(" and is all-in")):
                        smallBlindLine=smallBlindLine[:-14]
                    elif (smallBlindLine.endswith(", and is all in")):
                        smallBlindLine=smallBlindLine[:-15]
                    if isTourney:
                        pos=smallBlindLine.rfind(" ")+1
                        small_blind=int(smallBlindLine[pos:])
                    else:
                        pos=smallBlindLine.rfind("$")+1
                        small_blind=float2int(smallBlindLine[pos:])
            else:
                small_blind=0
            result = db.insertGameTypes( (site_id, type, base, category, limit_type, hiLo
                                         ,small_blind, big_blind, small_bet, big_bet) )
            #cursor.execute ("SELECT id FROM Gametypes WHERE siteId=%s AND type=%s AND category=%s
            #AND limitType=%s AND smallBet=%s AND bigBet=%s", (site_id, type, category, limit_type, small_bet, big_bet))
        else:
            result = db.insertGameTypes( (site_id, type, base, category, limit_type, hiLo
                                         ,small_bet, big_bet, 0, 0) )#remember, for these bet means blind
            #cursor.execute ("SELECT id FROM Gametypes WHERE siteId=%s AND type=%s AND category=%s
            #AND limitType=%s AND smallBlind=%s AND bigBlind=%s", (site_id, type, category, limit_type, small_bet, big_bet))
    return result[0]
#end def recogniseGametypeID
def recogniseTourneyTypeId(db, siteId, tourneySiteId, buyin, fee, knockout, rebuyOrAddon):
    """Returns the TourneyTypes.id for the given tourney characteristics,
    inserting a new TourneyTypes row if none exists; returns -1 on failure."""
    ret = -1
    cursor = db.get_cursor()
    # First we try to find the tourney itself (by its tourneySiteId) in case it has already been inserted before (by a summary file for instance)
    # The reason is that some tourneys may not be identified correctly in the HH toplines (especially Buy-In and Fee which are used to search/create the TourneyTypeId)
    #TODO: When the summary file will be dumped to BD, if the tourney is already in, Buy-In/Fee may need an update (e.g. creation of a new type and link to the Tourney)
    cursor.execute (db.sql.query['getTourneyTypeIdByTourneyNo'].replace('%s', db.sql.query['placeholder']), (tourneySiteId, siteId))
    result = cursor.fetchone()
    # len(None) raises TypeError when the tourney is not in the DB yet
    try:
        len(result)
        ret = result[0]
    except:
        # tourney unknown - look up the type by its characteristics instead
        cursor.execute( """SELECT id FROM TourneyTypes
                        WHERE siteId=%s AND buyin=%s AND fee=%s
                        AND knockout=%s AND rebuyOrAddon=%s""".replace('%s', db.sql.query['placeholder'])
                      , (siteId, buyin, fee, knockout, rebuyOrAddon) )
        result = cursor.fetchone()
        #print "tried selecting tourneytypes.id, result:", result
        try:
            len(result)
            ret = result[0]
        except TypeError:#this means we need to create a new entry
            #print "insert new tourneytype record ..."
            try:
                cursor.execute( """INSERT INTO TourneyTypes (siteId, buyin, fee, knockout, rebuyOrAddon)
                                VALUES (%s, %s, %s, %s, %s)""".replace('%s', db.sql.query['placeholder'])
                              , (siteId, buyin, fee, knockout, rebuyOrAddon) )
                ret = db.get_last_insert_id(cursor)
            except:
                #print "maybe tourneytype was created since select, try selecting again ..."
                # a concurrent import may have inserted the same type; retry the select
                cursor.execute( """SELECT id FROM TourneyTypes
                                WHERE siteId=%s AND buyin=%s AND fee=%s
                                AND knockout=%s AND rebuyOrAddon=%s""".replace('%s', db.sql.query['placeholder'])
                              , (siteId, buyin, fee, knockout, rebuyOrAddon) )
                result = cursor.fetchone()
                try:
                    len(result)
                    ret = result[0]
                except:
                    print "Failed to find or insert TourneyTypes record"
                    ret = -1 # failed to find or insert record
                #print "tried selecting tourneytypes.id again, result:", result
    #print "recogniseTourneyTypeId: returning", ret
    return ret
#end def recogniseTourneyTypeId
#recognises the name in the given line and returns its array position in the given array
def recognisePlayerNo(line, names, atype):
    """recognises the name in the given line and returns its array position in the given array"""
    for idx, name in enumerate(names):
        encoded = name.encode(LOCALE_ENCODING)
        if atype == "unbet":
            # unbet/returned lines end with the player name
            if line.endswith(encoded):
                return idx
        elif line.startswith("Dealt to "):
            # name follows the 9-char "Dealt to " prefix
            if line[9:].startswith(encoded):
                return idx
        elif line.startswith("Seat "):
            # the seat number can be one or two digits wide
            offset = 9 if line.startswith("Seat 10") else 8
            if line[offset:].startswith(encoded):
                return idx
        else:
            if line.startswith(encoded):
                return idx
    #if we're here we mustve failed
    raise FpdbError ("failed to recognise player in: "+line+" atype:"+atype)
#end def recognisePlayerNo
#removes trailing \n from the given array
def removeTrailingEOL(arr):
    """removes one trailing newline from each element of the given array, in place, and returns the array"""
    for idx, entry in enumerate(arr):
        if entry.endswith("\n"):
            arr[idx] = entry[:-1]
    return arr
#end def removeTrailingEOL
#splits the rake according to the proportion of pot won. manipulates the second passed array.
def splitRake(winnings, rakes, totalRake):
    """splits the rake according to the proportion of pot won. manipulates the second passed array."""
    winners = [i for i in range(len(winnings)) if winnings[i] != 0]
    if len(winners) == 1:
        # a single winner pays the whole rake
        rakes[winners[0]] = totalRake
    else:
        # split pot: each winner pays rake proportional to their share
        potTotal = float(sum(winnings[i] for i in winners))
        for i in winners:
            rakes[i] = totalRake * (winnings[i] / potTotal)
#end def splitRake
def generateHudCacheData(player_ids, base, category, action_types, allIns, actionTypeByNo
                        ,winnings, totalWinnings, positions, actionTypes, actionAmounts, antes):
    """calculates data for the HUD during import. IMPORTANT: if you change this method make
    sure to also change the following storage method and table_viewer.prepare_data if necessary

    Returns a dict of per-hand summary counters (playersVpi, street*Raises, ...)
    and per-player arrays (VPIP, aggression, streets seen, steal and fold-to-steal,
    continuation bets, check-call-raise, profit) keyed by their storage column names.
    """
    #print "generateHudCacheData, len(player_ids)=", len(player_ids)
    #setup subarrays of the result dictionary.
    street0VPI=[]
    street0Aggr=[]
    street0_3BChance=[]
    street0_3BDone=[]
    street1Seen=[]
    street2Seen=[]
    street3Seen=[]
    street4Seen=[]
    sawShowdown=[]
    street1Aggr=[]
    street2Aggr=[]
    street3Aggr=[]
    street4Aggr=[]
    otherRaisedStreet1=[]
    otherRaisedStreet2=[]
    otherRaisedStreet3=[]
    otherRaisedStreet4=[]
    foldToOtherRaisedStreet1=[]
    foldToOtherRaisedStreet2=[]
    foldToOtherRaisedStreet3=[]
    foldToOtherRaisedStreet4=[]
    wonWhenSeenStreet1=[]
    wonAtSD=[]
    stealAttemptChance=[]
    stealAttempted=[]
    hudDataPositions=[]
    street0Calls=[]
    street1Calls=[]
    street2Calls=[]
    street3Calls=[]
    street4Calls=[]
    street0Bets=[]
    street1Bets=[]
    street2Bets=[]
    street3Bets=[]
    street4Bets=[]
    #street0Raises=[]
    #street1Raises=[]
    #street2Raises=[]
    #street3Raises=[]
    #street4Raises=[]
    # Summary figures for hand table:
    result={}
    result['playersVpi']=0
    result['playersAtStreet1']=0
    result['playersAtStreet2']=0
    result['playersAtStreet3']=0
    result['playersAtStreet4']=0
    result['playersAtShowdown']=0
    result['street0Raises']=0
    result['street1Raises']=0
    result['street2Raises']=0
    result['street3Raises']=0
    result['street4Raises']=0
    result['street1Pot']=0
    result['street2Pot']=0
    result['street3Pot']=0
    result['street4Pot']=0
    result['showdownPot']=0
    # locate the first preflop raiser and first preflop caller (by action no.)
    firstPfRaiseByNo=-1
    firstPfRaiserId=-1
    firstPfRaiserNo=-1
    firstPfCallByNo=-1
    firstPfCallerId=-1
    for i, action in enumerate(actionTypeByNo[0]):
        if action[1] == "bet":
            firstPfRaiseByNo = i
            firstPfRaiserId = action[0]
            for j, pid in enumerate(player_ids):
                if pid == firstPfRaiserId:
                    firstPfRaiserNo = j
                    break
            break
    for i, action in enumerate(actionTypeByNo[0]):
        if action[1] == "call":
            firstPfCallByNo = i
            firstPfCallerId = action[0]
            break
    # first voluntary player: the raiser if they acted before any caller
    firstPlayId = firstPfCallerId
    if firstPfRaiseByNo != -1:  # was the removed-in-Py3 "<>" operator
        if firstPfRaiseByNo < firstPfCallByNo or firstPfCallByNo == -1:
            firstPlayId = firstPfRaiserId
    # map the key hold'em positions to player ids for the steal logic below
    cutoffId=-1
    buttonId=-1
    sbId=-1
    bbId=-1
    if base=="hold":
        for player, pos in enumerate(positions):
            if pos == 1:
                cutoffId = player_ids[player]
            if pos == 0:
                buttonId = player_ids[player]
            if pos == 'S':
                sbId = player_ids[player]
            if pos == 'B':
                bbId = player_ids[player]
    someoneStole=False
    #run a loop for each player preparing the actual values that will be commited to SQL
    for player in xrange(len(player_ids)):
        #set default values
        myStreet0VPI=False
        myStreet0Aggr=False
        myStreet0_3BChance=False
        myStreet0_3BDone=False
        myStreet1Seen=False
        myStreet2Seen=False
        myStreet3Seen=False
        myStreet4Seen=False
        mySawShowdown=False
        myStreet1Aggr=False
        myStreet2Aggr=False
        myStreet3Aggr=False
        myStreet4Aggr=False
        myOtherRaisedStreet1=False
        myOtherRaisedStreet2=False
        myOtherRaisedStreet3=False
        myOtherRaisedStreet4=False
        myFoldToOtherRaisedStreet1=False
        myFoldToOtherRaisedStreet2=False
        myFoldToOtherRaisedStreet3=False
        myFoldToOtherRaisedStreet4=False
        myWonWhenSeenStreet1=0.0
        myWonAtSD=0.0
        myStealAttemptChance=False
        myStealAttempted=False
        myStreet0Calls=0
        myStreet1Calls=0
        myStreet2Calls=0
        myStreet3Calls=0
        myStreet4Calls=0
        myStreet0Bets=0
        myStreet1Bets=0
        myStreet2Bets=0
        myStreet3Bets=0
        myStreet4Bets=0
        #myStreet0Raises=0
        #myStreet1Raises=0
        #myStreet2Raises=0
        #myStreet3Raises=0
        #myStreet4Raises=0
        #calculate VPIP and PFR
        street=0
        heroPfRaiseCount=0
        for currentAction in action_types[street][player]: # finally individual actions
            if currentAction == "bet":
                myStreet0Aggr = True
            if currentAction == "bet" or currentAction == "call":
                myStreet0VPI = True
        if myStreet0VPI:
            result['playersVpi'] += 1
        myStreet0Calls = action_types[street][player].count('call')
        myStreet0Bets = action_types[street][player].count('bet')
        # street0Raises = action_types[street][player].count('raise')  bet count includes raises for now
        result['street0Raises'] += myStreet0Bets
        #PF3BChance and PF3B
        pfFold=-1
        pfRaise=-1
        if firstPfRaiseByNo != -1:
            for i, actionType in enumerate(actionTypeByNo[0]):
                if actionType[0] == player_ids[player]:
                    if actionType[1] == "bet" and pfRaise == -1 and i > firstPfRaiseByNo:
                        pfRaise = i
                    if actionType[1] == "fold" and pfFold == -1:
                        pfFold = i
            if pfFold == -1 or pfFold > firstPfRaiseByNo:
                myStreet0_3BChance = True
                if pfRaise > firstPfRaiseByNo:
                    myStreet0_3BDone = True
        #steal calculations
        if base=="hold":
            if len(player_ids)>=3: # no point otherwise  # was 5, use 3 to match pokertracker definition
                if positions[player]==1:
                    if firstPfRaiserId==player_ids[player] \
                    and (firstPfCallByNo==-1 or firstPfCallByNo>firstPfRaiseByNo):
                        myStealAttempted=True
                        myStealAttemptChance=True
                    if firstPlayId==cutoffId or firstPlayId==buttonId or firstPlayId==sbId or firstPlayId==bbId or firstPlayId==-1:
                        myStealAttemptChance=True
                if positions[player]==0:
                    if firstPfRaiserId==player_ids[player] \
                    and (firstPfCallByNo==-1 or firstPfCallByNo>firstPfRaiseByNo):
                        myStealAttempted=True
                        myStealAttemptChance=True
                    if firstPlayId==buttonId or firstPlayId==sbId or firstPlayId==bbId or firstPlayId==-1:
                        myStealAttemptChance=True
                if positions[player]=='S':
                    if firstPfRaiserId==player_ids[player] \
                    and (firstPfCallByNo==-1 or firstPfCallByNo>firstPfRaiseByNo):
                        myStealAttempted=True
                        myStealAttemptChance=True
                    if firstPlayId==sbId or firstPlayId==bbId or firstPlayId==-1:
                        myStealAttemptChance=True
                if positions[player]=='B':
                    pass
                if myStealAttempted:
                    someoneStole=True
        #calculate saw* values
        isAllIn = any(i for i in allIns[0][player])
        if isAllIn or len(action_types[1][player]) > 0:
            myStreet1Seen = True
            if not isAllIn:
                isAllIn = any(i for i in allIns[1][player])
            if isAllIn or len(action_types[2][player]) > 0:
                if all(actiontype != "fold" for actiontype in action_types[1][player]):
                    myStreet2Seen = True
                    if not isAllIn:
                        # BUGFIX: was assigned to misspelled 'isAllAin', so a street-2
                        # all-in never updated isAllIn for the street-3/4 checks below
                        isAllIn = any(i for i in allIns[2][player])
                    if isAllIn or len(action_types[3][player]) > 0:
                        if all(actiontype != "fold" for actiontype in action_types[2][player]):
                            myStreet3Seen = True
                            #print "base:", base
                            if base == "hold":
                                mySawShowdown = not any(actiontype == "fold" for actiontype in action_types[3][player])
                            else:
                                #print "in else"
                                if not isAllIn:
                                    isAllIn = any(i for i in allIns[3][player])
                                if isAllIn or len(action_types[4][player]) > 0:
                                    #print "in if"
                                    myStreet4Seen = True
                                    mySawShowdown = not any(actiontype == "fold" for actiontype in action_types[4][player])
        if myStreet1Seen:
            result['playersAtStreet1'] += 1
        if myStreet2Seen:
            result['playersAtStreet2'] += 1
        if myStreet3Seen:
            result['playersAtStreet3'] += 1
        if myStreet4Seen:
            result['playersAtStreet4'] += 1
        if mySawShowdown:
            result['playersAtShowdown'] += 1
        #flop stuff
        street = 1
        if myStreet1Seen:
            myStreet1Aggr = any(actiontype == "bet" for actiontype in action_types[street][player])
            myStreet1Calls = action_types[street][player].count('call')
            myStreet1Bets = action_types[street][player].count('bet')
            # street1Raises = action_types[street][player].count('raise')  bet count includes raises for now
            result['street1Raises'] += myStreet1Bets
            for otherPlayer in xrange(len(player_ids)):
                if player == otherPlayer:
                    pass
                else:
                    for countOther in xrange(len(action_types[street][otherPlayer])):
                        if action_types[street][otherPlayer][countOther] == "bet":
                            myOtherRaisedStreet1 = True
                            for countOtherFold in xrange(len(action_types[street][player])):
                                if action_types[street][player][countOtherFold] == "fold":
                                    myFoldToOtherRaisedStreet1 = True
        #turn stuff - copy of flop with different vars
        street = 2
        if myStreet2Seen:
            myStreet2Aggr = any(actiontype == "bet" for actiontype in action_types[street][player])
            myStreet2Calls = action_types[street][player].count('call')
            myStreet2Bets = action_types[street][player].count('bet')
            # street2Raises = action_types[street][player].count('raise')  bet count includes raises for now
            result['street2Raises'] += myStreet2Bets
            for otherPlayer in xrange(len(player_ids)):
                if player == otherPlayer:
                    pass
                else:
                    for countOther in xrange(len(action_types[street][otherPlayer])):
                        if action_types[street][otherPlayer][countOther] == "bet":
                            myOtherRaisedStreet2 = True
                            for countOtherFold in xrange(len(action_types[street][player])):
                                if action_types[street][player][countOtherFold] == "fold":
                                    myFoldToOtherRaisedStreet2 = True
        #river stuff - copy of flop with different vars
        street = 3
        if myStreet3Seen:
            myStreet3Aggr = any(actiontype == "bet" for actiontype in action_types[street][player])
            myStreet3Calls = action_types[street][player].count('call')
            myStreet3Bets = action_types[street][player].count('bet')
            # street3Raises = action_types[street][player].count('raise')  bet count includes raises for now
            result['street3Raises'] += myStreet3Bets
            for otherPlayer in xrange(len(player_ids)):
                if player == otherPlayer:
                    pass
                else:
                    for countOther in xrange(len(action_types[street][otherPlayer])):
                        if action_types[street][otherPlayer][countOther] == "bet":
                            myOtherRaisedStreet3 = True
                            for countOtherFold in xrange(len(action_types[street][player])):
                                if action_types[street][player][countOtherFold] == "fold":
                                    myFoldToOtherRaisedStreet3 = True
        #stud river stuff - copy of flop with different vars
        street = 4
        if myStreet4Seen:
            myStreet4Aggr = any(actiontype == "bet" for actiontype in action_types[street][player])
            myStreet4Calls = action_types[street][player].count('call')
            myStreet4Bets = action_types[street][player].count('bet')
            # street4Raises = action_types[street][player].count('raise')  bet count includes raises for now
            result['street4Raises'] += myStreet4Bets
            for otherPlayer in xrange(len(player_ids)):
                if player == otherPlayer:
                    pass
                else:
                    for countOther in xrange(len(action_types[street][otherPlayer])):
                        if action_types[street][otherPlayer][countOther] == "bet":
                            myOtherRaisedStreet4 = True
                            for countOtherFold in xrange(len(action_types[street][player])):
                                if action_types[street][player][countOtherFold] == "fold":
                                    myFoldToOtherRaisedStreet4 = True
        if winnings[player] != 0:
            if myStreet1Seen:
                myWonWhenSeenStreet1 = winnings[player] / float(totalWinnings)
                if mySawShowdown:
                    myWonAtSD = myWonWhenSeenStreet1
        #add each value to the appropriate array
        street0VPI.append(myStreet0VPI)
        street0Aggr.append(myStreet0Aggr)
        street0_3BChance.append(myStreet0_3BChance)
        street0_3BDone.append(myStreet0_3BDone)
        street1Seen.append(myStreet1Seen)
        street2Seen.append(myStreet2Seen)
        street3Seen.append(myStreet3Seen)
        street4Seen.append(myStreet4Seen)
        sawShowdown.append(mySawShowdown)
        street1Aggr.append(myStreet1Aggr)
        street2Aggr.append(myStreet2Aggr)
        street3Aggr.append(myStreet3Aggr)
        street4Aggr.append(myStreet4Aggr)
        otherRaisedStreet1.append(myOtherRaisedStreet1)
        otherRaisedStreet2.append(myOtherRaisedStreet2)
        otherRaisedStreet3.append(myOtherRaisedStreet3)
        otherRaisedStreet4.append(myOtherRaisedStreet4)
        foldToOtherRaisedStreet1.append(myFoldToOtherRaisedStreet1)
        foldToOtherRaisedStreet2.append(myFoldToOtherRaisedStreet2)
        foldToOtherRaisedStreet3.append(myFoldToOtherRaisedStreet3)
        foldToOtherRaisedStreet4.append(myFoldToOtherRaisedStreet4)
        wonWhenSeenStreet1.append(myWonWhenSeenStreet1)
        wonAtSD.append(myWonAtSD)
        stealAttemptChance.append(myStealAttemptChance)
        stealAttempted.append(myStealAttempted)
        if base=="hold":
            pos=positions[player]
            if pos=='B':
                hudDataPositions.append('B')
            elif pos=='S':
                hudDataPositions.append('S')
            elif pos==0:
                hudDataPositions.append('D')
            elif pos==1:
                hudDataPositions.append('C')
            elif pos>=2 and pos<=4:
                hudDataPositions.append('M')
            elif pos>=5 and pos<=8:
                hudDataPositions.append('E')
            ### RHH Added this elif to handle being a dead hand before the BB (pos==9)
            elif pos==9:
                hudDataPositions.append('X')
            else:
                raise FpdbError("invalid position")
        elif base=="stud":
            #todo: stud positions and steals
            pass
        street0Calls.append(myStreet0Calls)
        street1Calls.append(myStreet1Calls)
        street2Calls.append(myStreet2Calls)
        street3Calls.append(myStreet3Calls)
        street4Calls.append(myStreet4Calls)
        street0Bets.append(myStreet0Bets)
        street1Bets.append(myStreet1Bets)
        street2Bets.append(myStreet2Bets)
        street3Bets.append(myStreet3Bets)
        street4Bets.append(myStreet4Bets)
        #street0Raises.append(myStreet0Raises)
        #street1Raises.append(myStreet1Raises)
        #street2Raises.append(myStreet2Raises)
        #street3Raises.append(myStreet3Raises)
        #street4Raises.append(myStreet4Raises)
    #add each array to the to-be-returned dictionary
    result['street0VPI']=street0VPI
    result['street0Aggr']=street0Aggr
    result['street0_3BChance']=street0_3BChance
    result['street0_3BDone']=street0_3BDone
    result['street1Seen']=street1Seen
    result['street2Seen']=street2Seen
    result['street3Seen']=street3Seen
    result['street4Seen']=street4Seen
    result['sawShowdown']=sawShowdown
    result['street1Aggr']=street1Aggr
    result['otherRaisedStreet1']=otherRaisedStreet1
    result['foldToOtherRaisedStreet1']=foldToOtherRaisedStreet1
    result['street2Aggr']=street2Aggr
    result['otherRaisedStreet2']=otherRaisedStreet2
    result['foldToOtherRaisedStreet2']=foldToOtherRaisedStreet2
    result['street3Aggr']=street3Aggr
    result['otherRaisedStreet3']=otherRaisedStreet3
    result['foldToOtherRaisedStreet3']=foldToOtherRaisedStreet3
    result['street4Aggr']=street4Aggr
    result['otherRaisedStreet4']=otherRaisedStreet4
    result['foldToOtherRaisedStreet4']=foldToOtherRaisedStreet4
    result['wonWhenSeenStreet1']=wonWhenSeenStreet1
    result['wonAtSD']=wonAtSD
    result['stealAttemptChance']=stealAttemptChance
    result['stealAttempted']=stealAttempted
    result['street0Calls']=street0Calls
    result['street1Calls']=street1Calls
    result['street2Calls']=street2Calls
    result['street3Calls']=street3Calls
    result['street4Calls']=street4Calls
    result['street0Bets']=street0Bets
    result['street1Bets']=street1Bets
    result['street2Bets']=street2Bets
    result['street3Bets']=street3Bets
    result['street4Bets']=street4Bets
    #result['street0Raises']=street0Raises
    #result['street1Raises']=street1Raises
    #result['street2Raises']=street2Raises
    #result['street3Raises']=street3Raises
    #result['street4Raises']=street4Raises
    #now the various steal values
    foldBbToStealChance=[]
    foldedBbToSteal=[]
    foldSbToStealChance=[]
    foldedSbToSteal=[]
    for player in xrange(len(player_ids)):
        myFoldBbToStealChance=False
        myFoldedBbToSteal=False
        myFoldSbToStealChance=False
        myFoldedSbToSteal=False
        if base=="hold":
            if someoneStole and (positions[player]=='B' or positions[player]=='S') and firstPfRaiserId!=player_ids[player]:
                street=0
                for count in xrange(len(action_types[street][player])):#individual actions
                    if positions[player]=='B':
                        myFoldBbToStealChance=True
                        if action_types[street][player][count]=="fold":
                            myFoldedBbToSteal=True
                    if positions[player]=='S':
                        myFoldSbToStealChance=True
                        if action_types[street][player][count]=="fold":
                            myFoldedSbToSteal=True
        foldBbToStealChance.append(myFoldBbToStealChance)
        foldedBbToSteal.append(myFoldedBbToSteal)
        foldSbToStealChance.append(myFoldSbToStealChance)
        foldedSbToSteal.append(myFoldedSbToSteal)
    result['foldBbToStealChance']=foldBbToStealChance
    result['foldedBbToSteal']=foldedBbToSteal
    result['foldSbToStealChance']=foldSbToStealChance
    result['foldedSbToSteal']=foldedSbToSteal
    #now CB
    street1CBChance=[]
    street1CBDone=[]
    didStreet1CB=[]
    for player in xrange(len(player_ids)):
        myStreet1CBChance=False
        myStreet1CBDone=False
        if street0VPI[player]:
            myStreet1CBChance=True
            if street1Aggr[player]:
                myStreet1CBDone=True
                didStreet1CB.append(player_ids[player])
        street1CBChance.append(myStreet1CBChance)
        street1CBDone.append(myStreet1CBDone)
    result['street1CBChance']=street1CBChance
    result['street1CBDone']=street1CBDone
    #now 2B
    street2CBChance=[]
    street2CBDone=[]
    didStreet2CB=[]
    for player in xrange(len(player_ids)):
        myStreet2CBChance=False
        myStreet2CBDone=False
        if street1CBDone[player]:
            myStreet2CBChance=True
            if street2Aggr[player]:
                myStreet2CBDone=True
                didStreet2CB.append(player_ids[player])
        street2CBChance.append(myStreet2CBChance)
        street2CBDone.append(myStreet2CBDone)
    result['street2CBChance']=street2CBChance
    result['street2CBDone']=street2CBDone
    #now 3B
    street3CBChance=[]
    street3CBDone=[]
    didStreet3CB=[]
    for player in xrange(len(player_ids)):
        myStreet3CBChance=False
        myStreet3CBDone=False
        if street2CBDone[player]:
            myStreet3CBChance=True
            if street3Aggr[player]:
                myStreet3CBDone=True
                didStreet3CB.append(player_ids[player])
        street3CBChance.append(myStreet3CBChance)
        street3CBDone.append(myStreet3CBDone)
    result['street3CBChance']=street3CBChance
    result['street3CBDone']=street3CBDone
    #and 4B
    street4CBChance=[]
    street4CBDone=[]
    didStreet4CB=[]
    for player in xrange(len(player_ids)):
        myStreet4CBChance=False
        myStreet4CBDone=False
        if street3CBDone[player]:
            myStreet4CBChance=True
            if street4Aggr[player]:
                myStreet4CBDone=True
                didStreet4CB.append(player_ids[player])
        street4CBChance.append(myStreet4CBChance)
        street4CBDone.append(myStreet4CBDone)
    result['street4CBChance']=street4CBChance
    result['street4CBDone']=street4CBDone
    result['position']=hudDataPositions
    # fold-to-CB values, filled in by generateFoldToCB per street below
    foldToStreet1CBChance=[]
    foldToStreet1CBDone=[]
    foldToStreet2CBChance=[]
    foldToStreet2CBDone=[]
    foldToStreet3CBChance=[]
    foldToStreet3CBDone=[]
    foldToStreet4CBChance=[]
    foldToStreet4CBDone=[]
    for player in xrange(len(player_ids)):
        myFoldToStreet1CBChance=False
        myFoldToStreet1CBDone=False
        foldToStreet1CBChance.append(myFoldToStreet1CBChance)
        foldToStreet1CBDone.append(myFoldToStreet1CBDone)
        myFoldToStreet2CBChance=False
        myFoldToStreet2CBDone=False
        foldToStreet2CBChance.append(myFoldToStreet2CBChance)
        foldToStreet2CBDone.append(myFoldToStreet2CBDone)
        myFoldToStreet3CBChance=False
        myFoldToStreet3CBDone=False
        foldToStreet3CBChance.append(myFoldToStreet3CBChance)
        foldToStreet3CBDone.append(myFoldToStreet3CBDone)
        myFoldToStreet4CBChance=False
        myFoldToStreet4CBDone=False
        foldToStreet4CBChance.append(myFoldToStreet4CBChance)
        foldToStreet4CBDone.append(myFoldToStreet4CBDone)
    if len(didStreet1CB)>=1:
        generateFoldToCB(1, player_ids, didStreet1CB, street1CBDone, foldToStreet1CBChance, foldToStreet1CBDone, actionTypeByNo)
        if len(didStreet2CB)>=1:
            generateFoldToCB(2, player_ids, didStreet2CB, street2CBDone, foldToStreet2CBChance, foldToStreet2CBDone, actionTypeByNo)
            if len(didStreet3CB)>=1:
                generateFoldToCB(3, player_ids, didStreet3CB, street3CBDone, foldToStreet3CBChance, foldToStreet3CBDone, actionTypeByNo)
                if len(didStreet4CB)>=1:
                    generateFoldToCB(4, player_ids, didStreet4CB, street4CBDone, foldToStreet4CBChance, foldToStreet4CBDone, actionTypeByNo)
    result['foldToStreet1CBChance']=foldToStreet1CBChance
    result['foldToStreet1CBDone']=foldToStreet1CBDone
    result['foldToStreet2CBChance']=foldToStreet2CBChance
    result['foldToStreet2CBDone']=foldToStreet2CBDone
    result['foldToStreet3CBChance']=foldToStreet3CBChance
    result['foldToStreet3CBDone']=foldToStreet3CBDone
    result['foldToStreet4CBChance']=foldToStreet4CBChance
    result['foldToStreet4CBDone']=foldToStreet4CBDone
    totalProfit=[]
    street1CheckCallRaiseChance=[]
    street1CheckCallRaiseDone=[]
    street2CheckCallRaiseChance=[]
    street2CheckCallRaiseDone=[]
    street3CheckCallRaiseChance=[]
    street3CheckCallRaiseDone=[]
    street4CheckCallRaiseChance=[]
    street4CheckCallRaiseDone=[]
    #print "b4 totprof calc, len(playerIds)=", len(player_ids)
    for pl in xrange(len(player_ids)):
        #print "pl=", pl
        myTotalProfit=winnings[pl]  # still need to deduct other costs
        if antes:
            myTotalProfit=winnings[pl] - antes[pl]
        for i in xrange(len(actionTypes)): #iterate through streets
            #for j in xrange(len(actionTypes[i])): #iterate through names (using pl loop above)
            for k in xrange(len(actionTypes[i][pl])): #iterate through individual actions of that player on that street
                myTotalProfit -= actionAmounts[i][pl][k]
        # check-call-raise values are placeholders (not computed yet)
        myStreet1CheckCallRaiseChance=False
        myStreet1CheckCallRaiseDone=False
        myStreet2CheckCallRaiseChance=False
        myStreet2CheckCallRaiseDone=False
        myStreet3CheckCallRaiseChance=False
        myStreet3CheckCallRaiseDone=False
        myStreet4CheckCallRaiseChance=False
        myStreet4CheckCallRaiseDone=False
        #print "myTotalProfit=", myTotalProfit
        totalProfit.append(myTotalProfit)
        #print "totalProfit[]=", totalProfit
        street1CheckCallRaiseChance.append(myStreet1CheckCallRaiseChance)
        street1CheckCallRaiseDone.append(myStreet1CheckCallRaiseDone)
        street2CheckCallRaiseChance.append(myStreet2CheckCallRaiseChance)
        street2CheckCallRaiseDone.append(myStreet2CheckCallRaiseDone)
        street3CheckCallRaiseChance.append(myStreet3CheckCallRaiseChance)
        street3CheckCallRaiseDone.append(myStreet3CheckCallRaiseDone)
        street4CheckCallRaiseChance.append(myStreet4CheckCallRaiseChance)
        street4CheckCallRaiseDone.append(myStreet4CheckCallRaiseDone)
    result['totalProfit']=totalProfit
    #print "res[totalProfit]=", result['totalProfit']
    result['street1CheckCallRaiseChance']=street1CheckCallRaiseChance
    result['street1CheckCallRaiseDone']=street1CheckCallRaiseDone
    result['street2CheckCallRaiseChance']=street2CheckCallRaiseChance
    result['street2CheckCallRaiseDone']=street2CheckCallRaiseDone
    result['street3CheckCallRaiseChance']=street3CheckCallRaiseChance
    result['street3CheckCallRaiseDone']=street3CheckCallRaiseDone
    result['street4CheckCallRaiseChance']=street4CheckCallRaiseChance
    result['street4CheckCallRaiseDone']=street4CheckCallRaiseDone
    return result
#end def generateHudCacheData
def generateFoldToCB(street, playerIDs, didStreetCB, streetCBDone, foldToStreetCBChance, foldToStreetCBDone, actionTypeByNo):
    """fills the passed foldToStreetCB* arrays appropriately depending on the given street"""
    # find the first action AFTER the continuation bet on this street
    firstCBReaction = 0
    for actionNo, action in enumerate(actionTypeByNo[street]):
        if action[1] == "bet" and action[0] in didStreetCB:
            firstCBReaction = actionNo + 1
            break
    # every player acting from there on had the chance to fold to the CB
    for action in actionTypeByNo[street][firstCBReaction:]:
        for playerNo, playerID in enumerate(playerIDs):
            if playerID == action[0]:
                foldToStreetCBChance[playerNo] = True
                if action[1] == "fold":
                    foldToStreetCBDone[playerNo] = True
#end def generateFoldToCB
|
# Copyright (c) 2013, frappe and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: return the column definitions and the filtered row data."""
    report_columns = get_column()
    report_rows = get_data(filters)
    return report_columns, report_rows
def get_column():
    """Build the report columns in frappe's "Label:Fieldtype:Width" string format."""
    specs = [
        ("Item Name", "Data", 180),
        ("Brand", "Data", 80),
        ("UOM", "Data", 80),
        ("Image", "Image", 150),
        ("Payment Terms", "Data", 180),
        ("Sales Price", "Currency", 100),
        ("Valuation Rate", "Currency", 100),
        ("Last Purchase Price", "Data", 180),
        ("Market Price", "Currency", 100),
        ("Price A 60 Days", "Currency", 140),
        ("Price B 90 Days", "Currency", 140),
        ("Cash Price", "Currency", 100),
        ("Sell Off Price", "Currency", 140),
        ("Last Quoted Price", "Currency", 140),
        ("Last Sold Price", "Currency", 140),
        ("Main Stock", "Float", 100),
        ("Ahmadi Stock", "Float", 100),
        ("Decor Stock", "Float", 100),
        ("Shuwaikh Stock", "Float", 100),
        ("Store Shuwaikh Stock", "Float", 160),
    ]
    # Labels are passed through _() for translation, exactly as before.
    return ["%s:%s:%d" % (_(label), fieldtype, width) for label, fieldtype, width in specs]
def get_data(filters):
    """Fetch the price/stock summary row for the selected item and customer.

    :param filters: dict-like report filters; must contain "customer" and "item"
    :return: list of rows (``as_list=1``) from frappe.db.sql, or None when the
             required filters are missing
    """
    # Require BOTH filters.  The original `filters.get("customer","item")`
    # only looked up "customer" and fell back to the truthy string "item",
    # so the check was always True (and crashed on filters=None).
    if filters and filters.get("customer") and filters.get("item"):
        customer = filters.get("customer")
        item = filters.get("item")
        # Parameterized query: customer/item are user input and were previously
        # spliced into the SQL text via .format() -- a SQL injection hole.
        item_data = frappe.db.sql("""select item_name,brand,stock_uom,CONCAT("<img src=",image,">"),
            (select payment_terms from `tabCustomer` where customer_name = %(customer)s),standard_rate,
            (select valuation_rate from `tabStock Ledger Entry` where item_code = %(item)s and is_cancelled='No' order by posting_date desc, posting_time desc, name desc limit 1),
            (select CONCAT(sitem.rate," ",si.currency) from `tabPurchase Invoice Item` sitem,`tabPurchase Invoice` si
                where sitem.parent = si.name and sitem.item_code = %(item)s order by si.creation desc limit 1),
            market_price,price_a_60_days,price_a_90_days,cash_price,sell_off_price,
            (select qitem.rate from `tabQuotation Item` qitem,`tabQuotation` qt
                where qitem.parent = qt.name and qitem.item_code = %(item)s and qt.customer_name = %(customer)s
                order by qt.creation desc limit 1),
            (select sitem.rate from `tabSales Invoice Item` sitem,`tabSales Invoice` si
                where sitem.parent = si.name and sitem.item_code = %(item)s and si.customer = %(customer)s
                order by si.creation desc limit 1),
            (select actual_qty from `tabBin` where item_code = %(item)s and warehouse = 'Main SB - SB'),
            (select actual_qty from `tabBin` where item_code = %(item)s and warehouse = 'Ahmadi S/R - SB'),
            (select actual_qty from `tabBin` where item_code = %(item)s and warehouse = 'Decor - Shuwaikh - SB'),
            (select actual_qty from `tabBin` where item_code = %(item)s and warehouse = 'Shuwaikh S/R - SB'),
            (select actual_qty from `tabBin` where item_code = %(item)s and warehouse = 'Store - Shuwaikh - SB')
            from `tabItem` where item_code = %(item)s; """,
            {"customer": customer, "item": item}, as_list=1)
        return item_data
|
# coding: utf-8
"""
Hopsworks api
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.1.0-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.command_dto import CommandDTO # noqa: F401,E501
from swagger_client.models.environment_dto import EnvironmentDTO # noqa: F401,E501
from swagger_client.models.library_dto import LibraryDTO # noqa: F401,E501
class EnvironmentDTO(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
      and the value is attribute type.
      attribute_map (dict): The key is attribute name
      and the value is json key in definition.
    """
    # Declared Swagger type per Python attribute; used by to_dict() and the
    # generated (de)serialization machinery.
    swagger_types = {
        'href': 'str',
        'items': 'list[EnvironmentDTO]',
        'count': 'int',
        'python_version': 'str',
        'conda_channel': 'str',
        'libraries': 'LibraryDTO',
        'commands': 'CommandDTO'
    }
    # Python attribute name -> JSON key in the OpenAPI definition.
    attribute_map = {
        'href': 'href',
        'items': 'items',
        'count': 'count',
        'python_version': 'pythonVersion',
        'conda_channel': 'condaChannel',
        'libraries': 'libraries',
        'commands': 'commands'
    }
    def __init__(self, href=None, items=None, count=None, python_version=None, conda_channel=None, libraries=None, commands=None):  # noqa: E501
        """EnvironmentDTO - a model defined in Swagger"""  # noqa: E501
        # Backing fields; public access goes through the properties below.
        self._href = None
        self._items = None
        self._count = None
        self._python_version = None
        self._conda_channel = None
        self._libraries = None
        self._commands = None
        self.discriminator = None
        # Only assign attributes that were explicitly provided.
        if href is not None:
            self.href = href
        if items is not None:
            self.items = items
        if count is not None:
            self.count = count
        if python_version is not None:
            self.python_version = python_version
        if conda_channel is not None:
            self.conda_channel = conda_channel
        if libraries is not None:
            self.libraries = libraries
        if commands is not None:
            self.commands = commands
    @property
    def href(self):
        """Gets the href of this EnvironmentDTO.  # noqa: E501
        :return: The href of this EnvironmentDTO.  # noqa: E501
        :rtype: str
        """
        return self._href
    @href.setter
    def href(self, href):
        """Sets the href of this EnvironmentDTO.
        :param href: The href of this EnvironmentDTO.  # noqa: E501
        :type: str
        """
        self._href = href
    @property
    def items(self):
        """Gets the items of this EnvironmentDTO.  # noqa: E501
        :return: The items of this EnvironmentDTO.  # noqa: E501
        :rtype: list[EnvironmentDTO]
        """
        return self._items
    @items.setter
    def items(self, items):
        """Sets the items of this EnvironmentDTO.
        :param items: The items of this EnvironmentDTO.  # noqa: E501
        :type: list[EnvironmentDTO]
        """
        self._items = items
    @property
    def count(self):
        """Gets the count of this EnvironmentDTO.  # noqa: E501
        :return: The count of this EnvironmentDTO.  # noqa: E501
        :rtype: int
        """
        return self._count
    @count.setter
    def count(self, count):
        """Sets the count of this EnvironmentDTO.
        :param count: The count of this EnvironmentDTO.  # noqa: E501
        :type: int
        """
        self._count = count
    @property
    def python_version(self):
        """Gets the python_version of this EnvironmentDTO.  # noqa: E501
        :return: The python_version of this EnvironmentDTO.  # noqa: E501
        :rtype: str
        """
        return self._python_version
    @python_version.setter
    def python_version(self, python_version):
        """Sets the python_version of this EnvironmentDTO.
        :param python_version: The python_version of this EnvironmentDTO.  # noqa: E501
        :type: str
        """
        self._python_version = python_version
    @property
    def conda_channel(self):
        """Gets the conda_channel of this EnvironmentDTO.  # noqa: E501
        :return: The conda_channel of this EnvironmentDTO.  # noqa: E501
        :rtype: str
        """
        return self._conda_channel
    @conda_channel.setter
    def conda_channel(self, conda_channel):
        """Sets the conda_channel of this EnvironmentDTO.
        :param conda_channel: The conda_channel of this EnvironmentDTO.  # noqa: E501
        :type: str
        """
        self._conda_channel = conda_channel
    @property
    def libraries(self):
        """Gets the libraries of this EnvironmentDTO.  # noqa: E501
        :return: The libraries of this EnvironmentDTO.  # noqa: E501
        :rtype: LibraryDTO
        """
        return self._libraries
    @libraries.setter
    def libraries(self, libraries):
        """Sets the libraries of this EnvironmentDTO.
        :param libraries: The libraries of this EnvironmentDTO.  # noqa: E501
        :type: LibraryDTO
        """
        self._libraries = libraries
    @property
    def commands(self):
        """Gets the commands of this EnvironmentDTO.  # noqa: E501
        :return: The commands of this EnvironmentDTO.  # noqa: E501
        :rtype: CommandDTO
        """
        return self._commands
    @commands.setter
    def commands(self, commands):
        """Sets the commands of this EnvironmentDTO.
        :param commands: The commands of this EnvironmentDTO.  # noqa: E501
        :type: CommandDTO
        """
        self._commands = commands
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            # Recursively serialize nested models inside lists and dicts.
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch for models that subclass dict (not this one).
        if issubclass(EnvironmentDTO, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, EnvironmentDTO):
            return False
        # Attribute-wise comparison via the instance __dict__.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
import torch
from torchvision.transforms import functional as F
from data import valid_dataloader
from utils import Adder
import os
from skimage.metrics import peak_signal_noise_ratio
def _valid(model, args, ep):
    """Evaluate `model` on the GoPro validation set and return the mean PSNR.

    Switches the model to eval mode for the duration of the loop and back to
    train mode before returning.  Also ensures `<result_dir>/<ep>` exists.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    dataloader = valid_dataloader(args.data_dir, batch_size=1, num_workers=0)
    model.eval()
    adder = Adder()
    with torch.no_grad():
        print('Start GoPro Evaluation')
        epoch_dir = os.path.join(args.result_dir, '%d' % (ep))
        for idx, (blur_img, sharp_img) in enumerate(dataloader):
            blur_img = blur_img.to(device)
            # create the per-epoch result directory lazily, as the original did
            if not os.path.exists(epoch_dir):
                os.mkdir(epoch_dir)
            outputs = model(blur_img)
            # third output head is the full-resolution prediction
            restored = torch.clamp(outputs[2], 0, 1)
            psnr_val = peak_signal_noise_ratio(
                restored.squeeze(0).cpu().numpy(),
                sharp_img.squeeze(0).cpu().numpy(),
                data_range=1)
            adder(psnr_val)
            print('\r%03d' % idx, end=' ')
    print('\n')
    model.train()
    return adder.average()
|
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
import cv2 as cv2
from glob import glob
from unet import unet
# NOTE(review): executing_eagerly() only *queries* eager mode, it does not
# enable it -- confirm whether tf.compat.v1.enable_eager_execution() was meant.
tf.executing_eagerly()
capture = cv2.VideoCapture(0)
model = keras.models.load_model('models/model.h5')
try:
    while True:
        ret, img = capture.read()
        # Fixed: bail out when the camera read fails instead of crashing
        # inside cv2.resize on a None frame.
        if not ret:
            break
        y_pred = model.predict(np.array([cv2.resize(img, (256, 256))]).astype(float))
        # collapse all non-background channels into a single mask for display
        cv2.imshow('', np.sum(y_pred[0,...,1:], axis=-1).round())
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Fixed: release the camera and close the preview window on exit.
    capture.release()
    cv2.destroyAllWindows()
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-04-11 17:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the nullable ``race`` foreign key to ``animales.Animal``."""
    dependencies = [
        ('animales', '0002_auto_20180404_1603'),
    ]
    operations = [
        migrations.AddField(
            model_name='animal',
            name='race',
            # null=True lets existing Animal rows migrate without a default.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='races', to='animales.Race', verbose_name='Raza'),
        ),
    ]
|
from django.urls import path
from .views import add_show, update_data, delete_data
urlpatterns = [
    # list existing records and handle the "add" form
    path('', add_show, name='addandshow'),
    # delete the record with the given primary key
    path('delete/<int:id>/', delete_data, name='deletedata'),
    # edit/update the record with the given primary key
    path('<int:id>/', update_data, name='updatedata'),
]
# -*- coding: utf-8 -*-
import pygame
import random
from sys import exit
# use sys.exit to closed
pygame.init()
# Initialize all pygame modules
screen = pygame.display.set_mode((450, 800), 0, 32)
# Create the 450x800 window (32-bit surface)
pygame.display.set_caption("Hit plane")
# Set the window title
class Plane:
    """The player's plane; it follows the mouse cursor."""
    def __init__(self):
        self.restart()
        # load the plane sprite
        self.image = pygame.image.load('plane.png').convert_alpha()
    def restart(self):
        """Reset the plane to its starting position."""
        self.x, self.y = 200, 600
    def move(self):
        """Center the sprite on the current mouse position."""
        mouse_x, mouse_y = pygame.mouse.get_pos()
        self.x = mouse_x - self.image.get_width() / 2
        self.y = mouse_y - self.image.get_height() / 2
plane = Plane()
class Bullet:
    """A single bullet; inactive bullets sit in a pool until re-fired."""
    def __init__(self):
        # park the bullet just above the screen until it is fired
        self.x, self.y = 0, -1
        # load the bullet sprite
        self.image = pygame.image.load('bullet.png').convert_alpha()
        self.active = False
    def move(self):
        """Advance an active bullet upward; deactivate it once off-screen."""
        if not self.active:
            return
        self.y -= 1
        if self.y < 0:
            # off the top of the window -> back to the pool
            self.active = False
    def restart(self):
        """Fire the bullet, centered on the current mouse position."""
        mouse_x, mouse_y = pygame.mouse.get_pos()
        self.x = mouse_x - self.image.get_width() / 2
        self.y = mouse_y - self.image.get_height() / 2
        self.active = True
class Enemy:
    """An enemy plane that scrolls down and respawns above the screen."""
    def __init__(self):
        self.restart()
        # load the enemy sprite
        self.image = pygame.image.load('enemy.png').convert_alpha()
    def restart(self):
        """Respawn above the visible area with a fresh random x and speed."""
        self.x = random.randint(50, 400)
        self.y = random.randint(-200, -50)
        self.speed = random.random() + 0.1
    def move(self):
        """Scroll downward; respawn after leaving the 800px-tall window."""
        if self.y >= 800:
            self.restart()
        else:
            self.y += self.speed
def checkHit(enemy, bullet):
    """Return True when the bullet's top-left point lies inside the enemy sprite.

    On a hit the enemy is respawned and the bullet is returned to the pool.
    """
    inside_x = enemy.x < bullet.x < enemy.x + enemy.image.get_width()
    inside_y = enemy.y < bullet.y < enemy.y + enemy.image.get_height()
    if inside_x and inside_y:
        enemy.restart()
        bullet.active = False
        return True
    return False
def checkCrash(enemy, plane):
    """Return True when the plane's central region overlaps the enemy sprite.

    Only the middle 30%-70% band of the plane sprite counts, so near-misses
    at the wings do not end the game.
    """
    plane_w = plane.image.get_width()
    plane_h = plane.image.get_height()
    overlap_x = (plane.x + 0.7 * plane_w > enemy.x) and \
                (plane.x + 0.3 * plane_w < enemy.x + enemy.image.get_width())
    overlap_y = (plane.y + 0.7 * plane_h > enemy.y) and \
                (plane.y + 0.3 * plane_h < enemy.y + enemy.image.get_height())
    return overlap_x and overlap_y
pic = 'bg.jpg'
background = pygame.image.load(pic).convert()
# current background image (toggled by mouse click in the main loop)
boom = pygame.image.load('boom.png').convert_alpha()
# explosion sprite shown over the plane on game over
bullets_list = []
# fixed pool of 10 reusable bullets
for i in range(10):
    bullets_list.append(Bullet())
count_b = len(bullets_list)
# number of pooled bullets
index_b = 0
# index of the next bullet to fire
interval_b = 0
# frames remaining until the next shot
enemie_list = []
# five enemies, respawned rather than reallocated
for i in range(5):
    enemie_list.append(Enemy())
gameover = False
score = 0
font = pygame.font.Font(None, 32)
while True:
    # ---- main game loop ----
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            # window closed: shut pygame down and exit the process
            pygame.quit()
            exit()
        if event.type == pygame.MOUSEBUTTONDOWN:
            # toggle the background image on every mouse click
            if pic == 'bg2.jpg':
                pic = 'bg.jpg'
            else:
                pic = 'bg2.jpg'
            background = pygame.image.load(pic).convert()
    screen.blit(background, (0, 0))
    # fire a pooled bullet every 100 frames
    interval_b -= 1
    if interval_b < 0:
        bullets_list[index_b].restart()
        interval_b = 100
        index_b = (index_b+1) % count_b
    for b in bullets_list:
        if b.active:
            b.move()
            screen.blit(b.image, (b.x, b.y))
            # score a point for every enemy this bullet hits
            for e in enemie_list:
                if checkHit(e, b):
                    score += 1
    for e in enemie_list:
        e.move()
        screen.blit(e.image, (e.x, e.y))
    if not gameover:
        for e in enemie_list:
            if checkCrash(e, plane):
                # the plane collided with an enemy -> game over
                gameover = True
            # NOTE(review): enemies were already moved/drawn above, so this
            # branch moves and draws them a second time per frame -- confirm
            # whether the duplicate loop is intentional.
            e.move()
            screen.blit(e.image, (e.x, e.y))
        # NOTE(review): "Socre" is a typo for "Score" in the rendered text.
        text = font.render("Socre: %d" % score, 1, (0, 0, 0))
        screen.blit(text, (0, 0))
        plane.move()
        screen.blit(plane.image, (plane.x, plane.y))
    else:
        # game over: stop shooting and show the explosion + final score
        interval_b = float("inf")
        text = font.render("Socre: %d" % score, 1, (0, 0, 0))
        screen.blit(boom, (plane.x, plane.y))
        screen.blit(text, (190, 400))
    # NOTE(review): `event` leaks out of the event for-loop; if the queue is
    # empty on the very first frame this raises NameError -- confirm.
    if gameover and event.type == pygame.MOUSEBUTTONUP:
        # click released after game over: reset everything and play again
        plane.restart()
        for e in enemie_list:
            e.restart()
        for b in bullets_list:
            b.active = False
        score = 0
        gameover = False
        interval_b = 0
    x, y = pygame.mouse.get_pos()
    # show the mouse coordinates in the window title
    pygame.display.set_caption("X:"+str(x)+","+"Y:" + str(y))
    pygame.display.update()
|
#!/usr/bin/env python
#Read a Direct Message(Twitter) from a predefined user/users and execute(Recipient) it
__author__ = ["Mad_Dev"]
__email__ = ["mad_dev@linuxmail.org"]
import os
import sys
import twitter
import subprocess
'''
Requires python-twitter
Backdoor was written to test the possibility of issuing
server(linux) commands from twitter.
You can send a direct message to restart a service, or
reboot the server.
It could be used with cron.
It could be used as search bot.
This is an example....
'''
# Twitter API credentials -- must be filled in before running.
access_secret= ""
access_key= ""
consumer_secret= ""
consumer_key= ""
api=twitter.Api(consumer_key=consumer_key,
        consumer_secret=consumer_secret,
        access_token_key=access_key,
        access_token_secret=access_secret)
allow_list = [""] # screen names allowed to issue commands
# Direct messages are fetched once, at import time.
m = api.GetDirectMessages()
def directMessage():
    """Return (id, text, sender_screen_name) of the newest fetched DM.

    Exits the process when there are no direct messages.  Acts on the *last*
    message in the batch, which is what the original did implicitly by relying
    on the comprehension variable `u` leaking (a Python-2-only behaviour).

    Fixed: sys.exit(''Exiting'') -- doubled quotes were a syntax error.
    """
    messages = list(m)
    if not messages:
        print('No Messages to Parse | [\033[91mEMPTY\033[0m]')
        sys.exit('Exiting')
    latest = messages[-1]
    print('Message [\033[93m%s\033[0m]' % latest.text)
    return latest.id, latest.text, latest.sender_screen_name
def checkUser(user, msg):
    """Verify the DM sender is allow-listed and execute the command.

    :param user: iterable of allowed screen names
    :param msg: (msg_id, command_text, sender_screen_name) from directMessage()
    :return: (sender_screen_name, last_output_line, msg_id); exits when the
             sender is not in allow_list

    Fixed: sys.exit(''Exiting'') doubled-quote syntax error, and a NameError
    when the command produced no output (`n` was only bound inside the loop).
    """
    for name in user:
        print('Checking [\033[93m%s\033[0m]...' % msg[2])
        if msg[2] in allow_list:
            print('[\033[93m%s\033[0m] is Valid' % name)
            print('[\033[93m%s\033[0m]' % msg[1]) #Should add command test.
            # SECURITY: runs an arbitrary shell command taken from a DM.
            # That is this PoC's stated purpose, but never deploy as-is.
            cmd = os.popen(msg[1]).readlines() #command Exce
            # only the last line of output is reported back
            n = cmd[-1] if cmd else ''
            return msg[2], str(n), msg[0]
        else:
            print(msg[2] + ' is not in Allow List')
            sys.exit('Exiting')
def sendReply(re):
    """DM the command output back to the sender, then delete the processed DM.

    :param re: (sender_screen_name, output_line, msg_id) from checkUser()
    """
    recipient = str(re[0])
    body = str(re[1])
    api.PostDirectMessage(text=body, screen_name=recipient)
    # Delete the command DM so it is not executed again (let's keep it clean)
    api.DestroyDirectMessage(id=re[2])
# Fixed: the original guard used ''__main__''-style doubled quotes (a syntax
# error) and the file ended with a stray quote character; both removed.
if __name__ == '__main__':
    def run():
        """Fetch the newest DM, validate its sender, execute it and reply."""
        msg = directMessage()
        user = checkUser(allow_list, msg)
        sendReply(user)
    run()
|
# Generated by Django 2.2.1 on 2019-08-21 10:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Group model and tighten Student fee/group/image fields."""
    dependencies = [
        ('students', '0003_auto_20190820_1200'),
    ]
    operations = [
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('group', models.CharField(max_length=15)),
                # whole-number amounts only (decimal_places=0)
                ('amount', models.DecimalField(decimal_places=0, default=5.0, max_digits=10)),
            ],
        ),
        migrations.AlterField(
            model_name='student',
            name='fee_balance',
            field=models.DecimalField(decimal_places=0, default=0.0, max_digits=10),
        ),
        migrations.AlterField(
            model_name='student',
            name='group',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='students.Group'),
        ),
        migrations.AlterField(
            model_name='student',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='media'),
        ),
    ]
|
import webbrowser
from tkinter import *
from tkinter import filedialog
def Ouvrir():
    """Ask the user to pick a PDF file and open it with the default handler.

    Fixed: when the dialog is cancelled askopenfilename returns an empty
    string; the original passed that straight to webbrowser.open_new.
    """
    filename = filedialog.askopenfilename(title="Ouvrir une fichier pdf",filetypes=[('pdf files','.pdf'),('all files','.*')])
    if filename:
        webbrowser.open_new(filename)
fenetre = Tk()
fenetre.title("Lecture file")
# menu bar with a single "Fichier" cascade
nav = Menu(fenetre)
importFichier = Menu(nav,tearoff=0)
importFichier.add_command(label="lire le fichier",command=Ouvrir)
importFichier.add_command(label="Quitter",command=fenetre.destroy)
nav.add_cascade(label="Fichier", menu=importFichier)
fenetre.config(menu=nav)
Canevas = Canvas(fenetre)
Canevas.pack(padx=5,pady=5)
gifdict={}
# NOTE(review): gifdict is never used; presumably meant to keep image
# references alive -- confirm before removing.
fenetre.mainloop()
#https://leetcode.com/problems/kids-with-the-greatest-number-of-candies/
#STARTED: February 2 2021
"""
Given the array candies and the integer extraCandies,
where candies[i] represents the number of candies that the ith kid has.
For each kid check if there is a way to distribute extraCandies
among the kids such that he or she can have the greatest number of
candies among them. Notice that multiple kids can have the greatest
number of candies.
Example 1:
Input: candies = [2,3,5,1,3], extraCandies = 3
Output: [true,true,true,false,true]
Explanation:
Kid 1 has 2 candies and if he or she receives all extra candies (3) will have 5 candies --- the greatest number of candies among the kids.
Kid 2 has 3 candies and if he or she receives at least 2 extra candies will have the greatest number of candies among the kids.
Kid 3 has 5 candies and this is already the greatest number of candies among the kids.
Kid 4 has 1 candy and even if he or she receives all extra candies will only have 4 candies.
Kid 5 has 3 candies and if he or she receives at least 2 extra candies will have the greatest number of candies among the kids.
"""
def kidsWithCandies(candies, extraCandies):
    """Return a list of booleans, one per kid: True when giving that kid all
    extraCandies would reach at least the current greatest candy count.

    Fixes the original, which (a) had an empty `if` body directly before an
    `elif` -- a SyntaxError -- and (b) compared against len(candies) and a
    running `greatestNum` instead of the maximum element of `candies`.
    """
    greatest = max(candies)
    return [candy + extraCandies >= greatest for candy in candies]
#print(kidsWithCandies([2,3,5,1,3], 3))
#print(kidsWithCandies([4,2,1,1,2], 1))
# Smoke test; expected output: [True, False, True]
print(kidsWithCandies([12,1,12], 10)) #should be [true,false,true]
# List-comprehension reference notes kept from the author:
#new_list = [expression(i) for i in old_list if filter(i)]
#new_range = [i * i for i in range(5) if i % 2 == 0] #0, 1, 2, 3, 4
#print(new_range) #0, 4, 16
|
"""
Data generator for segment to label problem.
Each segment is padded to length 6000, and labels are one-hot encoded.
"""
import numpy as np
from keras.utils import Sequence, to_categorical
from keras.preprocessing.sequence import pad_sequences
import os.path
DIR_PATH = ''
"""
Generator for training and validation.
"""
class BreakfastActionTrainDataGenerator(Sequence):
    """Keras Sequence yielding (padded_segments, one_hot_labels) batches
    for training and validation; segments are padded/truncated to 6000."""
    def __init__(self, segment_ids, labels, batch_size=100, input_dim=400, output_dim=48, shuffle=True):
        self.segment_ids = segment_ids
        self.labels = labels
        self.batch_size = batch_size
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.shuffle = shuffle
        # build (and optionally shuffle) the initial index order
        self.on_epoch_end()
    def __len__(self):
        """Number of batches per epoch."""
        return int(np.ceil(len(self.segment_ids) / self.batch_size))
    def __getitem__(self, index):
        """Generate one batch of train/validation data."""
        start = index * self.batch_size
        batch_indexes = self.indexes[start:start + self.batch_size]
        batch_ids = [self.segment_ids[k] for k in batch_indexes]
        return self.__data_generation(batch_ids)
    def on_epoch_end(self):
        """Refresh (and optionally shuffle) the sample index order."""
        self.indexes = np.arange(len(self.segment_ids))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)
    def __data_generation(self, batch_ids):
        """Load each segment's .npy features, pad to 6000, one-hot encode labels."""
        features = [np.load(os.path.join(DIR_PATH, 'data/segments/', sid + '.npy'))
                    for sid in batch_ids]
        targets = [[self.labels[sid]] for sid in batch_ids]
        padded_x = pad_sequences(features, padding='post', maxlen=6000, truncating='post', value=0.0)
        encoded_y = np.array([to_categorical(t, self.output_dim) for t in targets])
        return padded_x, encoded_y
"""
Generator for testing.
"""
class BreakfastActionTestDataGenerator(Sequence):
    """Keras Sequence yielding padded feature batches (no labels) for testing."""
    def __init__(self, segment_ids, batch_size=100, input_dim=400):
        self.segment_ids = segment_ids
        # test batches are never shuffled: a fixed identity index order
        self.indexes = np.arange(len(self.segment_ids))
        self.batch_size = batch_size
        self.input_dim = input_dim
    def __len__(self):
        """Number of batches per epoch."""
        return int(np.ceil(len(self.segment_ids) / self.batch_size))
    def __getitem__(self, index):
        """Generate one batch of test data."""
        start = index * self.batch_size
        batch_indexes = self.indexes[start:start + self.batch_size]
        batch_ids = [self.segment_ids[k] for k in batch_indexes]
        return self.__data_generation(batch_ids)
    def __data_generation(self, batch_ids):
        """Load each segment's .npy features and pad the batch to a common length."""
        features = [np.load(os.path.join(DIR_PATH, 'data/segments/', sid + '.npy'))
                    for sid in batch_ids]
        return pad_sequences(features, padding='post')
# Comparision script for mzn-cpx and muaco
# Author: Vladimir Ulyantsev (ulyantsev@rain.ifmo.ru)
import datetime
import os
import sys
import re
import subprocess
import json
import random
import shutil
# reproducible results
random.seed(239239)
# 'java -jar <jar> <args>' launcher shared by all external tools
COMMAND_TEMPLATE = 'java -Xmx512M -Xss128M -jar %s %s'
RESULTS_ROOT = 'results'
AG_PATH = 'jars/automaton-generator.jar'
# NOTE: the misspelled 'persent' key is load-bearing -- it must match the key
# generate_automaton() puts in its params dict.
AG_PARAMS_TEMPLATE = '-s %(size)s -ac %(ac)d -ec %(ec)d -vc %(vc)d -o %(path)s -mina %(mina)d -maxa %(maxa)d -p %(persent)d -rs %(seed)d'
SG_PATH = 'jars/scenarios-generator.jar'
SG_PARAMS_TEMPLATE = '-a %(automaton_path)s -cnt %(count)d -o %(path)s -minl %(minl)d -maxl %(maxl)d -rs %(seed)d'
# cover-mode variant (-c) driven by total scenario length (-suml)
SG_PARAMS_TEMPLATE_COVER = '-a %(automaton_path)s -o %(path)s -suml %(suml)d -c -rs %(seed)d'
NOISER_TEMPLATE = 'python add_noise.py %(input)s %(percent)d %(seed)d > %(output)s'
BUILDER_TEMPLATE = 'python main.py %(scenarios)s %(size)d %(percent)d > %(result)s'
MUACO_TEMPLATE = 'python muaco.py %(scenarios)s %(size)d %(percent)d'
IC_PATH = 'jars/isomorphism-checker.jar'
IC_PARAMS_TEMPLATE = '%(first)s %(second)s > %(output)s'
# generator defaults fed to the automaton generator (-p/-ec/-ac/-vc/-mina/-maxa)
TR_PERSENT, EVENTS_COUNT, ACTIONS_COUNT, VARS_COUNT, MIN_ACTIONS, MAX_ACTIONS = 100, 2, 3, 0, 1, 2
MIN_LEN_C, MAX_LEN_C = 1, 3
# experiment grid: launches per cell x automaton sizes x noise % x scenario lengths
LAUNCHES_COUNT = 100
SIZES = [5, 6, 7, 8, 9]
ERR_PERCENTS = [1, 2]
SUM_LENGTHS = [1000, 1500, 2000]
def generate_automaton(automaton_path, size):
    """Generate a random automaton with `size` states at `automaton_path`
    by shelling out to the external generator jar."""
    params = {
        'size': size,
        'ac': ACTIONS_COUNT,
        'ec': EVENTS_COUNT,
        'vc': VARS_COUNT,
        'path': automaton_path,
        'mina': MIN_ACTIONS,
        'maxa': MAX_ACTIONS,
        'persent': TR_PERSENT,
        'seed': random.randint(1, 100000),
    }
    command = COMMAND_TEMPLATE % (AG_PATH, AG_PARAMS_TEMPLATE % params)
    os.system(command)
def generate_scenarios(scenarios_path, automaton_path, size, sum_length):
    """Generate covering scenarios of total length `sum_length` for the
    automaton at `automaton_path`, writing them to `scenarios_path`."""
    params = {
        'automaton_path': automaton_path,
        'path': scenarios_path,
        'suml': sum_length,
        'seed': random.randint(1, 100000),
    }
    command = COMMAND_TEMPLATE % (SG_PATH, SG_PARAMS_TEMPLATE_COVER % params)
    os.system(command)
def add_noise(scenarios_path, percent, noisy_path):
    """Run the noising script over a scenarios file, writing to noisy_path."""
    os.system(NOISER_TEMPLATE % {
        'input': scenarios_path,
        'percent': percent,
        'output': noisy_path,
        'seed': random.randint(1, 100000),
    })
def build_noisy_with_mzn(scenarios_path, size, percent, output_path):
    """Build an automaton from the scenarios with the mzn-based builder.

    Runs main.py, then parses the reported running time out of the result file.

    :return: builder running time in seconds (float)
    """
    builder_params = {'scenarios' : scenarios_path,
                      'percent' : percent,
                      'size' : size,
                      'result' : output_path}
    os.system(BUILDER_TEMPLATE % builder_params)
    # Fixed: close the result file instead of leaking the handle.
    with open(output_path) as result_f:
        time = float(re.search(r'# Time = (.+)', result_f.read()).group(1))
    return time
def build_with_muaco(scenarios_path, size, percent, output_path):
    """Build an automaton with the muaco builder and archive its result.

    Writes '# ' + the '#'-joined metadata lines + ' ' + the generated
    automaton into output_path (matching the old `print >>` record), and
    returns the builder running time parsed from its stdout.

    Fixed: replaced the Python-2-only `print >>` chevron syntax and closed
    the three previously leaked file handles.
    """
    builder_params = {'scenarios' : scenarios_path,
                      'percent' : percent,
                      'size' : size}
    output = os.popen(MUACO_TEMPLATE % builder_params).read()
    with open('muaco_metadata') as meta_f:
        metadata = '#'.join(meta_f.readlines())
    with open('muaco_result.gv') as result_f:
        result_gv = result_f.read()
    with open(output_path, 'w') as out_f:
        out_f.write('# ' + metadata + ' ' + result_gv + '\n')
    time = float(re.search(r'Time = (.+)', output).group(1))
    return time
def check_isomorphism(ic_result_path, automaton_path, result_path):
    """Run the external isomorphism checker on the two automata.

    :return: True when the checker's report starts with 'ISOMORPHIC'
    """
    ic_params = {'first' : automaton_path,
                 'second' : result_path,
                 'output' : ic_result_path}
    ic_params_str = IC_PARAMS_TEMPLATE % ic_params
    ic_command = COMMAND_TEMPLATE % (IC_PATH, ic_params_str)
    os.system(ic_command)
    # Fixed: close the report file instead of leaking the handle.
    with open(ic_result_path) as report_f:
        is_isomorph = report_f.readline().startswith('ISOMORPHIC')
    return is_isomorph
def launch_mzn(dir_path, size, sum_length, percent):
    """Run one experiment in dir_path: generate an automaton and scenarios,
    build with both tools, check isomorphism, and return a result record."""
    automaton_path = os.path.join(dir_path, 'automaton.gv')
    generate_automaton(automaton_path, size)
    scenarios_path = os.path.join(dir_path, 'scenarios-correct')
    # NOTE(review): generate_scenarios returns None, so sc_count is always None.
    sc_count = generate_scenarios(scenarios_path, automaton_path, size, sum_length)
    noisy_path = os.path.join(dir_path, 'scenarios-noisy')
    add_noise(scenarios_path, percent, noisy_path)
    result_path = os.path.join(dir_path, 'automaton-res.gv')
    # NOTE(review): both builders receive the *clean* scenarios file, not
    # noisy_path -- confirm whether the noisy scenarios were meant here.
    time = build_noisy_with_mzn(scenarios_path, size, percent, result_path)
    muaco_path = os.path.join(dir_path, 'automaton-muaco.gv')
    muaco_time = build_with_muaco(scenarios_path, size, percent, muaco_path)
    ic_result_path = os.path.join(dir_path, 'isomorphism-result')
    # isomorphism is checked against the mzn result only
    is_isomorph = check_isomorphism(ic_result_path, automaton_path, result_path)
    result = {'work_dir' : dir_path,
              'size' : size,
              'sum_length' : sum_length,
              'time' : time,
              'muaco_time' : muaco_time,
              'is_isomorph' : is_isomorph,
              'percent' : percent}
    return result
def main():
    """Run the full experiment grid (sizes x scenario lengths x noise levels,
    LAUNCHES_COUNT launches per cell) and dump every per-launch record to
    <results_dir>/results.json."""
    if not os.path.exists(RESULTS_ROOT):
        os.mkdir(RESULTS_ROOT)
    # one timestamped directory per run
    dt = datetime.datetime.now()
    time_str = dt.strftime('%Y-%m-%d--%H-%M-%S')
    results_dir = os.path.join(RESULTS_ROOT, time_str)
    os.mkdir(results_dir)
    results = []
    for automaton_size in SIZES:
        cur_size_dir = "%s/%02d-states" % (results_dir, automaton_size)
        os.mkdir(cur_size_dir)
        for sum_length in SUM_LENGTHS:
            cur_length_dir = "%s/%04d-sum" % (cur_size_dir, sum_length)
            os.mkdir(cur_length_dir)
            for percent in ERR_PERCENTS:
                cur_percent_dir = "%s/%02d-err" % (cur_length_dir, percent)
                os.mkdir(cur_percent_dir)
                for i in xrange(LAUNCHES_COUNT):
                    launch_dir = "%s/%02d" % (cur_percent_dir, i)
                    os.mkdir(launch_dir)
                    res = launch_mzn(launch_dir, automaton_size, sum_length, percent)
                    results.append(res)
                    print(res)
    results_json_str = '[' + ',\n'.join([json.dumps(r) for r in results]) + ']'
    # Fixed: the results file was opened via `print >>` and never closed;
    # write the same content (record + trailing newline) via a context manager.
    with open(results_dir + '/results.json', 'w') as results_f:
        results_f.write(results_json_str + '\n')
if __name__ == '__main__':
    main()
|
"""
Data comes from Microsoft Research WikiQA Corpus
https://www.microsoft.com/en-us/download/details.aspx?id=52419
"""
import numpy as np
import torch
from torchtext.data import BucketIterator, Field, interleave_keys, RawField
from torchtext.data.dataset import TabularDataset
from torchtext.data.pipeline import Pipeline
def get_class_probs(sim, *args):
    """Convert a scalar similarity label into a two-class probability vector.

    Returns a numpy array of length WikiQA.num_classes holding [1-sim, sim];
    extra positional args (passed by torchtext's Pipeline) are ignored.
    """
    probs = np.zeros(WikiQA.num_classes)
    probs[0] = 1 - sim
    probs[1] = sim
    return probs
class WikiQA(TabularDataset):
    """torchtext TabularDataset for the Microsoft WikiQA answer-sentence
    selection corpus (binary relevance labels)."""
    name = 'wikiqa'
    num_classes = 2
    def __init__(self, path, format, fields, skip_header=True, **kwargs):
        super(WikiQA, self).__init__(path, format, fields, skip_header, **kwargs)
        # We want to keep a raw copy of the sentence for some models and for debugging
        RAW_TEXT_FIELD = RawField()
        for ex in self.examples:
            # snapshot the sentences before any further field processing
            raw_sentence_a, raw_sentence_b = ex.sentence_a[:], ex.sentence_b[:]
            setattr(ex, 'raw_sentence_a', raw_sentence_a)
            setattr(ex, 'raw_sentence_b', raw_sentence_b)
        # both raw fields share the same pass-through RawField instance
        self.fields['raw_sentence_a'] = RAW_TEXT_FIELD
        self.fields['raw_sentence_b'] = RAW_TEXT_FIELD
    @staticmethod
    def sort_key(ex):
        # batch together examples whose two sentences have similar lengths
        return interleave_keys(
            len(ex.sentence_a), len(ex.sentence_b))
    @classmethod
    def splits(cls, text_field, label_field, id_field, path='data/wikiqa', root='', train='train/WikiQA-train.tsv',
               validation='dev/WikiQA-dev.tsv', test='test/WikiQA-test.tsv', **kwargs):
        """Create the train/dev/test datasets from the WikiQA TSV files.

        The TSV column order is: id, question, docid, title, sid, answer
        sentence, label; docid/title columns are dropped (field=None).
        """
        return super(WikiQA, cls).splits(path, root, train, validation, test,
                                         format='tsv',
                                         fields=[('id', id_field), ('sentence_a', text_field),
                                                 ('docid', None), ('document_title', None),
                                                 ('sid', id_field), ('sentence_b', text_field),
                                                 ('relatedness_score', label_field)],
                                         skip_header=True)
    @classmethod
    def iters(cls, batch_size=64, device=-1, shuffle=True, vectors='glove.6B.50d'):
        """Build the vocab (with pretrained vectors) and return
        train/val/test BucketIterators."""
        cls.TEXT = Field(sequential=True, tokenize='spacy', lower=True, batch_first=True)
        # postprocessing turns the scalar label into [P(irrelevant), P(relevant)]
        cls.LABEL = Field(sequential=False, use_vocab=False, batch_first=True, tensor_type=torch.FloatTensor, postprocessing=Pipeline(get_class_probs))
        cls.ID = RawField()
        train, val, test = cls.splits(cls.TEXT, cls.LABEL, cls.ID)
        cls.TEXT.build_vocab(train, vectors=vectors)
        return BucketIterator.splits((train, val, test), batch_size=batch_size, shuffle=shuffle, repeat=False, device=device)
|
"""
Flask app to store and retrieve a list of bubble tea shops
Data is stored in a SQLite database that looks something like the following:
+------+--------------+----------+-------+-------+-----------+------------+--------------+-------------+------+--------+
| Nam | Address | City | State | Zip | Open Hour | Close Hour | Phone | Website |Drink | Rating |
+======+==============+==========+=======+=======+===========+============+==============+=============+======+========+
| Shop | 124 Some Ave | Portland | OR | 97006 | 8:00 AM | 5:00 PM | 530-123-4567 | milktea.com | Taro | 5 |
+------+--------------+----------+-------+-------+-----------+------------+--------------+-------------+------+--------+
This can be created with the following SQL (see bottom of this file):
create table shoplist (name text, street text, city text, state text, zip int, open_hr text, close_hr text,
phone text, website, drink text, rating int);
"""
from datetime import date
from .Model import Model
import sqlite3
DB_FILE = 'shops.db' # file for our Database
class model(Model):
    """SQLite-backed storage for the bubble-tea shop list."""

    def __init__(self):
        """
        Initializing method. Runs a 'COUNT' query to verify the table exists;
        if that raises, the table is created.
        """
        # FIX: the connection (and cursor) used to leak on every instantiation.
        connection = sqlite3.connect(DB_FILE)
        try:
            cursor = connection.cursor()
            try:
                cursor.execute("SELECT COUNT(rowid) FROM shoplist")
            except sqlite3.OperationalError:
                cursor.execute("CREATE TABLE shoplist (name text, street text, city text, state text, zip integer, open_hr text, close_hr text, phone text, drink text, rating integer, website text)")
                connection.commit()
            cursor.close()
        finally:
            connection.close()

    def select(self):
        """
        Gets all rows from the database.
        Each row contains: name, street, city, state, zip, open_hr, close_hr, phone, drink, rating, website
        :return: List of tuples, one per row (empty if the table was just created)
        """
        connection = sqlite3.connect(DB_FILE)
        try:
            cursor = connection.cursor()
            try:
                cursor.execute("SELECT * FROM shoplist")
            except sqlite3.OperationalError:
                # Table missing: create it.  A brand-new table has no rows, so
                # return an empty result instead of fetching from the CREATE
                # cursor (which the old code did).
                cursor.execute("CREATE TABLE shoplist (name text, street text, city text, state text, zip integer, open_hr text, close_hr text, phone text, drink text, rating integer, website text)")
                connection.commit()
                return []
            return cursor.fetchall()
        finally:
            # FIX: the connection used to leak on every call.
            connection.close()

    def insert(self, name, street, city, state, zip, open_hr, close_hr, phone, website, drink, rating):
        """
        Inserts an entry into the database.
        :param name: String
        :param street: String
        :param city: String
        :param state: String
        :param zip: int (parameter name kept for caller compatibility even
            though it shadows the builtin)
        :param open_hr: String
        :param close_hr: String
        :param phone: String
        :param website: String
        :param drink: String
        :param rating: int
        :return: True on success
        :raises: Database errors on connection and insertion
        """
        params = {'name': name, 'street': street, 'city': city, 'state': state, 'zip': zip, 'open_hr': open_hr,
                  'close_hr': close_hr, 'phone': phone, 'website': website, 'drink': drink, 'rating': rating}
        connection = sqlite3.connect(DB_FILE)
        try:
            cursor = connection.cursor()
            cursor.execute("INSERT INTO shoplist (name, street, city, state, zip, open_hr, close_hr, phone, drink, rating, website) "
                           "VALUES (:name, :street, :city, :state, :zip, :open_hr, :close_hr, :phone, :drink, :rating, :website)",
                           params)
            connection.commit()
            cursor.close()
        finally:
            # FIX: close the connection even when execute/commit raises.
            connection.close()
        return True

    def delete(self, name):
        """
        Deletes an entry from the database by shop name.
        :param name: String
        :return: none
        """
        connection = sqlite3.connect(DB_FILE)
        try:
            cursor = connection.cursor()
            cursor.execute("DELETE FROM shoplist where name=?", (name,))
            connection.commit()
        except sqlite3.Error as error:
            # Best-effort: report and continue rather than crash the app.
            print("Failed to delete record from sqlite table", error)
        finally:
            if connection:
                connection.close()
'''
Created on Mar 20, 2014
input: read training data: llExpTerm[query][term] + lParaSet + K
if len(lParaSet) = 1 then, use it train model
else: K fold cv on llExpTerm(query dimension) choose the best parameter, and train
return a SVM model
@author: cx
'''
import site
site.addsitedir('/bos/usr0/cx/PyCode/Geektools')
site.addsitedir('/bos/usr0/cx/PyCode/QueryExpansion')
site.addsitedir('/bos/usr0/cx/PyCode/cxPylib')
from LibSVMRelate.SVMBase import *
from svmutil import *
from CrossValidation.ParameterSet import *
from CrossValidation.RandomSplit import *
from cxBase.base import *
from base.ExpTerm import *
class SVMModelTrainC(object):
    """Train an SVM model from expansion-term training data (Python 2).

    With a single parameter set, trains directly; with several, runs K-fold
    cross-validation over the query dimension to pick the best parameter
    set first (see the module docstring).
    """
    def Init(self):
        # Hook for subclass state; intentionally empty.
        return
    def __init__(self):
        self.Init()
    @staticmethod
    def ShowConf():
        # Hook to print the expected configuration keys; intentionally empty.
        return
    def OneFoldTrainTest(self,lTrainLabel,lTrainData,lTestLabel,lTestData,lSVMPara):
        """Train/test once per candidate parameter on this fold.
        Returns accuracies aligned 1-1 with lSVMPara."""
        #for each para, train and test, record performance in lPerformance (1-1 with lSVMPara)
        lPerformance = []
        SVMProb = svm_problem(lTrainLabel,lTrainData)
        for SVMPara in lSVMPara:
            ParaStr = SVMPara.dump()
            print "start train with para [%s]" %(ParaStr)
            param = svm_parameter(ParaStr)
            SVMModel = svm_train(SVMProb,param)
            p_label,p_acc,p_val = svm_predict(lTestLabel,lTestData,SVMModel)
            lPerformance.append(p_acc[0]) #we are classification
        return lPerformance
    def SingleTrain(self,llExpTerm,SVMPara):
        """Train one model on all data with a single parameter set."""
        lScore,lhFeature = SplitLabelAndFeature(llExpTerm)
        SVMModel = svm_train(lScore,lhFeature,SVMPara.dump())
        return SVMModel
    def Train(self,llExpTerm,lSVMPara,K=0):
        """Train a model; if several parameter sets are given, select the
        best via K-fold cross-validation, then retrain on all data."""
        if len(lSVMPara) == 1:
            return self.SingleTrain(llExpTerm, lSVMPara[0])
        #now we need CV
        llSplit = RandomSplit(llExpTerm,K)
        lPerformance = []
        cnt = 0
        for llTrainExpTerm,llTestExpTerm in llSplit:
            print "start fold [%d]" %(cnt)
            cnt += 1
            lTrainLabel,lTrainData = SplitLabelAndFeature(llTrainExpTerm)
            lTestLabel,lTestData = SplitLabelAndFeature(llTestExpTerm)
            lCurrentFoldPerformance = self.OneFoldTrainTest(lTrainLabel, lTrainData, lTestLabel, lTestData, lSVMPara)
            # Accumulate per-parameter accuracy across folds.
            if [] == lPerformance:
                lPerformance = lCurrentFoldPerformance
            else:
                for i in range(len(lPerformance)):
                    lPerformance[i] += lCurrentFoldPerformance[i]
        # Average over the K folds and keep the best-scoring parameter set.
        for i in range(len(lPerformance)):
            lPerformance[i] /= float(K)
        p = lPerformance.index(max(lPerformance))
        print "best para [%s] accuracy [%f]" %(lSVMPara[p].dump(),lPerformance[p])
        print "start apply on train"
        return self.SingleTrain(llExpTerm, lSVMPara[p])
def SVMModelTrainUnitTest(ConfIn):
    """Driver: read config keys in/paraset/k/out, train, save the model."""
    print "in\nparaset\nk\nout"
    SVMModelTrainC.ShowConf()
    conf = cxConf(ConfIn)
    ExpTermInName = conf.GetConf('in')     # training expansion terms
    ParaSetInName = conf.GetConf('paraset') # candidate SVM parameter sets
    K = int(conf.GetConf('k'))              # number of CV folds
    OutName = conf.GetConf('out')           # model output path
    SVMTrain = SVMModelTrainC()
    llExpTerm = ReadQExpTerms(ExpTermInName)
    lSVMPara = ReadSVMParaSet(ParaSetInName)
    SVMModel = SVMTrain.Train(llExpTerm, lSVMPara, K)
    svm_save_model(OutName,SVMModel)
    return True
|
from datetime import datetime
class Movimentacao:
    """Record of a single movement (transaction), timestamped at creation.

    All fields are write-once: set in ``__init__`` and exposed read-only
    through properties.
    """

    def __init__(self, tipo: str, movimentado, quantidade: int, valor_total: float):
        self.__tipo = tipo
        self.__movimentado = movimentado
        self.__quantidade = quantidade
        self.__valor_total = valor_total
        # Capture the moment the movement was created.
        self.__data = datetime.now()

    @property
    def tipo(self):
        """Movement type."""
        return self.__tipo

    @property
    def movimentado(self):
        """The item that was moved."""
        return self.__movimentado

    @property
    def quantidade(self):
        """Number of units moved."""
        return self.__quantidade

    @property
    def valor_total(self):
        """Total value of the movement."""
        return self.__valor_total

    @property
    def data(self):
        """Timestamp of when the movement was created."""
        return self.__data
"""
MegaCorp wants to give bonuses to its employees based on
how many lines of code they have written. They would like
to give the smallest positive amount to each worker consistent
with the constraint that if a developer has written more lines
of code than their neighbor, they should receive more money.
Given an array representing a line of seats of employees at
MegaCorp, determine how much each one should get paid.
For example, given [10, 40, 200, 1000, 60, 30],
you should return [1, 2, 3, 4, 2, 1].
"""
from typing import List
def adjust_neighbors(
    loc: List[int], res: List[int], index: int, start: int
) -> List[int]:
    """Reset ``res[index]`` to 1 and re-balance entries to its left.

    Walks leftwards from ``index - 1`` down to ``start``, bumping each bonus
    up/down/equal to keep it consistent with its right neighbour, stopping
    early at the first already-consistent pair.  Mutates and returns ``res``.
    """
    res[index] = 1
    j = index - 1
    while j >= start:
        left, right = loc[j], loc[j + 1]
        if left > right and res[j] <= res[j + 1]:
            res[j] = res[j + 1] + 1
        elif left < right and res[j] >= res[j + 1]:
            res[j] = res[j + 1] - 1
        elif left == right and res[j] != res[j + 1]:
            res[j] = res[j + 1]
        else:
            # Pair already consistent: everything further left is too.
            break
        j -= 1
    return res
def smallest_bonus(loc: List[int]) -> List[int]:
    """Return the smallest positive bonus for each seat in ``loc`` such that
    anyone with more lines of code than a neighbour earns strictly more.

    :param loc: lines of code per employee, in seating order
    :return: bonus per employee (all >= 1); ``[]`` for empty input
    """
    # FIX: the original raised IndexError on an empty list (res[0] below).
    if not loc:
        return []

    def _adjust(res: List[int], index: int, start: int) -> None:
        # Reset res[index] to 1 and re-balance neighbours to its left,
        # stopping at ``start`` or at the first already-consistent pair.
        res[index] = 1
        for j in range(index - 1, start - 1, -1):
            if loc[j] > loc[j + 1] and res[j] <= res[j + 1]:
                res[j] = res[j + 1] + 1
            elif loc[j] < loc[j + 1] and res[j] >= res[j + 1]:
                res[j] = res[j + 1] - 1
            elif loc[j] == loc[j + 1] and res[j] != res[j + 1]:
                res[j] = res[j + 1]
            else:
                break

    size = len(loc)
    res = [0] * size
    res[0] = 1
    # First pass (left to right): tentative bonuses, which may dip below 1
    # on descending runs.
    for index in range(1, size):
        if loc[index] > loc[index - 1]:
            res[index] = res[index - 1] + 1
        elif loc[index] < loc[index - 1]:
            res[index] = min(res[index - 1] - 1, 1)
        else:
            res[index] = res[index - 1]
    # Second pass: wherever a bonus fell below 1, pin it to 1 and propagate
    # the correction back to the left.
    min_val = 1
    start = 0
    for index in range(1, size):
        if res[index] < 1:
            if res[index] < min_val:
                min_val = res[index]
                _adjust(res, index, 0)
            else:
                _adjust(res, index, start)
            start = index
    return res
if __name__ == "__main__":
assert smallest_bonus([10, 40, 200, 1000, 60, 30]) == [1, 2, 3, 4, 2, 1]
assert smallest_bonus([10, 40, 200, 1000, 900, 800, 30]) == [1, 2, 3, 4, 3, 2, 1]
assert smallest_bonus(
[10, 40, 80, 40, 50, 60, 70, 80, 90, 30, 20, 10]
) == [1, 2, 3, 1, 2, 3, 4, 5, 6, 3, 2, 1]
|
#!/usr/bin/python
import sys
import os
import json
import parser_api
import lib.log_api as log_api
main_dir = sys.path[0]  # directory containing this script
scenarios_dir = main_dir + os.sep + "scenarios"  # scenario JSON definitions
scripts_dir = main_dir + os.sep + "scripts"      # executable scripts
scripts_parser = scripts_dir + os.sep + "parser" # parser-specific scripts
parserLog = log_api.initLogger("parserLog")      # module-wide logger
class ScenarioBase:
''' a scenario.
'''
def __init__(self):
self.m_name = ""
self.m_vendor = ""
self.m_serial = ""
self.m_mac = ""
self.m_type = ""
self.m_description = ""
self.m_script = ""
self.m_parameters = {}
self.m_scenarios = []
def set_name(self, n):
self.m_name = name
def set_type(self, t):
self.m_type = t
def set_script(self, fname):
self.m_script = fname
def action(self):
''' excute the scenario.
'''
parserLog.info("Begin to action Scenario: %s ." %(self.m_name))
if(0 != len(self.m_script)):#XXX scripts take priority of subscenarios.
submodule = self.m_script.replace(os.sep, '.')
submodule = submodule.replace('.py', '.run(self)')
submodule = submodule[submodule.find("scenarios"):]
exec "import " + submodule[:submodule.find(".run")]
parserLog.info("module to run is: %s ." %(submodule))
exec submodule
else:
for scenario in self.m_scenarios:
parserLog.info("subscenario:{")
scenario.action()
parserLog.info("}")
class ScenarioParser(parser_api.Parser):
    ''' Parses a scenario JSON file into a ScenarioBase object tree. '''
    def __init__(self):
        parser_api.Parser.__init__(self)
        self.m_script = {"":""}  # script descriptor, e.g. {"fname": "..."}
        self.m_vendor = ""
        self.m_serial = ""
        self.m_mac = ""
    def __str__(self):
        ret = parser_api.Parser.__str__(self)
        ret += "m_script:" + str(self.m_script) + "\n"
        ret += "m_reporter:" + str(self.m_reporter) + "\n"
        return ret
    def parse_json(self, fname):
        ''' parse a json file into this object.
        '''
        f = file(fname)
        s = json.load(f)
        f.close()
        self.parse_jsons(s)
    def parse_jsons(self, jstr):
        ''' parse a json string into this object.
        '''
        parser_api.Parser.parse_jsons(self, jstr)
        for key in jstr.keys():
            if "script" == key:
                self.m_script = jstr["script"]
            if "vendor" == key:
                self.m_vendor = jstr["vendor"]
            if "serial" == key:
                self.m_serial = jstr["serial"]
            if "mac" == key:
                self.m_mac = jstr["mac"]
    def parse_scenario(self, s_fname, s_type_nam, s_type_param, s_param):
        ''' Load one scenario JSON file and build its ScenarioBase object.
        Returns None when the file or type name is the "none" placeholder.
        '''
        parserLog.info("Begin to parse scenario.")
        if "none" == s_fname:
            # BUG FIX: was ``return none`` (undefined name -> NameError).
            return None
        s_fname = scenarios_dir + os.sep + s_fname
        if "none" == s_type_nam:
            # BUG FIX: was ``return none`` (undefined name -> NameError).
            return None
        scenario = ScenarioParser()
        scenario.parse_json(s_fname)
        scenario_obj = ScenarioBase()
        scenario_obj.m_name = scenario.m_name
        scenario_obj.m_vendor = scenario.m_vendor
        scenario_obj.m_serial = scenario.m_serial
        scenario_obj.m_mac = scenario.m_mac
        if "seq" == s_type_nam:
            scenario_obj.m_type = scenario.m_type
            scenario_obj.m_description = scenario.m_description
        if scenario.m_script.has_key("fname"):
            # Script path layout: scenarios/<vendor>/<model>/<fname>
            scenario_obj.m_script += scenarios_dir + os.sep
            scenario_obj.m_script += scenario.m_vendor.split(':')[0] + os.sep
            scenario_obj.m_script += scenario.m_vendor.split(':')[1] + os.sep
            scenario_obj.m_script += scenario.m_script["fname"]
        scenario_obj.m_parameters = s_param
        scenario.parse_subscenario(scenario_obj)
        return scenario_obj
    def parse_subscenario(self, obj):
        ''' Recursively parse this scenario's sub-scenarios into obj. '''
        for scenario in self.m_scenarios:
            scenario_fname = ""
            scenario_type_name = ""
            scenario_type_param = ""
            # FIX: scenario_param was unbound when "parameter" was absent.
            scenario_param = {}
            if(0 == len(scenario.keys())):
                # BUG FIX: was ``return none`` (undefined name -> NameError).
                return None
            if("fname" in scenario[scenario.keys()[0]]):
                scenario_fname = scenario[scenario.keys()[0]]["fname"]
            if("type" in scenario[scenario.keys()[0]]):
                if("name" in scenario[scenario.keys()[0]]["type"]):
                    scenario_type_name = scenario[scenario.keys()[0]]["type"]["name"]
                if("parameter" in scenario[scenario.keys()[0]]["type"]):
                    scenario_type_param = scenario[scenario.keys()[0]]["type"]["parameter"]
            if("parameter" in scenario[scenario.keys()[0]]):
                scenario_param = scenario[scenario.keys()[0]]["parameter"]
            parserLog.info("Scenario name:%s " %(scenario_fname))
            sub_scenario = self.parse_scenario(scenario_fname, scenario_type_name, \
                                               scenario_type_param, scenario_param)
            obj.m_scenarios.append(sub_scenario)
class CaseParser(parser_api.Parser):
    ''' Parses a test-case JSON file and executes its scenarios. '''
    def __init__(self):
        parser_api.Parser.__init__(self)
    def __str__(self):
        ret = parser_api.Parser.__str__(self)
        return ret
    def parse_json(self, fname):
        ''' parse a json file into this object.
        '''
        parserLog.info("Begin to parse case.")
        f = file(fname)
        s = json.load(f)
        f.close()
        self.parse_jsons(s)
    def parse_jsons(self, jstr):
        ''' parse a json string into this object.
        '''
        parser_api.Parser.parse_jsons(self, jstr)
    def parse_scenario(self, s_fname, s_type_nam, s_type_param, s_param):
        ''' Load one scenario JSON file and build its ScenarioBase object.
        Returns None when the file or type name is the "none" placeholder.
        '''
        parserLog.info("Begin to parse scenario.")
        if "none" == s_fname:
            # BUG FIX: was ``return none`` (undefined name -> NameError).
            return None
        s_fname = scenarios_dir + os.sep + s_fname
        if "none" == s_type_nam:
            # BUG FIX: was ``return none`` (undefined name -> NameError).
            return None
        scenario_obj = ScenarioBase()
        scenario = ScenarioParser()
        scenario.parse_json(s_fname)
        scenario_obj.m_name = scenario.m_name
        scenario_obj.m_vendor = scenario.m_vendor
        scenario_obj.m_serial = scenario.m_serial
        scenario_obj.m_mac = scenario.m_mac
        if "seq" == s_type_nam:
            scenario_obj.m_type = scenario.m_type
            scenario_obj.m_description = scenario.m_description
        if scenario.m_script.has_key("fname"):
            # Script path layout: scenarios/<vendor>/<model>/<fname>
            scenario_obj.m_script += scenarios_dir + os.sep
            scenario_obj.m_script += scenario.m_vendor.split(':')[0] + os.sep
            scenario_obj.m_script += scenario.m_vendor.split(':')[1] + os.sep
            scenario_obj.m_script += scenario.m_script["fname"]
        scenario_obj.m_parameters = s_param
        scenario.parse_subscenario(scenario_obj)
        return scenario_obj
    def __get_scenario(self,scenario):
        ''' Extract one scenario entry's fname/type/parameters and parse it. '''
        scenario_fname = ""
        scenario_type_name = ""
        scenario_type_param = {}
        # FIX: scenario_param was unbound when "parameter" was absent.
        scenario_param = {}
        if(0 == len(scenario.keys())):
            # BUG FIX: was ``return none`` (undefined name -> NameError).
            return None
        if("fname" in scenario[scenario.keys()[0]]):
            scenario_fname = scenario[scenario.keys()[0]]["fname"]
        if("type" in scenario[scenario.keys()[0]]):
            if("name" in scenario[scenario.keys()[0]]["type"]):
                scenario_type_name = scenario[scenario.keys()[0]]["type"]["name"]
            if("parameter" in scenario[scenario.keys()[0]]["type"]):
                scenario_type_param = scenario[scenario.keys()[0]]["type"]["parameter"]
        if("parameter" in scenario[scenario.keys()[0]]):
            scenario_param = scenario[scenario.keys()[0]]["parameter"]
        parserLog.info("Scenario path:%s " %(scenario_fname))
        scenario_obj = self.parse_scenario(scenario_fname, \
                scenario_type_name, scenario_type_param, scenario_param)
        return scenario_obj
    def action(self):
        ''' Execute the case: run scenarios once ("seq") or N times ("loop"). '''
        parserLog.info("Begin to action Case: %s ." %(self.m_name))
        if "seq" == self.m_type["name"]:
            for scenario in self.m_scenarios:
                scenario_obj = self.__get_scenario(scenario)
                scenario_obj.action()
        elif "loop" == self.m_type["name"]:
            if self.m_type.has_key("parameter"):
                if self.m_type["parameter"].isdigit():
                    count = int(self.m_type["parameter"])
                    for i in range(1,count+1):
                        for scenario in self.m_scenarios:
                            scenario_obj = self.__get_scenario(scenario)
                            scenario_obj.action()
def run(case_dir):
    ''' Execute one case file, or every .json case in a directory. '''
    cases_dir = main_dir + os.sep + case_dir
    # Propagate the configured log level to this module's logger.
    level = log_api.getLogLevel()
    log_api.setLogLevel(parserLog,level)
    def _run_case(fname):
        # One parser instance per case file.
        case = CaseParser()
        case.parse_json(fname)
        parserLog.info("Case name:%s " %(case.m_name))
        case.action()
    if os.path.isfile(cases_dir) and (os.path.splitext(cases_dir)[1] == '.json'):
        # A single case file was given.
        _run_case(cases_dir)
    else:
        # A directory: run every .json case in sorted order.
        for fname in sorted(parser_api.list_type(cases_dir, ['.json'])):
            _run_case(fname)
######Main function.######
if "__main__" == __name__:
    # Default entry point: execute every case JSON under ./cases.
    cases_dir = main_dir + os.sep + "cases"
    run(cases_dir)
|
from typing import List
import torch.nn as nn
import torch.nn.functional as F
class Solution(nn.Module):
    """CNN for facial-keypoint regression on 224x224 grayscale images.

    Five conv/pool stages shrink the input to a 6x6x512 volume, followed by
    two fully connected layers producing 136 values = 68 (x, y) keypoints.
    """

    def __init__(self):
        super(Solution, self).__init__()
        # Stage 1: 224 -> conv5 -> 220 -> pool -> 110, 32 maps
        self.conv1 = nn.Conv2d(1, 32, 5)
        self.pool1 = nn.MaxPool2d(2, 2)
        # Stage 2: 110 -> conv3 -> 108 -> pool -> 54, 64 maps (+ batch norm)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.batch_norm_2 = nn.BatchNorm2d(64)
        # Stage 3: 54 -> conv3 -> 52 -> pool -> 26, 128 maps
        self.conv3 = nn.Conv2d(64, 128, 3)
        self.pool3 = nn.MaxPool2d(2, 2)
        # Stage 4: 26 -> conv3 -> 24 -> pool -> 12, 256 maps (+ batch norm)
        self.conv4 = nn.Conv2d(128, 256, 3)
        self.pool4 = nn.MaxPool2d(2, 2)
        self.batch_norm_4 = nn.BatchNorm2d(256)
        # Stage 5: 12 -> conv1 -> 12 -> pool -> 6, 512 maps
        self.conv5 = nn.Conv2d(256, 512, 1)
        self.pool5 = nn.MaxPool2d(2, 2)
        # Head: flatten 6*6*512 -> 1024 -> 136
        self.fc1 = nn.Linear(6 * 6 * 512, 1024)
        self.fc2 = nn.Linear(1024, 136)
        # Dropout probability grows with depth to fight overfitting.
        self.drop1 = nn.Dropout(p=0.1)
        self.drop2 = nn.Dropout(p=0.2)
        self.drop3 = nn.Dropout(p=0.3)
        self.drop4 = nn.Dropout(p=0.4)
        self.drop5 = nn.Dropout(p=0.5)
        self.drop6 = nn.Dropout(p=0.6)

    def forward(self, x):
        """Map (N, 1, 224, 224) input to (N, 68, 2) keypoint coordinates."""
        n = x.shape[0]
        stages = (
            (self.conv1, self.pool1, None, self.drop1),
            (self.conv2, self.pool2, self.batch_norm_2, self.drop2),
            (self.conv3, self.pool3, None, self.drop3),
            (self.conv4, self.pool4, self.batch_norm_4, self.drop4),
            (self.conv5, self.pool5, None, self.drop5),
        )
        for conv, pool, norm, drop in stages:
            x = pool(F.relu(conv(x)))
            if norm is not None:
                x = norm(x)
            x = drop(x)
        x = x.view(n, -1)
        x = self.drop6(F.relu(self.fc1(x)))
        return self.fc2(x).view(n, -1, 2)
class Final(nn.Module):
    """Sequential variant of the keypoint CNN: five conv stages then a
    two-layer regression head emitting 68 (x, y) keypoints."""

    def __init__(self):
        super(Final, self).__init__()

        def stage(c_in, c_out, kernel, drop_p):
            # conv -> ReLU -> 2x2 max-pool -> dropout
            return [
                nn.Conv2d(c_in, c_out, kernel),
                nn.ReLU(),
                nn.MaxPool2d(2, 2),
                nn.Dropout(p=drop_p),
            ]

        # Spatial sizes per stage: 224 -> 110 -> 54 -> 26 -> 12 -> 6
        self.convolution_layers = nn.Sequential(
            *stage(1, 32, 5, 0.1),
            *stage(32, 64, 3, 0.2),
            *stage(64, 128, 3, 0.3),
            *stage(128, 256, 3, 0.4),
            *stage(256, 512, 1, 0.5),
        )
        self.linear_layers = nn.Sequential(
            nn.Linear(6 * 6 * 512, 1024),
            nn.ReLU(),
            nn.Dropout(p=0.6),
            nn.Linear(1024, 136),
        )

    def forward(self, x):
        """(N, 1, 224, 224) -> (N, 68, 2)."""
        n = x.shape[0]
        features = self.convolution_layers(x).view(n, -1)
        return self.linear_layers(features).view(n, -1, 2)
class Net(nn.Module):
    """Configurable keypoint CNN: one conv/ReLU/BN/pool/dropout stage per
    entry in ``filter_size``/``feature_maps``, then a linear head."""

    def __init__(self, image_size: int, filter_size: List[int], feature_maps: List[int],
                 second_linear_layer: bool = True):
        super(Net, self).__init__()
        self.image_size = image_size
        self.filter_size = filter_size
        self.features = feature_maps
        # Input channels per stage: grayscale first, then previous stage's maps.
        self.input_size = [1] + feature_maps[:-1]

        spatial = 224  # spatial size tracked through valid conv + 2x2 pool
        stacked = []
        drop_p = 0.1
        for c_in, c_out, kernel in zip(self.input_size, self.features, self.filter_size):
            spatial = int((spatial - kernel + 1) / 2)
            stacked.extend([
                nn.Conv2d(c_in, c_out, kernel),
                nn.ReLU(),
                nn.BatchNorm2d(c_out),
                nn.MaxPool2d(2, 2),
                nn.Dropout(p=drop_p),
            ])
            drop_p += 0.1  # deeper stages drop more aggressively
        self.convolution_layers = nn.Sequential(*stacked)

        flat = spatial ** 2 * self.features[-1]
        if second_linear_layer:
            self.linear_layers = nn.Sequential(
                nn.Linear(flat, 1024),
                nn.ReLU(),
                nn.Dropout(p=0.6),
                nn.Linear(1024, 136),
            )
        else:
            self.linear_layers = nn.Linear(flat, 136)

    def forward(self, x):
        """(N, 1, 224, 224) -> (N, 68, 2)."""
        n = x.shape[0]
        out = self.convolution_layers(x).view(n, -1)
        return self.linear_layers(out).view(n, -1, 2)
if __name__ == '__main__':
    import torch
    import pandas as pd
    from torch.optim import Adam

    # Benchmark a range of architectures: parameter count plus wall-clock
    # time of one forward pass and one backward/optimizer step each.
    parameter = []
    forward_time = []
    backward_time = []
    feature_maps = []
    filter_sizes = []
    second_linear = []
    models = []
    models.append({'Filter Sizes': [3], 'Feature Maps': [128], 'Second Linear Layer': True})
    models.append({'Filter Sizes': [3], 'Feature Maps': [128], 'Second Linear Layer': False})
    models.append({'Filter Sizes': [5, 3], 'Feature Maps': [32, 64], 'Second Linear Layer': True})
    models.append({'Filter Sizes': [5, 3], 'Feature Maps': [32, 64], 'Second Linear Layer': False})
    models.append({'Filter Sizes': [3, 1], 'Feature Maps': [32, 64], 'Second Linear Layer': True})
    models.append({'Filter Sizes': [3, 1], 'Feature Maps': [32, 64], 'Second Linear Layer': False})
    models.append({'Filter Sizes': [5, 3, 1], 'Feature Maps': [16, 32, 64], 'Second Linear Layer': True})
    models.append({'Filter Sizes': [5, 3, 1], 'Feature Maps': [16, 32, 64], 'Second Linear Layer': False})
    models.append({'Filter Sizes': [5, 3, 3, 1], 'Feature Maps': [16, 32, 64, 128], 'Second Linear Layer': True})
    models.append({'Filter Sizes': [5, 3, 3, 1], 'Feature Maps': [16, 32, 64, 128], 'Second Linear Layer': False})
    models.append({'Filter Sizes': [5, 5, 3, 3, 1], 'Feature Maps': [16, 32, 64, 128, 256],
                   'Second Linear Layer': True})
    models.append({'Filter Sizes': [5, 5, 3, 3, 1], 'Feature Maps': [16, 32, 64, 128, 256],
                   'Second Linear Layer': False})
    models.append({'Filter Sizes': [5, 5, 3, 3, 1], 'Feature Maps': [32, 64, 128, 256, 512],
                   'Second Linear Layer': True})
    models.append({'Filter Sizes': [5, 5, 3, 3, 1], 'Feature Maps': [32, 64, 128, 256, 512],
                   'Second Linear Layer': False})
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)
    for model in models:
        # --- Instantiate ---
        net = Net(image_size=224, filter_size=model['Filter Sizes'], feature_maps=model['Feature Maps'],
                  second_linear_layer=model['Second Linear Layer'])
        criterion = nn.MSELoss()
        optimizer = Adam(net.parameters())
        n_param = sum(p.numel() for p in net.parameters())
        parameter.append(int(n_param / 1e6))
        # --- Forward Step ---
        t = torch.rand([20, 1, 224, 224])
        start = pd.Timestamp.now()
        output = net(t)
        # BUG FIX: ``.microseconds`` only returns the sub-second component;
        # use total_seconds() so durations >= 1 s are measured correctly.
        time_delta = (pd.Timestamp.now() - start).total_seconds() * 1000
        assert output.shape == (20, 68, 2)
        forward_time.append(time_delta)
        # --- Backward Step ---
        start = pd.Timestamp.now()
        loss = criterion(output, torch.rand([20, 68, 2]))
        # BUG FIX: gradients were never computed before — optimizer.step()
        # was a no-op and the "backward time" measured nothing.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        time_delta = (pd.Timestamp.now() - start).total_seconds() * 1000
        backward_time.append(time_delta)
        feature_maps.append(model['Feature Maps'])
        filter_sizes.append(model['Filter Sizes'])
        second_linear.append(model['Second Linear Layer'])
    df = pd.DataFrame({'Feature Maps': feature_maps, 'Filter Sizes': filter_sizes, '# Parameter [Mio]': parameter,
                       'Second Linear Layer': second_linear,
                       'Forward Time [ms]': forward_time, 'Backward Time [ms]': backward_time})
    print(df)
|
from os import listdir
import os
from os.path import isfile, join
# All plain files directly under ./fonts (the font files to process).
onlyfiles = [f for f in listdir("fonts") if isfile(join("fonts", f))]
class File:
    """Derives CSS @font-face attributes from a font file name.

    Expects names shaped like ``Family-Variation.ext`` where ext is otf/ttf;
    underscores in the variation become spaces.  Weight and style are
    heuristics based on keywords in the variation.
    """

    def __init__(self, filename):
        pieces = filename.split(".")
        self.extension = pieces[1]
        name_parts = pieces[0].split("-")
        self.family = name_parts[0]
        if len(name_parts) < 2:
            raise Exception("fontfile " + filename + " missing variation like -Regular")
        self.variation = name_parts[1].replace("_", " ")
        # Map the file extension to the CSS format keyword.
        type_by_ext = {"otf": "opentype", "ttf": "truetype"}
        if self.extension not in type_by_ext:
            raise Exception("unknown filetype")
        self.type = type_by_ext[self.extension]
        # Defaults; refined below from the variation keywords.
        self.weight = "500"
        self.style = "normal"
        self.source = "url(\"fonts/" + filename + "\") format('" + self.type + "')"
        lowered = self.variation.lower()
        if "italic" in lowered:
            self.style = "italic"
        elif "oblique" in lowered:
            self.style = "oblique"
        if "bold" in lowered:
            self.weight = "900"
        elif "light" in lowered or "thin" in lowered:
            self.weight = "300"
class Font:
    """A font family together with its collected variations."""

    def __init__(self, id, family):
        self.id = id
        self.family = family
        # Variation objects, appended via addVariation.
        self.variations = []

    def addVariation(self, variation):
        """Register one Variation under this family."""
        self.variations.append(variation)
class Variation:
    """One style/weight variant of a font family."""

    def __init__(self, id, style, weight, name):
        self.id, self.style, self.weight, self.name = id, style, weight, name
result = ""
result2 = "[\n"
files = []
fonts = []
try:
for file in onlyfiles:
files.append(File(file))
for file in files:
print(file.extension + " " + file.family + " " + file.variation + " " + file.type + " " + file.style + " " + file.weight)
for file in files:
result += "@font-face {\n"
result += "\tfont-family: '" + file.family + "';\n"
result += "\tfont-style: " + file.style + ";\n"
result += "\tfont-weight: " + file.weight + ";\n"
result += "\tsrc: " + file.source + ";\n"
result += "}\n"
added = []
for file in files:
if(file.family not in added):
fonts.append(Font(len(added) + 1, file.family));
added.append(file.family);
for i in range(0, len(fonts)):
if(fonts[i].family == file.family):
fonts[i].addVariation(Variation(len(fonts[i].variations) + 1, file.style, file.weight, file.variation));
break;
for font in fonts:
result2 += "new Font(" + str(font.id) + ", \"" + font.family + "\", [\n";
for v in font.variations:
result2 += "new Variation(" + str(v.id) + ", \"" + v.style + "\", " + v.weight + ", \"" + v.name + "\"),\n";
result2 = result2[:-2] + "]),\n";
result2 = result2[:-2] + "\n];\n";
fontFile = open("generated_fonts.css", "w+");
fontFile.write(result);
fontFile.close();
jsFile = open("generated_js.css", "w+");
jsFile.write(result2);
jsFile.close();
except Exception as e:
print(e)
input("\nclick any key to continue..."); |
#encoding=utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
# Absolute path of the grandparent directory of this file (project root)
parentDirPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print parentDirPath
# Is the absolute path /config/PageElementLocator.ini???
# Absolute path of the .ini file holding page-element locator expressions
pageElementLocatorPath = parentDirPath + u"/config/PageElementLocator.ini"
# Absolute path of the test-data workbook
#dataFilePath = parentDirPath + u"/testData/126邮箱联系人3.xlsx"
dataFilePath = u"/Users/wangyanhua/Documents/学习/Python/sele_frame/chapt_15/testData/126邮箱联系人3.xlsx"
# Column numbers for the "126 accounts" worksheet
account_username = 2
account_password = 3
account_dataBook = 4
account_isExecute = 5
account_testResult = 6
# Column numbers for the "contacts" worksheet
contacts_contactPersonName = 2
contacts_contactPersonEmail = 3
contacts_isStr = 4
contacts_contactPersonMobile = 5
contacts_contactPersonComment = 6
contacts_assertKeyWords = 7
contacts_isExecute = 8
contacts_runTime = 9
contacts_testResult = 10
|
import AppKit
from PyObjCTools.TestSupport import TestCase
import objc
class TestNSColorPickingHelper(AppKit.NSObject):
    """Stub implementing the NSColorPicking* selectors; only the PyObjC
    method signatures matter here, the return values are placeholders."""
    def supportsMode_(self, m):
        return 0
    def provideNewView_(self, i):
        return None
    def initWithPickerMask_colorPanel_(self, m, p):
        return 1
class TestNSColorPicking(TestCase):
    def testMethods(self):
        # Verify the bridge metadata registered for the helper's selectors:
        # BOOL result, BOOL first argument, and NSUInteger mask argument.
        self.assertResultIsBOOL(TestNSColorPickingHelper.supportsMode_)
        self.assertArgIsBOOL(TestNSColorPickingHelper.provideNewView_, 0)
        self.assertArgHasType(
            TestNSColorPickingHelper.initWithPickerMask_colorPanel_,
            0,
            objc._C_NSUInteger,
        )
    def testProtocols(self):
        # Both formal NSColorPicking protocols must be exposed by PyObjC.
        self.assertProtocolExists("NSColorPickingCustom")
        self.assertProtocolExists("NSColorPickingDefault")
|
#
# The Python Imaging Library
# $Id$
#
# JPEG2000 file handling
#
# History:
# 2014-03-12 ajh Created
# 2021-06-30 rogermb Extract dpi information from the 'resc' header box
#
# Copyright (c) 2014 Coriolis Systems Limited
# Copyright (c) 2014 Alastair Houghton
#
# See the README file for information on usage and redistribution.
#
import io
import os
import struct
from . import Image, ImageFile, _binary
class BoxReader:
    """
    A small helper class to read fields stored in JPEG2000 header boxes
    and to easily step into and read sub-boxes.
    """

    def __init__(self, fp, length=-1):
        self.fp = fp
        self.has_length = length >= 0
        self.length = length
        # -1 means "not inside a box"; otherwise bytes left in the box payload.
        self.remaining_in_box = -1

    def _can_read(self, num_bytes):
        # Never read past the advertised stream length.
        if self.has_length and self.fp.tell() + num_bytes > self.length:
            return False
        # Outside any box there is nothing else to enforce.
        if self.remaining_in_box < 0:
            return True
        # Inside a box: the read must fit within the remaining payload.
        return num_bytes <= self.remaining_in_box

    def _read_bytes(self, num_bytes):
        if not self._can_read(num_bytes):
            raise SyntaxError("Not enough data in header")
        data = self.fp.read(num_bytes)
        if len(data) < num_bytes:
            raise OSError(
                f"Expected to read {num_bytes} bytes but only got {len(data)}."
            )
        if self.remaining_in_box > 0:
            self.remaining_in_box -= num_bytes
        return data

    def read_fields(self, field_format):
        """Read and unpack one struct-formatted run of fields."""
        raw = self._read_bytes(struct.calcsize(field_format))
        return struct.unpack(field_format, raw)

    def read_boxes(self):
        """Return a sub-reader over the current box's remaining payload."""
        payload_size = self.remaining_in_box
        payload = self._read_bytes(payload_size)
        return BoxReader(io.BytesIO(payload), payload_size)

    def has_next_box(self):
        if not self.has_length:
            return True
        return self.fp.tell() + self.remaining_in_box < self.length

    def next_box_type(self):
        """Advance to the next box and return its 4-byte type tag."""
        # Skip whatever payload of the current box was left unread.
        if self.remaining_in_box > 0:
            self.fp.seek(self.remaining_in_box, os.SEEK_CUR)
            self.remaining_in_box = -1
        # Box header: 32-bit length + 4-byte type; length 1 signals an
        # extended 64-bit length field.
        lbox, tbox = self.read_fields(">I4s")
        hlen = 8
        if lbox == 1:
            (lbox,) = self.read_fields(">Q")
            hlen = 16
        if lbox < hlen or not self._can_read(lbox - hlen):
            raise SyntaxError("Invalid header length")
        self.remaining_in_box = lbox - hlen
        return tbox
def _parse_codestream(fp):
"""Parse the JPEG 2000 codestream to extract the size and component
count from the SIZ marker segment, returning a PIL (size, mode) tuple."""
hdr = fp.read(2)
lsiz = _binary.i16be(hdr)
siz = hdr + fp.read(lsiz - 2)
lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, _, _, _, _, csiz = struct.unpack_from(
">HHIIIIIIIIH", siz
)
ssiz = [None] * csiz
xrsiz = [None] * csiz
yrsiz = [None] * csiz
for i in range(csiz):
ssiz[i], xrsiz[i], yrsiz[i] = struct.unpack_from(">BBB", siz, 36 + 3 * i)
size = (xsiz - xosiz, ysiz - yosiz)
if csiz == 1:
if (yrsiz[0] & 0x7F) > 8:
mode = "I;16"
else:
mode = "L"
elif csiz == 2:
mode = "LA"
elif csiz == 3:
mode = "RGB"
elif csiz == 4:
mode = "RGBA"
else:
mode = None
return size, mode
def _res_to_dpi(num, denom, exp):
"""Convert JPEG2000's (numerator, denominator, exponent-base-10) resolution,
calculated as (num / denom) * 10^exp and stored in dots per meter,
to floating-point dots per inch."""
if denom != 0:
return (254 * num * (10**exp)) / (10000 * denom)
def _parse_jp2_header(fp):
    """Parse the JP2 header box to extract size, component count,
    color space information, and optionally DPI information,
    returning a (size, mode, mimetype, dpi) tuple."""
    # Find the JP2 header box
    reader = BoxReader(fp)
    header = None
    mimetype = None
    while reader.has_next_box():
        tbox = reader.next_box_type()
        if tbox == b"jp2h":
            header = reader.read_boxes()
            break
        elif tbox == b"ftyp":
            # The brand field distinguishes extended JPX from plain JP2.
            if reader.read_fields(">4s")[0] == b"jpx ":
                mimetype = "image/jpx"
    size = None
    mode = None
    bpc = None
    nc = None
    dpi = None  # 2-tuple of DPI info, or None
    # NOTE(review): if no jp2h box was found, ``header`` is still None here
    # and the loop below raises AttributeError rather than SyntaxError —
    # confirm that is acceptable for truncated/malformed files.
    while header.has_next_box():
        tbox = header.next_box_type()
        if tbox == b"ihdr":
            # Image header: height, width, component count, bits-per-component
            # (bpc stores depth - 1 with the sign flag in the top bit).
            height, width, nc, bpc = header.read_fields(">IIHB")
            size = (width, height)
            if nc == 1 and (bpc & 0x7F) > 8:
                mode = "I;16"
            elif nc == 1:
                mode = "L"
            elif nc == 2:
                mode = "LA"
            elif nc == 3:
                mode = "RGB"
            elif nc == 4:
                mode = "RGBA"
        elif tbox == b"res ":
            # Resolution super-box: look for the capture-resolution sub-box.
            res = header.read_boxes()
            while res.has_next_box():
                tres = res.next_box_type()
                if tres == b"resc":
                    vrcn, vrcd, hrcn, hrcd, vrce, hrce = res.read_fields(">HHHHBB")
                    hres = _res_to_dpi(hrcn, hrcd, hrce)
                    vres = _res_to_dpi(vrcn, vrcd, vrce)
                    if hres is not None and vres is not None:
                        dpi = (hres, vres)
                    break
    if size is None or mode is None:
        msg = "Malformed JP2 header"
        raise SyntaxError(msg)
    return size, mode, mimetype, dpi
##
# Image plugin for JPEG2000 images.
class Jpeg2KImageFile(ImageFile.ImageFile):
    """Image plugin for JPEG 2000 (ISO 15444) images.

    Handles both raw "j2k" codestreams and "jp2"/"jpx" box-based
    containers; actual decoding is delegated to the "jpeg2k" tile codec.
    """

    format = "JPEG2000"
    format_description = "JPEG 2000 (ISO 15444)"

    def _open(self):
        # A raw codestream begins with the SOC + SIZ marker pair.
        sig = self.fp.read(4)
        if sig == b"\xff\x4f\xff\x51":
            self.codec = "j2k"
            self._size, self.mode = _parse_codestream(self.fp)
        else:
            sig = sig + self.fp.read(8)

            if sig == b"\x00\x00\x00\x0cjP  \x0d\x0a\x87\x0a":
                # JP2 signature box: parse the box structure for metadata.
                self.codec = "jp2"
                header = _parse_jp2_header(self.fp)
                self._size, self.mode, self.custom_mimetype, dpi = header
                if dpi is not None:
                    self.info["dpi"] = dpi
                # If a codestream box follows immediately, scan it for a
                # COM marker so it can be exposed as info["comment"].
                if self.fp.read(12).endswith(b"jp2c\xff\x4f\xff\x51"):
                    self._parse_comment()
            else:
                msg = "not a JPEG 2000 file"
                raise SyntaxError(msg)

        if self.size is None or self.mode is None:
            msg = "unable to determine size/mode"
            raise SyntaxError(msg)

        self._reduce = 0
        self.layers = 0

        fd = -1
        length = -1

        # Prefer handing the decoder a real OS file descriptor plus the
        # file length; fall back to seek-based length, or -1 if unknown.
        try:
            fd = self.fp.fileno()
            length = os.fstat(fd).st_size
        except Exception:
            fd = -1
            try:
                pos = self.fp.tell()
                self.fp.seek(0, io.SEEK_END)
                length = self.fp.tell()
                self.fp.seek(pos)
            except Exception:
                length = -1

        self.tile = [
            (
                "jpeg2k",
                (0, 0) + self.size,
                0,
                (self.codec, self._reduce, self.layers, fd, length),
            )
        ]

    def _parse_comment(self):
        """Scan codestream markers after SIZ for a comment (COM, 0x64) marker
        and store its payload in info["comment"]."""
        # First marker segment after SOC is SIZ; skip over its body.
        hdr = self.fp.read(2)
        length = _binary.i16be(hdr)
        self.fp.seek(length - 2, os.SEEK_CUR)

        while True:
            marker = self.fp.read(2)
            if not marker:
                break
            typ = marker[1]
            if typ in (0x90, 0xD9):
                # Start of tile or end of codestream
                break
            hdr = self.fp.read(2)
            length = _binary.i16be(hdr)
            if typ == 0x64:
                # Comment
                self.info["comment"] = self.fp.read(length - 2)[2:]
                break
            else:
                self.fp.seek(length - 2, os.SEEK_CUR)

    @property
    def reduce(self):
        # https://github.com/python-pillow/Pillow/issues/4343 found that the
        # new Image 'reduce' method was shadowed by this plugin's 'reduce'
        # property. This attempts to allow for both scenarios
        return self._reduce or super().reduce

    @reduce.setter
    def reduce(self, value):
        self._reduce = value

    def load(self):
        """Decode the image, first applying any pending reduce/layers
        settings to the reported size and the tile descriptor."""
        if self.tile and self._reduce:
            # Each reduce level halves the output; round half up.
            power = 1 << self._reduce
            adjust = power >> 1
            self._size = (
                int((self.size[0] + adjust) / power),
                int((self.size[1] + adjust) / power),
            )

            # Update the reduce and layers settings
            t = self.tile[0]
            t3 = (t[3][0], self._reduce, self.layers, t[3][3], t[3][4])
            self.tile = [(t[0], (0, 0) + self.size, t[2], t3)]

        return ImageFile.ImageFile.load(self)
def _accept(prefix):
return (
prefix[:4] == b"\xff\x4f\xff\x51"
or prefix[:12] == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a"
)
# ------------------------------------------------------------
# Save support
def _save(im, fp, filename):
    """Save *im* as JPEG 2000, either as a raw codestream ("j2k") or a JP2
    container, marshalling all encoder options from im.encoderinfo into
    the positional im.encoderconfig tuple consumed by the C encoder."""
    # Get the keyword arguments
    info = im.encoderinfo

    # Raw codestream for .j2k files or when explicitly requested.
    if filename.endswith(".j2k") or info.get("no_jp2", False):
        kind = "j2k"
    else:
        kind = "jp2"

    offset = info.get("offset", None)
    tile_offset = info.get("tile_offset", None)
    tile_size = info.get("tile_size", None)
    quality_mode = info.get("quality_mode", "rates")
    quality_layers = info.get("quality_layers", None)
    # Validate early: the C layer would fail less clearly on a bad type.
    if quality_layers is not None and not (
        isinstance(quality_layers, (list, tuple))
        and all(
            [
                isinstance(quality_layer, (int, float))
                for quality_layer in quality_layers
            ]
        )
    ):
        msg = "quality_layers must be a sequence of numbers"
        raise ValueError(msg)

    num_resolutions = info.get("num_resolutions", 0)
    cblk_size = info.get("codeblock_size", None)
    precinct_size = info.get("precinct_size", None)
    irreversible = info.get("irreversible", False)
    progression = info.get("progression", "LRCP")
    cinema_mode = info.get("cinema_mode", "no")
    mct = info.get("mct", 0)
    signed = info.get("signed", False)
    comment = info.get("comment")
    if isinstance(comment, str):
        # The COM marker payload must be bytes.
        comment = comment.encode()
    plt = info.get("plt", False)

    # Pass a real file descriptor when available so the encoder can write
    # directly; -1 tells it to go through the Python file object instead.
    fd = -1
    if hasattr(fp, "fileno"):
        try:
            fd = fp.fileno()
        except Exception:
            fd = -1

    # NOTE: positional tuple read by the native "jpeg2k" encoder --
    # the field order must not change.
    im.encoderconfig = (
        offset,
        tile_offset,
        tile_size,
        quality_mode,
        quality_layers,
        num_resolutions,
        cblk_size,
        precinct_size,
        irreversible,
        progression,
        cinema_mode,
        mct,
        signed,
        fd,
        comment,
        plt,
    )

    ImageFile._save(im, fp, [("jpeg2k", (0, 0) + im.size, 0, kind)])
# ------------------------------------------------------------
# Registry stuff
# Register open/save handlers, file extensions and MIME type with Pillow's
# global codec registry so Image.open()/save() dispatch to this plugin.
Image.register_open(Jpeg2KImageFile.format, Jpeg2KImageFile, _accept)
Image.register_save(Jpeg2KImageFile.format, _save)
Image.register_extensions(
    Jpeg2KImageFile.format, [".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c"]
)
Image.register_mime(Jpeg2KImageFile.format, "image/jp2")
|
""" Preprocessing for semantic image segmentation
adapted from: https://github.com/aurora95/Keras-FCN
"""
from keras.preprocessing.image import Iterator
from keras.applications.imagenet_utils import preprocess_input
from .. import backend as K
from PIL import Image
import numpy as np
import os
def center_crop(x, center_crop_size, data_format, **kwargs):
    """Crop a window of ``center_crop_size`` (height, width) from the
    middle of image ``x``; supports 'channels_first' and 'channels_last'.
    """
    if data_format == 'channels_first':
        mid_h, mid_w = x.shape[1] // 2, x.shape[2] // 2
    elif data_format == 'channels_last':
        mid_h, mid_w = x.shape[0] // 2, x.shape[1] // 2
    half_h, half_w = center_crop_size[0] // 2, center_crop_size[1] // 2
    # Split odd crop sizes so the extra pixel goes to the bottom/right.
    top, bottom = mid_h - half_h, mid_h + (center_crop_size[0] - half_h)
    left, right = mid_w - half_w, mid_w + (center_crop_size[1] - half_w)
    if data_format == 'channels_first':
        return x[:, top:bottom, left:right]
    elif data_format == 'channels_last':
        return x[top:bottom, left:right, :]
def pair_center_crop(x, y, center_crop_size, data_format, **kwargs):
    """Crop the same centered window from an image ``x`` and its label
    ``y`` (assumed to share spatial dimensions)."""
    if data_format == 'channels_first':
        mid_h, mid_w = x.shape[1] // 2, x.shape[2] // 2
    elif data_format == 'channels_last':
        mid_h, mid_w = x.shape[0] // 2, x.shape[1] // 2
    half_h, half_w = center_crop_size[0] // 2, center_crop_size[1] // 2
    top, bottom = mid_h - half_h, mid_h + (center_crop_size[0] - half_h)
    left, right = mid_w - half_w, mid_w + (center_crop_size[1] - half_w)
    if data_format == 'channels_first':
        return (x[:, top:bottom, left:right],
                y[:, top:bottom, left:right])
    elif data_format == 'channels_last':
        return (x[top:bottom, left:right, :],
                y[top:bottom, left:right, :])
def random_crop(x, random_crop_size, data_format, sync_seed=None, **kwargs):
    """Crop a randomly positioned window of ``random_crop_size`` from ``x``.

    Seeding the global NumPy RNG with ``sync_seed`` lets callers reproduce
    the same crop (e.g. to keep an image/label pair aligned).
    """
    np.random.seed(sync_seed)
    if data_format == 'channels_first':
        h, w = x.shape[1], x.shape[2]
    elif data_format == 'channels_last':
        h, w = x.shape[0], x.shape[1]
    # NOTE: offsets are drawn from half the available slack, mirroring the
    # original Keras-FCN behaviour.
    max_off_h = (h - random_crop_size[0]) // 2
    max_off_w = (w - random_crop_size[1]) // 2
    off_h = 0 if max_off_h == 0 else np.random.randint(max_off_h)
    off_w = 0 if max_off_w == 0 else np.random.randint(max_off_w)
    top, bottom = off_h, off_h + random_crop_size[0]
    left, right = off_w, off_w + random_crop_size[1]
    if data_format == 'channels_first':
        return x[:, top:bottom, left:right]
    elif data_format == 'channels_last':
        return x[top:bottom, left:right, :]
def pair_random_crop(x, y, random_crop_size, data_format, sync_seed=None, **kwargs):
    """Crop the same randomly positioned window from image ``x`` and label
    ``y``; ``sync_seed`` seeds the global NumPy RNG for reproducibility."""
    np.random.seed(sync_seed)
    if data_format == 'channels_first':
        h, w = x.shape[1], x.shape[2]
    elif data_format == 'channels_last':
        h, w = x.shape[0], x.shape[1]
    # Offsets drawn from half the slack, as in the sibling random_crop().
    rangeh = (h - random_crop_size[0]) // 2
    rangew = (w - random_crop_size[1]) // 2
    offseth = 0 if rangeh == 0 else np.random.randint(rangeh)
    offsetw = 0 if rangew == 0 else np.random.randint(rangew)
    h_start, h_end = offseth, offseth + random_crop_size[0]
    w_start, w_end = offsetw, offsetw + random_crop_size[1]
    if data_format == 'channels_first':
        # BUG FIX: the label's width axis was previously sliced with
        # h_start:h_end, returning a crop misaligned with the image
        # whenever the two offsets differed.
        return (x[:, h_start:h_end, w_start:w_end],
                y[:, h_start:h_end, w_start:w_end])
    elif data_format == 'channels_last':
        return (x[h_start:h_end, w_start:w_end, :],
                y[h_start:h_end, w_start:w_end, :])
class SegDirectoryIterator(Iterator):
    '''Iterator yielding (image, label) batches for semantic segmentation.

    Users need to ensure that all files exist.
    Label images should be png images where pixel values represents class number.

    find images -name *.jpg > images.txt
    find labels -name *.png > labels.txt

    for a file name 2011_002920.jpg, each row should contain 2011_002920

    file_path: location of train.txt, or val.txt in PASCAL VOC2012 format,
        listing image file path components without extension
    data_dir: location of image files referred to by file in file_path
    label_dir: location of label files
    data_suffix: image file extension, such as `.jpg` or `.png`
    label_suffix: label file suffix, such as `.png`, or `.npy`
    loss_shape: shape to use when applying loss function to the label data
    '''

    def __init__(self, file_path, seg_data_generator,
                 data_dir, data_suffix,
                 label_dir, label_suffix, classes, ignore_label=255,
                 crop_mode='none', label_cval=255, pad_size=None,
                 target_size=None, color_mode='rgb',
                 data_format='default', class_mode='sparse',
                 batch_size=1, shuffle=True, seed=None,
                 save_to_dir=None, save_prefix='', save_format='jpeg',
                 loss_shape=None):
        if data_format == 'default':
            data_format = K.image_data_format()
        self.file_path = file_path
        self.data_dir = data_dir
        self.data_suffix = data_suffix
        self.label_suffix = label_suffix
        self.label_dir = label_dir
        self.classes = classes
        self.seg_data_generator = seg_data_generator
        # BUG FIX: tuple(None) raised TypeError even though target_size=None
        # is an explicitly supported configuration (see the batch_size check
        # below).
        self.target_size = tuple(target_size) if target_size else None
        self.ignore_label = ignore_label
        self.crop_mode = crop_mode
        self.label_cval = label_cval
        self.pad_size = pad_size
        if color_mode not in {'rgb', 'grayscale'}:
            raise ValueError('Invalid color mode:', color_mode,
                             '; expected "rgb" or "grayscale".')
        self.color_mode = color_mode
        self.data_format = data_format
        self.nb_label_ch = 1  # labels are single-channel class maps
        self.loss_shape = loss_shape

        if (self.label_suffix == '.npy') or (self.label_suffix == 'npy'):
            self.label_file_format = 'npy'
        else:
            self.label_file_format = 'img'
        if target_size:
            if self.color_mode == 'rgb':
                if self.data_format == 'channels_last':
                    self.image_shape = self.target_size + (3,)
                else:
                    self.image_shape = (3,) + self.target_size
            else:
                if self.data_format == 'channels_last':
                    self.image_shape = self.target_size + (1,)
                else:
                    self.image_shape = (1,) + self.target_size
            if self.data_format == 'channels_last':
                self.label_shape = self.target_size + (self.nb_label_ch,)
            else:
                self.label_shape = (self.nb_label_ch,) + self.target_size
        elif batch_size != 1:
            # Without a fixed target size each sample keeps its own shape,
            # so batches larger than one cannot be stacked.
            raise ValueError(
                'Batch size must be 1 when target image size is undetermined')
        else:
            self.image_shape = None
            self.label_shape = None
        if class_mode not in {'sparse', None}:
            raise ValueError('Invalid class_mode:', class_mode,
                             '; expected one of '
                             '"sparse", or None.')
        self.class_mode = class_mode
        if save_to_dir:
            # Palette is captured lazily from the first label image so saved
            # debug labels reuse the dataset's own colour map.
            self.palette = None
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format

        white_list_formats = {'png', 'jpg', 'jpeg', 'bmp', 'npy'}

        # build lists for data files and label files
        self.data_files = []
        self.label_files = []
        with open(file_path) as fp:
            lines = fp.readlines()
        self.nb_sample = len(lines)
        for line in lines:
            line = line.strip('\n')
            self.data_files.append(line + data_suffix)
            self.label_files.append(line + label_suffix)
        super(SegDirectoryIterator, self).__init__(
            self.nb_sample, batch_size, shuffle, seed)

    def next(self):
        """For python 2.x.

        # Returns
            The next batch.
        """
        # Keeps under lock only the mechanism which advances
        # the indexing of each batch.
        with self.lock:
            index_array = next(self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        return self._get_batches_of_transformed_samples(index_array)

    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.

        # Arguments
            index_array: array of sample indices to include in batch.

        # Returns
            A batch of transformed samples.
        """
        current_batch_size = len(index_array)
        # The transformation of images is not under thread lock so it can be
        # done in parallel
        if self.target_size:
            # TODO(ahundt) make dtype properly configurable
            batch_x = np.zeros((current_batch_size,) + self.image_shape)
            # BUG FIX: `is 'img'` tested object identity against a string
            # literal (works only by CPython interning accident); use ==.
            if self.loss_shape is None and self.label_file_format == 'img':
                batch_y = np.zeros((current_batch_size,) + self.label_shape,
                                   dtype=int)
            elif self.loss_shape is None:
                batch_y = np.zeros((current_batch_size,) + self.label_shape)
            else:
                batch_y = np.zeros((current_batch_size,) + self.loss_shape,
                                   dtype=np.uint8)
        grayscale = self.color_mode == 'grayscale'
        # build batch of image data and labels
        for i, j in enumerate(index_array):
            data_file = self.data_files[j]
            label_file = self.label_files[j]
            # NOTE(review): load_img/img_to_array/array_to_img are expected
            # to come from keras.preprocessing.image; they are not visible in
            # this file's imports -- confirm they are imported elsewhere.
            img = load_img(os.path.join(self.data_dir, data_file),
                           grayscale=grayscale, target_size=None)
            label_filepath = os.path.join(self.label_dir, label_file)

            if self.label_file_format == 'npy':
                y = np.load(label_filepath)
            else:
                label = Image.open(label_filepath)
                if self.save_to_dir and self.palette is None:
                    self.palette = label.palette

            # do padding
            if self.target_size:
                if self.crop_mode != 'none':
                    x = img_to_array(img, data_format=self.data_format)
                    if self.label_file_format != 'npy':
                        y = img_to_array(
                            label, data_format=self.data_format).astype(int)
                    img_w, img_h = img.size
                    if self.pad_size:
                        pad_w = max(self.pad_size[1] - img_w, 0)
                        pad_h = max(self.pad_size[0] - img_h, 0)
                    else:
                        pad_w = max(self.target_size[1] - img_w, 0)
                        pad_h = max(self.target_size[0] - img_h, 0)
                    # Pad images with zeros and labels with label_cval so
                    # padded pixels can be ignored by the loss.
                    if self.data_format == 'channels_first':
                        x = np.lib.pad(x, ((0, 0), (pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2)), 'constant', constant_values=0.)
                        y = np.lib.pad(y, ((0, 0), (pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2)),
                                       'constant', constant_values=self.label_cval)
                    elif self.data_format == 'channels_last':
                        x = np.lib.pad(x, ((pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2), (0, 0)), 'constant', constant_values=0.)
                        y = np.lib.pad(y, ((pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2), (0, 0)), 'constant', constant_values=self.label_cval)
                else:
                    x = img_to_array(img.resize((self.target_size[1], self.target_size[0]),
                                                Image.BILINEAR),
                                     data_format=self.data_format)
                    if self.label_file_format != 'npy':
                        # Nearest-neighbour keeps labels as valid class ids.
                        y = img_to_array(label.resize((self.target_size[1], self.target_size[
                            0]), Image.NEAREST), data_format=self.data_format).astype(int)
                    else:
                        print('ERROR: resize not implemented for label npy file')

            if self.target_size is None:
                batch_x = np.zeros((current_batch_size,) + x.shape)
                if self.loss_shape is not None:
                    batch_y = np.zeros((current_batch_size,) + self.loss_shape)
                else:
                    batch_y = np.zeros((current_batch_size,) + y.shape)

            x, y = self.seg_data_generator.random_transform(x, y)
            x = self.seg_data_generator.standardize(x)

            if self.ignore_label:
                # Remap the ignore label onto the extra class index so the
                # loss can mask it out.
                y[np.where(y == self.ignore_label)] = self.classes

            if self.loss_shape is not None:
                y = np.reshape(y, self.loss_shape)

            batch_x[i] = x
            batch_y[i] = y
        # optionally save augmented images to disk for debugging purposes
        if self.save_to_dir:
            for i in range(current_batch_size):
                img = array_to_img(batch_x[i], self.data_format, scale=True)
                label = batch_y[i][:, :, 0].astype('uint8')
                label[np.where(label == self.classes)] = self.ignore_label
                label = Image.fromarray(label, mode='P')
                label.palette = self.palette
                # TODO(ahundt) fix index=i, a hacky workaround since current_index + i is no long available
                fname = '{prefix}_{index}_{hash}'.format(prefix=self.save_prefix,
                                                         index=i,
                                                         hash=np.random.randint(1e4))
                img.save(os.path.join(self.save_to_dir, 'img_' +
                                      fname + '.{format}'.format(format=self.save_format)))
                label.save(os.path.join(self.save_to_dir,
                                        'label_' + fname + '.png'))
        # return
        batch_x = preprocess_input(batch_x)
        if self.class_mode == 'sparse':
            return batch_x, batch_y
        else:
            return batch_x
class SegDataGenerator(object):
    """Augmentation generator for semantic segmentation: produces jointly
    transformed (image, label) pairs, mirroring Keras' ImageDataGenerator
    options plus crop/pad handling for dense labels."""

    def __init__(self,
                 featurewise_center=False,
                 samplewise_center=False,
                 featurewise_std_normalization=False,
                 samplewise_std_normalization=False,
                 channelwise_center=False,
                 rotation_range=0.,
                 width_shift_range=0.,
                 height_shift_range=0.,
                 shear_range=0.,
                 zoom_range=0.,
                 zoom_maintain_shape=True,
                 channel_shift_range=0.,
                 fill_mode='constant',
                 cval=0.,
                 label_cval=255,
                 crop_mode='none',
                 crop_size=(0, 0),
                 pad_size=None,
                 horizontal_flip=False,
                 vertical_flip=False,
                 rescale=None,
                 data_format='default'):
        if data_format == 'default':
            data_format = K.image_data_format()
        # Mirror every constructor argument onto the instance in one go.
        self.__dict__.update(locals())
        self.mean = None
        self.ch_mean = None
        self.std = None
        self.principal_components = None
        self.rescale = rescale
        # ValueError is more precise than the bare Exception raised before;
        # callers catching Exception still work.
        if data_format not in {'channels_last', 'channels_first'}:
            raise ValueError('data_format should be channels_last (channel after row and '
                             'column) or channels_first (channel before row and column). '
                             'Received arg: ', data_format)
        if crop_mode not in {'none', 'random', 'center'}:
            raise ValueError('crop_mode should be "none" or "random" or "center" '
                             'Received arg: ', crop_mode)
        self.data_format = data_format
        if data_format == 'channels_first':
            self.channel_index = 1
            self.row_index = 2
            self.col_index = 3
        if data_format == 'channels_last':
            self.channel_index = 3
            self.row_index = 1
            self.col_index = 2

        if np.isscalar(zoom_range):
            self.zoom_range = [1 - zoom_range, 1 + zoom_range]
        elif len(zoom_range) == 2:
            self.zoom_range = [zoom_range[0], zoom_range[1]]
        else:
            raise ValueError('zoom_range should be a float or '
                             'a tuple or list of two floats. '
                             'Received arg: ', zoom_range)

    def flow_from_directory(self, file_path, data_dir, data_suffix,
                            label_dir, label_suffix, classes,
                            ignore_label=255,
                            target_size=None, color_mode='rgb',
                            class_mode='sparse',
                            batch_size=32, shuffle=True, seed=None,
                            save_to_dir=None, save_prefix='', save_format='jpeg',
                            loss_shape=None):
        """Create a SegDirectoryIterator over the listed file pairs.

        When cropping is enabled, the crop size overrides target_size.
        """
        if self.crop_mode == 'random' or self.crop_mode == 'center':
            target_size = self.crop_size
        return SegDirectoryIterator(
            file_path, self,
            data_dir=data_dir, data_suffix=data_suffix,
            label_dir=label_dir, label_suffix=label_suffix,
            classes=classes, ignore_label=ignore_label,
            crop_mode=self.crop_mode, label_cval=self.label_cval,
            pad_size=self.pad_size,
            target_size=target_size, color_mode=color_mode,
            data_format=self.data_format, class_mode=class_mode,
            batch_size=batch_size, shuffle=shuffle, seed=seed,
            save_to_dir=save_to_dir, save_prefix=save_prefix,
            save_format=save_format,
            loss_shape=loss_shape)

    def standardize(self, x):
        """Apply the configured normalizations to a single image `x`."""
        if self.rescale:
            x *= self.rescale
        # x is a single image, so it doesn't have image number at index 0
        img_channel_index = self.channel_index - 1
        if self.samplewise_center:
            x -= np.mean(x, axis=img_channel_index, keepdims=True)
        if self.samplewise_std_normalization:
            x /= (np.std(x, axis=img_channel_index, keepdims=True) + 1e-7)
        if self.featurewise_center:
            x -= self.mean
        if self.featurewise_std_normalization:
            x /= (self.std + 1e-7)
        if self.channelwise_center:
            x -= self.ch_mean
        return x

    def random_transform(self, x, y):
        """Apply one random geometric transform jointly to image `x` and
        label `y`, then the configured crop; returns the transformed pair.

        NOTE(review): transform_matrix_offset_center, apply_transform,
        random_channel_shift and flip_axis are expected from
        keras.preprocessing.image; they are not visible in this file's
        imports -- confirm they are imported elsewhere.
        """
        # x is a single image, so it doesn't have image number at index 0
        img_row_index = self.row_index - 1
        img_col_index = self.col_index - 1
        img_channel_index = self.channel_index - 1
        if self.crop_mode == 'none':
            crop_size = (x.shape[img_row_index], x.shape[img_col_index])
        else:
            crop_size = self.crop_size

        assert x.shape[img_row_index] == y.shape[img_row_index] and x.shape[img_col_index] == y.shape[
            img_col_index], 'DATA ERROR: Different shape of data and label!\ndata shape: %s, label shape: %s' % (str(x.shape), str(y.shape))

        # use composition of homographies to generate final transform that
        # needs to be applied
        if self.rotation_range:
            theta = np.pi / 180 * \
                np.random.uniform(-self.rotation_range, self.rotation_range)
        else:
            theta = 0
        rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                    [np.sin(theta), np.cos(theta), 0],
                                    [0, 0, 1]])
        if self.height_shift_range:
            # * x.shape[img_row_index]
            tx = np.random.uniform(-self.height_shift_range,
                                   self.height_shift_range) * crop_size[0]
        else:
            tx = 0

        if self.width_shift_range:
            # * x.shape[img_col_index]
            ty = np.random.uniform(-self.width_shift_range,
                                   self.width_shift_range) * crop_size[1]
        else:
            ty = 0

        translation_matrix = np.array([[1, 0, tx],
                                       [0, 1, ty],
                                       [0, 0, 1]])
        if self.shear_range:
            shear = np.random.uniform(-self.shear_range, self.shear_range)
        else:
            shear = 0
        shear_matrix = np.array([[1, -np.sin(shear), 0],
                                 [0, np.cos(shear), 0],
                                 [0, 0, 1]])
        if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(
                self.zoom_range[0], self.zoom_range[1], 2)
        if self.zoom_maintain_shape:
            zy = zx
        zoom_matrix = np.array([[zx, 0, 0],
                                [0, zy, 0],
                                [0, 0, 1]])

        transform_matrix = np.dot(
            np.dot(np.dot(rotation_matrix, translation_matrix), shear_matrix), zoom_matrix)

        h, w = x.shape[img_row_index], x.shape[img_col_index]
        transform_matrix = transform_matrix_offset_center(
            transform_matrix, h, w)
        x = apply_transform(x, transform_matrix, img_channel_index,
                            fill_mode=self.fill_mode, cval=self.cval)
        # Labels always use constant fill with label_cval so new border
        # pixels become "ignore" rather than interpolated classes.
        y = apply_transform(y, transform_matrix, img_channel_index,
                            fill_mode='constant', cval=self.label_cval)

        if self.channel_shift_range != 0:
            x = random_channel_shift(
                x, self.channel_shift_range, img_channel_index)

        if self.horizontal_flip:
            if np.random.random() < 0.5:
                x = flip_axis(x, img_col_index)
                y = flip_axis(y, img_col_index)

        if self.vertical_flip:
            if np.random.random() < 0.5:
                x = flip_axis(x, img_row_index)
                y = flip_axis(y, img_row_index)

        if self.crop_mode == 'center':
            x, y = pair_center_crop(x, y, self.crop_size, self.data_format)
        elif self.crop_mode == 'random':
            x, y = pair_random_crop(x, y, self.crop_size, self.data_format)

        # TODO:
        # channel-wise normalization
        # barrel/fisheye
        return x, y

    def fit(self, X,
            augment=False,
            rounds=1,
            seed=None):
        '''Required for featurewise_center and featurewise_std_normalization

        # Arguments
            X: Numpy array, the data to fit on.
            augment: whether to fit on randomly augmented samples
            rounds: if `augment`,
                how many augmentation passes to do over the data
            seed: random seed.
        '''
        X = np.copy(X)
        if augment:
            aX = np.zeros(tuple([rounds * X.shape[0]] + list(X.shape)[1:]))
            for r in range(rounds):
                for i in range(X.shape[0]):
                    # BUG FIX: random_transform takes (x, y); the original
                    # call passed only X[i] and always raised TypeError.
                    # Use the sample itself as a dummy label and keep only
                    # the transformed image.
                    aX[i + r * X.shape[0]] = self.random_transform(
                        X[i], X[i].copy())[0]
            X = aX

        if self.featurewise_center:
            self.mean = np.mean(X, axis=0)
            X -= self.mean
        if self.featurewise_std_normalization:
            self.std = np.std(X, axis=0)
            X /= (self.std + 1e-7)

    def set_ch_mean(self, ch_mean):
        """Set the per-channel mean used by channelwise_center."""
        self.ch_mean = ch_mean
|
import sys
# Make the local biopy package importable when this script is run from one
# or two directories below the repository root.
sys.path.insert(0, "../")
sys.path.insert(0, "../../")
import biopy as bp

# Read ordered pairs, one per line, from the quiz data file.
with open("wk2quiz3a.txt") as dataset:
    pairs = dataset.read().splitlines()
print(pairs)
# Reconstruct the sequence from its ordered pairs; the two leading numeric
# arguments are presumably problem-size parameters -- TODO confirm against
# biopy.ord_pairs_to_sequence's signature.
seq = bp.ord_pairs_to_sequence(4, 2, pairs, debug=True)
print(seq)
|
import matplotlib._color_data as mcd
import numpy as np
from abc import ABC,abstractmethod
from Function import *
import math
class Model(ABC):
    """Abstract base class for long-lived-particle signal models.

    Stores detector geometry and efficiency parameters plus the particle
    mass and coupling; subclasses implement compute_lifetime() and
    compute_branching_ratios().
    """
    #_______________________________________________________________________________
    def __init__(self, parameters, mass=1., coupling=1.e-06):
        """Copy analysis parameters; lifetime and branching ratios start at -1
        (meaning "not yet computed")."""
        self.ns0 = parameters.ns0
        self.trigger_eff = parameters.trigger_eff
        self.eta_eff = parameters.eta_eff
        # Fiducial volume boundaries (radial and longitudinal).
        self.r_min = parameters.r_min
        self.r_max = parameters.r_max
        self.z_min = parameters.z_min
        self.z_max = parameters.z_max
        self.pt = parameters.pt
        self.mass = mass
        self.coupling = coupling
        self.tau = -1
        self.brs = dict()
        for br in parameters.list_brs:
            self.brs[br]=-1
        super(Model, self).__init__()
    #_______________________________________________________________________________
    def event_yield(self, debug=False):
        """Expected number of decays inside the fiducial volume.

        Combines production rate, efficiencies, branching ratios and the
        decay-in-flight acceptance exp(-l_min/gct) - exp(-l_max/gct).
        """
        ###FIX ME: for now r, z value are dummy, the accpetance in the nose region
        # is pre-computed, now this is used only to calculate eta
        # in the future require this method to compute from MC the real acceptance
        # in the nose
        theta_min = math.atan(self.r_min/self.z_min)
        theta_max = math.atan(self.r_max/self.z_max)
        # Pseudorapidity: eta = -ln(tan(theta / 2)).
        eta_min = - math.log(math.tan(theta_min/2))
        eta_max = - math.log(math.tan(theta_max/2))
        ## cannot take average eta due to steeply falling spectrum
        p = 0.75
        eta = p*eta_min + (1-p)*eta_max
        theta = p*theta_min + (1-p)*theta_max
        ## compute path length inside fiducial volume
        l_min = self.z_min/math.cos(theta)
        l_max = self.z_max/math.cos(theta)
        c=3.e+8  # speed of light [m/s]
        ## compute energy of LLP, assume half of energy of mother meson
        energy = 0.5*self.pt*math.cosh(eta)
        gamma=energy/self.mass
        tau = self.tau
        ctau=c*tau
        gct = gamma*ctau
        # Probability to decay between l_min and l_max (exponential decay law).
        acceptance=math.exp(-l_min/gct) - math.exp(-l_max/gct)
        event_yield = 0
        for b in self.brs.keys():
            br = self.brs[b]
            yld = self.ns0
            # NOTE(review): b[0] is the first element of the branching-ratio
            # key -- presumably a production branching fraction stored in the
            # key tuple; confirm against how parameters.list_brs is built.
            yld *= b[0]
            yld *= self.trigger_eff
            yld *= self.eta_eff
            yld *= br
            yld *= acceptance
            event_yield += yld
        if debug:
            print ('mass = {:.3f} '.format(self.mass))
            print ('coupling = {:.2e} '.format(self.coupling))
            print ('eta_min = {:.3f} '.format(eta_min))
            print ('eta_max = {:.3f} '.format(eta_max))
            print ('l_min = {:.3f} m'.format(l_min))
            print ('l_max = {:.3f} m'.format(l_max))
            print ('energy = {:.3f} GeV'.format(energy))
            print ('tau = {:.3f} ns'.format(tau*1e9))
            print ('gamma = {:.3f} '.format(gamma))
            print ('ctau = {:.3f} m'.format(c*tau))
            print ('gamma*c*tau = {:.3f} m'.format(gct))
            print ('----------------------------------------------------------')
            print ('number of S. = {:.2e} '.format(self.ns0))
            # NOTE(review): self.meson_br is never assigned in this class, so
            # this line raises AttributeError when debug=True; also `br` below
            # is the last value left over from the loop above. TODO confirm
            # the intended attribute.
            print ('meson BR = {:.2e} '.format(self.meson_br))
            print ('trigger eff = {:.2e} '.format(self.trigger_eff))
            print ('eta_eff = {:.2e} '.format(self.eta_eff))
            print ('br. ratio = {:.2e} '.format(br))
            print ('acceptance = {:.2e} '.format(acceptance))
            print ('ev. yield = {:.2e} '.format(event_yield))
        return event_yield
    #_______________________________________________________________________________
    @abstractmethod
    def compute_lifetime(self):
        """Set self.tau (seconds) from mass and coupling."""
        pass
    #_______________________________________________________________________________
    @abstractmethod
    def compute_branching_ratios(self):
        """Populate self.brs with decay branching ratios."""
        pass
#_______________________________________________________________________________
#def acceptance(self, zmin=10., zmax=10.5):
class MajoranaNeutrinoElectron(Model):
    """Heavy Majorana neutrino mixing with the electron sector."""

    def compute_lifetime(self):
        # Lifetime scales as 1/(m^5 * U^2); 1e-12 is presumably the
        # normalisation in seconds at unit mass/coupling -- TODO confirm.
        self.tau = 1.e-12/self.mass**5/self.coupling**2

    def compute_branching_ratios(self, list_br):
        # NOTE(review): signature differs from the abstract method (it takes
        # list_br); callers must supply the channel list explicitly.
        for channel in list_br:
            br_curve = Function('br', channel[1])
            self.brs[channel] = br_curve.eval(self.mass) * self.coupling**2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import random
import numpy as np
import torch
from tmarl.configs.config import get_config
from tmarl.runners.base_runner import Runner
def set_seed(seed):
    """Seed every RNG used by the stack for reproducibility.

    Covers Python's `random`, NumPy's global RNG, and torch on both CPU
    and all CUDA devices.
    """
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
class Evaluator(Runner):
    """Runner that parses CLI args, builds evaluation environments and a
    driver, and runs evaluation episodes. Subclasses must provide
    extra_args_func/get_env/init_driver."""

    def __init__(self, argv,program_type=None, client=None):
        # program_type and client are accepted for interface compatibility
        # but unused here.
        super().__init__(argv)
        parser = get_config()
        all_args = self.extra_args_func(argv, parser)

        all_args.cuda = not all_args.disable_cuda

        self.algorithm_name = all_args.algorithm_name

        # cuda
        if not all_args.disable_cuda and torch.cuda.is_available():
            device = torch.device("cuda:0")
            if all_args.cuda_deterministic:
                # Trade cudnn autotuning for reproducible kernels.
                torch.backends.cudnn.benchmark = False
                torch.backends.cudnn.deterministic = True
        else:
            print("choose to use cpu...")
            device = torch.device("cpu")

        # run dir
        run_dir = self.setup_run_dir(all_args)

        # env init
        Env_Class, SubprocVecEnv, DummyVecEnv = self.get_env()
        eval_envs = self.env_init(
            all_args, Env_Class, SubprocVecEnv, DummyVecEnv)
        num_agents = all_args.num_agents

        # Shared config dict consumed by the driver; training envs are None
        # because this runner only evaluates.
        config = {
            "all_args": all_args,
            "envs": None,
            "eval_envs": eval_envs,
            "num_agents": num_agents,
            "device": device,
            "run_dir": run_dir,
        }

        self.all_args, self.envs, self.eval_envs, self.config \
            = all_args, None, eval_envs, config

        self.driver = self.init_driver()

    def run(self):
        # run experiments
        self.driver.run()
        self.stop()

    def stop(self):
        """Hook for subclass cleanup; no-op by default."""
        pass

    def extra_args_func(self, argv, parser):
        """Parse algorithm-specific CLI arguments; must be overridden."""
        raise NotImplementedError

    def get_env(self):
        """Return (Env_Class, SubprocVecEnv, DummyVecEnv); must be overridden."""
        raise NotImplementedError

    def init_driver(self):
        """Build the evaluation driver; must be overridden."""
        raise NotImplementedError

    def make_eval_env(self, all_args, Env_Class, SubprocVecEnv, DummyVecEnv):
        """Create the vectorized eval env; one Dummy env for a single
        rollout thread, otherwise a subprocess vector env."""
        def get_env_fn(rank):
            def init_env():
                env = Env_Class(all_args)
                # Distinct deterministic seed per worker.
                env.seed(all_args.seed * 50000 + rank * 10000)
                return env

            return init_env

        if all_args.n_eval_rollout_threads == 1:
            return DummyVecEnv([get_env_fn(0)])
        else:
            return SubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)])

    def env_init(self, all_args, Env_Class, SubprocVecEnv, DummyVecEnv):
        # Only build eval envs when evaluation is enabled.
        eval_envs = self.make_eval_env(
            all_args, Env_Class, SubprocVecEnv, DummyVecEnv) if all_args.use_eval else None
        return eval_envs

    def setup_run_dir(self, all_args):
        """Hook for subclasses to create/return an output directory."""
        return None
|
# coding: utf-8
'''
This script provides the following utilities for image classification
- Data transformation
- Data Loading
- Model creation
- Model training
- Saving model checkpoint
- Image processing
- Image Prediction
Each utility is exposed as a function; callers invoke the relevant function for each task.
'''
# Imports here
import json
import numpy as np
import torch
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import torch.nn.functional as F
import torchvision
from torch import nn, optim
from torchvision import datasets, transforms, models
from torch.autograd import Variable
from collections import OrderedDict
from PIL import Image
import seaborn as sb
from matplotlib.ticker import FuncFormatter
# load, split ( training,validation,testing ) and tranform datasets
def load_data(datadir="./flowers"):
    """Create training/validation/testing DataLoaders for a flower dataset.

    datadir: root folder containing 'train', 'valid' and 'test' subfolders
        laid out for torchvision.datasets.ImageFolder.

    Returns:
        (trainloader, validationloader, testingloader)
    """
    # BUG FIX: the datadir argument was previously ignored (the path was
    # hard-coded to 'flowers'); './flowers' and 'flowers' resolve to the
    # same directory, so default behaviour is unchanged.
    data_dir = datadir
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'

    # ImageNet channel statistics expected by pre-trained backbones.
    means = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    max_image_size = 224
    # BUG FIX: batch_size was set to 32 but never used while the loaders
    # hard-coded 64; keep the effective value (64) defined in one place.
    batch_size = 64

    # Augment only the training split; deterministic center-crop for eval.
    training_transform = transforms.Compose([transforms.RandomHorizontalFlip(p=0.25),
                                             transforms.RandomRotation(25),
                                             transforms.RandomGrayscale(p=0.02),
                                             transforms.RandomResizedCrop(max_image_size),
                                             transforms.ToTensor(),
                                             transforms.Normalize(means, std)])

    validation_transform = transforms.Compose([transforms.Resize(max_image_size + 1),
                                               transforms.CenterCrop(max_image_size),
                                               transforms.ToTensor(),
                                               transforms.Normalize(means, std)])

    testing_transform = transforms.Compose([transforms.Resize(max_image_size + 1),
                                            transforms.CenterCrop(max_image_size),
                                            transforms.ToTensor(),
                                            transforms.Normalize(means, std)])

    # Load the datasets with ImageFolder
    training_data = datasets.ImageFolder(train_dir, transform=training_transform)
    validation_data = datasets.ImageFolder(valid_dir, transform=validation_transform)
    testing_data = datasets.ImageFolder(test_dir, transform=testing_transform)

    # Using the image datasets and the transforms, define the dataloaders
    trainloader = torch.utils.data.DataLoader(training_data, batch_size=batch_size, shuffle=True)
    validationloader = torch.utils.data.DataLoader(validation_data, batch_size=batch_size, shuffle=True)
    testingloader = torch.utils.data.DataLoader(testing_data, batch_size=batch_size, shuffle=True)
    return trainloader, validationloader, testingloader
def load_dataset(datadir="./flowers"):
    """Create the raw (untransformed-into-loaders) ImageFolder datasets.

    datadir: root folder containing 'train', 'valid' and 'test' subfolders.

    Returns:
        (training_data, validation_data, testing_data)
    """
    # BUG FIX: the datadir argument was previously ignored (the path was
    # hard-coded to 'flowers').
    data_dir = datadir
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'

    # ImageNet channel statistics expected by pre-trained backbones.
    means = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    max_image_size = 224

    training_transform = transforms.Compose([transforms.RandomHorizontalFlip(p=0.25),
                                             transforms.RandomRotation(25),
                                             transforms.RandomGrayscale(p=0.02),
                                             transforms.RandomResizedCrop(max_image_size),
                                             transforms.ToTensor(),
                                             transforms.Normalize(means, std)])

    validation_transform = transforms.Compose([transforms.Resize(max_image_size + 1),
                                               transforms.CenterCrop(max_image_size),
                                               transforms.ToTensor(),
                                               transforms.Normalize(means, std)])

    testing_transform = transforms.Compose([transforms.Resize(max_image_size + 1),
                                            transforms.CenterCrop(max_image_size),
                                            transforms.ToTensor(),
                                            transforms.Normalize(means, std)])

    # Load the datasets with ImageFolder
    training_data = datasets.ImageFolder(train_dir, transform=training_transform)
    validation_data = datasets.ImageFolder(valid_dir, transform=validation_transform)
    testing_data = datasets.ImageFolder(test_dir, transform=testing_transform)
    return training_data, validation_data, testing_data
def opencatfile(catfile):
    """Load a category-index-to-name mapping from a JSON file.

    Returns (cat_to_name, output_size) where output_size is the number of
    categories, i.e. the size of the model's output layer.
    """
    with open(catfile, 'r') as handle:
        cat_to_name = json.load(handle)
    print("Execution Status: category names in JSON files were read and loaded in cat_to_name variable"+'\n')
    output_size = len(cat_to_name)
    return cat_to_name, output_size
# model, optimizer, criterion = imgclassutilv2.nn_setup(arch,hiddenunits,lr,power,output_size)
def nn_setup(arch, hiddenunits, lr, power, output_size):
    """Build a transfer-learning classifier on a pre-trained backbone.

    arch: 'vgg16' or 'densenet121'
    hiddenunits: accepted for interface compatibility; hidden sizes are
        currently derived from the backbone's feature width instead.
    lr: learning rate for the Adam optimizer
    power: device to move the model to ('cuda' or 'cpu')
    output_size: number of output classes

    Returns:
        (model, criterion, optimizer, output_size)

    Raises:
        ValueError: for an unsupported architecture name.
    """
    # Using pre-trained model
    model = getattr(torchvision.models, arch)(pretrained=True)

    # Input feature width differs per backbone.
    if arch == "vgg16":
        input_size = model.classifier[0].in_features
    elif arch == "densenet121":
        input_size = model.classifier.in_features
    else:
        # Previously an unsupported arch fell through and crashed later
        # with UnboundLocalError; fail fast with a clear message instead.
        raise ValueError("unsupported architecture: " + arch)

    hidden_size = [(input_size // 8), (input_size // 32)]

    # Prevent backpropagation on parameters
    for param in model.parameters():
        param.requires_grad = False

    # BUG FIX: both dropout layers previously used the key 'dropout', so
    # the OrderedDict kept only one of them and the second dropout never
    # made it into the network. Unique keys restore both layers.
    classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(input_size, hidden_size[0])),
        ('relu1', nn.ReLU()),
        ('dropout1', nn.Dropout(p=0.15)),
        ('fc2', nn.Linear(hidden_size[0], hidden_size[1])),
        ('relu2', nn.ReLU()),
        ('dropout2', nn.Dropout(p=0.15)),
        ('output', nn.Linear(hidden_size[1], output_size)),
        # LogSoftmax is needed by NLLLoss criterion
        ('softmax', nn.LogSoftmax(dim=1))
    ]))

    # Replace the frozen backbone's classifier head.
    model.classifier = classifier

    # The negative log likelihood loss as criterion.
    criterion = nn.NLLLoss()
    # Optimize only the (trainable) classifier parameters.
    optimizer = optim.Adam(model.classifier.parameters(), lr)
    model.to(power)

    return model, criterion, optimizer, output_size
# imgclassutilv3.train_network(model, optimizer, criterion, epochs, trainloader,power)
def train_network(model, optimizer,criterion, epochs,trainloader,validationloader, power):
epochs = 3
print_every = 40
steps = 0
loss_show=[]
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), 0.001)
# change to requisite training power ( cpu or cuda )
if power == "cuda":
model.to('cuda')
print(" model.to(cpu)"+ '\n')
else:
model.to('cpu')
print(" model.to(cpu)"+ '\n')
for e in range(epochs):
running_loss = 0
for ii, (inputs, labels) in enumerate(trainloader):
steps += 1
if power == "cuda":
if torch.cuda.is_available():
inputs,labels= inputs.to('cuda') , labels.to('cuda')
# print('\n'"Have set inputs and labels to cuda")
else:
inputs,labels= inputs.to('cpu') , labels.to('cpu')
# model.to('cuda - remove this
else:
inputs,labels= inputs.to('cpu') , labels.to('cpu')
# print('\n'"Have set inputs and labels to cpu")
# model.to('cpu') - remove this
optimizer.zero_grad()
# Forward and backward passes
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
model.eval()
validationloss = 0
accuracy=0
for ii, (inputs_val,labels_val) in enumerate(validationloader):
optimizer.zero_grad()
if torch.cuda.is_available():
inputs_val, labels_val= inputs_val.to('cuda') , labels_val.to('cuda')
model.to('cuda')
else:
inputs_val, labels_val= inputs_val.to('cpu') , labels_val.to('cpu')
model.to('cpu')
with torch.no_grad():
outputs = model.forward(inputs_val)
validationloss = criterion(outputs,labels_val)
ps = torch.exp(outputs).data
equality = (labels_val.data == ps.max(1)[1])
accuracy += equality.type_as(torch.FloatTensor()).mean()
validationloss = validationloss / len(validationloader)
accuracy = accuracy /len(validationloader)
print("Epoch: {}/{} ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every),
"Validation Loss {:.4f}".format(validationloss),
"Accuracy: {:.4f}".format(accuracy))
running_loss = 0
# validate(testingloader)
def validate(testingloader):
    """Print top-1 accuracy of the module-level ``nn_model`` on a loader.

    NOTE(review): relies on module globals ``nn_model`` and ``power`` defined
    elsewhere in this file -- confirm they exist before calling.

    Bug fix: input tensors were previously moved to CUDA unconditionally,
    which crashed when ``power`` selected the CPU; device selection is now
    consistent for both the model and the data.
    """
    correct = 0
    total = 0
    device = 'cuda:0' if power == "cuda" else 'cpu'
    nn_model.to(device)
    with torch.no_grad():
        for images, labels in testingloader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = nn_model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    # NOTE(review): an empty loader leaves total == 0 and would divide by
    # zero here, as in the original -- confirm callers never pass one.
    print('Accuracy: %d %%' % (100 * correct / total))
# imgclassutilv2.save_checkpoint(path,arch,hiddenunits,lr)
def save_checkpoint(model_state, path):
    """Serialize a checkpoint dict (or any torch-saveable object) to *path*.

    :param model_state: object to persist (typically a state dict).
    :param path: destination file path.
    """
    torch.save(model_state, path)
    # Bug fix: corrected typos in the status message ("satus", "successfuly").
    print("Execution status: save checkpoint function executed successfully and will return " + '\n')
# imgclassutilv2.load_checkpoint_model(path)
def load_checkpoint_model(path):
    """Rebuild a classifier model from a checkpoint written by save_checkpoint.

    The ``map_location`` callback forces all tensors onto the CPU so that
    checkpoints trained on GPU load on CPU-only machines.
    (Ref: https://discuss.pytorch.org/t/problem-loading-model-trained-on-gpu/17745)
    """
    checkpoint = torch.load(path, map_location=lambda storage, loc: storage)
    rebuilt = getattr(torchvision.models, checkpoint['arch'])(pretrained=True)
    rebuilt.classifier = checkpoint['classifier']
    rebuilt.class_to_idx = checkpoint['class_to_idx']
    rebuilt.load_state_dict(checkpoint['state_dict'])
    return rebuilt
def process_image(image_path):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns an Numpy array
    '''
    # Standard ImageNet preprocessing: resize, centre-crop to 224, tensorize,
    # then normalise with the ImageNet channel statistics.
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    source_image = Image.open(image_path)
    return pipeline(source_image)
# probabilities = imgclassutilv2.predict(image_path, model, outputs, power)
def predict(image_path, model, topk=5):
    """Return the top-*k* class probabilities and indices for one image.

    :param image_path: path to the image file.
    :param model: trained classifier producing log-probabilities.
    :param topk: number of top classes to return.
    :return: ``(values, indices)`` from ``Tensor.topk``.

    Bug fix: the model and input were previously moved to CUDA
    unconditionally, crashing on CPU-only hosts.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    img_torch = process_image(image_path)
    img_torch = img_torch.unsqueeze_(0).float()
    with torch.no_grad():
        output = model.forward(img_torch.to(device))
    # The model emits log-probabilities; softmax converts to probabilities.
    probability = F.softmax(output.data, dim=1)
    return probability.topk(topk)
|
import board
from advancedAgent import AdvancedAgent
from minCostAgent import MinCostAgent
from minRiskAgent import MinRiskAgent
def main():
    """Compare average risk of three minesweeper agents across mine densities."""
    dim = 10
    runs = 5
    mineStepSize = 10
    startMines = 10
    endMines = 100
    # NOTE(review): range() excludes endMines, so the last density tested uses
    # endMines - mineStepSize mines -- confirm this is intentional.
    for mines in range(startMines, endMines, mineStepSize):
        advanced_risk = 0
        min_cost_risk = 0
        min_risk_risk = 0
        for _ in range(runs):
            # Fresh board per run; all three agents solve the same board.
            myBoard = board.generateBoard(dim, mines)
            advancedAgent = AdvancedAgent(myBoard, mines)
            minCostAgent = MinCostAgent(myBoard, mines)
            minRiskAgent = MinRiskAgent(myBoard, mines)
            advancedAgent.solve()
            minCostAgent.solve()
            minRiskAgent.solve()
            advanced_risk = advanced_risk + advancedAgent.risk
            min_cost_risk = min_cost_risk + minCostAgent.risk
            min_risk_risk = min_risk_risk + minRiskAgent.risk
        density = mines / (dim * dim)
        print("Advanced Algorithm at Mine Density: " + str(density))
        print("Avg Risk: " + str(advanced_risk / runs))
        print("Min Cost Algorithm at Mine Density: " + str(density))
        print("Avg Risk: " + str(min_cost_risk / runs))
        print("Min Risk Algorithm at Mine Density: " + str(density))
        print("Avg Risk: " + str(min_risk_risk / runs))
        print("")


# Idiom fix: guard the entry point so importing this module does not run the
# (slow) experiment as a side effect.
if __name__ == "__main__":
    main()
import os
import platform
import numpy as np
from qpython.qtemporal import qtemporal
from qpython import qtype
from datetime import datetime
from fxqu4nt import APP_NAME
# q/kdb+ epoch: q temporal types count from 2000-01-01 (minute defaults to 0).
JAN_1_2000 = datetime(year=2000, month=1, day=1, hour=0, second=0, microsecond=0)
def normalize_path(path: str):
    """Return *path* with every backslash converted to a forward slash."""
    return "/".join(path.split("\\"))
def _serialize_q_datetime(dt: datetime, type="ns"):
    """ Convert python datetime to q datetime

    :param dt: Python datetime input
    :param type: ns = timestamp, D = date, ms = datetime
    :return: qtemporal wrapper, or None for an unrecognised type code
    """
    # Dispatch table from type code to the matching q temporal type.
    code_to_qtype = {
        "ns": qtype.QTIMESTAMP,
        "D": qtype.QDATE,
        "ms": qtype.QDATETIME,
    }
    q_type = code_to_qtype.get(type)
    if q_type is None:
        return None
    return qtemporal(np.datetime64(dt), qtype=q_type)

# Short alias used throughout the code base.
sqdt = _serialize_q_datetime
def serialize_symbol_name(name):
    """Decode a q symbol (bytes) into a native UTF-8 string."""
    return str(name, "utf-8")
def q_dt_str(dt: datetime):
    """Format *dt* as a q datetime literal: yyyy.mm.ddThh:mm:ss.ffffff."""
    return format(dt, "%Y.%m.%dT%H:%M:%S.%f")
def get_tmp_dir():
    """Return the per-user temporary directory for the application.

    NOTE(review): implicitly returns None on platforms other than
    Windows/macOS/Linux -- confirm callers handle that.
    """
    system = platform.system()
    if system == "Windows":
        return os.path.join("C:\\Users\\", os.path.expandvars("%USERNAME%"), "AppData\\Local\\Temp\\"+APP_NAME)
    if system in ("Darwin", "Linux"):
        return os.path.join("/tmp/"+APP_NAME)
class Solution:
    def searchRange(self, nums: List[int], target: int) -> List[int]:
        """Locate the first and last index of *target* in sorted *nums*.

        Divide-and-conquer: a segment whose two endpoints both equal the
        target is a maximal run; otherwise recurse into the halves that can
        still bracket the target and merge their answers.
        Returns [-1, -1] when the value is absent or the list is empty.
        """
        if not nums:
            return [-1, -1]

        def span(lo, hi):
            # Whole segment equals the target: it is one contiguous run.
            if nums[lo] == target == nums[hi]:
                return [lo, hi]
            # The target cannot live in a segment that does not bracket it.
            if not (nums[lo] <= target <= nums[hi]):
                return [-1, -1]
            mid = (lo + hi) // 2
            left_half = span(lo, mid)
            right_half = span(mid + 1, hi)
            if -1 in left_half:
                return right_half
            if -1 in right_half:
                return left_half
            # Target straddles the midpoint: join the two runs.
            return [left_half[0], right_half[1]]

        return span(0, len(nums) - 1)
#!/usr/bin/env python
# Socket.IO chat server entry point (Python 2 syntax throughout this module).
import os
from app import create_app, db
# NOTE(review): flask.ext.* is the deprecated pre-Flask-1.0 extension import
# namespace; modern releases use flask_script / flask_socketio instead.
from flask.ext.script import Server, Manager, prompt_bool
from flask.ext.socketio import SocketIO, emit, join_room, leave_room, \
    close_room, disconnect
from flask import request
import time
from threading import Thread
# Application factory; config name comes from the environment.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
# manager = Manager(app)
# manager.add_command('runserver', Server(host='0.0.0.0', port=3000))
# Wrap the WSGI app so Socket.IO events can be dispatched.
socketio = SocketIO(app)
# @main.route('/', methods=['GET', 'POST'])
# def indextest():
#     return redirect(url_for('auth.login'))
# Module-level state shared by the event handlers below.
thread = None
user_id = []
room_num = 0
sender_id = 0
@socketio.on('join room')
def test_getroom(message):
    # A client asks to join a chat room.
    # Expects message = {'room': <room id>, 'u_id': <user id>}.
    print "join room"
    print message
    join_room(message['room'])
    # Confirm the assignment back to the requesting client only (no room= arg).
    emit('set room',{ 'room':message['room'],'sender':message['u_id'] })
@socketio.on('set msg')
def test_setmsg(data):
    # Relay a chat message to every client in the sender's room.
    # Expects data with keys: sender, chat_id, msg, score, room.
    print "set msg"
    print data
    emit('set msg',{'sender': data['sender'], 'chat_id': data['chat_id'], 'msg':data['msg'], 'score':data['score']}, room = data['room'])
@socketio.on('update color')
def test_updatecolor(data):
    # Broadcast a score/colour update for one message to the whole room.
    # Expects data with keys: sender, chat_id, score, room.
    print "update color"
    print data
    emit('update color',{'sender': data['sender'], 'chat_id': data['chat_id'], 'score': data['score']}, room = data['room'])
@socketio.on('start chat')
def test_startchat(data):
    # Signal to the room that a chat session is starting.
    # Expects data with keys: name_hash, room.
    print "start chat"
    print data
    emit('start chat',{'name_hash':data["name_hash"]},room = data["room"])
@socketio.on('disconnect')
def test_disconnect():
    # On disconnect, notify the first room this client had joined.
    # NOTE(review): assumes the client joined at least one room -- an empty
    # rooms collection would raise IndexError here; confirm upstream.
    room = list(request.namespace.rooms)[0]
    emit('quit room',{},room = room)
    print room
    print('Client disconnected')
# @manager.command
# def initdb():
# '''Creates all database tables.'''
# db.create_all()
# @manager.command
# def dropdb():
# '''Drops all database tables.'''
# if prompt_bool('Are you sure to drop your databse?'):
# db.drop_all()
if __name__ == '__main__':
    # manager.run()
    # Run the Socket.IO development server on all interfaces, port 3000.
    socketio.run(app, host='0.0.0.0', port=3000)
|
#PassowrdManager.py
#This program will manage users passwords
# Module-level state: the logged-in user's name, their stored passwords, and
# [username, password] pairs for accounts created during this session.
name = ""
passwords_list = []
members_list = []
def menu(name):
    """Greet *name* and return the raw main-menu choice typed by the user."""
    print("Hey there", name)
    choice = input("""Choose whether you would to like add/remove a password(1), view passwords(2) or exit application(3), """).strip()
    return choice
def addpassword():
    """Prompt for passwords until the user types 'end', storing each one that
    satisfies the strength rules (>= 8 chars, a capital letter and a digit)."""
    while True:
        password = input("Enter your password, password must include one capital letter and one number, it must also be greater than 8 characters, type end to return to homepage")
        has_upper = any(ch.isupper() for ch in password)
        has_digit = any(ch.isdigit() for ch in password)
        if has_upper and has_digit and len(password) >= 8:
            passwords_list.append(password)
            print(password, "has been added")
        elif password == "end":
            break
        else:
            print("Your password did not meet the minimum requirements")
print("Welcome to the password manager")
name = input("What is your name")
# --- Authentication loop: log in with the demo account or create one. ---
while True:
    member = input("Enter 'L' for login or 'N' to create a new account").upper()
    if member == "L":
        m_username = input("Enter your username: ")
        m_password = input("Enter your password:")
        # NOTE(review): only this hard-coded demo account can log in; accounts
        # created below are stored in members_list but never checked here.
        if m_username == "bdsc" and m_password == "Pass1234":
            print("username password match!!")
            break
        else:
            print("Incorrect password, please try again")
    elif member == "N":
        m_username = input("Enter your new account name")
        while True:
            m_password = input("Create password with at least 8 characters, 1 Capital letter and a number:").strip()
            if (any(passreqr.isupper() for passreqr in m_password) and any(passreqr.isdigit() for passreqr in m_password) and len(m_password) >= 8):
                members_list.append([m_username, m_password])
                print("Account created :)")
                break
# --- Main menu loop. ---
while True:
    chosen_option = menu(name)
    if chosen_option == "1":
        addpassword()
        print("Success!")
    # Bug fix: this branch previously tested the undefined name
    # 'chosen_options', raising NameError whenever option 2 was chosen.
    elif chosen_option == "2":
        print("Success!")
        print(passwords_list)
    elif chosen_option == "3":
        break
    else:
        # Bug fix: corrected the typo "Invaild".
        print("Invalid option, try again")
|
def alphabet_position_versao_1(text):
    """Return the 1-based alphabet positions of the letters in *text*,
    space-separated; non-letter characters are skipped.

    Bug fix: removed a leftover debug ``print(text)``.
    """
    alfabeto = list("abcdefghijklmnopqrstuvwxyz")
    result = ""
    for ch in text:
        if ch.lower() in alfabeto:
            result += str(alfabeto.index(ch.lower()) + 1) + " "
    return result.strip()
def alphabet_position_versao_2(text):
    """Space-separated 1-based alphabet positions of the letters in *text*.

    Non-string input implicitly returns None (preserving the original's
    type-guard behaviour); non-letter characters are ignored.
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    if type(text) == str:
        positions = [str(alphabet.index(letter) + 1)
                     for letter in text.lower() if letter.isalpha()]
        return ' '.join(positions)
def alphabet_position_original(text):
    """Space-separated 1-based alphabet positions of the letters in *text*.

    The lookup table is generated instead of written out by hand; characters
    absent from the table (digits, punctuation, spaces) are skipped.
    """
    letters = "abcdefghijklmnopqrstuvwxyz"
    alphabet = {ch: str(pos) for pos, ch in enumerate(letters, start=1)}
    pieces = []
    for ch in text.lower().strip():
        if ch in alphabet:
            pieces.append(alphabet[ch])
    return " ".join(pieces)
def main():
    """Demo: print the alphabet positions of a sample sentence."""
    print(alphabet_position_original(".The sunset sets at twelve o' clock."))


# Idiom fix: only run the demo when executed as a script, not on import.
if __name__ == "__main__":
    main()
import certifi
import numpy as np
import os
import pathlib
import urllib3
import torch
from vel.api import Source
class TextIterator:
    """ Iterator over a text dataset """

    def __init__(self, padded_sequence, sequence_length, batch_size, alphabet_size, num_batches):
        self.sequence_length = sequence_length
        self.batch_size = batch_size
        self.num_batches = num_batches
        self.alphabet_size = alphabet_size

        total_rows = self.num_batches * self.batch_size
        # Inputs are the sequence as-is; targets are shifted by one character.
        self.padded_sequence = padded_sequence[:-1].reshape(total_rows, self.sequence_length)
        self.padded_sequence_next = padded_sequence[1:].reshape(total_rows, self.sequence_length)

        # Shuffle the row order once, then deal the rows out into batches.
        row_order = np.arange(total_rows)
        np.random.shuffle(row_order)
        self.sequence_indices = row_order.reshape(self.num_batches, self.batch_size)

        self.batch_idx = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.batch_idx >= self.num_batches:
            raise StopIteration
        rows = self.sequence_indices[self.batch_idx]
        self.batch_idx += 1
        inputs = torch.from_numpy(self.padded_sequence[rows]).to(torch.long)
        targets = torch.from_numpy(self.padded_sequence_next[rows]).to(torch.long)
        return inputs, targets
class TextLoader:
    """ Loader of sequential text data """

    def __init__(self, sequence, sequence_length, batch_size, alphabet_size):
        self.sequence = sequence
        self.sequence_length = sequence_length
        self.batch_size = batch_size
        self.alphabet_size = alphabet_size

        # One extra element is reserved so targets can be shifted by one.
        usable = len(self.sequence) - self.sequence_length - 1
        chunk = self.sequence_length * self.batch_size
        remainder = usable % chunk
        self.num_batches = usable // chunk
        if remainder > 0:
            # Zero-pad so the final partial chunk becomes a full batch.
            self.sequence = np.pad(self.sequence, (0, chunk - remainder), mode='constant')
            self.num_batches += 1

    def __iter__(self):
        # Random phase so successive epochs see differently-aligned windows.
        start = np.random.randint(self.sequence_length)
        stop = self.num_batches * self.sequence_length * self.batch_size + start + 1
        window = self.sequence[start:stop]
        return TextIterator(
            window, self.sequence_length, self.batch_size,
            alphabet_size=self.alphabet_size,
            num_batches=self.num_batches
        )

    def __len__(self):
        """ Number of batches in this loader """
        return self.num_batches
class TextUrlSource(Source):
    """ Download text from source and model it character by character """
    def __init__(self, url, absolute_data_path, sequence_length, batch_size, train_val_split=0.8):
        super().__init__()
        self.url = url
        self.data_path = absolute_data_path
        self.sequence_length = sequence_length
        self.batch_size = batch_size
        self.train_val_split = train_val_split
        # On-disk cache: raw corpus and the pickled alphabet/encoding dict.
        self.text_path = os.path.join(self.data_path, 'text.txt')
        self.processed_path = os.path.join(self.data_path, 'processed.data')
        self.data_dict = self.download()
        content_encoded = self.data_dict['content_encoded']
        alphabet_size = len(self.data_dict['alphabet'])
        # The split is a simple prefix/suffix cut of the encoded text.
        split_idx = int(len(content_encoded) * train_val_split)
        self._train_loader = TextLoader(
            sequence=content_encoded[:split_idx],
            sequence_length=sequence_length,
            batch_size=batch_size,
            alphabet_size=alphabet_size,
        )
        self._val_loader = TextLoader(
            sequence=content_encoded[split_idx:],
            sequence_length=sequence_length,
            batch_size=batch_size,
            alphabet_size=alphabet_size,
        )
    def encode_character(self, char):
        """Map a character to its 1-based alphabet index."""
        return self.data_dict['character_to_index'][char]
    def decode_character(self, index):
        """Map a 1-based alphabet index back to its character."""
        return self.data_dict['index_to_character'][index]
    def train_loader(self):
        """ PyTorch loader of training data """
        return self._train_loader
    def val_loader(self):
        """ PyTorch loader of validation data """
        return self._val_loader
    def train_dataset(self):
        """ Return the training dataset """
        return None
    def val_dataset(self):
        """ Return the validation dataset """
        return None
    def train_iterations_per_epoch(self):
        """ Return number of iterations per epoch """
        return len(self._train_loader)
    def val_iterations_per_epoch(self):
        """ Return number of iterations per epoch - validation """
        return len(self._val_loader)
    def download(self):
        """ Make sure data file is downloaded and stored properly """
        if not os.path.exists(self.data_path):
            # Create if it doesn't exist
            pathlib.Path(self.data_path).mkdir(parents=True, exist_ok=True)
        if not os.path.exists(self.text_path):
            # Fetch the corpus over HTTPS with certificate verification.
            http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
            with open(self.text_path, 'wt') as fp:
                request = http.request('GET', self.url)
                content = request.data.decode('utf8')
                fp.write(content)
        if not os.path.exists(self.processed_path):
            with open(self.text_path, 'rt') as fp:
                content = fp.read()
            # Character indices start at 1; 0 is left free (padding value).
            alphabet = sorted(set(content))
            index_to_character = {idx: c for idx, c in enumerate(alphabet, 1)}
            character_to_index = {c: idx for idx, c in enumerate(alphabet, 1)}
            # NOTE(review): uint8 caps the alphabet at 255 distinct symbols --
            # confirm the corpus alphabet fits.
            content_encoded = np.array([character_to_index[c] for c in content], dtype=np.uint8)
            data_dict = {
                'alphabet': alphabet,
                'index_to_character': index_to_character,
                'character_to_index': character_to_index,
                'content_encoded': content_encoded
            }
            with open(self.processed_path, 'wb') as fp:
                torch.save(data_dict, fp)
        else:
            with open(self.processed_path, 'rb') as fp:
                data_dict = torch.load(fp)
        return data_dict
def create(model_config, url, local_dir, sequence_length=64, batch_size=64, train_val_split=0.8):
    """ Vel factory function """
    # Relative paths are resolved against the project's data directory.
    data_path = (local_dir if os.path.isabs(local_dir)
                 else model_config.project_data_dir(local_dir))
    return TextUrlSource(
        url,
        absolute_data_path=data_path,
        sequence_length=sequence_length,
        batch_size=batch_size,
        train_val_split=train_val_split,
    )
|
import logging
from pathlib import Path
from typing import TYPE_CHECKING
from mackerel import exceptions, renderers
from mackerel.content import Document
from mackerel.helpers import cached_property, make_config
if TYPE_CHECKING:
from typing import Tuple # noqa
from configparser import ConfigParser # noqa
class Site:
    """A mackerel static site rooted at *path*.

    Collects document/content/template files from the configured directories
    and instantiates the configured document and template renderers.
    """
    def __init__(self, path: Path) -> None:
        self.path = path
        self.config = make_config(site_path=path)  # type: ConfigParser
        self.logger = logging.getLogger('mackerel')  # type: logging.Logger
        # Site paths
        self.content_path = self.path / Path(
            self.config['mackerel']['CONTENT_PATH'])  # type: Path
        self.output_path = self.path / Path(
            self.config['mackerel']['OUTPUT_PATH'])  # type: Path
        self.template_path = self.path / Path(
            self.config['mackerel']['TEMPLATE_PATH'])  # type: Path
        # Site files: documents carry DOC_EXT; everything else under the
        # content/template trees is copied through untouched.
        self.document_files = tuple(
            f for f in self.content_path.rglob('*')
            if f.suffix == self.config['mackerel']['DOC_EXT'])  # type: Tuple[Path, ...]  # noqa
        self.other_content_files = tuple(
            f for f in self.content_path.rglob('*')
            if f.suffix != self.config['mackerel']['DOC_EXT'] and
            f.is_file())  # type: Tuple[Path, ...]
        self.other_template_files = tuple(
            f for f in self.template_path.rglob('*')
            if f.suffix != self.config['mackerel']['TEMPLATE_EXT'] and
            f.is_file())  # type: Tuple[Path, ...]
        # Site renderers: resolved by class name from the config, so the
        # renderer modules act as a plugin registry.
        self.document_renderer = getattr(
            renderers.document,
            self.config['mackerel']['DOCUMENT_RENDERER'])(site=self)  # type: renderers.base.DocumentRenderer  # noqa
        self.template_renderer = getattr(
            renderers.template,
            self.config['mackerel']['TEMPLATE_RENDERER'])(site=self)  # type: renderers.base.TemplateRenderer  # noqa

    @cached_property
    def documents(self) -> 'Tuple[Document, ...]':
        """Parse every document file; files that fail to parse are logged as
        warnings and skipped rather than aborting the build."""
        documents = []
        for file in self.document_files:
            try:
                documents.append(Document(
                    document_path=file, content_path=self.content_path,
                    renderer=self.document_renderer))
            except exceptions.DocumentError as exc:
                self.logger.warning(str(exc))
        return tuple(documents)
|
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
import utils.metrics as metrics_util
import utils.mlp as mlp
import utils.misc as helper
import numpy as np
def get_model(x_size, y_size, config):
    """Instantiate the regression model named by ``config["regression_algo"]``.

    :param x_size: input feature count (used only by the "mlp_nn" backend).
    :param y_size: output dimension (used only by the "mlp_nn" backend).
    :param config: experiment configuration dict.
    :return: an unfitted regressor.
    :raises ValueError: for an unknown algorithm name. Bug fix: previously
        this printed a message and called exit(-1), killing the host process.
    """
    reg_type = config["regression_algo"]

    # Models that take no configuration.
    simple_models = {
        "linear": LinearRegression,
        "ridge": Ridge,
        "lasso": Lasso,
        "elastic": ElasticNet,
        "kneighbor": KNeighborsRegressor,
        "decisiontree": DecisionTreeRegressor,
    }
    if reg_type in simple_models:
        return simple_models[reg_type]()

    if reg_type == "randomforest":
        return RandomForestRegressor(
            n_jobs=config["job_count"],
            max_depth=config["max_depth"],
            random_state=config["random_state"])

    if reg_type == "mlp":
        return MLPRegressor(
            early_stopping=config["early_stopping"],
            random_state=config["random_state"],
            shuffle=config["shuffle"],
            verbose=True,
            learning_rate_init=config["lr"],
            hidden_layer_sizes=config["hidden_layer_sizes"],
            activation=config["activation"])

    if reg_type == "mlp_nn":
        return mlp.get_model(x_size, y_size, config)

    raise ValueError("unknown regression model type {}".format(reg_type))
def train(model, x, y, config):
    """Fit *model* on (x, y); custom MLPs are routed to their own trainer."""
    if isinstance(model, mlp.MLP):
        return mlp.train(model, x, y, config)
    # DataFrames are converted so the noise helper receives plain arrays.
    if not isinstance(x, np.ndarray):
        x, y = x.to_numpy(), y.to_numpy()
    model.fit(helper.add_gaussian_noise(x), y)
    return model
def evaluate(model, x, y, metrics, config):
    """Predict on *x* and score against *y*; custom MLPs use their own path.

    :return: (predictions, metrics_info) tuple.
    """
    if isinstance(model, mlp.MLP):
        return mlp.evaluate(model, x, y, metrics, config)
    predictions = model.predict(x)
    scores = metrics_util.compute_metrics(y, predictions, metrics,
                                          multioutput="raw_values")
    return (predictions, scores)
def infer(model, x, config, output_size=1):
    """Run inference on *x*; custom MLPs are routed to their own path."""
    if isinstance(model, mlp.MLP):
        return mlp.infer(model, x, output_size, config)
    return model.predict(x)
|
class Graph:
    """Simple undirected graph stored as an adjacency list."""

    def __init__(self, Nodes):
        self.nodes = Nodes
        self.adjacent_list = {}
        # Bug fix: previously iterated the module-level global `nodes` instead
        # of the constructor argument, breaking construction from other lists.
        for node in self.nodes:
            self.adjacent_list[node] = []

    def add_edge(self, u, v):
        """Add an undirected edge u<->v (no duplicate-edge check)."""
        self.adjacent_list[u].append(v)
        self.adjacent_list[v].append(u)

    def print_adj_list(self):
        """Print each node followed by its neighbour list."""
        for node in self.nodes:
            print(f"{node} --> {self.adjacent_list[node]}")
# Demo: build a small undirected graph and show its adjacency list.
# NOTE(review): at the time of review Graph.__init__ read this module-level
# `nodes` name instead of its parameter -- keep this list defined before
# constructing a Graph, and verify before renaming it.
nodes = ['A','B','C','D','E','F','G']
graph = Graph(nodes)
graph.add_edge('A','B')
graph.print_adj_list()
|
from django.shortcuts import render, redirect, HttpResponse
from apps.books_authors_app.models import Book, Author
# Create your views here.
def books(request):
    """Render the book list page with every Book in the database."""
    return render(request, "books_authors_app/books.html",
                  {'books': Book.objects.all()})
## Route to add a book to the database
def addbook(request):
    '''Create a Book from the submitted form, then return to the book list.'''
    if request.method == "POST":
        Book.objects.create(title=request.POST['form_book_title'],
                            desc=request.POST['form_book_desc'])
    # Bug fix: a non-POST request previously fell off the end and returned
    # None, which Django rejects; always redirect back to the book list.
    return redirect("/books")
## Route to display authors
## Route to display authors
def authors(request):
    """Render the author list page with every Author in the database."""
    return render(request, "books_authors_app/authors.html",
                  {'authors': Author.objects.all()})
## Route to add author to the database
def addauthor(request):
    """Create an Author from the submitted form, then return to the list."""
    if request.method == "POST":
        Author.objects.create(first_name=request.POST['form_author_f_name'],
                              last_name=request.POST['form_author_l_name'],
                              notes=request.POST['form_author_notes'])
    # Bug fix: non-POST requests previously returned None (invalid for a
    # Django view); always redirect back to the author list.
    return redirect("/authors")
def booknum(request, url_num):
    """Detail page for one book plus the authors not yet linked to it."""
    book = Book.objects.get(id=url_num)
    context = dict(
        title=book.title,
        id=book.id,
        desc=book.desc,
        authors=book.authors.all(),
        # Authors available to attach (not already linked to this book).
        authors_exclude=Author.objects.exclude(books=url_num),
    )
    return render(request, "books_authors_app/booknum.html", context)
def authornum(request, url_num):
    """Detail page for one author plus the books not yet linked to them."""
    author = Author.objects.get(id=url_num)
    context = dict(
        id=author.id,
        first_name=author.first_name,
        last_name=author.last_name,
        notes=author.notes,
        books=author.books.all(),
        # Books available to attach (not already linked to this author).
        books_exclude=Book.objects.exclude(authors=url_num),
    )
    return render(request, "books_authors_app/authornum.html", context)
def author2book(request):
    """Attach an existing author to a book, then show that book's page."""
    if request.method == 'POST':
        book = Book.objects.get(id=request.POST['book_id'])
        book.authors.add(Author.objects.get(id=request.POST['author_to_add']))
        return redirect("books/"+request.POST['book_id'])
    # Bug fix: non-POST requests previously returned None (invalid for a
    # Django view); send them back to the book list.
    return redirect("/books")
def book2author(request):
    """Attach an existing book to an author, then show that author's page."""
    if request.method == 'POST':
        author = Author.objects.get(id=request.POST['author_id'])
        author.books.add(Book.objects.get(id=request.POST['book_to_add']))
        return redirect("authors/"+request.POST['author_id'])
    # Bug fix: non-POST requests previously returned None (invalid for a
    # Django view); send them back to the author list.
    return redirect("/authors")
import random
from pyrlog.node import *
from pyrlog.message import *
"""Implementation of the Raft consistency protocol.
See: In Search of an Understandable Consensus Algorithm (Extended Version)
http://ramcloud.stanford.edu/raft.pdf
"""
class State(object):
    # Raft server roles (see section 5.1 of the Raft paper).
    FOLLOWER = 1
    LEADER = 2
    CANDIDATE = 3
class Server(object):
    """A single Raft server: message handling, timeouts and log replication.

    Bug fixes vs previous version:
      - timeout constants are now referenced through ``self`` (they were bare
        names, raising NameError at the first timeout reset);
      - ``__broadcast`` now passes the destination to ``send`` (it previously
        dropped it, unlike every other send site);
      - the commit-count median index uses integer division.
    """
    KEEPALIVE_DELAY = 30
    ELECTION_DELAY = 150  # XXX not used

    def __reset_timeout(self):
        """Schedule the next timeout: heartbeat cadence for the leader,
        election delay otherwise."""
        if self._state == State.LEADER:
            self._next_timeout_time = self._node.time() + self.KEEPALIVE_DELAY
        else:
            self._next_timeout_time = self._node.time() + self.ELECTION_DELAY

    def __get_receive_timeout(self):
        """Remaining time until the scheduled timeout fires."""
        return self._next_timeout_time - self._node.time()

    def __reset_volatile_state(self):
        """(Re)initialize state that is not persisted across restarts."""
        # XXX hard-coded leader
        # TODO: Leader election
        self._state = State.FOLLOWER
        if self._node.node_id == 0:
            self._state = State.LEADER
        # The leader maintains an estimate of the log length
        # at each follower; this is used to determine the number
        # of entries to send an AppendEntriesRequest:
        #   entries = self._log[estimate_length:]
        log_len = len(self._log)
        self._estimated_log_length = [log_len] * self._num_servers
        # Count of log entries that are definitively known to reside on
        # each server -- used to update _commit_count.
        self._known_log_length = [0] * self._num_servers
        self._known_log_length[0] = len(self._log)
        # Number of log entries that have been committed and are therefore
        # safe to apply to the state machine.
        self._commit_count = -1

    def __init__(self, node, num_servers, log):
        """Initialize a Raft server instance.

        :param node: The node that provides system services for this server.
        :param num_servers: The number of servers in the Raft group; assumed
            to have node IDs equal to: range(num_servers).
        :param log: An instance of log.Log to record tuples.
        """
        self._node = node
        self._num_servers = num_servers
        self._log = log
        # Volatile state
        self.__reset_volatile_state()

    def __broadcast(self, msg):
        """Send a message to every other server in the group."""
        for i in range(self._num_servers):
            if i != self._node.node_id:
                # Bug fix: the destination id was previously omitted.
                self._node.send(i, msg)

    def __update_leader_commit_count(self):
        """Advance the commit count to the length replicated on a majority."""
        known_commits = sorted(self._known_log_length)
        # Integer division: the median entry is known by a majority.
        self._commit_count = known_commits[self._num_servers // 2]
        # TODO: Apply new commits to the state machine
        # TODO: Store respond to client requests

    def __handle_message(self, src, msg):
        """Dispatch one incoming protocol message from server *src*."""
        if isinstance(msg, AppendEntriesRequest):
            self.__reset_timeout()
            # NOTE(review): min() here can only ever lower the local commit
            # count; Raft's rule is commit = min(leader_commit, len(log)) --
            # confirm whether max()/that form was intended.
            self._commit_count = min(self._commit_count,
                                     msg._leader_commit_count)
            # TODO: Apply new commits to the state machine
            if msg._prev_log_length <= len(self._log):
                self._log.append(msg.entries, msg._prev_log_length)
            response = AppendEntriesResponse(len(self._log))
            self._node.send(src, response)
        elif isinstance(msg, AppendEntriesResponse):
            self._estimated_log_length[src] = msg._log_length
            self._known_log_length[src] = msg._log_length
            self.__update_leader_commit_count()
            # TODO: aggressively send out new append requests for nodes with
            # holes in their log?
        elif isinstance(msg, OperationRequest):
            if self._state != State.LEADER:
                # TODO: redirect the client to the leader
                return
            self._log.append(msg.op)
            self._known_log_length[self._node.node_id] += 1
            # TODO: Store state to generate client response
            # TODO: aggressive generate append requests instead of waiting for
            # heartbeat timeout?

    def __handle_timeout(self):
        """Leader: send (possibly empty) AppendEntries heartbeats to peers."""
        if self._state == State.LEADER:
            for i in range(self._num_servers):
                if i == self._node.node_id:
                    continue
                prev_log_length = self._estimated_log_length[i]
                new_entries = self._log[prev_log_length:]
                msg = AppendEntriesRequest(
                    prev_log_length, new_entries, self._commit_count)
                self._node.send(i, msg)
            # TODO: Consider reverting to follower if we didn't hear back
            # from a majority of servers.
        else:
            pass  # TODO: Handle leader failure here?

    def run(self):
        """Main loop: receive messages until a timeout fires, then handle it."""
        node = self._node
        while True:
            try:
                src, message = node.receive(self.__get_receive_timeout())
                self.__handle_message(src, message)
            except NodeTimeout:
                self.__handle_timeout()
|
import subprocess
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = 'マイグレーション履歴の削除、マイグレーションファイルの再生成、マイグレートするコマンド'

    def handle(self, *args, **options):
        """Reset migration state: unapply, wipe, regenerate, migrate, seed."""
        steps = (
            ['python', 'manage.py', 'migrate', 'models', 'zero'],
            ['rm', '-fr', 'models/migrations'],
            ['mkdir', 'models/migrations'],
            ['touch', 'models/migrations/__init__.py'],
            ['python', 'manage.py', 'makemigrations'],
            ['python', 'manage.py', 'migrate'],
            ['python', 'manage.py', 'runseed'],
        )
        for step in steps:
            subprocess.call(step)
from tkinter import *
import tkinter.font as font
from tkinter import filedialog
def upload():
    # Open a native file chooser and remember the chosen path on the global
    # main window (side effect: sets window.file), echoing it for debugging.
    window.file=filedialog.askopenfilename()
    print(window.file)
def doctor_details():
    # Pop up a secondary window showing the (hard-coded) attending doctor.
    window1=Tk()
    window1.geometry('-0+80')
    window1.title("Doctor Details")
    # Header row.
    f2=Frame(window1)
    f2.configure(background='#ff8484')
    L2=Label(f2,bg='#ff8484',text="Doctor's Details:",font=("Monaco",28))
    L2.pack(side='left')
    f2.pack(side=TOP,fill='x')
    # Decorative separator row.
    f92=Frame(window1)
    f92.configure(bg='#ff8484')
    l14=Label(f92,text='+++++++++++++++++++++++++++++++++++++++++++++',bg='#ff8484').pack(side='left')
    f92.pack(fill='x')
    # Doctor name row.
    f2_1=Frame(window1)
    f2_1.configure(background='#ff8484')
    L2_1=Label(f2_1,bg='#ff8484',text="Doctor Name : {0}".format('Somisetty sai vasavi Harsha Vardhan Gupta'),font=("Ariel",24))
    L2_1.pack(side='left')
    f2_1.pack(side=TOP,fill='x')
    # Department row.
    f2_2=Frame(window1,bg='#ff8484')
    l2_2=Label(f2_2,text="Department : {0}".format('E.N.T'),font=("Ariel",24),bg='#ff8484')
    l2_2.pack(side='left')
    f2_2.pack(side=TOP,fill='x')
    # Spacer row.
    f93=Frame(window1)
    f93.configure(bg='#ff8484')
    l15=Label(f93,text=' ',bg='#ff8484').pack(side='left')
    f93.pack(fill='x')
    # Blocks until this secondary window is closed.
    window1.mainloop()
def first_window(Event):
    """Back-arrow click handler (bound to <Button-1> on the back label).

    NOTE(review): despite the name, this only destroys the current window;
    presumably the first screen is rebuilt elsewhere in the program — confirm.
    """
    window.destroy()
# ---------------------------------------------------------------------------
# Module-level script: builds the full-screen patient-details window.
# NOTE(review): this runs at import time, and the names name, gender, pn,
# age and weight are NOT defined anywhere in this file — they must already
# exist before this point; verify against the program that loads this module.
# ---------------------------------------------------------------------------
# NOTE(review): 'global' at module level is a no-op; likely a refactor leftover.
global window
window=Tk()
window.wm_attributes('-fullscreen',True)
window.configure(background='#ff8484')
# Back-arrow header: clicking the arrow image destroys the window (first_window).
f_back=Frame(window,bg='#ff8484')
back_image=PhotoImage(file="Back_Arrow.png")
back_label=Label(f_back,image=back_image,bg='#ff8484')
back_label.pack(side='left')
back_label.bind('<Button-1>',first_window)
f_back.pack(fill='x')
# "Good deeds" banner row with a hard-coded counter.
f11=Frame(window)
f11.configure(bg='#ff8484')
l10=Label(f11,bg='#ff8484',font=("Monaco",24),text='Some more Good Deeds to be done,for those coming up!')
l10.pack(side='left')
l11=Label(f11,bg='#ff8484',font=("Ariel",24),text='                                  10')
l11.pack(side='left')
f11.pack(fill='x')
# Title row with the Doctor Details button on the right.
f3=Frame(window)
f3.configure(background='#ff8484')
L4=Label(f3,bg='#ff8484',text="Patient's Details:",font=("Monaco",28))
L4.pack(side='left')
L00=Label(f3,bg='#ff8484',text="                  ")
L00.pack(side='right')
B4=Button(f3,text='Doctor Details',command=doctor_details,font=("Ariel",14))
B4.pack(side='right')
f3.pack(fill='x')
# Patient name row.
f4=Frame(window)
f4.configure(background='#ff8484')
L5=Label(f4,bg='#ff8484',text="Name: {0}".format(name),font=("Ariel",24))
L5.pack(side='left')
f4.pack(fill='x')
'''entry3=Entry(f4,borderwidth=3,width=38)
entry3.insert(END,name)
entry3.pack(side='left')
'''
# Gender row.
f7=Frame(window)
f7.configure(bg='#ff8484')
L6=Label(f7,bg='#ff8484',text="Gender: {0}".format(gender),font=("Ariel",24))
L6.pack(side='left')
'''entry0=Entry(f4,borderwidth=3,width=20)
entry0.insert(END,gender)
entry0.pack(side='left')
'''
f7.pack(fill='x')
# Phone number row.
f4_1=Frame(window)
f4_1.configure(bg='#ff8484')
pn_p=Label(f4_1,text="Phone Number: {0}".format(pn),font=("Ariel",24),bg='#ff8484')
pn_p.pack(side='left')
f4_1.pack(fill='x')
# Age row.
f4_12=Frame(window)
f4_12.configure(bg='#ff8484')
'''pn_p_e=Entry(f4_1,borderwidth=3,width=22)
pn_p_e.pack(side='left')
pn_p_e.insert(0,p_no)
'''
p_a=Label(f4_12,text="Age: {0}".format(age),font=("Ariel",24),bg='#ff8484')
p_a.pack(side='left')
f4_12.pack(fill='x')
# Weight row.
f4_2=Frame(window)
f4_2.configure(bg='#ff8484')
pn_w=Label(f4_2,text="Weight: {0}".format(weight),font=("Ariel",24),bg='#ff8484')
pn_w.pack(side='left')
'''pn_w_e=Entry(f4_2,borderwidth=3,width=20)
pn_w_e.pack(side='left')
pn_w_e.insert(0,weight)
'''
f4_2.pack(fill='x')
# Blank spacer row between the details and the tests section.
f5_w=Frame(window,bg='#ff8484')
L_w=Label(f5_w,bg='#ff8484').pack()
f5_w.pack(fill='x')
# Tests section: one bullet row per non-empty line of tests.txt.
f5=Frame(window)
f5.configure(background='#ff8484')
# NOTE(review): file handle is never closed, and this crashes if tests.txt
# is missing from the working directory — consider a with-block upstream.
x=open('tests.txt','r').read().split('\n')
L7=Label(f5,bg='#ff8484',text="Tests:",font=("Monaco",28))
L7.pack(side='left')
f5.pack(fill='x')
for i in x:
    if i!='':
        f6=Frame(window,background='#ff8484')
        dot=Label(f6,text='o',font=("Ariel",20),bg='#ff8484').pack(side='left')
        tests1=Label(f6,text=i,font=("Ariel",24),bg='#ff8484').pack(side='left')
        f6.pack(fill='x')
        #waste=Label(f6,text='',font=("Ariel",14),bg='#ff8484').pack(side='top')
        #f6.pack(fill='x')
# Spacer row before the action buttons.
f9=Frame(window)
f9.configure(bg='#ff8484')
l12=Label(f9,text='                                     ',bg='#ff8484').pack(side='left')
f9.pack(fill='x')
# Action buttons: Open Reports (no command bound) and Upload Report.
f8=Frame(window)
f8.configure(bg='#ff8484')
l_tp=Label(f8,text="                                            ",bg='#ff8484').pack(side='left')
b1=Button(f8,text='Open Reports')
myFont = font.Font(family='Monaco', size=20)
b1['font']=myFont
b1.pack(side='left')
l_tp2=Label(f8,text="                       ",bg='#ff8484').pack(side='left')
b2=Button(f8,text='Upload Report',command=upload)
b2['font']=myFont
b2.pack(side='left')
f8.pack(fill='x')
window.mainloop()
|
from datacompression import compressionAlgorithmn

# Round-trip demo: compress the sample file, then decompress the output.
source_name = "sample.txt"
codec = compressionAlgorithmn(source_name)
archive_path = codec.compress()
codec.decompress(archive_path)
|
'''
Author: your name
Date: 2020-08-06 15:18:03
LastEditTime: 2020-08-06 18:56:04
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \Algorithms_and_Data_Strucures\chapter_1\poker.py
'''
class Piece_Pocker:
    """A single playing card.

    The suit field doubles as the card kind — it also holds the joker tags
    ('king'/'queen'), in which case the point stays None.
    """

    def __init__(self):
        # Both fields start unset; the deck builder fills them in.
        self.suit = None
        self.point = None

    def show_card(self):
        """Print the card as "<suit> *** *** <point>" (both fields must be set)."""
        print(self.suit + ' *** *** ' + self.point)
class Pocker():
    """A full 54-card deck: 4 suits x 13 ranks, plus the two jokers.

    Deck order matches construction order: Spade A..K, Heart A..K,
    Diamond A..K, Club A..K, then the 'king' and 'queen' jokers.
    """

    def __init__(self):
        self.make_cards()

    def make_cards(self):
        """(Re)build self.cards from scratch in canonical deck order."""
        # Ranks 1/11/12/13 display as letters; everything else as digits.
        face_names = {1: 'A', 11: 'J', 12: 'Q', 13: 'K'}
        self.cards = []
        for suit_name in ('Spade', 'Heart', 'Diamond', 'Club'):
            for rank in range(1, 14):
                card = Piece_Pocker()
                card.suit = suit_name
                card.point = face_names.get(rank, str(rank))
                self.cards.append(card)
        # The jokers carry only a suit tag; their point remains None.
        for joker_tag in ('king', 'queen'):
            joker = Piece_Pocker()
            joker.suit = joker_tag
            self.cards.append(joker)
def powerset(array):
    """Return every subset of *array* as a list of lists, starting with [].

    Uses the doubling construction: for each element, every subset collected
    so far is copied with that element appended.
    """
    subsets = [[]]
    for element in array:
        # The comprehension is fully evaluated before += extends the list,
        # so subsets added in this round are never re-extended.
        subsets += [existing + [element] for existing in subsets]
    return subsets
def powersetUtil(result, element):
    """Return a new list: each subset in *result* with *element* appended.

    The input subsets are not mutated; fresh lists are produced.
    """
    return [subset + [element] for subset in result]
# Demo: prints all 8 subsets of [1, 2, 3], starting with the empty set.
print(powerset([1,2,3]))
|
'''
@author: Victor Pedroso Curtarelli
-------------------------------------------------------------------------------
Modelo QAA v6 integrado ao modelo de Kd com base no ajuste dos Passos 2 e 4
usando bandas do sensor Sentinel 2A/MSI.
Este modelo é separado em duas fases, na primeira é aplicado o modelo "Quasi
Analytical Algorithm" (QAA) de Lee et al. (2014) até seu Passo 6, onde é
calculada o coeficiente de absorção espectral total (at). Na segunda fase é
aplicado o modelo de Kd de Lee et al. (2013) com base nos dados de at simulados
pelo QAA v6.
'''
###############################################################################
##################### QAA.V6 - model ############################
###############################################################################
######################## IMPORTS ##############################################
###############################################################################
# Import basic modules from python
import pandas as pd
import os
import math as mt
from qaapy_kdpy.conversion import Rrs_to_rrs
from qaapy_kdpy.file import export_result
from qaapy_kdpy.filter import filter_wvl
from qaapy_kdpy.empirical_calc import *
# QAA - V5 modules
from qaapy_kdpy.QAA_Core.v5.calculation import *
# QAA - V6 modules
from qaapy_kdpy.QAA_Core.v6.calculation import *
def Qaa_v6_model_RUN(data_obj, ano, lb0):
    '''
    Run the QAA v6 model given an object of Data Frames holding all required
    inputs, a campaign name/year string used to name the results file, and an
    initial wavelength value (Lambda Zero).

    ----------
    Parameters
    ----------
    data_obj [Object]
        Object of Data Frames with the data used by the QAA/Kd model
        functions, loaded by the read_files function from the directory-path
        strings of each '.xlsx' data spreadsheet.
        The Data Frame object must contain at least:
            [acs]       Total absorption in the water column;
            [Rrs]       Above-surface remote sensing reflectance;
            [aw]        Spectral absorption coefficient of pure water;
            [bbw]       Spectral backscattering coefficient of pure water;
            [coleta]    Sheet with coordinates, dates and times of the
                        collections, per station;
            [Kd_ZEU]    Kd measured down to the 1% light attenuation depth;
            [Kd_3m]     Kd measured down to 3 m depth;
            [Kd_6m]     Kd measured down to 6 m depth.
        * All inputs must agree in spectral range, bands and station names.
    ano [String]
        Campaign name or year, used to name the saved dictionary file with
        every produced data set, in '.pickle' format.
        E.g.: 'TRM2019', '2013', 'PACO_2011', etc...
    lb0 [Value]
        Initial wavelength used in the QAA model (Lambda Zero).
        E.g.: 665, 560, 490, etc...

    -------
    Returns
    -------
    result_dict [Dictionary]
        Dictionary containing every relevant result and input; the same data
        are also saved as a '.pickle' file under './Results_Espectral'.
        Saved keys:
            [dados/lb0]      Initial wavelength (Lambda Zero);
            [dados/wl]       Wavelengths used;
            [dados/r_ref]    rrs data;
            [dados/campanha] Campaign name/year;
            [dados/equação]  Equation used by this model variant;
            [dados/g0]       Coefficient g0 - QAA Step 1;
            [dados/g1]       Coefficient g1 - QAA Step 1;
            [a_Lb]           Absorption at Lambda Zero;
            [bb]             Total backscattering;
            [at]             Total absorption estimated by the QAA model;
            [n]              Particulate backscattering decay exponent;
            [bbp_lb0]        Particulate backscattering at Lambda Zero;
            [bbp]            Particulate backscattering;
            [Kd_QAA]         Kd estimated by the QAA / Kd model;
            [Kd_ZEU]         Kd measured down to the 1% light attenuation
                             depth;
            [Kd_3m]          Kd measured down to 3 m depth;
            [Kd_6m]          Kd measured down to 6 m depth.
    '''
###############################################################################
########################### QAA SETTINGS - GENERAL ############################
###############################################################################
    # Optional NIR/SWIR band drops, left disabled by the author:
    # data_obj.Rrs = data_obj.Rrs.drop([833, 865, 945, 1374, 1614, 2202])
    # data_obj.aw = data_obj.aw.drop([833, 865, 945, 1374, 1614, 2202])
    # data_obj.bbw = data_obj.bbw.drop([833, 865, 945, 1374, 1614, 2202])

    # Rrs transformation: above-surface Rrs -> below-surface rrs (Step 0).
    rrs = Rrs_to_rrs(data_obj.Rrs)
    Rrs = data_obj.Rrs

    # Computing u (Step 1); g0/g1 are the reflectance-model coefficients.
    g0 = 0.089
    g1 = 0.1245
    u = calc_u(g0, g1, rrs)
##############################################################################################################
    # Filtering the data to the working wavelength set.
    wvl_ref = {
        # 'referencia': [443, 492, 560, 665, 704, 741, 783], # Sentinel-2A sensor bands
        # 'referencia': [400,413,443,490,510,560,620,665,674,681,709,754,761,764,768,779], # OLCI sensor bands
        # 'referencia': [400,413,443,490,510,560,620,665,674,681,709,754,761,764,768,779,865,885,900], # OLCI sensor bands
        # 'referencia': [400,411,443,490,560,620,667,674,681,709,754,761,779,865,555], # v6 bands
        # 'referencia': list(range(400, 951, 1)), # v6 bands
        'referencia': list(range(400, 751, 1)), # v6 PAR bands
        'lb0': lb0
    }
    wl = pd.Series(wvl_ref['referencia'])
    r_ref = filter_wvl(rrs, wvl_ref)
    R_ref = filter_wvl(Rrs, wvl_ref)
###############################################################################
############################ PHASE 1 - QAA.V6 #################################
###############################################################################
    # QAA v6 takes at_lb0 = a(670)
    aw = data_obj.aw

    # Step 2: absorption at the reference wavelength. Only the 560 nm
    # adjusted equation is active; the alternatives are kept for reference.
    # a_lb0_v6_492 = calc_alb0_v6_492(492, R_ref, 665, 704, data_obj.aw); lago = 'Eq. QAA.v6 AJUSTE 492 nm'
    a_lb0_v6_560 = calc_alb0_v6_560(560, R_ref, 665, 704, data_obj.aw); lago = 'Eq. QAA.v6 AJUSTE 560 nm'
    # a_lb0_v6 = calc_alb0_v6(wvl_ref['lb0'], R_ref, 443, 490, data_obj.aw); lago = 'Eq. QAA.v6'
    # a_lb0_v6 = aw.loc[lb0].values; lago = 'Eq. QAA.v6 aw'
    # a_lb0_v6_olci = calc_alb0_v6(wvl_ref['lb0'], R_ref, 443, 490, data_obj.aw); lago = 'QAA.v6_OLCI'
    # a_lb0_v6_s2a = calc_alb0_v6(wvl_ref['lb0'], R_ref, 443, 492, data_obj.aw); lago = 'QAA.v6_S2a'

    ## bbp (lambda_0) - Step 3: particulate backscattering at the reference band.
    # bbp_lb0_492 = calc_bbp_lb0_v6(u, data_obj.bbw, 492, a_lb0_v6_492)
    bbp_lb0_560 = calc_bbp_lb0_v6(u, data_obj.bbw, 560, a_lb0_v6_560)

    ## Computing N - Step 4: spectral slope of bbp.
    n_acs_492 = calc_n_acs_492(r_ref[665], r_ref[704])
    # n_acs_560 = calc_n_acs_560(r_ref[665], r_ref[704])
    # n_hydro_560 = calc_n_hydro_560(r_ref[443], r_ref[492])
    # n_hydro_704 = calc_n_hydro_704(r_ref[443], r_ref[492])

    # Computing bbp for every wavelength (Step 5 power law).
    # bbp_492_acs_492 = calc_bbp(wl, n_acs_492, bbp_lb0_492, 492)
    # bbp_492_acs_560 = calc_bbp(wl, n_acs_560, bbp_lb0_492, 492)
    # bbp_492_hydro_560 = calc_bbp(wl, n_hydro_560, bbp_lb0_492, 492)
    # bbp_492_hydro_704 = calc_bbp(wl, n_hydro_704, bbp_lb0_492, 492)
    bbp_560_acs_492 = calc_bbp(wl, n_acs_492, bbp_lb0_560, 560)
    # bbp_560_acs_560 = calc_bbp(wl, n_acs_560, bbp_lb0_560, 560)
    # bbp_560_hydro_560 = calc_bbp(wl, n_hydro_560, bbp_lb0_560, 560)
    # bbp_560_hydro_704 = calc_bbp(wl, n_hydro_704, bbp_lb0_560, 560)

    # Computing total bb for every wavelength (bbw + bbp).
    # bb_492_acs_492 = calc_bb(bbp_492_acs_492, data_obj.bbw)
    # bb_492_acs_560 = calc_bb(bbp_492_acs_560, data_obj.bbw)
    # bb_492_hydro_560 = calc_bb(bbp_492_hydro_560, data_obj.bbw)
    # bb_492_hydro_704 = calc_bb(bbp_492_hydro_704, data_obj.bbw)
    bb_560_acs_492 = calc_bb(bbp_560_acs_492, data_obj.bbw)
    # bb_560_acs_560 = calc_bb(bbp_560_acs_560, data_obj.bbw)
    # bb_560_hydro_560 = calc_bb(bbp_560_hydro_560, data_obj.bbw)
    # bb_560_hydro_704 = calc_bb(bbp_560_hydro_704, data_obj.bbw)

    # Computing total absorption for every wavelength (Step 6).
    # at_492_acs_492 = calc_a_total(bbp_492_acs_492, data_obj.bbw, u)
    # at_492_acs_560 = calc_a_total(bbp_492_acs_560, data_obj.bbw, u)
    # at_492_hydro_560 = calc_a_total(bbp_492_hydro_560, data_obj.bbw, u)
    # at_492_hydro_704 = calc_a_total(bbp_492_hydro_704, data_obj.bbw, u)
    at_560_acs_492 = calc_a_total(bbp_560_acs_492, data_obj.bbw, u)
    # at_560_acs_560 = calc_a_total(bbp_560_acs_560, data_obj.bbw, u)
    # at_560_hydro_560 = calc_a_total(bbp_560_hydro_560, data_obj.bbw, u)
    # at_560_hydro_704 = calc_a_total(bbp_560_hydro_704, data_obj.bbw, u)
###############################################################################
############################ PHASE 2 - Kd #####################################
###############################################################################
    # Lee et al. (2013) Kd model coefficients.
    m = [0.005, 4.26, 0.52, 10.54]
    chi = 0.265
    theta_s = calc_theta_s(data_obj.coleta)
    # NOTE (original author, PT, kept verbatim below): "rename everything
    # accordingly; rewrite the model and automate the adjustment steps".
    '''
    Renomear tudo de acordo!!!!!!
    Reescrever o modelo e automatizar etapa dos ajustes
    '''
    # raz_bbw_bb_492_acs_492 = calc_raz_bbw_bb(data_obj.bbw, bb_492_acs_492)
    # raz_bbw_bb_492_acs_560 = calc_raz_bbw_bb(data_obj.bbw, bb_492_acs_560)
    # raz_bbw_bb_492_hydro_560 = calc_raz_bbw_bb(data_obj.bbw, bb_492_hydro_560)
    # raz_bbw_bb_492_hydro_704 = calc_raz_bbw_bb(data_obj.bbw, bb_492_hydro_704)
    raz_bbw_bb_560_acs_492 = calc_raz_bbw_bb(data_obj.bbw, bb_560_acs_492)
    # raz_bbw_bb_560_acs_560 = calc_raz_bbw_bb(data_obj.bbw, bb_560_acs_560)
    # raz_bbw_bb_560_hydro_560 = calc_raz_bbw_bb(data_obj.bbw, bb_560_hydro_560)
    # raz_bbw_bb_560_hydro_704 = calc_raz_bbw_bb(data_obj.bbw, bb_560_hydro_704)

    # kd_qaa_492_acs_492 = calc_kd_lee_2013(m, theta_s, at_492_acs_492, bb_492_acs_492, chi, raz_bbw_bb_492_acs_492)
    # kd_qaa_492_acs_560 = calc_kd_lee_2013(m, theta_s, at_492_acs_560, bb_492_acs_560, chi, raz_bbw_bb_492_acs_560)
    # kd_qaa_492_hydro_560 = calc_kd_lee_2013(m, theta_s, at_492_hydro_560, bb_492_hydro_560, chi, raz_bbw_bb_492_hydro_560)
    # kd_qaa_492_hydro_704 = calc_kd_lee_2013(m, theta_s, at_492_hydro_704, bb_492_hydro_704, chi, raz_bbw_bb_492_hydro_704)
    kd_qaa_560_acs_492 = calc_kd_lee_2013(m, theta_s, at_560_acs_492, bb_560_acs_492, chi, raz_bbw_bb_560_acs_492)
    # kd_qaa_560_acs_560 = calc_kd_lee_2013(m, theta_s, at_560_acs_560, bb_560_acs_560, chi, raz_bbw_bb_560_acs_560)
    # kd_qaa_560_hydro_560 = calc_kd_lee_2013(m, theta_s, at_560_hydro_560, bb_560_hydro_560, chi, raz_bbw_bb_560_hydro_560)
    # kd_qaa_560_hydro_704 = calc_kd_lee_2013(m, theta_s, at_560_hydro_704, bb_560_hydro_704, chi, raz_bbw_bb_560_hydro_704)
###############################################################################
########################### Kd test - in-situ absorption ######################
###############################################################################
    # Sanity check: Kd computed from measured (acs) absorption instead of the
    # QAA-estimated one. Stations missing from at_acs are dropped from theta_s.
    at_acs = data_obj.acs + aw.values
    bb_acs = calc_bb_ref(u, wl, at_acs)
    bb_bb = data_obj.bb
    dif = set(theta_s).difference(at_acs)
    theta_s_2 = theta_s.drop(dif, axis = 1)
    raz_bbw_bb_acs = calc_raz_bbw_bb(data_obj.bbw, bb_acs)
    raz_bbw_bb_bb = calc_raz_bbw_bb(data_obj.bbw, bb_bb)
    kd_acs = calc_kd_lee_2013(m, theta_s_2, at_acs, bb_acs, chi, raz_bbw_bb_acs)
    kd_bb = calc_kd_lee_2013(m, theta_s_2, at_acs, bb_bb, chi, raz_bbw_bb_bb)
###############################################################################
########################### Data Extraction ###################################
###############################################################################
    result_dict = {}
    result_dict['dados'] = {
        'lb0': wvl_ref['lb0'],
        'wl': list(r_ref.keys()),
        'r_ref': r_ref,
        'campanhas': ano,
        'equacao': lago,
        'chute_inicial':'não otimizado',
        'h0h1h2': 'não otimizado',
        'g0': g0,
        'g1': g1
    }
    # result_dict['a_lb0_492'] = a_lb0_v6_492
    result_dict['a_lb0_560'] = a_lb0_v6_560
    # result_dict['bb_492_acs_492'] = bb_492_acs_492
    # result_dict['bb_492_acs_560'] = bb_492_acs_560
    # result_dict['bb_492_hydro_560'] = bb_492_hydro_560
    # result_dict['bb_492_hydro_704'] = bb_492_hydro_704
    result_dict['bb_560_acs_492'] = bb_560_acs_492
    # result_dict['bb_560_acs_560'] = bb_560_acs_560
    # result_dict['bb_560_hydro_560'] = bb_560_hydro_560
    # result_dict['bb_560_hydro_704'] = bb_560_hydro_704
    # result_dict['at_492_acs_492'] = at_492_acs_492
    # result_dict['at_492_acs_560'] = at_492_acs_560
    # result_dict['at_492_hydro_560'] = at_492_hydro_560
    # result_dict['at_492_hydro_704'] = at_492_hydro_704
    result_dict['at_560_acs_492'] = at_560_acs_492
    # result_dict['at_560_acs_560'] = at_560_acs_560
    # result_dict['at_560_hydro_560'] = at_560_hydro_560
    # result_dict['at_560_hydro_704'] = at_560_hydro_704
    result_dict['n_acs_492'] = n_acs_492
    # result_dict['n_acs_560'] = n_acs_560
    # result_dict['n_hydro_560'] = n_hydro_560
    # result_dict['n_hydro_704'] = n_hydro_704
    # result_dict['bbp_lb0_492'] = bbp_lb0_492
    result_dict['bbp_lb0_560'] = bbp_lb0_560
    # result_dict['bbp_492_acs_492'] = bbp_492_acs_492
    # result_dict['bbp_492_acs_560'] = bbp_492_acs_560
    # result_dict['bbp_492_hydro_560'] = bbp_492_hydro_560
    # result_dict['bbp_492_hydro_704'] = bbp_492_hydro_704
    result_dict['bbp_560_acs_492'] = bbp_560_acs_492
    # result_dict['bbp_560_acs_560'] = bbp_560_acs_560
    # result_dict['bbp_560_hydro_560'] = bbp_560_hydro_560
    # result_dict['bbp_560_hydro_704'] = bbp_560_hydro_704
    # result_dict['kd_qaa_492_acs_492'] = kd_qaa_492_acs_492
    # result_dict['kd_qaa_492_acs_560'] = kd_qaa_492_acs_560
    # result_dict['kd_qaa_492_hydro_560'] = kd_qaa_492_hydro_560
    # result_dict['kd_qaa_492_hydro_704'] = kd_qaa_492_hydro_704
    result_dict['kd_qaa_560_acs_492'] = kd_qaa_560_acs_492
    # result_dict['kd_qaa_560_acs_560'] = kd_qaa_560_acs_560
    # result_dict['kd_qaa_560_hydro_560'] = kd_qaa_560_hydro_560
    # result_dict['kd_qaa_560_hydro_704'] = kd_qaa_560_hydro_704
    result_dict['kd_acs'] = kd_acs
    result_dict['kd_bb'] = kd_bb
    result_dict['Kd_ZEU'] = data_obj.kd_zeu
    result_dict['Kd_3m'] = data_obj.kd_3m
    result_dict['Kd_6m'] = data_obj.kd_6m
    # Persist every product as a pickle under ./Results_Espectral.
    os.makedirs('./Results_Espectral', exist_ok = True)
    export_result(result_dict, './Results_Espectral/QAAv6_' + ano + '_' + str(wvl_ref['lb0']) + '_Ajustado.pickle')
    return result_dict
def Qaa_v6_model_msi(Rrs, wvl, b1, b2, b3, b4, b5, lb0, aw, bbw, theta_s):
    '''
    Band-oriented QAA v6 / Kd variant for Sentinel-2A/MSI-style inputs.

    Inlines the 560 nm-adjusted QAA v6 steps (absorption at lb0, bbp at lb0,
    spectral slope n, bbp/bb/at spectra) and the Lee et al. (2013) Kd model,
    returning only the estimated Kd. Unlike Qaa_v6_model_RUN, nothing is
    saved to disk.

    ----------
    Parameters
    ----------
    Rrs
        Remote sensing reflectance at the wavelengths in *wvl*. Only
        element-wise arithmetic is applied, so array-like/pandas inputs work;
        verify the exact type against callers.
    wvl
        Wavelength(s) at which Kd is evaluated; used to index *bbw* via .loc
        and in the bbp spectral power law.
    b1, b2
        Band reflectances. NOTE(review): unused in this function — apparently
        kept for signature compatibility; confirm before removing.
    b3, b4, b5
        Band reflectances feeding the fitted Step-2/Step-4 equations
        (presumably the 560, 665 and 704 nm MSI bands, matching the ratios
        used in Qaa_v6_model_RUN — TODO confirm).
    lb0
        Reference wavelength (Lambda Zero); the active a(lb0) equation was
        fitted for 560 nm.
    aw, bbw
        Pure-water absorption / backscattering, indexable by wavelength via
        .loc (pandas-style).
    theta_s
        Sun-angle term consumed by the Lee et al. (2013) Kd equation.

    -------
    Returns
    -------
    kd_qaa_560_acs_492
        Diffuse attenuation coefficient Kd estimated at *wvl*.
    '''
###############################################################################
########################### QAA SETTINGS - GENERAL ############################
###############################################################################
    # Drop the 741/783 nm (Sentinel-2 red-edge) rows from the water
    # coefficients so they align with the bands this variant uses.
    aw = aw.drop([741, 783])
    bbw = bbw.drop([741, 783])

    # Rrs transformation (inline form of Rrs_to_rrs: rrs = Rrs/(0.52+1.7*Rrs)).
    # rrs = Rrs_to_rrs(Rrs)
    rrs = Rrs / (0.52 + 1.7 * Rrs)
    # rrs_lb0 = Rrs_to_rrs(b3)
    rrs_lb0 = b3 / (0.52 + 1.7 * b3)
    rrs_b4 = b4 / (0.52 + 1.7 * b4)
    rrs_b5 = b5 / (0.52 + 1.7 * b5)

    # Computing u (Step 1); same g0/g1 coefficients as Qaa_v6_model_RUN.
    g0 = 0.089
    g1 = 0.1245
    u = calc_u(g0, g1, rrs)
    u_lb0 = calc_u(g0, g1, rrs_lb0)
###############################################################################
############################ PHASE 1 - QAA.V6 #################################
###############################################################################
    # Step 2 (560 nm adjusted fit): a(lb0) = aw(lb0) + fitted band-ratio term.
    # NOTE(review): 'lago' is assigned but never used or returned here.
    a_lb0_v6_560 = aw.loc[lb0].values + 0.4310 * (b3 / (b4 + b5)) ** -1.4408; lago = 'Eq. QAA.v6 AJUSTE 560 nm'
    ## bbp (lambda_0) - Step 3, inline form of calc_bbp_lb0_v6.
    bbp_lb0_560 = (( u_lb0 * a_lb0_v6_560 ) / (1 - u_lb0)) - bbw.loc[lb0].values
    ## Computing N - Step 4 fitted slope.
    # NOTE(review): np is expected to come from the star-imported qaapy
    # modules; it is not imported in this file directly — verify.
    n_acs_492 = (0.5248 * np.exp(rrs_b4 / rrs_b5)) - 1.1849
    # Step 5: bbp power law from lb0 to every wavelength in wvl.
    bbp_560_acs_492 = bbp_lb0_560 * np.power(lb0 / wvl, n_acs_492)
    # Total bb for every wavelength (bbw + bbp).
    bb_560_acs_492 = bbw.loc[wvl].values + bbp_560_acs_492
    # Total absorption for every wavelength (Step 6, inline calc_a_total).
    at_560_acs_492 = ((1 - u) * (bbw.loc[wvl].values + bbp_560_acs_492)) / u
###############################################################################
############################ PHASE 2 - Kd #####################################
###############################################################################
    # Lee et al. (2013) Kd model coefficients.
    m = [0.005, 4.26, 0.52, 10.54]
    chi = 0.265
    #theta_s = calc_theta_s(data_obj.coleta)
    raz_bbw_bb_560_acs_492 = bbw.loc[wvl].values / bb_560_acs_492
    # Kd = (1 + m0*theta_s)*at + (1 - chi*bbw/bb)*m1*(1 - m2*exp(-m3*at))*bb
    kd_qaa_560_acs_492 = ((1 + (m[0] * theta_s)) * at_560_acs_492) + ((1 - (chi * raz_bbw_bb_560_acs_492)) * m[1] * (1 - m[2] * np.exp(-m[3] * at_560_acs_492)) * bb_560_acs_492)
###############################################################################
########################### Data Extraction ###################################
###############################################################################
    return kd_qaa_560_acs_492
def Qaa_v6_model_RUN_original(data_obj, ano, lb0):
'''
Função que aplica o modelo QAA v6 dado um objeto de Data Frames contendo todos
os dados necessários, uma string com o nome/ano da campanha para salvar o arquivo
dos resultados e um valor de comprimento de onda inicial (Lambda Zero).
----------
Parameters
----------
data_obj [Object]
Objeto com Data Frames contendo dados usados nas funções do modelo QAA/Kd
alocados pela função read_files através dos argumentos contendo as strings
do caminho do diretório para cada planilha '.xlsx' de dados.
O objeto de Data Frames deve conter no mínimo:
[acs] Absorção total na coluna d'água;
[Rrs] Reflectância de sensoriamento remoto superficial;
[aw] Coeficiente de absorção espectral da água;
[bbw] Coeficiente de retroespalhamento espectral da água;
[coleta] Planilha contendo Coordenadas, datas e horários das
coletas por estação;
[Kd_ZEU] Kd medido com dados até a zona de atenação da luz até 1%;
[Kd_3m] Kd medido com dados até 3 metros de profundidade;
[Kd_6m] Kd medido com dados até 6 metros de profundidade.
* Os dados devem concordar em faixa espectral, bandas e nome das estações.
ano [String]
String contendo o nome ou ano da campanha, usado para salvar arquivo
dicionário contendo todos os dados produtos no formato '.pickle'.
Ex.: 'TRM2019', '2013', 'PACO_2011', etc...
lb0 [Value]
Comprimento de onda incial usado no modelo QAA (Lambda Zero).
Ex.: 665, 560, 490, etc...
-------
Returns
-------
result_dict [Dictionary]
O resultado desta função é um dicionário contendo todos os resultados e
dados de entrada relevantes.
Além disso o modelo salva um arquivo contendo todos os dados em formato
'.pickle' na pasta '.\Results_Espectral'.
Dados salvos:
[dados/lb0] Comprimento de onda incial (Lambda Zero);
[dados/wl] Comprimentos de onda utilizados;
[dados/r_ref] Dados de rrs;
[dados/campanha] Nome/ano da campanha;
[dados/equação] Equação utilizada no modelo em questão;
[dados/g0] Coeficiente g1 - Passo 1 QAA;
[dados/g1] Coeficiente g0 - Passo 1 QAA;
[a_Lb] Absorção em Lambda Zero;
[bb] Retroespalhamento total;
[at] Absorção total estimada pelo modelo QAA;
[n] Decaimento do retroespalhamento do particulado;
[bbp_lb0] Retroespalhamento do particulado em Lambda Zero;
[bbp] Retroespalhamento do particulado;
[Kd_QAA] Kd estimado pelo modelo QAA / Kd;
[Kd_ZEU] Kd medido com dados até a zona de atenação da luz
até 1%;
[Kd_3m] Kd medido com dados até 3 metros de profundidade;
[Kd_6m] Kd medido com dados até 6 metros de profundidade.
'''
###############################################################################
########################### QAA SETTINGS - GERAL ##############################
###############################################################################
# data_obj.Rrs = data_obj.Rrs.drop([833, 865, 945, 1374, 1614, 2202])
# data_obj.aw = data_obj.aw.drop([833, 865, 945, 1374, 1614, 2202])
# data_obj.bbw = data_obj.bbw.drop([833, 865, 945, 1374, 1614, 2202])
# Transformação do Rrs
rrs = Rrs_to_rrs(data_obj.Rrs)
Rrs = data_obj.Rrs
# Cálculando o U
g0 = 0.089
g1 = 0.1245
u = calc_u(g0, g1, rrs)
##############################################################################################################
# Filtrando os dados
wvl_ref = {
# 'referencia': [443, 492, 560, 665, 704, 741, 783], # Bandas do sensor Sentinel-2A
# 'referencia': [400,413,443,490,510,560,620,665,674,681,709,754,761,764,768,779], # Bandas do sensor OLCI
# 'referencia': [400,413,443,490,510,560,620,665,674,681,709,754,761,764,768,779,865,885,900], # Bandas do sensor OLCI
# 'referencia': [400,411,443,490,560,620,667,674,681,709,754,761,779,865,555], # Bandas v6
# 'referencia': list(range(400, 951, 1)), # Bandas v6
'referencia': list(range(400, 751, 1)), # Bandas PAR v6
'lb0': lb0
}
wl = pd.Series(wvl_ref['referencia'])
r_ref = filter_wvl(rrs, wvl_ref)
R_ref = filter_wvl(Rrs, wvl_ref)
###############################################################################
############################ FASE 1 - QAA.V6 ##################################
###############################################################################
# QAA v6 considera at_lb0 = a(670)
aw = data_obj.aw
# a_lb0_v6_492 = calc_alb0_v6_492(492, R_ref, 665, 704, data_obj.aw); lago = 'Eq. QAA.v6 AJUSTE 492 nm'
# a_lb0_v6_560 = calc_alb0_v6_560(560, R_ref, 665, 704, data_obj.aw); lago = 'Eq. QAA.v6 AJUSTE 560 nm'
# a_lb0_v6 = calc_alb0_v6(wvl_ref['lb0'], R_ref, 443, 490, data_obj.aw); lago = 'Eq. QAA.v6'
# a_lb0_v6 = aw.loc[lb0].values; lago = 'Eq. QAA.v6 aw'
# a_lb0_v6_olci = calc_alb0_v6(wvl_ref['lb0'], R_ref, 443, 490, data_obj.aw); lago = 'QAA.v6_OLCI'
a_lb0_v6_s2a = calc_alb0_v6(wvl_ref['lb0'], R_ref, 443, 492, data_obj.aw); lago = 'QAA.v6_S2a'
## bbp (lambda_0)
# bbp_lb0_492 = calc_bbp_lb0_v6(u, data_obj.bbw, 492, a_lb0_v6_492)
# bbp_lb0_560 = calc_bbp_lb0_v6(u, data_obj.bbw, 560, a_lb0_v6_560)
bbp_lb0_665 = calc_bbp_lb0_v6(u, data_obj.bbw, 665, a_lb0_v6_s2a)
## Cálculando N
# n_acs_492 = calc_n_acs_492(r_ref[665], r_ref[704])
# n_acs_560 = calc_n_acs_560(r_ref[665], r_ref[704])
# n_hydro_560 = calc_n_hydro_560(r_ref[443], r_ref[492])
# n_hydro_704 = calc_n_hydro_704(r_ref[443], r_ref[492])
n_slope = calc_n(r_ref[443], r_ref[560])
# Realizando cálculo do BBP para todos os comprimentos de onda
# bbp_492_acs_492 = calc_bbp(wl, n_acs_492, bbp_lb0_492, 492)
# bbp_492_acs_560 = calc_bbp(wl, n_acs_560, bbp_lb0_492, 492)
# bbp_492_hydro_560 = calc_bbp(wl, n_hydro_560, bbp_lb0_492, 492)
# bbp_492_hydro_704 = calc_bbp(wl, n_hydro_704, bbp_lb0_492, 492)
# bbp_560_acs_492 = calc_bbp(wl, n_acs_492, bbp_lb0_560, 560)
# bbp_560_acs_560 = calc_bbp(wl, n_acs_560, bbp_lb0_560, 560)
# bbp_560_hydro_560 = calc_bbp(wl, n_hydro_560, bbp_lb0_560, 560)
# bbp_560_hydro_704 = calc_bbp(wl, n_hydro_704, bbp_lb0_560, 560)
bbp_665 = calc_bbp(wl, n_slope, bbp_lb0_665, 665)
# Realizando cálculo do BB para todos comprimentos de onda
# bb_492_acs_492 = calc_bb(bbp_492_acs_492, data_obj.bbw)
# bb_492_acs_560 = calc_bb(bbp_492_acs_560, data_obj.bbw)
# bb_492_hydro_560 = calc_bb(bbp_492_hydro_560, data_obj.bbw)
# bb_492_hydro_704 = calc_bb(bbp_492_hydro_704, data_obj.bbw)
# bb_560_acs_492 = calc_bb(bbp_560_acs_492, data_obj.bbw)
# bb_560_acs_560 = calc_bb(bbp_560_acs_560, data_obj.bbw)
# bb_560_hydro_560 = calc_bb(bbp_560_hydro_560, data_obj.bbw)
# bb_560_hydro_704 = calc_bb(bbp_560_hydro_704, data_obj.bbw)
bb_665 = calc_bb(bbp_665, data_obj.bbw)
# Realizando cálculo do atotal para todos os comprimentos de onda
# at_492_acs_492 = calc_a_total(bbp_492_acs_492, data_obj.bbw, u)
# at_492_acs_560 = calc_a_total(bbp_492_acs_560, data_obj.bbw, u)
# at_492_hydro_560 = calc_a_total(bbp_492_hydro_560, data_obj.bbw, u)
# at_492_hydro_704 = calc_a_total(bbp_492_hydro_704, data_obj.bbw, u)
# at_560_acs_492 = calc_a_total(bbp_560_acs_492, data_obj.bbw, u)
# at_560_acs_560 = calc_a_total(bbp_560_acs_560, data_obj.bbw, u)
# at_560_hydro_560 = calc_a_total(bbp_560_hydro_560, data_obj.bbw, u)
# at_560_hydro_704 = calc_a_total(bbp_560_hydro_704, data_obj.bbw, u)
at_665 = calc_a_total(bbp_665, data_obj.bbw, u)
###############################################################################
############################ FASE 2 - Kd ######################################
###############################################################################
m = [0.005, 4.26, 0.52, 10.54]
chi = 0.265
theta_s = calc_theta_s(data_obj.coleta)
'''
Renomear tudo de acordo!!!!!!
Reescrever o modelo e automatizar etapa dos ajustes
'''
# raz_bbw_bb_492_acs_492 = calc_raz_bbw_bb(data_obj.bbw, bb_492_acs_492)
# raz_bbw_bb_492_acs_560 = calc_raz_bbw_bb(data_obj.bbw, bb_492_acs_560)
# raz_bbw_bb_492_hydro_560 = calc_raz_bbw_bb(data_obj.bbw, bb_492_hydro_560)
# raz_bbw_bb_492_hydro_704 = calc_raz_bbw_bb(data_obj.bbw, bb_492_hydro_704)
# raz_bbw_bb_560_acs_492 = calc_raz_bbw_bb(data_obj.bbw, bb_560_acs_492)
# raz_bbw_bb_560_acs_560 = calc_raz_bbw_bb(data_obj.bbw, bb_560_acs_560)
# raz_bbw_bb_560_hydro_560 = calc_raz_bbw_bb(data_obj.bbw, bb_560_hydro_560)
# raz_bbw_bb_560_hydro_704 = calc_raz_bbw_bb(data_obj.bbw, bb_560_hydro_704)
raz_bbw_bb_665 = calc_raz_bbw_bb(data_obj.bbw, bb_665)
# kd_qaa_492_acs_492 = calc_kd_lee_2013(m, theta_s, at_492_acs_492, bb_492_acs_492, chi, raz_bbw_bb_492_acs_492)
# kd_qaa_492_acs_560 = calc_kd_lee_2013(m, theta_s, at_492_acs_560, bb_492_acs_560, chi, raz_bbw_bb_492_acs_560)
# kd_qaa_492_hydro_560 = calc_kd_lee_2013(m, theta_s, at_492_hydro_560, bb_492_hydro_560, chi, raz_bbw_bb_492_hydro_560)
# kd_qaa_492_hydro_704 = calc_kd_lee_2013(m, theta_s, at_492_hydro_704, bb_492_hydro_704, chi, raz_bbw_bb_492_hydro_704)
# kd_qaa_560_acs_492 = calc_kd_lee_2013(m, theta_s, at_560_acs_492, bb_560_acs_492, chi, raz_bbw_bb_560_acs_492)
# kd_qaa_560_acs_560 = calc_kd_lee_2013(m, theta_s, at_560_acs_560, bb_560_acs_560, chi, raz_bbw_bb_560_acs_560)
# kd_qaa_560_hydro_560 = calc_kd_lee_2013(m, theta_s, at_560_hydro_560, bb_560_hydro_560, chi, raz_bbw_bb_560_hydro_560)
# kd_qaa_560_hydro_704 = calc_kd_lee_2013(m, theta_s, at_560_hydro_704, bb_560_hydro_704, chi, raz_bbw_bb_560_hydro_704)
kd_qaa_665 = calc_kd_lee_2013(m, theta_s, at_665, bb_665, chi, raz_bbw_bb_665)
###############################################################################
########################### Teste Kd - absorção insitu ########################
###############################################################################
'''
at_acs = data_obj.acs + aw.values
bb_acs = calc_bb_ref(u, wl, at_acs)
bb_bb = data_obj.bb
dif = set(theta_s).difference(at_acs)
theta_s_2 = theta_s.drop(dif, axis = 1)
raz_bbw_bb_acs = calc_raz_bbw_bb(data_obj.bbw, bb_acs)
raz_bbw_bb_bb = calc_raz_bbw_bb(data_obj.bbw, bb_bb)
kd_acs = calc_kd_lee_2013(m, theta_s_2, at_acs, bb_acs, chi, raz_bbw_bb_acs)
kd_bb = calc_kd_lee_2013(m, theta_s_2, at_acs, bb_bb, chi, raz_bbw_bb_bb)
'''
###############################################################################
########################### Data Extraction ###################################
###############################################################################
result_dict = {}
result_dict['dados'] = {
'lb0': wvl_ref['lb0'],
'wl': list(r_ref.keys()),
'r_ref': r_ref,
'campanhas': ano,
'equacao': lago,
'chute_inicial':'não otimizado',
'h0h1h2': 'não otimizado',
'g0': g0,
'g1': g1
}
# result_dict['a_lb0_492'] = a_lb0_v6_492
# result_dict['a_lb0_560'] = a_lb0_v6_560
result_dict['a_lb0_665'] = a_lb0_v6_s2a
# result_dict['bb_492_acs_492'] = bb_492_acs_492
# result_dict['bb_492_acs_560'] = bb_492_acs_560
# result_dict['bb_492_hydro_560'] = bb_492_hydro_560
# result_dict['bb_492_hydro_704'] = bb_492_hydro_704
# result_dict['bb_560_acs_492'] = bb_560_acs_492
# result_dict['bb_560_acs_560'] = bb_560_acs_560
# result_dict['bb_560_hydro_560'] = bb_560_hydro_560
# result_dict['bb_560_hydro_704'] = bb_560_hydro_704
result_dict['bb_665'] = bb_665
# result_dict['at_492_acs_492'] = at_492_acs_492
# result_dict['at_492_acs_560'] = at_492_acs_560
# result_dict['at_492_hydro_560'] = at_492_hydro_560
# result_dict['at_492_hydro_704'] = at_492_hydro_704
# result_dict['at_560_acs_492'] = at_560_acs_492
# result_dict['at_560_acs_560'] = at_560_acs_560
# result_dict['at_560_hydro_560'] = at_560_hydro_560
# result_dict['at_560_hydro_704'] = at_560_hydro_704
result_dict['at_665'] = at_665
# result_dict['n_acs_492'] = n_acs_492
# result_dict['n_acs_560'] = n_acs_560
# result_dict['n_hydro_560'] = n_hydro_560
# result_dict['n_hydro_704'] = n_hydro_704
result_dict['n_665'] = n_slope
# result_dict['bbp_lb0_492'] = bbp_lb0_492
# result_dict['bbp_lb0_560'] = bbp_lb0_560
result_dict['bbp_lb0_665'] = bbp_lb0_665
# result_dict['bbp_492_acs_492'] = bbp_492_acs_492
# result_dict['bbp_492_acs_560'] = bbp_492_acs_560
# result_dict['bbp_492_hydro_560'] = bbp_492_hydro_560
# result_dict['bbp_492_hydro_704'] = bbp_492_hydro_704
# result_dict['bbp_560_acs_492'] = bbp_560_acs_492
# result_dict['bbp_560_acs_560'] = bbp_560_acs_560
# result_dict['bbp_560_hydro_560'] = bbp_560_hydro_560
# result_dict['bbp_560_hydro_704'] = bbp_560_hydro_704
result_dict['bbp_665'] = bbp_665
# result_dict['kd_qaa_492_acs_492'] = kd_qaa_492_acs_492
# result_dict['kd_qaa_492_acs_560'] = kd_qaa_492_acs_560
# result_dict['kd_qaa_492_hydro_560'] = kd_qaa_492_hydro_560
# result_dict['kd_qaa_492_hydro_704'] = kd_qaa_492_hydro_704
# result_dict['kd_qaa_560_acs_492'] = kd_qaa_560_acs_492
# result_dict['kd_qaa_560_acs_560'] = kd_qaa_560_acs_560
# result_dict['kd_qaa_560_hydro_560'] = kd_qaa_560_hydro_560
# result_dict['kd_qaa_560_hydro_704'] = kd_qaa_560_hydro_704
result_dict['kd_qaa_665'] = kd_qaa_665
# result_dict['kd_acs'] = kd_acs
# result_dict['kd_bb'] = kd_bb
result_dict['Kd_ZEU'] = data_obj.kd_zeu
result_dict['Kd_3m'] = data_obj.kd_3m
result_dict['Kd_6m'] = data_obj.kd_6m
os.makedirs('./Results_Espectral', exist_ok = True)
export_result(result_dict, './Results_Espectral/QAAv6_' + ano + '_' + str(wvl_ref['lb0']) + '_Ajustado.pickle')
return result_dict
def Qaa_v6_model_msi_original(Rrs, wvl, b1, b2, b3, b4, b5, lb0, aw, bbw, theta_s):
    '''
    Apply the original QAA v6 model (Sentinel-2 MSI band version) and return
    the estimated diffuse attenuation coefficient Kd.

    Parameters
    ----------
    Rrs : above-surface remote-sensing reflectance spectrum.
    wvl : wavelengths at which the outputs are evaluated.
    b1, b2, b3, b4, b5 : per-band Rrs values (b4 is used as the reference
        band here; b5 is accepted but not used by this variant).
    lb0 : reference wavelength "lambda zero" (e.g. 665).
    aw : spectral absorption coefficient of pure water, indexed by
        wavelength (presumably a pandas object — `.loc`/`.drop` are used;
        confirm against the caller).
    bbw : spectral backscattering coefficient of pure water, same indexing.
    theta_s : solar-zenith term used in the Kd step.

    Returns
    -------
    kd_qaa_665 : Kd estimated by the QAA/Kd model for all `wvl`.
    '''
    ###############################################################################
    ########################### QAA SETTINGS - GERAL ##############################
    ###############################################################################
    # Drop wavelengths outside the working spectral range.
    aw = aw.drop([741, 783])
    bbw = bbw.drop([741, 783])
    # Rrs -> rrs (below-surface reflectance) conversion.
    rrs = Rrs / (0.52 + 1.7 * Rrs)
    rrs_lb0 = b4 / (0.52 + 1.7 * b4)
    rrs_b1 = b1 / (0.52 + 1.7 * b1)
    rrs_b3 = b3 / (0.52 + 1.7 * b3)
    # u = f(g0, g1, rrs) — QAA step 1 coefficients.
    g0 = 0.089
    g1 = 0.1245
    u = calc_u(g0, g1, rrs)
    u_lb0 = calc_u(g0, g1, rrs_lb0)
    ###############################################################################
    ############################ FASE 1 - QAA.V6 ##################################
    ###############################################################################
    # Empirical total absorption at the reference band (QAA v6 formulation).
    a_lb0_v6_665 = aw.loc[lb0].values + 0.39 * (b4 / (b1 + b2)) ** 1.14; lago = 'Eq. QAA.v6 Original 665 nm'
    ## bbp (lambda_0): particulate backscattering at the reference band.
    bbp_lb0_665 = (( u_lb0 * a_lb0_v6_665 ) / (1 - u_lb0)) - bbw.loc[lb0].values
    ## Spectral slope N of particulate backscattering.
    n_665 = 2 * (1 - 2* np.exp(-0.9 * (rrs_b1 / rrs_b3)))
    # Propagate bbp to every wavelength with the power-law slope.
    bbp_665 = bbp_lb0_665 * np.power(lb0 / wvl, n_665)
    # Total backscattering for all wavelengths.
    bb_665 = bbw.loc[wvl].values + bbp_665
    # Total absorption for all wavelengths (inverted from u).
    at_665 = ((1 - u) * (bbw.loc[wvl].values + bbp_665)) / u
    ###############################################################################
    ############################ FASE 2 - Kd ######################################
    ###############################################################################
    # Kd model coefficients (Lee et al. 2013 style — see sibling functions).
    m = [0.005, 4.26, 0.52, 10.54]
    chi = 0.265
    #theta_s = calc_theta_s(data_obj.coleta)
    # Fraction of backscattering due to pure water.
    raz_bbw_bb_665 = bbw.loc[wvl].values / bb_665
    kd_qaa_665 = ((1 + (m[0] * theta_s)) * at_665) + ((1 - (chi * raz_bbw_bb_665)) * m[1] * (1 - m[2] * np.exp(-m[3] * at_665)) * bb_665)
    ###############################################################################
    ########################### Data Extraction ###################################
    ###############################################################################
    return kd_qaa_665
|
import pickle
from filelock import FileLock
from ppdl.storage.weight_storage import WeightStorage
class LocalWeightStorage(WeightStorage):
    """Weight/stats storage backed by local pickle files, guarded by file locks.

    Bug fix: the original opened files in mode 'r' (text read) when
    *storing* and 'w' (text write, which truncates) when *fetching* —
    every store failed and every fetch destroyed the file. Pickle also
    requires binary mode ('wb'/'rb').
    """

    def __init__(self, weights_filename, stats_filename):
        self.weights_filename = weights_filename
        self.stats_filename = stats_filename

    def _store(self, filename, payload):
        # Serialize under an exclusive lock; 'wb' because pickle writes bytes.
        lock = FileLock("%s.lock" % filename)
        with lock:
            with open(filename, 'wb') as f:
                pickle.dump(payload, f)

    def _fetch(self, filename, key):
        # Deserialize under the lock; the on-disk object is a dict keyed by
        # 'weights'/'stats' (written by the server side).
        lock = FileLock("%s.lock" % filename)
        with lock:
            with open(filename, 'rb') as f:
                server_params = pickle.load(f)
            return server_params[key]

    def store_weights(self, weights):
        """Persist `weights` to the weights file."""
        self._store(self.weights_filename, weights)

    def store_stats(self, stats):
        """Persist `stats` to the stats file."""
        self._store(self.stats_filename, stats)

    def fetch_weights(self):
        """Return the 'weights' entry of the stored parameter dict."""
        return self._fetch(self.weights_filename, 'weights')

    def fetch_stats(self):
        """Return the 'stats' entry of the stored parameter dict."""
        return self._fetch(self.stats_filename, 'stats')

    def close(self):
        # Nothing to release: files are opened and closed per operation.
        pass
|
# Plot the saved simulation results (collision counts vs cue-stick speed)
# for each table geometry.
from main import *

# Cue-stick speeds 1.0..4.0 (step 0.5) and shot angles 0..2*pi (step pi/4).
v = np.arange(1, 4 + 0.5, 0.5)
phi = np.arange(0, 2 * np.pi, 0.25 * np.pi)
T = ["Formal_Table", "Ellipse_Table", "Table_O1", "Table_O2"]

# (Generation step kept for reference: runs the simulations over (phi, v)
# and saves the result grid to simulation_result_<table>.npy.)
# for t in ["Formal_Table","Ellipse_Table"]:
#     print("=== {} ===".format(t))
#     res = []
#     for vi in v:
#         temp = []
#         for pi in phi:
#             temp.append(do_simulation(pi,vi,t))
#         res.append(temp)
#     file = open("simulation_result_{}.txt".format(t),"w")
#     file.write(str(res))
#     file.close()
#     np.save("simulation_result_{}".format(t),np.array(res))
#     print("=================\n")

for t in T:
    data = np.load("simulation_result_{}.npy".format(t))
    plt.xlabel("V (cue stick)")
    plt.ylabel("# collision")
    plt.ylim(0, 20)
    # One black dot per (speed, angle) sample.
    plt.plot(v, data, "ko")
    # Mean per speed, excluding values >= 1000 (presumably a sentinel for
    # runs that never settled — TODO confirm against do_simulation).
    m_data = []
    for i in np.arange(0, len(v)):
        m_data.append(np.mean(data[i, np.where(data[i, :] < 1000)]))
    plt.plot(v, m_data)
    plt.draw()
    plt.savefig("result_{}.png".format(t))
    plt.clf()
print("program terminated.")
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
#%%
# Snake & ladder: a two-player turtle game (yellow vs blue tokens).
import turtle
import random

# Game window with the board image as background.
scr = turtle.Screen()
scr.title('snake & ladder')
scr.bgpic('Snake.gif')

# Yellow player's token, placed next to square 1.
yellow = turtle.Turtle()
yellow.shape("circle")
yellow.hideturtle()
yellow.penup()
yellow.goto(-265, -155)
yellow.color(1.0, 1.0, 0.0)
yellow.showturtle()
yellow.speed(1)

# Blue player's token, placed just below yellow's.
blue = turtle.Turtle()
blue.shape("circle")
blue.color(0.0, 0.0, 1.0)
blue.hideturtle()
blue.penup()
blue.goto(-265, -185)
blue.showturtle()
blue.speed(1)
# Board-square screen coordinates: 30 squares laid out as 5 rows of 6,
# numbered boustrophedon (left-to-right on even rows, right-to-left on odd
# rows). Generated by a loop instead of the original 30 hand-written
# assignments, which followed this exact grid pattern.
d = {}
bluepos = 1    # blue token's current square
yellowpos = 1  # yellow token's current square

_xs = [-275, -170, -65, 40, 145, 250]   # column x-coordinates
_ys = [-170, -90, -10, 70, 150]         # row y-coordinates, bottom to top
for _row, _y in enumerate(_ys):
    _cols = _xs if _row % 2 == 0 else list(reversed(_xs))
    for _col, _x in enumerate(_cols):
        d[_row * 6 + _col + 1] = [_x, _y]
def ladder(pos):
    """Return the square a ladder at `pos` climbs to, or 0 when there is
    no ladder on that square."""
    ladders = {3: 22, 5: 8, 11: 26, 20: 29}
    return ladders.get(pos, 0)
def snake(pos):
    """Return the square a snake at `pos` slides down to, or 0 when there
    is no snake on that square."""
    snakes = {27: 1, 19: 7, 17: 4, 21: 9}
    return snakes.get(pos, 0)
flag = 1  # turn marker: +1 -> yellow may roll, -1 -> blue may roll
def dicerollyellow():
    """Roll the dice for yellow and advance its token one full turn.

    Bound to the Down key. Runs only while the game is not over and it is
    yellow's turn (flag == 1); hands the turn to blue at the end.
    """
    global yellowroll, yellowpos, flag
    while yellowpos < 30 and bluepos < 30 and flag == 1:
        yellowroll = random.randint(1, 6)
        # Squares the token passes through this roll.
        yellowgo = []
        for j in range(yellowpos + 1, yellowpos + yellowroll + 1):
            yellowgo.append(j)
        for i in yellowgo:
            if i <= 30:
                yellow.goto(d[i][0], d[i][1])
                yellowpos = yellowpos + 1
            else:
                break
        # Climb a ladder if the token landed at its base.
        if ladder(yellowpos) != 0:
            yellow.goto(d[ladder(yellowpos)][0], d[ladder(yellowpos)][1])
            yellowpos = ladder(yellowpos)
        # Slide down a snake if the token landed on its head.
        if snake(yellowpos) != 0:
            yellow.goto(d[snake(yellowpos)][0], d[snake(yellowpos)][1])
            yellowpos = snake(yellowpos)
        if yellowpos >= 30:
            yellow.write("Winner!", font=("Verdana", 15, "normal"))
            blue.write("Loser!", font=("Verdana", 15, "normal"))
        if bluepos >= 30:
            blue.write("Winner!", font=("Verdana", 15, "normal"))
            # Bug fix: the original wrote "Winner!" for BOTH players here;
            # when blue has won, yellow is the loser (mirrors dicerollblue).
            yellow.write("Loser!", font=("Verdana", 15, "normal"))
        # Hand the turn to blue.
        flag = flag * (-1)
def dicerollblue():
    # Roll the dice for blue and advance its token one full turn.
    # Bound to the Up key; runs only while the game is not over and it is
    # blue's turn (flag == -1), then hands the turn back to yellow.
    global blueroll, bluepos, flag
    while yellowpos < 30 and bluepos < 30 and flag == -1:
        blueroll = random.randint(1, 6)
        # Squares the token passes through this roll.
        bluego = []
        for j in range(bluepos + 1, bluepos + blueroll + 1):
            bluego.append(j)
        for i in bluego:
            if i <= 30:
                blue.goto(d[i][0], d[i][1])
                bluepos = bluepos + 1
            else:
                # Roll would overshoot square 30; stop moving.
                break
        # Climb a ladder if the token landed at its base.
        if ladder(bluepos) != 0:
            blue.goto(d[ladder(bluepos)][0], d[ladder(bluepos)][1])
            bluepos = ladder(bluepos)
        # Slide down a snake if the token landed on its head.
        if snake(bluepos) != 0:
            blue.goto(d[snake(bluepos)][0], d[snake(bluepos)][1])
            bluepos = snake(bluepos)
        if yellowpos >= 30:
            yellow.write("Winner!", font=("Verdana", 15, "normal"))
            blue.write("Loser!", font=("Verdana", 15, "normal"))
        if bluepos >= 30:
            yellow.write("Loser!", font=("Verdana", 15, "normal"))
            blue.write("Winner!", font=("Verdana", 15, "normal"))
        # Hand the turn back to yellow.
        flag = flag * (-1)
# Key bindings: Down rolls for yellow, Up rolls for blue.
turtle.listen()
turtle.onkey(dicerollyellow, 'Down')
turtle.onkey(dicerollblue, 'Up')
turtle.mainloop()
#%%
import imutils
import cv2
import numpy as np
class ShapeDetector:
    """Classify a contour by the vertex count of its polygonal approximation."""

    def __init__(self):
        pass

    def detect(self, c):
        """Return a shape label for contour `c`.

        3 vertices -> triangle; 4 -> 'unidentified' when near-square
        (aspect ratio within [0.95, 1.05]) else 'circle'; 5 -> pentagon;
        anything else -> circle.
        """
        perimeter = cv2.arcLength(c, True)
        vertices = cv2.approxPolyDP(c, 0.04 * perimeter, True)
        n = len(vertices)
        if n == 3:
            return "triangle"
        if n == 4:
            x, y, w, h = cv2.boundingRect(vertices)
            aspect = w / float(h)
            return "circle" if aspect < 0.95 or aspect > 1.05 else "unidentified"
        if n == 5:
            return "pentagon"
        return "circle"
def detect_shape(image_path):
    """Classify the dominant shape in the image at `image_path`.

    Thresholds the grayscale image, finds external contours, lets
    ShapeDetector vote on (at most) the first 6 contours, and returns the
    label with the most votes.
    """
    image = cv2.imread(image_path)
    resized = imutils.resize(image, width=300)
    ratio = image.shape[0] / float(resized.shape[0])  # kept for parity; unused below
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    sd = ShapeDetector()
    con_dict = {'circle': 0, 'pentagon': 0, 'triangle': 0, 'unidentified': 0}
    for c in cnts[:6]:
        con_dict[sd.detect(c)] += 1
    # Bug fix: `sorted(con_dict)[0]` sorted the dict KEYS alphabetically and
    # always returned 'circle' regardless of the vote counts. Return the
    # label with the most votes instead.
    return max(con_dict, key=con_dict.get)
"""bigquery-etl CLI."""
import warnings
import click
from .._version import __version__
# We rename the import, otherwise it affects monkeypatching in tests
from ..cli.alchemer import alchemer as alchemer_
from ..cli.dag import dag
from ..cli.dryrun import dryrun
from ..cli.format import format
from ..cli.glean_usage import glean_usage
from ..cli.query import query
from ..cli.routine import mozfun, routine
from ..cli.view import view
from ..dependency import dependency
from ..feature_usage import feature_usage
from ..glam.cli import glam
from ..operational_monitoring import operational_monitoring
from ..stripe import stripe_
from ..subplat.apple import apple
def cli(prog_name=None):
    """Create and run the bigquery-etl CLI.

    Builds a click group from the imported subcommands, silences the
    expected end-user-credentials warning, then invokes the group.
    """
    # Subcommand name -> click command/group.
    commands = {
        "query": query,
        "dag": dag,
        "dependency": dependency,
        "dryrun": dryrun,
        "format": format,
        "routine": routine,
        "mozfun": mozfun,
        "stripe": stripe_,
        "glam": glam,
        "glean_usage": glean_usage,
        "view": view,
        "alchemer": alchemer_,
        "opmon": operational_monitoring,
        "feature_usage": feature_usage,
        "apple": apple,
    }

    @click.group(commands=commands)
    @click.version_option(version=__version__)
    def group():
        """CLI tools for working with bigquery-etl."""
        pass

    # This warning is expected when running with end-user credentials, so
    # suppress it rather than alarming users.
    warnings.filterwarnings(
        "ignore", "Your application has authenticated using end user credentials"
    )

    group(prog_name=prog_name)
if __name__ == "__main__":
cli()
|
# -*- coding: utf-8 -*-
import os
import random
import shutil
from datetime import datetime
import numpy as np
import torch
import yaml
from pytorch_lightning.logging import TestTubeLogger
from test_tube import HyperOptArgumentParser
from test_tube.argparse_hopt import TTNamespace
def load_yaml_args(parser: HyperOptArgumentParser, log):
    """Load the args defined in a YAML file and replace the values parsed
    by the HyperOptArgumentParser.

    :param parser: parser whose parsed values may be overridden.
    :param log: logger used to warn when no config file is given.
    :return: TTNamespace with the merged arguments.
    :raises Exception: if the YAML defines a key the parser does not know.
    """
    old_args = vars(parser.parse_args())
    configs = old_args.get("config")
    if configs:
        # Fix: read the config through a context manager — the original
        # `yaml.load(open(configs).read(), ...)` leaked the file handle.
        with open(configs) as config_file:
            yaml_file = yaml.load(config_file.read(), Loader=yaml.FullLoader)
        for key, value in yaml_file.items():
            if key in old_args:
                old_args[key] = value
            else:
                raise Exception(
                    "{} argument defined in {} is not valid!".format(key, configs)
                )
    else:
        log.warning(
            "We recommend the usage of YAML files to keep track \
of the hyperparameter during testing and training."
        )
    return TTNamespace(**old_args)
def get_main_args_from_yaml(args):
    """Load the __main__ arguments directly from the YAML config.

    :return: the (optimizer, scheduler, model) sections of the YAML file.
    :raises Exception: if no config path was given or a section is missing.
    """
    if not args.config:
        raise Exception("You must pass a YAML file if not using the command line.")
    try:
        # Fix: close the config file deterministically (was a bare open()).
        with open(args.config) as config_file:
            yaml_file = yaml.load(config_file.read(), Loader=yaml.FullLoader)
        return yaml_file["optimizer"], yaml_file["scheduler"], yaml_file["model"]
    except KeyError as e:
        raise Exception("YAML file is missing the {} parameter.".format(e.args[0]))
def setup_testube_logger():
    """Build the TestTubeLogger used for experiment tracking.

    The run version is the SLURM job id when available, otherwise a
    timestamp of the current moment.
    """
    job_id = os.environ.get("SLURM_JOB_ID")
    if job_id:
        version = job_id
    else:
        version = datetime.now().strftime("%d-%m-%Y--%H-%M-%S")
    return TestTubeLogger(
        save_dir="experiments/",
        version=version,
        name="lightning_logs",
    )
|
# Gotta import gym!
import gym

# Make the environment, replace this string with any
# from the docs. (Some environments have dependencies)
env = gym.make('CartPole-v0')

# Reset the environment to its default beginning state.
print("Initial Observation")
observation = env.reset()
print(observation)
print('\n')

stop = False
step_cnt = 0  # number of random actions taken (stray ';' removed)

# Take random actions until the episode terminates.
while not stop:
    step_cnt += 1
    # Random Action
    action = env.action_space.sample()
    # Get the 4 observation values discussed
    observation, reward, done, info = env.step(action)
    stop = done
    if done:
        # Fixed typo in the user-facing message: "seccess" -> "successful".
        print("Performed successful Action in {}".format(step_cnt))
        print('\n')
        print('observation')
        print(observation)
        print('\n')
        print('reward')
        print(reward)
        print('\n')
        print('done')
        print(done)
        print('\n')
        print('info')
        print(info)
        print('\n')
from math import *

# Simple pendulum: from the measured period t (seconds) recover the
# length via l = g * (t / (2*pi))**2.
period = float(input("Digite o tempo em segundos: "))
GRAVITY = 9.81
length = GRAVITY * (period / (2 * pi)) ** 2
print(length)
#encoding:utf-8
# Log into the admin console with Selenium and search for a shop by name.
# (Python 2 script — note the print statement at the bottom.)
import selenium
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.keys import Keys

driver = webdriver.Firefox()
driver.get('http://ms.dqs-edu.com/')
# Fill in and submit the login form.
driver.find_element_by_id('uname').clear()
driver.find_element_by_id('uname').send_keys('admin')
driver.find_element_by_id('upwd').clear()
driver.find_element_by_id('upwd').send_keys('ossadmin')
driver.find_element_by_tag_name('button').click()
driver.implicitly_wait(3)
driver.maximize_window()
# Search for the shop whose name contains the test keyword.
driver.find_element_by_id('shop-name').send_keys(u'测试')
driver.find_element_by_id('search-btn').click()
try:
    # Expect the marker text in the results page.
    assert 'hahaahhaahhahh' in driver.page_source
except:
    # Printed when the marker is absent ("no search results found").
    print '没有搜索出结果'
value = int(input("Enter a number between 10 and 20: "))
# A chained comparison replaces the original `and` of two separate tests.
if 10 <= value <= 20:
    print('Thank you')
else:
    print('Incorrect')
# -*- coding: utf-8 -*-
"""
Study Regular Expression functions named split
@author: Sam Fang
"""
import re
def how_to_use_split():
    """Demonstrate re.split with alternation and a lookahead.

    The pattern splits on ", " or on a single space that is followed by
    either five digits or two capital letters. (?:...) is a non-capturing
    group: it matches without storing the result, which keeps the
    alternatives out of the split output.
    """
    DATA = ('Mountain View, CA 94040',
            'Sunnyvale, CA doweewe',
            'Los Altos, 94023 XAX',
            'Cupertino, 95014',
            'Palo Alto, CA'
            )
    for record in DATA:
        print(re.split(", |(?= (?:\d{5}|[A-Z]{2})) ", record))

how_to_use_split()
import os
import tarfile
import zipfile
from os.path import isdir, isfile, islink, join, exists
from subprocess import check_output, STDOUT
import pytest
from venv_pack.formats import archive
@pytest.fixture(scope="module")
def root_and_paths(tmpdir_factory):
    """Build the example directory tree the archive tests round-trip.

    Returns (root, paths): the tree root and the relative paths to archive.
    """
    root = str(tmpdir_factory.mktemp('example_dir'))

    def touch(*parts):
        # Create an empty regular file under root.
        with open(join(root, *parts), mode='w'):
            pass

    def make_dir(name):
        os.mkdir(join(root, name))

    def make_link(name, target):
        # Create a *relative* symlink root/name -> target.
        link_path = join(root, name)
        rel_target = os.path.relpath(join(root, target),
                                     os.path.dirname(link_path))
        os.symlink(rel_target, link_path)

    # Build test directory structure
    make_dir("empty_dir")
    make_link("link_to_empty_dir", "empty_dir")
    make_dir("dir")
    touch("dir", "one")
    touch("dir", "two")
    make_link("link_to_dir", "dir")
    touch("file")
    make_link("link_to_file", "file")

    paths = ["empty_dir",
             "link_to_empty_dir",
             join("dir", "one"),
             join("dir", "two"),
             "link_to_dir",
             "file",
             "link_to_file"]

    # Sanity-check the freshly built tree with the same verifier the tests use.
    check(root)

    return root, paths
def checklink(path, sol):
    """Assert that `path` is a symlink whose literal target equals `sol`."""
    assert islink(path)
    target = os.readlink(path)
    assert target == sol
def check(out_dir, links=False):
    """Verify the extracted example tree under `out_dir`.

    With links=True, symlinks must have survived extraction verbatim;
    otherwise only the (copied) directory contents are compared.
    """
    assert exists(join(out_dir, "empty_dir"))
    for d in ("empty_dir", "link_to_empty_dir", "dir", "link_to_dir"):
        assert isdir(join(out_dir, d))
    for f in (join("dir", "one"), join("dir", "two"), "file", "link_to_file"):
        assert isfile(join(out_dir, f))

    if links:
        for link_name, target in (("link_to_dir", "dir"),
                                  ("link_to_file", "file"),
                                  ("link_to_empty_dir", "empty_dir")):
            checklink(join(out_dir, link_name), target)
    else:
        # Symlinks were materialized as copies; contents must still match.
        assert set(os.listdir(join(out_dir, "link_to_dir"))) == {'one', 'two'}
def has_infozip():
    """Return True when the Info-ZIP `unzip` binary is available."""
    try:
        help_text = check_output(['unzip', '-h'], stderr=STDOUT).decode()
    except Exception:
        return False
    return "Info-ZIP" in help_text
@pytest.mark.parametrize('format, symlinks',
                         [('zip', False),
                          ('zip', True),
                          ('tar.gz', True),
                          ('tar.bz2', True),
                          ('tar', True)])
def test_format(tmpdir, format, symlinks, root_and_paths):
    """Round-trip the example tree through each archive format and verify it."""
    # Bug fix: the original condition was `if 'zip' and symlinks` — the
    # non-empty literal 'zip' is always truthy, so *every* symlink case
    # (including the tar ones) was skipped whenever Info-ZIP was missing.
    # Only the zip+symlinks case actually needs Info-ZIP for extraction.
    if format == 'zip' and symlinks and not has_infozip():
        pytest.skip("Info-ZIP not installed")

    root, paths = root_and_paths

    out_path = join(str(tmpdir), 'test.' + format)
    out_dir = join(str(tmpdir), 'test')
    os.mkdir(out_dir)

    # Write the archive, including one file added from bytes and (when
    # supported) two manually added symlinks.
    with open(out_path, mode='wb') as fil:
        with archive(fil, format, zip_symlinks=symlinks) as arc:
            for rel in paths:
                arc.add(join(root, rel), rel)
            arc.add_bytes(join(root, "file"),
                          b"foo bar",
                          join("dir", "from_bytes"))
            if symlinks:
                arc.add_link(join(root, "link_to_file"),
                             join("dir", "one"),
                             "manual_link_to_file")
                arc.add_link(join(root, "link_to_dir"),
                             "empty_dir",
                             "manual_link_to_dir")

    # Extract: Python's zipfile cannot restore symlinks, so use Info-ZIP there.
    if format == 'zip':
        if symlinks:
            check_output(['unzip', out_path, '-d', out_dir])
        else:
            with zipfile.ZipFile(out_path) as out:
                out.extractall(out_dir)
    else:
        with tarfile.open(out_path) as out:
            out.extractall(out_dir)

    # Verify the extracted tree and the extra entries.
    check(out_dir, links=symlinks)

    assert isfile(join(out_dir, "dir", "from_bytes"))
    with open(join(out_dir, "dir", "from_bytes"), 'rb') as fil:
        assert fil.read() == b"foo bar"

    if symlinks:
        checklink(join(out_dir, "manual_link_to_dir"), "empty_dir")
        checklink(join(out_dir, "manual_link_to_file"),
                  join("dir", "one"))
|
from django.conf.urls import url, include
from rest_framework import routers
from . import views
# REST-framework router exposing the shop API viewsets.
router = routers.DefaultRouter()
router.register(r'products', views.ProductViewSet)
router.register(r'productcomplete', views.ProductAutocompleteViewSet)
router.register(r'cloud', views.CloudViewSet)
router.register(r'categories', views.CategoriesViewSet)
router.register(r'discounts', views.DiscountViewSet)

urlpatterns = [
    url(r'^$', views.index, name="index"),
    url(r'^purchase/$', views.purchase, name="purchase"),
    # API
    url(r'^save-client/$', views.save_client_json, name="save_client"),
    url(r'^get-or-create-cart/$', views.get_create_cart, name="get_create_cart"),
    url(r'^get-or-create-client/$', views.get_create_client, name="get_create_client"),
    url(r'^get-estimate-pdf/$', views.get_estimate_pdf, name="get_estimate_pdf"),
    url(r'^send-quote-email/$', views.email_estimate_pdf, name="send_quote_email"),
    url(r'^get-previous-estimate/$', views.get_previous_estimate, name="get_previous_estimate"),
    url(r'^checkout/$', views.checkout, name="checkout"),
    url(r'^plaid-credentials/$', views.plaid_credentials, name="plaid_credentials"),
]

# Mount the router-generated endpoints under /api/.
urlpatterns += [
    url(r'^api/', include(router.urls)),
]
__author__ = 'Jonathan Rubin'

import os
import matplotlib
# Non-interactive backend so the script can run without a display; must be
# selected before pyplot is imported.
matplotlib.use('Agg')
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
import matplotlib.pyplot as plt
import scipy.stats as stats
def run(files, figures):
    """For each file in `files`, score TFs with binomial tail statistics and
    write the sorted results to <figures>/TFs.txt. (Python 2 code.)

    Input file layout (inferred from the parsing below — confirm against
    the producer): a header line whose tokens after the first are TF names,
    then rows where, after dropping the first 3 columns, column 0 is a
    '0'/'1' class label and the remaining columns are per-TF counts.
    """
    outfile = open(figures + 'TFs.txt', 'w')
    for file1 in os.listdir(files):
        print file1
        TFs = list()
        hist = list()
        pos = 0   # rows whose label is not '0'
        neg = 0   # rows whose label is '0'
        with open(files + file1) as F:
            names = F.readline().strip().split()[1:]
            # Per-TF tallies over all rows:
            one = [0] * len(names)    # label!='0' and count != 0
            zero = [0] * len(names)   # label=='0' and count == 0
            alist = [0] * len(names)  # count > 0 (any label)
            blist = [0] * len(names)  # count <= 0 (any label)
            clist = [0] * len(names)  # label=='0' and count != 0
            dlist = [0] * len(names)  # label!='0' and count == 0
            for line in F:
                line = line.strip().split()[3:]
                for i in range(len(line[1:])):
                    if int(line[i+1]) > 0:
                        alist[i] += 1.0
                    else:
                        blist[i] += 1.0
                if line[0] == '0':
                    neg += 1.0
                    for i in range(len(line[1:])):
                        if int(line[i+1]) == 0:
                            zero[i] += 1.0
                        else:
                            clist[i] += 1.0
                else:
                    pos += 1.0
                    for i in range(len(line[1:])):
                        if int(line[i+1]) != 0:
                            one[i] += 1.0
                        else:
                            dlist[i] += 1.0
        for i in range(len(one)):
            N = pos + neg
            p = pos/N
            a = alist[i]/N
            #S = one[i] - N*p*a
            # Binomial tail probabilities for each cell of the label/count
            # 2x2 contingency table.
            S = 1.0-stats.binom(N,p*a).cdf(one[i])
            S2 = stats.binom(N,p*(1-a)).cdf(dlist[i])
            S3 = stats.binom(N,(1-p)*a).cdf(clist[i])
            S4 = 1.0-stats.binom(N,(1-p)*(1-a)).cdf(zero[i])
            #print alist[i], one[i],zero[i],pos,neg,N
            # Pseudo-counts to avoid division by zero in S5 below.
            if alist[i] == 0:
                alist[i] = 0.002
                one[i] += 0.001
            if blist[i] == 0:
                blist[i] = 0.002
                zero[i] += 0.001
            # Mean of the two conditional agreement rates; used as sort key.
            S5 = ((float(one[i])/alist[i]) + (float(zero[i])/blist[i]))/2
            TFs.append((names[i],S,S2,S3,S4,S5))
        hist = [x[1] for x in TFs]
        # Rank TFs by S5, best first.
        TFs.sort(key=lambda x: x[5], reverse=True)
        outfile.write(file1.split('.')[0] + '\t')
        for item in TFs:
            for val in item:
                outfile.write(str(val) + ",")
        outfile.write('\n')
        # (plotting code kept for reference)
        #F1 = plt.figure()
        #ax1 = F1.add_subplot(121)
        #ax1.hist(hist,50)
        #ax2 = F1.add_subplot(122)
        #ax2.xaxis.set_visible(False)
        #ax2.yaxis.set_visible(False)
        #colLabels=("TF","p-value","S-Score")
        #the_table = ax2.table(cellText=TFs[:27], colLabels=colLabels, loc='center',fontsize=1)
        #plt.savefig(figures + file1.split('.')[0] + '.png')
        #plt.close()
    outfile.close()
|
def split_and_join(line):
    """Split `line` on single spaces and re-join the parts with '-'.

    Bug fix: the original assigned intermediate results to a throwaway
    local, joined the *unsplit* input string, and returned None.
    """
    return "-".join(line.split(" "))
if __name__ == '__main__':
    # Read one line from stdin, transform it, and print the result.
    line = input()
    result = split_and_join(line)
    print(result)
number = int(input("Enter a Number: "))
# Multiply 2 * 3 * ... * number; starting the product at 1 also makes
# 0! and 1! come out as 1.
factorial_value = 1
for term in range(2, number + 1):
    factorial_value *= term
print("Factorial of a number is", factorial_value)
#!/usr/bin/python
# Poll a Sparkfun data stream and sonify it with pygame: the ramp channel
# selects which .wav sample to play, the sine channel sets the volume, and
# the square channel sets the pause between plays. (Python 2 script.)
# Importing the module
import urllib2
import random
from SimpleCV import *
import time
import pygame
import csv

pygame.mixer.init()

while True:
    url = 'http://data.sparkfun.com/output/jqwVKxlQgYTa9om26GjL.cvs'
    #url = 'http://data.sparkfun.com/output/yAnZOd1KQ6IzNRMr6jM1.cvs'
    response = urllib2.urlopen(url)
    cr = csv.reader(response)
    # Setup an empty list
    #parsed_data = []
    fields = cr.next()
    # Skip over the first line of the file for the headers
    #print "l"
    ramp = []
    sin = []
    square = []
    for row in cr:
        #parsed_data.append(dict(zip(fields, row)))
        ramp.append(int(row[1]))
        #print row[1]
        sin.append(float(row[2]))
        square.append(int(row[3]))
    print ramp[0]
    print sin[0]
    print square[0]
    playtime = random.randint(1, 10)
    print playtime
    # Map the latest ramp value to a sample index: higher ramp -> lower
    # index. NOTE(review): if ramp[0] <= 100 `fi` is never assigned and the
    # load below raises NameError on the first iteration — confirm the feed
    # always exceeds 100.
    if ramp[0] > 100:
        fi = 6
    if ramp[0] > 200:
        fi = 5
    if ramp[0] > 300:
        fi = 4
    if ramp[0] > 400:
        fi = 3
    if ramp[0] > 500:
        fi = 2
    if ramp[0] > 600:
        fi = 1
    # Volume from the magnitude of the sine channel.
    r = abs(sin[0])
    #r=abs(random.random())
    #print `r`
    pygame.mixer.music.set_volume(r)
    #print` abs(random.random())`
    #pygame.mixer.music.set_volume(abs(sin[0]))
    # Backticks are Python 2 repr() syntax: `fi` == repr(fi).
    pygame.mixer.music.load(`fi`+".wav")
    pygame.mixer.music.play()
    #while pygame.mixer.music.get_busy() == True:
    #continue
    time.sleep(square[0])
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the `Dtickets` and `start` counters to the dailyjackpot model."""

    dependencies = [
        ('gameplay', '0005_auto_20180427_1415'),
    ]

    operations = [
        # Both fields are non-negative integers defaulting to 0 so that
        # existing rows get a valid value.
        migrations.AddField(
            model_name='dailyjackpot',
            name='Dtickets',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='dailyjackpot',
            name='start',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
|
import sentiment
import os
import re
import math
import csv
from tqdm import tqdm
from tkinter import filedialog
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# Remove domain words that appear with high frequency every year.
spc_stopwords = ['rms','received','told','car','assigned','arrested','rape','mater','id','suspect','state','states','stated','male','sex','case','time','unit','crime','crimes','original','narrative','investigation','investigative','victim','victims','police']
# NOTE: this rebinds the imported `stopwords` module name to a plain list.
stopwords = stopwords.words('english')
stopwords.extend(spc_stopwords)
# Initialize the sentiment analyzer.
stmnt = sentiment.SentimentAnalysis()
# Word-frequency counting
def wordcount(words_list):
    """Count occurrences of each word, preserving first-seen order.

    Returns a list of [word, count] pairs.
    """
    tally = {}
    for word in words_list:
        tally[word] = tally.get(word, 0) + 1
    return [[word, n] for word, n in tally.items()]
# TF-IDF computation helpers
def D_con(word, count_list):
    """Number of documents in `count_list` that contain `word`."""
    return sum(1 for document in count_list if word in document)
def tf(word, count):
    """Term frequency of `word` within the `count` mapping."""
    total = sum(count.values())
    return count[word] / total
def idf(word, count_list):
    """Inverse document frequency of `word` over `count_list`.

    NOTE: this keeps the original formula, log(N) / (1 + doc_frequency);
    the textbook variant log(N / (1 + df)) would behave differently.
    """
    doc_frequency = D_con(word, count_list)
    return math.log(len(count_list)) / (1 + doc_frequency)
def tfidf(word, count, count_list):
    """TF-IDF score of `word`, rounded to 6 decimal places."""
    score = tf(word, count) * idf(word, count_list)
    return round(score, 6)
# Location of the report folder.
input_dir = '/Users/liziyang/Downloads/NJIT_Sandbox-selected/Report_population'
report_list = os.listdir(input_dir)
# Location of the report-cluster folder.
cluster_dir = r'/Users/liziyang/Downloads/Cluster_csv_data'
cluster_list = os.listdir(cluster_dir)
# Per-cluster results: [cluster name, average sentiment score].
output = []
for elem in tqdm(cluster_list):
    TargetRMS = []  # RMS ids used to locate the cluster's report files
    complete_name = os.path.join(cluster_dir, elem)
    print(complete_name)
    with open(complete_name, 'r', encoding='utf-8') as csvf:
        cluster = csv.reader(csvf)
        print()
        # Column 2 == '1' marks membership in this cluster; column 1 is the
        # numeric part of the RMS id.
        for line in cluster:
            if line[2] == '1':
                TargetRMS.append('RMS' + line[1])
    # Find the target RMS reports, clean the text, and tally the words.
    words = []
    score = []  # per-report sentiment scores
    for item in report_list:
        RMS_ext = re.search(r'RMS\d{2}-?\d{2,8}', item)
        RMS = RMS_ext.group()
        if RMS in TargetRMS:
            complete_name = os.path.join(input_dir, item)
            with open(complete_name, 'r', encoding='utf-8') as f:
                report_content = f.read()
            sco = stmnt.score(report_content)  # sentiment analysis
            score.append(sco)
            # Tokenize, keep alphabetic lowercase tokens, drop stopwords.
            words_token = word_tokenize(report_content)
            words_alph = [word.lower() for word in words_token if word.isalpha()]
            words_local = [word for word in words_alph if word not in stopwords]
            words.extend(words_local)
    avg_score = stmnt.average(score)
    output.append([elem.replace('.csv', ''), avg_score])
    count_list = wordcount(words)  # word-frequency stats for this cluster
    output_path = r'/Users/liziyang/Downloads/Cluster_output/'
    # with open(str(output_path) + str(elem),'w',encoding='utf-8') as f:
    #     writer = csv.writer(f)
    #     header = ['word','frequency']
    #     writer.writerow(header)
    #     for i in count_list:
    #         writer.writerow(i)

# Write the per-cluster average sentiment scores.
with open('Cluster_Analysis.csv', 'w', encoding='utf-8') as f:
    writer = csv.writer(f)
    header = ['Cluster', 'Avg_Score']
    writer.writerow(header)
    for line in output:
        writer.writerow(line)
|
from models.course.exams.student_answers import Student_Answers
from methods.errors import *
class student_answers_controller():
    """CRUD operations for Student_Answers rows.

    Bug fix applied throughout: the original filters compared a local
    parameter to itself (e.g. `student_question_id == student_question_id`),
    which is always True — so `delete_all_student_answers` deleted EVERY
    row and the lookups matched arbitrary rows. The filters now reference
    the model columns. NOTE(review): column names are assumed to match the
    keys used in `update_student_answer` ('student_question_id',
    'student_answer') and the primary key `id` — confirm against the
    Student_Answers model definition.
    """

    def post_student_answer(self, student_answer):
        """Insert a new row; `student_answer` is a dict of column values."""
        try:
            record = Student_Answers(**student_answer)
            Student_Answers.insert(record)
        except SQLAlchemyError as e:
            raise ErrorHandler({
                'description': str(e),
                'status_code': 500
            })
        return

    def delete_all_student_answers(self, student_question_id):
        """Delete every answer belonging to one student question."""
        try:
            Student_Answers.query.filter(
                Student_Answers.student_question_id == student_question_id
            ).delete(synchronize_session=False)
        except SQLAlchemyError as e:
            raise ErrorHandler({
                'description': str(e),
                'status_code': 500
            })
        return

    def update_student_answer(self, student_id, question_id, student_answers, student_question_id):
        """Replace all answers of a student question: delete, then re-insert.

        `student_id` and `question_id` are currently unused; kept for
        interface compatibility with existing callers.
        """
        try:
            self.delete_all_student_answers(student_question_id)
            for student_answer in student_answers:
                updated_student_answer = {
                    "student_question_id": student_question_id,
                    "student_answer": student_answer
                }
                self.post_student_answer(updated_student_answer)
        except SQLAlchemyError as e:
            raise ErrorHandler({
                'description': str(e),
                'status_code': 500
            })
        return

    def get_student_answer_with_student_question_id_and_answer(self, student_question_id, answer):
        """Return the serialized answer row matching question id and answer text."""
        try:
            student_answer = Student_Answers.query.filter(
                Student_Answers.student_question_id == student_question_id,
                Student_Answers.student_answer == answer
            ).first()
        except SQLAlchemyError as e:
            raise ErrorHandler({
                'description': str(e),
                'status_code': 500
            })
        return student_answer.serialize()

    def update_one_student_answer(self, student_answer_id, correct_answer):
        """Set the `correct_answer` flag/value on a single answer row."""
        try:
            student_answer = Student_Answers.query.filter(
                Student_Answers.id == student_answer_id
            ).first()
            student_answer.correct_answer = correct_answer
            student_answer.update()
        except SQLAlchemyError as e:
            raise ErrorHandler({
                'description': str(e),
                'status_code': 500
            })
|
from django.urls import path, include
from . import views
from rest_framework import routers
# DRF router exposing the employee-related viewsets under /employees-api.
router = routers.DefaultRouter()
# BUG FIX: the register() calls carried stray trailing commas, which turned
# each statement into a throwaway one-element tuple.
router.register('employees', views.EmployeeView)
router.register('title', views.TitleView)
router.register('specialist', views.SpecialistView)
router.register('manager', views.ManagerView)
router.register('employee_id', views.Employee_IDView)

urlpatterns = [
    path('employees-api', include(router.urls)),
    path('', views.homepage, name='home'),
    path('<slug:slug>/', views.details, name='details'),
]
|
import numpy as np
import cv2
import math
import win32ui
import os
def Automeasure(img):
    """Locate the dominant high-gradient region in the image at path *img*,
    then detect near-full-width Hough line segments and draw them over the
    original image in a preview window.
    """
    # step1: load the image and convert it to grayscale
    image = cv2.imread(img)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # BUG FIX: image.shape[:2] is (rows, cols) == (height, width); the
    # original unpacked them the other way round.
    height, width = image.shape[:2]
    # step2: Sobel gradients in x and y; subtracting y from x keeps regions
    # with a high horizontal and low vertical gradient.
    gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
    gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)
    # step3: denoise.  A 9x9 mean blur smooths high-frequency noise, then the
    # blurred gradient is binarized: pixels above 225 become 255, else 0.
    blurred = cv2.blur(gradient, (9, 9))
    ret, thresh = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)
    # Close small gaps so the bright region becomes one connected blob.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 25))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)
    # perform a series of erosions and dilations to remove small specks
    closed = cv2.erode(closed, None, iterations=4)
    closed = cv2.dilate(closed, None, iterations=4)
    # Opening pass
    ret, th2 = cv2.threshold(closed, 0.1, 255, cv2.THRESH_BINARY)
    kernel = np.ones((10, 10), np.uint8)
    opening = cv2.morphologyEx(th2, cv2.MORPH_OPEN, kernel, iterations=2)
    # Final erosion
    kernel = np.ones((5, 5), np.uint8)
    erosion = cv2.erode(opening, kernel, iterations=2)
    # Find contours and keep the largest one.
    contours, hierarchy = cv2.findContours(
        erosion.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    c = sorted(contours, key=cv2.contourArea, reverse=True)[0]
    # compute the rotated bounding box of the largest contour
    # (box is kept for the commented-out debug drawing below; the unused
    # drawing/shape/shaped locals of the original were removed)
    rect = cv2.minAreaRect(c)
    box = np.int0(cv2.boxPoints(rect))
    # # draw a bounding box arounded the detected barcode and display the image
    # cv2.drawContours(image, [box], -1, (0, 255, 0), 3)
    # cv2.imshow("Image", image)
    # cv2.imwrite("contoursImage2.jpg", image)
    # cv2.waitKey(0)
    # Probabilistic Hough transform on Canny edges.
    edges = cv2.Canny(gray, 50, 150)
    # NOTE(review): with the shape unpacking fixed, the original threshold
    # equals 95% of the image *width* — confirm that was the intent.
    lines = cv2.HoughLinesP(edges, 0.8, np.pi / 180, 90,
                            minLineLength=width * 0.95, maxLineGap=10)
    # BUG FIX: the original tested ``lines.any`` — a bound-method object that
    # is always truthy, and an AttributeError when HoughLinesP returns None.
    if lines is not None:
        # Draw each detected segment on the original image.
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(image, (x1, y1), (x2, y2), (0, 255, 0), 1)
    cv2.namedWindow("Image")
    cv2.imshow("Image", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Let the user pick one file; every entry in that file's folder is then
# fed through Automeasure.
dialog = win32ui.CreateFileDialog(1)  # 1 = "open file" dialog
dialog.SetOFNInitialDir(r'D:\OpenCV')  # initial directory shown in the dialog
dialog.DoModal()
chosen_path = dialog.GetPathName()  # full path of the selected file
# image = cv2.imread(chosen_path)
folder = os.path.dirname(chosen_path)  # parent folder of the selection
for entry in os.listdir(folder):
    Automeasure(os.path.join(folder, entry))
# cv2.namedWindow("Image")
# cv2.imshow("Image", closed)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 9 21:31:36 2019
@author: md705
"""
from bs4 import BeautifulSoup
import requests
class WebScrape():
    """Scrape a news website's front page for top stories.

    Configure ``url``, ``article_tag``, ``related_tag`` and ``website``
    after construction, then call :meth:`top_stories` to get a dict of
    ``{title: link}``.
    """

    def __init__(self):
        self.url = ''
        self.article_tag = ''   # CSS class of the main headline container
        self.related_tag = ''   # CSS class of the "related links" container
        self.website = ''       # site root, prepended to relative hrefs

    def top_stories(self):
        """Return a dictionary mapping headline titles to article links."""
        page = requests.get(self.url)
        data = page.text
        soup = BeautifulSoup(data, 'html.parser')
        content = soup.find('div', {"class": self.article_tag})
        # Sometimes the first headline has 2/3 related links to other
        # closely related articles; collect them so they can be excluded.
        related = soup.find('div', {"class": self.related_tag})
        related_links = []
        try:
            for link in related.find_all('a'):
                href = self.website + link.get('href')
                if href not in related_links:
                    related_links.append(href)
        except (AttributeError, TypeError):
            # ``related`` is None when absent, or an <a> has no href.
            related_links = []
        weblinks = []
        try:
            for link in content.find_all('a'):
                href = self.website + link.get('href')
                if href not in weblinks and href not in related_links:
                    weblinks.append(href)
        except (AttributeError, TypeError):
            print("Error: Cannot find the links")
        # This is currently specific to BBC News: real article URLs end in a
        # numeric id.  BUG FIX: the original removed items from ``weblinks``
        # while iterating it, which skips consecutive offenders.
        weblinks = [link for link in weblinks if link[-1] in '0123456789']
        # grab the titles
        titles = []
        for title in content.find_all('h3'):
            if title.text not in titles:
                titles.append(title.text)
        # return a dictionary of the headlines and links
        articles = dict(zip(titles, weblinks))
        return articles
|
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Context classes
============================================================================
Dragonfly uses context classes to define when grammars and
rules should be active. A context is an object with a
:meth:`Context.matches` method which returns *True* if the
system is currently within that context, and *False* if it
is not.
The following context classes are available:
- :class:`Context` --
the base class from which all other context classes are derived
- :class:`AppContext` --
class which based on the application context, i.e. foreground window
executable, title, and handle
Logical operations
----------------------------------------------------------------------------
It is possible to modify and combine the behavior of contexts using the
Python's standard logical operators:
:logical AND: ``context1 & context2`` -- *all* contexts must match
:logical OR: ``context1 | context2`` --
*one or more* of the contexts must match
:logical NOT: ``~context1`` -- *inversion* of when the context matches
For example, to create a context which will match when
Firefox is in the foreground, but only if Google Reader is
*not* being viewed::
firefox_context = AppContext(executable="firefox")
reader_context = AppContext(executable="firefox", title="Google Reader")
firefox_but_not_reader_context = firefox_context & ~reader_context
Class reference
----------------------------------------------------------------------------
"""
import copy
from dragonfly import get_log
#---------------------------------------------------------------------------
class Context(object):
    """
    Base class for other context classes.

    This base class implements some basic infrastructure, including
    what's required for logical operations on context objects.
    Derived classes should at least do the following things:

     - During initialization, set *self._str* to some descriptive,
       human readable value.  This attribute is used by the
       ``__str__()`` method.
     - Overload the :meth:`Context.matches` method to implement
       the logic to determine when to be active.

    The *self._log* logger objects should be used in methods of
    derived classes for logging purposes.  It is a standard logger
    object from the *logger* module in the Python standard library.
    """

    _log = get_log("context.match")
    _log_match = _log

    #-----------------------------------------------------------------------
    # Initialization and aggregation methods.

    def __init__(self):
        self._str = ""

    def __str__(self):
        return "%s(%s)" % (self.__class__.__name__, self._str)

    def copy(self):
        """Return a deep copy of this context."""
        # BUG FIX: the original called ``copy_.deepcopy`` — a NameError at
        # runtime, since the module is imported as ``copy``.
        return copy.deepcopy(self)

    #-----------------------------------------------------------------------
    # Logical operations.

    def __and__(self, other):
        return LogicAndContext(self, other)

    def __or__(self, other):
        return LogicOrContext(self, other)

    def __invert__(self):
        return LogicNotContext(self)

    #-----------------------------------------------------------------------
    # Matching methods.

    def matches(self, executable, title, handle):
        """
        Indicate whether the system is currently within this context.

        Arguments:
         - *executable* (*str*) --
           path name to the executable of the foreground application
         - *title* (*str*) -- title of the foreground window
         - *handle* (*int*) -- window handle to the foreground window

        The default implementation of this method simply returns *True*.

        .. note::
            This is generally the method which developers should
            overload to give derived context classes custom
            functionality.
        """
        return True
#---------------------------------------------------------------------------
# Wrapper contexts for combining contexts in logical structures.
class LogicAndContext(Context):
    """Composite context that matches only when every child matches."""

    def __init__(self, *children):
        self._children = children
        self._str = ", ".join(str(child) for child in children)

    def matches(self, executable, title, handle):
        return all(child.matches(executable, title, handle)
                   for child in self._children)
class LogicOrContext(Context):
    """Composite context that matches when at least one child matches."""

    def __init__(self, *children):
        self._children = children
        self._str = ", ".join(str(child) for child in children)

    def matches(self, executable, title, handle):
        return any(child.matches(executable, title, handle)
                   for child in self._children)
class LogicNotContext(Context):
    """Wrapper context that inverts its single child's match result."""

    def __init__(self, child):
        self._child = child
        self._str = str(child)

    def matches(self, executable, title, handle):
        child_result = self._child.matches(executable, title, handle)
        return not child_result
#---------------------------------------------------------------------------
class AppContext(Context):
    """
    Context class using foreground application details.

    This class determines whether the foreground window meets
    certain requirements. Which requirements must be met for this
    context to match are determined by the constructor arguments.

    Constructor arguments:
     - *executable* (*str*) --
       (part of) the path name of the foreground application's
       executable; case insensitive
     - *title* (*str*) --
       (part of) the title of the foreground window; case insensitive
    """

    #-----------------------------------------------------------------------
    # Initialization methods.

    def __init__(self, executable=None, title=None, exclude=False):
        Context.__init__(self)
        if executable is None:
            self._executable = None
        elif isinstance(executable, str):
            self._executable = executable.lower()
        else:
            raise TypeError("executable argument must be a string or None;"
                            " received %r" % executable)
        if title is None:
            self._title = None
        elif isinstance(title, str):
            self._title = title.lower()
        else:
            raise TypeError("title argument must be a string or None;"
                            " received %r" % title)
        self._exclude = bool(exclude)
        self._str = "%s, %s, %s" % (self._executable, self._title,
                                    self._exclude)

    #-----------------------------------------------------------------------
    # Matching methods.

    def matches(self, executable, title, handle):
        executable = executable.lower()
        title = title.lower()
        if self._executable:
            found = self._executable in executable
            # With exclude=True the logic is inverted: finding it is a miss.
            if found == self._exclude:
                self._log_match.debug("%s:"
                                      " No match, executable doesn't match." % (self))
                return False
        if self._title:
            found = self._title in title
            if found == self._exclude:
                self._log_match.debug("%s:"
                                      " No match, title doesn't match." % (self))
                return False
        if self._log_match:
            self._log_match.debug("%s: Match." % (self))
        return True
|
from django.contrib import admin
# Register your models here.
from .models import UserBalance, UserBalanceChange
# Expose the balance models in the Django admin.
for balance_model in (UserBalance, UserBalanceChange):
    admin.site.register(balance_model)
# -*- coding: utf-8 -*-
# Copyright 2018 Quartile Limited
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# Odoo addon manifest: adds a "delivery users" security group that restricts
# what those users can see and do on sales orders and pickings.
{
    'name': 'Model Security Delivery',
    'category': 'Security',
    'version': '8.0.1.3.1',
    'author': 'OA Trade Ltd.',
    'website': '',
    # Must be installed after model_security_adjust_oaw (see summary).
    'depends': [
        'sale',
        'stock',
        'product',
        'sale_line_quant',
        'sale_margin',
        'sale_stock',
        'view_adjustments',
        'oa_company_quot_report_dalko_adjust',
        'oa_company_quotation_report_sino_adjust',
        'model_security_adjust_oaw'
    ],
    'summary':"""A group for delivery users. Must be installed after module_security_adjust_oaw""",
    'description': """
Accesses mostly on "sales" module \
in sales.order, NO ADD ITEMS \
in sales.order, NO Stock Owner, Cost Price, Margin \
allow CONFIRM QUOTATION \
allow DO-OUT \
""",
    # Security rules first, then the view adjustments they gate.
    'data': [
        'security/delivery_security.xml',
        'security/base_security.xml',
        'security/ir.model.access.csv',
        'views/sale_order.xml',
        'views/sale_views.xml',
        'views/stock_picking.xml',
        'views/stock_views.xml',
        'views/reports.xml',
    ],
    'qweb': [],
    'installable': True,
}
|
import json
import os
import protogen
# GBDX task port locations.
input_ports_location = '/mnt/work/input/'
output_ports_location = '/mnt/work/output/'

# Get image directory
image_dir = os.path.join(input_ports_location, 'image')

# Point to the image file: the first tif found anywhere below image_dir
# (if there are multiple tifs in multiple subdirectories, pick one).
tif_candidates = [os.path.join(dirpath, fname)
                  for dirpath, _, fnames in os.walk(image_dir)
                  for fname in fnames
                  if ('tif' in fname) or ('TIF' in fname)]
image = tif_candidates[0]
# Read from ports.json
input_ports_path = os.path.join(input_ports_location, 'ports.json')
if os.path.exists(input_ports_path):
    # BUG FIX: the original leaked the file handle via json.load(open(...)).
    with open(input_ports_path) as ports_f:
        string_ports = json.load(ports_f)
else:
    string_ports = None


def _port_flag(value):
    """Map the string port values 'true'/'True' to True, anything else to False."""
    return value in ('true', 'True')


if string_ports:
    vegetation = _port_flag(string_ports.get('vegetation', 'false'))
    water = _port_flag(string_ports.get('water', 'false'))
    soil = _port_flag(string_ports.get('soil', 'false'))
    clouds = _port_flag(string_ports.get('clouds', 'false'))
    shadows = _port_flag(string_ports.get('shadows', 'false'))
    unclassified = _port_flag(string_ports.get('unclassified', 'false'))
    verbose = _port_flag(string_ports.get('verbose', 'false'))
    tiles = int(string_ports.get('tiles', '1'))
    bbox = string_ports.get('bbox', '')
    if bbox:
        # Materialize the floats: a bare map object is always truthy and can
        # only be consumed once, which made later checks fragile.
        bbox = list(map(float, bbox.split(',')))
    # RGB visualization is the fallback when no mask class was selected.
    rgb = not (vegetation or water or soil or clouds or shadows or unclassified)
else:
    # No ports.json: default to a plain RGB run.
    vegetation = False
    water = False
    soil = False
    clouds = False
    shadows = False
    unclassified = False
    rgb = True
    tiles = 1
    verbose = False
    bbox = ''
# Create output directory
output_dir = os.path.join(output_ports_location, 'image')
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
# protogen writes results into the current working directory.
os.chdir(output_dir)

# Run lulc: either an RGB visualization of all classes, or a single combined
# mask of the classes selected via the ports.
if rgb:
    p = protogen.Interface('lulc', 'layers')
    p.lulc.layers.name = 'lulc'
    p.lulc.layers.visualization = 'rgb'
else:
    p = protogen.Interface('lulc', 'masks')
    p.lulc.masks.type = 'single'
    p.lulc.masks.switch_vegetation = vegetation
    p.lulc.masks.switch_water = water
    p.lulc.masks.switch_bare_soil = soil
    p.lulc.masks.switch_clouds = clouds
    p.lulc.masks.switch_shadows = shadows
    p.lulc.masks.switch_unclassified = unclassified
    # no data can never be the foreground
    p.lulc.masks.switch_no_data = False

# Specify input image and range of bands (1..8).
p.image = image
p.image_config.bands = range(1, 9)

# Tile if asked for
if tiles > 1:
    p.image_config.number_of_tiles = tiles
    p.image_config.mosaic_method = 'max'

# bbox arrives as (W, S, E, N); protogen expects (W, N, E, S).
if bbox:
    W, S, E, N = bbox
    p.image_config.input_latlong_rectangle = [W, N, E, S]

# Execute
p.verbose = verbose
p.execute()
|
import pandas as pd
import time
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers
from tensorflow import sigmoid
import numpy as np
def categoriza(data, start=1.):
    """Replace each value in *data* with a float category id, in place.

    Ids are assigned in order of first appearance, starting at *start*
    (generalized from the hard-coded 1.0; the default preserves the old
    behavior).  Entries equal to " " (missing) are left untouched and their
    indices are returned so the caller can drop those rows.
    """
    next_id = start
    seen = {}
    missing = []
    for i, value in enumerate(data):
        if value == " ":
            missing.append(i)
            continue
        if value in seen:
            data[i] = seen[value]
        else:
            seen[value] = next_id
            data[i] = next_id
            next_id = next_id + 1.
    return missing
def categoriza_l(data):
    """Label-encode *data* in place with integer ids starting at 0.

    Entries equal to " " are skipped; their indices are returned.
    """
    mapping = {}
    skipped = []
    counter = 0
    for pos, value in enumerate(data):
        if value == " ":
            skipped.append(pos)
        elif value in mapping:
            data[pos] = mapping[value]
        else:
            mapping[value] = counter
            data[pos] = counter
            counter = counter + 1
    return skipped
def date_to_seconds(data):
    """Convert each datetime in *data* to a Unix timestamp, in place."""
    for pos, stamp in enumerate(data):
        data[pos] = time.mktime(stamp.timetuple())
def parse_float(v):
    """Coerce entries of *v* to float in place; return indices that failed."""
    bad = []
    for pos, raw in enumerate(v):
        try:
            v[pos] = float(raw)
        except Exception:
            bad.append(pos)
    return bad
def writeCSV(v, openFile):
    """Write the values of *v* as one comma-separated line to *openFile*."""
    openFile.write(",".join(str(item) for item in v))
    openFile.write("\n")
# Dataset configuration: load the first sheet of the practice workbook,
# parsing FECHA_HORA as day-first dates.
xl = pd.ExcelFile('Datos_PrActica_2_CONJUNTO_1.xls')
df = xl.parse(0, parse_dates=['FECHA_HORA'], dayfirst = True)
data = df.get_values()
# Count Yes/No labels (column 13) before any cleaning, for comparison below.
no_1 = 0
yes_1 = 0
for v in data:
    if v[13]=='Yes':
        yes_1 = yes_1 + 1
    else:
        no_1 = no_1 +1
# Label-encode the categorical columns in place and drop rows with " " there.
data = np.delete(data,categoriza(data[:,8]),0)
data = np.delete(data,categoriza(data[:,9]),0)
data = np.delete(data,categoriza(data[:,12]),0)
data = np.delete(data,categoriza_l(data[:,13]),0)
# Dates to Unix timestamps; numeric columns to float, dropping rows that fail.
date_to_seconds(data[:,0])
data = np.delete(data,parse_float(data[:,1]),0)
data = np.delete(data,parse_float(data[:,2]),0)
data = np.delete(data,parse_float(data[:,3]),0)
data = np.delete(data,parse_float(data[:,4]),0)
data = np.delete(data,parse_float(data[:,5]),0)
data = np.delete(data,parse_float(data[:,6]),0)
data = np.delete(data,parse_float(data[:,7]),0)
data = np.delete(data,parse_float(data[:,10]),0)
data = np.delete(data,parse_float(data[:,11]),0)
# Split rows into yes.csv / no.csv by the encoded label.
# NOTE(review): categoriza_l assigns 0 to the FIRST label it encounters, so
# v[13]==1 is 'No' when the first row is 'Yes' — verify against yes_1/no_1.
yes = 0;
no = 0;
file_no = open("no.csv",'w')
file_yes = open("yes.csv",'w')
for v in data:
    if v[13]==1:
        yes = yes + 1
        writeCSV(v, file_yes)
    else:
        no = no +1
        writeCSV(v, file_no)
file_yes.close()
file_no.close()
# Post-cleaning vs. pre-cleaning label counts.
print("No: " + str(no) + "\nYes: " + str(yes) )
print("No: " + str(no_1) + "\nYes: " + str(yes_1) )
import os
# TODO: This dirname chain is dumb.
# Project root: three directory levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Upload and collectstatic target directories, rooted at the project dir.
MEDIA_ROOT = BASE_DIR + '/media/'
STATIC_ROOT = BASE_DIR + '/static/'

# We use different static directories because I compile
# frontend code via grunt (sass compiling, minifying, etc),
# and most resources are shared among django apps, but django
# wants to use silly collectstatic stuff to merge it all together
# for deployment. That's fine and all, but I don't want to commit
# unnecessary copies of things. EVENTUALLY, I'll come up with a
# better deployment setup, something that doesn't involve duplicating
# resources and works with django without having to collectstatic
# each deployment run.
CSS_ROOT = BASE_DIR + '/css'
FONT_ROOT = BASE_DIR + '/font'
JS_ROOT = BASE_DIR + '/js'
IMG_ROOT = BASE_DIR + '/img'

# Site version number, used for cache busting among other things
VERSION = '1.1.2'

# NOTE(review): an empty ALLOWED_HOSTS only works in DEBUG mode — confirm
# this module is for development.
ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'landing.apps.LandingConfig',
    'safespace.apps.SafespaceConfig',
    'live.apps.LiveConfig',
    'love.apps.LoveConfig',
    'django_jinja',
    'django_jinja.contrib._humanize',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles'
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'sybolt.urls'

TEMPLATES = [
    # Jinja2 backend for .j2 templates; `settings` is exposed as a global.
    {
        "BACKEND": "django_jinja.backend.Jinja2",
        'DIRS': [BASE_DIR + '/sybolt/templates'],
        "APP_DIRS": True,
        "OPTIONS": {
            "match_extension": ".j2",
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            'globals': {
                'settings': 'django.conf.settings'
            }
        }
    },
    # Django templates - for admin pages and non jinja2 stuff
    # These are accessed via `templates` directory under each app
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    }
]

WSGI_APPLICATION = 'sybolt.wsgi.application'

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    # {
    #     'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    # },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    # {
    #     'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    # },
    # {
    #     'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    # },
]

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York' # UTC
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static serve settings, required by admin module
STATIC_URL = '/static/'

# Auth settings
LOGIN_REDIRECT_URL = '/safespace'

# TMDB API settings
TMDB_API_URI = 'http://api.themoviedb.org/3/movie'
|
__author__ = 'Brendan'
from pymongo import MongoClient
# Python 2 script: connect to a local MongoDB and poke at a test database.
client = MongoClient('mongodb://localhost:27017')
db = client['pymongo_twit_test'] # specify/create database
posts = db.posts # sample entry
print "initial database: "
print db.posts
'''
post_data = {
    'title': 'My database entry',
    'content': 'somewhat disappointing',
    'author': 'Ywwh'
}
#result = posts.insert_one(post_data)
#print('One post: {0}'.format(result.inserted_id))
test_fetch = posts.find_one({'author':'Ywwh'})
print test_fetch
'''
# get a small batch of tweets
# write them to a database
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import mysql.connector
# NOTE(review): database credentials are hard-coded here — move them to
# environment variables or a config file before sharing this script.
mydb = mysql.connector.connect(
    host = "localhost",
    user = "root",
    passwd = "Esther99!",
    database = "HW3"
)
print(mydb)

# List the tables.  BUG FIX: the original closed the cursor *before*
# iterating it, so the loop could not read any rows; fetch first, then close.
mycursor = mydb.cursor()
mycursor.execute("show tables;")
tables = mycursor.fetchall()
mycursor.close()
for x in tables: print(x)

# Pull the whole intl_football table into a DataFrame.
sql_query = "SELECT * FROM intl_football;"
mycursor = mydb.cursor()
mycursor.execute(sql_query)
myresult = mycursor.fetchall()
mycursor.close()
dta_at_country_date = pd.DataFrame(myresult)

# In[]
#data_url = 'http://bit.ly/2cLzoxH'
#dta_at_country_year = pd.read_csv(data_url)
#dta_at_country_year.to_excel('D:\data_country_db_at_country_year.xlsx')
|
# Author: Kevin Köck
# Copyright Kevin Köck 2019 Released under the MIT license
# Created on 2019-11-05
__updated__ = "2019-11-05"
__version__ = "0.1"
from ..mqtt_as_timeout_concurrent import MQTTClient
import uasyncio as asyncio
loop = asyncio.get_event_loop(waitq_len=60, runq_len=60)
async def publish(val, t):
    # Publish *val* to foo_topic at QoS 1; fails after *t* seconds (this
    # test exercises the timeout path of the patched client).
    print("Publishing", val, "timeout", t)
    await client.publish("foo_topic", val, qos=1, timeout=t)

def callback(topic, msg, retained):
    # Subscription callback: just echo every received message.
    print((topic, msg, retained))

# Tracks whether conn_han is handling the very first connection.
first = True
async def conn_han(client):
    # Connection callback.  On the first connect it fires a batch of
    # publishes/subscribes, deliberately drops the connection mid-flight to
    # exercise the operation timeouts, then reconnects and publishes again.
    global first
    if first:
        # await client.subscribe('foo_topic', 1)
        loop = asyncio.get_event_loop()
        # Queue operations with staggered timeouts while still connected.
        loop.create_task(publish("payload {!s}".format(1), 1))
        loop.create_task(publish("payload {!s}".format(2), 2))
        loop.create_task(client.subscribe("testtopic{!s}".format(3), qos=1, timeout=5))
        loop.create_task(publish("payload {!s}".format(4), 4))
        loop.create_task(publish("payload {!s}".format(5), 5))
        loop.create_task(client.subscribe("testtopic{!s}".format(6), qos=1, timeout=5))
        first = False
        await asyncio.sleep(1)
        print("Closing connection")
        await client.disconnect()
        await asyncio.sleep(5)
        # Same batch again while disconnected: all of these should time out.
        print("Publishing disconnected")
        loop.create_task(publish("payload {!s}".format(1), 1))
        loop.create_task(publish("payload {!s}".format(2), 2))
        loop.create_task(client.subscribe("testtopic{!s}".format(3), qos=1, timeout=5))
        loop.create_task(publish("payload {!s}".format(4), 4))
        loop.create_task(publish("payload {!s}".format(5), 5))
        loop.create_task(client.subscribe("testtopic{!s}".format(6), qos=1, timeout=5))
        await asyncio.sleep(10)
        print("Reconnecting after all timeouts")
        await client.connect()
        loop.create_task(publish("payload {!s}".format(8), 8))
        await asyncio.sleep(5)
        print("Test done")
        await client.disconnect()
import config
from ubinascii import hexlify
from machine import unique_id
async def wifi(state):
    # WiFi state-change hook: just log the new state.
    print("WIFI state", state)

async def eliza(*_):  # e.g. via set_wifi_handler(coro): see test program
    # Placeholder coroutine used as the default connect handler.
    await asyncio.sleep_ms(20)

# MQTTClient configuration; credentials come from the local config module.
config_dict = {
    'client_id': hexlify(unique_id()),
    'server': config.MQTT_HOST,
    'port': config.MQTT_PORT,
    'user': config.MQTT_USER,
    'password': config.MQTT_PASSWORD,
    'keepalive': 60,
    'ping_interval': 0,
    'ssl': False,
    'ssl_params': {},
    'response_time': 10,
    'clean_init': True,
    'clean': True,
    'max_repubs': 4,
    'will': None,
    'subs_cb': lambda *_: None,
    'wifi_coro': wifi,
    'connect_coro': eliza,
    'ssid': None,
    'wifi_pw': None,
}
# Wire in the real test handlers defined above.
config_dict['connect_coro'] = conn_han
config_dict['subs_cb'] = callback

client = MQTTClient(**config_dict)
client.DEBUG = True
async def main(client):
    """Connect, then idle forever; all test traffic is driven by conn_han."""
    await client.connect()
    # (removed an unused local counter from the original)
    while True:
        await asyncio.sleep(5)

def test():
    """Entry point: run the event loop until main() finishes (it never does)."""
    try:
        loop.run_until_complete(main(client))
    finally:
        client.close()  # Prevent LmacRxBlk:1 errors
|
# Hash table module; similar purpose and structure as dictionaries
class HashTable:
    """Chained hash table mapping string keys to arbitrary values.

    Each bucket is a flat list [k0, v0, k1, v1, ...]; collisions append
    further key/value pairs to the same bucket.
    """

    def __init__(self, m):
        # m is the size of the array; r is the polynomial-hash multiplier
        # (31, a prime, is the classic choice).
        self.m = m
        self.r = 31
        self.hashtable = []
        self.construct()

    def construct(self):
        """Initial setup: one empty [key, value] slot pair per bucket."""
        for i in range(self.m):
            self.hashtable.append([None, None])

    def __str__(self):
        """Render the table in dict-literal style, skipping empty buckets."""
        string = "{\n"
        for i in range(self.m):
            for j in range(0, len(self.hashtable[i]), 2):
                # Only print items with a key and/or value in them
                if None not in self.hashtable[i]:
                    key = self.hashtable[i][j]
                    value = self.hashtable[i][j + 1]
                    # BUG FIX: the second branch was a bare `if`, so a pair in
                    # which both key and value were non-strings was appended
                    # twice; it must be part of the elif chain.
                    if type(key) != str and type(value) != str:
                        string += str(key) + ": " + str(value)
                    elif type(key) != str:
                        string += str(key) + ": " + value
                    elif type(value) != str:
                        string += key + ": " + str(value)
                    else:
                        string += key + ": " + value
                    string += ",\n"
        # Remove the extra ",\n" at the end
        string = string[:len(string) - 2]
        string += "\n}"
        return string

    def hash(self, s):
        """Return the polynomial-rolling-hash bucket index for string *s*."""
        h = 0
        for i in range(len(s)):
            h = (self.r * h + ord(s[i])) % self.m
        return h

    def put(self, k, v):
        """Insert key-value pair (k, v); colliding pairs chain in the bucket.

        NOTE(review): find() lower-cases its argument but put() does not —
        mixed-case keys inserted here are only found via their exact-lowercase
        form; confirm which behavior is intended.
        """
        index = self.hash(k)
        if self.hashtable[index][0] == None:
            self.hashtable[index][0] = k
            self.hashtable[index][1] = v
        else:
            # Multiple key-value pairs can share a single bucket.
            self.hashtable[index].append(k)
            self.hashtable[index].append(v)

    def find(self, k):
        """Return True if key *k* (case-insensitive) is in the table."""
        k = k.lower()
        index = self.hash(k)
        # Step of 2 ensures we only compare keys.
        for i in range(0, len(self.hashtable[index]), 2):
            if self.hashtable[index][i] == k:
                return True
        return False

    def getValue(self, k):
        """Return the value stored under *k*, or an error string if absent."""
        index = self.hash(k)
        for i in range(0, len(self.hashtable[index]), 2):
            if self.hashtable[index][i] == k:
                # i+1 because the value follows its key
                return self.hashtable[index][i + 1]
        return "ERROR: Key does not exist"

    def keys(self):
        """Return all key slots (including None placeholders) as a list."""
        keysArray = []
        for i in range(self.m):
            for j in range(0, len(self.hashtable[i]), 2):
                keysArray.append(self.hashtable[i][j])
        return keysArray

    def values(self):
        """Return all value slots (including None placeholders) as a list."""
        valuesArray = []
        for i in range(self.m):
            for j in range(1, len(self.hashtable[i]), 2):
                valuesArray.append(self.hashtable[i][j])
        return valuesArray
|
def add(num1: float, num2: float) -> float:
    """Return the sum of two numbers."""
    return num1 + num2
def subtract(num1: float, num2: float) -> float:
    """Return the difference num1 - num2.

    (Docstring fixed: the original was copy-pasted from add().)
    """
    num3 = num1 - num2
    return num3
def division(num1: float, num2: float) -> float:
    """Return num1 divided by num2.

    Raises ZeroDivisionError when num2 is 0.
    (Docstring fixed: the original was copy-pasted from add().)
    """
    num3 = num1 / num2
    return num3
def square_root(num1: float) -> float:
    """Return the square root of num1 (num1 ** 0.5).

    (Docstring fixed: the original was copy-pasted from add().)
    """
    num2 = num1 ** 0.5
    return num2
def square(num1: float) -> float:
    """Return num1 squared.

    (Docstring fixed: the original was copy-pasted from add().)
    """
    num2 = num1 ** 2
    return num2
def main():
    """Demo entry point: print the sum of two sample numbers."""
    x, y = 2, 10
    z = x + y
    print(f" x + y: {x} + {y} = {z}")
|
from django.db import models
# Create your models here.
class Elemento(models.Model):
    """Model holding three optional short text fields."""

    # Optional short text, up to 10 characters.
    texto_um = models.CharField(
        max_length=10,
        verbose_name='Texto Um',
        default='',
        blank=True,
    )
    # Optional short text, up to 20 characters.
    texto_dois = models.CharField(
        max_length=20,
        verbose_name='Texto Dois',
        default='',
        blank=True,
    )
    # Optional short text, up to 30 characters.
    texto_tres = models.CharField(
        max_length=30,
        verbose_name='Texto Três',
        default='',
        blank=True
    )

    def __str__(self):
        # Admin/list display shows the first text field.
        return str(self.texto_um)
|
from typing import AsyncGenerator, Generic, List, Optional, Sequence
import httpx
import pytest
from fastapi import Depends, FastAPI, Request, status
from fastapi.security.base import SecurityBase
from fastapi_users import models
from fastapi_users.authentication import AuthenticationBackend, Authenticator
from fastapi_users.authentication.authenticator import DuplicateBackendNamesError
from fastapi_users.authentication.strategy import Strategy
from fastapi_users.authentication.transport import Transport
from fastapi_users.manager import BaseUserManager
from fastapi_users.types import DependencyCallable
from tests.conftest import User, UserModel
class MockSecurityScheme(SecurityBase):
    # Stub security scheme: every request "authenticates" with the token "mock".
    def __call__(self, request: Request) -> Optional[str]:
        return "mock"
class MockTransport(Transport):
    """Transport stub wired to MockSecurityScheme for tests."""

    scheme: MockSecurityScheme

    def __init__(self):
        self.scheme = MockSecurityScheme()
class NoneStrategy(Strategy):
    """Strategy stub that never authenticates (always returns None)."""

    async def read_token(
        self, token: Optional[str], user_manager: BaseUserManager[models.UP, models.ID]
    ) -> Optional[models.UP]:
        return None
class UserStrategy(Strategy, Generic[models.UP]):
    """Strategy stub that authenticates every request as a fixed user."""

    def __init__(self, user: models.UP):
        self.user = user

    async def read_token(
        self, token: Optional[str], user_manager: BaseUserManager[models.UP, models.ID]
    ) -> Optional[models.UP]:
        # Ignores the token entirely; always "authenticates" the fixed user.
        return self.user
@pytest.fixture
def get_backend_none():
    """Factory fixture: backend whose strategy never authenticates."""
    def _get_backend_none(name: str = "none"):
        return AuthenticationBackend(
            name=name, transport=MockTransport(), get_strategy=lambda: NoneStrategy()
        )

    return _get_backend_none
@pytest.fixture
def get_backend_user(user: UserModel):
    """Factory fixture: backend that authenticates every request as *user*."""
    def _get_backend_user(name: str = "user"):
        return AuthenticationBackend(
            name=name,
            transport=MockTransport(),
            get_strategy=lambda: UserStrategy(user),
        )

    return _get_backend_user
@pytest.fixture
def get_test_auth_client(get_user_manager, get_test_client):
    """Factory fixture: build a test app with the given auth backends.

    Returns an async factory that yields an httpx client against a fresh
    FastAPI app exposing three protected routes (any user / active user /
    active superuser).

    BUG FIX: removed the spurious ``@pytest.mark.asyncio`` decorator —
    pytest marks have no effect when applied to fixtures, and this
    fixture function itself is synchronous (it merely returns an async
    factory), so the mark was misleading dead weight.
    """
    async def _get_test_auth_client(
        backends: List[AuthenticationBackend],
        get_enabled_backends: Optional[
            DependencyCallable[Sequence[AuthenticationBackend]]
        ] = None,
    ) -> AsyncGenerator[httpx.AsyncClient, None]:
        # Fresh app per call so each test gets isolated routing/auth state.
        app = FastAPI()
        authenticator = Authenticator(backends, get_user_manager)

        @app.get("/test-current-user", response_model=User)
        def test_current_user(
            user: UserModel = Depends(
                authenticator.current_user(get_enabled_backends=get_enabled_backends)
            ),
        ):
            return user

        @app.get("/test-current-active-user", response_model=User)
        def test_current_active_user(
            user: UserModel = Depends(
                authenticator.current_user(
                    active=True, get_enabled_backends=get_enabled_backends
                )
            ),
        ):
            return user

        @app.get("/test-current-superuser", response_model=User)
        def test_current_superuser(
            user: UserModel = Depends(
                authenticator.current_user(
                    active=True,
                    superuser=True,
                    get_enabled_backends=get_enabled_backends,
                )
            ),
        ):
            return user

        async for client in get_test_client(app):
            yield client

    return _get_test_auth_client
@pytest.mark.authentication
@pytest.mark.asyncio
async def test_authenticator(get_test_auth_client, get_backend_none, get_backend_user):
    """With one failing and one succeeding backend, the request is authorized."""
    async for client in get_test_auth_client([get_backend_none(), get_backend_user()]):
        response = await client.get("/test-current-user")
        assert response.status_code == status.HTTP_200_OK
@pytest.mark.authentication
@pytest.mark.asyncio
async def test_authenticator_none(get_test_auth_client, get_backend_none):
    """With only failing backends, the request is rejected with 401."""
    async for client in get_test_auth_client(
        [get_backend_none(), get_backend_none(name="none-bis")]
    ):
        response = await client.get("/test-current-user")
        assert response.status_code == status.HTTP_401_UNAUTHORIZED
@pytest.mark.authentication
@pytest.mark.asyncio
async def test_authenticator_none_enabled(
    get_test_auth_client, get_backend_none, get_backend_user
):
    """A succeeding backend that is not in the enabled set is ignored."""
    backend_none = get_backend_none()
    backend_user = get_backend_user()

    # Only the failing backend is enabled, so authentication must fail.
    async def get_enabled_backends():
        return [backend_none]

    async for client in get_test_auth_client(
        [backend_none, backend_user], get_enabled_backends
    ):
        response = await client.get("/test-current-user")
        assert response.status_code == status.HTTP_401_UNAUTHORIZED
@pytest.mark.authentication
@pytest.mark.asyncio
async def test_authenticators_with_same_name(get_test_auth_client, get_backend_none):
    """Two backends sharing a name raise DuplicateBackendNamesError."""
    with pytest.raises(DuplicateBackendNamesError):
        async for _ in get_test_auth_client([get_backend_none(), get_backend_none()]):
            pass
|
from aizynthfinder.chem.serialization import MoleculeSerializer, MoleculeDeserializer
from aizynthfinder.chem import TreeMolecule
from aizynthfinder.search.mcts import MctsState
from aizynthfinder.search.mcts import MctsNode
from aizynthfinder.search.mcts import MctsSearchTree
def test_serialize_deserialize_state(default_config):
    """An MctsState round-trips through serialize/from_dict intact."""
    mol = TreeMolecule(parent=None, smiles="CCC", transform=1)
    state0 = MctsState([mol], default_config)
    serializer = MoleculeSerializer()

    state_serialized = state0.serialize(serializer)

    # Molecules are referenced by object id in the serialized form.
    assert len(state_serialized["mols"]) == 1
    assert state_serialized["mols"][0] == id(mol)

    deserializer = MoleculeDeserializer(serializer.store)
    state1 = MctsState.from_dict(state_serialized, default_config, deserializer)

    # The restored state matches the original molecule by molecule.
    assert len(state1.mols) == 1
    assert state1.mols[0] == state0.mols[0]
    assert state1.in_stock_list == state0.in_stock_list
    assert state1.score == state0.score
def test_serialize_node(setup_mcts_search):
    """Node serialization tracks expansion: stats, actions, then children."""
    serializer = MoleculeSerializer()
    root, _, _ = setup_mcts_search
    state_serialized = root.state.serialize(serializer)

    # Fresh, unexpanded node: no child data of any kind.
    node_serialized = root.serialize(serializer)
    assert not node_serialized["is_expanded"]
    assert node_serialized["state"] == state_serialized
    assert node_serialized["children_values"] == []
    assert node_serialized["children_priors"] == []
    assert node_serialized["children_visitations"] == []
    assert node_serialized["children"] == []

    # After expand(): per-child statistics and actions are serialized,
    # but child nodes are not instantiated yet (all None).
    root.expand()
    node_serialized = root.serialize(serializer)
    action_list = root.children_view()["actions"]
    assert node_serialized["children_values"] == [0.7, 0.5, 0.3]
    assert node_serialized["children_priors"] == [0.7, 0.5, 0.3]
    assert node_serialized["children_visitations"] == [1, 1, 1]
    # Actions are serialized field by field; molecules by object id.
    assert all(
        id(expected.mol) == actual["mol"]
        for expected, actual in zip(action_list, node_serialized["children_actions"])
    )
    assert all(
        expected.reactants_str == actual["reactants_str"]
        for expected, actual in zip(action_list, node_serialized["children_actions"])
    )
    assert all(
        expected.index == actual["index"]
        for expected, actual in zip(action_list, node_serialized["children_actions"])
    )
    assert all(
        expected.metadata == actual["metadata"]
        for expected, actual in zip(action_list, node_serialized["children_actions"])
    )
    assert node_serialized["children"] == [None, None, None]
    assert node_serialized["is_expanded"]

    # After visiting the most promising child, that child (and only that
    # child) is serialized in full, itself unexpanded.
    child = root.promising_child()
    node_serialized = root.serialize(serializer)
    state_serialized = child.state.serialize(serializer)
    assert node_serialized["is_expanded"]
    assert node_serialized["children"][1] is None
    assert node_serialized["children"][2] is None
    assert node_serialized["children"][0]["state"] == state_serialized
    assert node_serialized["children"][0]["children_values"] == []
    assert node_serialized["children"][0]["children_priors"] == []
    assert node_serialized["children"][0]["children_visitations"] == []
    assert node_serialized["children"][0]["children"] == []
    assert not node_serialized["children"][0]["is_expanded"]
def test_deserialize_node(setup_mcts_search, default_config):
    """Deserializing a node restores stats, expansion flags and states."""
    serializer = MoleculeSerializer()
    root, _, _ = setup_mcts_search
    root.expand()
    child = root.promising_child()
    node_serialized = root.serialize(serializer)
    deserializer = MoleculeDeserializer(serializer.store)

    root_new = MctsNode.from_dict(node_serialized, None, default_config, deserializer)

    # Only the instantiated (visited) child is recreated.
    assert len(root_new.children) == 1
    new_child = root_new.children[0]
    assert root_new.children_view()["values"] == root.children_view()["values"]
    assert root_new.children_view()["priors"] == root.children_view()["priors"]
    assert (
        root_new.children_view()["visitations"] == root.children_view()["visitations"]
    )
    assert root_new.is_expanded
    assert new_child.children_view()["values"] == child.children_view()["values"]
    assert new_child.children_view()["priors"] == child.children_view()["priors"]
    assert (
        new_child.children_view()["visitations"] == child.children_view()["visitations"]
    )
    assert not new_child.is_expanded
    # States are compared via their string representation.
    assert str(root_new.state) == str(root.state)
    assert str(new_child.state) == str(child.state)
def test_serialize_deserialize_tree(
    setup_complete_mcts_tree,
    default_config,
    mocker,
    tmpdir,
):
    """A whole search tree round-trips through serialize()/from_json().

    json.dump/json.load are mocked so no real file I/O takes place.
    """
    tree, nodes = setup_complete_mcts_tree
    root, child, _ = nodes
    mocked_json_dump = mocker.patch("aizynthfinder.search.mcts.search.json.dump")
    serializer = MoleculeSerializer()
    filename = str(tmpdir / "dummy.json")

    # Test serialization: the dumped payload holds the node tree plus the
    # molecule store keyed by object id.
    tree.serialize(filename)
    expected_dict = {"tree": root.serialize(serializer), "molecules": serializer.store}
    mocked_json_dump.assert_called_once_with(
        expected_dict, mocker.ANY, indent=mocker.ANY
    )

    # Test deserialization: feed the same payload back through json.load.
    mocker.patch(
        "aizynthfinder.search.mcts.search.json.load", return_value=expected_dict
    )
    new_tree = MctsSearchTree.from_json(filename, default_config)
    root_new = new_tree.root
    assert len(root_new.children) == 1
    new_child = root_new.children[0]
    assert root_new.children_view()["values"] == root.children_view()["values"]
    assert root_new.children_view()["priors"] == root.children_view()["priors"]
    assert (
        root_new.children_view()["visitations"] == root.children_view()["visitations"]
    )
    assert root_new.is_expanded
    assert new_child.children_view()["values"] == child.children_view()["values"]
    assert new_child.children_view()["priors"] == child.children_view()["priors"]
    assert (
        new_child.children_view()["visitations"] == child.children_view()["visitations"]
    )
    assert new_child.is_expanded
    assert str(root_new.state) == str(root.state)
    assert str(new_child.state) == str(child.state)
|
# ******************************
# Doug Smyka
# Multiplication Application
# Date Created: 10.19.20
# Date Revised: 10.19.20
# ******************************
import random
import time
import pyinputplus as pyip
# METHODS
# ******************************
# TAKE USER INPUT FOR MAX VALUE
# ******************************
def max_value():
    """Prompt for and return a positive integer (>= 1) upper bound."""
    return pyip.inputInt(min=1)
# ******************************
# TAKE USER INPUT FOR NUMBER
# OF QUESTIONS
# ******************************
def num_of_questions():
    """Prompt for and return the number of questions to ask (>= 1)."""
    return pyip.inputInt(min=1)
# ******************************
# RANDOMLY GENERATE QUESTIONS
# ******************************
def question_generator(max_val):
    """Ask one random multiplication question with operands in [0, max_val].

    The user gets up to 3 guesses; the module-level `correct`/`incorrect`
    counters are updated accordingly.
    """
    # Variables to increment
    guesses = 0
    global correct
    global incorrect
    # Assign random operands for the question
    val1 = random.randint(0, max_val)
    val2 = random.randint(0, max_val)
    # Print the question
    print(f"What is {val1} * {val2}? ")
    # Set a boolean for the while loop
    guessed = False
    while guessed is False:
        # NOTE(review): timer() blocks for a full 10 seconds before each
        # prompt, so every guess is delayed by 10s — confirm this is the
        # intended "time limit" behaviour.
        timer()
        # Take user input
        user_answer = pyip.inputInt()
        # Increment guesses here
        guesses += 1
        # If all guesses are exhausted
        if guesses == 3:
            print("You have exceeded your limit")
            print(f"{val1} * {val2} = {val1*val2}")
            # Increment the counter 'incorrect'
            incorrect += 1
            guessed = True
        # If user provides the correct answer
        elif user_answer == val1 * val2:
            print(f"That is correct! {val1} * {val2} is {user_answer}!")
            # Increment the counter 'correct'
            correct += 1
            guessed = True
        # If user provides incorrect answer
        else:
            print(f"Sorry try again!")
            continue
# ******************************
# MAIN METHOD
# ******************************
def main():
    """Run the quiz: collect settings, ask questions, report the score."""
    welcome_message()
    # Prompt user for the maximum operand value
    print("Please enter a maximum value for the quiz: ", end='')
    max_val = max_value()
    # Prompt user for the number of questions
    print("How many questions: ", end='')
    num = num_of_questions()
    for _ in range(num):
        question_generator(max_val)
    # Print results
    print(f"You got {correct} questions right and {incorrect} questions wrong")
    # BUG FIX: the grade is a percentage, so the ratio must be scaled by
    # 100 (the original multiplied by 10, e.g. 10/10 correct gave "10%").
    print(f"Your grade is {(correct/num)*100}%")
# ******************************
# DISPLAY HEADER
# ******************************
def welcome_message():
    """Print the quiz title banner surrounded by blank lines."""
    print()
    print("\tMultiplication Quiz")
    print()
# ******************************
# TIMER
# ******************************
def timer(timeout=10):
    """Block for *timeout* seconds, then announce that time is up.

    BUG FIX: the original spun in a busy-wait loop comparing time.time()
    against the start time, burning 100% CPU for the whole wait;
    time.sleep() yields the CPU and produces the same observable
    behaviour. The duration is now a parameter (default 10, matching the
    original) so callers can shorten it.
    """
    time.sleep(timeout)
    print("10 seconds has passed, it's time to move on")
# VARIABLES
# Global counters updated by question_generator() via `global`.
correct = 0
incorrect = 0
# MAIN — run the quiz on import/execution.
main()
# EOF
|
#! coding: utf-8
import os
import mock
from django_datajsonar.tasks import read_datajson
from django_datajsonar.models import Distribution, Field, Catalog
from django_datajsonar.models import ReadDataJsonTask, Node
from series_tiempo_ar_api.apps.management import meta_keys
from series_tiempo_ar_api.apps.management.models import IndexDataTask as ManagementTask, DistributionValidatorConfig
from series_tiempo_ar_api.libs.indexing.catalog_reader import index_catalog
from series_tiempo_ar_api.libs.indexing.tests.indexing_test_case import IndexingTestCase
SAMPLES_DIR = os.path.join(os.path.dirname(__file__), 'samples')
CATALOG_ID = 'test_catalog'
@mock.patch("series_tiempo_ar_api.libs.indexing.tasks.DistributionIndexer.reindex")
class ReaderTests(IndexingTestCase):
    """Tests for reading a data.json catalog node and indexing it.

    The Elasticsearch reindex call is mocked out at class level, so these
    tests only exercise the metadata reading/bookkeeping side.
    """
    catalog = os.path.join(SAMPLES_DIR, 'full_ts_data.json')
    catalog_id = 'catalog_id'

    def setUp(self):
        # One read task, one management (index) task and one indexable
        # node pointing at the sample catalog, shared by every test.
        self.task = ReadDataJsonTask.objects.create()
        self.task.save()
        self.mgmt_task = ManagementTask()
        self.mgmt_task.save()
        self.node = Node(catalog_id=self.catalog_id, catalog_url=self.catalog, indexable=True)
        self.node.save()

    def test_index_same_series_different_catalogs(self, *_):
        """Re-reading the same catalog must not duplicate Field rows."""
        read_datajson(self.task, whitelist=True, )
        index_catalog(self.node, self.mgmt_task, )
        read_datajson(self.task, whitelist=True, )
        index_catalog(self.node, self.mgmt_task, )
        count = Field.objects.filter(identifier='212.1_PSCIOS_ERN_0_0_25').count()
        self.assertEqual(count, 1)

    def test_dont_index_same_distribution_twice(self, *_):
        """A distribution whose data did not change is not re-indexed."""
        read_datajson(self.task, whitelist=True, )
        index_catalog(self.node, self.mgmt_task, )
        read_datajson(self.task, whitelist=True, )
        index_catalog(self.node, self.mgmt_task, )
        distribution = Distribution.objects.get(identifier='212.1')
        # The distribution is marked as non-indexable until its data changes
        self.assertEqual(distribution.enhanced_meta.get(key=meta_keys.CHANGED).value, 'False')

    def test_first_time_distribution_indexable(self, *_):
        """On first read, a distribution is marked as changed/indexable."""
        read_datajson(self.task, whitelist=True, )
        index_catalog(self.node, self.mgmt_task,)
        distribution = Distribution.objects.get(identifier='212.1')
        self.assertEqual(distribution.enhanced_meta.get(key=meta_keys.CHANGED).value, 'True')

    def test_index_same_distribution_if_data_changed(self, *_):
        """Pointing the node at changed data re-marks the distribution."""
        read_datajson(self.task, whitelist=True)
        index_catalog(self.node, self.mgmt_task)
        new_catalog = os.path.join(SAMPLES_DIR, 'full_ts_data_changed.json')
        self.node.catalog_url = new_catalog
        self.node.save()
        read_datajson(self.task, whitelist=True, )
        index_catalog(self.node, self.mgmt_task, )
        distribution = Distribution.objects.get(identifier='212.1')
        # The distribution was indexed again, so it is marked as indexable
        self.assertEqual(distribution.enhanced_meta.get(key=meta_keys.CHANGED).value, 'True')

    def test_error_distribution_logs(self, *_):
        """A catalog with a broken distribution produces task error logs."""
        catalog = os.path.join(SAMPLES_DIR, 'distribution_missing_downloadurl.json')
        self.node.catalog_url = catalog
        self.node.save()
        read_datajson(self.task, whitelist=True, )
        index_catalog(self.node, self.mgmt_task, )
        self.assertGreater(len(ReadDataJsonTask.objects.get(id=self.task.id).logs), 10)

    def test_index_YYYY_MM_distribution(self, *_):
        """Distributions with YYYY-MM period data are read and marked."""
        catalog = os.path.join(SAMPLES_DIR, 'single_data_yyyy_mm.json')
        self.node.catalog_url = catalog
        self.node.save()
        read_datajson(self.task, whitelist=True, )
        index_catalog(self.node, self.mgmt_task, )
        distribution = Distribution.objects.get(identifier='102.1')
        self.assertEqual(distribution.enhanced_meta.get(key=meta_keys.CHANGED).value, 'True')

    def test_index_YYYY_distribution(self, *_):
        """Distributions with YYYY period data are read and marked."""
        catalog = os.path.join(SAMPLES_DIR, 'single_data_yyyy.json')
        self.node.catalog_url = catalog
        self.node.save()
        read_datajson(self.task, whitelist=True)
        index_catalog(self.node, self.mgmt_task)
        distribution = Distribution.objects.get(identifier='102.1')
        self.assertEqual(distribution.enhanced_meta.get(key=meta_keys.CHANGED).value, 'True')

    @mock.patch('series_tiempo_ar_api.libs.indexing.catalog_reader.DataJson')
    def test_format_is_passed_to_data_json(self, data_json, *_):
        """The node's catalog_format is forwarded to the DataJson reader."""
        read_datajson(self.task, whitelist=True)
        self.node.catalog_format = 'xlsx'
        index_catalog(self.node, self.mgmt_task)
        self.assertEqual(data_json.call_args[1]['catalog_format'], self.node.catalog_format)

    def test_significant_figures(self, *_):
        """Significant figures metadata is read into the field's meta."""
        Catalog.objects.all().delete()
        catalog = os.path.join(SAMPLES_DIR, 'ipc_data.json')
        self.node.catalog_url = catalog
        self.node.save()
        read_datajson(self.task, whitelist=True)
        index_catalog(self.node, self.mgmt_task)
        field = Field.objects.get(identifier='serie_inflacion')  # Taken from the data.json
        self.assertEqual(field.enhanced_meta.get(key='significant_figures').value, '4')

    def test_custom_validation_options(self, *_):
        """Tightened validator config makes the distribution fail validation."""
        # All validations will fail (title length limit set to 0)
        config = DistributionValidatorConfig.get_solo()
        config.max_field_title_len = 0
        config.save()
        read_datajson(self.task, whitelist=True)
        index_catalog(self.node, self.mgmt_task)
        distribution = Distribution.objects.get(identifier='212.1')
        self.assertTrue(distribution.error)
|
# -*- coding:utf-8 -*-
import numpy as np
import random
import qiujie
sd=np.zeros((9,9))
print "数独初始化\n******************************************"
i=0fffffffffffffffffff
j=0
while(i<=8):
while(j<=8):
m=(i/3)*3+j/3
[a,b,c]=qiujie.shuaxin(sd)
kx=qiujie.keyi(a[i],b[j],c[m])
if kx:
sd[i,j]=random.choice(kx)
j=j+1
print i,j
else:
i=0
j=0
i=i+1
j=0
|
from django.contrib import admin
from .models import Stock, SupplierInformation, Job, StockTemp, TemplateList
from .forms import StockCreateForm, SuppliersCreateForm, StockIssueForm, TemplateListForm
# Register your models here.
class StockCreateAdmin(admin.ModelAdmin):
    """Admin configuration for Stock items."""
    # Columns shown in the change list.
    list_display = ['category_name','item_name','item_fattal_code','item_barcode_external','quantity_item', 'item_fattal_code_issue','description' ]
    form = StockCreateForm
    list_filter = ['category_name'] # filter items by category
    search_fields = ['category_name', 'item_name']
class StockTempAdmin(admin.ModelAdmin):
    """Admin configuration for StockTemp (stock-issue) records.

    BUG FIX: the options were named list_display3/form3/list_filter3/
    search_fields3 — Django's ModelAdmin only reads the standard names
    (list_display, form, list_filter, search_fields), so the original
    settings were silently ignored.
    """
    list_display = ['item_fattal_code_issue', 'item_name_issue']
    form = StockIssueForm
    list_filter = ['item_fattal_code_issue']  # filter items by fattal code
    search_fields = ['item_fattal_code_issue', 'item_name_issue']
class TemplateListAdmin(admin.ModelAdmin):
    """Admin configuration for TemplateList records.

    BUG FIX: the options were named list_display4/form4 — Django's
    ModelAdmin only reads the standard names (list_display, form), so
    the original settings were silently ignored.
    """
    list_display = ['alchol_kind']
    form = TemplateListForm
class SuppliersCreateAdmin(admin.ModelAdmin):
    """Admin configuration for SupplierInformation records.

    BUG FIX: the options were named list_display2/form2/list_filter2/
    search_fields2 — Django's ModelAdmin only reads the standard names
    (list_display, form, list_filter, search_fields), so the original
    settings were silently ignored.
    """
    list_display = ['suppliers_name']
    form = SuppliersCreateForm
    list_filter = ['suppliers_name']  # filter items by supplier name
    search_fields = ['suppliers_name']
# Register each model with its admin configuration (Job uses the default).
admin.site.register(Stock, StockCreateAdmin)
admin.site.register(SupplierInformation, SuppliersCreateAdmin)
admin.site.register(Job)
admin.site.register(TemplateList, TemplateListAdmin)
admin.site.register(StockTemp, StockTempAdmin)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-03-22 08:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11): updates ImageFile's verbose
    names and the is_valid field options. Do not edit by hand."""

    dependencies = [
        ('system', '0003_auto_20200321_1634'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='imagefile',
            options={'verbose_name': '图片', 'verbose_name_plural': '图片'},
        ),
        migrations.AlterField(
            model_name='imagefile',
            name='is_valid',
            field=models.BooleanField(default=True, verbose_name='是否有效'),
        ),
    ]
|
from openpyxl import Workbook
from openpyxl import load_workbook
########################################
########################################
## MAKE SURE THE FILE IS CLOSED FIRST ##
## MAKE SURE THE FILE IS CLOSED FIRST ##
## MAKE SURE THE FILE IS CLOSED FIRST ##
########################################
########################################
# Compute a per-period average "activity level" score and write it back to
# the spreadsheet: rows are grouped by equal consecutive period values; when
# the period changes, the finished group's average is written to every row
# of that group in the chosen output column.

# Enter the file name and the extension here:
FILENAME = input("Enter the file name and the extension here: ")
wb = load_workbook(FILENAME)
ws = wb.active

PERIOD_COLUMN_LETTERS = input("The Period Column letters: ")
ACTIVELEV_COLUMN_LETTERS = input("The ACTIVELEV column letters: ")
COLUMN_TO_WRITE_TO = input("The column that you would like to write info to: ")
ROW_TO_START_ON = input("This number should be the first row number of the info: ")
ROW_TO_END_ON = '100000'

ACTIVELEV = ws[ACTIVELEV_COLUMN_LETTERS+ROW_TO_START_ON:ACTIVELEV_COLUMN_LETTERS+ROW_TO_END_ON]
period_range = ws[PERIOD_COLUMN_LETTERS+ROW_TO_START_ON:PERIOD_COLUMN_LETTERS+ROW_TO_END_ON]
write_range = ws[COLUMN_TO_WRITE_TO+ROW_TO_START_ON:COLUMN_TO_WRITE_TO+ROW_TO_END_ON]


def _activity_points(code):
    """Map an activity-level code to its point value; unknown codes score 0."""
    return {'W': 3, 'S': 1.5, 'V': 6}.get(code, 0)


current = ws[PERIOD_COLUMN_LETTERS+ROW_TO_START_ON].value
counter = 0
sums = 0
average = 0.0
items = []

for period, active, write in zip(period_range, ACTIVELEV, write_range):
    if period[0].value == current:
        # Same period: accumulate this row into the running group.
        sums += _activity_points(active[0].value)
        counter += 1
        items.append(write)
    else:
        # Period changed: write the finished group's average, then start
        # a new group seeded with the current row.
        average = sums / counter
        current = period[0].value
        counter = 1
        sums = _activity_points(active[0].value)
        for cell in items:
            print(cell[0])
            cell[0].value = average
        items.clear()
        items.append(write)

# BUG FIX: the original never flushed the last group, so the final period's
# rows were left without an average. The `current is not None` guard avoids
# writing into the trailing run of empty rows (value None) below the data.
if items and counter and current is not None:
    average = sums / counter
    for cell in items:
        cell[0].value = average

wb.save(FILENAME)
|
# -*- coding: utf-8 -*-
# @Time : 2019/7/16
# @Author : JWDUAN
# @Email : 494056012@qq.com
# @File : gluon_loss.py
# @Software: PyCharm
import numpy as np
from mxnet import ndarray
from mxnet.base import numeric_types
from mxnet.gluon import HybridBlock
def _apply_weighting(F, loss, weight=None, sample_weight=None):
"""Apply weighting to loss.
Parameters
----------
loss : Symbol
The loss to be weighted.
weight : float or None
Global scalar weight for loss.
sample_weight : Symbol or None
Per sample weighting. Must be broadcastable to
the same shape as loss. For example, if loss has
shape (64, 10) and you want to weight each sample
in the batch separately, `sample_weight` should have
shape (64, 1).
Returns
-------
loss : Symbol
Weighted loss
"""
if sample_weight is not None:
loss = F.broadcast_mul(loss, sample_weight)
if weight is not None:
assert isinstance(weight, numeric_types), "weight must be a number"
loss = loss * weight
return loss
def _reshape_like(F, x, y):
    """Reshape *x* to the same shape as *y* (imperative or symbolic)."""
    if F is ndarray:
        # Imperative mode: the target shape is known concretely.
        return x.reshape(y.shape)
    # Symbolic mode: defer the shape resolution to the graph op.
    return F.reshape_like(x, y)
class Loss(HybridBlock):
    """Base class for loss.

    Parameters
    ----------
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """

    def __init__(self, weight, batch_axis, **kwargs):
        super(Loss, self).__init__(**kwargs)
        # Stored for use by subclasses' hybrid_forward implementations.
        self._weight = weight
        self._batch_axis = batch_axis

    def __repr__(self):
        s = '{name}(batch_axis={_batch_axis}, w={_weight})'
        return s.format(name=self.__class__.__name__, **self.__dict__)

    def hybrid_forward(self, F, x, *args, **kwargs):
        """Overrides to construct symbolic graph for this `Block`.

        Parameters
        ----------
        x : Symbol or NDArray
            The first input tensor.
        *args : list of Symbol or list of NDArray
            Additional input tensors.
        """
        # pylint: disable= invalid-name
        # Subclasses must implement the actual loss computation.
        raise NotImplementedError
class SoftmaxCrossEntropyLoss(Loss):
    r"""Computes the softmax cross entropy loss. (alias: SoftmaxCELoss)

    If `sparse_label` is `True` (default), label should contain integer
    category indicators:

    .. math::
        \DeclareMathOperator{softmax}{softmax}
        p = \softmax({pred})
        L = -\sum_i \log p_{i,{label}_i}

    `label`'s shape should be `pred`'s shape with the `axis` dimension removed.
    i.e. for `pred` with shape (1,2,3,4) and `axis = 2`, `label`'s shape should
    be (1,2,4).

    If `sparse_label` is `False`, `label` should contain probability distribution
    and `label`'s shape should be the same with `pred`:

    .. math::
        p = \softmax({pred})
        L = -\sum_i \sum_j {label}_j \log p_{ij}

    Parameters
    ----------
    axis : int, default -1
        The axis to sum over when computing softmax and entropy.
    sparse_label : bool, default True
        Whether label is an integer array instead of probability distribution.
    from_logits : bool, default False
        Whether input is a log probability (usually from log_softmax) instead
        of unnormalized numbers.
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.

    Inputs:
        - **pred**: the prediction tensor, where the `batch_axis` dimension
          ranges over batch size and `axis` dimension ranges over the number
          of classes.
        - **label**: the truth tensor. When `sparse_label` is True, `label`'s
          shape should be `pred`'s shape with the `axis` dimension removed.
          i.e. for `pred` with shape (1,2,3,4) and `axis = 2`, `label`'s shape
          should be (1,2,4) and values should be integers between 0 and 2. If
          `sparse_label` is False, `label`'s shape must be the same as `pred`
          and values should be floats in the range `[0, 1]`.
        - **sample_weight**: element-wise weighting tensor. Must be broadcastable
          to the same shape as label. For example, if label has shape (64, 10)
          and you want to weigh each sample in the batch separately,
          sample_weight should have shape (64, 1).

    Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimenions other than
          batch_axis are averaged out.
        - NOTE(review): unlike stock gluon, hybrid_forward here returns a
          *tuple* of (mean loss, per-element loss) — confirm all callers
          expect two return values.
    """

    def __init__(self, axis=-1, sparse_label=True, from_logits=False, weight=None,
                 batch_axis=0, **kwargs):
        super(SoftmaxCrossEntropyLoss, self).__init__(weight, batch_axis, **kwargs)
        self._axis = axis
        self._sparse_label = sparse_label
        self._from_logits = from_logits

    def hybrid_forward(self, F, pred, label, sample_weight=None):
        # Normalize predictions to log-probabilities unless already given.
        if not self._from_logits:
            pred = F.log_softmax(pred, self._axis)
        if self._sparse_label:
            # Integer labels: pick the log-prob of the true class.
            loss = -F.pick(pred, label, axis=self._axis, keepdims=True)
        else:
            # Dense labels: full cross-entropy against the distribution.
            label = _reshape_like(F, label, pred)
            loss = -F.sum(pred*label, axis=self._axis, keepdims=True)
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        # Returns both the batch-mean and the per-element loss tensor
        # (e.g. shape (2, 1, 256, 256) per the original author's note).
        return F.mean(loss, axis=self._batch_axis, exclude=True), loss


# Alias kept for compatibility with gluon's naming.
SoftmaxCELoss = SoftmaxCrossEntropyLoss
#!/usr/bin/python
from pattern import *
class PCurl(Pattern):
    """Pattern that builds a `curl` command line to probe a URL.

    Produces a single curl invocation (optionally authenticated) with
    --fail --silent --show-error so only real errors surface.
    """

    # NOTE(review): this mutates the shared Pattern.ATTRIBUTS list at class
    # definition time, so every import of this module extends the same
    # list — presumably intentional framework registration; confirm.
    Pattern.ATTRIBUTS.extend(['name', 'url', 'protocole', 'descr', 'user'])

    def __init__(self, arguments_attribut):
        super(PCurl, self).__init__('PCurl', arguments_attribut)

    def do(self):
        # Authenticated variant when a 'user' attribute was supplied.
        if 'user' in self.attributs:
            return ["curl -u %s %s://%s --fail --silent --show-error" %(self.attributs['user'], self.attributs['protocole'], self.attributs['name'])]
        else:
            return ["curl %s://%s --fail --silent --show-error" %(self.attributs['protocole'], self.attributs['name'])]

    def check_arg(self, arguments_attribut):
        # Valid only when both required attributes were supplied.
        if 'name' in arguments_attribut and 'protocole' in arguments_attribut:
            return True
        return False
|
from distutils.core import setup

# Read the long description up front inside a context manager — the
# original passed open('README.txt').read() inline, which leaks the file
# handle (it is only closed whenever the GC gets around to it).
with open('README.txt') as readme_file:
    long_description = readme_file.read()

setup(
    name='autonomous',
    version='1.0dev',
    packages=['autonomous', 'common', 'car_to_x/CarToCar'],
    license='Creative Commons Attribution-Noncommercial-Share Alike license',
    long_description=long_description,
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.