text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python
"""Bot for taking new posts from one subreddit and reposting them to another.
"""
import sys
import sqlite3
import datetime
import os
import re
import configparser
import argparse
import praw
DEFAULT_BOT_NAME='rpgonbot'
__version__ = '0.3'
def debug(*s):
    """Print a timestamped debug line when the module-level DEBUG flag is set.

    A single argument is printed UTF-8 encoded; multiple arguments are
    printed via repr().
    """
    if not DEBUG:
        return
    now = datetime.datetime.now()
    if len(s) > 1:
        print(now, repr(s))
    else:
        print(now, s[0].encode('utf-8'))
def print_all_fields(o):
    """Print every public attribute of *o* as "name: value [type]"."""
    public_names = (name for name in dir(o) if not name.startswith('_'))
    for name in public_names:
        value = getattr(o, name)
        print("{}: {} [{}]".format(name, value, type(value)))
class RPGonBot(object):
    """Crossposts new link submissions from a source subreddit to a
    destination subreddit, rewording titles and recording everything in a
    local SQLite database.

    Relies on module-level globals created by the CLI code below: ``reddit``
    (praw.Reddit instance), ``dbfn`` (database filename), ``OWNER``,
    ``source_limit``, ``my_user_agent``, ``args`` and ``debug()``.
    """
    # Match any bracket pair holding a run of non-whitespace that is not an
    # "OC" tag; used to strip "[f]"-style tags from titles.
    re_title_brackets = re.compile(r"[[({](?!OC)([^\s]*)[])}]")
    # Words/phrases replaced in crossposted titles. Matched case-insensitively
    # with an optional trailing "s"; an empty value deletes the word.
    silly_thesaurus = {
        'curve': 'arc',
        'hole': 'empty space',
        'leg': 'appendage',
        'i love': 'it\'s nice',
        'pole': 'line',
        'curvy': 'wavy',
        'you': 'some things',
        'arm': 'limb',
        'pump': 'pulse',
        'pumping': 'pulsing',
        'hard': 'solid',
        'frisky': 'whimsical',
        'into me': 'into something',
        'in me': 'in object',
        'stick it': 'geometry',
        'mmf': '',
        'ffm': '',
        'first post': 'hello',
        'be gentle': 'such good',
        'baby': 'infant',
        'push': 'apply effort to',
        # BUGFIX: was misspelled "subsantially"
        'deeper': 'more substantially',
        'spread': 'distribute',
        'pleasure': 'good feeling',
    }

    def __init__(self, source, destination):
        self._db = None                     # lazily-opened sqlite3 connection
        self._flairs = {}                   # flair_text -> flair_template_id cache
        self._subreddit_source = None       # lazily-resolved praw Subreddit
        self._subreddit_destination = None
        self.source = source                # source subreddit name
        self.destination = destination      # destination subreddit name

    @property
    def db(self):
        """Return the sqlite3 connection, creating the schema on first use
        (database filename comes from the module-level ``dbfn``)."""
        if self._db is None:
            if not os.path.exists(dbfn):
                debug("creating DB")
                self._db = sqlite3.connect(dbfn)
                self.db_create()
                self._db.commit()
            else:
                self._db = sqlite3.connect(dbfn)
        return self._db

    @property
    def subreddit_destination(self):
        """Lazily-resolved praw Subreddit object for the destination."""
        if self._subreddit_destination is None:
            self._subreddit_destination = reddit.subreddit(self.destination)
        return self._subreddit_destination

    @property
    def subreddit_source(self):
        """Lazily-resolved praw Subreddit object for the source."""
        if self._subreddit_source is None:
            self._subreddit_source = reddit.subreddit(self.source)
        return self._subreddit_source

    def dblog(self, code, text):
        """Append a (code, text) row to the rp_log table and commit."""
        self.db.execute("INSERT INTO rp_log (code, data) VALUES (?, ?)",
            (code, text))
        self.db.commit()

    def get_flair_id(self, flair_text, submission=None, subreddit=None):
        """return the flair_template_id for the given flair_text available
        for the given submission object. User link flairs need to be
        enabled on the subreddit.
        (Flairs could be looked up based on subreddit, but the user needs
        flair moderator permission to access.)
        All flairs are cached in dictionary so lookup only has to be done once.
        Returns None when the flair text is unknown or flairs can't be read.
        """
        if submission is None:
            sub = subreddit or self.subreddit_destination
            choices = sub.flair
            attr = 'display_name'
        else:
            sub = submission
            choices = sub.flair.choices
            attr = 'title'
        if flair_text not in self._flairs:
            try:
                for f in choices():
                    self._flairs[f['flair_text']] = f['flair_template_id']
            except Exception as e:
                debug("Could not load flairs for subreddit: {} ({}): {}".format(
                    getattr(sub, attr), sub.id, e))
        return self._flairs.get(flair_text)

    def db_create(self):
        """Create all tables, indexes and triggers of the bot database."""
        statements = (
            """
            CREATE TABLE rp_post (
                added integer NOT NULL DEFAULT (strftime('%s', 'now')),
                updated integer NOT NULL DEFAULT (strftime('%s', 'now')),
                created_utc integer NOT NULL,
                reddit_id text NOT NULL PRIMARY KEY,
                author text,
                score integer,
                title text,
                url text,
                subreddit text NOT NULL,
                num_comments integer,
                permalink text,
                repost_id text UNIQUE,
                commnt_id text UNIQUE
            );
            """,
            """
            CREATE INDEX rp_post_created_utc_i ON rp_post (created_utc);
            """,
            """
            CREATE TABLE rp_data (
                subreddit text NOT NULL,
                key text NOT NULL,
                value text,
                PRIMARY KEY (subreddit, key)
            );
            """,
            """
            CREATE TRIGGER update_rp_data AFTER INSERT ON rp_post BEGIN
                UPDATE rp_data SET value = NEW.created_utc
                    WHERE subreddit=NEW.subreddit AND key='last_created_utc';
                UPDATE rp_data SET value = NEW.reddit_id
                    WHERE subreddit=NEW.subreddit AND key='last_post_id';
            END;
            """,
            """
            CREATE TABLE rp_log (
                id integer PRIMARY KEY,
                date integer NOT NULL DEFAULT (strftime('%s', 'now')),
                code integer NOT NULL,
                data text
            );
            """
        )
        for sql in statements:
            self.db.execute(sql)
        self.db.commit()
        self.dblog(0, 'Created database')

    def clean_title(self, text):
        """Clean up the gonwild title: strip bracket tags, then reword."""
        return self.hack_title(self.re_title_brackets.sub(r'\1', text))

    def hack_title(self, text):
        """Replace every thesaurus word/phrase in *text*, preserving simple
        capitalization and an optional trailing plural "s"."""
        count = 0
        for k, v in self.silly_thesaurus.items():
            r = re.compile(r"\b({})(s?)\b".format(k), re.I)
            # BUGFIX: restart the scan for each thesaurus entry. Previously
            # the scan position carried over from the prior entry, so later
            # keys silently missed matches earlier in the text.
            index = 0
            while True:
                count += 1
                if count > 999:
                    # hard stop against pathological replacement loops
                    break
                m = r.search(text, index)
                if not m:
                    break
                word = text[m.start(1):m.end(1)]
                # keep the captured plural "s" after the replacement
                replacement = self.text_replacement(word) + text[m.start(2):m.end(2)]
                text = text[:m.start(0)] + replacement + text[m.end(0):]
                # resume after the replacement; advance at least one char so
                # empty replacements cannot loop forever
                index = m.start(0) + (len(replacement) or 1)
        return text.strip()

    def text_replacement(self, word):
        """Find matching thesaurus item, and try to match case, etc"""
        result = self.silly_thesaurus.get(word.lower(), '')
        if result:
            if word == word.lower():
                pass
            elif word == word.upper():
                result = result.upper()
            elif word == word.capitalize():
                result = result.capitalize()
            elif word == word[:1].upper() + word[1:]:
                result = result[:1].upper() + result[1:]
        return result

    def crosspost(self, post, flair=None):
        """Create the crosspost from the provided submission; also add comment with link back.

        Returns the new submission, or None when reddit rejected it
        (e.g. duplicate URL). The ``flair`` parameter is currently unused.
        """
        # BUGFIX/robustness: post.author is None for deleted accounts.
        author_name = post.author.name if post.author else "[deleted]"
        title = self.clean_title(post.title)
        title = title + " - [via: " + author_name + "]"
        try:
            submission = self.subreddit_destination.submit(title,
                url=post.url, resubmit=False, send_replies=False)
        except praw.exceptions.APIException as e:
            self.dblog(1, "{} : {} : {}".format(post.id, post.title, e))
            debug(str(e), post.title)
            return None
        flair_id = self.get_flair_id('x-post', submission=submission)
        submission.flair.select(flair_id)
        # if one uses /u/ in front of author/owner name they get private message on every
        # post due to being "mentioned" -- rather spammy
        comment_template = """Thanks to [Original Submission]({permalink}) by {author}
______
^^^Reposted ^^^by ^^^RPGonBot ^^^^(reddit ^^^user: ^^^""" + OWNER + ")"
        comment = submission.reply(comment_template.format(
            permalink=post.permalink, author=author_name))
        try:
            self.db.execute("""INSERT INTO rp_post
                (created_utc, reddit_id, author, score,
                 title, url, subreddit, num_comments, permalink,
                 repost_id)
                VALUES (?,?,?,?,?,?,?,?,?,?)""", (
                    int(post.created_utc),
                    post.id,
                    author_name,
                    post.score,
                    post.title,
                    post.url,
                    self.source,
                    post.num_comments,
                    post.permalink,
                    submission.id
                ))
            self.db.commit()
        except Exception:
            # dump the offending submission before re-raising so the failure
            # can be diagnosed from the console log
            print_all_fields(post)
            raise
        debug("Crossposted: <{}> {}".format(post.id, title))
        return submission

    def db_create_rp_data(self):
        """create the rows to store the last timestamp and id crossposted"""
        debug("Creating rp_data for sub", self.source)
        self.db.execute("INSERT INTO rp_data VALUES (?, 'last_created_utc', 0);", (self.source,))
        self.db.execute("INSERT INTO rp_data VALUES (?, 'last_post_id', NULL);", (self.source,))
        self.dblog(0, 'Created rp_data for: {}'.format(self.source))
        self.db.commit()

    def db_reset_rp_data(self, reddit_id):
        """Reset the "last crossposted" bookkeeping to the given submission.

        BUGFIXES: reddit.submission() returns a Submission directly (it was
        wrongly wrapped in next()); the second UPDATE targeted the wrong key
        with swapped parameters; and the log call used a non-existent
        ``self.db.log`` instead of ``self.dblog``.
        """
        submission = reddit.submission(reddit_id)
        if submission:
            self.db.execute("""UPDATE rp_data SET value=? WHERE
                subreddit=? and key='last_post_id'""", (reddit_id, self.source))
            self.db.execute("""UPDATE rp_data SET value=? WHERE
                subreddit=? and key='last_created_utc'""",
                (submission.created_utc, self.source))
            self.dblog(4, 'Reset rp_data for {} to post {} ({})'.format(
                self.source, reddit_id, submission.created_utc))
            self.db.commit()

    def db_fetch_rp_data(self, subreddit, key):
        """Fetch value from the table that records various "last submission" data.
        And do some data casting in one case.
        Returns None when no row exists yet.
        """
        result = self.db.execute("""SELECT value FROM rp_data
            WHERE subreddit=? AND key=?""", (subreddit, key)).fetchone()
        if result:
            if key == 'last_created_utc':
                result = int(result[0])
            else:
                result = result[0]
        return result

    def check_for_posts(self):
        """Check for new posts in the source subreddit, and crosspost any found.
        Was going to update local database with old submission comment and score numbers,
        but then didn't.
        """
        submissions = self.subreddit_source.new(limit=source_limit)
        new_posts = []
        # load the last post data for the sub
        last_created_utc = self.db_fetch_rp_data(self.source, 'last_created_utc')
        if last_created_utc is None:
            self.db_create_rp_data()
            last_created_utc = 0
        last_post_id = self.db_fetch_rp_data(self.source, 'last_post_id')
        # go through posts and find new ones
        desc_format = "[{} - {}]: {} ({})"
        for s in submissions:
            desc = desc_format.format(
                s.id,
                datetime.datetime.utcfromtimestamp(int(s.created_utc)),
                s.title,
                s.score)
            # the equals is included in the weird chance two things have same timestamp
            # duplicate is still rejected later
            if int(s.created_utc) >= last_created_utc:
                if s.score > 0:
                    if not s.is_self:
                        new_posts.insert(0, s)  # build ascending order
                    else:
                        self.dblog(2, "Ignoring (self post): " + desc)
                else:
                    self.dblog(2, "Ignoring (low score): " + desc)
            else:
                debug("Ignoring (older post): " + desc)
        # list of new posts in ascending chronological order
        for np in new_posts:
            if np.id != last_post_id:
                self.crosspost(np)
            else:
                debug("Ignoring (last xpost): " + desc_format.format(last_post_id,
                    datetime.datetime.utcfromtimestamp(int(np.created_utc)),
                    np.title, np.score))

    def show_database(self):
        """Print a summary of the bot's database contents to stdout."""
        print("Bot name: {} (database: {})".format(args.bot, dbfn))
        print("User-agent: " + my_user_agent)
        print("Repost records: " + str(self.db.execute(
            """SELECT count(*) FROM rp_post""").fetchone()[0]))
        print("Log entries: " + str(self.db.execute(
            """SELECT count(*) FROM rp_log""").fetchone()[0]))
        print("Repost data:")
        result = self.db.execute("""SELECT subreddit, key, value FROM
            rp_data ORDER BY subreddit, key""").fetchall()
        if len(result) > 0:
            for r in result:
                # render timestamps human-readably (0 means "never")
                if r[1] == 'last_created_utc' and r[2] != '0':
                    r = (r[0], r[1], datetime.datetime.utcfromtimestamp(int(r[2])))
                print("{:<16}{:<18}{}".format(*r))
        else:
            print("No rp_data found")

    def show_reposts(self, count):
        """Print the *count* most recent crosspost records."""
        print("{} Most Recent Reposts:".format(count))
        result = self.db.execute("""SELECT
            strftime('%Y-%m-%d %H:%M:%S', added, 'unixepoch') AS added_ts,
            strftime('%Y-%m-%d %H:%M:%S', created_utc, 'unixepoch') AS created_ts,
            reddit_id, author, substr(title,1,16), repost_id, substr(subreddit,1,8), url, permalink
            FROM rp_post
            ORDER BY added DESC LIMIT ?""", (count,))
        rows = 0
        format = "{0} ({1}) {6:<8} {4:<16} {2} {5}"
        for r in result:
            print(format.format(*r))
            rows += 1
        if rows == 0:
            print("No reposts found")

    def show_log(self, count):
        """Print the *count* most recent log entries."""
        print("{} Most Log Entries:".format(count))
        result = self.db.execute("""SELECT
            id, strftime('%Y-%m-%d %H:%M:%S', date, 'unixepoch') AS ts,
            code, data
            FROM rp_log
            ORDER BY id DESC LIMIT ?""", (count,))
        rows = 0
        format = "[{1}] ({2}) {3}"
        for r in result:
            print(format.format(*r))
            rows += 1
        if rows == 0:
            print("No log entries found")

    def show_posts(self, subreddit_object):
        """Print the newest ``source_limit`` posts currently on the subreddit."""
        print("{} most recent posts in /r/{}:".format(source_limit, subreddit_object.display_name))
        submissions = subreddit_object.new(limit=source_limit)
        format = "{count:02}.[{timestamp}] <{id}> {title} - {domain} ({ups}^)"
        count = 1
        for s in submissions:
            print(format.format(
                count=count,
                timestamp=datetime.datetime.utcfromtimestamp(int(s.created_utc)),
                id=s.id,
                title=s.title,
                domain=s.domain,
                ups=s.ups
            ))
            count += 1
# ---------------------------------------------------------------------------
# Command-line interface: parse arguments, read the praw-style config file,
# build the bot, then dispatch on the chosen sub-command.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Reddit repost bot')
subparsers = parser.add_subparsers(dest="command")
subparser_run = subparsers.add_parser('run', help="Run the bot and repost new posts")
subparser_test = subparsers.add_parser('test', help="Various testing functions")
subparser_show = subparsers.add_parser('show', help="Bot information")
parser.add_argument('-c', '--config', type=str, default='praw.ini',
    metavar="<praw.ini>", help="Name of config file to load")
parser.add_argument('-b', '--bot', type=str, default=DEFAULT_BOT_NAME,
    metavar="<"+DEFAULT_BOT_NAME+">", help="Name of bot (used in config file, and user agent, and other places)")
subparser_test.add_argument('--clean-title', type=str, metavar='"TITLE TO CLEAN"',
    help='Test the title cleaner')
subparser_test.add_argument('--flair-id', type=str, metavar='"FLAIR TEXT"',
    help='Return flair ID for the given flair (uses most recent repost, if post-id not specified)')
subparser_test.add_argument('--post-id', type=str, metavar='ID',
    help='Submission ID to use with tests if needed')
subparser_show.add_argument('--all', action="store_true", help="Show all (or most) data")
subparser_show.add_argument('--database', action="store_true", help="Show info about database")
subparser_show.add_argument('--log', type=int, default=0, metavar="[count]", help="Show latest # log entries")
subparser_show.add_argument('--reposts', type=int, default=0, metavar="[count]", help="Show latest # reposts logged")
subparser_show.add_argument('--posts', action="store_true", help="Show latest posts online in source & destination subreddits")
subparser_run.add_argument('--reset', type=str, help="Reset the last repost date to the given reddit submission ID (eg. 5wxv94)")
args = parser.parse_args()

# Module-level configuration globals used throughout RPGonBot.
config = configparser.ConfigParser()
config.read(args.config)
# Cleanup: membership test already yields a bool; "and True or False" was redundant.
DEBUG = config[args.bot]['debug'] in ('1', 'Y', 'on')
OWNER = config[args.bot]['owner'] or "unknown"
dbfn = args.bot + '.db'
my_user_agent = '{3}:{0}:v{1} (by /u/{2})'.format(args.bot, __version__, OWNER, sys.platform)
source_limit = int(config[args.bot]['source_limit'] or 10)
reddit = praw.Reddit(args.bot, user_agent=my_user_agent)
bot = RPGonBot(config[args.bot]['source'], config[args.bot]['destination'])

if args.command == 'show':
    if args.all:
        args.database = True
        args.reposts = 10
        args.log = 10
    if args.database:
        bot.show_database()
        print()
    if args.reposts > 0:
        bot.show_reposts(args.reposts)
        print()
    if args.log > 0:
        bot.show_log(args.log)
        print()
    # BUGFIX (cosmetic): --posts is a store_true flag, so test it as a bool
    # rather than comparing a bool against 0.
    if args.posts:
        bot.show_posts(bot.subreddit_source)
        print()
        bot.show_posts(bot.subreddit_destination)
        print()
if args.command == 'test':
    if args.clean_title:
        print("Cleaned title: " + repr(bot.clean_title(args.clean_title)))
    if args.flair_id:
        submissions = bot.subreddit_destination.new(limit=1)
        if submissions:
            # BUGFIX: get_flair_id() may return None; formatting instead of
            # string concatenation avoids a TypeError in that case.
            print("Flair id: {}".format(
                bot.get_flair_id(args.flair_id, submission=next(submissions))))
if args.command == 'run':
    if args.reset:
        bot.db_reset_rp_data(args.reset)
        print("Repost data has been reset")
    else:
        bot.check_for_posts()
|
from BasePhone import phone
from math import factorial
class applePhone(phone):
    """Phone subclass whose special feature computes arrangements (k-permutations)."""

    def special_freature(self, n, m):
        """Return the number of arrangements A(n, m) of m items chosen from n.
        (Method name kept as-is for caller compatibility.)
        """
        return self._calc_arrangement(n, m)

    def _calc_arrangement(self, n, m):
        # BUGFIX: use integer floor division -- true division returned a
        # float and loses precision for large n even though n! is always
        # divisible by (n-m)!.
        return factorial(n) // factorial(n - m)
|
def SOLVE(N):
    """Return the first multiple of N whose accumulated decimal digits
    (over N, 2N, 3N, ...) cover all of 0-9, or 'INSOMNIA' when N == 0.

    Fixes: the N == 0 check now happens once before the loop instead of on
    every iteration, and the dead ``break`` after ``return`` is gone.
    """
    if N == 0:
        return 'INSOMNIA'
    full = set(range(10))
    digits = set()
    i = 1  # starting at i=1 folds in N's own digits, as the original did
    while True:
        digits |= {int(c) for c in str(N * i)}
        if digits == full:
            return N * i
        i += 1
def answer():
    """Read Code Jam input from 'A-large.in' and write one "Case #k: result"
    line per test case to 'large-output.txt'.

    The first line (the case count) and blank lines are skipped.
    BUGFIX: files are now closed deterministically via context managers.
    """
    with open("A-large.in", 'r') as source, \
         open("large-output.txt", 'w') as output:
        first_line = True
        case = 1
        for line in source:
            if first_line:
                first_line = False  # skip the case-count header line
            elif line == '\n':
                pass                # ignore blank lines
            else:
                output.write("Case #{0}: {1}\n".format(case, SOLVE(int(line))))
                case += 1
answer()
class Solution:
    def maximalSquare(self, matrix: List[List[str]]) -> int:
        """Return the area of the largest all-'1' square in a binary char matrix.

        dp[i][j] is the side length of the biggest square whose bottom-right
        corner sits at matrix[i-1][j-1]; a padded zero row/column removes the
        first-row/first-column edge cases. Track the best side length and
        square it at the end.
        """
        if not matrix:
            return 0
        rows, cols = len(matrix), len(matrix[0])
        dp = [[0] * (cols + 1) for _ in range(rows + 1)]
        best = 0
        for i in range(1, rows + 1):
            for j in range(1, cols + 1):
                if matrix[i - 1][j - 1] == "1":  # offset to correct for padding
                    dp[i][j] = 1 + min(min(dp[i][j - 1], dp[i - 1][j]), dp[i - 1][j - 1])
                    if dp[i][j] > best:
                        best = dp[i][j]
        return best * best
# Build a full name from its parts, then split it back into words.
first = "sarah"
last = "mcgee"
full_name = " ".join((first, last))
print("full name: {}".format(full_name))
list_split = full_name.split()
print("split list: {}".format(list_split))
|
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection as NanoAODCollection
from CMGTools.TTHAnalysis.treeReAnalyzer import Collection as CMGCollection
from CMGTools.TTHAnalysis.tools.nanoAOD.friendVariableProducerTools import declareOutput, writeOutput
from math import sqrt, cos
from copy import deepcopy
from PhysicsTools.NanoAODTools.postprocessing.tools import deltaR,deltaPhi
from PhysicsTools.Heppy.physicsobjects.Jet import _btagWPs
class EventVars_5TeV(Module):
    """Friend-tree producer for the 5 TeV analysis: computes transverse
    masses (per JEC/JER variation), dilepton angular variables, and the
    Z-candidate / W-lepton assignment for 3- and 4-lepton events.
    """

    def __init__(self, label="", recllabel='Recl', doSystJEC=True, variations=[]):
        # NOTE(review): mutable default `variations=[]` kept for interface
        # compatibility; it is only read, never mutated.
        self.namebranches = ["MT_met_lep1",
                             "MT_met_lep2",
                             "MT_met_lep3",
                             "MTmin",
                             "MTlW",
                             ]
        self.label = "" if (label in ["", None]) else ("_" + label)
        # JEC/JER systematic suffixes keyed by signed variation index;
        # index 0 is the nominal (no suffix).
        self.systsJEC = {0: "",
                         1: "_jesTotalUp", -1: "_jesTotalDown",
                         2: "_jerUp", -2: "_jerDown",
                         } if doSystJEC else {0: ""}
        if len(variations):
            # explicit variation list overrides the default JEC/JER set
            self.systsJEC = {0: ""}
            for i, var in enumerate(variations):
                self.systsJEC[i + 1] = "_%sUp" % var
                self.systsJEC[-(i + 1)] = "_%sDown" % var
        self.inputlabel = '_' + recllabel
        # declare every output branch (one per MT branch x variation, plus
        # unclustered-energy variants and the event-level variables)
        self.branches = []
        for var in self.systsJEC:
            self.branches.extend([br + self.label + self.systsJEC[var] for br in self.namebranches])
        if len(self.systsJEC) > 1:
            self.branches.extend([br + self.label + '_unclustEnUp' for br in self.namebranches if 'MT' in br])
            self.branches.extend([br + self.label + '_unclustEnDown' for br in self.namebranches if 'MT' in br])
        self.branches.extend(['drlep12', 'drlep13', 'drlep23', 'dphilep12', 'dphilep13', 'dphilep23', 'ptlep12'])
        self.branches.extend(["hasOSSF4l", "hasOSSF3l", "m3l", "m4l", "mZ_3l"])
        self.branches.extend(["idx_lZ1", "idx_lZ2", "idx_lW"])
        self.branches.extend(["lZ1_pt", "lZ2_pt", "lW_pt", "lZ1_eta", "lZ2_eta", "lW_eta", "lZ1_pdg", "lZ2_pdg", "lW_pdg", "lZ1_isT", "lZ2_isT", "lW_isT"])

    # old interface (CMG)
    def listBranches(self):
        return self.branches[:]

    def __call__(self, event):
        # BUGFIX: run() accepts only (event, Collection); the stray "met"
        # argument raised a TypeError whenever the CMG interface was used.
        return self.run(event, CMGCollection)

    # new interface (nanoAOD-tools)
    def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
        declareOutput(self, wrappedOutputTree, self.branches)

    def analyze(self, event):
        writeOutput(self, self.run(event, NanoAODCollection))
        return True

    # logic of the algorithm
    def run(self, event, Collection):
        allret = {}
        all_leps = [l for l in Collection(event, "LepGood")]
        nFO = getattr(event, "nLepFO" + self.inputlabel)
        chosen = getattr(event, "iLepFO" + self.inputlabel)
        leps = [all_leps[chosen[i]] for i in range(nFO)]
        # --- dilepton angular variables (defaulted to 0 when not enough leptons)
        if nFO >= 2:
            allret['drlep12'] = deltaR(leps[0], leps[1])
            allret['dphilep12'] = deltaPhi(leps[0], leps[1])
            allret['ptlep12'] = (leps[0].p4() + leps[1].p4()).Pt()
        else:
            allret['drlep12'] = 0
            allret['dphilep12'] = 0
            allret['ptlep12'] = 0
        if nFO >= 3:
            allret['drlep13'] = deltaR(leps[0], leps[2])
            allret['drlep23'] = deltaR(leps[1], leps[2])
            allret['dphilep13'] = deltaPhi(leps[0], leps[2])
            allret['dphilep23'] = deltaPhi(leps[1], leps[2])
        else:
            allret['drlep13'] = 0
            allret['drlep23'] = 0
            allret['dphilep13'] = 0
            allret['dphilep23'] = 0
        # --- defaults for the Z/W assignment variables
        allret['hasOSSF3l'] = False
        allret['hasOSSF4l'] = False
        allret['m4l'] = -99
        allret['m3l'] = -99
        allret['mZ_3l'] = -99
        allret['idx_lZ1'] = -1
        allret['idx_lZ2'] = -1
        allret['idx_lW'] = -1
        allret['lZ1_pt'] = -99
        allret['lZ2_pt'] = -99
        # BUGFIX: this default was 'lW_eta' (set twice below), leaving the
        # declared branch 'lW_pt' without a default for low-multiplicity events.
        allret['lW_pt'] = -99
        allret['lZ1_eta'] = -99
        allret['lZ2_eta'] = -99
        allret['lW_eta'] = -99
        allret['lZ1_pdg'] = -99
        allret['lZ2_pdg'] = -99
        allret['lW_pdg'] = -99
        allret['lZ1_isT'] = False
        allret['lZ2_isT'] = False
        allret['lW_isT'] = False
        idxlW = -1
        idxlZ1 = -1
        idxlZ2 = -1
        if nFO >= 3:
            allret['m3l'] = (leps[0].p4() + leps[1].p4() + leps[2].p4()).M()
            # pick the OSSF pair among the first three leptons whose invariant
            # mass is closest to the Z mass; the remaining lepton is the W one
            bestmZ = 9999.
            leps3 = [leps[0], leps[1], leps[2]]
            for l1 in leps3:
                for l2 in leps3:
                    if l1 == l2: continue
                    if l1.pdgId * l2.pdgId > 0: continue
                    if abs(l1.pdgId) != abs(l2.pdgId): continue
                    allret['hasOSSF3l'] = True
                    mll = (l1.p4() + l2.p4()).M()
                    if (abs(bestmZ - 91.1876) > abs(mll - 91.1876)):
                        bestmZ = mll
                        idxlZ1 = leps.index(l1)
                        idxlZ2 = leps.index(l2)
            for l3 in leps3:
                if (l3 == leps3[idxlZ1]): continue
                if (l3 == leps3[idxlZ2]): continue
                idxlW = leps.index(l3)
            # ensure that the lW and lZ1 has same-sign:
            # NOTE(review): the comment says lZ1 but the code compares lW vs
            # lZ2 and swaps -- behavior kept as-is, confirm intent.
            if leps[idxlW].pdgId * leps[idxlZ2].pdgId > 0:
                tmpidx = idxlZ1
                idxlZ1 = idxlZ2
                idxlZ2 = tmpidx
            allret['mZ_3l'] = (leps[idxlZ1].p4() + leps[idxlZ2].p4()).M()
            allret['idx_lZ1'] = idxlZ1
            allret['idx_lZ2'] = idxlZ2
            allret['idx_lW'] = idxlW
            allret['lZ1_pt'] = leps[idxlZ1].pt
            allret['lZ2_pt'] = leps[idxlZ2].pt
            allret['lW_pt'] = leps[idxlW].pt
            allret['lZ1_eta'] = leps[idxlZ1].eta
            allret['lZ2_eta'] = leps[idxlZ2].eta
            allret['lW_eta'] = leps[idxlW].eta
            allret['lZ1_pdg'] = leps[idxlZ1].pdgId
            allret['lZ2_pdg'] = leps[idxlZ2].pdgId
            allret['lW_pdg'] = leps[idxlW].pdgId
            allret['lZ1_isT'] = leps[idxlZ1].isLepTight_Recl
            allret['lZ2_isT'] = leps[idxlZ2].isLepTight_Recl
            allret['lW_isT'] = leps[idxlW].isLepTight_Recl
        if nFO >= 4:
            allret['m4l'] = (leps[0].p4() + leps[1].p4() + leps[2].p4() + leps[3].p4()).M()
            leps4 = [leps[0], leps[1], leps[2], leps[3]]
            for l1 in leps4:
                for l2 in leps4:
                    if l1 == l2: continue
                    if l1.pdgId * l2.pdgId > 0: continue
                    if abs(l1.pdgId) != abs(l2.pdgId): continue
                    allret['hasOSSF4l'] = True
        # --- transverse masses, once per JEC variation
        for var in self.systsJEC:
            # prepare output
            ret = dict([(name, 0.0) for name in self.namebranches])
            _var = var
            # fall back to the nominal when this variation is absent from the input
            if not hasattr(event, "nJet25" + self.systsJEC[var] + self.inputlabel):
                _var = 0
            metName = 'MET'
            if not _var and not hasattr(event, '%s_pt_nom' % metName):
                met = getattr(event, metName + "_pt")
                metphi = getattr(event, metName + "_phi")
            elif not _var and hasattr(event, '%s_pt_nom' % metName):
                met = getattr(event, metName + "_pt_nom")
                metphi = getattr(event, metName + "_phi_nom")
            else:
                met = getattr(event, metName + "_pt" + self.systsJEC[_var])
                metphi = getattr(event, metName + "_phi" + self.systsJEC[_var])
            nlep = len(leps)
            if nlep > 0:
                ret["MT_met_lep1"] = sqrt(2 * leps[0].conePt * met * (1 - cos(leps[0].phi - metphi)))
            if nlep > 1:
                ret["MT_met_lep2"] = sqrt(2 * leps[1].conePt * met * (1 - cos(leps[1].phi - metphi)))
                ret["MTmin"] = min(ret["MT_met_lep1"], ret["MT_met_lep2"])
            if nlep > 2:
                ret["MT_met_lep3"] = sqrt(2 * leps[2].conePt * met * (1 - cos(leps[2].phi - metphi)))
                # NOTE(review): if no OSSF pair was found idxlW stays -1 and
                # this uses the last lepton -- behavior kept as-is.
                ret["MTlW"] = sqrt(2 * leps[idxlW].conePt * met * (1 - cos(leps[idxlW].phi - metphi)))
            # unclustered-energy MET variations (nominal pass only)
            if not _var and hasattr(event, '%s_pt_unclustEnUp' % metName):
                met_up = getattr(event, metName + "_pt_unclustEnUp")
                metphi_up = getattr(event, metName + "_phi_unclustEnUp")
                met_down = getattr(event, metName + "_pt_unclustEnDown")
                metphi_down = getattr(event, metName + "_phi_unclustEnDown")
                if nlep > 0:
                    allret["MT_met_lep1" + self.label + '_unclustEnUp'] = sqrt(2 * leps[0].conePt * met_up * (1 - cos(leps[0].phi - metphi_up)))
                    allret["MT_met_lep1" + self.label + '_unclustEnDown'] = sqrt(2 * leps[0].conePt * met_down * (1 - cos(leps[0].phi - metphi_down)))
                if nlep > 1:
                    allret["MT_met_lep2" + self.label + '_unclustEnUp'] = sqrt(2 * leps[1].conePt * met_up * (1 - cos(leps[1].phi - metphi_up)))
                    allret["MT_met_lep2" + self.label + '_unclustEnDown'] = sqrt(2 * leps[1].conePt * met_down * (1 - cos(leps[1].phi - metphi_down)))
                    allret["MTmin" + self.label + "_unclustEnUp"] = min(allret["MT_met_lep1" + self.label + '_unclustEnUp'], allret["MT_met_lep2" + self.label + '_unclustEnUp'])
                    allret["MTmin" + self.label + "_unclustEnDown"] = min(allret["MT_met_lep1" + self.label + '_unclustEnDown'], allret["MT_met_lep2" + self.label + '_unclustEnDown'])
                if nlep > 2:
                    allret["MT_met_lep3" + self.label + '_unclustEnUp'] = sqrt(2 * leps[2].conePt * met_up * (1 - cos(leps[2].phi - metphi_up)))
                    allret["MT_met_lep3" + self.label + '_unclustEnDown'] = sqrt(2 * leps[2].conePt * met_down * (1 - cos(leps[2].phi - metphi_down)))
                    allret["MTlW" + self.label + '_unclustEnUp'] = sqrt(2 * leps[idxlW].conePt * met_up * (1 - cos(leps[idxlW].phi - metphi_up)))
                    allret["MTlW" + self.label + '_unclustEnDown'] = sqrt(2 * leps[idxlW].conePt * met_down * (1 - cos(leps[idxlW].phi - metphi_down)))
            for br in self.namebranches:
                allret[br + self.label + self.systsJEC[_var]] = ret[br]
        return allret
# Stand-alone smoke test: open a friend tree and print the computed
# variables for the first 50 events.
# NOTE(review): `ROOT` is never imported in this file, `EventVarsWZ` is not
# defined here (presumably a stale name for EventVars_5TeV -- confirm), and
# `EventLoop` is not imported either; this block likely predates a refactor
# and will not run as-is.
if __name__ == '__main__':
    from sys import argv
    # argv[1]: input ROOT file; argv[2]: friend tree file ("sf/t")
    file = ROOT.TFile(argv[1])
    tree = file.Get("tree")
    tree.vectorTree = True
    tree.AddFriend("sf/t", argv[2])
    class Tester(Module):
        # Minimal module that runs the producer and dumps its output per event.
        def __init__(self, name):
            Module.__init__(self, name, None)
            self.sf = EventVarsWZ('', 'Recl')
        def analyze(self, ev):
            print("\nrun %6d lumi %4d event %d: leps %d" % (ev.run, ev.lumi, ev.evt, ev.nLepGood))
            print(self.sf(ev))
    el = EventLoop([Tester("tester")])
    el.loop([tree], maxEvents=50)
|
'''
脚本一:
用例名称:验证隔离下基于上传扩展名过滤的FTP传输策略
编写人员:李皖秋
编写日期:2021.7.15
测试目的:验证隔离下基于上传扩展名过滤的FTP传输策略
测试步骤:
1、下发ftp的隔离代理:代理ip为前置机安全卡的ip,port为8887,等待nginx的24个进程起来
2、下发ftp的上传扩展名白名单:txt,等待nginx的24个进程起来
3、控制台走ftp隔离登录ftp服务器,上传文件扩展名为白名单txt,查看上传是否成功
4、控制台走ftp隔离登录ftp服务器,上传文件扩展名为非白名单pdf,查看上传是否成功
5、移除ftp的隔离策略,清空环境,等待nginx的24个进程起来
6、移除ftp传输策略,等待nginx的24个进程起来
预期结果:
1、cat /etc/jsac/customapp.stream应该包含代理ip和port,netstat -anp |grep tcp应该可以查看到监听ip和端口
2、cat /etc/jsac/filter.json文件应该包含:allow-upload和上传扩展名白名单:txt
3、上传成功
4、上传失败
5、cat /etc/jsac/customapp.stream应该不包含代理ip和port
6、cat /etc/jsac/filter.json文件应该不包含:ftp协议
脚本二:
用例名称:验证隔离下基于多个上传扩展名过滤的FTP传输策略
编写人员:李皖秋
编写日期:2021.7.15
测试目的:验证隔离下基于多个上传扩展名过滤的FTP传输策略
测试步骤:
1、下发ftp的隔离代理:代理ip为前置机安全卡的ip,port为8887,等待nginx的24个进程起来
2、下发ftp的上传扩展名白名单:txt、xls,等待nginx的24个进程起来
3、控制台走ftp隔离登录ftp服务器,上传文件扩展名为白名单txt,查看上传是否成功
4、控制台走ftp隔离登录ftp服务器,上传文件扩展名为白名单xls,查看上传是否成功
5、控制台走ftp隔离登录ftp服务器,上传文件扩展名为非白名单pdf,查看上传是否成功
6、移除ftp的隔离策略,清空环境,等待nginx的24个进程起来
7、移除ftp传输策略,等待nginx的24个进程起来
预期结果:
1、cat /etc/jsac/customapp.stream应该包含代理ip和port,netstat -anp |grep tcp应该可以查看到监听ip和端口
2、cat /etc/jsac/filter.json文件应该包含:allow-upload和上传扩展名白名单:txt、xls
3、上传成功
4、上传成功
5、上传失败
6、cat /etc/jsac/customapp.stream应该不包含代理ip和port
7、cat /etc/jsac/filter.json文件应该不包含:ftp协议
'''
# encoding='utf-8'
# Module setup for the FTP-upload isolation tests: import dependencies,
# make the project root importable, and load shared configuration.
try:
    # NOTE(review): `time` is imported twice here -- harmless duplicate.
    import os, sys, pytest, allure, time, re, time
except Exception as err:
    print('导入CPython内置函数库失败!错误信息如下:')
    print(err)
    sys.exit(0)  # exit cleanly instead of letting the program crash later
base_path = os.path.dirname(os.path.abspath(__file__))  # current project directory
base_path = base_path.replace('\\', '/')
sys.path.insert(0, base_path)  # add project root to sys.path so the local config modules below can be imported
print(base_path)
try:
    from iso_ftp_check_upload import index
    from iso_ftp_check_upload import message
    from common import fun
    import common.ssh as c_ssh
except Exception as err:
    print(
        '导入基础函数库失败!请检查相关文件是否存在.\n文件位于: ' + str(base_path) + '/common/ 目录下.\n分别为:pcap.py rabbitmq.py ssh.py\n错误信息如下:')
    print(err)
    sys.exit(0)  # exit cleanly instead of letting the program crash later
else:
    del sys.path[0]  # remove the temporary sys.path entry promptly to avoid duplicate-import problems
# import index
# del sys.path[0]
# dir_dir_path=os.path.abspath(os.path.join(os.getcwd()))
# sys.path.append(os.getcwd())
from common import baseinfo
from common import clr_env
from common.rabbitmq import *
from data_check import con_ftp
# shared test fixtures: timestamp, device domains, proxy address and exchange
datatime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
FrontDomain = baseinfo.BG8010FrontDomain
BackDomain = baseinfo.BG8010BackDomain
proxy_ip = baseinfo.BG8010FrontOpeIp
rbmExc = baseinfo.rbmExc
class Test_iso_ftp_check_upload():
def setup_method(self):
clr_env.data_check_setup_met(dut='FrontDut')
clr_env.data_check_setup_met(dut='BackDut')
def teardown_method(self):
clr_env.iso_teardown_met('ftp', base_path)
clr_env.clear_datacheck('ftp', base_path)
clr_env.iso_setup_class(dut='FrontDut')
clr_env.iso_setup_class(dut='BackDut')
def setup_class(self):
# 获取参数
fun.ssh_FrontDut.connect()
fun.ssh_BackDut.connect()
clr_env.iso_setup_class(dut='FrontDut')
clr_env.iso_setup_class(dut='BackDut')
self.case1_step1 = index.case1_step1
self.case1_step11 = index.case1_step11
self.case1_step2 = index.case1_step2
self.case2_step2 = index.case2_step2
self.delcheck = index.delcheck
self.port = index.port
self.username = index.username
self.password = index.password
self.case1_upremotePath = index.case1_upremotePath
self.case1_uplocalPath = index.case1_uplocalPath
self.case1_deny_upremotePath = index.case1_deny_upremotePath
self.case1_deny_uplocalPath = index.case1_deny_uplocalPath
self.case2_upremotePath = index.case2_upremotePath
self.case2_uplocalPath = index.case2_uplocalPath
self.case2_allow_upremotePath = index.case2_allow_upremotePath
self.case2_allow_uplocalPath = index.case2_allow_uplocalPath
self.case2_deny_upremotePath = index.case2_deny_upremotePath
self.case2_deny_uplocalPath = index.case2_deny_uplocalPath
@allure.feature('验证隔离下基于上传扩展名过滤的FTP传输策略')
def test_iso_ftp_check_upload_a1(self):
# 下发配置
print('1、下发ftp的隔离代理:代理ip为前置机安全卡的ip,port为8887,等待nginx的24个进程起来;cat /etc/jsac/customapp.stream应该包含代理ip和port,netstat -anp |grep tcp应该可以查看到监听ip和端口')
fun.send(rbmExc, message.addftp_front['AddCustomAppPolicy'], FrontDomain, base_path)
fun.send(rbmExc, message.addftp_back['AddCustomAppPolicy'], BackDomain, base_path)
fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
front_res = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process', name='前置机nginx进程')
assert front_res == 1
fun.wait_data('ps -ef |grep nginx', 'BackDut', 'nginx: worker process')
back_res = fun.nginx_worker('ps -ef |grep nginx', 'BackDut', 'nginx: worker process', name='后置机nginx进程')
assert back_res == 1
# 检查配置下发是否成功
for key in self.case1_step1:
re = fun.wait_data(self.case1_step1[key][0], 'FrontDut', self.case1_step1[key][1], '配置', 100)
print(re)
assert self.case1_step1[key][1] in re
# 检查配置下发是否成功
for key in self.case1_step11:
re = fun.wait_data(self.case1_step11[key][0], 'FrontDut', self.case1_step11[key][1], '配置', 100)
print(re)
assert self.case1_step11[key][1] in re
print('2、下发ftp的上传扩展名白名单:txt,等待nginx的24个进程起来;cat /etc/jsac/filter.json文件应该包含:allow-upload和上传扩展名白名单:txt')
fun.send(rbmExc, message.ftpcheck1['SetFtpCheck'], FrontDomain, base_path)
fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
add_res2 = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
assert add_res2 == 1
for key in self.case1_step2:
re = fun.wait_data(self.case1_step2[key][0], 'FrontDut', self.case1_step2[key][1], '配置', 100)
print(re)
assert self.case1_step2[key][1] in re
# 登录ftp服务器,上传文件扩展名为白名单
print('3、控制台走ftp隔离登录ftp服务器,上传文件扩展名为白名单txt,查看上传是否成功;上传成功')
fp = con_ftp.connect_ftp(proxy_ip, self.port, self.username, self.password)
print('欢迎语是:{}'.format(fp.getwelcome()))
result1 = con_ftp.uploadFile(fp, self.case1_upremotePath, self.case1_uplocalPath)
print('ftp上传文件扩展名{}为白名单结果为:{}'.format(self.case1_uplocalPath, result1))
assert result1 == 1
# 登录ftp服务器,上传文件扩展名为非白名单
print('4、控制台走ftp隔离登录ftp服务器,上传文件扩展名为非白名单pdf,查看上传是否成功;上传失败')
fp = con_ftp.connect_ftp(proxy_ip, self.port, self.username, self.password)
print('欢迎语是:{}'.format(fp.getwelcome()))
result2 = con_ftp.uploadFile(fp, self.case1_deny_upremotePath, self.case1_deny_uplocalPath)
print('ftp上传文件扩展名{}为非白名单结果为:{}'.format(self.case1_deny_uplocalPath, result2))
assert result2 == 0
# 移除策略,还原环境
print('5、移除ftp的隔离策略,清空环境,等待nginx的24个进程起来;cat /etc/jsac/customapp.stream应该不包含代理ip和port')
fun.send(rbmExc, message.delftp_front['DelCustomAppPolicy'], FrontDomain, base_path)
fun.send(rbmExc, message.delftp_back['DelCustomAppPolicy'], BackDomain, base_path)
fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
fdel_res = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process', name='前置机nginx进程')
assert fdel_res == 1
fun.wait_data('ps -ef |grep nginx', 'BackDut', 'nginx: worker process')
bdel_res = fun.nginx_worker('ps -ef |grep nginx', 'BackDut', 'nginx: worker process', name='后置机nginx进程')
assert bdel_res == 1
# 检查策略移除是否成功
for key in self.case1_step1:
re = fun.wait_data(self.case1_step1[key][0], 'FrontDut', self.case1_step1[key][1], '配置', 100, flag='不存在')
print(re)
assert self.case1_step1[key][1] not in re
# 检查ftp传输策略是否清空
print('6、移除ftp传输策略,等待nginx的24个进程起来;cat /etc/jsac/filter.json文件应该不包含:ftp协议')
fun.send(rbmExc, message.delftpcheck['DropFtpCheck'], FrontDomain, base_path)
fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
del_res2 = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
assert del_res2 == 1
for key in self.delcheck:
re = fun.wait_data(self.delcheck[key][0], 'FrontDut', self.delcheck[key][1], '配置', 100, flag='不存在')
assert self.delcheck[key][1] not in re
# @pytest.mark.skip(reseason="skip")
@allure.feature('验证隔离下基于多个上传扩展名过滤的FTP传输策略')
def test_iso_ftp_check_upload_a2(self):
    """Isolation-mode FTP transfer policy with multiple whitelisted upload
    extensions: txt and xls uploads must succeed, a pdf upload must be
    rejected, and removing the policies must restore the environment.
    """
    # Step 1: push the FTP isolation proxy config to the front and back
    # devices and wait for the nginx workers to come back up.
    print('1、下发ftp的隔离代理:代理ip为前置机安全卡的ip,port为8887,等待nginx的24个进程起来;cat /etc/jsac/customapp.stream应该包含代理ip和port,netstat -anp |grep tcp应该可以查看到监听ip和端口')
    fun.send(rbmExc, message.addftp_front['AddCustomAppPolicy'], FrontDomain, base_path)
    fun.send(rbmExc, message.addftp_back['AddCustomAppPolicy'], BackDomain, base_path)
    fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
    front_res = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process', name='前置机nginx进程')
    assert front_res == 1
    fun.wait_data('ps -ef |grep nginx', 'BackDut', 'nginx: worker process')
    back_res = fun.nginx_worker('ps -ef |grep nginx', 'BackDut', 'nginx: worker process', name='后置机nginx进程')
    assert back_res == 1
    # Verify the first batch of config landed on the front device.
    # NOTE(review): the loop variable `re` shadows the stdlib `re` module
    # if that module is imported in this file.
    for key in self.case1_step1:
        re = fun.wait_data(self.case1_step1[key][0], 'FrontDut', self.case1_step1[key][1], '配置', 100)
        print(re)
        assert self.case1_step1[key][1] in re
    # Verify the second batch of config as well.
    for key in self.case1_step11:
        re = fun.wait_data(self.case1_step11[key][0], 'FrontDut', self.case1_step11[key][1], '配置', 100)
        print(re)
        assert self.case1_step11[key][1] in re
    # Step 2: push the upload-extension whitelist (txt, xls).
    print('2、下发ftp的上传扩展名白名单:txt、xls,等待nginx的24个进程起来;cat /etc/jsac/filter.json文件应该包含:allow-upload和上传扩展名白名单:txt、xls')
    fun.send(rbmExc, message.ftpcheck2['SetFtpCheck'], FrontDomain, base_path)
    fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
    add_res2 = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
    assert add_res2 == 1
    for key in self.case2_step2:
        re = fun.wait_data(self.case2_step2[key][0], 'FrontDut', self.case2_step2[key][1], '配置', 100)
        print(re)
        assert self.case2_step2[key][1] in re
    # Step 3: upload a file with a whitelisted extension (txt) — must succeed.
    print('3、控制台走ftp隔离登录ftp服务器,上传文件扩展名为白名单txt,查看上传是否成功;上传成功')
    fp = con_ftp.connect_ftp(proxy_ip, self.port, self.username, self.password)
    print('欢迎语是:{}'.format(fp.getwelcome()))
    result1 = con_ftp.uploadFile(fp, self.case2_upremotePath, self.case2_uplocalPath)
    print('第一个ftp上传文件扩展名{}为白名单结果为:{}'.format(self.case2_uplocalPath, result1))
    assert result1 == 1
    # Step 4: upload the second whitelisted extension (xls) — must succeed.
    print('4、控制台走ftp隔离登录ftp服务器,上传文件扩展名为白名单xls,查看上传是否成功;上传成功')
    fp = con_ftp.connect_ftp(proxy_ip, self.port, self.username, self.password)
    print('欢迎语是:{}'.format(fp.getwelcome()))
    result2 = con_ftp.uploadFile(fp, self.case2_allow_upremotePath, self.case2_allow_uplocalPath)
    print('第二个ftp上传文件扩展名{}为白名单结果为:{}'.format(self.case2_allow_uplocalPath, result2))
    assert result2 == 1
    # Step 5: upload a non-whitelisted extension (pdf) — must be rejected.
    print('5、控制台走ftp隔离登录ftp服务器,上传文件扩展名为非白名单pdf,查看上传是否成功;上传失败')
    fp = con_ftp.connect_ftp(proxy_ip, self.port, self.username, self.password)
    print('欢迎语是:{}'.format(fp.getwelcome()))
    result3 = con_ftp.uploadFile(fp, self.case2_deny_upremotePath, self.case2_deny_uplocalPath)
    print('ftp上传文件扩展名{}为非白名单结果为:{}'.format(self.case2_deny_uplocalPath, result3))
    assert result3 == 0
    # Step 6: remove the isolation policy and restore the environment.
    print('6、移除ftp的隔离策略,清空环境,等待nginx的24个进程起来;cat /etc/jsac/customapp.stream应该不包含代理ip和port')
    fun.send(rbmExc, message.delftp_front['DelCustomAppPolicy'], FrontDomain, base_path)
    fun.send(rbmExc, message.delftp_back['DelCustomAppPolicy'], BackDomain, base_path)
    fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
    fdel_res = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process', name='前置机nginx进程')
    assert fdel_res == 1
    fun.wait_data('ps -ef |grep nginx', 'BackDut', 'nginx: worker process')
    bdel_res = fun.nginx_worker('ps -ef |grep nginx', 'BackDut', 'nginx: worker process', name='后置机nginx进程')
    assert bdel_res == 1
    # Confirm the proxy policy is gone from the front device.
    for key in self.case1_step1:
        re = fun.wait_data(self.case1_step1[key][0], 'FrontDut', self.case1_step1[key][1], '配置', 100, flag='不存在')
        print(re)
        assert self.case1_step1[key][1] not in re
    # Step 7: drop the FTP transfer policy and confirm it is cleared.
    print('7、移除ftp传输策略,等待nginx的24个进程起来;cat /etc/jsac/filter.json文件应该不包含:ftp协议')
    fun.send(rbmExc, message.delftpcheck['DropFtpCheck'], FrontDomain, base_path)
    fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
    del_res2 = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
    assert del_res2 == 1
    for key in self.delcheck:
        re = fun.wait_data(self.delcheck[key][0], 'FrontDut', self.delcheck[key][1], '配置', 100, flag='不存在')
        assert self.delcheck[key][1] not in re
def teardown_class(self):
    """Restore the isolation environment and close all open connections."""
    # Reset both devices to the clean isolation baseline.
    clr_env.iso_setup_class(dut='FrontDut')
    clr_env.iso_setup_class(dut='BackDut')
    # Close the RabbitMQ channel and both SSH sessions.
    # NOTE(review): presumably these were opened in setup_class — confirm.
    fun.rbm_close()
    fun.ssh_close('FrontDut')
    fun.ssh_close('BackDut')
# Decision Board:
# https://docs.qq.com/doc/DTnlicHFFUVlyaFBL
import torch.nn as nn
import torch.nn.functional as F
from train_pred_helper import train_helper
class LeNet(nn.Module):
    """LeNet-5 style CNN for 3-channel 32x32 inputs with 10 output classes.

    forward() returns class probabilities (softmax over dim 1).
    """

    def __init__(self):
        super(LeNet, self).__init__()
        self.c1 = nn.Conv2d(3, 6, kernel_size=5)
        self.c3 = nn.Conv2d(6, 16, kernel_size=5)
        self.c5 = nn.Linear(16 * 5 * 5, 120)
        self.f6 = nn.Linear(120, 84)
        self.output_layer = nn.Linear(84, 10)

    def forward(self, x):
        """Run a forward pass; x is expected as (batch, 3, 32, 32)."""
        temp = F.relu(self.c1(x))            # C1: convolution
        temp = F.max_pool2d(temp, 2)         # S2: pooling (subsampling)
        temp = F.relu(self.c3(temp))         # C3: convolution
        temp = F.max_pool2d(temp, 2)         # S4: pooling (subsampling)
        temp = temp.view(temp.size(0), -1)   # flatten to (batch, 16*5*5)
        temp = F.relu(self.c5(temp))         # C5: fully connected
        temp = F.relu(self.f6(temp))         # F6: fully connected
        out = self.output_layer(temp)        # output: fully connected
        # Fix: F.softmax without `dim` is deprecated and ambiguous; the
        # class dimension for (batch, classes) logits is dim=1.
        return F.softmax(out, dim=1)
def lenet_train():
    """Build a fresh LeNet model and run it through the shared trainer."""
    print("LeNet training.")
    net = LeNet()
    train_helper(net)


if __name__ == '__main__':
    lenet_train()
|
from datetime import datetime
from airflow.hooks.sqlite_hook import SqliteHook
from sqlalchemy.orm import sessionmaker
from models.sql.normalized import Stakeholder
from utils.handlers import iteration_handler
def process_record(session, execution_date, row):
    """Upsert one normalized Stakeholder built from a source *row*."""
    stakeholder = Stakeholder.from_source(
        row,
        session,
        created_at=execution_date,
        updated_at=datetime.utcnow(),
    )
    stakeholder.insert_or_update(session)
def handler(SourceModel, execution_date, next_execution_date, **kwargs):
    """Airflow task callable: copy *SourceModel* rows for the execution
    window into the normalized Stakeholder table via iteration_handler.

    NOTE(review): two distinct sessions are in play — a fresh Session()
    passed to iterate_rows (read side) and the outer `session` used for
    writes and rollback. Confirm this split is intentional.
    """
    connection = SqliteHook()
    # Session factory bound to the hook's engine; expire_on_commit=False so
    # returned objects stay usable after commit.
    Session = sessionmaker(
        bind=connection.get_sqlalchemy_engine(), expire_on_commit=False
    )
    session = Session()

    def handle_error(*_, **__):
        # Per-row failure: roll back the write session and let
        # iteration_handler continue/decide.
        session.rollback()

    return iteration_handler(
        SourceModel.iterate_rows(Session(), execution_date, next_execution_date),
        lambda x: process_record(session, execution_date, x),
        handle_error,
    )
|
__author__ = 'gbhardwaj'
def contains_magic_number(list, magic_number):
    """Print whether *magic_number* occurs in *list*.

    The parameter name `list` shadows the builtin; it is kept unchanged
    for backward compatibility with keyword callers.
    """
    # Fix: the original used Python 2 `print` statements, which are a
    # SyntaxError under Python 3.
    if magic_number in list:
        print("This list contains the magic number.")
    else:
        print("This list does NOT contain the magic number.")


contains_magic_number(range(10), 5)
"""Unit tests for the Root Resource."""
import kgb
from django.core.exceptions import ImproperlyConfigured
from django.test.client import RequestFactory
from django.test.utils import override_settings
from djblets.extensions.manager import ExtensionManager
from djblets.extensions.resources import ExtensionResource
from djblets.testing.testcases import TestCase
from djblets.webapi.resources import RootResource, WebAPIResource
from djblets.webapi.resources.root import _URITemplatesCache
# Will appear in the URI templates list under the names 'mock' and 'mocks'.
class MockResource(WebAPIResource):
    """Mock item/list resource registered as 'mock'/'mocks'."""

    name = 'mock'
    uri_object_key = 'mock_id'
# Will cause a conflict with MockResource when added to the URI templates list.
class DuplicateMockResource(MockResource):
    """Same template names as MockResource; used to exercise conflicts."""

    uri_name = 'duplicatemocks'
# Will appear in the URI templates list under the names 'test_mock' and
# 'test_mocks'.
class OtherMockResource(MockResource):
    """Mock resource exposing a custom uri_template_name."""

    uri_template_name = 'test_mock'
# Will be excluded from the URI templates list.
class ExcludedResource(WebAPIResource):
    """Resource with uri_template_name=None, so it is never listed."""

    name = 'exclude'
    uri_template_name = None
    uri_object_key = 'exclude_id'
# Will have its item resource excluded but list resource included in the
# URI templates list.
class IncludedListResource(WebAPIResource):
    """Only the plural (list) template 'lists' is registered."""

    name = 'test_list'
    uri_template_name = None
    uri_template_name_plural = 'lists'
class RootResourceTests(kgb.SpyAgency, TestCase):
    """Unit tests for RootResource.

    Version Added:
        3.1
    """

    def setUp(self):
        """Setup for the RootResource unit tests."""
        super().setUp()

        # A mix of named, renamed, excluded and list-only resources, so
        # each test can probe the URI-template registry behavior.
        self.root_res = RootResource(
            [
                MockResource(),
                OtherMockResource(),
                ExcludedResource(),
                IncludedListResource(),
            ]
        )
        self.request = RequestFactory().get('/')

    def test_get_uri_templates_uses_uri_template_names(self):
        """Testing RootResource.get_uri_templates uses the uri_template_name
        and uri_template_name_plural for resources instead of name and
        plural_name
        """
        uri_templates = self.root_res.get_uri_templates(self.request)

        self.assertEqual(uri_templates['mock'],
                         'http://testserver/mocks/{mock_id}/')
        self.assertEqual(uri_templates['mocks'], 'http://testserver/mocks/')
        self.assertEqual(uri_templates['test_mock'],
                         'http://testserver/mocks/{mock_id}/')
        self.assertEqual(uri_templates['test_mocks'],
                         'http://testserver/mocks/')
        self.assertEqual(uri_templates['lists'],
                         'http://testserver/test-lists/')

    def test_get_uri_templates_skips_with_none(self):
        """Testing RootResource.get_uri_templates skips item resources that
        have their uri_template_name and list resources that have their
        uri_template_plural_name set to None
        """
        uri_templates = self.root_res.get_uri_templates(self.request)

        self.assertEqual(uri_templates['mock'],
                         'http://testserver/mocks/{mock_id}/')
        self.assertEqual(uri_templates['mocks'], 'http://testserver/mocks/')
        self.assertEqual(uri_templates['test_mocks'],
                         'http://testserver/mocks/')
        self.assertEqual(uri_templates['test_mock'],
                         'http://testserver/mocks/{mock_id}/')
        self.assertEqual(uri_templates['lists'],
                         'http://testserver/test-lists/')
        # None-named resources must not appear at all.
        self.assertNotIn('test_list', uri_templates)
        self.assertNotIn('exclude', uri_templates)
        self.assertNotIn('excludes', uri_templates)

    @override_settings(DEBUG=False)
    def test_get_uri_templates_must_be_unique(self):
        """Testing RootResource.get_uri_templates logs an error when multiple
        URI templates are mapped to the same name in production mode
        """
        self.root_res = RootResource([MockResource(), DuplicateMockResource()])
        expected_message = (
            'More than one URI template was mapped to the "mocks" name: '
            'http://testserver/mocks/, http://testserver/duplicatemocks/. '
            'Only the first one will be included in the URI templates list. '
            'To include the other URI templates, they must be mapped to a '
            'unique name by setting each resource\'s uri_template_name '
            'property.'
        )

        with self.assertLogs() as logs:
            uri_templates = self.root_res.get_uri_templates(self.request)

        # In production mode the first registration wins and the conflict
        # is only logged.
        self.assertEqual(logs.records[0].getMessage(), expected_message)
        self.assertIn('mocks', uri_templates)
        self.assertEqual(uri_templates['mocks'],
                         'http://testserver/mocks/')

    @override_settings(DEBUG=True)
    def test_get_uri_templates_must_be_unique_debug(self):
        """Testing RootResource.get_uri_templates raises an error when multiple
        URI templates are mapped to the same name in debug mode
        """
        self.root_res = RootResource([MockResource(), DuplicateMockResource()])
        expected_message = (
            'More than one URI template was mapped to the "mocks" name: '
            'http://testserver/mocks/, http://testserver/duplicatemocks/. '
            'Each URI template must be mapped to a unique URI template '
            'name in order to be included in the URI templates list. This can '
            'be set through the uri_template_name property.'
        )

        with self.assertRaisesMessage(ImproperlyConfigured, expected_message):
            self.root_res.get_uri_templates(self.request)

    def test_get_uri_templates_caching(self):
        """Testing RootResource.get_uri_templates caching"""
        resource = self.root_res

        self.spy_on(resource.build_uri_templates)

        # Check repeated calls to the same URL.
        request1 = RequestFactory().get('/api1/')
        uri_templates1 = resource.get_uri_templates(request1)
        uri_templates2 = resource.get_uri_templates(request1)

        self.assertEqual(uri_templates1, {
            'lists': 'http://testserver/api1/test-lists/',
            'mock': 'http://testserver/api1/mocks/{mock_id}/',
            'mocks': 'http://testserver/api1/mocks/',
            'root': 'http://testserver/api1/',
            'test_mock': 'http://testserver/api1/mocks/{mock_id}/',
            'test_mocks': 'http://testserver/api1/mocks/',
        })
        # Same base URL => same cached object, built exactly once.
        self.assertIs(uri_templates1, uri_templates2)
        self.assertSpyCallCount(resource.build_uri_templates, 1)
        self.assertSpyLastCalledWith(resource.build_uri_templates,
                                     'http://testserver/api1/')

        # Check against a second API URL.
        request2 = RequestFactory().get('/api2/')
        uri_templates3 = resource.get_uri_templates(request2)
        uri_templates4 = resource.get_uri_templates(request2)

        self.assertEqual(uri_templates3, {
            'lists': 'http://testserver/api2/test-lists/',
            'mock': 'http://testserver/api2/mocks/{mock_id}/',
            'mocks': 'http://testserver/api2/mocks/',
            'root': 'http://testserver/api2/',
            'test_mock': 'http://testserver/api2/mocks/{mock_id}/',
            'test_mocks': 'http://testserver/api2/mocks/',
        })
        self.assertIs(uri_templates3, uri_templates4)
        self.assertSpyCallCount(resource.build_uri_templates, 2)
        self.assertSpyLastCalledWith(resource.build_uri_templates,
                                     'http://testserver/api2/')

        # And invalidate the cache.
        resource._cached_uri_templates.clear()
        uri_templates5 = resource.get_uri_templates(request1)
        uri_templates6 = resource.get_uri_templates(request2)

        # Equal content, but freshly-built objects after invalidation.
        self.assertIsNot(uri_templates5, uri_templates1)
        self.assertEqual(uri_templates5, uri_templates1)
        self.assertIsNot(uri_templates6, uri_templates3)
        self.assertEqual(uri_templates6, uri_templates3)
        self.assertSpyCallCount(resource.build_uri_templates, 4)
class RootResourceTemplateRegistrationTests(TestCase):
    """Unit tests for the (un)registration of templates in RootResource."""

    def setUp(self):
        super(RootResourceTemplateRegistrationTests, self).setUp()

        self.ext_mgr = ExtensionManager('')
        self.ext_res = ExtensionResource(self.ext_mgr)
        self.root_res = RootResource([self.ext_res])
        # Seed the registry with one resource-owned entry and one
        # unowned (None-keyed) entry.
        self.root_res._registered_uri_templates = {
            self.ext_res: {
                'extensions': 'http://localhost:8080/api/extensions/'
            },
            None: {
                'extensions': 'http://localhost:8080/api/extensions/none/'
            },
        }

    def test_register_uri_template_without_relative_resource(self):
        """Testing register_uri_templates without a relative resource"""
        self.root_res.register_uri_template(name='key', relative_path='value')
        # Unowned registrations land under the None key.
        actual_result = self.root_res._registered_uri_templates[None]
        self.assertEqual(actual_result, {
            'extensions': 'http://localhost:8080/api/extensions/none/',
            'key': 'value',
        })

    def test_register_uri_template_with_relative_resource(self):
        """Testing register_uri_templates with a relative resource"""
        mock_extension_resource = ExtensionResource(self.ext_mgr)
        self.root_res.register_uri_template(
            name='key',
            relative_path='value',
            relative_resource=mock_extension_resource)
        actual_result = self.root_res._registered_uri_templates[
            mock_extension_resource]
        self.assertEqual(actual_result, {'key': 'value'})

    def test_register_uri_template_clears_uri_template_cache(self):
        """Testing register_uri_templates clears the URI template cache"""
        resource = self.root_res
        request = RequestFactory().get('/api/')

        # Prime the cache, then make sure registration invalidates it.
        resource.get_uri_templates(request)
        self.assertEqual(len(resource._cached_uri_templates._cache), 1)

        resource.register_uri_template('extension_name', 'some/relative/path/')
        self.assertEqual(len(resource._cached_uri_templates._cache), 0)

    def test_register_uri_template_overwrites_existing_uri_template(self):
        """Testing register_uri_templates overwrites existing uri templates and
        logs a message saying so
        """
        expected_message = ('The extensions resource is already mapped to the '
                            'following URI template: '
                            'http://localhost:8080/api/extensions/none/. This '
                            'will be overwritten by the new URI template: '
                            'http://localhost:8080/api/different/.')

        with self.assertLogs(level='DEBUG') as logs:
            self.root_res.register_uri_template(
                name='extensions',
                relative_path='http://localhost:8080/api/different/')

        actual_result = self.root_res._registered_uri_templates[None]
        self.assertEqual(logs.records[0].getMessage(), expected_message)
        self.assertEqual(actual_result, {
            'extensions': 'http://localhost:8080/api/different/',
        })

    def test_unregister_uri_template_without_relative_resource(self):
        """Testing unregister_uri_template without a relative resource"""
        self.root_res.unregister_uri_template('extensions')
        self.assertFalse(self.root_res._registered_uri_templates[None])

    def test_unregister_uri_template_with_relative_resource(self):
        """Testing unregister_uri_template with a relative resource"""
        self.root_res.unregister_uri_template('extensions', self.ext_res)
        self.assertEqual(
            self.root_res._registered_uri_templates[self.ext_res],
            {})

    def test_unregister_uri_template_clears_uri_template_cache(self):
        """Testing unregister_uri_templates clears the URI template cache"""
        resource = self.root_res
        request = RequestFactory().get('/api/')

        resource.get_uri_templates(request)
        self.assertEqual(len(resource._cached_uri_templates._cache), 1)

        resource.unregister_uri_template('extensions')
        self.assertEqual(len(resource._cached_uri_templates._cache), 0)
class URITemplateCacheTests(TestCase):
    """Unit tests for _URITemplatesCache.

    Version Added:
        3.2
    """

    def test_add(self):
        """Testing _URITemplatesCache.add"""
        cache = _URITemplatesCache()

        cache.add('/api1/', {
            'template1': 'http://localhost:8080/api1/resource1/',
            'template2': 'http://localhost:8080/api1/resource2/',
        })
        cache.add('/api2/', {
            'template3': 'http://localhost:8080/api2/resource3/',
        })

        # Entries keep insertion order.
        self.assertEqual(
            list(cache._cache.items()),
            [
                ('/api1/', {
                    'template1': 'http://localhost:8080/api1/resource1/',
                    'template2': 'http://localhost:8080/api1/resource2/',
                }),
                ('/api2/', {
                    'template3': 'http://localhost:8080/api2/resource3/',
                }),
            ])

    def test_add_when_full(self):
        """Testing _URITemplatesCache.add when cache is full"""
        cache = _URITemplatesCache(max_size=2)

        cache.add('/api1/', {
            'template1': 'http://localhost:8080/api1/resource1/',
            'template2': 'http://localhost:8080/api1/resource2/',
        })
        cache.add('/api2/', {
            'template3': 'http://localhost:8080/api2/resource3/',
        })
        cache.add('/api3/', {
            'template4': 'http://localhost:8080/api3/resource4/',
        })

        # LRU behavior: the oldest entry (/api1/) is evicted.
        self.assertEqual(
            list(cache._cache.items()),
            [
                ('/api2/', {
                    'template3': 'http://localhost:8080/api2/resource3/',
                }),
                ('/api3/', {
                    'template4': 'http://localhost:8080/api3/resource4/',
                }),
            ])

    def test_get(self):
        """Testing _URITemplatesCache.get"""
        cache = _URITemplatesCache()

        cache.add('/api1/', {
            'template1': 'http://localhost:8080/api1/resource1/',
            'template2': 'http://localhost:8080/api1/resource2/',
        })
        cache.add('/api2/', {
            'template3': 'http://localhost:8080/api2/resource3/',
        })
        cache.add('/api3/', {
            'template4': 'http://localhost:8080/api3/resource4/',
        })

        # Retrieving the second item should reorder it to last.
        self.assertEqual(
            cache.get('/api2/'),
            {
                'template3': 'http://localhost:8080/api2/resource3/',
            })

        self.assertEqual(
            list(cache._cache.items()),
            [
                ('/api1/', {
                    'template1': 'http://localhost:8080/api1/resource1/',
                    'template2': 'http://localhost:8080/api1/resource2/',
                }),
                ('/api3/', {
                    'template4': 'http://localhost:8080/api3/resource4/',
                }),
                ('/api2/', {
                    'template3': 'http://localhost:8080/api2/resource3/',
                }),
            ])

    def test_get_and_not_found(self):
        """Testing _URITemplatesCache.get with URI templates not found"""
        cache = _URITemplatesCache()

        with self.assertRaises(KeyError):
            self.assertIsNone(cache.get('/api/'))

        self.assertEqual(cache._cache, {})

    def test_get_and_not_found_and_build_func(self):
        """Testing _URITemplatesCache.get with URI templates not found and
        build_func=
        """
        cache = _URITemplatesCache()

        # On a miss, build_func supplies (and caches) the templates.
        self.assertEqual(
            cache.get(
                '/api1/',
                build_func=lambda base_href: {
                    'template1': 'http://localhost:8080/api1/resource1/',
                    'template2': 'http://localhost:8080/api1/resource2/',
                }),
            {
                'template1': 'http://localhost:8080/api1/resource1/',
                'template2': 'http://localhost:8080/api1/resource2/',
            })

        self.assertEqual(
            list(cache._cache.items()),
            [
                ('/api1/', {
                    'template1': 'http://localhost:8080/api1/resource1/',
                    'template2': 'http://localhost:8080/api1/resource2/',
                }),
            ])

    def test_clear(self):
        """Testing _URITemplatesCache.clear"""
        cache = _URITemplatesCache()

        cache.add('/api1/', {
            'template1': 'http://localhost:8080/api1/resource1/',
            'template2': 'http://localhost:8080/api1/resource2/',
        })

        cache.clear()

        self.assertEqual(cache._cache, {})
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
year = 2019  # change year to analyze different eras
url_bbr = 'https://www.basketball-reference.com/leagues/NBA_{}_per_game.html'.format(year)

html_bbr = urlopen(url_bbr)
soup_bbr = BeautifulSoup(html_bbr, features='lxml')  # parse the HTML document

# Header row: the <th> cells of the first table row (e.g. 'PTS', 'MP').
stat_headers = [txt.get_text() for txt in soup_bbr.find_all('tr', limit=2)[0].find_all('th')]
stat_headers = stat_headers[1:]  # drop the leading 'Rk' (rank) column

rows = soup_bbr.find_all('tr')[1:]
# Extract each player's stats from the <td> cells of their row.
player_stats_bb = [[stats.get_text() for stats in row.find_all('td')] for row in rows]
# Fix: the page repeats the header row inside the table body; those rows
# contain no <td> cells and produced empty lists, which pandas turned into
# all-NaN rows in the DataFrame. Filter them out.
player_stats_bb = [row for row in player_stats_bb if row]

stats = pd.DataFrame(player_stats_bb, columns=stat_headers)
first_ten = stats.head(10)  # first 10 rows, just to sanity-check the frame
print(first_ten)
"""
A program to know the even number in the given rangeusing for loop.
"""
n=int(input("Enter the range:"))
for i in range(1,n+1):
if(i%2==0):
print(i)
|
from Normal_Game_NR import Normal_Game_NR
import random
from colored import fg, bg, attr
import rooms.biblioteca_room as biblioteca_room
import os
import datetime
def show_ahorcado(tries):
    """Return the hangman (ahorcado) ASCII drawing for the remaining tries.

    Args:
        tries (int): remaining attempts, from 0 (fully drawn figure) up to
            6 (empty gallows).

    Returns:
        str: the drawing for stage ``tries``.
    """
    # Index 0 is the complete figure (game lost); each higher index has one
    # fewer body part.
    stages = [
        """
                   --------
                   |      |
                   |      O
                   |     \\|/
                   |      |
                   |     / \\
                   -
        """,
        """
                   --------
                   |      |
                   |      O
                   |     \\|/
                   |      |
                   |     /
                   -
        """,
        """
                   --------
                   |      |
                   |      O
                   |     \\|/
                   |      |
                   |
                   -
        """,
        """
                   --------
                   |      |
                   |      O
                   |     \\|
                   |      |
                   |
                   -
        """,
        """
                   --------
                   |      |
                   |      O
                   |      |
                   |      |
                   |
                   -
        """,
        """
                   --------
                   |      |
                   |      O
                   |
                   |
                   |
                   -
        """,
        """
                   --------
                   |      |
                   |
                   |
                   |
                   |
                   -
        """
    ]
    return stages[tries]
def get_clue(player, info_question, n_clue):
    """Offer the player a clue and consume one if accepted.

    Args:
        player: Player instance; holds the remaining clue count.
        info_question (dict): question data with keys ``clue_1``..``clue_3``.
        n_clue (int): 1-based index of the next clue to reveal.

    Returns:
        int: ``n_clue + 1`` if a clue was consumed, otherwise ``n_clue``.
    """
    clue = input("¿Desea gastar una pista? (S/N)\n==> ").upper()
    if clue == "S":
        if n_clue<=3 and player.get_clues_count() > 0:
            print(info_question[f"clue_{n_clue}"])
            player.set_clues_count(player.get_clues_count() - 1)
            print(f"Pistas restantes: {player.get_clues_count()}")
            return n_clue+1
        else:
            print("No te quedan pistas")
            return n_clue
    # Fix: the original fell off the end (returning None) when the player
    # declined; the caller's next `n_clue <= 3` comparison then raised
    # TypeError. Declining leaves the clue counter unchanged.
    return n_clue
def ahorcado_game(ahorcado,player):
    """Run the hangman game loop.

    Args:
        ahorcado: Normal_Game_NR instance with the game data (name, rules,
            questions, award).
        player: Player instance (lives, clue count, remaining time).

    Returns:
        bool: True if the player guessed the word, False otherwise.
    """
    print(ahorcado.get_name().capitalize())
    print(ahorcado.get_rules().capitalize())
    # Pick one of the three available questions at random.
    question_n=random.randint(0,2)
    info_question=ahorcado.send_question(question_n)
    word = info_question["answer"].upper()
    word_completion = "_" * len(word)
    guessed = False
    guessed_letters = []
    tries = 6
    n_clue = 1
    print(show_ahorcado(tries))
    print(word_completion)
    print("\n")
    # Loop until the word is guessed, tries run out, the player's time
    # expires, or the player has no lives left.
    while (not guessed) and tries > 0 and player.get_time_left() > datetime.timedelta() and player.get_lives() > 0:
        print(f"Intentos restantes: {tries}")
        print(f"""> {info_question["question"]}""")
        guess = input("Ingrese una letra o palabra: ").upper()
        if len(guess) == 1 and guess.isalpha():
            # Single-letter guess.
            if guess in guessed_letters:
                print(f"Ya habías intentado con la letra {guess}")
            elif guess not in word:
                # Wrong letter: lose a try and a quarter of a life, then
                # offer a clue.
                print(f"{guess} no está en la palabra. Pierdes un cuarto de vida.")
                tries-=1
                player.set_lives(player.get_lives() - 0.25)
                guessed_letters.append(guess)
                n_clue=get_clue(player, info_question, n_clue)
            else:
                # Correct letter: reveal all its positions.
                print("Bien, la letra está en la palabra")
                guessed_letters.append(guess)
                word_as_list = list(word_completion)
                indices = [i for i, letter in enumerate(word) if letter == guess]
                for index in indices:
                    word_as_list[index] = guess
                word_completion = "".join(word_as_list)
                if "_" not in word_completion:
                    guessed = True
        elif len(guess) == len(word) and guess.isalpha():
            # Whole-word guess.
            if guess != word:
                print(f"{guess} no es la palabra. Pierdes un cuarto de vida.")
                tries-=1
                player.set_lives(player.get_lives() - 0.25)
                n_clue=get_clue(player, info_question, n_clue)
            else:
                guessed = True
                word_completion = word
        else:
            print("No es válido")
        print(show_ahorcado(tries))
        print(word_completion)
        print("\n")
    if guessed:
        return True
    elif guessed == False:
        print(f"Perdiste :( \n La palabra era {word}")
        return False
def main_ahorcado(mueble_libros,player):
    """Entry point: build the hangman game from the bookshelf object, run
    it, and grant the award on a win; returns to the library room either way.

    Args:
        mueble_libros: object exposing get_info_game() with the game data.
        player: Player instance.
    """
    os.system("clear")
    info_game=mueble_libros.get_info_game()
    ahorcado = Normal_Game_NR(info_game["name"], info_game["rules"] , info_game["award"],info_game["questions"])
    if ahorcado_game(ahorcado,player) :
        # Mixes an f-string with %-formatting: the f-string fills in the
        # award first, then % substitutes the color escape codes.
        print (f'%sFelicidades, ahora como recompenza obtienes un {ahorcado.get_award()} %s' % (fg(2), attr(0)))
        player.add_award(ahorcado.get_award())
        biblioteca_room.main_biblioteca(player)
    else:
        print("Para la próxima será, no te desanimes.")
        biblioteca_room.main_biblioteca(player)
from mylib.detect.detect_face import create_mtcnn,p_stage,r_stage,o_stage,drawLandMarks,drawDectectBox
from mylib.db.DataSource import WIDERDATA
import tensorflow as tf
import numpy as np
import os
def variable_summary(var, name='mysummary'):
    """Attach a scalar TensorBoard summary to *var* under *name*."""
    tf.summary.scalar(name, var)
def classify_loss(label, logit_prob, mask, eps=1e-7):
    """Masked cross-entropy classification loss.

    Args:
        label: one-hot labels; squeezed to [?, 2] (placeholder Y is [None, 2]).
        logit_prob: predicted probabilities (already softmaxed), same shape.
        mask: per-example weights; zeroes out examples without a valid label.
        eps: numerical floor inside the log.

    Returns:
        Scalar tensor: mean masked cross-entropy.
    """
    label = tf.squeeze(label)
    logit_prob = tf.squeeze(logit_prob)
    # Manual cross-entropy: -sum(label * log(p)) per example.
    loss = -tf.reduce_sum(label * tf.log(logit_prob + eps), 1)
    # loss=tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logit_prob)
    loss = tf.reduce_mean(loss * mask)
    return loss
def lrRate(lr, decay_steps, decay_rate, global_steps):
    """Build an exponentially decayed learning-rate tensor and log it.

    Args:
        lr: initial learning rate.
        decay_steps: steps per decay period.
        decay_rate: multiplicative decay per period.
        global_steps: global step variable driving the schedule.
    """
    ret = tf.train.exponential_decay(
        lr,
        global_steps,
        decay_steps,
        decay_rate,
        name='LearnRate'
    )
    variable_summary(ret, 'LearnRate')
    return ret
def initial_variable(sess):
    """Run the initializers of only the still-uninitialized variables.

    NOTE(review): tf.report_uninitialized_variables() yields variable
    *names* as bytes; each decoded name is used as a scope filter for
    tf.global_variables(). Confirm the name actually matches the scope
    prefix expected by global_variables().
    """
    uninit_vars = tf.report_uninitialized_variables()
    need_init_vars = sess.run(uninit_vars)
    op = []
    for v in need_init_vars:
        name = v.decode("utf-8")
        # Collect the initializer of every variable matching this name.
        init_op = [v.initializer for v in tf.global_variables(name)]
        op.extend(init_op)
    sess.run(op)
'''
Y,Y1表示box的左上角和右下角的坐标,shape=[?,1,1,4]
返回loss=mean(|Y-Y1|^2)
'''
def box_loss(Y, Y1, mask):
    """Masked L2 loss between ground-truth and predicted bounding boxes.

    Y and Y1 hold the boxes' top-left/bottom-right coordinates,
    shape [?, 1, 1, 4]; returns mean(|Y - Y1|^2) over masked examples.
    """
    Y = tf.squeeze(Y)
    Y1 = tf.squeeze(Y1)
    # Y, shape [?, 1, 1, 4]
    ret = tf.reduce_sum((Y - Y1)**2, axis=-1)  # [?, 1, 1]
    # ret=tf.squeeze(ret)  # [?,]
    ret = tf.reduce_mean(ret * mask)
    return ret
def preparePnetLoss(Y, Y_BOX, MASK):
    """Wire the P-Net loss: cross-entropy + 0.5 * box regression.

    Returns (X, Y, Y_BOX, loss), where X is the p-net input tensor looked
    up from the default graph.
    """
    def pnet_loss(Label, Logit, Ybox, YhatBox, mask):
        with tf.name_scope('pnet'):
            with tf.name_scope('entropyLoss'):
                ls1 = classify_loss(Label, Logit, mask[:, 0])
                variable_summary(ls1)
            with tf.name_scope('regressorLoss'):
                ls2 = box_loss(Ybox, YhatBox, mask[:, 1])
                variable_summary(ls2)
            with tf.name_scope('totalLoss'):
                # Box term weighted 0.5 relative to the classification term.
                ls = ls1 + 0.5 * ls2
                variable_summary(ls)
        return ls

    g = tf.get_default_graph()
    # Input tensor
    X = g.get_tensor_by_name("pnet/input:0")
    # Output tensors: face probability and box regression
    YHAT = g.get_tensor_by_name("pnet/prob1:0")
    YHAT_BOX = g.get_tensor_by_name("pnet/conv4-2/BiasAdd:0")
    loss = pnet_loss(Y, YHAT, Y_BOX, YHAT_BOX, MASK)
    return X, Y, Y_BOX, loss
def prepareRnetLoss(Y, Y_BOX, MASK):
    """Wire the R-Net loss: cross-entropy + 0.5 * box regression.

    Returns (X, Y, Y_BOX, loss), where X is the r-net input tensor looked
    up from the default graph.
    """
    def rnet_loss(Label, Logit, Ybox, YhatBox, mask):
        with tf.name_scope('rnet'):
            with tf.name_scope('entropyLoss'):
                ls1 = classify_loss(Label, Logit, mask[:, 0])
                variable_summary(ls1)
            with tf.name_scope('regressorLoss'):
                ls2 = box_loss(Ybox, YhatBox, mask[:, 1])
                variable_summary(ls2)
            with tf.name_scope('totalLoss'):
                ls = ls1 + 0.5 * ls2
                variable_summary(ls)
        return ls

    g = tf.get_default_graph()
    # Input tensor
    X = g.get_tensor_by_name("rnet/input:0")
    # Output tensors: face probability and box regression
    YHAT = g.get_tensor_by_name("rnet/prob1:0")
    YHAT_BOX = g.get_tensor_by_name("rnet/conv5-2/conv5-2:0")
    loss = rnet_loss(Y, YHAT, Y_BOX, YHAT_BOX, MASK)
    return X, Y, Y_BOX, loss
def prepareOnetLoss(Y, Y_BOX, MASK):
    """Wire the O-Net loss: cross-entropy + 0.5 * box regression.

    Returns (X, Y, Y_BOX, loss), where X is the o-net input tensor looked
    up from the default graph.
    """
    def onet_loss(Label, Logit, Ybox, YhatBox, mask):
        with tf.name_scope('onet'):
            with tf.name_scope('entropyLoss'):
                ls1 = classify_loss(Label, Logit, mask[:, 0])
                variable_summary(ls1)
            with tf.name_scope('regressorLoss'):
                ls2 = box_loss(Ybox, YhatBox, mask[:, 1])
                variable_summary(ls2)
            with tf.name_scope('totalLoss'):
                ls = ls1 + 0.5 * ls2
                variable_summary(ls)
        return ls

    g = tf.get_default_graph()
    # Input tensor
    X = g.get_tensor_by_name("onet/input:0")
    # Output tensors: face probability and box regression
    YHAT = g.get_tensor_by_name("onet/prob1:0")
    YHAT_BOX = g.get_tensor_by_name("onet/conv6-2/conv6-2:0")
    loss = onet_loss(Y, YHAT, Y_BOX, YHAT_BOX, MASK)
    return X, Y, Y_BOX, loss
def prepareLossAndInput():
    """Build the shared label/box placeholders, per-term mask, and the
    combined P+R+O training loss.

    Returns:
        list: [pX, rX, oX, Y, YBOX, LOSS] — the three net input tensors,
        the label and box placeholders, and the summed loss.
    """
    Y = tf.placeholder(dtype=tf.float32, shape=[None, 2])
    YBOX = tf.placeholder(dtype=tf.float32, shape=[None, 4])
    # Mask column 0 (classification) is always on; column 1 (box
    # regression) only for examples that actually contain an object.
    haveObject = Y[:, 1]
    one = tf.ones_like(Y[:, 1])
    MASK = tf.stack([one, haveObject], axis=1)
    pX, _, _, pLOSS = preparePnetLoss(Y, YBOX, MASK)
    rX, _, _, rLOSS = prepareRnetLoss(Y, YBOX, MASK)
    oX, _, _, oLOSS = prepareOnetLoss(Y, YBOX, MASK)
    with tf.name_scope('AllLoss'):
        LOSS = pLOSS + rLOSS + oLOSS
        variable_summary(LOSS)
    return [pX, rX, oX, Y, YBOX, LOSS]
# train configuration
batchSize = 128
epochs = 100
logdir = '/home/zxk/PycharmProjects/deepAI/daily/8/studyOfFace/logs'
modeldir = os.path.join(logdir, 'models', 'facedect.ckpt')
path = '/home/zxk/AI/data/widerface/WIDER_train/samples'
summaryPerSteps = 50

lr = 0.001
decay_rate = 0.96
decay_steps = 26100

# define network: load the three MTCNN nets into the graph, then attach
# the combined training loss and its placeholders.
sess = tf.Session()
pnet, rnet, onet = create_mtcnn(sess)
pX, rX, oX, Y, YBOX, LOSS = prepareLossAndInput()

global_steps = tf.Variable(0, trainable=False)
OPTIMIZER = tf.train.AdamOptimizer(lrRate(lr, decay_steps, decay_rate, global_steps)).minimize(LOSS, global_step=global_steps)

MERGED = tf.summary.merge_all()
SUMMARY_WRITER = tf.summary.FileWriter(logdir, sess.graph)
SAVER = tf.train.Saver(max_to_keep=1000)

# before run solver: only initialize variables the checkpoint didn't cover.
source = WIDERDATA(path, True)
# sess.run(tf.global_variables_initializer())
initial_variable(sess)

loopsPerEpochs = source.numExamples // batchSize + 1
for e in range(epochs):
    avg_loss = 0
    # Snapshot at the start of each epoch (epoch number as the global step).
    SAVER.save(sess, modeldir, e)
    for step in range(loopsPerEpochs):
        # Fix: draw `batchSize` samples instead of a hard-coded 128 so the
        # batchSize knob above (also used for loopsPerEpochs) takes effect.
        px, rx, ox, y, ybox = source.next(batchSize, True)
        # Normalize pixels from [0, 255] to roughly [-1, 1).
        px = (px - 127.5) * 0.0078125
        rx = (rx - 127.5) * 0.0078125
        ox = (ox - 127.5) * 0.0078125
        feed = {
            pX: px, rX: rx, oX: ox,
            Y: y, YBOX: ybox
        }
        _loss, _ = sess.run([LOSS, OPTIMIZER], feed_dict=feed)
        avg_loss += _loss / loopsPerEpochs
        if step % summaryPerSteps == 0:
            merged = sess.run(MERGED, feed_dict=feed)
            SUMMARY_WRITER.add_summary(merged, step + e * loopsPerEpochs)
            SUMMARY_WRITER.flush()
    print('End of epochs %d,loss is %.3f' % (e, avg_loss))
SUMMARY_WRITER.close()
|
"""
A peak is defined as adjacent intgers in the array that are strcictly increasing until they reach a tip the highest value in the peak,
at which point they become strictly decreasing. at least 3 integers are required to form a peak
sample input: [1,2,3,3,4,0,10,6,5,-1,-3,2,3]
sample output: 6 //0,10,6,5,-1,-3
TestCases:
input:[1,2,3,4,5,1]
out:6
"""
def longestPeak(array):
    """Return the length of the longest peak in *array*.

    A peak is a run of strictly increasing values immediately followed by
    strictly decreasing values; at least 3 elements are required. Returns
    0 when no peak exists.

    Fixes over the original: the downslope comparison used >= (allowing
    plateaus), the upslope was never extended left past the tip's direct
    neighbour, and peaks of length 3 were never recorded because the best
    length started at 3 with a strict > comparison (its own documented
    test case [1,2,3,4,5,1] returned 0 instead of 6).
    """
    longest = 0
    i = 1
    while i < len(array) - 1:
        # A tip is strictly greater than both of its neighbours.
        if not (array[i - 1] < array[i] > array[i + 1]):
            i += 1
            continue
        # Expand left across the strictly increasing slope.
        left = i - 2
        while left >= 0 and array[left] < array[left + 1]:
            left -= 1
        # Expand right across the strictly decreasing slope.
        right = i + 2
        while right < len(array) and array[right] < array[right - 1]:
            right += 1
        longest = max(longest, right - left - 1)
        # No tip can occur before `right`; skip ahead.
        i = right
    return longest


result = longestPeak([1,2,3,3,4,0,10,6,5,-1,-3,2,3])
print(result)
|
import os
import csv
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
# Fail fast when the target database is not configured.
if not os.getenv("DATABASE_URL"):
    raise RuntimeError("DATABASE_URL is not set")

# Thread-local session factory bound to the configured database.
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
f = open("books.csv")
reader = csv.reader(f)
next(reader, None)
for isbn, title, author, year in reader: # loop gives each column a name
db.execute("INSERT INTO books (isbn, title, author, year) VALUES (:isbn, :title, :author, :year)",
{"isbn": isbn, "title": title, "author": author, "year": year}) # substitute values from CSV line into SQL command, as per this dict
print(f"Added book TITLE: {title} AUTHOR: {author} YEAR: {year}")
db.commit() # transactions are assumed, so close the transaction finished
|
#!/usr/bin/env python3
from PyCRC.CRC16 import CRC16
from cobs import cobs
from reedsolo import RSCodec
class Packetizer():
    """Builds framed, error-protected packets for transmission.

    Frame layout before encoding:
        callsign (6 bytes, upper-case, space padded)
        length (2 bytes, big-endian)
        sequence id (2 bytes, big-endian)
        payload
        CRC16 (2 bytes, big-endian, over all preceding bytes)
    The frame is COBS-encoded, Reed-Solomon encoded (16 parity bytes), and
    delimited with a 0x00 byte at each end.
    """

    def __init__(self, callsign):
        # Rolling 16-bit sequence counter, incremented per packet.
        self.sequenceId = 0
        # Truncate the callsign to at most 6 chars; shorter callsigns are
        # space-padded to exactly 6 (for longer ones the pad is empty).
        callsignLen = 6 if len(callsign) > 6 else len(callsign)
        self.callsign = bytes(callsign[:callsignLen].upper() + ' '*(6-len(callsign)), 'utf-8')
        self.rs = RSCodec(16)

    def createPacket(self, data=b''):
        """Return one fully framed and encoded packet wrapping *data*."""
        packet = bytearray()
        # Header (1 byte - added after padding)
        # Callsign (6 bytes)
        packet += self.callsign
        # Length (2 bytes)
        dataLen = len(data)
        packet += bytes([(dataLen & 0xFF00) >> 8, dataLen & 0xFF])
        # Sequence ID (2 bytes)
        packet += bytes([(self.sequenceId & 0xFF00) >> 8, self.sequenceId & 0xFF])
        self.sequenceId = (self.sequenceId + 1) & 0xFFFF
        # payload (unknown length)
        packet += data
        # CRC and footer (CRC MSB, CRC LSB) (2 bytes)
        crcCalculator = CRC16()
        crc = crcCalculator.calculate(bytes(packet))
        packet += bytes([(crc & 0xFF00) >> 8, crc & 0xFF])
        # COBS first (removes embedded zeros), then Reed-Solomon parity,
        # then 0x00 framing bytes on both ends.
        cobs_packet = cobs.encode(packet)
        encoded_packet = self.rs.encode(cobs_packet)
        return bytes([0]) + encoded_packet + bytes([0])
|
#!/usr/bin/python3
""" Contains the is_kind_of_class function. """
def is_kind_of_class(obj, a_class):
    """Return True when obj is an instance of a_class or of a subclass of it."""
    return isinstance(obj, a_class)
|
import math
import matplotlib.pyplot as plt
class Distribution:
    """Generic distribution holding a mean, a standard deviation and raw data."""

    def __init__(self, mu, sigma):
        self.mean = mu      # current mean estimate
        self.stdev = sigma  # current standard deviation estimate
        self.data = []      # raw data points (list of numbers)

    def calculate_mean(self):
        """Compute the mean of self.data, store it in self.mean and return it.

        BUG FIX: the original called ``self.data.sum()`` but Python lists
        have no ``sum()`` method; use the builtin ``sum()`` instead.
        """
        self.mean = sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Compute the standard deviation of self.data and store it in self.stdev.

        Applies Bessel's correction (n - 1) when ``sample`` is True.  Relies
        on self.mean being up to date, so call calculate_mean() first.
        """
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        squared_diffs = [(i - self.mean) ** 2 for i in self.data]
        variance = sum(squared_diffs) / n
        self.stdev = math.sqrt(variance)

    def read_data(self, file_name, sample=True):
        """Read one integer per line from ``file_name`` into self.data.

        BUG FIX: the original discarded the result of the second
        ``file.readline()`` (infinite loop) and used ``data_list += int(line)``
        which raises TypeError; both are corrected here.  The redundant
        ``file.close()`` inside the ``with`` block is removed.
        """
        with open(file_name) as file:
            data_list = []
            line = file.readline()
            while line:
                data_list.append(int(line))
                line = file.readline()
        self.data = data_list

    def get_mean(self):
        """Return the stored mean."""
        return self.mean

    def get_stdev(self):
        """Return the stored standard deviation."""
        return self.stdev
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Vertex AI operators."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.exceptions import NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.cloud.aiplatform_v1.types import Dataset, ExportDataConfig, ImportDataConfig
from airflow.providers.google.cloud.hooks.vertex_ai.dataset import DatasetHook
from airflow.providers.google.cloud.links.vertex_ai import VertexAIDatasetLink, VertexAIDatasetListLink
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from google.api_core.retry import Retry
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.utils.context import Context
class CreateDatasetOperator(GoogleCloudBaseOperator):
    """
    Creates a Dataset.

    :param project_id: Required. The ID of the Google Cloud project that the dataset belongs to.
    :param region: Required. The ID of the Google Cloud region that the dataset belongs to.
    :param dataset: Required. The Dataset to create. This corresponds to the ``dataset`` field on the
        ``request`` instance; if ``request`` is provided, this should not be set.
    :param retry: Designation of what errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields = ("region", "project_id", "impersonation_chain")
    operator_extra_links = (VertexAIDatasetLink(),)

    def __init__(
        self,
        *,
        region: str,
        project_id: str,
        dataset: Dataset | dict,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.region = region
        self.project_id = project_id
        self.dataset = dataset
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Context):
        hook = DatasetHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        self.log.info("Creating dataset")
        operation = hook.create_dataset(
            project_id=self.project_id,
            region=self.region,
            dataset=self.dataset,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        # Block until the long-running create operation finishes.
        result = hook.wait_for_operation(timeout=self.timeout, operation=operation)
        dataset = Dataset.to_dict(result)
        dataset_id = hook.extract_dataset_id(dataset)
        self.log.info("Dataset was created. Dataset id: %s", dataset_id)
        # Expose the new dataset id to downstream tasks and to the UI link.
        self.xcom_push(context, key="dataset_id", value=dataset_id)
        VertexAIDatasetLink.persist(context=context, task_instance=self, dataset_id=dataset_id)
        return dataset
class GetDatasetOperator(GoogleCloudBaseOperator):
    """
    Get a Dataset.

    :param project_id: Required. The ID of the Google Cloud project that the dataset belongs to.
    :param region: Required. The ID of the Google Cloud region that the dataset belongs to.
    :param dataset_id: Required. The ID of the Dataset to get.
    :param read_mask: Mask specifying which fields of the Dataset to read.
    :param retry: Designation of what errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields = ("region", "dataset_id", "project_id", "impersonation_chain")
    operator_extra_links = (VertexAIDatasetLink(),)

    def __init__(
        self,
        *,
        region: str,
        project_id: str,
        dataset_id: str,
        read_mask: str | None = None,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.region = region
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.read_mask = read_mask
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Context):
        hook = DatasetHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        try:
            self.log.info("Get dataset: %s", self.dataset_id)
            dataset_obj = hook.get_dataset(
                project_id=self.project_id,
                region=self.region,
                dataset=self.dataset_id,
                read_mask=self.read_mask,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
            VertexAIDatasetLink.persist(context=context, task_instance=self, dataset_id=self.dataset_id)
            self.log.info("Dataset was gotten.")
            return Dataset.to_dict(dataset_obj)
        except NotFound:
            # A missing dataset is not a task failure; the operator returns None.
            self.log.info("The Dataset ID %s does not exist.", self.dataset_id)
class DeleteDatasetOperator(GoogleCloudBaseOperator):
    """
    Deletes a Dataset.

    :param project_id: Required. The ID of the Google Cloud project that the dataset belongs to.
    :param region: Required. The ID of the Google Cloud region that the dataset belongs to.
    :param dataset_id: Required. The ID of the Dataset to delete.
    :param retry: Designation of what errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields = ("region", "dataset_id", "project_id", "impersonation_chain")

    def __init__(
        self,
        *,
        region: str,
        project_id: str,
        dataset_id: str,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.region = region
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Context):
        hook = DatasetHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        try:
            self.log.info("Deleting dataset: %s", self.dataset_id)
            operation = hook.delete_dataset(
                project_id=self.project_id,
                region=self.region,
                dataset=self.dataset_id,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
            # Block until the long-running delete operation finishes.
            hook.wait_for_operation(timeout=self.timeout, operation=operation)
            self.log.info("Dataset was deleted.")
        except NotFound:
            # Deleting a non-existent dataset is treated as success.
            self.log.info("The Dataset ID %s does not exist.", self.dataset_id)
class ExportDataOperator(GoogleCloudBaseOperator):
    """
    Exports data from a Dataset.

    :param project_id: Required. The ID of the Google Cloud project that the dataset belongs to.
    :param region: Required. The ID of the Google Cloud region that the dataset belongs to.
    :param dataset_id: Required. The ID of the Dataset from which to export data.
    :param export_config: Required. The desired output location.
    :param retry: Designation of what errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields = ("region", "dataset_id", "project_id", "impersonation_chain")

    def __init__(
        self,
        *,
        region: str,
        project_id: str,
        dataset_id: str,
        export_config: ExportDataConfig | dict,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.region = region
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.export_config = export_config
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Context):
        hook = DatasetHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        self.log.info("Exporting data: %s", self.dataset_id)
        operation = hook.export_data(
            project_id=self.project_id,
            region=self.region,
            dataset=self.dataset_id,
            export_config=self.export_config,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        # Block until the long-running export operation finishes.
        hook.wait_for_operation(timeout=self.timeout, operation=operation)
        self.log.info("Export was done successfully")
class ImportDataOperator(GoogleCloudBaseOperator):
    """
    Imports data into a Dataset.

    :param project_id: Required. The ID of the Google Cloud project that the dataset belongs to.
    :param region: Required. The ID of the Google Cloud region that the dataset belongs to.
    :param dataset_id: Required. The ID of the Dataset into which to import data.
    :param import_configs: Required. The desired input locations. The contents of all input locations will be
        imported in one batch.
    :param retry: Designation of what errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields = ("region", "dataset_id", "project_id", "impersonation_chain")

    def __init__(
        self,
        *,
        region: str,
        project_id: str,
        dataset_id: str,
        import_configs: Sequence[ImportDataConfig] | list,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.region = region
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.import_configs = import_configs
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Context):
        hook = DatasetHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        self.log.info("Importing data: %s", self.dataset_id)
        operation = hook.import_data(
            project_id=self.project_id,
            region=self.region,
            dataset=self.dataset_id,
            import_configs=self.import_configs,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        # Block until the long-running import operation finishes.
        hook.wait_for_operation(timeout=self.timeout, operation=operation)
        self.log.info("Import was done successfully")
class ListDatasetsOperator(GoogleCloudBaseOperator):
    """
    Lists Datasets in a Location.

    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param filter: The standard list filter.
    :param page_size: The standard list page size.
    :param page_token: The standard list page token.
    :param read_mask: Mask specifying which fields to read.
    :param order_by: A comma-separated list of fields to order by, sorted in ascending order. Use "desc"
        after a field name for descending.
    :param retry: Designation of what errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields = ("region", "project_id", "impersonation_chain")
    operator_extra_links = (VertexAIDatasetListLink(),)

    def __init__(
        self,
        *,
        region: str,
        project_id: str,
        filter: str | None = None,
        page_size: int | None = None,
        page_token: str | None = None,
        read_mask: str | None = None,
        order_by: str | None = None,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.region = region
        self.project_id = project_id
        self.filter = filter
        self.page_size = page_size
        self.page_token = page_token
        self.read_mask = read_mask
        self.order_by = order_by
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Context):
        hook = DatasetHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        results = hook.list_datasets(
            project_id=self.project_id,
            region=self.region,
            filter=self.filter,
            page_size=self.page_size,
            page_token=self.page_token,
            read_mask=self.read_mask,
            order_by=self.order_by,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        VertexAIDatasetListLink.persist(context=context, task_instance=self)
        # Serialize every Dataset proto so the result is XCom-friendly.
        return [Dataset.to_dict(result) for result in results]
class UpdateDatasetOperator(GoogleCloudBaseOperator):
    """
    Updates a Dataset.

    :param project_id: Required. The ID of the Google Cloud project that the dataset belongs to.
    :param region: Required. The ID of the Google Cloud region that the dataset belongs to.
    :param dataset_id: Required. The ID of the Dataset to update.
    :param dataset: Required. The Dataset which replaces the resource on the server.
    :param update_mask: Required. The update mask applies to the resource.
    :param retry: Designation of what errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields = ("region", "dataset_id", "project_id", "impersonation_chain")

    def __init__(
        self,
        *,
        project_id: str,
        region: str,
        dataset_id: str,
        dataset: Dataset | dict,
        update_mask: FieldMask | dict,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.project_id = project_id
        self.region = region
        self.dataset_id = dataset_id
        self.dataset = dataset
        self.update_mask = update_mask
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Context):
        hook = DatasetHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        self.log.info("Updating dataset: %s", self.dataset_id)
        result = hook.update_dataset(
            project_id=self.project_id,
            region=self.region,
            dataset_id=self.dataset_id,
            dataset=self.dataset,
            update_mask=self.update_mask,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        self.log.info("Dataset was updated")
        return Dataset.to_dict(result)
|
from .user_base import UserBase
from application.modules.db.base import *
class User(UserBase):
    """Application user row; extends UserBase with profile and role columns."""

    fullname = Column(String(1000))  # display name, up to 1000 characters
    birthdate = Column(Date)
    isAdmin = Column(Boolean, default=False)  # grants administrative privileges
|
#!/usr/bin/env python
#Filename:lspace.py
#This program calculates the space-charge effect on the beam (transverse plane)
#The number of grid points m is preferred to be 2**n
import numpy as np
def space(bunch, m, energy0, energy1, realnum, macronum, length, h):
    """Apply a longitudinal space-charge energy kick to a particle bunch.

    BUG FIX: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    dtype is used instead.  All numerical logic is otherwise unchanged.

    :param bunch: sequence of particle records; index 5 is the phase
        coordinate and index 6 the relative energy deviation.  Modified in
        place and returned.
    :param m: number of grid points (preferably 2**n).
    :param energy0: kinetic energy before the kick [MeV] (assumed -- confirm units).
    :param energy1: kinetic energy after the kick [MeV] (assumed -- confirm units).
    :param realnum: number of real particles represented.
    :param macronum: number of macro particles.
    :param length: section/ring length used to scale the kick.
    :param h: harmonic number used to convert phase to length.
    """
    q = 1.0
    epsilon0 = 8.8541878e-12  # vacuum permittivity [F/m]
    lenth = len(bunch)
    # Charge carried by one macro particle.
    realcharge = realnum / macronum * 1.602e-19
    mass = 938.272046  # proton rest mass [MeV]
    gamma0 = (energy0 + mass) / mass
    beta0 = (1.0 - 1.0 / gamma0 ** 2) ** 0.5  # kept for parity; currently unused
    gamma1 = (energy1 + mass) / mass
    beta1 = (1.0 - 1.0 / gamma1 ** 2) ** 0.5
    bunchnp = np.zeros((lenth, 2), dtype=float)
    density = np.zeros(m, dtype=float)
    # Find the largest absolute phase to size the grid.
    phaL = 0.00001
    for i in range(0, lenth):
        bunchnp[i, 0] = bunch[i][5]
        bunchnp[i, 1] = bunch[i][6]
        if abs(bunchnp[i, 0]) > phaL:
            phaL = abs(bunchnp[i, 0])
    ds = phaL * 2.0 / (m - 1.0)
    # Deposit particles on the grid with linear (cloud-in-cell) weighting.
    for i in range(0, lenth):
        nums = int((bunchnp[i, 0] + phaL) / ds)
        if nums == m - 1:
            nums -= 1
        w1 = (bunchnp[i, 0] + phaL) / ds - nums
        density[nums] += (1 - w1)
        density[nums + 1] += w1
    # Convert phase extent to a physical length and normalize the density.
    L = 2.0 * phaL / gamma0 / h / np.pi / 2 * length
    ds_r = L / (m - 1.0)
    for i in range(0, m):
        density[i] = realcharge / ds_r * density[i] / epsilon0 / 2.0 / np.pi / ds_r ** 2
    # Solve for the potential in Fourier space.
    # NOTE(review): only the final loop iteration (i = m - 1) survives, so a
    # single mode factor scales every harmonic -- looks suspicious; confirm intent.
    fftdens = np.zeros(m, dtype=float)
    for i in range(0, m):
        fftdens = np.fft.fft(density) * (-1 * L ** 2 / np.pi ** 2 / (i + 1.0) ** 2)
    phas = np.fft.ifft(fftdens)
    phas = phas.real
    # Longitudinal field from the potential gradient.
    Ei = np.zeros(m, dtype=float)
    for i in range(0, m - 1):
        Ei[i] = -1 * (phas[i + 1] - phas[i]) / ds_r / gamma0 ** 2
    # Apply the resulting energy kick to every particle.
    # NOTE(review): this lookup bins by int(x/ds) + m/2, while the deposit
    # above binned by int((x + phaL)/ds); confirm the two binnings agree.
    for i in range(0, lenth):
        nums = int(bunchnp[i, 0] / ds) + int(m / 2)
        detaE = bunch[i][6] * beta1 ** 2 * energy1
        bunch[i][6] = (detaE + energy0 + q * Ei[nums] * length / 1.0e6 - energy1) / energy1 / beta1 ** 2
    return bunch
|
class Node:
    """Binary-tree node holding a value and optional left/right children."""

    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right
def serialize(node):
    """Encode a tree as nested '[val,left,right]' text.

    Empty children and nodes with a falsy value serialize to ''.
    """
    if not node or not node.val:
        return ''
    return '[{0},{1},{2}]'.format(node.val, serialize(node.left), serialize(node.right))
def deserialize(string):
    """Rebuild a Node tree from the '[val,left,right]' text made by serialize.

    NOTE(review): the parser is lenient -- trailing characters after a
    subtree are tolerated and silently ignored, so only round-trips of
    serialize() output are reliably supported.
    """
    if string == '':
        return None
    # Skip the opening '[' and read the value up to the first comma.
    cur_index = 1
    root = ''
    while (cur_index < len(string)):
        if string[cur_index] == ',':
            break
        root += string[cur_index]
        cur_index += 1
    cur_index += 1  # step past the comma that ended the value
    # Carve out the left subtree's text and recurse into it.
    left_string, cur_index = returnList(string, cur_index)
    left = deserialize(left_string)
    cur_index += 1  # step past the separator before the right subtree
    right_string, cur_index = returnList(string, cur_index)
    right = deserialize(right_string)
    return Node(root, left, right)
def returnList(string, cur_index):
    """Scan from cur_index and return (consumed_text_minus_last_char, new_index).

    The bracket depth starts at 1 because the caller is already inside one
    '['; scanning stops once the depth returns to 0, i.e. just after the
    closing ']' of that enclosing list has been consumed.
    """
    index = 1   # current bracket nesting depth
    answer = ''
    for x in string[cur_index:]:
        if index == 0:
            break
        elif x == '[':
            index += 1
        elif x == ']':
            index -= 1
        answer += x
        cur_index += 1
    # Drop the final character (the bracket that terminated the scan).
    return answer[:len(answer)-1], cur_index
# Round-trip smoke test: the left-left grandchild must survive (de)serialization.
node = Node('root', Node('left', Node('left.left')), Node('right'))
assert deserialize(serialize(node)).left.left.val == 'left.left'
|
import cs50

# Prompt for a name and echo it back.
s = cs50.get_string("name: ")
print("hello, {}".format(s))
# NOTE(review): recent cs50 library versions require a prompt argument for
# get_int -- confirm the installed version accepts a no-argument call.
i = cs50.get_int()
print("hello, {}".format(i))
# Demonstrate binary floating-point imprecision: 1/10 to 55 decimal places.
print("{:.55f}".format(1 / 10))
# SCRATCH ALL IDEAS
# Suggestions:
# - Use Bootstrap for CSS

# Core library
import webapp2
import jinja2
import os

# NOTE(review): a `global` statement at module scope is a no-op -- the name
# below is already a module-level global; kept for parity with the original.
global jinja_environment
# Jinja template environment rooted at this file's directory.
jinja_environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
# ====================== IMPORT HANDLER FILES HERE ======================
from index import *
from questions import *
from share import *
from editor import *
from wiki import *
# ====================== IMPORT HANDLER FILES HERE ======================
# ========================== ADD HANDLERS HERE ==========================
# URL route -> handler mapping (handler classes come from the star imports above).
app = webapp2.WSGIApplication([
    ('/', IndexHandler),
    ('/questions', QuestionHandler),
    ('/questions/createquestionhtml', CreateQuestionHTMLHandler),
    ('/questions/newquestion', NewQuestionHandler),
    # ('/questions/getquestions', GetQuestionsHandler),
    ('/questions/getquestionshtml', GetQuestionsHTMLHandler),
    ('/questions/getquestionhtml', GetQuestionHTMLHandler),
    ('/questions/newanswer', NewAnswerHandler),
    ('/share', ShareHandler),
    ('/editor', EditorHandler),
    ('/wiki', WikiHandler)
], debug=True)
# ========================== ADD HANDLERS HERE ==========================
|
#sns.heatmap(cm, annot=True, cmap='Reds', fmt='.1f', square=True);
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np

# Fixed seed so any randomized output is reproducible.
np.random.seed(12345)
sns.set()
# NOTE(review): numpy/seaborn/matplotlib are imported twice in this script;
# the duplicates below are harmless but could be removed.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def example():
    """Render the seaborn 'flights' demo dataset as an annotated heatmap (sample1.png)."""
    sns.set()
    flights = sns.load_dataset("flights")
    print(flights)
    print(type(flights))
    # BUG FIX: pandas 2.0 made DataFrame.pivot keyword-only; positional
    # arguments now raise TypeError.
    flights = flights.pivot(index="month", columns="year", values="passengers")
    print(flights)
    print(type(flights))
    hm = sns.heatmap(flights, annot=True, fmt="d")
    figure = hm.get_figure()
    figure.savefig('sample1.png', dpi=400)
def mysample():
    """Render the CSV data as an annotated heatmap saved to my.png."""
    sns.set()
    # NOTE(review): 'hetmap.csv' looks like a typo for 'heatmap.csv' --
    # confirm against the actual data file name before changing it.
    mydata = pd.read_csv('hetmap.csv')
    print(mydata)
    print(type(mydata))
    hm = sns.heatmap(mydata, annot=True)
    figure = hm.get_figure()
    figure.savefig('my.png',dpi=400)
def mysample2():
    """Plot the F1 heatmap from heat2.csv and save it as USD-JPY.pdf."""
    sns.set()
    mydata = pd.read_csv('heat2.csv')
    # BUG FIX: pandas 2.0 made DataFrame.pivot keyword-only; positional
    # arguments now raise TypeError.
    mydata = mydata.pivot(index="Input Time", columns="Pred Delay", values="F1")
    hm = sns.heatmap(mydata, annot=True, fmt='.1f', linewidths=.5, vmin=60, vmax=75)
    figure = hm.get_figure()
    figure.savefig('USD-JPY.pdf', dpi=400)
def mysample3():
    """Plot the MAD-by-layer heatmap from smooth1.csv and save it as smooth_cora.pdf."""
    sns.set()
    mydata = pd.read_csv('smooth1.csv')
    # BUG FIX: pandas 2.0 made DataFrame.pivot keyword-only; positional
    # arguments now raise TypeError.
    mydata = mydata.pivot(index="Model", columns="#Model Layer", values="MAD")
    hm = sns.heatmap(mydata, annot=True, fmt='.3f', linewidths=.5, vmin=-0.2, vmax=1, cmap="YlGnBu")
    figure = hm.get_figure()
    # Widen the left margin so long model names are not clipped.
    figure.subplots_adjust(left=0.3)
    figure.savefig('smooth_cora.pdf', dpi=400)
if __name__ == '__main__':
    # Only the smoothing heatmap is generated by default.
    mysample3()
# Equivalent named-function version of the lambda below:
# def numero_par(num):
#     if num % 2 == 0:
#         return True

# Keep only the even numbers.
numeros = [17, 24, 8, 52, 18, 44, 67]
print(list(filter(lambda num: num%2==0, numeros)))
class Empleado:
    """An employee with a name, a job title and a salary."""

    def __init__(self, nombre, cargo, salario):
        self.nombre = nombre    # employee name
        self.cargo = cargo      # job title
        self.salario = salario  # salary

    def __str__(self):
        # Human-readable summary (output text kept in Spanish, as in the program).
        return "{} que trabaja como {}, tiene un salario de {}$".format(self.nombre, self.cargo, self.salario)
listaEmpleados = [
    Empleado("Juan", "Director", 75000),
    Empleado("Ana", "Gerente", 72000),
    Empleado("Antonio", "Administrativo", 70000),
    Empleado("Sara", "Secretaria", 40000),
    Empleado("Mario", "Botones", 20000),
]
# Scan the employees for the highest salaries: filter() keeps only the
# elements for which the lambda expression evaluates to true.
salarios_altos = filter(lambda empleado: empleado.salario >50000, listaEmpleados)
for empleado_salario in salarios_altos:
    print(empleado_salario)
#!/usr/bin/env python3
# Bootstrap (or reuse) a virtualenv for the "noy_pylama" tool with pinned
# linting dependencies, then hand control to it.
(
    __import__("venvstarter")
    .manager("noy_pylama")
    .add_pypi_deps("pylama==8.3.8", "noseOfYeti==2.3.1")
    .run()
)
|
"""
Flask 消息闪现
一个好的基于GUI的应用程序会向用户提供有关交互的反馈。例如,桌面应用程序使用对话框或消息框,JavaScript使用警报用于类似目的。
在Flask Web应用程序中生成这样的信息性消息很容易。Flask框架的闪现系统可以在一个视图中创建消息,并在名为next的视图函数中呈现它。
Flask模块包含flash()方法。它将消息传递给下一个请求,该请求通常是一个模板。
flash(message, category)
message参数是要闪现的实际消息。
category参数是可选的。它可以是“error”,“info”或“warning”。
为了从会话中删除消息,模板调用get_flashed_messages()。
get_flashed_messages(with_categories, category_filter)
""" |
#!/usr/bin/env python2
"""
mylib_test.py: Tests for mylib.py
"""
from __future__ import print_function
import unittest
from mycpp import mylib # module under test
class MylibTest(unittest.TestCase):
    """Unit tests for the mylib module."""

    def testSplit(self):
        # split_once: no separator -> (whole, None); the tail after the first
        # separator is preserved (may be empty).
        self.assertEqual(('foo', None), mylib.split_once('foo', '='))
        self.assertEqual(('foo', ''), mylib.split_once('foo=', '='))
        self.assertEqual(('foo', 'bar'), mylib.split_once('foo=bar', '='))

    def testFile(self):
        # Smoke test writing through the real stdout/stderr file descriptors.
        stdout = mylib.File(1)
        stderr = mylib.File(2)
        stdout.write('stdout ')
        stdout.writeln('stdout')
        stderr.write('stderr ')
        # NOTE(review): trailing space inside 'stderr ' looks unintentional -- confirm.
        stderr.writeln('stderr ')
if __name__ == '__main__':
    unittest.main()  # run all tests when executed as a script
|
import sys
from pynetdicom import AE
from pynetdicom.sop_class import StudyRootQueryRetrieveInformationModelFind, StudyRootQueryRetrieveInformationModelMove
from pynetdicom.status import QR_MOVE_SERVICE_CLASS_STATUS as move_status_dict
from pynetdicom.status import QR_FIND_SERVICE_CLASS_STATUS as find_status_dict
from pydicom.dataset import Dataset
def pacs_find(hostname, port, host_ae_title, user_ae_title, query_retrieve_level, accession_number, patient_id):
    """Query a PACS with DICOM C-FIND, expecting exactly one matching study.

    Returns a tuple (identifier, matches, msg):
      identifier -- the matched study dataset, or None when zero or multiple
        studies matched or the query failed;
      matches -- number of matches seen (None when the association failed);
      msg -- human-readable status text.
    """
    ae = AE(ae_title=user_ae_title)
    ae.add_requested_context(StudyRootQueryRetrieveInformationModelFind)
    # Build the query identifier; empty-string attributes are return keys.
    ds = Dataset()
    ds.AccessionNumber = accession_number
    ds.PatientID = patient_id
    ds.PatientBirthDate = ''
    ds.StudyDescription = ''
    ds.StudyInstanceUID = ''
    ds.StudyDate = ''
    ds.StudyTime = ''
    ds.ModalitiesInStudy = ''
    ds.StationName = ''
    ds.NumberOfStudyRelatedInstances = ''
    ds.QueryRetrieveLevel = query_retrieve_level
    assoc = ae.associate(hostname, port, ae_title=host_ae_title)
    a = None
    matches = 0
    msg = None
    if assoc.is_established:
        responses = assoc.send_c_find(ds,StudyRootQueryRetrieveInformationModelFind)
        for (status, identifier) in responses:
            # Bail out as soon as a second match shows up: this helper is
            # only meant to resolve a unique study.
            if matches == 2:
                a = None
                # NOTE(review): shutting the AE down mid-association and then
                # calling assoc.release() below is unusual -- confirm intended.
                ae.shutdown()
                msg = 'Multiple studies found for this query.'
                break
            if status:
                msg = find_status_dict[status.Status][1]
                # 0xFF00/0xFF01 are "pending" statuses that carry a match.
                if status.Status in (0xFF00, 0xFF01):
                    a = identifier
                    matches += 1
                # 0x0000 is the final "success" status.
                if status.Status == 0x0000 and a is None:
                    msg = 'No study found for this query.'
            else:
                msg = 'Connection timed out, was aborted or received invalid response.'
        assoc.release()
    else:
        msg = 'Association rejected, aborted or never connected.'
        matches = None
    return a, matches, msg
def pacs_move(hostname, port, host_ae_title, user_ae_title, receiver_ae_title, query_retrieve_level, study_instance_uid):
    """Issue a DICOM C-MOVE for one study and return a human-readable status message."""
    ae = AE(ae_title=user_ae_title)
    ae.add_requested_context(StudyRootQueryRetrieveInformationModelMove)

    # Identifier dataset: which study to move and at what retrieval level.
    ds = Dataset()
    ds.QueryRetrieveLevel = query_retrieve_level
    ds.StudyInstanceUID = study_instance_uid

    assoc = ae.associate(hostname, port, ae_title=host_ae_title)
    if not assoc.is_established:
        return 'Association rejected, aborted or never connected.'

    msg = None
    for status, _identifier in assoc.send_c_move(ds, receiver_ae_title, StudyRootQueryRetrieveInformationModelMove):
        if status:
            msg = move_status_dict[status.Status][1]
        else:
            msg = 'Connection timed out, was aborted or received invalid response.'
    assoc.release()
    return msg
|
import csv
import io
import logging
import sys
import time

from pymavlink import mavutil
from pymavlink.dialects.v20.granum import MAVLink_data_transmission_handshake_message
class Listener:
    """Collects AM2320 temperature/humidity MAVLink messages and appends the
    samples to two CSV files (one for temperature, one for humidity).

    Bug fixes versus the original:
    - ``def Listener:`` was a SyntaxError; this is a class.
    - ``accept_message`` was missing its colon.
    - The CSV files were opened in ``with`` blocks inside ``__init__``, so the
      writers held closed streams; the files now stay open for the listener's
      lifetime (call :meth:`close` when done).
    - ``save_data`` referenced undefined names ``T_writer`` and ``Hwriter``.
    """

    def __init__(self, T_output_dir, H_output_dir):
        self.T_output_dir = T_output_dir
        self.H_output_dir = H_output_dir
        self._T_file = open(self.T_output_dir, mode="w", newline="")
        self.T_writer = csv.DictWriter(self._T_file, ["temperature", "time"])
        self.T_writer.writeheader()
        self._H_file = open(self.H_output_dir, mode="w", newline="")
        self.H_writer = csv.DictWriter(self._H_file, ["humidity", "time"])
        self.H_writer.writeheader()
        # Most recent sample; None until the first AM2320 message arrives.
        self.temperature = None
        self.humidity = None
        self.time = None

    def accept_message(self, msg):
        """Record the sample carried by an AM2320 message; ignore other types."""
        if msg.get_type() == "AM2320":
            # Scaling kept from the original code; presumably the message
            # carries tenths of a unit — TODO confirm against the dialect.
            self.temperature = float(msg.temperature*10.0)
            self.humidity = float(msg.humidity*10.0)
            self.time = msg.time_usec

    def save_data(self):
        """Append the most recent sample to both CSV files, if one exists."""
        if self.time is None:
            # Nothing received yet; the original would have crashed here.
            return
        row = {"temperature": self.temperature, "time": self.time}
        self.T_writer.writerow(row)
        self._T_file.flush()
        row = {"humidity": self.humidity, "time": self.time}
        self.H_writer.writerow(row)
        self._H_file.flush()

    def close(self):
        """Close both output files."""
        self._T_file.close()
        self._H_file.close()
def main():
    """Receive MAVLink messages on UDP port 11000 forever, printing each one
    and logging AM2320 samples to two CSV files."""
    connection = mavutil.mavlink_connection("udpin:0.0.0.0:11000")
    # Bug fix: Listener.__init__ requires the two CSV output paths, but the
    # original called Listener() with no arguments (TypeError at startup).
    listener = Listener("temperature.csv", "humidity.csv")
    while True:
        msg = connection.recv_match(blocking=True)
        print(msg)
        listener.accept_message(msg)
        listener.save_data()

if __name__ == "__main__":
    main()
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import ui, expected_conditions
from selenium.webdriver.chrome.options import Options as chromeOptions
from selenium.webdriver.firefox.options import Options as firefoxOptions
import time
def python_selenium_test(browser_var='chrome'):
    """Log in to the-internet.herokuapp.com demo site and wait for the result.

    ``browser_var`` selects the browser ('chrome' or 'firefox'); include the
    word 'headless' to run without a window.

    Fixes versus the original:
    - ``find_element_by_*`` and the ``chrome_options=`` keyword were removed
      in Selenium 4; ``find_element(By..., ...)`` and ``options=`` work on
      Selenium 3.x and 4.x alike.
    - An unrecognized ``browser_var`` now raises ValueError instead of
      failing later with an UnboundLocalError on ``browser``.
    """
    print('Hello, world!')
    choice = browser_var.lower()
    if 'headless' in choice:
        if 'chrome' in choice:
            chrome_options = chromeOptions()
            chrome_options.add_argument("--headless")
            browser = webdriver.Chrome(options=chrome_options)
        elif 'firefox' in choice:
            firefox_options = firefoxOptions()
            firefox_options.add_argument("--headless")
            browser = webdriver.Firefox(options=firefox_options)
        else:
            raise ValueError("browser_var must mention 'chrome' or 'firefox'")
    elif 'chrome' in choice:
        browser = webdriver.Chrome()
    elif 'firefox' in choice:
        browser = webdriver.Firefox()
    else:
        raise ValueError("browser_var must mention 'chrome' or 'firefox'")
    wait = ui.WebDriverWait(browser, 5)
    browser.get('https://the-internet.herokuapp.com/login')
    wait.until(expected_conditions.visibility_of_element_located(
        (By.ID, 'username')))
    username = browser.find_element(By.ID, 'username')
    username.click()
    username.send_keys('tomsmith')
    wait.until(expected_conditions.visibility_of_element_located(
        (By.ID, 'password')))
    password = browser.find_element(By.ID, 'password')
    password.click()
    password.send_keys('SuperSecretPassword!')
    browser.find_element(By.CSS_SELECTOR, 'button[type=submit]').click()
    wait.until(
        lambda drv: drv.find_element(By.CSS_SELECTOR, '.button > i.icon-signout'))
    time.sleep(5)
    browser.quit()

if __name__ == '__main__':
    python_selenium_test()
|
# Package metadata placeholders; fill these in before release.
__title__ = ''
__version__ = ''
__author__ = ''
__license__ = ''
__copyright__ = ''
# Convenience alias for the package version string.
VERSION = __version__
# Charset used when decoding HTTP header values (HTTP headers are latin-1).
HTTP_HEADER_ENCODING = 'iso-8859-1'
# Marker value selecting ISO 8601 date/time formatting.
ISO_8601 = 'iso-8601'
##5장 연습문제
#1 다음은 Calculator 클래스이다. 위 클래스를 상속하는 UpgradeCalculator를 만들고 값을 뺄 수 있는 minus 메서드를 추가해 보자. 즉 다음과 같이 동작하는 클래스를 만들어야 한다.
class Calculator:
    """Simple accumulator keeping a running total in ``value``."""

    def __init__(self):
        # The total starts at zero.
        self.value = 0

    def add(self, val):
        """Increase the running total by ``val``."""
        self.value = self.value + val
class UpgradeCalculator(Calculator):
    """Calculator extended with a subtraction operation."""

    def minus(self, val):
        """Decrease the running total by ``val``."""
        self.value = self.value - val
# Exercise 1 check: add 10, subtract 7.
cal = UpgradeCalculator()
cal.add(10)
cal.minus(7)
print(cal.value) # prints 3 (10 minus 7)
#2 객체변수 value가 100 이상의 값은 가질 수 없도록 제한하는 MaxLimitCalculator 클래스를 만들어 보자.
class MaxLimitCalculator(Calculator):
    """Calculator whose running total is capped at 100."""

    def add(self, val):
        """Add ``val`` to the total, clamping the result to at most 100."""
        self.value = min(self.value + val, 100)
# Exercise 2 check: exercise the capped calculator.
cal = MaxLimitCalculator()
cal.add(50)  # add 50
cal.add(60)  # add 60
print(cal.value)  # prints 100 (capped)
# 3: predict these results.
all([1, 2, abs(-3)-3])  # False: abs(-3)-3 == 0, which is falsy
chr(ord('a')) == 'a'  # True
# 4: use filter and lambda to drop the negatives from [1, -2, 3, -5, 8, -3].
def positive(x):
    """Return True for strictly positive numbers."""
    return x > 0
print(list(filter(positive, [1, -2, 3, -5, 8, -3])))
print(list(filter(lambda x: x > 0, [1, -2, 3, -5, 8, -3])))
# 5: hex(234) == '0xea'; convert the hex string back to decimal.
int('0xea', 16)
# 6: use map and lambda to triple each element of [1, 2, 3, 4].
list(map(lambda x: x*3, [1, 2, 3, 4]))
# 7: sum of the largest and smallest values in the list.
# Bug fix: renamed from list/max/min so the builtins are not shadowed for the
# rest of the module.
numbers = [-8, 2, 7, 5, -3, 5, 0, 1]
largest = max(numbers)
smallest = min(numbers)
print(largest + smallest)
# 8: round 5.666666666666667 to 4 decimal places.
round(5.666666666666667, 4)
# 9: add up every command-line argument via sys.argv.
import sys
args = sys.argv[1:]  # everything on the command line after the script name
result = 0
for arg in args:
    result += int(arg)
print(result)
# 10: use os to 1) cd to C:\doit, 2) run dir capturing its output, 3) print it.
# 11: use glob to list the .py files in C:\doit.
import glob
glob.glob("c:/doit/*.py")
# 12: use time to print the current date and time in a fixed format.
import time
time.strftime("%Y/%m/%d %H:%M:%S") # %Y year, %m month, %d day, %H hour, %M minute, %S second
# 13: use random to draw 6 distinct lotto numbers between 1 and 45.
import random
result = []
while len(result) < 6:
    num = random.randint(1, 45) # random integer in [1, 45]
    if num not in result:
        result.append(num)
print(result)
'''5. Write a program to print the Fibonacci series up to the number 34.
(Example: 0, 1, 1, 2, 3, 5, 8, 13, … The Fibonacci Series always starts with 0 and 1, the numbers that follow are arrived at by adding the 2 previous numbers.)'''
# Prints 0, 1, 1, 2, 3, 5, 8, 13, 21, 34 — one value per line.
fib, fib1 = 0, 1
print(fib)
print(fib1)
while True:
    fib3 = fib + fib1
    fib, fib1 = fib1, fib3
    print(fib3)
    if fib3 == 34:
        # 34 is the requested upper bound, so stop here.
        break
|
# Copyright 2020 Soil, Inc.
from oslo_log import log as logging
import webob.exc
from soil.wsgi import common as base_wsgi
from soil.authenticate.token import verify_token
|
# Demo 1: write a small roster to teja.csv, then read it back row by row.
import csv
f1=open("teja.csv","w")
objw=csv.writer(f1)
objw.writerow(["sna","name","age","location"])
objw.writerow([1,"teja",23,"kamareddy"])
objw.writerow([2,"vinod",27,"sirsilla"])
objw.writerow([3,"raju",25,"jagithyal"])
objw.writerow([4,"srinivas",28,"mahabubnagar"])
objw.writerow([5,"chandu",23,"rangareddy"])
f1.close()
# NOTE(review): opened without newline="", which inserts blank lines between
# rows on Windows — presumably intentional for the demo; verify.
import csv
f=open("teja.csv","r")
objr = csv.reader(f)
for i in objr:
    print(i)
f.close()
# Demo 2: overwrite the file with one record built from variables.
import csv
f=open("teja.csv","w",newline="")
objw=csv.writer(f)
objw.writerow(["sid","sname","sage"])
a=111
b="teja"
c=23
objw.writerow([a,b,c])
f.close()
# Demo 3: the same record written from literals.
import csv
f=open("teja.csv","w",newline="")
objw=csv.writer(f)
objw.writerow(["sid","sname","sage"])
objw.writerow([111,"teja",23])
f.close()
# Demo 4: record values collected interactively.
import csv
f=open("teja.csv","w",newline="")
objw=csv.writer(f)
objw.writerow(["sid","sname","sage"])
a=int(input("enter student id:"))
b=input("enter student name:")
c=int(input("enter student age:"))
objw.writerow([a,b,c])
f.close()
# Demo 5: keep prompting for employee rows until the user answers "no".
import csv
f=open("teja.csv","w",newline="")
objw=csv.writer(f)
objw.writerow(["eid","ename","esal"])
while True:
    empid=int(input("enter employrr id:"))
    emppname=input("enter employee name:")
    empsal=int(input("entr employee salary:"))
    objw.writerow([empid,emppname,empsal])
    ch=input("if u wanna continue the programme say yes/no:")
    if ch.lower()!="yes":
        print("every line stored succesfully")
        break
f.close()
# Demo 6: collect all rows into a list.
import csv
f=open("teja.csv","r")
objr=csv.reader(f)
# NOTE(review): rebinding ``list`` shadows the builtin for the rest of the
# module.
list=[]
for i in objr:
    list.append(i)
print(list)
f.close()
import csv
f=open("teja.csv","r")
objr=csv.reader(f)
# NOTE(review): ``list`` is now the list object bound above, so this call
# raises TypeError ('list' object is not callable) at runtime.
list=list(objr)
print(list)
f.close()
print(list)
# Append a 10%-raised "hesal" column to every employee row (header included).
list1=[]
for i in list:
    if i[0]=="eid":
        i.append("hesal")
        list1.append(i)
    else:
        hsal= (int(i[2])*0.1)+int(i[2])
        i.append(hsal)
        list1.append(i)
print(list1)
print("********************")
for i in list1:
    print(i)
# Write the augmented rows to tejaa.csv and read them back via seek(0).
import csv
f=open("tejaa.csv","w+",newline="")
objw=csv.writer(f)
objw.writerows(list1)
print("*************")
f.seek(0)
objr=csv.reader(f)
for i in objr:
    print(i)
#by using module type creating csv file
def teja_csv(obj):
    """Prompt for employee records until the user stops, writing each one
    (plus a header row) to the csv writer ``obj``."""
    obj.writerow(["eid", "ename", "esal"])
    while True:
        record = [
            int(input("enter employrr id:")),
            input("enter employee name:"),
            int(input("entr employee salary:")),
        ]
        obj.writerow(record)
        if input("if u wanna continue the programme say yes/no:").lower() != "yes":
            print("every line stored succesfully")
            break
#callin in deferent python file
# Usage example: open the output file here and delegate the interactive
# writing loop to the ``teja`` module's helper defined above.
import csv
import teja
f=open("teja.csv","w")
objw=csv.writer(f)
teja.teja_csv(objw)
f.close()
#by using module reading file
def teja_csv(obj):
    """Print every row yielded by the csv reader ``obj``."""
    for row in obj:
        print(row)
#calling function
# Usage example for the reader helper above.
import csv
import teja
f=open("teja.csv","r")
objr=csv.reader(f)
teja.teja_csv(objr)
f.close()
# students programme
import csv
f=open("teja.csv","w",newline="")
objw=csv.writer(f)
objw.writerow(["sid","sname","m1","m2","m3"])
while True:
    a=input("enter student id:")
    b=input("enter student name:")
    c=input("enter m1 marks:")
    d=input("enter m2 marks:")
    e=input("enter m3 marks:")
    objw.writerow([a,b,c,d,e])
    ch=input("if u wanna continue the programme say yes/no:")
    if ch.lower()!="yes":
        print("every line stored succesfully")
        break
f.close()
import csv
f=open("teja.csv","r")
objr=csv.reader(f)
# NOTE(review): rebinding ``list`` shadows the builtin again.
list=[]
for i in objr:
    list.append(i)
print(list)
f.close()
# Append total, average and letter grade to each student row.
list1=[]
for i in list:
    if i[0]=="sid":
        i.append("total")
        i.append("avg")
        i.append("grade")
        list1.append(i)
    else:
        total=int(i[2])+int(i[3])+int(i[4])
        i.append(total)
        avg=total/3
        i.append(avg)
        # "a" for avg >= 90, "b" for 70 <= avg < 90, otherwise "c".
        grade="a" if (avg)>=90 else "b" if (avg)<90 and (avg)>=70 else "c"
        i.append(grade)
        list1.append(i)
print(list1)
import csv
f=open("tejaa.csv","w+",newline="")
objw=csv.writer(f)
objw.writerows(list1)
f.seek(0)
objr=csv.reader(f)
for i in objr:
    print(i)
#copy file from one file to another file
f=open("abcd.csv","w",newline="")
objw=csv.writer(f)
objw.writerow(["id","name"])
objw.writerow([1111,"teja"])
# NOTE(review): the "w" handle is never closed before reopening below, so the
# buffered rows may not be on disk yet when they are read back — verify.
f.seek(0)
f=open("abcd.csv","r")
objr=csv.reader(f)
list=[]
for i in objr:
    list.append(i)
print(list)
f.close()
import csv
f=open("xyz.csv","w+",newline="")
objw1=csv.writer(f)
objw1.writerows(list)
f.seek(0)
objr=csv.reader(f)
for i in objr:
    print(i)
#merging three files in one file
import csv
f=open("abcd.csv","w",newline="")
objw1=csv.writer(f)
objw1.writerow(["id","name"])
objw1.writerow([1111,"teja"])
f.seek(0)
f=open("abcd.csv","r")
objr1=csv.reader(f)
list1=[]
for i in objr1:
    list1.append(i)
print(list1)
f.close()
import csv
f=open("abcde.csv","w",newline="")
objw2=csv.writer(f)
objw2.writerow(["id","name"])
objw2.writerow([2222,"raju"])
f.seek(0)
f=open("abcde.csv","r")
objr2=csv.reader(f)
list2=[]
for i in objr2:
    # Keeps only the name column.
    list2.append(i[1])
print(list2)
f.close()
import csv
f=open("abcdef.csv","w",newline="")
objw3=csv.writer(f)
objw3.writerow(["id","name"])
objw3.writerow([3333,"chandu"])
f.seek(0)
# NOTE(review): this reopens "abcd.csv", not the "abcdef.csv" just written —
# looks like a copy/paste slip, so list3 holds the wrong file's names.
f=open("abcd.csv","r")
objr3=csv.reader(f)
list3=[]
for i in objr3:
    list3.append(i[1])
print(list3)
f.close()
import csv
f=open("xyz.csv","w+",newline="")
objw4=csv.writer(f)
# NOTE(review): list1 holds row-lists while list2/list3 hold bare strings, so
# the three merged rows are not uniform — confirm the intended output shape.
objw4.writerows([list1,list2,list3])
f.seek(0)
objr4=csv.reader(f)
list4=[]
for i in objr4:
    list4.append(i)
print(list4)
#taking nest line:
import csv
# NOTE(review): the filename "teja,csv" (comma) is probably meant to be
# "teja.csv".
f=open("teja,csv","w+",newline="")
objw=csv.writer(f)
objw.writerow(["sid","sname","m1","m2","m3"])
objw.writerow([111,"teja",66,77,88])
objw.writerow([222,"raju",66,77,88])
objw.writerow([333,"cnu",66,77,88])
f.seek(0)
objr=csv.reader(f)
# Skip the header row before printing the data rows.
next(objr)
for i in objr:
    print(i)
f.close()
#2
# Same as above, but skipping the header by slicing a materialized list.
import csv
f=open("teja,csv","w+",newline="")
objw=csv.writer(f)
objw.writerow(["sid","sname","m1","m2","m3"])
objw.writerow([111,"teja",66,77,88])
objw.writerow([222,"raju",66,77,88])
objw.writerow([333,"cnu",66,77,88])
f.seek(0)
objr=csv.reader(f)
# NOTE(review): ``list`` is still shadowed by the list bound earlier in this
# script, so ``list(objr)`` fails with TypeError at runtime.
list1=list(objr)
for i in range(1,len(list1)):
    print(list1[i])
f.close()
#file is exist or not:
import os
import csv
fname=input("Enter file name:")
a=os.path.isfile(fname)
if a==True:
    print("file is exist")
    ch=input("if u want read the file say yes/no:")
    if ch.lower()!="yes":
        print("thank you for using this application")
        exit()
    else:
        f=open(fname,"r")
        objr=csv.reader(f)
        for i in objr:
            print(i)
else:
    print("we don't have such a file")
|
from locust import HttpUser, LoadTestShape, between, task
class CustomLoadShape(LoadTestShape):
    """This is a custom load shape that does the following:
    - Slowly ramps to 30% of MAX_USERS during the first half of the load cycle.
    - Quickly peaks to 100% of MAX_USERS during the next 10%.
    - Sustains the peak for another 10% of time.
    - Gradually ramps down to 5% of MAX_USERS throughout the rest of the cycle.
    The cycle starts again in a loop.
    """

    # The maximum number of users.
    MAX_USERS = 1000

    # The duration (in seconds) of a single load cycle.
    LOAD_SHAPE_DURATION_SECONDS = 1800

    def tick(self):
        """Return the (user_count, spawn_rate) pair for the current moment."""
        cycle = self.LOAD_SHAPE_DURATION_SECONDS
        elapsed = self.get_run_time() % cycle

        # Phase 1 (first 50%): slow climb to 30% of MAX_USERS.
        ramp_up_end = round(cycle * 0.5)
        ramp_up_users = round(self.MAX_USERS * 0.3)
        if elapsed < ramp_up_end:
            return (ramp_up_users, ramp_up_users / ramp_up_end)

        # Phase 2 (next 10%): spike to the full user count.
        if elapsed < round(cycle * 0.6):
            spike_seconds = round(cycle * 0.1)
            return (self.MAX_USERS, self.MAX_USERS / spike_seconds)

        # Phase 3 (next 10%): hold the peak.
        if elapsed < round(cycle * 0.7):
            return (self.MAX_USERS, self.MAX_USERS)

        # Phase 4 (final 30%): ramp down to 5% of MAX_USERS.
        tail_seconds = round(cycle * 0.3)
        floor_users = round(self.MAX_USERS * 0.05)
        return (floor_users, (self.MAX_USERS - floor_users) / tail_seconds)
class FrontendServiceUser(HttpUser):
    """Simulated user that repeatedly fetches the frontend's root page."""
    # Pause 1-5 seconds between task executions.
    wait_time = between(1, 5)
    @task
    def visit_frontend(self):
        # Single task: GET the site root.
        self.client.get("/")
|
#A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
#
#The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
#
#How many possible unique paths are there?
class Solution(object):
    def uniquePaths(self, m, n):
        """Count the distinct right/down-only paths across an m x n grid.

        :type m: int
        :type n: int
        :rtype: int
        """
        # Bug fix: ``xrange`` does not exist on Python 3; ``range`` behaves
        # equivalently here on both Python 2 and 3.
        grid = [[0] * n for _ in range(m)]
        for i in range(m):
            for j in range(n):
                if i == 0 or j == 0:
                    # Exactly one way to reach any cell on the top row or
                    # left column (move straight along the edge).
                    grid[i][j] = 1
                else:
                    # Paths into (i, j) arrive from above or from the left.
                    grid[i][j] = grid[i - 1][j] + grid[i][j - 1]
        return grid[m - 1][n - 1]
import AppKit
from mojo.glyphPreview import GlyphPreview
from mojo.roboFont import version
inRF3 = version >= "3.0"
if inRF3:
from mojo.glyphPreview import RFGlyphPreviewView
else:
from mojo.glyphPreview import GlyphPreviewView as RFGlyphPreviewView
class GlyphLayerPreviewView(RFGlyphPreviewView):
    """Glyph preview NSView that draws every layer of the current glyph,
    filling each with its layer color, or with one override color when set."""
    def init(self):
        # Objective-C style initializer (Cocoa calls init, not __init__).
        super(GlyphLayerPreviewView, self).init()
        self._color = None  # optional NSColor overriding all layer colors
        return self
    def setColor_(self, color):
        """Set an override fill color for every layer and redraw."""
        self._color = color
        self.refresh()
    def drawRect_(self, rect):
        """Render all layers of the glyph plus any point selection markers."""
        if self.inLiveResize():
            self.calculateScale()
        if self._glyph is None:
            return
        # Shift the drawing up by the buffer margin.
        transform = AppKit.NSAffineTransform.transform()
        transform.translateXBy_yBy_(0, self._buffer)
        transform.concat()
        # Scale to the preview size and move the baseline by the descender.
        transform = AppKit.NSAffineTransform.transform()
        transform.scaleBy_(self._scale)
        transform.translateXBy_yBy_(0, self._descender)
        transform.concat()
        # Flip the y axis: font coordinates are y-up, view coordinates y-down.
        flipTransform = AppKit.NSAffineTransform.transform()
        flipTransform.translateXBy_yBy_(self._shift, self._upm)
        flipTransform.scaleXBy_yBy_(1.0, -1.0)
        flipTransform.concat()
        glyph = self._glyph
        if not inRF3:
            # RF2 layer glyphs are resolved through their base glyph.
            if glyph.isLayer():
                glyph = glyph.getBaseGlyph()
        if inRF3:
            layer = glyph.layer
        else:
            layer = glyph.getParent()
        if self._color is not None:
            # Single override color: set it once for every fill below.
            self._color.set()
        if inRF3:
            layerNames = layer.layerSet.layerOrder
        else:
            layerNames = ["foreground"] + layer.layerOrder
        # Iterate in reverse layer order; each layer is filled in turn.
        for layerName in reversed(layerNames):
            if inRF3:
                layerGlyph = glyph.getLayerGlyph(layerName)
                layerColor = layerGlyph.layer.color
                if layerColor:
                    color = AppKit.NSColor.colorWithCalibratedRed_green_blue_alpha_(layerColor.r, layerColor.g, layerColor.b, layerColor.a)
                else:
                    # Layers without an assigned color default to black.
                    color = AppKit.NSColor.blackColor()
            else:
                layerGlyph = glyph.getLayer(layerName)
                color = layer.getLayerColor(layerName)
            if self._color is None:
                # No override: fill with this layer's own color.
                color.set()
            path = layerGlyph.getRepresentation("defconAppKit.NSBezierPath")
            path.fill()
        if self._selection:
            # Mark selected points with red dots of constant on-screen size.
            selectionPath = AppKit.NSBezierPath.bezierPath()
            radius = 3 / self._scale
            for x, y in self._selection:
                selectionPath.appendBezierPathWithOvalInRect_(AppKit.NSMakeRect(x - radius, y - radius, radius * 2, radius * 2))
            AppKit.NSColor.redColor().set()
            selectionPath.fill()
class GlyphLayerPreview(GlyphPreview):
    """GlyphPreview variant whose NSView draws every layer of the glyph."""
    nsViewClass = GlyphLayerPreviewView

    def setColor(self, color):
        """Forward the override ``color`` to the underlying preview view."""
        view = self.getNSView()
        view.setColor_(color)
|
# encoding: utf-8
# module gi.repository.GLib
# from /usr/lib64/girepository-1.0/GLib-2.0.typelib
# by generator 1.147
# no doc
# imports
import gi._option as option # /usr/lib64/python3.8/site-packages/gi/_option.py
from gi._gi import OptionContext, OptionGroup, Pid, spawn_async
import gi as __gi
import gi.overrides as __gi_overrides
import gi.overrides.GLib as __gi_overrides_GLib
import gobject as __gobject
from .IOChannel import IOChannel
class IOChannel(IOChannel):
    # NOTE: auto-generated introspection skeleton (see the generator header
    # comments); method bodies are stubs and the trailing attributes mirror
    # the real GLib.IOChannel type. Only the set_line_term signature was
    # repaired below — everything else is kept as generated.
    """
    :Constructors:
    ::
        IOChannel()
        new_file(filename:str, mode:str) -> GLib.IOChannel
        unix_new(fd:int) -> GLib.IOChannel
    """
    def add_watch(*args, **kwargs): # reliably restored by inspect
        # no doc
        pass
    def close(self): # real signature unknown; restored from __doc__
        """ close(self) """
        pass
    def copy(self, *args, **kwargs): # real signature unknown
        pass
    def error_from_errno(self, en): # real signature unknown; restored from __doc__
        """ error_from_errno(en:int) -> GLib.IOChannelError """
        pass
    def error_quark(self): # real signature unknown; restored from __doc__
        """ error_quark() -> int """
        return 0
    def flush(self): # real signature unknown; restored from __doc__
        """ flush(self) -> GLib.IOStatus """
        pass
    def get_buffered(self): # real signature unknown; restored from __doc__
        """ get_buffered(self) -> bool """
        return False
    def get_buffer_condition(self): # real signature unknown; restored from __doc__
        """ get_buffer_condition(self) -> GLib.IOCondition """
        pass
    def get_buffer_size(self): # real signature unknown; restored from __doc__
        """ get_buffer_size(self) -> int """
        return 0
    def get_close_on_unref(self): # real signature unknown; restored from __doc__
        """ get_close_on_unref(self) -> bool """
        return False
    def get_encoding(self): # real signature unknown; restored from __doc__
        """ get_encoding(self) -> str """
        return ""
    def get_flags(self): # real signature unknown; restored from __doc__
        """ get_flags(self) -> GLib.IOFlags """
        pass
    def get_line_term(self, length): # real signature unknown; restored from __doc__
        """ get_line_term(self, length:int) -> str """
        return ""
    def init(self): # real signature unknown; restored from __doc__
        """ init(self) """
        pass
    def new_file(self, filename, mode): # real signature unknown; restored from __doc__
        """ new_file(filename:str, mode:str) -> GLib.IOChannel """
        pass
    def next(self): # reliably restored by inspect
        # no doc
        pass
    def read(self, max_count=-1): # reliably restored by inspect
        # no doc
        pass
    def readline(self, size_hint=-1): # reliably restored by inspect
        # no doc
        pass
    def readlines(self, size_hint=-1): # reliably restored by inspect
        # no doc
        pass
    def read_chars(self): # real signature unknown; restored from __doc__
        """ read_chars(self) -> GLib.IOStatus, buf:list, bytes_read:int """
        pass
    def read_line(self): # real signature unknown; restored from __doc__
        """ read_line(self) -> GLib.IOStatus, str_return:str, length:int, terminator_pos:int """
        pass
    def read_line_string(self, buffer, terminator_pos=None): # real signature unknown; restored from __doc__
        """ read_line_string(self, buffer:GLib.String, terminator_pos:int=None) -> GLib.IOStatus """
        pass
    def read_to_end(self): # real signature unknown; restored from __doc__
        """ read_to_end(self) -> GLib.IOStatus, str_return:list """
        pass
    def read_unichar(self): # real signature unknown; restored from __doc__
        """ read_unichar(self) -> GLib.IOStatus, thechar:str """
        pass
    def ref(self): # real signature unknown; restored from __doc__
        """ ref(self) -> GLib.IOChannel """
        pass
    def seek(self, offset, whence=0): # reliably restored by inspect
        # no doc
        pass
    def seek_position(self, offset, type): # real signature unknown; restored from __doc__
        """ seek_position(self, offset:int, type:GLib.SeekType) -> GLib.IOStatus """
        pass
    def set_buffered(self, buffered): # real signature unknown; restored from __doc__
        """ set_buffered(self, buffered:bool) """
        pass
    def set_buffer_size(self, size): # real signature unknown; restored from __doc__
        """ set_buffer_size(self, size:int) """
        pass
    def set_close_on_unref(self, do_close): # real signature unknown; restored from __doc__
        """ set_close_on_unref(self, do_close:bool) """
        pass
    def set_encoding(self, encoding=None): # real signature unknown; restored from __doc__
        """ set_encoding(self, encoding:str=None) -> GLib.IOStatus """
        pass
    def set_flags(self, flags): # real signature unknown; restored from __doc__
        """ set_flags(self, flags:GLib.IOFlags) -> GLib.IOStatus """
        pass
    # Bug fix: the generated signature had the non-default ``length`` after
    # the defaulted ``line_term`` (a SyntaxError that made the whole module
    # unimportable); 0 keeps the stub importable without changing callers.
    def set_line_term(self, line_term=None, length=0): # real signature unknown; restored from __doc__
        """ set_line_term(self, line_term:str=None, length:int) """
        pass
    def shutdown(self, flush): # real signature unknown; restored from __doc__
        """ shutdown(self, flush:bool) -> GLib.IOStatus """
        pass
    def unix_get_fd(self): # real signature unknown; restored from __doc__
        """ unix_get_fd(self) -> int """
        return 0
    def unix_new(self, fd): # real signature unknown; restored from __doc__
        """ unix_new(fd:int) -> GLib.IOChannel """
        pass
    def unref(self): # real signature unknown; restored from __doc__
        """ unref(self) """
        pass
    def write(self, buf, buflen=-1): # reliably restored by inspect
        # no doc
        pass
    def writelines(self, lines): # reliably restored by inspect
        # no doc
        pass
    def write_chars(self, buf, count): # real signature unknown; restored from __doc__
        """ write_chars(self, buf:list, count:int) -> GLib.IOStatus, bytes_written:int """
        pass
    def write_unichar(self, thechar): # real signature unknown; restored from __doc__
        """ write_unichar(self, thechar:str) -> GLib.IOStatus """
        pass
    def _clear_boxed(self, *args, **kwargs): # real signature unknown
        pass
    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass
    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass
    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass
    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass
    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass
    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.
        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass
    def __init__(self, *args, **kwargs): # reliably restored by inspect
        # no doc
        pass
    def __iter__(self): # reliably restored by inspect
        # no doc
        pass
    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass
    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass
    @staticmethod # known case of __new__
    def __new__(cls, filedes=None, filename=None, mode=None, hwnd=None): # reliably restored by inspect
        # no doc
        pass
    def __next__(self): # reliably restored by inspect
        # no doc
        pass
    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass
    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass
    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass
    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass
    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().
        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented. If it returns
        NotImplemented, the normal algorithm is used. Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass
    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass
    buf_size = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    close_on_unref = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    do_encode = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    encoded_read_buf = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    encoding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    funcs = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    is_readable = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    is_seekable = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    is_writeable = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    line_term = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    line_term_len = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    partial_write_buf = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    read_buf = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    read_cd = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    ref_count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    reserved1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    reserved2 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    use_buffer = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    write_buf = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    write_cd = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    _whence_map = {
        0: 1,
        1: 0,
        2: 2,
    }
    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__module__': 'gi.overrides.GLib', '__new__': <staticmethod object at 0x7f85148871f0>, '__init__': <function IOChannel.__init__ at 0x7f8514879ee0>, 'read': <function IOChannel.read at 0x7f8514879f70>, 'readline': <function IOChannel.readline at 0x7f8514888040>, 'readlines': <function IOChannel.readlines at 0x7f85148880d0>, 'write': <function IOChannel.write at 0x7f8514888160>, 'writelines': <function IOChannel.writelines at 0x7f85148881f0>, '_whence_map': {0: <enum G_SEEK_SET of type GLib.SeekType>, 1: <enum G_SEEK_CUR of type GLib.SeekType>, 2: <enum G_SEEK_END of type GLib.SeekType>}, 'seek': <function IOChannel.seek at 0x7f8514888280>, 'add_watch': <function IOChannel.add_watch at 0x7f85148883a0>, '__iter__': <function IOChannel.__iter__ at 0x7f8514888430>, '__next__': <function IOChannel.__next__ at 0x7f85148884c0>, 'next': <function IOChannel.__next__ at 0x7f85148884c0>, '__doc__': None})"
    __gtype__ = None # (!) real value is '<GType GIOChannel (94581033164064)>'
    __info__ = StructInfo(IOChannel)
|
# lvl_m.py corrects pore pressure transducer data for barometric pressure to compute water levels
# and plots the time-series of water levels for stations near Mukilteo, WA
# By Rex L. Baum and Sarah J. Fischer, USGS 2015-2016
# Developed for Python 2.7, and requires compatible versions of numpy, pandas, and matplotlib.
# This script contains parameters specific to a particular problem.
# It can be used as a template for other sites.
#
# Get libraries
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from numpy import ma
from matplotlib.dates import strpdate2num
# Set fontsize for plots
# Use 10pt monospace for all figure text.
font = {'family' : 'monospace',
        'weight' : 'normal',
        'size' : '10'}
matplotlib.rc('font', **font) # pass in the font dict as kwargs
# ------------------------
# Obtain barometric pressure corrections
def readfiles1(file_list, c1):
    """Load the timestamp (column 0) plus one data column ``c1`` from each
    named file.

    Files are TAB delimited, '# Comment' lines are skipped, and timestamps
    are converted to matplotlib date numbers.
    """
    loaded = []
    for fname in file_list:
        arr = np.loadtxt(fname,
                         usecols=(0, c1),
                         comments='#',           # skip comment lines
                         delimiter='\t',
                         converters={0: strpdate2num('%Y-%m-%d %H:%M:%S')},
                         dtype=None)
        loaded.append(arr)
    return loaded
# Import data and assign to arrays
data = readfiles1(['waWatertonA_14d.txt'],5)
column_0 = np.array(data)[0][:,0]
barometricPressure_raw = np.array(data)[0][:,1]
#Compute Barometric pressure in kPa
# Raw counts scaled to pressure then *0.1 into kPa — presumably per the
# logger's calibration sheet; verify the 0.240/500 constants against it.
barometricPressure_kPa=(barometricPressure_raw*0.240+500)*0.1
#-------------------------
# Obtain vibrating-wire piezometer frequency and temperature, readings, scale and plot
# Define functions
def readfiles(file_list, c1, c2, c3, c4):
    """Load the timestamp (column 0) plus four data columns from each named
    file.

    Files are TAB delimited, '# Comment' lines are skipped, and timestamps
    are converted to matplotlib date numbers.
    """
    loaded = []
    for fname in file_list:
        loaded.append(np.loadtxt(
            fname,
            usecols=(0, c1, c2, c3, c4),
            comments='#',                # skip comment lines
            delimiter='\t',
            converters={0: strpdate2num('%Y-%m-%d %H:%M:%S')},
            dtype=None))
    return loaded
def init_plot(title, yMin=0, yMax=3): # Set parameters and dimensions of plots
    """Open a 12x6-inch figure titled ``title`` plus the provisional-data
    disclaimer, with the shared axis labels, y limits, and a grid."""
    plt.figure(figsize=(12, 6))
    plt.title(title + disclamers, fontsize=11)
    plt.xlabel(xtext)
    plt.ylabel(ytext)
    plt.ylim(yMin,yMax)
    plt.grid()
def end_plot(name=None, cols=5):
    """Attach a legend below the axes and, when ``name`` is given, save the
    current figure to that file."""
    plt.legend(bbox_to_anchor=(0, -.15, 1, -0.5), loc=8, ncol=cols, fontsize=10,
               mode="expand", borderaxespad=-1., scatterpoints=1)
    if name:
        plt.savefig(name, bbox_inches='tight')
# Shared figure text: provisional-data disclaimer and axis labels.
disclamers = ('\nUSGS PROVISIONAL DATA'
              '\nSUBJECT TO REVISION'
              )
xtext = ('Date and time')
ytext = ('Water Level, in meters')
# Import raw data and assign to arrays
data = readfiles(['waMVD116_14d.txt'],17,18,19,20) # 17,18,19,20
data_1 = ma.fix_invalid(data, fill_value = 'nan')
column_0 = np.array(data_1)[0][:,0]
freq1 = np.array(data_1)[0][:,1]
thermRes1 = np.array(data_1)[0][:,2]
freq2 = np.array(data_1)[0][:,3]
thermRes2 = np.array(data_1)[0][:,4]
#'VW piezometer Serial numbers
#'VWP #1 = 83807
#'VWP #2 = 83808
#'VWP #1 Calibration Coefficients
C1_A = -0.000095792
C1_B = -0.0023260
C1_C = 828.53
tempCoeff1_m = 0.0380*6.89475729 #'Temp Coefficient, slope(m)
tempCoeff1_b = -0.762*6.89475729 #'Temp Coefficient, y-int(b)
tempOffset1 = 0.3 #'Offset Temp
tempCal1 = 20.2 #'Temp Calibrated
#'VWP #2 Calibration Coefficients
C2_A = -0.00010171
C2_B = 0.016517
C2_C = 772.93
tempCoeff2_m = 0.0208*6.89475729 #'Temp Coefficients
tempCoeff2_b = -0.414*6.89475729
tempOffset2 = 0.1 #'Offset Temp
tempCal2 = 20.2 #'Temp Calibrated
# 'Calculate thermistor temperature 'ThermTemp'
# Resistance-to-temperature polynomial in log(R); presumably a
# Steinhart-Hart-style fit from the sensor data sheet — verify coefficients.
thermTemp1_degC =1/(1.4051E-3+2.369E-4*np.log(thermRes1)+1.019E-7*np.log(thermRes1)**3)
# 'Convert 'ThermTemp' to 'degC' and add 'TempOffset'
thermTemp1_degC = thermTemp1_degC-273.15+tempOffset1
# 'Calculate water level 'pHead' (kPa)
pHead1_kpa=(C1_A*freq1**2)+(C1_B*freq1)+(C1_C)
# 'Apply temperature corrections
pHead1_kpa = pHead1_kpa +((tempCal1-thermTemp1_degC)*tempCoeff1_m)+(tempCoeff1_b)
# Apply barometric pressure correction, 1 standard atmosphere = 101.3 kPa
pHead1_kpa = pHead1_kpa - (barometricPressure_kPa -101.3)
# 'Convert 'pHead' from kpa to m, and shift by small offset
lvl1_m_mvd= pHead1_kpa*0.1019977334 + 0.1
#
# 'Calculate thermistor temperature 'ThermTemp'
thermTemp2_degC=1/(1.4051E-3+2.369E-4*np.log(thermRes2)+1.019E-7*np.log(thermRes2)**3)
# 'Convert 'ThermTemp' to 'degC' and add 'TempOffset'
thermTemp2_degC=thermTemp2_degC-273.15+tempOffset2
# 'Calculate water level 'pHead' (kPa)
pHead2_kpa =(C2_A*freq2**2)+(C2_B*freq2)+(C2_C)
# 'Apply temperature corrections
pHead2_kpa = pHead2_kpa +((tempCal2-thermTemp2_degC)*tempCoeff2_m)+(tempCoeff2_b)
# Apply barometric pressure correction, 1 standard atmosphere = 101.3 kPa
pHead2_kpa = pHead2_kpa - (barometricPressure_kPa -101.3)
# 'Convert pressureKPA to m, and shift by small offset
lvl2_m_mvd = pHead2_kpa*0.1019977334 - 0.2
init_plot('Water Level at Marine View Drive & 116 St. SW')
plt.plot(column_0, lvl1_m_mvd, linestyle='-', color='b', label='Water Level 1')
plt.plot(column_0, lvl2_m_mvd, linestyle='-', color='r', label='Water Level 2')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d\n%H:%M'))
# NOTE(review): the HourLocator set here is replaced by the DayLocator two
# lines down — only the last set_major_locator call takes effect.
plt.gca().xaxis.set_major_locator(mdates.HourLocator())
plt.gca().xaxis.set_minor_locator(mdates.HourLocator(interval=6))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))
end_plot(name='MVD116_lvl.png')
# ------------------------
# Second station: Waterton Circle Station A.
data = readfiles(['waWatertonA_14d.txt'],18,19,20,21) # 18,19,20,21
data_1 = ma.fix_invalid(data, fill_value = 'nan')
column_0 = np.array(data_1)[0][:,0]
freq1 = np.array(data_1)[0][:,1]
thermRes1 = np.array(data_1)[0][:,2]
freq2 = np.array(data_1)[0][:,3]
thermRes2 = np.array(data_1)[0][:,4]
#VW Piezometer Calibration Coefficients
#'VWP #3 10-1786, site3-1 2850
C2_0 = 9.674485E2
C2_1 = -2.293154E-2
C2_2 = -1.132928E-1
C2_3 = -1.070764E-4
C2_4 = 1.155441E-4
C2_5 = -2.123954E-3
#
#'VWP #4 10-1784
C1_0 = 1.075071E3
C1_1 = -3.277043E-2
C1_2 =1.011760E-1
C1_3 =-1.149217E-4
C1_4 =1.661176E-4
C1_5 =-8.454856E-3
#Compute Thermistor Temperature and Water Level
thermTemp1_degC = 1/(1.401E-3 + 2.377E-4*np.log(thermRes1) + 9.730E-8*np.log(thermRes1)**3)-273.15
lvl1_m = (C1_0 + (C1_1*freq1) + (C1_2*thermTemp1_degC) + (C1_3*(freq1**2)) + (C1_4*freq1*thermTemp1_degC) + (C1_5*(thermTemp1_degC**2))) * 0.70432
thermTemp2_degC = 1/(1.401E-3 + 2.377E-4*np.log(thermRes2) + 9.730E-8*np.log(thermRes2)**3)-273.15
lvl2_m = (C2_0 + (C2_1*freq2) + (C2_2*thermTemp2_degC) + (C2_3*(freq2**2)) + (C2_4*freq2*thermTemp2_degC) + (C2_5*(thermTemp2_degC**2))) * 0.70432
# Apply barometric pressure correction, 1 standard atmosphere = 101.3 kPa
lvl1_m = lvl1_m - (barometricPressure_kPa -101.3)/6.895
lvl2_m = lvl2_m - (barometricPressure_kPa -101.3)/6.895
#'Convert water level from PSI to meters and shift by small offset.
lvl1_m_wca = lvl1_m*0.1019977334 - 1.1
lvl2_m_wca = lvl2_m*0.1019977334 + 1.5
init_plot('Water Level at Waterton Circle Station A')
plt.plot(column_0, lvl1_m_wca, linestyle='-', color='b', label='Water Level 3')
plt.plot(column_0, lvl2_m_wca, linestyle='-', color='r', label='Water Level 4')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d\n%H:%M'))
# NOTE(review): as above, the DayLocator below overrides this HourLocator.
plt.gca().xaxis.set_major_locator(mdates.HourLocator())
plt.gca().xaxis.set_minor_locator(mdates.HourLocator(interval=6))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))
end_plot(name='MWatA_lvl.png')
# ------------------------
# --- Waterton Circle Station B: read logger columns 17-20 ---
data = readfiles(['waWatertonB_14d.txt'],17,18,19,20) # 17,18,19,20
data_1 = ma.fix_invalid(data, fill_value = 'nan')
column_0 = np.array(data_1)[0][:,0]    # timestamps
freq1 = np.array(data_1)[0][:,1]       # VW piezometer 1 frequency
thermRes1 = np.array(data_1)[0][:,2]   # thermistor 1 resistance
freq2 = np.array(data_1)[0][:,3]       # VW piezometer 2 frequency
thermRes2 = np.array(data_1)[0][:,4]   # thermistor 2 resistance
#
#'VW piezometer Serial numbers
#'VWP #5 = 2850
#'VWP #6 = 2851
#'VWP #5 Calibration Coefficients
C1_A = 0.000057403
C1_B = -0.0099641
C1_C = -124.16
tempCoeff1_m = -0.0044*6.89475729 #Temp Coefficient, slope(m) (psi -> kPa)
tempCoeff1_b = 0*6.89475729 #Temp Coefficient, y-int(b)
tempOffset1 = -1.6 #Offset Temp
tempCal1 = 23.5 #Temp Calibrated
#'VWP #6 Calibration Coefficients
C2_A = 0.000053431
C2_B = -0.0025086
C2_C = -137.43
tempCoeff2_m = -0.0020*6.89475729 #Temp Coefficients
tempCoeff2_b = 0*6.89475729
tempOffset2 = -1.4 #Offset Temp
tempCal2 = 23.5 #Temp Calibrated
#
# --- Sensor 1: thermistor temperature (quadratic fit in kOhm, already degC),
#     then the same pressure-head pipeline used for the other stations. ---
thermTemp1_degC = (-23.50833439*((thermRes1/1000)**2)) + (227.625007*(thermRes1/1000))+(-341.217356417)
thermTemp1_degC = thermTemp1_degC+tempOffset1
pHead1_kpa = (C1_A*freq1**2)+(C1_B*freq1)+(C1_C)
# Temperature correction relative to the calibration temperature.
pHead1_kpa = pHead1_kpa +((tempCal1-thermTemp1_degC)*tempCoeff1_m)+(tempCoeff1_b)
# Apply barometric pressure correction, 1 standard atmosphere = 101.3 kPa
pHead1_kpa = pHead1_kpa - (barometricPressure_kPa -101.3)
# kPa -> m of water; no datum shift for this station.
lvl1_m_wcb = pHead1_kpa*0.1019977334
#
# --- Sensor 2: identical pipeline. ---
thermTemp2_degC = (-23.50833439*((thermRes2/1000)**2)) + (227.625007*(thermRes2/1000))+(-341.217356417)
thermTemp2_degC = thermTemp2_degC+tempOffset2
pHead2_kpa = (C2_A*freq2**2)+(C2_B*freq2)+(C2_C)
pHead2_kpa = pHead2_kpa +((tempCal2-thermTemp2_degC)*tempCoeff2_m)+(tempCoeff2_b)
# Apply barometric pressure correction, 1 standard atmosphere = 101.3 kPa
pHead2_kpa = pHead2_kpa - (barometricPressure_kPa -101.3)
lvl2_m_wcb = pHead2_kpa*0.1019977334
#
init_plot('Water Level at Waterton Circle Station B')
plt.plot(column_0, lvl1_m_wcb, linestyle='-', color='b', label='Water Level 5')
plt.plot(column_0, lvl2_m_wcb, linestyle='-', color='r', label='Water Level 6')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d\n%H:%M'))
# NOTE(review): removed a redundant set_major_locator(mdates.HourLocator())
# call that was immediately overridden by the DayLocator below.
plt.gca().xaxis.set_minor_locator(mdates.HourLocator(interval=6))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))
end_plot(name='MWatB_lvl.png')
def init_plot1(title, yMin=0, yMax=3):
    """Open a fresh 12x6-inch figure for the combined-stations plot.

    Same disclaimer/axis labels as the other init_plot helper, but with a
    fixed y-axis range (yMin..yMax) instead of data-driven limits.
    """
    plt.figure(figsize=(12, 6))
    plt.grid()
    plt.ylim(yMin, yMax)
    # Module-level disclamers/xtext/ytext globals supply the shared labels.
    plt.title(title + disclamers, fontsize=11)
    plt.ylabel(ytext, fontsize=11)
    plt.xlabel(xtext, fontsize=11)
def end_plot1(name=None, cols=5):
    """Finish the combined plot: multi-column legend, optional save to file."""
    legend_title = ' Sensor Position & Depth, cm\nVH LS-a LS-b'
    plt.legend(loc=2, ncol=cols, fontsize=10, title=legend_title)
    if name:
        # Tight bounding box so the legend is not clipped.
        plt.savefig(name, bbox_inches='tight')
# --- Combined plot of all six sensors across the three stations ---
# NOTE(review): column_0 here is the timestamp array from the *last* file
# read (Waterton B); plotting the MVD/Station-A levels against it assumes
# all loggers share the same sampling times -- confirm.
init_plot1('Water Level at Mukilteo Stations')
plt.plot(column_0, lvl1_m_mvd, linestyle='-', color='b', label='1 178')
plt.plot(column_0, lvl2_m_mvd, linestyle='-', color='r', label='5 297')
plt.plot(column_0, lvl1_m_wca, linestyle='--', color='b', alpha=0, label='1 300')
plt.plot(column_0, lvl2_m_wca, linestyle='--', color='r', alpha=1, label='5 300')
plt.plot(column_0, lvl1_m_wcb, linestyle='-.', color='b', label='1 300')
plt.plot(column_0, lvl2_m_wcb, linestyle='-.', color='r', alpha=0, label='5 175') # alpha=0 hides plot of malfunctioning sensor
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d\n%H:%M'))
# NOTE(review): removed a redundant set_major_locator(mdates.HourLocator())
# call that was immediately overridden by the DayLocator below.
plt.gca().xaxis.set_minor_locator(mdates.HourLocator(interval=6))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))
end_plot1(name='Muk_lvl.png',cols=3)
|
#!/usr/bin/env python3
import requests
def get_position_from_spi(date_str):
    """Return the (latitude, longitude) recorded at *date_str*.

    Queries the Swiss Polar Institute SCDM position API with HTTP basic
    auth.  NOTE(review): ``user`` and ``password`` are not defined in this
    file -- confirm they are supplied at module level (e.g. from a config)
    before this is called, otherwise this raises NameError.

    Parameters
    ----------
    date_str : str
        ISO-8601 timestamp, e.g. '2017-01-28T21:00:00+00:00'.

    Returns
    -------
    tuple
        (latitude, longitude) from the JSON response.

    Raises
    ------
    requests.HTTPError
        On a non-2xx response (previously a non-2xx error page fell
        through to r.json() and raised a confusing decode error).
    """
    auth = (user, password)
    params = {'date': date_str}
    # timeout added: without it requests can block forever on a dead host.
    r = requests.get('https://scdm-ace.swisspolar.ch/api/position',
                     params=params, auth=auth, timeout=30)
    r.raise_for_status()
    result = r.json()
    return result['latitude'], result['longitude']
# Example: look up and print the ship's position at a fixed UTC timestamp.
position = get_position_from_spi('2017-01-28T21:00:00+00:00')
print(position)
|
# encoding: utf-8
# module gi.repository.OSTree
# from /usr/lib64/girepository-1.0/OSTree-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gio as __gi_repository_Gio
import gobject as __gobject
# Variables with simple values
BUILT_FEATURES = 'libcurl libsoup gpgme ex-fsverity libarchive selinux openssl libmount systemd release p2p'
COMMIT_GVARIANT_STRING = '(a{sv}aya(say)sstayay)'
COMMIT_META_KEY_COLLECTION_BINDING = 'ostree.collection-binding'
COMMIT_META_KEY_ENDOFLIFE = 'ostree.endoflife'
COMMIT_META_KEY_ENDOFLIFE_REBASE = 'ostree.endoflife-rebase'
COMMIT_META_KEY_REF_BINDING = 'ostree.ref-binding'
COMMIT_META_KEY_SOURCE_TITLE = 'ostree.source-title'
COMMIT_META_KEY_VERSION = 'version'
DIRMETA_GVARIANT_STRING = '(uuua(ayay))'
FILEMETA_GVARIANT_STRING = '(uuua(ayay))'
MAX_METADATA_SIZE = 10485760
MAX_METADATA_WARN_SIZE = 7340032
META_KEY_DEPLOY_COLLECTION_ID = 'ostree.deploy-collection-id'
ORIGIN_TRANSIENT_GROUP = 'libostree-transient'
RELEASE_VERSION = 3
REPO_METADATA_REF = 'ostree-metadata'
SHA256_DIGEST_LEN = 32
SHA256_STRING_LEN = 64
SUMMARY_GVARIANT_STRING = '(a(s(taya{sv}))a{sv})'
SUMMARY_SIG_GVARIANT_STRING = 'a{sv}'
TIMESTAMP = 0
TREE_GVARIANT_STRING = '(a(say)a(sayay))'
VERSION = 2020.3
VERSION_S = '2020.3'
YEAR_VERSION = 2020
_namespace = 'OSTree'
_version = '1.0'
__weakref__ = None
# functions
def break_hardlink(dfd, path, skip_xattrs, cancellable=None): # real signature unknown; restored from __doc__
""" break_hardlink(dfd:int, path:str, skip_xattrs:bool, cancellable:Gio.Cancellable=None) -> bool """
return False
def checksum_b64_from_bytes(csum): # real signature unknown; restored from __doc__
""" checksum_b64_from_bytes(csum:list) -> str """
return ""
def checksum_b64_to_bytes(checksum): # real signature unknown; restored from __doc__
""" checksum_b64_to_bytes(checksum:str) -> list """
return []
def checksum_bytes_peek(bytes): # real signature unknown; restored from __doc__
""" checksum_bytes_peek(bytes:GLib.Variant) -> list """
return []
def checksum_bytes_peek_validate(bytes): # real signature unknown; restored from __doc__
""" checksum_bytes_peek_validate(bytes:GLib.Variant) -> list """
return []
def checksum_file(f, objtype, cancellable=None): # real signature unknown; restored from __doc__
""" checksum_file(f:Gio.File, objtype:OSTree.ObjectType, cancellable:Gio.Cancellable=None) -> bool, out_csum:list """
return False
def checksum_file_async(f, objtype, io_priority, cancellable=None, callback=None, user_data=None): # real signature unknown; restored from __doc__
""" checksum_file_async(f:Gio.File, objtype:OSTree.ObjectType, io_priority:int, cancellable:Gio.Cancellable=None, callback:Gio.AsyncReadyCallback=None, user_data=None) """
pass
def checksum_file_async_finish(f, result): # real signature unknown; restored from __doc__
""" checksum_file_async_finish(f:Gio.File, result:Gio.AsyncResult) -> bool, out_csum:list """
return False
# Fix: the generated stub declared non-default parameters (objtype, flags,
# out_checksum) after the defaulted stbuf=None, which is a SyntaxError and
# prevents this whole module from importing.  Give the trailing parameters
# None defaults (backward compatible with positional callers).
def checksum_file_at(dfd, path, stbuf=None, objtype=None, flags=None, out_checksum=None, cancellable=None): # real signature unknown; restored from __doc__
    """ checksum_file_at(dfd:int, path:str, stbuf=None, objtype:OSTree.ObjectType, flags:OSTree.ChecksumFlags, out_checksum:str, cancellable:Gio.Cancellable=None) -> bool """
    return False
# Fix: the generated stub declared the non-default parameter objtype after
# defaulted parameters (xattrs=None, in_=None) -- a SyntaxError that breaks
# import of this module.  Give objtype a None default (backward compatible).
def checksum_file_from_input(file_info, xattrs=None, in_=None, objtype=None, cancellable=None): # real signature unknown; restored from __doc__
    """ checksum_file_from_input(file_info:Gio.FileInfo, xattrs:GLib.Variant=None, in_:Gio.InputStream=None, objtype:OSTree.ObjectType, cancellable:Gio.Cancellable=None) -> bool, out_csum:list """
    return False
def checksum_from_bytes(csum): # real signature unknown; restored from __doc__
""" checksum_from_bytes(csum:list) -> str """
return ""
def checksum_from_bytes_v(csum_v): # real signature unknown; restored from __doc__
""" checksum_from_bytes_v(csum_v:GLib.Variant) -> str """
return ""
def checksum_inplace_to_bytes(checksum, buf): # real signature unknown; restored from __doc__
""" checksum_inplace_to_bytes(checksum:str, buf:int) """
pass
def checksum_to_bytes(checksum): # real signature unknown; restored from __doc__
""" checksum_to_bytes(checksum:str) -> list """
return []
def checksum_to_bytes_v(checksum): # real signature unknown; restored from __doc__
""" checksum_to_bytes_v(checksum:str) -> GLib.Variant """
pass
def check_version(required_year, required_release): # real signature unknown; restored from __doc__
""" check_version(required_year:int, required_release:int) -> bool """
return False
def cmd__private__(): # real signature unknown; restored from __doc__
""" cmd__private__() -> OSTree.CmdPrivateVTable """
pass
def cmp_checksum_bytes(a, b): # real signature unknown; restored from __doc__
""" cmp_checksum_bytes(a:int, b:int) -> int """
return 0
def collection_ref_dupv(refs): # real signature unknown; restored from __doc__
""" collection_ref_dupv(refs:list) -> list """
return []
def collection_ref_equal(ref1, ref2): # real signature unknown; restored from __doc__
""" collection_ref_equal(ref1, ref2) -> bool """
return False
def collection_ref_freev(refs): # real signature unknown; restored from __doc__
""" collection_ref_freev(refs:list) """
pass
def collection_ref_hash(ref): # real signature unknown; restored from __doc__
""" collection_ref_hash(ref) -> int """
return 0
def commit_get_content_checksum(commit_variant): # real signature unknown; restored from __doc__
""" commit_get_content_checksum(commit_variant:GLib.Variant) -> str or None """
return ""
def commit_get_object_sizes(commit_variant): # real signature unknown; restored from __doc__
""" commit_get_object_sizes(commit_variant:GLib.Variant) -> bool, out_sizes_entries:list """
return False
def commit_get_parent(commit_variant): # real signature unknown; restored from __doc__
""" commit_get_parent(commit_variant:GLib.Variant) -> str """
return ""
def commit_get_timestamp(commit_variant): # real signature unknown; restored from __doc__
""" commit_get_timestamp(commit_variant:GLib.Variant) -> int """
return 0
def content_file_parse(compressed, content_path, trusted, cancellable=None): # real signature unknown; restored from __doc__
""" content_file_parse(compressed:bool, content_path:Gio.File, trusted:bool, cancellable:Gio.Cancellable=None) -> bool, out_input:Gio.InputStream, out_file_info:Gio.FileInfo, out_xattrs:GLib.Variant """
return False
def content_file_parse_at(compressed, parent_dfd, path, trusted, cancellable=None): # real signature unknown; restored from __doc__
""" content_file_parse_at(compressed:bool, parent_dfd:int, path:str, trusted:bool, cancellable:Gio.Cancellable=None) -> bool, out_input:Gio.InputStream, out_file_info:Gio.FileInfo, out_xattrs:GLib.Variant """
return False
def content_stream_parse(compressed, input, input_length, trusted, cancellable=None): # real signature unknown; restored from __doc__
""" content_stream_parse(compressed:bool, input:Gio.InputStream, input_length:int, trusted:bool, cancellable:Gio.Cancellable=None) -> bool, out_input:Gio.InputStream, out_file_info:Gio.FileInfo, out_xattrs:GLib.Variant """
return False
def create_directory_metadata(dir_info, xattrs=None): # real signature unknown; restored from __doc__
""" create_directory_metadata(dir_info:Gio.FileInfo, xattrs:GLib.Variant=None) -> GLib.Variant """
pass
def diff_dirs(flags, a, b, modified, removed, added, cancellable=None): # real signature unknown; restored from __doc__
""" diff_dirs(flags:OSTree.DiffFlags, a:Gio.File, b:Gio.File, modified:list, removed:list, added:list, cancellable:Gio.Cancellable=None) -> bool """
return False
def diff_dirs_with_options(flags, a, b, modified, removed, added, options=None, cancellable=None): # real signature unknown; restored from __doc__
""" diff_dirs_with_options(flags:OSTree.DiffFlags, a:Gio.File, b:Gio.File, modified:list, removed:list, added:list, options:OSTree.DiffDirsOptions=None, cancellable:Gio.Cancellable=None) -> bool """
return False
def diff_print(a, b, modified, removed, added): # real signature unknown; restored from __doc__
""" diff_print(a:Gio.File, b:Gio.File, modified:list, removed:list, added:list) """
pass
def gpg_error_quark(): # real signature unknown; restored from __doc__
""" gpg_error_quark() -> int """
return 0
def hash_object_name(a=None): # real signature unknown; restored from __doc__
""" hash_object_name(a=None) -> int """
return 0
def kernel_args_cleanup(loc=None): # real signature unknown; restored from __doc__
""" kernel_args_cleanup(loc=None) """
pass
def metadata_variant_type(objtype): # real signature unknown; restored from __doc__
""" metadata_variant_type(objtype:OSTree.ObjectType) -> GLib.VariantType """
pass
def object_from_string(p_str): # real signature unknown; restored from __doc__
""" object_from_string(str:str) -> out_checksum:str, out_objtype:OSTree.ObjectType """
pass
def object_name_deserialize(variant): # real signature unknown; restored from __doc__
""" object_name_deserialize(variant:GLib.Variant) -> out_checksum:str, out_objtype:OSTree.ObjectType """
pass
def object_name_serialize(checksum, objtype): # real signature unknown; restored from __doc__
""" object_name_serialize(checksum:str, objtype:OSTree.ObjectType) -> GLib.Variant """
pass
def object_to_string(checksum, objtype): # real signature unknown; restored from __doc__
""" object_to_string(checksum:str, objtype:OSTree.ObjectType) -> str """
return ""
def object_type_from_string(p_str): # real signature unknown; restored from __doc__
""" object_type_from_string(str:str) -> OSTree.ObjectType """
pass
def object_type_to_string(objtype): # real signature unknown; restored from __doc__
""" object_type_to_string(objtype:OSTree.ObjectType) -> str """
return ""
def parse_refspec(refspec): # real signature unknown; restored from __doc__
""" parse_refspec(refspec:str) -> bool, out_remote:str, out_ref:str """
return False
def raw_file_to_archive_z2_stream(input, file_info, xattrs=None, cancellable=None): # real signature unknown; restored from __doc__
""" raw_file_to_archive_z2_stream(input:Gio.InputStream, file_info:Gio.FileInfo, xattrs:GLib.Variant=None, cancellable:Gio.Cancellable=None) -> bool, out_input:Gio.InputStream """
return False
def raw_file_to_archive_z2_stream_with_options(input, file_info, xattrs=None, options=None, cancellable=None): # real signature unknown; restored from __doc__
""" raw_file_to_archive_z2_stream_with_options(input:Gio.InputStream, file_info:Gio.FileInfo, xattrs:GLib.Variant=None, options:GLib.Variant=None, cancellable:Gio.Cancellable=None) -> bool, out_input:Gio.InputStream """
return False
def raw_file_to_content_stream(input, file_info, xattrs=None, cancellable=None): # real signature unknown; restored from __doc__
""" raw_file_to_content_stream(input:Gio.InputStream, file_info:Gio.FileInfo, xattrs:GLib.Variant=None, cancellable:Gio.Cancellable=None) -> bool, out_input:Gio.InputStream, out_length:int """
return False
def repo_commit_traverse_iter_cleanup(p=None): # real signature unknown; restored from __doc__
""" repo_commit_traverse_iter_cleanup(p=None) """
pass
def repo_finder_resolve_all_async(finders, refs, parent_repo, cancellable=None, callback=None, user_data=None): # real signature unknown; restored from __doc__
""" repo_finder_resolve_all_async(finders:list, refs:list, parent_repo:OSTree.Repo, cancellable:Gio.Cancellable=None, callback:Gio.AsyncReadyCallback=None, user_data=None) """
pass
def repo_finder_resolve_all_finish(result): # real signature unknown; restored from __doc__
""" repo_finder_resolve_all_finish(result:Gio.AsyncResult) -> list """
return []
def repo_finder_result_freev(results): # real signature unknown; restored from __doc__
""" repo_finder_result_freev(results:list) """
pass
def validate_checksum_string(sha256): # real signature unknown; restored from __doc__
""" validate_checksum_string(sha256:str) -> bool """
return False
def validate_collection_id(collection_id=None): # real signature unknown; restored from __doc__
""" validate_collection_id(collection_id:str=None) -> bool """
return False
def validate_remote_name(remote_name): # real signature unknown; restored from __doc__
""" validate_remote_name(remote_name:str) -> bool """
return False
def validate_rev(rev): # real signature unknown; restored from __doc__
""" validate_rev(rev:str) -> bool """
return False
def validate_structureof_checksum_string(checksum): # real signature unknown; restored from __doc__
""" validate_structureof_checksum_string(checksum:str) -> bool """
return False
def validate_structureof_commit(commit): # real signature unknown; restored from __doc__
""" validate_structureof_commit(commit:GLib.Variant) -> bool """
return False
def validate_structureof_csum_v(checksum): # real signature unknown; restored from __doc__
""" validate_structureof_csum_v(checksum:GLib.Variant) -> bool """
return False
def validate_structureof_dirmeta(dirmeta): # real signature unknown; restored from __doc__
""" validate_structureof_dirmeta(dirmeta:GLib.Variant) -> bool """
return False
def validate_structureof_dirtree(dirtree): # real signature unknown; restored from __doc__
""" validate_structureof_dirtree(dirtree:GLib.Variant) -> bool """
return False
def validate_structureof_file_mode(mode): # real signature unknown; restored from __doc__
""" validate_structureof_file_mode(mode:int) -> bool """
return False
def validate_structureof_objtype(objtype): # real signature unknown; restored from __doc__
""" validate_structureof_objtype(objtype:int) -> bool """
return False
def __delattr__(*args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(*args, **kwargs): # real signature unknown
pass
def __eq__(*args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(*args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(*args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __getattr__(*args, **kwargs): # real signature unknown
pass
def __ge__(*args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(*args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(*args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(*args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(*args, **kwargs): # real signature unknown
""" Might raise gi._gi.RepositoryError """
pass
def __le__(*args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(*args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(*args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(*args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(*args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(*args, **kwargs): # real signature unknown
pass
def __setattr__(*args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(*args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(*args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(*args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
# classes
from .AsyncProgress import AsyncProgress
from .AsyncProgressClass import AsyncProgressClass
from .BootconfigParser import BootconfigParser
from .Bootloader import Bootloader
from .BootloaderGrub2 import BootloaderGrub2
from .BootloaderInterface import BootloaderInterface
from .BootloaderSyslinux import BootloaderSyslinux
from .BootloaderUboot import BootloaderUboot
from .BootloaderZipl import BootloaderZipl
from .ChecksumFlags import ChecksumFlags
from .ChecksumInputStream import ChecksumInputStream
from .ChecksumInputStreamClass import ChecksumInputStreamClass
from .ChecksumInputStreamPrivate import ChecksumInputStreamPrivate
from .CmdPrivateVTable import CmdPrivateVTable
from .CollectionRef import CollectionRef
from .CommitSizesEntry import CommitSizesEntry
from .Deployment import Deployment
from .DeploymentUnlockedState import DeploymentUnlockedState
from .DiffDirsOptions import DiffDirsOptions
from .DiffFlags import DiffFlags
from .DiffItem import DiffItem
from .GpgError import GpgError
from .GpgSignatureAttr import GpgSignatureAttr
from .GpgSignatureFormatFlags import GpgSignatureFormatFlags
from .GpgVerifier import GpgVerifier
from .GpgVerifyResult import GpgVerifyResult
from .KernelArgs import KernelArgs
from .KernelArgsEntry import KernelArgsEntry
from .LibarchiveInputStream import LibarchiveInputStream
from .LibarchiveInputStreamClass import LibarchiveInputStreamClass
from .LibarchiveInputStreamPrivate import LibarchiveInputStreamPrivate
from .LzmaCompressor import LzmaCompressor
from .LzmaCompressorClass import LzmaCompressorClass
from .LzmaDecompressor import LzmaDecompressor
from .LzmaDecompressorClass import LzmaDecompressorClass
from .MutableTree import MutableTree
from .MutableTreeClass import MutableTreeClass
from .MutableTreeIter import MutableTreeIter
from .ObjectType import ObjectType
from .Remote import Remote
from .Repo import Repo
from .RepoCheckoutAtOptions import RepoCheckoutAtOptions
from .RepoCheckoutFilterResult import RepoCheckoutFilterResult
from .RepoCheckoutMode import RepoCheckoutMode
from .RepoCheckoutOverwriteMode import RepoCheckoutOverwriteMode
from .RepoCommitFilterResult import RepoCommitFilterResult
from .RepoCommitIterResult import RepoCommitIterResult
from .RepoCommitModifier import RepoCommitModifier
from .RepoCommitModifierFlags import RepoCommitModifierFlags
from .RepoCommitState import RepoCommitState
from .RepoCommitTraverseFlags import RepoCommitTraverseFlags
from .RepoCommitTraverseIter import RepoCommitTraverseIter
from .RepoDevInoCache import RepoDevInoCache
from .RepoFile import RepoFile
from .RepoFileClass import RepoFileClass
from .RepoFileEnumerator import RepoFileEnumerator
from .RepoFileEnumeratorClass import RepoFileEnumeratorClass
from .RepoFinder import RepoFinder
from .RepoFinderAvahi import RepoFinderAvahi
from .RepoFinderAvahiClass import RepoFinderAvahiClass
from .RepoFinderConfig import RepoFinderConfig
from .RepoFinderConfigClass import RepoFinderConfigClass
from .RepoFinderInterface import RepoFinderInterface
from .RepoFinderMount import RepoFinderMount
from .RepoFinderMountClass import RepoFinderMountClass
from .RepoFinderOverride import RepoFinderOverride
from .RepoFinderOverrideClass import RepoFinderOverrideClass
from .RepoFinderResult import RepoFinderResult
from .RepoListObjectsFlags import RepoListObjectsFlags
from .RepoListRefsExtFlags import RepoListRefsExtFlags
from .RepoMode import RepoMode
from .RepoPruneFlags import RepoPruneFlags
from .RepoPruneOptions import RepoPruneOptions
from .RepoPullFlags import RepoPullFlags
from .RepoRemoteChange import RepoRemoteChange
from .RepoResolveRevExtFlags import RepoResolveRevExtFlags
from .RepoTransactionStats import RepoTransactionStats
from .RollsumMatches import RollsumMatches
from .SePolicy import SePolicy
from .SePolicyRestoreconFlags import SePolicyRestoreconFlags
from .StaticDeltaGenerateOpt import StaticDeltaGenerateOpt
from .Sysroot import Sysroot
from .SysrootSimpleWriteDeploymentFlags import SysrootSimpleWriteDeploymentFlags
from .SysrootUpgrader import SysrootUpgrader
from .SysrootUpgraderFlags import SysrootUpgraderFlags
from .SysrootUpgraderPullFlags import SysrootUpgraderPullFlags
from .SysrootWriteDeploymentsOpts import SysrootWriteDeploymentsOpts
from .TlsCertInteraction import TlsCertInteraction
from .TlsCertInteractionClass import TlsCertInteractionClass
from .__class__ import __class__
# variables with complex values
__loader__ = None # (!) real value is '<gi.importer.DynamicImporter object at 0x7feced122d00>'
__path__ = [
'/usr/lib64/girepository-1.0/OSTree-1.0.typelib',
]
__spec__ = None # (!) real value is "ModuleSpec(name='gi.repository.OSTree', loader=<gi.importer.DynamicImporter object at 0x7feced122d00>)"
|
""" Tests on utils module
"""
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import Angle
from ne2001 import utils
def test_parse_lbd():
    """ Parse lbd: floats, Angles, Quantities, arrays, and bad input """
    # Plain floats come back as floats
    gl, gb, dist = utils.parse_lbd(1., 1., 50.)
    assert all(isinstance(val, float) for val in (gl, gb, dist))
    # Angle inputs are reduced to their degree values
    ang = Angle(1.*u.deg)
    gl, gb, dist = utils.parse_lbd(ang, ang, 50.)
    assert np.isclose(ang.value, gl)
    # Distance given as a kpc Quantity
    in_d = 50.*u.kpc
    gl, gb, dist = utils.parse_lbd(ang, ang, in_d)
    assert np.isclose(in_d.value, dist)
    # Distance in pc is rescaled to kpc
    gl, gb, dist = utils.parse_lbd(ang, ang, 500.*u.pc)
    assert np.isclose(dist, 0.5)
    # Array inputs keep their common length
    in_l = [1]*3*u.deg
    gl, gb, dist = utils.parse_lbd(in_l, [1]*3*u.deg, [500]*3*u.pc)
    assert gl.size == gb.size == dist.size == len(in_l)
    # A distance with non-length units, or a non-numeric value, is an error
    with pytest.raises(IOError):
        utils.parse_lbd(ang, ang, 500.*u.s)
    with pytest.raises(IOError):
        utils.parse_lbd(ang, ang, 'abc')
def test_parse_DM():
    """ Parse DM inputs: floats, Quantities, arrays, and bad input """
    # Simple float passes straight through
    in_DM = 20.
    DM = utils.parse_DM(in_DM)
    assert isinstance(DM, float)
    # Quantity in pc / cm**3 is reduced to its value
    in_DM = 20. * u.pc / u.cm**3
    DM = utils.parse_DM(in_DM)
    assert np.isclose(DM, in_DM.value)
    # Array Quantity keeps its length
    in_DM = [1]*10*u.pc / u.cm**3
    DM = utils.parse_DM(in_DM)
    # Fix: the original asserted DM.size == len(DM), a tautology; compare
    # against the *input* length instead (mirrors test_parse_lbd).
    assert DM.size == len(in_DM)
    # Bad inputs raise IOError
    with pytest.raises(IOError):
        DM = utils.parse_DM('abc')
    with pytest.raises(IOError):
        DM = utils.parse_DM(1*u.s)
|
def add_strings(num1, num2):
    """Return the sum of two non-negative decimal-string integers.

    Grade-school digit-by-digit addition with carry, generalizing the
    original script's hard-coded "12345" + "34989".  Fixes from the
    original: no longer shadows the builtins ``str`` and ``sum``, uses
    integer arithmetic (``divmod``) instead of ``int(sum / 10)``, and the
    operands are parameters instead of hard-coded literals.

    Parameters
    ----------
    num1, num2 : str
        Strings consisting only of decimal digits.

    Returns
    -------
    str
        The decimal-string sum of the two numbers.
    """
    # Make num2 the longer operand so the leftover-digits loop reads from it.
    if len(num1) > len(num2):
        num1, num2 = num2, num1
    digits = []  # result digits, least-significant first
    carry = 0
    # Walk both strings right-to-left; zip stops at the shorter string.
    for d1, d2 in zip(reversed(num1), reversed(num2)):
        carry, digit = divmod(int(d1) + int(d2) + carry, 10)
        digits.append(str(digit))
    # Remaining (more significant) digits of the longer number.
    for d2 in reversed(num2[:len(num2) - len(num1)]):
        carry, digit = divmod(int(d2) + carry, 10)
        digits.append(str(digit))
    if carry:
        digits.append(str(carry))
    return ''.join(reversed(digits))


# Same observable behavior as the original script: print 12345 + 34989.
print(add_strings("12345", "34989"))
from app import application
import os
# Resolve template/static folders relative to this file so the app works
# regardless of the current working directory.
ROOTDIR = os.path.abspath(os.path.dirname(__file__))
application.jinja_loader.searchpath = [os.path.normpath(os.path.join(ROOTDIR, 'templates'))]
application.static_folder = os.path.normpath(os.path.join(ROOTDIR, 'static'))
if __name__ == "__main__":
    # NOTE(review): debug=True enables the interactive debugger and must not
    # be used in production -- confirm this entry point is development-only.
    application.run(debug=True)
|
import sys
from Bio import Phylo
from Bio import pairwise2
from Bio.Phylo.Consensus import *
from Bio import SeqIO
import phylogenetic_tree, Alignment
sys.setrecursionlimit(10000000)
"""
Created by Mohsen Naghipourfar on 1/27/18.
Email : mn7697np@gmail.com
"""
NJ_trees = []
UPGMA_trees = []
def merge_all_trees():
    """Build per-gene NJ/UPGMA trees (Marburg excluded) and merge each
    family into one consensus tree via majority consensus (cutoff 0.4,
    found to work best in practice)."""
    phylogenetic_tree.construct_trees_for_all_genes_without_marburg()
    # Per-gene trees accumulated by the phylogenetic_tree module.
    nj_inputs = phylogenetic_tree.NJ_trees
    upgma_inputs = phylogenetic_tree.UPGMA_trees
    merged_nj = majority_consensus(nj_inputs, 0.4)
    merged_upgma = majority_consensus(upgma_inputs, 0.4)
    # Save both merged trees (the UPGMA merge is known to be poor).
    phylogenetic_tree.save_tree(merged_upgma, 'UPGMA_Merged')
    phylogenetic_tree.save_tree(merged_nj, 'NJ_Merged')
def align_all_ebola_genomes():  # Align all ebola genomes to each other
    """Globally align every unordered pair of ebola genomes, save the
    pairwise score matrix, and build UPGMA and NJ trees from it.

    Generalized: the score matrix is sized from the genome list instead of
    the original hard-coded 5x5, so additional genomes are handled.
    """
    Alignment.read_data()
    all_genomes = Alignment.ebolavirus_genomes
    n = len(all_genomes)
    edm = [[0 for _ in range(n)] for _ in range(n)]  # symmetric score matrix
    for g1_id, genome1 in enumerate(all_genomes):
        for g2_id, genome2 in enumerate(all_genomes):
            # g2_id > g1_id: align each unordered pair exactly once.
            if genome1.name != genome2.name and g2_id > g1_id:
                print('Aligning {0} with {1}'.format(genome1.name, genome2.name))
                # match=1, mismatch=-1, gap open=-1, gap extend=0 (Biopython)
                alignments = pairwise2.align.globalms(genome1, genome2, 1, -1, -1, 0)
                score = alignments[0][2]  # score of the best alignment
                edm[g1_id][g2_id] = score
                edm[g2_id][g1_id] = score
    print("genomes aligned!")
    Alignment.save_edit_matrix("all_ebola_genomes", edm)  # Save edit matrix to file
    phylogenetic_tree.construct_tree("all_ebola_genomes", algorithm="UPGMA")  # Construct Tree
    phylogenetic_tree.construct_tree("all_ebola_genomes", algorithm="NJ")  # Construct Tree
def construct_trees_for_all_genes_with_marburg():
    """Build an NJ and a UPGMA tree (including Marburg, with_marburg=2) for
    every gene and collect them into the module-level tree lists."""
    for gene in Alignment.gene_names:
        nj_tree = phylogenetic_tree.construct_tree(gene, with_marburg=2, algorithm="NJ")
        NJ_trees.append(nj_tree)
        upgma_tree = phylogenetic_tree.construct_tree(gene, with_marburg=2, algorithm="UPGMA")
        UPGMA_trees.append(upgma_tree)
# Script entry point: run the three analysis stages in order.
if __name__ == '__main__':
    merge_all_trees()# Section 3.2 in pdf
    align_all_ebola_genomes()# Section 3.3 in pdf
    construct_trees_for_all_genes_with_marburg()# Section 3.4 in pdf
|
"""This is a class which implements Perceptron Learning Algorithm aka PLA
"""
import random
import numpy as np
import matplotlib.pyplot as plt
class PLA:
    """Perceptron Learning Algorithm on random 2-D data.

    A random target line over [-1, 1]^2 labels N random points; run_pla()
    performs 1000 random restarts of PLA and keeps the best hypothesis in
    self.weights_g, reporting the average in-sample error and an estimated
    out-of-sample disagreement.
    """

    def __init__(self, N, iterations):
        self.N = N                # number of training points
        self.iter = iterations    # PLA update steps per restart
        self.generate_target()
        self.generate_training_data()

    def generate_target(self):
        """Pick a random target line through two random points in [-1, 1]^2,
        stored in homogeneous form so that sign(target . (1, x, y)) labels a point."""
        xA, yA, xB, yB = [random.uniform(-1, 1) for _ in range(4)]
        self.target = np.array([xB * yA - xA * yB, yB - yA, xA - xB])

    def generate_training_data(self):
        """Sample N points and label each with the sign of the target line."""
        self.training_data = []
        for _ in range(self.N):
            x, y = [random.uniform(-1, 1) for _ in range(2)]
            X = np.array([1, x, y])
            s = int(np.sign(self.target.T.dot(X)))
            self.training_data.append((X, s))

    def get_misclassified_count(self):
        """Return how many training points self.weights misclassifies."""
        count = 0
        for X, s in self.training_data:
            if np.sign(self.weights.T.dot(X)) != s:
                count += 1
        return count

    def get_disagreement(self):
        """Monte-Carlo estimate (1000 fresh samples) of P[f(x) != g(x)]."""
        count = 0
        for _ in range(1000):
            x, y = [random.uniform(-1, 1) for _ in range(2)]
            X = np.array([1, x, y])
            s1 = int(np.sign(self.target.T.dot(X)))
            s2 = int(np.sign(self.weights_g.T.dot(X)))
            if s1 != s2:
                count += 1
        return count / 1000.0

    def get_misclassified_point(self):
        """Return a random misclassified (X, s) pair, or [None, None] if none."""
        mis_pts = [(X, s) for X, s in self.training_data
                   if np.sign(self.weights.T.dot(X)) != s]
        if not mis_pts:
            return [None, None]
        return mis_pts[random.randrange(0, len(mis_pts))]

    def run_pla(self):
        """Run 1000 random restarts of PLA, tracking the best weight vector."""
        temp = self.N + 1       # best (lowest) misclassification count so far
        self.mis_count = 0
        for _ in range(1000):
            self.weights = np.zeros(3)
            for _ in range(self.iter):
                X, s = self.get_misclassified_point()
                # BUG FIX: `X == None` on a numpy array is an elementwise
                # comparison whose truth value is ambiguous; use identity.
                if X is None:
                    break
                self.weights += s * X
            temp1 = self.get_misclassified_count()
            self.mis_count += temp1
            if temp1 < temp:
                self.weights_g = self.weights
                temp = temp1
        self.mis_count = self.mis_count / 1000.0
        self.disagreement = self.get_disagreement()
        # BUG FIX: Python 2 print statement -> print() function.
        print(self.mis_count, self.disagreement)

    def traing_error(self):
        # Placeholder; name keeps the original (misspelled) public spelling
        # so existing callers continue to work.
        pass

    def test_error(self):
        pass
|
from __future__ import print_function
import lldb
import os
import json
def handle_call(debugger, raw_args, result, internal_dict):
    """Receives and handles the call to write from lldb.

    Reads ~/simulator_debug.json on the host, double-encodes it (so it can be
    embedded as an Objective-C string literal), and evaluates an expression in
    the debugged process that writes the JSON into the app sandbox's
    Documents/simulator_debug.json. No-op if the host file does not exist.
    """
    config_filepath = os.path.expanduser('~/simulator_debug.json')
    if not os.path.exists(config_filepath):
        return
    # JSON escaped string: @see https://stackoverflow.com/questions/25242262/dump-to-json-adds-additional-double-quotes-and-escaping-of-quotes
    with open(config_filepath, 'r') as file:
        separators = (',', ':')
        json_object = json.load(file)
        # Outer dumps() quotes/escapes the compact JSON so it is a valid
        # Objective-C @"..." literal when substituted below.
        json_string = json.dumps(json.dumps(json_object, indent=None, separators=separators, sort_keys=True))
    # dataUsingEncoding:4 is NSUTF8StringEncoding.
    s = '@import Foundation;' \
        'NSString *filePath = NSHomeDirectory();' \
        'filePath = [filePath stringByAppendingPathComponent:@"Documents/simulator_debug.json"];' \
        '[[NSFileManager defaultManager] createFileAtPath:filePath contents:[@{0} dataUsingEncoding:4] attributes:nil];' \
        ''.format(json_string)
    res = lldb.SBCommandReturnObject()
    # NOTE(review): uses the module-global lldb.debugger rather than the
    # `debugger` parameter — confirm this is intentional.
    interpreter = lldb.debugger.GetCommandInterpreter()
    interpreter.HandleCommand("exp -l objc -O -- " + s, res)
    output = res.GetOutput() or res.GetError()
    #print(res)
    #print(output, end='')
def __lldb_init_module(debugger, internal_dict):
    """Register the `config` command when lldb imports this script."""
    module_name = os.path.splitext(os.path.basename(__file__))[0]
    debugger.HandleCommand('command script add -f %s.handle_call config' % module_name)
    print('The "config" command has been loaded and is ready for use.')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlite3 as lite
import sys
from tabulate import tabulate
import dbf
# Python 2 script: dump the schema and contents of `test_output_table`.
con = lite.connect('db/test.db')
with con:
    cur = con.cursor()
    # Column metadata: (cid, name, type, ...) per PRAGMA table_info.
    cur.execute('PRAGMA table_info(test_output_table)')
    data = cur.fetchall()
    for d in data:
        # print d[0], d[1], d[2]
        print "%s: %s \t%s" % (d[0] , d[1] , d[2])
    print "\nPrint Table \"temptable\" data with columns names: "
    # cur.execute("SELECT * FROM final where id < 20")
    cur.execute("SELECT * FROM test_output_table")
    # cursor.description holds one 7-tuple per column; [0] is the name.
    col_names = [cn[0] for cn in cur.description]
    rows = cur.fetchall()
    # print tabulate(rows, col_names, tablefmt="grid")
    print tabulate(rows, col_names)
|
# 题目:猴子吃桃问题:猴子第一天摘下若干个桃子,
# 当即吃了一半,还不瘾,又多吃了一个第二天早上又将剩下的桃子吃掉一半,
# 又多吃了一个。以后每天早上都吃了前一天剩下的一半零一个。
# 到第10天早上想再吃时,见只剩下一个桃子了。求第一天共摘了多少。
# 程序分析:采取逆向思维的方法,从后往前推断。
# 程序源代码
# ======================================================
# Monkey-and-peaches puzzle, solved backwards: on day 10 one peach remains;
# each earlier day the monkey ate half the peaches plus one more, so
# previous = (remaining + 1) * 2. Iterate 9 steps back to day 1.
x2 = 1
for day in range(9, 0, -1):
    x2 = 2 * (x2 + 1)
    x1 = x2
    print(x1)
# ======================================================
# 输出结果:
# 1534
|
import pandas as pd
import numpy as np
# Build a 5x5 DataFrame of random integers in [0, 10) and show it.
df2 = pd.DataFrame(np.random.randint(0, 10, size=(5, 5)), columns=list('abcde'))
print(df2)
|
from pygame import camera, image, init as pygame_init, quit as pygame_quit
class CameraController:
    """Thin wrapper around pygame's camera module for capturing still images.

    Initialization order matters: pygame must be initialized before the
    camera subsystem, and the device must be started before capture.
    """

    def __init__(self, device_name='/dev/video0', width=640, height=480):
        # Initialize pygame first, then the camera subsystem.
        pygame_init()
        camera.init()
        self.device = camera.Camera(device_name, (width, height))
        self.device.start()

    def capture_image(self, filename):
        """Grab one frame from the device and save it to *filename*."""
        captured_image = self.device.get_image()
        image.save(captured_image, filename)

    def cleanup(self):
        """Shut down the camera subsystem and pygame (call when done)."""
        camera.quit()
        pygame_quit()
|
import hashlib
from django import forms
from crm import models
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
class BSForm(forms.ModelForm):
    """ModelForm base class that tags every non-boolean field's widget with
    Bootstrap's `form-control` CSS class."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for field_obj in self.fields.values():
            if isinstance(field_obj, forms.BooleanField):
                continue  # checkboxes don't take form-control
            field_obj.widget.attrs.update({'class': "form-control"})
class RegForm(BSForm):
    """User registration form: verifies the two password fields match and
    stores the MD5 digest of the password in cleaned_data."""

    password = forms.CharField(widget=forms.PasswordInput, min_length=6, label='密码')
    re_pwd = forms.CharField(widget=forms.PasswordInput, label='确认密码')

    class Meta:
        model = models.UserProfile
        fields = '__all__'  # ['username','password']
        exclude = ['memo', 'is_active']
        labels = {
            'username': '用户名'
        }
        widgets = {
            'password': forms.PasswordInput(attrs={'class': "form-control", 'k1': 'v1'}),
        }
        error_messages = {
            'password': {
                'required': '必填的'
            }
        }

    def clean(self):
        """Cross-field validation: passwords must match; on success replace
        the plain password with its MD5 hex digest."""
        pwd = self.cleaned_data.get('password', '')
        re_pwd = self.cleaned_data.get('re_pwd', '')
        if pwd != re_pwd:
            # Attach the error to the field and abort cleaning.
            self.add_error('re_pwd', '两次密码不一致')
            # BUG FIX: the raise message was misspelled ('不一直' -> '不一致').
            raise ValidationError('两次密码不一致')
        md5 = hashlib.md5()
        md5.update(pwd.encode('utf-8'))
        self.cleaned_data['password'] = md5.hexdigest()
        return self.cleaned_data
class CustomerForm(BSForm):
    """Customer form; the course widget drops the bootstrap class that the
    BSForm base applied."""

    class Meta:
        model = models.Customer
        # BUG FIX: was misspelled `fileds`, which Django silently ignored
        # (only `exclude = []` made all fields appear). Same field set, but
        # now declared explicitly.
        fields = "__all__"
        exclude = []

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['course'].widget.attrs.pop('class')
class ConsultForm(BSForm):
    """Consult-record form; customer and consultant choices are narrowed to
    the consultant bound to the record instance."""

    class Meta:
        model = models.ConsultRecord
        # BUG FIX: was misspelled `fileds` (silently ignored by Django);
        # only `exclude = []` exposed every field. Same behavior, explicit.
        fields = "__all__"
        exclude = []

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Only this consultant's own customers are selectable.
        customer_choices = [(customer.pk, str(customer))
                            for customer in self.instance.consultant.customers.all()]
        customer_choices.insert(0, ('', '----------'))
        self.fields['customer'].choices = customer_choices
        # The consultant is fixed to the record's consultant.
        self.fields['consultant'].choices = [(self.instance.consultant.pk, self.instance.consultant), ]
class EnrollmentForm(BSForm):
    """Enrollment form; locks the customer to the instance's customer and
    limits class choices to that customer's class list."""

    class Meta:
        model = models.Enrollment
        fields = '__all__'
        exclude = []

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        customer = self.instance.customer
        self.fields['customer'].choices = [(customer.pk, customer)]
        self.fields['enrolment_class'].choices = [
            (klass.pk, str(klass)) for klass in customer.class_list.all()
        ]
|
import unittest
from datetime import datetime, timedelta, date
import decimal
from django.test import Client
from my_app.calendar.utils import calendarUtils
import pandas as pd
class SimpleTest(unittest.TestCase):
    """Smoke test for calendarUtils.getCalenderforInstruments."""

    def setUp(self):
        # Every test needs a client.
        # self.client = Client()
        pass

    def test_getCalenderforInstruments(self):
        print('check getCalenderforInstruments')
        util = calendarUtils()
        frame = util.getCalenderforInstruments([4460, 4461, 4462])
        # Expect exactly one row with calendarid == 24.
        matching = frame.loc[(frame['calendarid'] == 24)]
        self.assertEqual(len(matching) == 1, True)
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from .models import *
# Register your models here.
# Define an inline admin descriptor for Employee model
# which acts a bit like a singleton
class ManagerInline(admin.StackedInline):
    """Inline editor for the one-to-one `manager` profile on the User page."""
    model = manager
    can_delete = False
    verbose_name_plural = 'manager'
# Define a new User admin
class UserAdmin(BaseUserAdmin):
    """Stock user admin extended with the manager-profile inline."""
    inlines = (ManagerInline,)
class key_user(admin.ModelAdmin):
    """Admin for `key`: superusers see everything; tenant managers see a
    reduced column set filtered to their own tenant's undeleted keys."""

    def get_list_display(self, request):
        """Superusers additionally see deleted/key_value/created_for."""
        if request.user.is_superuser:
            return ('description', 'parent', 'key_type', 'active', 'deleted', 'key_value', 'created_for')
        return ('description', 'parent', 'key_type', 'active')

    def get_queryset(self, request):
        # NOTE(review): super(admin.ModelAdmin, self) skips ModelAdmin's own
        # get_queryset in the MRO — confirm this is intentional before
        # changing it to plain super().
        qs = super(admin.ModelAdmin, self).get_queryset(request)
        if request.user.is_superuser:
            return qs
        return qs.filter(deleted=False, created_for=request.user.manager.tenant)

    def delete_queryset(self, request, queryset):
        """Delete row-by-row so each model's delete() override runs."""
        for model in queryset:
            model.delete()

    def save_model(self, request, obj, form, change):
        """Default tenant/creator from the requesting user, and attach
        non-master keys to the tenant's master ('M') key when no parent
        was chosen."""
        # BUG FIX: identity tests (`is None`) instead of `== None`.
        if obj.created_for is None:
            obj.created_for = request.user.manager.tenant
        if obj.created_by is None:
            obj.created_by = request.user
        if obj.parent is None and not obj.key_type == keyType.objects.get(id='M'):
            parent = key.objects.get(key_type=keyType.objects.get(id='M'), created_for=request.user.manager.tenant)
            obj.parent = parent
            print(parent)
        super().save_model(request, obj, form, change)

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        """Restrict the parent-key dropdown to the manager's own tenant."""
        if db_field.name == "parent":
            print(db_field.name)
            kwargs["queryset"] = key.objects.filter(created_for=request.user.manager.tenant)
            print(kwargs["queryset"])
        return super().formfield_for_foreignkey(db_field, request, **kwargs)
# Swap the stock User admin for ours (with the manager inline) and register
# the key/tenant models.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(key, key_user)
admin.site.register(keyType)
admin.site.register(tenant)
|
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal, LAParams
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfpage import PDFTextExtractionNotAllowed
from pdfminer.pdfparser import PDFParser
from PyPDF2 import PdfFileReader
def extract_information(pdf_path):
    """Print form fields, page count and file name of *pdf_path* via PyPDF2.

    NOTE(review): getFormTextFields() can raise on PDFs without an AcroForm
    — confirm the inputs always carry form fields.
    """
    with open(pdf_path, 'rb') as f:
        pdf = PdfFileReader(f)
        information = pdf.getDocumentInfo()  # currently unused; kept from original
        number_of_pages = pdf.getNumPages()
        # BUG FIX: dict.get() requires a key argument, so the original
        # `getFormTextFields().get()` always raised TypeError. Print the
        # whole field mapping instead.
        print(pdf.getFormTextFields())
        print(number_of_pages)
        print(f.name)
def parse(path, save_name):
    """Extract horizontal text boxes from an open PDF file object *path* and
    append them, one per line, to the file *save_name*.

    Raises PDFTextExtractionNotAllowed when the document forbids extraction.
    """
    parser = PDFParser(path)
    document = PDFDocument(parser)
    if not document.is_extractable:
        raise PDFTextExtractionNotAllowed
    rsrcmgr = PDFResourceManager()
    laparams = LAParams()
    # BUG FIX: the keyword was misspelled `laparms`, so PDFPageAggregator
    # raised TypeError (unexpected keyword argument) at runtime.
    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    for page in PDFPage.create_pages(document):
        interpreter.process_page(page)
        layout = device.get_result()
        for element in layout:
            if isinstance(element, LTTextBoxHorizontal):
                # BUG FIX: the original wrote utf-8 *bytes* + a str newline
                # to a text-mode file (TypeError in Python 3). Write text
                # through a utf-8 text handle instead.
                with open(save_name, 'a', encoding='utf-8') as f:
                    f.write(element.get_text() + "\n")
# Script entry point: dump metadata of a sample PDF.
if __name__ == '__main__':
    # path = open(r'C:\Users\ZKTT\Desktop\example-001\test.pdf')
    # parse(path, 'test.txt')
    extract_information(r'C:\Users\ZKTT\Desktop\example-001\test.pdf')
|
import os
from typing import List
import pytest
import yaml
from python_code.calc import Calculator
@pytest.fixture(scope='class')
def get_cale():
    """Class-scoped fixture: one shared Calculator instance per test class."""
    print('实例化计算器')
    calculator = Calculator()
    return calculator
# Absolute path of the YAML data file sitting next to this module.
yaml_file_path = os.path.dirname(__file__) + '/task_datas.yml'
# Load all parametrization data (cases + human-readable ids) once at import.
# NOTE(review): this runs at pytest collection time; a missing or malformed
# file fails the whole module import.
with open(yaml_file_path, encoding='UTF-8') as f:
    datas = yaml.safe_load(f)
    add_datas = datas['add']
    myid_add = datas['myid_add']
    div_datas = datas['div']
    myid_div = datas['myid_div']
    sub_datas = datas['sub']
    myid_sub = datas['myid_sub']
    mul_datas = datas['mul']
    myid_mul = datas['myid_mul']
# 定义加法计算数据
@pytest.fixture(params=add_datas, ids=myid_add)
def get_add_data(request):
    """Yield one addition test case per parametrized run."""
    print('开始计算')
    case = request.param
    print(f'request.param的数据是:{case}')
    yield case
    print('结束计算')
# 定义除法计算数据
@pytest.fixture(params=div_datas, ids=myid_div)
def get_div_data(request):
    """Yield one division test case per parametrized run."""
    print('开始计算')
    case = request.param
    print(f'request.parm数据是:{case}')
    yield case
    print('结束计算')
# 定义减法计算数据
@pytest.fixture(params=sub_datas, ids=myid_sub)
def get_sub_data(request):
    """Yield one subtraction test case per parametrized run."""
    print('开始计算')
    case = request.param
    print(f'request.parm的数据是:{case}')
    yield case
    print('结束计算')
# 定义乘法计算数据
@pytest.fixture(params=mul_datas, ids=myid_mul)
def get_mul_data(request):
    """Yield one multiplication test case per parametrized run."""
    print('开始计算')
    case = request.param
    print(f"request.parm的数据是:{case}")
    yield case
    print('结束计算')
def pytest_collection_modifyitems(
    session: "Session", config: "Config", items: List["Item"]
) -> None:
    """Called after collection has been performed. May filter or re-order
    the items in-place.
    :param pytest.Session session: The pytest session object.
    :param _pytest.config.Config config: The pytest config object.
    :param List[pytest.Item] items: List of item objects.
    """
    print('items')
    print(items)
    # # Reverse the collected test order:
    # items.reverse()
    # Re-encode test ids so non-ASCII (escape-sequence) names display readably.
    for item in items:
        item.name = item.name.encode('utf-8').decode('unicode-escape')
        item._nodeid = item.nodeid.encode('utf-8').decode('unicode-escape')
|
from cuadrado import Cuadrado
from figura_geometrica import FiguraGeometrica
from rectangulo import Rectangulo
# Abstract classes cannot be instantiated:
# figuraGeometrica = FiguraGeometrica()
# Build and print two concrete shapes.
cuadrado = Cuadrado(2, "Rojo")
print(cuadrado)
rectangulo = Rectangulo(2, 4, "Azul")
print(rectangulo)
import argparse
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.utils import dataset_utils
from PIL import Image
import numpy as np
from edgetpu.basic.basic_engine import BasicEngine
from PIL import Image
def takeSecond(elem):
    # NOTE(review): despite the name, this returns the FIRST element — it is
    # used as a sort key to order classification results by element 0.
    head = elem[0]
    return head
def Tpu_FaceRecognize(engine, face_img):
    """Classify each face image on the TPU engine and return the per-face
    score vectors as a 2-D numpy array (one row per face)."""
    embeddings = []
    for face in face_img:
        flat = np.asarray(face).flatten()
        scored = engine.ClassifyWithInputTensor(flat, top_k=200, threshold=-0.5)
        # Order results by element 0 (class index) so rows are comparable.
        scored.sort(key=takeSecond)
        embeddings.append([pair[1] for pair in scored])
    return np.array(embeddings)
|
import pytest
from conftest import SYNCPACK
@pytest.mark.parametrize(
    "stepdict,outdata",
    [
        (
            {
                "name": "DummyStep",
                "file": "DummyStep",
                "syncpack": SYNCPACK,
                "snow_hostname": "pytest.servicenow.com",
            },
            "foo"
        )
    ],
)
def test_ProcessEventTrigger(stepdict, outdata, syncpack_step_runner):
    """Run the DummyStep through the syncpack step runner and check its output."""
    processed_data = syncpack_step_runner.run(stepdict)
    assert processed_data == outdata
import random, sys
print('ROCK, PAPER, SHOTGUN')

# Scoreboard totals, updated after every round.
wins = 0
losses = 0
ties = 0

# Display names for each move letter.
MOVE_NAMES = {'r': 'ROCK', 'p': 'PAPER', 's': 'SHOTGUN'}
# (player, computer) pairs where the player wins.
WINNING_PAIRS = (('r', 's'), ('p', 'r'), ('s', 'p'))

while True:  # The main game loop
    print('{} Wins, {} Losses, {} Ties'.format(wins, losses, ties))
    while True:  # The player input loop
        print('Enter your move : (r)ock, (p)aper, (s)hotgun or (q)uit')
        playerMove = input()
        if playerMove == 'q':
            sys.exit()  # Quit the program
        if playerMove in ('r', 'p', 's'):
            break  # Valid move: leave the input loop
        print('Type one of r, p, s or q.')
    # Display the player's move.
    print(MOVE_NAMES[playerMove] + ' versus ...')
    # Pick and display the CPU's move.
    computerMove = {1: 'r', 2: 'p', 3: 's'}[random.randint(1, 3)]
    print(MOVE_NAMES[computerMove])
    # Record win/loss/tie.
    if playerMove == computerMove:
        print('It is a tie')
        ties += 1
    elif (playerMove, computerMove) in WINNING_PAIRS:
        print('It is a WIN')
        wins += 1
    else:
        print('You LOSE')
        losses += 1
def position(n):
    '''
    >>> position(8)
    (2, 1)
    >>> position(0)
    (3, 1)
    '''
    # Phone keypad layout: digit -> (row, column); 0 sits alone on row 3.
    keypad = {
        1: (0, 0), 2: (0, 1), 3: (0, 2),
        4: (1, 0), 5: (1, 1), 6: (1, 2),
        7: (2, 0), 8: (2, 1), 9: (2, 2),
        0: (3, 1),
    }
    # Like the original if-chain, non-digits yield None.
    return keypad.get(n)
def movement(n, m):
    '''
    >>> movement(8, 8)
    0
    >>> movement(8, 9)
    1
    >>> movement(8, 1)
    3
    >>> movement(8, 3)
    3
    '''
    # Manhattan distance between the two keypad positions.
    (nx, ny) = position(n)
    (mx, my) = position(m)
    return abs(nx - mx) + abs(ny - my)
def fingermovement(number):
    '''
    >>> fingermovement('888-888-8888')
    0
    >>> fingermovement('053/67.83.47')
    16
    '''
    # Keep only ASCII digits (same test as the original ord() range check).
    digits = [ch for ch in number if 48 <= ord(ch) <= 57]
    # Sum the keypad distance between each consecutive pair of digits.
    total = 0
    for a, b in zip(digits, digits[1:]):
        total += movement(int(a), int(b))
    return total
# Run the embedded doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
# Question: Is there a correlation between homicide and time of year, location?
# Homicides per month for certain state(s) and year(s)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the homicide database and keep only Year/State/Month-level columns.
dataArr = pd.read_csv("../data/database.csv")
# remove these columns
dataArr = (dataArr.drop(['Record ID', 'Agency Code','Agency Name','Agency Type','City', 'Weapon', 'Incident', 'Crime Type', 'Crime Solved', 'Perpetrator Count', 'Perpetrator Race', 'Victim Race', 'Perpetrator Age', 'Victim Ethnicity', 'Perpetrator Ethnicity', 'Relationship','Record Source', 'Victim Age', 'Victim Sex', 'Perpetrator Sex'],axis=1))
#print(dataArr.head(n=10))
#year = dataArr.loc[dataArr['Year'] == 1980]
#state = dataArr.loc[dataArr['State'] == "Alaska"]
# Years and states of interest (tuples via implicit packing).
year = 2010,2011,2012,2013,2014
state = "District of Columbia", "Louisiana", "Missouri", "Maryland","South Carolina","Michigan","Tennessee","Florida","New Mexico","Nevada"
year_state = dataArr.loc[dataArr['Year'].isin(year) & (dataArr['State'].isin(state))]
#print year_state.head(n=1000)
# get count of each unique thing in month and sort based on Month
# (size().reset_index() names the count column 0, hence grouped[0] below)
grouped = year_state.groupby("Month").size().reset_index()
# Average per (year, state) combination, rounded to whole homicides.
grouped[0] = grouped[0].div(len(year)*len(state)).round(0)
months = {'March': 3, 'February': 2, 'August': 8, 'September': 9, 'April': 4, 'June': 6, 'July': 7, 'January': 1, 'May': 5, 'November': 11, 'December': 12, 'October': 10}
grouped["month_number"] = grouped["Month"].map(months)
grouped = grouped.sort_values("month_number", ascending=True)
print(grouped)
# plot the result as a bar chart in calendar order
y_pos = np.arange(len(grouped["Month"]))
plt.bar(y_pos, grouped[0])
plt.xticks(y_pos, grouped["Month"])
plt.xticks(rotation=90)
plt.ylabel("Homicides")
plt.xlabel("Month")
plt.title("Homicides Per Month (2010-2014)")
#fix the viewport to grab everything
plt.tight_layout()
plt.show()
|
import pygame, time
def sign(num):
    """Return 0 for zero, otherwise +1.0/-1.0 matching the sign of num."""
    if num:
        return num / abs(num)
    return 0
class Player(object):
    """Player avatar: a 20x20 rect with velocity-based movement, jumping and
    simple block collision."""

    def __init__(self, pos):
        self.img = pygame.image.load('mario.jpg')
        self.setPos(pos)
        self.xvel = 0
        self.yvel = 0
        self.onground = False
        self.up = -1        # -1: jump key released (may jump), 0: key held
        self.jumping = True

    def getPos(self):
        """Return the top-left (x, y) of the player's rect."""
        return (self.rect.x, self.rect.y)

    def setPos(self, pos):
        """Rebuild the 20x20 rect at the given top-left position tuple."""
        x, y = pos
        self.rect = pygame.Rect(x, y, 20, 20)

    def draw(self, surf, camera):
        """Blit the (scaled) sprite if it is inside the visible surface."""
        rect = camera.shiftRect(self.rect)
        if rect.colliderect(surf.get_rect()):
            img = pygame.transform.scale(self.img, (self.rect.w, self.rect.h))
            surf.blit(img, rect.topleft)

    def col(self, oldrect, blocks):
        """Resolve collisions against every block."""
        for block in blocks:
            self.colBlock(oldrect, block)

    def colBlock(self, oldrect, block):
        """Resolve a collision with one block based on where we came from.

        BUG FIX: the original body was syntactically invalid (an empty `if`
        branch before `else:`), called setPos with two positional arguments
        (it takes one tuple), referenced an undefined `newrect`, and wrote
        `self.yv` instead of `self.yvel`. Reconstructed to the apparent
        intent: vertical resolution by previous center position — confirm
        against the game's expected feel.
        """
        if self.rect.colliderect(block.rect):
            if oldrect.centery < block.rect.centery:
                # Came from above: land on top of the block.
                self.setPos((self.rect.x, block.rect.top - self.rect.h))
                self.onground = True
                self.jumping = False
                self.yvel = 0
            else:
                # Came from below: push back underneath the block.
                self.setPos((self.rect.x, block.rect.bottom))
                self.yvel = 0
        else:
            self.onground = False

    def move(self, blocks):
        """Apply input, gravity and friction, then resolve collisions."""
        oldrect = self.rect.copy()
        keys = pygame.key.get_pressed()
        if keys[pygame.K_UP]:
            if self.up == -1:
                # BUG FIX: was `self.up == 0`, a no-op comparison.
                self.up = 0
                if self.onground:
                    self.yvel = 200
                    self.jumping = True
        if keys[pygame.K_RIGHT]:
            self.xvel += 5
        if keys[pygame.K_LEFT]:
            self.xvel -= 5
        if not self.onground:
            self.yvel -= 1 / 60   # gravity per 60 Hz frame
        x, y = self.getPos()
        x += self.xvel / 60
        y += self.yvel / 60
        self.xvel *= .6 ** (1 / 60)   # horizontal friction
        self.setPos((x, y))
        self.col(oldrect, blocks)

    def tick(self, blocks):
        """Advance one frame."""
        self.move(blocks)
class Camera():
    """Maps world coordinates to screen coordinates using a pan offset and a
    zoom factor."""

    def __init__(self):
        self.pos = (0, 0)
        self.zoom = 1

    def setData(self, pos, zoom):
        """Set the camera's world-space offset and zoom."""
        self.pos = pos
        self.zoom = zoom

    def shiftRect(self, rect):
        """Return a new rect transformed into screen space."""
        left, top = self.shiftPoint((rect.x, rect.y))
        return pygame.Rect(left, top, rect.w * self.zoom, rect.h * self.zoom)

    def shiftPoint(self, point):
        """Translate by -pos, then scale by zoom."""
        px, py = point
        ox, oy = self.pos
        return ((px - ox) * self.zoom, (py - oy) * self.zoom)
class Block(object):
    """Static 20x20 world tile addressed by an integer grid coordinate."""
    size = 20

    def __init__(self, coord):
        self.coord = coord
        # BUG FIX: the y term used coord[0] twice; the row index is coord[1].
        self.rect = pygame.Rect(self.coord[0] * self.size, self.coord[1] * self.size, self.size, self.size)

    def draw(self, surf, camera):
        """Draw the tile as a black rectangle in camera space."""
        newRect = camera.shiftRect(self.rect)
        pygame.draw.rect(surf, (0, 0, 0), newRect, 0)

    def getPos(self):
        # BUG FIX: same coord[0]/coord[1] mix-up as in __init__.
        return (self.coord[0] * self.size, self.coord[1] * self.size)
class Level():
    """One playable level: a camera, an off-screen surface, the blocks and
    the player."""

    def __init__(self, size):
        self.camera = Camera()
        self.run = True
        self.surf = pygame.Surface(size)
        self.blocks = [Block((0, 0))]
        self.player = Player((0, -30))

    def changeCam(self):
        """Center the camera on the player at zoom 1."""
        x, y = self.player.getPos()
        newx = x - self.surf.get_width() / 2
        newy = y - self.surf.get_height() / 2
        self.camera.setData((newx, newy), 1)

    def tick(self):
        """Advance the simulation one frame and redraw the level surface."""
        self.player.tick(self.blocks)
        self.changeCam()
        # BUG FIX: the original cleared the surface INSIDE the block loop
        # (erasing previously drawn blocks) and drew the player twice.
        self.surf.fill((255, 255, 255))
        for block in self.blocks:
            block.draw(self.surf, self.camera)
        self.player.draw(self.surf, self.camera)
class Client():
    """Top-level application: window, event pump and 60 FPS frame loop."""

    def __init__(self):
        pygame.init()
        self.screen = pygame.display.set_mode((1000, 750))
        self.level = Level((800, 600))
        self.run = True
        self.inlevel = True
        self.events = []
        self.clock = pygame.time.Clock()

    def gameloop(self):
        """Run tick() until the window is closed."""
        while self.run:
            self.tick()

    def tick(self):
        """Handle events, advance the level and present one frame."""
        # (Removed dead `t = time.time()` — the value was never used.)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.run = False
        if self.inlevel:
            self.level.tick()
        self.screen.fill((255, 255, 255))
        if self.inlevel:
            self.screen.blit(self.level.surf, (0, 0))
        pygame.display.update()
        self.clock.tick(60)   # cap at 60 FPS
def main():
    """Entry point: build the client and run the frame loop until quit."""
    client = Client()
    client.gameloop()
# Starts the game immediately on import/execution.
main()
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class Che300NewItem(scrapy.Item):
    """Empty item scaffold left from `scrapy genspider` — currently unused."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
class che300_price(scrapy.Item):
    """Full che300 vehicle price record: vehicle identity, location,
    sale-date/rate buckets and the various dealer/individual price points."""
    # Vehicle / location identity
    geartype = scrapy.Field()
    provname = scrapy.Field()
    provid = scrapy.Field()
    makeyear = scrapy.Field()
    price = scrapy.Field()
    cityname = scrapy.Field()
    cityid = scrapy.Field()
    emission = scrapy.Field()
    reyear = scrapy.Field()
    factoryname = scrapy.Field()
    brandname = scrapy.Field()
    brandid = scrapy.Field()
    familyname = scrapy.Field()
    familyid = scrapy.Field()
    date = scrapy.Field()
    salesdesc = scrapy.Field()
    salesdescid = scrapy.Field()
    milage = scrapy.Field()
    liter_type = scrapy.Field()
    # Crawl bookkeeping
    url = scrapy.Field()
    grabtime = scrapy.Field()
    situation = scrapy.Field()
    # Price / sale-date / sale-rate buckets 1..7
    price1 = scrapy.Field()
    price2 = scrapy.Field()
    price3 = scrapy.Field()
    price4 = scrapy.Field()
    price5 = scrapy.Field()
    price6 = scrapy.Field()
    price7 = scrapy.Field()
    saleDateRange1 = scrapy.Field()
    saleDateRange2 = scrapy.Field()
    saleDateRange3 = scrapy.Field()
    saleDateRange4 = scrapy.Field()
    saleDateRange5 = scrapy.Field()
    saleDateRange6 = scrapy.Field()
    saleDateRange7 = scrapy.Field()
    saleRate1 = scrapy.Field()
    saleRate2 = scrapy.Field()
    saleRate3 = scrapy.Field()
    saleRate4 = scrapy.Field()
    saleRate5 = scrapy.Field()
    saleRate6 = scrapy.Field()
    saleRate7 = scrapy.Field()
    status = scrapy.Field()
    datasave = scrapy.Field()
    # Condition tiers and per-year values
    default = scrapy.Field()
    excellent = scrapy.Field()
    good = scrapy.Field()
    normal = scrapy.Field()
    data = scrapy.Field()
    year_2017 = scrapy.Field()
    year_2018 = scrapy.Field()
    year_2019 = scrapy.Field()
    year_2020 = scrapy.Field()
    year_2021 = scrapy.Field()
    # Dealer / individual price points
    dealer_price = scrapy.Field()
    individual_price = scrapy.Field()
    help_sold_price = scrapy.Field()
    dealer_buy_price = scrapy.Field()
    dealer_auction_price = scrapy.Field()
    dealer_low_buy_price = scrapy.Field()
    dealer_low_auction_price = scrapy.Field()
    individual_low_sold_price = scrapy.Field()
    dealer_low_sold_price = scrapy.Field()
    dealer_high_sold_price = scrapy.Field()
    low_help_sold_price = scrapy.Field()
    b2c_price = scrapy.Field()
class Che300PriceDaily(scrapy.Item):
    """Daily che300 price snapshot keyed by car_md5."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    grabtime = scrapy.Field()
    url = scrapy.Field()
    # Price buckets 1..7
    price1 = scrapy.Field()
    price2 = scrapy.Field()
    price3 = scrapy.Field()
    price4 = scrapy.Field()
    price5 = scrapy.Field()
    price6 = scrapy.Field()
    price7 = scrapy.Field()
    # Vehicle identity
    brand = scrapy.Field()
    series = scrapy.Field()
    salesdescid = scrapy.Field()
    regDate = scrapy.Field()
    cityid = scrapy.Field()
    prov = scrapy.Field()
    mile = scrapy.Field()
    # Dedup key and crawl status
    car_md5 = scrapy.Field()
    statusplus = scrapy.Field()
    status = scrapy.Field()
class Che300_Big_Car_evaluate_Item(scrapy.Item):
    """Large-vehicle valuation record: per-condition-tier (good / excellent /
    normal) dealer buy/sell price ranges."""
    statusplus = scrapy.Field()
    grab_time = scrapy.Field()
    url = scrapy.Field()
    # Vehicle / location identity
    brand_id = scrapy.Field()
    series_id = scrapy.Field()
    model_id = scrapy.Field()
    prov_id = scrapy.Field()
    city_id = scrapy.Field()
    mile = scrapy.Field()
    reg_date = scrapy.Field()
    default_car_condition = scrapy.Field()
    # "good" condition tier prices
    good_dealer_high_buy_price = scrapy.Field()
    good_dealer_low_buy_price = scrapy.Field()
    good_dealer_high_sold_price = scrapy.Field()
    good_dealer_buy_price = scrapy.Field()
    good_dealer_low_sold_price = scrapy.Field()
    good_dealer_sold_price = scrapy.Field()
    # "excellent" condition tier prices
    excellent_dealer_high_buy_price = scrapy.Field()
    excellent_dealer_low_buy_price = scrapy.Field()
    excellent_dealer_high_sold_price = scrapy.Field()
    excellent_dealer_buy_price = scrapy.Field()
    excellent_dealer_sold_price = scrapy.Field()
    excellent_dealer_low_sold_price = scrapy.Field()
    # "normal" condition tier prices
    normal_dealer_high_buy_price = scrapy.Field()
    normal_dealer_low_buy_price = scrapy.Field()
    normal_dealer_high_sold_price = scrapy.Field()
    normal_dealer_buy_price = scrapy.Field()
    normal_dealer_low_sold_price = scrapy.Field()
    normal_dealer_sold_price = scrapy.Field()
    goods_type = scrapy.Field()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 12 14:36:00 2022
@author: david
"""
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# Split df into an imports and exports df
df = pd.read_csv('anc_loc_changes.csv')
imports_df = df[df['Destination'] == 'North Carolina']
exports_df = df[df['Origin'] == 'North Carolina']
# Plot imports into NC, counted per origin state
sns.set_theme(style="darkgrid")
fig, ax = plt.subplots(figsize=(8, 5))
imports_by_state = imports_df['Origin'].value_counts()
sns.barplot(imports_by_state.index, imports_by_state.values)
ax.set_ylabel('Imports into NC')
plt.xticks(rotation=90)  # rotate xtick labels so readable
fig.tight_layout()
fig.savefig('nc_imports_per_state.png', dpi=200)
# Plot exports OUT of NC, counted per destination state
# NOTE(review): the y-axis label below says "Exports into NC" — it is a
# runtime string so it is left unchanged here, but it reads inverted.
fig, ax = plt.subplots(figsize=(8, 5))
exports_by_state = exports_df['Destination'].value_counts()
sns.barplot(exports_by_state.index, exports_by_state.values)
ax.set_ylabel('Exports into NC')
plt.xticks(rotation=90)  # rotate xtick labels so readable
fig.tight_layout()
fig.savefig('nc_exports_per_state.png', dpi=200)
__author__ = 'jonathan'
import itertools
from lib.rome.core.expression.expression import *
from sqlalchemy.sql.expression import BinaryExpression
from lib.rome.core.utils import current_milli_time
from sqlalchemy.util._collections import KeyedTuple
from lib.rome.core.models import get_model_classname_from_tablename, get_model_class_from_name
def intersect(b1, b2):
    """Return the elements of b1 that also occur in b2, keeping b1's order
    (and duplicates from b1)."""
    common = []
    for val in b1:
        if val in b2:
            common.append(val)
    return common
def flatten(lis):
    """Given a list, possibly nested to any level, return it flattened."""
    new_lis = []
    for item in lis:
        # Idiom fix: isinstance() instead of `type(item) == type([])`;
        # also matches list subclasses, which compare-by-type missed.
        if isinstance(item, list):
            new_lis.extend(flatten(item))
        else:
            new_lis.append(item)
    return new_lis
# def flatten(l):
# return [item for sublist in l for item in sublist]
def extract_table_data(term):
    """Split a dotted column reference ("table.column[...]") into a dict with
    the table and (first) column name; return None when there is no dot."""
    text = str(term)
    if "." not in text:
        return None
    parts = text.split(".")
    return {"table": parts[0], "column": parts[1]}
def extract_joining_criterion(exp):
    """Recursively pull join criteria out of an expression tree.

    Returns a (possibly nested) list of [left_table_data, right_table_data]
    pairs for BinaryExpressions, recursing through BooleanExpressions; any
    other node yields []. NOTE(review): this module targets Python 2 — map()
    here is relied on to return a list; under Python 3 it would be a lazy
    iterator.
    """
    from lib.rome.core.expression.expression import BooleanExpression
    if type(exp) is BooleanExpression:
        return map(lambda x:extract_joining_criterion(x), exp.exps)
    elif type(exp) is BinaryExpression:
        return [[extract_table_data(exp.left)] + [extract_table_data(exp.right)]]
    else:
        return []
def extract_joining_criterion_from_relationship(rel, local_table):
    """Build a [local, remote] join-criterion pair from a relationship object,
    in the same {"table": ..., "column": ...} shape as extract_table_data."""
    return [
        {"table": local_table, "column": rel.local_fk_field},
        {"table": rel.remote_object_tablename, "column": rel.remote_object_field},
    ]
def building_tuples(list_results, labels, criterions, hints=[]):
    """Join the per-table rows in *list_results* into result tuples.

    list_results -- one list of row objects per table name in *labels*.
    labels       -- table names, aligned index-by-index with list_results.
    criterions   -- query criteria; expressions that reference two tables
                    become joining criteria, the rest are collected
                    per-table as non-joining filters (currently unused --
                    the filtering code below is commented out).
    hints        -- optional hints (table_name, attribute, value) used to
                    skip non-matching candidate rows.
                    NOTE(review): mutable default [] -- only read here, but
                    fragile if a callee ever mutates it.

    Returns a list of tuples (represented as lists of row objects), built
    by progressively extending partial tuples table by table.

    NOTE(review): this is Python 2 code (dict.has_key; zip/map/filter used
    as lists); several spots flagged below would fail on Python 3.
    """
    from lib.rome.core.rows.rows import get_attribute, set_attribute, has_attribute
    mode = "experimental"
    # NOTE(review): `is` compares identity, not equality; this only works for
    # interned string literals on CPython -- `==` would be the correct test.
    if mode is "cartesian_product":
        cartesian_product = []
        for element in itertools.product(*list_results):
            cartesian_product += [element]
        return cartesian_product
    elif mode is "experimental":
        steps = zip(list_results, labels)
        candidates_values = {}
        candidates_per_table = {}
        joining_criterions = []
        non_joining_criterions = {}
        # Initialising candidates per table
        for each in labels:
            candidates_per_table[each] = {}
        # Collecting joining expressions
        for criterion in criterions:
            # if criterion.operator in "NORMAL":
            for exp in criterion.exps:
                for joining_criterion in extract_joining_criterion(exp):
                    foo = [x for x in joining_criterion if x is not None]
                    if len(foo) > 1:
                        joining_criterions += [foo]
                    else:
                        # Extract here non joining criterions, and use it to filter objects
                        # that are located in list_results
                        exp_criterions = ([x for x in flatten(joining_criterion) if x is not None])
                        for non_joining_criterion in exp_criterions:
                            tablename = non_joining_criterion["table"]
                            column = non_joining_criterion["column"]
                            if not tablename in non_joining_criterions:
                                non_joining_criterions[tablename] = []
                            non_joining_criterions[tablename] += [{
                                "tablename": tablename,
                                "column": column,
                                "exp": exp,
                                "criterion": criterion
                            }]
        # # Filtering list_of_results with non_joining_criterions
        # corrected_list_results = []
        # for results in list_results:
        #     cresults = []
        #     for each in results:
        #         tablename = each["nova_classname"]
        #         if tablename in non_joining_criterions:
        #             do_add = True
        #             for criterion in non_joining_criterions[tablename]:
        #                 if not criterion["criterion"].evaluate(KeyedTuple([each], labels=[tablename])):
        #                     do_add = False
        #                     break
        #             if do_add:
        #                 cresults += [each]
        #     corrected_list_results += [cresults]
        # list_results = corrected_list_results
        # Consolidating joining criterions with data stored in relationships
        done_index = {}
        for step in steps:
            tablename = step[1]
            model_classname = get_model_classname_from_tablename(tablename)
            fake_instance = get_model_class_from_name(model_classname)()
            relationships = fake_instance.get_relationships()
            for r in relationships:
                criterion = extract_joining_criterion_from_relationship(r, tablename)
                key1 = criterion[0]["table"]+"__"+criterion[1]["table"]
                key2 = criterion[1]["table"]+"__"+criterion[0]["table"]
                # NOTE(review): this condition looks broken -- the chained
                # comparison `key2 not in criterion[0]["table"] in labels`
                # evaluates as (key2 not in criterion[0]["table"]) and
                # (criterion[0]["table"] in labels); an `and` between
                # `key2 not in done_index` and the membership tests was almost
                # certainly intended.
                if key1 not in done_index and key2 not in criterion[0]["table"] in labels and criterion[1]["table"] in labels:
                    joining_criterions += [criterion]
                    done_index[key1] = True
                    done_index[key2] = True
            pass
        # Collecting for each of the aforementioned expressions, its values <-> objects
        if len(joining_criterions) > 0:
            for criterion in joining_criterions:
                for each in criterion:
                    key = "%s.%s" % (each["table"], each["column"])
                    index_list_results = labels.index(each["table"])
                    objects = list_results[index_list_results]
                    # NOTE(review): dict.has_key is Python 2 only.
                    if not candidates_values.has_key(key):
                        candidates_values[key] = {}
                    for object in objects:
                        value_key = get_attribute(object, each["column"])
                        skip = False
                        for hint in hints:
                            if each["table"] == hint.table_name and hint.attribute in object and object[hint.attribute] != hint.value:
                                skip = True
                                break
                        if not skip:
                            if not candidates_values[key].has_key(value_key):
                                candidates_values[key][value_key] = {}
                            object_hash = str(object).__hash__()
                            object_table = object["nova_classname"]
                            candidates_values[key][value_key][object_hash] = {"value": value_key, "object": object}
                            candidates_per_table[object_table][object_hash] = object
        else:
            # No joining criteria at all: just index every row by table.
            for each in steps:
                for each_object in each[0]:
                    object_hash = str(each_object).__hash__()
                    object_table = each_object["nova_classname"]
                    candidates_per_table[object_table][object_hash] = each_object
        # Progressively reduce the list of results
        results = []
        processed_models = []
        if len(steps) > 0:
            step = steps[0]
            results = map(lambda x: [candidates_per_table[step[1]][x]], candidates_per_table[step[1]])
            processed_models += [step[1]]
        remaining_models = map(lambda x:x[1], steps[1:])
        for step in steps[1:]:
            for criterion in joining_criterions:
                criterion_models = map(lambda x: x["table"], criterion)
                candidate_models = [step[1]] + processed_models
                if len(intersect(candidate_models, criterion_models)) > 1:
                    processed_models += [step[1]]
                    # NOTE(review): this filter KEEPS only entries equal to
                    # step[1]; to drop the just-processed model from
                    # remaining_models the predicate should probably be `!=`.
                    remaining_models = filter(lambda x: x ==step[1], remaining_models)
                    # try:
                    current_criterion_option = filter(lambda x:x["table"]==step[1], criterion)
                    remote_criterion_option = filter(lambda x:x["table"]!=step[1], criterion)
                    if not (len(current_criterion_option) > 0 and len(remote_criterion_option) > 0):
                        continue
                    current_criterion_part = current_criterion_option[0]
                    remote_criterion_part = remote_criterion_option[0]
                    new_results = []
                    # Extend each partial tuple with every candidate row whose
                    # join-column value matches the already-selected row.
                    for each in results:
                        existing_tuple_index = processed_models.index(remote_criterion_part["table"])
                        existing_value = get_attribute(each[existing_tuple_index], remote_criterion_part["column"])
                        if existing_value is not None:
                            key = "%s.%s" % (current_criterion_part["table"], current_criterion_part["column"])
                            candidates_value_index = candidates_values[key]
                            candidates = candidates_value_index[existing_value] if existing_value in candidates_value_index else {}
                            for candidate_key in candidates:
                                new_results += [each + [candidates[candidate_key]["object"]]]
                    results = new_results
                    break
            continue
        return results
|
'''
Created on 2013-7-26
sub(repl, string[, count]) | re.sub(pattern, repl, string[, count]):
@author: Administrator
'''
import re
# Pattern capturing two consecutive words separated by a single space.
p = re.compile(r'(\w+) (\w+)')
s = 'i say , hello world'
# Replace every two-word match with the empty string (Python 2 print statement).
print p.sub(r'', s)
def func(m):
    # Title-case both captured words, e.g. "hello world" -> "Hello World".
    return m.group(1).title() + ' ' + m.group(2).title()
# The replacement may also be a callable receiving each match object.
print p.sub(func, s)
#!/usr/bin/env python3
# coding: utf-8
'''
版本: 1.0
功能: 异步非阻塞IO
Python 版本: Python3.6
'''
import sys
import socket
import selectors
from selectors import EVENT_READ, EVENT_WRITE
# 创建selector对象,监视文件描述符
selector = selectors.DefaultSelector()
class Server(object):
    """Non-blocking TCP server driven by the module-level ``selector``.

    The listening socket and every accepted connection are registered with
    the selector; the ``data`` slot of each registration holds the callback
    that ``loop()`` invokes when the descriptor becomes readable.
    """

    def __init__(self, address):
        # Create the listening socket; the selector registration keeps a
        # reference to it, so a local variable is sufficient here.
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.bind(address)
        server.listen(5)
        server.setblocking(False)
        # Register the read event; data=self.connection binds the accept
        # callback to this registration.  register() returns a SelectorKey.
        key = selector.register(
            fileobj=server,
            events=EVENT_READ,
            data=self.connection)
        print('EVENT_READ of accept', key)

    def connection(self, server, mask):
        """Accept a new client and register it for read events."""
        conn, addr = server.accept()
        print('Connection: ', conn, 'from', addr)
        print()
        conn.setblocking(False)
        key = selector.register(fileobj=conn,
                                events=EVENT_READ,
                                data=self.handle_request
                                )
        print('EVENT_READ of recv', key)

    def handle_request(self, conn, mask):
        """Read a request from *conn*; reply, or close on empty/exit input."""
        data = conn.recv(1024)
        print(data)
        # BUG FIX: recv() returns bytes, so the original comparison
        # (data == '\r\n') could never be true on Python 3.  Also treat
        # b'' (peer closed the connection) as a close condition, otherwise
        # the selector would spin on a half-closed socket.
        if data == b'' or data == b'\r\n':
            print('Data can not is null. closing', conn)
            selector.unregister(conn)
            conn.close()
        elif data == b'exit\r\n':
            print('closing', conn)
            # Unregister from the selector before closing the TCP connection.
            selector.unregister(conn)
            conn.close()
        else:
            # BUG FIX: print() does not interpolate '%s'; format explicitly.
            print('Received data: %s from %s' % (data, conn))
            response = b'hello\r\n'
            conn.sendall(response)

    def send_data(self, conn):
        """One-shot reply helper: send a greeting, then unregister and close."""
        # BUG FIX: same print-format issue as above.
        print('Sending data to %s' % (conn,))
        response = b'hello\r\n'
        conn.sendall(response)
        selector.unregister(conn)
        conn.close()
def loop():
    """Event loop: dispatch ready events to their registered callbacks forever."""
    while True:
        # select() waits up to 1 second and returns a list of
        # (SelectorKey, event-mask) pairs for the ready file objects.
        ready = selector.select(1)
        print('Events is ----> ', ready)
        for key, mask in ready:
            # The handler was attached via data= at registration time.
            handler = key.data
            handler(key.fileobj, mask)
if __name__ == '__main__':
    # Listen on all interfaces, port 8080, then run the event loop forever.
    address = ('', 8080)
    server = Server(address)
    loop()
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import sys
sys.path.append("..")
import math
import random
import tensorflow as tf
import numpy as np
from util import metrics
from data_util import data_preprocess3
from model import Model
from args_8 import args
import nltk
import json
from nlgeval import NLGEval
nlgeval = NLGEval() # loads the models
eps = 1e-10
# ==============================================================================
# Loading dataset
# ==============================================================================
args = args("test")
data_preprocess = data_preprocess3(args)
num_sample = data_preprocess.num_sample
print("num_sample:", num_sample)
# Number of training samples; used only to locate checkpoint step numbers.
num_sample_train = 65607
num_IterPerEpoch_train = int(math.ceil(num_sample_train/args.batch_size))-1
print("num_IterPerEpoch_train:", num_IterPerEpoch_train)
# ==============================================================================
# Build Graph
# ==============================================================================
# Iterations per epoch
model = Model(type="test", training_steps_per_epoch=None,
              vocabSize=data_preprocess.data.vocabSize)
config = tf.ConfigProto()
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
config.gpu_options.allow_growth = True
# ==============================================================================
# Evaluation helpers
# ==============================================================================
def id2word(data):
    """Convert a sequence of token ids into a space-joined word string.

    Ids equal to -1 (padding) are dropped; the vocabulary is keyed by the
    stringified id.
    """
    vocab = data_preprocess.data.ind2word
    words = []
    for token_id in data:
        if token_id != -1:
            words.append(vocab[str(token_id)])
    return ' '.join(words)
def F1_parlAI(p_answers, answers_str):
    """Compute ParlAI-style F1 between predicted id sequences and gold strings.

    Each prediction is truncated at its first EOS token (kept whole if no
    EOS occurs) before being converted back to words with id2word().
    """
    scores = []
    for k in range(len(p_answers)):
        prediction = p_answers[k]
        gold = answers_str[k]
        # Find the cut-off position: index of the first EOS marker.
        cut = len(prediction)
        for idx in range(len(prediction)):
            if prediction[idx] == args.EOS:
                cut = idx
                break
        predicted_text = id2word(prediction[:cut])
        scores.append(metrics._f1_score(predicted_text, [gold]))
    return scores
def distinctEval(all_paths):
    """Compute distinct-1/2/3 diversity metrics over answer id sequences.

    all_paths -- list of token-id sequences, shape [N, A_len].
    Returns (dist1, dist2, dist3): the number of unique uni/bi/tri-grams
    divided by the total number of tokens.

    Improvements over the original:
    - n-grams are built with zip() over shifted views, which yields exactly
      the same tuples as nltk.bigrams/nltk.trigrams without needing nltk;
    - an empty input no longer raises ZeroDivisionError (all ratios are 0).
    """
    unigrams = set()
    bigrams = set()
    trigrams = set()
    response_len = sum(len(p) for p in all_paths)
    if response_len == 0:
        # No tokens at all: define every distinct ratio as 0.
        return 0.0, 0.0, 0.0
    for path in all_paths:
        unigrams.update(path)
        bigrams.update(zip(path, path[1:]))
        trigrams.update(zip(path, path[1:], path[2:]))
    dist1 = len(unigrams) / response_len
    dist2 = len(bigrams) / response_len
    dist3 = len(trigrams) / response_len
    return dist1, dist2, dist3
# ==============================================================================
# other method
# ==============================================================================
def update_batch(batch_topic_words):
    """Look up embeddings for each sample's topic words (word tokens).

    Each sample contributes exactly args.num_topic_words embeddings; samples
    with fewer words are padded by wrapping around to the start of their
    word list (a "padding" notice is printed for each wrapped slot).
    """
    batch_emb = []
    for i in range(args.batch_size):
        sample_words = batch_topic_words[i]
        n_words = len(sample_words)
        sample_emb = []
        for j in range(args.num_topic_words):
            if j < n_words:
                word = sample_words[j]
            else:
                print("padding")
                # Wrap around to the beginning of the word list.
                word = sample_words[j - n_words]
            # Map the word to its embedding row via the token2id dictionary.
            token_id = data_preprocess.dic["token2id"][word]
            sample_emb.append(data_preprocess.embedding[token_id])
        batch_emb.append(sample_emb)
    return batch_emb
def update_batch2(batch_topic_words):
    """Look up embeddings for each sample's topic words (already token ids).

    Unlike update_batch(), the inputs are embedding-row indices, so no
    token2id lookup is required.
    """
    batch_emb = []
    for i in range(args.batch_size):
        ids = batch_topic_words[i]
        sample_emb = [data_preprocess.embedding[ids[j]]
                      for j in range(args.num_topic_words)]
        batch_emb.append(sample_emb)
    return batch_emb
F1_score_all = []
dict1_all = []
dict2_all = []
dict3_all = []
# Evaluate every saved checkpoint: one per training epoch
# (visdial-<step> with step = epoch * num_IterPerEpoch_train), up to 30 epochs.
for bbb in range(num_IterPerEpoch_train, num_IterPerEpoch_train*30+1, num_IterPerEpoch_train):
    with tf.Session(config=config) as sess:
        idxs = [i for i in range(num_sample)]
        print("model restore from savePath:", args.savePath)
        checkpoint_path = os.path.join(args.savePath, "visdial-%d" % bbb)
        # Stop once we run past the last checkpoint that was written.
        if not os.path.exists(checkpoint_path + '.index'):
            exit(0)
        model.saver.restore(sess, checkpoint_path)
        F1_score = []
        model_answers_id = []
        true_answers = []
        model_answers = []
        # Iterate full batches only (the second range stops one batch early).
        for batch_id, (start, end) in enumerate(zip(range(0, data_preprocess.num_sample, args.batch_size),
                                                    range(args.batch_size, data_preprocess.num_sample, args.batch_size))):
            # print("idxs[start:end]:", idxs[start:end])
            batch_persona, batch_persona_len, batch_persona_turn, batch_history, batch_history_len, batch_history_turn, \
            batch_question, batch_question_len, batch_answer, batch_answer_len, batch_answer_target, batch_answer_str, \
            batch_answers_in_persona = data_preprocess.get_batch(idxs[start:end])
            # print("batch_answer_str:", batch_answer_str[0])
            # print("batch_answers:", id2word(batch_answer[0]))
            batch_personas_emb, batch_historys_emb, batch_questions_emb, batch_topic_words, batch_topic_words_weigth = data_preprocess.get_batch_topic_info(
                idxs[start:end])
            batch_topic_words_emb = update_batch(batch_topic_words)  # words
            # batch_topic_words_emb = update_batch2(batch_topic_words) # id
            input_feed = {model.personas_ph: batch_persona,
                          model.personas_len_ph: batch_persona_len,
                          model.persona_turn: batch_persona_turn,
                          model.historys_ph: batch_history,
                          model.historys_len_ph: batch_history_len,
                          model.historys_turn: batch_history_turn,
                          model.answers_ph: batch_answer,
                          model.answer_len_ph: batch_answer_len,
                          model.answer_targets_ph: batch_answer_target,
                          model.topic_words_emb_ph: batch_topic_words_emb,
                          model.answers_in_persona_label: batch_answers_in_persona}
            output_feed = [model.answers_predict]
            outputs = sess.run(output_feed, input_feed)
            f1 = F1_parlAI(outputs[0], batch_answer_str)
            F1_score.extend(f1)
            for i in range(args.batch_size):
                if batch_id == 100 and i < 10:
                    # print("batch_question:", id2word(batch_question[i]))
                    print("batch_answer_str:", batch_answer_str[i])  # real answer
                    print("model_answer:", id2word(outputs[0][i]))
                # true_answers.append(batch_answer_str[i])
                # model1_answers.append(id2word(outputs[0][i]))
                # Ids <= 2 are dropped (presumably PAD/SOS/EOS -- TODO confirm
                # against the vocabulary construction).
                model_answers_id.append([m for m in outputs[0][i] if m > 2])
                true_answers.append(batch_answer_str[i])
                model_answers.append(id2word(outputs[0][i]).replace("<EOS>", ""))
        print("num_batch:", bbb, "beam_wide=", args.num_BeamSearch)
        # nlg-eval takes a list of reference lists plus the hypotheses.
        model_metrics_dict = nlgeval.compute_metrics([true_answers], model_answers)
        print("model:\n", model_metrics_dict)
        print("F1_score:", np.mean(F1_score))
        F1_score_all.append(np.mean(F1_score))
        dist1, dist2, dist3 = distinctEval(model_answers_id)
        print("dist1:", dist1)
        print("dist2:", dist2)
        print("dist3:", dist3)
        dict1_all.append(dist1)
        dict2_all.append(dist2)
        dict3_all.append(dist3)
print("F1_score_all:", F1_score_all)
print("dict1_all:", dict1_all)
print("dict2_all:", dict2_all)
print("dict3_all:", dict3_all)
|
import glob
import os
from nipype import Node
from nipype.interfaces.fsl import BET, FAST
DATA_DIR = '/export/home/zvibaratz/Projects/brain_profile/Skull-stripped'
PATTERN = '**/*.nii.gz'
OUTPUT = '/export/home/zvibaratz/Projects/brain_profile/Bias-corrected'


def get_default_destination(
    scan: str,
    create: bool = True,
) -> str:
    """Return the output path mirroring *scan*'s subject directory under OUTPUT.

    The scan path is expected to end in .../<subject_id>/<file_name>.
    When *create* is true the subject's output directory is created if needed.
    """
    # BUG FIX: derive the components with os.path instead of splitting on '/',
    # so platform-native separators are handled correctly as well.
    subject_dir, file_name = os.path.split(scan)
    subject_id = os.path.basename(subject_dir)
    if create:
        os.makedirs(os.path.join(OUTPUT, subject_id), exist_ok=True)
    return os.path.join(OUTPUT, subject_id, file_name)
def run_bet(
    skip_existing: bool = True
):
    """Run FSL BET skull-stripping over every scan matching PATTERN in DATA_DIR.

    Results are written to the mirrored subject location under OUTPUT.  When
    *skip_existing* is true, scans whose output file already exists are
    skipped.  Processing stops at the first scan that raises.
    """
    full_pattern = os.path.join(DATA_DIR, PATTERN)
    scans = glob.iglob(full_pattern, recursive=True)
    for scan in scans:
        print(f'\nCurrent series: {scan}')
        # BUG FIX: dest was only assigned inside the skip_existing branch, so
        # calling run_bet(skip_existing=False) raised NameError at
        # bet.inputs.out_file.  Compute the destination unconditionally.
        dest = get_default_destination(scan)
        if skip_existing:
            print('Checking for existing skull-stripping output...', end='\t')
            if os.path.isfile(dest):
                print(f'\u2714')
                continue
        print(f'\u2718')
        print('Running skull-stripping with BET...')
        try:
            bet = Node(BET(robust=True), name='bet_node')
            bet.inputs.in_file = scan
            bet.inputs.out_file = dest
            bet.run()
            print(f'\u2714\tDone!')
        except Exception as e:
            print(f'\u2718')
            print(e.args)
            break
def run_fast(in_file: str):
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Module that will handle `ISO 8601:2004 Representation of dates and
times <http://www.iso.org/iso/catalogue_detail?csnumber=40874>`_ date
parsing and formatting.
"""
__copyright__ = """Copyright 2011 Lance Finn Helsten (helsten@acm.org)"""
from .__meta__ import (__version__, __author__, __license__)
import sys
if sys.version_info < (3, 2):
raise Exception("iso8601 requires Python 3.2 or higher.")
import os
import logging
import datetime
import time
import re
import string
import math
__all__ = ['isotime',
        'HOUR', 'MINUTE', 'SECOND'
    ]
# Module-level logger for parse/format diagnostics.
__log__ = logging.getLogger("iso8601")
# (reduced_mask, truncated_mask) bit-field pairs consumed by
# isotime.isoformat(); the bits select which of hour/minute/second, fraction,
# and basic/extended form appear -- see the code2fmt table in isotime.
HOUR = (0b10011, 0b11111)
MINUTE = (0b11011, 0b01111)
SECOND = (0b11111, 0b00111)
class isotime():
"""
Arguments
---------
hour
The hour number in the range [00, 24]. If `None` then the current
hour is used as the implied hour. If this is 24 then the minute
must be 0 and the second must be 0, and it indicates the end of
the current day and may be exchanged with 00:00:00 of the next
day.
minute
THe minute number in the range [00, 59]. If `None` then the
current minute is used as the implied minute.
second
The second number in the range [00, 59]. If `None` then the
current second is used as the implied second. This may be set
to 60 if hour is 23 and minute is 59 to indicate a positive
leap second.
microsecond
The number of microseconds in the range [0000, 9999] within the
current second. If this is `None` then the start of the current
second is assumed (i.e. microsecond is 0000).
tzinfo
The `timezone` information object that is a subclass of
`datetime.tzinfo`. If `None` then the current local time zone
is assumed.
Properties
----------
iso_implied
This is the implied base that determines a fully qualitied ISO
time. This is returned as an object that is duck typed to
`datetime.time`.
Built-in Function
-----------------
Built in functions may have behavior that seem unusual and are
documented here:
bytes
This will return a byte string that is a compact encoding
of the time.
hash
The hash code is the same as `hash(bytes(obj))`.
int
This will return the ordinal number of days from 1 Jan 0001.
Format Specifier
----------------
The following are `isotime` specific `format_spec` symantics for use
with string formatting:
fill
The fill character, if `None` this defaults to space.
align
May use the '<', '>', or '^' alignment operator.
sign
This is not allowed.
#
The normal format is to use extended unless not applicable
then use basic. This will force all representations to basic.
0
This is not allowed because '=' align is not allowed.
width
The width of the field.
,
This indicates that the preferred fraction separator [,]
should be used instead of [.].
precision
This representation will be shortened to this length.
.. WARNING::
Use of precision could result in an invalid ISO 8601
reduced precision representation.
type
s
This results in the representation as `str(x)`, but
with modifiers as documented.
It is possible to generate reduced precision dates with format, but
it is not possible to generate truncated representations.
"""
def __init__(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=None):
if (hour is None and minute is None and second is None):
raise ValueError("Either hour, minute, or second must be specified.")
self.__orig_hour = None
self.__orig_minute = None
self.__orig_second = None
self.__orig_microsecond = None
self.__orig_tzinfo = None
if hour is not None:
hour = int(hour)
if not (0 <= hour <= 24):
raise ValueError("Hour is not in range [00, 24].")
self.__orig_hour = hour
if minute is not None:
minute = int(minute)
if not (0 <= minute <= 59):
raise ValueError("Minute is not in range [00, 59].")
self.__orig_minute = minute
if second is not None:
second = int(second)
if not (0 <= second < 60):
if second == 60 and (hour != 23 and minute != 59):
raise ValueError("Second can only be 60 at 23:59:60.")
else:
raise ValueError("Second is not in range [00, 59].")
self.__orig_second = second
if microsecond is not None:
microsecond = int(microsecond)
if not (0 <= microsecond < 1000000):
raise ValueError("Microsecond is not in range [0, 1000000].")
self.__orig_microsecond = microsecond
if tzinfo is not None:
if not isinstance(tzinfo, datetime.tzinfo):
raise TypeError("tzinfo argument is not of type `datetime.tzinfo`.")
self.__orig_tzinfo = tzinfo
self.iso_implied = None
normal_re = re.compile(r'''(?ax)^T?
(?P<hour>\d{2})(:?(?P<minute>\d{2})(:?(?P<second>\d{2}))?)?
([,.](?P<fraction>\d+))?
((?P<utc>Z)|((?P<tzsign>[+-])(?P<tzhour>\d{2})(:?(?P<tzmin>\d{2}))?))?
$''')
truncated_re = re.compile(r'''(?ax)^T?
(?P<hour>-)
((?P<minute>(-|\d{2})))
(:?(?P<second>\d{2}))?
([,.](?P<fraction>\d+))?
(?P<utc>)(?P<tzsign>)(?P<tzhour>)(?P<tzmin>)
$''')
@classmethod
def parse_iso(cls, value):
if not value.startswith('-'):
mo = cls.normal_re.match(value)
else:
mo = cls.truncated_re.match(value)
if mo is None:
raise ValueError('Invalid representation "{0}".'.format(value))
hour = mo.group("hour")
if hour is not None and hour != '-':
hour = int(hour)
else:
hour = None
minute = mo.group("minute")
if minute is not None and minute != '-':
minute = int(minute)
else:
minute = None
second = mo.group("second")
if second is not None:
second = int(second)
microsecond = None
fraction = mo.group("fraction")
if fraction is not None:
fraction = int(fraction) / 10**len(fraction)
if second is not None:
microsecond = math.floor(fraction * 1000000)
elif minute is not None:
second, microsecond = divmod(fraction * 60 * 1000000, 1000000)
else:
minute, second = divmod(fraction * 60 * 60, 60)
second, microsecond = divmod(second * 1000000, 1000000)
if mo.group("utc") == 'Z':
tzinfo = datetime.timezone.utc
elif mo.group("tzsign"):
offh = int(mo.group("tzhour")) if mo.group("tzhour") else 0
if mo.group("tzsign") == '-':
offh = offh * -1
offm = int(mo.group("tzmin")) if mo.group("tzmin") else 0
td = datetime.timedelta(hours=offh, minutes=offm)
tzinfo = datetime.timezone(td)
else:
tzinfo = None
return isotime(hour, minute, second, microsecond, tzinfo)
def __repr__(self):
"""This includes all the implied values to recreate this object as
it stands."""
fmt = []
fmt.append("hour={0.hour:d}")
fmt.append("minute={0.minute:d}")
fmt.append("second={0.second:d}")
if self.microsecond > 0:
fmt.append("microsecond={0.microsecond:d}")
if self.tzinfo:
fmt.append("tzinfo={0.tzinfo}")
fmt = "isotime({0})".format(', '.join(fmt))
return fmt.format(self)
def __str__(self):
"""This is the same as calling `isoformat`."""
return self.isoformat()
def __bytes__(self):
if self.__bytes is None:
us2, us3 = divmod(self.microsecond, 256)
us1, us2 = divmod(us2, 256)
buf = [self.hour, self.minute, self.second,
us1, us2, us3]
if self.tzinfo:
tzoff = self.tzinfo.utcoffset(self).total_seconds() // 60
buf.extend(divmod(tzoff, 60))
self.__bytes = bytes(buf)
return self.__bytes
__format_re = re.compile(r"""(?ax)^
(?P<align>(?P<fill>.)?[<>=\^])?
(?P<sign>[+\- ])?
(?P<altform>\#)?
(?P<width>(?P<fill0>0)?\d+)?
(?P<comma>,)?
(.(?P<precision>\d+))?
(?P<type>[scow])?
$""");
def __format__(self, format_spec):
mo = self.__format_re.match(format_spec)
if mo.group("sign") is not None:
raise ValueError("Sign not allowed in isodate format specifier.")
preferred_mark = (mo.group("comma") is not None)
basic = (mo.group("altform") is not None)
ftype = mo.group("type")
if ftype is None:
ret = str(self)
elif ftype == 's':
ret = self.isoformat(basic=basic, preferred_mark=preferred_mark)
else:
raise ValueError("Unknown format code '{0}' for object of type 'isotime'.".format(ftype))
if mo.group('precision') is not None:
precision = int(mo.group('precision'))
if len(ret) > precision:
ret = ret[:precision]
if mo.group('width') is not None:
align = mo.group('align')
fill = mo.group('fill')
width = int(mo.group('width')) - len(ret)
if fill is None:
fill = ' '
if width > 0:
if align == '<':
ret = ret + fill * width
elif align == '>' or align is None:
ret = fill * width + ret
elif align == '^':
l = r = width // 2
if l + r < width:
l += 1
ret = fill * l + ret + fill * r
elif align == '=' or mo.group("fill0") is not None:
raise ValueError("'=' alignment not allowed in isodate format specification.")
return ret
def __eq__(self, other):
if not self.__comparable(other):
return NotImplemented
return self.__cmp(other) == 0
def __ne__(self, other):
if not self.__comparable(other):
return NotImplemented
return self.__cmp(other) != 0
def __lt__(self, other):
if not self.__comparable(other):
return NotImplemented
return self.__cmp(other) < 0
def __le__(self, other):
if not self.__comparable(other):
return NotImplemented
return self.__cmp(other) <= 0
def __gt__(self, other):
if not self.__comparable(other):
return NotImplemented
return self.__cmp(other) > 0
def __ge__(self, other):
if not self.__comparable(other):
return NotImplemented
return self.__cmp(other) >= 0
def __comparable(self, other):
return (isinstance(other, isotime) or
isinstance(other, datetime.time) or
isinstance(other, datetime.datetime))
def __cmp(self, other):
lhs = float(self)
if isinstance(other, isotime):
rhs = float(other)
else:
rhs = other.hour * 3600 + other.minute * 60 + other.second + other.microsecond / 1000000
return lhs - rhs
def __hash__(self):
return hash(bytes(self))
def __int__(self):
return int(float(self))
def __float__(self):
return self.hour * 3600 + self.minute * 60 + self.second + self.microsecond / 1000000
def __getstate__(self):
return bytes(self)
def __setstate__(self, state):
if len(state) == 6:
hour, minute, second, us1, us2, us3 = state
tzh = tzm = None
elif len(state) == 8:
hour, minute, second, us1, us2, us3, tzh, tzm = state
else:
raise TypeError("Not enough arguments.")
self.__orig_hour = hour
self.__orig_minute = minute
self.__orig_second = second
self.__orig_microsecond = (us1 * 256 + us2) * 256 + us3
if tzh is not None:
td = datetime.timedelta(hours=tzh, minutes=tzm)
self.__orig_tzinfo = datetime.timezone(td)
else:
self.__orig_tzinfo = None
self.iso_implied = None
@property
def iso_implied(self):
return datetime.time(self.hour, self.minute, self.second,
self.microsecond, self.tzinfo)
@iso_implied.setter
def iso_implied(self, value):
if value is None:
value = datetime.datetime.now()
elif isinstance(value, isotime): #or isinstance(value, isodatetime):
pass
elif isinstance(value, datetime.time) or isinstance(value, datetime.datetime):
pass
elif isinstance(value, str):
value = isotime.parse_iso(value)
elif isinstance(value, int):
value = isotime.fromtimestamp(value)
elif isinstance(value, float):
value = isotime.fromtimestamp(float(value))
elif isinstance(value, time.struct_time):
value = isotime.fromtimestamp(time.mktime(value))
elif isinstance(value, tuple):
value = isotime.fromtimestamp(time.mktime(value))
elif isinstance(value, list):
value = isotime.fromtimestamp(time.mktime(tuple(value)))
elif isinstance(value, dict):
value = isotime.fromtimestamp(time.mktime(tuple(value.values)))
else:
isodate.parse_iso(str(value))
self.__hour = self.__orig_hour if self.__orig_hour is not None else value.hour
self.__minute = self.__orig_minute if self.__orig_minute is not None else value.minute
self.__second = self.__orig_second if self.__orig_second is not None else value.second
self.__microsecond = self.__orig_microsecond if self.__orig_microsecond is not None else 0
self.__tzinfo = self.__orig_tzinfo if self.__orig_tzinfo is not None else value.tzinfo
self.__bytes = None
@property
def hour(self):
return self.__hour
@property
def minute(self):
return self.__minute
@property
def second(self):
return self.__second
@property
def microsecond(self):
return self.__microsecond
@property
def tzinfo(self):
return self.__tzinfo
def replace(self, hour=None, minute=None, second=None, microsecond=None, tzinfo=None):
raise NotImplementedError()
code2fmt = {
# 5.3.1.1
0b11101:"{0.hour:02d}{0.minute:02d}{0.second:02d}{1:.0s}",
0b11100:"{0.hour:02d}:{0.minute:02d}:{0.second:02d}{1:.0s}",
# 5.3.1.2
0b11001:"{0.hour:02d}{0.minute:02d}{1:.0s}",
0b11000:"{0.hour:02d}:{0.minute:02d}{1:.0s}",
0b10001:"{0.hour:02d}{1:.0s}",
# 5.3.1.3
0b11111:"{0.hour:02d}{0.minute:02d}{0.second:02d}{1:.7s}",
0b11110:"{0.hour:02d}:{0.minute:02d}:{0.second:02d}{1:.7s}",
0b11011:"{0.hour:02d}{0.minute:02d}{1:.8s}",
0b11010:"{0.hour:02d}:{0.minute:02d}{1:.9s}",
0b10011:"{0.hour:02d}{1:.11s}",
# 5.3.1.4
0b01101:"-{0.minute:02d}{0.second:02d}{1:.0s}",
0b01100:"-{0.minute:02d}:{0.second:02d}{1:.0s}",
0b01001:"-{0.minute:02d}{1:.0s}",
0b00101:"--{0.second:02d}{1:.0s}",
0b01111:"-{0.minute:02d}{0.second:02d}{1:.7s}",
0b01110:"-{0.minute:02d}:{0.second:02d}{1:.7s}",
0b01011:"-{0.minute:02d}{1:.9s}",
0b00111:"--{0.second:02d}{1:.7s}",
}
def isoformat(self, basic=False, reduced=None, truncated=None,
fraction=True, preferred_mark=False, timezone=True):
"""Return a string representing the time in ISO 8601 format.
If `basic` is true then the [:](colon) is ommitted from the
representation, otherwise (default) it will separate the parts
of the representation.
For reduced precision set `reduced` to the last part to include
(`None` means no reduced precision):
- `SECOND` for a complete representation §5.3.1.1.
- `MINUTE` for a specific minute §5.3.1.2 (a).
- `HOUR` for a specific hour §5.3.1.2 (b).
For truncated representations set `truncated` to the first part
to include (`None` means no truncation):
- `HOUR` for a complete representation §5.3.1.1.
- `MINUTE` for an implied hour §5.3.1.4 (a).
- `reduced=MINUTE` for specific minute §5.3.1.4 (b)
- `SECOND` for an implied minute §5.3.1.4 (c).
The fraction will always be shown unless `fraction` is set to
false. So if a reduced precision is chosen then the fraction of
that precision will be added (e.g. if HOUR is chosen then the
fractions of the hour will be added).
When a fraction is added the mark will be a period [.], but the
`preferred_mark` according to ISO 8601 is a comma [,].
If the `tzinfo` is available then it will be added to the
representation unless `timezone` is set to false. If `tzinfo` is
UTC then [Z] will be added instead of [+00:00].
This will duck type to `datetime.date.isoformat()` if called with
no arguments.
"""
if reduced is None:
reduced = (0b11101, 0b11101)
if truncated is None:
truncated = (0b11101, 0b11101)
if fraction:
fraction = ""
if reduced[0] & 0b00100 != 0:
fraction = self.microsecond / 1000000
fraction = "{0:f}".format(fraction)
elif reduced[0] & 0b01000 != 0:
fraction = (self.second + self.microsecond / 1000000) / 60
fraction = "{0:f}".format(fraction)
elif reduced[0] & 0b10000 != 0:
fraction = (self.minute + (self.second + self.microsecond / 1000000) / 60) / 60
fraction = "{0:f}".format(fraction)
fraction = fraction[2:]
if preferred_mark:
fraction = ',' + fraction
else:
fraction = '.' + fraction
else:
fraction = ""
code = reduced[0] & truncated[1]
if not basic:
code = code & 0b11110
if fraction and self.microsecond > 0:
code = code | 0b00010
if not basic and code not in self.code2fmt:
code = code | 0b00001
#print()
#print("redu {0:05b}".format(reduced[0]))
#print("truc {0:05b}".format(truncated[1]))
#print("code {0:05b}".format(code))
#print("real {0:05b}".format(0b11101))
if code not in self.code2fmt:
__log__.debug("{0:05b}".format(code))
raise ValueError("Invalid ISO 8601 time representation.")
fmt = self.code2fmt[code]
ret = fmt.format(self, fraction)
if fraction and self.microsecond > 0:
while ret.endswith('0'):
ret = ret[:-1]
if timezone and self.tzinfo:
td = self.tzinfo.utcoffset(None)
tzhour, tzminute = divmod(td.seconds // 60, 60)
if tzhour > 12:
tzhour -= 24
if tzhour == 0 and tzminute == 0:
tzone = "Z"
elif basic: #code & 0b00001:
tzone = "{0:+03d}{1:02d}".format(tzhour, tzminute)
else:
tzone = "{0:+03d}:{1:02d}".format(tzhour, tzminute)
ret = ret + tzone
return ret
def strftime(self, format):
tt = (1900, 1, 1, self.hour, self.minute, self.second, 0, 1, -1)
return time.strftime(format, tt)
def utcoffset(self):
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
self.__check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(None)
self.__check_tzname(name)
return name
    def dst(self):
        """Return the DST offset from the attached tzinfo, or None if naive."""
        if self.__tzinfo is None:
            return None
        offset = self.__tzinfo.dst(None)
        # NOTE(review): __check_utc_offset references a bare `timedelta` that
        # this module never imports, so any non-None offset raises NameError
        # there -- see that method.
        self.__check_utc_offset("dst", offset)
        return offset
def __check_utc_offset(self, name, offset):
assert name in ("utcoffset", "dst")
if offset is None:
return
if not isinstance(offset, timedelta):
raise TypeError("tzinfo.%s() must return None "
"or timedelta, not '%s'" % (name, type(offset)))
if offset % timedelta(minutes=1) or offset.microseconds:
raise ValueError("tzinfo.%s() must return a whole number "
"of minutes, got %s" % (name, offset))
if not -timedelta(1) < offset < timedelta(1):
raise ValueError("%s()=%s, must be must be strictly between"
" -timedelta(hours=24) and timedelta(hours=24)"
% (name, offset))
    def __check_tzname(self, name):
        """Validate a tzinfo-supplied name: it must be None or a str."""
        if name is not None and not isinstance(name, str):
            raise TypeError("tzinfo.tzname() must return None or string, "
                            "not '%s'" % type(name))
# Class-level bounds, mirroring datetime.time: the earliest and latest
# representable isotime, and the smallest representable time difference.
isotime.min = isotime(0, 0, 0)
isotime.max = isotime(23, 59, 59, 999999)
isotime.resolution = datetime.timedelta(microseconds=1)
|
""" Main script to run ETL functions. """
import scripts.etl.extract as extract
import scripts.etl.transform as transform
def main():
    """Run the full ETL pipeline: download/unzip the Kaggle dataset, then clean it.

    Wrapped in main() so the pipeline can be imported and invoked
    programmatically instead of living inline under the __main__ guard.
    """
    # Extract: fetch and unpack the raw dataset.
    extract.load_kaggle_envars()
    extract.download_dataset_from_kaggle_as_zip()
    extract.unzip_dataset()
    # Transform: normalize files, then drop broken/duplicate rows per table.
    transform.remove_non_ascii_from_datasets()
    transform.rename_files()
    dataframes = transform.load_datasets(drop_na=True)
    transform.remove_duplicates_from_geolocation(dataframes)
    transform.remove_missing_geolocation_primary_keys(dataframes)
    transform.remove_missing_customers_primary_keys(dataframes)
    transform.remove_missing_orders_primary_keys(dataframes)
    transform.remove_missing_sellers_primary_keys(dataframes)
    transform.remove_missing_products_primary_keys(dataframes)
    transform.replace_products_product_category_name(dataframes)


if __name__ == "__main__":
    main()
|
import urllib2
from BeautifulSoup import BeautifulSoup
from xml.dom.minidom import getDOMImplementation
times_url = 'http://times.kaist.ac.kr/news/articleList.html?sc_section_code=S1N1'
impl = getDOMImplementation()
newdoc = impl.createDocument(None, "articles", None)
top_element = newdoc.documentElement
print 'Looking up', times_url
print
f = urllib2.urlopen(times_url)
html_data = unicode(f.read(), 'euc-kr')
soup = BeautifulSoup(html_data)
elems = soup.findAll('td',width="380",height="26",align="left")
for elem in elems:
category = elem.findAll('span')[0].font.string
title = elem.findAll('span')[1].a.font.string
written_by = elem.parent.find('td', width="100", align="center").span.font.string
date = elem.parent.find('td', width="80", align="center").span.font.string
print category, title, written_by, date
article = newdoc.createElement('article')
top_element.appendChild(article)
category_elem = newdoc.createElement('category')
article.appendChild(category_elem)
category_text = newdoc.createTextNode(category)
category_elem.appendChild(category_text)
title_elem = newdoc.createElement('title')
article.appendChild(title_elem)
title_text = newdoc.createTextNode(title)
title_elem.appendChild(title_text)
writtenby_elem = newdoc.createElement('writtenby')
article.appendChild(writtenby_elem)
writtenby_text = newdoc.createTextNode(written_by)
writtenby_elem.appendChild(writtenby_text)
date_elem = newdoc.createElement('date')
article.appendChild(date_elem)
date_text = newdoc.createTextNode(date)
date_elem.appendChild(date_text)
xml_f_name = 'timeswrapper.xml'
xml_f = open(xml_f_name, 'w')
xml_f.write(newdoc.toprettyxml(indent=" ").encode('utf-8'))
xml_f.close()
print 'Produced an XML file named representing the results', xml_f_name
|
import urllib2, socket, json, subprocess, time
DEBUG=True
# HOST/IP are filled in from the command line in the __main__ block below.
HOST=None
IP=None
from secrets import MAYA_NS
# MAYA_NS unpacks to (nameserver, default domain, TSIG key) — see the secrets module.
SERVER, DEFAULT_DOMAIN, KEY = MAYA_NS
# Public-IP discovery services, tried in order: (url, JSON key holding the address).
IP_SERVERS=(('http://aislynn.net/ip.cgi', 'ip'), ('http://jsonip.com','ip'),('http://ip-api.com/json','query'),('http://api.ipify.org/?format=json','ip'),('http://wtfismyip.com/json',"YourFuckingIPAddress"))
def nslookup(host, server):
    """Resolve *host* by running the system ``nslookup`` against *server*.

    Parses the tool's text output: the resolved address follows the last
    "Address:" marker, the queried name precedes it.  Returns the IP string,
    or None when the reported name does not match *host*.
    """
    proc = subprocess.Popen(["nslookup", host, server], close_fds=True,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    output = proc.communicate()[0]
    chunks = output.split("Address:")
    resolved_host = chunks[-2].strip().split()[-1]
    resolved_ip = chunks[-1].strip()
    return resolved_ip if resolved_host == host else None
def updateNS(server, host, current_ip = None, secret=KEY):
    """Point *host*'s A record at *current_ip* via dynamic DNS (nsupdate).

    When current_ip is None it is discovered from the public IP services in
    IP_SERVERS.  Returns the nsupdate output ('' on success), or None when
    DNS already matched and no update was sent.
    """
    result = None
    httpResponse = None
    if current_ip is None:
        # Discover our public IP: try each service in turn, up to 4 rounds.
        for retries in xrange(4,0,-1):
            for url,attr in IP_SERVERS:
                try:
                    if DEBUG: print "TRY", url
                    httpResponse = urllib2.urlopen(urllib2.Request(url), timeout=5).read()
                    break
                except:
                    if DEBUG: print "RETRY", url
                    time.sleep(0.50)
                    continue
            if httpResponse is not None:
                break
        if httpResponse is None:
            raise Exception("could not find IP address from any server")
        # 'attr' is left bound to the JSON key of the service that answered.
        jsonData = json.loads(httpResponse)
        current_ip = jsonData[attr]
    # What does DNS currently say? Prefer an authoritative nslookup.
    try:
        named_ip = nslookup(host,server)
        if DEBUG: print "NSLOOKUP",named_ip
    except:
        named_ip = None
    if named_ip is None:
        # this is not as reliable due to caching on many systems
        try:
            named_ip = socket.gethostbyname(host)
            if DEBUG: print "GETHOSTNAME",named_ip
        except:
            named_ip = None
    if named_ip != current_ip:
        # Zone is everything after the first label of the host name.
        zone = host.split('.',1)[1]
        print "Updating",host,current_ip,named_ip
        # Feed an nsupdate script: delete the old A record, add the new one.
        op = "server %s\nzone %s\nkey %s %s\nupdate delete %s\nupdate add %s 86400 A %s\nsend\n\n" %(server,zone,zone,secret,host,host,current_ip)
        p= subprocess.Popen(["nsupdate"], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        p.stdin.write(op)
        result = p.communicate()[0]
        # nsupdate prints nothing on success; any output likely means failure.
        if DEBUG and result != '': print "FAIL?=",result
    elif DEBUG:
        print "No update necessary"
    return result
if __name__ == "__main__":
    import sys
    # Usage: [-debug] [ip] [host] — an argument starting with a digit is
    # taken as the IP, anything else as the host name.
    DEBUG = "-debug" in sys.argv
    argv = [a for a in sys.argv[1:] if not a.startswith("-")]
    for a in argv:
        if a[0] in "0123456789":
            IP = a
        else:
            HOST=a
    if HOST is None:
        # Default host: <local short hostname>.<default domain>
        HOST=socket.gethostname().split('.')[0] + "." + DEFAULT_DOMAIN
        if DEBUG: print "DEFAULT HOST:", HOST
    updateNS(SERVER,HOST,IP)
|
import torch, torchvision
from detectron2.utils.logger import setup_logger
setup_logger()
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
import os, cv2
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import sys
def get_masked_imgs(img, masks=None, boxes=None, box_scale=-1):
imtype = img.dtype
height = img.shape[0]
width = img.shape[1]
# pixel masks
pms = []
if masks is not None:
for mask in masks:
pm = img.copy()
pm[mask == 0] = 0
pms.append(pm)
# bounding box masks
bbms = []
if boxes is not None:
for box in boxes:
# x_bl, y_bl, x_tr, y_tr = box
x_1, y_1, x_2, y_2 = box
x_bl, y_bl, x_tr, y_tr = x_1, height-y_2, x_2, height-y_1
# scale box
if box_scale > 0:
box_width, box_height = x_tr-x_bl, y_tr-y_bl
width_delta, height_delta = (box_scale-1)*box_width/2, (box_scale-1)*box_height/2
x_bl, y_bl, x_tr, y_tr = x_bl-width_delta, y_bl-height_delta, x_tr+width_delta, y_tr+height_delta
# clip values
[x_bl, x_tr] = np.clip([x_bl, x_tr], 0, width-1)
[y_bl, y_tr] = np.clip([y_bl, y_tr], 0, height-1)
# create bounding box
bbm = np.zeros(img.shape, dtype=imtype)
bbm[int(height-y_tr):int(height-y_bl), int(x_bl):int(x_tr), :] = img[int(height-y_tr):int(height-y_bl), int(x_bl):int(x_tr), :]
bbms.append(bbm)
return pms, bbms
class Detectron2:
    """Wrapper around a detectron2 DefaultPredictor that finds one COCO class
    (item_id defaults to 11 — presumably stop signs given detect_stop_signs;
    TODO confirm against the COCO label map) in a folder of images and saves
    masked copies of each confident detection.
    """

    def __init__(self, model='mask-rcnn', model_yaml=None):
        """Load a model-zoo model.

        model: shorthand, 'mask-rcnn' or 'faster-rcnn' (ignored when
        model_yaml is given).  model_yaml: explicit model-zoo config path.
        Exits the process on an invalid model choice.
        """
        if not isinstance(model_yaml, str):
            if isinstance(model, str) and (model.lower() == 'mask-rcnn'):
                model_yaml = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
                print("[Detectron2] Loading model: MASK R-CNN (R-50+FPN+3x)")
            elif isinstance(model, str) and (model.lower() == 'faster-rcnn'):
                model_yaml = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
                print("[Detectron2] Loading model: FASTER R-CNN (R-50+FPN+3x)")
            else:
                print("[Detectron2] Invalid model choice!")
                exit(0)
        else:
            print(f"[Detectron2] Loading model: {model_yaml}")
        cfg = get_cfg()
        if torch.cuda.is_available():
            cfg.MODEL.DEVICE = 'cuda'
            print("[Detectron2] Use GPU")
        else:
            cfg.MODEL.DEVICE = 'cpu'
            print("[Detectron2] Use CPU")
        # add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
        cfg.merge_from_file(model_zoo.get_config_file(model_yaml))
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
        # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_yaml)
        self.predictor = DefaultPredictor(cfg)
        print("[Detectron2] Model loaded!")
        self.img_root = None
        self.img_dict = dict() # {name: tag}
        self.model_yaml = model_yaml

    def load_images(self, img_root):
        """Index the files in img_root into img_dict as {basename: extension}.

        Exits the process when img_root is not a directory.
        """
        self.img_root = img_root
        # read images
        if os.path.isdir(img_root):
            img_names = sorted(os.listdir(img_root))
            if '.DS_Store' in img_names:
                img_names.remove('.DS_Store')
            cnt = 0
            for img_name in tqdm(img_names, desc='loading images'):
                img_path = img_root + '/' + img_name
                if os.path.isfile(img_path):
                    cnt += 1
                    # Split at the extension dot within the last 5 characters.
                    # NOTE(review): a file with no extension gives sep == -1
                    # and a mangled key — confirm all inputs have extensions.
                    sep = img_name.find('.', -5)
                    self.img_dict[img_name[:sep]] = img_name[sep+1:]
            print(f"[Detectron2.load_images] {cnt} images are loaded.")
        else:
            print(f"[Detectron2.load_images] {img_root} is not a folder!")
            exit(0)

    def instance_segmentation(self, save_root, box_summary_dir=None, pixel_summary_dir=None, box_scale=-1, item_id=11):
        """Segment every loaded image; save pixel- and box-masked copies of
        each detection of class item_id with score > 0.95, optionally
        appending produced names to the summary files.
        """
        # check save root
        if not os.path.isdir(save_root):
            os.mkdir(save_root)
        # Truncate summary files up front; disable a summary if its path is bad.
        if pixel_summary_dir is not None:
            try:
                f = open(f'{pixel_summary_dir}', 'w')
                f.close()
            except:
                print(f"[Detectron2.instance_segmentation] pixel_summary_dir '{pixel_summary_dir}' is invalid!")
                pixel_summary_dir = None
        if box_summary_dir is not None:
            try:
                f = open(f'{box_summary_dir}', 'w')
                f.close()
            except:
                print(f"[Detectron2.instance_segmentation] box_summary_dir '{box_summary_dir}' is invalid!")
                box_summary_dir = None
        # predict
        num = 0
        num_box = 0
        num_pixel = 0
        for img_name, tag in tqdm(self.img_dict.items(), desc='instance segmentation'):
            im = cv2.imread(f'{self.img_root}/{img_name}.{tag}') # BGR
            outputs = self.predictor(im)
            # Instances that are both the wanted class and highly confident.
            idx = np.intersect1d(np.where(outputs["instances"].to("cpu").pred_classes.numpy()==item_id), np.where(outputs["instances"].to("cpu").scores.numpy()>0.95))
            if len(idx) == 0: # no items detected
                tqdm.write(f"[Detectron2.instance_segmentation] {img_name}.{tag} not detected!")
                continue
            box = outputs["instances"].to("cpu").pred_boxes.tensor.numpy()[idx, :]
            mask = outputs["instances"].to("cpu").pred_masks.numpy()[idx, :, :]
            # Channel reversal converts BGR<->RGB on the way in and out.
            pms, bbms = get_masked_imgs(im[:, :, ::-1], list(mask), list(box), box_scale)
            if pixel_summary_dir is not None:
                for idx, pm in enumerate(pms):
                    # plt.imsave(f'{save_root}/{img_name}-{idx}pixel.{tag}', pm)
                    cv2.imwrite(f'{save_root}/{img_name}-{idx}pixel.{tag}', pm[:, :, ::-1])
                    with open(f'{pixel_summary_dir}', 'a') as f:
                        f.write(f'{img_name}-{idx}\n')
            if box_summary_dir is not None:
                for idx, bbm in enumerate(bbms):
                    # plt.imsave(f'{save_root}/{img_name}-{idx}box.{tag}', bbm)
                    cv2.imwrite(f'{save_root}/{img_name}-{idx}box.{tag}', bbm[:, :, ::-1])
                    with open(f'{box_summary_dir}', 'a') as f:
                        f.write(f'{img_name}-{idx}\n')
            # count
            num += 1
            num_box += len(bbms)
            num_pixel += len(pms)
        print(f"[Detectron2.instance_segmentation] {num} images | {num_box} bounding boxes | {num_pixel} pixel-wised masks")

    def object_detection(self, save_root, box_summary_dir=None, box_scale=-1, item_id=11):
        """Detect every loaded image; save box-masked copies of each detection
        of class item_id with score > 0.95, optionally appending produced
        names to the box summary file.
        """
        # check save root
        if not os.path.isdir(save_root):
            os.mkdir(save_root)
        # Truncate the summary file up front; disable it if the path is bad.
        if box_summary_dir is not None:
            try:
                f = open(f'{box_summary_dir}', 'w')
                f.close()
            except:
                print(f"[Detectron2.object_detection] box_summary_dir '{box_summary_dir}' is invalid!")
                box_summary_dir = None
        # predict
        num = 0
        num_box = 0
        for img_name, tag in tqdm(self.img_dict.items(), desc='object detection'):
            im = cv2.imread(f'{self.img_root}/{img_name}.{tag}') # BGR
            outputs = self.predictor(im)
            # Instances that are both the wanted class and highly confident.
            idx = np.intersect1d(np.where(outputs["instances"].to("cpu").pred_classes.numpy()==item_id), np.where(outputs["instances"].to("cpu").scores.numpy()>0.95))
            if len(idx) == 0: # no items detected
                tqdm.write(f"[Detectron2.object_detection] {img_name}.{tag} not detected!")
                continue
            box = outputs["instances"].to("cpu").pred_boxes.tensor.numpy()[idx, :]
            pms, bbms = get_masked_imgs(im[:, :, ::-1], list(), list(box), box_scale)
            if box_summary_dir is not None:
                for idx, bbm in enumerate(bbms):
                    # plt.imsave(f'{save_root}/{img_name}-{idx}box.{tag}', bbm)
                    cv2.imwrite(f'{save_root}/{img_name}-{idx}box.{tag}', bbm[:, :, ::-1])
                    with open(f'{box_summary_dir}', 'a') as f:
                        f.write(f'{img_name}-{idx}\n')
            # count
            num += 1
            num_box += len(bbms)
        print(f"[Detectron2.object_detection] {num} images | {num_box} bounding boxes")

    def detect_stop_signs(self, img_root, save_root, box_summary_dir=None, pixel_summary_dir=None, box_scale=-1):
        """Load images from img_root and run segmentation or detection,
        dispatching on the kind of model this instance was built with.
        """
        # load images
        self.load_images(img_root)
        # detect
        if self.model_yaml.find('InstanceSegmentation') != -1:
            self.instance_segmentation(save_root, box_summary_dir, pixel_summary_dir, box_scale)
        elif self.model_yaml.find('Detection') != -1:
            self.object_detection(save_root, box_summary_dir, box_scale)
        else:
            raise NotImplementedError(self.model_yaml)
if __name__ == '__main__':
    # CLI: <img_root> <save_root> <box_summary_dir> <model> <box_scale>
    img_root = sys.argv[1]
    save_root = sys.argv[2]
    box_summary_dir = sys.argv[3]
    pixel_summary_dir = None  # pixel-mask summary is not exposed on the CLI
    detector = Detectron2(model=sys.argv[4])
    box_scale = float(sys.argv[5])
    detector.detect_stop_signs(img_root, save_root, box_summary_dir, pixel_summary_dir, box_scale)
|
from django.forms import ModelForm
from crispy_forms.bootstrap import FormActions, AppendedText
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Field, Submit
from . import models
class FundingPromiseForm(ModelForm):
    """Model form for a funding promise, rendered with a crispy-forms layout:
    hidden project, name/email, CHF-suffixed amount, and a datepicker-equipped
    expiry date followed by the submit button.
    """

    class Meta:
        model = models.FundingPromise
        fields = ('project', 'name', 'email', 'amount', 'expiry_date')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        helper = FormHelper()
        helper.html5_required = True
        expiry_field = AppendedText(
            'expiry_date',
            '<span class="glyphicon glyphicon-calendar"></span>',
            template='crowdfund/datepickerfield.html',
        )
        helper.layout = Layout(
            Field('project', type='hidden'),
            'name',
            'email',
            AppendedText('amount', 'CHF'),
            expiry_field,
            FormActions(
                Submit('submit', 'Angebot übermitteln')
            ),
        )
        self.helper = helper
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
import pandas as pd

print("For data integration:")
print("Be sure to put in the same folder of this script GoldMatchesDataset.csv and ChallengerMatchesDataset")

# Load both rank datasets and drop duplicate games.
# Bug fix: drop_duplicates() returns a new frame — the original discarded the
# result, so duplicates were never actually removed.
df1 = pd.read_csv('GoldMatchesDataset.csv')
df1 = df1.drop_duplicates(subset=['gameId'])
df2 = pd.read_csv('ChallengerMatchesDataset.csv')
df2 = df2.drop_duplicates(subset=['gameId'])
dfTot = pd.concat([df1, df2])

# Shuffle the combined dataset.
dfTot = dfTot.sample(frac=1)

# Balance the classes: remove surplus rows of the majority BLUE_WIN class.
win = dfTot[dfTot["BLUE_WIN"] == 0]["BLUE_WIN"].count()
lose = dfTot[dfTot["BLUE_WIN"] == 1]["BLUE_WIN"].count()
index = dfTot.index
if win > lose:
    condition = (dfTot["BLUE_WIN"] == 0)
    diff = win - lose
else:
    condition = (dfTot["BLUE_WIN"] == 1)
    diff = lose - win
sel_indices_list = index[condition].tolist()
# Drop the last `diff` indices of the majority class (empty when balanced).
# The guard avoids the [-0:] pitfall, which would select the whole list.
rm_idx = sel_indices_list[-diff:] if diff > 0 else []
update_df = dfTot.drop(rm_idx)

# Sanity check: both classes should now have equal counts.
print(update_df[update_df["BLUE_WIN"] == 0]["BLUE_WIN"].count())
print(update_df[update_df["BLUE_WIN"] == 1]["BLUE_WIN"].count())
update_df.to_csv('RawDataset.csv', index=False)

# Bug fix: removed the leftover notebook cell `dfTot["gameId"==4767097992]` —
# "gameId"==4767097992 evaluates to False, so the expression raised KeyError
# at runtime.  A real lookup would be dfTot[dfTot["gameId"] == 4767097992].
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
guaDictateTool
An dictate tool for LTH.
Author: Yiqin Xiong
Create: August 2021
"""
import os
import random
import sys
import time
from shutil import copyfile
from PyQt5.QtGui import QIcon, QCursor, QKeySequence
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QFileDialog, QHeaderView, QTableWidgetItem, \
QAbstractItemView, QMenu, QUndoStack, QUndoCommand, QItemDelegate
from guaWindow import Ui_MainWindow
from PyQt5.QtCore import Qt, pyqtSlot, QTimer
import sqlite3
class MWindow(QMainWindow, Ui_MainWindow):
    def __init__(self):
        """Build the main window: widgets, dialogs, shortcuts, undo stacks,
        default state, save-file location, signal/slot wiring, and the
        initial refresh of every tab.
        """
        super(MWindow, self).__init__()
        # about UI
        self.setupUi(self)
        self.setWindowIcon(QIcon(':/icon/icon.png'))
        self.setWindowTitle('LTH的单词听写机')
        self.tabWidget.setCurrentIndex(0)
        self.save_box = QMessageBox(QMessageBox.Warning, '错误,找不到存档', '找不到本地存档,请选择:')
        self.import_box = QMessageBox(QMessageBox.Information, '选择导入方式', '你想以哪种方式导入,请选择:')
        self.import_from_excel = self.import_box.addButton('从Excel导入', QMessageBox.ActionRole)
        self.import_from_db = self.import_box.addButton('从.db文件导入', QMessageBox.ActionRole)
        self.import_from_new = self.import_box.addButton('新建存档', QMessageBox.ActionRole)
        self.import_cancel = self.import_box.addButton('取消', QMessageBox.RejectRole)
        self.import_box.setDefaultButton(self.import_cancel)
        self.tableWidget_add_word.setContextMenuPolicy(Qt.CustomContextMenu)
        self.tableWidget_notebook.setContextMenuPolicy(Qt.CustomContextMenu)
        self.pushButton_search_word.setShortcut(Qt.Key_Return)
        self.pushButton_add_word_save.setShortcut(QKeySequence.Save)
        self.pushButton_get_answer.setShortcut(QKeySequence.Cut)
        self.pushButton_add_to_notebook.setShortcut(QKeySequence.SelectAll)
        self.pushButton_next_word.setShortcut(QKeySequence.New)
        self.pushButton_undo.setShortcut(QKeySequence.Undo)
        self.pushButton_redo.setShortcut(QKeySequence.Redo)
        # self.pushButton_notebook_undo.setShortcut(QKeySequence.Undo)
        # self.pushButton_notebook_redo.setShortcut(QKeySequence.Redo)
        self.tableWidget_add_word.horizontalHeader().setVisible(True)
        self.tableWidget_add_word.verticalHeader().setVisible(True)
        self.timer = QTimer()
        # Dictation widgets start disabled until a session begins.
        self.label_word.setDisabled(True)
        self.label_attr.setDisabled(True)
        self.label_chinese.setDisabled(True)
        self.pushButton_get_answer.setDisabled(True)
        self.pushButton_add_to_notebook.setDisabled(True)
        self.pushButton_next_word.setDisabled(True)
        self.progressBar_finish.setValue(0)
        self.lineEdit_input_attr.setDisabled(True)
        self.lineEdit_input_chinese.setDisabled(True)
        # Make notebook columns 1-5 read-only via an empty delegate.
        for i in range(1, 6):
            self.tableWidget_notebook.setItemDelegateForColumn(i, EmptyDelegate(self))
        # private variables
        # Save file: Documents on Windows, iCloud Drive on macOS;
        # NOTE(review): on other POSIX systems it stays relative to the cwd.
        self.db = 'guaDictateTool_save.db'
        if os.name == 'nt':
            self.db = os.path.expanduser(os.path.join('~\\Documents', self.db))
        elif os.uname()[0] == 'Darwin':
            self.db = os.path.expanduser(
                os.path.join('~/Library/Mobile Documents/com~apple~CloudDocs/', self.db))
        self.undo_stack = QUndoStack()
        self.undo_stack_notebook = QUndoStack()
        # (row, col, text) captured on double-click, consumed by itemChanged.
        self.previous_cell_text = None
        self.previous_cell_text_notebook = None
        self.dict_time = 30  # default: 30 seconds of thinking time per word
        self.choices = None
        self.cur_dict_idx = 0
        self.in_dictating = False
        # init actions
        self.undo_action = self.undo_stack.createUndoAction(self, '撤销')
        self.redo_action = self.undo_stack.createRedoAction(self, '重做')
        # self.undo_action_notebook = self.undo_stack_notebook.createUndoAction(self, '撤销')
        # self.redo_action_notebook = self.undo_stack_notebook.createRedoAction(self, '重做')
        self.addAction(self.undo_action)
        self.addAction(self.redo_action)
        # self.addAction(self.undo_action_notebook)
        # self.addAction(self.redo_action_notebook)
        # connect SIGNALS and SLOTS
        self.tableWidget_add_word.customContextMenuRequested.connect(self.tableWidget_add_word_showMenu)
        self.tableWidget_notebook.customContextMenuRequested.connect(self.tableWidget_notebook_showMenu)
        self.tabWidget.currentChanged.connect(self.tabWidget_currentChanged)
        self.pushButton_add.clicked.connect(self.tableWidget_add_word_insert_behind)
        self.pushButton_remove.clicked.connect(self.tableWidget_add_word_delete_selected)
        self.pushButton_add_word_save.clicked.connect(self.pushButton_add_word_save_clicked)
        self.pushButton_undo.clicked.connect(self.undo_action.trigger)
        self.pushButton_redo.clicked.connect(self.redo_action.trigger)
        # self.pushButton_notebook_undo.clicked.connect(self.undo_action_notebook.trigger)
        # self.pushButton_notebook_redo.clicked.connect(self.redo_action_notebook.trigger)
        # self.tableWidget_add_word.currentItemChanged.connect(
        #     self.tableWidget_add_word_currentItemChanged)
        # self.tableWidget_add_word.dataChanged.connect(self.tableWidget_add_word_currentItemChanged)
        # self.tableWidget_add_word.clicked.connect(self.tableWidget_add_word_clicked)
        # self.tableWidget_add_word.itemDoubleClicked.connect(self.tableWidget_add_word_itemDoubleClicked)
        self.tableWidget_add_word.cellDoubleClicked.connect(self.tableWidget_add_word_cellDoubleClicked)
        self.tableWidget_add_word.itemChanged.connect(self.tableWidget_add_word_itemChanged)
        self.pushButton_search_word.clicked.connect(self.pushButton_search_word_clicked)
        self.comboBox_year.activated.connect(self.pushButton_search_word_clicked)
        self.comboBox_lesson.activated.connect(self.pushButton_search_word_clicked)
        self.pushButton_history.clicked.connect(self.pushButton_history_clicked)
        self.listWidget_history.itemClicked.connect(self.listWidget_history_itemClicked)
        self.pushButton_range_start1.clicked.connect(self.pushButton_range_start1_clicked)
        self.pushButton_range_start2.clicked.connect(self.pushButton_range_start2_clicked)
        self.timer.timeout.connect(self.timer_timeout)
        self.pushButton_get_answer.clicked.connect(self.pushButton_get_answer_clicked)
        self.pushButton_add_to_notebook.clicked.connect(self.pushButton_add_to_notebook_clicked)
        self.pushButton_next_word.clicked.connect(self.pushButton_next_word_clicked)
        self.tableWidget_notebook.cellDoubleClicked.connect(self.tableWidget_notebook_cellDoubleClicked)
        self.tableWidget_notebook.itemChanged.connect(self.tableWidget_notebook_itemChanged)
        self.pushButton_import.clicked.connect(self.import_excel)
        self.pushButton_export.clicked.connect(self.export_excel)
        # actions after init
        self._check_db_exist()
        self._flush_tab_1()
        self._flush_tab_2()
        self._flush_tab_3()
        self._flush_tab_4()
    # Check whether the local save database exists.
    def _check_db_exist(self):
        """On first run (no save file), ask the user: import from Excel,
        import a .db file, create a new save, or quit."""
        if not os.path.exists(self.db):
            from_excel = self.save_box.addButton('从Excel导入', QMessageBox.ActionRole)
            from_db = self.save_box.addButton('从.db文件导入', QMessageBox.ActionRole)
            from_new = self.save_box.addButton('新建存档', QMessageBox.ActionRole)
            cancel = self.save_box.addButton('退出', QMessageBox.RejectRole)
            self.save_box.setDefaultButton(cancel)
            self.save_box.exec_()
            if self.save_box.clickedButton() == cancel:
                sys.exit(0)
            elif self.save_box.clickedButton() == from_db:
                # NOTE(review): if this file dialog is cancelled, no db is
                # created at all — confirm the intended behavior.
                file_name = QFileDialog.getOpenFileName(self, '选取词典数据库文件', '', 'SQLite Database(*.db)')
                if file_name[0]:
                    copyfile(file_name[0], self.db)
            else:
                # 'from Excel' and 'new save' both start from a fresh database.
                create_new_db(self.db)
            if self.save_box.clickedButton() == from_excel:
                file_name = QFileDialog.getOpenFileName(self, '选取Excel文件', '', 'Excel(*.xls *.xlsx)')
                if file_name[0]:
                    self._import_excel_to_sqlite(file_name[0], self.db)
    # Import the contents of an Excel workbook into the sqlite database.
    def _import_excel_to_sqlite(self, excel_file_path, db_name):
        """Read sheet 0 as dict rows (must have 5 columns) and, when present,
        sheet 1 as notebook rows, then REPLACE them into the database."""
        # Read the Excel contents.
        import xlrd
        data = xlrd.open_workbook(excel_file_path)
        dict_sheet = data.sheet_by_index(0)
        if dict_sheet.ncols != 5:
            QMessageBox.warning(self, 'excel解析错误', '此excel格式不符,请检查')
            return
        # Skip the header row (row 0).
        dict_data = [tuple(dict_sheet.row_values(row_idx)) for row_idx in range(1, dict_sheet.nrows)]
        if len(data.sheets()) == 2:
            notebook_sheet = data.sheet_by_index(1)
            notebook_data = [tuple(notebook_sheet.row_values(row_idx)) for row_idx in range(1, notebook_sheet.nrows)]
        # Write into the database.
        conn = self._connect_to_db(db_name)
        cur = conn.cursor()
        try:
            cur.executemany("REPLACE INTO dict VALUES (?,?,?,?,?)", dict_data)
            if len(data.sheets()) == 2:
                cur.executemany("REPLACE INTO notebook VALUES (?,?)", notebook_data)
            conn.commit()
        except Exception as e:
            print(f'_import_excel_to_sqlite: {e}')
            conn.rollback()
        finally:
            cur.close()
            conn.close()
    # Run a read (SELECT) query against sqlite.
    def _get_sql_data(self, sql_query):
        """Execute sql_query and return all rows; returns [] on any error."""
        conn = self._connect_to_db(self.db)
        cur = conn.cursor()
        try:
            cur.execute(sql_query)
            data = cur.fetchall()
        except Exception as e:
            print(f'_get_sql_data: {e}')
            data = []
        finally:
            cur.close()
            conn.close()
        return data
    # Run a write (INSERT/DELETE/UPDATE) query against sqlite.
    def _change_sql_data(self, sql_query):
        """Execute sql_query and commit; rolls back on any error."""
        conn = self._connect_to_db(self.db)
        cur = conn.cursor()
        try:
            cur.execute(sql_query)
            conn.commit()
        except Exception as e:
            print(f'_change_sql_data: {e}')
            conn.rollback()
        finally:
            cur.close()
            conn.close()
    # Connect to sqlite and return the connection.
    def _connect_to_db(self, db_name):
        """Return a sqlite3 connection to db_name, or None when self.db is empty."""
        if self.db == "":
            QMessageBox.warning(self, '打开词典数据库失败', '数据库文件路径为空')
            return None
        # Open the SQLite database file.
        conn = sqlite3.connect(db_name)
        return conn
    # Run the search-tab query against sqlite.
    def _search_by_condition(self, year, text, keyword):
        """Fill the search table with dict rows filtered by year/text ('不限'
        means unrestricted) and a LIKE match on keyword.

        NOTE(review): the query is built by f-string interpolation of
        UI-supplied values — vulnerable to SQL injection / broken by quotes;
        consider parameterized queries (? placeholders).
        """
        # Clear the previous table contents.
        self.tableWidget_search_word.clearContents()
        # Build the SQL statement for the given filter combination.
        if year == '不限' and text == '不限':
            query = f"SELECT * FROM dict WHERE word LIKE '%{keyword}%' " \
                    f"ORDER BY year DESC, text, word"
        elif year == '不限':
            query = f"SELECT * FROM dict WHERE text = '{text}' AND word LIKE '%{keyword}%' " \
                    f"ORDER BY year DESC, text, word"
        elif text == '不限':
            query = f"SELECT * FROM dict WHERE year = '{year}' AND word LIKE '%{keyword}%' " \
                    f"ORDER BY year DESC, text, word"
        else:
            query = f"SELECT * FROM dict WHERE year = '{year}' AND text = '{text}' AND word LIKE '%{keyword}%' " \
                    f"ORDER BY year DESC, text, word"
        data = self._get_sql_data(query)
        # Populate the table.
        set_data_to_tableWidget(self.tableWidget_search_word, data)
    # Reset the dictation-page UI for the next word.
    def _reset_dict_ui(self):
        """Restore placeholder labels, progress, timer and inputs between words."""
        self.label_word.setText('单词在这里')
        self.label_word.setDisabled(True)
        self.label_attr.setText('词性在这里')
        self.label_attr.setDisabled(True)
        self.label_chinese.setText('中文在这里')
        self.label_chinese.setDisabled(True)
        self.label_finish.setText(f'完成{self.cur_dict_idx} / {self.progressBar_finish.maximum()}')
        self.progressBar_finish.setValue(self.cur_dict_idx)
        # Clear any warning color applied to the countdown display.
        self.lcdNumber_timer.setStyleSheet("")
        self._reset_dict_time()
        self.lcdNumber_timer.display(self.dict_time)
        self.lineEdit_input_attr.clear()
        self.lineEdit_input_chinese.clear()
        self.pushButton_get_answer.setDisabled(False)
        self.pushButton_add_to_notebook.setDisabled(False)
    # Advance the dictation page to the current word.
    def _show_word(self):
        """Reset the UI, display choices[cur_dict_idx], and start the 1s timer."""
        # UI state
        self._reset_dict_ui()
        # Content
        self.label_word.setDisabled(False)
        self.label_word.setText(self.choices[self.cur_dict_idx][0])
        self.timer.start(1000)
    # Reset the per-word dictation countdown.
    def _reset_dict_time(self):
        """Restore the countdown to the default 30 seconds."""
        self.dict_time = 30
    # Begin a dictation session.
    def _start_dict(self, data):
        """Sample spinBox_num words from *data* (weighted by sqrt(count+1)*10,
        so often-missed words appear more), lock the setup controls, and show
        the first word.
        """
        num = self.spinBox_num.value()
        # Data: weighted random sampling with replacement.
        weight = [int(((d[3] + 1) ** 0.5) * 10) for d in data]  # sqrt weighting of the miss count
        self.choices = random.choices(data, weight, k=num)
        # UI: freeze the configuration widgets, enable the answer widgets.
        self.progressBar_finish.setMaximum(num)
        self.label_word.setDisabled(False)
        self.pushButton_range_start1.setDisabled(True)
        self.pushButton_range_start2.setDisabled(True)
        self.pushButton_get_answer.setDisabled(False)
        self.pushButton_add_to_notebook.setDisabled(False)
        self.pushButton_next_word.setDisabled(False)
        self.spinBox_num.setDisabled(True)
        self.spinBox_year.setDisabled(True)
        self.spinBox_text_from.setDisabled(True)
        self.spinBox_text_to.setDisabled(True)
        self.lineEdit_input_attr.setDisabled(False)
        self.lineEdit_input_chinese.setDisabled(False)
        # Start: -1 so the first "next word" click advances to index 0.
        self.in_dictating = True
        self.cur_dict_idx = -1
        try:
            self.pushButton_next_word_clicked()
        except Exception as e:
            print(f'_start_dict:{e}')
    # Refresh the "add words" tab.
    def _flush_tab_1(self):
        """Reload the full dict table into the add-word grid."""
        # tableWidget
        self.tableWidget_add_word.clearContents()
        data = self._get_sql_data("SELECT * FROM dict ORDER BY year DESC, text, word")
        set_data_to_tableWidget(self.tableWidget_add_word, data)
    # Refresh the "search word" tab.
    def _flush_tab_2(self):
        """Repopulate the year/lesson combo boxes from the database and reload
        the search result grid with all dict rows."""
        # comboBoxes: '不限' (unrestricted) first, then distinct values.
        self.comboBox_year.clear()
        self.comboBox_lesson.clear()
        self.comboBox_year.addItem('不限')
        self.comboBox_lesson.addItem('不限')
        # self.comboBox_year.setCurrentIndex()
        conn = self._connect_to_db(self.db)
        cur = conn.cursor()
        try:
            # All distinct years (may be empty).
            cur.execute("select distinct year from dict order by year desc")
            years = [str(year[0]) for year in cur.fetchall()]
            # All distinct texts/lessons (may be empty).
            cur.execute("select distinct text from dict order by text")
            texts = [str(text[0]) for text in cur.fetchall()]
        except Exception as e:
            print(f'_flush_tab_2: {e}')
            years = texts = []
        finally:
            cur.close()
            conn.close()
        # print(years, texts)
        self.comboBox_year.addItems(years)
        self.comboBox_lesson.addItems(texts)
        # tableWidget
        self.tableWidget_search_word.clearContents()
        data = self._get_sql_data("SELECT * FROM dict ORDER BY year DESC, text, word")
        set_data_to_tableWidget(self.tableWidget_search_word, data)
    # Refresh the dictation tab (nothing to refresh yet).
    def _flush_tab_3(self):
        pass
    # Refresh the review-notebook tab.
    def _flush_tab_4(self):
        """Clear the notebook undo state and reload the notebook grid with
        dict/notebook rows whose miss count is positive."""
        # undo stack
        self.previous_cell_text_notebook = None
        self.undo_stack_notebook.clear()
        # tableWidget
        self.tableWidget_notebook.clearContents()
        data = self._get_sql_data("SELECT count,dict.word,attr,chinese,year,text "
                                  "FROM dict JOIN notebook n ON dict.word = n.word "
                                  "WHERE count > 0 "
                                  "ORDER BY n.count DESC, dict.word")
        set_data_to_tableWidget(self.tableWidget_notebook, data)
    ################## SLOT functions #################
    # Fired when the current tab changes.
    def tabWidget_currentChanged(self):
        """Pause the dictation countdown while the user is away from the
        dictation tab (index 2); resume it when they return."""
        sender = self.sender()
        idx = sender.currentIndex()
        if idx == 0:
            # "add words" tab
            # print('切换到加新词页面')
            if self.in_dictating:
                self.timer.stop()
        elif idx == 1:
            # "search word" tab
            # print('切换到查单词页面')
            if self.in_dictating:
                self.timer.stop()
        elif idx == 2:
            # dictation tab
            # print('切换到听写页面')
            if self.in_dictating:
                self.timer.start()
        elif idx == 3:
            # review-notebook tab
            # print('切换到复习单词本页面')
            if self.in_dictating:
                self.timer.stop()
        else:
            pass
    # Context menu for the add-word table.
    def tableWidget_add_word_showMenu(self, pos):
        """Show insert / delete-selected / add-to-notebook actions at the cursor."""
        pop_menu = QMenu(self.tableWidget_add_word)
        insert_action = pop_menu.addAction('添加一行')
        delete_action = pop_menu.addAction('删除选中的行')
        add_to_notebook = pop_menu.addAction('添加到单词复习本')
        insert_action.triggered.connect(lambda: self.tableWidget_add_word_insert(pos))
        delete_action.triggered.connect(self.tableWidget_add_word_delete_selected)
        add_to_notebook.triggered.connect(lambda: self.tableWidget_add_word_add_to_notebook(pos))
        pop_menu.exec_(QCursor.pos())
    # Context menu for the notebook table.
    def tableWidget_notebook_showMenu(self, pos):
        """Show the delete-selected action at the cursor."""
        pop_menu = QMenu(self.tableWidget_notebook)
        delete_action = pop_menu.addAction('删除选中的行')
        delete_action.triggered.connect(self.tableWidget_notebook_delete_selected)
        pop_menu.exec_(QCursor.pos())
    # def tableWidget_add_word_delete(self, pos):
    #     row_id = self.tableWidget_add_word.rowAt(pos.y())
    #     self.tableWidget_add_word.removeRow(row_id)
    # Delete the selected rows from the add-word table (undoable).
    def tableWidget_add_word_delete_selected(self):
        """Remove all selected rows via an undo-stack command."""
        rows = self.tableWidget_add_word.selectionModel().selectedRows()
        if len(rows) == 0:
            return
        row_ids = [r.row() for r in rows]  # row numbers to delete
        row_ids.sort(key=int, reverse=True)  # delete bottom-up so indices stay valid
        delete_selection = DeleteSelectedCommand(self.tableWidget_add_word, row_ids)
        self.undo_stack.push(delete_selection)
    # Delete the selected rows from the notebook table (and the database).
    def tableWidget_notebook_delete_selected(self):
        """Remove the selected notebook rows from both the DB and the grid."""
        rows = self.tableWidget_notebook.selectionModel().selectedRows()
        if len(rows) == 0:
            return
        row_ids = [r.row() for r in rows]  # row numbers to delete
        row_ids.sort(key=int, reverse=True)  # delete bottom-up so indices stay valid
        for r in row_ids:
            self._change_sql_data(f"delete from notebook where word = '{self.tableWidget_notebook.item(r, 1).text()}'")
            self.tableWidget_notebook.removeRow(r)
        self._flush_tab_4()
    # Insert a row at the right-click position in the add-word table (undoable).
    def tableWidget_add_word_insert(self, pos):
        row_id = self.tableWidget_add_word.rowAt(pos.y())
        insert = InsertCommand(self.tableWidget_add_word, row_id + 1)
        self.undo_stack.push(insert)
    # Append a row at the end of the add-word table (undoable).
    def tableWidget_add_word_insert_behind(self):
        insert = InsertCommand(self.tableWidget_add_word, self.tableWidget_add_word.rowCount())
        self.undo_stack.push(insert)
    # Add the right-clicked word to the review notebook.
    def tableWidget_add_word_add_to_notebook(self, pos):
        """Insert the word with count=1 unless it is already in the notebook
        with a positive count."""
        row_id = self.tableWidget_add_word.rowAt(pos.y())
        word = self.tableWidget_add_word.item(row_id, 2).text()
        data = self._get_sql_data(f"select count from notebook where word = '{word}'")
        if len(data) > 0 and data[0][0] > 0:
            pass
        else:
            self._change_sql_data(f"replace into notebook(word,count) values ('{word}',1)")
            self._flush_tab_4()
    # Fired by the "SAVE" button on the add-word tab.
    def pushButton_add_word_save_clicked(self):
        """Rewrite the whole dict table from the grid contents: delete all
        rows, re-insert everything, and refresh the dependent tabs."""
        row_count = self.tableWidget_add_word.rowCount()
        col_count = self.tableWidget_add_word.columnCount()
        data = get_data_from_tableWidget(self.tableWidget_add_word, list(range(row_count)), list(range(col_count)))
        # Write into the database.
        conn = self._connect_to_db(self.db)
        cur = conn.cursor()
        try:
            # # Back up the notebook data
            # cur.execute("CREATE TABLE notebook_bak AS SELECT * from notebook")
            # # Drop the tables
            # cur.execute("TRUNCATE TABLE notebook")
            # cur.execute("TRUNCATE TABLE dict")
            # # Rebuild the dict table
            # cur.executemany("INSERT INTO dict VALUES (?,?,?,?,?)", data)
            # # Rebuild the notebook table; FK violations possible, skip bad rows
            # cur.execute("INSERT INTO notebook SELECT * FROM notebook_bak")
            # # Drop the notebook_bak table
            # cur.execute("DROP TABLE notebook_bak")
            # Empty the dict table.
            cur.execute("DELETE FROM dict")
            # Rebuild the dict table from the grid.
            cur.executemany("INSERT INTO dict VALUES (?,?,?,?,?)", data)
            conn.commit()
        except Exception as e:
            # Duplicate words violate the UNIQUE constraint — tell the user.
            if 'UNIQUE constraint failed' in str(e):
                QMessageBox.warning(self, '保存失败', '不允许有相同的单词出现噢,请检查一下')
            else:
                QMessageBox.warning(self, '保存失败', f'SQL错误信息:{e}')
            conn.rollback()
        finally:
            # Close the connection.
            cur.close()
            conn.close()
        self.setWindowTitle('LTH的单词听写机')
        self._flush_tab_2()
        self._flush_tab_4()
    # Fired after a cell of the add-word grid was edited.
    def tableWidget_add_word_itemChanged(self):
        """Compare against the text captured on double-click and, when it
        changed, push an undoable ChangeItemCommand."""
        # print(f'tableWidget_add_word_itemChanged: {self.previous_cell_text}')
        if self.previous_cell_text is None:
            return
        row = self.previous_cell_text[0]
        col = self.previous_cell_text[1]
        text = self.previous_cell_text[2]
        cur_text = self.tableWidget_add_word.item(row, col).text()
        if cur_text != text:
            change_item = ChangeItemCommand(self.tableWidget_add_word, row, col, text, cur_text)
            self.undo_stack.push(change_item)
        self.previous_cell_text = None
    # Fired when a cell of the add-word table is double-clicked (edit start).
    def tableWidget_add_word_cellDoubleClicked(self, row, col):
        """Remember the cell's pre-edit text so itemChanged can build an undo entry."""
        item = self.tableWidget_add_word.item(row, col)
        # An empty cell has no item yet — treat it as ''.
        text = item.text() if item is not None else ''
        self.previous_cell_text = (row, col, text)
    # Fired after an item in the notebook (review) table changes.
    def tableWidget_notebook_itemChanged(self):
        """Write an edited error-count back to the notebook table and refresh.

        Uses previous_cell_text_notebook = (row, col, old_text) recorded on
        double-click; None means no edit in progress.
        """
        if self.previous_cell_text_notebook is None:
            return
        row = self.previous_cell_text_notebook[0]
        col = self.previous_cell_text_notebook[1]
        text = self.previous_cell_text_notebook[2]
        cur_text = self.tableWidget_notebook.item(row, col).text()
        if cur_text != text:
            # NOTE(review): values are interpolated straight into SQL; a quote
            # in the cell text breaks the statement — parameterize if possible.
            self._change_sql_data(
                f"update notebook set count='{cur_text}' where word='{self.tableWidget_notebook.item(row, 1).text()}'")
            self._flush_tab_4()
        self.previous_cell_text_notebook = None
    # Fired when a cell of the notebook table is double-clicked (edit start).
    def tableWidget_notebook_cellDoubleClicked(self, row, col):
        """Remember the cell's pre-edit text for tableWidget_notebook_itemChanged."""
        item = self.tableWidget_notebook.item(row, col)
        # An empty cell has no item yet — treat it as ''.
        text = item.text() if item is not None else ''
        self.previous_cell_text_notebook = (row, col, text)
    # Fired when the "quick search" button is clicked.
    @pyqtSlot()
    def pushButton_search_word_clicked(self):
        """Run a search with the current year/lesson/keyword and log it to history."""
        # Gather the search conditions from the UI.
        year = self.comboBox_year.currentText()
        text = self.comboBox_lesson.currentText()
        keyword = self.lineEdit_search_word.text()
        self._search_by_condition(year, text, keyword)
        # History entries are re-parsed by listWidget_history_itemClicked,
        # which splits on ', ' — a keyword containing ', ' will not round-trip.
        self.listWidget_history.addItem(f'[{year}], [{text}], [{keyword}]')
    # Fired when "clear search history" is clicked.
    @pyqtSlot()
    def pushButton_history_clicked(self):
        """Remove every entry from the search-history list."""
        self.listWidget_history.clear()
    # Fired when a search-history entry is clicked: re-run that search.
    def listWidget_history_itemClicked(self, item):
        """Parse a '[year], [text], [keyword]' history entry and re-run the search."""
        text = item.text()
        # Split back into the three bracketed fields.
        conditions = text.split(', ')
        if len(conditions) != 3:
            QMessageBox.warning(self, '查询条件解析错误!', '请仔细检查一下搜索记录里的查询条件')
            return
        # Strip the surrounding square brackets from each field.
        year, text, keyword = [con[1:-1] for con in conditions]
        self._search_by_condition(year, text, keyword)
    # Fired by "randomize all, start now".
    @pyqtSlot()
    def pushButton_range_start1_clicked(self):
        """Start dictation over every word, joining in the notebook error count."""
        data = self._get_sql_data(
            "select dict.word,attr,chinese,case when count is null then 0 else count end "
            "from dict left join notebook n on dict.word = n.word")
        if len(data) == 0:
            QMessageBox.warning(self, '听写失败', '没有单词可供听写')
        else:
            self._start_dict(data)
    # Fired by "range selected, start now".
    @pyqtSlot()
    def pushButton_range_start2_clicked(self):
        """Start dictation limited to one year and a lesson (text) range."""
        year = self.spinBox_year.value()
        text_from = self.spinBox_text_from.value()
        text_to = self.spinBox_text_to.value()
        # Tolerate reversed bounds by swapping them.
        if text_from > text_to:
            text_from, text_to = text_to, text_from
        data = self._get_sql_data(
            f"select dict.word,attr,chinese,case when count is null then 0 else count end "
            f"from dict left join notebook n on dict.word = n.word "
            f"where dict.year = {year} and dict.text between {text_from} and {text_to}")
        if len(data) == 0:
            QMessageBox.warning(self, '听写失败', '该范围没有单词可供听写')
        else:
            self._start_dict(data)
    # Timer tick on the dictation page (fires once per second).
    def timer_timeout(self):
        """Count down one second; on timeout auto-add the word and advance."""
        if self.dict_time > 0:
            # Turn the countdown red for the last five seconds.
            if self.dict_time <= 5:
                self.lcdNumber_timer.setStyleSheet("color: rgb(255, 0, 0)")
            self.dict_time -= 1
            self.lcdNumber_timer.display(self.dict_time)
            self.timer.start(1000)  # schedule the next one-second tick
        else:
            # Time is up: add to the notebook unless the user already did.
            if self.pushButton_add_to_notebook.isEnabled():
                QMessageBox.information(self, '已超时', '超时啦,自动添加到错题本')
                self.pushButton_add_to_notebook_clicked()
            else:
                QMessageBox.information(self, '已超时', '超时啦,你好像已经手动添加到错题本了')
            self.timer.stop()
            self.pushButton_next_word_clicked()
    # Fired by the "show answer" button on the dictation page.
    @pyqtSlot()
    def pushButton_get_answer_clicked(self):
        """Reveal the part of speech and Chinese meaning of the current word."""
        try:
            self.pushButton_get_answer.setDisabled(True)
            # choices[i] = (word, attr, chinese, error_count)
            cur_choice = self.choices[self.cur_dict_idx]
            self.label_attr.setDisabled(False)
            self.label_chinese.setDisabled(False)
            self.label_attr.setText(f'{cur_choice[1]}')
            self.label_chinese.setText(f'{cur_choice[2]}')
        except Exception as e:
            print(f'pushButton_get_answer_clicked:{e}')
    # Fired by "add to review notebook" on the dictation page.
    @pyqtSlot()
    def pushButton_add_to_notebook_clicked(self):
        """Increment (or create) the current word's error count, then reveal the answer."""
        self.pushButton_add_to_notebook.setDisabled(True)
        cur_choice = self.choices[self.cur_dict_idx]
        word = cur_choice[0]
        # NOTE(review): word/count are interpolated into SQL — see the
        # injection note on tableWidget_add_word_add_to_notebook.
        data = self._get_sql_data(f"select count from notebook where word = '{word}'")
        if len(data) > 0 and data[0][0] > 0:
            count = data[0][0] + 1
        else:
            count = 1
        self._change_sql_data(f"replace into notebook(word,count) values ('{word}','{count}')")
        self._flush_tab_4()
        # Adding to the notebook also reveals the answer.
        self.pushButton_get_answer_clicked()
    # Fired by "next word" on the dictation page.
    @pyqtSlot()
    def pushButton_next_word_clicked(self):
        """Advance to the next word, or finish the round and reset the UI."""
        self.cur_dict_idx += 1
        try:
            if self.cur_dict_idx >= self.progressBar_finish.maximum():
                # Round finished: stop the timer, congratulate, reset controls.
                self.in_dictating = False
                self.timer.stop()
                QMessageBox.information(self, "听写结束",
                                        f"完成了一轮听写({self.progressBar_finish.maximum()}个单词),牛蹄滑给力奥!")
                self.cur_dict_idx = 0
                self._reset_dict_ui()
                self.label_word.setDisabled(True)
                self.pushButton_range_start1.setDisabled(False)
                self.pushButton_range_start2.setDisabled(False)
                self.pushButton_get_answer.setDisabled(True)
                self.pushButton_add_to_notebook.setDisabled(True)
                self.pushButton_next_word.setDisabled(True)
                self.spinBox_num.setDisabled(False)
                self.spinBox_year.setDisabled(False)
                self.spinBox_text_from.setDisabled(False)
                self.spinBox_text_to.setDisabled(False)
                self.label_finish.setText(f'完成{self.cur_dict_idx} / {0}')
            else:
                self._show_word()
        except Exception as e:
            print(f'pushButton_next_word_clicked: {e}')
    # Fired by the Import action.
    @pyqtSlot()
    def import_excel(self):
        """Import a dictionary from a .db file, an Excel file, or create an empty one.

        Builds the new content in a temp database, keeps a one-deep backup of
        the current database, then swaps the temp file in and refreshes the UI.
        """
        temp_db_path = self.db[:self.db.rfind('.')] + '_temp.db'
        backup_db_path = self.db[:self.db.rfind('.')] + '_bak.db'
        self.import_box.exec_()
        if self.import_box.clickedButton() == self.import_cancel:
            return
        elif self.import_box.clickedButton() == self.import_from_db:
            file_name = QFileDialog.getOpenFileName(self, '选取词典数据库文件', '', 'SQLite Database(*.db)')
            # Refuse to import the live database over itself.
            if file_name[0] and (file_name[0] != self.db):
                copyfile(file_name[0], temp_db_path)
            else:
                return
        elif self.import_box.clickedButton() == self.import_from_excel:
            file_name = QFileDialog.getOpenFileName(self, '选取Excel文件', '', 'Excel(*.xls *.xlsx)')
            if file_name[0]:
                create_new_db(temp_db_path)
                self._import_excel_to_sqlite(file_name[0], temp_db_path)
            else:
                return
        else:
            # Any other button: start from a brand-new empty database.
            create_new_db(temp_db_path)
        # Replace any previous backup with the current database.
        if os.path.exists(backup_db_path):
            os.remove(backup_db_path)
        os.rename(self.db, backup_db_path)
        # Move the temp database into place as the live database.
        if os.path.exists(self.db):
            os.remove(self.db)
        os.rename(temp_db_path, self.db)
        # Reset undo history and refresh all tabs from the new database.
        self.undo_stack.clear()
        self._flush_tab_1()
        self._flush_tab_2()
        self._flush_tab_3()
        self._flush_tab_4()
    # Fired by the Export action.
    @pyqtSlot()
    def export_excel(self):
        """Save the dictionary and notebook into a timestamped .xls workbook."""
        # Ask for the output path (timestamped default file name).
        now_time = time.strftime("%Y%m%d-%H%M", time.localtime())
        xls_path = QFileDialog.getSaveFileName(self, '选取Excel文件', f'{now_time}_guaDictate导出', 'Excel(*.xls)')
        if not xls_path[0]:
            QMessageBox.warning(self, '保存错误', '保存路径选取有误,请重试!')
            return
        xls_path = xls_path[0]
        # Flush any pending table edits to the database first.
        self.pushButton_add_word_save_clicked()
        # Read both tables back from sqlite.
        dict_header = ["年份", "Text", "单词", "词性", "中文"]
        notebook_header = ["单词", "出错次数"]
        dict_data = self._get_sql_data("SELECT * FROM dict ORDER BY year DESC, text, word")
        notebook_data = self._get_sql_data("SELECT dict.word,count "
                                           "FROM dict JOIN notebook n ON dict.word = n.word "
                                           "WHERE count > 0 "
                                           "ORDER BY n.count DESC, dict.word")
        # Write to Excel; xlwt is imported lazily so the app starts without it.
        import xlwt
        # Creating the workbook overwrites any existing file on save.
        workbook = xlwt.Workbook(encoding='utf-8')
        workbook.add_sheet('dict')
        workbook.add_sheet('notebook')
        dict_sheet = workbook.get_sheet(0)
        notebook_sheet = workbook.get_sheet(1)
        # dict sheet: header row, then one row per word.
        for i, h in enumerate(dict_header):
            dict_sheet.write(0, i, h)
        for rn, row in enumerate(dict_data):
            for cn, item in enumerate(row):
                dict_sheet.write(rn + 1, cn, item)
        # notebook sheet: header row, then one row per misspelled word.
        for i, h in enumerate(notebook_header):
            notebook_sheet.write(0, i, h)
        for rn, row in enumerate(notebook_data):
            for cn, item in enumerate(row):
                notebook_sheet.write(rn + 1, cn, item)
        # Write the workbook to disk.
        workbook.save(xls_path)
class InsertCommand(QUndoCommand):
    """Undoable insertion of one blank row into a table widget."""
    def __init__(self, table, row_idx):
        super(InsertCommand, self).__init__()
        self.table = table
        self.row_idx = row_idx
        # NOTE(review): walks six levels up the widget hierarchy to reach the
        # main window — fragile if the layout nesting ever changes.
        self.main_window = table.parent().parent().parent().parent().parent().parent()
    def redo(self):
        self.table.insertRow(self.row_idx)
        self.main_window.setWindowTitle('LTH的单词听写机(未保存!!)')
    def undo(self):
        self.table.removeRow(self.row_idx)
        self.main_window.setWindowTitle('LTH的单词听写机(未保存!!)')
        # index() is read before the stack decrements it, so 1 means this was
        # the first command — after this undo the table matches the saved state.
        if self.main_window.undo_stack.index() == 1:
            self.main_window.setWindowTitle('LTH的单词听写机')
class DeleteSelectedCommand(QUndoCommand):
    """Undoable deletion of a set of rows (with their contents) from a table."""
    def __init__(self, table, rows):
        super(DeleteSelectedCommand, self).__init__()
        self.table = table
        # NOTE(review): redo() removes rows in the given order, which is only
        # index-stable if the caller passes `rows` in descending order
        # (undo() then re-inserts in ascending order via rows_rev) — confirm
        # against the caller.
        self.rows = rows
        self.rows_rev = rows[::-1]
        # Snapshot the cell texts so undo() can restore them.
        self.rows_data = get_data_from_tableWidget(table, self.rows_rev, list(range(table.columnCount())))
        # Fragile six-level parent walk to the main window.
        self.main_window = table.parent().parent().parent().parent().parent().parent()
    def redo(self):
        for r in self.rows:
            self.table.removeRow(r)
        self.main_window.setWindowTitle('LTH的单词听写机(未保存!!)')
    def undo(self):
        # Re-insert the rows and restore every saved cell.
        for i, r in enumerate(self.rows_rev):
            self.table.insertRow(r)
            for j, item in enumerate(self.rows_data[i]):
                item = QTableWidgetItem(str(item))
                item.setTextAlignment(Qt.AlignJustify | Qt.AlignVCenter)
                self.table.setItem(r, j, item)
        self.main_window.setWindowTitle('LTH的单词听写机(未保存!!)')
        # First command on the stack: after this undo the table is clean again.
        if self.main_window.undo_stack.index() == 1:
            self.main_window.setWindowTitle('LTH的单词听写机')
class ChangeItemCommand(QUndoCommand):
    """Undoable edit of a single table cell's text."""
    def __init__(self, table, row, col, text, cur_text):
        super(ChangeItemCommand, self).__init__()
        self.table = table
        self.row = row
        self.col = col
        # Old text (restored by undo) and new text (applied by redo).
        self.text = text
        self.cur_text = cur_text
        # Fragile six-level parent walk to the main window.
        self.main_window = table.parent().parent().parent().parent().parent().parent()
    def redo(self):
        self.table.item(self.row, self.col).setText(self.cur_text)
        self.main_window.setWindowTitle('LTH的单词听写机(未保存!!)')
    def undo(self):
        self.table.item(self.row, self.col).setText(self.text)
        self.main_window.setWindowTitle('LTH的单词听写机(未保存!!)')
        # First command on the stack: after this undo the table is clean again.
        if self.main_window.undo_stack.index() == 1:
            self.main_window.setWindowTitle('LTH的单词听写机')
class EmptyDelegate(QItemDelegate):
    """Item delegate that suppresses editing: cells using it are read-only."""
    def __init__(self, parent):
        super(EmptyDelegate, self).__init__(parent)
    # Parameters renamed: the original shadowed the Qt class names
    # QWidget/QStyleOptionViewItem/QModelIndex. Qt invokes this virtual
    # positionally, so the rename is interface-safe.
    def createEditor(self, parent, option, index):
        # Returning None tells Qt not to create an editor widget.
        return None
# Initialise an SQLite file with the app's `dict` and `notebook` tables.
def create_new_db(db_name):
    """Create (if absent) the schema used by the dictation app in *db_name*.

    dict(year, text, word PK, attr, chinese) holds the vocabulary;
    notebook(word PK -> dict.word, count) holds per-word error counts.
    Any error is printed and rolled back; the connection is always closed.
    """
    connection = sqlite3.connect(db_name)
    cursor = connection.cursor()
    try:
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS "
            "dict(year INTEGER,text INTEGER,word TEXT NOT NULL PRIMARY KEY,attr TEXT,chinese TEXT)")
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS "
            "notebook(word TEXT NOT NULL PRIMARY KEY,count INTEGER,"
            "CONSTRAINT FK_Notebook FOREIGN KEY (word) REFERENCES dict(word))")
        connection.commit()
    except Exception as e:
        print(f'create_new_db: {e}')
        connection.rollback()
    finally:
        cursor.close()
        connection.close()
# Collect the contents of a tableWidget into a list of row tuples.
def get_data_from_tableWidget(table_widget, rows, cols):
    """Return [(col0, col1, ...), ...] for the given row/column index lists.

    The first two columns (year/Text) are coerced to int when they hold pure
    digits and replaced by '' otherwise; missing (None) items also become ''.
    """
    def _cell_value(i, j):
        cell = table_widget.item(i, j)
        if cell is None:
            return ''
        raw = cell.text()
        if j < 2:
            # Numeric columns: keep only clean digit strings.
            return int(raw) if str.isdigit(raw) else ''
        return raw

    return [tuple(_cell_value(i, j) for j in cols) for i in rows]
# Fill a tableWidget from rows of data (as read from sqlite).
def set_data_to_tableWidget(table, data):
    """Size the columns, then write every value of *data* into *table*.

    The '单词' (word) column gets a fixed width, '中文' (Chinese) stretches,
    and all other columns size themselves to their contents.
    """
    table.horizontalHeader().setMinimumSectionSize(80)
    for col in range(table.columnCount()):
        if table.horizontalHeaderItem(col).text() not in ('单词', '中文'):
            table.horizontalHeader().setSectionResizeMode(col, QHeaderView.ResizeToContents)
        elif table.horizontalHeaderItem(col).text() == '单词':
            table.setColumnWidth(col, 240)
        else:
            table.horizontalHeader().setSectionResizeMode(col, QHeaderView.Stretch)
    table.setSelectionBehavior(QAbstractItemView.SelectRows)
    table.setRowCount(len(data))
    # One centred item per cell.
    for i, row in enumerate(data):
        for j, item in enumerate(row):
            item = QTableWidgetItem(str(item))
            item.setTextAlignment(Qt.AlignJustify | Qt.AlignVCenter | Qt.AlignHCenter)
            table.setItem(i, j, item)
if __name__ == '__main__':
    # Standard Qt bootstrap: build the app, show the main window, run the loop.
    app = QApplication(sys.argv)
    m = MWindow()
    m.show()
    sys.exit(app.exec_())
|
class Person:
    """Tracks how many Person instances have been created (class-wide)."""

    # Shared instance counter.
    count = 0

    def __init__(self):
        # Every construction bumps the shared class counter.
        Person.count += 1

    @classmethod
    def print_count(cls):
        # Report the current count (Korean: "N people were created").
        print(f'{cls.count}명이 생성되었습니다')
# Demo: create two people, then report the instance count (prints in Korean).
james = Person()
maria = Person()
Person.print_count()
|
#Farabi Hasan;
#This program displays a map of various hurricanes with lines of longitude and
#latitude, hurricane positions, paths, and strengths (via colour indicators).
#Import code from SimpleGraphics.py
from SimpleGraphics import *
#Tracks whether the initial input block has already run
count = 0
#Allow user to input values for latitude and longitude.
#When a value of zero is entered, the following body statements won't run.
if count == 0:
    latitude = float(input("Enter a value for latitude (0 to stop): "))
    if latitude > 0:
        longitude = float(input("Enter a value for longitude: "))
        wind_speed = float(input("Enter wind speed here: "))
#Add one to count so that once these statements are run once, the condition for
#the previous if statement won't be true
count = count + 1
#Store the dimensions of the image in variables to avoid usage of magic numbers.
map_width = 1022
map_height = 620
#Resize the window to the image dimensions
resize(map_width, map_height)
#Use loadImage to store the map in a variable
#NOTE(review): `map` shadows the built-in map(); harmless here, worth renaming.
map = loadImage ("map.gif")
#Display the image using drawImage
#Variable assigned for any coordinate of the origin (0, 0)
origin = 0
drawImage (map, origin, origin)
#Draw lines of longitude and latitude.
#Conversion factors between pixels and degrees; getWidth()/getHeight() are used
#to avoid magic numbers.
#Proportion of the latitude to the number of pixels in the height
y_to_lat_proportion = 25 / getHeight()
#Proportion of the longitude to the number of pixels in the width
x_to_long_proportion = 45 / getWidth()
#Difference between latitude origin (Equator) and window origin
lat_difference = 10
#Difference between longitude origin (Prime Meridian) and window origin
long_difference = 95
#Pixel value of spaces between lines of latitude
lat_space = getHeight() / 5
#Pixel value of spaces between lines of longitude
long_space = getWidth() / 9
#Lines of latitude and their labels, drawn with a while loop.
while map_height >= 0:
    setColor ("light gray")
    line (origin, map_height, getWidth(), map_height)
    #Text displaying values of latitude.
    #Restrict map_height so that the edge labels 10N and 35N aren't displayed.
    if map_height < getHeight() and map_height > 0:
        #lat_text stores the latitude value converted from the y-value
        lat_text = (getHeight() - map_height) * y_to_lat_proportion + 10
        setFont ("Arial", "12")
        #Move text 3 pixels right of the origin for visibility and anchor the
        #text box at its southwest corner
        text (origin + 3, map_height, "%.iN" % lat_text, "sw")
    #Reassign map_height by continuously subtracting lat_space to determine
    #the y-coordinate of the next line of latitude.
    map_height = map_height - lat_space
#While loop for the lines of longitude and their labels.
while map_width >= 0:
    setColor ("light gray")
    line (map_width, origin, map_width, getHeight())
    #Conversion from x to longitude.
    #0.1 added to origin due to approximation of floating point numbers.
    if map_width > origin + .1 and map_width < getWidth():
        long_text = -((map_width) * x_to_long_proportion - 95)
        setFont ("Arial", "12")
        #Move text 3 pixels to the right of the line for better visibility
        text (map_width + 3, origin, "%.iW" % long_text, "nw")
    map_width = map_width - long_space
#Plotting points on the map.
#dot counts plotted points so that connecting lines are only drawn once there
#are at least two dots on the map.
dot = 0
#Maximum hurricane category reached over all entered points
max_category = 0
#Maximum wind speed seen over all entered points
stored_wind_speed = 0
#Main loop: plot each point, connect it to the previous one, and re-prompt.
while latitude > 0:
    #Series of if-elif-else statements that position and colour-code each point
    #based on the input wind speed. Conversion from longitude/latitude to x/y
    #values is performed.
    if wind_speed >= 157:
        setColor("purple")
        ellipse((longitude + long_difference) * 1 / x_to_long_proportion, getHeight() - (latitude - 10) * 1 / y_to_lat_proportion, 15, 15)
    elif wind_speed >= 130:
        setColor("red")
        ellipse((longitude + long_difference) * 1 / x_to_long_proportion, getHeight() - (latitude - 10) * 1 / y_to_lat_proportion, 13, 13)
    elif wind_speed >= 111:
        setColor("orange")
        ellipse((longitude + long_difference) * 1 / x_to_long_proportion, getHeight() - (latitude - 10) * 1 / y_to_lat_proportion, 11, 11)
    elif wind_speed >= 96:
        setColor("yellow")
        ellipse((longitude + long_difference) * 1 / x_to_long_proportion, getHeight() - (latitude - 10) * 1 / y_to_lat_proportion, 9, 9)
    elif wind_speed >= 74:
        setColor("green")
        ellipse((longitude + long_difference) * 1 / x_to_long_proportion, getHeight() - (latitude - 10) * 1 / y_to_lat_proportion, 7, 7)
    elif wind_speed < 74:
        setColor("gray")
        ellipse((longitude + long_difference) * 1 / x_to_long_proportion, getHeight() - (latitude - 10) * 1 / y_to_lat_proportion, 5, 5)
    #Count this point so lines are only drawn once there is more than one dot.
    dot = dot + 1
    if dot > 1:
        #Series of if-elif-else statements that help determine line colours,
        #maximum categories, and maximum wind speeds.
        if wind_speed >= 157:
            setColor ("purple")
            #Add the radius of the plotted points to the x and y coordinates of
            #the ellipses to make the lines travel through the centres of the
            #points instead of their top left corners.
            line(stored_x + 7.5, stored_y + 7.5, (longitude + long_difference) * 1 / x_to_long_proportion, getHeight() - (latitude - 10) * 1 / y_to_lat_proportion + 7.5)
            #Maximum category stored if category 5 is reached at any point.
            max_category = 5
            #stored_wind_speed is only altered if a new wind speed is greater
            #than any of the others.
            if stored_wind_speed < wind_speed:
                stored_wind_speed = wind_speed
        elif wind_speed >= 130:
            #Using prev_wind_speed (assigned at the bottom of the loop) we can
            #compare the wind speeds of subsequent points and assign colors.
            if prev_wind_speed >= 157:
                setColor("purple")
            else:
                setColor("red")
            line(stored_x + 6.5, stored_y + 6.5, (longitude + long_difference) * 1 / x_to_long_proportion, getHeight() - (latitude - 10) * 1 / y_to_lat_proportion + 6.5)
            if max_category < 4:
                max_category = 4
            if stored_wind_speed < wind_speed:
                stored_wind_speed = wind_speed
        elif wind_speed >= 111:
            if prev_wind_speed >= 157:
                setColor("purple")
            elif prev_wind_speed >= 130:
                setColor("red")
            else:
                setColor("orange")
            line(stored_x + 5.5, stored_y + 5.5, (longitude + long_difference) * 1 / x_to_long_proportion, getHeight() - (latitude - 10) * 1 / y_to_lat_proportion + 5.5)
            if max_category < 3:
                max_category = 3
            if stored_wind_speed < wind_speed:
                stored_wind_speed = wind_speed
        elif wind_speed >= 96:
            if prev_wind_speed >= 157:
                setColor("purple")
            elif prev_wind_speed >= 130:
                setColor("red")
            elif prev_wind_speed >= 111:
                setColor("orange")
            else:
                setColor("yellow")
            line(stored_x + 4.5, stored_y + 4.5, (longitude + long_difference) * 1 / x_to_long_proportion, getHeight() - (latitude - 10) * 1 / y_to_lat_proportion + 4.5)
            if max_category < 2:
                max_category = 2
            if stored_wind_speed < wind_speed:
                stored_wind_speed = wind_speed
        elif wind_speed >= 74:
            if prev_wind_speed >= 157:
                setColor("purple")
            elif prev_wind_speed >= 130:
                setColor("red")
            elif prev_wind_speed >= 111:
                setColor("orange")
            elif prev_wind_speed >= 96:
                setColor("yellow")
            else:
                setColor("green")
            line(stored_x + 3.5, stored_y + 3.5, (longitude + long_difference) * 1 / x_to_long_proportion, getHeight() - (latitude - 10) * 1 / y_to_lat_proportion + 3.5)
            if max_category < 1:
                max_category = 1
            if stored_wind_speed < wind_speed:
                stored_wind_speed = wind_speed
        else:
            if prev_wind_speed >= 157:
                setColor("purple")
            elif prev_wind_speed >= 130:
                setColor("red")
            elif prev_wind_speed >= 111:
                setColor("orange")
            elif prev_wind_speed >= 96:
                setColor("yellow")
            elif prev_wind_speed >= 74:
                setColor("green")
            else:
                setColor("gray")
            line(stored_x + 2.5, stored_y + 2.5, (longitude + long_difference) * 1 / x_to_long_proportion, getHeight() - (latitude - 10) * 1 / y_to_lat_proportion + 2.5)
            if max_category < 1:
                max_category = 0
            if stored_wind_speed < wind_speed:
                stored_wind_speed = wind_speed
    #Stored x and y coordinates from this dot, used as the start point of the
    #line connecting it to the next dot.
    stored_x = (longitude + long_difference) * 1 / x_to_long_proportion
    stored_y = getHeight() - (latitude - 10) * 1 / y_to_lat_proportion
    #Store this point's wind speed to help determine the colour of the next
    #connecting line.
    prev_wind_speed = wind_speed
    #Repeat the input statements until a value of zero is entered for latitude.
    latitude = float(input("Enter a value for latitude (0 to stop): "))
    #When zero is entered for latitude, stop requesting values and fall
    #through to the summary message.
    if latitude != 0:
        longitude = float(input("Enter a value for longitude: "))
        wind_speed = float(input("Enter wind speed here: "))
#Display the Maximum Category in the top right corner of the map; coordinates
#anchor the northeast corner of the text box. 5 is subtracted from the total
#width and 20 added to the origin for better visibility.
setColor("light gray")
setFont ("Arial", "13.5")
text(getWidth() - 5, origin + 20, "Max. Category: %.i" % max_category, "ne")
#Display the Max Wind Speed in mph, 20 pixels below the maximum-category text.
setColor("light gray")
setFont ("Arial", "13.5")
text(getWidth() - 5, origin + 40, "Max. Wind Speed (mph): %.1f" % stored_wind_speed, "ne")
#Message displayed once a 0 is entered for latitude
print("You cannot enter any more values.")
|
#!/usr/bin/env python
# coding: utf-8
# # Exploratory Data Analysis-Retail
#
# ## The Spark Foundation
#
# ## Data science &Business Analytics Intern
#
# ## Task#3:
# ### Author:Harish patel
# ###### Exploratory Data Analysis on SampleSuperstore.csv
# #AS a Business manager,try to find out the weak areas where you can work to make more profit.
# In[1]:
#import the libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Notebook magic kept from the original export: render plots inline.
get_ipython().run_line_magic('matplotlib', 'inline')
# In[3]:
# Load the Sample Superstore dataset.
df=pd.read_csv("Downloads/SampleSuperstore.csv")
# In[4]:
df.head()
# In[5]:
df.tail()
# In[6]:
# FIX: `df.info` without parentheses only evaluates the bound method;
# it must be called to actually print the dataframe summary.
df.info()
# In[7]:
df.isnull().sum()
# In[8]:
df.duplicated().sum()
# In[9]:
# Drop exact duplicate rows in place.
df.drop_duplicates(subset=None,keep="first",inplace=True)
df
# In[10]:
df.nunique()
# In[11]:
# Postal code is an identifier, not a feature — drop it.
col=["Postal Code"]
df=df.drop(columns=col,axis=1)
# In[12]:
corr=df.corr()
corr
# In[13]:
plt.figure(figsize=(6,4))
with sns.color_palette("muted"):
    sns.countplot(x='Ship Mode',data=df)
# In[14]:
df.hist(bins=50,figsize=(20,15))
# In[15]:
plt.figure(figsize=(6,4))
with sns.color_palette("muted"):
    sns.countplot(x="Region",data=df)
# In[16]:
plt.figure(figsize=(9,7))
with sns.color_palette("muted"):
    sns.countplot(x="State",data=df)
plt.xticks(rotation=90)
# In[17]:
plt.figure(figsize=(6,4))
with sns.color_palette("muted"):
    sns.countplot(x="Segment",data=df)
# In[18]:
# Per-state total profit, ascending (loss-makers first).
newdata=pd.DataFrame(df.groupby('State').sum())['Profit'].sort_values(ascending=True)
# In[19]:
print(newdata)
# In[20]:
state=df.groupby('State')[['Sales','Profit']].sum().sort_values(by="Sales",ascending=False)
plt.figure(figsize=(60,70))
state[:30].plot(kind="bar",color=['blue',"red"])
plt.title("Profit or loss and sales of the top 30 States")
plt.xlabel("States")
plt.ylabel("total profit/ loss and sales")
state[30:].plot(kind="bar",color=["blue","red"])
plt.title("Profit or loss and sales of least economic States")
plt.xlabel("States")
plt.ylabel("total profit/ loss and sales")
# In[21]:
data=pd.DataFrame(df.groupby("State").sum())["Discount"].sort_values(ascending=True)
data
# In[22]:
plt.figure(figsize=(10,5))
# FIX: pass x/y as keywords — positional x/y were removed in seaborn >= 0.12.
sns.lineplot(x="Discount", y="Profit", data=df, color="blue", label="Discount")
plt.legend()
# In[23]:
# The above analysis suggests that reducing the discount would increase profit.
|
import os
class DbCreator:
    """Base helper that collects the paths/names needed to build a sequence database.

    makeDb() is a stub; presumably subclasses implement the actual build — confirm.
    """
    def __init__(self, filesPath, newDb, dbName, outputFile, outputFormat):
        # NOTE(review): self.resourcePath is not defined in this class —
        # presumably provided by a subclass or mixin elsewhere; confirm.
        # Directory containing the individual sequence files.
        self.filesPath = self.resourcePath("/"+ filesPath)
        # Directory where the database will be created.
        self.newDb = newDb
        # Name of the selected database.
        self.dbName = dbName
        # Intermediate file used to build the database.
        self.outputFile = outputFile
        self.outputFormat = outputFormat
    def makeDb(self):
        # Intentionally empty: no build logic at this level.
        pass
|
"""
Smallest multiple
Problem 5
2520 is the smallest number that can be divided by each of the numbers
from 1 to 10 without any remainder. What is the smallest positive number
that is evenly divisible by all of the numbers from 1 to 20?
"""
def run():
"""The Main Function"""
num = 2520
status = False
while True:
for i in range(20, 1, -1):
if num % i != 0:
status = False
break
else:
status = True
if status:
break
num = num + 10
print num
if __name__ == '__main__':
run()
|
from django import forms
from user.models import ProductModel
#
# class ProductForm(forms.ModelForm):
# class Meta:
# model = ProductModel
# fields = |
def bangunDatar():
    """Interactive 2-D shape calculator (Indonesian UI).

    Prompts for a shape number (1-8) and its integer dimensions, then prints
    the area (Luas, cm^2) and perimeter (Keliling, cm).

    FIX: branches 3-8 all printed "segitiga" (triangle) labels — a copy-paste
    bug; each branch now names its actual shape.
    """
    print('''Bangun Datar (Luas dan keliling [cm])
    1. Segitiga
    2. Persegi
    3. Persegi Panjang
    4. Jajar Genjang
    5. Belah Ketupat
    6. Layang-Layang
    7. Trapesium
    8. Lingkaran
    ''')
    pilih = int(input("Cari bangun apa? "))
    #___Triangle___#
    if pilih == 1:
        print("\n\tSegitiga")
        a = int(input("Alas: "))
        b = int(input("Sisi b: "))
        c = int(input("Sisi c: "))
        t = int(input("Tinggi: "))
        L = 1/2 * a * t
        K = a+b+c
        print("\nLuas segitiga = ", L, "cm^2")
        print("Keliling segitiga = ", K, "cm")
    #___Square___#
    elif pilih == 2:
        print("\n\tPersegi")
        s = int(input("Sisi: "))
        L, K = s**2, 4*s
        print("\nLuas persegi = ", L, "cm^2")
        print("Keliling persegi = ", K, "cm")
    #___Rectangle___#
    elif pilih == 3:
        print("\n\tPersegi Panjang")
        p = int(input("Panjang: "))
        l = int(input("Lebar: "))
        print("\nLuas persegi panjang = ", p*l, "cm^2")
        print("Keliling persegi panjang = ", 2*(p+l), "cm")
    #___Parallelogram___#
    elif pilih == 4:
        print("\n\tJajar Genjang")
        a = int(input("Alas: "))
        b = int(input("Miring: "))
        t = int(input("Tinggi: "))
        print("\nLuas jajar genjang = ", a*t, "cm^2")
        print("Keliling jajar genjang = ", 2*(a+b), "cm")
    #___Rhombus___#
    elif pilih == 5:
        print("\n\tBelah Ketupat")
        s = int(input("Sisi: "))
        d1 = int(input("Diagonal1: "))
        d2 = int(input("Diagonal2: "))
        print("\nLuas belah ketupat = ", 1/2 * d1 * d2, "cm^2")
        print("Keliling belah ketupat = ", s*4, "cm")
    #___Kite___#
    elif pilih == 6:
        print("\n\tLayang-Layang")
        a = int(input("Sisi a/b: "))
        b = int(input("Sisi c/d: "))
        d1 = int(input("Diagonal1: "))
        d2 = int(input("Diagonal2: "))
        print("\nLuas layang-layang = ", 1/2 * d1 * d2, "cm^2")
        print("Keliling layang-layang = ", 2*(a+b), "cm")
    #___Trapezoid___#
    elif pilih == 7:
        print("\n\tTrapesium")
        a = int(input("Alas(a): "))
        b = int(input("Atas(b): "))
        c = int(input("Kanan(c): "))
        d = int(input("Kiri(d): "))
        t = int(input("Tinggi(t): "))
        print("\nLuas trapesium = ", ((a+b)*t) / 2, "cm^2")
        print("Keliling trapesium = ", a+b+c+d, "cm")
    #___Circle___#
    elif pilih == 8:
        print("\n\tLingkaran")
        r = int(input("Jari-Jari: "))
        # 22/7 kept as the original pi approximation (school convention).
        print("\nLuas lingkaran = ", 22/7 * r * r , "cm^2")
        print("Keliling lingkaran = ", 22/7 * 2 * r, "cm")
# Script entry point: run the calculator once when the file is executed.
bangunDatar()
|
# A two-sample bootstrap hypothesis test for difference of means.
# You performed a one-sample bootstrap hypothesis test, which is impossible to do with permutation. Testing the hypothesis that two samples have the same distribution may be done with a bootstrap test, but a permutation test is preferred because it is more accurate (exact, in fact). But therein lies the limit of a permutation test; it is not very versatile. We now want to test the hypothesis that Frog A and Frog B have the same mean impact force, but not necessarily the same distribution. This, too, is impossible with a permutation test.
# To do the two-sample bootstrap test, we shift both arrays to have the same mean, since we are simulating the hypothesis that their means are, in fact, equal. We then draw bootstrap samples out of the shifted arrays and compute the difference in means. This constitutes a bootstrap replicate, and we generate many of them. The p-value is the fraction of replicates with a difference in means greater than or equal to what was observed.
# The objects forces_concat and empirical_diff_means are already in your namespace.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
def bootstrap_replicate_1d(data, func):
    """Resample *data* with replacement (same size) and apply *func* once."""
    resample = np.random.choice(data, size=len(data))
    return func(resample)
def draw_bs_reps(data, func, size=1):
    """Return *size* bootstrap replicates of *func* over *data* as a 1-D ndarray."""
    # One replicate per draw; np.array of the float replicates matches the
    # float64 array the original filled element by element.
    return np.array([bootstrap_replicate_1d(data, func) for _ in range(size)])
# Load the frog impact-force data (tab-separated) and split by frog ID.
df = pd.read_csv('data.csv', sep='\t', header=0)
force_a= df['impact_force'][(df['ID']=='A')].tolist()
force_b= df['impact_force'][(df['ID']=='B')].tolist()
# Pool both samples.
forces_concat = np.concatenate((force_a,force_b))
# Observed difference of mean impact force between frogs A and B.
empirical_diff_means = np.mean(force_a)-np.mean(force_b)
# Common mean under the null hypothesis of equal means.
mean_force = np.mean(forces_concat)
# Shift both samples so they share that mean (numpy broadcasts over the lists).
force_a_shifted = force_a - np.mean(force_a) + mean_force
force_b_shifted = force_b - np.mean(force_b) + mean_force
# 10,000 bootstrap replicates of the mean from each shifted sample.
bs_replicates_a = draw_bs_reps(force_a_shifted, np.mean, 10000)
bs_replicates_b = draw_bs_reps(force_b_shifted, np.mean, 10000)
# Replicates of the difference of means under the null.
bs_replicates = bs_replicates_a - bs_replicates_b
# p-value: fraction of replicates at least as extreme as the observed difference.
p = np.sum(bs_replicates >= empirical_diff_means)/ 10000
print('p-value =', p)
# Nice work! Not surprisingly, the more forgiving hypothesis, only that the means are equal as opposed to having identical distributions, gives a higher p-value. Again, it is important to carefully think about what question you want to ask. Are you only interested in the mean impact force, or the distribution of impact forces? |
import json
import datetime
import csv
import time
#may choose to only import urllb.request
import urllib.request
#App credentials used to form the Graph API access token (fill in before use).
app_id = ""
app_secret = ""
access_token = " "
#Page to scrape; alter as needed.
#Numeric id of the Bamboo Grove page = 560898400668463
#page_id = "SNUBamboo"
page_id = "SNUBamboo"
#Scrape window, YYYY-MM-DD.
since_date = "2000-01-01"
until_date = "2018-08-01"
def request_until_succeed(url):
    """Fetch *url*, retrying until an HTTP 200 response, and return the body bytes.

    Errors are printed and retried indefinitely after a 5-second pause (the
    Graph API rate-limits aggressively).  Fixes vs. the original: returns as
    soon as a 200 arrives, drops the stray semicolon / ``is False`` idiom, and
    also sleeps on a non-200 response instead of busy-looping.
    """
    req = urllib.request.Request(url)
    while True:
        try:
            response = urllib.request.urlopen(req)
            if response.getcode() == 200:
                return response.read()
        except Exception as e:
            # Best-effort retry loop by design; log the failure and wait.
            print(e)
        time.sleep(5)
#check if works for korean
def unicode_decode(text):
    """Round-trip *text* through UTF-8; fall back to the raw bytes on failure."""
    encoded = text.encode('utf-8')
    try:
        return encoded.decode()
    except UnicodeDecodeError:
        return encoded
def getFacebookPageFeedUrl(base_url):
    """Append the Graph API post-field selection to *base_url* and return it."""
    fields = (
        "&fields=message,link,created_time,type,name,id,"
        "comments.limit(0).summary(true),shares,reactions"
        ".limit(0).summary(true)"
    )
    return base_url + fields
def getReactionsForStatuses(base_url):
    """Query per-type reaction totals for one feed page.

    Returns {status_id: tuple_of_counts} with one count per reaction type, in
    the order like/love/wow/haha/sad/angry.
    """
    reactions_dict = {}
    for reaction_type in ('like', 'love', 'wow', 'haha', 'sad', 'angry'):
        fields = "&fields=reactions.type({}).limit(0).summary(total_count)".format(
            reaction_type.upper())
        data = json.loads(request_until_succeed(base_url + fields))['data']
        # A set drops the rare duplicate statuses the API sometimes returns.
        deduped = {(status['id'], status['reactions']['summary']['total_count'])
                   for status in data}
        for status_id, count in deduped:
            reactions_dict[status_id] = reactions_dict.get(status_id, ()) + (count,)
    return reactions_dict
def processFacebookPageFeedStatus(status):
    """Flatten one Graph API status dict into a CSV-ready tuple.

    Returns (id, message, link_name, type, link, published, num_reactions,
    num_comments, num_shares).  Missing text fields become '' and missing
    counts become 0.
    """
    status_id = status['id']
    status_type = status['type']
    status_message = '' if 'message' not in status else unicode_decode(status['message'])
    link_name = '' if 'name' not in status else unicode_decode(status['name'])
    status_link = '' if 'link' not in status else unicode_decode(status['link'])
    # Graph API timestamps are UTC; shift +9h (KST, this page's audience).
    status_published = datetime.datetime.strptime(status['created_time'], '%Y-%m-%dT%H:%M:%S+0000')
    status_published = status_published + datetime.timedelta(hours=+9)
    status_published = status_published.strftime('%Y-%m-%d %H:%M:%S')
    num_reactions = 0 if 'reactions' not in status else status['reactions']['summary']['total_count']
    num_comments = 0 if 'comments' not in status else status['comments']['summary']['total_count']
    # FIX: the original tested for a 'num_shares' key, which the API never
    # sends, so the share count was always reported as 0.  The key actually
    # read below is 'shares' -- test for that one.
    num_shares = 0 if 'shares' not in status else status['shares']['count']
    return (status_id, status_message, link_name, status_type, status_link,
            status_published, num_reactions, num_comments, num_shares)
def scrapeFacebookPageFeedStatus(page_id, access_token, since_date, until_date):
    """Page through *page_id*'s posts between the two YYYY-MM-DD dates and
    write one CSV row per status to '<page_id>_facebook_statuses.csv'.

    Fixes vs. the original: string emptiness is tested with ==/!= instead of
    the identity operator (``'' is ''`` relies on interning and raises
    SyntaxWarning on Python 3.8+), and the CSV file is opened with
    ``newline=''`` so the csv module controls line endings.
    """
    with open('{}_facebook_statuses.csv'.format(page_id), 'w', newline='') as file:
        w = csv.writer(file)
        w.writerow(["status_id", "status_message", "link_name", "status_type",
                    "status_link", "status_published", "num_reactions",
                    "num_comments", "num_shares", "num_likes", "num_loves",
                    "num_wows", "num_hahas", "num_sads", "num_angrys",
                    "num_special"])
        has_next_page = True
        num_processed = 0
        scrape_starttime = datetime.datetime.now()
        after = ''
        base = "https://graph.facebook.com/v2.9"
        node = "/{}/posts".format(page_id)
        parameters = "/?limit={}&access_token={}".format(100, access_token)
        since = "&since={}".format(since_date) if since_date != '' else ''
        until = "&until={}".format(until_date) if until_date != '' else ''
        print("Scraping {} Facebook Page: {}\n".format(page_id, scrape_starttime))
        while has_next_page:
            after = '' if after == '' else "&after={}".format(after)
            base_url = base + node + parameters + after + since + until
            url = getFacebookPageFeedUrl(base_url)
            statuses = json.loads(request_until_succeed(url))
            reactions = getReactionsForStatuses(base_url)
            for status in statuses['data']:
                # Only statuses that carry a reactions summary get a row.
                if 'reactions' in status:
                    status_data = processFacebookPageFeedStatus(status)
                    reactions_data = reactions[status_data[0]]
                    # num_special = total reactions minus the six typed counts.
                    num_special = status_data[6] - sum(reactions_data)
                    w.writerow(status_data + reactions_data + (num_special,))
                num_processed += 1
                if num_processed % 100 == 0:
                    print("{} Statuses Processed: {}".format(
                        num_processed, datetime.datetime.now()))
            # Follow the cursor when the API reports another page.
            if 'paging' in statuses:
                after = statuses['paging']['cursors']['after']
            else:
                has_next_page = False
        print("\nDone!\n{} Statuses Processed in {}".format(
            num_processed, datetime.datetime.now() - scrape_starttime))
if __name__ == '__main__':
    # Entry point: scrape the configured page over the configured date window.
    scrapeFacebookPageFeedStatus(page_id, access_token, since_date, until_date)
|
import streamlit as st
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.axes as axes
import seaborn as sns
# Full comment-feature dataset, plus a small display frame for the "Dataset" view.
data=pd.read_csv('streamlit_data1.csv')
head = pd.DataFrame(data.head(10))
# Drop bookkeeping columns, then rename the remainder for display.
head = head.drop(['n_gram_prediction','author', 'subreddit', 'score', 'ups','ratio_char',
    'downs', 'date', 'created_utc', 'parent_comment', 'cleaned','punc(”)',"punc(')",'label'], axis=1)
head.columns = ['Comment', 'Sentiment score','Capital words', 'Total words',
    '.', ',', '!','?', '*', 'Characters repeated','Unique Characters', 'Total Characters',
    'Subreddit Ratio','Sentence length', 'Syllables per word', 'Flesch Score','Swear Words']
# Re-order columns for presentation.
# NOTE(review): 'Capital words' appears twice in this list, so the table shows
# it as two columns -- confirm whether 'Total words' was intended for one slot.
head1 = head[['Comment','Capital words','Total Characters','Unique Characters','Characters repeated',
    'Capital words','Sentence length','Syllables per word','Flesch Score','Swear Words',
    '.', ',', '!','?', '*','Subreddit Ratio','Sentiment score']]
# Magnitude of the sentiment score (intensity, sign discarded).
data['abs_cc_score']=abs(data['cc_score'])
shape = data.shape
n = 16  # number of extracted features reported in the UI
st.set_option('deprecation.showPyplotGlobalUse', False)
def app():
    """Render the "Extracted Features" page of the sarcasm-detection demo.

    A sidebar selectbox chooses one feature family; each branch draws the
    matching seaborn/matplotlib figure into the Streamlit page via
    ``st.pyplot()``.  Uses the module-level ``data`` / ``head1`` / ``n``.
    """
    # Page title, styled via raw HTML.
    writesa='<p style="font-family:black body ; color:#000033 ; text-align:center; font-size: 60px;">Extracted Features.</p>'
    st.markdown(writesa,unsafe_allow_html=True)
    graph_sel = st.sidebar.selectbox("Select Graph",('Dataset', 'Swear Words', 'Capital Words',
        'Complexity', 'Sentiment', 'Punctuation', 'Repeated Characters'))
    if graph_sel=='Dataset':
        # Preview of the first rows plus the feature count.
        st.write("Head of the dataset")
        st.dataframe(data=head1)
        st.write("Number of Features extracted",n)
    elif graph_sel=='Swear Words':
        writer='<p style="font-family:black body ; color:#000033 ; text-align:left; font-size: 25px;">Swear Words.</p>'
        st.markdown(writer,unsafe_allow_html=True)
        plt.figure(figsize=[10,6])
        sns.countplot(data=data, x="SwearWord", hue="label")
        plt.xlabel( "Swear Words Used", size = 16 )
        plt.ylabel( "Number of comments", size = 16 )
        plt.xlim(-1,3)
        plt.title( "Swear Words", size = 24 )
        plt.legend(["No", "Yes"], loc="upper right", title="Sarcastic Comment")
        # Hard-coded count labels for the two bar groups.
        plt.annotate("481268 484150", (-0.35,182653), fontsize=12)
        plt.annotate("24137 21218", (0.65,50721), fontsize=12)
        plt.show()
        st.pyplot()
    elif graph_sel=='Capital Words':
        writer='<p style="font-family:black body ; color:#000033 ; text-align:left; font-size: 25px;">Capital words.</p>'
        st.markdown(writer,unsafe_allow_html=True)
        # Sidebar slider caps the y-axis so small bars stay readable.
        slider_sel = st.sidebar.slider("Y limit", max_value=400000, min_value=10000, value=400000, step = 25000)
        plt.figure(figsize=[10,6])
        sns.histplot(data=data, x='capital_words', hue='label', binrange=(-0.5,10.5),binwidth=1)
        plt.xlabel( "Capital word" , size = 16 )
        plt.ylim(0,slider_sel)
        plt.ylabel( "Number of comments" , size = 16 )
        plt.title( "Complete capital word" , size = 24 )
        plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
        plt.show()
        st.pyplot()
    elif graph_sel=='Repeated Characters':
        writer='<p style="font-family:black body ; color:#000033 ; text-align:left; font-size: 25px;">Repeated characters.</p>'
        st.markdown(writer,unsafe_allow_html=True)
        plt.figure(figsize=[10,6])
        a = sns.countplot(data=data, x="char_repeated", hue="label")
        a.plot()
        plt.xlabel( "Characters repeated" , size = 16 )
        plt.ylabel( "Number of comments" , size = 16 )
        plt.title( "Repeated Chaaaaaaracters" , size = 24 )
        plt.xlim(-1,3)
        plt.legend(["No","Yes"], loc="upper right", title="Sarcastic Comment")
        plt.annotate("481268 484150", (-0.30,182653), fontsize=12)
        # NOTE(review): unbound-method call; the conventional form is
        # a.set_xticklabels(['False','True']) -- verify this works on the
        # pinned matplotlib version.
        axes.Axes.set_xticklabels(a,['False','True'])
        plt.annotate("24137 21218", (0.7,50721), fontsize=12)
        plt.show()
        st.pyplot()
    elif graph_sel=='Sentiment':
        writer='<p style="font-family:black body ; color:#000033 ; text-align:left; font-size: 25px;">Sentiment in sarcasm.</p>'
        st.markdown(writer,unsafe_allow_html=True)
        switch = st.selectbox("Polarity or Intensity", ('Polar', 'Intense'))
        slider_sel = st.sidebar.slider("Y limit", max_value=225000, min_value=25000, value=225000, step = 25000)
        slider_sel1 = st.sidebar.slider("X Range", max_value=1.1, min_value=-1.1, value=[-1.1,1.1], step = 0.1)
        if switch=='Polar':
            plt.figure(figsize=[12,6])
            sns.histplot(data=data,x='cc_score', hue='label', binrange=(-1,1), binwidth=0.1)
            plt.xlabel( "Polarity" , size = 16 )
            plt.ylabel( "Number of comments" , size = 16 )
            plt.ylim((0,slider_sel))
            plt.xlim(slider_sel1)
            plt.title( "Sentiment Score" , size = 24 )
            plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
            plt.show()
            st.pyplot()
        elif switch=='Intense':
            # NOTE(review): this branch plots cc_score although abs_cc_score
            # is computed at module level and never used -- confirm which
            # column the "Intensity" view should plot.
            plt.figure(figsize=[8,6])
            a = sns.histplot(data=data, x='cc_score', hue='label', binrange=(0,1),binwidth=0.33)
            a.plot()
            plt.xlabel( "Intensity" , size = 16 )
            plt.ylabel( "Number of comments" , size = 16 )
            plt.xticks(ticks=[0,0.165,0.33,0.5,0.66,0.83,0.99])
            labels = [0,'Nuetral',0.33, 'Moderate',0.66,'Strong',1]
            # NOTE(review): unbound-method call; a.set_xticklabels(labels=labels)
            # is the conventional form.
            axes.Axes.set_xticklabels(a,labels=labels)
            plt.title( "Sentiment Score" , size = 24 )
            plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
            plt.show()
            st.pyplot()
    elif graph_sel=='Punctuation':
        writer='<p style="font-family:black body ; color:#000033 ; text-align:left; font-size: 25px;">Punctuation marks!!</p>'
        st.markdown(writer,unsafe_allow_html=True)
        # One near-identical histogram per punctuation mark column.
        mark = st.selectbox("Punctuation Mark",('.',',','!','?','*'))
        slider_sel = st.sidebar.slider("Y limit", max_value=200000, min_value=50, value=93450, step = 25)
        if mark == '.':
            plt.figure(figsize=[8,6])
            sns.histplot(data=data,x='punc(.)',
                hue='label' ,
                binrange=(-0.5,10.5),
                binwidth=1)
            plt.xlabel( "Total numer of '.'s" , size = 16 )
            plt.ylabel( "Number of comments" , size = 16 )
            plt.title( "Punc(.)" , size = 24 )
            plt.ylim(0,slider_sel)
            plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
            plt.show()
            st.pyplot()
        elif mark == ',':
            plt.figure(figsize=[8,6])
            sns.histplot(data=data,x='punc(,)',
                hue='label' ,
                binrange=(-0.5,10.5),
                binwidth=1)
            plt.xlabel( "Total numer of ','s" , size = 16 )
            plt.ylabel( "Number of comments" , size = 16 )
            plt.title( "Punc(,)" , size = 24 )
            plt.ylim(0,slider_sel)
            plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
            plt.show()
            st.pyplot()
        elif mark == '!':
            plt.figure(figsize=[8,6])
            sns.histplot(data=data,x='punc(!)',
                hue='label' ,
                binrange=(-0.5,10.5),
                binwidth=1)
            plt.xlabel( "Total numer of '!'s" , size = 16 )
            plt.ylabel( "Number of comments" , size = 16 )
            plt.title( "Punc(!)" , size = 24 )
            plt.ylim(0,slider_sel)
            plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
            plt.show()
            st.pyplot()
        elif mark == '?':
            plt.figure(figsize=[8,6])
            sns.histplot(data=data,x='punc(?)',
                hue='label' ,
                binrange=(-0.5,10.5),
                binwidth=1)
            plt.xlabel( "Total numer of '?'s" , size = 16 )
            plt.ylabel( "Number of comments" , size = 16 )
            plt.title( "Punc(?)" , size = 24 )
            plt.ylim(0,slider_sel)
            plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
            plt.show()
            st.pyplot()
        elif mark == '*':
            plt.figure(figsize=[8,6])
            sns.histplot(data=data,x='punc(*)',
                hue='label' ,
                binrange=(-0.5,10.5),
                binwidth=1)
            plt.xlabel( "Total numer of '*'s" , size = 16 )
            plt.ylabel( "Number of comments" , size = 16 )
            plt.title( "Punc(*)" , size = 24 )
            plt.ylim(0,slider_sel)
            plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
            plt.show()
            st.pyplot()
    elif graph_sel == 'Complexity':
        writer='<p style="font-family:black body ; color:#000033 ; text-align:left; font-size: 25px;">Complexity.</p>'
        st.markdown(writer,unsafe_allow_html=True)
        minigrap = st.selectbox("Measure of Complexity", ('Flesch Score', 'Sentence Length',
            'Comment Length', 'Syllables per word'))
        if minigrap == 'Flesch Score':
            plt.figure(figsize=[16,6])
            sns.histplot(data=data,x='Flesch_score',
                hue='label' ,
                binrange=(0,100),
                binwidth=1)
            plt.xlabel( "Flesch Score" , size = 16 )
            plt.ylabel( "Number of people" , size = 16 )
            plt.title( "Flesch Score" , size = 24 )
            plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
            plt.show()
            st.pyplot()
        elif minigrap == 'Sentence Length':
            plt.figure(figsize=[16,6])
            sns.histplot(data=data,x='Avg_sentence_length',
                hue='label' ,
                binrange=(.5,70.5),
                binwidth=1)
            plt.xlabel( "Number of words per sentence" , size = 16 )
            plt.ylabel( "Number of comment" , size = 16 )
            plt.title( "Sentence lengths" , size = 24 )
            plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
            plt.show()
            st.pyplot()
        elif minigrap == 'Comment Length':
            # Comment length measured three ways via a nested selectbox.
            subminigrap = st.selectbox("Measuremnts based on", ('Words', 'Characters', 'Unique Characters'))
            if subminigrap == 'Words':
                plt.figure(figsize=[16,6])
                sns.histplot(data=data,x='total_words',
                    hue='label' ,
                    binrange=(-0.5,80.5),
                    binwidth=1)
                plt.xlabel( "Total words" , size = 16 )
                plt.ylabel( "Number of comments" , size = 16 )
                plt.title( "Total numer of words" , size = 24 )
                plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
                plt.show()
                st.pyplot()
            elif subminigrap == 'Characters':
                plt.figure(figsize=[16,6])
                sns.histplot(data=data,x='tot_chars',
                    hue='label' ,
                    binrange=(-0.5,200.5),
                    binwidth=1)
                plt.xlabel( "Characters" , size = 16 )
                plt.ylabel( "Number of comments" , size = 16 )
                plt.title( "Total numer of Characters" , size = 24 )
                plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
                plt.show()
                st.pyplot()
            elif subminigrap == 'Unique Characters':
                plt.figure(figsize=[16,6])
                sns.histplot(data=data,x='unique_char',
                    hue='label' ,
                    binrange=(-0.5,50.5),
                    binwidth=1)
                plt.xlabel( "Unique Characters" , size = 16 )
                plt.ylabel( "Number of comments" , size = 16 )
                plt.title( "Total numer of unique characters" , size = 24 )
                plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
                plt.show()
                st.pyplot()
        elif minigrap == 'Syllables per word':
            Yslider = st.sidebar.slider("Y limit", max_value=200000, min_value=50, value=179610, step=10)
            plt.figure(figsize=[8,6])
            sns.histplot(data=data,x='Avg_syllables_per_word',
                hue='label' ,
                binrange=(0,6),
                binwidth=0.3)
            plt.xlabel( "Number syllables per word" , size = 16 )
            plt.ylabel( "Number of comments" , size = 16 )
            plt.title( "Syllables per Word" , size = 24 )
            plt.ylim(0,Yslider)
            plt.legend(["Yes", "No"], loc="upper right", title="Sarcastic Comment")
            plt.show()
            st.pyplot()
|
'''
A password is "safe" when its length is greater than or equal to 10 symbols and
it contains at least one digit, one uppercase letter and one lowercase letter.
The password may contain only ASCII Latin letters or digits.
'''
def checkio(data):
    """Return True when *data* is a "safe" password.

    Safe means: length >= 10 symbols, at least one digit, at least one
    lowercase letter, and at least one uppercase letter.

    FIX: the original used ``len(data) > 10``, wrongly rejecting valid
    length-10 passwords; the stated rule is "greater than or equal to 10".
    """
    return (
        len(data) >= 10
        and any(c.isdigit() for c in data)
        and any(c.islower() for c in data)
        and any(c.isupper() for c in data)
    )
#Some hints
#Just check all conditions
if __name__ == '__main__':
    # These "asserts" are only for self-checking; auto-testing does not need them.
    assert checkio('A1213pokl') == False, "1st example"
    assert checkio('bAse730onE4') == True, "2nd example"
    assert checkio('asasasasasasasaas') == False, "3rd example"
    assert checkio('QWERTYqwerty') == False, "4th example"
    assert checkio('123456123456') == False, "5th example"
    assert checkio('QwErTy911poqqqq') == True, "6th example"
    print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
|
import csv
import string
import os
import argparse
import synapseclient
import webbrowser
# Log in to Synapse with cached credentials.
# NOTE(review): `syn=syn.login()` assumes login() returns the session object;
# in newer synapseclient versions login() returns None, which would break the
# syn.query/syn.get calls below -- verify the pinned client version.
syn = synapseclient.Synapse()
syn=syn.login()
# Shell crib notes for preparing input files:
#x="ls"
#y=`eval $x`
#echo $y <- all the files in the folder
#sed 'Nd' file > newfile <- where N is the line in the file
#sort -k1,1 -k2,2n H9.102.2.5__exon.bed > sorted.bed <- sorting bed files (All bed/VCF files have to be sorted before indexing)
#chromosome and start position matter
# Command-line interface for the JBrowse project builder.
parser = argparse.ArgumentParser()
# positional arguments
parser.add_argument("Project", metavar='Project', type=str, help='Project name')
parser.add_argument("Jbrowse", metavar='Jbrowse directory', type=str, help="Where Jbrowse-1.11.6 is installed (path)")
parser.add_argument("Genome", metavar='Genome', type=str, help="Input (human/mouse) - hg19 and mm10 supported")
parser.add_argument("FolderPath", metavar='Folder Path', type=str, help="Full path of folder with datafiles")
# Optional arguments
# FIX: '--reference' had been commented out while `args.reference` (and the
# never-defined args.files / args.url) were still read below, so the script
# crashed with AttributeError on startup.  Restore --reference with its
# documented default and drop the two dead lookups.
parser.add_argument("-ref", "--reference", action="store", default="Reference",
                    help="Folder of DNA reference files (fasta/(bed file of all genes))")
parser.add_argument("-N", "--needRef", action='store_true', help="Need reference genome?")
parser.add_argument("-A", "--add", action='store_true', help="Append onto existing conf?")
parser.add_argument("-D", "--download", action='store_true', help="Download genome fasta files")
parser.add_argument("-C", "--create", action="store_true", help="Create Folder structure for project")
args = parser.parse_args()
# Required
genome = args.Genome
jbrowse = args.Jbrowse
project = args.Project
folderpath = args.FolderPath
# Optional
ref = args.reference
needRef = args.needRef
add = args.add
download = args.download
create = args.create
if create:
    # Lay out <jbrowse>/<project>/{json,raw}/<genome> for this project.
    os.mkdir(os.path.join(jbrowse,project))
    os.mkdir(os.path.join(jbrowse,project,"json"))
    os.mkdir(os.path.join(jbrowse,project,"raw"))
    os.mkdir(os.path.join(jbrowse,project,"json",genome))
    os.mkdir(os.path.join(jbrowse,project,"raw",genome))
# This is where the configuration file goes
output = os.path.join(project,"json",genome)##for right now <- genome is the subfolder name
rawfiles = os.path.join(project,"raw",genome)
# Link the user's data folder into raw/ and move its contents under the genome
# subfolder.  NOTE(review): the paths are not shell-quoted; a folderpath
# containing spaces will break these commands.
os.system("ln -s %s %s" %(folderpath,os.path.join(project,"raw")))
os.system("mv %s %s"%(os.path.join(project,"raw","*"),rawfiles)) #Temporary hack that will work for now...
def createRefGenome(directory):
    """Build the JBrowse reference tracks (sequence + gene bed) from *directory*.

    When --download is set, the hg19/mm10 fasta+bed files are first pulled from
    the appropriate Synapse folder.  Every .fa file becomes the reference
    sequence track and every .bed file becomes a gene track; finally the name
    index is generated.  Uses the module-level syn/jbrowse/genome/output.
    """
    # If the person doesn't have the fasta files, download them from Synapse.
    if download:
        os.mkdir(os.path.join(jbrowse, "Reference"))
        os.mkdir(os.path.join(jbrowse, "Reference", genome))
        if genome == "human":
            temp = syn.query('SELECT id, name FROM entity WHERE parentId == "syn4557835"')
        else:
            temp = syn.query('SELECT id, name FROM entity WHERE parentId == "syn4557836"')
        # FIX: download each entity by its id; the original passed the whole
        # query result ('temp') to syn.get on every iteration, so the per-file
        # downloads never happened.
        for each in temp['entity.id']:
            syn.get(each, downloadLocation="%s" % (directory))
    # Filenames with the directory path prepended.
    filelist = [os.path.join(directory, filenames) for filenames in os.listdir(directory)]
    for each in filelist:
        if ".fa" in each:  # DNA sequence -> reference-sequence track
            os.system("perl %s/bin/prepare-refseqs.pl --fasta %s --out %s" % (jbrowse, each, os.path.join(jbrowse, output)))
        elif ".bed" in each:  # gene annotations (bed file is pre-formatted)
            os.system("""perl %s/bin/flatfile-to-json.pl --bed %s --trackType CanvasFeatures --trackLabel human_genes --config '{"maxFeatureScreenDensity":20,"maxHeight":300}' --clientConfig '{"strandArrow": false,"color":"cornflowerblue"}' --out %s""" % (jbrowse, each, os.path.join(jbrowse, output)))
    # Build the searchable name index for the new tracks.
    os.system("perl %s/bin/generate-names.pl -v --out %s" % (jbrowse, os.path.join(jbrowse, output)))
# Fetch/build the reference tracks when they are not already local.
if needRef:
    createRefGenome(os.path.join(jbrowse,ref,genome))
##since the folders should be formatted prior to running this script. All files should be under
##(Project folder)/(raw)/(human)
def createJbrowse(allFiles, directory="raw",data="human",append=False,meta_type="tumor"):
    """Write JBrowse tracks.conf entries plus a track-metadata CSV.

    allFiles: filenames (not paths) living under <project>/<directory>/<data>.
    append: continue numbering from an existing tracks.conf instead of starting
    fresh.  meta_type tags each track's metadata.type field.  BigWig, indexed
    VCF and BAM files are supported; anything else is reported and skipped.
    """
    count = 0  # next free track number
    ##If you want to add on extra data to existing, then append=TRUE
    if append:
        f = open(os.path.join(jbrowse,output,'tracks.conf'),'a+')
        # NOTE(review): binary mode ('ab'/'wb') with csv.DictWriter only works
        # on Python 2; on Python 3 it raises TypeError -- confirm the target
        # interpreter version.
        metaData = open(os.path.join(jbrowse,output,'trackMetadata.csv'), 'ab')
        fieldnames = ['label', 'category','meta_type','datatype','key'] ##Hardcoded fields
        writer = csv.DictWriter(metaData, fieldnames=fieldnames)
        # Scan the existing conf for "[ tracks.N ]" headers and resume at N+1.
        f.seek(0)
        lines = f.readlines()
        for each in lines:
            if "tracks" in each:
                words = each.split()
                for word in words:
                    num = word.split(".")
                    if len(num)==2:
                        count = int(num[1])+1
    else: ##Open fresh tracks and trackmetadata
        f = open(os.path.join(jbrowse,output,'tracks.conf'),'w')
        # NOTE(review): fresh mode writes 'trackMetaData.csv' under the project
        # folder while append mode reads 'trackMetadata.csv' under the output
        # folder -- the differing name/location looks unintentional; verify.
        metaData = open(os.path.join(jbrowse,project,'trackMetaData.csv'), 'wb')
        fieldnames = ['label', 'category','meta_type','datatype','key'] ##Hardcoded fields
        writer = csv.DictWriter(metaData, fieldnames=fieldnames)
        writer.writeheader()
    ###########Big wig file configuration#######
    for each in allFiles:
        if "bw" in each:
            category = "Human_Coverage"
            datatype = "bigwig"
            track = """
[ tracks.%s ]
storeClass = JBrowse/Store/SeqFeature/BigWig
urlTemplate = ../../%s/%s/%s
metadata.category = %s
metadata.type = %s
metadata.datatype = %s
type = JBrowse/View/Track/Wiggle/XYPlot
key = %s
autoscale = clipped_global\n""" % (count,directory,data,each,category,meta_type,datatype,each)
            f.write(track)
            writer.writerow({'label':count, 'category':category,'meta_type':meta_type,'datatype':datatype,'key':each})
            count+=1
        #####VCF FILES#######
        #VCF file configuration.  The matching .tbi index must exist; the
        #JBrowse config auto-searches for it.
        elif "vcf.gz" in each and ".tbi" not in each:
            category = "Variant"
            datatype = "VCF"
            track = """
[ tracks.%s ]
storeClass = JBrowse/Store/SeqFeature/VCFTabix
urlTemplate = ../../%s/%s/%s
metadata.category = %s
metadata.type = %s
metadata.datatype = %s
# settings for how the track looks
type = JBrowse/View/Track/CanvasVariants
key = %s\n"""% (count,directory,data,each,category,meta_type,datatype,each)
            f.write(track)
            ##Write each line of the metadata.csv
            writer.writerow({'label':count, 'category':category,'meta_type':meta_type,'datatype':datatype,'key':each})
            count+=1
        ####BAM FILES####
        #baiUrlTemplate can be used to point at the exact .bai location if needed
        elif ".bam" in each and ".bai" not in each:
            category = "Alignment"
            datatype = "BED"
            track = """
[tracks.%s]
storeClass = JBrowse/Store/SeqFeature/BAM
urlTemplate = ../../%s/%s/%s
metadata.category = %s
metadata.type = %s
metadata.datatype = %s
type = JBrowse/View/Track/Alignments2
key = %s\n"""%(count,directory,data,each,category,meta_type,datatype,each)
            f.write(track)
            ##Write each line of the metadata.csv
            writer.writerow({'label':count, 'category':category,'meta_type':meta_type,'datatype':datatype,'key':each})
            count+=1
        #elif ".bed" in each and ".gz" not in each:
        #    os.system("perl %s/bin/flatfile-to-json.pl --bed %s --trackLabel %s --trackType CanvasFeatures" % (jbrowse,each,each))
        else:
            print("%s is not a Bigwig/VCF/BAM File"%each)
    metaData.close()
    f.close()
# Build JBrowse tracks for every raw data file of this project/genome.
# FIX: removed a stray trailing '|' on the final call -- it was not valid
# Python and stopped the script from even importing.
#url = "http://localhost/JBrowse-1.11.6/?data=%s/json/%s" %(project,genome)
#webbrowser.open(url,new=2)
dataFiles = os.listdir(os.path.join(jbrowse, rawfiles))
createJbrowse(dataFiles, data=genome, append=add)
# %load q03_regression_plot/build.py
# Default imports
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# House-price dataset shared by the plotting helper below.
data = pd.read_csv('data/house_prices_multivariate.csv')
def regression_plot(var1,var2):
    """Show a seaborn regression plot (lmplot) of *var2* against *var1*.

    var1/var2 are column names of the module-level ``data`` frame.
    NOTE(review): positional x/y arguments to lmplot were removed in
    seaborn >= 0.12; there they must be passed as x=var1, y=var2.
    """
    sns.lmplot(var1,var2, data=data, fit_reg=True)
    plt.show()
|
#
# [96] Unique Binary Search Trees
#
# https://leetcode.com/problems/unique-binary-search-trees/description/
#
# algorithms
# Medium (41.93%)
# Total Accepted: 147.9K
# Total Submissions: 352.8K
# Testcase Example: '3'
#
# Given n, how many structurally unique BST's (binary search trees) that store
# values 1...n?
#
# For example,
# Given n = 3, there are a total of 5 unique BST's.
#
#
# 1 3 3 2 1
# \ / / / \ \
# 3 2 1 1 3 2
# / / \ \
# 2 1 2 3
#
#
#
#
#
class Solution(object):
    """LeetCode 96: count structurally unique BSTs storing values 1..n."""

    def numTrees(self, n):
        """Return the number of unique BSTs on n nodes (the n-th Catalan number).

        Dynamic programming: res[i] counts BSTs of i nodes -- pick each node as
        root and multiply the counts of the possible left/right subtrees.
        O(n^2) time, O(n) space.
        """
        res = [0] * (n + 1)
        res[0] = 1  # the empty tree
        # FIX: xrange is Python-2-only; range has the same semantics here and
        # keeps the solution runnable on Python 3.
        for i in range(1, n + 1):
            for j in range(i):
                res[i] += res[j] * res[i - 1 - j]
        return res[n]
|
# Package metadata, duplicated from setup() for programmatic access.
__author__ = "Vini Salazar"
__license__ = "MIT"
__maintainer__ = "Vini Salazar"
__url__ = "https://github.com/vinisalazar/bioprov"
__version__ = "0.1.24"
import setuptools
# The long description shown on PyPI comes straight from the README.
with open("README.md", "r") as readme_file:
    readme = readme_file.read()
# Standard setuptools configuration for the bioprov distribution.
setuptools.setup(
    name="bioprov",
    version="0.1.24",
    author="Vini Salazar",
    author_email="17276653+vinisalazar@users.noreply.github.com",
    description="BioProv - Provenance capture for bioinformatics workflows",
    long_description=readme,
    long_description_content_type="text/markdown",
    url="https://github.com/vinisalazar/BioProv",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Bio-Informatics",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
    ],
    packages=setuptools.find_packages(),
    scripts=["bioprov/bioprov"],
    include_package_data=True,
    keywords="w3c-prov biopython biological-data provenance",
    python_requires=">=3.6",
    install_requires=[
        "biopython",
        "coolname",
        "coveralls",
        "dataclasses",
        "pandas",
        "prov",
        "provstore-api",
        "pydot",
        "pytest",
        "pytest-cov",
        "tqdm",
        "tinydb",
    ],
)
|
# Generated by Django 2.0.6 on 2019-03-06 16:13
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration reconciling two divergent 0006 migrations of gameApp.

    Applies no schema operations; it exists only to join the two branches
    of the migration graph.
    """
    dependencies = [
        ('gameApp', '0006_auto_20190306_0845'),
        ('gameApp', '0006_auto_20190305_2243'),
    ]
    operations = [
    ]
|
# -*- coding: utf-8 -*-
from lascaux import Controller
class Index(Controller):
    """Front controller: serves a bare multipart upload form."""
    def get(self, p, place=None):
        """Handle GET by rendering a minimal file-upload form.

        `p` and `place` come from the router; neither is used here.
        NOTE(review): the commented render/final calls are earlier approaches
        kept for reference.
        """
        # self.render('index', message=u"Hello World")
        # self.final('index', app_package=True)
        self.save("""
<form method="post" enctype="multipart/form-data">
    <input type="text" />
    <input type="file" name="file" />
    <input type="submit" />
</form>
        """)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.