blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
da81bb3dea7ab7ec884e515a580eb6c8ab10ce51 | 9efbcd9e8a1636b86305bbce7ea827cb673459b8 | /Images/z books courses tutorials and manuals/curso python - Nina/problema12.py | 5d80339b4ad262119d5f282efb35f7f63cde7c42 | [] | no_license | brenoskuk/Breno_and_Kogler | 754714853beba71b007488fd160192c4930b7b82 | 91d8ffaedab8196de20158c6975a2a1dea0945e9 | refs/heads/master | 2020-04-16T08:27:07.884823 | 2019-05-07T15:36:28 | 2019-05-07T15:36:28 | 165,425,686 | 0 | 0 | null | null | null | null | ISO-8859-1 | Python | false | false | 741 | py | # -*- coding: latin-1 -*-
# A linha acima faz o sistema interpretar direito os acentos
# nas linhas de comentários
#
# programa problema12.py
# Verificar se um número natural é triangular
def main():
n = input("Digite um numero natural: ")
if n<=0:
print "O numero deve ser positivo!!!"
return
# idéia da solução
# começar com 1*2*3 e prosseguir com
# 2*3*4, 3*4*5 e assim por diante
# até o produto de três consecutivos
# ficar maior ou igual a n
a=1
prod = a*(a+1)*(a+2)
while prod<n :
a = a+1
prod = a*(a+1)*(a+2)
if prod == n :
print n, "e' triangular pois", n, "=", a, "*", a+1, "*", a+2
else :
print n, "nao e' triangular"
| [
"breno.skuk@polijunior.com.br"
] | breno.skuk@polijunior.com.br |
6ff3929a41ab113dcbf117a3b1aa6a29d4691ed5 | 6a52b4cbd7255a4e99377ca445b8ebcb7b7845e4 | /examples_scalability/demo_small_race_performance.py | 073e2af6c57be8afb6e8056d20e1df22b5677b8f | [
"MIT"
] | permissive | sachman15/winning | 1a52f93e47a1a9a936c1c469b5b3267e1cb26653 | e68314ad6ebba50b439e383640b1a3f996ab5700 | refs/heads/main | 2023-08-07T12:17:11.810111 | 2021-10-04T19:27:55 | 2021-10-04T19:27:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | from winning.lattice_plot import densitiesPlot
from winning.lattice import skew_normal_density, mean_of_density, implicit_state_prices, winner_of_many, sample_winner_of_many
from winning.lattice_calibration import solve_for_implied_offsets, state_prices_from_offsets, densities_from_offsets
import numpy as np
PLOTS=True
import math
import time
unit = 0.01
L = 500
def demo( ):
density = skew_normal_density(L=500, unit = unit, a=1.5)
cpu_times = list()
errors = list()
race_sizes = list(range(5,100))
for k,n in enumerate(race_sizes):
print(n)
true_offsets = [ int(unit*k) for k in range( n ) ]
state_prices = state_prices_from_offsets( density=density, offsets=true_offsets )
print("State prices are " + str( state_prices ))
offset_samples = list( range( -100, 100 ))[::-1]
# Now try to infer offsets from state prices
start_time = time.time()
implied_offsets = solve_for_implied_offsets(prices = state_prices, density = density, offset_samples= offset_samples, nIter=3)
cpu_times.append(1000*(time.time()-start_time))
recentered_offsets = [ io-implied_offsets[0] for io in implied_offsets]
differences = [ o1-o2 for o1, o2 in zip(recentered_offsets,true_offsets)]
avg_l1_in_offset = np.mean(np.abs( differences ))
errors.append( avg_l1_in_offset)
print(avg_l1_in_offset)
print(cpu_times)
log_cpu = [math.log(cpu) for cpu in cpu_times]
log_n = [math.log(n_) for n_ in race_sizes[:k+1]]
if k>=2:
print('Fitting ...')
print(np.polyfit(log_n, log_cpu, 1))
import matplotlib.pyplot as plt
plt.clf()
plt.scatter(race_sizes[:k+1],cpu_times)
plt.xlabel('Number of participants (n)')
plt.ylabel('Inversion time in milliseconds')
plt.show()
if __name__=='__main__':
demo()
| [
"petercotton@Peters-Mac-mini-2.local"
] | petercotton@Peters-Mac-mini-2.local |
2c3403845866237d826c9ad973b26b0d4667c57b | 55617c83a3e3183ad8b08630b87d1cfda9f9ebcf | /jared/jpenny-bot.py | 51d3e11a69f432f2da3cffa5dabc7ba9eb0f4ee5 | [] | no_license | BjornLJohnson/Jane_Street | c063b021b49e1e549e73e49261f116a5f342b41d | 2664745d6ee3ad608b4d9d5784b5c1152c7cfbd3 | refs/heads/master | 2020-06-19T15:01:51.424569 | 2019-07-14T02:10:00 | 2019-07-14T02:10:00 | 196,753,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,987 | py | #!/usr/bin/python
# ~~~~~============== HOW TO RUN ==============~~~~~
# 1) Configure things in CONFIGURATION section
# 2) Change permissions: chmod +x bot.py
# 3) Run in loop: while true; do ./bot.py; sleep 1; done
from __future__ import print_function
import sys
import socket
import json
import random
import time
# ~~~~~============== CONFIGURATION ==============~~~~~
# replace REPLACEME with your team name!
team_name="BANANAS"
# This variable dictates whether or not the bot is connecting to the prod
# or test exchange. Be careful with this switch!
test_mode = True
# This setting changes which test exchange is connected to.
# 0 is prod-like
# 1 is slower
# 2 is empty
test_exchange_index=0
prod_exchange_hostname="production"
port=25000 + (test_exchange_index if test_mode else 0)
exchange_hostname = "test-exch-" + team_name if test_mode else prod_exchange_hostname
# ~~~~~============== NETWORKING CODE ==============~~~~~
def connect():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((exchange_hostname, port))
return s.makefile('rw', 1)
def write_to_exchange(exchange, obj):
json.dump(obj, exchange)
exchange.write("\n")
def read_from_exchange(exchange):
return json.loads(exchange.readline())
def hello(exchange):
write_to_exchange(exchange, {"type": "hello", "team": team_name.upper()})
def buy(exchange, order_id, symbol, price, size):
write_to_exchange(exchange, {"type": "add", "order_id": order_id, "symbol": symbol, "dir": "BUY", "price": price, "size": size})
def sell(exchange, order_id, symbol, price, size):
write_to_exchange(exchange, {"type": "add", "order_id": order_id, "symbol": symbol, "dir": "SELL", "price": price, "size": size})
def convert(exchange, order_id, symbol, size):
write_to_exchange(exchange, {"type": "convert", "order_id": order_id, "symbol": symbol, "dir": "BUY", "size": size})
def cancel(exchange, order_id):
write_to_exchange(exchange, {"type": "cancel", "order_id": order_id})
def get_info(exchange, buy_dict, sell_dict):
from_exchange = read_from_exchange(exchange)
highest_bid = 9999999999
lowest_offer = -9999999999
if from_exchange["type"] == "book":
security = from_exchange["symbol"]
security = from_exchange["symbol"]
if len(from_exchange["buy"]) > 0:
highest_bid = from_exchange["buy"][0][0]
buy_dict[security] = highest_bid
if len(from_exchange["sell"]) > 0:
lowest_offer = from_exchange["sell"][0][0]
sell_dict[security] = lowest_offer
def penny(exchange, buy_dict, sell_dict, orders):
for bond in buy_dict.keys():
order_id = random.randint(1000, 100000)
buy(exchange, order_id, bond, buy_dict[bond] + 1, 1)
print("ORDERED")
if not read_from_exchange(exchange)["type"] == "reject":
orders.append(order_id)
for bond in sell_dict.keys():
order_id = random.randint(1000, 100000)
sell(exchange, order_id, bond, sell_dict[bond] - 1, 1)
print("SOLD")
if not read_from_exchange(exchange)["type"] == "reject":
orders.append(order_id)
# ~~~~~============== MAIN LOOP ==============~~~~~
def main():
exchange = connect()
write_to_exchange(exchange, {"type": "hello", "team": team_name.upper()})
hello_from_exchange = read_from_exchange(exchange)
print("The exchange replied:", hello_from_exchange, file=sys.stderr)
sell_dict = {}
buy_dict = {}
orders = []
while(True):
get_info(exchange, buy_dict, sell_dict)
penny(exchange, buy_dict, sell_dict, orders)
time.sleep(5)
# A common mistake people make is to call write_to_exchange() > 1
# time for every read_from_exchange() response.
# Since many write messages generate marketdata, this will cause an
# exponential explosion in pending messages. Please, don't do that!
if __name__ == "__main__":
main()
| [
"ubuntu@bot-bananas.sf-2019.js-etc"
] | ubuntu@bot-bananas.sf-2019.js-etc |
98a9a6515829e349ecfdbe50d49b5c9199c1ddb6 | 99c969413c21b38f399a27298e9aa06c3aafc5ca | /venv/Scripts/runxlrd.py | 8c07eed56936ad09c83ae5f4a07720774a852fc9 | [] | no_license | wwwgang/JDReptail_price | 1a302a4fef36edb565994e9364a84621473635f8 | f010a4bc4518413a8bb5b7e89051b9b7a8b65131 | refs/heads/master | 2022-02-21T21:25:42.980215 | 2019-03-01T10:15:44 | 2019-03-01T10:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,338 | py | #!C:\Users\wg186\PycharmProjects\JDReptail_price\venv\Scripts\python.exe
# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This script is part of the xlrd package, which is released under a
# BSD-style licence.
from __future__ import print_function
cmd_doc = """
Commands:
2rows Print the contents of first and last row in each sheet
3rows Print the contents of first, second and last row in each sheet
bench Same as "show", but doesn't print -- for profiling
biff_count[1] Print a count of each type of BIFF record in the file
biff_dump[1] Print a dump (char and hex) of the BIFF records in the file
fonts hdr + print a dump of all font objects
hdr Mini-overview of file (no per-sheet information)
hotshot Do a hotshot profile run e.g. ... -f1 hotshot bench bigfile*.xls
labels Dump of sheet.col_label_ranges and ...row... for each sheet
name_dump Dump of each object in book.name_obj_list
names Print brief information for each NAME record
ov Overview of file
profile Like "hotshot", but uses cProfile
show Print the contents of all rows in each sheet
version[0] Print versions of xlrd and Python and exit
xfc Print "XF counts" and cell-type counts -- see code for details
[0] means no file arg
[1] means only one file arg i.e. no glob.glob pattern
"""
options = None
if __name__ == "__main__":
PSYCO = 0
import xlrd
import sys
import time
import glob
import traceback
import gc
from xlrd.timemachine import xrange, REPR
class LogHandler(object):
def __init__(self, logfileobj):
self.logfileobj = logfileobj
self.fileheading = None
self.shown = 0
def setfileheading(self, fileheading):
self.fileheading = fileheading
self.shown = 0
def write(self, text):
if self.fileheading and not self.shown:
self.logfileobj.write(self.fileheading)
self.shown = 1
self.logfileobj.write(text)
null_cell = xlrd.empty_cell
def show_row(bk, sh, rowx, colrange, printit):
if bk.ragged_rows:
colrange = range(sh.row_len(rowx))
if not colrange: return
if printit: print()
if bk.formatting_info:
for colx, ty, val, cxfx in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r, xfx: %s"
% (xlrd.colname(colx), rowx+1, ty, val, cxfx))
else:
for colx, ty, val, _unused in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r" % (xlrd.colname(colx), rowx+1, ty, val))
def get_row_data(bk, sh, rowx, colrange):
result = []
dmode = bk.datemode
ctys = sh.row_types(rowx)
cvals = sh.row_values(rowx)
for colx in colrange:
cty = ctys[colx]
cval = cvals[colx]
if bk.formatting_info:
cxfx = str(sh.cell_xf_index(rowx, colx))
else:
cxfx = ''
if cty == xlrd.XL_CELL_DATE:
try:
showval = xlrd.xldate_as_tuple(cval, dmode)
except xlrd.XLDateError as e:
showval = "%s:%s" % (type(e).__name__, e)
cty = xlrd.XL_CELL_ERROR
elif cty == xlrd.XL_CELL_ERROR:
showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
else:
showval = cval
result.append((colx, cty, showval, cxfx))
return result
def bk_header(bk):
print()
print("BIFF version: %s; datemode: %s"
% (xlrd.biff_text_from_num[bk.biff_version], bk.datemode))
print("codepage: %r (encoding: %s); countries: %r"
% (bk.codepage, bk.encoding, bk.countries))
print("Last saved by: %r" % bk.user_name)
print("Number of data sheets: %d" % bk.nsheets)
print("Use mmap: %d; Formatting: %d; On demand: %d"
% (bk.use_mmap, bk.formatting_info, bk.on_demand))
print("Ragged rows: %d" % bk.ragged_rows)
if bk.formatting_info:
print("FORMATs: %d, FONTs: %d, XFs: %d"
% (len(bk.format_list), len(bk.font_list), len(bk.xf_list)))
if not options.suppress_timing:
print("Load time: %.2f seconds (stage 1) %.2f seconds (stage 2)"
% (bk.load_time_stage_1, bk.load_time_stage_2))
print()
def show_fonts(bk):
print("Fonts:")
for x in xrange(len(bk.font_list)):
font = bk.font_list[x]
font.dump(header='== Index %d ==' % x, indent=4)
def show_names(bk, dump=0):
bk_header(bk)
if bk.biff_version < 50:
print("Names not extracted in this BIFF version")
return
nlist = bk.name_obj_list
print("Name list: %d entries" % len(nlist))
for nobj in nlist:
if dump:
nobj.dump(sys.stdout,
header="\n=== Dump of name_obj_list[%d] ===" % nobj.name_index)
else:
print("[%d]\tName:%r macro:%r scope:%d\n\tresult:%r\n"
% (nobj.name_index, nobj.name, nobj.macro, nobj.scope, nobj.result))
def print_labels(sh, labs, title):
if not labs:return
for rlo, rhi, clo, chi in labs:
print("%s label range %s:%s contains:"
% (title, xlrd.cellname(rlo, clo), xlrd.cellname(rhi-1, chi-1)))
for rx in xrange(rlo, rhi):
for cx in xrange(clo, chi):
print(" %s: %r" % (xlrd.cellname(rx, cx), sh.cell_value(rx, cx)))
def show_labels(bk):
# bk_header(bk)
hdr = 0
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
clabs = sh.col_label_ranges
rlabs = sh.row_label_ranges
if clabs or rlabs:
if not hdr:
bk_header(bk)
hdr = 1
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
print_labels(sh, clabs, 'Col')
print_labels(sh, rlabs, 'Row')
if bk.on_demand: bk.unload_sheet(shx)
def show(bk, nshow=65535, printit=1):
bk_header(bk)
if 0:
rclist = xlrd.sheet.rc_stats.items()
rclist = sorted(rclist)
print("rc stats")
for k, v in rclist:
print("0x%04x %7d" % (k, v))
if options.onesheet:
try:
shx = int(options.onesheet)
except ValueError:
shx = bk.sheet_by_name(options.onesheet).number
shxrange = [shx]
else:
shxrange = range(bk.nsheets)
# print("shxrange", list(shxrange))
for shx in shxrange:
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
colrange = range(ncols)
anshow = min(nshow, nrows)
print("sheet %d: name = %s; nrows = %d; ncols = %d" %
(shx, REPR(sh.name), sh.nrows, sh.ncols))
if nrows and ncols:
# Beat the bounds
for rowx in xrange(nrows):
nc = sh.row_len(rowx)
if nc:
sh.row_types(rowx)[nc-1]
sh.row_values(rowx)[nc-1]
sh.cell(rowx, nc-1)
for rowx in xrange(anshow-1):
if not printit and rowx % 10000 == 1 and rowx > 1:
print("done %d rows" % (rowx-1,))
show_row(bk, sh, rowx, colrange, printit)
if anshow and nrows:
show_row(bk, sh, nrows-1, colrange, printit)
print()
if bk.on_demand: bk.unload_sheet(shx)
def count_xfs(bk):
bk_header(bk)
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
nrows = sh.nrows
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
# Access all xfindexes to force gathering stats
type_stats = [0, 0, 0, 0, 0, 0, 0]
for rowx in xrange(nrows):
for colx in xrange(sh.row_len(rowx)):
xfx = sh.cell_xf_index(rowx, colx)
assert xfx >= 0
cty = sh.cell_type(rowx, colx)
type_stats[cty] += 1
print("XF stats", sh._xf_index_stats)
print("type stats", type_stats)
print()
if bk.on_demand: bk.unload_sheet(shx)
def main(cmd_args):
import optparse
global options, PSYCO
usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
oparser = optparse.OptionParser(usage)
oparser.add_option(
"-l", "--logfilename",
default="",
help="contains error messages")
oparser.add_option(
"-v", "--verbosity",
type="int", default=0,
help="level of information and diagnostics provided")
oparser.add_option(
"-m", "--mmap",
type="int", default=-1,
help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
oparser.add_option(
"-e", "--encoding",
default="",
help="encoding override")
oparser.add_option(
"-f", "--formatting",
type="int", default=0,
help="0 (default): no fmt info\n"
"1: fmt info (all cells)\n",
)
oparser.add_option(
"-g", "--gc",
type="int", default=0,
help="0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc")
oparser.add_option(
"-s", "--onesheet",
default="",
help="restrict output to this sheet (name or index)")
oparser.add_option(
"-u", "--unnumbered",
action="store_true", default=0,
help="omit line numbers or offsets in biff_dump")
oparser.add_option(
"-d", "--on-demand",
action="store_true", default=0,
help="load sheets on demand instead of all at once")
oparser.add_option(
"-t", "--suppress-timing",
action="store_true", default=0,
help="don't print timings (diffs are less messy)")
oparser.add_option(
"-r", "--ragged-rows",
action="store_true", default=0,
help="open_workbook(..., ragged_rows=True)")
options, args = oparser.parse_args(cmd_args)
if len(args) == 1 and args[0] in ("version", ):
pass
elif len(args) < 2:
oparser.error("Expected at least 2 args, found %d" % len(args))
cmd = args[0]
xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
if cmd == 'biff_dump':
xlrd.dump(args[1], unnumbered=options.unnumbered)
sys.exit(0)
if cmd == 'biff_count':
xlrd.count_records(args[1])
sys.exit(0)
if cmd == 'version':
print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
print("Python:", sys.version)
sys.exit(0)
if options.logfilename:
logfile = LogHandler(open(options.logfilename, 'w'))
else:
logfile = sys.stdout
mmap_opt = options.mmap
mmap_arg = xlrd.USE_MMAP
if mmap_opt in (1, 0):
mmap_arg = mmap_opt
elif mmap_opt != -1:
print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt)
fmt_opt = options.formatting | (cmd in ('xfc', ))
gc_mode = options.gc
if gc_mode:
gc.disable()
for pattern in args[1:]:
for fname in glob.glob(pattern):
print("\n=== File: %s ===" % fname)
if logfile != sys.stdout:
logfile.setfileheading("\n=== File: %s ===\n" % fname)
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC before open:", n_unreachable, "unreachable objects")
if PSYCO:
import psyco
psyco.full()
PSYCO = 0
try:
t0 = time.time()
bk = xlrd.open_workbook(
fname,
verbosity=options.verbosity, logfile=logfile,
use_mmap=mmap_arg,
encoding_override=options.encoding,
formatting_info=fmt_opt,
on_demand=options.on_demand,
ragged_rows=options.ragged_rows,
)
t1 = time.time()
if not options.suppress_timing:
print("Open took %.2f seconds" % (t1-t0,))
except xlrd.XLRDError as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
continue
except KeyboardInterrupt:
print("*** KeyboardInterrupt ***")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
except BaseException as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
traceback.print_exc(file=sys.stdout)
continue
t0 = time.time()
if cmd == 'hdr':
bk_header(bk)
elif cmd == 'ov': # OverView
show(bk, 0)
elif cmd == 'show': # all rows
show(bk)
elif cmd == '2rows': # first row and last row
show(bk, 2)
elif cmd == '3rows': # first row, 2nd row and last row
show(bk, 3)
elif cmd == 'bench':
show(bk, printit=0)
elif cmd == 'fonts':
bk_header(bk)
show_fonts(bk)
elif cmd == 'names': # named reference list
show_names(bk)
elif cmd == 'name_dump': # named reference list
show_names(bk, dump=1)
elif cmd == 'labels':
show_labels(bk)
elif cmd == 'xfc':
count_xfs(bk)
else:
print("*** Unknown command <%s>" % cmd)
sys.exit(1)
del bk
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects")
if not options.suppress_timing:
t1 = time.time()
print("\ncommand took %.2f seconds\n" % (t1-t0,))
return None
av = sys.argv[1:]
if not av:
main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
import hotshot
import hotshot.stats
av = av[1:]
prof_log_name = "XXXX.prof"
prof = hotshot.Profile(prof_log_name)
# benchtime, result = prof.runcall(main, *av)
result = prof.runcall(main, *(av, ))
print("result", repr(result))
prof.close()
stats = hotshot.stats.load(prof_log_name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
elif firstarg == "profile":
import cProfile
av = av[1:]
cProfile.run('main(av)', 'YYYY.prof')
import pstats
p = pstats.Stats('YYYY.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(30)
elif firstarg == "psyco":
PSYCO = 1
main(av[1:])
else:
main(av)
| [
"https://github.com/wg18618262234/JDReptail.git"
] | https://github.com/wg18618262234/JDReptail.git |
5427bf1d7adba27512955184fcf4aba4b4460d85 | 877edb2612f11e86d77d500c6d141f54a0275c71 | /gdsctools/readers.py | 0c843db2da62217614efe446729027cae6b27793 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | shukwong/gdsctools | 7172cfc12efb1611efa3eb33c35616cb42af28d6 | 09c0e80cb755b09b8d92d01cb08679c880122d4c | refs/heads/master | 2021-01-21T10:04:36.069659 | 2017-02-01T09:53:06 | 2017-02-01T09:53:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,635 | py | # -*- python -*-
# -*- coding utf-8 -*-
# This file is part of GDSCTools software
#
# Copyright (c) 2015 - Wellcome Trust Sanger Institute
# All rights reserved
#
# File author(s): Thomas Cokelaer <cokelaer@gmail.com>
#
# Distributed under the BSD 3-Clause License.
# See accompanying file LICENSE.txt distributed with this software
#
# website: http://github.com/CancerRxGene/gdsctools
#
##############################################################################
"""IO functionalities
Provides readers to read the following formats
- Matrix of IC50 data set :class:`IC50`
- Matrix of Genomic features with :class:`GenomicFeatures`
- Drug Decoder table with :class:`DrugDecode`
"""
import warnings
from gdsctools.errors import GDSCToolsDuplicatedDrugError
import pandas as pd
import pylab
import numpy as np
import easydev
import colorlog
__all__ = ['IC50', 'GenomicFeatures', 'Reader', 'DrugDecode']
def drug_name_to_int(name):
# We want to remove the prefix Drug_
# We also want to remove suffix _IC50 but in v18, we have names
# such as Drug_1_0.33_IC50 to provide the concentration.
# So, we should remove the string after the second _
# finally, #154 also causes a trouble that is a cast to integer
# from a string that is too large (more than 20 digits) may not be cast
# with pandas. Values must be less than 2**64-1. To guarantee that
# the cast works correctly, we can assume that it has less than 19 digits
def _str_to_int(x, maxdigits=19):
if isinstance(x, (int, np.integer)):
return x
elif isinstance(x, str):
if len(x) > maxdigits:
print("Warnings gdsctools.readers.drug_name_to_int: " +
"%s identifier too long." % x +
"Please use values below 2**64 with less than 19 digits")
x = int(x[0:maxdigits])
else:
x = int(x)
return x
else:
print(type(x))
raise NotImplementedError
# remove characters (' and ")
if isinstance(name, str):
name = name.replace("'", "")
name = name.replace('"', "")
# replace the Drug_ and DRUG_
try:
res = name.replace("Drug_", "").replace("DRUG_", "")
res = res.split("_")[0]
res = _str_to_int(res)
return res
except:
return _str_to_int(name)
class Reader(object):
"""Convenience base class to read CSV or TSV files (using extension)"""
def __init__(self, data=None):
r""".. rubric:: Constructor
This class takes only one input parameter, however, it may be a
filename, or a dataframe or an instance of :class:`Reader` itself. This
means than children classes such as :class:`IC50` can also be used
as input as long as a dataframe named :attr:`df` can be found.
:param data: a filename in CSV or TSV format with format specified by
child class (see e.g. :class:`IC50`), or a valid dataframe, or an
instance of :class:`Reader`.
The input can be a filename either in CSV (comma separated values) or
TSV (tabular separated values). The extension will be used to interpret
the content, so please be consistent in the naming of the file
extensions.
::
>>> from gdsctools import Reader, ic50_test
>>> r = Reader(ic50_test.filename) # this is a CSV file
>>> len(r.df) # number of rows
988
>>> len(r) # number of elements
11856
Note that :class:`Reader` is a base class and more sophisticated
readers are available. for example, the :class:`IC50` would be
better to read this IC50 data set.
The data has been stored in a data frame in the :attr:`df` attribute.
The dataframe of the object itself can be used as an input to create
an new instance::
>>> from gdsctools import Reader, ic50_test
>>> r = Reader(ic50_test.filename, sep="\t")
>>> r2 = Reader(r) # here r.df is simply copied into r2
>>> r == r2
True
It is sometimes convenient to create an empty Reader that will be
populated later on::
>>> r = Reader()
>>> len(r)
0
More advanced readers (e.g. :class:`IC50`) can also be used as input
as long as they have a :attr:`df` attribute::
>>> from gdsctools import Reader, ic50_test
>>> ic = IC50(ic50_test)
>>> r = Reader(ic)
"""
# input data
if data is None:
# create an empty dataframe
self.df = pd.DataFrame()
self._filename = None
elif isinstance(data, str):
# Read a filename in TSV or CSV format
self.read_data(data)
self._filename = data
elif hasattr(data, 'filename'):
# could be a data sets from gdsctools.datasets.Data
self.read_data(data.filename)
self._filename = data.filename
elif hasattr(data, 'df'):
# an instance of a Reader (or child such as IC50, GenomicFeatures)
self.df = data.df.copy()
self._filename = data._filename
elif isinstance(data, pd.DataFrame):
# Or just a dataframe ?
self.df = data.copy()
self._filename = None
else:
raise TypeError("Input must be a filename, a IC50 instance, or " +
"a dataframe.")
#: if populated, can be used to check validity of a header
# used by drug_decode only may be removed
self.header = []
# sanity check on cleaning columns if not alread done
#try:self.df.columns = [x.strip() for x in self.df.columns]
#except: pass # fails for the IC50 where header is made of integers
def read_data(self, filename):
# remove possible white spaces in the header's names
if ".csv" in filename:
separator = ","
elif ".tsv" in filename:
separator = "\t"
elif ".txt" in filename:
separator = "\t"
print("GDSCTools warning: files with .txt extension are "
"accepted (we assume a tab-separated file) but "
"should be renamed with .csv or .tsv extension")
else:
raise NotImplementedError("Only .csv or .tsv files are accepted ")
try:
# this is to cope with pandas 0.13 on ReadTheDoc
# and newer versions
na_values = ["NA", "NaN"]
if filename.endswith(".gz"):
compression = "gzip"
elif filename.endswith(".bz2"):
compression = "bz2"
elif filename.endswith(".zip"):
compression = "zip"
elif filename.endswith(".xz"):
compression = "xz"
else:
compression = None
# Sometimes a column in CSV file may have several values
# separated by comma. This should be surrended by quotes "
# To account for that feature, quotechar argument must be provided
# Besides, to avoid conflicts with spaces, skipinitialspace must
# be set to True. This also helps since spaces would be
# interpreted as a string. Using skipinitialspace, the spaces
# is converetd to NA
rawdf = pd.read_csv(filename, sep=separator, comment="#",
na_values=na_values, skipinitialspace=True,
compression=compression, quotechar='"')
#if sum([this.count('\t') for this in rawdf.columns])>2:
# print("Your input file does not seem to be comma"
# " separated. If tabulated, please rename with"
# " .tsv or .txt extension")
# Sometimes, a user will provide a CSV, which is actually
# tab-delimited. This is wrong and difficult to catch
except Exception as err:
msg = 'Could not read %s. See gdsctools.readers.Reader'
print(msg % filename)
raise(err)
# Make sure the columns' names are stripped
#rawdf.rename(columns=lambda x: x.strip(), inplace=True)
# let us drop columns that are unnamed and print information
columns = [x for x in rawdf.columns if x.startswith('Unnamed')]
if len(columns) > 0:
print('%s unnamed columns found and removed. ' % len(columns) +
'Please fix your input file.')
self.df = rawdf.drop(columns, axis=1)
# Some fields may be empty strings, which must be set as NA
import warnings
warnings.filterwarnings('ignore')
self.df = self.df.replace(" ", "").replace("\t", "").replace("",
np.nan)
warnings.filterwarnings("default")
# Finally, check that names do not contain the unwanted character
# / that was used in some old matrices.
if len([True for x in self.df.columns if "/" in x])>0:
print("Your input data contains unwanted / characters in " +
" the header. Let's remove them.")
self.df.columns = [x.replace("/", "_") for x in self.df.columns]
def _interpret(self):
pass
def _valid_header(self, df):
for name in self.header:
if name not in list(df.columns):
return False
return True
def _read_matrix_from_r(self, name):
"""Required biokit. Will be removed"""
print("Reading matrix %s " % (name))
self.session.run("rnames = rownames(%s)" % name)
self.session.run("cnames = colnames(%s)" % name)
self.session.run("data = %s" % name)
cnames = self.session.cnames
rnames = self.session.rnames
data = self.session.data
df = pd.DataFrame(data=data.copy())
df.columns = [x.strip() for x in cnames]
df.index = [x.strip() for x in rnames]
return df
def __str__(self):
self.df.info()
return ""
def __len__(self):
return self.df.shape[0] * self.df.shape[1]
def to_csv(self, filename, sep=",", index=False, reset_index=True):
"""Save data into a CSV file without indices"""
#Reset the index (e.g., COSMIC ID)
if reset_index is True:
df = self.df.reset_index()
else:
df = self.df
df.to_csv(filename, sep=sep, index=index)
def check(self):
"""Checking the format of the matrix
Currently, only checks that there is no duplicated column names
"""
if len(self.df.columns.unique()) != len(self.df.columns):
columns = list(self.df.columns)
for this in columns:
if columns.count(this) > 1:
raise GDSCToolsDuplicatedDrugError(this)
def _check_uniqueness(self, data):
if len(set(data)) != len(data):
raise Exception("Error gdsctools in readers.IC50: data " +
" identifiers not unique.")
def __eq__(self, other):
return all(self.df.fillna(0) == other.df.fillna(0))
class CosmicRows(object):
"""Parent class to IC50 and GenomicFeatures to handle cosmic identifiers"""
def _get_cosmic(self):
return list(self.df.index)
def _set_cosmic(self, cosmics):
for cosmic in cosmics:
if cosmic not in self.cosmicIds:
raise ValueError('Unknown cosmic identifier')
self.df = self.df.ix[cosmics]
cosmicIds = property(_get_cosmic, _set_cosmic,
doc="return list of cosmic ids (could have duplicates)")
def drop_cosmic(self, cosmics):
"""drop a drug or a list of cosmic ids"""
cosmics = easydev.to_list(cosmics)
tokeep = [x for x in self.cosmicIds if x not in cosmics]
self.cosmicIds = tokeep
class IC50(Reader, CosmicRows):
    """Reader of IC50 data set
    This input matrix must be a comman-separated value (CSV) or
    tab-separated value file (TSV).
    The matrix must have a header and at least 2 columns. If the number of rows
    is not sufficient, analysis may not be possible.
    The header must have a column called "COSMIC_ID" or "COSMIC ID".
    This column will be used as indices (row names). All other columns will
    be considered as input data.
    The column "COSMIC_ID" contains the cosmic identifiers (cell line). The
    other columns should be filled with the IC50s corresponding to a pair
    of COSMIC identifiers and Drug. Nothing prevents you to fill the file with
    data that have other meaning (e.g. AUC).
    If at least one column starts with ``Drug_``, all other columns will be
    ignored. This was implemented for back compatibility.
    The order of the columns is not important.
    Here is a simple example of a valid TSV file::
        COSMIC_ID Drug_1_IC50 Drug_20_IC50
        111111 0.5 0.8
        222222 1 2
    A test file is provided in the gdsctools package::
        from gdsctools import ic50_test
    You can read it using this class and plot information as follows:
    .. plot::
        :width: 80%
        :include-source:
        from gdsctools import IC50, ic50_test
        r = IC50(ic50_test)
        r.plot_ic50_count()
    You can get basic information using the print function::
        >>> from gdsctools import IC50, ic50_test
        >>> r = IC50(ic50_test)
        >>> print(r)
        Number of drugs: 11
        Number of cell lines: 988
        Percentage of NA 0.206569746043
    You can get the drug identifiers as follows::
        r.drugIds
    and set the drugs, which means other will be removed::
        r.drugsIds = [1, 1000]
    .. versionchanged:: 0.9.10
        The column **COSMIC ID** should now be **COSMIC_ID**.
        Previous name is deprecated but still accepted.
    """
    # Name of the compulsory column holding cell-line identifiers.
    cosmic_name = 'COSMIC_ID'
    def __init__(self, filename, v18=False):
        """.. rubric:: Constructor
        :param filename: input filename of IC50s. May also be an instance
            of :class:`IC50` or a valid dataframe. The data is stored as a
            dataframe in the attribute called :attr:`df`. Input file may be
            gzipped
        :param bool v18: when True, skip the integer-casting of drug ids
            (v18 data may contain duplicated drug identifiers).
        """
        super(IC50, self).__init__(filename)
        # interpret the raw data and check some of its contents
        self._v18 = v18
        if len(self.df) > 0:
            self._interpret()
            self.check()
    def _interpret(self):
        # Normalise the raw dataframe in place: rename deprecated column
        # names, set COSMIC_ID as the index and keep only drug columns.
        # if there is at least one column that starts with Drug or drug or
        # DRUG or variant then all other columns are dropped except "COSMIC ID"
        # For back compatibility with data that mixes Drug identifiers and
        # genomic features:
        _cols = [str(x) for x in self.df.columns]
        drug_prefix = None
        for this in _cols:
            if this.startswith("Drug_"):
                drug_prefix = "Drug"
        _cols = [str(x) for x in self.df.columns]
        if "COSMIC ID" in _cols and self.cosmic_name not in _cols:
            colorlog.warning("'COSMIC ID' column name is deprecated since " +
                "0.9.10. Please replace with 'COSMIC_ID'", DeprecationWarning)
            self.df.columns = [x.replace("COSMIC ID", "COSMIC_ID")
                for x in self.df.columns]
        # NOTE(review): "COSMID_ID" below looks like a typo for "COSMIC_ID";
        # confirm against the rest of the package before changing it.
        if "CL" in _cols and "COSMID_ID" not in self.df.columns:
            colorlog.warning("'CL column name is deprecated since " +
                "0.9.10. Please replace with 'COSMIC_ID'", DeprecationWarning)
            self.df.columns = [x.replace("CL", "COSMIC_ID")
                for x in self.df.columns]
        # If the data has not been interpreted, COSMIC column should be
        # found in the column and set as the index
        _cols = [str(x) for x in self.df.columns]
        if self.cosmic_name in self.df.columns:
            self.df.set_index(self.cosmic_name, inplace=True)
            _cols = [str(x) for x in self.df.columns]
            if drug_prefix:
                columns = [x for x in _cols if x.startswith(drug_prefix)]
                self.df = self.df[columns]
        # If already interpreted, COSMIC name should be the index already.
        # and should be integers, so let us cast to integer
        elif self.df.index.name == self.cosmic_name:
            _cols = [str(x) for x in self.df.columns]
            if drug_prefix:
                columns = [x for x in _cols if x.startswith(drug_prefix)]
                # NOTE(review): the next line discards the filtered list just
                # built above (dead store) — looks unintentional; confirm.
                columns = self.df.columns
                assert len(columns) == len(set(columns))
                self.df = self.df[columns]
        # Otherwise, raise an error
        else:
            raise ValueError("{0} column could not be found in the header".format(
                self.cosmic_name))
        # In v18, the drug ids may be duplicated
        if self._v18 is True:
            return
        # Cast drug identifiers (e.g. "Drug_950_IC50") and cosmic ids to int.
        self.df.columns = [drug_name_to_int(x) for x in self.df.columns]
        self.df.columns = self.df.columns.astype(int)
        self.df.index = [int(x) for x in self.df.index]
        self.df.index = self.df.index.astype(int)
        self.df.index.name = "COSMIC_ID"
        # Check uniqueness
        self._check_uniqueness(self.df.index)
    def drug_name_to_int(self, name):
        # Thin wrapper around the module-level helper of the same name.
        return drug_name_to_int(name)
    def _get_drugs(self):
        # Drug identifiers are the dataframe columns.
        return list(self.df.columns)
    def _set_drugs(self, drugs):
        # Validate before slicing so the dataframe is untouched on error.
        for drug in drugs:
            if drug not in self.drugIds:
                raise ValueError('Unknown drug name')
        self.df = self.df[drugs]
    drugIds = property(_get_drugs, _set_drugs,
        doc='list the drug identifier name or select sub set')
    def drop_drugs(self, drugs):
        """drop a drug or a list of drugs"""
        drugs = easydev.to_list(drugs)
        tokeep = [x for x in self.drugIds if x not in drugs]
        self.drugIds = tokeep
    def __contains__(self, item):
        # Membership test against the drug identifiers (``x in ic50``).
        if item in self.drugIds:
            return True
        else:
            return False
    def plot_ic50_count(self, **kargs):
        """Plots the fraction of valid/measured IC50 per drug
        :param kargs: any valid parameters accepted by pylab.plot function.
        :return: the fraction of valid/measured IC50 per drug
        """
        # count() ignores NaNs, so this is the fraction of measured values.
        data = self.df.count()/len(self.df)
        pylab.clf()
        pylab.plot(data.values, **kargs)
        pylab.grid()
        pylab.xlim([0, len(self.drugIds)+1])
        pylab.xlabel('Drug index')
        pylab.ylim([0,1])
        pylab.ylabel('Percentage of valid IC50')
        return data
    def hist(self, bins=20, **kargs):
        """Histogram of the measured IC50
        :param bins: binning of the histogram
        :param kargs: any argument accepted by pylab.hist function.
        :return: all measured IC50
        .. plot::
            :include-source:
            :width: 80%
            from gdsctools import IC50, ic50_test
            r = IC50(ic50_test)
            r.hist()
        """
        pylab.clf()
        pylab.hist(self.get_ic50(), bins=bins, **kargs)
        pylab.grid()
        pylab.xlabel('log IC50')
    def get_ic50(self):
        """Return all ic50 as a list"""
        # Flatten the matrix and drop NaNs (unmeasured pairs).
        return [x for x in self.df.values.flatten() if not np.isnan(x)]
    def __str__(self):
        txt = "Number of drugs: %s\n" % len(self.drugIds)
        txt += "Number of cell lines: %s\n" % len(self.df)
        N = len(self.drugIds) * len(self.df)
        Nna = self.df.isnull().sum().sum()
        if N != 0:
            txt += "Percentage of NA {0}\n".format(Nna / float(N))
        return txt
    def __repr__(self):
        Nc = len(self.cosmicIds)
        Nd = len(self.drugIds)
        return "IC50 object <Nd={0}, Nc={1}>".format(Nd, Nc)
    """def __add__(self, other):
        print("Experimantal. combines IC50 via COSMIC IDs")
        df = pd.concat([self.df, other.df], ignore_index=True)
        df = df.drop_duplicates(cols=[self.cosmic_name])
        return df
    """
    def copy(self):
        # The constructor accepts an IC50 instance, so this deep-copies
        # the data into a fresh reader.
        new = IC50(self)
        return new
class GenomicFeatures(Reader, CosmicRows):
    """Read Matrix with Genomic Features
    These are the compulsary column names required (note the spaces):
    - 'COSMIC_ID'
    - 'TISSUE_FACTOR'
    - 'MSI_FACTOR'
    If one of the following column is found, it is removed (deprecated)::
    - 'SAMPLE_NAME'
    - 'Sample Name'
    - 'CELL_LINE'
    and features can be also encoded with the following convention:
    - columns ending in "_mut" to encode a gene mutation (e.g., BRAF_mut)
    - columns starting with "gain_cna"
    - columns starting with "loss_cna"
    Those columns will be removed:
    - starting with `Drug_`, which are supposibly from the IC50 matrix
    ::
        >>> from gdsctools import GenomicFeatures
        >>> gf = GenomicFeatures()
        >>> print(gf)
        Genomic features distribution
        Number of unique tissues 27
        Number of unique features 677 with
        - Mutation: 270
        - CNA (gain): 116
        - CNA (loss): 291
    .. versionchanged:: 0.9.10
        The header's columns' names have changed to be more consistant.
        Previous names are deprecated but still accepted.
    .. versionchanged:: 0.9.15
        If a tissue is empty, it is replaced by UNDEFINED.
        We also strip the spaces to make sure there is "THIS" and "THIS " are
        the same.
    """
    # Canonical names of the special (factor) columns.
    colnames = easydev.AttrDict()
    colnames.cosmic = 'COSMIC_ID'
    colnames.tissue = 'TISSUE_FACTOR'
    colnames.msi = 'MSI_FACTOR'
    colnames.media = 'MEDIA_FACTOR'
    def __init__(self, filename=None, empty_tissue_name="UNDEFINED"):
        """.. rubric:: Constructor
        If no file is provided, using the default file provided in the
        package that is made of 1001 cell lines times 680 features.
        :param str empty_tissue_name: if a tissue name is let empty, replace
            it with this string.
        """
        # first reset the filename to the shared data (if not provided)
        if filename is None:
            from gdsctools.datasets import genomic_features
            filename = genomic_features
        # used in the header so should be ser before call to super()
        super(GenomicFeatures, self).__init__(filename)
        # FIXME Remove columns related to Drug if any. Can be removed in
        # the future
        self.df = self.df[[x for x in self.df.columns
            if x.startswith('Drug_') is False]]
        # Drop deprecated sample-name columns if present.
        for this in ['Sample Name', 'SAMPLE_NAME', 'Sample_Name', 'CELL_LINE']:
            if this in self.df.columns:
                self.df.drop(this, axis=1, inplace=True)
        # Let us rename "COSMIC ID" into "COSMIC_ID" if needed
        for old, new in {
                'Tissue Factor Value': 'TISSUE_FACTOR',
                'MS-instability Factor Value': 'MSI_FACTOR',
                'COSMIC ID': 'COSMIC_ID'}.items():
            if old in self.df.columns:
                colorlog.warning("'%s' column name is deprecated " % old +
                    " since 0.9.10. Please replace with '%s'" % new,
                    DeprecationWarning)
                self.df.columns = [x.replace(old, new)
                    for x in self.df.columns]
        # NOTE(review): "COSMID_ID" looks like a typo for "COSMIC_ID" — the
        # same pattern appears in IC50._interpret; confirm before changing.
        if "CL" in self.df.columns and "COSMID_ID" not in self.df.columns:
            self.df.columns = [x.replace("CL", "COSMIC_ID")
                for x in self.df.columns]
        # There are 3 special columns to hold the factors
        self._special_names = []
        # If tissue factor is not provided, we create and fill it with dummies.
        # OTherwise, we need to change a lot in the original code in ANOVA
        if self.colnames.tissue not in self.df.columns:
            colorlog.warning("column named '%s' not found"
                % self.colnames.tissue, UserWarning)
            self.df[self.colnames.tissue] = ['UNDEFINED'] * len(self.df)
            self._special_names.append(self.colnames.tissue)
        else:
            self._special_names.append(self.colnames.tissue)
        # MSI and MEDIA factors are optional; remember whether they exist.
        self.found_msi = self.colnames.msi in self.df.columns
        if self.found_msi is False:
            colorlog.warning("column named '%s' not found" % self.colnames.msi)
        else:
            self._special_names.append(self.colnames.msi)
        self.found_media = self.colnames.media in self.df.columns
        if self.found_media is False:
            pass
            #colorlog.warning("column named '%s' not found" % self.colnames.media)
        else:
            self._special_names.append(self.colnames.media)
        # order columns and index
        self._order()
        #
        self._interpret_cosmic()
        #
        self.check()
        self._fix_empty_tissues(empty_tissue_name)
    def _fix_empty_tissues(self, name="UNDEFINED"):
        # Sometimes, tissues may be empty so a nan is present. This lead to
        # to errors in ANOVA or Regression so we replace them with "UNDEFINED"
        # NOTE(review): the *name* parameter is ignored; the literal
        # 'UNDEFINED' is always used — confirm whether that is intended.
        N = self.df.TISSUE_FACTOR.isnull().sum()
        if N > 0:
            logger.warning("Some tissues were empty strings and renamed as UNDEFINED!")
        self.df.TISSUE_FACTOR.fillna('UNDEFINED', inplace=True)
    def _get_shift(self):
        # Number of special (factor) columns preceding the real features.
        return len(self._special_names)
    shift = property(_get_shift)
    def _interpret_cosmic(self):
        # Promote the COSMIC column to an integer, sorted index.
        if self.colnames.cosmic in self.df.columns:
            self.df.set_index(self.colnames.cosmic, inplace=True)
        elif self.colnames.cosmic == self.df.index.name:
            pass
        else:
            error_msg = "the features input file must contains a column " +\
                " named %s" % self.colnames.cosmic
            raise ValueError(error_msg)
        self.df.index = [int(x) for x in self.df.index]
        self.df.index = self.df.index.astype(int)
        self.df.index.name = "COSMIC_ID"
        self.df.sort_index(inplace=True)
    def fill_media_factor(self):
        """Given the COSMIC identifiers, fills the MEDIA_FACTOR column
        If already populated, replaced by new content.
        """
        from gdsctools import COSMICInfo
        c = COSMICInfo()
        self.df['MEDIA_FACTOR'] = [c.get(x).SCREEN_MEDIUM
            for x in self.df.index]
        self.found_media = True
        if self.colnames.media not in self._special_names:
            self._special_names.append(self.colnames.media)
        self._order()
    def _order(self):
        # Keep special factor columns first, then all other features.
        others = [x for x in self.df.columns if x not in self._special_names]
        self.df = self.df[self._special_names + others]
    def _get_features(self):
        return list(self.df.columns)
    def _set_features(self, features):
        # Validate, then keep only the requested features (factors are
        # always preserved).
        for feature in features:
            if feature not in self.features:
                raise ValueError('Unknown feature name %s' % feature)
        features = [x for x in features if x.endswith('FACTOR') is False]
        features = self._special_names + features
        self.df = self.df[features]
        self._order()
    features = property(_get_features, _set_features,
        doc="return list of features")
    def _get_tissues(self):
        return list(self.df[self.colnames.tissue])
    tissues = property(_get_tissues, doc='return list of tissues')
    def _get_unique_tissues(self):
        return list(self.df[self.colnames.tissue].unique())
    unique_tissues = property(_get_unique_tissues, doc='return set of tissues')
    def plot(self):
        """Histogram of the tissues found
        .. plot::
            :include-source:
            :width: 80%
            from gdsctools import GenomicFeatures
            gf = GenomicFeatures() # use the default file
            gf.plot()
        """
        if self.colnames.tissue not in self.df.columns:
            return
        data = pd.get_dummies(self.df[self.colnames.tissue]).sum()
        data.index = [x.replace("_", " ") for x in data.index]
        # deprecated but works for python 3.3
        # NOTE(review): both branches discard their return value
        # (sort_values is not in-place here), so *data* is never actually
        # sorted — confirm whether sorting was intended.
        try:
            data.sort_values(ascending=False)
        except:
            data.sort(ascending=False)
        pylab.figure(1)
        pylab.clf()
        labels = list(data.index)
        pylab.pie(data, labels=labels)
        pylab.figure(2)
        data.plot(kind='barh')
        pylab.grid()
        pylab.xlabel('Occurences')
        # keep the try to prevent MacOS issue
        try:pylab.tight_layout()
        except:pass
        return data
    def __str__(self):
        # Human-readable summary: tissues, MSI/MEDIA presence, feature counts.
        txt = 'Genomic features distribution\n'
        try:
            tissues = list(self.df[self.colnames.tissue].unique())
            Ntissue = len(tissues)
            txt += 'Number of unique tissues {0}'.format(Ntissue)
            if Ntissue == 1:
                txt += ' ({0})\n'.format(tissues[0])
            elif Ntissue < 10:
                txt += '\nHere are the tissues: '
                txt += ",".join(tissues) + "\n"
            else:
                txt += '\nHere are the first 10 tissues: '
                txt += ", ".join(tissues[0:10]) + "\n"
        except:
            txt += 'No information about tissues\n'
        if self.found_msi:
            txt += "MSI column: yes\n"
        else:
            txt += "MSI column: no\n"
        if self.found_media:
            txt += "MEDIA column: yes\n"
        else:
            txt += "MEDIA column: no\n"
        # -3 since we have also the MSI, tissue, media columns
        # TODO should use shift attribute ?
        Nfeatures = len(self.features)
        txt += '\nThere are {0} unique features distributed as\n'.format(Nfeatures-self.shift)
        n_mutations = len([x for x in self.df.columns if x.endswith("_mut")])
        txt += "- Mutation: {}\n".format(n_mutations)
        n_gain = len([x for x in self.df.columns if x.startswith("gain_cna")])
        txt += "- CNA (gain): {}\n".format(n_gain)
        n_loss = len([x for x in self.df.columns if x.startswith("loss_cna")])
        txt += "- CNA (loss): {}".format(n_loss)
        return txt
    def drop_tissue_in(self, tissues):
        """Drop tissues from the list
        :param list tissues: a list of tissues to drop. If you have only
            one tissue, can be provided as a string. Since rows are removed
            some features (columns) may now be empty (all zeros). If so, those
            columns are dropped (except for the special columns (e.g, MSI).
        """
        tissues = easydev.to_list(tissues)
        mask = self.df[self.colnames.tissue].isin(tissues) == False
        self.df = self.df[mask]
        self._cleanup()
    def keep_tissue_in(self, tissues):
        """Drop tissues not in the list
        :param list tissues: a list of tissues to keep. If you have only
            one tissue, can be provided as a string. Since rows are removed
            some features (columns) may now be empty (all zeros). If so, those
            columns are dropped (except for the special columns (e.g, MSI).
        """
        tissues = easydev.to_list(tissues)
        mask = self.df[self.colnames.tissue].isin(tissues)
        self.df = self.df[mask]
        self._cleanup()
    def _cleanup(self, required_features=0):
        # Drop feature columns that became (almost) empty after row removal.
        # FIXME: there is view/copy warning here in pandas. it should be fixed
        # or may have side-effects
        to_ignore = self._special_names
        # create a view ignoring the informative columns
        view = self.df[[x for x in self.df.columns if x not in to_ignore]]
        todrop = list(view.columns[view.sum() <= required_features])
        self.df.drop(todrop, axis=1, inplace=True)
    def __repr__(self):
        Nc = len(self.cosmicIds)
        Nf = len(self.features) - self.shift
        try:
            Nt = len(set(self.tissues))
        except:
            Nt = '?'
        return "GenomicFeatures <Nc={0}, Nf={1}, Nt={2}>".format(Nc, Nf, Nt)
    def compress_identical_features(self):
        """Merge duplicated columns/features
        Columns duplicated are merged as follows. Fhe first column is kept,
        others are dropped but to keep track of those dropped, the column name
        is renamed by concatenating the columns's names. The separator is a
        double underscore.
        ::
            gf = GenomicFeatures()
            gf.compress_identical_features()
            # You can now access to the column as follows (arbitrary example)
            gf.df['ARHGAP26_mut__G3BP2_mut']
        """
        # let us identify the duplicates as True/False
        datatr = self.df.transpose()
        duplicated_no_first = datatr[datatr.duplicated()]
        try:
            duplicated = datatr[datatr.duplicated(keep=False)]
        except:
            # pandas 0.16
            duplicated = datatr[datatr.duplicated(take_last=False)]
        tokeep = [x for x in duplicated.index if x not in duplicated_no_first.index]
        # Let us create a groupby strategy
        groups = {}
        # Let us now add the corrsponding duplicats
        for feature in tokeep:
            # Find all row identical to this feature
            # NOTE(review): DataFrame.ix was removed in pandas >= 1.0;
            # this line would need .loc on modern pandas — confirm the
            # supported pandas version before changing.
            matches = (duplicated.ix[feature] == duplicated).all(axis=1)
            groups[feature] = "__".join(duplicated.index[matches])
        # This drops all duplicated columns (the first is kept, others are
        # dropped)
        self.df = self.df.transpose().drop_duplicates().transpose()
        self.df.rename(columns=groups, inplace=True)
        # We want to keep the column names informative that is if there were
        # duplicates, we rename the column kept with the concatenation of all
        # the corresponding duplicates
        print("compressed %s groups of duplicates" % len(groups))
        return groups
    def get_TCGA(self):
        # Map each COSMIC identifier to its TCGA label via COSMICInfo.
        from gdsctools.cosmictools import COSMICInfo
        c = COSMICInfo()
        tcga = c.df.ix[self.df.index].TCGA
        return tcga
class PANCAN(Reader):
    """Deprecated reader for the RData file holding all genomic features
    (methylation included).

    The original R-based implementation was dropped; the data is now
    distributed as CSV/TSV files instead.

    .. deprecated:: since v0.12
    """
    def __init__(self, filename=None):
        # The R session machinery was removed; only warn the caller that
        # this class is obsolete. *filename* is accepted but unused.
        print('deprecated')
class Extra(Reader):
    """Reader for the v17 RData file holding AUC/IC50/residual matrices.

    Deprecated since v0.12: it relies on an R session (via biokit) to load
    the RData file.
    """
    def __init__(self, filename="djvIC50v17v002-nowWithRMSE.rdata"):
        """Load the RData file and extract the AUC/IC50/residual matrices."""
        super(Extra, self).__init__(filename)
        print("Deprecated since v0.12")
        # Remove R dependencies
        from biokit.rtools import RSession
        self.session = RSession()
        self.session.run('load("%s")' %self._filename)
        # 3 identical matrices containing AUC, IC50 and
        self.dfAUCv17= self._read_matrix_from_r('dfAUCv17')
        self.dfIC50v17 = self._read_matrix_from_r('dfIC50v17')
        # Residual
        self.dfResv17 = self._read_matrix_from_r('dfResv17')
        # This df holds the xmid/scale parameters for each cell line
        # Can be visualised using the tools.Logistic class.
        self.dfCL= self._read_matrix_from_r('dfCL')
        # There is an extra matrix called MoBEM, which is the same as in the
        # file
    def hist_residuals(self, bins=100):
        """Plot residuals across all drugs and cell lines"""
        data = [x for x in self.dfResv17.fillna(0).values.flatten() if x != 0]
        pylab.clf()
        # Bug fix: the ``normed`` argument was removed in matplotlib >= 3.6;
        # ``density=True`` has the same semantics.
        pylab.hist(data, bins=bins, density=True)
        pylab.grid(True)
        pylab.xlabel('Residuals')
        pylab.ylabel(r'\#')
    def scatter(self):
        """Scatter plot (with marginal histograms) of the xmid/scale data."""
        from biokit.viz import scatter
        s = scatter.ScatterHist(self.dfCL)
        s.plot(kargs_histx={'color':'red', 'bins':20},
            kargs_scatter={'alpha':0.9, 's':100, 'c':'b'},
            kargs_histy={'color':'red', 'bins':20})
    def hist_ic50(self, bins=100):
        """Histogram of the non-null IC50 values."""
        data = [x for x in self.dfIC50v17.fillna(0).values.flatten() if x != 0]
        pylab.clf()
        pylab.hist(data, bins=bins, density=True)
        pylab.grid(True)
        pylab.xlabel('IC50')
        pylab.ylabel(r'\#')
    def hist_auc(self, bins=100):
        """Histogram of the non-null AUC values."""
        data = [x for x in self.dfAUCv17.fillna(0).values.flatten() if x != 0]
        pylab.clf()
        pylab.hist(data, bins=bins, density=True)
        pylab.grid(True)
        pylab.xlabel('AUC')
        pylab.ylabel(r'\#')
class DrugDecode(Reader):
    """Reads a "drug decode" file
    The format must be comma-separated file. There are 3 compulsary columns
    called DRUG_ID, DRUG_NAME and DRUG_TARGET. Here is an example::
        DRUG_ID ,DRUG_NAME ,DRUG_TARGET
        999 ,Erlotinib ,EGFR
        1039 ,SL 0101-1 ,"RSK, AURKB, PIM3"
    TSV file may also work out of the box. If a column name called
    'PUTATIVE_TARGET' is found, it is renamed 'DRUG_TARGET' to be compatible with
    earlier formats.
    In addition, 3 extra columns may be provided::
        - PUBCHEM_ID
        - WEBRELEASE
        - OWNED_BY
    The OWNED_BY and WEBRELEASE may be required to create packages for each
    company. If those columns are not provided, the internal dataframe is
    filled with None.
    Note that older version of identifiers such as::
        Drug_950_IC50
    are transformed as proper ID that is (in this case), just the number::
        950
    Then, the data is accessible as a dataframe, the index being the
    DRUG_ID column::
        data = DrugDecode('DRUG_DECODE.csv')
        data.df.ix[999]
    .. note:: the DRUG_ID column must be made of integer
    """
    def __init__(self, filename=None):
        """.. rubric:: Constructor"""
        super(DrugDecode, self).__init__(filename)
        # Compulsory and optional column names handled by this reader.
        self.header = ['DRUG_ID', 'DRUG_NAME', 'DRUG_TARGET', 'OWNED_BY',
            'WEBRELEASE']
        self.header_extra = ["PUBCHEM_ID", "CHEMBL_ID", "CHEMSPIDER_ID"]
        try:
            # if the input data is already a DrugDecode instance, this should
            # fail since the expected df will not have the DRUG_ID field, that
            # should be the index
            self._interpret()
        except:
            pass
        self.df = self.df[sorted(self.df.columns)]
    def _interpret(self, filename=None):
        # Normalise the raw dataframe: rename legacy column names, create
        # missing optional columns, then set DRUG_ID as an integer index.
        N = len(self.df)
        if N == 0:
            return
        self.df.rename(columns={
                'PUTATIVE_TARGET': 'DRUG_TARGET',
                'THERAPEUTIC_TARGET': 'DRUG_TARGET'},
                inplace=True)
        for column in ["WEBRELEASE", "OWNED_BY"] + self.header_extra:
            if column not in self.df.columns:
                self.df[column] = [np.nan] * N
        #for this in self.header[1:]:
        for this in self.header:
            msg = " The column %s was not found and may be an issue later on."
            if this not in self.df.columns and this != self.df.index.name:
                logger.warning(msg % this )
        # Finally, set the drug ids as the index.
        try:
            self.df.set_index('DRUG_ID', inplace=True)
        except:
            # could be done already
            pass
        self.df.index = [drug_name_to_int(x) for x in self.df.index]
        self.df.index = self.df.index.astype(int)
        self.df.index.name = "DRUG_ID"
        # sort the columns
        try:
            self.df.sort_index(inplace=True)
        except:
            # NOTE(review): DataFrame.ix was removed in pandas >= 1.0;
            # this fallback would need .loc on modern pandas — confirm.
            self.df = self.df.ix[sorted(self.df.index)]
        self._check_uniqueness(self.df.index)
    def _get_names(self):
        return list(self.df.DRUG_NAME.values)
    drug_names = property(_get_names)
    def _get_target(self):
        return list(self.df.DRUG_TARGET.values)
    drug_targets = property(_get_target)
    def _get_drug_ids(self):
        return list(self.df.index)
    drugIds = property(_get_drug_ids,
        doc="return list of drug identifiers")
    def _get_row(self, drug_id, colname):
        # Look up *colname* for *drug_id*, accepting legacy identifiers
        # such as "Drug_950_IC50" or "950_suffix". Returns None when the
        # identifier cannot be resolved.
        if drug_id in self.df.index:
            return self.df.ix[drug_id][colname]
        elif str(drug_id).startswith("Drug_"):
            try:
                drug_id = int(drug_id.split("_")[1])
            except:
                print("DRUG ID %s not recognised" % drug_id)
                return
            if drug_id in self.df.index:
                return self.df[colname].ix[drug_id]
        elif "_" in str(drug_id):
            try:
                drug_id = int(drug_id.split("_")[0])
            except:
                print("DRUG ID %s not recognised" % drug_id)
                return
            if drug_id in self.df.index:
                return self.df[colname].ix[drug_id]
        else:
            return
    def get_name(self, drug_id):
        # Drug name for an identifier; None if unknown.
        return self._get_row(drug_id, 'DRUG_NAME')
    def get_target(self, drug_id):
        # Drug target for an identifier; None if unknown.
        return self._get_row(drug_id, 'DRUG_TARGET')
    def is_public(self, drug_id):
        # WEBRELEASE flag for an identifier; None if unknown.
        return self._get_row(drug_id, 'WEBRELEASE')
    def check(self):
        # Drug identifiers must support integer arithmetic.
        for x in self.drugIds:
            try:
                x += 1
            except TypeError as err:
                print("drug identifiers must be numeric values")
                raise err
        # it may happen that a drug has no target in the database ! so we
        # cannot check that for the moment:
        #if self.df.isnull().sum().sum()>0:
        #    print(d.df.isnull().sum())
        #    raise ValueError("all values must be non-na. check tabulation")
    def get_info(self):
        # Note that there are 4 cases : Y, N, U (unknown?) and NaN
        dd = { 'N': len(self),
                'N_public': sum(self.df.WEBRELEASE == 'Y'),
                'N_prop': sum(self.df.WEBRELEASE != 'Y')}
        return dd
    def __len__(self):
        return len(self.df)
    def __str__(self):
        txt = "Number of drugs: %s\n" % len(self.df)
        return txt
    def __repr__(self):
        txt = self.__str__()
        if len(self.companies):
            txt += "Contains %s companies" % len(self.companies)
        return txt
    def _get_companies(self):
        if 'OWNED_BY' in self.df.columns:
            companies = list(self.df.OWNED_BY.dropna().unique())
        else:
            companies = []
        return sorted(companies)
    companies = property(_get_companies)
    def drug_annotations(self, df):
        """Populate the drug_name and drug_target field if possible
        :param df: input dataframe as given by e.g., :meth:`anova_one_drug`
        :return df: same as input but with the FDR column populated
        """
        if len(self.df) == 0:
            return df
            # print("Nothing done. DrugDecode is empty.")
        # aliases
        if 'DRUG_ID' not in df.columns:
            raise ValueError('Expected column named DRUG_ID but not found')
        drug_names = [self.get_name(x) for x in df.DRUG_ID.values]
        drug_target = [self.get_target(x) for x in df.DRUG_ID.values]
        # this is not clean. It works but could be simpler surely.
        df['DRUG_NAME'] = drug_names
        df['DRUG_TARGET'] = drug_target
        return df
    def __add__(self, other):
        """
        Fill missing values but do not overwrite existing fields even though
        the field in the other DrugDecode instance is difference.
        """
        # Problably not efficient but will do for now
        columns = list(self.df.columns)
        dd = DrugDecode()
        dd.df = self.df.copy()
        # add missing entires
        # NOTE(review): DataFrame.append and .ix were removed in recent
        # pandas (use pd.concat and .loc) — confirm the supported version.
        missing = [x for x in other.df.index if x not in self.df.index]
        dd.df = dd.df.append(other.df.ix[missing])
        # merge existing ones
        for index, ts in other.df.iterrows():
            # add the drug if not already present
            if index in self.df.index:
                # here it is found in the 2 instances but
                # they may contain either complementary data, which
                # could have been done with pandas.merge but we wish
                # to check for incompatible data
                for column in columns:
                    a = dd.df.ix[index][column]
                    b = ts[column]
                    if pd.isnull(b) is True:
                        # nothing to do if b is NULL
                        pass
                    elif pd.isnull(a) is True:
                        # we can merge the content of b into a
                        # that is the content of other into this instance
                        dd.df.loc[index,column] = b
                    else:
                        # a and b are not null
                        if a != b:
                            print('WARNING: different fields in drug %s (%s %s %s)' % (index, column, a, b))
        return dd
    def __eq__(self, other):
        # NOTE(review): ``all(df1 == df2)`` iterates over column labels,
        # not boolean cells, so actual values are not compared here —
        # ``(…).all().all()`` is likely what was intended; confirm.
        try:
            return all(self.df.fillna(0) == other.df.fillna(0))
        except:
            return False
    def get_public_and_one_company(self, company):
        """Return drugs that belong to a specific company and public drugs"""
        drug_decode_company = self.df.query(
            "WEBRELEASE=='Y' or OWNED_BY=='%s'" % company)
        # Transform into a proper DrugDecode class for safety
        return DrugDecode(drug_decode_company)
| [
"cokelaer@gmail.com"
] | cokelaer@gmail.com |
57c26cb2700ed8b46be455ccddd74d58132b8513 | 64edc5ecf979bf712d32b9972a5823d5ac13af43 | /deprecated/background_noise.py | 4ec4abe6994e9fd1ac14515582b3126b6f50fc51 | [
"MIT"
] | permissive | SMaldonado/snakerf | b678ea08a240f498f1e1115d46eadf413849c091 | d8b4701088f42450c856a5a7c0a21b98b8024485 | refs/heads/master | 2021-03-01T13:21:39.043726 | 2020-08-31T06:46:48 | 2020-08-31T06:46:48 | 245,788,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | import snakerf as srf
import matplotlib.pyplot as plt
import numpy as np
from math import inf, pi, log2
from scipy import signal
# see https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.periodogram.html#scipy.signal.periodogram
# Demo script: compare a synthetic background-noise spectrum (V1) against
# white noise (V2) using the snakerf (srf) helpers.
m = 3
# Render the third Gold code of degree m as a zero-padded bit string.
data = '{0:0{1:d}b}'.format(srf.gold_codes(m)[2], 2**m - 1)
print(data)
n = 1
f = 1234
f_bit = 9001
T_bit = 1/f_bit
# t_max = len(data)*T_bit/n - T_bit/100
fs = 500e6
ns = 100000
t_max = ns/fs
V1 = srf.Signal(ns, t_max)
V2 = srf.Signal(ns, t_max)
V2.add_noise()
print(srf.NF2T_noise(3))
srf.plot_power_spectrum(plt.gca(), V2.fs, V2.Pf)
# Piecewise-linear noise-figure model over log10(frequency).
# NOTE(review): presumably taken from an ITU-R style atmospheric-noise
# table — confirm the source of these reference points.
f_ref = [0, 4, 5, 8.3, 12] # log frequency
Fa_ref = [270, 150, 80, 0, 0] # Fa = 10*log10(T_noise/t0)
V1.update_Pf(srf.Vt_background_noise(V1.ts, V1.fs))
srf.plot_power_spectrum(plt.gca(), V1.fs, V1.Pf)
T_noise = srf.undB(np.interp(np.log10(np.maximum(V1.fs,np.ones(len(V1.fs)))), f_ref, Fa_ref)) * srf.t0 # weird thing with ones to avoid log(0)
plt.plot(V1.fs, srf.W2dBm(4*srf.kB*T_noise*V1.df))
# N-point moving average of the power spectrum (boxcar convolution),
# plotted in dBm for both signals.
N = 100
moving_avg = np.convolve(srf.mag(V1.Pf * V1.Z0 / V1.df), np.ones((N,))/N, mode='valid') * V1.df/V1.Z0
plt.plot(V1.fs[:-N+1], srf.W2dBm(moving_avg))
moving_avg = np.convolve(srf.mag(V2.Pf * V2.Z0 / V2.df), np.ones((N,))/N, mode='valid') * V2.df/V2.Z0
plt.plot(V2.fs[:-N+1], srf.W2dBm(moving_avg))
plt.show()
| [
"engineerajm@gmail.com"
] | engineerajm@gmail.com |
b0e8ca8760ad8bb8774db650426a7c08920f1800 | bb90af4bf93fff8f0924f09c42349de8e944b980 | /CP/Recursion/PermutationOfString.py | f03a2824fdc9c550642e1c9a510f73f555f3b662 | [] | no_license | tanucdi/dailycodingproblem | ed18a15a6efad9bf3612abe51ef0d585e5c63e12 | 4e995b1739a1e7a63ed68d50e508f1770a4e206b | refs/heads/main | 2023-05-31T19:47:46.196131 | 2021-06-12T05:07:24 | 2021-06-12T05:07:24 | 358,788,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | result=[]
def cal(data,i,n):
if i ==n:
result.append(''.join(data)) #A B C
for j in range(i,n+1):
data[i],data[j]=data[j],data[i]
cal(data,i+1,n)
data[i],data[j]=data[j],data[i] #BACKTRACKING
'''
data='abc'
i=0
n=len(data)-1
'''
cal(list('abc'),0,2)
print(result) | [
"tanucdi7@gmail.com"
] | tanucdi7@gmail.com |
4d4bf41cfc6668556e18405c2b1da9e6f85f8787 | e0e96b8d26cd12c16a3e4a6265b6bceb11c4b1f0 | /17day/updtest.py | 2d6ca62c4a6dff1d92723fc2cea303250088b3cf | [] | no_license | superwenqistyle/2-2018python | 4419bc4ae4700e5b7839c4974106e03fc33e85f8 | 76e5ea72413abfa774ad61b3bdff76eba0c5e16c | refs/heads/master | 2020-03-13T11:08:50.860361 | 2018-05-22T11:17:39 | 2018-05-22T11:17:39 | 131,096,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | from socket import *
from threading import Thread
from time import ctime
Id=""
port=0
updSocket=None
def send():
    """Read lines from stdin forever and send each one as a UDP datagram
    to the peer (module globals Id/port), encoded as gb2312."""
    while True:
        message=input("请输入内容:")
        # gb2312 encoding is used on both ends of this chat tool
        updSocket.sendto(message.encode("gb2312"),(Id,port))
def receive():
    """Block on the UDP socket forever, printing every incoming datagram."""
    while True:
        # recvfrom returns (payload_bytes, (sender_ip, sender_port))
        content=updSocket.recvfrom(1024)
        print("%s-%s\n请输入内容:"%(content[0].decode("gb2312"),content[1][0]),end="")
def main():
    """Set up the UDP socket from user input and run the send/receive
    loops on two daemon-less threads (joins block forever)."""
    global Id
    global port
    global updSocket
    # Peer address, entered interactively.
    Id = input("输入对方的id:")
    port = int(input("输入对方的端口号:"))
    updSocket = socket(AF_INET,SOCK_DGRAM)
    # Listen on all interfaces, fixed local port 6666.
    updSocket.bind(("",6666))
    t = Thread(target=send)
    t1 = Thread(target=receive)
    t.start()
    t1.start()
    # Both loops run forever, so these joins never return.
    t.join()
    t1.join()
main()
| [
"1623515120@qq.com"
] | 1623515120@qq.com |
66077abb066062ef6d4bc2e2eeed498e9420829e | 89d74b9d907ca96995afe702b3a3742206f9ef4d | /app.py | 4bb3d55cb2cf04111831e7ee5b352df16862f89c | [] | no_license | LEO2822/Flask-Todo-website | 9e12327aaef257ff6bfff8d8b6576cbe3d1673a9 | 4828a629bf49cf9ef2ed56b3ff522a217678f223 | refs/heads/main | 2023-08-29T14:16:54.272969 | 2021-09-22T05:55:32 | 2021-09-22T05:55:32 | 408,742,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,436 | py |
# importing required packages
from flask import Flask , render_template , session , request , redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
# initializing the app
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# backend fetching .. for data fetching
class Todo(db.Model):
    '''SQLAlchemy model for one todo item (table created in SQLite).'''
    # Serial number: auto-incrementing primary key.
    sno = db.Column(db.Integer , primary_key = True)
    # Short title of the task (required).
    title = db.Column(db.String(200) , nullable = False)
    # Longer description of the task (required).
    desc = db.Column(db.String(500) , nullable = False)
    # Creation timestamp, defaults to UTC now.
    date_created = db.Column(db.DateTime , default = datetime.utcnow)
    '''to print the result when called the class'''
    def __repr__(self) -> str:
        return f"{self.sno} - {self.title}"
# Home page: lists all todos and accepts new ones via the form.
@app.route('/', methods = ['GET' , 'POST'])
def home():
    # On POST, create a new Todo from the submitted form fields.
    # NOTE(review): rendering directly after a POST (instead of
    # redirecting) allows duplicate submissions on browser refresh —
    # consider the POST/redirect/GET pattern.
    if request.method == 'POST':
        title = request.form['title']
        desc = request.form['desc']
        todo = Todo(title = title, desc = desc)
        db.session.add(todo)
        db.session.commit()
    # Fetch every todo for display (GET and POST alike).
    allTodo = Todo.query.all()
    return render_template('index.html', allTodo = allTodo)
@app.route('/show')
def show():
    """Debug endpoint: dump all todos to the server console.

    Bug fix: the original returned None after printing, and a Flask view
    that returns None raises "View function did not return a valid
    response". Redirect back to the home page instead.
    """
    allTodo = Todo.query.all()
    print(allTodo)
    return redirect('/')
# NOTE(review): the route is '/update<int:sno>' (no slash before the id),
# producing URLs like /update3 — confirm this matches the templates.
@app.route('/update<int:sno>', methods = ['GET' , 'POST'])
def update(sno):
    """Edit the todo with serial number *sno*: GET shows the form,
    POST saves the new title/description and redirects home."""
    if request.method == 'POST':
        title = request.form['title']
        desc = request.form['desc']
        todo = Todo.query.filter_by(sno = sno).first()
        todo.title = title
        todo.desc = desc
        db.session.add(todo)
        db.session.commit()
        return redirect('/')
    # GET: pre-fill the edit form with the current values.
    todo = Todo.query.filter_by(sno = sno).first()
    return render_template('update.html', todo = todo)
@app.route('/delete<int:sno>')
def delete(sno):
    """Delete the todo with serial number *sno* and redirect home."""
    # NOTE(review): .first() returns None for an unknown sno, which would
    # make db.session.delete fail — consider first_or_404.
    todo = Todo.query.filter_by(sno = sno).first()
    db.session.delete(todo)
    db.session.commit()
    return redirect('/')
# we can change the port also and can assign to 8000
'''
"debug = True" only when we are in developer stage to see the error's if there are any.
after done setting it and publishing it , we have to set it False.
'''
if __name__ == '__main__':
app.run(debug=True) | [
"mtkashid7@gmail.com"
] | mtkashid7@gmail.com |
91cfed374788a01c5cdcce7947bb0ec816270afb | 2a116bcbb25b929e49a5f8eefebd76407ff40774 | /day_12_queue.py | 3040a482fcc7e6e81d4c275490cef7a729c004c0 | [] | no_license | yaweibuyousang/Python_Learning | 34bc6b6937a00347a8029194550eb791720849b4 | 5d80fe33c8b18d479db227facad4673153785c53 | refs/heads/master | 2020-12-29T04:38:37.871962 | 2020-02-05T23:49:10 | 2020-02-05T23:49:10 | 238,457,465 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,957 | py | #序列
'''
序列是具有先后关系的一组元素
序列是一维元素向量,元素类型可以不同
类似数学序列:S0,S1,...,Sn-1
元素间由序号引导,通过下标访问序列的特定元素
序列类型定义:
序列是一个基类类型
字符串类型 元组类型 列表类型
序号的定义
序号分为两类:
反向递减序号
-5 -4 -3 -2 -1
"BIT" 3.1415 1024 (2,3) ["中国",9]
0 1 2 3 4
正向递增序号
序列类型通用操作符
x in s 如果x是序列s的元素,返回True,否则返回False
x not in s 如果x是序列s的元素,返回False,否则返回True
s + t 连接两个序列s和t
s*n或n*s 将序列s复制n次
s[i] 索引,返回s中的第i个元素,i是序列的序号
s[i:j]或s[i:j:k] 切片,返回序列s中第i到j以k为步长的元素子序列
eg:
>>> ls = ["python",123,".io"]
>>> ls[::-1]
['.io',123,'python']
>>>s = "python123.io"
>>>s[::-1]
'oi.321nohtyp'
序列类型通用函数和方法
len(s) 返回序列s的长度,即元素个数
min(s) 返回序列s的最小元素,s中元素需要可比较
max(s) 返回序列s的最大元素,s中元素需要可比较
s.index(x)或s.index(x,i,j) 返回序列s从i开始到j位置中第一次出现元素x的位置
s.count(x) 返回序列s中出现x的总次数
eg:
>>>ls = ["python",123,".io"]
>>>len(ls)
3
>>>s = "python123.io"
>>>max(s)
'y'
元组类型定义
元组是序列类型的一种扩展
元组是一种序列类型,一旦创建就不能被修改
使用小括号()或tuple()创建,元素间用逗号,分隔
可以使用或不使用小括号
def func():
return 1,2
>>>creature = "cat","dog","tiger","human"
>>>creature
('cat',dog','tiger','human')
>>>color = (0x001100,"blue",creature)
>>>color
(4352,'blue',('cat','dog','tiger','human'))
元组类型操作
元组继承序列类型的全部通用操作
元组继承了序列类型的全部通用操作
元组因为创建后不能修改,因此没有特殊操作
使用或不使用小括号
>>>creature = "cat","dog","tiger","human"
>>>creature[::-1]
('human','tiger','dog','cat')
>>>color = (0x001100,"blue",creature)
>>>color[-1][2]
'tiger'
列表类型定义
列表是序列类型的一种扩展
列表是一种序列类型,创建后可以随意被修改
使用方括号[]或list()创建,元素间用逗号,分隔
列表中各元素类型可以不同,无长度限制
>>>ls = ["cat","dog","tiger",1024]
>>>ls
['cat','dog','tiger',1024]
>>>lt = ls
>>>lt
['cat','dog','tiger',1024]
方括号[]真正创建一个列表,赋值仅传递引用
列表类型操作函数和方法
ls[i] = x 替换列表ls第i元素为x
ls[i:j:k]= lt 用列表ls第i元素为x
del ls[i] 删除列表ls中第i元素
del ls[i:j:k] 删除列表ls中第i到第j以k为步长的元素
ls += lt 更新列表ls,将列表lt元素增加到列表ls中
ls *= n 更新列表ls,其元素重复n次
>>>ls = ["cat","dog","tiger",1024]
>>>ls[1:2] = [1,2,3,4]
['cat',1,2,3,4,'tiger',1024]
>>>del ls[::3] 删除列表ls中以3为步长的元素
[1,2,4,'tiger']
>>>ls*2
[1,2,4,'tiger',1,2,4,'tiger']
列表类型操作函数和方法
ls.append(x) 在列表ls最后增加一个元素x
ls.clear() 删除列表ls中所有元素
ls.copy() 生成一个新列表,赋值ls中所有元素
ls.insert(i,x) 在列表ls的第i位置增加元素x
ls.pop(i) 将列表ls中第i位置元素取出并删除该元素
ls.remove(x) 将列表ls中出现的第一个元素x删除
ls.reverse() 将列表ls中的元素反转
>>>ls = ["cat","dog","tiger",1024]
>>>ls.append(1234)
['cat','dog','tiger',1024,1234]
>>>ls.insert(3,"human")
['cat','dog','tiger','human',1024,1024]
>>>ls.reverse()
[1234,1024,'human','tiger','dog','cat']
列表功能
定义空列表lt >>>lt = []
向lt新增5个元素 >>>lt += [1,2,3,4,5]
修改lt中第2个元素 >>>lt[2] = 6
向lt中第2个位置增加一个元素 >>>lt.insert(2,7)
从lt中第1个位置删除一个元素 >>>del lt[1]
删除lt中第1-3位置元素 >>>del lt[1:4]
0 in lt 判断lt中是否包含数字0
lt.append(0) 向lt新增数字0
lt.index(0) 返回数字0所在lt中的索引
len(lt) lt的长度
max(lt) lt中最大元素
lt.clear() 清空lt
序列类型应用场景
数据表示:元组和列表
元组用于元素不改变的应用场景,更多用于固定搭配场景
列表更加灵活,它是最常用的序列类型
最主要作用:表示一组有序数据,进而操作它们
元素遍历:
for item in ls:
<语句块>
for item in tp:
<语句块>
序列类型应用场景
数据保护
如果不希望数据被程序所改变,转换成元组类型
>>> ls = ["cat","dog","tiger",1024]
>>> lt = tuple(ls)
>>> lt
('cat','dog','tiger',1024)
'''
| [
"noreply@github.com"
] | yaweibuyousang.noreply@github.com |
1626df6646682430eb9d47c57614e0fc6c70476f | 194124b184101bbeb18c00482a1f60a6dd32eebf | /blog/migrations/0002_category.py | a811ab55f0b35be64c8208579dfff5eb7e36a19a | [] | no_license | fc-wsd/s4-instablog | 2fc758461f09fe124b28d86d29d4df429ef72040 | 8e38b07fe7dae0378fda228f2cfa7752f93254c9 | refs/heads/master | 2021-01-10T12:13:09.293036 | 2015-12-12T06:13:34 | 2015-12-12T06:13:34 | 45,733,935 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"kay@hannal.net"
] | kay@hannal.net |
8adc351ce01139f3c2be60de53be8c6d0d8a8b74 | 2f6b3417c221bad1058a6f22956fbf56d5970c57 | /remember/__init__.py | a81108886013c5ca3b8bdc43224cccd279646f3c | [] | no_license | xkenneth/dontpaythestate | 062807530324b1f11fd88493d685bdb1f0ed21ef | 6917890b0347f83c15c86dd921b376d313219b6c | refs/heads/master | 2016-09-05T18:39:15.463460 | 2009-07-20T03:41:01 | 2009-07-20T03:41:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | import signal_handlers
| [
"citizen@donthatethestate.com"
] | citizen@donthatethestate.com |
852612c0740acb99feeafc07e63bc0d921779cc6 | c7d913f55bc6c7e9bacc0268481c33b1164c2fce | /node_modules/mongoose/node_modules/mongodb/node_modules/bson/build/config.gypi | 8bed261a5b44c09b99b9a9ae02f37062e90c7280 | [
"Apache-2.0",
"MIT"
] | permissive | nrtapia/user-app | f8d2f4349962726a1e60b575384852db3c3ca0af | a6095cf767de872d321b6af40cf82e578944f08f | refs/heads/master | 2016-09-06T15:35:30.961244 | 2014-10-03T20:33:51 | 2014-10-03T20:33:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,236 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"host_arch": "ia32",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"node_use_systemtap": "false",
"python": "c:\\python27\\python.exe",
"target_arch": "ia32",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"visibility": "",
"nodedir": "C:\\Users\\nrtapia\\.node-gyp\\0.10.26",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"registry": "https://registry.npmjs.org/",
"prefix": "C:\\Users\\nrtapia\\AppData\\Roaming\\npm",
"always_auth": "",
"bin_links": "true",
"browser": "",
"ca": "",
"cache": "C:\\Users\\nrtapia\\AppData\\Roaming\\npm-cache",
"cache_lock_stale": "60000",
"cache_lock_retries": "10",
"cache_lock_wait": "10000",
"cache_max": "null",
"cache_min": "10",
"cert": "",
"color": "true",
"depth": "null",
"description": "true",
"dev": "",
"editor": "notepad.exe",
"email": "",
"engine_strict": "",
"force": "",
"fetch_retries": "2",
"fetch_retry_factor": "10",
"fetch_retry_mintimeout": "10000",
"fetch_retry_maxtimeout": "60000",
"git": "git",
"git_tag_version": "true",
"global": "",
"globalconfig": "C:\\Users\\nrtapia\\AppData\\Roaming\\npm\\etc\\npmrc",
"group": "",
"heading": "npm",
"ignore_scripts": "",
"init_module": "C:\\Users\\nrtapia\\.npm-init.js",
"init_author_name": "",
"init_author_email": "",
"init_author_url": "",
"init_license": "ISC",
"json": "",
"key": "",
"link": "",
"local_address": "",
"long": "",
"message": "%s",
"node_version": "v0.10.26",
"npat": "",
"onload_script": "",
"optional": "true",
"parseable": "",
"production": "",
"proprietary_attribs": "true",
"https_proxy": "",
"user_agent": "node/v0.10.26 win32 ia32",
"rebuild_bundle": "true",
"rollback": "true",
"save": "",
"save_bundle": "",
"save_dev": "",
"save_optional": "",
"searchopts": "",
"searchexclude": "",
"searchsort": "name",
"shell": "C:\\Windows\\system32\\cmd.exe",
"shrinkwrap": "true",
"sign_git_tag": "",
"strict_ssl": "true",
"tag": "latest",
"tmp": "C:\\Users\\nrtapia\\AppData\\Local\\Temp",
"unicode": "true",
"unsafe_perm": "true",
"usage": "",
"user": "",
"username": "",
"userconfig": "C:\\Users\\nrtapia\\.npmrc",
"umask": "18",
"version": "",
"versions": "",
"viewer": "browser",
"globalignorefile": "C:\\Users\\nrtapia\\AppData\\Roaming\\npm\\etc\\npmignore"
}
}
| [
"nrtapia@sprc.com.co"
] | nrtapia@sprc.com.co |
1ba8b8cecc5ff4ed094141c30c623e714f1b361e | 50ab3f0abfc2dc410232fbb253c6e1f9211e65f4 | /knowledge-import/SNET/Physical Entity (PE) Identifier mapping.py | 49506b177735b9c47bef6399786c84f6979ffe8a | [] | no_license | MOZI-AI/agi-bio | c736e5b0c6d234f444d7f25497d785fb6dd8c597 | 95168fa6820b9d3ffcdfc24fbfe414947e78bc9a | refs/heads/master | 2020-07-05T06:51:11.062704 | 2020-03-09T22:48:56 | 2020-03-09T22:48:56 | 202,560,215 | 0 | 0 | null | 2020-03-09T22:48:57 | 2019-08-15T14:52:56 | Python | UTF-8 | Python | false | false | 8,543 | py | __author__ = "Hedra"
__email__ = "hedra@singularitynet.io"
# The following script imports the Physical Entity (PE) Identifier mapping files from https://reactome.org/download-data
# Requires: NCBI2Reactome_PE_Pathway.txt
# UniProt2Reactome_PE_Pathway.txt
# ChEBI2Reactome_PE_Pathway.txt
# from https://reactome.org/download/current/
import pandas as pd
import wget
import os
import sys
import metadata
# Get each of the files first
# URL's
ncbi = "https://reactome.org/download/current/NCBI2Reactome_PE_Pathway.txt"
uniprot = "https://reactome.org/download/current/UniProt2Reactome_PE_Pathway.txt"
chebi = "https://reactome.org/download/current/ChEBI2Reactome_PE_Pathway.txt"
script = "https://github.com/MOZI-AI/agi-bio/blob/master/knowledge-import/SNET/Physical%20Entity%20(PE)%20Identifier%20mapping.py"
# If you have the files downloaded, make sure the file names are the same
# Or modify the file names in this code to match yours.
def get_data(name):
print("Downloading the datasets, It might take a while")
if(name in ["N", "n", "A", "a"]):
if(not os.path.isfile('NCBI2Reactome_PE_Pathway.txt')):
wget.download(ncbi, "raw_data/")
if(name in ["U", "u", "A", "a"]):
if(not os.path.isfile('UniProt2Reactome_PE_Pathway.txt')):
wget.download(uniprot, "raw_data/")
if(name in ["C", "c", "A", "a"]):
if(not os.path.isfile('ChEBI2Reactome_PE_Pathway.txt')):
wget.download(chebi, "raw_data/")
print("Done")
# Helper functions for Atomese representation
def member(indiv, group):
if "Uniprot" in indiv or "ChEBI" in indiv:
return ""+"(MemberLink \n (MoleculeNode "+'"'+ indiv + '")\n' + '(ConceptNode "'+ group + '"))\n\n'
else:
return ""+"(MemberLink \n (GeneNode "+'"'+ indiv + '")\n' + '(ConceptNode "'+ group + '"))\n\n'
def eva(pred, el1, el2):
if pred == 'e':
pred = "has_evidence_code"
elif pred == 'l':
pred = "has_location"
elif pred == 'n':
pred = "has_name"
if "Uniprot" in el1 or "ChEBI" in el1 or "Uniprot" in el2 or "ChEBI" in el2:
return ""+'(EvaluationLink \n (PredicateNode "' + pred +'")\n (ListLink\n (MoleculeNode "'+ el1 + '")\n' + '(ConceptNode "'+ el2 + '")))\n\n'
else:
return ""+'(EvaluationLink \n (PredicateNode "' + pred +'")\n (ListLink\n (GeneNode "'+ el1 + '")\n' + '(ConceptNode "'+ el2 + '")))\n\n'
# The column 'R_PE_name' contains the Gene Symbol and its location information, so we need to split it
# Example: A1BG [extracellular region]
# A1BG is the Gene symbol and 'extracellular region' is the gene location
# some has extra symbols which needs preprocessing e.g. CCL5(24-91) [extracellular region], p-S472-AKT3 [plasma membrane]
def find_location(PEname, filter=False):
if "[" in PEname and "]" in PEname:
loc = PEname[PEname.find("[")+1:PEname.find("]")]
gene = PEname.split("[" +loc +"]")[0]
else:
loc = ""
gene = PEname
gene = gene.replace(gene[gene.find("("):PEname.find(")")+1], "").replace(")", "").replace("(","")
if "-" in gene:
gene = [i for i in gene.split("-") if not i.strip().isdigit()][-1]
gene = gene.strip()
if filter:
return gene
return gene,loc
# Finds the common word in a list of strings
def findstem(arr):
n = len(arr)
s = arr[0]
l = len(s)
res = ""
for i in range(l):
for j in range( i + 1, l + 1):
stem = s[i:j]
k = 1
for k in range(1, n):
if stem not in arr[k]:
break
if (k + 1 == n and len(res) < len(stem)):
res = stem
return res.strip()
def import_dataset(dataset, delim):
print("Started importing " + dataset)
if "UniProt" in dataset or "ChEBI" in dataset:
data = pd.read_csv(dataset, low_memory=False, delimiter=delim, names=["db_id", "R_PE_id", "R_PE_name","pathway","url","event_name", "evidence_code", "species","un1","un2","un3","un4","un5","un6"])
else:
data = pd.read_csv(dataset, low_memory=False, delimiter=delim, names=["db_id", "R_PE_id", "R_PE_name","pathway","url","event_name", "evidence_code", "species"])
# Take only symbols of Human species
data_human = data[data['species'] == 'Homo sapiens'][['db_id','R_PE_name','pathway']]
if not os.path.exists(os.path.join(os.getcwd(), 'dataset')):
os.makedirs('dataset')
with open("dataset/"+dataset.split("/")[-1]+".scm", 'w') as f:
if "NCBI" in dataset:
genes = []
pathways = []
db_ids = {}
for i in range(len(data_human)):
gene, location = find_location(data_human.iloc[i]['R_PE_name'])
pathway = data_human.iloc[i]['pathway']
db_id = data_human.iloc[i]['db_id']
# If a gene symbol is not one word, collect all gene symbols of the same db_id
# and find the common word in the list (which is the gene symbol in most cases)
# e.g "proKLK5" "KLK5" "propeptide KLK5"
if len(gene.split(" ")) >1:
if db_id in db_ids.keys():
gene = db_ids[data_human.iloc[i]['db_id']]
else:
gene_symbols = data_human[data_human['db_id']==db_id]['R_PE_name'].values
gene_symbols = [find_location(i, True) for i in gene_symbols]
if len(set(gene_symbols)) > 1:
stemed = findstem(gene_symbols)
else:
stemed = gene_symbols[-1]
if not (stemed.isdigit() and stemed in ["", " "] and len(stemed) == 1):
db_ids.update({db_id:stemed})
gene = stemed
if not gene.isdigit() and not len(gene) == 1 and not gene in ["", " "]:
f.write("(AndLink\n")
f.write(member(gene, pathway))
f.write(eva('l', gene, location))
f.write(")\n")
if not gene in genes:
genes.append(gene)
if not pathway in pathways:
pathways.append(pathway)
version = "NCBI2reactome_pathway_mapping:latest"
num_pathways = {"Reactome Pathway": len(pathways)}
metadata.update_meta(version,ncbi,script,genes=len(genes),pathways=num_pathways)
elif "UniProt" in dataset:
molecules = []
pathways = []
for i in range(len(data_human)):
prot = str(data_human.iloc[i]['R_PE_name'])
loc = prot[prot.find("[")+1:prot.find("]")]
prot_name = prot.split("[" +loc +"]")[0]
pathway = data_human.iloc[i]['pathway']
protein = [i for i in str(data_human.iloc[i]['db_id']).split("-") if not i.strip().isdigit()][-1]
f.write("(AndLink\n")
f.write(member("Uniprot:"+str(protein), pathway))
f.write(eva('l', "Uniprot:"+str(protein), loc))
f.write(")\n")
if not protein in molecules:
molecules.append(protein)
f.write(eva("n", "Uniprot:"+str(protein), prot_name))
if not pathway in pathways:
pathways.append(pathway)
version = "Uniprot2reactome_pathway_mapping:latest"
num_pathways = {"Reactome Pathway": len(pathways)}
metadata.update_meta(version,ncbi,script,prot=len(molecules),pathways=num_pathways)
elif "ChEBI" in dataset:
molecules = []
pathways = []
for i in range(len(data_human)):
chebi = str(data_human.iloc[i]['R_PE_name'])
loc = chebi[chebi.find("[")+1:chebi.find("]")]
chebi_name = chebi.split("[" +loc +"]")[0].replace('"',"")
chebi_id = str(data_human.iloc[i]['db_id'])
pathway = data_human.iloc[i]['pathway']
f.write("(AndLink \n")
f.write(member("ChEBI:"+chebi_id, pathway))
f.write(eva('l', "ChEBI:"+chebi_id, loc))
f.write(")\n")
if not chebi_id in molecules:
molecules.append(chebi_id)
f.write(eva("n","ChEBI:"+chebi_id, chebi_name))
if not pathway in pathways:
pathways.append(pathway)
version = "Chebi2reactome_pathway_mapping:latest"
num_pathways = {"Reactome Pathway": len(pathways)}
metadata.update_meta(version,ncbi,script,chebi=len(molecules),pathways=num_pathways)
print("Done")
if __name__ == "__main__":
print('''Import the following files from https://reactome.org
"Press N to import NCBI2Reactome_PE_Pathway
"Press U to import UniProt2Reactome_PE_Pathway
"Press C to import ChEBI2Reactome_PE_Pathway
"Press A for All \n''')
option = input()
if option == "N" or option == "n":
get_data(option)
import_dataset('raw_data/NCBI2Reactome_PE_Pathway.txt', '\t')
elif option == "U" or option == "u":
get_data(option)
import_dataset('raw_data/UniProt2Reactome_PE_Pathway.txt', '\t')
elif option == "C" or option == "c":
get_data(option)
import_dataset('raw_data/ChEBI2Reactome_PE_Pathway.txt', '\t')
elif option == "A" or option == "a":
get_data(option)
import_dataset('raw_data/NCBI2Reactome_PE_Pathway.txt', '\t')
import_dataset('raw_data/UniProt2Reactome_PE_Pathway.txt', '\t')
import_dataset('raw_data/ChEBI2Reactome_PE_Pathway.txt', '\t')
else:
print("Incorect option, Try again")
| [
"tanksh24@gmail.com"
] | tanksh24@gmail.com |
0ac0a0b90dd61c60bbb43322d941eaedad1d4918 | 58b79d98c2b12538324657a4e94a29d78ef93dff | /Automate the Boring Stuff With Python/forex_data_scraper.py | 0bb20372cc80d2fd86ccf2501086b05788c48264 | [] | no_license | Vasallius/Python-Journey | 2d68f8203453a1e4091d9ef14109914429a656da | ee0d8b1df2c3dbf20be5ff0805820835288a5ad6 | refs/heads/master | 2023-01-02T02:39:25.039693 | 2020-10-19T07:55:35 | 2020-10-19T07:55:35 | 265,748,648 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import requests
from bs4 import BeautifulSoup
start_time = time.time()
profile = webdriver.FirefoxProfile()
profile.set_preference('browser.download.folderList', 2) # custom location
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.dir',
r'C:\Users\Joseph\Desktop\Forex') #change location
profile.set_preference(
'browser.helperApps.neverAsk.saveToDisk', 'application/octet-stream')
# GET ALL PAIRS
homepage = requests.get(
'https://www.histdata.com/download-free-forex-data/?/excel/1-minute-bar-quotes')
soup = BeautifulSoup(homepage.text, 'lxml')
pair_links = [] # List of hrefs
for link in soup.find_all('a')[14:-25]:
pair_links.append(link.get('href'))
homepage.close()
# LOOPING OVER EACH PAIR LINK
for index, pair_link in enumerate(pair_links):
pair_link = 'https://www.histdata.com' + pair_links[index]
pair_page = requests.get(pair_link)
date_links = []
soup = BeautifulSoup(pair_page.text, 'lxml')
for y in soup.find_all('a')[14:-25]:
date_links.append('https://histdata.com' + y.get('href'))
# LOOPING OVER EACH DATE OF THE PAIR
browser = webdriver.Firefox(profile)
for z in date_links:
browser.get(z)
z = browser.find_element_by_id('a_file')
z.send_keys(Keys.PAGE_DOWN)
time.sleep(0.1)
z.click()
time.sleep(0.2)
time.sleep(20)
browser.close()
end_time = time.time()
print(f'{round(end_time-start_time)} have elapsed. Download complete.')
| [
"masterjed7262@gmail.com"
] | masterjed7262@gmail.com |
335b402a007aeb18653a23cbcedabbb8a23b486f | 2f2d2ac1d9bad42f41d28cca2cbfd28540a9edba | /Z-Transform/IZ_transform/iztrans_13.Py | f7be378e3f2138acbf235db7ee582c367d7ce936 | [] | no_license | BrunoLQS/Projects_and_ITA | 92272380cb21b385c13a81c79a8dc47aec7c1c34 | 7cb26ae2712f0c1530dc966660db79ec37ff11e1 | refs/heads/master | 2023-03-14T18:14:59.135757 | 2021-03-21T02:01:43 | 2021-03-21T02:01:43 | 264,700,697 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | title="""Simétrico da Fórmula 5"""
N=6
X=Piecewise((z/(-a+z), True), (Sum(a**n*z**(-n), (n, 0, oo)), True))
X=X.subs(a,2)
# a deve ser substituído por um INTEIRO !
m=1.0
results=[ iztrans(X,i,m) for i in nparange(-N,N+1)]
fig = figure()
ax1 = fig.add_subplot(111)
ax1.set_ylabel('Valor',fontsize=15)
ax1.set_xlabel('N',fontsize=15)
ax1.set_title(title,fontsize=18)
stem(nparange(-N,N+1),results, use_line_collection=True,linefmt=None, markerfmt=None, basefmt=None)
show() | [
"bruno.lqs222@gmail.com"
] | bruno.lqs222@gmail.com |
9a21c23715d128db1bc954fa6c15f26465495d0a | 97187ec0a310f3c798e5ac8abdea3faaf6a7c06f | /preprocessing/data_processing/time_parsers.py | f08d504c320a00b1e9ea80f3aef077a7e194039d | [] | no_license | ClaudiaWinklmayr/RoboStats | a1e32cb06d6bcb93507684254d1cbe6fa35d7317 | 800c39ba7cccdd0ba4bc9f71c7dad2f8cc05045f | refs/heads/master | 2021-09-17T00:37:48.146240 | 2018-06-15T08:36:27 | 2018-06-15T08:36:27 | 114,121,447 | 0 | 1 | null | 2018-03-06T13:00:30 | 2017-12-13T13:02:11 | Python | UTF-8 | Python | false | false | 1,671 | py | from datetime import datetime
import locale
import json
import numpy as np
def handle_timestamp(timestamp, time_format, date_format_file, rounding = False):
''' This function takes a string value from the selected time column of the original file
and tries to convert it to either datetime format or a float value. If this is not possible
None is returned which then triggers an error message in the window'''
if time_format == 'dt':
t = handle_datetime(timestamp, date_format_file)
if t == None:
return None
elif time_format == 'ms':
try:
t = float(timestamp)
except ValueError:
return None
if rounding: t = np.round(t, 2)
elif time_format == 's':
try:
t = float(timestamp)
except ValueError:
return None
if rounding: t = np.round(t, 2)
return t
def handle_datetime(timestamp, date_format_file):
''' this function tries to convert a given string to datetime format using default formats
specified in the settings file'''
# Format of old BioTracker data
if isinstance(timestamp, str) and timestamp[0] == "'" and timestamp[-1] == "'":
timestamp = timestamp[1:-1]
date_formats = json.load(open(date_format_file))
dt = None
for key in date_formats:
form = date_formats[key][0]
loc = date_formats[key][1]
try:
locale.setlocale(locale.LC_ALL, loc)
dt = datetime.strptime(timestamp, form)
except (ValueError, TypeError) as error:
pass
return dt
| [
"claudia.winklmayr@gmx.net"
] | claudia.winklmayr@gmx.net |
abeeec02fe789c788714f86d5410f5b957b7b6c1 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_276/ch49_2019_04_04_15_20_35_762666.py | 9d3cc6514e971164771488683d6fcc0b8efa07d7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | a = input('Digite um número inteiro positivo: )
lista = []
while a > 0:
lista.append(a)
print[ : :-1] | [
"you@example.com"
] | you@example.com |
94154160c02cab96d59313604b6931282af423a3 | ae25b06fad34f8ab68944761458c204f566b7f9f | /hoshino/modules/groupmaster/chat.py | eeceb3dc60785435a56725f363b26278aca72849 | [] | no_license | zw531129/shiori | 1adee2adc143c45ce3dfe35b32ef72b6bc728054 | 3803d00c02295000b37903222be34e478d5271d9 | refs/heads/master | 2023-02-24T13:50:26.184947 | 2021-02-02T13:52:45 | 2021-02-02T13:52:45 | 335,304,737 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,559 | py | import random
from nonebot import on_command
from hoshino import R, Service, priv, util
# basic function for debug, not included in Service('chat')
@on_command('zai?', aliases=('在?', '在?', '在吗', '在么?', '在嘛', '在嘛?'), only_to_me=True)
async def say_hello(session):
await session.send('はい!私はいつも貴方の側にいますよ!')
sv = Service('chat', visible=False)
@sv.on_fullmatch(('沙雕机器人', '沙雕機器人'))
async def say_sorry(bot, ev):
await bot.send(ev, 'ごめんなさい!嘤嘤嘤(〒︿〒)')
@sv.on_fullmatch(('老婆', 'waifu', 'laopo'), only_to_me=True)
async def chat_waifu(bot, ev):
if not priv.check_priv(ev, priv.SUPERUSER):
await bot.send(ev, R.img('laopo.jpg').cqcode)
else:
await bot.send(ev, 'mua~')
@sv.on_fullmatch('老公', only_to_me=True)
async def chat_laogong(bot, ev):
await bot.send(ev, '你给我滚!', at_sender=True)
@sv.on_fullmatch('mua', only_to_me=True)
async def chat_mua(bot, ev):
await bot.send(ev, '笨蛋~', at_sender=True)
@sv.on_fullmatch('看看柰子', only_to_me=True)
async def chat_mua(bot, ev):
await bot.send(ev, R.img('no_see.jpg').cqcode)
@sv.on_fullmatch('来点星奏')
async def seina(bot, ev):
await bot.send(ev, R.img('星奏.png').cqcode)
@sv.on_fullmatch(('我有个朋友说他好了', '我朋友说他好了', ))
async def ddhaole(bot, ev):
await bot.send(ev, '那个朋友是不是你弟弟?')
await util.silence(ev, 30)
@sv.on_fullmatch('我好了')
async def nihaole(bot, ev):
await bot.send(ev, '不许好,憋回去!')
await util.silence(ev, 30)
# ============================================ #
@sv.on_keyword(('确实', '有一说一', 'u1s1', 'yysy'))
async def chat_queshi(bot, ctx):
if random.random() < 0.05:
await bot.send(ctx, R.img('确实.jpg').cqcode)
@sv.on_keyword(('会战'))
async def chat_clanba(bot, ctx):
if random.random() < 0.2:
await bot.send(ctx, R.img('我的天啊你看看都几度了.jpg').cqcode)
@sv.on_keyword(('内鬼'))
async def chat_neigui(bot, ctx):
if random.random() < 0.10:
await bot.send(ctx, R.img('内鬼.png').cqcode)
nyb_player = f'''{R.img('newyearburst.gif').cqcode}
正在播放:New Year Burst
──●━━━━ 1:05/1:30
⇆ ㅤ◁ ㅤㅤ❚❚ ㅤㅤ▷ ㅤ↻
'''.strip()
@sv.on_keyword(('春黑', '新黑', '牛爷巴斯特', '牛爷巴斯妥'))
async def new_year_burst(bot, ev):
if random.random() < 0.2:
await bot.send(ev, nyb_player)
| [
"zw531129@outlook.com"
] | zw531129@outlook.com |
01b828d2865b4a3207556680e892c62aa6f28e15 | 2b468b1d22ecc5668529255676a1d43936829074 | /codes/personal_backend/tuoen/abs/service/product/__init__.py | 43853f724363e33396251d2f10c21af53b191a1a | [] | no_license | MaseraTiGo/4U | 5ac31b4cccc1093ab9a07d18218c3d8c0157dc9c | f572830aa996cfe619fc4dd8279972a2f567c94c | refs/heads/master | 2023-07-26T09:44:21.014294 | 2023-07-13T03:43:34 | 2023-07-13T03:43:34 | 149,217,706 | 0 | 0 | null | 2020-06-05T20:38:16 | 2018-09-18T02:34:29 | Python | UTF-8 | Python | false | false | 3,304 | py | # coding=UTF-8
'''
Created on 2016年7月22日
@author: Administrator
'''
import hashlib
import datetime
import json
import random
from django.db.models import Q
from tuoen.sys.core.exception.business_error import BusinessError
from tuoen.sys.utils.common.split_page import Splitor
from model.models import ProductModel
from model.models import Product
class ProductOperateServer(object):
@classmethod
def add(cls, **attrs):
"""add new product"""
if Product.query(name=attrs['name']):
BusinessError("产品名称已存在")
product = Product.create(**attrs)
if not product:
raise BusinessError("产品添加失败")
@classmethod
def update(cls, **attrs):
"""修改产品信息"""
if 'name' in attrs:
name = attrs['name']
id_qs = [p.id for p in Product.query(name=name)]
if id_qs and attrs['id'] not in id_qs:
raise BusinessError("产品名称已存在")
product = Product().update(**attrs)
return product
@classmethod
def search(cls, current_page, **search_info):
"""查询产品列表"""
if 'keyword' in search_info:
keyword = search_info.pop('keyword')
product_qs = Product.search(**search_info).filter(Q(name__contains = keyword) | \
Q(id__contains = keyword))
else:
product_qs = Product.search(**search_info)
product_qs = product_qs.order_by("-create_time")
return Splitor(current_page, product_qs)
@classmethod
def remove(cls, **attrs):
"""移除产品型号"""
id = attrs['id']
Product.query(id=id).delete()
return True
class ProductModelServer(object):
@classmethod
def add(cls, **attrs):
"""add new product model"""
if ProductModel.query(name=attrs['name']):
BusinessError("产品型号已存在")
product_id = attrs['product']
product = Product.get_byid(product_id)
attrs.update({"product": product})
product_model = ProductModel.create(**attrs)
if not product_model:
raise BusinessError("产品型号添加失败")
@classmethod
def update(cls, **attrs):
"""修改产品型号信息"""
product = ProductModel.query(id=attrs['id'])[0].product
attrs.update({'product': product})
if 'name' in attrs:
name = attrs['name']
product__model_ids = [pm.id for pm in ProductModel.query(name=name)]
if product__model_ids and attrs['id'] not in product__model_ids:
raise BusinessError("产品型号已存在")
product__model = ProductModel().update(**attrs)
return product__model
@classmethod
def search(cls, **search_info):
""""查询产品型号"""
product_id = search_info.pop('id')
product = Product.get_byid(product_id)
product_model_qs = ProductModel.search(product=product)
product_model_qs = product_model_qs.order_by("-create_time")
return product_model_qs
@classmethod
def remove(cls, **attrs):
"""移除产品型号"""
id = attrs['id']
ProductModel.query(id=id).delete()
return True | [
"344627181@qq.com"
] | 344627181@qq.com |
2372bc7c4eb86967b911e30dc506c92fcfd35f80 | 6ddba492106dff3295ff5dbe9f38b712ac84d9f9 | /KerasSingleLaneExperiment/health_nodewise_dropout.py | 3b4e656a40a81b2fff02adf03bcad93633e13e85 | [] | no_license | briannoogin/ANRL-UCI-Test-Networks | f2e067be3b4e141a2bfe9a30c4be680daaa032f3 | 3557d5ea964a17cb3239ec2d0576f1f598d1be86 | refs/heads/master | 2020-04-08T21:01:11.509497 | 2019-08-26T19:44:57 | 2019-08-26T19:44:57 | 159,725,312 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,049 | py |
from KerasSingleLaneExperiment.deepFogGuardPlus import define_deepFogGuardPlus, define_adjusted_deepFogGuardPlus
from KerasSingleLaneExperiment.loadData import load_data
from sklearn.model_selection import train_test_split
from KerasSingleLaneExperiment.FailureIteration import calculateExpectedAccuracy
from KerasSingleLaneExperiment.main import average
import keras.backend as K
import gc
import os
from keras.callbacks import ModelCheckpoint
# trains plain and adjusted deepFogGuardPlus at several node-wise dropout rates and evaluates each under all survivability settings
if __name__ == "__main__":
    # Trains deepFogGuardPlus with plain and "adjusted" node-wise dropout for
    # several dropout rates, then evaluates each trained model under every
    # survivability setting and reports per-iteration and average accuracies.
    use_GCP = True
    if use_GCP == True:
        # Pull the dataset from the storage bucket onto the local VM.
        os.system('gsutil -m cp -r gs://anrl-storage/data/mHealth_complete.log ./')
        # Guard the mkdir so re-running on the same machine does not crash.
        if not os.path.exists('models/'):
            os.mkdir('models/')
    data,labels= load_data('mHealth_complete.log')
    # split data into train, val, and test
    # 80/10/10 split
    training_data, test_data, training_labels, test_labels = train_test_split(data,labels,random_state = 42, test_size = .20, shuffle = True)
    val_data, test_data, val_labels, test_labels = train_test_split(test_data,test_labels,random_state = 42, test_size = .50, shuffle = True)
    num_vars = len(training_data[0])
    num_classes = 13
    survivability_settings = [
        [1,1,1],
        [.92,.96,.99],
        [.87,.91,.95],
        [.78,.8,.85],
    ]
    # nodewise survival rates for deepFogGuardPlus
    # elements of the vector are 1 - node-wise_dropout_rate
    nodewise_survival_rates = [
        [.95,.95,.95],
        [.9,.9,.9],
        [.7,.7,.7],
        [.5,.5,.5],
    ]
    hidden_units = 250
    batch_size = 1028
    load_model = False
    num_train_epochs = 25
    # file name with the experiments accuracy output
    output_name = "results/health_nodewise_dropout.txt"
    num_iterations = 10
    verbose = 2
    # keep track of output so that output is in order
    output_list = []
    # convert survivability settings into strings so it can be used in the dictionary as keys
    no_failure = str(survivability_settings[0])
    normal = str(survivability_settings[1])
    poor = str(survivability_settings[2])
    hazardous = str(survivability_settings[3])
    # convert dropout rates into strings
    nodewise_dropout_rate_05 = str(nodewise_survival_rates[0])
    nodewise_dropout_rate_10 = str(nodewise_survival_rates[1])
    nodewise_dropout_rate_30 = str(nodewise_survival_rates[2])
    nodewise_dropout_rate_50 = str(nodewise_survival_rates[3])
    # results: model variant -> dropout rate -> survivability setting -> per-iteration accuracy
    output = {
        "deepFogGuardPlus Node-wise Dropout":
        {
            nodewise_dropout_rate_05:
            {
                hazardous:[0] * num_iterations,
                poor:[0] * num_iterations,
                normal:[0] * num_iterations,
                no_failure:[0] * num_iterations,
            },
            nodewise_dropout_rate_10:
            {
                hazardous:[0] * num_iterations,
                poor:[0] * num_iterations,
                normal:[0] * num_iterations,
                no_failure:[0] * num_iterations,
            },
            nodewise_dropout_rate_30:
            {
                hazardous:[0] * num_iterations,
                poor:[0] * num_iterations,
                normal:[0] * num_iterations,
                no_failure:[0] * num_iterations,
            },
            nodewise_dropout_rate_50:
            {
                hazardous:[0] * num_iterations,
                poor:[0] * num_iterations,
                normal:[0] * num_iterations,
                no_failure:[0] * num_iterations,
            },
        },
        "deepFogGuardPlus Adjusted Node-wise Dropout":
        {
            nodewise_dropout_rate_05:
            {
                hazardous:[0] * num_iterations,
                poor:[0] * num_iterations,
                normal:[0] * num_iterations,
                no_failure:[0] * num_iterations,
            },
            nodewise_dropout_rate_10:
            {
                hazardous:[0] * num_iterations,
                poor:[0] * num_iterations,
                normal:[0] * num_iterations,
                no_failure:[0] * num_iterations,
            },
            nodewise_dropout_rate_30:
            {
                hazardous:[0] * num_iterations,
                poor:[0] * num_iterations,
                normal:[0] * num_iterations,
                no_failure:[0] * num_iterations,
            },
            nodewise_dropout_rate_50:
            {
                hazardous:[0] * num_iterations,
                poor:[0] * num_iterations,
                normal:[0] * num_iterations,
                no_failure:[0] * num_iterations,
            },
        }
    }
    # make folder for outputs
    if not os.path.exists('results/'):
        os.mkdir('results/')
    for iteration in range(1,num_iterations+1):
        output_list.append('ITERATION ' + str(iteration) + '\n')
        print("ITERATION ", iteration)
        output_list.append('deepFogGuardPlus Node-wise Dropout' + '\n')
        print("deepFogGuardPlus Node-wise Dropout")
        for nodewise_survival_rate in nodewise_survival_rates:
            # node-wise dropout
            deepFogGuardPlus_nodewise_dropout_file = str(iteration) + " " + str(nodewise_survival_rate) + 'health_nodewise_dropout.h5'
            deepFogGuardPlus_nodewise_dropout = define_deepFogGuardPlus(num_vars,num_classes,hidden_units,nodewise_survival_rate)
            # adjusted node-wise dropout
            # BUG FIX: this file name previously duplicated
            # deepFogGuardPlus_nodewise_dropout_file, so both models
            # checkpointed to (and restored from) the same .h5 file and
            # overwrote each other's weights.
            deepFogGuardPlus_adjusted_nodewise_dropout_file = str(iteration) + " " + str(nodewise_survival_rate) + 'health_adjusted_nodewise_dropout.h5'
            deepFogGuardPlus_adjusted_nodewise_dropout = define_adjusted_deepFogGuardPlus(num_vars,num_classes,hidden_units,nodewise_survival_rate)
            if load_model:
                deepFogGuardPlus_nodewise_dropout.load_weights(deepFogGuardPlus_nodewise_dropout_file)
                # BUG FIX: the adjusted model previously loaded the plain model's weights file.
                deepFogGuardPlus_adjusted_nodewise_dropout.load_weights(deepFogGuardPlus_adjusted_nodewise_dropout_file)
            else:
                print("Training deepFogGuardPlus Node-wise Dropout")
                print(str(nodewise_survival_rate))
                # node-wise dropout: keep only the best-validation-accuracy weights,
                # then restore them before evaluation.
                deepFogGuardPlus_nodewise_dropout_CheckPoint = ModelCheckpoint(deepFogGuardPlus_nodewise_dropout_file, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True, mode='auto', period=1)
                deepFogGuardPlus_nodewise_dropout.fit(training_data,training_labels,epochs=num_train_epochs, batch_size=batch_size,verbose=verbose,shuffle = True, callbacks = [deepFogGuardPlus_nodewise_dropout_CheckPoint],validation_data=(val_data,val_labels))
                deepFogGuardPlus_nodewise_dropout.load_weights(deepFogGuardPlus_nodewise_dropout_file)
                # adjusted node-wise dropout
                print("Training deepFogGuardPlus Adjusted Node-wise Dropout")
                deepFogGuardPlus_adjusted_nodewise_dropout_CheckPoint = ModelCheckpoint(deepFogGuardPlus_adjusted_nodewise_dropout_file, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True, mode='auto', period=1)
                deepFogGuardPlus_adjusted_nodewise_dropout.fit(training_data,training_labels,epochs=num_train_epochs, batch_size=batch_size,verbose=verbose,shuffle = True, callbacks = [deepFogGuardPlus_adjusted_nodewise_dropout_CheckPoint],validation_data=(val_data,val_labels))
                deepFogGuardPlus_adjusted_nodewise_dropout.load_weights(deepFogGuardPlus_adjusted_nodewise_dropout_file)
            print("Test on normal survival rates")
            output_list.append("Test on normal survival rates" + '\n')
            for survivability_setting in survivability_settings:
                output_list.append(str(survivability_setting)+ '\n')
                print(survivability_setting)
                output["deepFogGuardPlus Node-wise Dropout"][str(nodewise_survival_rate)][str(survivability_setting)][iteration-1] = calculateExpectedAccuracy(deepFogGuardPlus_nodewise_dropout,survivability_setting,output_list,training_labels,test_data,test_labels)
                output["deepFogGuardPlus Adjusted Node-wise Dropout"][str(nodewise_survival_rate)][str(survivability_setting)][iteration-1] = calculateExpectedAccuracy(deepFogGuardPlus_adjusted_nodewise_dropout,survivability_setting,output_list,training_labels,test_data,test_labels)
            # clear session so that model will recycled back into memory
            K.clear_session()
            gc.collect()
            del deepFogGuardPlus_nodewise_dropout
            del deepFogGuardPlus_adjusted_nodewise_dropout
    # calculate average accuracies for both dropout variants
    for nodewise_survival_rate in nodewise_survival_rates:
        print(nodewise_survival_rate)
        for survivability_setting in survivability_settings:
            deepFogGuardPlus_nodewise_dropout_acc = average(output["deepFogGuardPlus Node-wise Dropout"][str(nodewise_survival_rate)][str(survivability_setting)])
            output_list.append(str(nodewise_survival_rate) + str(survivability_setting) + " deepFogGuardPlus Node-wise Dropout: " + str(deepFogGuardPlus_nodewise_dropout_acc) + '\n')
            print(nodewise_survival_rate,survivability_setting,"deepFogGuardPlus Node-wise Dropout:",deepFogGuardPlus_nodewise_dropout_acc)
            # BUG FIX: the "Adjusted" rows previously reported the plain
            # model's average instead of the adjusted model's.
            deepFogGuardPlus_adjusted_nodewise_dropout_acc = average(output["deepFogGuardPlus Adjusted Node-wise Dropout"][str(nodewise_survival_rate)][str(survivability_setting)])
            output_list.append(str(nodewise_survival_rate) + str(survivability_setting) + " deepFogGuardPlus Adjusted Node-wise Dropout: " + str(deepFogGuardPlus_adjusted_nodewise_dropout_acc) + '\n')
            print(nodewise_survival_rate,survivability_setting,"deepFogGuardPlus Adjusted Node-wise Dropout:",deepFogGuardPlus_adjusted_nodewise_dropout_acc)
    # write experiments output to file
    with open(output_name,'w') as file:
        file.writelines(output_list)
        file.flush()
        os.fsync(file)
    if use_GCP:
        os.system('gsutil -m -q cp -r {} gs://anrl-storage/results/'.format(output_name))
    print(output)
| [
"brian.qh.nguyen@gmail.com"
] | brian.qh.nguyen@gmail.com |
919890dfa27b2785488ab4ec815c2d7c9bf0faa7 | 9cac3bc1c61f4de32251072e49c50b0543450490 | /examples/find_available_seattlegeni_vessels.py | 412176990dffaec0800a9c6acb8ef925e3c14bd2 | [
"MIT"
] | permissive | SeattleTestbed/experimentmanager | 40b036028809fa77dcdec804d58853f679e326de | 31c52f35fba1e367b1177b3a95ae65b4dd0e1a1c | refs/heads/master | 2020-12-25T17:34:49.713296 | 2017-05-15T11:37:36 | 2017-05-15T11:37:36 | 20,136,879 | 0 | 5 | null | 2016-08-29T09:00:07 | 2014-05-24T18:43:36 | Python | UTF-8 | Python | false | false | 4,356 | py | """
This script will look up all active nodes that are part of a testbed managed
by SeattleGENI and determine which vessels on those nodes are available.
This information could be used in various ways, one of them being to gather
information about those node locations, such as latency from a certain
location, and decide which vessels to acquire based on that information.
Note: This script can result in a large amount of of node communication.
Specifically, it will try to communicate with every node that is part of
the testbed.
Example output of this script:
Number of advertising nodes: 452
DEBUG: only looking at 5 nodes.
Failure on NAT$2dfeca92a68744eb493cf5ba5559cdcee03684c5v2:1224: Connection Refused! ['[Errno 111] Connection refused']
On 1.1.1.1:1224 found 6 available vessels
On 4.4.4.4:1224 found 6 available vessels
On 3.3.3.3:1224 found 5 available vessels
Failure on 2.2.2.2:1224: timed out
Number of nodes that SeattleGENI vessels are available on: 3
"""
import sys
import traceback
# If this script resides outside of the directory that contains the seattlelib
# files and experimentlib.py, then you'll need to set that path here.
EXPERIMENTLIB_DIRECTORY = "./experimentlibrary/"
sys.path.append(EXPERIMENTLIB_DIRECTORY)
import experimentlib
# This can be used to adjust how many threads are used for concurrently
# contacting nodes when experimentlib.run_parallelized() is called.
#experimentlib.num_worker_threads = 10
# The public key that all seattlegeni nodes advertise under.
SEATTLECLEARINGHOUSE_PUBLICKEY_FILENAME = "seattlegeni_advertisement.publickey"
# Useful for development. Only contact this many nodes.
MAX_NODES_TO_LOOK_AT = 5
def main():
identity = experimentlib.create_identity_from_key_files(SEATTLECLEARINGHOUSE_PUBLICKEY_FILENAME)
nodelocation_list = experimentlib.lookup_node_locations_by_identity(identity)
print("Number of advertising nodes: " + str(len(nodelocation_list)))
if MAX_NODES_TO_LOOK_AT is not None:
print("DEBUG: only looking at " + str(MAX_NODES_TO_LOOK_AT) + " nodes.")
nodelocation_list = nodelocation_list[:MAX_NODES_TO_LOOK_AT]
# Talk to each nodemanager to find out vessel information.
browse_successlist, failurelist = \
experimentlib.run_parallelized(nodelocation_list, browse_node_for_available_vessels)
# Create a dictionary whose keys are the nodeids and values are lists of
# vesseldicts of the available vessels on that node.
available_vesseldicts_by_node = {}
for (nodeid, available_vesseldicts) in browse_successlist:
if available_vesseldicts:
available_vesseldicts_by_node[nodeid] = available_vesseldicts
print("Number of nodes that SeattleGENI vessels are available on: " +
str(len(available_vesseldicts_by_node.keys())))
def browse_node_for_available_vessels(nodelocation):
"""
Contact the node at nodelocation and return a list of vesseldicts
for each vessel on the node.
"""
try:
# Ask the node for information about the vessels on it.
vesseldict_list = experimentlib.browse_node(nodelocation)
# Gather up a list of vesseldicts of the available vessels.
available_vesseldict_list = []
for vesseldict in vesseldict_list:
if is_vessel_available(vesseldict):
available_vesseldict_list.append(vesseldict)
# Just so we can watch the progress, print some output.
# We display the nodelocation rather than the nodeid because it's more
# interesting to look at, even though nodes can change location and this
# isn't a unique identifier of the node.
print("On " + nodelocation + " found " +
str(len(available_vesseldict_list)) + " available vessels")
return available_vesseldict_list
except experimentlib.NodeCommunicationError, e:
print("Failure on " + nodelocation + ": " + str(e))
except:
traceback.print_exc()
def is_vessel_available(vesseldict):
"""
This returns True or False depending on whether the vesseldict indicates an
an available vessel. That is, one that can be acquired through SeattleGENI.
"""
if vesseldict['vesselname'] == 'v2':
# v2 is a special vessel that will never be available from SeattleGENI.
return False
else:
# If there are no userkeys, the vessel is available.
return len(vesseldict['userkeys']) == 0
if __name__ == "__main__":
main()
| [
"USER@DOMAIN"
] | USER@DOMAIN |
6d394181e586c05039f2bdc5b5b36643df128c19 | a7e326100e5e73d434685306d7b556b828ff0271 | /P95.fabriz.py | 8448c5c9e27c752d1cc9ccdbe7f65d5267dfe753 | [] | no_license | robj137/ProjectEuler | 812cdc3d2c1aed674bbddf50ea6bc4a197594d74 | 6fb6f50e62870c5bfcee5e271fceff8f655792cd | refs/heads/master | 2021-01-18T16:35:36.074584 | 2013-08-30T19:05:26 | 2013-08-30T19:05:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | #!/usr/bin/python
lim = 1000000
div = [0]*lim #sieving for divisor sums
for i in xrange(1,lim):
for j in xrange(2*i, lim, i):
div[j] += i
chain = [0]*lim #chains: -1 = bad, 0 = untested, n = length of chain
chain[0] = -1
for i in xrange(1,lim):
if chain[i]: continue
seq = [i]
while(div[seq[-1]]<lim and chain[div[seq[-1]]]==0 and div[seq[-1]] not in seq):
seq.append(div[seq[-1]])
if div[seq[-1]] in seq: #hit a loop
loop = seq.index(div[seq[-1]])
for l in range(0, loop):
chain[seq[l]] = -1 #pre-loop: mark as bad
for l in range(loop, len(seq)):
chain[seq[l]] = len(seq)-loop #within-loop: mark chain length
else: #exceeded lim or hit a bad number
for s in seq: chain[s] = -1 #mark as bad
print chain.index(max(chain))
| [
"robj137@gmail.com"
] | robj137@gmail.com |
dcfe71cae74fb930530a22f259f0c77b4e78a2f5 | 9da784a791c671ef08398f1833f90b67182e53d3 | /object_branch/preprocess/nyu/voxelize_objects.py | baf7e4af389d8dcf5a5d80b8e2a20b876bdf0bf7 | [
"MIT"
] | permissive | zebrajack/Associative3D | 2b237036b0fcc1ddcc7028b7c61287653a87354a | c50818b593ec48c38ed7ee3e109c23531089da32 | refs/heads/master | 2022-12-05T19:21:08.918853 | 2020-08-25T19:04:32 | 2020-08-25T19:04:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,568 | py |
'''
Converts the mat data mesh for object to vox
'''
import os.path as osp
import argparse
import scipy.io as sio
import pdb
import numpy as np
import os
import sys
#sys.path.append('/home/nileshk/3d/external/binvox')
code_dir=os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'../../'))
nyu_dir = '/nfs.yoda/imisra/nileshk/nyud2/'
binvox_dir = osp.join(code_dir, 'external','binvox')
binvox_exec_file = osp.join(binvox_dir, 'binvox')
import sys
sys.path.insert(0, osp.join(code_dir ,'external/binvox/'))
import binvox_rw
def convert_mat_to_obj(mat_file, obj_file):
    """Merge every component mesh stored in *mat_file* into one Wavefront OBJ.

    Each element of the .mat 'comp' struct array carries ``vertices`` (Nx3)
    and ``faces`` (1-based vertex indices, reshapeable to Mx3). Face indices
    of later components are offset by the number of vertices already written
    so they keep pointing at their own vertices in the merged list.
    """
    object_mat = sio.loadmat(mat_file, squeeze_me=True, struct_as_record=False)
    all_faces = np.zeros((0, 3))
    all_vertices = np.zeros((0, 3))
    for comp in object_mat['comp']:
        faces = comp.faces.reshape(-1, 3).astype(np.float32)
        vertices = comp.vertices
        # Re-base this component's 1-based indices onto the merged vertex list.
        faces = faces + len(all_vertices)
        all_vertices = np.concatenate([all_vertices, vertices])
        all_faces = np.concatenate([all_faces, faces])
    # BUG FIX: np.int was removed from modern NumPy; plain int is equivalent here.
    all_faces = all_faces.astype(int)
    with open(obj_file, 'w') as fout:
        for vert in all_vertices:
            # BUG FIX: OBJ records are whitespace-separated; the previous
            # 'v {}, {}, {}' wrote commas, which binvox cannot parse.
            fout.write('v {} {} {}\n'.format(vert[0], vert[1], vert[2]))
        for face in all_faces:
            fout.write('f {} {} {}\n'.format(face[0], face[1], face[2]))
    return
# Edge length (in voxels) of the binvox output grid.
grid_size = 64
parser = argparse.ArgumentParser(description='Parse arguments.')
parser.add_argument('--min', type=int, help='min id')
parser.add_argument('--max', type=int, default=0, help='max id')
parser.add_argument('--matfile', type=str, default='all')
args = parser.parse_args()
# Delete .binvox / .mat leftovers from a previous run of this script.
dc1 = 'find {} -name "*.binvox" -type f -delete'.format(osp.join(nyu_dir,'object_obj'))
dc2 = 'find {} -name "*.mat" -type f -delete'.format(osp.join(nyu_dir,'object_obj'))
os.system(dc1)
os.system(dc2)
# Every .mat under object/ is one object to voxelize; the id is the stem.
object_ids = [name.replace(".mat","") for name in os.listdir(osp.join(nyu_dir, 'object'))]
n_objects = len(object_ids)
obj_dir = osp.join(nyu_dir, 'object_obj')
if not osp.exists(obj_dir):
    os.makedirs(obj_dir)
# n_objects = 2
for ix in range(n_objects):
    obj_id = object_ids[ix]
    print(obj_id)
    obj_file = osp.join(nyu_dir, 'object_obj', obj_id + ".obj")
    mat_file = osp.join(nyu_dir, 'object', obj_id + ".mat")
    # Merge the component meshes of the .mat into a single OBJ for binvox.
    convert_mat_to_obj(mat_file, obj_file)
    # binvox names its first output <id>.binvox; a second run on the same
    # .obj produces <id>_1.binvox, which is why the surface pass uses '_1'.
    binvox_file_interior = osp.join(obj_dir, obj_id + '.binvox')
    binvox_file_surface = osp.join(obj_dir, obj_id + '_1.binvox')
    # Two binvox passes: a solid fill, then -e for the exact surface voxels.
    cmd_interior = '{} -cb -d {} {}'.format(binvox_exec_file, grid_size, osp.join(obj_dir, obj_id + '.obj'))
    cmd_surface = '{} -cb -e -d {} {}'.format(binvox_exec_file, grid_size, osp.join(obj_dir, obj_id + '.obj'))
    os.system(cmd_interior)
    os.system(cmd_surface)
    with open(binvox_file_interior, 'rb') as f0:
        with open(binvox_file_surface, 'rb') as f1:
            vox_read_interior = binvox_rw.read_as_3d_array(f0)
            vox_read_surface = binvox_rw.read_as_3d_array(f1)
    #need to add translation corresponding to voxel centering
    # Boolean union of the interior and surface occupancy grids.
    # NOTE(review): np.bool was removed in modern NumPy; plain bool is
    # needed there — confirm the pinned NumPy version before upgrading.
    shape_vox = vox_read_interior.data.astype(np.bool) + vox_read_surface.data.astype(np.bool)
    if(np.max(shape_vox) > 0):
        Xs, Ys, Zs = np.where(shape_vox)
        # Fractional offset of the first occupied voxel along each axis,
        # used to shift binvox's translation onto the occupied bounding box.
        trans_centre = np.array([1.0*np.min(Xs)/(np.size(shape_vox,0)), 1.0*np.min(Ys)/(np.size(shape_vox,1)), 1.0*np.min(Zs)/(np.size(shape_vox,2)-1)] )
        translate = vox_read_surface.translate - trans_centre*vox_read_surface.scale
        sio.savemat(osp.join(obj_dir, obj_id + '.mat'), {'voxels' : shape_vox, 'scale' : vox_read_surface.scale, 'translation' : translate})
| [
"jasonsyqian@gmail.com"
] | jasonsyqian@gmail.com |
871588cf841884f7fc798cea219e466dad82e5ed | c123cb27fbb807acbc4a8bc6148e539dc8c3c3a3 | /view/Ui_CadastrePageReportDialog.py | bf2daf3ef71c709552d9ebe8c80c5b11dea33fb7 | [] | no_license | ankhbold/lm3_mgis | 0b1e5498adc3d556b7ea0656ae9fdc02c47fc0f7 | a2b4fbdcf163662c179922698537ea9150ba16e5 | refs/heads/master | 2020-08-06T20:17:49.049160 | 2019-10-08T05:35:05 | 2019-10-08T05:35:05 | 213,139,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,886 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\work\LAND_MANAGER\lm2\view\CadastrePageReportDialog.ui.'
#
# Created by: PyQt5 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui
# Compatibility shims emitted by the Qt UI code generator: fall back to
# identity / keyword-less calls on bindings that lack QString / UnicodeUTF8.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # No QString on this binding: strings are already unicode.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer API: no encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_CadastrePageReportDialog(object):
    """Generated UI scaffold for the cadastre page report dialog.

    NOTE(review): the file imports PyQt5 but instantiates widgets via
    QtGui.* (PyQt4-era names); under PyQt5 these classes live in
    QtWidgets — confirm which Qt binding this project actually runs
    before editing, and regenerate from the .ui file rather than editing
    by hand (see the header warning).
    """

    def setupUi(self, CadastrePageReportDialog):
        """Create all child widgets with fixed geometries (no layouts)."""
        CadastrePageReportDialog.setObjectName(_fromUtf8("CadastrePageReportDialog"))
        CadastrePageReportDialog.resize(732, 453)
        # Action buttons.
        self.close_button = QtGui.QPushButton(CadastrePageReportDialog)
        self.close_button.setGeometry(QtCore.QRect(650, 410, 75, 23))
        self.close_button.setObjectName(_fromUtf8("close_button"))
        self.find_button = QtGui.QPushButton(CadastrePageReportDialog)
        self.find_button.setGeometry(QtCore.QRect(450, 59, 75, 23))
        self.find_button.setObjectName(_fromUtf8("find_button"))
        # Results table: 7 columns, headers set in retranslateUi.
        self.cpage_twidget = QtGui.QTableWidget(CadastrePageReportDialog)
        self.cpage_twidget.setGeometry(QtCore.QRect(10, 110, 718, 292))
        self.cpage_twidget.setObjectName(_fromUtf8("cpage_twidget"))
        self.cpage_twidget.setColumnCount(7)
        self.cpage_twidget.setRowCount(0)
        item = QtGui.QTableWidgetItem()
        self.cpage_twidget.setHorizontalHeaderItem(0, item)
        item = QtGui.QTableWidgetItem()
        self.cpage_twidget.setHorizontalHeaderItem(1, item)
        item = QtGui.QTableWidgetItem()
        self.cpage_twidget.setHorizontalHeaderItem(2, item)
        item = QtGui.QTableWidgetItem()
        self.cpage_twidget.setHorizontalHeaderItem(3, item)
        item = QtGui.QTableWidgetItem()
        self.cpage_twidget.setHorizontalHeaderItem(4, item)
        item = QtGui.QTableWidgetItem()
        self.cpage_twidget.setHorizontalHeaderItem(5, item)
        item = QtGui.QTableWidgetItem()
        self.cpage_twidget.setHorizontalHeaderItem(6, item)
        self.results_label = QtGui.QLabel(CadastrePageReportDialog)
        self.results_label.setGeometry(QtCore.QRect(10, 90, 201, 16))
        self.results_label.setText(_fromUtf8(""))
        self.results_label.setObjectName(_fromUtf8("results_label"))
        self.print_button = QtGui.QPushButton(CadastrePageReportDialog)
        self.print_button.setGeometry(QtCore.QRect(550, 410, 75, 23))
        self.print_button.setObjectName(_fromUtf8("print_button"))
        # Decorative horizontal separators at the top and bottom.
        self.line = QtGui.QFrame(CadastrePageReportDialog)
        self.line.setGeometry(QtCore.QRect(0, 20, 731, 16))
        self.line.setFrameShape(QtGui.QFrame.HLine)
        self.line.setFrameShadow(QtGui.QFrame.Sunken)
        self.line.setObjectName(_fromUtf8("line"))
        self.line_2 = QtGui.QFrame(CadastrePageReportDialog)
        self.line_2.setGeometry(QtCore.QRect(0, 430, 731, 16))
        self.line_2.setFrameShape(QtGui.QFrame.HLine)
        self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_2.setObjectName(_fromUtf8("line_2"))
        self.label_2 = QtGui.QLabel(CadastrePageReportDialog)
        self.label_2.setGeometry(QtCore.QRect(10, 10, 281, 16))
        self.label_2.setObjectName(_fromUtf8("label_2"))
        # Optional filter on print year: the spin box is enabled elsewhere
        # when the checkbox is checked (disabled by default here).
        self.print_year_chbox = QtGui.QCheckBox(CadastrePageReportDialog)
        self.print_year_chbox.setGeometry(QtCore.QRect(330, 40, 101, 17))
        self.print_year_chbox.setObjectName(_fromUtf8("print_year_chbox"))
        self.print_year_sbox = QtGui.QSpinBox(CadastrePageReportDialog)
        self.print_year_sbox.setEnabled(False)
        self.print_year_sbox.setGeometry(QtCore.QRect(330, 59, 91, 22))
        self.print_year_sbox.setMinimum(2000)
        self.print_year_sbox.setMaximum(2100)
        self.print_year_sbox.setProperty("value", 2017)
        self.print_year_sbox.setObjectName(_fromUtf8("print_year_sbox"))
        self.label_3 = QtGui.QLabel(CadastrePageReportDialog)
        self.label_3.setGeometry(QtCore.QRect(10, 40, 171, 16))
        self.label_3.setObjectName(_fromUtf8("label_3"))
        # Search criteria inputs.
        self.person_id_edit = QtGui.QLineEdit(CadastrePageReportDialog)
        self.person_id_edit.setGeometry(QtCore.QRect(10, 60, 150, 20))
        self.person_id_edit.setObjectName(_fromUtf8("person_id_edit"))
        self.parcel_id_edit = QtGui.QLineEdit(CadastrePageReportDialog)
        self.parcel_id_edit.setGeometry(QtCore.QRect(170, 60, 150, 20))
        self.parcel_id_edit.setObjectName(_fromUtf8("parcel_id_edit"))
        self.label_4 = QtGui.QLabel(CadastrePageReportDialog)
        self.label_4.setGeometry(QtCore.QRect(170, 40, 151, 16))
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.retranslateUi(CadastrePageReportDialog)
        QtCore.QMetaObject.connectSlotsByName(CadastrePageReportDialog)

    def retranslateUi(self, CadastrePageReportDialog):
        """Set all user-visible strings (kept separate for runtime retranslation)."""
        CadastrePageReportDialog.setWindowTitle(_translate("CadastrePageReportDialog", "Dialog", None))
        self.close_button.setText(_translate("CadastrePageReportDialog", "close", None))
        self.find_button.setText(_translate("CadastrePageReportDialog", "Find", None))
        item = self.cpage_twidget.horizontalHeaderItem(0)
        item.setText(_translate("CadastrePageReportDialog", "ID", None))
        item = self.cpage_twidget.horizontalHeaderItem(1)
        item.setText(_translate("CadastrePageReportDialog", "PrintDate", None))
        item = self.cpage_twidget.horizontalHeaderItem(2)
        item.setText(_translate("CadastrePageReportDialog", "Page Number", None))
        item = self.cpage_twidget.horizontalHeaderItem(3)
        item.setText(_translate("CadastrePageReportDialog", "Person ID", None))
        item = self.cpage_twidget.horizontalHeaderItem(4)
        item.setText(_translate("CadastrePageReportDialog", "Right Holder", None))
        item = self.cpage_twidget.horizontalHeaderItem(5)
        item.setText(_translate("CadastrePageReportDialog", "Parcel ID", None))
        item = self.cpage_twidget.horizontalHeaderItem(6)
        item.setText(_translate("CadastrePageReportDialog", "Streetname-Khashaa", None))
        self.print_button.setText(_translate("CadastrePageReportDialog", "Print", None))
        self.label_2.setText(_translate("CadastrePageReportDialog", "Cadastre page report", None))
        self.print_year_chbox.setText(_translate("CadastrePageReportDialog", "Year Print", None))
        self.label_3.setText(_translate("CadastrePageReportDialog", "Person ID", None))
        self.label_4.setText(_translate("CadastrePageReportDialog", "Parcel ID", None))
| [
"aagii_csms@yahoo.com"
] | aagii_csms@yahoo.com |
d2cc4dc3b948ffe438042ed4adc5ccc75d1930a0 | 66021e6e21fbc31af116b10472ce27f743c35c05 | /code/12_protein_identification.py | 208b84bc78f37139f4f1de97f33d2fb6e3aca559 | [] | no_license | erik-burger/erik_burger_genome_analysis | fc0c913429f8c5797e4e13fee91fa9b2725542a2 | b3c3b53ccba0b40f4b267e98b10d78f07f64dc73 | refs/heads/master | 2021-04-23T13:10:22.060975 | 2020-05-25T13:39:40 | 2020-05-25T13:39:40 | 249,927,729 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,599 | py | # This code is made to assign each found gene to a DNA sequence and then output a cvs file containing
# the sequence together with its log2 fold change, its p-value and its name.
import csv
from Bio import SeqIO
import re
# Paths are edited by hand depending on which DESeq2 comparison is analysed
# (e.g. aril_vs_leaf). The tsv comes from R, the fasta from MAKER.
tsv_path = ("/Users/ErikBurger/Desktop/Genomanalys/"
            "erik_burger_genome_analysis/analyses/12_DESeq/aril_vs_leaf.tsv")
csv_path = ('/Users/ErikBurger/Desktop/Genomanalys/erik_burger_genome_analysis/'
            'analyses/12_DESeq/results_aril_vs_leaf.csv')
fasta_path = "/Users/ErikBurger/Desktop/all_fasta.fasta"

# Pre-compile the patterns that pick the contig id and the gene number out of
# the gene name; raw strings keep the \d escapes literal.
tig_re = re.compile(r"(tig\d+)")
gene_re = re.compile(r"gene-\d+\.\d+-")

# 'a' (append) keeps the original behaviour: the file is created when missing,
# and a re-run appends rows (repeating the header) instead of overwriting.
# The with-blocks fix the original's leaked handles (tsv, output and one
# fasta handle per matched row were opened and never closed).
with open(tsv_path) as tsv_file, open(csv_path, 'a') as out:
    # Write the column names.
    out.write("name, log2FoldChange, pvalue, sequence \n")
    # For each gene in the tsv file from R, find a DNA match in the MAKER
    # fasta. The fasta is re-scanned per matched gene, which is O(rows x
    # fasta) but keeps memory use flat for large genomes.
    for row in csv.reader(tsv_file, delimiter="\t"):
        tig = tig_re.search(row[0])
        gene_num = gene_re.search(row[0])
        if not (tig and gene_num):
            continue
        with open(fasta_path) as fasta_file:
            for fasta in SeqIO.parse(fasta_file, 'fasta'):
                if str(fasta.id).find(tig.group(0)) > -1 and str(fasta.id).find(gene_num.group(0)) > -1:
                    out.write(row[0] + "," + row[2] + "," + row[5] + "," + str(fasta.seq) + "\n")
# The output files are then moved into Excel to sort the data on log2FoldChange.
| [
"erik.burger@hotmail.se"
] | erik.burger@hotmail.se |
5ec37b8dfa191eb8cf8385f62e8bb0758b02315b | 619bbcfbdfcbc572d4233c2470bb11a07395f5ae | /Interprete/Instrucciones/Print.py | fcbfcaeacb8844081b6d902ed44b105bc39e0c3c | [] | no_license | Josue-Zea/-OLC2-Proyecto1_201807159 | 6119850e57bfcaaf1d2ef1bed7129b1378517f9f | 087e495bcb89ca0fb612492a9127eb7fd73390be | refs/heads/master | 2023-08-17T23:21:37.186506 | 2021-09-23T05:50:06 | 2021-09-23T05:50:06 | 403,418,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | from Interprete.Abstract.Instruccion import Instruccion
from Interprete.Abstract.NodoAst import NodoAst
from Interprete.TS.Exception import Exception
from Interprete.Expresiones.Primitivos import Primitivos
from datetime import datetime
from Interprete.Abstract.NodoAst import NodoAst
class Print(Instruccion):
    """PRINT instruction: evaluates its expressions and writes each value to
    the interpreter console without a trailing newline."""

    def __init__(self, expresion, fila, columna):
        # expresion: list of expression nodes to evaluate and print.
        self.expresion = expresion
        self.fila = fila        # source line, kept for error reporting
        self.columna = columna  # source column, kept for error reporting

    def interpretar(self, tree, tabla):
        """Evaluate every expression; return the first Exception found,
        otherwise print all values in order."""
        valores = []
        for exp in self.expresion:
            valor = exp.interpretar(tree, tabla)
            # Lists (arrays) are rendered through their textual form.
            if type(valor) == list:
                valores.append(self.obtenerString(valor))
            else:
                valores.append(valor)
        # Abort before printing anything if any expression failed.
        for valor in valores:
            if isinstance(valor, Exception):
                return valor
        for valor in valores:
            tree.actualizar_consola_sin_salto(valor)

    def obtenerString(self, lista):
        """Render a list of Primitivos as "[v1,v2,...]".

        BUG FIX: the previous rstrip-based comma removal returned "]" (no
        opening bracket) for an empty list and could strip legitimate
        trailing characters from the last value; joining avoids both.
        """
        valores = [str(item.valor) for item in lista if isinstance(item, Primitivos)]
        return "[" + ",".join(valores) + "]"

    def getNodo(self):
        """Build the AST node used for graphing."""
        nodo = NodoAst("PRINT")
        for exp in self.expresion:
            nodo.agregarHijoNodo(exp.getNodo())
        return nodo
"jdzeaherrera@gmail.com"
] | jdzeaherrera@gmail.com |
4305a9232a81ce0a924a5bae10cd5e4b6444862a | 171a89102edf10901e18a2c0f41c3313608d2324 | /src/rogerthat/cron/send_unread_reminder.py | 2f76a5ae8ad60c5efdeacb4ee60c30ac0549458b | [
"Apache-2.0"
] | permissive | gitter-badger/rogerthat-backend | 7e9c12cdd236ef59c76a62ac644fcd0a7a712baf | ab92dc9334c24d1b166972b55f1c3a88abe2f00b | refs/heads/master | 2021-01-18T06:08:11.435313 | 2016-05-11T08:50:20 | 2016-05-11T08:50:20 | 58,615,985 | 0 | 0 | null | 2016-05-12T06:54:07 | 2016-05-12T06:54:07 | null | UTF-8 | Python | false | false | 834 | py | # -*- coding: utf-8 -*-
# Copyright 2016 Mobicage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.1@@
from rogerthat.bizz.job.send_unread_messages import send
from google.appengine.ext import webapp
class UnreadMessageReminderHandler(webapp.RequestHandler):
    """Cron endpoint that kicks off the unread-messages reminder job."""
    def get(self):
        # dry_run=False: actually send the reminders, not a trial pass.
        send(dry_run=False)
| [
"bart@mobicage.com"
] | bart@mobicage.com |
87888c3d6040c0c56b092e7fcb48f9d5955572bc | 75156596d9a6385542ae11b88d059231445537fd | /apps/goods/views_base.py | f203c13e92255abac5bb86b2ce0d0507ed02a9de | [] | no_license | hupingan86/VueShop5 | aa224fb39c15abb866a1e038879bfc113e12651a | 64fddf1438a7eeaf033bd458641c3c45dd76ed89 | refs/heads/master | 2020-06-26T08:56:10.455018 | 2019-08-06T01:10:37 | 2019-08-06T01:10:37 | 199,589,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py | from django.views.generic.base import View
from goods.models import Goods
class GoodsListView(View):
    """Plain Django class-based view returning the first ten goods as JSON."""

    def get(self, request):
        """Serve the goods list via Django's serializer framework.

        (Building dicts field by field, or model_to_dict, also works, but
        django.core.serializers covers every field in a single call.)
        """
        import json
        from django.core import serializers
        from django.http import HttpResponse, JsonResponse

        goods = Goods.objects.all()[:10]
        # Serialize the queryset to a JSON string, then decode it back into
        # Python structures so JsonResponse can emit it as a JSON array.
        serialized = serializers.serialize('json', goods)
        payload = json.loads(serialized)
        # safe=False because the top-level object is a list, not a dict.
        return JsonResponse(payload, safe=False)
"406839815@qq.com"
] | 406839815@qq.com |
25622946d4cc694e63901dc2980ec2fa9f1ae137 | 57c62abd33f8b508e357ca8631a160ce85a7f340 | /ggNtuplizer/test/crab_submit/jobs/FullXsection_GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8/crab_FullXsection_GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8.py | 4470aec7aea4019d8df76db06409c83c17dfeaf4 | [] | no_license | jainshilpi/aNTGC_ggNtuplizer | 8973ce3cdab293317fd928679b14038f03c10976 | 7153d73fbee35969dad0d85c6517e577a0546566 | refs/heads/master | 2022-09-18T07:39:40.246699 | 2020-04-20T13:03:20 | 2020-04-20T13:03:20 | 267,979,045 | 1 | 1 | null | 2020-05-30T00:09:36 | 2020-05-30T00:09:36 | null | UTF-8 | Python | false | false | 2,178 | py | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
import sys
config = config()
#**************************submit function***********************
from CRABAPI.RawCommand import crabCommand
from CRABClient.ClientExceptions import ClientException
from httplib import HTTPException
def submit(config):
try:
crabCommand('submit', config = config)
except HTTPException as hte:
print "Failed submitting task: %s" % (hte.headers)
except ClientException as cle:
print "Failed submitting task: %s" % (cle)
#****************************************************************
workarea='/afs/cern.ch/work/m/mwadud/private/naTGC/CMSSW_9_4_13/src/ggAnalysis/ggNtuplizer/test/crab_submit/jobs/FullXsection_GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8/'
mainOutputDir = '/store/user/mwadud/aNTGC/ggNtuplizerSkim/xSecs/'
config.General.requestName = 'FullXsection_GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8'
config.General.transferLogs = True
config.General.workArea = '%s' % workarea
config.Site.storageSite = 'T2_US_Wisconsin'
config.Site.whitelist = ['T3_US_UCR','T3_US_FNALLPC','T2_US_Purdue','T3_US_Rice','T3_US_Cornell','T3_US_Rutgers','T3_US_FIU','T3_US_FIT','T3_US_PSC','T3_US_OSU','T3_US_TAMU','T3_US_UMD','T3_US_VC3_NotreDame','T3_US_SDSC','T3_US_Colorado','T3_US_OSG','T3_US_Princeton_ICSE','T3_US_NERSC','T3_US_Baylor','T2_US_Nebraska','T2_US_UCSD','T2_US_Wisconsin','T2_US_MIT','T3_US_TACC','T3_US_TTU','T3_US_UMiss']
config.Site.blacklist = ['T2_US_Florida','T2_US_Vanderbilt','T3_US_PuertoRico','T2_US_Caltech']
config.JobType.psetName = '/afs/cern.ch/work/m/mwadud/private/naTGC/CMSSW_9_4_13/src/ggAnalysis/ggNtuplizer/test/crab_submit/XsecAna.py'
config.JobType.pluginName = 'Analysis'
config.Data.inputDataset = '/GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM'
config.Data.publication = False
config.Data.allowNonValidInputDataset = True
config.Data.outLFNDirBase = '%s' % mainOutputDir
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 5000
config.Data.ignoreLocality = True
config.Data.totalUnits = 5000
submit(config)
| [
"abrar.discloses@gmail.com"
] | abrar.discloses@gmail.com |
1cdc35d465e2d36f6b9dbcee0ccaa1c9a68fe7fd | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_24852.py | 0c27ea11820885c9563e4852cbe27378470e68f3 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,839 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((536.102, 420.6, 619.247), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((531.774, 477.248, 575.871), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((530.591, 547.332, 531.073), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((574.999, 545.265, 662.572), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((514.88, 674.99, 390.318), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((525.726, 456.842, 592.226), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((525.401, 456.177, 592.771), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((497.945, 461.622, 593.485), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((489.47, 488.345, 593.387), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((466.432, 482.69, 608.386), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((441.086, 490.185, 617.892), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((442.367, 466.112, 632.426), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((535.76, 430.229, 594.197), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((346.573, 497.307, 666.033), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((381.477, 607.364, 500.136), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((381.477, 607.364, 500.136), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((405.039, 598.129, 513.244), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((428.199, 586.683, 525.425), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((450.137, 571.143, 535.615), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((468.197, 549.587, 541.645), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((482.793, 524.718, 543.984), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((492.835, 497.45, 546.677), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((294.216, 641.996, 625.095), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((686.947, 337.35, 479.808), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((507.234, 504.53, 513.028), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((507.234, 504.53, 513.028), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((521.843, 515.862, 534.197), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((548.917, 523.011, 539.825), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((554.226, 546.614, 556.007), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((575.468, 458.014, 640.709), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((531.826, 640.077, 475.472), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((550.624, 476.489, 597.036), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((550.813, 476.507, 597.159), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((558.797, 456.987, 578.122), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((536.994, 446.214, 563.08), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((508.395, 447.652, 561.121), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((480.361, 449.521, 566.859), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((456.185, 450.2, 582.433), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((438.957, 447.29, 605.431), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((484.207, 431.772, 535.719), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((394.025, 463.66, 680.011), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((525.627, 443.578, 519.064), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((534.371, 463.471, 533.759), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((554.566, 506.6, 566.828), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((573.12, 552.993, 594.966), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((623.089, 498.477, 625.518), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((562.76, 656.528, 609.552), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((551.659, 430.878, 536.446), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((563.589, 450.866, 520.852), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((585.763, 468.066, 518.671), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((600.138, 482.561, 499.402), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((588.319, 507.991, 497.819), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((576.134, 532.851, 502.381), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((551.519, 472.589, 551.155), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((600.964, 592.874, 453.45), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
285c836fd77ebda83be0479d34015c7eabb7ff57 | 1bce3d256aac17b7cd86a71a6892a69b19b9580c | /LogicTT/__init__.py | be8d570383fe793f536d063ab05e9f92960b29be | [
"MIT"
] | permissive | SpecialDude/LogicTT | 3fdaca97a6a9aeb04a1f216ef0843dafc93c5901 | 1e65127686eb0a5fa9b6c196d8620c4c6f3d0101 | refs/heads/main | 2023-06-29T00:04:39.422402 | 2021-08-03T22:12:31 | 2021-08-03T22:12:31 | 361,158,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | from . import TT | [
"noreply@github.com"
] | SpecialDude.noreply@github.com |
fb6a10611097d3cbf96cc746303990edd07beedb | f91eda66a21e7435cdda4cc3ddbdd49c04879bf2 | /back/src/products/config.py | 426622383f45ee2e6b9d326e1ce222ddc3551460 | [] | no_license | matheusangelo/flask-dockerize | 0b2cbe4609aa054d2c1b944ceb4f1732cf6fe0a1 | 45472f547864ea56e4b27ee0e6caa698a168d673 | refs/heads/master | 2022-09-07T20:43:03.647202 | 2020-05-31T18:26:11 | 2020-05-31T18:26:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | import os
from pymongo import MongoClient
# connection
mongo = MongoClient('mongodb://db')
# databases
db = mongo['Sample']
# collections
products_collection = db['products']
| [
"msilva@brasilseg.com.br"
] | msilva@brasilseg.com.br |
f806b32b55a9145c4c04c121ccedc5edfff7e060 | 632d7759536ed0726499c2d52c8eb13b5ab213ab | /Data/Packages/mdpopups/tests/validate_json_format.py | 0afbb2d170664281507ba611c0927e38799d1ae9 | [
"MIT"
] | permissive | Void2403/sublime_text_3_costomize | e660ad803eb12b20e9fa7f8eb7c6aad0f2b4d9bc | c19977e498bd948fd6d8f55bd48c8d82cbc317c3 | refs/heads/master | 2023-08-31T21:32:32.791574 | 2019-05-31T11:46:19 | 2019-05-31T11:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,661 | py | """
Validate JSON format.
Licensed under MIT
Copyright (c) 2012-2015 Isaac Muse <isaacmuse@gmail.com>
"""
import re
import codecs
import json
RE_LINE_PRESERVE = re.compile(r"\r?\n", re.MULTILINE)
RE_COMMENT = re.compile(
r'''(?x)
(?P<comments>
/\*[^*]*\*+(?:[^/*][^*]*\*+)*/ # multi-line comments
| [ \t]*//(?:[^\r\n])* # single line comments
)
| (?P<code>
"(?:\\.|[^"\\])*" # double quotes
| .[^/"']* # everything else
)
''',
re.DOTALL
)
RE_TRAILING_COMMA = re.compile(
r'''(?x)
(
(?P<square_comma>
, # trailing comma
(?P<square_ws>[\s\r\n]*) # white space
(?P<square_bracket>\]) # bracket
)
| (?P<curly_comma>
, # trailing comma
(?P<curly_ws>[\s\r\n]*) # white space
(?P<curly_bracket>\}) # bracket
)
)
| (?P<code>
"(?:\\.|[^"\\])*" # double quoted string
| .[^,"']* # everything else
)
''',
re.DOTALL
)
RE_LINE_INDENT_TAB = re.compile(r'^(?:(\t+)?(?:(/\*)|[^ \t\r\n])[^\r\n]*)?\r?\n$')
RE_LINE_INDENT_SPACE = re.compile(r'^(?:((?: {4})+)?(?:(/\*)|[^ \t\r\n])[^\r\n]*)?\r?\n$')
RE_TRAILING_SPACES = re.compile(r'^.*?[ \t]+\r?\n?$')
RE_COMMENT_END = re.compile(r'\*/')
PATTERN_COMMENT_INDENT_SPACE = r'^(%s *?[^\t\r\n][^\r\n]*)?\r?\n$'
PATTERN_COMMENT_INDENT_TAB = r'^(%s[ \t]*[^ \t\r\n][^\r\n]*)?\r?\n$'
E_MALFORMED = "E0"
E_COMMENTS = "E1"
E_COMMA = "E2"
W_NL_START = "W1"
W_NL_END = "W2"
W_INDENT = "W3"
W_TRAILING_SPACE = "W4"
W_COMMENT_INDENT = "W5"
VIOLATION_MSG = {
E_MALFORMED: 'JSON content is malformed.',
E_COMMENTS: 'Comments are not part of the JSON spec.',
E_COMMA: 'Dangling comma found.',
W_NL_START: 'Unnecessary newlines at the start of file.',
W_NL_END: 'Missing a new line at the end of the file.',
W_INDENT: 'Indentation Error.',
W_TRAILING_SPACE: 'Trailing whitespace.',
W_COMMENT_INDENT: 'Comment Indentation Error.'
}
class CheckJsonFormat(object):
"""
Test JSON for format irregularities.
- Trailing spaces.
- Inconsistent indentation.
- New lines at end of file.
- Unnecessary newlines at start of file.
- Trailing commas.
- Malformed JSON.
"""
def __init__(self, use_tabs=False, allow_comments=False):
"""Setup the settings."""
self.use_tabs = use_tabs
self.allow_comments = allow_comments
self.fail = False
def index_lines(self, text):
"""Index the char range of each line."""
self.line_range = []
count = 1
last = 0
for m in re.finditer('\n', text):
self.line_range.append((last, m.end(0) - 1, count))
last = m.end(0)
count += 1
def get_line(self, pt):
"""Get the line from char index."""
line = None
for r in self.line_range:
if pt >= r[0] and pt <= r[1]:
line = r[2]
break
return line
def check_comments(self, text):
"""
Check for JavaScript comments.
Log them and strip them out so we can continue.
"""
def remove_comments(group):
return ''.join([x[0] for x in RE_LINE_PRESERVE.findall(group)])
def evaluate(m):
text = ''
g = m.groupdict()
if g["code"] is None:
if not self.allow_comments:
self.log_failure(E_COMMENTS, self.get_line(m.start(0)))
text = remove_comments(g["comments"])
else:
text = g["code"]
return text
content = ''.join(map(lambda m: evaluate(m), RE_COMMENT.finditer(text)))
return content
def check_dangling_commas(self, text):
"""
Check for dangling commas.
Log them and strip them out so we can continue.
"""
def check_comma(g, m, line):
# ,] -> ] or ,} -> }
self.log_failure(E_COMMA, line)
if g["square_comma"] is not None:
return g["square_ws"] + g["square_bracket"]
else:
return g["curly_ws"] + g["curly_bracket"]
def evaluate(m):
g = m.groupdict()
return check_comma(g, m, self.get_line(m.start(0))) if g["code"] is None else g["code"]
return ''.join(map(lambda m: evaluate(m), RE_TRAILING_COMMA.finditer(text)))
def log_failure(self, code, line=None):
"""
Log failure.
Log failure code, line number (if available) and message.
"""
if line:
print("%s: Line %d - %s" % (code, line, VIOLATION_MSG[code]))
else:
print("%s: %s" % (code, VIOLATION_MSG[code]))
self.fail = True
def check_format(self, file_name):
"""Initiate the check."""
self.fail = False
comment_align = None
with codecs.open(file_name, encoding='utf-8') as f:
count = 1
for line in f:
indent_match = (RE_LINE_INDENT_TAB if self.use_tabs else RE_LINE_INDENT_SPACE).match(line)
end_comment = (
(comment_align is not None or (indent_match and indent_match.group(2))) and
RE_COMMENT_END.search(line)
)
# Don't allow empty lines at file start.
if count == 1 and line.strip() == '':
self.log_failure(W_NL_START, count)
# Line must end in new line
if not line.endswith('\n'):
self.log_failure(W_NL_END, count)
# Trailing spaces
if RE_TRAILING_SPACES.match(line):
self.log_failure(W_TRAILING_SPACE, count)
# Handle block comment content indentation
if comment_align is not None:
if comment_align.match(line) is None:
self.log_failure(W_COMMENT_INDENT, count)
if end_comment:
comment_align = None
# Handle general indentation
elif indent_match is None:
self.log_failure(W_INDENT, count)
# Enter into block comment
elif comment_align is None and indent_match.group(2):
alignment = indent_match.group(1) if indent_match.group(1) is not None else ""
if not end_comment:
comment_align = re.compile(
(PATTERN_COMMENT_INDENT_TAB if self.use_tabs else PATTERN_COMMENT_INDENT_SPACE) % alignment
)
count += 1
f.seek(0)
text = f.read()
self.index_lines(text)
text = self.check_comments(text)
self.index_lines(text)
text = self.check_dangling_commas(text)
try:
json.loads(text)
except Exception as e:
self.log_failure(E_MALFORMED)
print(e)
return self.fail
if __name__ == "__main__":
import sys
cjf = CheckJsonFormat(False, True)
cjf.check_format(sys.argv[1])
| [
"guan2296107714@126.com"
] | guan2296107714@126.com |
fdbb4c570b879191cf9237843bc61dbaad27ada4 | f473b827edf903b7e02c4a8a1968310acbcc07e3 | /Tools/Scripts/webkitpy/benchmark_runner/browser_driver/browser_driver.py | 437a8cbcd38ae146741e94b77710530cc62e0dc8 | [] | no_license | raghavl78/webkit | 3e5e8563c164094451284c61eaa79a116d3bbd5e | 74b8aaf525ec4961324c528336963795a8f3624e | refs/heads/master | 2023-02-24T06:12:26.370924 | 2015-06-22T14:32:48 | 2015-06-22T14:32:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | #!/usr/bin/env python
import abc
class BrowserDriver(object):
@abc.abstractmethod
def prepareEnv(self, deviceID):
pass
@abc.abstractmethod
def launchUrl(self, url, browserBuildPath=None):
pass
@abc.abstractmethod
def closeBrowser(self):
pass
| [
"commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc"
] | commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc |
dede3dc0563b1336d20fdf7f164822132c1bf9be | 2ed86a79d0fcd299ad4a01310954c5eddcf01edf | /homeassistant/components/zha/climate.py | 9f999bd52fa561f770e24b9319954d8356a8b231 | [
"Apache-2.0"
] | permissive | konnected-io/home-assistant | 037f12c87bb79e19220192eb918e49db1b1a8b3e | 2e65b77b2b5c17919939481f327963abdfdc53f0 | refs/heads/dev | 2023-05-11T08:57:41.891518 | 2023-05-07T20:03:37 | 2023-05-07T20:03:37 | 109,931,626 | 24 | 10 | Apache-2.0 | 2023-02-22T06:24:01 | 2017-11-08T05:27:21 | Python | UTF-8 | Python | false | false | 29,742 | py | """Climate on Zigbee Home Automation networks.
For more details on this platform, please refer to the documentation
at https://home-assistant.io/components/zha.climate/
"""
from __future__ import annotations
from datetime import datetime, timedelta
import functools
from random import randint
from typing import Any
from zigpy.zcl.clusters.hvac import Fan as F, Thermostat as T
from homeassistant.components.climate import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
FAN_AUTO,
FAN_ON,
PRESET_AWAY,
PRESET_BOOST,
PRESET_COMFORT,
PRESET_ECO,
PRESET_NONE,
ClimateEntity,
ClimateEntityFeature,
HVACAction,
HVACMode,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_TENTHS,
Platform,
UnitOfTemperature,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_time_interval
import homeassistant.util.dt as dt_util
from .core import discovery
from .core.const import (
CLUSTER_HANDLER_FAN,
CLUSTER_HANDLER_THERMOSTAT,
DATA_ZHA,
PRESET_COMPLEX,
PRESET_SCHEDULE,
PRESET_TEMP_MANUAL,
SIGNAL_ADD_ENTITIES,
SIGNAL_ATTR_UPDATED,
)
from .core.registries import ZHA_ENTITIES
from .entity import ZhaEntity
ATTR_SYS_MODE = "system_mode"
ATTR_RUNNING_MODE = "running_mode"
ATTR_SETPT_CHANGE_SRC = "setpoint_change_source"
ATTR_SETPT_CHANGE_AMT = "setpoint_change_amount"
ATTR_OCCUPANCY = "occupancy"
ATTR_PI_COOLING_DEMAND = "pi_cooling_demand"
ATTR_PI_HEATING_DEMAND = "pi_heating_demand"
ATTR_OCCP_COOL_SETPT = "occupied_cooling_setpoint"
ATTR_OCCP_HEAT_SETPT = "occupied_heating_setpoint"
ATTR_UNOCCP_HEAT_SETPT = "unoccupied_heating_setpoint"
ATTR_UNOCCP_COOL_SETPT = "unoccupied_cooling_setpoint"
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, Platform.CLIMATE)
MULTI_MATCH = functools.partial(ZHA_ENTITIES.multipass_match, Platform.CLIMATE)
RUNNING_MODE = {0x00: HVACMode.OFF, 0x03: HVACMode.COOL, 0x04: HVACMode.HEAT}
SEQ_OF_OPERATION = {
0x00: [HVACMode.OFF, HVACMode.COOL], # cooling only
0x01: [HVACMode.OFF, HVACMode.COOL], # cooling with reheat
0x02: [HVACMode.OFF, HVACMode.HEAT], # heating only
0x03: [HVACMode.OFF, HVACMode.HEAT], # heating with reheat
# cooling and heating 4-pipes
0x04: [HVACMode.OFF, HVACMode.HEAT_COOL, HVACMode.COOL, HVACMode.HEAT],
# cooling and heating 4-pipes
0x05: [HVACMode.OFF, HVACMode.HEAT_COOL, HVACMode.COOL, HVACMode.HEAT],
0x06: [HVACMode.COOL, HVACMode.HEAT, HVACMode.OFF], # centralite specific
0x07: [HVACMode.HEAT_COOL, HVACMode.OFF], # centralite specific
}
HVAC_MODE_2_SYSTEM = {
HVACMode.OFF: T.SystemMode.Off,
HVACMode.HEAT_COOL: T.SystemMode.Auto,
HVACMode.COOL: T.SystemMode.Cool,
HVACMode.HEAT: T.SystemMode.Heat,
HVACMode.FAN_ONLY: T.SystemMode.Fan_only,
HVACMode.DRY: T.SystemMode.Dry,
}
SYSTEM_MODE_2_HVAC = {
T.SystemMode.Off: HVACMode.OFF,
T.SystemMode.Auto: HVACMode.HEAT_COOL,
T.SystemMode.Cool: HVACMode.COOL,
T.SystemMode.Heat: HVACMode.HEAT,
T.SystemMode.Emergency_Heating: HVACMode.HEAT,
T.SystemMode.Pre_cooling: HVACMode.COOL, # this is 'precooling'. is it the same?
T.SystemMode.Fan_only: HVACMode.FAN_ONLY,
T.SystemMode.Dry: HVACMode.DRY,
T.SystemMode.Sleep: HVACMode.OFF,
}
ZCL_TEMP = 100
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Zigbee Home Automation sensor from config entry."""
entities_to_create = hass.data[DATA_ZHA][Platform.CLIMATE]
unsub = async_dispatcher_connect(
hass,
SIGNAL_ADD_ENTITIES,
functools.partial(
discovery.async_add_entities, async_add_entities, entities_to_create
),
)
config_entry.async_on_unload(unsub)
@MULTI_MATCH(
cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
aux_cluster_handlers=CLUSTER_HANDLER_FAN,
stop_on_match_group=CLUSTER_HANDLER_THERMOSTAT,
)
class Thermostat(ZhaEntity, ClimateEntity):
"""Representation of a ZHA Thermostat device."""
DEFAULT_MAX_TEMP = 35
DEFAULT_MIN_TEMP = 7
_attr_precision = PRECISION_TENTHS
_attr_temperature_unit = UnitOfTemperature.CELSIUS
_attr_name: str = "Thermostat"
def __init__(self, unique_id, zha_device, cluster_handlers, **kwargs):
"""Initialize ZHA Thermostat instance."""
super().__init__(unique_id, zha_device, cluster_handlers, **kwargs)
self._thrm = self.cluster_handlers.get(CLUSTER_HANDLER_THERMOSTAT)
self._preset = PRESET_NONE
self._presets = []
self._supported_flags = ClimateEntityFeature.TARGET_TEMPERATURE
self._fan = self.cluster_handlers.get(CLUSTER_HANDLER_FAN)
@property
def current_temperature(self):
"""Return the current temperature."""
if self._thrm.local_temperature is None:
return None
return self._thrm.local_temperature / ZCL_TEMP
@property
def extra_state_attributes(self):
"""Return device specific state attributes."""
data = {}
if self.hvac_mode:
mode = SYSTEM_MODE_2_HVAC.get(self._thrm.system_mode, "unknown")
data[ATTR_SYS_MODE] = f"[{self._thrm.system_mode}]/{mode}"
if self._thrm.occupancy is not None:
data[ATTR_OCCUPANCY] = self._thrm.occupancy
if self._thrm.occupied_cooling_setpoint is not None:
data[ATTR_OCCP_COOL_SETPT] = self._thrm.occupied_cooling_setpoint
if self._thrm.occupied_heating_setpoint is not None:
data[ATTR_OCCP_HEAT_SETPT] = self._thrm.occupied_heating_setpoint
if self._thrm.pi_heating_demand is not None:
data[ATTR_PI_HEATING_DEMAND] = self._thrm.pi_heating_demand
if self._thrm.pi_cooling_demand is not None:
data[ATTR_PI_COOLING_DEMAND] = self._thrm.pi_cooling_demand
unoccupied_cooling_setpoint = self._thrm.unoccupied_cooling_setpoint
if unoccupied_cooling_setpoint is not None:
data[ATTR_UNOCCP_COOL_SETPT] = unoccupied_cooling_setpoint
unoccupied_heating_setpoint = self._thrm.unoccupied_heating_setpoint
if unoccupied_heating_setpoint is not None:
data[ATTR_UNOCCP_HEAT_SETPT] = unoccupied_heating_setpoint
return data
@property
def fan_mode(self) -> str | None:
"""Return current FAN mode."""
if self._thrm.running_state is None:
return FAN_AUTO
if self._thrm.running_state & (
T.RunningState.Fan_State_On
| T.RunningState.Fan_2nd_Stage_On
| T.RunningState.Fan_3rd_Stage_On
):
return FAN_ON
return FAN_AUTO
@property
def fan_modes(self) -> list[str] | None:
"""Return supported FAN modes."""
if not self._fan:
return None
return [FAN_AUTO, FAN_ON]
@property
def hvac_action(self) -> HVACAction | None:
"""Return the current HVAC action."""
if (
self._thrm.pi_heating_demand is None
and self._thrm.pi_cooling_demand is None
):
return self._rm_rs_action
return self._pi_demand_action
@property
def _rm_rs_action(self) -> HVACAction | None:
"""Return the current HVAC action based on running mode and running state."""
if (running_state := self._thrm.running_state) is None:
return None
if running_state & (
T.RunningState.Heat_State_On | T.RunningState.Heat_2nd_Stage_On
):
return HVACAction.HEATING
if running_state & (
T.RunningState.Cool_State_On | T.RunningState.Cool_2nd_Stage_On
):
return HVACAction.COOLING
if running_state & (
T.RunningState.Fan_State_On
| T.RunningState.Fan_2nd_Stage_On
| T.RunningState.Fan_3rd_Stage_On
):
return HVACAction.FAN
if running_state & T.RunningState.Idle:
return HVACAction.IDLE
if self.hvac_mode != HVACMode.OFF:
return HVACAction.IDLE
return HVACAction.OFF
@property
def _pi_demand_action(self) -> HVACAction | None:
"""Return the current HVAC action based on pi_demands."""
heating_demand = self._thrm.pi_heating_demand
if heating_demand is not None and heating_demand > 0:
return HVACAction.HEATING
cooling_demand = self._thrm.pi_cooling_demand
if cooling_demand is not None and cooling_demand > 0:
return HVACAction.COOLING
if self.hvac_mode != HVACMode.OFF:
return HVACAction.IDLE
return HVACAction.OFF
@property
def hvac_mode(self) -> HVACMode | None:
"""Return HVAC operation mode."""
return SYSTEM_MODE_2_HVAC.get(self._thrm.system_mode)
@property
def hvac_modes(self) -> list[HVACMode]:
"""Return the list of available HVAC operation modes."""
return SEQ_OF_OPERATION.get(self._thrm.ctrl_sequence_of_oper, [HVACMode.OFF])
@property
def preset_mode(self) -> str:
"""Return current preset mode."""
return self._preset
@property
def preset_modes(self) -> list[str] | None:
"""Return supported preset modes."""
return self._presets
@property
def supported_features(self) -> ClimateEntityFeature:
"""Return the list of supported features."""
features = self._supported_flags
if HVACMode.HEAT_COOL in self.hvac_modes:
features |= ClimateEntityFeature.TARGET_TEMPERATURE_RANGE
if self._fan is not None:
self._supported_flags |= ClimateEntityFeature.FAN_MODE
return features
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
temp = None
if self.hvac_mode == HVACMode.COOL:
if self.preset_mode == PRESET_AWAY:
temp = self._thrm.unoccupied_cooling_setpoint
else:
temp = self._thrm.occupied_cooling_setpoint
elif self.hvac_mode == HVACMode.HEAT:
if self.preset_mode == PRESET_AWAY:
temp = self._thrm.unoccupied_heating_setpoint
else:
temp = self._thrm.occupied_heating_setpoint
if temp is None:
return temp
return round(temp / ZCL_TEMP, 1)
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
if self.hvac_mode != HVACMode.HEAT_COOL:
return None
if self.preset_mode == PRESET_AWAY:
temp = self._thrm.unoccupied_cooling_setpoint
else:
temp = self._thrm.occupied_cooling_setpoint
if temp is None:
return temp
return round(temp / ZCL_TEMP, 1)
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self.hvac_mode != HVACMode.HEAT_COOL:
return None
if self.preset_mode == PRESET_AWAY:
temp = self._thrm.unoccupied_heating_setpoint
else:
temp = self._thrm.occupied_heating_setpoint
if temp is None:
return temp
return round(temp / ZCL_TEMP, 1)
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
temps = []
if HVACMode.HEAT in self.hvac_modes:
temps.append(self._thrm.max_heat_setpoint_limit)
if HVACMode.COOL in self.hvac_modes:
temps.append(self._thrm.max_cool_setpoint_limit)
if not temps:
return self.DEFAULT_MAX_TEMP
return round(max(temps) / ZCL_TEMP, 1)
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
temps = []
if HVACMode.HEAT in self.hvac_modes:
temps.append(self._thrm.min_heat_setpoint_limit)
if HVACMode.COOL in self.hvac_modes:
temps.append(self._thrm.min_cool_setpoint_limit)
if not temps:
return self.DEFAULT_MIN_TEMP
return round(min(temps) / ZCL_TEMP, 1)
    async def async_added_to_hass(self) -> None:
        """Run when about to be added to hass."""
        await super().async_added_to_hass()
        # Route thermostat-cluster attribute reports to our handler so the
        # entity state stays in sync with the device.
        self.async_accept_signal(
            self._thrm, SIGNAL_ATTR_UPDATED, self.async_attribute_updated
        )
    async def async_attribute_updated(self, record):
        """Handle attribute update from device.

        Clears the AWAY preset when an "occupied" setpoint report suggests
        occupancy has returned, then refreshes the HA state.
        """
        if (
            record.attr_name in (ATTR_OCCP_COOL_SETPT, ATTR_OCCP_HEAT_SETPT)
            and self.preset_mode == PRESET_AWAY
        ):
            # occupancy attribute is an unreportable attribute, but if we get
            # an attribute update for an "occupied" setpoint, there's a chance
            # occupancy has changed
            if await self._thrm.get_occupancy() is True:
                self._preset = PRESET_NONE
        self.debug("Attribute '%s' = %s update", record.attr_name, record.value)
        self.async_write_ha_state()
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set fan mode."""
if not self.fan_modes or fan_mode not in self.fan_modes:
self.warning("Unsupported '%s' fan mode", fan_mode)
return
if fan_mode == FAN_ON:
mode = F.FanMode.On
else:
mode = F.FanMode.Auto
await self._fan.async_set_speed(mode)
async def async_set_hvac_mode(self, hvac_mode: HVACMode) -> None:
"""Set new target operation mode."""
if hvac_mode not in self.hvac_modes:
self.warning(
"can't set '%s' mode. Supported modes are: %s",
hvac_mode,
self.hvac_modes,
)
return
if await self._thrm.async_set_operation_mode(HVAC_MODE_2_SYSTEM[hvac_mode]):
self.async_write_ha_state()
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if not self.preset_modes or preset_mode not in self.preset_modes:
self.debug("Preset mode '%s' is not supported", preset_mode)
return
if self.preset_mode not in (
preset_mode,
PRESET_NONE,
) and not await self.async_preset_handler(self.preset_mode, enable=False):
self.debug("Couldn't turn off '%s' preset", self.preset_mode)
return
if preset_mode != PRESET_NONE and not await self.async_preset_handler(
preset_mode, enable=True
):
self.debug("Couldn't turn on '%s' preset", preset_mode)
return
self._preset = preset_mode
self.async_write_ha_state()
async def async_set_temperature(self, **kwargs: Any) -> None:
"""Set new target temperature."""
low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
temp = kwargs.get(ATTR_TEMPERATURE)
hvac_mode = kwargs.get(ATTR_HVAC_MODE)
if hvac_mode is not None:
await self.async_set_hvac_mode(hvac_mode)
thrm = self._thrm
if self.hvac_mode == HVACMode.HEAT_COOL:
success = True
if low_temp is not None:
low_temp = int(low_temp * ZCL_TEMP)
success = success and await thrm.async_set_heating_setpoint(
low_temp, self.preset_mode == PRESET_AWAY
)
self.debug("Setting heating %s setpoint: %s", low_temp, success)
if high_temp is not None:
high_temp = int(high_temp * ZCL_TEMP)
success = success and await thrm.async_set_cooling_setpoint(
high_temp, self.preset_mode == PRESET_AWAY
)
self.debug("Setting cooling %s setpoint: %s", low_temp, success)
elif temp is not None:
temp = int(temp * ZCL_TEMP)
if self.hvac_mode == HVACMode.COOL:
success = await thrm.async_set_cooling_setpoint(
temp, self.preset_mode == PRESET_AWAY
)
elif self.hvac_mode == HVACMode.HEAT:
success = await thrm.async_set_heating_setpoint(
temp, self.preset_mode == PRESET_AWAY
)
else:
self.debug("Not setting temperature for '%s' mode", self.hvac_mode)
return
else:
self.debug("incorrect %s setting for '%s' mode", kwargs, self.hvac_mode)
return
if success:
self.async_write_ha_state()
    async def async_preset_handler(self, preset: str, enable: bool = False) -> bool:
        """Set the preset mode via handler.

        Dispatches to a subclass coroutine named
        ``async_preset_handler_<preset>`` (e.g. ``async_preset_handler_away``);
        ``getattr`` raises AttributeError when no such handler exists.
        """
        handler = getattr(self, f"async_preset_handler_{preset}")
        return await handler(enable)
@MULTI_MATCH(
    cluster_handler_names={CLUSTER_HANDLER_THERMOSTAT, "sinope_manufacturer_specific"},
    manufacturers="Sinope Technologies",
    stop_on_match_group=CLUSTER_HANDLER_THERMOSTAT,
)
class SinopeTechnologiesThermostat(Thermostat):
    """Sinope Technologies Thermostat."""
    # Zigbee manufacturer code used for manufacturer-specific writes.
    manufacturer = 0x119C
    # Interval for refreshing the device clock; randomized per process —
    # presumably to stagger periodic writes across devices (TODO confirm).
    update_time_interval = timedelta(minutes=randint(45, 75))
    def __init__(self, unique_id, zha_device, cluster_handlers, **kwargs):
        """Initialize ZHA Thermostat instance."""
        super().__init__(unique_id, zha_device, cluster_handlers, **kwargs)
        self._presets = [PRESET_AWAY, PRESET_NONE]
        self._supported_flags |= ClimateEntityFeature.PRESET_MODE
        # Manufacturer-specific cluster handler used for time updates below.
        self._manufacturer_ch = self.cluster_handlers["sinope_manufacturer_specific"]
    @property
    def _rm_rs_action(self) -> HVACAction:
        """Return the current HVAC action based on running mode and running state."""
        running_mode = self._thrm.running_mode
        if running_mode == T.SystemMode.Heat:
            return HVACAction.HEATING
        if running_mode == T.SystemMode.Cool:
            return HVACAction.COOLING
        # Not actively heating/cooling: report FAN when any fan stage bit is set.
        running_state = self._thrm.running_state
        if running_state and running_state & (
            T.RunningState.Fan_State_On
            | T.RunningState.Fan_2nd_Stage_On
            | T.RunningState.Fan_3rd_Stage_On
        ):
            return HVACAction.FAN
        # Entity is on but the device reports Off -> idle; otherwise off.
        if self.hvac_mode != HVACMode.OFF and running_mode == T.SystemMode.Off:
            return HVACAction.IDLE
        return HVACAction.OFF
    @callback
    def _async_update_time(self, timestamp=None) -> None:
        """Update thermostat's time display."""
        # Seconds since 2000-01-01 in local naive time, as the device expects.
        secs_2k = (
            dt_util.now().replace(tzinfo=None) - datetime(2000, 1, 1, 0, 0, 0, 0)
        ).total_seconds()
        self.debug("Updating time: %s", secs_2k)
        self._manufacturer_ch.cluster.create_catching_task(
            self._manufacturer_ch.cluster.write_attributes(
                {"secs_since_2k": secs_2k}, manufacturer=self.manufacturer
            )
        )
    async def async_added_to_hass(self) -> None:
        """Run when about to be added to Hass."""
        await super().async_added_to_hass()
        # Keep the device clock fresh; the timer is cancelled on removal.
        self.async_on_remove(
            async_track_time_interval(
                self.hass, self._async_update_time, self.update_time_interval
            )
        )
        self._async_update_time()
    async def async_preset_handler_away(self, is_away: bool = False) -> bool:
        """Set occupancy."""
        # set_occupancy: 0 = away (unoccupied), 1 = home (occupied).
        mfg_code = self._zha_device.manufacturer_code
        res = await self._thrm.write_attributes(
            {"set_occupancy": 0 if is_away else 1}, manufacturer=mfg_code
        )
        self.debug("set occupancy to %s. Status: %s", 0 if is_away else 1, res)
        return res
@MULTI_MATCH(
    cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
    aux_cluster_handlers=CLUSTER_HANDLER_FAN,
    manufacturers={"Zen Within", "LUX"},
    stop_on_match_group=CLUSTER_HANDLER_THERMOSTAT,
)
# Behavior is fully inherited from Thermostat; this subclass exists only to
# scope the registry match above to Zen Within / LUX devices.
class ZenWithinThermostat(Thermostat):
    """Zen Within Thermostat implementation."""
@MULTI_MATCH(
    cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
    aux_cluster_handlers=CLUSTER_HANDLER_FAN,
    manufacturers="Centralite",
    models={"3157100", "3157100-E"},
    stop_on_match_group=CLUSTER_HANDLER_THERMOSTAT,
)
# Inherits all behavior from ZenWithinThermostat; only the match rules differ.
class CentralitePearl(ZenWithinThermostat):
    """Centralite Pearl Thermostat implementation."""
@STRICT_MATCH(
    cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
    manufacturers={
        "_TZE200_ckud7u2l",
        "_TZE200_ywdxldoj",
        "_TZE200_cwnjrr72",
        "_TZE200_2atgpdho",
        "_TZE200_pvvbommb",
        "_TZE200_4eeyebrt",
        "_TZE200_cpmgn2cf",
        "_TZE200_9sfg7gm0",
        "_TZE200_8whxpsiw",
        "_TYST11_ckud7u2l",
        "_TYST11_ywdxldoj",
        "_TYST11_cwnjrr72",
        "_TYST11_2atgpdho",
    },
)
class MoesThermostat(Thermostat):
    """Moes Thermostat implementation."""

    # Device "operation_preset" attribute value -> HA preset name.
    _PRESET_FOR_VALUE = {
        0: PRESET_AWAY,
        1: PRESET_SCHEDULE,
        2: PRESET_NONE,
        3: PRESET_COMFORT,
        4: PRESET_ECO,
        5: PRESET_BOOST,
        6: PRESET_COMPLEX,
    }
    # HA preset name -> value written when enabling that preset. PRESET_NONE
    # is intentionally absent: disabling any preset writes value 2 instead.
    _VALUE_FOR_PRESET = {
        PRESET_AWAY: 0,
        PRESET_SCHEDULE: 1,
        PRESET_COMFORT: 3,
        PRESET_ECO: 4,
        PRESET_BOOST: 5,
        PRESET_COMPLEX: 6,
    }

    def __init__(self, unique_id, zha_device, cluster_handlers, **kwargs):
        """Initialize ZHA Thermostat instance."""
        super().__init__(unique_id, zha_device, cluster_handlers, **kwargs)
        self._presets = [
            PRESET_NONE,
            PRESET_AWAY,
            PRESET_SCHEDULE,
            PRESET_COMFORT,
            PRESET_ECO,
            PRESET_BOOST,
            PRESET_COMPLEX,
        ]
        self._supported_flags |= ClimateEntityFeature.PRESET_MODE

    @property
    def hvac_modes(self) -> list[HVACMode]:
        """Return only the heat mode, because the device can't be turned off."""
        return [HVACMode.HEAT]

    async def async_attribute_updated(self, record):
        """Handle attribute update from device."""
        # Table lookup replaces a seven-branch if chain; unknown values leave
        # the current preset untouched, exactly as before.
        if record.attr_name == "operation_preset":
            preset = self._PRESET_FOR_VALUE.get(record.value)
            if preset is not None:
                self._preset = preset
        await super().async_attribute_updated(record)

    async def async_preset_handler(self, preset: str, enable: bool = False) -> bool:
        """Set the preset mode."""
        mfg_code = self._zha_device.manufacturer_code
        if not enable:
            # Disabling any preset returns the device to manual (value 2).
            return await self._thrm.write_attributes(
                {"operation_preset": 2}, manufacturer=mfg_code
            )
        value = self._VALUE_FOR_PRESET.get(preset)
        if value is None:
            # Unknown preset: report failure, as the original if chain did.
            return False
        return await self._thrm.write_attributes(
            {"operation_preset": value}, manufacturer=mfg_code
        )
@STRICT_MATCH(
    cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
    manufacturers={
        "_TZE200_b6wax7g0",
    },
)
class BecaThermostat(Thermostat):
    """Beca Thermostat implementation."""

    # Device "operation_preset" attribute value -> HA preset name.
    # Values 3 and 6 are not reported by this device.
    _PRESET_FOR_VALUE = {
        0: PRESET_AWAY,
        1: PRESET_SCHEDULE,
        2: PRESET_NONE,
        4: PRESET_ECO,
        5: PRESET_BOOST,
        7: PRESET_TEMP_MANUAL,
    }
    # HA preset name -> value written when enabling that preset. PRESET_NONE
    # is intentionally absent: disabling any preset writes value 2 instead.
    _VALUE_FOR_PRESET = {
        PRESET_AWAY: 0,
        PRESET_SCHEDULE: 1,
        PRESET_ECO: 4,
        PRESET_BOOST: 5,
        PRESET_TEMP_MANUAL: 7,
    }

    def __init__(self, unique_id, zha_device, cluster_handlers, **kwargs):
        """Initialize ZHA Thermostat instance."""
        super().__init__(unique_id, zha_device, cluster_handlers, **kwargs)
        self._presets = [
            PRESET_NONE,
            PRESET_AWAY,
            PRESET_SCHEDULE,
            PRESET_ECO,
            PRESET_BOOST,
            PRESET_TEMP_MANUAL,
        ]
        self._supported_flags |= ClimateEntityFeature.PRESET_MODE

    @property
    def hvac_modes(self) -> list[HVACMode]:
        """Return only the heat mode, because the device can't be turned off."""
        return [HVACMode.HEAT]

    async def async_attribute_updated(self, record):
        """Handle attribute update from device."""
        # Table lookup replaces the if chain; unknown values leave the
        # current preset untouched, exactly as before.
        if record.attr_name == "operation_preset":
            preset = self._PRESET_FOR_VALUE.get(record.value)
            if preset is not None:
                self._preset = preset
        await super().async_attribute_updated(record)

    async def async_preset_handler(self, preset: str, enable: bool = False) -> bool:
        """Set the preset mode."""
        mfg_code = self._zha_device.manufacturer_code
        if not enable:
            # Disabling any preset returns the device to manual (value 2).
            return await self._thrm.write_attributes(
                {"operation_preset": 2}, manufacturer=mfg_code
            )
        value = self._VALUE_FOR_PRESET.get(preset)
        if value is None:
            # Unknown preset: report failure, as the original if chain did.
            return False
        return await self._thrm.write_attributes(
            {"operation_preset": value}, manufacturer=mfg_code
        )
@MULTI_MATCH(
    cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
    manufacturers="Stelpro",
    models={"SORB"},
    stop_on_match_group=CLUSTER_HANDLER_THERMOSTAT,
)
class StelproFanHeater(Thermostat):
    """Stelpro Fan Heater implementation."""
    # Heat-only device: everything else is inherited from Thermostat.
    @property
    def hvac_modes(self) -> list[HVACMode]:
        """Return only the heat mode, because the device can't be turned off."""
        return [HVACMode.HEAT]
@STRICT_MATCH(
    cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
    manufacturers={
        "_TZE200_7yoranx2",
        "_TZE200_e9ba97vf",  # TV01-ZG
        "_TZE200_hue3yfsn",  # TV02-ZG
        "_TZE200_husqqvux",  # TSL-TRV-TV01ZG
        "_TZE200_kds0pmmv",  # MOES TRV TV02
        "_TZE200_kly8gjlz",  # TV05-ZG
        "_TZE200_lnbfnyxd",
        "_TZE200_mudxchsu",
    },
)
class ZONNSMARTThermostat(Thermostat):
    """ZONNSMART Thermostat implementation.
    Notice that this device uses two holiday presets (2: HolidayMode,
    3: HolidayModeTemp), but only one of them can be set.
    """

    PRESET_HOLIDAY = "holiday"
    PRESET_FROST = "frost protect"

    # Reported "operation_preset" value -> HA preset name. Both holiday
    # values (2 and 3) map onto the single holiday preset.
    _PRESET_FOR_VALUE = {
        0: PRESET_SCHEDULE,
        1: PRESET_NONE,
        2: PRESET_HOLIDAY,
        3: PRESET_HOLIDAY,
        4: PRESET_FROST,
    }
    # HA preset name -> value written when enabling; the holiday preset is
    # always written as 3 (HolidayModeTemp). Disabling writes 1 (none).
    _VALUE_FOR_PRESET = {
        PRESET_SCHEDULE: 0,
        PRESET_HOLIDAY: 3,
        PRESET_FROST: 4,
    }

    def __init__(self, unique_id, zha_device, cluster_handlers, **kwargs):
        """Initialize ZHA Thermostat instance."""
        super().__init__(unique_id, zha_device, cluster_handlers, **kwargs)
        self._presets = [
            PRESET_NONE,
            self.PRESET_HOLIDAY,
            PRESET_SCHEDULE,
            self.PRESET_FROST,
        ]
        self._supported_flags |= ClimateEntityFeature.PRESET_MODE

    async def async_attribute_updated(self, record):
        """Handle attribute update from device."""
        # Table lookup replaces the if chain; unknown values leave the
        # current preset untouched, exactly as before.
        if record.attr_name == "operation_preset":
            preset = self._PRESET_FOR_VALUE.get(record.value)
            if preset is not None:
                self._preset = preset
        await super().async_attribute_updated(record)

    async def async_preset_handler(self, preset: str, enable: bool = False) -> bool:
        """Set the preset mode."""
        mfg_code = self._zha_device.manufacturer_code
        if not enable:
            # Disabling any preset returns the device to "none" (value 1).
            return await self._thrm.write_attributes(
                {"operation_preset": 1}, manufacturer=mfg_code
            )
        value = self._VALUE_FOR_PRESET.get(preset)
        if value is None:
            # Unknown preset: report failure, as the original if chain did.
            return False
        return await self._thrm.write_attributes(
            {"operation_preset": value}, manufacturer=mfg_code
        )
| [
"noreply@github.com"
] | konnected-io.noreply@github.com |
e191dcd55943188856e0aa6d20abcb3ae22cd4d2 | c5698844e4c5cd6428d25f5a97a2f4ad069df251 | /twitter/publicar desde python/read.py | a394d4c896e493b5d9f689dc1751a7b77d468356 | [] | no_license | jrartd/Python-tools | 1ade026dcc9b3987bb7a6af130403895a8456d3c | 361031a2d108e048d267bf386a8a703359a81321 | refs/heads/master | 2022-12-21T23:38:53.038535 | 2018-02-09T18:18:10 | 2018-02-09T18:18:10 | 114,409,529 | 0 | 1 | null | 2022-12-12T09:18:07 | 2017-12-15T20:41:15 | HTML | UTF-8 | Python | false | false | 458 | py | from twitter import *
# Posts a single tweet whose text is read from stdin. Relies on
# `from twitter import *` (garbled into the preceding metadata line) for the
# Twitter and OAuth names.
#
# SECURITY: real OAuth credentials are hardcoded and committed here. They
# should be revoked and loaded from the environment or a config file instead.
access_token = "712533602102284288-QGxqYcFiQlGZGTaoNIgHgq2KZxqZeeH"
access_token_secret = "rlH5ItRHtlguzChQbIvLDo1yYCu47liEtq8fdVgeOZpb9"
consumer_key = "VWe4b0p7vRcVS06gbJyS83dIS"
consumer_secret = "PjkoSJ4YxPXo4V9Uk7bazq4y507e6zBr96q7u2OlJeP1aVZd7w"
# Prompt (Spanish): "Enter the text to tweet".
texto_tweet = input("Ingrese el texto a twittear")
t = Twitter(auth=OAuth(access_token, access_token_secret, consumer_key, consumer_secret))
t.statuses.update(status= texto_tweet)
| [
"you@example.com"
] | you@example.com |
a1590dd5a7d854d633c6cc4a59cd757b06b26e95 | 84c4474a88a59da1e72d86b33b5326003f578271 | /saleor/graphql/app/mutations/app_retry_install.py | 64faee9ee45caa39c2e77961854e66c1815f20c1 | [
"BSD-3-Clause"
] | permissive | vineetb/saleor | 052bd416d067699db774f06453d942cb36c5a4b7 | b0d5ec1a55f2ceeba6f62cf15f53faea0adf93f9 | refs/heads/main | 2023-07-20T02:01:28.338748 | 2023-07-17T06:05:36 | 2023-07-17T06:05:36 | 309,911,573 | 0 | 0 | NOASSERTION | 2020-11-04T06:32:55 | 2020-11-04T06:32:55 | null | UTF-8 | Python | false | false | 2,274 | py | import graphene
from django.core.exceptions import ValidationError
from ....app import models
from ....app.error_codes import AppErrorCode
from ....app.tasks import install_app_task
from ....core import JobStatus
from ....permission.enums import AppPermission
from ....webhook.event_types import WebhookEventAsyncType
from ...core import ResolveInfo
from ...core.mutations import ModelMutation
from ...core.types import AppError
from ...core.utils import WebhookEventInfo
from ..types import AppInstallation
class AppRetryInstall(ModelMutation):
    """Mutation that re-queues a previously failed app installation."""
    class Arguments:
        id = graphene.ID(description="ID of failed installation.", required=True)
        activate_after_installation = graphene.Boolean(
            default_value=True,
            required=False,
            description="Determine if app will be set active or not.",
        )
    class Meta:
        description = "Retry failed installation of new app."
        model = models.AppInstallation
        object_type = AppInstallation
        permissions = (AppPermission.MANAGE_APPS,)
        error_type_class = AppError
        error_type_field = "app_errors"
        webhook_events_info = [
            WebhookEventInfo(
                type=WebhookEventAsyncType.APP_INSTALLED,
                description="An app was installed.",
            ),
        ]
    @classmethod
    def save(cls, _info: ResolveInfo, instance, _cleaned_input, /):
        # Reset the job to PENDING so the install task will pick it up again.
        instance.status = JobStatus.PENDING
        instance.save()
    @classmethod
    def clean_instance(cls, _info: ResolveInfo, instance):
        # Only installations that previously FAILED may be retried.
        if instance.status != JobStatus.FAILED:
            msg = "Cannot retry installation with different status than failed."
            code = AppErrorCode.INVALID_STATUS.value
            raise ValidationError({"id": ValidationError(msg, code=code)})
    @classmethod
    def perform_mutation(cls, _root, info: ResolveInfo, /, **data):
        # Validate, reset to PENDING, then run the installation asynchronously
        # in a background worker.
        activate_after_installation = data.get("activate_after_installation")
        app_installation = cls.get_instance(info, **data)
        cls.clean_instance(info, app_installation)
        cls.save(info, app_installation, None)
        install_app_task.delay(app_installation.pk, activate_after_installation)
        return cls.success_response(app_installation)
| [
"noreply@github.com"
] | vineetb.noreply@github.com |
f6ff32206f96b23acbe579bc37f574b0be83c008 | 70045ba3b29d87d9d210b4757c6c46968841d977 | /p1.py | e05575a455d1780beab6a038db7ddfe930f7a617 | [] | no_license | jtarlecki/project_euler | 8ef05a5feaa949d73bac4ce06019ad3e90c1d420 | 7057997ef5195a2fc10062bb91d47eda4b40f7fa | refs/heads/master | 2021-01-22T19:44:53.474400 | 2015-09-21T03:12:42 | 2015-09-21T03:12:42 | 23,652,030 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | lim=1000
# Running total of the multiples found below the limit (Project Euler #1).
sum_mult=0
# Count numbers divisible by 3 or by 5.
multiples = [3,5]
def modtest(number, mults):
    """Return `number` when it is a multiple of any divisor in `mults`, else 0."""
    return number if any(number % divisor == 0 for divisor in mults) else 0
# Sum every multiple of 3 or 5 below `lim` (Project Euler problem 1).
for i in range(1, lim):
    sum_mult += modtest(i, multiples)
# Bug fix: `print sum_mult` is Python 2-only syntax; the call form works on
# both Python 2 and Python 3.
print(sum_mult)
| [
"jtarlecki@yahoo.com"
] | jtarlecki@yahoo.com |
f5e695d6725c6581db24a42d28a276bba108e8f3 | f663f5bedffdceca8d7884369f6daea91d4768b7 | /isdquantum/utils/binary.py | 451b063264c66b7b95a3a43e729fa3e5a4b56b75 | [] | no_license | simrit1/isd-quantum | 17500f74be99e80cb4668e7cf2559046686f5d97 | 33ee3b6e99530da358de6dcefdac9cebd3a93b83 | refs/heads/master | 2023-03-15T12:35:37.945753 | 2019-04-05T08:49:08 | 2019-04-05T08:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | import logging
from math import ceil, log
logger = logging.getLogger(__name__)
def check_enough_bits(a_int, bits):
    """Assert that `bits` is wide enough to represent a_int (see get_required_bits)."""
    needed = get_required_bits(a_int)
    assert bits >= needed, "Not enough bits."
def get_required_bits(*ints):
    """Return the number of bits needed to represent the largest of *ints*.

    Negative values are measured by magnitude. Raises ValueError (a subclass
    of the Exception previously raised) when called with no arguments.

    Uses int.bit_length() instead of ceil(log(...)), which is exact for
    arbitrarily large integers; the float log could round the wrong way for
    values near 2**53 and beyond.
    """
    if not ints:
        raise ValueError("number of ints must be greater than 0")
    to_check_int = ints[0] if len(ints) == 1 else max(ints)
    # For v >= 0, v.bit_length() == ceil(log2(v + 1)) exactly.
    return abs(to_check_int).bit_length()
# WARN: Returns 2's complement. If you want the negation of the bitstring
# representing i, you can use this method followed by the get_negated_bitstring
def get_bitstring_from_int(i, max_bits, littleEndian=False):
    """Return `i` as a two's-complement bitstring of width `max_bits`.

    Raises Exception when `i` does not fit in `max_bits` bits. Bug fix: a
    negative `i` below -(2**max_bits) previously slipped through bin() and
    produced a garbage string (e.g. 'b100'); it now raises like the
    positive-overflow path.
    """
    if i >= 0:
        value = i
    else:
        value = (1 << max_bits) + i
        if value < 0:
            raise Exception("more than max_bits")
    bits = bin(value)[2:].zfill(max_bits)
    if len(bits) > max_bits:
        raise Exception("more than max_bits")
    return bits[::-1] if littleEndian else bits
def get_bitarray_from_int(i, max_bits, littleEndian=False):
    """Return the bits of `i` as a list of 0/1 ints (see get_bitstring_from_int)."""
    bitstring = get_bitstring_from_int(i, max_bits, littleEndian)
    return [int(bit) for bit in bitstring]
# TODO now it works w/ both list and string, maybe change names
def get_negated_bistring(a_str):
    """Flip every '0' to '1' and '1' to '0' in `a_str` in a single C-level pass."""
    flip_table = str.maketrans('01', '10')
    return a_str.translate(flip_table)
def get_negated_bitarray(a_arr):
    """Flip each 0/1 entry of `a_arr`; anything else maps to None."""
    flipped = []
    for entry in a_arr:
        bit = int(entry)
        flipped.append(1 if bit == 0 else 0 if bit == 1 else None)
    return flipped
def get_int_from_bitstring(a_str, littleEndian=False):
    """Parse `a_str` as base 2; the string is reversed first when littleEndian."""
    bits = a_str[::-1] if littleEndian else a_str
    return int(bits, 2)
def get_int_from_bitarray(a_arr, littleEndian=False):
    """Convert a list of bits to an int.

    Bug fix: the `littleEndian` flag was previously accepted but silently
    ignored; it is now forwarded to get_int_from_bitstring.
    """
    return get_int_from_bitstring(''.join(str(e) for e in a_arr), littleEndian)
| [
"simone.perriello@protonmail.com"
] | simone.perriello@protonmail.com |
f638f56858b04ebec911d65ea5e21bc289feea28 | e75db05d6b5767f7d40b893b8febdcfaf1b9f28d | /run_cold+hot_f475w_Nie_20210103.py | 33d12bc6e22cb40fe94149957b2c2740fcecf13b | [] | no_license | Lu-Nie/PM | 46dbf425457eedd9d12f498b54e1acfe6240a3e6 | cc3449bafcbe976dd4c2e09941c570012a75997d | refs/heads/master | 2023-02-16T16:11:26.705508 | 2021-01-10T04:08:23 | 2021-01-10T04:08:23 | 327,254,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,343 | py | import os, subprocess, shlex, datetime, multiprocessing as mp, numpy as np, astropy.io.fits as pyfits
import pandas as pd
# Wall-clock start, reported at the end of the script.
start_t = datetime.datetime.now()
# NOTE(review): num_cores is computed but never used below.
num_cores = int(mp.cpu_count())
#print("The computer has " + str(num_cores) + " cores")
data_path, res_path = './data/', './res_test/'
# Grid of tile centers (image pixel coordinates) used to pan/crop with ds9.
X = np.arange(323.75,3824.25,647.5)
Y = np.arange(117.5,1191.5,235)
# Running tile counter; also used to name per-tile output files.
N = 0
# Per-tile merged cold+hot catalogs, concatenated after the loop.
table_all2 = []
outcat = res_path+'ds9b09f01f814w_comb.cat'
# For each tile center (x, y): crop the mosaic with ds9, run SExtractor twice
# (a conservative "cold" pass and an aggressive "hot" pass), drop hot
# detections that fall inside cold Kron ellipses, and merge segmentation maps.
for p in range(len(Y)):
    for q in range(len(X)):
        x = X[q]
        y = Y[p]
        n = str(N)
        os.system('cd '+data_path+'; ds9 -width 1360 -height 768 hlsp_phat_hst_acs-wfc_12057-m31-b09-f01_f814w_v1_drz.fits -zoom 2 -pan to %f %f image -saveimage ds9b09f01f814w%s.fits -exit' %(x,y,n))
        pref = 'ds9b09f01f814w'+ n #f475w
        detimage = data_path+pref+'.fits'
        image = data_path+pref+'.fits'
        cold, hot = 'ACS-WFC.Hfinal.rms.cold.Nie2020.sex', 'ACS-WFC.Hfinal.rms.rome.Nie2020.sex'
        coldcat, hotcat = res_path+pref+'cold'+n+'.cat', res_path+pref+'hot'+n+'.cat'
        coldseg, hotseg = res_path+pref+'segcold.fits', res_path+pref+'seghot.fits'
        coldaper, hotaper = res_path+pref+'apercold.fits', res_path+pref+'aperhot.fits'
        #outcat = res_path+pref+'comb.cat'
        #outcat = res_path+pref+'comb.cat'
        out_bad_cat = res_path+pref+'_badcomb.cat'
        outseg = res_path+pref+'segcomb.fits'
        outparam = 'default.param'
        gain = '2.0' # [ 7200, 5450, 7028, 18232, 5017.606, 8197, 5086, 5250 ] for
        # [ B, V, i, z, F098M, Y, J, H ]
        magzp = '26.05' # [ 25.673, 26.486, 25.654, 24.862, 25.68, 26.27, 26.25, 25.96 ] for
        # [ B, V, i, z, F098M, Y, J, H ]
        #seeing = '0.18'
        seeing = '0.09'
        #DETth_hot = '0.3'
        hotidtable = res_path+pref+'hotid.cat'
        #--------------------------------------
        # Run cold
        print('Running cold')
        os.system("sex "+detimage+","+image+" -c "+cold+" -CATALOG_NAME "+coldcat+" -CATALOG_TYPE ASCII "+\
        " -PARAMETERS_NAME "+outparam+" -WEIGHT_TYPE NONE,NONE -CHECKIMAGE_TYPE SEGMENTATION,APERTURES -CHECKIMAGE_NAME "+\
        coldseg+","+coldaper+" -GAIN "+gain+" -MAG_ZEROPOINT "+magzp+" -SEEING_FWHM "+seeing)
        # Run hot
        print('Running hot')
        os.system("sex "+detimage+","+image+" -c "+hot+" -CATALOG_NAME "+hotcat+" -CATALOG_TYPE ASCII "+\
        " -PARAMETERS_NAME "+outparam+" -WEIGHT_TYPE NONE,NONE -CHECKIMAGE_TYPE SEGMENTATION,APERTURES -CHECKIMAGE_NAME "+\
        hotseg+","+hotaper+" -GAIN "+gain+" -MAG_ZEROPOINT "+magzp+" -SEEING_FWHM "+seeing)
        #+" -DETECT_THRESH "+DETth_hot
        """
        # no check images
        # Run cold
        print('Running cold')
        os.system("sex "+detimage+","+image+" -c "+cold+" -CATALOG_NAME "+coldcat+" -CATALOG_TYPE ASCII "+\
        " -PARAMETERS_NAME "+outparam+" -WEIGHT_IMAGE "+detweight+","+weight+" -WEIGHT_TYPE MAP_RMS,MAP_RMS -CHECKIMAGE_TYPE NONE "+\
        " -GAIN "+gain+" -MAG_ZEROPOINT "+magzp+" -SEEING_FWHM "+seeing)
        # Run hot
        print('Running hot')
        os.system("sex "+detimage+","+image+" -c "+hot+" -CATALOG_NAME "+hotcat+" -CATALOG_TYPE ASCII "+\
        " -PARAMETERS_NAME "+outparam+" -WEIGHT_IMAGE "+detweight+","+weight+" -WEIGHT_TYPE MAP_RMS,MAP_RMS -CHECKIMAGE_TYPE NONE "+\
        " -GAIN "+gain+" -MAG_ZEROPOINT "+magzp+" -SEEING_FWHM "+seeing)
        """
        #--------------------------------------
        # Read hotcat and coldcat
        print('Read cold and hot catalogs')
        a = open(outparam,'r').read().split('\n')
        h = [item for item in a if item!='' and item[0]!='#']
        print(h)
        cold_table = np.genfromtxt(coldcat, names=h) # 22223
        idx_c = np.where(cold_table['KRON_RADIUS']==0)
        # NOTE(review): np.where returns a 1-tuple of index arrays, so
        # len(idx_c) is always 1 and this branch never runs — likely meant
        # len(idx_c[0]) > 0. TODO confirm.
        if len(idx_c) > 1 and len(cold_table['KRON_RADIUS'][idx_c]) > 0:
            cold_table['KRON_RADIUS'][idx_c] = np.median(cold_table['KRON_RADIUS'])
        #print(len(cold_table['KRON_RADIUS'][idx_c])) # 0
        hot_table = np.genfromtxt(hotcat, names=h) # 39428
        idx_h = np.where(hot_table['KRON_RADIUS']==0)
        if len(hot_table['KRON_RADIUS'][idx_h]) > 0:
            hot_table['KRON_RADIUS'][idx_h] = np.median(hot_table['KRON_RADIUS'])
        #print(len(hot_table['KRON_RADIUS'][idx_h])) # 62
        #--------------------------------------
        print('Including hot detections')
        # Kron-ellipse coefficients (CXX/CYY/CXY scaled by KRON_RADIUS^2).
        cold_cxx = cold_table['CXX_IMAGE'] / cold_table['KRON_RADIUS']**2 # 22223
        cold_cyy = cold_table['CYY_IMAGE'] / cold_table['KRON_RADIUS']**2
        cold_cxy = cold_table['CXY_IMAGE'] / cold_table['KRON_RADIUS']**2
        ncold = len(cold_table) # ncold = 22223
        hot_cxx = hot_table['CXX_IMAGE'] / hot_table['KRON_RADIUS']**2
        hot_cyy = hot_table['CYY_IMAGE'] / hot_table['KRON_RADIUS']**2
        hot_cxy = hot_table['CXY_IMAGE'] / hot_table['KRON_RADIUS']**2
        nhot = len(hot_table) # nhot = 39428
        hc = pyfits.open(coldseg)
        seghd, segim = hc[0].header, hc[0].data# [40500, 32400]
        hh = pyfits.open(hotseg)
        seghd_hot, segim_hot = hh[0].header, hh[0].data # [40500, 32400]
        #------------------------------------------
        # Keep only hot detections outside every cold source's Kron ellipse
        # (scaled by 1.1).
        for i in range(0, ncold): # range(0, ncold) # ncold = 22223
            print('N:', ncold, i, len(cold_cxx[i] * (hot_table['X_IMAGE'] - cold_table['X_IMAGE'][i])**2 + cold_cyy[i] * (hot_table['Y_IMAGE'] - cold_table['Y_IMAGE'][i])**2 + cold_cxy[i] * (hot_table['X_IMAGE'] - cold_table['X_IMAGE'][i]) * (hot_table['Y_IMAGE'] - cold_table['Y_IMAGE'][i])))
            idx = np.where( cold_cxx[i] * (hot_table['X_IMAGE'] - cold_table['X_IMAGE'][i])**2 + \
                cold_cyy[i] * (hot_table['Y_IMAGE'] - cold_table['Y_IMAGE'][i])**2 + \
                cold_cxy[i] * (hot_table['X_IMAGE'] - cold_table['X_IMAGE'][i]) * (hot_table['Y_IMAGE'] - cold_table['Y_IMAGE'][i]) > 1.1**2 )
            hot_table = hot_table[idx]
        print(len(hot_table)) # 14091
        #--------------------------------------
        # Read the segmentaiton images and add objects from hot segmentation map to cold segementation map,
        # but only at pixels where no object was defined in cold segmentation map. Then write result.
        print('Creating combined segmentation map')
        os.system('> '+hotidtable)
        fs = open(hotidtable, 'a')
        nhot = len(hot_table) # 14091
        # Renumber surviving hot sources after the highest cold NUMBER.
        off = np.max(cold_table['NUMBER']) + 1
        for i in range(0, nhot):
            print(nhot, i)
            idx = np.where(segim_hot == hot_table['NUMBER'][i])
            #print(len(idx), idx, segim[idx])
            # NOTE(review): len(idx) > 0 is always true for a np.where result
            # (1-tuple); presumably len(idx[0]) > 0 was intended. TODO confirm.
            if len(idx) > 0:
                segim[idx] = off + i
                #print(segim[idx])
                fs.write(str(int(off+i))+' '+str(int(hot_table['NUMBER'][i]))+'\n')
                hot_table['NUMBER'][i] = off + i
        fs.close()
        primary_hdu = pyfits.PrimaryHDU(header=seghd)
        image_hdu = pyfits.ImageHDU(segim)
        hdul = pyfits.HDUList([primary_hdu, image_hdu])
        hdul.writeto(outseg, overwrite=True)
        N = N+1
        table_all = np.append(cold_table,hot_table)
        table_all2.append(table_all)
    # NOTE(review): exits the row loop after the first row of tiles
    # (N is incremented inside the inner loop) — presumably for testing.
    if N > 0:
        break
# Concatenate the per-tile merged catalogs into one table and save it.
table_all3 = table_all2[0]
for j in range(1,len(table_all2)):
    table_all3 = np.append(table_all3,table_all2[j])
np.savetxt(outcat, table_all3, fmt="%d %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %d %f %f %f %d %d %d %d %f %f %d", header='NUMBER FLUX_ISO FLUXERR_ISO MAG_ISO MAGERR_ISO FLUX_AUTO FLUXERR_AUTO MAG_AUTO MAGERR_AUTO MAG_BEST MAGERR_BEST KRON_RADIUS BACKGROUND X_IMAGE Y_IMAGE ALPHA_J2000 DELTA_J2000 CXX_IMAGE CYY_IMAGE CXY_IMAGE ELLIPTICITY FWHM_IMAGE FLUX_RADIUS FLAGS CLASS_STAR A_IMAGE B_IMAGE XMIN_IMAGE YMIN_IMAGE XMAX_IMAGE YMAX_IMAGE ELONGATION THETA_IMAGE ISOAREA_IMAGE')
# Write ds9 region files: Kron ellipses (image coords) and RA/Dec positions.
reg1 = res_path + 'test_f475w1.0.reg'
reg2 = res_path + 'test_f475w2.0.reg'
os.system("awk '{print \"ellipse(\"$14\",\"$15\",\"($26*$12)\",\"($27*$12)\",\"$33\")\"}' "+outcat+ " > " + reg1)
os.system("awk '{print $16,$17}' "+outcat+ " > " + reg2)
#psf
# Report total elapsed wall-clock time for the whole run.
end_t = datetime.datetime.now()
elapsed_sec = (end_t - start_t).total_seconds()
print("Used Time: " + "{:.2f}".format(elapsed_sec) + " sec")
| [
"374594094@qq.com"
] | 374594094@qq.com |
dc43909402e1e03c13d1fa0416aabda3fe2f3a39 | c03b2d9117c02183ccc551b746edf7baddac3ecb | /settings.py | 20f0548c5dfb213b1e4602ce09e90fab2aabc9b5 | [] | no_license | SCARLETCRAZY/update2 | 58f65e583c4223ab7c93eb9a10d01ae33774369b | 45c5507feeb331ae6dfa50a9b111dc45a9a7d24b | refs/heads/master | 2022-11-23T13:03:15.367126 | 2020-07-24T08:05:30 | 2020-07-24T08:05:30 | 282,159,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,841 | py |
bot_token = '1356809982:AAEnCcj_QuQE3tOGqTLKncQqqhHTZlJjYjU' # токен бота
LOGIN_BOT = '@snus_pvl_bot' # логин бота
CHANNEL_ID = 1245689 # id канала куда будет отсылаться информация, ид без -100 в начале (например: 124873248)
admin_id = 691058046 # id админа
LOGIN_ADMIN = '@snus_sup' # тг логин спамера, нужен для информации
QIWI_NUMBER = '+77711739551' # номер киви
QIWI_TOKEN = 'b61884d5c31961bc81a8344ba7bdd301' # токен киви
PERCENT_SPAM = 0.5 # Процент спамеру (0.5 = 50%) #не работает в версии без спамеров
PERCENT_OWN = 0.5 # Процент вам (0.5 = 50%)
main_bd = '/home/TiredCat/Admin bot/main.db'
info = 'Информация\n' \
'Telegram поддержки @snus_sup' \
text_purchase = '❕ Вы выбрали: ' \
'{name}\n\n' \
'{info}\n\n' \
'💠 Цена: {price} тенге\n' \
'💠 Товар: {amount}\n' \
'💠 Введите ваш адрес после оплаты' \
replenish_balance = '➖➖➖➖➖➖➖➖➖➖➖\n' \
'💰 Пополнение баланса\n\n' \
'🥝 Оплата киви: \n\n' \
'👉 Номер {number}\n' \
'👉 Комментарий {code}\n' \
'👉 Сумма от 2000 тенге\n' \
'➖➖➖➖➖➖➖➖➖➖➖\n' \
profile = '🧾 Профиль\n\n' \
'❕ Ваш id - {id}\n' \
'❕ Ваш логин - {login}\n' \
'❕ Дата регистрации - {data}\n\n' \
'💰 Ваш баланс - {balance} тенге'
| [
"noreply@github.com"
] | SCARLETCRAZY.noreply@github.com |
0573b6563ad45c09808049f4fdd2f87ff082fce9 | ba157236151a65e3e1fde2db78b0c7db81b5d3f6 | /String/longest_group_positions.py | f01ef3284224992f2d915fed2ff79a7296bfda75 | [] | no_license | JaberKhanjk/LeetCode | 152488ccf385b449d2a97d20b33728483029f85b | 78368ea4c8dd8efc92e3db775b249a2f8758dd55 | refs/heads/master | 2023-02-08T20:03:34.704602 | 2020-12-26T06:24:33 | 2020-12-26T06:24:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | class Solution(object):
def largeGroupPositions(self, s):
ans = []
i = 0
for j in range(len(s)):
if j == len(s) - 1 or s[j] != s[j+1]:
if j-i+1 >= 3:
ans.append([i,j])
i = j+1
return ans
"""
:type s: str
:rtype: List[List[int]]
"""
| [
"spondoncsebuet@gmail.com"
] | spondoncsebuet@gmail.com |
d0d1f95bef7336294b1bd005942cb777bbb27c4f | fd16ccc7c5576a2f1921bcd9a10d7a157566190e | /Source/server/SocketServer/TestSocket/GameRules/GameRule_Poker.py | 86483d280051168781561c40724b23a8f28c8489 | [] | no_license | willy2358/lxqenjoy | 5469b2b8cf615a43ae777a841156523a8bf0564b | 8d72d76497b21996e72cf97aa4bb7a5fdf6a03be | refs/heads/dev | 2021-01-02T22:40:16.346181 | 2018-10-17T14:34:28 | 2018-10-17T14:34:28 | 99,359,908 | 0 | 1 | null | 2018-10-03T13:47:34 | 2017-08-04T16:12:19 | Python | UTF-8 | Python | false | false | 160 | py |
from GameRules.GameRule import GameRule
class GameRule_Poker(GameRule):
def __init__(self, rule_id):
super(GameRule, self).__init__(rule_id)
| [
"willy2358@139.com"
] | willy2358@139.com |
567b9ec705d537e69437e923395e97ecf0605c77 | e4bb1bdc907164512408aef2e5de9cb184997218 | /test_project/api/views.py | af2e99efe9409cb64c306d3ee5f249c4f130c1ed | [] | no_license | pawel1830/cassandra_app | 0fb61d9a8abf04c1c720faa3d9df49fd66ecfd2e | 5b074591fcf3be94361329fd37fa8064a139932a | refs/heads/master | 2023-01-04T00:07:28.199358 | 2020-10-21T08:35:00 | 2020-10-21T08:35:00 | 305,443,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,447 | py | from smtplib import SMTPException
from django.conf import settings
from django.core import mail
from django.http import JsonResponse
from rest_framework.decorators import api_view
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework import status
from rest_framework.pagination import PageNumberPagination
import logging
from .models import Message
from .serializer import MessageSerializer
logger = logging.getLogger(__name__)
@api_view(['POST'])
def create_message(request):
message_data = JSONParser().parse(request)
message_serializer = MessageSerializer(data=message_data)
if not message_serializer.is_valid():
return Response({"errors": message_serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
message_serializer.save()
return JsonResponse(message_serializer.data, status=status.HTTP_201_CREATED)
@api_view(['POST'])
def send_message(request):
request_data = JSONParser().parse(request)
magic_number = request_data.get('magic_number')
if not magic_number:
return Response({"errors": "Bad magic_number"}, status=status.HTTP_400_BAD_REQUEST)
messages = Message.objects.filter(magic_number=magic_number)
try:
with mail.get_connection() as connection:
for message in messages:
mail.EmailMessage(
subject=message.title,
body=message.content,
to=[message.email],
connection=connection,
).send()
message.delete()
except SMTPException as exc:
logger.error(exc)
return Response({'errors': 'SMTP Error'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as exc:
logger.error(exc)
return Response({'errors': 'Internal Error'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({"message": "Messages send"})
@api_view(['GET'])
def get_messages(request, email_value):
paginator = PageNumberPagination()
rest_framework_settings = getattr(settings, 'REST_FRAMEWORK')
paginator.page_size = rest_framework_settings.get('PAGE_SIZE', 10)
messages = Message.objects.filter(email=email_value)
messages_page = paginator.paginate_queryset(messages, request)
message_serializer = MessageSerializer(messages_page, many=True)
return paginator.get_paginated_response(message_serializer.data)
| [
"pawel1830@gmail.com"
] | pawel1830@gmail.com |
d31ea69750d27b528737a27ade4b005680ae0f2f | 17cdde8c5de4ee2d40303a1621a3d9ac1abaf7dc | /2009/03/entries/case-pythonbf/lookup.py | ff96c07fa70db7d52a4d888b5c9d472f825f1e27 | [] | no_license | VijayEluri/sum_challenge2 | dee7316d6133526d653a48636b9bcdec531a8510 | d23ed432d481731a83f3660c6606bb979583974e | refs/heads/master | 2020-05-20T11:01:41.211668 | 2012-01-03T23:30:19 | 2012-01-03T23:30:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | segmenttable = {}
segmenttable[1] = [' ',
' |',
' ',
' |',
' ']
segmenttable[2] = [' - ',
' |',
' - ',
'| ',
' - ']
segmenttable[3] = [' - ',
' |',
' - ',
' |',
' - ']
segmenttable[4] = [' ',
'| |',
' - ',
' |',
' ']
segmenttable[5] = [' - ',
'| ',
' - ',
' |',
' - ']
segmenttable[6] = [' - ',
'| ',
' - ',
'| |',
' - ']
segmenttable[7] = [' - ',
' |',
' ',
' |',
' ']
segmenttable[8] = [' - ',
'| |',
' - ',
'| |',
' - ']
segmenttable[9] = [' - ',
'| |',
' - ',
' |',
' - ']
segmenttable[0] = [' - ',
'| |',
' ',
'| |',
' - ']
| [
"nsmith@.(none)"
] | nsmith@.(none) |
1a3f20ea52dde542cdf4d53cd5e2ea3d761e3e9d | 5c6c7eb44ae1b2c50a104b260df86e43730564bc | /face_detection/face_detection.py | a86430e7e716a12c8d210724b639f18e394ee1c0 | [] | no_license | xcacao/IBM-labcourse | 43e51bef9e91a70dafcc4229a5bef1a699b089a4 | 850d49c0d17f9984fb9741f971ae949edababbc8 | refs/heads/master | 2023-02-24T02:30:38.520730 | 2021-01-08T14:42:26 | 2021-01-08T14:42:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,347 | py | import cv2
import torch
import imutils
import time
import numpy as np
from os.path import join, dirname, abspath
absolute_dir = dirname(abspath(__file__))
PROTO_TXT = join(absolute_dir, "model", "deploy.prototxt")
MODEL = join(absolute_dir, "model", "res10_300x300_ssd_iter_140000.caffemodel")
THRESHOLD = 0.5
def face_detection(callback=None):
net = cv2.dnn.readNetFromCaffe(PROTO_TXT, MODEL)
prev_frame_time = 0
new_frame_time = 0
cam = cv2.VideoCapture(0)
while True:
_, frame = cam.read()
h, w = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()
for i in np.arange(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence < THRESHOLD:
continue
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
start_x , start_y, end_x, end_y = box.astype("int")
#cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), (0, 0, 255), 2)
frame = crop_img(frame, start_x-10, start_y-10, end_x+10, end_y+10)
if callback:
tensor = callback(frame)
print(tensor.shape)
print(tensor)
cam.release()
cv2.destroyAllWindows()
return
new_frame_time = time.time()
fps = 1 / (new_frame_time - prev_frame_time)
prev_frame_time = new_frame_time
fps = str(int(fps))
cv2.putText(frame, fps, (7, 70), cv2.FONT_HERSHEY_SIMPLEX, 3, (100, 255, 0), 3, cv2.LINE_AA)
cv2.imshow('Webcam', frame)
if cv2.waitKey(1) == 27:
break
cam.release()
cv2.destroyAllWindows()
def detect(image, net):
blob = cv2.dnn.blobFromImage(image, 0.007843, (300, 300), 127.5)
net.setInput(blob)
detections = net.forward()
return detections
def crop_img(img, start_x, start_y, end_x, end_y):
height, width = end_y - start_y, end_x - start_x
crop_img = img[start_y:start_y+height, start_x:start_x+width]
crop_img = cv2.resize(crop_img, (400, 400))
return crop_img
if __name__ == '__main__':
face_detection()
| [
"q.thien.nguyen@outlook.de"
] | q.thien.nguyen@outlook.de |
3d5ff485a4026f8cedfadf674fe06cf536874e11 | 4530aa754bec557fc7bc49d39d83991b47c745ce | /run.py | a77896dcac17c1901f552cf6eca616f4c80bf86a | [] | no_license | jvanvugt/ai-at-the-webscale | 79d44f11696e3564de4dbeec8a82d09baa53d88c | f0c0b946784dde039998ca1d3f59d973651fdc02 | refs/heads/master | 2021-01-21T14:40:12.437099 | 2016-06-30T02:12:23 | 2016-06-30T02:12:23 | 59,597,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,227 | py | from __future__ import division
import sys
from threading import Thread
import numpy as np
from tqdm import *
# import matplotlib.pyplot as plt
from aiws import api
from models import *
from encoding import encode_context, decode_action
from login_info import USERNAME, PASSWORD
api.authenticate(USERNAME, PASSWORD)
REQUEST_NUMBERS = 10000
def run_single_id(run_id, show_progress=True):
range_func = trange if show_progress else xrange
print 'starting run_id: ', run_id
reward = 0
successes = 0
model = BootstrapModel(ContextualThompsonModel,
100, alpha=0.1, beta=0.1)
# mean_reward = np.zeros(REQUEST_NUMBERS / 100)
for rn in range_func(REQUEST_NUMBERS):
# if rn % 100 == 0:
# mean_reward[rn / 100] = reward / (rn + 1e-9)
context = api.get_context(run_id=run_id, request_number=rn)['context']
context = encode_context(context)
action = model.propose(context)
decoded_action = decode_action(action)
result = api.serve_page(run_id=run_id, request_number=rn, **decoded_action)
reward += decoded_action['price'] * result['success']
if result['success']:
successes += 1
model.update(context, action, result['success'])
# plt.plot(mean_reward)
# plt.show()
mean_reward = reward / REQUEST_NUMBERS
print 'Mean reward for run_id', run_id, ':', mean_reward
print 'Successes for run_id', run_id, ':' , successes
# print model.successes
return mean_reward
def run(id=0):
return run_single_id(id)
def validate():
api.reset_leaderboard()
for i in xrange(5000, 5010):
thread = Thread(target=run_single_id, args=(i, False))
thread.start()
if __name__ == '__main__':
if '--validate' in sys.argv:
validate()
elif '--test' in sys.argv:
mean_reward = np.mean([run(id) for id in xrange(10000, 10010)])
print 'mean reward over 10 runs: ', mean_reward
elif '--rid' in sys.argv:
run(int(sys.argv[sys.argv.index('--rid') + 1]))
elif '--train' in sys.argv:
mean_reward = np.mean([run(id) for id in xrange(100, 110)])
print 'mean reward over 20 runs: ', mean_reward
else:
run()
| [
"jorisvan.vugt@student.ru.nl"
] | jorisvan.vugt@student.ru.nl |
047756f0358b390379b309b76e1451174e9c5664 | cd2a798257db172ef37ffeea320d0edd7041e7a1 | /scripts/__init__.py | 6bbe0fa87a1481169b68adb6df5040c113ad6140 | [
"MIT"
] | permissive | masonkadem/python-functions | 46e4e0e5ba4a23711182b612db3e7bfdf079c0af | cbbb2e9eb14c637aa402f0082a037a5d4dd70468 | refs/heads/main | 2023-08-31T17:30:42.849891 | 2021-09-14T17:42:42 | 2021-09-14T17:42:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | """
__init__.py.
init file for scripts module.
"""
from scripts.common import df_info
__all__ = [
"df_info",
]
| [
"szymonos@outlook.com"
] | szymonos@outlook.com |
794017d9f59b8fd340f371c0dea125e86eb249c4 | 2f5fca8492ca07854eb554d5d5c7dcfc84daf50d | /Day2/greeting.py | 521ad63e07595e48a5e3102e04cbb6e796e5231c | [] | no_license | rashmisharma83/python | e0c8b6db41b46a2bc9da1a20fab60e7db721a7f6 | e83036d8feb11aba99c3da8cfb7ae277325dec3f | refs/heads/master | 2023-07-31T14:58:28.272223 | 2021-09-24T15:02:52 | 2021-09-24T15:02:52 | 410,005,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | my_first_name ="rashmi"
my_last_name ="sharma"
message ="Good Morning"
print(message +" "+ my_first_name.title() +" " + my_last_name.title())
import datetime
now = datetime.datetime.now()
print("currentdate :" +(now.strftime("%y-%m-%d")))
message ="The beautiful thing about learning is that nobody can take it away from you. "
print(message) | [
"rashmisharma83@gmail.com"
] | rashmisharma83@gmail.com |
74813ff7c7c6ab9c755033aaf276c4d173470032 | fe47728df24e90c1d34355ec7d64213bfe66e718 | /Topics/Integer arithmetic/A complex expression/main.py | bbc96cc4a5e8b3730c6e16cdfd3fc3dd7e0cc8b3 | [] | no_license | amari-at4/Zookeeper | 02c61c2924c949552f32905b1a943ecdb532f4bc | 9afcbef27dd1e20e782dea8f8f5d37bb7aa960db | refs/heads/master | 2023-06-30T11:22:14.320865 | 2021-08-03T22:31:20 | 2021-08-03T22:31:20 | 392,474,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | n = int(input())
print((((n + n) * n) - n) // n)
| [
"amari@at4.net"
] | amari@at4.net |
081efe0c1b63967fdc1eed07e74e7fd3db00c00d | 397ca46ff137200953d2296618c8969ac8f7d88a | /ch3_app.py | 67066f4568d019be592b8809334085e97efc5e45 | [] | no_license | MiltonMcNeil/CHAPTER03HW | c9c28b94d64c240956cd239f5d6294026eb58699 | 559ee592e08639787df15d46dbd942e1f767cd54 | refs/heads/master | 2022-12-12T19:37:32.467834 | 2020-09-13T16:58:06 | 2020-09-13T16:58:06 | 295,196,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,378 | py | from datetime import datetime
import os
import csv
import tkinter as tk
from tkinter import ttk
class LabelInput(tk.Frame):
"""A widget containing a label and input together."""
def __init__(self, parent, label='', input_class=ttk.Entry,
input_var=None, input_args=None, label_args=None,
**kwargs):
super().__init__(parent, **kwargs)
input_args = input_args or {}
label_args = label_args or {}
self.variable = input_var
if input_class in (ttk.Checkbutton, ttk.Button, ttk.Radiobutton):
input_args["text"] = label
input_args["variable"] = input_var
else:
self.label = ttk.Label(self, text=label, **label_args)
self.label.grid(row=0, column=0, sticky=(tk.W + tk.E))
input_args["textvariable"] = input_var
self.input = input_class(self, **input_args)
self.input.grid(row=1, column=0, sticky=(tk.W + tk.E))
self.columnconfigure(0, weight=1)
def grid(self, sticky=(tk.E + tk.W), **kwargs):
super().grid(sticky=sticky, **kwargs)
def get(self):
if self.variable:
return self.variable.get()
elif type(self.input) == tk.Text:
return self.input.get('1.0', tk.END)
else:
return self.input.get()
def set(self, value, *args, **kwargs):
if type(self.variable) == tk.BooleanVar:
self.variable.set(bool(value))
elif self.variable:
self.variable.set(value, *args, **kwargs)
elif type(self.input).__name__.endswith('button'):
if value:
self.input.select()
else:
self.input.deselect()
elif type(self.input) == tk.Text:
self.input.delete('1.0', tk.END)
self.input.insert('1.0', value)
else:
self.input.delete(0, tk.END)
self.input.insert(0, value)
class DataRecordForm(tk.Frame):
"""The input form for our widgets"""
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
# A dict to keep track of input widgets
self.inputs = {}
recordinfo = tk.LabelFrame(self, text="Record Information")
# line 1
self.inputs['Date'] = LabelInput(
recordinfo, "Date",
input_var=tk.StringVar()
)
self.inputs['Date'].grid(row=0, column=0)
self.inputs['Time'] = LabelInput(
recordinfo, "Time",
input_class=ttk.Combobox,
input_var=tk.StringVar(),
input_args={"values": ["8:00", "12:00", "16:00", "20:00"]}
)
self.inputs['Time'].grid(row=0, column=1)
self.inputs['Technician'] = LabelInput(
recordinfo, "Technician",
input_var=tk.StringVar()
)
self.inputs['Technician'].grid(row=0, column=2)
# line 2
self.inputs['Lab'] = LabelInput(
recordinfo, "Lab",
input_class=ttk.Combobox,
input_var=tk.StringVar(),
input_args={"values": ["A", "B", "C", "D", "E"]}
)
self.inputs['Lab'].grid(row=1, column=0)
self.inputs['Plot'] = LabelInput(
recordinfo, "Plot",
input_class=ttk.Combobox,
input_var=tk.IntVar(),
input_args={"values": list(range(1, 21))}
)
self.inputs['Plot'].grid(row=1, column=1)
self.inputs['Seed sample'] = LabelInput(
recordinfo, "Seed sample",
input_var=tk.StringVar()
)
self.inputs['Seed sample'].grid(row=1, column=2)
recordinfo.grid(row=0, column=0, sticky=(tk.W + tk.E))
# Environment Data
environmentinfo = tk.LabelFrame(self, text="Environment Data")
self.inputs['Humidity'] = LabelInput(
environmentinfo, "Humidity (g/m³)",
input_class=tk.Spinbox,
input_var=tk.DoubleVar(),
input_args={"from_": 0.5, "to": 52.0, "increment": .01}
)
self.inputs['Humidity'].grid(row=0, column=0)
self.inputs['Light'] = LabelInput(
environmentinfo, "Light (klx)",
input_class=tk.Spinbox,
input_var=tk.DoubleVar(),
input_args={"from_": 0, "to": 100, "increment": .01}
)
self.inputs['Light'].grid(row=0, column=1)
self.inputs['Temperature'] = LabelInput(
environmentinfo, "Tenmperature (°C)",
input_class=tk.Spinbox,
input_var=tk.DoubleVar(),
input_args={"from_": 4, "to": 40, "increment": .01}
)
self.inputs['Temperature'].grid(row=0, column=2)
self.inputs['Equipment Fault'] = LabelInput(
environmentinfo, "Equipment Fault",
input_class=ttk.Checkbutton,
input_var=tk.BooleanVar()
)
self.inputs['Equipment Fault'].grid(row=1, column=0, columnspan=3)
environmentinfo.grid(row=1, column=0, sticky=(tk.W + tk.E))
# Plant Data section
plantinfo = tk.LabelFrame(self, text="Plant Data")
self.inputs['Plants'] = LabelInput(
plantinfo, "Plants",
input_class=tk.Spinbox,
input_var=tk.IntVar(),
input_args={"from_": 0, "to": 20}
)
self.inputs['Plants'].grid(row=0, column=0)
self.inputs['Blossoms'] = LabelInput(
plantinfo, "Blossoms",
input_class=tk.Spinbox,
input_var=tk.IntVar(),
input_args={"from_": 0, "to": 1000}
)
self.inputs['Blossoms'].grid(row=0, column=1)
self.inputs['Fruit'] = LabelInput(
plantinfo, "Fruit",
input_class=tk.Spinbox,
input_var=tk.IntVar(),
input_args={"from_": 0, "to": 1000}
)
self.inputs['Fruit'].grid(row=0, column=2)
# Height data
self.inputs['Min Height'] = LabelInput(
plantinfo, "Min Height (cm)",
input_class=tk.Spinbox,
input_var=tk.DoubleVar(),
input_args={"from_": 0, "to": 1000, "increment": .01}
)
self.inputs['Min Height'].grid(row=1, column=0)
self.inputs['Max Height'] = LabelInput(
plantinfo, "Max Height (cm)",
input_class=tk.Spinbox,
input_var=tk.DoubleVar(),
input_args={"from_": 0, "to": 1000, "increment": .01}
)
self.inputs['Max Height'].grid(row=1, column=1)
self.inputs['Median Height'] = LabelInput(
plantinfo, "Median Height (cm)",
input_class=tk.Spinbox,
input_var=tk.DoubleVar(),
input_args={"from_": 0, "to": 1000, "increment": .01}
)
self.inputs['Median Height'].grid(row=1, column=2)
plantinfo.grid(row=2, column=0, sticky=(tk.W + tk.E))
# Notes section
self.inputs['Notes'] = LabelInput(
self, "Notes",
input_class=tk.Text,
input_args={"width": 75, "height": 10}
)
self.inputs['Notes'].grid(sticky=tk.W, row=3, column=0)
self.reset()
def get(self):
"""Retrieve data from form as a dict"""
data = {}
for key, widget in self.inputs.items():
data[key] = widget.get()
return data
def reset(self):
"""Resets the form entries"""
# clear all values
for widget in self.inputs.values():
widget.set('')
class Application(tk.Tk):
"""Application root window"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.title("ABQ Data Entry Application")
self.resizable(width=False, height=False)
ttk.Label(
self,
text="ABQ Data Entry Application",
font=("TkDefaultFont", 16)
).grid(row=0)
self.recordform = DataRecordForm(self)
self.recordform.grid(row=1, padx=10)
self.savebutton = ttk.Button(self, text="Save", command=self.on_save)
self.savebutton.grid(sticky=tk.E, row=2, padx=10)
# status bar
self.status = tk.StringVar()
self.statusbar = ttk.Label(self, textvariable=self.status)
self.statusbar.grid(sticky=(tk.W + tk.E), row=3, padx=10)
self.records_saved = 0
def on_save(self):
"""Handles save button clicks"""
datestring = datetime.today().strftime("%Y-%m-%d")
filename = "abq_data_record_{}.csv".format(datestring)
newfile = not os.path.exists(filename)
data = self.recordform.get()
with open(filename, 'a') as fh:
csvwriter = csv.DictWriter(fh, fieldnames=data.keys())
if newfile:
csvwriter.writeheader()
csvwriter.writerow(data)
self.records_saved += 1
self.status.set(
"{} records saved this session".format(self.records_saved))
self.recordform.reset()
if __name__ == "__main__":
app = Application()
app.mainloop() | [
"noreply@github.com"
] | MiltonMcNeil.noreply@github.com |
6855504b26d9c7e32693fcd35a8479d92601f4c8 | a2160fdafd3520693cfa41626ca4d69f408cb1a5 | /benchmark/seaborn-test.py | 29558dd2f16e00068b079c7e6f89316d0d69a615 | [] | no_license | goFrendiAsgard/jurnal-chiml | cbc820f0a770389a98fec855e73df2cba6d4c4dd | 7cad128a8418e53013a039b8bef4f3ed8b59264d | refs/heads/master | 2020-03-13T03:51:54.263197 | 2018-09-04T15:09:14 | 2018-09-04T15:09:14 | 130,952,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
d = {
'a': [1, 1, 3, 1, 1, 3],
'b': [1, 2, 2, 1, 2, 2],
'c': [1, 1, 1, 0, 0, 0],
'd': [0, 0, 0, 1, 1, 1]
}
df = pd.DataFrame(data=d)
sns.pairplot(df, hue='d')
plt.show()
| [
"gofrendiasgard@gmail.com"
] | gofrendiasgard@gmail.com |
87273241a4c3e1194c7c82ded20a113867a816f0 | 75ed37cfdb793062f6e138d7d25a4ac357670d2a | /mercado.py | 4b8e2e96f0b379959683fdbb718cffbf180ef9cf | [] | no_license | ArturAvelino/mini-market | a8107a43a136aa5b8497697aa3852bbe0d83fad4 | df270373a8c3ed04dd849ab6c697222f755d21dc | refs/heads/main | 2023-02-23T10:17:04.372735 | 2021-02-01T21:35:42 | 2021-02-01T21:35:42 | 335,089,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,860 | py | from time import sleep
from Models.produto import Produto
from utils.helper import formata_valor
produtos = []
carrinho = []
def main():
menu()
def menu():
print("================================================================")
print("======================== Bem-vindo(a)! =========================")
print("========================= Tuca's Shop ==========================")
print("================================================================\n")
print("Selecione uma opção abaixo: ")
print("1 - Cadastrar Produto")
print("2 - Listar Produto")
print("3 - Comprar Produto")
print("4 - Visualizar Carrinho")
print("5 - Fechar Pedido")
print("6 - Sair")
opcao = int(input("Opção: "))
if opcao == 1:
cadastrar_produto()
elif opcao == 2:
listar_produtos()
elif opcao == 3:
comprar_produto()
elif opcao == 4:
visualizar_carrinho()
elif opcao == 5:
fechar_pedido()
elif opcao == 6:
print("Volte sempre!")
sleep(2)
exit()
else:
print("Opção inválida")
sleep(2)
menu()
def cadastrar_produto():
print("Cadastro de produtos")
print("====================")
nome = input("Digite o nome do produto: ")
preco = input("Digite o preço do produto: ")
produto = Produto(nome, preco)
p = None
for each in produtos:
if each.nome == nome:
p = each.nome
if p == produto.nome:
print("O produto já está cadastrado")
sleep(2)
menu()
else:
produtos.append(produto)
print(f"O produto {nome} foi cadastrado com sucesso!")
sleep(2)
menu()
def listar_produtos():
if len(produtos) > 0:
print("Listagem de produtos")
print("====================")
for produto in produtos:
print(f"{produto} \n")
sleep(1)
menu()
else:
print("Não existe produtos cadastrados")
sleep(2)
menu()
def comprar_produto():
if len(produtos) > 0:
print("Informe o código do produto que deseja comprar")
print("==============================================")
print("============Produtos Disponíveis==============")
print("==============================================")
for n in produtos:
print(f"{n}\n")
codigo = int(input("Código: "))
produto = pega_produto_codigo(codigo)
if produto:
if len(carrinho) > 0:
tem_no_carrinho = False
for item in carrinho:
quant = item.get(produto)
if quant:
item[produto] = quant + 1
print(f"O {produto.nome} agora posssui {quant + 1} unidades")
tem_no_carrinho = True
sleep(2)
menu()
if not tem_no_carrinho:
prod = {produto: 1}
carrinho.append(prod)
print(f"O produto {produto.nome} foi adicionado ao carrinho")
sleep(2)
menu()
else:
item = {produto: 1}
carrinho.append(item)
print(f"O produto {produto.nome} foi adicionado ao carrinho!")
else:
print(f"O produto com o código {codigo} não foi encontrado")
sleep(2)
menu()
else:
print("Não existe produtos cadastrados")
sleep(2)
menu()
def visualizar_carrinho():
if len(carrinho) > 0:
print("Produtos do carrinho: ")
for item in carrinho:
for dados in item.items():
print(dados[0])
print(f"Quantidade: {dados[1]}\n")
sleep(2)
menu()
else:
print("Ainda não existem produtos no carrinhos")
sleep(2)
menu()
def fechar_pedido():
if len(carrinho) > 0:
valor_total = 0
for item in carrinho:
for dados in item.items():
print(dados[0])
print(f"Quantidade: {dados[1]}\n")
valor_total += dados[0].preco * dados[1]
sleep(1)
print(f"Sua fatura é: {formata_valor(valor_total)}")
print("Volte sempre!")
carrinho.clear()
sleep(5)
else:
print("Não existe produtos no carrinho")
sleep(2)
menu()
def pega_produto_codigo(codigo):
p = None
for each in produtos:
if each.codigo == codigo:
p = each
return p
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | ArturAvelino.noreply@github.com |
ed83b8b9465e7789fbdf5342d12e6863ef98a36d | ab79ca83f97aff1f5e00d46781e0355b8e26b4c7 | /LogTranslation/SurveyMode.py | 32758c98925e9a4ab2306d4f3422dfbebcbe5061 | [] | no_license | AngusGLChen/LearningTransfer | d966ece2b94b3287f7cf0468ae7afd9591c64d99 | 956c9a9e557deb959b26ae42fb46eba38fb417dd | refs/heads/master | 2021-01-19T06:42:47.967713 | 2016-06-20T19:18:09 | 2016-06-20T19:18:09 | 61,573,656 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,461 | py | '''
Created on Jul 27, 2015
@author: Angus
'''
import os,re
from sets import Set
def survey_mode(path):
files = os.listdir(path)
course_id = ""
id_map = {}
response_id_set = set()
# Output survey_description table
survey_description_path = os.path.dirname(os.path.dirname(os.path.dirname(path))) + "/Results/FP101x/" + "survey_description.sql"
if os.path.isfile(survey_description_path):
os.remove(survey_description_path)
survey_description_file = open(survey_description_path, 'wb')
survey_description_file.write("\r\n" + "USE FP101x;" + "\r\n")
survey_description_file.write("\r\n" + "DROP TABLE IF EXISTS survey_description; CREATE TABLE survey_description (question_id varchar(255) NOT NULL, course_id varchar(255), question_type varchar(255), description text, PRIMARY KEY (question_id), FOREIGN KEY (course_id) REFERENCES courses(course_id)) ENGINE=MyISAM;" + "\r\n")
# Output survey_response table
survey_response_path = os.path.dirname(os.path.dirname(os.path.dirname(path))) + "/Results/FP101x/" + "survey_response.sql"
if os.path.isfile(survey_response_path):
os.remove(survey_response_path)
survey_response_file = open(survey_response_path, 'wb')
survey_response_file.write("\r\n" + "USE FP101x;" + "\r\n")
survey_response_file.write("\r\n" + "DROP TABLE IF EXISTS survey_response; CREATE TABLE survey_response (response_id varchar(255) NOT NULL, course_user_id varchar(255), question_id varchar(255), answer text, PRIMARY KEY (response_id), FOREIGN KEY (course_user_id) REFERENCES global_user(course_user_id)) ENGINE=MyISAM;" + "\r\n")
# Processing course_structure data
for file in files:
if "course_structure" in file:
# To extract course_id
course_id_array = file.split("-")
course_id = course_id_array[0] + "/" + course_id_array[1] + "/" + course_id_array[2]
# Processing ID information
for file in files:
if "2014T3_FP101x" in file:
sub_path = path + file + "/"
sub_files = os.listdir(sub_path)
for sub_file in sub_files:
if "FP Course Data" in sub_file:
id_path = sub_path + sub_file + "/"
id_files = os.listdir(id_path)
for id_file in id_files:
if "-anon-ids" in id_file:
fp = open(id_path + id_file, "r")
fp.readline()
lines = fp.readlines()
for line in lines:
array = line.split(",")
global_id = array[0].replace("\"","")
anonymized_id = array[1].replace("\"","")
id_map[anonymized_id] = global_id
# Processing Pre-survey information
for file in files:
if "2014T3_FP101x" in file:
sub_path = path + file + "/"
sub_files = os.listdir(sub_path)
for sub_file in sub_files:
if "FP Pre Survey" in sub_file:
pre_path = sub_path + sub_file + "/"
pre_files = os.listdir(pre_path)
for pre_file in pre_files:
if "survey_updated" in pre_file:
fp = open(pre_path + pre_file, "r")
# To process question_id line
question_id_line = fp.readline()
question_id_array = question_id_line.split(",")
# To process question description line
question_line = fp.readline()
question_line = question_line.replace("\",NA,\"","\",\"NA\",\"")
question_array = question_line.split("\",\"")
for i in range(23,98):
question_id = course_id + "_pre_" + question_id_array[i].replace("\"","")
question_array[i] = question_array[i].replace("\'", "\\'")
write_string = "\r\n" + "insert into survey_description (question_id, course_id, question_type, description) values"
write_string += "('%s','%s','%s','%s');\r\n" % (question_id, course_id, "pre", question_array[i])
survey_description_file.write(write_string)
response_lines = fp.readlines()
num_multipleID = 0
for response_line in response_lines:
response_line = response_line.replace("\",NA,\"","\",\"NA\",\"")
subRegex = re.compile("\(([^\(\)]*)\)")
matches = subRegex.findall(response_line)
if not len(matches) == 0:
for match in matches:
response_line = response_line.replace(match, "")
response_array = response_line.split("\",\"")
# print response_array[103]
if response_array[103] in id_map.keys():
course_user_id = course_id + "_" + id_map[response_array[103]]
for i in range(23,98):
question_id = course_id + "_" + "pre" + "_" + question_id_array[i].replace("\"","")
response_id = course_user_id + "_" + "pre" + "_" + question_id_array[i].replace("\"","")
if response_id not in response_id_set:
response_array[i] = response_array[i].replace("\'", "\\'")
write_string = "\r\n" + "insert into survey_response (response_id, course_user_id, question_id, answer) values"
write_string += "('%s','%s','%s','%s');\r\n" % (response_id, course_user_id, question_id, response_array[i])
survey_response_file.write(write_string)
response_id_set.add(response_id)
# else:
# print response_id + "\t" + response_array[103] + "\t" + question_array[i]
else:
num_multipleID += 1
# print response_line
print "Pre - The number of response is: " + str(len(response_lines))
print "Pre - The number of response with multiple/empty IDs is: " + str(num_multipleID)
print ""
# Processing Post-survey information
for file in files:
if "2014T3_FP101x" in file:
sub_path = path + file + "/"
sub_files = os.listdir(sub_path)
for sub_file in sub_files:
if "FP Post Survey" in sub_file:
post_path = sub_path + sub_file + "/"
post_files = os.listdir(post_path)
for post_file in post_files:
if "survey_updated" in post_file:
fp = open(post_path + post_file, "r")
# To process question_id line
question_id_line = fp.readline()
question_id_array = question_id_line.split(",")
# To process question description line
question_line = fp.readline()
question_line = question_line.replace("\",NA,\"","\",\"NA\",\"")
question_array = question_line.split("\",\"")
for i in range(15,113):
question_id = course_id + "_post_" + question_id_array[i].replace("\"","")
# print question_id
question_array[i] = question_array[i].replace("\'", "\\'")
write_string = "\r\n" + "insert into survey_description (question_id, course_id, question_type, description) values"
write_string += "('%s','%s','%s','%s');\r\n" % (question_id, course_id, "post", question_array[i])
survey_description_file.write(write_string)
response_lines = fp.readlines()
num_multipleID = 0
for response_line in response_lines:
response_line = response_line.replace("\",NA,\"","\",\"NA\",\"")
subRegex = re.compile("\(([^\(\)]*)\)")
matches = subRegex.findall(response_line)
if not len(matches) == 0:
for match in matches:
response_line = response_line.replace(match, "")
response_array = response_line.split("\",\"")
if response_array[118] in id_map.keys():
course_user_id = course_id + "_" + id_map[response_array[118]]
for i in range(15,113):
question_id = course_id + "_post_" + question_id_array[i].replace("\"","")
response_id = course_user_id + "_post_" + question_id_array[i].replace("\"","")
if response_id not in response_id_set:
response_array[i] = response_array[i].replace("\'", "\\'")
write_string = "\r\n" + "insert into survey_response (response_id, course_user_id, question_id, answer) values"
write_string += "('%s','%s','%s','%s');\r\n" % (response_id, course_user_id, question_id, response_array[i])
survey_response_file.write(write_string)
response_id_set.add(response_id)
# else:
# print response_id + "\t" + response_array[118] + "\t" + question_array[i]
else:
num_multipleID += 1
print "Post - The number of response is: " + str(len(response_lines))
print "Post - The number of response with multiple/empty IDs is: " + str(num_multipleID)
survey_description_file.close()
survey_response_file.close()
| [
"angus.glchen@gmail.com"
] | angus.glchen@gmail.com |
34bda52c1409fe6f08feee5eeea3683f4dfd5f15 | 9f030bcc6b5ff7a8b437c8c444f6146d83738dca | /thinktown/settings.py | 634b9aad7fb436f738e7c938504f237c431bb849 | [] | no_license | baby4bamboo/apolish | c1e47f9fc94c6ba90cdbf5a7d1b1e671fc90abdd | af3df232dacf0694504596d4a1e02079dcdd9b1c | refs/heads/master | 2021-01-10T14:04:07.360827 | 2015-11-26T02:42:02 | 2015-11-26T02:42:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,460 | py | # Django settings for thinktown project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'apolishdb', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Shanghai'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-cn'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '8bxqi8b4bg05g_4$e3s9oqs$q%j#v*0fa!(me8j$z6yp-xf(&x'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'thinktown.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'thinktown.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
#'django.contrib.admindocs',
'apolish',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"bayao@cisco.com"
] | bayao@cisco.com |
9eb4c6fd78ca26eecf850645b8fec5f1befa753d | d6fd92d44bfdd3c2f16fb91cd66910f21e88dbb5 | /echo_env_var.py | 55e769995ebf1ae27f230292a31e4bca2091057d | [] | no_license | Sruinard/AzVmssPerformance | 87fef23ee36dd447df01b55a9058d6918c4a665d | 376a142e9ffcf045790258ac8b9953657a0c30a1 | refs/heads/master | 2023-07-12T17:50:01.009728 | 2021-08-31T14:27:47 | 2021-08-31T14:27:47 | 399,727,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,957 | py | import os
import json
import requests
import datetime
import hashlib
import hmac
import base64
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config-id', type=str)
parser.add_argument('--time-in-seconds', type=int)
parser.add_argument('--number-of-instances', type=int)
args = parser.parse_args()
print('----------------------------------------------')
customer_id = os.environ.get("WORKSPACE_ID", 'key placeholder value')
shared_key = os.environ.get("WORKSPACE_KEY", 'key placeholder value')
print(args.time_in_seconds)
print(args.config_id)
print(args.number_of_instances)
print('----------------------------------------------')
# Update the customer ID to your Log Analytics workspace ID
# customer_id = 'xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
# For the shared key, use either the primary or the secondary Connected Sources client authentication key
# shared_key = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
# The log type is the name of the event that is being submitted
log_type = 'PerformanceTestVmss'
# An example JSON web monitor object
json_data = [{
"slot_ID": 12345,
"ID": "5cdad72f-c848-4df0-8aaa-ffe033e75d57",
"time_in_seconds": args.time_in_seconds,
"config_id": args.config_id,
"number_of_instances": args.number_of_instances,
}]
body = json.dumps(json_data)
#####################
######Functions######
#####################
# Build the API signature
def build_signature(customer_id, shared_key, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(customer_id,encoded_hash)
return authorization
# Build and send a request to the POST API
def post_data(customer_id, shared_key, body, log_type):
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
signature = build_signature(customer_id, shared_key, rfc1123date, content_length, method, content_type, resource)
uri = 'https://' + customer_id + '.ods.opinsights.azure.com' + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': log_type,
'x-ms-date': rfc1123date
}
response = requests.post(uri,data=body, headers=headers)
if (response.status_code >= 200 and response.status_code <= 299):
print('Accepted')
else:
print("Response code: {}".format(response.status_code))
post_data(customer_id, shared_key, body, log_type)
| [
"stefruinard@hotmail.com"
] | stefruinard@hotmail.com |
907f2069f1ed74d3a3e751a3b36db148b39348c8 | 539078fa5ba0ec583cf697ceec366cea3cbd8fc6 | /leetcode/替换后的最长重复子串(滑动窗口).py | 92534be4cccab194dc8c382fdb37467d1af9b865 | [] | no_license | Bubbleskye/2019summer | 88ec3b32aa1346e942455e2e38a20e036dd07d68 | 2329898b3653802de287434b5facd09636cc92cf | refs/heads/master | 2022-12-18T04:41:13.199755 | 2020-09-24T16:42:29 | 2020-09-24T16:42:29 | 291,307,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | # 如何判断一个字符串改变K个字符,能够变成一个连续串:如果当前字符串中的出现次数最多的字母个数+K大于串长度,那么这个串就是满足条件的
# historyCharMax保存滑动窗口内相同字母出现次数的历史最大值
# 通过判断窗口宽度(right - left + 1)是否大于historyCharMax + K,大于则窗口滑动,否则窗口就扩张
def characterReplacement(s,k):
map=[0 for _ in range(26)]
if not s:
return 0
left=0
right=0
historymax=0
maxlen=0
while right<len(s):
index=ord(s[right])-ord('A')
map[index]=map[index]+1
historymax=max(historymax,map[index])
if right-left+1>historymax+k:
# 不满足,则窗口整个右移
# 因为如果仅移动右边,虽然historymax可能会变大+1,但是right-left+1也随之变大+1,随意无法弥补空缺
map[ord(s[left])-ord('A')]=map[ord(s[left])-ord('A')]-1
left=left+1
right=right+1
else:
maxlen=max(maxlen,right-left+1)
right=right+1
return maxlen | [
"35057725+Bubbleskye@users.noreply.github.com"
] | 35057725+Bubbleskye@users.noreply.github.com |
e98c3ae636909443eeb2ddd593429ea6b54e78d4 | a953655b27aba5c72a4668c3e621874f406e70e1 | /MazeGenerator/MazeGenerator.pyde | aa5e7bedc278a5cfde2816d1cd374384376305b4 | [] | no_license | bitrogen/Processing-Scripts | 3cb98a3db816c6f2a8e55af581d12d313176b906 | 43ff2e016793a695cad2766ab77c5c457fab4269 | refs/heads/main | 2023-03-24T19:42:34.385062 | 2021-03-18T20:35:32 | 2021-03-18T20:35:32 | 348,644,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,096 | pyde | screenSize = [1920, 1080]
# from classes import Maze
class Maze:
def __init__(self, sizeofBoard):
self.size = sizeofBoard
self.blocks = list()
self.cellSize = 1000/sizeofBoard
self.squareSize = self.cellSize*0.7
self.padx = 485
self.pady = 65
for y in range(self.size):
self.blocks.append([])
for x in range(self.size):
self.blocks[y].append([False for _ in range(4)])
def isOpen (self, xindex, yindex):
block = self.blocks[xindex][yindex]
return any(block)
def drawMaze(self):
for xindex, column in enumerate(self.blocks):
for yindex, block in enumerate(column):
# rectMode(CENTER)
fill(255)
noStroke()
square(xindex*self.cellSize+460+25, yindex*self.cellSize+40+25, self.squareSize)
def dig(self, index, direction):
if index[0] == 0 and direction==0:
return False
if index[1] == 0 and direction==3:
return False
if index[0] == self.size and direction==2:
return False
if index[1] == self.size and direction==1:
return False
self.drawBetweenCells(index, direction)
neighbors = self.getNeighbors(index)
index2 = neighbors[direction]
fill(0,255,0)
square(index[0]*self.cellSize+self.padx, index[1]*self.cellSize+self.pady, self.squareSize)
square(index2[0]*self.cellSize+self.padx, index2[1]*self.cellSize+self.pady, self.squareSize)
self.blocks[index[0]][index[1]][direction] = True
self.blocks[index2[0]][index2[1]][self.getOppositeDirection(direction)] = True
self.drawBetweenCells(index, direction)
self.drawBetweenCells(index2, self.getOppositeDirection(direction))
return True
def getOppositeDirection(self, direction):
if direction in [3,1]:
return 4-direction
return 2-direction
def drawBetweenCells(self, index, direction):
fill(0,255,0)
noStroke()
change = (100 - self.squareSize)/2
if direction == 0:
square(index[0]*self.cellSize+self.padx, index[1]*self.cellSize+self.pady+change, self.squareSize)
if direction == 1:
square(index[0]*self.cellSize+self.padx+change, index[1]*self.cellSize+self.pady, self.squareSize)
if direction == 2:
square(index[0]*self.cellSize+self.padx, index[1]*self.cellSize+self.pady-change, self.squareSize)
if direction == 3:
square(index[0]*self.cellSize+self.padx-change, index[1]*self.cellSize+self.pady, self.squareSize)
def getNeighbors(self, index):
indexofNeighbors = [None for _ in range(4)]
if index[0] != 0:
indexofNeighbors[0] = [index[0], index[1]+1]
if index[1] != 0:
indexofNeighbors[3] = [index[0]-1, index[1]]
if index[0] != self.size:
indexofNeighbors[1] = [index[0]+1, index[1]]
if index[1] != self.size:
indexofNeighbors[2] = [index[0], index[1]-1]
return indexofNeighbors
theMaze = Maze(20)
def setup ():
# size(1920, 1080);
fullScreen(1)
background(0, 0, 40);
rectMode(CENTER);
fill(0);
stroke(255)
strokeWeight(4)
square(screenSize[0]/2, screenSize[1]/2, 1000);
print(screenSize)
theMaze.drawMaze()
print("Digged:",theMaze.dig([19,19],2))
print(theMaze.blocks[19][18])
print(theMaze.isOpen(19,18))
print(theMaze.isOpen(19,19))
def draw ():
pass
def mousePressed():
exit()
| [
"noreply@github.com"
] | bitrogen.noreply@github.com |
2a947f6dde626fc5c7a608db41b0b51fbd6eafdb | 8d5ba6747531cbd43d63d32265fd608f9081c3b7 | /.venv/lib/python2.7/site-packages/indico/core/db/sqlalchemy/custom/unaccent.py | b4838177828f13481121fa0984a94d46e9307b19 | [] | no_license | Collinsnyamao/indico | 0e433b78803afae5b1ac90483db1f3d90ce2fddb | 32adf8123e266eb81439b654abc993b98e0cd7f2 | refs/heads/master | 2020-03-18T04:55:40.386595 | 2018-06-02T13:45:47 | 2018-06-02T13:45:47 | 134,314,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,436 | py | # This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from sqlalchemy import DDL, Index, text
from sqlalchemy.event import listens_for
from sqlalchemy.sql import func
from sqlalchemy.sql.elements import conv
from indico.util.string import to_unicode
# if you wonder why search_path is set and the two-argument `unaccent` function is used,
# see this post on stackoverflow: http://stackoverflow.com/a/11007216/298479
SQL_FUNCTION_UNACCENT = '''
CREATE FUNCTION indico.indico_unaccent(value TEXT)
RETURNS TEXT
AS $$
BEGIN
RETURN unaccent('unaccent', value);
END;
$$
LANGUAGE plpgsql IMMUTABLE SET search_path = public, pg_temp;
'''
def _should_create_function(ddl, target, connection, **kw):
sql = "SELECT COUNT(*) FROM information_schema.routines WHERE routine_name = 'indico_unaccent'"
count = connection.execute(text(sql)).scalar()
return not count
def create_unaccent_function(conn):
"""Creates the unaccent function if it doesn't exist yet.
In TESTING mode it always uses the no-op version to have a
consistent database setup.
"""
DDL(SQL_FUNCTION_UNACCENT).execute_if(callable_=_should_create_function).execute(conn)
def define_unaccented_lowercase_index(column):
"""Defines an index that uses the indico_unaccent function.
Since this is usually used for searching, the column's value is
also converted to lowercase before being unaccented. To make proper
use of this index, use this criterion when querying the table::
db.func.indico.indico_unaccent(db.func.lower(column)).ilike(...)
The index will use the trgm operators which allow very efficient LIKE
even when searching e.g. ``LIKE '%something%'``.
:param column: The column the index should be created on, e.g.
``User.first_name``
"""
@listens_for(column.table, 'after_create')
def _after_create(target, conn, **kw):
assert target is column.table
col_func = func.indico.indico_unaccent(func.lower(column))
index_kwargs = {'postgresql_using': 'gin',
'postgresql_ops': {col_func.key: 'gin_trgm_ops'}}
Index(conv('ix_{}_{}_unaccent'.format(column.table.name, column.name)), col_func, **index_kwargs).create(conn)
def unaccent_match(column, value, exact):
from indico.core.db import db
value = to_unicode(value).replace('%', r'\%').replace('_', r'\_').lower()
if not exact:
value = '%{}%'.format(value)
# we always use LIKE, even for an exact match. when using the pg_trgm indexes this is
# actually faster than `=`
return db.func.indico.indico_unaccent(db.func.lower(column)).ilike(db.func.indico.indico_unaccent(value))
| [
"collins.nyamao@strathmore.edu"
] | collins.nyamao@strathmore.edu |
7690636bb731b6485ad4915aa61584211b9871ae | 3490e72263dbb9084830d5d217ac7762069b1ddc | /src/data_collection/get_lineups_dev.py | 6ae9db2701654dfdcdc0a8ddcfa35a2eda16c8ed | [
"MIT"
] | permissive | vietanhnl95/football-results-prediction | 34ef412c65bbd902c00123da88499ad41cc40306 | f4b17b8720d78c449d799f9c622a58c5f481c244 | refs/heads/main | 2023-08-25T16:02:53.389502 | 2021-10-15T10:51:56 | 2021-10-15T10:51:56 | 414,508,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | import json
import pandas as pd
import datetime
import sqlalchemy
import utils
def main():
"""
Request to API and insert data to DB
"""
#read_config
config = utils.read_config()
lineups_config = config['lineups']
# connect to db
db_url = config['db_url']
conn = utils.create_db_connection(db_url)
def get_lineup(fixture_id):
# get api response
response = utils.get_api_response(
url="https://api-football-v1.p.rapidapi.com/v3/fixtures/lineups",
querystring={"fixture": fixture_id}
)
return response
print(get_lineup('192297'))
if __name__ == "__main__":
main()
| [
"anh.nguyen18@onemount.com"
] | anh.nguyen18@onemount.com |
77f768d0691bcb93bf5f8e5d1630136179da8c86 | 4da78036b1b94e9ca520f8c5ea264c8734848f7e | /a1-911-Andrioaie-Daria/p3.py | 00c6f3144432d36c76fc59fc8e0c91af2634fdbd | [] | no_license | daria-andrioaie/Fundamentals-Of-Programming | df5631a4afb20b119daba100c14bad22d0ccaffb | 91cebf0d279d0e52cb72ba49f4db2b05f7e209d5 | refs/heads/main | 2023-08-11T05:39:58.964184 | 2021-09-20T19:37:43 | 2021-09-20T19:37:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,567 | py | #
# Implement the program to solve the problem statement from the third set here
#Problem 12. Determine the age of a person, in number of days.
'''
Program description:
Imagining a 'year axis' where we place the birth date and the current date, the program calculates
the sum of all the days in the years between the birth year and the current year, then 'crops' the ends
such that it eliminates the number of days from the start of the birth year until the birth day
and the remaining days from the current day to the end of the year.
'''
def days_until(day, month, year):
"""
Calculates the number of days from the start of the year to the date entered as a parameter
:param day: the day
:param month: the month
:param year: the year
:return: the number of days
"""
days=0
#we add 30 or 31 days for each month previous to the current one, according to their parity
for i in range (1, month):
if i<8: #months before august
if i%2==0:
days+=30
else:
days+=31
else: #months after august, august including
if i%2==1:
days+=30
else:
days+=31
if month>2: #if the month is past february, we take into account that Februasry has 29 days
if year%4==0: #if the year is a multiple of 4 and 28, otherwise
days-=1
else:
days-=2
days+=day #add the days of the current month
return days
def days_left(day, month, year):
"""
Calculates the number of days left in the year, starting from the date entered as a parameter
:param day: the day
:param month: the month
:param year: the year
:return: the days left
"""
#first add the total o days of the whole year
if year%4==0:
days=366
else:
days=365
#subtract the days from the start of the year from the total
days-=days_until(day, month, year)
return days
def whole_years(y1, y2):
"""
Calculates the total of days in the years from the one year to another
:param by: birth year
:param cy: current yaer
:return: number of days
"""
alive=0;
for y in range(y1, y2+1):
if y % 4 == 0: #leap year
alive += 366
else: #otherwise
alive += 365
return alive
def calculate(date1, date2):
"""
Calculates the days between date1 and date2
"""
alive = whole_years(date1['year'], date2['year'])
alive -= days_until(date1)
alive -= days_left(date2)
# alive+=1 this is optional, depending on whether we want to include the current day or not
return alive
def show_result(date1, date2):
"""
Prints the result.
"""
alive_days=calculate(date1, date2)
print("Jimmy has been alive for ", alive_days, " days.")
def read_birth_date():
print("Please enter Jimmy's birth date")
dict['day']=int(input('Day: '))
dict['month'] = int(input('Month: '))
dict['year'] = int(input('Year: '))
return dict
def read_current_date():
print('Please enter the current date')
dict['day'] = int(input('Day: '))
dict['month'] = int(input('Month: '))
dict['year'] = int(input('Year: '))
return dict
def start():
birth = read_birth_date()
current = read_current_date()
show_result(birth, current)
start() | [
"noreply@github.com"
] | daria-andrioaie.noreply@github.com |
c7b8cda8705fd1fd9c44d4ed382af4c6b95dbabe | e0986503b275a658687798771cdef4e290630f29 | /Cyber_Security/25-MyNetScanner/venv/bin/chardetect | 60cbb097fad5f532e80f4840637cffeb62530d2d | [] | no_license | deliceyagmur/Python_Siber_Guvenlik | 76ce98803d65599ed7c898ad824dc543648e9c1e | eed640c46b0ba52e27d950fa21410af5e7745027 | refs/heads/master | 2023-04-18T11:18:59.452726 | 2021-05-07T12:18:33 | 2021-05-07T12:18:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/root/PycharmProjects/25-MyNetScanner/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"yagmur.delice@gmail.com"
] | yagmur.delice@gmail.com | |
6371e03f7e86aed6d39e751ba81d7471c80155ef | 7d2f933ed3c54e128ecaec3a771817c4260a8458 | /venv/Lib/site-packages/requests/packages/urllib3/__init__.py | f669e1f517d93579a48a00c7ca81b40ac6d206c5 | [] | no_license | danielmoreira12/BAProject | c61dfb1d0521eb5a28eef9531a00e744bfb0e26a | 859f588305d826a35cc8f7d64c432f54a0a2e031 | refs/heads/master | 2021-01-02T07:17:39.267278 | 2020-02-25T22:27:43 | 2020-02-25T22:27:43 | 239,541,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | # urllib3/__init__.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = 'dev'
# Set default logging handler to avoid "No handler found" warnings.
import logging
from . import exceptions
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util import make_headers, get_host, Timeout
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added an stderr logging handler to logger: %s' % __name__)
return handler
# ... Clean up.
del NullHandler
| [
"danielmoreira12@github.com"
] | danielmoreira12@github.com |
9e0f5a34033d15ae81aa48a45f3c218ec10b7f08 | 72a9976d9e6c11459134521cc1e9d3ee0becf765 | /home/admin.py | 5184e4b7a4165b39fd094b6909792f15cbdb2443 | [] | no_license | vinaykumar1908/082021i | a0f69745b726baa3a4981208b040956073a57597 | 85950350f17781e791b87718eeecc1f65d39a056 | refs/heads/master | 2023-08-28T00:36:05.818798 | 2021-09-16T18:24:27 | 2021-09-16T18:24:27 | 400,771,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | from django.contrib import admin
from . import models
# Register your models here.
admin.site.register(models.p)
| [
"vinaykumar1908@gmail.com"
] | vinaykumar1908@gmail.com |
60f35b72cf602b20a9b81ac201d5bff69e1e3489 | 8a05d954639e5253ee4ed144671d74e0a85d5a30 | /callapp/migrations/0011_auto_20211004_1503.py | 9a7a345701f5eb90bd8f2190348b44cdebd5d823 | [] | no_license | jaguar48/Therapy-booking-app | 8c28826c992285592aadbde672be33ba54161ef0 | fe1b2434a843763097ddd71fd041e53770aea1d1 | refs/heads/master | 2023-09-05T23:38:50.305747 | 2021-11-16T12:11:17 | 2021-11-16T12:11:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | # Generated by Django 2.2.10 on 2021-10-04 22:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('callapp', '0010_transfer'),
]
operations = [
migrations.CreateModel(
name='balance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account_balance', models.DecimalField(decimal_places=2, max_digits=10)),
('date_created', models.DateField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.RemoveField(
model_name='transfer',
name='owner',
),
migrations.DeleteModel(
name='account',
),
migrations.DeleteModel(
name='transfer',
),
]
| [
"okonkwostanley67@yahoo.com"
] | okonkwostanley67@yahoo.com |
2914fe989199a7020838934d879bd59c4b3a46ad | ef7530b5816fc1dcde650f7c982ab7c97fd6e65b | /movies/admin.py | e5d26af9476beaf892d7ae817a0a500a95b6aebd | [] | no_license | shofizone/vidly | e908293257db5e7c4496ed44d07ab87a766d8dc9 | ca6c0af5504e582beee79df18b2fc893bb7e1232 | refs/heads/master | 2023-04-30T22:31:50.288851 | 2019-09-12T08:51:56 | 2019-09-12T08:51:56 | 207,768,499 | 0 | 0 | null | 2023-04-21T20:37:28 | 2019-09-11T08:59:46 | JavaScript | UTF-8 | Python | false | false | 395 | py | from django.contrib import admin
from .models import Genre, Movie
class GenreAdmin(admin.ModelAdmin):
list_display = ('id', 'name')
class MovieAdmin(admin.ModelAdmin):
exclude = ('date_created',)
list_display = ('title', 'number_in_stock', 'daily_rate', 'release_year')
# Register your models here.
admin.site.register(Genre, GenreAdmin)
admin.site.register(Movie, MovieAdmin)
| [
"pop.shofi@mail.com"
] | pop.shofi@mail.com |
75d3f3cbc53aec0bbc59eb3519e7bbbfd1d50305 | a719bd5c20f377b4986595b5a64f2df4bad36f66 | /hw5/task2.py | b9567827a7c4e4efa83ee81e792a66a4794e3b07 | [] | no_license | tom6311tom6311/DLCV2018SPRING | d3e2222209dd1cf4ba1d584d758acb624e0d88a2 | ed0c5528a59ea67611eab850b83d698e92975ed8 | refs/heads/master | 2021-04-06T10:53:11.023432 | 2018-07-04T15:08:49 | 2018-07-04T15:08:49 | 125,384,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | import sys
import os
import preprocessor
import numpy as np
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv1D, Flatten, LSTM, Bidirectional
from keras.callbacks import EarlyStopping, TensorBoard
from keras import regularizers
ENABLE_EARLY_STOP = True
os.environ["CUDA_VISIBLE_DEVICES"] = str(sys.argv[1])
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
set_session(tf.Session(config=config))
TRAIN_FEAT_PATH = str(sys.argv[2])
VALID_FEAT_PATH = str(sys.argv[3])
TASK2_LOG_DIR = 'log_task2/'
LOG_SUB_DIR = str(sys.argv[4]) if str(sys.argv[4])[-1] == '/' else str(sys.argv[4]) + '/'
if not os.path.exists(TASK2_LOG_DIR):
os.makedirs(TASK2_LOG_DIR)
if not os.path.exists(LOG_SUB_DIR):
os.makedirs(LOG_SUB_DIR)
train_feats, train_labels = preprocessor.load_feats_and_labels(TRAIN_FEAT_PATH)
train_labels = np.eye(11)[train_labels]
valid_feats, valid_labels = preprocessor.load_feats_and_labels(VALID_FEAT_PATH)
valid_labels = np.eye(11)[valid_labels]
print(train_feats.shape)
print(train_labels.shape)
classifier = Sequential()
# classifier.add(LSTM(8, return_sequences=True, dropout=0.3, input_shape=train_feats.shape[1:]))
# classifier.add(LSTM(8, dropout=0.3))
classifier.add(Bidirectional(LSTM(32, return_sequences=True, dropout=0.3), input_shape=train_feats.shape[1:]))
classifier.add(Bidirectional(LSTM(32, dropout=0.3)))
# classifier.add(Dense(128, activation='relu'))
classifier.add(Dense(11, activation='softmax'))
classifier.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
callbacks = []
if ENABLE_EARLY_STOP:
callbacks.append(EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto'))
callbacks.append(TensorBoard(log_dir=LOG_SUB_DIR))
classifier.fit(train_feats, train_labels, validation_data=(valid_feats, valid_labels), epochs=100, batch_size=32, callbacks=callbacks)
classifier.save(LOG_SUB_DIR + 'model.hdf5', overwrite=True, include_optimizer=False)
print('Model saved.') | [
"tom.huang@positivegrid.com"
] | tom.huang@positivegrid.com |
5371ef32285aad17ae37e7185878ecb06b2d21ac | abffc57da3154adfa2e844698d50556e07e19336 | /u3/apps.py | 2e5525b184c73aad781ab5a4d24dc9517e9aa8d1 | [] | no_license | milym2/milogit | 7c67e299812bd024a338321c9c0766e17dc31893 | bd42ff36ff78e1faa21ac9fbec9407ee52b57a3c | refs/heads/master | 2023-03-31T10:51:52.727285 | 2021-04-06T23:47:28 | 2021-04-06T23:47:28 | 355,362,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | from django.apps import AppConfig
class U3Config(AppConfig):
name = 'u3'
| [
"milymejia8@gmail.com"
] | milymejia8@gmail.com |
f48b6acd7862cead47ba1fafc6a3ebd6557b73be | 303bac96502e5b1666c05afd6c2e85cf33f19d8c | /solutions/python3/993.py | d918d769447829914b6898f916ac2d314071b6a7 | [
"MIT"
] | permissive | jxhangithub/leetcode | 5e82f4aeee1bf201e93e889e5c4ded2fcda90437 | 0de1af607557d95856f0e4c2a12a56c8c57d731d | refs/heads/master | 2022-05-22T12:57:54.251281 | 2022-03-09T22:36:20 | 2022-03-09T22:36:20 | 370,508,127 | 1 | 0 | MIT | 2022-03-09T22:36:20 | 2021-05-24T23:16:10 | null | UTF-8 | Python | false | false | 430 | py | class Solution:
def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
def dfs(node, parent, depth, mod):
if node:
if node.val == mod:
return depth, parent
return dfs(node.left, node, depth + 1, mod) or dfs(node.right, node, depth + 1, mod)
dx, px, dy, py = dfs(root, None, 0, x) + dfs(root, None, 0, y)
return dx == dy and px != py | [
"cenkay.arapsagolu@gmail.com"
] | cenkay.arapsagolu@gmail.com |
869d4de40b4774adacf3db6705df1c3d7a5ab419 | cb2411c5e770bcdd07b170c2bc07f5e0cc72fc86 | /Greedy/55. Jump Game.py | 6ad22c17c7c535c3c1f269e0caf4426d60a13b2f | [] | no_license | roy355068/Algo | f79cf51662832e33664fc1d2479f79405d586e2e | 14febbb5d8504438ef143678dedc89d4b61b07c9 | refs/heads/master | 2021-05-11T04:32:11.434762 | 2018-01-30T00:01:26 | 2018-01-30T00:01:26 | 117,941,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | # Given an array of non-negative integers, you are initially positioned at the first index of the array.
# Each element in the array represents your maximum jump length at that position.
# Determine if you are able to reach the last index.
# For example:
# A = [2,3,1,1,4], return true.
# A = [3,2,1,0,4], return false.
# Idea is that use a maximumReach variable to track the max range of the array can reach
# if i > m, indicated that i is not reachable by previous element and jumping
# so end the program earlier and return False, else if maximumReach >= the index of
# last element, meaning that the last element is reachable, return True
class Solution(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
# O(N ^ 2) time, O(N) space complexity
if not nums or len(nums) == 1:
return True
# jump array is a dp array that used to check if the index is reachable
jump = [False for _ in xrange(len(nums))]
jump[0] = True
for i in xrange(len(nums)):
step = nums[i]
j = i + 1
# jump[i] == True means that this index is reachable based
# on the jump steps before it
if jump[i] == True:
# update all indices that is reachable from current stand point
while j <= len(nums) - 1 and j < i + step + 1:
jump[j] = True
j += 1
return jump[-1]
# Optimized, O(N) time, O(1) space complexity
i, reachable = 0, 0
# if i exceeds reachable, meaning that current index is never going
# to be reachable by jumping from previous indices
# hence stop the loop earlier
while i < len(nums) and i <= reachable:
reachable = max(reachable, i + nums[i])
i += 1
return i == len(nums)
| [
"bochengl@andrew.cmu.edu"
] | bochengl@andrew.cmu.edu |
27c7d0f1df93464f4b0283670d93b1c42e27ffaa | 7119da65c22d702e410cefeb63c337bc6d46d15e | /template06_template_howto_use/csvt02/urls.py | 810e4f6756fd4b678484f056115e9f041a82232b | [
"Apache-2.0"
] | permissive | ysh329/django-test | a9a6e75b5c7822717b0c2dfca24505cd08199669 | 8b2c75b5a02257d464de2b6cd6cdb8f12dcda630 | refs/heads/master | 2021-01-19T11:17:56.089309 | 2015-09-26T03:15:30 | 2015-09-26T03:15:30 | 42,867,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | """csvt02 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^$', 'blog.views.home', name='home'),
#url(r'^admin/', include(admin.site.urls)),
url(r'^index/$', 'blog.views.index'),
url(r'^index1/$', 'blog.views.index1'),
url(r'^index2/$', 'blog.views.index2'),
]
| [
"ysh329@sina.com"
] | ysh329@sina.com |
ab02bade9da8357492dd294ee109487abed33b14 | 0461c0707135ca789db004c4ff621ed0cce3d1ac | /egSentence.py | 04fbb09fa6e2e9badfac3294d371abdf881006be | [
"MIT"
] | permissive | aklcqq/py | a44acd3445c7ddc6bb9b85cf4e3ee785fa5fbcd8 | 39c4abf8ee02d7021d47bd608be0498c6753f466 | refs/heads/master | 2020-05-16T17:17:26.485191 | 2019-04-24T13:33:01 | 2019-04-24T13:33:01 | 183,190,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,417 | py | # This file is for example sentence
#!python3
#import lib
from selenium import webdriver
import urllib.request, urllib.parse, urllib.error
import re
import ssl
from bs4 import BeautifulSoup
import time
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
# use firefox as sebrowser
driver = webdriver.Firefox()
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# enter url
# url = input('Enter url: ')
url = 'https://ko.dict.naver.com/#/search?query='
# open data file
# fn = input('Enter file name: ')
# fh = open(fn)
fh = open('so.txt')
# open save file
#sfn = input('Enter file name: ')
fout = open('test0.txt' ,'a', encoding='utf-8') # write in file for note
for vocab in fh:
vocab = vocab.rstrip() # split the line
trueurl = url + vocab # send query
driver.get(trueurl)
time.sleep(1)
html = driver.page_source
# time.sleep(5)
soup = BeautifulSoup(html, 'html.parser')
# contents = soup.find_all("div", class_='row')
try:
contents = soup.find_all("p", class_='text') # add def of vocab
uni = contents[0].get_text()
fout.write(uni + '\n')
except:
fout.write(vocab+'NOT FOUND\n')
| [
"aklqq@163.com"
] | aklqq@163.com |
be591fc55d67ad2eff63ff3c3608b7c583ad18fa | b4196a2ce68a725f7e8c69e554b12faec0becbea | /id3rapgenius.py2 | 7c9504fdced3cf953e5b7ad5517e7ba74b7316fb | [] | no_license | cvzi/genius-downloader | 9a92a8b48ad756730ef564da62d336483340059d | 6ba81bf5a2a2337e0a8649932cfc95b90760dac9 | refs/heads/master | 2023-07-24T17:37:47.751946 | 2023-07-10T09:10:56 | 2023-07-10T09:10:56 | 32,405,791 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,796 | py2 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Python 2.7
# https://github.com/cvzi/genius-downloader
# Download lyrics from rap.genius.com and saves the lyrics in a mp3 or m4a file
import sys
import urllib
import urllib2
import re
import threading
import htmlentitydefs
import json
from mutagen import *
from mutagen.id3 import USLT
import mutagen.mp4
local = {
'baseurl': "http://rap.genius.com", # without trailing slash
'basesearchurl': "http://genius.com", # same here
'baseapiurl': "https://genius.com/api", # same here
'usage': """Downloads lyrics from rap.genius.com and saves the lyrics in a mp3 or m4a file
You can select the correct lyrics from the first 20 search results.
Usage: python id3rapgenius.py filename artist songname
This was inteded as a Mp3Tag extension.
To add it to the Mp3Tag context menu, do the following steps in Mp3Tag:
* Open Tools -> Options -> Tools
* Click on the "New" icon
* Enter the name that shall appear in the context menu
* For path choose your python.exe
* For parameter use: C:\pathtofile\id3rapgenius.py "%_path%" "$replace(%artist%,","")" "$replace(%title%,","")"
* Accept the "for all selected files" option"""
}
# http://effbot.org/zone/re-sub.htm#unescape-html
##
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
def unescape(text):
    """Replace HTML/XML character references and entities in *text*
    with the corresponding unicode characters.

    text -- the HTML (or XML) source text.
    Returns the plain text (unicode where substitutions occurred);
    unrecognised references and unexpected failures leave text as is.
    """
    def fixup(m):
        # m matches one "&...;" reference; return its replacement.
        text = m.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":
                    # hexadecimal form, e.g. &#x26;
                    return unichr(int(text[3:-1], 16))
                else:
                    # decimal form, e.g. &#38;
                    return unichr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text  # leave as is
    try:
        return re.sub(r"&#?\w+;", fixup, text)
    except BaseException:
        # On any unexpected failure, return the input untouched.
        return text
# Show progess with dots . . .
class doingSth(threading.Thread):
    """Background thread that animates ". .. ..." on the console
    until exit() is called, to show that work is in progress."""
    def __init__(self):
        threading.Thread.__init__(self)
        self.i = 0  # current animation frame index (0..3)
        self.exitFlag = 0  # set to 1 to ask the loop to stop
    def run(self):
        # Redraw the animation roughly every 0.3 seconds.
        while 0 == self.exitFlag:
            threading._sleep(0.3)
            print "\r", (". " if self.i == 0 else (".. " if self.i == 1 else ("..." if self.i == 2 else "   "))),
            self.i = (self.i + 1) % 4
        print "\r",
    def exit(self):
        # Signal the thread to stop, then give it time to notice.
        self.exitFlag = 1
        threading._sleep(0.4)
# Download from url with progress dots
def getUrl(url, getEncoding=False):
    """Fetch *url* and return the raw response body (a byte string).

    A doingSth thread animates progress dots while downloading.
    If getEncoding is True, returns (data, charset), where charset is
    parsed from the Content-Type header and defaults to "utf-8".
    """
    try:
        thread1 = doingSth()
        thread1.start()
        fs = None
        try:
            # Fake a browser user agent; some hosts reject urllib2's default.
            req = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
            fs = urllib2.urlopen(req)
            data = fs.read()
        except KeyboardInterrupt as ki:
            thread1.exit()
            raise ki  # allow CTRL-C to interrupt
        finally:
            if fs is not None:
                fs.close()
        thread1.exit()
        #data = unicode(data,'UTF8')
        #data = data.encode("utf-8")
        if getEncoding:
            try:
                enc = fs.headers.get("Content-Type").split("charset=")[1]
            except BaseException:
                enc = "utf-8"
            return data, enc
        return data
    except Exception as e:
        # Make sure the progress thread is stopped before re-raising.
        thread1.exit()
        raise e
# Set Lyrics of mp3 or m4a file
def setLyrics(filepath, lyrics):
    """Write *lyrics* into the tag of the audio file at *filepath*.

    Supports MP3 (USLT frame) and MP4/M4A lyrics atom via mutagen.
    Returns True on success, False for unsupported types or save errors.
    """
    # find correct encoding
    for enc in ('utf8', 'iso-8859-1', 'iso-8859-15',
                'cp1252', 'cp1251', 'latin1'):
        try:
            lyrics = lyrics.decode(enc)
            break
        except BaseException:
            pass
    # try to write to file
    audiofile = File(filepath)
    if isinstance(audiofile, mutagen.mp4.MP4):
        audiofile["\xa9lyr"] = lyrics
    elif isinstance(audiofile, mutagen.mp3.MP3):
        audiofile[u"USLT:desc:'eng'"] = USLT(
            encoding=3, lang=u'eng', desc=u'desc', text=lyrics)
    else:
        print "###unkown file type: ", type(audiofile)
        return False
    try:
        audiofile.save()
    except mutagen.MutagenError as e:
        print "Could not save file:"
        print e
        return False
    return True
if __name__ == "__main__":
    # --- command-line handling ------------------------------------------
    # Expects exactly: <script> <audio file path> <artist> <song title>
    if len(sys.argv) != 4:
        print "Error: Wrong argument number"
        print "\n" + local['usage']
        quit(1)
    filename = sys.argv[1]
    # Mp3Tag hands over cp1252-encoded arguments; re-encode them as UTF-8.
    artist = sys.argv[2].decode(
        encoding="windows-1252").encode('utf-8').strip()
    song = sys.argv[3].decode(encoding="windows-1252").encode('utf-8').strip()
    print "%r\n%r\n%r" % (sys.argv[1], sys.argv[2], sys.argv[3])
    foundsong = False
    # First attempt: guess the canonical Genius URL straight from the tags.
    url = local['baseurl'] + '/' + \
        artist.replace(" ", "-") + '-' + song.replace(" ", "-") + "-lyrics"
    try:
        print "Trying exact name: " + artist.replace(" ", "-") + '-' + song.replace(" ", "-")
    except BaseException:
        print "Trying exact name: %r - %r" % (artist.replace(" ", "-"), song.replace(" ", "-"))
    try:
        html = getUrl(url)
    except urllib2.HTTPError:
        # Treat an HTTP error like Genius' own "not found" page below.
        html = "<h1>Looks like you came up short!<br>(Page not found)</h1>"
    except KeyboardInterrupt:
        sys.exit()  # Exit program on Ctrl-C
    if not "<h1>Looks like you came up short!<br>(Page not found)</h1>" in html:
        # Page exists:
        foundsong = True
        print "Found Lyrics!"
    # Second attempt: strip a leading "The", featured artists and bracketed
    # text from the artist tag, then retry the direct URL.
    else:
        # Remove a leading "The", featuring artists or brackets in general
        if artist[0:4] == "The " or artist[0:
                4] == "The " or "(" in artist or "feat" in artist or "Feat" in artist or "ft." in artist or "Ft." in artist:
            if artist[0:4] == "The " or artist[0:4] == "The ":
                tartist = artist[4:]
            else:
                tartist = artist
            tartist = tartist.split("(")[0].split("feat")[0].split(
                "Feat")[0].split("ft.")[0].split("Ft.")[0].strip()
            # Console output: try cp437 first, fall back to plain ASCII.
            try:
                print filename.encode(encoding="ibm437", errors="ignore"), tartist.encode(encoding="ibm437", errors="ignore"), song.encode(encoding="ibm437", errors="ignore")
            except UnicodeDecodeError:
                try:
                    print filename.encode(encoding="ascii", errors="ignore"), tartist.encode(encoding="ascii", errors="ignore"), song.encode(encoding="ascii", errors="ignore")
                except BaseException:
                    pass
            url = local['baseurl'] + '/' + tartist.replace(" ", "-").replace(
                "&", "and") + '-' + song.replace(" ", "-").replace("&", "and") + "-lyrics"
            try:
                print "Trying exact name: " + tartist.replace(" ", "-").replace("&", "and").encode(encoding="ibm437", errors="ignore") + '-' + song.replace(" ", "-").replace("&", "and").encode(encoding="ibm437", errors="ignore")
            except UnicodeDecodeError:
                try:
                    print "Trying exact name: " + tartist.replace(" ", "-").replace("&", "and").encode(encoding="ascii", errors="ignore") + '-' + song.replace(" ", "-").replace("&", "and").encode(encoding="ascii", errors="ignore")
                except BaseException:
                    print "Trying exact name"
            try:
                html = getUrl(url)
            except urllib2.HTTPError:
                html = "<h1>Looks like you came up short!<br>(Page not found)</h1>"
            except KeyboardInterrupt:
                sys.exit()  # Exit program on Ctrl-C
            if not "<h1>Looks like you came up short!<br>(Page not found)</h1>" in html:
                # Page exists:
                foundsong = True
                print "Found Lyrics!"
    # Fallback: query the Genius search API and let the user pick a result.
    if not foundsong:
        # Try to search the song:
        print "No result for:"
        # Strip "The", bracketed text and featuring credits for the query.
        searchartist = artist.split("(")[0].split("feat")[0].split("Feat")[0].split(
            "ft.")[0].split("Ft.")[0].replace("The ", "").replace("the ", "").strip()
        searchsong = song.split("(")[0].split("feat")[0].split(
            "Feat")[0].split("ft.")[0].split("Ft.")[0].strip()
        try:
            print artist + " - " + song
        except BaseException:
            print "%r - %r" % (artist, song)
        print ""
        print "Searching on website with:"
        try:
            print "Artist: " + searchartist.decode("utf8").encode("ibm437")
            print "Song: " + searchsong.decode("utf8").encode("ibm437")
        except BaseException:
            pass
        searchurl = local['basesearchurl'] + "/search?hide_unexplained_songs=false&q=" + \
            urllib.quote_plus(searchartist) + "%20" + urllib.quote_plus(searchsong)
        try:
            text, encoding = getUrl(local["baseapiurl"] +
                                    "/search/song?q=" +
                                    urllib.quote_plus(searchartist) +
                                    "%20" +
                                    urllib.quote_plus(searchsong), getEncoding=True)
        except urllib2.HTTPError as e:
            print "Could not open: " + searchurl
            print e
            exit()
        except KeyboardInterrupt:
            sys.exit()  # Exit program on Ctrl-C
        obj = json.loads(text, encoding=encoding)
        results_length = 0
        assert obj["response"]["sections"][0]["type"] == "song", "Wrong type in json result"
        results_length = len(obj["response"]["sections"][0]["hits"])
        if 0 == results_length:
            print "0 songs found!"
        else:
            # List every hit as "index: Artist - Title" for the prompt below.
            print "## -------------------------"
            results = []
            i = 1
            for hit in obj["response"]["sections"][0]["hits"]:
                resulturl = hit["result"]["url"].encode(encoding="utf-8")
                resultsongname = hit["result"]["title_with_featured"]
                resultartist = hit["result"]["primary_artist"]["name"]
                resultname = resultartist + " - " + resultsongname
                # Drop zero-width spaces and turn non-breaking spaces into
                # plain spaces before showing the title.
                resultname = resultname.replace(
                    u"\u200b", u"").replace(
                    u"\xa0", u" ").strip()
                results.append([resultname, resulturl])
                try:
                    print "%2d: %s" % (i, resultname.encode(encoding="ibm437", errors="ignore"))
                except BaseException:
                    print "%2d: %r" % (i, resultname)
                i += 1
            print "---------------------------"
            # Prompt until the user enters a valid result number (0 quits).
            while True:
                print "Please choose song (0 to exit)"
                try:
                    print "close to: " + artist.decode("utf8").encode("ibm437") + " - " + song.decode("utf8").encode("ibm437")
                except BaseException:
                    pass
                inp = input()
                try:
                    val = int(inp)
                    if 0 == val:
                        exit()
                    assert val > 0
                    assert val < i
                    break
                except ValueError:
                    print "Sorry, wrong Number!"
                except AssertionError:
                    print "Wtf?!"
            print ""
            try:
                print "Downloading lyrics #%d: %s" % (val, results[val - 1][0])
            except BaseException:
                print "Downloading lyrics #%d: %r" % (val, results[val - 1][0])
            print ""
            #url = local['baseurl']+results[val-1][1]
            # in newer versions, the url seems to be complete already
            url = results[val - 1][1]
            try:
                html = getUrl(url)
                print(url)
            except urllib2.HTTPError as e:
                print "Could not open: " + url
                print e
                exit()
            except KeyboardInterrupt:
                sys.exit()  # Exit program on Ctrl-C
            if not "<h1>Looks like you came up short!<br>(Page not found)</h1>" in html:
                # Page exists:
                foundsong = True
            else:
                print "URL wrong?! " + url
    # Extract the lyrics from the fetched page, clean them up and save them
    # into the audio file's tag.
    if foundsong:
        lyrics = html.split('<div class="lyrics">')[1].split("</div>")[0]
        if "for this song have yet to be released" in lyrics:
            print "Lyrics for this song have yet to be released. Please check back once the song has been released."
            threading._sleep(10)
            exit(0)
        # Remove <script>...</script>
        while "<script" in lyrics:
            before = lyrics.split("<script")[0]
            after = lyrics.split("</script>", 1)[1]
            lyrics = before + after
        # Replace accents, prime and apostrophe with 'closing single quotation
        # mark'
        primes = ["´", "`", "’", "′", "ʻ", "‘"]
        for symbol in primes:
            lyrics = lyrics.replace(symbol, "'")
        # Remove all html tags and add windows line breaks
        lyrics = re.sub(
            '<[^<]+?>',
            '',
            lyrics).strip().replace(
            "\r\n",
            "\n").replace(
            "\n",
            "\r\n")
        # Replace &XXX; html encoding line by line and remove encoding with
        # str()
        lines = lyrics.split("\n")
        lyrics = []
        for line in lines:
            esc = unescape(line.decode('utf-8')).encode('utf-8')
            print(esc)
            lyrics.append(str(esc))
        lyrics = "\n".join(lyrics)
        print "---------------------------"
        try:
            print lyrics
        except UnicodeEncodeError:
            try:
                print lyrics.encode(sys.stdout.encoding, errors='ignore')
            except BaseException:
                print "##Sorry, encoding problems with terminal##"
                pass
        print "---------------------------"
        # Write the cleaned lyrics into the mp3/m4a tag.
        if setLyrics(filename, lyrics):
            try:
                print "Saved lyrics to file " + filename
            except BaseException:
                print "Saved lyrics to file."
            threading._sleep(3)
        else:
            print "Could not save lyrics to file " + filename
            threading._sleep(60)
    else:
        print "No song results for " + song + " by " + artist
        threading._sleep(10)
| [
"cuzi@openmail.cc"
] | cuzi@openmail.cc |
0d58a2ce4eb9faa73cccf4e8472c5309e6b4c691 | 5394d78d778dd1163b1548b185e0612e515c2285 | /HW4/problem1.py | f8009f0d78d7a082b8effa64289ff16da3174a0a | [] | no_license | claireedanaher/ML_hw | bf485b2027f612e56e388ae02711cd3dce28a95f | 0a17a46a336eb3648f82ce40e47b7605b8e9255e | refs/heads/master | 2020-04-08T05:38:24.132129 | 2018-11-25T19:49:27 | 2018-11-25T19:49:27 | 159,068,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,831 | py | import numpy as np
import math
#-------------------------------------------------------------------------
'''
Problem 1: softmax regression
In this problem, you will implement the softmax regression for multi-class classification problems.
The main goal of this problem is to extend the logistic regression method to solving multi-class classification problems.
We will get familiar with computing gradients of vectors/matrices.
We will use multi-class cross entropy as the loss function and stochastic gradient descent to train the model parameters.
You could test the correctness of your code by typing `nosetests test1.py` in the terminal.
Notations:
---------- input data ----------------------
p: the number of input features, an integer scalar.
c: the number of classes in the classification task, an integer scalar.
x: the feature vector of a data instance, a float numpy matrix of shape p by 1.
y: the label of a training instance, an integer scalar value. The values can be 0,1,2, ..., or (c-1).
---------- model parameters ----------------------
W: the weight matrix of softmax regression, a float numpy matrix of shape (c by p).
b: the bias values of softmax regression, a float numpy matrix of shape c by 1.
---------- values ----------------------
z: the linear logits, a float numpy matrix of shape c by 1.
a: the softmax activations, a float numpy matrix of shape c by 1.
L: the multi-class cross entropy loss, a float scalar.
---------- partial gradients ----------------------
dL_da: the partial gradients of the loss function L w.r.t. the activations a, a float numpy matrix of shape c by 1.
The i-th element dL_da[i] represents the partial gradient of the loss function L w.r.t. the i-th activation a[i]: d_L / d_a[i].
da_dz: the partial gradient of the activations a w.r.t. the logits z, a float numpy matrix of shape (c by c).
The (i,j)-th element of da_dz represents the partial gradient ( d_a[i] / d_z[j] )
dz_dW: the partial gradient of logits z w.r.t. the weight matrix W, a numpy float matrix of shape (c by p).
The (i,j)-th element of dz_dW represents the partial gradient of the i-th logit (z[i]) w.r.t. the weight W[i,j]: d_z[i] / d_W[i,j]
dz_db: the partial gradient of the logits z w.r.t. the biases b, a float matrix of shape c by 1.
Each element dz_db[i] represents the partial gradient of the i-th logit z[i] w.r.t. the i-th bias b[i]: d_z[i] / d_b[i]
---------- partial gradients of parameters ------------------
dL_dW: the partial gradients of the loss function L w.r.t. the weight matrix W, a numpy float matrix of shape (c by p).
The i,j-th element dL_dW[i,j] represents the partial gradient of the loss function L w.r.t. the i,j-th weight W[i,j]: d_L / d_W[i,j]
dL_db: the partial gradient of the loss function L w.r.t. the biases b, a float numpy matrix of shape c by 1.
The i-th element dL_db[i] represents the partial gradient of the loss function w.r.t. the i-th bias: d_L / d_b[i]
---------- training ----------------------
alpha: the step-size parameter of gradient descent, a float scalar.
n_epoch: the number of passes to go through the training dataset in order to train the model, an integer scalar.
'''
#-----------------------------------------------------------------
# Forward Pass
#-----------------------------------------------------------------
#-----------------------------------------------------------------
def compute_z(x,W,b):
    '''
    Compute the linear logits of one data instance: z = W x + b.
    Input:
        x: feature vector of the instance, a float numpy matrix of shape p by 1.
        W: weight matrix, a float numpy matrix of shape c by p.
        b: bias vector, a float numpy matrix of shape c by 1.
    Output:
        z: the linear logits, a float numpy matrix of shape c by 1.
    '''
    # Affine transform: matrix-vector product followed by the bias shift.
    projected = W.dot(x)
    z = projected + b
    return z
#-----------------------------------------------------------------
def compute_a(z):
    '''
    Compute the softmax activations from the logits.
    Input:
        z: the logits, a float numpy matrix of shape c by 1 (c = number of classes).
    Output:
        a: the softmax activations, a float numpy matrix of shape c by 1.
    '''
    # Subtract the max logit before exponentiating for numerical stability;
    # softmax is invariant under a constant shift of its inputs.
    shifted = z - z.max()
    exps = np.exp(shifted)
    a = exps / float(exps.sum())
    return a
#-----------------------------------------------------------------
def compute_L(a,y):
    '''
    Compute the multi-class cross-entropy loss of one instance.
    Input:
        a: the softmax activations, a float numpy matrix of shape c by 1.
        y: the true class label, an integer in {0, 1, ..., c-1}.
    Output:
        L: the cross-entropy loss -log(a[y]), a float scalar.
    '''
    prob = a[y]
    # Guard against log(0): cap the loss at a large finite constant.
    if prob == 0:
        return 1e6
    return float(-np.log(prob))
#-----------------------------------------------------------------
def forward(x,y,W,b):
    '''
    Forward pass for one training instance: logits -> softmax -> loss.
    Input:
        x: feature vector, a float numpy matrix of shape p by 1.
        y: true class label, an integer in {0, ..., c-1}.
        W: weight matrix, a float numpy matrix of shape c by p.
        b: bias vector, a float numpy matrix of shape c by 1.
    Output:
        z: linear logits, a float numpy matrix of shape c by 1.
        a: softmax activations, a float numpy matrix of shape c by 1.
        L: the cross-entropy loss, a float scalar.
    '''
    # Chain the three forward computations in order.
    logits = compute_z(x, W, b)
    activations = compute_a(logits)
    loss = compute_L(activations, y)
    return logits, activations, loss
#-----------------------------------------------------------------
# Compute Local Gradients
#-----------------------------------------------------------------
#-----------------------------------------------------------------
def compute_dL_da(a, y):
    '''
    Gradient of the cross-entropy loss w.r.t. the softmax activations.
    Input:
        a: softmax activations, a float numpy matrix of shape c by 1.
        y: true class label, an integer in {0, ..., c-1}.
    Output:
        dL_da: gradient vector, a float numpy matrix of shape c by 1.
               Only entry y is nonzero, since L = -log(a[y]).
    '''
    num_classes = a.shape[0]
    dL_da = np.asmatrix(np.zeros((num_classes, 1)))
    prob = a[y]
    if prob == 0:
        # Avoid division by zero with a large finite surrogate gradient.
        dL_da[y] = -1e8
    else:
        dL_da[y] = -1.0 / prob
    return dL_da
#-----------------------------------------------------------------
def compute_da_dz(a):
    '''
    Jacobian of the softmax activations w.r.t. the logits.
    Input:
        a: softmax activations, a float numpy matrix of shape c by 1.
    Output:
        da_dz: a float numpy matrix of shape c by c, where entry (i, j)
               is d a[i] / d z[j]:
                   a[i] * (1 - a[i])  if i == j
                  -a[i] * a[j]        otherwise
    '''
    # Start with the off-diagonal pattern -a a^T ...
    da_dz = -a.dot(a.T)
    # ... then overwrite the diagonal with a_i * (1 - a_i).
    for k in range(a.shape[0]):
        da_dz[k, k] = a[k] * (1 - a[k])
    return da_dz
#-----------------------------------------------------------------
def compute_dz_dW(x,c):
    '''
    Local gradient of the logits z w.r.t. the weight matrix W.
    Input:
        x: feature vector, a float numpy matrix of shape p by 1.
        c: the number of classes, an integer.
    Output:
        dz_dW: a float numpy matrix of shape c by p whose (i, j) entry is
               d z[i] / d W[i,j] = x[j]; every row equals x^T because
               z[i] = sum_j W[i,j] * x[j] + b[i].
    '''
    # Stack c copies of x^T, one row per class.
    dz_dW = x.T
    for _ in range(c - 1):
        dz_dW = np.concatenate((dz_dW, x.T), axis=0)
    return dz_dW
#-----------------------------------------------------------------
def compute_dz_db(c):
    '''
    Local gradient of the logits z w.r.t. the biases b.
    Input:
        c: the number of classes, an integer.
    Output:
        dz_db: a numpy matrix of shape c by 1 filled with ones, since
               z[i] = (W x)[i] + b[i] gives d z[i] / d b[i] = 1.
    '''
    dz_db = np.asmatrix(np.ones((c, 1), dtype=int))
    return dz_db
#-----------------------------------------------------------------
# Back Propagation
#-----------------------------------------------------------------
#-----------------------------------------------------------------
def backward(x,y,a):
    '''
    Back propagation for one instance: compute all local gradients.
    Input:
        x: feature vector, a float numpy matrix of shape p by 1.
        y: true class label, an integer in {0, ..., c-1}.
        a: softmax activations, a float numpy matrix of shape c by 1.
    Output:
        dL_da: gradient of the loss w.r.t. the activations, shape c by 1.
        da_dz: Jacobian of the activations w.r.t. the logits, shape c by c.
        dz_dW: gradient of the logits w.r.t. the weights, shape c by p.
        dz_db: gradient of the logits w.r.t. the biases, shape c by 1.
    '''
    num_classes = a.shape[0]
    # Local gradient at each node of the computational graph.
    dL_da = compute_dL_da(a, y)
    da_dz = compute_da_dz(a)
    dz_dW = compute_dz_dW(x, num_classes)
    dz_db = compute_dz_db(num_classes)
    return dL_da, da_dz, dz_dW, dz_db
#-----------------------------------------------------------------
def compute_dL_dz(dL_da,da_dz):
    '''
    Chain rule: gradient of the loss L w.r.t. the logits z.
    Input:
        dL_da: gradient of the loss w.r.t. the activations, shape c by 1.
        da_dz: Jacobian of the activations w.r.t. the logits, shape c by c.
    Output:
        dL_dz: gradient of the loss w.r.t. the logits, shape c by 1,
               computed as (da/dz) . (dL/da).
    '''
    return da_dz.dot(dL_da)
#-----------------------------------------------------------------
def compute_dL_dW(dL_dz,dz_dW):
    '''
    Chain rule: gradient of the loss L w.r.t. the weight matrix W.
    Input:
        dL_dz: gradient of the loss w.r.t. the logits, shape c by 1.
        dz_dW: gradient of the logits w.r.t. the weights, shape c by p.
    Output:
        dL_dW: gradient of the loss w.r.t. W, shape c by p, where
               dL/dW[i,j] = dL/dz[i] * dz[i]/dW[i,j].
    '''
    # Broadcasting dL_dz (c x 1) across the columns of dz_dW (c x p)
    # scales row i of dz_dW by dL_dz[i].
    return np.multiply(dz_dW, dL_dz)
#-----------------------------------------------------------------
def compute_dL_db(dL_dz,dz_db):
    '''
    Chain rule: gradient of the loss L w.r.t. the biases b.
    Input:
        dL_dz: gradient of the loss w.r.t. the logits, shape c by 1.
        dz_db: gradient of the logits w.r.t. the biases, shape c by 1.
    Output:
        dL_db: gradient of the loss w.r.t. b, shape c by 1, where
               dL/db[i] = dL/dz[i] * dz[i]/db[i].
    '''
    # Element-wise product of the two column vectors.
    return np.multiply(dz_db, dL_dz)
#-----------------------------------------------------------------
# gradient descent
#-----------------------------------------------------------------
#--------------------------
def update_W(W, dL_dW, alpha=0.001):
    '''
    One gradient-descent step on the weight matrix.
    Input:
        W: current weight matrix, a float numpy matrix of shape c by p.
        dL_dW: gradient of the loss w.r.t. W, shape c by p.
        alpha: step size, a float scalar.
    Output:
        W: the updated weight matrix, shape c by p.
    '''
    # Move against the gradient direction, scaled by the step size.
    W = W - dL_dW * alpha
    return W
#--------------------------
def update_b(b, dL_db, alpha=0.001):
    '''
    One gradient-descent step on the bias vector.
    Input:
        b: current bias vector, a float numpy matrix of shape c by 1.
        dL_db: gradient of the loss w.r.t. b, shape c by 1.
        alpha: step size, a float scalar.
    Output:
        b: the updated bias vector, shape c by 1.
    '''
    # Move against the gradient direction, scaled by the step size.
    b = b - dL_db * alpha
    return b
#--------------------------
# train
def train(X, Y, alpha=0.01, n_epoch=100):
    '''
    Train a softmax-regression model with stochastic gradient descent.
    Input:
        X: feature matrix, a float numpy matrix of shape n by p.
        Y: integer labels of the n instances (values 0 .. c-1).
        alpha: step size, a float scalar.
        n_epoch: number of passes over the training set, an integer.
    Output:
        W: trained weight matrix, a float numpy matrix of shape c by p.
        b: trained bias vector, a float numpy matrix of shape c by 1.
    '''
    n_features = X.shape[1]
    n_classes = max(Y) + 1
    # Random initialization of the parameters.
    W = np.asmatrix(np.random.rand(n_classes, n_features))
    b = np.asmatrix(np.random.rand(n_classes, 1))
    for _ in range(n_epoch):
        # One SGD update per training instance.
        for x, y in zip(X, Y):
            x = x.T  # convert the row to a column vector
            # Forward pass: logits, softmax and loss.
            _, a, _ = forward(x, y, W, b)
            # Back propagation: local gradients at each node.
            dL_da, da_dz, dz_dW, dz_db = backward(x, y, a)
            # Chain rule: global gradients of the parameters.
            dL_dz = compute_dL_dz(dL_da, da_dz)
            dL_dW = compute_dL_dW(dL_dz, dz_dW)
            dL_db = compute_dL_db(dL_dz, dz_db)
            # Gradient-descent parameter update.
            W = update_W(W, dL_dW, alpha)
            b = update_b(b, dL_db, alpha)
    return W, b
#--------------------------
def predict(Xtest, W, b):
    '''
    Predict labels and class probabilities for a test set.
    Input:
        Xtest: feature matrix, a float numpy matrix of shape n_test by p.
        W: weight matrix, a float numpy matrix of shape c by p.
        b: bias vector, a float numpy matrix of shape c by 1.
    Output:
        Y: predicted labels, a numpy array of length n_test (values 0 .. c-1).
        P: predicted class probabilities, a float numpy matrix of
           shape n_test by c; row i holds the softmax output of instance i.
    '''
    n_test = Xtest.shape[0]
    n_classes = W.shape[0]
    Y = np.zeros(n_test)
    P = np.asmatrix(np.zeros((n_test, n_classes)))
    for i, x in enumerate(Xtest):
        x = x.T  # convert the row to a column vector
        # Forward pass: softmax over the logits of this instance.
        a = compute_a(compute_z(x, W, b))
        # The predicted class is the one with the highest probability.
        Y[i] = np.argmax(a)
        P[i, :] = a.T
    return Y, P
#-----------------------------------------------------------------
# gradient checking
#-----------------------------------------------------------------
#-----------------------------------------------------------------
def check_da_dz(z, delta=1e-7):
    '''
    Approximate the softmax Jacobian by finite differences (gradient check).
    Input:
        z: logits, a float numpy matrix of shape c by 1.
        delta: perturbation size for the finite difference, a float scalar.
    Output:
        da_dz: approximated Jacobian, a float numpy matrix of shape c by c,
               whose (i, j) entry estimates d a[i] / d z[j].
    '''
    c = z.shape[0]  # number of classes
    da_dz = np.asmatrix(np.zeros((c, c)))
    for i in range(c):
        for j in range(c):
            # Perturb only the j-th logit and measure the change in a[i].
            bump = np.asmatrix(np.zeros((c, 1)))
            bump[j] = delta
            da_dz[i, j] = (compute_a(z + bump)[i, 0] - compute_a(z)[i, 0]) / delta
    return da_dz
#-----------------------------------------------------------------
def check_dL_da(a, y, delta=1e-7):
'''
Compute local gradient of the multi-class cross-entropy function w.r.t. the activations using gradient checking.
Input:
a: the activations of a training instance, a float numpy vector of shape c by 1. Here c is the number of classes.
y: the label of a training instance, an integer scalar value. The values can be 0,1,2, ..., or (c-1).
delta: a small number for gradient check, a float scalar.
Output:
dL_da: the approximated local gradients of the loss function w.r.t. the activations, a float numpy vector of shape c by 1.
'''
c = a.shape[0] # number of classes
dL_da = np.asmatrix(np.zeros((c,1))) # initialize the vector as all zeros
for i in range(c):
d = np.asmatrix(np.zeros((c,1)))
d[i] = delta
dL_da[i] = ( compute_L(a+d,y)
- compute_L(a,y)) / delta
return dL_da
#--------------------------
def check_dz_dW(x, W, b, delta=1e-7):
'''
compute the local gradient of the logit function using gradient check.
Input:
x: the feature vector of a data instance, a float numpy vector of shape p by 1. Here p is the number of features/dimensions.
W: the weight matrix of softmax regression, a float numpy matrix of shape (c by p). Here c is the number of classes.
b: the bias values of softmax regression, a float numpy vector of shape c by 1.
delta: a small number for gradient check, a float scalar.
Output:
dz_dW: the approximated local gradient of the logits w.r.t. the weight matrix computed by gradient checking, a numpy float matrix of shape (c by p).
The i,j -th element of dz_dW represents the partial gradient of the i-th logit (z[i]) w.r.t. the weight W[i,j]: d_z[i] / d_W[i,j]
'''
c,p = W.shape # number of classes and features
dz_dW = np.asmatrix(np.zeros((c,p)))
for i in range(c):
for j in range(p):
d = np.asmatrix(np.zeros((c,p)))
d[i,j] = delta
dz_dW[i,j] = (compute_z(x,W+d, b)[i,0] - compute_z(x, W, b))[i,0] / delta
return dz_dW
#--------------------------
def check_dz_db(x, W, b, delta=1e-7):
'''
compute the local gradient of the logit function using gradient check.
Input:
x: the feature vector of a data instance, a float numpy vector of shape p by 1. Here p is the number of features/dimensions.
W: the weight matrix of softmax regression, a float numpy matrix of shape (c by p). Here c is the number of classes.
b: the bias values of softmax regression, a float numpy vector of shape c by 1.
delta: a small number for gradient check, a float scalar.
Output:
dz_db: the approximated local gradient of the logits w.r.t. the biases using gradient check, a float vector of shape c by 1.
Each element dz_db[i] represents the partial gradient of the i-th logit z[i] w.r.t. the i-th bias: d_z[i] / d_b[i]
'''
c,p = W.shape # number of classes and features
dz_db = np.asmatrix(np.zeros((c,1)))
for i in range(c):
d = np.asmatrix(np.zeros((c,1)))
d[i] = delta
dz_db[i] = (compute_z(x,W, b+d)[i,0] - compute_z(x, W, b)[i,0]) / delta
return dz_db
#-----------------------------------------------------------------
def check_dL_dW(x,y,W,b,delta=1e-7):
'''
Compute the gradient of the loss function w.r.t. the weights W using gradient checking.
Input:
x: the feature vector of a training instance, a float numpy vector of shape p by 1. Here p is the number of features/dimensions.
y: the label of a training instance, an integer scalar value. The values can be 0,1,2, ..., or (c-1).
W: the weight matrix of softmax regression, a float numpy matrix of shape (c by p). Here c is the number of classes.
b: the bias values of softmax regression, a float numpy vector of shape c by 1.
delta: a small number for gradient check, a float scalar.
Output:
dL_dW: the approximated gradients of the loss function w.r.t. the weight matrix, a numpy float matrix of shape (c by p).
'''
c, p = W.shape
dL_dW = np.asmatrix(np.zeros((c,p)))
for i in range(c):
for j in range(p):
d = np.asmatrix(np.zeros((c,p)))
d[i,j] = delta
dL_dW[i,j] = ( forward(x,y,W+d,b)[-1] - forward(x,y,W,b)[-1] ) / delta
return dL_dW
#-----------------------------------------------------------------
def check_dL_db(x,y,W,b,delta=1e-7):
'''
Compute the gradient of the loss function w.r.t. the bias b using gradient checking.
Input:
x: the feature vector of a training instance, a float numpy vector of shape p by 1. Here p is the number of features/dimensions.
y: the label of a training instance, an integer scalar value. The values can be 0,1,2, ..., or (c-1).
W: the weight matrix of softmax regression, a float numpy matrix of shape (c by p). Here c is the number of classes.
b: the bias values of softmax regression, a float numpy vector of shape c by 1.
delta: a small number for gradient check, a float scalar.
Output:
dL_db: the approxmiated gradients of the loss function w.r.t. the biases, a float vector of shape c by 1.
'''
c, p = W.shape
dL_db = np.asmatrix(np.zeros((c,1)))
for i in range(c):
d = np.asmatrix(np.zeros((c,1)))
d[i] = delta
dL_db[i] = ( forward(x,y,W,b+d)[-1] - forward(x,y,W,b)[-1] ) / delta
return dL_db
| [
"claireedanaher@gmail.com"
] | claireedanaher@gmail.com |
5b66e5488adb223bed0d7e7749eeb60e9097dcc8 | bfa60e4febca9d4bd07f82482586b553ab0b6102 | /mybbs/bbs/migrations/0002_auto_20170721_1146.py | 243a3f0a6e8e0bba6e220f59c4a09b631bd4b39f | [] | no_license | txowner/website | a9d0f427fad36f221b1b5d80d4b1bfd7583c6714 | 9e8d7c6dca0904a272a073bfaabd2da11bb13b2c | refs/heads/master | 2021-01-01T16:09:13.048481 | 2017-08-04T11:06:15 | 2017-08-04T11:06:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-21 03:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bbs', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='head_img',
field=models.ImageField(blank=True, max_length=150, null=True, upload_to='uploads'),
),
]
| [
"46785647@qq.com"
] | 46785647@qq.com |
dd43ef149c835d13ebd09048a1987e06f39bc38f | aac0c961fcea65cbbe61055bb25d8788f9b97e5e | /Practica01.py | a78f4d75f2c4adf12f5a718f83e13d354981b2a1 | [] | no_license | julioolivares90/PracticaOpenCV | 987d0322e78396f3f84d3b790c9b6593afb06082 | 47adfecf1008935a474944f53bf44871ded8f09a | refs/heads/master | 2022-09-30T23:48:08.994330 | 2020-06-06T19:22:00 | 2020-06-06T19:22:00 | 270,080,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | import cv2
from datetime import date
from datetime import datetime
#cv2.IMREAD_UNCHANGED = -1
#0 imagen en blanco y negro
#1 imagen a color
img = cv2.imread('lena.jpg',1)
print(img)
cv2.imshow('image',img)
key = cv2.waitKey(0)
if key == 27:
cv2.destroyAllWindows()
pass
elif key == ord('s'):
name_file = 'lena_copy.png'
cv2.imwrite(name_file,img)
cv2.destroyAllWindows() | [
"julioolivares90@hotmail.com"
] | julioolivares90@hotmail.com |
add36c49f08156fa9f65d5e079441f0e3c7f56f7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03168/s086851558.py | 7143c7be26364089fb75fade50516b0a34c2696e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | import sys
def input(): return sys.stdin.readline().rstrip()
def main():
n=int(input())
P=list(map(float,input().split()))
dp=[[0]*(n+1) for _ in range(n)]#コインi(0-)までで,j枚が表
dp[0][0]=1-P[0]
dp[0][1]=P[0]
for i in range(1,n):
for j in range(i+2):
if j==0:
dp[i][j]=dp[i-1][j]*(1-P[i])
else:
dp[i][j]=dp[i-1][j-1]*P[i]+dp[i-1][j]*(1-P[i])
print(sum(dp[-1][n//2+1:]))
if __name__=='__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
24ed744017c918e52f7deb73afd93041eb195411 | 8b5268b4111ef72fd4b0372969785a4407104c42 | /mapper_v2/tools/workspacePlotter.py | ff46dd8335edc24cfe2b6df31469306f7faa9dc3 | [] | no_license | oliverek12/robot_arm_workspace_mapper | 6b05c268fad21f84d829c6ba6b98bf02663f584d | 3e18249d719722af28a4d5c31bbdcfdaccf4af5a | refs/heads/master | 2020-12-24T15:22:28.327156 | 2016-03-07T05:33:14 | 2016-03-07T05:33:14 | 42,014,865 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,556 | py | #! /usr/bin/python
################################################################
### This is a tool used to plot the output csv file from ###
### the workspace_mapper node. ###
### author: Oliver Ebeling-Koning <odek@vt.edu> ###
### date: 09/07/2015 ###
################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits import mplot3d
from stl import mesh
import csv, time
import numpy as np
import sys, os
import math
#################################
# Subsample to display
subsampleEach = 1
#################################
# Check arguments
if len(sys.argv) != 2:
print "ERROR: wrong number of arguments.\n\tUSAGE: This tool takes in the csv file to use as an argument"
exit(1)
if not os.path.exists(sys.argv[1]):
print "ERROR: the file `%s` does not exist" % sys.argv[1]
exit(1)
# Make lists to hold values
xList = []
yList = []
zList = []
counter = 10
# Read in csv File
with open(sys.argv[1], 'rb') as csvFile:
csvReader = csv.reader(csvFile, delimiter=',')
for row in csvReader:
counter = counter + 1
if counter % subsampleEach == 0:
if not len(row) < 3:
xList.append(float(row[0]))
yList.append(float(row[1]))
zList.append(float(row[2]))
# Get distances
distances = []
for ii in range(0, len(xList)):
# dist = np.linalg.norm(a-b)
distances.append(math.sqrt(math.pow(xList[ii],2)+math.pow(yList[ii],2)+math.pow(zList[ii],2)))
# Scale all distances to 0-100 for colormap (y=y1+((x-x1)(y2-y1))/(x2-x1))
maximumDist = max(distances) # y2 ... x2=100
minimumDist = min(distances) # y1 .. x1=0
newDistances = []
for jj in range(0, len(xList)):
newDistances.append((minimumDist)+((distances[jj]*(maximumDist-minimumDist))/(100)))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xList, yList, zList, s=70, c=newDistances, cmap='Purples', alpha=0.2)
ax.set_xlabel('X Axis (meters)')
ax.set_ylabel('Y Axis (meters)')
ax.set_zlabel('Z Axis (meters)')
ax.set_title('ABB IRB1200 7/0.7 Reachability Plot')
ax.set_xlim3d(-0.4, 1.0)
ax.set_ylim3d(-0.7, 0.7)
ax.set_zlim3d(0.0, 1.4)
# Plot a print volume
xOffset = 0.4
yOffset = 0
zOffset = .4
xDist = .4
yDist = .4
zDist = .75
stepSize = 2
xVector = np.linspace(xOffset-(xDist/2.0), xOffset+(xDist/2.0), num=stepSize)
yVector = np.linspace(yOffset-(yDist/2.0), yOffset+(yDist/2.0), num=stepSize)
zVector = np.linspace(zOffset-(zDist/2.0), zOffset+(zDist/2.0), num=stepSize)
# 1) Side
#X, Y = np.meshgrid(xVector, yVector)
# Z = np.ones_like( X )
# Z = Z*(zOffset-(zDist/2.0))
# ax.plot_wireframe(X,Y,Z, color="red")
# 2) Side
# Z = np.ones_like( X )
# Z = Z*(zOffset+(zDist/2.0))
# ax.plot_wireframe(X,Y,Z, color="red")
# 3) Side
# Y, Z = np.meshgrid(yVector, zVector)
# X = np.ones_like( Y )
# X = X*(xOffset-(xDist/2.0))
# ax.plot_wireframe(X,Y,Z, color="red")
# 4) Side
# Y, Z = np.meshgrid(yVector, zVector)
# X = np.ones_like( Y )
# X = X*(xOffset+(xDist/2.0))
# ax.plot_wireframe(X,Y,Z, color="red")
# 5) Side
# X, Z = np.meshgrid(xVector, zVector)
# Y = np.ones_like( X )
# Y = Y*(yOffset-(yDist/2.0))
# ax.plot_wireframe(X,Y,Z, color="red")
# ) Side
# X, Z = np.meshgrid(xVector, zVector)
# Y = np.ones_like( X )
# Y = Y*(yOffset+(yDist/2.0))
# ax.plot_wireframe(X,Y,Z, color="red")
# Plot robot in 3d plot
robotMesh = mesh.Mesh.from_file("IRB1200_7_07.stl")
ax.add_collection3d(mplot3d.art3d.Poly3DCollection(robotMesh.vectors))
plt.show()
| [
"oliverek12@gmail.com"
] | oliverek12@gmail.com |
27fadea70c2e3189b9fb17f793a06d41f8af201b | 92382f1a7c719903684d06eb440f4fba346ce755 | /data/wrangling.py | cdc86ae129a728c215b09f5c50d53edab59bd083 | [] | no_license | furuta/springboard_capstone_gradient | c8dc2c3f14e2f273cb6eccfb9bd3ca3dcbe3aeb1 | 39f101dfa72eca3f3d884ae899c41f2893c04250 | refs/heads/master | 2020-08-23T15:59:34.943078 | 2019-11-02T15:16:48 | 2019-11-02T15:16:48 | 216,656,636 | 1 | 0 | null | 2019-10-24T02:53:56 | 2019-10-21T20:12:08 | Python | UTF-8 | Python | false | false | 10,693 | py | import pandas as pd
import numpy as np
import dask
import dask.dataframe as dd
import jpholiday
import luigi
import pickle
from datetime import datetime
import time
import requests
import json
import os
import argparse
# Parse input parameters
parser = argparse.ArgumentParser(description='Airbnb Listing Data Wrangling')
parser.add_argument("-o", "--out", dest="output",
help="location of output dataset")
args = parser.parse_args()
OUTPUT_FILE = args.output
# Read the EPOCH value from environment variable
API_KEY = os.getenv("API_KEY", '')
RADIUS = os.getenv("RADIUS", '300')
class ModifyCalendarDataTask(luigi.Task):
calendar_csv_filename = luigi.Parameter()
modified_calendar_filename = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.modified_calendar_filename)
def run(self):
start_time = datetime.now()
print("================================================")
print("==========Start ModifyCalendarDataTask==========")
dtype={'maximum_nights': 'float64', 'minimum_nights': 'float64'}
ddf_calendar = dd.read_csv(self.calendar_csv_filename, dtype=dtype)
use_columns_in_calendar = [
'listing_id',
'date',
'price',
]
ddf_calendar = ddf_calendar.loc[:, use_columns_in_calendar]
ddf_calendar = ddf_calendar.dropna()
print(ddf_calendar.head())
# price
ddf_calendar['price_amount'] = ddf_calendar['price'].map(lambda x: int(float(
str(x).replace(',', '').replace('$', ''))), meta=('x', int)) # need to specify type
# date
ddf_calendar['datetime'] = ddf_calendar['date'].map(lambda x: datetime.strptime(
str(x), '%Y-%m-%d'), meta=('x', object)) # need to specify type
ddf_calendar['month'] = ddf_calendar['datetime'].map(
lambda x: x.month, meta=('x', int)) # need to specify type
ddf_calendar['day'] = ddf_calendar['datetime'].map(
lambda x: x.day, meta=('x', int)) # need to specify type
ddf_calendar['day_of_week'] = ddf_calendar['datetime'].map(
lambda x: x.weekday(), meta=('x', int)) # need to specify type
ddf_calendar['holiday'] = ddf_calendar['datetime'].map(lambda x: 1 if jpholiday.is_holiday(
x.date()) else 0, meta=('x', int)) # need to specify type
ddf_calendar = ddf_calendar.categorize(
columns=['month', 'day_of_week', 'day']) # need to categorize
ddf_calendar = dd.get_dummies(
ddf_calendar, columns=['month', 'day_of_week', 'day'])
del ddf_calendar['date']
del ddf_calendar['price']
del ddf_calendar['datetime']
ddf_calendar = ddf_calendar.compute()
print(ddf_calendar.head())
print(ddf_calendar.shape)
print(ddf_calendar.columns)
with open(self.output().path, "wb") as target:
pickle.dump(ddf_calendar, target)
print("==========End ModifyCalendarDataTask==========")
print("==============================================")
print("Time ", datetime.now() - start_time)
class ModifyListingDataTask(luigi.Task):
listings_csv_filename = luigi.Parameter()
modified_listings_filename = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.modified_listings_filename)
def run(self):
start_time = datetime.now()
print("===============================================")
print("==========Start ModifyListingDataTask==========")
dtype = {'bedrooms': 'float32',
'beds': 'float32',
'review_scores_accuracy': 'float32',
'review_scores_checkin': 'float32',
'review_scores_cleanliness': 'float32',
'review_scores_communication': 'float32',
'review_scores_location': 'float32',
'review_scores_rating': 'float32',
'review_scores_value': 'float32'}
ddf_listing = dd.read_csv(self.listings_csv_filename, dtype=dtype)
use_columns_in_listing = [
'id',
'latitude',
'longitude',
'property_type',
'room_type',
'accommodates',
'bedrooms',
'beds',
'cancellation_policy',
]
ddf_listing = ddf_listing.loc[:, use_columns_in_listing]
print(ddf_listing.head())
# property_type, room_type, cancellation_policy
ddf_listing = ddf_listing.categorize(
columns=['property_type', 'room_type', 'cancellation_policy'])
ddf_listing = dd.get_dummies(
ddf_listing, columns=['property_type', 'room_type', 'cancellation_policy'])
# ddf_listing = ddf_listing.reset_index()
ddf_listing = ddf_listing.rename(columns={'id': 'listing_id'})
ddf_listing = ddf_listing.compute()
print(ddf_listing.head())
print(ddf_listing.shape)
print(ddf_listing.columns)
with open(self.output().path, "wb") as target:
pickle.dump(ddf_listing, target)
print("==========End ModifyListingDataTask==========")
print("=============================================")
print("Time ", datetime.now() - start_time)
class MargeNeighborhoodDataTask(luigi.Task):
neighborhood_data_file = luigi.Parameter()
modified_listings_filename = luigi.Parameter()
modified_listings_with_neighborhood_filename = luigi.Parameter()
google_places_api_url = luigi.Parameter()
language = 'en'
def requires(self):
return ModifyListingDataTask()
def output(self):
return luigi.LocalTarget(self.modified_listings_with_neighborhood_filename)
def run(self):
start_time = datetime.now()
print("===================================================")
print("==========Start MargeNeighborhoodDataTask==========")
# TODO:This should be managed with DB
neighborhood_data_filepath = self.neighborhood_data_file + RADIUS + '.pkl'
if os.path.exists(neighborhood_data_filepath):
df_neighborhood = pd.read_pickle(neighborhood_data_filepath)
else:
df_neighborhood = pd.DataFrame(
[], columns=['latitude', 'longitude', 'types', 'created'])
df_listing = pd.read_pickle(self.modified_listings_filename)
count = 1
for index, row in df_listing.iterrows():
# Because the difference is less than 10m, round off to the four decimal places
latitude_round = round(row.latitude, 4)
longitude_round = round(row.longitude, 4)
# find of neighborhood data
neighborhood = df_neighborhood[(df_neighborhood['latitude'] == latitude_round) & (
df_neighborhood['longitude'] == longitude_round)]
# get only when there is no data
if neighborhood.empty:
print("[{}]!!!!!!!!!!!empty!!!!!!!!!!!".format(count))
# if not exist, get data from api
response = requests.get(self.google_places_api_url +
'key=' + API_KEY +
'&location=' + str(latitude_round) + ',' + str(longitude_round) +
'&radius=' + RADIUS +
'&language=' + self.language)
data = response.json()
types = []
for result in data['results']:
types.append(result['types'][0])
neighborhood = pd.DataFrame(
[latitude_round, longitude_round, types, time.time()], index=df_neighborhood.columns).T
df_neighborhood = df_neighborhood.append(neighborhood)
with open(neighborhood_data_filepath, "wb") as target:
pickle.dump(df_neighborhood, target)
# else:
# print("[{}]-----------exist-----------".format(count))
count += 1
for neighbor_type in neighborhood.at[0, 'types']:
column_name = 'neighborhood_' + neighbor_type
if not column_name in df_listing.columns:
df_listing[column_name] = 0
df_listing.loc[index, column_name] += 1
del df_listing['latitude']
del df_listing['longitude']
ddf_listing = dd.from_pandas(df_listing, npartitions=4)
print(df_listing.head())
print(df_listing.shape)
print(df_listing.columns)
with open(self.output().path, "wb") as target:
pickle.dump(df_listing, target)
print("==========End MargeNeighborhoodDataTask==========")
print("=================================================")
print("Time ", datetime.now() - start_time)
class MargeAndPrepareDataTask(luigi.Task):
modified_calendar_filename = luigi.Parameter()
modified_listings_with_neighborhood_filename = luigi.Parameter()
def requires(self):
return [ModifyCalendarDataTask(), MargeNeighborhoodDataTask()]
def output(self):
return luigi.LocalTarget(OUTPUT_FILE)
def run(self):
start_time = datetime.now()
print("=================================================")
print("==========Start MargeAndPrepareDataTask==========")
with open(self.modified_calendar_filename, 'rb') as f:
ddf_calendar = pickle.load(f)
with open(self.modified_listings_with_neighborhood_filename, 'rb') as f:
ddf_listing = pickle.load(f)
ddf_marged = ddf_calendar.merge(ddf_listing, on='listing_id')
del ddf_marged['listing_id']
ddf_marged = ddf_marged.dropna()
# ddf_marged = ddf_marged.compute()
print(ddf_marged.head())
print(ddf_marged.shape)
print(ddf_marged.columns)
with open(self.output().path, "wb") as target:
pickle.dump(ddf_marged, target)
print("==========End MargeAndPrepareDataTask==========")
print("===============================================")
print("Time ", datetime.now() - start_time)
if __name__ == '__main__':
# luigi.run(['ModifyCalendarDataTask', '--workers', '1', '--local-scheduler'])
# luigi.run(['ModifyListingDataTask', '--workers', '1', '--local-scheduler'])
# luigi.run(['MargeNeighborhoodDataTask','--workers', '1', '--local-scheduler'])
luigi.run(['MargeAndPrepareDataTask', '--workers', '1', '--local-scheduler'])
# luigid --background --pidfile ./tmp/pidfile --logdir ./luigi_log --state-path ./tmp/state
| [
""
] | |
177ed0375292e788a78899691c3d5ee070da09aa | d50ce5f5c58a2c79b0a81a2d93936ed4493b75e1 | /myConfig.py | e969a43227819e297e65d2ffb6c12707418949cc | [] | no_license | Hilfri/windlabor | 2fc13d4c3b552056be9f9a9811f9883e954609bc | 13a8ec745838ebdfbc1125bca61ca4f30178dd2f | refs/heads/master | 2020-03-19T16:37:05.460320 | 2018-07-13T15:07:11 | 2018-07-13T15:07:11 | 136,721,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | import json
def get(param):
sysJson = open("settings.json")
sysData = json.load(sysJson)
try:
return sysData[param]
except:
return
def update(param, new_value):
pfad = "settings.json"
sysJson = open(pfad)
sysData = json.load(sysJson)
sysData[param] = new_value
with open(pfad, 'w') as f:
json.dump(sysData, f)
| [
"leonard.hilfrich@gmail.com"
] | leonard.hilfrich@gmail.com |
7d442a07bfb8f720507da67a316b7bfbddefbabe | e29b450bf924b983023db41a0cdea97cde129880 | /reversible/sinkhorn.py | da994a5c781f3dbf5244c34a45a3d33e8ec14a12 | [] | no_license | afcarl/generative-reversible | b9efedad155d9c08f0f299f0b861ff6ff53607cf | e21b0846c654e0e041562f715bc5ddd90dde0e07 | refs/heads/master | 2020-03-21T03:29:34.655671 | 2018-05-26T18:53:54 | 2018-05-26T18:53:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,783 | py | import torch as th
from reversible.gaussian import get_gauss_samples
from reversible.util import log_sum_exp, ensure_on_same_device, var_to_np
def sinkhorn_to_gauss_dist(outs, mean, std, **kwargs):
gauss_samples = get_gauss_samples(len(outs), mean, std)
return sinkhorn_sample_loss(outs, gauss_samples, **kwargs)
def M(u, v, C, epsilon):
"Modified cost for logarithmic updates"
"$M_{ij} = (-c_{ij} + u_i + v_j) / \epsilon$"
return (-C + u.unsqueeze(1) + v.unsqueeze(0)) / epsilon
def sinkhorn_sample_loss(samples_a, samples_b, epsilon=0.01, stop_threshold=0.1,
max_iters=50, normalize_cost_matrix=False, max_normed_entropy=None,
normalize_by_empirical_std_a=False):
assert normalize_cost_matrix in [False, 'mean', 'max']
diffs = samples_a.unsqueeze(1) - samples_b.unsqueeze(0)
if normalize_by_empirical_std_a:
stds = th.std(samples_a.detach(), dim=0, keepdim=True)
stds = th.clamp(stds, min=1e-5)
diffs = diffs / stds
C = th.sum(diffs * diffs, dim=2)
del diffs
C_nograd = C.detach()
if normalize_cost_matrix == 'mean':
C_nograd = C_nograd / th.mean(C_nograd)
elif normalize_cost_matrix == 'max':
C_nograd = C_nograd / th.max(C_nograd)
if max_normed_entropy is None:
estimated_trans_th = estimate_transport_matrix_sinkhorn(
C_nograd, epsilon=epsilon, stop_threshold=stop_threshold,
max_iters=max_iters)
else:
estimated_trans_th, _ = transport_mat_sinkhorn_below_entropy(
C_nograd, start_eps=epsilon, stop_threshold=stop_threshold,
max_iters_sinkhorn=max_iters, max_iters_for_entropy=10,
max_normed_entropy=max_normed_entropy)
cost = th.sqrt(th.sum(estimated_trans_th * C)) # Sinkhorn cost
return cost
def transport_mat_sinkhorn_below_entropy(
C, start_eps, max_normed_entropy, max_iters_for_entropy,
max_iters_sinkhorn=50, stop_threshold=1e-3):
normed_entropy = max_normed_entropy + 1
iteration = 0
cur_eps = start_eps
while (normed_entropy > max_normed_entropy) and (iteration < max_iters_for_entropy):
transport_mat = estimate_transport_matrix_sinkhorn(
C, epsilon=cur_eps, stop_threshold=stop_threshold, max_iters=max_iters_sinkhorn)
relevant_mat = transport_mat[transport_mat > 0]
normed_entropy = -th.sum(relevant_mat * th.log(relevant_mat)) / np.log(transport_mat.numel() * 1.)
normed_entropy = var_to_np(normed_entropy)
iteration += 1
cur_eps = cur_eps / 2
return transport_mat, cur_eps
def estimate_transport_matrix_sinkhorn(C, epsilon=0.01, stop_threshold=0.1,
max_iters=50):
n1 = C.size()[0]
n2 = C.size()[1]
mu = th.autograd.Variable(1. / n1 * th.FloatTensor(n1).fill_(1),
requires_grad=False)
nu = th.autograd.Variable(1. / n2 * th.FloatTensor(n2).fill_(1),
requires_grad=False)
mu, nu, C = ensure_on_same_device(mu, nu, C)
u, v, err = 0. * mu, 0. * nu, 0.
actual_nits = 0 # to check if algorithm terminates because of threshold or max iterations reached
for i in range(max_iters):
u1 = u # useful to check the update
u = epsilon * (
th.log(mu) - log_sum_exp(M(u, v, C, epsilon), dim=1, keepdim=True).squeeze()) + u
v = epsilon * (
th.log(nu) - log_sum_exp(M(u, v, C, epsilon).t(), dim=1, keepdim=True).squeeze()) + v
err = (u - u1).abs().sum()
actual_nits += 1
if var_to_np(err < stop_threshold).all():
break
estimated_transport_matrix = th.exp(M(u, v, C, epsilon))
return estimated_transport_matrix
| [
"robintibor@gmail.com"
] | robintibor@gmail.com |
89f7995781d60bb6ec3ed228079f873bf72f7ce1 | f47df27f960b3c5abebf16145026d20fc81f062b | /dheeranet/views/home.py | 9d2568894366f760bc5e482240240503dcf65e9a | [] | no_license | dheera/web-dheeranet | 34eec0591872d01afd441ce97a4853c95fde18a8 | 1faceb4d54d91ae1b7ee3f7e449ee3f224600b08 | refs/heads/master | 2021-01-22T06:32:12.403454 | 2017-04-10T20:55:33 | 2017-04-10T20:55:33 | 20,196,792 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | from flask import Blueprint, render_template, abort, request
from jinja2 import TemplateNotFound
from dheeranet import static_bucket
from dheeranet.cache import s3_get_cached
import json, datetime
home = Blueprint('home', __name__,template_folder='../template')
@home.route('/')
def show():
home_items = json.loads(s3_get_cached(static_bucket, '__home__'))
news_items = filter(lambda x:x['type']=='news', home_items)
return render_template('home.html', news_items = news_items)
| [
"dheera@dheera.net"
] | dheera@dheera.net |
4c3811665bbf4bd491fb4e745743c88e967f3dc6 | f855a86f687fce18fd359d0ccc6dc36b7e1b192a | /SimpleApply/trimPy.py | afc9f1bf2bb30dfcf865f6dea554fcbff9dac2ed | [] | no_license | wvkia/Python | b95d65366082dea5da4d2ab92cafa06f5b23df99 | d94be9936cb841b74da158104258ae098d01af4c | refs/heads/master | 2021-09-15T04:50:24.384603 | 2018-05-26T16:39:49 | 2018-05-26T16:39:49 | 123,883,420 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | #去除字符串前后空格
def trim(s):
while s[:1] ==' ':
s=s[1:]
while s[-1:] ==' ':
s=s[:-1]
return s
print(trim(' asdf '))
#判断是否可以迭代
from collections import Iterable
print(isinstance('adsf',Iterable)) #str是否可以迭代
print(isinstance([23,4,5,5],Iterable)) #list是否可以迭代
print(isinstance((34,45),Iterable)) #turple是否可以迭代
print(isinstance(234,Iterable)) #整数是否可以迭代
#查找list中大最大值和最小值,并返回一个tuple
def findMinAndMax(L):
if L == []:
return None, None
max=min=L[0]
for x in L[1:]:
if max < x:
max=x
if min > x:
min=x
return (min,max)
# 测试
if findMinAndMax([]) != (None, None):
print('测试失败!')
elif findMinAndMax([7]) != (7, 7):
print('测试失败!')
elif findMinAndMax([7, 1]) != (1, 7):
print('测试失败!')
elif findMinAndMax([7, 1, 3, 9, 5]) != (1, 9):
print('测试失败!')
else:
print('测试成功!')
| [
"502332082@qq.com"
] | 502332082@qq.com |
311a9775bce343a683f03cf92db0e518fac17914 | 95d050fb7ad215f3a34ffd8b56e92d8493af414d | /MovingAverageStrategy_1.py | d077e1ad04d7bb5313a2090026fca421839007e4 | [
"MIT"
] | permissive | kamzzang/StockAnalysis | fd7ac4dbb959511f08284d976c19302e5c49be6f | 1f78150a1c20ff827a37c2c63bde15d0f9a7b6de | refs/heads/master | 2022-11-19T22:34:17.909218 | 2020-07-26T08:06:40 | 2020-07-26T08:06:40 | 265,568,639 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,539 | py | import datetime, time
import talib as ta
import numpy as np
import pandas as pd
from pandas import DataFrame
import pandas.io.sql as pdsql
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import dates
import matplotlib.font_manager as font_manager
import seaborn as sns
import mysql.connector
# 맑은고딕체
sns.set(style="whitegrid", font="Malgun Gothic", font_scale=1.5)
fp = font_manager.FontProperties(fname="C:\\WINDOWS\\Fonts\\malgun.TTF", size=15)
def comma_volume(x, pos=None):
s = '{:0,d}K'.format(int(x / 1000))
return s
def comma_price(x, pos=None):
s = '{:0,d}'.format(int(x))
return s
def comma_percent(x, pos=None):
s = '{:+.2f}'.format(x)
return s
major_date_formatter = dates.DateFormatter('%Y-%m-%d')
minor_date_formatter = dates.DateFormatter('%m')
price_formatter = ticker.FuncFormatter(comma_price)
volume_formatter = ticker.FuncFormatter(comma_volume)
percent_formatter = ticker.FuncFormatter(comma_percent)
MySQL_POOL_SIZE = 2
데이타베이스_설정값 = {
'host': '127.0.0.1',
'user': 'root',
'password': 'password',
'database': 'database name',
'raise_on_warnings': True,
}
class NumpyMySQLConverter(mysql.connector.conversion.MySQLConverter):
""" A mysql.connector Converter that handles Numpy types """
def _float32_to_mysql(self, value):
return float(value)
def _float64_to_mysql(self, value):
return float(value)
def _int32_to_mysql(self, value):
return int(value)
def _int64_to_mysql(self, value):
return int(value)
def _timestamp_to_mysql(self, value):
return value.to_datetime()
def mysqlconn():
conn = mysql.connector.connect(pool_name="stockpool", pool_size=MySQL_POOL_SIZE, **데이타베이스_설정값)
conn.set_converter_class(NumpyMySQLConverter)
return conn
# 데이타를 기간에 맞게 잘라냄
def 기간(dataframe, 시작기간=None, 종료기간=None):
df = dataframe.copy()
if (시작기간 is None) and (종료기간 is None):
pass
elif (시작기간 is None) and not (종료기간 is None):
df = df[:종료기간]
elif not (시작기간 is None) and (종료기간 is None):
df = df[시작기간:]
elif not (시작기간 is None) and not (종료기간 is None):
df = df[시작기간:종료기간]
return df
# 종목코드의 정보를 읽음
def get_info(code):
query = """
select 시장구분, 종목코드, 종목명, 주식수, 전일종가*주식수 as 시가총액
from 종목코드
where 종목코드 = '%s'
""" % code
conn = mysqlconn()
df = pdsql.read_sql_query(query, con=conn)
conn.close()
for idx, row in df.iterrows():
시장구분, 종목코드, 종목명, 주식수, 시가총액 = row
return (시장구분, 종목코드, 종목명, 주식수, 시가총액)
# 지정한 종목의 가격/거래량 정보를 읽어 가공
def get_price(code, 시작일자=None, 종료일자=None):
if 시작일자 == None and 종료일자 == None:
query = """
SELECT 일자, 시가, 고가, 저가, 종가, 거래량
FROM 일별주가
WHERE 종목코드='%s'
ORDER BY 일자 ASC
""" % (code)
if 시작일자 != None and 종료일자 == None:
query = """
SELECT 일자, 시가, 고가, 저가, 종가, 거래량
FROM 일별주가
WHERE 종목코드='%s' AND 일자 >= '%s'
ORDER BY 일자 ASC
""" % (code, 시작일자)
if 시작일자 == None and 종료일자 != None:
query = """
SELECT 일자, 시가, 고가, 저가, 종가, 거래량
FROM 일별주가
WHERE 종목코드='%s' AND 일자 <= '%s'
ORDER BY 일자 ASC
""" % (code, 종료일자)
if 시작일자 != None and 종료일자 != None:
query = """
SELECT 일자, 시가, 고가, 저가, 종가, 거래량
FROM 일별주가
WHERE 종목코드='%s' AND 일자 BETWEEN '%s' AND '%s'
ORDER BY 일자 ASC
""" % (code, 시작일자, 종료일자)
conn = mysqlconn()
df = pdsql.read_sql_query(query, con=conn)
conn.close()
df.fillna(0, inplace=True)
df.set_index('일자', inplace=True)
# 추가 컬럼이 필요한 경우에 이 곳에 넣으시오
df['MA20'] = df['종가'].rolling(window=20).mean()
# 가중이동평균을 이용하는 경우
# df['MA20'] = ta.WMA(np.array(df['종가'].astype(float)), timeperiod=20)
df['전일MA20'] = df['MA20'].shift(1)
df['MA240'] = df['종가'].rolling(window=240).mean()
df['전일MA240'] = df['MA240'].shift(1)
df.dropna(inplace=True)
return df
# Moving-average crossover backtesting robot.
class CRobotMA(object):
    """Backtest a simple MA(20) crossover strategy for one ticker.

    ``run`` simulates daily buy/sell decisions, ``report`` prints/returns
    summary statistics, and ``graph`` plots price, account value and
    per-trade profit.  Names such as 기간, get_info, get_price, DataFrame,
    comma_price, comma_percent, plt, fp and the *_formatter objects are
    module-level definitions elsewhere in this file.
    """
    def __init__(self, 종목코드='122630'):
        # Listing info tuple and the full daily price history of the ticker.
        self.info = get_info(code=종목코드)
        self.df = get_price(code=종목코드, 시작일자=None, 종료일자=None)
    # Execute the investment simulation.
    def run(self, 투자시작일=None, 투자종료일=None, 투자금=1000 * 10000):
        """Simulate trading between the given dates with the given capital.

        Returns True when at least one day was simulated, False otherwise.
        Results are stored in ``self.거래결과`` (trade log) and
        ``self.계좌평가결과`` (daily account valuation).
        """
        self.투자금 = 투자금
        self.portfolio = [] # holds [buy date, buy price, quantity] while a position is open
        df = 기간(self.df, 시작기간=투자시작일, 종료기간=투자종료일)
        계좌평가결과 = []
        거래결과 = []
        # for idate, row in df[['시가','종가','MA20','전일MA20','MA240','전일MA240']].iterrows():
        # 시가, 종가, MA20, 전일MA20, MA240, 전일MA240 = row
        for idate, row in df[['시가', '종가', 'MA20', '전일MA20']].iterrows():
            시가, 종가, MA20, 전일MA20 = row
            # Only the buy/sell sections below need changing to plug in a
            # different trading algorithm.
            # Buy: open a position when today's open is above yesterday's MA20.
            ##############################################################
            매수조건 = 시가 > 전일MA20 # and 전일MA20 > 전일MA240
            if 매수조건 == True and len(self.portfolio) == 0:
                수량 = self.투자금 // 시가
                매수가 = 시가
                # 0.015% transaction cost applied on the buy side.
                self.투자금 = self.투자금 - int((매수가 * 수량) * (1 + 0.00015))
                self.portfolio = [idate, 매수가, 수량]
            # Sell: close the position when today's open falls below yesterday's MA20.
            ##############################################################
            매도조건 = 시가 < 전일MA20
            if 매도조건 == True and len(self.portfolio) > 0:
                매도가 = 시가
                [매수일, 매수가, 수량] = self.portfolio
                수익 = (매도가 - 매수가) * 수량
                # 0.315% transaction cost applied on the sell side.
                self.투자금 = self.투자금 + int((매도가 * 수량) * (1 - 0.00315))
                self.portfolio = []
                거래결과.append([idate, 매수가, 매도가, 수량, 수익, self.투자금])
            # Mark the account to market and record it for every day.
            ##############################################################
            if len(self.portfolio) > 0:
                [매수일, 매수가, 수량] = self.portfolio
                매수금액 = 매수가 * 수량
                평가금액 = 종가 * 수량
                총자산 = self.투자금 + 평가금액
            else:
                매수가 = 0
                수량 = 0
                매수금액 = 0
                평가금액 = 0
                총자산 = self.투자금
            계좌평가결과.append([idate, 종가, self.투자금, 매수가, 수량, 매수금액, 평가금액, 총자산])
        # Final trading result: append a sentinel row carrying the closing cash.
        if (len(df) > 0):
            거래결과.append([df.index[-1], 0, 0, 0, 0, self.투자금])
            self.거래결과 = DataFrame(data=거래결과, columns=['일자', '매수가', '매도가', '수량', '수익', '투자금'])
            self.거래결과.set_index('일자', inplace=True)
            self.계좌평가결과 = DataFrame(data=계좌평가결과, columns=['일자', '현재가', '투자금', '매수가', '수량', '매수금액', '평가금액', '총자산'])
            self.계좌평가결과.set_index('일자', inplace=True)
            self.계좌평가결과['MA20'] = self.계좌평가결과['현재가'].rolling(window=60).mean()
            self.계좌평가결과['총자산MA60'] = self.계좌평가결과['총자산'].rolling(window=60).mean()
            return True
        else:
            return False
    def report(self, out=True):
        """Compute summary statistics; print them when *out* is True.

        Returns (winning trades, total trades, total net profit).
        NOTE(review): assumes at least one recorded trade (divides by
        _총거래횟수) and relies on comma_price/comma_percent defined
        elsewhere in this file.
        """
        _총손익 = self.거래결과['수익'].sum()
        if out == True:
            print('총손익(Total Net Profit) %s' % comma_price(x=_총손익))
        _이익거래횟수 = len(self.거래결과.query("수익>0"))
        _총거래횟수 = len(self.거래결과)
        _승률 = _이익거래횟수 / _총거래횟수
        if out == True:
            print('승률(Percent Profit) %s/%s = %s' % (_이익거래횟수, _총거래횟수, comma_percent(x=_승률)))
        _평균이익금액 = self.거래결과.query("수익>0")['수익'].mean()
        _평균손실금액 = self.거래결과.query("수익<0")['수익'].mean()
        if out == True:
            print("평균이익금액(Ratio Avg Win) %s" % comma_price(x=_평균이익금액))
            print("평균손실금액(Ratio Avg Loss) %s" % comma_price(x=_평균손실금액))
        _최대수익금액 = self.거래결과['수익'].max()
        _최대손실금액 = self.거래결과['수익'].min()
        if out == True:
            print("1회거래 최대수익금액 %s" % comma_price(x=_최대수익금액))
            print("1회거래 최대손실금액 %s" % comma_price(x=_최대손실금액))
        _days = 60
        # NOTE(review): computed as rolling (max - min) of total assets over a
        # 60-day window, not a classical peak-to-trough drawdown.
        _MDD = np.max(self.계좌평가결과['총자산'].rolling(window=_days).max() - self.계좌평가결과['총자산'].rolling(window=_days).min())
        if out == True:
            print('%s일 최대연속손실폭(Maximum DrawDown) %s' % (_days, comma_price(x=_MDD)))
        return (_이익거래횟수, _총거래횟수, _총손익)
    def graph(self):
        """Plot price+MA, daily account valuation, and per-trade profit (3 panels)."""
        df = self.계좌평가결과
        dfx = self.거래결과
        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(10, 15), sharex=True)
        fig.suptitle("%s (%s)" % (self.info[2], self.info[1]), fontsize=15) # info = (시장구분, 종목코드, 종목명, 주식수, 시가총액)
        ax = df[['현재가', 'MA20']].plot(ax=ax1)
        ax.xaxis.set_major_formatter(major_date_formatter)
        ax.yaxis.set_major_formatter(price_formatter)
        ax.set_ylabel('가격', fontproperties=fp)
        ax.set_xlabel('', fontproperties=fp)
        ax.legend(loc='best')
        ax = df[['총자산', '총자산MA60']].plot(ax=ax2)
        ax.xaxis.set_major_formatter(major_date_formatter)
        ax.yaxis.set_major_formatter(price_formatter)
        ax.set_ylabel('계좌평가결과', fontproperties=fp)
        ax.set_xlabel('', fontproperties=fp)
        ax.legend(loc='best')
        ax = dfx[['수익']].plot(ax=ax3, style='-o')
        ax.xaxis.set_major_formatter(major_date_formatter)
        ax.yaxis.set_major_formatter(price_formatter)
        ax.set_ylabel('거래결과', fontproperties=fp)
        ax.set_xlabel('', fontproperties=fp)
        ax.legend(loc='best')
# Smoke run: backtest ticker 000020 over 2000-2020 with 10,000,000 invested.
robot = CRobotMA(종목코드='000020')
robot.run(투자시작일='2000-01-01', 투자종료일='2020-05-01', 투자금=1000 * 10000)
print(robot.report())
robot.graph()
# Show the last rows of the daily account valuation and the trade log.
print(robot.계좌평가결과.tail(10))
print(robot.거래결과.tail())
| [
"kamzzang1@naver.com"
] | kamzzang1@naver.com |
77443b0c81a87de4fc9f92620ad1d9f81cf46729 | a696d8aefb1dec34d1e030bbfbf9ac1e6d38167f | /config.py | c571f4a36b0f80273c393e072c128711929fae89 | [] | no_license | ayang2012/Dash_Beer_Stats | 2b282174d2f47790c1f8e573650dcbb33deddf8e | f781a4300ea3506258ffb26d02eebca38684ca23 | refs/heads/master | 2020-04-16T05:08:03.587822 | 2019-01-11T20:50:51 | 2019-01-11T20:50:51 | 165,293,955 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | api_key = "db2208bcd8a86d5b3a817f122e6ef489"
username = "ayang2012" | [
"ayang2012@gmail.com"
] | ayang2012@gmail.com |
63ad15afd9c026d9a3011825c941ab69cff2caf6 | 74d499c8aa661b19323fd0fc5ec7b55815997c5e | /GenerateTestCases/DatabaseGeneration.py | e1f2ec6dcfff6bb58c466786f3c905c04ddd9899 | [
"MIT"
] | permissive | MayAbdeldayem/LIWI | 65747e825998d0215481f4ff8efeac2c221996d4 | b4d615e0951b7c28c9258d0d7a8ff86c73c4ebe2 | refs/heads/master | 2020-05-31T08:14:45.501818 | 2019-06-04T06:02:58 | 2019-06-04T06:02:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,122 | py | import xml.etree.ElementTree as ET
import glob
import cv2
from pathlib import Path
import os, errno
import numpy as np
import shutil
from itertools import combinations
import random
from PIL import Image
import copy
# Crop window: rows 687:3240 of each 300dpi scan.
# Writer ids for this dataset start at 672.
def firemaker_preprocessing():
    """Convert the Firemaker 300dpi scans into per-writer JPEG folders.

    For every writer, page 1 ('p1-copy-normal', *01.tif) is located via
    glob and the paths of pages 2-4 are derived from it by substituting
    the page id and folder name.  Each page is cropped to rows 687:3240
    and written as JPEG under test/<writer id>/.
    """
    base = 'C:/Users/omars/Documents/Github/LIWI/Omar/firemaker/firemaker/300dpi/'
    baseDB = 'C:/Users/omars/Documents/Github/LIWI/Omar/test/'
    writer_id = 672
    id1 = '01.tif'
    id2 = '02.tif'
    id3 = '03.tif'
    id4 = '04.tif'
    file1 = 'p1-copy-normal'
    file2 = 'p2-copy-upper'
    file3 = 'p3-copy-forged'
    file4 = 'p4-self-natural'
    for filename in glob.glob(base + '*/*01.tif'):
        # Derive the sibling pages from the page-1 path.  str.replace
        # returns a new string, so no explicit copies are needed.
        filename2 = filename.replace(id1, id2).replace(file1, file2)
        filename3 = filename.replace(id1, id3).replace(file1, file3)
        filename4 = filename.replace(id1, id4).replace(file1, file4)
        # exist_ok replaces the old try/except-EEXIST dance; any other
        # OSError still propagates, as before.
        os.makedirs(baseDB + str(writer_id) + '/', exist_ok=True)
        for item in (filename, filename2, filename3, filename4):
            temp = cv2.imread(item)
            # Crop to rows 687:3240 (see module note above).
            temp = temp[687:3240, :]
            name = Path(item).name
            name = name.replace('.tif', '.jpg')
            cv2.imwrite(baseDB + str(writer_id) + '/' + name, temp)
        print(filename)
        writer_id += 1
# IAM handwriting dataset: build Training/Validation/Testing splits.
def test_generator():
    """Split per-writer page images into Training/Validation/Testing sets.

    For each writer id 000-961, the first image found becomes the training
    sample (saved under Class<n>/ with its own name), the second becomes
    the validation sample, and the rest go to the testing folder.  PNG
    sources are converted to RGB JPEGs first; native JPEGs are passed
    through unchanged, preserving the original PNG-then-JPEG ordering.
    """
    baseTraining = 'C:/Users/omars/Documents/Github/LIWI/Omar/Dataset/Training/'
    baseValidation = 'C:/Users/omars/Documents/Github/LIWI/Omar/Dataset/Validation/'
    baseTesting = 'C:/Users/omars/Documents/Github/LIWI/Omar/Dataset/Testing/'
    classNum = 0
    print('generating cases')
    for i in range(0, 962):
        classNum += 1
        writer_id = str(i).zfill(3)  # writer folders are zero-padded to 3 digits
        print(i)
        os.makedirs(baseTraining + 'Class' + str(classNum), exist_ok=True)
        count = 0
        source = 'C:/Users/omars/Documents/Github/LIWI/Omar/test/' + writer_id + '/'
        # One parameterized pass replaces the two duplicated loop bodies;
        # `count` runs across both passes exactly as before.
        for pattern, convert in (('*.png', True), ('*.jpg', False)):
            for filename in glob.glob(source + pattern):
                temp = Image.open(filename)
                name = Path(filename).name
                if convert:
                    temp = temp.convert('RGB')
                    name = name.replace('.png', '.jpg')
                if count == 0:
                    # First image: training sample, keeps its own file name.
                    temp.save(baseTraining + 'Class' + str(classNum) + '/' + name)
                elif count == 1:
                    # Second image: validation sample.
                    temp.save(baseValidation + 'testing' + str(classNum) + '_' + str(count - 1) + '.jpg')
                else:
                    # Remaining images: test samples.
                    temp.save(baseTesting + 'testing' + str(classNum) + '_' + str(count - 1) + '.jpg')
                count += 1
test_generator()  # build the Training/Validation/Testing split when the module is executed
| [
"omarshaalan31@gmail.com"
] | omarshaalan31@gmail.com |
e570b8176b57d3d1da45335c0576713cf401f565 | 299f9ed8cfb4e24124ea45505561abd746f1b276 | /DECamExposure.py | 698390eb2976aac52323673b6d946a4140b3ce5d | [] | no_license | dwgerdes/tnofind | 18e3fc061d9c42da49e9832bb51bd1c796cfdec1 | 68cd58ffeee978caaf11ca23acfe2adff859a1ee | refs/heads/master | 2021-01-17T08:54:38.408501 | 2016-04-26T01:57:26 | 2016-04-26T01:57:26 | 40,010,273 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,155 | py | #!/usr/bin/env python
import os
import ephem
from DECamField import DECamField
class DECamExposure(object):
    """A single DECam exposure (Python 2 era code).

    Holds pointing (ra/dec), band, exposure time and bookkeeping ids, and
    offers helpers to test sky coverage (via DECamField) and to locate the
    exposure's files under a local directory tree.
    """
    #
    def __init__(self, expnum=0, date=ephem.date('2013-01-01 00:00:00'), exptime=0, band='r', ra=ephem.degrees(0), dec=ephem.degrees(0), nite=20130101, tag='None', obj='None'):
        self.expnum = expnum
        self.date = date
        self.exptime = exptime
        self.band = band
        self.ra = ra
        self.dec = dec
        self.tag = tag
        self.obj = obj
        self.nite = nite
    def contains(self, ra1, dec1):
        # returns True if the point (ra1, dec1) lies inside the field
        return DECamField(self.ra, self.dec).contains(ra1, dec1)
    def ellipse(self):
        # Delegate to DECamField for this pointing's ellipse.
        return DECamField(self.ra, self.dec).ellipse()
    def dump(self):
        # Print a human-readable summary (Python 2 print statements).
        print 'ExpID: \t', self.expnum
        print 'date: \t', self.date
        print 'Exptime: \t', self.exptime
        print 'Band: \t', self.band
        print 'RA: \t', self.ra
        print 'DEC: \t', self.dec
        print 'Tag: \t', self.tag
        print 'Tile: \t', self.obj
    def local_files(self, rootdir):
        # Searches rootdir and its subdirectories for files (not directories) of the form DECam_nnnnnnnn_cc.* where nnnnnnnn is the expnum
        # NOTE(review): matching is substring-based, so an expnum embedded in
        # some longer number in a file name would also match.
        a = os.walk(rootdir)
        flist = []
        for root, dirs, files in a:
            for f in files:
                if str(self.expnum) in f and 'DECam_' in f:
                    flist.append(os.path.join(root, f))
        return flist
    def local_nulls(self, rootdir):
        # Searches rootdir and its subdirectories for a directory containing 'null_nnnnnnnn' where nnnnnnn is the expnum,
        # and makes a list of the files it contains
        a = os.walk(rootdir)
        nlist = []
        for root, dirs, files in a:
            for d in dirs:
                if 'null_'+str(self.expnum) in d:
                    for r, dirs2, files2 in os.walk(os.path.join(root, d)):
                        for f in files2:
                            nlist.append(os.path.join(r,f))
        return nlist
def main():
    """Placeholder entry point; this module is intended to be imported."""
    pass


if __name__ == "__main__":
    main()
| [
"gerdes@umich.edu"
] | gerdes@umich.edu |
4e66d7ef70e81215819cad08fe0cf65909585429 | 7ace308f23d4114cd2d28837200f497de3205a94 | /manage.py | 0bd28bee07324cf3c4bee440f9955e7fb241d017 | [] | no_license | jeffersonls-dev/desafio-amcom | 8a26d9f8bc3b52bc2500694acb39c7ba19814a3d | dd9408c3967117524d2d67defd56c4c281d7860b | refs/heads/main | 2023-06-04T09:29:08.504432 | 2021-06-22T14:11:52 | 2021-06-22T14:11:52 | 378,714,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Respect an externally supplied DJANGO_SETTINGS_MODULE; otherwise fall
    # back to this project's settings package.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'amcom.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint, chaining the original ImportError.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
# Standard Django entry point: delegate to the management command line.
if __name__ == '__main__':
    main()
| [
"jefferson.ls563@gmail.com"
] | jefferson.ls563@gmail.com |
a6a2936d4a3bd0bbf1418468b07779177c769e4b | 765a9bcead1bd53ad7b95d93dbf8faf4485afb5a | /python/solutions/codeforces_263A.py | 659465486170123e0304cda010707407967e7899 | [] | no_license | haxdds/codeforces | fc59a3de3d72f1655f01ea9a1ba9414e4582cf62 | c822d3d9f119cefbc8b39fc2efb41f3086f71dc2 | refs/heads/main | 2023-01-31T04:19:26.408330 | 2020-12-13T00:54:39 | 2020-12-13T00:54:39 | 320,733,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | #!/usr/bin/env python3
# Read a 5x5 matrix of integers from stdin; it contains a single 1.  Print
# the Manhattan distance from that cell to the centre cell (row 2, column 2,
# zero-based) -- Codeforces problem 263A.
matrix = []
for x in range(5):
    row = [int(x) for x in input().split(' ')]
    matrix.append(row)
i_1 = 0
j_1 = 0
i_center = 2
j_center = 2
delta_i = 0
delta_j = 0
for row in matrix:
    if 1 in row:
        # Count the cells before the 1 to get its column index.  The inner
        # loop keeps incrementing j_1 after the 1 is found, but that is
        # harmless: delta_i/delta_j were already captured at the 1.
        for x in row:
            if x != 1:
                j_1 += 1
            else:
                delta_j = j_center - j_1
                delta_i = i_center - i_1
    else:
        # Rows without the 1 bump the row counter; only the value captured
        # when the 1 was found contributes to the answer.
        i_1 += 1
print(abs(delta_i) + abs(delta_j))
| [
"haxdds@gmail.com"
] | haxdds@gmail.com |
54adf7f713f597318439481b4a00f4ef0fe1b16c | 5857039ecf32a0eac002fca612c964dc528fe729 | /Estrutura de Repetição 2.0/Codes/059 Menu de Opções_1.py | 8d9b2434831cfef5bc2f3621da133713700de05b | [] | no_license | gabrielSampaioDev/Python_code | d8d62bfc90820f4bd8053b060f49ce21fd252b49 | f0b3d35e09e4dde49bf0fe4714afb0ea3dc575a6 | refs/heads/master | 2023-07-14T13:19:47.564018 | 2021-08-23T20:40:37 | 2021-08-23T20:40:37 | 369,521,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,568 | py | #INTRO
# Simple interactive calculator menu (user-facing text is Portuguese):
# read two integers, then loop showing a menu until option 5 (quit).
print('-=-'*20)
print('|',' '*20,'MENU DE OPÇÕES', ' '*20, '|')
print('-=-'*20)
opção = 0
while opção != 5:
    # NOTE(review): both operands are (re)read at the top of every pass,
    # so choosing 5 (quit) still requires typing two numbers first.
    primeiro_valor = int(input('Digite o primeiro valor: '))
    segundo_valor = int(input('Digite o segundo valor: '))
    print('=='*15)
    print(''' [ 1 ] SOMAR
    [ 2 ] MULTIPLICAR
    [ 3 ] MAIOR
    [ 4 ] NOVOS NÚMEROS
    [ 5 ] SAIR DO PROGRAMA''')
    print('=='*15)
    opção = int(input('>>>> Qual é sua opção? '))
    if opção == 1:
        # Option 1: sum.
        soma = primeiro_valor + segundo_valor
        print('O valor de {} + {} é igual a: {}'.format(primeiro_valor, segundo_valor, soma))
    elif opção == 2:
        # Option 2: product.
        multiplicacao = primeiro_valor * segundo_valor
        print('O valor de {} X {} é igual a: {}'.format(primeiro_valor, segundo_valor, multiplicacao))
    elif opção == 3:
        # Option 3: report which value is larger (ANSI bold-blue highlight).
        if primeiro_valor > segundo_valor:
            print('O primeiro valor digitado \033[1;34m{}\033[m é maior que o segundo valor digitado \033[1;34m{}\033[m'.format(primeiro_valor, segundo_valor))
        elif primeiro_valor < segundo_valor:
            print('O segundo valor digitado \033[1;34m{}\033[m é maior que o primeiro valor digitado \033[1;34m{}\033[m'.format(segundo_valor, primeiro_valor))
        else:
            print('Os valores digitados são iguais.')
    elif opção == 4:
        # Option 4: nothing special -- the loop re-asks for numbers anyway.
        print('Informe os números novamente: ')
    elif opção == 5:
        print('Finalizando...')
    else:
        print('Opção inválida. Tente novamente')
    print('=-='*15)
print('FIM DO PROGRAMA! Volte sempre!') | [
"gabrielsampaio.ssa@gmail.com"
] | gabrielsampaio.ssa@gmail.com |
1cc88ac8efc9ee654b98623c83bb76faa5eb6493 | 4326aed1e764f8fa63099fa59c3886cbbc84c7b0 | /chatfirst/settings.py | b0fb7b2b31d7ff2011e8e64b1672bef680091012 | [] | no_license | theashu/chat-first-django | ba897a0f1228f69335056dd58b343c8041ddd54c | 67800cfcb1f18f7656c714e5f656e44c526fd882 | refs/heads/master | 2020-08-09T04:49:46.302241 | 2019-10-10T13:44:04 | 2019-10-10T13:44:04 | 214,001,864 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,097 | py | """
Django settings for chatfirst project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = '+h_e*-y$1f@r6$7-j0dbyc383uqh=@#u#f*m)y#+h!+5(+83ri'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty: no extra hosts whitelisted (development default).
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chatfirst.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'chatfirst.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Development default: file-based SQLite next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"animesh00914902017@msi-ggsip.org"
] | animesh00914902017@msi-ggsip.org |
624cb6720d60660e5105faf9b4616df20f9bc3e3 | 48f2256ef0cfe582f39a7eb6ceef117069d29847 | /ChaosTheory/LogisticMapPrediction.py | af53c87af4862d9d481ce960d7e62de94bc2fc93 | [] | no_license | MGSE97/NAVY | 9dc4bffc5f05c9ec10dd29aed2a53868eb5c6e42 | 6137d51e68ba1657c29a16734052aac360ddba3d | refs/heads/master | 2022-07-17T21:33:59.667116 | 2020-05-13T16:42:44 | 2020-05-13T16:42:44 | 244,203,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,457 | py | import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from NeuralNetworks.Net import Net, Layer
from NeuralNetworks.Utils import Sigmoid, Linear, ReLu, Empty
Axes3D = Axes3D  # pycharm auto import; re-referencing keeps the 3D-projection import from being flagged as unused
def create_graph(name):
    """Open a new titled figure window and hand back its axes and figure.

    :param name: window/figure title
    :return: (axes, figure) tuple
    """
    figure = plt.figure(name)
    figure.suptitle(name)
    axes = figure.gca()
    figure.canvas.draw_idle()
    return axes, figure
# Configuration
def logistic_map(a, x):
    """Logistic map iteration: x_{n+1} = a * x * (1 - x)."""
    return a * x * (1 - x)
n = 1000  # number of values
x = 1e-5 * np.ones(n)  # x values
a = np.linspace(1, 4.0, n)  # a values
iterations = 100  # iteration count
# NN
lr = 1e-3  # learning rate
net = Net([
    Layer(n, n, Sigmoid),
])
# Draw Bifurcation diagram (named function instead of a lambda bound to
# `map`, which shadowed the builtin).
g, f = create_graph("Bifurcation diagram")
for i in range(iterations):
    print("\r{}/{}".format(i, iterations), end="")
    r = logistic_map(a, x)
    g.plot(a, r, 'k', alpha=max(1/iterations, 0.01))
    # teach NN
    nr = net.forward(x)
    net.backwards(r, nr, lr)
    # update x
    x = r
    plt.pause(0.1)
# Draw NN Bifurcation diagram
g2, f2 = create_graph("NN Bifurcation diagram")
x = 1e-5 * np.ones(n)
for i in range(iterations):
    print("\r{}/{}".format(i, iterations), end="")
    x = net.forward(x)
    g2.plot(a, x, 'k')
plt.show()
| [
"elektrikar97@gmail.com"
] | elektrikar97@gmail.com |
f416e06cc19555240322fde37cd44dc114ade597 | 97e316355e4b0ee9d64f91ebf3d584316fa14610 | /parse_tools.py | 7facc1c64eb26c16a0a67f2eb4d54eced7147b9b | [] | no_license | beckgom/ae-wavenet | 91a73b6778fdee25b3aba1f4d1d45713f3a48ae5 | f31021060721c92bd9391fbd028a39c081c28e7f | refs/heads/master | 2020-05-16T17:03:53.844374 | 2019-04-18T01:51:57 | 2019-04-18T01:51:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,891 | py | import argparse
top_usage = """
Usage: train.py {new|resume} [options]
train.py new [options]
-- train a new model
train.py resume [options]
-- resume training from .ckpt file
"""
# Training options common to both "new" and "resume" training modes
train = argparse.ArgumentParser(add_help=False)
train.add_argument('--n-batch', '-nb', type=int, metavar='INT',
default=16, help='Batch size')
train.add_argument('--n-sam-per-slice', '-nw', type=int, metavar='INT',
default=100, help='# of consecutive window samples in one slice' )
train.add_argument('--max-steps', '-ms', type=int, metavar='INT', default=1e20,
help='Maximum number of training steps')
train.add_argument('--save-interval', '-si', type=int, default=1000, metavar='INT',
help='Save a checkpoint after this many steps each time')
train.add_argument('--progress-interval', '-pi', type=int, default=10, metavar='INT',
help='Print a progress message at this interval')
train.add_argument('--disable-cuda', '-dc', action='store_true', default=False,
help='If present, do all computation on CPU')
train.add_argument('--learning-rate-steps', '-lrs', type=int, nargs='+',
metavar='INT', default=[0, 4e6, 6e6, 8e6],
help='Learning rate starting steps to apply --learning-rate-rates')
train.add_argument('--learning-rate-rates', '-lrr', type=float, nargs='+',
metavar='FLOAT', default=[4e-4, 2e-4, 1e-4, 5e-5],
help='Each of these learning rates will be applied at the '
'corresponding value for --learning-rate-steps')
train.add_argument('ckpt_template', type=str, metavar='CHECKPOINT_TEMPLATE',
help="Full or relative path, including a filename template, containing "
"a single %%, which will be replaced by the step number.")
# Complete parser for cold-start mode
cold = argparse.ArgumentParser(parents=[train])
cold.add_argument('--arch-file', '-af', type=str, metavar='ARCH_FILE',
help='INI file specifying architectural parameters')
cold.add_argument('--train-file', '-tf', type=str, metavar='TRAIN_FILE',
help='INI file specifying training and other hyperparameters')
# Data generation options
cold.add_argument('--frac-permutation-use', '-fpu', type=float,
metavar='FLOAT', default=0.1,
help='Fraction of each random data permutation to '
'use. Lower fraction causes more frequent reading of data from '
'disk, but more globally random order of data samples presented '
'to the model')
cold.add_argument('--requested-wav-buf-sz', '-rws', type=int,
metavar='INT', default=1e7,
help='Size in bytes of the total memory available '
'to buffer training data. A higher value will minimize re-reading '
'of data and allow more globally random sample order')
# Preprocessing parameters
cold.add_argument('--pre-sample-rate', '-sr', type=int, metavar='INT', default=16000,
help='# samples per second in input wav files')
cold.add_argument('--pre-win-sz', '-wl', type=int, metavar='INT', default=400,
help='size of the MFCC window length in timesteps')
cold.add_argument('--pre-hop-sz', '-hl', type=int, metavar='INT', default=160,
help='size of the hop length for MFCC preprocessing, in timesteps')
cold.add_argument('--pre-n-mels', '-nm', type=int, metavar='INT', default=80,
help='number of mel frequency values to calculate')
cold.add_argument('--pre-n-mfcc', '-nf', type=int, metavar='INT', default=13,
help='number of mfcc values to calculate')
cold.prog += ' new'
# Encoder architectural parameters
cold.add_argument('--enc-n-out', '-no', type=int, metavar='INT', default=768,
help='number of output channels')
# Bottleneck architectural parameters
cold.add_argument('--bn-type', '-bt', type=str, metavar='STR', default='ae',
help='bottleneck type (one of "ae", "vae", or "vqvae")')
cold.add_argument('--bn-n-out', '-bo', type=int, metavar='INT', default=64,
help='number of output channels for the bottleneck')
# Decoder architectural parameters
cold.add_argument('--dec-filter-sz', '-dfs', type=int, metavar='INT', default=2,
help='decoder number of dilation kernel elements')
# !!! This is set equal to --bn-n-out
#cold.add_argument('--dec-n-lc-in', '-dli', type=int, metavar='INT', default=-1,
# help='decoder number of local conditioning input channels')
cold.add_argument('--dec-n-lc-out', '-dlo', type=int, metavar='INT', default=-1,
help='decoder number of local conditioning output channels')
cold.add_argument('--dec-n-res', '-dnr', type=int, metavar='INT', default=-1,
help='decoder number of residual channels')
cold.add_argument('--dec-n-dil', '-dnd', type=int, metavar='INT', default=-1,
help='decoder number of dilation channels')
cold.add_argument('--dec-n-skp', '-dns', type=int, metavar='INT', default=-1,
help='decoder number of skip channels')
cold.add_argument('--dec-n-post', '-dnp', type=int, metavar='INT', default=-1,
help='decoder number of post-processing channels')
cold.add_argument('--dec-n-quant', '-dnq', type=int, metavar='INT',
help='decoder number of input channels')
cold.add_argument('--dec-n-blocks', '-dnb', type=int, metavar='INT',
help='decoder number of dilation blocks')
cold.add_argument('--dec-n-block-layers', '-dnl', type=int, metavar='INT',
help='decoder number of power-of-two dilated '
'convolutions in each layer')
cold.add_argument('--dec-n-global-embed', '-dng', type=int, metavar='INT',
help='decoder number of global embedding channels')
# positional arguments
cold.add_argument('sam_file', type=str, metavar='SAMPLES_FILE',
help='File containing lines:\n'
+ '<id1>\t/path/to/sample1.flac\n'
+ '<id2>\t/path/to/sample2.flac\n')
# Complete parser for resuming from Checkpoint
resume = argparse.ArgumentParser(parents=[train], add_help=True)
resume.add_argument('ckpt_file', type=str, metavar='CHECKPOINT_FILE',
help="""Checkpoint file generated from a previous run. Restores model
architecture, model parameters, and data generator state.""")
resume.prog += ' resume'
def two_stage_parse(cold_parser, args=None):
    '''wrapper for parse_args for overriding options from file'''
    # First pass: normal parse, so every option receives its command-line default.
    default_opts = cold_parser.parse_args(args)
    # Second pass: an identical parser with every default suppressed, so the
    # resulting namespace contains ONLY the options the user typed explicitly.
    cli_parser = argparse.ArgumentParser(parents=[cold_parser], add_help=False)
    dests = {co.dest:argparse.SUPPRESS for co in cli_parser._actions}
    cli_parser.set_defaults(**dests)
    cli_parser._defaults = {} # hack to overcome bug in set_defaults
    cli_opts = cli_parser.parse_args(args)
    # Merge precedence, lowest to highest: command-line defaults, arch file,
    # train file, explicitly typed command-line arguments.
    import json
    try:
        with open(cli_opts.arch_file) as fp:
            arch_opts = json.load(fp)
    except AttributeError:
        # --arch-file was not given, so the suppressed namespace lacks it.
        arch_opts = {}
    except FileNotFoundError:
        print("Error: Couldn't open arch parameters file {}".format(cli_opts.arch_file))
        exit(1)
    try:
        with open(cli_opts.train_file) as fp:
            train_opts = json.load(fp)
    except AttributeError:
        # --train-file was not given.
        train_opts = {}
    except FileNotFoundError:
        print("Error: Couldn't open train parameters file {}".format(cli_opts.train_file))
        exit(1)
    # Override with command-line settings, then defaults
    merged_opts = vars(default_opts)
    merged_opts.update(arch_opts)
    merged_opts.update(train_opts)
    merged_opts.update(vars(cli_opts))
    # Convert back to a Namespace object
    return argparse.Namespace(**merged_opts)
    # return cli_opts
def get_prefixed_items(d, pfx):
    """Return the items of *d* whose keys start with *pfx*, with that prefix stripped."""
    plen = len(pfx)
    selected = {}
    for key, value in d.items():
        if key.startswith(pfx):
            selected[key[plen:]] = value
    return selected
| [
"hrbigelow@gmail.com"
] | hrbigelow@gmail.com |
7f0c6256117de70af1d86da7aec023d388d519a8 | 675cfed77845e7f717177a2e17e46a675a2eab43 | /src/riwayatstudi/migrations/0003_auto_20210829_0928.py | 7985681100e3ce268bf8d0ba6ba2c34e8b8cab75 | [] | no_license | guhkun13/websasambo | ca6e68ca2a57593157fd98af9f97e1641f6d13c1 | 3eb11a075d109b6d5f378db03f15690d02b78185 | refs/heads/master | 2023-07-16T12:50:35.744005 | 2021-09-08T12:33:50 | 2021-09-08T12:33:50 | 400,786,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | # Generated by Django 3.2.6 on 2021-08-29 09:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drop the old free-text kabupaten/provinsi fields and
    add id/name column pairs for both region levels."""
    dependencies = [
        ('riwayatstudi', '0002_auto_20210829_0730'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='riwayatstudi',
            name='kabupaten',
        ),
        migrations.RemoveField(
            model_name='riwayatstudi',
            name='provinsi',
        ),
        migrations.AddField(
            model_name='riwayatstudi',
            # NOTE(review): id_kabupaten gets max_length=100 while
            # nama_kabupaten gets 10 -- the reverse of the provinsi pair
            # (id 10, name 100).  The lengths look swapped; confirm the
            # intended schema and fix in a follow-up migration if so.
            name='id_kabupaten',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='riwayatstudi',
            name='id_provinsi',
            field=models.CharField(blank=True, max_length=10, null=True),
        ),
        migrations.AddField(
            model_name='riwayatstudi',
            name='nama_kabupaten',
            field=models.CharField(blank=True, max_length=10, null=True),
        ),
        migrations.AddField(
            model_name='riwayatstudi',
            name='nama_provinsi',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
| [
"guhkun13@gmail.com"
] | guhkun13@gmail.com |
c2ad9a49e4e23ffa98d960a2818b4175b1dece93 | b5029b5710f72010690c5e57fe5c045dcff2701c | /books_authors_app/migrations/0001_initial.py | 9f233b82732ee72e3c171a7a7c24c182c0d25b6d | [] | no_license | Jallnutt1/first_django_project | 2d059ed815227cf5c72af67e4e4074e95edf1508 | 200b98623292e806a407badf1cb9311e25bd561d | refs/heads/main | 2023-04-04T00:50:19.183891 | 2021-04-13T18:56:03 | 2021-04-13T18:56:03 | 357,659,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | # Generated by Django 2.2 on 2021-04-09 00:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial schema: Author and Books tables, each with
    content fields plus created_at/updated_at timestamps."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            # NOTE(review): model named 'Books' (plural) while 'Author' is
            # singular; renaming would need its own migration.
            name='Books',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('desc', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
ae0a91cab4eaa6cc3efe51378737808add69cb8b | d73bdca9cc2f612087eeafe021d3832e1e7cd90b | /binary-tree-right-side-view.py | deeacfbeb0e71caacc31a5ae03d463c6deaf5961 | [] | no_license | pathankhansalman/LeetCode | 5e51f9d45cf4769367a5996ae8f4759d9d14f5c4 | d9f37b437613ce7c2c8126555052eb8a899ec6a1 | refs/heads/master | 2022-06-25T00:33:03.083545 | 2022-06-12T15:25:07 | 2022-06-12T15:25:07 | 13,919,064 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 6 21:02:11 2022
@author: patha
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def rightSideView(self, root):
        """Return the values visible from the right side of a binary tree.

        The right side view is the last (rightmost) node of each level,
        ordered from top to bottom.

        :type root: TreeNode  (has .val, .left, .right; may be None)
        :rtype: List[int]

        Iterative breadth-first traversal: O(n) time, O(max level width)
        extra space, and no recursion (the previous recursive version could
        hit the recursion limit on degenerate/deep trees and depended on
        dict insertion order for correctness).
        """
        view = []
        # `level` holds all nodes of the current depth in left-to-right
        # order, so level[-1] is the node visible from the right.
        level = [root] if root is not None else []
        while level:
            view.append(level[-1].val)
            # Build the next depth, preserving left-to-right order.
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child is not None]
        return view
"pathankhan.salman@gmail.com"
] | pathankhan.salman@gmail.com |
d0f1d4e3732bab857d6547863edbe8e25f3cd794 | 16caaf86763ae52abaa888d137472949718b8daf | /testpy.py | 2c317b9994b7e414b06313ddf9dda00ac1467401 | [] | no_license | ummood/Github-Test | 76fd8ab7f2fb2b1a4d837c443b47662505d14dd0 | 73e3cfca70d346f0869b307e18fce4fe7d3d4a30 | refs/heads/main | 2023-02-07T22:46:37.419610 | 2020-12-30T17:17:14 | 2020-12-30T17:17:14 | 325,598,623 | 0 | 0 | null | 2020-12-30T17:17:15 | 2020-12-30T16:34:39 | Python | UTF-8 | Python | false | false | 31 | py | print("Into the child branch")
| [
"noreply@github.com"
] | ummood.noreply@github.com |
4c9afb7f1a1c3156c3c0e419a9d664957618cf06 | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/pygments/lexers/theorem.py | ec55a32ea39569297ed9647deaf213b073c5d5f6 | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 19,507 | py | """
pygments.lexers.theorem
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for theorem-proving languages.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['CoqLexer', 'IsabelleLexer', 'LeanLexer']
class CoqLexer(RegexLexer):
    """
    For the `Coq <http://coq.inria.fr/>`_ theorem prover.

    .. versionadded:: 1.5
    """

    name = 'Coq'
    aliases = ['coq']
    filenames = ['*.v']
    mimetypes = ['text/x-coq']
    flags = re.UNICODE

    # Keyword classes below are combined into alternation regexes via the
    # pygments.lexer.words() helper in `tokens` and mapped to distinct
    # token types (Namespace / Keyword / Type / Pseudo / Reserved).
    keywords1 = (
        # Vernacular commands
        'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable',
        'Variables', 'Parameter', 'Parameters', 'Axiom', 'Hypothesis',
        'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope',
        'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Let', 'Ltac',
        'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit',
        'Arguments', 'Set', 'Unset', 'Contextual', 'Strict', 'Prenex',
        'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure',
        'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Corollary',
        'Proposition', 'Fact', 'Remark', 'Example', 'Proof', 'Goal', 'Save',
        'Qed', 'Defined', 'Hint', 'Resolve', 'Rewrite', 'View', 'Search',
        'Abort', 'Admitted',
        'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside',
        'outside', 'Check', 'Global', 'Instance', 'Class', 'Existing',
        'Universe', 'Polymorphic', 'Monomorphic', 'Context'
    )
    keywords2 = (
        # Gallina
        'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct',
        'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else',
        'for', 'of', 'nosimpl', 'with', 'as',
    )
    keywords3 = (
        # Sorts
        'Type', 'Prop', 'SProp',
    )
    keywords4 = (
        # Tactics
        'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro',
        'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct',
        'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite',
        'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold',
        'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog',
        'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial',
        'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto',
        'split', 'left', 'right', 'autorewrite', 'tauto', 'setoid_rewrite',
        'intuition', 'eauto', 'eapply', 'econstructor', 'etransitivity',
        'constructor', 'erewrite', 'red', 'cbv', 'lazy', 'vm_compute',
        'native_compute', 'subst',
    )
    keywords5 = (
        # Terminators
        'by', 'done', 'exact', 'reflexivity', 'tauto', 'romega', 'omega',
        'assumption', 'solve', 'contradiction', 'discriminate',
        'congruence',
    )
    keywords6 = (
        # Control
        'do', 'last', 'first', 'try', 'idtac', 'repeat',
    )
    # 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
    # 'downto', 'else', 'end', 'exception', 'external', 'false',
    # 'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
    # 'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
    # 'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
    # 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
    # 'type', 'val', 'virtual', 'when', 'while', 'with'
    # Punctuation/operator alternatives, already regex-escaped where needed.
    # NOTE: joined in *reversed* order below so that longer operators are
    # tried before their prefixes (e.g. '<->' before '<-' before '<').
    keyopts = (
        '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', r'-\.',
        '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', '<-',
        '<->', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
        r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~', '=>',
        r'/\\', r'\\/', r'\{\|', r'\|\}',
        # 'Π', 'Σ', # Not defined in the standard library
        'λ', '¬', '∧', '∨', '∀', '∃', '→', '↔', '≠', '≤', '≥',
    )

    operators = r'[!$%&*+\./:<=>?@^|~-]'
    prefix_syms = r'[!?~]'
    infix_syms = r'[=<>@^|&+\*/$%-]'

    tokens = {
        # Main state; 'comment', 'string' and 'dotted' are sub-states
        # entered from here.
        'root': [
            (r'\s+', Text),
            (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
            (r'\(\*', Comment, 'comment'),  # Coq comments nest — see 'comment' state
            (words(keywords1, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
            (words(keywords2, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keywords3, prefix=r'\b', suffix=r'\b'), Keyword.Type),
            (words(keywords4, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keywords5, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
            (words(keywords6, prefix=r'\b', suffix=r'\b'), Keyword.Reserved),
            # (r'\b([A-Z][\w\']*)(\.)', Name.Namespace, 'dotted'),
            (r'\b([A-Z][\w\']*)', Name),
            (r'(%s)' % '|'.join(keyopts[::-1]), Operator),
            (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
            (r"[^\W\d][\w']*", Name),
            # Integer literals with underscore separators; hex/oct/bin forms.
            (r'\d[\d_]*', Number.Integer),
            (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
            (r'0[oO][0-7][0-7_]*', Number.Oct),
            (r'0[bB][01][01_]*', Number.Bin),
            (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
            # Character literals: escapes, decimal and hex codes, plain chars.
            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", String.Char),
            (r"'.'", String.Char),
            (r"'", Keyword),  # a stray quote is another syntax element
            (r'"', String.Double, 'string'),
            (r'[~?][a-z][\w\']*:', Name),
            (r'\S', Name.Builtin.Pseudo),
        ],
        # Nested (* ... *) comments: '#push' re-enters this state on an
        # inner '(*' so the matching '*)' pops back correctly.
        'comment': [
            (r'[^(*)]+', Comment),
            (r'\(\*', Comment, '#push'),
            (r'\*\)', Comment, '#pop'),
            (r'[(*)]', Comment),
        ],
        # Double-quoted strings; '""' is an escaped quote inside a string.
        'string': [
            (r'[^"]+', String.Double),
            (r'""', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        # Dotted module paths (currently only reachable via the commented-out
        # rule above); kept for parity with the other ML-family lexers.
        'dotted': [
            (r'\s+', Text),
            (r'\.', Punctuation),
            (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
            (r'[A-Z][\w\']*', Name.Class, '#pop'),
            (r'[a-z][a-z0-9_\']*', Name, '#pop'),
            default('#pop')
        ],
    }

    # Heuristic used by pygments' lexer guessing: a file containing both
    # 'Proof' and 'Qed' is almost certainly Coq.  (Called unbound by
    # pygments, hence no 'self' parameter.)
    def analyse_text(text):
        if 'Qed' in text and 'Proof' in text:
            return 1
class IsabelleLexer(RegexLexer):
    """
    For the `Isabelle <http://isabelle.in.tum.de/>`_ proof assistant.

    .. versionadded:: 2.0
    """

    name = 'Isabelle'
    aliases = ['isabelle']
    filenames = ['*.thy']
    mimetypes = ['text/x-isabelle']

    # Isar keywords, grouped by role; each group is compiled into one
    # alternation regex via words() in `tokens` and mapped to its own
    # token type.
    keyword_minor = (
        'and', 'assumes', 'attach', 'avoids', 'binder', 'checking',
        'class_instance', 'class_relation', 'code_module', 'congs',
        'constant', 'constrains', 'datatypes', 'defines', 'file', 'fixes',
        'for', 'functions', 'hints', 'identifier', 'if', 'imports', 'in',
        'includes', 'infix', 'infixl', 'infixr', 'is', 'keywords', 'lazy',
        'module_name', 'monos', 'morphisms', 'no_discs_sels', 'notes',
        'obtains', 'open', 'output', 'overloaded', 'parametric', 'permissive',
        'pervasive', 'rep_compat', 'shows', 'structure', 'type_class',
        'type_constructor', 'unchecked', 'unsafe', 'where',
    )

    # Diagnostic / query commands (no effect on the theory state).
    keyword_diag = (
        'ML_command', 'ML_val', 'class_deps', 'code_deps', 'code_thms',
        'display_drafts', 'find_consts', 'find_theorems', 'find_unused_assms',
        'full_prf', 'help', 'locale_deps', 'nitpick', 'pr', 'prf',
        'print_abbrevs', 'print_antiquotations', 'print_attributes',
        'print_binds', 'print_bnfs', 'print_bundles',
        'print_case_translations', 'print_cases', 'print_claset',
        'print_classes', 'print_codeproc', 'print_codesetup',
        'print_coercions', 'print_commands', 'print_context',
        'print_defn_rules', 'print_dependencies', 'print_facts',
        'print_induct_rules', 'print_inductives', 'print_interps',
        'print_locale', 'print_locales', 'print_methods', 'print_options',
        'print_orders', 'print_quot_maps', 'print_quotconsts',
        'print_quotients', 'print_quotientsQ3', 'print_quotmapsQ3',
        'print_rules', 'print_simpset', 'print_state', 'print_statement',
        'print_syntax', 'print_theorems', 'print_theory', 'print_trans_rules',
        'prop', 'pwd', 'quickcheck', 'refute', 'sledgehammer', 'smt_status',
        'solve_direct', 'spark_status', 'term', 'thm', 'thm_deps', 'thy_deps',
        'try', 'try0', 'typ', 'unused_thms', 'value', 'values', 'welcome',
        'print_ML_antiquotations', 'print_term_bindings', 'values_prolog',
    )

    keyword_thy = ('theory', 'begin', 'end')

    keyword_section = ('header', 'chapter')

    keyword_subsection = (
        'section', 'subsection', 'subsubsection', 'sect', 'subsect',
        'subsubsect',
    )

    # Theory-level declarations (definitions, datatypes, setup, ...).
    keyword_theory_decl = (
        'ML', 'ML_file', 'abbreviation', 'adhoc_overloading', 'arities',
        'atom_decl', 'attribute_setup', 'axiomatization', 'bundle',
        'case_of_simps', 'class', 'classes', 'classrel', 'codatatype',
        'code_abort', 'code_class', 'code_const', 'code_datatype',
        'code_identifier', 'code_include', 'code_instance', 'code_modulename',
        'code_monad', 'code_printing', 'code_reflect', 'code_reserved',
        'code_type', 'coinductive', 'coinductive_set', 'consts', 'context',
        'datatype', 'datatype_new', 'datatype_new_compat', 'declaration',
        'declare', 'default_sort', 'defer_recdef', 'definition', 'defs',
        'domain', 'domain_isomorphism', 'domaindef', 'equivariance',
        'export_code', 'extract', 'extract_type', 'fixrec', 'fun',
        'fun_cases', 'hide_class', 'hide_const', 'hide_fact', 'hide_type',
        'import_const_map', 'import_file', 'import_tptp', 'import_type_map',
        'inductive', 'inductive_set', 'instantiation', 'judgment', 'lemmas',
        'lifting_forget', 'lifting_update', 'local_setup', 'locale',
        'method_setup', 'nitpick_params', 'no_adhoc_overloading',
        'no_notation', 'no_syntax', 'no_translations', 'no_type_notation',
        'nominal_datatype', 'nonterminal', 'notation', 'notepad', 'oracle',
        'overloading', 'parse_ast_translation', 'parse_translation',
        'partial_function', 'primcorec', 'primrec', 'primrec_new',
        'print_ast_translation', 'print_translation', 'quickcheck_generator',
        'quickcheck_params', 'realizability', 'realizers', 'recdef', 'record',
        'refute_params', 'setup', 'setup_lifting', 'simproc_setup',
        'simps_of_case', 'sledgehammer_params', 'spark_end', 'spark_open',
        'spark_open_siv', 'spark_open_vcg', 'spark_proof_functions',
        'spark_types', 'statespace', 'syntax', 'syntax_declaration', 'text',
        'text_raw', 'theorems', 'translations', 'type_notation',
        'type_synonym', 'typed_print_translation', 'typedecl', 'hoarestate',
        'install_C_file', 'install_C_types', 'wpc_setup', 'c_defs', 'c_types',
        'memsafe', 'SML_export', 'SML_file', 'SML_import', 'approximate',
        'bnf_axiomatization', 'cartouche', 'datatype_compat',
        'free_constructors', 'functor', 'nominal_function',
        'nominal_termination', 'permanent_interpretation',
        'binds', 'defining', 'smt2_status', 'term_cartouche',
        'boogie_file', 'text_cartouche',
    )

    keyword_theory_script = ('inductive_cases', 'inductive_simps')

    # Commands that open a proof goal at theory level.
    keyword_theory_goal = (
        'ax_specification', 'bnf', 'code_pred', 'corollary', 'cpodef',
        'crunch', 'crunch_ignore',
        'enriched_type', 'function', 'instance', 'interpretation', 'lemma',
        'lift_definition', 'nominal_inductive', 'nominal_inductive2',
        'nominal_primrec', 'pcpodef', 'primcorecursive',
        'quotient_definition', 'quotient_type', 'recdef_tc', 'rep_datatype',
        'schematic_corollary', 'schematic_lemma', 'schematic_theorem',
        'spark_vc', 'specification', 'subclass', 'sublocale', 'termination',
        'theorem', 'typedef', 'wrap_free_constructors',
    )

    keyword_qed = ('by', 'done', 'qed')
    # Proof abandonment is highlighted as an error on purpose.
    keyword_abandon_proof = ('sorry', 'oops')

    keyword_proof_goal = ('have', 'hence', 'interpret')

    keyword_proof_block = ('next', 'proof')

    keyword_proof_chain = (
        'finally', 'from', 'then', 'ultimately', 'with',
    )

    keyword_proof_decl = (
        'ML_prf', 'also', 'include', 'including', 'let', 'moreover', 'note',
        'txt', 'txt_raw', 'unfolding', 'using', 'write',
    )

    keyword_proof_asm = ('assume', 'case', 'def', 'fix', 'presume')

    keyword_proof_asm_goal = ('guess', 'obtain', 'show', 'thus')

    keyword_proof_script = (
        'apply', 'apply_end', 'apply_trace', 'back', 'defer', 'prefer',
    )

    operators = (
        '::', ':', '(', ')', '[', ']', '_', '=', ',', '|',
        '+', '-', '!', '?',
    )

    proof_operators = ('{', '}', '.', '..')

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'\(\*', Comment, 'comment'),   # ML-style (* ... *) comments (nest)
            (r'\{\*', Comment, 'text'),      # {* ... *} verbatim text blocks
            (words(operators), Operator),
            (words(proof_operators), Operator.Word),
            (words(keyword_minor, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
            (words(keyword_diag, prefix=r'\b', suffix=r'\b'), Keyword.Type),
            (words(keyword_thy, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_theory_decl, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_section, prefix=r'\b', suffix=r'\b'), Generic.Heading),
            (words(keyword_subsection, prefix=r'\b', suffix=r'\b'), Generic.Subheading),
            (words(keyword_theory_goal, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
            (words(keyword_theory_script, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
            (words(keyword_abandon_proof, prefix=r'\b', suffix=r'\b'), Generic.Error),
            (words(keyword_qed, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_proof_goal, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_proof_block, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_proof_decl, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_proof_chain, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_proof_asm, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_proof_asm_goal, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_proof_script, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
            (r'\\<\w*>', Text.Symbol),       # Isabelle symbol escapes like \<forall>
            (r"[^\W\d][.\w']*", Name),
            (r"\?[^\W\d][.\w']*", Name),     # schematic variables (?x)
            (r"'[^\W\d][.\w']*", Name.Type), # type variables ('a)
            (r'\d[\d_]*', Name),  # display numbers as name
            (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
            (r'0[oO][0-7][0-7_]*', Number.Oct),
            (r'0[bB][01][01_]*', Number.Bin),
            (r'"', String, 'string'),
            (r'`', String.Other, 'fact'),    # back-quoted fact references
        ],
        # Nested (* ... *) comments, as in the Coq lexer above.
        'comment': [
            (r'[^(*)]+', Comment),
            (r'\(\*', Comment, '#push'),
            (r'\*\)', Comment, '#pop'),
            (r'[(*)]', Comment),
        ],
        'text': [
            (r'[^*}]+', Comment),
            (r'\*\}', Comment, '#pop'),
            (r'\*', Comment),
            (r'\}', Comment),
        ],
        # Inner-syntax strings; symbol escapes keep their own token type.
        'string': [
            (r'[^"\\]+', String),
            (r'\\<\w*>', String.Symbol),
            (r'\\"', String),
            (r'\\', String),
            (r'"', String, '#pop'),
        ],
        'fact': [
            (r'[^`\\]+', String.Other),
            (r'\\<\w*>', String.Symbol),
            (r'\\`', String.Other),
            (r'\\', String.Other),
            (r'`', String.Other, '#pop'),
        ],
    }
class LeanLexer(RegexLexer):
    """
    For the `Lean <https://github.com/leanprover/lean>`_
    theorem prover.

    .. versionadded:: 2.0
    """

    name = 'Lean'
    aliases = ['lean']
    filenames = ['*.lean']
    mimetypes = ['text/x-lean']

    flags = re.MULTILINE | re.UNICODE

    tokens = {
        'root': [
            (r'\s+', Text),
            # '/--' must be tried before '/-': doc comments vs. plain
            # block comments (both may nest via their sub-states).
            (r'/--', String.Doc, 'docstring'),
            (r'/-', Comment, 'comment'),
            (r'--.*?$', Comment.Single),
            # Namespacing / visibility commands.
            (words((
                'import', 'renaming', 'hiding',
                'namespace',
                'local',
                'private', 'protected', 'section',
                'include', 'omit', 'section',
                'protected', 'export',
                'open',
                'attribute',
            ), prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
            # Declaration commands.
            (words((
                'lemma', 'theorem', 'def', 'definition', 'example',
                'axiom', 'axioms', 'constant', 'constants',
                'universe', 'universes',
                'inductive', 'coinductive', 'structure', 'extends',
                'class', 'instance',
                'abbreviation',

                'noncomputable theory',

                'noncomputable', 'mutual', 'meta',

                'attribute',

                'parameter', 'parameters',
                'variable', 'variables',

                'reserve', 'precedence',
                'postfix', 'prefix', 'notation', 'infix', 'infixl', 'infixr',

                'begin', 'by', 'end',

                'set_option',
                'run_cmd',
            ), prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
            (r'@\[[^\]]*\]', Keyword.Declaration),  # attribute lists, e.g. @[simp]
            # Term-level keywords.
            (words((
                'forall', 'fun', 'Pi', 'from', 'have', 'show', 'assume', 'suffices',
                'let', 'if', 'else', 'then', 'in', 'with', 'calc', 'match',
                'do'
            ), prefix=r'\b', suffix=r'\b'), Keyword),
            # Unfinished proofs are highlighted as errors on purpose.
            (words(('sorry', 'admit'), prefix=r'\b', suffix=r'\b'), Generic.Error),
            (words(('Sort', 'Prop', 'Type'), prefix=r'\b', suffix=r'\b'), Keyword.Type),
            # Interactive '#' commands (no \b prefix: '#' is not a word char).
            (words((
                '#eval', '#check', '#reduce', '#exit',
                '#print', '#help',
            ), suffix=r'\b'), Keyword),
            (words((
                '(', ')', ':', '{', '}', '[', ']', '⟨', '⟩', '‹', '›', '⦃', '⦄', ':=', ',',
            )), Operator),
            # Identifiers: ASCII letters plus the Greek/letterlike/subscript
            # Unicode ranges Lean allows in names.
            (r'[A-Za-z_\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2100-\u214f]'
             r'[.A-Za-z_\'\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2070-\u2079'
             r'\u207f-\u2089\u2090-\u209c\u2100-\u214f0-9]*', Name),
            (r'0x[A-Za-z0-9]+', Number.Integer),
            (r'0b[01]+', Number.Integer),
            (r'\d+', Number.Integer),
            (r'"', String.Double, 'string'),
            (r"'(?:(\\[\\\"'nt])|(\\x[0-9a-fA-F]{2})|(\\u[0-9a-fA-F]{4})|.)'", String.Char),
            (r'[~?][a-z][\w\']*:', Name.Variable),
            (r'\S', Name.Builtin.Pseudo),
        ],
        # Nested /- ... -/ block comments.
        'comment': [
            (r'[^/-]', Comment.Multiline),
            (r'/-', Comment.Multiline, '#push'),
            (r'-/', Comment.Multiline, '#pop'),
            (r'[/-]', Comment.Multiline)
        ],
        # /-- ... -/ doc comments (not nested: no '#push' rule).
        'docstring': [
            (r'[^/-]', String.Doc),
            (r'-/', String.Doc, '#pop'),
            (r'[/-]', String.Doc)
        ],
        'string': [
            (r'[^\\"]+', String.Double),
            (r"(?:(\\[\\\"'nt])|(\\x[0-9a-fA-F]{2})|(\\u[0-9a-fA-F]{4}))", String.Escape),
            ('"', String.Double, '#pop'),
        ],
    }
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com |
d0fa78d37064bf03251c9c6edf18a378195106d5 | 6196bd8f9226042b4c8e171313e273d1875c3ee4 | /up_down_chain/up_down_chain/app/Subseribe/migrations/0002_bidsusersetting_mid.py | d2f23630662918aa99b11057208f625ad32ce97c | [] | no_license | wang18722/Up_down_chain | 87230f057dadea95ab8b2760ca756afe00675b26 | 3c18d5d5727db1562438edea66ef15f54b378e33 | refs/heads/master | 2022-12-27T09:04:49.976753 | 2019-07-01T01:19:12 | 2019-07-01T01:19:12 | 225,579,284 | 0 | 0 | null | 2022-12-08T01:23:21 | 2019-12-03T09:22:22 | Python | UTF-8 | Python | false | false | 694 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-06-24 05:29
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the ``mid`` foreign key (to the user model) on BidsUserSetting."""

    # BUG FIX: this is migration 0002 and depends on the app's own
    # 0001_initial, so it must NOT be flagged ``initial = True``.  That flag
    # marks the migrations that create an app's first tables; setting it
    # here would let ``migrate --fake-initial`` skip this AddField whenever
    # the table already exists, silently leaving the column missing.

    dependencies = [
        # Depend on whatever model AUTH_USER_MODEL is swapped to.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('Subseribe', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='bidsusersetting',
            name='mid',
            # Nullable so existing rows are valid without a backfill.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户'),
        ),
    ]
| [
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.