seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22777502445 | import numpy as np
from pbcore.io import BamReader, IndexedBamReader, IndexedFastaReader, AlignmentSet
import pickle
import sys
import itertools
import pandas as pd
import gzip
import pysam
from array import array
from tqdm import tqdm, trange
# Kinetics tools can be found https://github.com/PacificBiosciences/kineticsTools
# and can be installed in to a conda environment using setup.py
#from kineticsTools.KineticWorker import KineticWorkerProcess
#from kineticsTools.ResultWriter import KineticsWriter
from kineticsTools.ipdModel import IpdModel, GbmContextModel
#from kineticsTools import ReferenceUtils, loader
from kineticsTools.sharedArray import SharedArray
# Input BAM opened without requiring @SQ headers (check_sq=False, typical for
# unaligned PacBio subread BAMs); output BAM reuses the input header.
# argv[1] = input BAM path, argv[2] = output BAM path.
bamIn = pysam.AlignmentFile(sys.argv[1], check_sq=False)
bamOut = pysam.AlignmentFile(sys.argv[2], "wb", header=bamIn.header)
def snippetFunc(refId, pre, post):
    """
    Return a function that returns a snippet of the reference sequence around a given position.

    `refId` is a SharedArray-like object exposing getNumpyWrapper(); `pre`/`post`
    are the number of bases taken before/after the template position.
    """
    refArray = refId.getNumpyWrapper()
    def f(tplPos, tplStrand):
        """Closure for returning a reference snippet. The reference is padded with N's for bases falling outside the extents of the reference"""
        # skip over the padding (module-level `pad` bases at each end of the array)
        tplPos += pad
        # Forward strand
        if tplStrand == 0:
            slc = refArray[(tplPos - pre):(tplPos + 1 + post)]
            # Sequence codes are stored in the high nibble (see the
            # left_shift(innerSeqCodes, 4) packing later in this file).
            slc = np.right_shift(slc, 4)
            return "".join(c for c in seqMapNp[slc])
        # Reverse strand
        else:
            # Walk the same window backwards and emit the complement base.
            slc = refArray[(tplPos + pre):(tplPos - post - 1):-1]
            slc = np.right_shift(slc, 4)
            return "".join(c for c in seqMapComplementNp[slc])
    return f
def _makeFramepoints():
B = 2
t = 6
T = 2**t
framepoints = []
next = 0
for i in range(256//T):
grain = B**i
nextOnes = next + grain * np.arange(0, T)
next = nextOnes[-1] + grain
framepoints = framepoints + list(nextOnes)
return np.array(framepoints, dtype=np.uint16)
def _makeLookup(framepoints):
# (frame -> code) involves some kind of rounding
# basic round-to-nearest
frameToCode = np.empty(shape=max(framepoints)+1, dtype=int)
for i, (fl, fu) in enumerate(zip(framepoints, framepoints[1:])):
if (fu > fl + 1):
m = (fl + fu)//2
for f in range(fl, m):
frameToCode[f] = i
frameToCode[f] = i + 1
else:
frameToCode[fl] = i
# Extra entry for last:
frameToCode[fu] = i + 1
return frameToCode, fu
# Module-level codec tables: the 256 framepoint values and the inverse
# frame -> code lookup used by framesToCode / codeToFrames below.
_framepoints = _makeFramepoints()
_frameToCode, _maxFramepoint = _makeLookup(_framepoints)
def framesToCode(nframes):
    """Compress raw frame counts to 8-bit codes, clipping at the largest representable framepoint."""
    nframes = np.minimum(_maxFramepoint, nframes)
    return _frameToCode[nframes]
def codeToFrames(code):
    """Expand 8-bit codes back to their (approximate) frame counts."""
    return _framepoints[code]
byte = np.dtype('byte')
uint8 = np.dtype('uint8')
# Map for ascii encoded bases to integers 0-3 -- will be used to define a 24-bit lookup code
# for fetching predicted IPDs from the kinetic LUT.
# We start everything at 0, so anything will map to 'A' unless it appears
# in this table
lutCodeMap = np.zeros(256, dtype=uint8)
maps = {'a': 0, 'A': 0, 'c': 1, 'C': 1, 'g': 2, 'G': 2, 't': 3, 'T': 3}
for k in maps:
    lutCodeMap[ord(k)] = maps[k]
lutReverseMap = {0: 'A', 1: 'C', 2: 'G', 3: 'T'}
# Sequence codes default to 4 ('N') so unknown characters stay as N.
seqCodeMap = np.ones(256, dtype=uint8) * 4
for k in maps:
    seqCodeMap[ord(k)] = maps[k]
seqMap = {0: 'A', 1: 'C', 2: 'G', 3: 'T', 4: 'N'}
seqMapNp = np.array(['A', 'C', 'G', 'T', 'N'])
seqMapComplement = {0: 'T', 1: 'G', 2: 'C', 3: 'A', 4: 'N'}
seqMapComplementNp = np.array(['T', 'G', 'C', 'A', 'N'])
# Base letters for modification calling
# 'H' : m6A, 'I' : m5C, 'J' : m4C, 'K' : m5C/TET
baseToCode = {'N': 0, 'A': 0, 'C': 1, 'G': 2,
              'T': 3, 'H': 4, 'I': 5, 'J': 6, 'K': 7}
baseToCanonicalCode = {'N': 0, 'A': 0, 'C': 1,
                       'G': 2, 'T': 3, 'H': 0, 'I': 1, 'J': 1, 'K': 1}
codeToBase = dict([(y, x) for (x, y) in baseToCode.items()])
# Context window: `pre` bases before and `post` bases after a template
# position; `pad` is the N-padding added at each end of the shared array.
pre = 10
post = 4
pad = 30
# Powers of 4 for encoding a (pre + post + 1)-base context as a single integer.
base4 = 4 ** np.array(range(pre + post + 1))
refDict = {}
refLengthDict = {}
refid = 0
# Gzipped kinetics LUT shipped with kineticsTools (chemistry SP3-C3).
lutPath = "./kineticsTools/resources/SP3-C3.npz.gz"
with gzip.open(lutPath, "rb") as npz_in:
    gbmModelData = np.load(npz_in, allow_pickle=True)
    gbmModel = GbmContextModel(gbmModelData, -1)
# For every read: predict control IPDs from its own sequence context, then
# attach normalized observed IPDs ('in') and the IPD ratio ('ir') as tags.
for read in tqdm(bamIn, desc='Reads'):
    rawSeq = read.seq
    refSeq = np.frombuffer(rawSeq.encode("utf-8"), dtype=byte)
    # Store the reference length
    length = len(rawSeq)
    # Make a shared array sized for the read plus `pad` bases at each end
    sa = SharedArray(dtype='B', shape=len(rawSeq) + pad * 2)
    saWrap = sa.getNumpyWrapper()
    # Lut Codes convert Ns to As so that we don't put Ns into the Gbm Model
    # Seq Codes leaves Ns as Ns for getting reference snippets out
    innerLutCodes = lutCodeMap[refSeq]
    innerSeqCodes = seqCodeMap[refSeq]
    # Pack the LUT code in the low nibble and the sequence code in the high nibble.
    innerCodes = np.bitwise_or(innerLutCodes, np.left_shift(innerSeqCodes, 4))
    saWrap[pad:(len(rawSeq) + pad)] = innerCodes
    # Padding codes -- the lut array is padded with 0s the sequence
    # array is padded with N's (4)
    outerCodes = np.left_shift(np.ones(pad, dtype=uint8) * 4, 4)
    saWrap[0:pad] = outerCodes
    saWrap[(len(rawSeq) + pad):(len(rawSeq) + 2 * pad)] = outerCodes
    # NOTE(review): snippetFunc's signature is (refId, pre, post) but it is
    # called here with (sa, post, pre) -- verify the swap is intentional.
    snipFunction = snippetFunc(sa, post, pre)
    sites = range(0,length)
    # Reverse-strand (tplStrand=1) context snippet for every position.
    contexts = [snipFunction(sites[x], 1) for x in sites]
    # Model-predicted (control) IPDs for each context.
    control = gbmModel.getPredictions(contexts)
    ip = read.get_tag('ip')  # NOTE(review): unused; the next line re-reads the tag
    ipFrames = codeToFrames(read.get_tag('ip'))
    # Normalize observed inter-pulse durations by their mean, then take the
    # per-base ratio against the model prediction.
    ipFramesNorm = ipFrames / np.mean(ipFrames)
    ipr = ipFramesNorm / control
    read.set_tag('in', array('f', ipFramesNorm))
    read.set_tag('ir', array('f', ipr))
    bamOut.write(read)
bamOut.close() | amaslan/dimelo-seq | ctcf_and_h3k9me3/PerMoleculeIPDRatio.py | PerMoleculeIPDRatio.py | py | 5,676 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "pysam.AlignmentFile",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pysam.AlignmentFile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.argv",
... |
30267108046 | #!/usr/bin/env python
# -*- encoding:UTF-8 -*-
import doctest
from typing import Generator, MutableSequence
from common import CT
def rank(mseq: MutableSequence[CT], k: CT) -> CT:
    """Return the `k`-th smallest element of `mseq` (quickselect).

    Args:
        mseq (MutableSequence[CT]): input sequence; partially reordered in place
        k (CT): zero-based rank of the element to select

    Returns:
        CT: the element that would sit at index `k` if `mseq` were sorted

    >>> import random
    >>> seq = [i for i in range(10)]
    >>> random.shuffle(seq)
    >>> [rank(seq, i) for i in range(10)]
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    def partition(seq: MutableSequence[CT], low: int, high: int) -> int:
        """Quicksort partition around seq[low]; return the pivot's final index.

        Args:
            seq (MutableSequence[CT]): input array
            low (int): start index (pivot position)
            high (int): end index (inclusive)

        Returns:
            int: index of pivot after partitioning
        """
        i, j = low + 1, high
        val = seq[low]
        while True:  # idiomatic `while True` instead of `while 1`
            # Advance i past elements <= pivot, retreat j past elements >= pivot.
            while i < high and seq[i] <= val:
                i += 1
            while j > low and seq[j] >= val:
                j -= 1
            if i >= j:
                break
            seq[i], seq[j] = seq[j], seq[i]
        # j is the rightmost position whose element is <= pivot; swap pivot in.
        seq[low], seq[j] = seq[j], seq[low]
        return j

    low, high = 0, len(mseq) - 1
    while high > low:
        j = partition(mseq, low, high)
        if j == k:
            return mseq[k]
        elif j > k:
            high = j - 1
        else:
            low = j + 1
    return mseq[k]
# 2.5.4 practice, return a sorted and non-duplicated-item list
def dedup(seq: MutableSequence[CT]) -> Generator[CT, None, None]:
    """Return a new sorted and deduplicated sequence from `seq`.

    Args:
        seq (MutableSequence[CT]): input sequence (the assert requires >= 2 items)

    Yields:
        Generator[CT]: each distinct value once, in ascending order

    >>> lst = [i for i in dedup([2, 1, 3, 1, 1, 3, 2, 3, 4, 7])]
    >>> lst
    [1, 2, 3, 4, 7]
    >>> lst2 = [i for i in dedup([1, 1])]
    >>> lst2
    [1]
    >>> lst3 = [i for i in dedup([2, 1, 1, 4, 3, 5])]
    >>> lst3
    [1, 2, 3, 4, 5]
    """
    assert seq and len(seq) >= 2
    new_list = sorted(seq)
    # val: value of the current run of equal elements; count: length of that run.
    val, count, length = new_list[0], 1, len(new_list)
    for i in range(1, length):
        if new_list[i] == val:
            # Still inside a run of duplicates.  If the run reaches the end of
            # the list its value would otherwise never be emitted, so yield it.
            if i == length - 1:
                yield new_list[i]
            count += 1
        else:
            # Run ended: after resetting count to 1, new_list[i - count] is the
            # previous element, i.e. the value of the run that just finished.
            count = 1
            val = new_list[i]
            yield new_list[i - count]
    # A unique last element (final run of length 1) is only known after the loop.
    if count == 1:
        yield new_list[length - 1]
# 2.5.10 practice, implement a version class with __cmp__
class Version(object):
    """Dotted version string compared lexicographically as plain text.

    >>> lst = [Version(i) for i in ['115.1.1', '115.10.1', '115.10.2']]
    >>> lst.sort()
    >>> lst
    [Version(115.1.1), Version(115.10.1), Version(115.10.2)]
    """

    def __init__(self, version):
        self._version = version

    def __eq__(self, other):
        return self._version == other._version

    def __lt__(self, other):
        return self._version < other._version

    def __repr__(self):
        return f'Version({self._version})'

    @property
    def version(self):
        """The raw version string."""
        return self._version

    @version.setter
    def version(self, val):
        # Only purely numeric, dot-separated components are accepted.
        assert all(part.isdigit() for part in val.split('.'))
        self._version = val
# 2.5.14 practice, implement a domain class with __cmp__,
# compare the reversed order domain.
class Domain(object):
    """Domain name ordered by its reversed label sequence (TLD first).

    >>> test_list = ['cs.princeton.edu', 'cs.harvard.edu', 'mail.python.org', 'cs.mit.edu']
    >>> lst = [Domain(i) for i in test_list]
    >>> lst.sort()
    >>> lst
    [Domain(cs.harvard.edu), Domain(cs.mit.edu), Domain(cs.princeton.edu), Domain(mail.python.org)]
    """

    def __init__(self, domain):
        self._domain = domain
        self._cmp_domain = self._reverse_key(domain)

    @staticmethod
    def _reverse_key(domain):
        # 'cs.mit.edu' -> 'edu.mit.cs', so comparisons group by TLD first.
        return '.'.join(reversed(domain.split('.')))

    def __eq__(self, other):
        return self._cmp_domain == other._cmp_domain

    def __lt__(self, other):
        return self._cmp_domain < other._cmp_domain

    def __repr__(self):
        return f'Domain({self._domain})'

    @property
    def domain(self):
        """The domain string in its original order."""
        return self._domain

    @domain.setter
    def domain(self, val):
        self._domain = val
        self._cmp_domain = self._reverse_key(val)
# 2.5.16 practice, construct object which
# order by the name with a new alphabet order
class California(object):
    """Name ordered by a custom (California-ballot style) alphabet.

    >>> seq = [California(name) for name
    ...        in ('RISBY', 'PATRICK', 'DAMIEN', 'GEORGE')]
    >>> seq.sort()
    >>> seq
    [California(RISBY), California(GEORGE), California(PATRICK), California(DAMIEN)]
    """

    # Custom collation order: a letter appearing earlier here sorts first.
    alphabet = ('R', 'W', 'Q', 'O', 'J', 'M', 'V', 'A',
                'H', 'B', 'S', 'G', 'Z', 'X', 'N',
                'T', 'C', 'I', 'E', 'K', 'U', 'P', 'D', 'Y', 'F', 'L')

    def __init__(self, name):
        self._name = name
        self._cmp_tuple = self._rank_tuple(name)

    @classmethod
    def _rank_tuple(cls, name):
        # Translate each letter into its position in the custom alphabet.
        return tuple(map(cls.alphabet.index, name))

    def __eq__(self, other):
        return self._cmp_tuple == other._cmp_tuple

    def __lt__(self, other):
        return self._cmp_tuple < other._cmp_tuple

    def __repr__(self):
        return f'California({self._name})'

    @property
    def name(self):
        """The name in its original spelling."""
        return self._name

    @name.setter
    def name(self, val):
        self._name = val
        self._cmp_tuple = self._rank_tuple(val)
# 2.5.19 practice, kendall tau algorithm implementation
class KendallTau(object):
    """Kendall tau distance (number of discordant pairs) via mergesort
    inversion counting.

    >>> klt = KendallTau()
    >>> klt.kendall_tau_count((0, 3, 1, 6, 2, 5, 4), (1, 0, 3, 6, 4, 2, 5))
    4
    """

    def kendall_tau_count(self, origin_list, count_list):
        # Re-express count_list as positions within origin_list; inversions of
        # that position sequence are exactly the discordant pairs.
        positions = [origin_list.index(item) for item in count_list]
        scratch = positions[:]
        return self.count(positions, scratch, 0, len(positions) - 1)

    def count(self, lst, aux, low, high):
        """Recursively count inversions in lst[low..high] (sorting it as a side effect)."""
        if low >= high:
            return 0
        mid = low + (high - low) // 2
        total = self.count(lst, aux, low, mid)
        total += self.count(lst, aux, mid + 1, high)
        return total + self.merge_count(lst, aux, low, mid, high)

    def merge_count(self, lst, aux, low, mid, high):
        """Merge two sorted halves, counting cross-half inversions."""
        aux[low:high + 1] = lst[low:high + 1]
        inversions = 0
        left, right = low, mid + 1
        for pos in range(low, high + 1):
            if left > mid:
                lst[pos] = aux[right]
                right += 1
            elif right > high:
                lst[pos] = aux[left]
                left += 1
            elif aux[left] < aux[right]:
                lst[pos] = aux[left]
                left += 1
            else:
                # Right element jumps ahead of every remaining left element.
                lst[pos] = aux[right]
                right += 1
                inversions += mid - left + 1
        return inversions
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    doctest.testmod()
| ChangeMyUsername/algorithms-sedgewick-python | chapter_2/module_2_5.py | module_2_5.py | py | 6,830 | python | en | code | 272 | github-code | 1 | [
{
"api_name": "typing.MutableSequence",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "common.CT",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.MutableSequence",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "common.CT",
... |
31891577704 | # test_dbmanager.py
import unittest
import os
from binance import enums as k_binance
from src.pp_order import Order, OrderStatus
from polaris_old.pp_dbmanager import DBManager
TEST_DATABASE = 'test.db'
class TestDBManager(unittest.TestCase):
    """Exercises DBManager order persistence against a throwaway SQLite file."""

    def setUp(self) -> None:
        # Remove any database left over from a previous run; a missing file is
        # reported but not fatal.
        try:
            os.remove(TEST_DATABASE)
        except IOError as e:
            print(e)
            print('cwd: ', os.getcwd())
        self.dbm = DBManager(db_name=TEST_DATABASE, order_tables=['orders'])
        # Order without an explicit uid ...
        self.order = Order(
            session_id='S_20210501_2008',
            order_id='ORDER_ID',
            pt_id='PT_ID',
            k_side=k_binance.SIDE_BUY,
            price=50_000.0,
            amount=1.0,
        )
        # ... and one with a known uid so deletion can be verified by uid.
        self.order_2 = Order(
            session_id='S_20210501_2008',
            order_id='OR_000001',
            pt_id='PT_000001',
            k_side=k_binance.SIDE_SELL,
            price=60_000.88,
            amount=1.0876548765,
            uid='0123456789abcdef'
        )

    def test_get_table_creation_query(self):
        # Smoke test: only prints the DDL, asserts nothing.
        print(self.dbm.get_table_creation_query(table='orders'))

    def test_add_order(self):
        table = 'orders'
        self.dbm.add_order(table=table, order=self.order)
        c = self.dbm.conn.cursor()
        rows = c.execute(f'SELECT * FROM {table};').fetchall()
        self.assertEqual(1, len(rows))
        row = rows[0]
        # row[8] is presumably the status column -- new orders start in MONITOR.
        self.assertEqual(OrderStatus.MONITOR.name, row[8])

    def test_delete_order(self):
        # Add two orders, delete the first, and check only order_2 (identified
        # by its uid in column 0) remains.
        table = 'orders'
        self.dbm.add_order(table=table, order=self.order)
        self.dbm.add_order(table=table, order=self.order_2)
        self.dbm.delete_order(table=table, order=self.order)
        c = self.dbm.conn.cursor()
        rows = c.execute(f'SELECT * FROM {table};').fetchall()
        self.assertEqual(1, len(rows))
        self.assertEqual('0123456789abcdef', rows[0][0])

    def tearDown(self) -> None:
        # Close the connection so the next setUp can delete the file.
        self.dbm.conn.close()
| xavibenavent/polaris_plus | tests/test_dbmanager.py | test_dbmanager.py | py | 1,945 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "polaris_old.pp_dbmanager.D... |
10913932737 | import logging
from tvm import topi, te
from tvm.target import Target
from .. import tag
def schedule_pool(outs, layout):
    """Create schedule for avgpool/maxpool.

    Dispatches NHWC layouts to the arm_cpu-specific 2D schedule; any other
    layout falls back to the generic topi pool schedule with a warning.
    """
    if layout != "NHWC":
        logger = logging.getLogger("topi")
        logger.warning(
            """We currently only support NHWC target specific pools on arm_cpu,
            falling back on generic pool scheduling"""
        )
        return topi.generic.schedule_pool(outs, layout)
    return schedule_pool_2d(outs)
def schedule_pool_2d(outs):
    """Create arm_cpu specific 2D schedule for avgpool/maxpool.

    Inlines injective producers, vectorizes the channel loop (explicitly on
    non-SVE targets, via LLVM scalable vectors on SVE targets), parallelizes
    over height, and unrolls or reorders the filter loops by filter size.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    schedule_ops = [x.op for x in outs]
    schedule = te.create_schedule(schedule_ops)
    scheduled_ops = []

    def traverse(op):
        # Recursively inline any injective operation that isn't the pooling
        # operation or hasn't already been scheduled.
        if tag.is_injective(op.tag):
            if op not in schedule.outputs:
                schedule[op].compute_inline()
            for tensor in op.input_tensors:
                if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        # schedule the actual pooling operation
        elif op.tag.startswith("pool"):
            n, height, width, channel = schedule[op].op.axis
            # Average pool consists of two parts; a sum then a division.
            # We can schedule the division loop to parallelize across height and
            # vectorize across width.
            enable_explicit_vectorization = not Target.current(allow_none=False).features.has_sve
            if op != outs[0].op:
                # The pool op is not the final output (e.g. avgpool's division
                # stage follows); schedule the real output op here too.
                output = outs[0]
                output_fused = schedule[output].fuse(output.op.axis[1], output.op.axis[2])
                schedule[output].parallel(output_fused)
                vectorization_factor = (
                    8 if enable_explicit_vectorization else output.op.axis[3].dom.extent
                )
                _, inner = schedule[output].split(output.op.axis[3], vectorization_factor)
                schedule[output].vectorize(inner)
            padded_input = op.input_tensors[0]
            if isinstance(padded_input.op, te.tensor.ComputeOp):
                schedule[padded_input].compute_inline()
            # For targets without SVE try explicitly vectorizing the channel
            # loop, For SVE targets leave the loop in place for LLVM to convert
            # into a scalable vector loop.
            vectorization_factor = 8 if enable_explicit_vectorization else channel.dom.extent
            channel_outer, channel_inner = schedule[op].split(channel, vectorization_factor)
            schedule[op].vectorize(channel_inner)
            schedule[op].parallel(height)
            if len(schedule[op].op.reduce_axis) > 0:
                filter_height, filter_width = schedule[op].op.reduce_axis
                # We consider any filter of area < 10 to be small enough to
                # unroll; 3x3 filters have shown better performance when
                # unrolled.
                if filter_height.dom.extent * filter_width.dom.extent <= 9:
                    # For small filters, unrolling the filter loops allows us to
                    # vectorize over channels without reordering anything.
                    schedule[op].unroll(filter_width)
                    schedule[op].unroll(filter_height)
                else:
                    # Reordering so that channels is the fastest moving axis allows
                    # LLVM to vectorize across contiguous memory in the NHWC
                    # ordering.
                    schedule[op].reorder(
                        n, height, width, filter_height, filter_width, channel_outer, channel_inner
                    )
            else:
                # No reduction axes (e.g. a pad-free pooling variant): just put
                # the vectorized channel loops innermost.
                schedule[op].reorder(n, height, width, channel_outer, channel_inner)
        else:
            raise RuntimeError("Unsupported operator: %s" % op.tag)
        scheduled_ops.append(op)

    traverse(outs[0].op)
    return schedule
| LiRWZ/tvm | python/tvm/topi/arm_cpu/pooling.py | pooling.py | py | 4,116 | python | en | code | null | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tvm.topi.generic.schedule_pool",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tvm.topi.generic",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name":... |
20199877377 | import random
from astral import Astral
from datetime import datetime, timedelta, time
from timedevent import TimedEvent
from scenes import VACATION_SCENES
import logging
logger = logging.getLogger(__name__)
#
# This event builds the script of events for vacation mode for a single day. Once it executes, it will rescue
#
class VacationBuildEvent(TimedEvent):
    """Builds today's vacation light script when fired, then reschedules
    itself to build the following day's script at 10am."""

    def __init__(self, event_time, parent):
        TimedEvent.__init__(self, event_time)
        self.parent = parent  # the owning VacationMode instance

    def execute(self):
        date = datetime.now().date()
        events = self.parent.create_scene_script(date)
        # Only queue events still in the future (we may fire mid-day).
        for event in events:
            if event.event_time >= datetime.now():
                self.parent.home.timed_event_queue.add_event(event)
        date += timedelta(days=1)
        next_build_time = datetime.combine(date, time(10))  # Build the next day's script at 10am
        self.parent.home.timed_event_queue.add_event(VacationBuildEvent(next_build_time, self.parent))
class VacationLightEvent(TimedEvent):
    """Timed event that turns one scene off and/or another scene on."""

    def __init__(self, event_time, home, scene_off, scene_on):
        TimedEvent.__init__(self, event_time)
        self.home = home
        self.scene_off = scene_off  # scene to switch off, or None
        self.scene_on = scene_on    # scene to switch on, or None

    def execute(self):
        if self.scene_off is not None:
            logger.info("Vacation turning off {0}".format(self.scene_off.names[0]))
            self.scene_off.off(self.home.lights)
        if self.scene_on is not None:
            logger.info("Vacation turning on {0}".format(self.scene_on.names[0]))
            self.scene_on.on(self.home.lights)
class VacationMode:
    """Simulates occupancy by randomly switching scenes on a schedule while away."""

    def __init__(self, home, start=timedelta(minutes=-30), end_time=time(23, 0, 0), interval=timedelta(minutes=30)):
        self.start = start  # Can be either a timedelta (offset from sunset) or a datetime.time
        self.end_time = end_time    # time of day the script stops
        self.interval = interval    # spacing between scene changes
        self.home = home
        self.enabled = False

    # After all the services have been started this method is called. If the persistent store shows that we are
    # in vacation mode then we probably just had a power failure.
    def init_from_persistent_store(self):
        if self.home.persistent_store.get_value('vacation_mode', default=False):
            logger.info("Enabling vacation mode because enabled in persistent store")
            self.enable()

    def enable(self, delay=timedelta(minutes=5)):
        """Turn vacation mode on and schedule the first script build after `delay`."""
        if not self.enabled:
            logger.info("Enabling vacation mode")
            self.enabled = True
            self.home.persistent_store['vacation_mode'] = True
            self.home.timed_event_queue.add_event(VacationBuildEvent(datetime.now() + delay, self))

    def disable(self):
        """Turn vacation mode off and purge all pending vacation events."""
        if self.enabled:
            logger.info("Disabling vacation mode")
            self.enabled = False
            self.home.persistent_store['vacation_mode'] = False
            self.home.timed_event_queue.remove_events(VacationBuildEvent)
            self.home.timed_event_queue.remove_events(VacationLightEvent)

    def create_scene_script(self, date):
        """Return the list of VacationLightEvents for `date`, from the start
        time (sunset-relative if `start` is a timedelta) until `end_time`."""
        if isinstance(self.start, timedelta):
            a = Astral()
            # Sunset is looked up for Seattle (hard-coded city).
            start_time = (a['seattle'].sun(date=date, local=True)['sunset'] + self.start).time()
        else:
            start_time = self.start
        last_on = None
        events = []
        start_datetime = datetime.combine(date, start_time)
        end_datetime = datetime.combine(date, self.end_time)
        while start_datetime < end_datetime:
            next_on = random.choice(VACATION_SCENES.all_scenes)
            if next_on == last_on:
                next_on = None  # If we randomly choose the same scene twice in a row, don't turn anything on
            # Each event turns the previous scene off and the next one on.
            events.append(VacationLightEvent(start_datetime, self.home, last_on, next_on))
            last_on = next_on
            start_datetime += self.interval
        # always turn off the last_on light at the end of the script
        events.append(VacationLightEvent(start_datetime, self.home, last_on, None))
        return events
| RalphLipe/homecontrol | vacationmode.py | vacationmode.py | py | 4,044 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "timedevent.TimedEvent",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "timedevent.TimedEvent.__init__",
"line_number": 18,
"usage_type": "call"
},
{
"api_name":... |
74660377312 | """
Search module for searching the web based on user queries.
"""
import asyncio
from search_engine_parser.core.engines.yahoo import Search as YahooSearch
def searcher(query, n_results, n_pages, only_description):
    """
    Search the web using Yahoo Search engine and retrieve search results.

    Parameters:
        query : str
            The search query.
        n_results : int
            The number of search results to retrieve (results beyond this are discarded).
        n_pages : int
            The number of search result pages to crawl.
        only_description : bool
            If True, only retrieve the descriptions of the search results.
            If False, retrieve the titles, links, and descriptions of the search results.

    Returns:
        list[str]: formatted search-result strings, at most n_results long.
    """
    results = []
    for i in range(1, n_pages + 1):
        search_args = (query, i)  # (query, page number)
        ysearch = YahooSearch()
        # A fresh event loop per page: search_engine_parser drives asyncio
        # internally -- presumably needed when called off the main thread;
        # verify against the caller.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        yresults = ysearch.search(*search_args)
        for title, link, description in zip(
            yresults["titles"], yresults["links"], yresults["descriptions"]
        ):
            if only_description:
                results.append(f"|DESCRIPTIONS: {description}")
            else:
                results.append(
                    f"|TITLE: {title} LINK: {link} DESCRIPTIONS: {description}"
                )
    return results[:n_results]
| nthng-quan/FlixRS | modules/search.py | search.py | py | 1,431 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "search_engine_parser.core.engines.yahoo.Search",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "asyncio.new_event_loop",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "asyncio.set_event_loop",
"line_number": 36,
"usage_type": "call"
... |
6180786354 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from datetime import datetime
##
## begin util functies
##
def nvl(value, default_value):
    """Oracle-style NVL: return `default_value` when `value` is None, else `value`."""
    if value is None:
        return default_value
    return value
##
## einde util functies
##
class AukArtikelRankVw(models.Model):
    """Denormalized article-ranking record (article + category + weights).

    NOTE(review): the 'Vw' suffix and the commented-out abstract Meta suggest
    this is backed by a database view rather than a managed table -- confirm.
    """
    artikel_id = models.IntegerField()
    artikel_titel = models.CharField(max_length=200)  # article title
    krant = models.TextField()  # newspaper name
    cat_id = models.IntegerField()
    cat_volgorde = models.IntegerField()  # category display order
    categorie = models.CharField(max_length=200)
    pubdate = models.DateTimeField()
    pubdate_yyyymmdd = models.TextField()
    krant_weight = models.DecimalField(decimal_places=6,max_digits=10)
    calculated_weight = models.DecimalField(decimal_places=6,max_digits=10)
    cloud_weight = models.DecimalField(decimal_places=6,max_digits=10)
    total_weight = models.DecimalField(decimal_places=6,max_digits=10)
    rank = models.DecimalField(decimal_places=6,max_digits=10)
    # class Meta:
    #     abstract = True
class AukArtikel(models.Model):
    """A scraped newspaper article (raw and cleaned body text)."""
    uniek = models.CharField(max_length=200, unique=True)  # unique article key
    titel = models.CharField(max_length=200)  # title
    pubdate = models.DateTimeField()
    rssfeeddescription = models.CharField(max_length=4000)
    url = models.CharField(max_length=1000)
    body = models.CharField(max_length=64000)
    body_clean = models.CharField(max_length=64000)
    # body_stemmed = models.CharField(max_length=64000)
    class Meta:
        db_table = u'auk_artikel'
    def __unicode__(self):
        return u'%s' % (self.uniek)
#Alles van de krant
class Krant(models.Model):
    """Newspaper ('krant') with its homepage URL."""
    # id = models.IntegerField(primary_key=True)
    naam = models.CharField(unique=True, max_length=100)  # name
    url = models.CharField(max_length=1000)
    class Meta:
        db_table = u'auk_krant'
    def __unicode__(self):
        return u'%s' % (self.naam)
class Rubriek(models.Model):
    """Section ('rubriek') of a newspaper, identified by its feed URL."""
    krt = models.ForeignKey(Krant)
    naam = models.CharField(max_length=100)  # name
    url = models.CharField(unique=True,max_length=1000)
    actief = models.CharField(max_length=1)  # single-char active flag -- exact values not visible here
    class Meta:
        db_table = u'auk_rubriek'
    def __unicode__(self):
        return u'%s' % (self.naam)
class Categorie(models.Model):
    """Article category with per-category ranking tuning factors."""
    # id = models.IntegerField(primary_key=True)
    volgorde = models.IntegerField()  # display order
    naam = models.CharField(unique=True,max_length=100)  # name
    cloud = models.CharField(max_length=4000,null=True,blank=True)  # keyword cloud text
    cloud_factor = models.DecimalField(default=1,max_digits=4, decimal_places=2)
    aging_factor = models.DecimalField(default=1,max_digits=4, decimal_places=2)
    class Meta:
        db_table = u'auk_categorie'
    def __unicode__(self):
        return u'%s' % (self.naam)
class GebruikerCategorie(models.Model):
    """Per-user category preference: ordering and article count."""
    # id = models.IntegerField(primary_key=True)
    usr = models.ForeignKey(User)
    cat = models.ForeignKey(Categorie)
    volgorde = models.IntegerField()  # display order for this user
    aantal_artikelen = models.IntegerField()  # number of articles to show
    class Meta:
        db_table = u'auk_gebruiker_categorie'
        unique_together = ("usr","cat")
#
# Intersectie tussen rubriek en categorie
#
class AukRubriekCategorie(models.Model):
    """Intersection (many-to-many) between section (rubriek) and category."""
    rbk = models.ForeignKey(Rubriek)
    cat = models.ForeignKey(Categorie)
    class Meta:
        db_table = u'auk_rubriek_categorie'
        unique_together = ("rbk","cat")
#
# Intersectie tussen artikel en categorie
#
class AukArtikelCategorie(models.Model):
    """Intersection between article and category, carrying the ranking weights."""
    art = models.ForeignKey(AukArtikel)
    cat = models.ForeignKey(Categorie)
    krant_weight = models.FloatField(null=True)
    calculated_weight = models.FloatField(null=True)
    cloud_weight = models.FloatField(null=True)
    total_weight = models.FloatField(null=True)
    rank = models.IntegerField(null=True)
    class Meta:
        db_table = u'auk_artikel_categorie'
        unique_together = ("art","cat")
#
# Intersectie tussen artikel en rubriek
#
class AukArtikelPublicatie(models.Model):
    """Intersection between article and the section (rubriek) it appeared in."""
    art = models.ForeignKey(AukArtikel)
    rbk = models.ForeignKey(Rubriek)
    class Meta:
        db_table = u'auk_artikel_publicatie'
        unique_together = ("art", "rbk")
#
# Search and Rank tabellen
#
class SarIgnorewordlist(models.Model):
    """Search-and-rank stop word excluded from indexing."""
    word = models.CharField(max_length=4000)
    class Meta:
        db_table = u'sar_ignorewordlist'
class SarWordlist(models.Model):
    """Vocabulary entry for the search-and-rank index."""
    word = models.CharField(unique=True, max_length=4000)
    class Meta:
        db_table = u'sar_wordlist'
class SarWordlocation(models.Model):
    """Occurrence of a word at a given position within an article."""
    art = models.ForeignKey(AukArtikel)
    wrd = models.ForeignKey(SarWordlist)
    location = models.IntegerField()  # word position within the article
    class Meta:
        db_table = u'sar_wordlocation'
        unique_together = ("art", "wrd", "location")
#
#
# Logging om hierna learning te bewerkstelligen
#
class LogAction(models.Model):
    """Per-session log of a user's interaction with an article (for learning)."""
    usr = models.ForeignKey(User, null=True)
    art = models.ForeignKey(AukArtikel, null=True)
    session_key = models.CharField(max_length=40)
    first_action_time = models.DateTimeField(default=datetime.now)
    last_action_time = models.DateTimeField()
    action = models.CharField(max_length=100)
    spent_time = models.IntegerField(default=0)  # seconds spent, accumulated

    class Meta:
        # BUG FIX: unique_together was declared as a plain class attribute,
        # which Django silently ignores; it must live on the inner Meta class
        # to create the intended constraint.
        unique_together = ("usr", "art", "session_key")
class Vote(models.Model):
    """A user's score for an article."""
    usr = models.ForeignKey(User)
    art = models.ForeignKey(AukArtikel)
    score = models.IntegerField()

    class Meta:
        # BUG FIX: unique_together was a plain class attribute, which Django
        # ignores; moved onto Meta so one vote per (user, article) is enforced.
        unique_together = ("usr", "art")
| nosinga/parool | djp/dj_breev/breev/models.py | models.py | py | 5,799 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 18,
"usage_type": "call"
},
{
"api_na... |
24603697811 | from flask import Flask, request, jsonify
import json
# Serve the current module as a Flask web app.
# Static-file configuration:
# static_folder is the directory holding static files; static_url_path mounts it at the site root.
app = Flask(__name__,static_folder='static',static_url_path='/')

@app.route(rule='/sales', methods=['GET'])
def post_test():
    """Return the hard-coded sales figures as a JSON array."""
    # Bonus exercise (from the original comment): read these numbers from an excel sheet.
    sales_data = [20, 20, 30, 10, 10, 15]
    r = jsonify(sales_data)
    return r

if __name__ == '__main__':
    # Debug server on port 80 (debug=True must not be used in production).
    app.run(port=80, debug=True)
| miaozilong/ruantong-xinagjianguniversity | 上课演示/2023-07-06/上午/4.py | 4.py | py | 489 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 13,
"usage_type": "call"
}
] |
20635589950 | import pysam, sys, getopt
# Split a BAM into one output BAM per value of a chosen tag.
# Options: -i input BAM, -o output directory (with trailing separator),
#          -t tag to split on, -f optional file listing tag values to keep.
argv = sys.argv[1:]
opts, args = getopt.getopt(argv, 'i:o:t:f:')
input_file = None
output_directory = None
tag = None
names = None
for opt, arg in opts:
    if opt == '-i':
        input_file = arg
    elif opt == '-o':
        output_directory = arg
    elif opt == '-t':
        tag = arg
    elif opt == '-f':
        names = arg
if input_file is None or output_directory is None or tag is None:
    print('arguments wrong')
    sys.exit(2)
pure_bam = pysam.AlignmentFile(input_file)
# Reads grouped by tag value, held in memory until the input is consumed.
memory_read_arrays = {}
uniq_names = None
if names is not None:
    # FIX: the filter file was opened but never closed; a context manager
    # releases the handle.  rstrip('\n') also replaces the old
    # line[0:len(line)-1] slice, which chopped a real character off a final
    # line that lacked a trailing newline.
    uniq_names = set()
    with open(names, mode='r') as names_file:
        for line in names_file:
            uniq_names.add(line.rstrip('\n'))
for read in pure_bam:
    try:
        key = read.get_tag(tag)
    except KeyError:
        # pysam raises KeyError when the tag is absent; skip such reads.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        continue
    if uniq_names is not None and key not in uniq_names:
        continue
    memory_read_arrays.setdefault(key, []).append(read)
# One output BAM per tag value, sharing the input file's header.
for key, reads in memory_read_arrays.items():
    out_bam = pysam.AlignmentFile(output_directory + key + '.bam', 'wb', template=pure_bam)
    for read in reads:
        out_bam.write(read)
    out_bam.close()
| vdblm/SinglePolyA | codes/bam_splitter.py | bam_splitter.py | py | 1,310 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "getopt.getopt",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pysam.AlignmentFile",
"line_nu... |
136133084 | """
PCBA dataset loader.
"""
import os
import logging
import deepchem
import gzip
logger = logging.getLogger(__name__)
DEFAULT_DIR = deepchem.utils.data_utils.get_data_dir()
def load_pcba(featurizer='ECFP',
              split='random',
              reload=True,
              data_dir=None,
              save_dir=None,
              **kwargs):
    """Load the full PCBA assay set (pcba.csv.gz); see load_pcba_dataset for parameter details."""
    return load_pcba_dataset(
        featurizer=featurizer,
        split=split,
        reload=reload,
        assay_file_name="pcba.csv.gz",
        data_dir=data_dir,
        save_dir=save_dir,
        **kwargs)
def load_pcba_146(featurizer='ECFP',
                  split='random',
                  reload=True,
                  data_dir=None,
                  save_dir=None,
                  **kwargs):
    """Load the 146-assay PCBA subset (pcba_146.csv.gz); see load_pcba_dataset for parameter details."""
    return load_pcba_dataset(
        featurizer=featurizer,
        split=split,
        reload=reload,
        assay_file_name="pcba_146.csv.gz",
        data_dir=data_dir,
        save_dir=save_dir,
        **kwargs)
def load_pcba_2475(featurizer='ECFP',
                   split='random',
                   reload=True,
                   data_dir=None,
                   save_dir=None,
                   **kwargs):
    """Load the 2475-assay PCBA subset (pcba_2475.csv.gz); see load_pcba_dataset for parameter details."""
    return load_pcba_dataset(
        featurizer=featurizer,
        split=split,
        reload=reload,
        assay_file_name="pcba_2475.csv.gz",
        data_dir=data_dir,
        save_dir=save_dir,
        **kwargs)
def load_pcba_dataset(featurizer='ECFP',
                      split='random',
                      reload=True,
                      assay_file_name="pcba.csv.gz",
                      data_dir=None,
                      save_dir=None,
                      **kwargs):
  """Load PCBA dataset

  PubChem BioAssay (PCBA) is a database consisting of biological activities of
  small molecules generated by high-throughput screening. We use a subset of
  PCBA, containing 128 bioassays measured over 400 thousand compounds,
  used by previous work to benchmark machine learning methods.
  Random splitting is recommended for this dataset.

  The raw data csv file contains columns below:

  - "mol_id" - PubChem CID of the compound
  - "smiles" - SMILES representation of the molecular structure
  - "PCBA-XXX" - Measured results (Active/Inactive) for bioassays:
      search for the assay ID at
      https://pubchem.ncbi.nlm.nih.gov/search/#collection=bioassays
      for details

  Parameters
  ----------
  featurizer : str, default 'ECFP'
    One of 'ECFP', 'GraphConv', 'Weave', 'Raw' or 'smiles2img'.
  split : str or None, default 'random'
    One of 'index', 'random', 'scaffold', 'stratified'; None skips splitting.
  reload : bool, default True
    Load (and save) a previously featurized copy from disk when available.
  assay_file_name : str, default "pcba.csv.gz"
    Name of the gzipped csv file holding the assay matrix.
  data_dir : str, optional
    Directory for the raw download; defaults to the deepchem data directory.
  save_dir : str, optional
    Directory for the featurized cache; defaults to the deepchem data
    directory.

  Returns
  -------
  tuple
    (task names, (train, valid, test), transformers). When ``split`` is
    None the middle element is ``(dataset, None, None)``.

  References
  ----------
  .. [1] Wang, Yanli, et al. "PubChem's BioAssay database."
     Nucleic acids research 40.D1 (2011): D400-D412.
  """
  if data_dir is None:
    data_dir = DEFAULT_DIR
  if save_dir is None:
    save_dir = DEFAULT_DIR
  if reload:
    # Cache location encodes assay file, featurizer (and image spec) and split.
    save_folder = os.path.join(save_dir,
                               assay_file_name.split(".")[0] + "-featurized",
                               featurizer)
    if featurizer == "smiles2img":
      img_spec = kwargs.get("img_spec", "std")
      save_folder = os.path.join(save_folder, img_spec)
    save_folder = os.path.join(save_folder, str(split))

  dataset_file = os.path.join(data_dir, assay_file_name)
  if not os.path.exists(dataset_file):
    deepchem.utils.data_utils.download_url(
        url="https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/{0}".
        format(assay_file_name),
        dest_dir=data_dir)

  # Featurize PCBA dataset
  logger.info("About to featurize PCBA dataset.")
  if featurizer == 'ECFP':
    featurizer = deepchem.feat.CircularFingerprint(size=1024)
  elif featurizer == 'GraphConv':
    featurizer = deepchem.feat.ConvMolFeaturizer()
  elif featurizer == 'Weave':
    featurizer = deepchem.feat.WeaveFeaturizer()
  elif featurizer == 'Raw':
    featurizer = deepchem.feat.RawFeaturizer()
  elif featurizer == "smiles2img":
    img_spec = kwargs.get("img_spec", "std")
    img_size = kwargs.get("img_size", 80)
    featurizer = deepchem.feat.SmilesToImage(
        img_size=img_size, img_spec=img_spec)

  # Task names are all csv columns except the id and the SMILES string.
  with gzip.GzipFile(dataset_file, "r") as fin:
    header = fin.readline().rstrip().decode("utf-8")
    columns = header.split(",")
    columns.remove("mol_id")
    columns.remove("smiles")
    PCBA_tasks = columns

  if reload:
    loaded, all_dataset, transformers = deepchem.utils.data_utils.load_dataset_from_disk(
        save_folder)
    if loaded:
      return PCBA_tasks, all_dataset, transformers

  loader = deepchem.data.CSVLoader(
      tasks=PCBA_tasks, smiles_field="smiles", featurizer=featurizer)
  dataset = loader.featurize(dataset_file)

  # Fixed: compare against None with `is`, not `==` (PEP 8; `==` invokes
  # __eq__ and can misbehave for arbitrary split objects).
  if split is None:
    transformers = [deepchem.trans.BalancingTransformer(dataset=dataset)]
    logger.info("Split is None, about to transform data")
    for transformer in transformers:
      dataset = transformer.transform(dataset)
    return PCBA_tasks, (dataset, None, None), transformers

  splitters = {
      'index': deepchem.splits.IndexSplitter(),
      'random': deepchem.splits.RandomSplitter(),
      'scaffold': deepchem.splits.ScaffoldSplitter(),
      'stratified': deepchem.splits.SingletaskStratifiedSplitter()
  }
  splitter = splitters[split]
  logger.info("About to split dataset using {} splitter.".format(split))
  frac_train = kwargs.get("frac_train", 0.8)
  frac_valid = kwargs.get('frac_valid', 0.1)
  frac_test = kwargs.get('frac_test', 0.1)
  train, valid, test = splitter.train_valid_test_split(
      dataset,
      frac_train=frac_train,
      frac_valid=frac_valid,
      frac_test=frac_test)

  # Balance class weights on the training split only.
  transformers = [deepchem.trans.BalancingTransformer(dataset=train)]
  logger.info("About to transform dataset.")
  for transformer in transformers:
    train = transformer.transform(train)
    valid = transformer.transform(valid)
    test = transformer.transform(test)

  if reload:
    deepchem.utils.data_utils.save_dataset_to_disk(save_folder, train, valid,
                                                   test, transformers)
  return PCBA_tasks, (train, valid, test), transformers
| jem0101/BigSwag-SQA2022-AUBURN | TestOrchestrator4ML-main/resources/Data/supervised/GITHUB_REPOS/deepchem@deepchem/deepchem/molnet/load_function/pcba_datasets.py | pcba_datasets.py | py | 5,859 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "deepchem.utils.data_utils.get_data_dir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "deepchem.utils",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_n... |
36924729027 | import requests
from bs4 import BeautifulSoup
def music_Leaderboard(input_country):
    """Scrape the KISS radio music billboard for one region.

    Parameters
    ----------
    input_country : str
        One of "西洋" (Western), "日韓" (Japan/Korea) or "華語" (Mandarin).
        Any other value raises KeyError.

    Returns
    -------
    str
        Human-readable ranking text, one formatted entry per song.
    """
    # Map the region label to the site's billboard category id.
    country = {"西洋": "3",
               "日韓": "2",
               "華語": "1"}
    # Fixed: request a timeout so a dead connection cannot hang forever.
    req = requests.get(
        'https://www.kiss.com.tw/music/billboard.php?a=%s' % (country[input_country]),
        timeout=10)
    soup = BeautifulSoup(req.text, 'html.parser')
    music_list = soup.find('tbody')
    music_info = music_list.find_all('td')
    music_name = [cell.get_text() for cell in music_info]
    # Each table row spans 8 <td> cells; offsets i-8..i-4 pick
    # rank / title / artist / album / label for row starting at i-8.
    # Build the output with join instead of quadratic string +=.
    entries = []
    for i in range(8, len(music_name) + 8, 8):
        entries.append("名次:%s\n歌名:%s\n歌手:%s\n專輯名稱:%s\n發行公司:%s\n\n" % (
            music_name[i-8], music_name[i-7], music_name[i-6],
            music_name[i-5], music_name[i-4]))
    return ''.join(entries)
| Yicheng-1218/line_bot | music.py | music.py | py | 773 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
}
] |
1095510646 | from pylab import *
from matplotlib.patches import FancyArrow
# Foreman-Mackey's taste in figures
rc("font", size=20, family="serif", serif="Computer Sans")
rc("text", usetex=True)
import word
import parameters
# Time grid for evaluating the model (seconds).
t = linspace(-10., 10., 10001)
# Plot a single "word" (two-sided exponential burst) evaluated on t.
# NOTE(review): the positional arguments to TwoExpParameters appear to be
# (peak time=1.0, tau=0.5, amplitude, skewness S) given the tau / tau*S arrow
# labels below — confirm against parameters.TwoExpParameters.
plot(t, word.TwoExp(t).model(parameters.TwoExpParameters(1.0, 0.5, 1., 3., log=False)), linewidth=2)
xlabel('Time (seconds)')
ylabel('Poisson Rate')
ylim([0., 1.1])
# Dashed vertical line marking the word's peak position at t = 1.
axvline(1., color='r', linestyle='--')
title('A Word')
# Build arrows at the 1/e height: one pointing left (rise timescale tau),
# one pointing right (decay timescale tau*S).
ar1 = FancyArrow(1., 1.01*exp(-1.), -0.5, 0., length_includes_head=True,
        color='k', head_width=0.01, head_length=0.2, width=0.001, linewidth=1)
ar2 = FancyArrow(1., 0.99*exp(-1.), 1.5, 0., length_includes_head=True,
        color='k', head_width=0.01, head_length=0.2, width=0.001, linewidth=1)
ax = gca()
# Add the arrows to the axes.
ax.add_artist(ar1)
ax.add_artist(ar2)
# Annotate the arrows with the timescale symbols.
text(-0.4, 1.*exp(-1.), r'$\tau$')
text(2.7, 1.*exp(-1.), r'$\tau S$')
savefig('documents/word.pdf', bbox_inches='tight')
show()
| dhuppenkothen/magnetron_old | figs.py | figs.py | py | 1,025 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "word.TwoExp",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "parameters.TwoExpParameters",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.FancyArrow",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": ... |
73944547232 | import training.dataset
import unittest
import model.rigid_body_model as rbm
import model.param as model_param
import numpy as np
from scipy.spatial.transform import Rotation as R
class TestDexGraspDataset(unittest.TestCase):
    """Tests for dataset construction from a shared point cloud.

    Checks that random yaw / xy augmentation rotates or translates the point
    cloud, fingertip positions and fingertip normals consistently, and that
    stored fingertip positions agree with forward kinematics of the stored
    hand configuration.
    """

    def setUp(self) -> None:
        # Hand model used for kinematic consistency checks; no visualization.
        self.hand_plant = rbm.AllegroHandPlantDrake(meshcat_open_brower=False,
                                                    num_viz_spheres=0)

    def test_make_dataset_from_same_point_cloud(self):
        rs = np.random.RandomState(0)
        copies = 3
        # Baseline dataset: no augmentation.
        dut = training.dataset.make_dataset_from_same_point_cloud('003_cracker_box',
                                                                  make_datset=False, random_xy=False,
                                                                  random_yaw=False,
                                                                  copies=copies,
                                                                  data_idx_start=0, data_idx_end=1,
                                                                  random_state=rs)
        # There should be exactly `copies` data points.
        self.assertEqual(len(dut), copies)
        # Construct with random orientation (yaw augmentation).
        dut_orn = training.dataset.make_dataset_from_same_point_cloud('003_cracker_box',
                                                                      make_datset=False, random_xy=False,
                                                                      random_yaw=True,
                                                                      copies=copies,
                                                                      data_idx_start=0, data_idx_end=1,
                                                                      random_state=rs)

        def test_dataset_consistency(d):
            # Check that fingertip_normals is consistent with the fingertip positions
            # computed from (base_position, base_quaternion, finger_q).
            diagram_context, plant_context = self.hand_plant.create_context()
            gt_fingertip_angles_dict = {}
            gt_fingertip_angles_dict = model_param.finger_q_to_finger_angles_dict(d.finger_q)
            # The ring finger is not stored in the dataset; zero its joints.
            gt_fingertip_angles_dict[model_param.AllegroHandFinger.RING] = np.zeros(4)
            # Compute the corresponding fingertip locations.
            gt_drake_q = self.hand_plant.convert_hand_configuration_to_q(d.base_position,
                                                                         d.base_quaternion, gt_fingertip_angles_dict
                                                                         )
            gt_p_WF = self.hand_plant.compute_p_WF(gt_drake_q, plant_context)
            gt_fingertip_normals = d.fingertip_normals
            for fi, finger in enumerate(model_param.ActiveAllegroHandFingers):
                np.testing.assert_allclose(gt_fingertip_normals[fi,:3],
                                           np.squeeze(gt_p_WF[finger]),
                                           atol=1e-2  # Allow large tolerance as gt_p_WF is from IK solution
                                           )

        for idx in range(len(dut)):
            # Retrieve the random orientation applied to this copy.
            original_entry = dut[idx]
            rotated_entry = dut_orn[idx]
            # Quaternions are stored wxyz; scipy expects xyzw, hence the reorder.
            original_base_quaternion = R.from_quat(original_entry.base_quaternion[[1,2,3,0]])
            rotated_base_quaternion = R.from_quat(rotated_entry.base_quaternion[[1,2,3,0]])
            rot_diff = rotated_base_quaternion*original_base_quaternion.inv()
            # Rotate everything else back using the recovered rotation.
            rot_diff_matrix = rot_diff.as_matrix()
            # Compare point clouds.
            pc_original = original_entry.point_cloud
            pc_rot = rotated_entry.point_cloud
            np.testing.assert_allclose(pc_rot,
                                       (rot_diff_matrix @ (pc_original.T)).T)
            # Compare normals.
            fingertip_normals_original = original_entry.fingertip_normals
            fingertip_normals_rot = rotated_entry.fingertip_normals
            # Check fingertip positions (first three columns).
            np.testing.assert_allclose(fingertip_normals_rot[:,:3],
                                       (rot_diff_matrix @ (fingertip_normals_original[:,:3].T)).T)
            # Check fingertip normal vectors (last three columns).
            np.testing.assert_allclose(fingertip_normals_rot[:,3:],
                                       (rot_diff_matrix @ (fingertip_normals_original[:,3:].T)).T)
            # Finger joint angles must be untouched by rotation.
            np.testing.assert_allclose(original_entry.finger_q, rotated_entry.finger_q)
            # Base position rotates with the same rotation.
            np.testing.assert_allclose(rotated_entry.base_position, rot_diff_matrix @ original_entry.base_position)
            # Test kinematic consistency of both entries.
            test_dataset_consistency(original_entry)
            test_dataset_consistency(rotated_entry)

        # Construct with random translation (xy augmentation).
        dut_trans = training.dataset.make_dataset_from_same_point_cloud('003_cracker_box',
                                                                        make_datset=False, random_xy=True,
                                                                        random_yaw=False,
                                                                        copies=copies,
                                                                        data_idx_start=0, data_idx_end=1,
                                                                        random_state=rs)
        for idx in range(len(dut)):
            # Retrieve the random translation applied to this copy.
            original_entry = dut[idx]
            translated_entry = dut_trans[idx]
            trans = translated_entry.base_position-original_entry.base_position
            # Compare point clouds: every point shifts by the same translation.
            pc_original = original_entry.point_cloud
            pc_trans = translated_entry.point_cloud
            pc_diff = pc_trans-pc_original
            self.assertTrue(np.allclose(pc_diff, trans))
            # Compare normals.
            fingertip_normals_original = original_entry.fingertip_normals
            fingertip_normals_trans = translated_entry.fingertip_normals
            # Check fingertip positions shift with the translation.
            self.assertTrue(np.allclose((fingertip_normals_trans-fingertip_normals_original)[:,:3],
                                        trans))
            # Check fingertip normal vectors are unaffected by translation.
            np.testing.assert_allclose(fingertip_normals_trans[:,3:], fingertip_normals_original[:,3:])
            # Finger joints and base orientation must be untouched.
            np.testing.assert_allclose(original_entry.finger_q, translated_entry.finger_q)
            np.testing.assert_allclose(original_entry.base_quaternion, translated_entry.base_quaternion)
            test_dataset_consistency(original_entry)
            test_dataset_consistency(translated_entry)
if __name__ == '__main__':
unittest.main() | Ericcsr/synthesize_pregrasp | neurals/test/test_dataset.py | test_dataset.py | py | 6,574 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "model.rigid_body_model.AllegroHandPlantDrake",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "model.rigid_body_model",
"line_number": 10,
"usage_type": "name"
},
... |
19136982972 | import cv2
import numpy as np
import matplotlib.pyplot as plt
def 垂直边缘提取():
    """Extract vertical and horizontal edges with a Sobel-style difference kernel."""
    # Compute the image gradient by differencing, implemented with a custom convolution.
    img = cv2.imread("opencv\\files\sudoku.jpg", 0)
    # Convolution kernel
    kernel = np.array([[-1, 0, 1],
                       [-2, 0, 2],
                       [-1, 0, 1]], dtype=np.float32)
    # This kernel extracts vertical edges; its transpose extracts horizontal edges.
    # Extract vertical edges
    dst_v = cv2.filter2D(img, -1, kernel)
    # Extract horizontal edges
    dst_h = cv2.filter2D(img, -1, kernel.T)
    cv2.imshow('img', np.hstack((img, dst_v, dst_h)))
    cv2.waitKey(0)
#垂直边缘提取()
def Sobel算子():
    """Gradient magnitude with the Sobel and Scharr operators."""
    # Sobel combines Gaussian smoothing with differentiation, so it resists noise.
    # Compute the gradient vertically, then horizontally, then the total magnitude.
    img = cv2.imread("opencv\\files\sudoku.jpg", 0)
    sobelx = cv2.Sobel(img, -1, 1, 0, ksize=3)  # x direction only
    sobely = cv2.Sobel(img, -1, 0, 1, ksize=3)  # y direction only
    sobel = np.sqrt((np.square(sobelx) + np.square(sobely)))
    # Scharr operator (usually better than Sobel for 3x3 kernels)
    scharrx = cv2.Scharr(img, -1, 1, 0)  # x direction only
    scharry = cv2.Scharr(img, -1, 0, 1)  # y direction only
    scharr = np.sqrt((np.square(scharrx) + np.square(scharry)))
    cv2.imshow("img", np.hstack((sobel.astype(int).astype(
        float), scharr.astype(int).astype(float))))
    cv2.waitKey(0)
'''
Prewitt算子k=[[-1,0,1],[-1,0,1],[-1,0,1]]
Scharr算子(比Sobel更好用)k=[[-3,0,3],[-10,0,10],[-3,0,3]]
'''
#Sobel算子()
#Laplacian算子是二姐边缘检测的典型代表
def Laplacian算子():
    """Second-order edge detection with the Laplacian operator."""
    img = cv2.imread('opencv\\files\sudoku.jpg', 0)
    laplacian = cv2.Laplacian(img, -1)
    cv2.imshow('img', np.hstack((img, laplacian)))
    cv2.waitKey(0)
#Laplacian算子()
def Canny边缘检测():
    """Canny edge detection after Otsu binarisation."""
    img = cv2.imread('opencv\\files\\number13.jpg', 0)
    # Threshold first (edge detection on the binarised image works better)
    ret, th = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # 30 and 70 are the low and high hysteresis thresholds
    edge = cv2.Canny(th, 30, 70)
    cv2.imshow('img', np.hstack((img, th, edge)))
    cv2.waitKey(0)
#Canny边缘检测()
def 图像金字塔():
    """Demonstrate Gaussian pyramid down- and up-sampling.

    Downsampling: convolve with a Gaussian kernel and drop even rows and
    columns, shrinking the image.
    Upsampling: double each dimension, fill the new rows/columns with zeros,
    then convolve with the kernel again.
    """
    img = cv2.imread('opencv\\files\dog.jpg', 1)
    # Downsample: smaller size, lower resolution
    lower_reso = cv2.pyrDown(img)
    # Upsample the downsampled image: larger size, but the resolution does not
    # come back, because the discarded information is gone for good.
    higher_reso=cv2.pyrUp(lower_reso)
    cv2.imshow('img', img)
    cv2.imshow('lower', lower_reso)
    cv2.imshow('higher', higher_reso)
    cv2.waitKey(0)
#图像金字塔()
def 金字塔混合():
    """Blend two images seamlessly using Laplacian pyramids.

    One application of image pyramids is image blending. For example, when
    stitching two images together, simply overlaying them leaves a visible
    seam because pixel values are discontinuous at the join; pyramid blending
    avoids this. Steps:
      1. Read the two input images.
      2. Build Gaussian pyramids for img1 and img2.
      3. Derive Laplacian pyramids from the Gaussian pyramids
         (Laplacian pyramids are well suited to reconstruction).
      4. Join the images at every level of the Laplacian pyramid.
      5. Reconstruct the original-resolution image from the joined pyramid.
    """
    A = cv2.imread('opencv\\files\dog.jpg', 1)
    B = cv2.imread('opencv\\files\dog2.jpg', 1)
    A = cv2.resize(A, (256, 256), cv2.INTER_LINEAR)
    B = cv2.resize(B, (256, 256), cv2.INTER_LINEAR)
    print(A.shape, B.shape)
    # Build A's Gaussian pyramid
    G = A.copy()
    gpA = [G]
    for i in range(6):
        G = cv2.pyrDown(G)
        gpA.append(G)
        print(G.shape)
    # gpA = [A, 1/4A, 1/16A, ...]
    # Build B's Gaussian pyramid
    G = B.copy()
    gpB = [G]
    for i in range(6):
        G = cv2.pyrDown(G)
        gpB.append(G)
        print(G.shape)
    lpA = [gpA[5]]  # Laplacian pyramid for A, 5 levels in total
    for i in range(5, 0, -1):
        GE = cv2.pyrUp(gpA[i])
        # print(GE.shape)
        # print(gpA[i].shape)
        L = cv2.subtract(gpA[i-1], GE)  # subtract: per-pixel image difference
        lpA.append(L)
    # generate Laplacian Pyramid for B
    lpB = [gpB[5]]  # Laplacian pyramid for B, 5 levels in total
    for i in range(5, 0, -1):
        GE = cv2.pyrUp(gpB[i])
        L = cv2.subtract(gpB[i-1], GE)
        lpB.append(L)
    # Left/right stitch: join the two halves at every Laplacian level
    LS = []
    for la, lb in zip(lpA, lpB):
        rows, cols, dpt = la.shape
        print('la', la.shape)
        ls = np.hstack((la[:, 0:cols // 2,:], lb[:, cols // 2:,:]))  # half of each image
        LS.append(ls)
    ls_ = LS[0]
    # Reconstruct the image from the Laplacian pyramid; add() is per-pixel addition
    for i in range(1, 6):
        ls_ = cv2.pyrUp(ls_)
        ls_ = cv2.add(ls_, LS[i])
    real = np.hstack((A[:,:cols // 2,:], B[:, cols // 2:,:]))
    cv2.imshow('Direct_blending---Pyramid_blending',np.hstack((real,ls_)))
    k=cv2.waitKey(0)
金字塔混合()
| AH-NAN/opencv | 算子和金字塔.py | 算子和金字塔.py | py | 5,144 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv2.filter2D",
"line_numb... |
24879635403 | from sqlalchemy.engine import Connection
from saltapi.repository.finder_chart_repository import FinderChartRepository
from saltapi.service.finder_chart_service import FinderChartService
def test_get_finder_chart(db_connection: Connection) -> None:
    """The service resolves a finder chart id to its proposal code and file path."""
    chart_id = 55345
    proposal = "2015-2-SCI-028"
    expected_path_fragment = proposal + "/4/Included/FindingChart_1445723219288.pdf"

    service = FinderChartService(FinderChartRepository(db_connection))
    returned_code, returned_path = service.get_finder_chart(f"{chart_id}.pdf")

    assert returned_code == proposal
    assert expected_path_fragment in str(returned_path)
| saltastroops/salt-api | tests/service/test_finder_chart_service.py | test_finder_chart_service.py | py | 769 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlalchemy.engine.Connection",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "saltapi.repository.finder_chart_repository.FinderChartRepository",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "saltapi.service.finder_chart_service.FinderChartServ... |
8967287236 | # The data set used in this example is from http://archive.ics.uci.edu/ml/datasets/Wine+Quality
# P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis.
# Modeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009.
"""
CREATE LOCAL mlruns
"""
import warnings
import sys
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import ElasticNet
from urllib.parse import urlparse
import mlflow.sklearn
from mlflow.models.signature import infer_signature
from tests import MODEL_NAME
import logging
from shippedbrain import shippedbrain
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
def eval_metrics(actual, pred):
    """Score predictions against ground truth.

    Returns a ``(rmse, mae, r2)`` tuple of regression quality metrics.
    """
    mse = mean_squared_error(actual, pred)
    return np.sqrt(mse), mean_absolute_error(actual, pred), r2_score(actual, pred)
def train_and_eval(alpha, l1_ratio, train_x, train_y, test_x, test_y):
    """Fit an ElasticNet on the training split and evaluate it on the test split.

    Returns ``(model, predictions, signature, rmse, mae, r2)`` where the
    signature is inferred from the test inputs and outputs.
    """
    model = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=46)
    model.fit(train_x, train_y)
    predictions = model.predict(test_x)
    rmse, mae, r2 = eval_metrics(test_y, predictions)
    # Capture the model's input/output schema for logging.
    signature = infer_signature(test_x, predictions)
    return model, predictions, signature, rmse, mae, r2
def build_train():
    """Download the wine-quality data and prepare a train/test split.

    Returns
    -------
    tuple
        ``(train_x, train_y, test_x, test_y, alpha, l1_ratio)`` where the
        feature frames exclude the "quality" column, the label frames contain
        only "quality", and alpha/l1_ratio are fixed ElasticNet
        hyperparameters.

    Raises
    ------
    Exception
        Re-raises any failure while downloading or parsing the csv.
    """
    # Read the wine-quality csv file from the URL
    csv_url = (
        "http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
    )
    try:
        print("Downloading dataset...")
        data = pd.read_csv(csv_url, sep=";")
    except Exception as e:
        logger.exception(
            "Unable to download training & test CSV, check your internet connection. Error: %s", e
        )
        # Fixed: previously execution continued with `data` undefined, which
        # crashed below with a confusing NameError; fail fast instead.
        raise

    # Split the data into training and test sets. (0.75, 0.25) split.
    train, test = train_test_split(data)

    # The predicted column is "quality" which is a scalar from [3, 9]
    train_x = train.drop(["quality"], axis=1)
    test_x = test.drop(["quality"], axis=1)
    train_y = train[["quality"]]
    test_y = test[["quality"]]

    alpha = 0.5
    l1_ratio = 0.5
    return train_x, train_y, test_x, test_y, alpha, l1_ratio
def main(log_model_option: dict = None, run_inside_mlflow_context: bool = True):
    """Train an ElasticNet and log the model.

    :param log_model_option: Log model using options:
            if {"flavor": "mlflow"}: log model using mlflow's log_model method
            else if {"flavor": "_log_flavor"}: log model using shippedbrain._log_flavor
            else if {"flavor": "upload_run" | "upload_model", args...}: log model
                using the named shippedbrain function with args
            NB: input_example and signature are not required; they are filled
            in from the test split before logging. Defaults to
            {"flavor": "mlflow"}.
    :param run_inside_mlflow_context: if True run the log method from an mlflow
            run context, otherwise log outside an mlflow run context
    :return: the mlflow run (or the return value of the shippedbrain function)
    """
    # Fixed: the old signature used a mutable dict default, and this function
    # mutates the dict (adds "signature"/"input_example", pops "flavor"), so
    # the shared default would be polluted across calls.
    if log_model_option is None:
        log_model_option = {"flavor": "mlflow"}
    warnings.filterwarnings("ignore")
    np.random.seed(46)
    print("MLflow Tracking URI:", mlflow.get_tracking_uri())

    train_x, train_y, test_x, test_y, alpha, l1_ratio = build_train()
    lr, predicted_qualities, signature, rmse, mae, r2 = train_and_eval(alpha,
                                                                       l1_ratio,
                                                                       train_x,
                                                                       train_y,
                                                                       test_x,
                                                                       test_y)
    log_model_option["signature"] = signature
    log_model_option["input_example"] = test_x.iloc[0:2]
    print(f"[INFO] RUN INSIDE MLFLOW RUN CONTEXT={run_inside_mlflow_context}")
    if run_inside_mlflow_context:
        with mlflow.start_run() as run:
            print("[INFO] Starting run with id:", run.info.run_id)
            print("[INFO]Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
            print("[INFO]\tRMSE: %s" % rmse)
            print("[INFO]\tMAE: %s" % mae)
            print("[INFO]\tR2: %s" % r2)
            mlflow.log_param("alpha", alpha)
            mlflow.log_param("l1_ratio", l1_ratio)
            mlflow.log_metric("rmse", rmse)
            mlflow.log_metric("r2", r2)
            mlflow.log_metric("mae", mae)
            # Model registry does not work with file store
            print("[DEBUG] Log model option flavor:", log_model_option["flavor"])
            if log_model_option["flavor"] == "_log_flavor":
                _ = shippedbrain._log_flavor("sklearn", sk_model=lr, signature=signature,
                                             input_example=log_model_option["input_example"],
                                             artifact_path="model")
            elif log_model_option["flavor"] in ("upload_model", "upload_run"):
                flavor = log_model_option.pop("flavor")
                # Fixed: getattr instead of eval() on a formatted string.
                log_func = getattr(shippedbrain, flavor)
                _ = log_func(**log_model_option)
            elif log_model_option["flavor"] == "mlflow":
                mlflow.sklearn.log_model(lr, "model", signature=signature,
                                         input_example=log_model_option["input_example"])
            print(f"[INFO] Model URI runs:/{run.info.run_id}/model\n")
            return run
    else:
        if log_model_option["flavor"] == "_log_flavor":
            run = shippedbrain._log_flavor("sklearn",
                                           sk_model=lr,
                                           signature=signature,
                                           input_example=log_model_option["input_example"],
                                           artifact_path="model")
        elif log_model_option["flavor"] in ("upload_model", "upload_run"):
            flavor = log_model_option.pop("flavor")
            # Fixed: getattr instead of eval() on a formatted string.
            log_func = getattr(shippedbrain, flavor)
            run = log_func(**log_model_option)
        # NOTE(review): with flavor "mlflow" and run_inside_mlflow_context=False
        # `run` is never assigned and the line below raises NameError — confirm
        # callers never hit this combination.
        print(f"[INFO] Model URI runs:/{run.info.run_id}/model\n")
        return run
| shippedbrain/shipped-brain-api | tests/resources/train.py | train.py | py | 6,467 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.WARN",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
... |
4016584342 | import os
import numpy as np
import rasterio as rio
import rasterio.features
import geopandas as gpd
import rasterstats
def find_underlying_vector_value(starting_objects, starting_objects_identifying_column, objects_to_select,
                                 objects_to_select_attribute_column):
    """
    Attach the attribute values of an underlying vector layer to an overlying one.

    For every feature in ``starting_objects``, spatially joins the underlying
    features of ``objects_to_select``, aggregates the values found in the
    attribute column into a single semicolon-separated string, and merges that
    string back onto the starting GeoDataFrame.

    Parameters
    ----------
    starting_objects : GeoDataFrame
        Input object(s)
    starting_objects_identifying_column : column label
        Column name that uniquely identifies starting objects
    objects_to_select : GeoDataFrame
        Selecting objects
    objects_to_select_attribute_column : column label
        Desired attribute to find

    Returns
    -------
    GeoDataFrame
        Copy of ``starting_objects`` with the aggregated attribute column added.
    """
    # One row per overlapping pair of features.
    joined = gpd.sjoin(starting_objects, objects_to_select, how='inner', lsuffix='left', rsuffix='right')
    # Only the identifier and the attribute of interest are needed downstream.
    pairs = joined[[starting_objects_identifying_column, objects_to_select_attribute_column]].copy()
    # Collapse all underlying values per starting feature into one
    # semicolon-separated string.
    collapsed = (pairs
                 .groupby(starting_objects_identifying_column)
                 .agg({objects_to_select_attribute_column: '; '.join})
                 .reset_index())
    # Merge the aggregated column back on the shared identifier.
    return starting_objects.merge(collapsed, on=[starting_objects_identifying_column])
def calculate_percentage_underlying_raster_categories_for_polygons(starting_polygons,
                                                                   starting_polygons_identifying_column,
                                                                   raster, raster_category_map, desired_column_names,
                                                                   affine, nodata=0):
    """
    Calculate percentage cover values based on underlying raster values for each polygon in a GeoDataFrame.

    - Calculate zonal statistics using rasterstats function giving pixel counts for each raster value
    - Create dictionary of results assigning zonal statistics to each polygon in polygon GeoDataFrame
    - Create dictionary of raster values and desired column names
    - Assign zonal stats results to new columns in GeoDataFrame using try...except block giving a 0 value for no cover
    - Convert pixel count values to percentages

    Parameters
    ----------
    starting_polygons : GeoDataFrame
        Input polygon(s)
    starting_polygons_identifying_column : column label
        Column name that uniquely identifies starting polygons
    raster : ndarray
        Input raster - must be categorical
    raster_category_map : dict
        Input raster category map dictionary
    desired_column_names : list of str
        Desired output column names (must be in the same order as the
        categories in raster_category_map)
    affine : Affine instance
        The input raster geotransform
    nodata : int, default 0
        The nodata value for the input raster

    Returns
    -------
    GeoDataFrame
        Updated GeoDataFrame

    Notes
    -----
    The input GeoDataFrame is modified in place and also returned.
    NOTE(review): a polygon with no raster coverage leaves every count at 0,
    so the final percentage conversion divides by zero and yields NaN —
    confirm inputs always overlap the raster.
    """
    # calculate zonal statistics using rasterstats.zonal_stats() function
    zonal_stats = rasterstats.zonal_stats(starting_polygons,  # the shapefile to use
                                          raster,  # the raster to use
                                          affine=affine,  # the geotransform for the raster
                                          categorical=True,  # this function only runs on categorised data
                                          category_map=raster_category_map,  # the raster category map dictionary
                                          nodata=nodata  # the nodata value for the raster
                                          )
    # create a dictionary with the zonal results added to each polygon in the input GeoDataFrame
    # (zonal_stats results are positional, matched to rows by iteration order)
    polygons_dict = dict()
    for ind, row in starting_polygons.iterrows():
        polygons_dict[row[starting_polygons_identifying_column]] = zonal_stats[ind]
    # use dict and zip with the category names to create a dictionary of category names and the names for the results
    # columns
    column_dict = dict(zip(raster_category_map.values(), desired_column_names))
    # add rasterstats results to columns in GeoDataFrame
    for ind, row in starting_polygons.iterrows():  # use iterrows to iterate over each row in the GeoDataFrame
        results_data = polygons_dict[row[starting_polygons_identifying_column]]  # get the category data for row/polygon
        for category in raster_category_map.values():  # iterate over each of the category class names
            # try...except block giving a 0 value for no cover and assigning value if there is cover
            try:
                # add the category count to a new column
                starting_polygons.loc[ind, column_dict[category]] = results_data[category]
            except KeyError:
                # if category name is not present, value should be 0
                starting_polygons.loc[ind, column_dict[category]] = 0
    # convert the counts into percentages
    for ind, row in starting_polygons.iterrows():  # iterate over each row in the GeoDataFrame
        # multiply the pixel count values by 100 and divide by the sum of all cover column values
        starting_polygons.loc[ind, desired_column_names] = 100 * row[desired_column_names] / row[
            desired_column_names].sum()
    return starting_polygons  # an updated version of the input GeoDataFrame
def calculate_stat_values_underlying_raster_for_polygons(starting_polygons,
                                                         starting_polygons_identifying_column_integer,
                                                         desired_column_names, raster, affine, fill_value=0,
                                                         starting_polygons_geometry_column='geometry'):
    """
    Calculate statistics values of underlying raster for each polygon in a GeoDataFrame.
    - Create list of geometry, value pairs for polygons in GeoDataFrame
    - Rasterize vector polygons using rasterio in order to create masks for each polygon extent
    - Calculate statistics of underlying raster using masks by iterating over each polygon in GeoDataFrame
    - Add values to input polygon GeoDataFrame by creating new columns with desired column names

    Parameters
    ----------
    starting_polygons : GeoDataFrame
        Input polygon(s) - mutated in place and also returned
    starting_polygons_identifying_column_integer : column label
        Column name that uniquely identifies starting polygons - values in column must be of type integer and must not
        contain values equal to the fill value used with the vector geometries below
    desired_column_names : dict
        Dictionary of desired output column names with keys
        'mean', 'min', 'max', 'range', 'median', 'std'
    raster : ndarray
        Input raster
    affine : Affine instance
        The input raster geotransform
    fill_value : int, default 0
        The value to use for areas not covered by the polygon geometries
    starting_polygons_geometry_column : column label, default 'geometry'
        Column name of geometry column in polygon GeoDataFrame

    Returns
    -------
    GeoDataFrame
        Updated GeoDataFrame
    """
    # get a list of geometry, value pairs
    shapes = list(zip(starting_polygons[starting_polygons_geometry_column],
                      starting_polygons[starting_polygons_identifying_column_integer]))
    # create a raster based on the vector polygons - each cell holds the identifier of the polygon covering it
    site_mask = rio.features.rasterize(shapes=shapes,  # the list of geometry/value pairs
                                       fill=fill_value,  # the value to use for cells not covered by any geometry
                                       out_shape=raster.shape,  # the shape of the new raster
                                       transform=affine)  # the geotransform of the new raster
    # a single pass computes every statistic; the original looped over the GeoDataFrame six times and rebuilt the
    # same boolean mask for each statistic, which is O(6 * n * raster_size) for no benefit
    for ind, row in starting_polygons.iterrows():
        # extract the raster values covered by this polygon once
        values = raster[site_mask == row[starting_polygons_identifying_column_integer]]
        # nan-aware aggregations so nodata stored as NaN does not poison the statistics
        vmin = np.nanmin(values)
        vmax = np.nanmax(values)
        starting_polygons.loc[ind, desired_column_names['mean']] = np.nanmean(values)
        starting_polygons.loc[ind, desired_column_names['min']] = vmin
        starting_polygons.loc[ind, desired_column_names['max']] = vmax
        starting_polygons.loc[ind, desired_column_names['range']] = vmax - vmin
        starting_polygons.loc[ind, desired_column_names['median']] = np.nanmedian(values)
        starting_polygons.loc[ind, desired_column_names['std']] = np.nanstd(values)
    return starting_polygons  # an updated version of the input GeoDataFrame
# load the input shapefile datasets from the data_files folder using gpd.read_file(os.path.abspath())
sites = gpd.read_file(os.path.abspath('data_files/Site_Locations.shp'))
counties = gpd.read_file(os.path.abspath('data_files/Counties.shp'))
LGDs = gpd.read_file(os.path.abspath('data_files/Local_Government_Districts.shp'))
# transform data files to Northern Ireland (NI) Universal Transverse Mercator zone (UTM(29) which has an epsg of 32629)
# which will give measurements in metres using gdf.to_crs() - see here for a list of EPSG codes: https://epsg.io/
# transform all input data_files to ensure all data is on the same reference system using inplace=True as we want to
# transform the datasets here and not create new ones
sites.to_crs(epsg=32629, inplace=True)
counties.to_crs(epsg=32629, inplace=True)
LGDs.to_crs(epsg=32629, inplace=True)
# find the county each site is situated within
# improve the display of the county names in the output by converting the names out of all-capitals and adding
# the characters 'County ' to the start of each county name string
# (vectorised pandas string operation - replaces the original per-row iterrows()/.loc assignment loop, same result)
counties['County'] = 'County ' + counties['CountyName'].str.title()
# find the underlying county using find_underlying_vector_value() function previously defined and assigning its name to
# a new column called County
sites = find_underlying_vector_value(sites, 'Name', counties, 'County')
# find the Local Government District (LGD) each site is situated within
# we want to improve the display of the LGD name column label in the output by renaming the LGDNAME column to LGD
LGDs = LGDs.rename(columns={'LGDNAME': 'LGD'})
# find the underlying LGD using find_underlying_vector_value() function previously defined and assigning its name to
# a new column called LGD
sites = find_underlying_vector_value(sites, 'Name', LGDs, 'LGD')
# calculate the areas of each site
# vectorised GeoSeries .area computes every polygon in one pass, replacing the original per-row iterrows() loop;
# divide by 1,000,000 to convert metres squared to kilometres squared and round to 2 decimal places
sites['Area(km2)'] = (sites['geometry'].area / 1000000).round(2)
# calculate the perimeter of each site
# vectorised GeoSeries .length, divided by 1,000 to convert metres to kilometres, rounded to 2 decimal places
sites['Perimeter(km)'] = (sites['geometry'].length / 1000).round(2)
# calculate the percentage landcover for each site using the
# calculate_percentage_underlying_raster_categories_for_polygons() function previously defined
# open the landcover raster and read the data - we will use with rio.open() here to read the data and ensure the file
# is then closed
with rio.open('data_files/Landcover.tif') as dataset:
    lc_crs = dataset.crs  # the raster crs
    landcover = dataset.read(1)  # the band the data values are stored in that we want to read (band 1)
    lc_affine_tfm = dataset.transform  # the raster geotransform
# we need to ensure the vector layer is in the same crs as the raster before performing the next step
sites.to_crs(lc_crs, inplace=True)
# to find the percentage landcover for each site we first need to define a landcover category map dictionary which
# maps the raster values to the landcover categories
landcover_names = {1: 'Broadleaf woodland',
                   2: 'Coniferous woodland',
                   3: 'Arable',
                   4: 'Improved grassland',
                   5: 'Semi-natural grassland',
                   6: 'Mountain, heath, bog',
                   7: 'Saltwater',
                   8: 'Freshwater',
                   9: 'Coastal',
                   10: 'Built-up areas and gardens'}
# we also need to define a desired column names list (in the same order as the above raster category map)
lc_short_names = ['%Broadleaf',
                  '%Coniferous',
                  '%Arable',
                  '%Imp_grass',
                  '%Nat_grass',
                  '%Mountain',
                  '%Saltwater',
                  '%Freshwater',
                  '%Coastal',
                  '%Built_up']
# calculate the percentage landcover using the calculate_percentage_underlying_raster_categories_for_polygons()
# function previously defined
# NOTE: the helper mutates `sites` in place, which is why the return value is not captured here
calculate_percentage_underlying_raster_categories_for_polygons(sites, 'Name', landcover, landcover_names,
                                                               lc_short_names, lc_affine_tfm)
# calculate the percentage underlying superficial geology for each site using the
# calculate_percentage_underlying_raster_categories_for_polygons() function previously defined
# open the geology raster and read the data - we will use with rio.open() here to read the data and ensure the file
# is then closed
with rio.open('data_files/Superficial_Geology.tif') as dataset:
    geol_crs = dataset.crs  # the raster crs
    geology = dataset.read(1)  # the band the data values are stored in that we want to read (band 1)
    geol_affine_tfm = dataset.transform  # the raster geotransform
# we need to ensure the vector layer is in the same crs as the raster before performing the next step
sites.to_crs(geol_crs, inplace=True)
# to find the percentage underlying geology for each site we first need to define a geology category map dictionary
# which maps the raster values to the geology categories
geology_names = {1: 'Alluvium - Sand and Silt',
                 2: 'Glaciolacustrine Deposits - Silt and Clay',
                 3: 'Blown Sand',
                 4: 'Diatomite',
                 5: 'Glaciofluvial Sheet Deposits - Sand, Silt and Clay',
                 6: 'Glacial Sand and Gravel',
                 7: 'Lacustrine Alluvium - Clay, Silt and Sand',
                 8: 'Peat',
                 9: 'Raised Beach Deposits - Gravel, Sand and Silt',
                 10: 'Raised Marine Deposits - Clay, Silt and Sand',
                 11: 'Landslide Deposits - Unknown/Unclassified',
                 12: 'Till - Diamicton'}
# we also need to define a desired column names list (in the same order as the above raster category map)
geol_short_names = ['%Alluv_sand_silt',
                    '%Glaciolac_silt_clay',
                    '%Blown_sand',
                    '%Diatomite',
                    '%Glaciofulv_sand_silt_clay',
                    '%Glacial_sand_gravel',
                    '%Lac_alluv_clay_silt_sand',
                    '%Peat',
                    '%Raised_beach_gravel_sand_silt',
                    '%Raised_marine_clay_silt_sand',
                    '%Landslide_unknown',
                    '%Till_diamicton']
# calculate the percentage underlying geology using the calculate_percentage_underlying_raster_categories_for_polygons()
# function previously defined (again mutates `sites` in place)
calculate_percentage_underlying_raster_categories_for_polygons(sites, 'Name', geology, geology_names, geol_short_names,
                                                               geol_affine_tfm)
# before running the calculate_stat_values_underlying_raster_for_polygons() function previously defined we need to
# ensure we have an integer column that uniquely identifies each polygon in the GeoDataFrame that does not contain a
# value that is the same as the value we will use as our fill value (0)
# create an integer identifier column for the raster analysis called ID_RA which starts with 1 and increments by 1 for
# each row until the end of the sites GeoDataFrame (starting at 1 deliberately avoids the fill value 0)
sites['ID_RA'] = range(1, 1+len(sites))
# calculate wind speed statistics for each site
# open the wind speed raster and read the data - we will use with rio.open() here to read the data and ensure the file
# is then closed
with rio.open('data_files/Wind_Speed.tif') as dataset:
    ws_crs = dataset.crs  # the raster crs
    wind_speed = dataset.read(1)  # the band the data values are stored in that we want to read (band 1)
    ws_affine_tfm = dataset.transform  # the raster geotransform
# we need to ensure the vector layer is in the same crs as the raster before performing the next step
sites.to_crs(ws_crs, inplace=True)
# before running the function we need to create a dictionary of desired column names for the statistics
wind_speed_stat_columns_dict = {'mean': 'MeanWindSpeed(m/s)',
                                'min': 'MinWindSpeed(m/s)',
                                'max': 'MaxWindSpeed(m/s)',
                                'range': 'WindSpeedRange(m/s)',
                                'median': 'MedianWindSpeed(m/s)',
                                'std': 'WindSpeedStdDev'}
# calculate statistics for the wind speed raster for each site using
# calculate_stat_values_underlying_raster_for_polygons() function previously defined (mutates `sites` in place)
calculate_stat_values_underlying_raster_for_polygons(sites, 'ID_RA', wind_speed_stat_columns_dict, wind_speed,
                                                     ws_affine_tfm, 0, 'geometry')
# calculate wind power density statistics for each site
# open the wind power density raster and read the data - we will use with rio.open() here to read the data and ensure
# the file is then closed
with rio.open('data_files/Wind_Power_Density.tif') as dataset:
    wpd_crs = dataset.crs  # the raster crs
    wind_power_density = dataset.read(1)  # the band the data values are stored in that we want to read (band 1)
    wpd_affine_tfm = dataset.transform  # the raster geotransform
# we need to ensure the vector layer is in the same crs as the raster before performing the next step
sites.to_crs(wpd_crs, inplace=True)
# before running the function we need to create a dictionary of desired column names for the statistics
wind_power_density_stat_columns_dict = {'mean': 'MeanWindPowerDensity(W/m2)',
                                        'min': 'MinWindPowerDensity(W/m2)',
                                        'max': 'MaxWindPowerDensity(W/m2)',
                                        'range': 'WindPowerDensityRange(W/m2)',
                                        'median': 'MedianWindPowerDensity(W/m2)',
                                        'std': 'WindPowerDensityStdDev'}
# calculate statistics for the wind power density raster for each site using
# calculate_stat_values_underlying_raster_for_polygons() function previously defined (mutates `sites` in place)
calculate_stat_values_underlying_raster_for_polygons(sites, 'ID_RA', wind_power_density_stat_columns_dict,
                                                     wind_power_density, wpd_affine_tfm, 0, 'geometry')
# calculate elevation statistics for each site
# open the elevation raster and read the data - we will use with rio.open() here to read the data and ensure the file
# is then closed
with rio.open('data_files/DEM.tif') as dataset:
    elev_crs = dataset.crs  # the raster crs
    elevation = dataset.read(1)  # the band the data values are stored in that we want to read (band 1)
    elev_affine_tfm = dataset.transform  # the raster geotransform
# we need to ensure the vector layer is in the same crs as the raster before performing the next step
sites.to_crs(elev_crs, inplace=True)
# before running the function we need to create a dictionary of desired column names for the statistics
elevation_stat_columns_dict = {'mean': 'MeanElevation(m)',
                               'min': 'MinElevation(m)',
                               'max': 'MaxElevation(m)',
                               'range': 'ElevationRange(m)',
                               'median': 'MedianElevation(m)',
                               'std': 'ElevationStdDev'}
# calculate statistics for the elevation raster for each site using
# calculate_stat_values_underlying_raster_for_polygons() function previously defined (mutates `sites` in place)
calculate_stat_values_underlying_raster_for_polygons(sites, 'ID_RA', elevation_stat_columns_dict, elevation,
                                                     elev_affine_tfm, 0, 'geometry')
# for clarity and so that we can join the site characteristics data with the proximity data we will rename the Name
# column for the sites to Site Name
sites = sites.rename(columns={'Name': 'Site Name'})
# now to save the results - firstly, create a DataFrame to export to CSV by copying only desired columns from
# GeoDataFrame that contains results by dropping unnecessary columns (the geometry column as this is not needed in the
# csv results and the ID_RA column as this was only created for functions in the script and is not needed in the CSV
# results)
site_results = sites.drop(columns=['geometry', 'ID_RA']).copy()
# now that the results are in a DataFrame as opposed to a GeoDataFrame we can round all results to 2 decimal places,
# this step could not be undertaken on the GeoDataFame as geometry cannot be rounded
site_results = site_results.round(2)
# save the results DataFrame as a CSV called Site_characteristics.csv to the output_files folder with the index removed
site_results.to_csv('output_files/Site_characteristics.csv', index=False) | DaisyMeadow/EGM722_Assessment | Site_characteristics.py | Site_characteristics.py | py | 25,072 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "geopandas.sjoin",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "rasterstats.zonal_stats",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "rasterio.features.rasterize",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "... |
23150403791 | import sys
import pathlib
import json
def get_settings_dir(name) -> pathlib.Path:
    """
    Return the path of the application's persistent config file:
    <platform data dir>/<name>/config.json.

    (The original docstring claimed this returns a parent directory;
    it actually returns the config.json file path itself.)

    Platform data dirs:
    # linux: ~/.local/share
    # macOS: ~/Library/Application Support
    # windows: C:/Users/<USER>/AppData/Roaming

    Raises OSError on unsupported platforms.
    """
    home = pathlib.Path.home()
    # dispatch table replaces the if/elif chain; same mapping as before
    platform_subdirs = {
        "win32": "AppData/Roaming",
        "linux": ".local/share",
        "darwin": "Library/Application Support",
    }
    try:
        config_path = home / platform_subdirs[sys.platform]
    except KeyError:
        raise OSError("OS not supported") from None
    return config_path / name / "config.json"
def create_dir(settings_file):
    """Ensure the parent directory of *settings_file* exists (idempotent).

    Uses mkdir(exist_ok=True) instead of the original try/except
    FileExistsError, which also silently swallowed the case where a
    regular file blocks the directory path; exist_ok only suppresses
    the error when the existing path really is a directory.
    """
    settings_file.parent.mkdir(parents=True, exist_ok=True)
class Settings(object):
    """Application settings with known defaults, persisted as a JSON file."""

    def __init__(self, name):
        # Recognised settings and their defaults; keys on disk that are
        # not listed here are ignored by load().
        self._settings = {
            "jira_domain": "arcanys.atlassian.net",
            "jira_username": None,
            "jira_token": None,
            "tempo_token": None,
        }
        self._file = get_settings_dir(name)

    def load(self):
        """Read settings from disk, keeping only recognised keys."""
        create_dir(self._file)
        if not self._file.is_file():
            return
        with open(self._file, "r") as fp:
            stored = json.load(fp)
        for key in self._settings:
            if key in stored:
                self._settings[key] = stored[key]

    def save(self):
        """Write the current settings to disk as JSON."""
        create_dir(self._file)
        with open(self._file, "w") as fp:
            json.dump(self._settings, fp)

    def get(self, key):
        """Return the value for *key*, or None if unknown."""
        return self._settings.get(key)

    def set(self, key, value):
        """Store *value* under *key* (in memory only; call save() to persist)."""
        self._settings[key] = value
| djaney/arcassistant | arcassistant/shared/settings.py | settings.py | py | 1,710 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pathlib.Path.home",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.platform",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.platform",
... |
70960375393 | '''
Universidad del Valle de Guatemala
Redes - 2021
DVR.py
Roberto Figueroa 18306
Luis Quezada 18028
Esteban del Valle 18221
'''
from slixmpp.basexmpp import BaseXMPP
from node import Node
from asyncio import sleep
from aioconsole import aprint
from time import time
from xml.etree import ElementTree as ET
import json
import asyncio
import numpy as np
from scipy.sparse.csgraph import shortest_path
"""
---------
| A |
| Sec. |
| Age |
---------
| B | 0.3 |
| E | 0.5 |
---------
"""
EXPIRATION = 5
class DVR(Node):
    """Distance-Vector-Routing XMPP node.

    NOTE(review): several methods in this class appear unfinished or
    inconsistent (see inline notes); the code is documented as-is.
    """
    def __init__(self, jid, password, entity, asoc_nodes = None, t_keys = None):
        super().__init__(jid, password)
        self.DVR_seqnum = 0
        self.DVR = {}
        self.entity = entity  # this node's single-character nickname in the topology
        self.basexmpp = BaseXMPP()
        self.neighbors = asoc_nodes #should be a dict
        # NOTE(review): prefer `is not None`; `!= None` works but is unidiomatic
        self.neighbors_niknames = self.neighbors.keys() if self.neighbors != None else []
        self.topo = []
        self.all_nodes = [self.entity]
        self.ady_matrix = []  # adjacency/distance matrix, rebuilt in update_ady_matrix()
        self.prev_matrix = []
        # NOTE(review): build_topo_package() reads self.topo_keys, which is never
        # assigned anywhere in this class - likely an AttributeError (probably
        # meant to use t_keys).
        self.build_topo_package()
        # ---------
        # NOTE(review): list.sort() sorts in place and returns None, so
        # topo_vector is always None; likely intended sorted(t_keys).
        self.topo_vector = t_keys.sort()

    def send_hello(self, hto, hfrom):
        """
        Function for neighbor discovery
        """
        self.send_message(hto,
                          "<hello>",
                          mfrom=hfrom)
        print("Sending hello to neighbor ...")

    def eco(self, eco_to, eco_from):
        """
        Function for measure cost between neighbors
        (sends the current timestamp so the peer can echo it back)
        """
        # print("Sending eco to {}".format(eco_to))
        self.send_message(
            mto=eco_to,
            mbody="<eco time='%f' ></eco>" % time(),
            mfrom=eco_from
        )

    def build_topo_package(self):
        """
        Function for package build about the network
        destination | dist | next hop
        """
        for i in self.topo_keys:
            if i == self.entity:
                self.topo.append((i , 0, None))
            # NOTE(review): this also appends an infinite-distance entry for the
            # node itself - an `else` seems to be missing here.
            self.topo.append((i , float('inf'), None))

    def update_topo_package(self, node, weight):
        """
        Function for package weights update+
        """
        for i in self.topo:
            if i[0] == node:
                # NOTE(review): self.topo holds tuples, which are immutable;
                # this item assignment will raise TypeError at runtime.
                i[1] = weight

    def send_topo_package(self, to):
        """
        Send the topo package to neighbors (serialised as JSON in the stanza body)
        """
        dvr_json = json.dumps(self.topo)
        self.send_message(to,
                          "<pack dvr='%s' from='%s'></pack>" % (dvr_json, self.entity),
                          mfrom=self.boundjid,
                          )

    def recieve_topo_package(self, nfrom, topo_package):
        """
        Recieve topo package from a neighbor,
        this function must process the timestamp and
        the sequence number in order to drop or send
        package

        NOTE(review): stub - returns a placeholder string only.
        """
        return "This should be a message stanza"

    def shortest_path(self):
        """
        Must be a Bellman-Ford algorithm implementation

        NOTE(review): stub - list.reverse() reverses in place and returns
        None, so this always returns None.
        """
        path = []
        return path.reverse()

    async def update_tables(self):
        # Periodic task: probe each neighbor's latency, then broadcast the
        # topology package to every neighbor, every 5 seconds.
        while True:
            for router in self.neighbors_niknames:
                self.eco(self.neighbors[router], self.boundjid)
            await asyncio.sleep(5)
            # print("Sending packages to neighbors ... ")
            for router in self.neighbors_niknames:
                self.send_topo_package(self.neighbors[router])

    def get_nickname(self, jid):
        # Reverse lookup: neighbor JID -> nickname (raises ValueError if unknown).
        key_list = list(self.neighbors.keys())
        val_list = list(self.neighbors.values())
        return key_list[val_list.index(jid)]

    def init_listener(self):
        # Schedule the periodic table-update coroutine on the client's loop.
        self.loop.create_task(self.update_tables())

    def flood(self, to, package):
        # Forward a raw topology package to a single destination.
        self.send_message(to,
                          "<pack dvr='%s'></pack>" % package,
                          mfrom=self.boundjid,
                          )

    def save_prev_matrix(self):
        # Keep a reference to the previous adjacency matrix (no copy is made).
        self.prev_matrix = self.ady_matrix

    def update_ady_matrix(self):
        """Rebuild the adjacency matrix from self.topo and re-run Bellman-Ford."""
        self.save_prev_matrix()
        length = len(self.all_nodes)
        self.ady_matrix = np.zeros((length, length),dtype=np.float16)
        for row_node in self.all_nodes:
            # NOTE(review): here self.topo is indexed as a dict of dicts
            # (self.topo[node]['weights']), but __init__/build_topo_package
            # construct it as a list of tuples - the two representations
            # are inconsistent; TODO confirm intended structure.
            for col_node in self.topo[row_node]['weights'].keys():
                row = self.all_nodes.index(row_node)
                if col_node in self.all_nodes:
                    col = self.all_nodes.index(col_node)
                else:
                    # bail out entirely if any referenced node is unknown
                    return
                self.ady_matrix[row][col] = self.topo[row_node]['weights'][col_node]
        # compare tables, update if diff with Bellman Ford
        optimized_matrix = shortest_path(self.ady_matrix,directed=True,method='BF',return_predecessors=False)
        if np.allclose(self.ady_matrix,optimized_matrix) == False:
            self.ady_matrix = optimized_matrix

    def bellmanFord(self, destiny):
        # NOTE(review): unfinished - D/Pr are computed but the predecessor
        # walk is never performed and nothing is returned.
        D, Pr = shortest_path(self.ady_matrix,directed=True,method='BF',return_predecessors=True)
        _from = self.all_nodes.index(self.entity)
        path = [destiny]
        k = destiny

    def parse_path(self, path):
        # Convert a list of node indices back into node nicknames.
        return [self.all_nodes[i] for i in path]

    def get_shortest_path(self, destiny): #should be a character
        # NOTE(review): this walks self.ady_matrix as if it were a scipy
        # predecessor matrix (-9999 sentinel), although elsewhere it stores
        # distances - presumably the predecessor matrix from bellmanFord()
        # was meant to be used here; TODO confirm.
        _from = self.all_nodes.index(self.entity)
        destiny = self.all_nodes.index(destiny)
        path = [destiny]
        k = destiny
        while self.ady_matrix[_from, k] != -9999:
            path.append(self.ady_matrix[_from, k])
            k = self.ady_matrix[_from, k]
        return self.parse_path(path[::-1])

    def send_msg(self, to, msg): # to should be a character
        # Route a chat message one hop along the best known path.
        path = self.get_shortest_path(to)
        print("%s: my best path: %s" %(self.entity,path))
        if len(path) > 1:
            self.send_message(
                mto=self.neighbors[path[1]],
                mbody="<msg chat='%s' to='%s' ></msg>" %(msg, to),
                mfrom=self.boundjid
            )

    async def message(self, msg):
        """Dispatch incoming stanzas by sniffing the body's leading tag."""
        if msg['type'] in ('normal', 'chat'):
            # NOTE(review): `x in ("<hello>")` tests substring membership in the
            # string "<hello>" (the parentheses do not make a tuple); a trailing
            # comma was probably intended.
            if msg['body'][:7] in ("<hello>"):
                msg.reply(self.boundjid).send()
                print("Recieved hello from neighbor, sending answer ...")
            elif msg['body'][1:4] == "eco":
                # latency probe: echo the peer's timestamp back
                xml_parse = ET.fromstring(msg['body'])
                timestamp = xml_parse.attrib['time']
                msg.reply("<a_eco time='%s'></a_eco>" % timestamp).send()
            elif msg['body'][1:6] == "a_eco":
                # echo reply: round-trip-time / 2 is the link cost estimate
                pack_from = msg['from'].bare
                node_entity = self.get_nickname(pack_from)
                end_time = time()
                msg_parse = ET.fromstring(msg['body'])
                start_time = float(msg_parse.attrib['time'])
                delta_time = (end_time - start_time) / 2
                delta_time = round(delta_time, 1)
                self.update_topo_package(node_entity, delta_time)
            elif msg['body'][1:5] == "pack":
                # topology package from a neighbor
                # p_from = msg['from'].bare
                # n_entity = self.get_nickname(p_from)
                parse = ET.fromstring(msg['body'])
                pack_json = parse.attrib['dvr']
                # NOTE(review): dvr and n_entity are parsed but never used -
                # the package is effectively dropped here.
                dvr = json.loads(pack_json)
                n_entity = parse.attrib['from']
            elif msg['body'][1:4] == "msg":
                # user chat message: deliver locally or forward along best path
                msg_parse = ET.fromstring(msg['body'])
                bare_msg = msg_parse.attrib['chat']
                msg_to = msg_parse.attrib['to']
                if msg_to != self.entity:
                    self.send_msg(msg_to, bare_msg)
                else:
                    print("Incoming message: %s" % bare_msg)
            else:
                pass
| Crismaria11/lab3-redes | DVR.py | DVR.py | py | 7,570 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "node.Node",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "slixmpp.basexmpp.BaseXMPP",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_... |
3267556703 | from __future__ import absolute_import, division, print_function
import os
import sys
import glob
import argparse
import numpy as np
import PIL.Image as pil
import PIL.ImageOps
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from skimage.segmentation import mark_boundaries
from scipy.optimize import linear_sum_assignment
import torch
import torch.nn.functional as F
from torchvision import transforms, datasets
import networks
from layers import disp_to_depth
from utils import download_model_if_doesnt_exist
#from fullcrf import fullcrf
#labels = ['unlabeled','ego vehicle','rectification border','out of roi','static','dynamic','ground','road','sidewalk','parking','rail track',
# 'building','wall','fence','guard rail','bridge','tunnel','pole','polegroup','traffic light','traffic sign','vegetation','terrain',
# 'sky','person','rider','car','truck','bus','caravan','trailer','train','motorcycle','bicycle','license plate']
# Human-readable names of the 19 evaluated classes, index-aligned with eval_cls
# (this matches the Cityscapes label convention - TODO confirm against the dataset).
labels = ['road','sidewalk','building','wall','fence','pole','traffic light','traffic sign','vegetation','terrain',
          'sky','person','rider','car','truck','bus','train','motorcycle','bicycle']
# raw label ids that are evaluated
eval_cls = [7,8,11,12,13,17,19,20,21,22,23,24,25,26,27,28,31,32,33]
# raw label ids treated as void/ignore during evaluation
void_cls = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
# maps raw label id -> contiguous train id (0..18)
class_map = dict(zip(eval_cls, range(len(eval_cls))))
# identity map over train ids (kept for symmetry with class_map)
seg_class_map = dict(zip(range(len(eval_cls)), range(len(eval_cls))))
# RGB palette per train id, used to render predicted segmentations
color_seg = [[128, 64, 128],[244, 35, 232],[70, 70, 70],[102, 102, 156],[190, 153, 153],
             [153, 153, 153],[250, 170, 30],[220, 220, 0],[107, 142, 35],[152, 251, 152],
             [0, 130, 180],[220, 20, 60],[255, 0, 0],[0, 0, 142],[0, 0, 70],[0, 60, 100],
             [0, 80, 100],[0, 0, 230],[119, 11, 32]]
color_seg = np.array(color_seg).astype(np.uint8)
def load_data(config):
    """
    Gather evaluation inputs, ground-truth maps and per-image output paths.

    Returns (inputs, gts, output_dirs): inputs is a list of RGB PIL images,
    gts a list of uint8 label arrays remapped to train ids (255 = ignore),
    and output_dirs maps "input"/"seg"/"depth" to per-image output paths
    under <config.output_dir>/<config.model_name>.
    """
    inputs = []
    gts = []
    output_dirs = {"input": [], "seg": [], "depth": []}
    # [1::2] keeps every second file - presumably the directory interleaves
    # paired files; TODO confirm against the data layout.
    input_paths = sorted(glob.glob(os.path.join(config.input_path, '*.png')))[1::2]
    if config.gt_path is not None:
        gt_paths = sorted(glob.glob(os.path.join(config.gt_path, '*.png')))[1::2]
    else:
        gt_paths = []
    model_name = config.model_name
    output_dir_base = os.path.join(config.output_dir, model_name)
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists()/os.makedirs() pair
    os.makedirs(output_dir_base, exist_ok=True)
    print("-> Predicting on {:d} eval images".format(len(input_paths)))
    print("-> {:d} gt images".format(len(gt_paths)))
    for image_path in input_paths:
        inputs.append(pil.open(image_path).convert('RGB'))
        output_name = os.path.splitext(os.path.basename(image_path))[0]
        output_dirs["input"].append(os.path.join(output_dir_base, "{}_input.png".format(output_name)))
        output_dirs["seg"].append(os.path.join(output_dir_base, "{}_seg.png".format(output_name)))
        output_dirs["depth"].append(os.path.join(output_dir_base, "{}_zepth.png".format(output_name)))
    for image_path in gt_paths:
        gt = np.array(pil.open(image_path), dtype=np.uint8)
        # Collapse all void classes to the ignore value, then remap evaluated
        # raw ids to contiguous train ids 0..18. eval_cls is ascending, so each
        # remapped value is smaller than any raw id still to be processed and
        # no remap collisions occur.
        for c in void_cls:
            gt[gt == c] = 255
        for c in eval_cls:
            gt[gt == c] = class_map[c]
        gts.append(gt)
    return inputs, gts, output_dirs
def predict_seg(models, inputs, output_dirs, config):
    """Function to predict for a single image or folder of images

    Runs each input image through the model stack on CUDA, saves the input
    (and, for the "share" architecture, a colour-mapped disparity image),
    and returns the per-image argmax segmentation maps as numpy arrays.
    """
    # move every model to the GPU in inference mode
    for key, model in models.items():
        model.eval()
        model.to('cuda')
    segs = []
    with torch.no_grad():
        for i, image in enumerate(inputs):
            original_width, original_height = image.size
            image.save(output_dirs["input"][i])
            # Load image and preprocess
            input_image = image.resize((config.width, config.height), pil.LANCZOS)
            input_image = transforms.ToTensor()(input_image).unsqueeze(0)
            # PREDICTION
            input_image = input_image.to('cuda')
            # NOTE(review): if config.architecture is neither "share" nor
            # "byol", seg/depth are never assigned and the code below raises
            # NameError / UnboundLocalError.
            if config.architecture == "share":
                seg_features = models["encoder"](input_image)
                seg = models["seg"](seg_features)[("output", 0)]
                depth = models["depth"](seg_features)[("output", 0)]
            if config.architecture == "byol":
                features = models["encoder"](input_image)
                representation = models["decoder"](features)
                representation = F.normalize(representation, dim=1, p=2)
                seg = F.softmax(models["head"](representation.detach(), features)[("output", 0)],dim=1)
                depth = None
            # resize class scores back to the original image resolution
            seg = torch.nn.functional.interpolate(
                seg, (original_height, original_width), mode="bilinear", align_corners=False)
            seg = seg.squeeze()
            seg = torch.argmax(seg, dim=0).cpu().numpy()
            segs.append(seg)
            # NOTE(review): prefer `depth is not None` - comparing a tensor
            # with != can invoke elementwise comparison semantics.
            if depth != None:
                disp_resized = torch.nn.functional.interpolate(depth, (original_height, original_width), mode="bilinear", align_corners=False)
                disp_resized_np = disp_resized.squeeze().cpu().numpy()
                # clip the colour range at the 95th percentile for contrast
                vmax = np.percentile(disp_resized_np, 95)
                normalizer = mpl.colors.Normalize(vmin=disp_resized_np.min(), vmax=vmax)
                mapper = cm.ScalarMappable(norm=normalizer, cmap='magma')
                colormapped_im = (mapper.to_rgba(disp_resized_np)[:, :, :3] * 255).astype(np.uint8)
                im = pil.fromarray(colormapped_im)
                im.save(output_dirs["depth"][i])
    return segs
def compute_metrics(segs, gts, output_dirs, h_match = False):
    """Accumulate a confusion matrix over all images and derive accuracy/IoU.

    When h_match is True, predicted classes are matched to ground-truth
    classes with the Hungarian algorithm (maximising IoU) instead of the
    identity mapping - useful for unsupervised segmentation outputs.
    Also saves a colour-rendered segmentation image per input.
    """
    num_cls = len(eval_cls)
    intersection = np.zeros((num_cls, num_cls))
    for seg, gt in zip(segs, gts):
        # ignore pixels outside the valid train-id range (e.g. 255 = void)
        mask = (gt >= 0) & (gt < num_cls)
        # confusion matrix: rows index predicted class, columns ground truth
        hist = np.bincount(
            num_cls * seg[mask] + gt[mask], minlength=num_cls ** 2
        ).reshape(num_cls, num_cls)
        intersection += hist
    # identity matching by default: prediction i is scored against gt i
    i = j = range(num_cls)
    if h_match:
        # per-pair union; -intersection/union is the (negated) IoU cost
        union = -intersection + intersection.sum(axis=1,keepdims=True) + intersection.sum(axis=0,keepdims=True)
        cost = -intersection/union
        cost[np.isnan(cost)] = -1
        i, j = linear_sum_assignment(cost)
    hist = intersection
    print(j)
    # fancy indexing hist[i, j] selects the matched (pred, gt) diagonal
    acc = hist[i,j].sum() / hist.sum()
    acc_cls = hist[i,j] / hist.sum(axis=0)[j]
    acc_cls = np.nanmean(acc_cls)
    iu = hist[i,j] / (hist.sum(axis=1) + hist.sum(axis=0)[j] - hist[i,j])
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=0)[j] / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    gt_labels = [labels[index] for index in j]
    cls_iu = dict(zip(gt_labels, iu))
    metrics = {
        "Overall Acc": acc,
        "Mean Acc": acc_cls,
        "FreqW Acc": fwavacc,
        "Mean IoU": mean_iu
    }
    metrics.update(cls_iu)
    # render each prediction with the (possibly permuted) colour palette
    for s, seg in enumerate(segs):
        seg_img = color_seg[j][seg]
        seg_img = pil.fromarray(seg_img)
        seg_img.save(output_dirs["seg"][s])
    return metrics
def build_models(config):
    """Construct the network stack for config.architecture and load weights.

    "share": one ResNet encoder with separate seg and depth decoders.
    "byol":  encoder + BYOL decoder + segmentation head.
    Weights are loaded per-module from <config.load_weights_folder>/<name>.pth,
    keeping only keys present in each module's state dict.

    NOTE(review): if config.architecture is neither "share" nor "byol",
    models_to_load is never assigned and the loop below raises NameError.
    """
    models = {}
    if config.architecture == "share":
        models["encoder"] = networks.ResnetEncoder(
            num_layers = config.ResX,
            pretrained = True)
        models["seg"] = networks.Decoder(
            out_channels = config.cls_num,
            scales = config.seg_scales,
            activate = 'softmax',
            in_channels = models["encoder"].num_ch_enc)
        models["depth"] = networks.Decoder(
            out_channels = 1,
            scales = config.seg_scales,
            activate = 'sigmoid',
            in_channels = models["encoder"].num_ch_enc)
        models_to_load = ["encoder","seg","depth"]
    if config.architecture == "byol":
        models["encoder"] = networks.ResnetEncoder(
            num_layers = config.ResX,
            pretrained = False)
        models["decoder"] = networks.Decoder_BYOL(
            in_channels = models["encoder"].num_ch_enc)
        models["head"] = networks.FullHead(
            out_channels=19,
            activate = 'softmax',
            in_channels=models["encoder"].num_ch_enc)
        models_to_load = ["encoder","decoder","head"]
    config.load_weights_folder = os.path.expanduser(config.load_weights_folder)
    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would be safer for this user-facing check.
    assert os.path.isdir(config.load_weights_folder), \
        "Cannot find folder {}".format(config.load_weights_folder)
    print("loading model from folder {}".format(config.load_weights_folder))
    for n in models_to_load:
        print("Loading {} weights...".format(n))
        path = os.path.join(config.load_weights_folder, "{}.pth".format(n))
        model_dict = models[n].state_dict()
        pretrained_dict = torch.load(path)
        # keep only the checkpoint keys that exist in this module
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        models[n].load_state_dict(model_dict)
    return models
def model_eval(config, models=None):
    """Run segmentation evaluation end-to-end and return the metrics dict.

    Args:
        config: evaluation options (architecture, weights folder, ``h_match``).
        models: optional pre-built model dict; built via :func:`build_models`
            when omitted.

    Returns:
        dict: metrics produced by ``compute_metrics``.
    """
    # Fix: use identity comparison with None instead of `== None`, which
    # would invoke an arbitrary __eq__ on custom container types.
    if models is None:
        models = build_models(config)
    inputs, gts, output_dirs = load_data(config)
    segs = predict_seg(models, inputs, output_dirs, config)
    metrics = compute_metrics(segs, gts, output_dirs, config.h_match)
    return metrics
| LeungTsang/Depth-W-Net | seg_eval.py | seg_eval.py | py | 9,369 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numbe... |
419217840 | #!/usr/bin/env python
import os
import json
import argparse
from pprint import pprint
from pathlib import Path
import requests
from requests.auth import HTTPBasicAuth
import jwt
from dotenv import find_dotenv, load_dotenv
DOTENV_PATH = find_dotenv()
if DOTENV_PATH:
load_dotenv(DOTENV_PATH)
# Connection settings; every value can be overridden via the environment.
KEYCLOAK_HOST = os.environ.get("KEYCLOAK_HOST") or "localhost"
KEYCLOAK_PORT = os.environ.get("KEYCLOAK_PORT") or "8080"
KEYCLOAK_ISSUER = (
    os.environ.get("KEYCLOAK_ISSUER") or
    f"http://{KEYCLOAK_HOST}:{KEYCLOAK_PORT}/realms/fhir-dev"
)
KEYCLOAK_CLIENT_ID = (
    os.environ.get("KEYCLOAK_CLIENT_ID") or "fhir-superuser-client"
)
KEYCLOAK_CLIENT_SECRET = os.environ.get("KEYCLOAK_CLIENT_SECRET") or "none"
SMILECDR_HOST = os.environ.get("SMILECDR_HOST") or "localhost"
SMILECDR_PORT = os.environ.get("SMILECDR_PORT") or "8000"
# Bug fix: this string was missing the f-prefix, so the endpoint contained
# the literal text "{SMILECDR_HOST}:{SMILECDR_PORT}" instead of the values.
SMILECDR_FHIR_ENDPOINT = f"http://{SMILECDR_HOST}:{SMILECDR_PORT}"
SMILECDR_AUDIENCE = "https://kf-api-fhir-smilecdr-dev.org"
def send_request(method, *args, **kwargs):
    """Dispatch an HTTP request via the ``requests`` function named *method*.

    A failing status code (HTTPError from ``raise_for_status``) is reported
    together with the response body, then re-raised; on success the response
    object is returned.
    """
    print("\n***** Sending request ******")
    handler = getattr(requests, method)
    try:
        resp = handler(*args, **kwargs)
        resp.raise_for_status()
    except requests.exceptions.HTTPError as exc:
        print("Problem sending request to endpoint")
        print(resp.text)
        raise exc
    return resp
def get_access_token(
    client_id=KEYCLOAK_CLIENT_ID, client_secret=KEYCLOAK_CLIENT_SECRET,
    issuer=KEYCLOAK_ISSUER, decoded=True
):
    """Fetch an OAuth2 access token from Keycloak via client credentials.

    Discovers the token endpoint from the issuer's OIDC configuration, then
    exchanges the client credentials for a token.

    Args:
        client_id: Keycloak client id.
        client_secret: Keycloak client secret.
        issuer: Keycloak realm issuer URL.
        decoded: when True (default), introspect the JWT and attach its
            claims to the returned payload under ``"decoded_token"``.
            Fix: this flag was previously ignored and the token was
            always decoded.

    Returns:
        dict: the token endpoint response, optionally augmented with
        ``"decoded_token"``.
    """
    headers = {
        "Content-Type": "application/json",
    }
    # Get OIDC configuration
    print("\n****** Get OIDC Configuration *************")
    openid_config_endpoint = (
        f"{issuer}/.well-known/openid-configuration"
    )
    resp = send_request("get", openid_config_endpoint, headers=headers)
    openid_config = resp.json()
    pprint(openid_config)
    # Authorize to get access token
    print("\n****** Get Access Token *************")
    token_endpoint = openid_config["token_endpoint"]
    payload = {
        "grant_type": "client_credentials",
        "client_id": client_id,
        "client_secret": client_secret,
        "audience": SMILECDR_AUDIENCE
    }
    params = {
        "scope": "fhir"
    }
    resp = send_request("post", token_endpoint, data=payload, params=params)
    token_payload = resp.json()
    access_token = token_payload["access_token"]
    pprint(token_payload)
    if decoded:
        print("\n****** Introspect Token *************")
        # Signature verification is intentionally skipped: this is a local
        # introspection for debugging, not an authenticity check.
        decoded_token = jwt.decode(
            access_token, options={"verify_signature": False}
        )
        pprint(decoded_token)
        token_payload.update({
            "decoded_token": decoded_token
        })
    return token_payload
def cli():
    """Command line entry point: parse credentials/issuer and fetch a token."""
    parser = argparse.ArgumentParser(
        description='Get access token for client'
    )
    # (flag, default, help) triples for the three supported options.
    options = (
        ("--client_id", KEYCLOAK_CLIENT_ID, "Keycloak Client ID"),
        ("--client_secret", KEYCLOAK_CLIENT_SECRET, "Keycloak Client secret"),
        ("--issuer", KEYCLOAK_ISSUER, "Keycloak Issuer URL"),
    )
    for flag, default, help_text in options:
        parser.add_argument(flag, default=default, help=help_text)
    args = parser.parse_args()
    get_access_token(args.client_id, args.client_secret, args.issuer)
if __name__ == "__main__":
cli()
| kids-first/kf-api-fhir-service | web_app/auth.py | auth.py | py | 3,396 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "dotenv.find_dotenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
... |
42662950852 | # Video Capture, Classification and labeling
from keras.models import load_model
import cv2
import numpy as np

# Model from epoch 17 selected as it has the lowest validation loss and highest accuracy in contrast to the other epochs
#Video capture frame activated
model = load_model('model-017.model')
# Haar cascade only locates faces; the CNN then classifies each face crop.
face_clsfr=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# Capture from the default webcam (device index 0).
source=cv2.VideoCapture(0)

# Binary label assigned to frame, No mask labeled with red and with mask labeled with green
labels_dict={0:'MASK',1:'NO MASK'}
color_dict={0:(0,255,0),1:(0,0,255)}  # BGR colors: green for mask, red for no mask

# Loop will capture each frame, extract region of interest (face),
# Process the ROI in the same way it was done in training (convert to grayscale and resize)
# Processed image is normalized and fed into the imported Neural model for prediction (0 :Mask | 1: No Mask)
while(True):
    ret,img=source.read()
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # scaleFactor=1.3, minNeighbors=5 — standard Haar detection parameters.
    faces=face_clsfr.detectMultiScale(gray,1.3,5)
    for x,y,w,h in faces:
        # NOTE(review): the row slice uses w for the vertical extent; this
        # crops a square of side w and likely should be gray[y:y+h, x:x+w]
        # for non-square detections — confirm against training preprocessing.
        face_img=gray[y:y+w,x:x+w]
        # Resize to the 100x100 grayscale input the network was trained on.
        resized=cv2.resize(face_img,(100,100))
        # Scale pixel values to [0, 1] to match the training normalization.
        normalized=resized/255.0
        reshaped=np.reshape(normalized,(1,100,100,1))
        result=model.predict(reshaped)
        label=np.argmax(result,axis=1)[0]
        #Draw a rectangle on the frame over the ROI with the approriate color obtained from dict ( Green= Mask , red=no mask )
        cv2.rectangle(img,(x,y),(x+w,y+h),color_dict[label],2)
        cv2.rectangle(img,(x,y-40),(x+w,y),color_dict[label],-1)
        cv2.putText(img, labels_dict[label], (x, y-10),cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,255,255),2) # add Binary label on display
    cv2.imshow('LIVE',img)
    key=cv2.waitKey(1)
    # ESC (key code 27) exits the capture loop.
    if(key==27):
        break

cv2.destroyAllWindows()
source.release()
| majedn01/Covid-19-Mask-Dectection | 3.0 detecting Masks.py | 3.0 detecting Masks.py | py | 1,808 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keras.models.load_model",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.cv... |
25632415761 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import re
import zipfile
from pathlib import Path
from datetime import date
import importlib
import json
import sphinx_rtd_theme
import git
import yaml
# Scratch folder next to this conf.py; holds generated artifacts (zips, json).
TMP_FOLDER = Path(__file__).parent / "tmp"
TMP_FOLDER.mkdir(exist_ok=True)
JSON_COMPATIBILITY_TABLE_FILE = TMP_FOLDER / "releases.json"
# Convert the YAML compatibility table to JSON so it can be served verbatim.
with open("additional/releases.yaml") as f:
    compat_table = yaml.safe_load(f)
with open(JSON_COMPATIBILITY_TABLE_FILE, "w") as f:
    json.dump(compat_table, f)
repo = git.Repo(search_parent_directories=True)
current_commit = repo.head.commit
tagged_commits = [tag.commit for tag in repo.tags]
# On a tagged build (a Read the Docs tag build, or HEAD sitting exactly on a
# tag), pin the component versions to the latest release from the table;
# otherwise track the main branches.
if os.environ.get("READTHEDOCS_VERSION_TYPE") == "tag" or current_commit in tagged_commits:
    # Index 0 means latest release
    SUBSTRA_VERSION = compat_table["releases"][0]["components"]["substra"]["version"]
    TOOLS_VERSION = compat_table["releases"][0]["components"]["substra-tools"]["version"]
    SUBSTRAFL_VERSION = compat_table["releases"][0]["components"]["substrafl"]["version"]
else:
    SUBSTRA_VERSION = "main"
    TOOLS_VERSION = "main"
    SUBSTRAFL_VERSION = "main"
print(
    f"Versions of the components used:"
    f"\n - substra: {SUBSTRA_VERSION}"
    f"\n - substra-tools: {TOOLS_VERSION}"
    f"\n - substrafl: {SUBSTRAFL_VERSION}"
)
class SubSectionTitleOrder:
    """Key callable that orders example-gallery subsections by their title.

    Each subsection is expected to ship a README.txt whose title line is
    adorned with dashes ('---'); that title text becomes the sort key.
    Adapted from scikit-learn's documentation build.
    """

    def __init__(self, src_dir):
        self.src_dir = src_dir
        # A title is a word/space line immediately followed by a dash line.
        self.regex = re.compile(r"^([\w ]+)\n-", re.MULTILINE)

    def __repr__(self):
        return "<{}>".format(type(self).__name__)

    def __call__(self, directory):
        src_path = os.path.normpath(os.path.join(self.src_dir, directory))

        # Release highlights are forced to sort first.
        if os.path.basename(src_path) == "release_highlights":
            return "0"

        try:
            with open(os.path.join(src_path, "README.txt"), "r") as fh:
                readme_text = fh.read()
        except FileNotFoundError:
            # No README: fall back to the directory name itself.
            return directory

        match = self.regex.search(readme_text)
        return match.group(1) if match is not None else directory
# Nbsphinx config
nbsphinx_thumbnails = {
"examples/substra_core/diabetes_example/run_diabetes": "_static/example_thumbnail/diabetes.png",
"examples/substra_core/titanic_example/run_titanic": "_static/example_thumbnail/titanic.jpg",
"examples/substrafl/get_started/run_mnist_torch": "_static/example_thumbnail/mnist.png",
"examples/substrafl/go_further/run_diabetes_substrafl": "_static/example_thumbnail/diabetes.png",
"examples/substrafl/go_further/run_iris_sklearn": "_static/example_thumbnail/iris.jpg",
"examples/substrafl/go_further/run_mnist_cyclic": "_static/example_thumbnail/cyclic-mnist.png",
}
nbsphinx_prolog = r"""
{% set docname = 'docs/source/' + env.doc2path(env.docname, base=None) %}
.. raw:: html
<div class="notebook note">
Launch notebook online <span style="white-space: nowrap;"><a href="https://mybinder.org/v2/gh/Substra/substra-documentation/{{ env.config.release|e }}?filepath={{ docname|e }}"><img alt="Binder badge" src="https://mybinder.org/badge_logo.svg" style="vertical-align:text-bottom"></a></span>
or download it <span style="white-space: nowrap;"><a href="{{ env.docname.split('/')|last|e + '.ipynb' }}" download><img alt="Download badge" src="https://img.shields.io/badge/download_-notebook-orange?logo=jupyter" style="vertical-align:text-bottom"></a></span>
</div>
"""
nbsphinx_epilog = nbsphinx_prolog
# zip the assets directory found in the examples directory and place it in the current dir
def zip_dir(source_dir, zip_file_name):
    """Compress *source_dir* recursively into ``TMP_FOLDER / zip_file_name``.

    Archive members are stored relative to the parent of *source_dir*, so
    the top-level directory name is preserved inside the zip.
    """
    archive_path = TMP_FOLDER / zip_file_name
    relative_base = os.path.join(source_dir, "..")
    with zipfile.ZipFile(file=archive_path, mode="w", compression=zipfile.ZIP_DEFLATED) as archive:
        for root, _, filenames in os.walk(source_dir):
            for name in filenames:
                full_path = os.path.join(root, name)
                archive.write(full_path, os.path.relpath(full_path, relative_base))
# Bundle each example's assets folder into a downloadable zip in TMP_FOLDER.
assets_dir_titanic = Path(__file__).parent / "examples" / "substra_core" / "titanic_example" / "assets"
zip_dir(assets_dir_titanic, "titanic_assets.zip")
assets_dir_diabetes = Path(__file__).parent / "examples" / "substra_core" / "diabetes_example" / "assets"
zip_dir(assets_dir_diabetes, "diabetes_assets.zip")
assets_dir_substrafl_torch_fedavg = (
    Path(__file__).parent / "examples" / "substrafl" / "get_started" / "torch_fedavg_assets"
)
zip_dir(assets_dir_substrafl_torch_fedavg, "torch_fedavg_assets.zip")
assets_dir_substrafl_diabetes = (
    Path(__file__).parent / "examples" / "substrafl" / "go_further" / "diabetes_substrafl_assets"
)
zip_dir(assets_dir_substrafl_diabetes, "diabetes_substrafl_assets.zip")
assets_dir_substrafl_sklearn_fedavg = (
    Path(__file__).parent / "examples" / "substrafl" / "go_further" / "sklearn_fedavg_assets"
)
zip_dir(assets_dir_substrafl_sklearn_fedavg, "sklearn_fedavg_assets.zip")
# Fix: this variable previously reused the sklearn_fedavg name even though it
# points at the torch_cyclic assets; renamed for clarity (local use only).
assets_dir_substrafl_torch_cyclic = (
    Path(__file__).parent / "examples" / "substrafl" / "go_further" / "torch_cyclic_assets"
)
zip_dir(assets_dir_substrafl_torch_cyclic, "torch_cyclic_assets.zip")
# Copy the source documentation files from substra and substrafl to their right place
# in the substra-documentation repository
from dataclasses import dataclass
from distutils.dir_util import copy_tree
import subprocess
import shutil
import sys
import typing
# Editable component checkouts installed by pip are placed under this folder.
EDITABLE_LIB_PATH = Path(__file__).resolve().parents[1] / "src"


@dataclass
class Repo:
    """One substra component repository: how to install it and where its
    documentation sources live (if any are copied into this docs tree)."""

    # Import/package name (also the checkout directory name under src/).
    pkg_name: str
    # GitHub repository name within the "substra" organization.
    repo_name: str
    # pip egg fragment appended to the VCS URL (may carry extras, e.g. [dev]).
    installation_cmd: str
    # Git ref (release tag or "main") to install.
    version: str
    # Docs directory inside the repo; None when the repo ships no docs.
    doc_dir: typing.Optional[str] = None
    # Destination (relative to this conf.py) for the copied docs.
    dest_doc_dir: typing.Optional[str] = None
SUBSTRA_REPOS = [
Repo(
pkg_name="substra",
repo_name="substra",
installation_cmd="#egg=substra",
version=SUBSTRA_VERSION,
doc_dir="references",
dest_doc_dir="documentation/references",
),
Repo(
pkg_name="substrafl",
repo_name="substrafl",
installation_cmd="#egg=substrafl[dev]",
version=SUBSTRAFL_VERSION,
doc_dir="docs/api",
dest_doc_dir="substrafl_doc/api",
),
Repo(
pkg_name="substratools",
repo_name="substra-tools",
installation_cmd="#egg=substratools",
version=TOOLS_VERSION,
),
]
def install_dependency(library_name, repo_name, repo_args, version):
    """pip-install a substra repo in editable mode and make it importable.

    The repo is checked out under EDITABLE_LIB_PATH and that checkout is
    prepended to sys.path so subsequent imports pick up this exact version.
    """
    vcs_url = f"git+https://github.com/substra/{repo_name}.git@{version}{repo_args}"
    pip_command = [
        sys.executable,
        "-m",
        "pip",
        "install",
        "--src",
        str(EDITABLE_LIB_PATH),
        "--editable",
        vcs_url,
    ]
    try:
        subprocess.run(args=pip_command, check=True, capture_output=True)
    except subprocess.CalledProcessError as err:
        # pip's output is captured; surface it before re-raising so the
        # failure is diagnosable in the build log.
        print(err.stderr)
        print(err.stdout)
        raise
    importlib.invalidate_caches()
    sys.path.insert(0, str(EDITABLE_LIB_PATH / library_name))
def copy_source_files(src, dest):
    """Copy the directory *src* to *dest*, relative to this conf.py's folder.

    Any pre-existing destination directory is removed first, so the result
    is an exact copy of *src*.

    Args:
        src: source directory (str or Path).
        dest: destination path relative to the directory containing this file.
    """
    full_dest_path = Path(__file__).resolve().parent / dest
    if full_dest_path.exists():
        shutil.rmtree(full_dest_path)
    # distutils' copy_tree is deprecated and removed in Python 3.12;
    # shutil.copytree creates the destination and copies recursively, which
    # matches the previous behavior since the destination was just removed.
    shutil.copytree(str(src), str(full_dest_path))
# Install each component and, when it ships documentation sources, copy them
# into this docs tree so Sphinx builds them alongside our own pages.
# NOTE(review): this loop rebinds the module-level name `repo` (a git.Repo
# above) — harmless as written, but confirm nothing later needs the git handle.
for repo in SUBSTRA_REPOS:
    install_dependency(
        library_name=repo.pkg_name,
        repo_name=repo.repo_name,
        repo_args=repo.installation_cmd,
        version=repo.version,
    )
    if repo.doc_dir is not None:
        # Locate the installed package on disk and copy its docs directory.
        imported_module = importlib.import_module(repo.pkg_name)
        source_path = Path(imported_module.__file__).resolve().parents[1] / repo.doc_dir
        copy_source_files(source_path, repo.dest_doc_dir)
# reformat links to a section in a markdown files (not supported by myst_parser)
def reformat_md_section_links(file_path: Path):
    """Rewrite intra-document markdown section links for the HTML build.

    myst_parser does not resolve ``file.md#section`` links, so they are
    turned into ``file.html#section``; anchor fragments are lowercased to
    match the anchors Sphinx generates.
    """
    target = Path(file_path)
    text = target.read_text()
    # Point section links at the generated HTML pages instead of .md sources.
    text = text.replace(".md#", ".html#")
    # Lowercase everything from '#' up to the closing parenthesis of a link.
    text = re.sub(r"#(.*)\)", lambda match: match.group().lower(), text)
    target.write_text(text)
for file_path in Path(".").rglob("*.md"):
reformat_md_section_links(file_path)
# -- Project information -----------------------------------------------------
project = "Substra"
copyright = f"{date.today().year}, OWKIN"
author = "Owkin"
# parse the current doc version to display it in the menu
_doc_version = re.sub("^v", "", os.popen("git describe --tags").read().strip())
# The full version, including alpha/beta/rc tags
version = _doc_version
release = _doc_version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "nbsphinx",
]
extensions.extend(
    [
        "sphinx.ext.intersphinx",
        "sphinx.ext.autodoc",
        "sphinx_rtd_theme",
        "sphinx.ext.napoleon",
        "sphinx.ext.ifconfig",
        "sphinx_click",
        "sphinx.ext.autosectionlabel",
        "sphinx.ext.todo",
        "sphinx_fontawesome",
        "myst_parser",  # we need it for links between md files. Recommended by sphinx: https://www.sphinx-doc.org/en/master/usage/markdown.html
        "sphinx_copybutton",
    ]
)
# Make the local _ext directory importable for the custom extension below.
sys.path.append(os.path.abspath("./_ext"))
extensions.append("compatibilitytable")
todo_include_todos = False
# External projects that cross-references may resolve against (intersphinx).
intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
    "numpy": ("https://numpy.org/doc/stable/", None),
    "pandas": ("https://pandas.pydata.org/docs/", None),
    "torch": ("https://pytorch.org/docs/stable/", None),
}
################
# Substrafl API
################
# generate autosummary even if no references
autosummary_generate = True
# Prefix section labels with the document name to avoid cross-file clashes.
autosectionlabel_prefix_document = True
# autodoc settings
autodoc_default_options = {
    "show-inheritance": True,
    "members": True,
}
# "both" concatenates the class docstring and the __init__ docstring.
autoclass_content = "both"
autodoc_typehints = "both"
# Napoleon settings
# https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html
napoleon_google_docstring = True
napoleon_numpy_docstring = False
# Remove the prompt when copying examples
copybutton_prompt_text = ">>> "
# As we defined the type of our args, auto doc is trying to find a link to a
# documentation for each type specified
# The following elements are the link that auto doc were not able to do
nitpick_ignore = [
("py:class", "pydantic.main.BaseModel"),
("py:class", "BaseModel"),
("py:class", "torch.nn.modules.module.Module"),
("py:class", "torch.nn.modules.Module"),
("py:class", "torch.nn.modules.loss._Loss"),
("py:class", "torch.optim.optimizer.Optimizer"),
("py:class", "torch.optim.lr_scheduler._LRScheduler"),
("py:class", "torch.utils.data.dataset.Dataset"),
("py:class", "torch.nn.modules.module.T"),
("py:class", "string"),
("py:class", "Module"),
("py:class", "optional"),
("py:class", "Dropout"),
("py:class", "BatchNorm"),
("py:class", "torch.utils.hooks.RemovableHandle"),
("py:class", "torch.nn.Parameter"),
("py:class", "Parameter"),
("py:class", "Tensor"),
("py:class", "Path"),
("py:class", "module"),
("py:attr", "persistent"),
("py:attr", "grad_input"),
("py:attr", "strict"),
("py:attr", "grad_output"),
("py:attr", "requires_grad"),
("py:attr", "device"),
("py:attr", "non_blocking"),
("py:attr", "dst_type"),
("py:attr", "dtype"),
("py:attr", "device"),
("py:func", "register_module_forward_hook"),
("py:func", "register_module_forward_pre_hook"),
("py:func", "register_module_full_backward_hook"),
("py:func", "register_module_full_backward_pre_hook"),
("py:class", "substra.sdk.schemas.Permissions"),
("py:class", "substra.Client"),
("py:class", "substra.sdk.client.Client"),
("py:class", "substra.sdk.models.ComputePlan"),
("py:class", "substra.sdk.schemas.FunctionOutputSpec"),
("py:class", "substra.sdk.schemas.FunctionInputSpec"),
("py:class", "ComputePlan"),
]
# This must be the name of an image file (path relative to the configuration
# directory) that is the favicon of the docs. Modern browsers use this as
# the icon for tabs, windows and bookmarks. It should be a Windows-style
# icon file (.ico).
html_favicon = "static/favicon.png"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["templates/"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**/description.md"]
rst_epilog = f"""
.. |substra_version| replace:: {importlib.import_module('substra').__version__}
.. |substrafl_version| replace:: {importlib.import_module('substrafl').__version__}
"""
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["./static"]
html_extra_path = [str(JSON_COMPATIBILITY_TABLE_FILE)]
html_css_files = [
"owkin.css",
]
html_logo = "static/logo.svg"
html_show_sourcelink = False
html_show_sphinx = False
html_context = {
"display_github": False,
}
| Substra/substra-documentation | docs/source/conf.py | conf.py | py | 14,762 | python | en | code | 20 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "git.Repo",
"line_number": 3... |
37598665486 | from django.shortcuts import render
from article.models import Article
def HomePage(request):
articles = Article.objects.all().order_by('-created_date')[:3]
first_article = None
second_article = None
third_article = None
if len(articles) >= 1:
first_article = articles[0]
if len(articles) >= 2:
second_article = articles[1]
if len(articles) >= 3:
third_article = articles[2]
context = {
'first_article': first_article,
'second_article': second_article,
'third_article': third_article
}
return render(request, 'main/home.html', context)
| kimjonginil/Womens-Diary-Diploma | backend/app/main/views.py | views.py | py | 634 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "article.models.Article.objects.all",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "article.models.Article.objects",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "article.models.Article",
"line_number": 7,
"usage_type": "name"
},... |
1309363052 | from flask import current_app
from sqlalchemy import asc, desc
from sqlalchemy.exc import SQLAlchemyError
from app import db
from app.businesses.exceptions import EntityNotFoundException
from app.businesses.models import Business, Category, Tag, BusinessUpload
CONTAINS = '%{}%'
class BaseRepository(object):
model = None
_session = None
def __init__(self):
self._session = db.session
@property
def session(self):
return self._session or current_app.session()
@property
def query(self):
assert self.model, "A model is required to use the query property."
return self.session.query(self.model)
def get(self, id, strict=False):
entity = self.query.get(id)
if strict and not entity:
raise EntityNotFoundException
return entity
def filter(self, **kwargs):
query = self.query.filter_by(**kwargs)
return query
def save(self, entity):
self.session.add(entity)
try:
self.session.commit()
except SQLAlchemyError:
self.session.rollback()
raise
return entity
def save_many(self, entities):
self.session.add_all(entities)
self.session.commit()
return entities
def update(self, id_, **kwargs):
db_entity = self.get(id_, strict=True)
self._update_fields(db_entity, **kwargs)
self.session.commit()
return db_entity
@classmethod
def _update_fields(cls, db_entity, **kwargs):
for key, value in kwargs.items():
setattr(db_entity, key, value)
def _delete(self, id_):
try:
db_entity = self.get(id_, strict=True)
self.session.delete(db_entity)
self.session.commit()
except EntityNotFoundException:
raise
class BusinessRepository(BaseRepository):
model = Business
def save(self, entity):
entity = self.save_tags(entity)
super(BusinessRepository, self).save(entity)
return entity
def save_tags(self, entity):
tag_repository = TagRepository()
existing_tag = {t.name: t for t in tag_repository.query.all()}
tag_to_be_add = []
for t in entity.tags:
if t.name in existing_tag:
tag_to_be_add.append(existing_tag[t.name])
else:
tag_to_be_add.append(t)
entity.tags = tag_to_be_add
return entity
def filter(self, id=None, querySearch=None, accepted_at=None, status=None, order=None, order_by=None,
exclude_deleted=None,
**kwargs):
query = self.query
if exclude_deleted:
query = query.filter(Business.deleted_at.is_(None))
if id:
return query.filter(Business.id == id)
if querySearch:
querySearch = CONTAINS.format(querySearch)
query = query.filter(Business.name.ilike(querySearch))
if accepted_at:
query = query.filter(Business.accepted_at == accepted_at)
if status:
for s in status:
query = query.filter(Business.status == s)
if order_by:
order = asc if order == "ASC" else desc
if order_by == "name":
query = query.order_by(order(Business.name))
return query
def delete(self, id):
business = self.get(id=id)
business.delete()
self.save(business)
class CategoryRepository(BaseRepository):
model = Category
def save(self, entity):
super(CategoryRepository, self).save(entity)
return entity
def filter(self, *args, **kwargs):
query = super(CategoryRepository, self).filter(**kwargs)
return query
def delete(self, id_):
self._delete(id_)
class TagRepository(BaseRepository):
model = Tag
def save(self, entity):
super(TagRepository, self).save(entity)
return entity
def filter(self, *args, **kwargs):
query = super(TagRepository, self).filter(**kwargs)
return query
def delete(self, id_):
self._delete(id_)
def get_tags_with_id(self, new_tags):
existing_tag_names = {t.name: t for t in self.query.all()}
tags_to_be_add = []
for t in new_tags:
if t.name in existing_tag_names:
tags_to_be_add.append(existing_tag_names[t.name])
else:
tags_to_be_add.append(t)
return tags_to_be_add
class BusinessUploadRepository(BaseRepository):
model = BusinessUpload
| Baobab-team/baobab-api | app/businesses/repositories.py | repositories.py | py | 4,612 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.db.session",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.current_app.session",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.current_ap... |
30297889715 | """ Module to analyze vessel pulsatility during the heart cycle in ecg-gated CT
radius change - area change - volume change
Authors: Almar Klein and Maaike Koenrades. Created 2019.
"""
import os
import sys
import time
import openpyxl
import pirt
import numpy as np
import visvis as vv
from stentseg.utils.datahandling import select_dir, loadvol, loadmodel, loadmesh
from stentseg.stentdirect.stentgraph import create_mesh
from stentseg.motion.vis import create_mesh_with_abs_displacement
from stentseg.utils import PointSet, fitting
from lspeas.utils.vis import showModelsStatic
from lspeas.utils.deforminfo import DeformInfo
from lspeas.utils.curvature import measure_curvature #todo: check xyz vs zyx!
from lspeas.utils import meshlib
assert openpyxl.__version__ < "2.4", "Do pip install openpyxl==2.3.5"
#todo: move function to lspeas utils?
def load_excel_centerline(basedirCenterline, vol, ptcode, ctcode, filename=None):
""" Load centerline data from excel
Centerline exported from Terarecon; X Y Z coordinates in colums
"""
if filename is None:
filename = '{}_{}_centerline.xlsx'.format(ptcode,ctcode)
excel_location = os.path.join(basedirCenterline,ptcode)
#read sheet
try:
wb = openpyxl.load_workbook(os.path.join(excel_location,filename),read_only=True)
except FileNotFoundError:
wb = openpyxl.load_workbook(os.path.join(basedirCenterline,filename),read_only=True)
sheet = wb.get_sheet_by_name(wb.sheetnames[0]) # data on first sheet
colStart = 2 # col C
rowStart = 1 # row 2 excel
coordx = sheet.columns[colStart][rowStart:]
coordy = sheet.columns[colStart+1][rowStart:]
coordz = sheet.columns[colStart+2][rowStart:]
#convert to values
coordx = [obj.value for obj in coordx]
coordy = [obj.value for obj in coordy]
coordz = [obj.value for obj in coordz]
#from list to array
centerlineX = np.asarray(coordx, dtype=np.float32)
centerlineY = np.asarray(coordy, dtype=np.float32)
centerlineZ = np.asarray(coordz, dtype=np.float32)
centerlineZ = np.flip(centerlineZ, axis=0) # z of volume is also flipped
# convert centerline coordinates to world coordinates (mm)
origin = vol1.origin # z,y,x
sampling = vol1.sampling # z,y,x
centerlineX = centerlineX*sampling[2] +origin[2]
centerlineY = centerlineY*sampling[1] +origin[1]
centerlineZ = (centerlineZ-0.5*vol1.shape[0])*sampling[0] + origin[0]
return centerlineX, centerlineY, centerlineZ
# Select the ssdf basedir
basedir = select_dir(
os.getenv('LSPEAS_BASEDIR', ''),
r'D:\LSPEAS\LSPEAS_ssdf',
r'F:\LSPEAS_ssdf_backup',
r'F:\LSPEAS_ssdf_BACKUP')
basedirMesh = select_dir(
r'D:\Profiles\koenradesma\SURFdrive\UTdrive\MedDataMimics\LSPEAS_Mimics',
r'C:\Users\Maaike\SURFdrive\UTdrive\MedDataMimics\LSPEAS_Mimics',
r"C:\stack\data\lspeas\vaatwand")
basedirCenterline = select_dir(
r"C:\stack\data\lspeas\vaatwand",
r'D:\Profiles\koenradesma\SURFdrive\UTdrive\LSPEAS_centerlines_terarecon',
r'C:\Users\Maaike\SURFdrive\UTdrive\LSPEAS_centerlines_terarecon',
r"C:\stack\data\lspeas\vaatwand")
# Select dataset
ptcode = 'LSPEAS_003'
ctcode1 = 'discharge'
cropname = 'ring'
modelname = 'modelavgreg'
cropvol = 'stent'
drawModelLines = False # True or False
drawRingMesh, ringMeshDisplacement = True, False
meshColor = [(1,1,0,1)]
removeStent = True # for iso visualization
dimensions = 'xyz'
showAxis = False
showVol = 'ISO' # MIP or ISO or 2D or None
showvol2D = False
drawVessel = True
clim = (0,2500)
clim2D = -200,500 # MPR
clim2 = (0,2)
isoTh = 180 # 250
## Load data
# Load CT image data for reference, and deform data to measure motion
try:
# If we run this script without restart, we can re-use volume and deforms
vol1
deforms
except NameError:
vol1 = loadvol(basedir, ptcode, ctcode1, cropvol, 'avgreg').vol
s_deforms = loadvol(basedir, ptcode, ctcode1, cropvol, 'deforms')
deforms = [s_deforms[key] for key in dir(s_deforms) if key.startswith('deform')]
deforms = [pirt.DeformationFieldBackward(*fields) for fields in deforms]
# Load vessel mesh (mimics)
# We make sure that it is a mesh without faces, which makes our sampling easier
try:
ppvessel
except NameError:
# Load mesh with visvis, then put in our meshlib.Mesh() and let it ensure that
# the mesh is closed, check the winding, etc. so that we can cut it with planes,
# and reliably calculate volume.
filename = '{}_{}_neck.stl'.format(ptcode,ctcode1)
vesselMesh = loadmesh(basedirMesh, ptcode[-3:], filename) #inverts Z
vv.processing.unwindFaces(vesselMesh)
vesselMesh = meshlib.Mesh(vesselMesh._vertices)
vesselMesh.ensure_closed()
ppvessel = PointSet(vesselMesh.get_flat_vertices()) # Must be flat!
# Load ring model
try:
modelmesh1
except NameError:
s1 = loadmodel(basedir, ptcode, ctcode1, cropname, modelname)
if drawRingMesh:
if not ringMeshDisplacement:
modelmesh1 = create_mesh(s1.model, 0.7) # Param is thickness
else:
modelmesh1 = create_mesh_with_abs_displacement(s1.model, radius = 0.7, dim=dimensions)
# Load vessel centerline (excel terarecon) (is very fast)
centerline = PointSet(np.column_stack(
load_excel_centerline(basedirCenterline, vol1, ptcode, ctcode1, filename=None)))
## Setup visualization
# Show ctvolume, vessel mesh, ring model - this uses figure 1 and clears it
axes1, cbars = showModelsStatic(ptcode, ctcode1, [vol1], [s1], [modelmesh1],
[vv.BaseMesh(*vesselMesh.get_vertices_and_faces())],
showVol, clim, isoTh, clim2, clim2D, drawRingMesh,
ringMeshDisplacement, drawModelLines, showvol2D, showAxis,
drawVessel, vesselType=1,
climEditor=True, removeStent=removeStent, meshColor=meshColor)
axes1 = axes1[0]
axes1.position = 0, 0, 0.6, 1
# Show or hide the volume (showing is nice, but also slows things down)
tex3d = axes1.wobjects[1]
tex3d.visible = False
# VesselMeshes
vesselVisMesh1 = axes1.wobjects[4]
vesselVisMesh1.cullFaces = "front" # Show the back
# vesselVisMesh2 = vv.Mesh(axes1, *vesselMesh.get_vertices_and_faces())
vesselVisMesh2 = vv.Mesh(axes1, np.zeros((6, 3), np.float32), np.zeros((3, 3), np.int32))
vesselVisMesh2.cullFaces = "back"
vesselVisMesh2.faceColor = "red"
# Show the centerline
vv.plot(centerline, ms='.', ls='', mw=8, mc='b', alpha=0.5)
# Initialize 2D view
axes2 = vv.Axes(vv.gcf())
axes2.position = 0.65, 0.05, 0.3, 0.4
axes2.daspectAuto = False
axes2.camera = '2d'
axes2.axis.showGrid = True
axes2.axis.axisColor = 'k'
# Initialize axes to put widgets and labels in
container = vv.Wibject(vv.gcf())
container.position = 0.65, 0.5, 0.3, 0.5
# Create labels to show measurements
labelpool = []
for i in range(16):
label = vv.Label(container)
label.fontSize = 11
label.position = 10, 100 + 25 * i, -20, 25
labelpool.append(label)
# Initialize sliders and buttons
slider_ref = vv.Slider(container, fullRange=(1, len(centerline)-2), value=10)
slider_ves = vv.Slider(container, fullRange=(1, len(centerline)-2), value=10)
button_go = vv.PushButton(container, "Take all measurements (incl. volume)")
slider_ref.position = 10, 5, -20, 25
slider_ves.position = 10, 40, -20, 25
button_go.position = 10, 70, -20, 25
button_go.bgcolor = slider_ref.bgcolor = slider_ves.bgcolor = 0.8, 0.8, 1.0
# Initialize line objects for showing the plane orthogonal to centerline
slider_ref.line_plane = vv.plot([], [], [], axes=axes1, ls='-', lw=3, lc='w', alpha = 0.9)
slider_ves.line_plane = vv.plot([], [], [], axes=axes1, ls='-', lw=3, lc='y', alpha = 0.9)
# Initialize line objects for showing selected points close to that plane
slider_ref.line_3d = vv.plot([], [], [], axes=axes1, ms='.', ls='', mw=8, mc='w', alpha = 0.9)
slider_ves.line_3d = vv.plot([], [], [], axes=axes1, ms='.', ls='', mw=8, mc='y', alpha = 0.9)
# Initialize line objects for showing selected points and ellipse in 2D
line_2d = vv.plot([], [], axes=axes2, ms='.', ls='', mw=8, mc='y')
line_ellipse1 = vv.plot([], [], axes=axes2, ms='', ls='-', lw=2, lc='b')
line_ellipse2 = vv.plot([], [], axes=axes2, ms='', ls='+', lw=2, lc='b')
## Functions to update visualization and do measurements
def get_plane_points_from_centerline_index(i):
    """ Get a set of points that lie on the plane orthogonal to the centerline
    at the given index. The points are such that they can be drawn as a line for
    visualization purposes. The plane equation can be obtained via a plane-fit.

    i may be fractional (it comes from a slider); the centerline is
    interpolated around it. Returns a PointSet(3) containing the center point
    followed by 12 points on a circle of radius 6 in the orthogonal plane.
    """
    if True:  # the cubic branch is always used; the linear branch is kept for reference
        # Cubic fit of the centerline
        # Clamp i away from the ends so that index-1 .. index+2 are valid
        i = max(1.1, min(i, centerline.shape[0] - 2.11))
        # Sample center point and two points right below/above, using
        # "cardinal" interpolating (C1-continuous), or "basic" approximating (C2-continious).
        pp = []
        for j in [i - 0.1, i, i + 0.1]:
            index = int(j)
            t = j - index
            coefs = pirt.interp.get_cubic_spline_coefs(t, "basic")
            samples = centerline[index - 1], centerline[index], centerline[index + 1], centerline[index + 2]
            pp.append(samples[0] * coefs[0] + samples[1] * coefs[1] + samples[2] * coefs[2] + samples[3] * coefs[3])
        # Get center point and vector pointing down the centerline
        p = pp[1]
        vec1 = (pp[2] - pp[1]).normalize()
    else:
        # Linear fit of the centerline
        i = max(0, min(i, centerline.shape[0] - 2))
        index = int(i)
        t = i - index
        # Sample two points of interest
        pa, pb = centerline[index], centerline[index + 1]
        # Get center point and vector pointing down the centerline
        p = t * pb + (1 - t) * pa
        vec1 = (pb - pa).normalize()
    # Get two orthogonal vectors that define the plane that is orthogonal
    # to the above vector. We can use an arbitrary vector to get the first,
    # but there is a tiiiiiny chance that it is equal to vec1 so that the
    # normal collapses.
    vec2 = vec1.cross([0, 1, 0])
    if vec2.norm() == 0:
        vec2 = vec1.cross((1, 0, 0))
    vec3 = vec1.cross(vec2)
    # Sample some points on the plane and get the plane's equation
    pp = PointSet(3)
    radius = 6
    pp.append(p)
    for t in np.linspace(0, 2 * np.pi, 12):
        pp.append(p + np.sin(t) * radius * vec2 + np.cos(t) * radius * vec3)
    return pp
def get_vessel_points_from_plane_points(pp):
    """ Select points from the vessel points that are very close to the plane
    defined by the given plane points. Returns a 2D and a 3D point set.

    The 3D set holds the exact intersections of mesh edges with the plane;
    the 2D set is their projection onto that plane.
    """
    abcd = fitting.fit_plane(pp)
    # Get 2d and 3d coordinates of points that lie (almost) on the plane
    # pp2 = fitting.project_to_plane(ppvessel, abcd)
    # pp3 = fitting.project_from_plane(pp2, abcd)
    signed_distances = fitting.signed_distance_to_plane(ppvessel, abcd)
    distances = np.abs(signed_distances)
    # Select points to consider. This is just to reduce the search space somewhat.
    selection = np.where(distances < 5)[0]
    # We assume that each three points in ppvessel make up a triangle (face).
    # We make sure of that when we load the mesh.
    # Select first index of each face (every 3 vertices is 1 face), and remove duplicates
    selection_faces = set(3 * (selection // 3))
    # Now iterate over the faces (triangles), and check each edge. If the two
    # points are on different sides of the plane, then we interpolate on the
    # edge to get the exact spot where the edge intersects the plane.
    sampled_pp3 = PointSet(3)
    visited_edges = set()
    for fi in selection_faces:  # for each face index
        for edge in [(fi + 0, fi + 1), (fi + 0, fi + 2), (fi + 1, fi + 2)]:
            # NOTE: a vertex exactly on the plane makes this product 0,
            # so such an edge is skipped.
            if signed_distances[edge[0]] * signed_distances[edge[1]] < 0:
                if edge not in visited_edges:
                    visited_edges.add(edge)
                    # Linear interpolation along the edge to the zero-crossing
                    d1, d2 = distances[edge[0]], distances[edge[1]]
                    w1, w2 = d2 / (d1 + d2), d1 / (d1 + d2)
                    p = w1 * ppvessel[edge[0]] + w2 * ppvessel[edge[1]]
                    sampled_pp3.append(p)
    return fitting.project_to_plane(sampled_pp3, abcd), sampled_pp3
def get_distance_along_centerline():
    """ Get the distance along the centerline between the two reference points,
    (using linear interpolation at the ends).

    Reads the two slider values. NOTE(review): assumes
    slider_ref.value <= slider_ves.value - confirm with the GUI setup.
    """
    i1 = slider_ref.value
    i2 = slider_ves.value
    index1 = int(np.ceil(i1))
    index2 = int(np.floor(i2))
    t1 = i1 - index1  # -1 < t1 <= 0
    t2 = i2 - index2  # 0 <= t2 < 1
    dist = 0
    # Partial segments at both ends (t1 is non-positive, hence the minus sign)
    dist += -t1 * (centerline[index1] - centerline[index1 - 1]).norm()
    dist += +t2 * (centerline[index2] - centerline[index2 + 1]).norm()
    # Full segments in between
    for index in range(index1, index2):
        dist += (centerline[index + 1] - centerline[index]).norm()
    return float(dist)
def triangle_area(p1, p2, p3):
    """ Calculate the area of the triangle spanned by three vertices.

    Uses Heron's formula on the three side lengths:
    https://www.mathsisfun.com/geometry/herons-formula.html
    """
    side_a = p1.distance(p2)
    side_b = p2.distance(p3)
    side_c = p3.distance(p1)
    half_perimeter = (side_a + side_b + side_c) / 2
    return (half_perimeter
            * (half_perimeter - side_a)
            * (half_perimeter - side_b)
            * (half_perimeter - side_c)) ** 0.5
def deform_points_2d(pp2, plane):
    """ Given a 2D pointset (and the plane that they are on),
    return a list with the deformed versions of that pointset.

    One deformed 2D pointset is returned per phase in the global
    ``deforms`` list.
    """
    pp3 = fitting.project_from_plane(pp2, plane)  # todo: shouldn't origin be subtracted?! see dynamic.py
    deformed = []
    for phase in range(len(deforms)):
        deform = deforms[phase]
        # Sample the deformation field in the 3D points, one axis at a time
        dx = deform.get_field_in_points(pp3, 0)  # todo: shouldn't this be z=0 y=1 x=2! see dynamic.py; adapt in all functions?!
        dy = deform.get_field_in_points(pp3, 1)
        dz = deform.get_field_in_points(pp3, 2)
        deform_vectors = PointSet(np.stack([dx, dy, dz], 1))
        pp3_deformed = pp3 + deform_vectors
        deformed.append(fitting.project_to_plane(pp3_deformed, plane))
    return deformed
def measure_centerline_strain():
    """ Measure the centerline strain.

    Takes the centerline section between the two sliders, deforms it with
    each phase's deformation field, and returns
    (max_length - min_length) / min_length over the phases
    (0 if the minimum length is 0).
    """
    i1 = slider_ref.value
    i2 = slider_ves.value
    # Get the centerline section of interest
    index1 = int(np.ceil(i1))
    index2 = int(np.floor(i2))
    section = centerline[index1:index2 + 1]
    # get this section of the centerline for each phase
    sections = []
    for phase in range(len(deforms)):
        deform = deforms[phase]
        dx = deform.get_field_in_points(section, 0)
        dy = deform.get_field_in_points(section, 1)
        dz = deform.get_field_in_points(section, 2)
        deform_vectors = PointSet(np.stack([dx, dy, dz], 1))
        sections.append(section + deform_vectors)
    # Measure the strain of the full section, by measuring the total length in each phase.
    lengths = []
    for phase in range(len(deforms)):
        section = sections[phase]
        length = sum(float(section[i].distance(section[i + 1]))
                     for i in range(len(section) - 1))
        lengths.append(length)
    if min(lengths) == 0:
        return 0
    else:
        # Strain as delta-length divided by initial length
        return (max(lengths) - min(lengths)) / min(lengths)
        # ... or as what Wikipedia calls "stretch ratio":
        # return max(lengths) / min(lengths)
def take_measurements(measure_volume_change):
    """ This gets called when the slider is released. We take measurements and
    update the corresponding texts and visualizations.

    measure_volume_change: bool - when True, also cut the vessel mesh between
    the two slider planes and measure its volume in each phase (this bit is
    computationally expensive). Results go to process_measurements() and the
    visualization objects (2D lines, ellipse, submesh) are refreshed.
    """
    # Get points that form the contour of the vessel in 2D
    pp = get_plane_points_from_centerline_index(slider_ves.value)
    pp2, pp3 = get_vessel_points_from_plane_points(pp)
    plane = pp2.plane
    # Collect measurements in a dict. That way we can process it in one step at the end
    measurements = {}
    # Store slider positions, so we can reproduce this measurement later
    measurements["centerline indices"] = slider_ref.value, slider_ves.value
    # Early exit? If no vessel points were found near the plane, clear the
    # visualizations and report only the slider positions.
    if len(pp2) == 0:
        line_2d.SetPoints(pp2)
        line_ellipse1.SetPoints(pp2)
        line_ellipse2.SetPoints(pp2)
        vesselVisMesh2.SetFaces(np.zeros((3, 3), np.int32))
        vesselVisMesh2.SetNormals(None)
        process_measurements(measurements)
        return
    # Measure length of selected part of the centerline and the strain in that section
    measurements["centerline distance"] = get_distance_along_centerline()
    measurements["centerline strain"] = measure_centerline_strain()
    # Measure centerline curvature
    curvature_mean, curvature_max, curvature_max_pos, curvature_max_change = measure_curvature(centerline, deforms)
    measurements["curvature mean"] = DeformInfo(curvature_mean)
    measurements["curvature max"] = DeformInfo(curvature_max)
    measurements["curvature max pos"] = DeformInfo(curvature_max_pos)
    measurements["curvature max change"] = curvature_max_change
    # Get ellipse and its center point
    ellipse = fitting.fit_ellipse(pp2)
    p0 = PointSet([ellipse[0], ellipse[1]])
    # Sample ellipse to calculate its area (as a fan of triangles around p0)
    pp_ellipse = fitting.sample_ellipse(ellipse, 256)  # results in N + 1 points
    area = 0
    for i in range(len(pp_ellipse)-1):
        area += triangle_area(p0, pp_ellipse[i], pp_ellipse[i + 1])
    # measurements["reference area"] = float(area)
    # Do a quick check to be sure that this triangle-approximation is close enough
    assert abs(area - fitting.area(ellipse)) < 2, "area mismatch"  # mm2 typically ~ 0.1 mm2
    # Measure ellipse area (and how it changes)
    measurements["ellipse area"] = DeformInfo(unit="mm2")
    for pp_ellipse_def in deform_points_2d(pp_ellipse, plane):
        area = 0
        for i in range(len(pp_ellipse_def)-1):
            area += triangle_area(p0, pp_ellipse_def[i], pp_ellipse_def[i + 1])
        measurements["ellipse area"].append(area)
    # # Measure expansion of ellipse in 256 locations?
    # # Measure distances from center to ellipse edge. We first get the distances
    # # in each face, for each point. Then we aggregate these distances to
    # # expansion measures. So in the end we have 256 expansion measures.
    # distances_per_point = [[] for i in range(len(pp_ellipse))]
    # for pp_ellipse_def in deform_points_2d(pp_ellipse, plane):
    #     # todo: Should distance be measured to p0 or to p0 in that phase?
    #     for i, d in enumerate(pp_ellipse_def.distance(p0)):
    #         distances_per_point[i].append(float(d))
    # distances_per_point = distances_per_point[:-1] # Because pp_ellipse[-1] == pp_ellipse[0]
    # #
    # measurements["expansions"] = DeformInfo() # 256 values, not 10
    # for i in range(len(distances_per_point)):
    #     distances = distances_per_point[i]
    #     measurements["expansions"].append((max(distances) - min(distances)) / min(distances))
    # Measure radii of ellipse major and minor axis (and how it changes)
    pp_ellipse4 = fitting.sample_ellipse(ellipse, 4)  # major, minor, major, minor
    measurements["ellipse expansion major1"] = DeformInfo(unit="mm")
    measurements["ellipse expansion minor1"] = DeformInfo(unit="mm")
    measurements["ellipse expansion major2"] = DeformInfo(unit="mm")
    measurements["ellipse expansion minor2"] = DeformInfo(unit="mm")
    for pp_ellipse4_def in deform_points_2d(pp_ellipse4, plane):
        measurements["ellipse expansion major1"].append(float( pp_ellipse4_def[0].distance(p0) ))
        measurements["ellipse expansion minor1"].append(float( pp_ellipse4_def[1].distance(p0) ))
        measurements["ellipse expansion major2"].append(float( pp_ellipse4_def[2].distance(p0) ))
        measurements["ellipse expansion minor2"].append(float( pp_ellipse4_def[3].distance(p0) ))
    # Measure how the volume changes - THIS BIT IS COMPUTATIONALLY EXPENSIVE
    submesh = meshlib.Mesh(np.zeros((3, 3)))
    if measure_volume_change:
        # Update the submesh: cut the vessel between the two slider planes
        plane1 = fitting.fit_plane(get_plane_points_from_centerline_index(slider_ref.value))
        plane2 = fitting.fit_plane(get_plane_points_from_centerline_index(slider_ves.value))
        plane2 = [-x for x in plane2]  # flip the plane upside down
        submesh = vesselMesh.cut_plane(plane1).cut_plane(plane2)
        # Measure its motion: deform the vertices for each phase and
        # record the resulting volume
        measurements["volume"] = DeformInfo(unit="mm3")
        submesh._ori_vertices = submesh._vertices.copy()
        for phase in range(len(deforms)):
            deform = deforms[phase]
            submesh._vertices = submesh._ori_vertices.copy()
            dx = deform.get_field_in_points(submesh._vertices, 0)
            dy = deform.get_field_in_points(submesh._vertices, 1)
            dz = deform.get_field_in_points(submesh._vertices, 2)
            submesh._vertices[:, 0] += dx
            submesh._vertices[:, 1] += dy
            submesh._vertices[:, 2] += dz
            measurements["volume"].append(submesh.volume())
    # Show measurements
    process_measurements(measurements)
    # Update line objects
    line_2d.SetPoints(pp2)
    line_ellipse1.SetPoints(fitting.sample_ellipse(ellipse))
    major_minor = PointSet(2)
    for p in [p0, pp_ellipse4[0], p0, pp_ellipse4[2], p0, pp_ellipse4[1], p0, pp_ellipse4[3]]:
        major_minor.append(p)
    line_ellipse2.SetPoints(major_minor)
    axes2.SetLimits(margin=0.12)
    # Update submesh object
    vertices, faces = submesh.get_vertices_and_faces()
    vesselVisMesh2.SetVertices(vertices)
    vesselVisMesh2.SetFaces(np.zeros((3, 3), np.int32) if len(faces) == 0 else faces)
    vesselVisMesh2.SetNormals(None)
# Global value that will be a dictionary with measurements; refreshed by
# process_measurements() so results can be inspected from the shell.
mm = {}
def process_measurements(measurements):
    """ Show measurements. Now the results are shown in the label objects
    (and printed), but we could do anything here ...
    """
    # Store in the global mm dict for further processing
    mm.clear()
    mm.update(measurements)
    # Print in shell
    print("Measurements:")
    for key, val in measurements.items():
        if isinstance(val, DeformInfo):
            val = val.summary
        print(key.rjust(16) + ": " + str(val))
    # Show in the label widgets
    used = 0
    for key, val in measurements.items():
        if isinstance(val, DeformInfo):
            val = val.summary
        if isinstance(val, float):
            val = "{:0.4g}".format(val)
        labelpool[used].text = key + ": " + str(val)
        used += 1
    # Clear the labels that were not used this time
    for leftover in labelpool[used:]:
        leftover.text = ""
def on_sliding(e):
    """ Callback while a slider is being dragged: move that slider's plane
    indicator along the centerline.
    """
    moved_slider = e.owner
    plane_points = get_plane_points_from_centerline_index(moved_slider.value)
    moved_slider.line_plane.SetPoints(plane_points)
def on_sliding_done(e):
    """ Callback when a slider is released: move the plane indicator and
    redo the (cheap) measurements.
    """
    released_slider = e.owner
    plane_points = get_plane_points_from_centerline_index(released_slider.value)
    released_slider.line_plane.SetPoints(plane_points)
    take_measurements(False)  # skip the expensive volume measurement
def on_button_press(e):
    """ When the button is pressed, take all measurements, including the
    (computationally expensive) volume measurement.
    """
    take_measurements(True)
def set_sliders(value_ref, value_ves):
    """ Set the sliders to a specific position, e.g. to reproduce a
    measurement from the stored "centerline indices".
    """
    slider_ref.value = value_ref
    slider_ves.value = value_ves
# Connect! Bind slider/button events to the handlers defined above.
slider_ref.eventSliding.Bind(on_sliding)
slider_ves.eventSliding.Bind(on_sliding)
slider_ref.eventSliderChanged.Bind(on_sliding_done)
slider_ves.eventSliderChanged.Bind(on_sliding_done)
button_go.eventMouseDown.Bind(on_button_press)
#todo: visualize mesh with motion and use colors to represent radius change
#todo: add torsion and angular rotation of centerline
| almarklein/stentseg | lspeas/analysis/vessel_dynamics.py | vessel_dynamics.py | py | 24,229 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "openpyxl.__version__",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "openpyxl.load_wor... |
4469206526 | from pants.base.exceptions import TargetDefinitionException
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.fs import archive as archive_lib
from pants.contrib.node.targets.node_package import NodePackage
class NodeBundle(NodePackage):
    """A bundle of node modules.

    Wraps exactly one node_module target plus an archive type describing
    how the bundle is packaged.
    """

    def __init__(self, node_module=None, archive='tgz', address=None, payload=None, **kwargs):
        """
        :param node_module: address spec of the single node_module to bundle
        :param archive: a string, select from tar, tgz, tbz2, default to tgz
        :raises TargetDefinitionException: for an unknown archive type or a
            missing node_module.
        """
        if archive not in archive_lib.TYPE_NAMES_PRESERVE_SYMLINKS:
            raise TargetDefinitionException(
                self, '{} is not a valid archive type. Allowed archive types are {}'.format(
                    archive,
                    ', '.join(sorted(list(archive_lib.TYPE_NAMES_PRESERVE_SYMLINKS)))))
        if not node_module:
            raise TargetDefinitionException(self, 'node_module can not be empty.')
        # Record both settings in the payload (used for fingerprinting)
        # before handing off to the base target constructor.
        payload = payload or Payload()
        payload.add_fields({
            'archive': PrimitiveField(archive),
            'node_module': PrimitiveField(node_module),
        })
        super().__init__(address=address, payload=payload, **kwargs)

    @classmethod
    def compute_dependency_specs(cls, kwargs=None, payload=None):
        # Yield the base class's specs first, then our node_module spec so
        # the engine injects it as a dependency of this target.
        for spec in super().compute_dependency_specs(kwargs, payload):
            yield spec
        target_representation = kwargs or payload.as_dict()
        spec = target_representation.get('node_module')
        if spec:
            yield spec

    @property
    def node_module(self):
        # The single dependency injected via compute_dependency_specs above.
        if len(self.dependencies) != 1:
            raise TargetDefinitionException(
                self,
                'A node_bundle must define exactly one node_module dependency, have {}'.format(
                    self.dependencies))
        else:
            return self.dependencies[0]
| manonja/smart-portfolio | contrib/node/src/python/pants/contrib/node/targets/node_bundle.py | node_bundle.py | py | 1,763 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pants.contrib.node.targets.node_package.NodePackage",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pants.fs.archive.TYPE_NAMES_PRESERVE_SYMLINKS",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pants.fs.archive",
"line_number": 18,
... |
74043067873 | import bluetooth
import time
import serial # 导入模块
import zxing
import numpy as np
import time
import cv2 as cv
reader = zxing.BarCodeReader()
log_counter = 0
start_time = time.time()
IMG_PATH='/home/pi/Documents/gx/0003.jpg'
# HSV thresholds for red, blue (incl. sky blue) and green masks
# (OpenCV hue range is 0-179)
lower_red = np.array([0,43,46])
upper_red = np.array([15,255,255])
# lower_blue
lower_blue=np.array([95, 43, 46])
upper_blue=np.array([ 124, 255, 255])
lower_g=np.array([35, 43, 46])
upper_g=np.array([ 77, 255, 255])
# Row range that classify_color() crops the captured image to
UPPER_BOUND=600#130#600
LOWER_BOUND=1700#400#1700
def voice(txt, portx="/dev/ttyUSB0", bps=9600, timex=5,):
    """Send text to the speech-synthesis module over a serial port.

    The text is prefixed with "<G>" (the module's speak command) and encoded
    as GBK. Errors are printed rather than raised (best-effort behavior kept
    from the original).

    :param txt: text to speak
    :param portx: serial device, e.g. /dev/ttyUSB0 on Linux or COM3 on Windows
    :param bps: baud rate (one of the standard values, 50 ... 115200)
    :param timex: timeout in seconds (None = wait forever, 0 = return at once)
    """
    ser = None
    try:
        # Open the serial port and write the command; result is the number
        # of bytes written.
        ser = serial.Serial(portx, bps, timeout=timex)
        result = ser.write(("<G>"+txt).encode("gbk"))
        print("写总字节数:", result)
    except Exception as e:
        print("---异常---:", e)
    finally:
        # BUG FIX: the original only closed the port on the success path,
        # leaking the serial handle whenever write() raised.
        if ser is not None:
            ser.close()
    return
def scanQRCode(ImgPAth):
    """Decode a QR/barcode from the image file at *ImgPAth* using the global
    zxing ``reader`` and return the decoded text (barcode.parsed).

    NOTE(review): behavior when no code is found is not visible here -
    verify what reader.decode returns in that case before relying on it.
    """
    barcode = reader.decode(ImgPAth)
    # logPrinter()
    print(barcode)
    print(barcode.raw)
    print(barcode.parsed)
    return barcode.parsed
# NOTE(review): these three re-assignments duplicate the module-level
# reader/log_counter setup above (and time_start vs start_time) - they look
# like leftovers from a merge.
reader = zxing.BarCodeReader()
log_counter = 0
time_start = time.time()
def logPrinter(logtxt):
    """Print a numbered log line with the elapsed seconds since module start.

    Output format: "log: <counter> time <elapsed_seconds> -- <message>".
    Increments the global log_counter on every call.
    """
    global log_counter
    # global start_time
    end_time = time.time()
    # BUG FIX: the counter and elapsed time were passed in swapped order,
    # so %d truncated the elapsed seconds and %.3f formatted the counter.
    print("log: %d time %.3f -- %s" %
          (log_counter, end_time - start_time, logtxt))
    log_counter += 1
    return
import numpy as np
import cv2
import time
def catch_picture():
    """Capture frames from camera index 2 for ~2 seconds, then save the most
    recent frame to IMG_PATH.

    Grabbing frames continuously for 2 seconds before keeping one gives the
    sensor time to adjust (this mirrors the original timing behavior).
    """
    start_time = time.time()
    cap = cv2.VideoCapture(2)  # camera device index 2
    while True:
        # Keep reading frames; only the last one is written to disk
        sucess, img = cap.read()
        end_time = time.time()
        if end_time - start_time > 2:
            cv2.imwrite(IMG_PATH, img)
            break
    # BUG FIX: the original never released the capture device, keeping the
    # camera handle open for the life of the process.
    cap.release()
    return
# def if_qrcode():
# return
def classify_color():
    """Locate red, green and blue regions in the image saved at IMG_PATH.

    Returns a tuple (x_r, x_g, x_b): one horizontal position score per color,
    or -1 when too few pixels of that color survive the erosion.

    NOTE(review): the score divides sum(X_axis * mask) by
    np.sum(np.nonzero(mask)) - the sum of the *indices* of nonzero entries,
    not the pixel count - so this is a relative score rather than a true
    centroid x-coordinate. Confirm that downstream code only compares these
    values against each other.
    """
    frame = cv.imread(IMG_PATH)
    # Crop to the rows of interest (the trailing [:][:] are no-ops)
    frame = frame[UPPER_BOUND:LOWER_BOUND][:][:]
    frame = cv2.resize(frame, (480, 100))
    # cv.imshow('Capture', frame)
    # change to hsv model
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    # Build one mask per color: pixels inside the HSV range become white
    mask_b = cv.inRange(hsv, lower_blue, upper_blue)
    print(mask_b.shape)
    mask_r = cv.inRange(hsv, lower_red, upper_red)
    mask_g = cv.inRange(hsv, lower_g, upper_g)
    # Write debug images showing the masked-out regions per color
    res_r = cv.bitwise_and(frame, frame, mask=mask_r)
    res_g = cv.bitwise_and(frame, frame, mask=mask_g)
    res_b = cv.bitwise_and(frame, frame, mask=mask_b)
    cv.imwrite('/home/pi/Documents/gx/r.jpg', res_r)
    cv.imwrite('/home/pi/Documents/gx/g.jpg', res_g)
    cv.imwrite('/home/pi/Documents/gx/b.jpg', res_b)
    # Erode heavily to suppress small noisy blobs
    kernel = np.ones((5, 5), np.uint8)
    mask_b = cv.erode(mask_b, kernel, iterations=5)
    mask_r = cv.erode(mask_r, kernel, iterations=5)
    mask_g = cv.erode(mask_g, kernel, iterations=5)
    # mask_b=1-mask_b
    # mask_r=1-mask_r
    # mask_g=1-mask_g
    X_axis = np.arange(0, frame.shape[1], 1)
    # mask=cv.erode(mask,kernel,iterations=1)
    # Boolean masks of the surviving colored pixels
    mask_r = mask_r > 0
    mask_g = mask_g > 0
    mask_b = mask_b > 0
    # Sum of the indices of nonzero entries (see the review note above)
    r_sum = (np.sum(np.nonzero(mask_r)) - 1)
    g_sum = (np.sum(np.nonzero(mask_g)) - 1)
    b_sum = (np.sum(np.nonzero(mask_b)) - 1)
    # Horizontal score per color; -1 flags "color (nearly) absent"
    x_r = (np.multiply(X_axis, mask_r[:]).sum() / r_sum) if r_sum > 200 else -1
    x_g = (np.multiply(X_axis, mask_g[:]).sum() / g_sum) if g_sum > 200 else -1
    x_b = (np.multiply(X_axis, mask_b[:]).sum() / b_sum) if b_sum > 200 else -1
    # res = cv.bitwise_and(frame, frame, mask=mask1)
    # cv.imwrite('det000.jpg',res)
    # # if __name__ == '__main__':
    # while True:
    #     cv.imshow('img', mask)
    #     if cv.waitKey() == ord('q'):
    #         break
    # cv.destroyAllWindows()
    return (x_r, x_g, x_b)
# def catch_
HOST_MAC_ADDR = 'DC:A6:32:AA:75:A3'  # MAC address of the target Bluetooth device
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
port = 1
if __name__ == "__main__":
    # try:
    # Currently only runs the color classifier; the Bluetooth send loop
    # below is commented out.
    print(classify_color())
# return
# sock.connect((HOST_MAC_ADDR, port)) # 连接蓝牙
# while True:
# now_time=time.time()
# now_time-=start_time
# print('now time')
# # 使用计时或者串口回传
# if now_time<10:
# logPrinter('直线')
# elif now_time<15:
# logPrinter('拍照')
# sock.send(('<G>'+scanQRCode(IMG_PATH)).encode())
# elif now_time<30:
# logPrinter('直线')
# elif now_time<45:
# logPrinter('左横')
# else:
# break
# # sock.send('hello!'.encode()) #每隔三秒发送一个字符串
# # time.sleep(3)
# # except:
# # sock.close()
# # logPrinter('disconnect')
# sock.close()
# logPrinter('disconnect')
# logPrinter('task finished')
# # if __name__ == "__main__":
# # reader = zxing.BarCodeReader()
# # log_counter=0
# # time_start=time.time()
# # # voice("绿色蓝色")
# # logPrinter("000")
| darrrt/EngineeringInnovationComp2023 | deprecated/multi-communication/deprecated/all-1.py | all-1.py | py | 5,732 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "zxing.BarCodeReader",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_numb... |
6803771447 | import cv2
from djitellopy import Tello
def initialize():
    """Connect to the Tello drone, zero its motion commands, print the
    battery level, and start the video stream. Returns the Tello object.
    """
    drone = Tello()
    drone.connect()
    # Zero every velocity component so the drone starts stationary
    drone.for_back_velocity = 0
    drone.left_right_velocity = 0
    drone.up_down_velocity = 0
    drone.yaw_velocity = 0
    drone.speed = 0
    print(drone.get_battery())
    drone.streamon()
    return drone
def shutdown(drone):
    """Stop the video stream, land the drone, and terminate the process.

    NOTE(review): exit(0) ends the whole interpreter - callers never regain
    control after this function.
    """
    drone.streamoff()
    drone.land()
    exit(0)
def detect(drone, mtcnn, width=360, height=240):
    """Stream frames from the drone, run MTCNN face detection on each frame,
    and show the annotated feed until 'q' is pressed.

    :param drone: connected Tello that is already streaming
    :param mtcnn: detector exposing detect(img, landmarks=True)
    :param width: width the frame is scaled to before detection
    :param height: height the frame is scaled to before detection
    """
    while True:
        # read stream from drone
        got_frame = drone.get_frame_read()
        frame = got_frame.frame
        # BUG FIX: the frame was resized twice to the same (width, height);
        # the second resize was a per-frame no-op and has been removed.
        vid = cv2.resize(frame, (width, height))
        box, prob, lms = mtcnn.detect(vid, landmarks=True)
        draw(vid, box, prob, lms)
        cv2.imshow("Drone Feed", vid)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cv2.destroyAllWindows()
def draw(frame, box, prob, lms):
if box is not None and prob is not None:
for b, p, ld in zip(box, prob, lms):
x, y, w, h = int(b[0]), int(b[1]), int(b[2]), int(b[3])
# draw box
cv2.rectangle(frame, (x, y), (w, h), (0, 255, 0), 2)
# display probability
cv2.putText(frame, str(p), (w, h), cv2.FONT_HERSHEY_SIMPLEX, 1,
(0, 255, 0), 2, cv2.LINE_AA) | ollin23/dsc609 | detector.py | detector.py | py | 1,390 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "djitellopy.Tello",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number":... |
43399303375 | import itertools
def check_gap(mat, mingap=0, maxgap=0):
    """Return True when every consecutive pair of indices in *mat* respects
    the gap constraints: each index must exceed its predecessor by more than
    *mingap* and by no more than 1 + *maxgap*.
    """
    previous = None
    for position in mat:
        if previous is not None:
            too_close = position <= previous + mingap
            too_far = position > previous + 1 + maxgap
            if too_close or too_far:
                return False
        previous = position
    return True
def sequenceMatching(edgeMatching, mingap=0, maxgap=0):
    """Combine the per-edge candidate position lists into full matchings and
    keep only those whose position sequence satisfies check_gap.
    """
    valid_matchings = []
    for combination in itertools.product(*edgeMatching):
        if check_gap(combination, mingap, maxgap):
            valid_matchings.append(list(combination))
    return valid_matchings
def relevant_pattern(pattern, patterns, min_gap=0, max_gap=0):
    """Return True when at least one base pattern in *patterns* occurs as a
    (gap-constrained) subsequence of *pattern*.
    """
    return any(is_subsequence(candidate, pattern, min_gap, max_gap)
               for candidate in patterns)
def is_subsequence(pat1, pat2, mingap=0, maxgap=0):
    """Return True when sequence pattern *pat1* is a subsequence of *pat2*.

    Patterns are strings like "[(a, b)(c)]": itemsets in parentheses. Every
    itemset of pat1 must be contained in some itemset of pat2, and the matched
    itemset positions must respect the min/max gap constraints (checked via
    sequenceMatching).
    """
    # Split each pattern into a list of item lists per itemset. Splitting on
    # ")" leaves a trailing "]" fragment, which [:-1] drops.
    pat1_itemsets = [edge.replace("[", "").replace("(", "").split(", ") for edge in pat1.split(")")][:-1]
    pat2_itemsets = [edge.replace("[", "").replace("(", "").split(", ") for edge in pat2.split(")")][:-1]
    matching = []
    for itemset1 in pat1_itemsets:
        j = 0
        edge_matching = []  # positions in pat2 whose itemset contains itemset1
        for itemset2 in pat2_itemsets:
            if all(x in itemset2 for x in itemset1):
                edge_matching.append(j)
            j += 1
        if len(edge_matching) == 0:
            # Some itemset of pat1 occurs nowhere in pat2
            return False
        matching.append(edge_matching)
    # Any gap-respecting assignment of positions makes pat1 a subsequence
    matchings = sequenceMatching(matching, mingap, maxgap)
    if len(matchings) == 0:
        return False
    return True
def read_pattern_by_dataset_dic(pat, dataset_dict):
    """Translate an encoded pattern line "codes... : support" into a
    human-readable pattern string plus its integer support.

    Each space-separated element is an itemset of comma-separated integer
    codes; codes are mapped to names via *dataset_dict*.
    Returns ("[(name, ...)(...)...]", support).
    """
    path, sup = pat.split(':')
    itemsets = []
    for element in str(path).strip().split(" "):
        codes = element.strip().replace('{', "").replace('}', "").split(",")
        names = [dataset_dict[int(code.strip())] for code in codes]
        itemsets.append("(" + ", ".join(names) + ")")
    return "[" + "".join(itemsets) + "]", int(sup.strip())
def read_dataset_dictionary(file):
    """Load a tab-separated "label<TAB>number" file into a {number: label}
    dictionary.
    """
    mapping = {}
    with open(file, "r") as handle:
        for raw_line in handle:
            label, number = raw_line.strip().split("\t")
            mapping[int(number)] = label
    return mapping
| yahuan-chen/PRED-ontologies | Helpers/spm_core_functions.py | spm_core_functions.py | py | 2,278 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.product",
"line_number": 14,
"usage_type": "call"
}
] |
import requests

# Search Zoho CRM Contacts by email via the v2 REST API.
# field to search
# https://www.zoho.com/crm/help/api/v2/#ra-search-records
"""
Only one of the above four parameters would work at one point of time.
Furthermore, if two parameters are given simultaneously, preference will be given in the order criteria,
email, phone and word, and only one of them would work.
"""
url = "https://www.zohoapis.com/crm/v2/Contacts/search"
querystring = {"email":"mail@gmail.com"}
headers = {
    'Authorization': "Authorization code here",  # placeholder - supply a real OAuth token
    'Cache-Control': "no-cache",
    'Postman-Token': "Postman token here"  # placeholder from Postman export
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text)
| czam01/python | request_zohocrm.py | request_zohocrm.py | py | 674 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.request",
"line_number": 21,
"usage_type": "call"
}
] |
27170748674 | # Aaron Yerke, HW 2 for ML 2019
# 1. (50 points) Implement gradient descent-based logistic regression in Python. Use
# ∆J = 0.00001 as the stopping criterion.
# 2. (50 points total distributed as below) Apply your code from question 2 to the iris virginica and virsicolor flowers.
# Specifically, randomly select 99 of these flowers for training your logistic model and use the remaining one flower for testing.
# You only need to do training once and testing once with your specific choice of the training flowers and testing flowers.
# That is to say, you don’t need to do the leave-one-out cross validation 100 times.
# (a) (15 points) After your training, plot the total cost J vs iterations for your 99 training flowers for four scenarios.
# (b) (20 points) Predict the flower type of your testing flower for each of the four scenarios.
# (c) (15 points) Apply sklearn.linear model.LogisticRegression to your specific choice of training flowers. With the intercept and coefficients produced by sklearn, calculate the total final cost J for your 99 flowers.
# --------------------------------------------------------------------------
# Import external libraries
# --------------------------------------------------------------------------
# for my code
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from itertools import chain
# for comparison
import scipy.stats as stats
from sklearn import linear_model
from sklearn.metrics import f1_score
# for dataset
from sklearn.datasets import load_iris
from sklearn import preprocessing
# --------------------------------------------------------------------------
# set up plotting parameters
# --------------------------------------------------------------------------
# Shared matplotlib styling constants used by the plots in this script
line_width_1 = 2
line_width_2 = 2
marker_1 = '.'  # point
marker_2 = 'o'  # circle
marker_size = 12
line_style_1 = ':'  # dotted line
line_style_2 = '-'  # solid line
# --------------------------------------------------------------------------
# Some useful functions
# --------------------------------------------------------------------------
def logit(p):
    """Log-odds (inverse sigmoid) of probability *p*."""
    odds = p / (1 - p)
    return np.log(odds)
def sigmoid(line):
    """Logistic function: map a real value (or array) into (0, 1)."""
    exp_neg = np.exp(-line)
    return 1 / (1 + exp_neg)
def loss_j1(X, y, coef):
    """Mean logistic cross-entropy cost J for data *X*, 0/1 labels *y* and
    coefficients *coef* (no intercept term).

    BUG FIX: the original computed (1/m) * (y @ log(h) - (1-y) @ log(1-h)),
    which has both signs wrong. The standard cost is
    J = -(1/m) * [y @ log(h) + (1-y) @ log(1-h)], consistent with loss_j.
    """
    y_hat = 1 / (1 + np.exp(-(X @ coef)))  # sigmoid, inlined so the function is self-contained
    return -1 / len(y) * np.sum(y @ np.log(y_hat) + (1 - y) @ np.log(1 - y_hat))
def loss_j(y, y_hat):
    """Mean binary cross-entropy between 0/1 labels *y* and predicted
    probabilities *y_hat*.
    """
    per_sample = y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)
    return -np.mean(per_sample)
# --------------------------------------------------------------------------
# set up log reg class
# --------------------------------------------------------------------------
class my_logistic_reg:
    """Logistic regression fitted by batch gradient descent.

    Training stops after *n_iter* iterations or as soon as the cost J
    changes by less than *dj_stop* between two consecutive iterations
    (the assignment's stopping criterion).
    """

    def __init__(self, lr=0.001, n_iter=1000, dj_stop=0.00001):
        """
        :param lr: learning rate for gradient descent
        :param n_iter: maximum number of iterations
        :param dj_stop: stop when |J_t - J_{t-1}| < dj_stop
        """
        self.slopes = None       # learned coefficients, one per feature
        self.y_intercept = None  # learned bias term
        self.lr = lr
        self.n_iter = n_iter
        self.dj_stop = dj_stop

    def _sigmoid(self, line):
        """Logistic function; private mirror of the module-level sigmoid so
        the class is self-contained (consistent with _loss_j below)."""
        return 1 / (1 + np.exp(-line))

    def _loss_j(self, y, y_hat):
        """Mean binary cross-entropy cost J."""
        return -np.mean(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))

    def fit_model(self, my_x, my_y):
        """Fit on feature matrix *my_x* (n_samples x n_features) and 0/1
        labels *my_y*; the cost per iteration is recorded in self.cost_j.
        """
        n_samples, n_features = my_x.shape
        # init parameters
        self.slopes = np.zeros(n_features)
        self.y_intercept = 1
        self.cost_j = []
        # gradient descent; at most n_iter iterations
        while len(self.cost_j) < self.n_iter:
            # approximate y with a linear combination of slopes and x, plus intercept
            lin_model = np.dot(my_x, self.slopes) + self.y_intercept
            y_predicted = self._sigmoid(lin_model)
            loss = self._loss_j(my_y, y_predicted)
            # compute gradients of J w.r.t. slopes and intercept
            dz = y_predicted - my_y
            d_slope = (1 / n_samples) * np.matmul(my_x.T, dz)
            d_intercept = np.sum(dz)
            # update parameters
            self.slopes -= self.lr * d_slope
            self.y_intercept -= self.lr * d_intercept
            # CLEANUP: the original duplicated this append in both branches
            # of an if/else; a single append plus the dJ check is equivalent.
            self.cost_j.append(loss)
            if len(self.cost_j) > 1 and abs(loss - self.cost_j[-2]) < self.dj_stop:
                break  # change in J fell below the stopping criterion
        print("Fit completed!")

    def test_model(self, my_x, y_val=None):
        """Predict the class (0 or 1) for a single sample *my_x*. When
        *y_val* is given, also print the prediction next to the true value.
        """
        lin_model = np.dot(my_x, self.slopes) + self.y_intercept
        y_predicted = self._sigmoid(lin_model)
        if y_predicted > 0.5:
            model_prediction = 1
        else:
            model_prediction = 0
        if y_val is not None:
            print(f"The model prediction: {model_prediction}\nThe correct value: {y_val}")
        return model_prediction

    def plot_cost(self):
        """Scatter-plot the recorded cost J against the iteration number."""
        if len(self.cost_j) != 0:
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            ax.scatter(range(0, len(self.cost_j)), self.cost_j, label=f'Cost function values', color='red', marker=marker_1, linewidth=line_width_1)
            ax.set_xlabel('Iterations')
            ax.set_ylabel('Cost values')
            fig.show()
# --------------------------------------------------------------------------
# create training and testing datasets
# --------------------------------------------------------------------------
iris = load_iris()
df = pd.DataFrame(iris.data, columns = iris['feature_names'])
df['species'] = pd.Categorical.from_codes(iris.target, iris.target_names)
# Keep only virginica and versicolor (binary classification problem)
df = df[df.species != "setosa"]
df = df.reset_index(drop = True)
df['species'] = df['species'].map({'versicolor': 1, 'virginica': 0})
df.to_csv('testing.tsv', sep='\t')
#print(df.shape[0])
df_x = df.iloc[:,list(range(df.shape[1]-1))]
#normalize data to max value
x = df_x.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_x = pd.DataFrame(x_scaled)
print(f'df_x.shape: {df_x.shape}')
df_y = df.iloc[:,df.shape[1]-1]
print(f'df_y.shape: {df_y.shape}')
# NOTE(review): np.random.randint's `high` bound is exclusive, so with
# high=df.shape[0]-1 the last flower can never be chosen as the test
# sample - probably high=df.shape[0] was intended.
test_index = np.random.randint(low = 0, high = df.shape[0] - 1, size = 1 )[0]
print(test_index)
# NOTE(review): `filter` returns a one-shot iterator; it is consumed when
# test_x is computed below, which is why test_y re-creates the same filter.
train_index = filter(lambda a: a != test_index, range(df.shape[0]))
print(train_index)
train_x = df_x.drop(test_index, axis = 0)
print(f'train_x.shape: {train_x.shape}')
train_y = df_y.drop(test_index, axis = 0)
print(f'train_y.shape: {train_y.shape}')
test_x = df_x.drop(train_index, axis = 0)
print(f'test_x.shape: {test_x.shape}')
test_y = df_y.drop(filter(lambda a: a != test_index, range(df.shape[0])), axis = 0)
print(f'test_y.shape: {test_y.shape}')
# --------------------------------------------------------------------------
# Fit and test homemade model
# --------------------------------------------------------------------------
my_lr = my_logistic_reg()
my_lr.fit_model(train_x, train_y)
my_lr.plot_cost()
my_lr.test_model(test_x, test_y.iloc[0])
# --------------------------------------------------------------------------
# Test homemade model with f1_score
# --------------------------------------------------------------------------
preds = []
for i in train_x.T.iteritems():
preds.append(my_lr.test_model(np.matrix(i[1])))
print("accuracy by homemade model")
print(f1_score(preds,train_y))
f1_score(preds,train_y)
# --------------------------------------------------------------------------
# run professional log reg model
# --------------------------------------------------------------------------
print('pro_stuff')
skl_log = linear_model.LogisticRegression(solver="lbfgs")
skl_log.fit(X=train_x, y=train_y)
skl_pred = skl_log.predict(test_x)
print(f'sklearn log prediction: {skl_pred}')
print(f'Correct answer: {test_y}, {test_y == skl_pred}')
print(skl_log.coef_)
print(skl_log.intercept_)
preds = []
for i in train_x.T.iteritems():
preds.append(skl_log.predict(np.matrix(i[1])))
print("accuracy by sklearn model")
print(f1_score(preds,train_y))
f1_score(preds,train_y)
skl_log_cost = loss_j1(train_x, train_y, skl_log.coef_.T)
print(f'2(c) the total final cost J: {skl_log_cost}') | palomnyk/machineLearningFall2019 | assignment2/assignment2.py | assignment2.py | py | 8,129 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.log",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 50,
... |
74944576993 | import sys
import pandas as pd
import sqlite3
import z_service
def make_fedtbl(sq_conn):
    """Build the FDistricts table from the regions CSV and return it."""
    source_path = r'C:\Documents and Settings\ggolyshev\PycharmProjects\Cities\Base\regionsF.csv'
    regions = pd.read_csv(source_path, sep=';', encoding='cp1251')
    columns = ['FederalDistrictName', 'FederalDistrictID']
    districts = regions[columns].drop_duplicates(columns)
    districts.rename(columns={'FederalDistrictName': 'name',
                              'FederalDistrictID': 'ID'}, inplace=True)
    districts.to_sql('FDistricts', sq_conn, if_exists='replace', index_label='ID',
                     index=False, dtype={'name': 'TEXT', 'ID': 'INTEGER'})
    return districts
def make_cities_db(sq_conn):
    """Load the cities CSV, normalise OKTMO codes and write the Localities table."""
    columns = ['norm_name', 'name', 'norm_reg', 'href', 'lat', 'long', 'okato',
               'oktmo', 'people', 'square', 'height', 'phone_code', 'post_index',
               'timezone']
    cities = pd.read_csv(
        r'C:\Documents and Settings\ggolyshev\PycharmProjects\Cities\Base\info.csv',
        sep=';', encoding='cp1251')[columns]
    # Normalise OKTMO codes to the canonical 11-character form.
    cities['oktmo'] = cities['oktmo'].apply(lambda code: z_service.codes_correct(code, iSize=11))
    sql_types = {'norm_name': 'TEXT', 'name': 'TEXT', 'norm_reg': 'TEXT',
                 'href': 'TEXT', 'lat': 'REAL', 'long': 'REAL', 'okato': 'TEXT',
                 'oktmo': 'TEXT', 'people': 'INTEGER', 'square': 'REAL',
                 'height': 'REAL', 'phone_code': 'TEXT', 'post_index': 'TEXT',
                 'timezone': 'TEXT'}
    cities.to_sql('Localities', sq_conn, if_exists='replace', index_label='oktmo',
                  index=False, dtype=sql_types)
    return cities
def connect(strDBName):
    """Open (or create) the SQLite database *strDBName* and return the connection."""
    connection = sqlite3.connect(strDBName)
    return connection
def make_vliages_db(con):
    """Write settlement rows with full 11-character OKTMO codes to All_Places."""
    columns = ['name', 'okato1', 'okato2', 'oktmo', 'name2',
               'oktmo_parent', 'name_parent', 'type', 'norm_name']
    villages = pd.read_csv(
        r'C:\Documents and Settings\ggolyshev\PycharmProjects\Cities\Base\villages_info.csv',
        sep=';', encoding='cp1251', dtype=str)[columns]
    # Rows without an OKTMO code cannot be keyed and are discarded.
    villages = villages.drop(villages[villages['oktmo'].isnull()].index)
    # Keep only full-length codes; the shorter ones go to MunObrs instead.
    is_settlement = villages['oktmo'].str.len() == 11
    selected = villages[is_settlement]
    selected.to_sql('All_Places', con, if_exists='replace', index_label='oktmo',
                    index=False, dtype={name: 'TEXT' for name in columns})
    return selected
def make_munobr_db(con):
    """Build the MunObrs table: municipal entities plus short-OKTMO villages.

    Reads the villages and municipal-entity CSVs, keeps village rows whose
    OKTMO code is NOT the full 11-character settlement form, appends them to
    the municipal-entity rows, writes everything to the MunObrs table and
    returns the combined DataFrame.
    """
    dtfVil = pd.read_csv(r'C:\Documents and Settings\ggolyshev\PycharmProjects\Cities\Base\villages_info.csv',
                         sep=';', encoding='cp1251', dtype=str)[['name', 'okato1', 'okato2', 'oktmo', 'name2',
                                                                 'oktmo_parent', 'name_parent', 'type', 'norm_name']]
    dtfVil = dtfVil.drop(dtfVil[dtfVil['oktmo'].isnull()].index)
    msk = dtfVil['oktmo'].str.len() == 11
    #sign_budget
    #oktmo_budget
    #name_budget
    dtfRet = pd.read_csv(r'C:\Documents and Settings\ggolyshev\PycharmProjects\Cities\Base\munobr01042016.csv',
                         sep=';', encoding='cp1251', dtype=str)[['name', 'type', 'okato', 'oktmo', 'name2',
                                                                 'sign_budget', 'oktmo_budget', 'name_budget',
                                                                 'oktmo_parent', 'name_parent']]
    lst = dtfRet.columns.tolist()
    lstT = ['TEXT'] * len(lst)
    # BUG FIX: DataFrame.append() was deprecated in pandas 1.4 and removed in
    # 2.0; pd.concat performs the same row-wise concatenation (indices kept,
    # missing columns filled with NaN -- matching append's defaults).
    dtfRet = pd.concat([dtfRet, dtfVil[~msk]])
    dtfRet.to_sql('MunObrs', con, if_exists='replace',
                  index=False, dtype=dict(zip(lst, lstT)))
    return dtfRet
def main():
    """Open the cities database and dump the All_Places table to stdout."""
    connection = connect(r'cities.sqlite')
    places = pd.read_sql('select * from All_Places', connection)
    print(places)
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == "__main__":
    sys.exit(main())
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "z_service.codes_correct",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlite3.connec... |
38810876405 | import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
####################################################################################################
######################################## NEURAL NETWORKS ###########################################
####################################################################################################
# for Q6.1.1
class Net(nn.Module):
    """Fully-connected classifier: 1024 inputs -> 64 sigmoid units -> 36 logits."""

    def __init__(self):
        super(Net, self).__init__()
        # Keep the layers in one Sequential so the checkpoint keys
        # (main.0.*, main.2.*) stay stable.
        layers = [
            nn.Linear(1024, 64),
            nn.Sigmoid(),
            nn.Linear(64, 36),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, X):
        """Run the flattened 1024-dim input batch through the stack."""
        return self.main(X)
# for Q6.1.2
class CNN(nn.Module):
    """Two-block conv net for 32x32 single-channel inputs, 36-way output."""

    def __init__(self):
        super(CNN, self).__init__()
        # conv1: 1->10 channels (5x5); conv2: 10->20 channels with dropout.
        # Spatial sizes for 32x32 input: 28 -> 14 -> 10 -> 5.
        self.conv_layers = nn.Sequential(
            nn.Conv2d(1, 10, kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.Conv2d(10, 20, kernel_size=5),
            nn.Dropout(),
            nn.MaxPool2d(2),
            nn.ReLU(),
        )
        # 20 channels * 5 * 5 spatial = 500 flattened features.
        self.fc_layers = nn.Sequential(
            nn.Linear(500, 64),
            nn.Sigmoid(),
            nn.Linear(64, 36),
        )

    def forward(self, x):
        features = self.conv_layers(x)
        flat = torch.flatten(features, 1)
        return self.fc_layers(flat)
# for Q6.1.3
class CNNcifar(nn.Module):
    """Three-block conv net for 32x32x3 CIFAR images, 10-way output."""

    def __init__(self):
        super(CNNcifar, self).__init__()
        # Each block keeps the spatial size (3x3, padding=1) then halves it
        # with pooling: 32 -> 16 -> 8 -> 4.
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(16, 32, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 64, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )
        # 64 channels * 4 * 4 spatial = 1024 flattened features.
        self.fc_layers = nn.Sequential(
            nn.Linear(1024, 500),
            nn.ReLU(),
            nn.Linear(500, 10),
            nn.ReLU(),
        )

    def forward(self, x):
        features = self.conv_layers(x)
        flat = torch.flatten(features, 1)
        return self.fc_layers(flat)
# for Q6.2
class CNN2(nn.Module):
    """Two-block conv net for 3-channel inputs (Q6.2), 17-way output."""

    def __init__(self):
        super(CNN2, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 10, kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.Conv2d(10, 20, kernel_size=5),
            nn.Dropout(),
            nn.MaxPool2d(2),
            nn.ReLU(),
        )
        # 56180 = 20 channels * 53 * 53 spatial -- consistent with 224x224
        # inputs (224 -> 220 -> 110 -> 106 -> 53); confirm against the loader.
        self.fc_layers = nn.Sequential(
            nn.Linear(56180, 128),
            nn.Sigmoid(),
            nn.Linear(128, 17),
        )

    def forward(self, x):
        features = self.conv_layers(x)
        flat = torch.flatten(features, 1)
        return self.fc_layers(flat)
####################################################################################################
######################################## HELPER FUNCTIONS ##########################################
####################################################################################################
# convert numpy array to dataloader
def np2loader(X, y, batchsize=128, shuffling=True):
    """Wrap NumPy arrays in a torch DataLoader.

    X is cast to float32; y is expected one-hot and is collapsed to class
    indices via argmax. When batchsize is None, the DataLoader default
    batch size is used.
    """
    class_indices = y.argmax(axis=1)
    features = torch.from_numpy(np.float32(X))
    targets = torch.from_numpy(class_indices)
    dataset = torch.utils.data.TensorDataset(features, targets)
    kwargs = {'shuffle': shuffling}
    if batchsize is not None:
        kwargs['batch_size'] = batchsize
    return torch.utils.data.DataLoader(dataset, **kwargs)
def training_loop(myNet, trainLoader, validLoader, device, max_iters, learning_rate, lossf, optimizer, fname, flatten=True):
    """Train myNet for max_iters epochs, tracking train/val accuracy and loss.

    Parameters
    ----------
    myNet : nn.Module to train (moved onto `device`).
    trainLoader, validLoader : DataLoaders for the two splits.
    device : torch device (or device string) to run on.
    max_iters : number of epochs.
    learning_rate : unused here; the optimizer arrives pre-configured.
    lossf : loss function (e.g. CrossEntropyLoss).
    optimizer : optimizer bound to myNet's parameters.
    fname : path where the trained state_dict is saved.
    flatten : flatten each batch to (N, -1) before the forward pass
        (True for fully-connected nets, False for conv nets).
    """
    myNet = myNet.to(device)
    print(myNet)
    train_loss_list, train_acc_list, val_loss_list, val_acc_list = [], [], [], []
    for itr in range(max_iters):
        myNet.train()
        total_loss = 0.0
        for times, data in enumerate(trainLoader):
            inputs, labels = data[0].to(device), data[1].to(device)
            if flatten:
                inputs = inputs.view(inputs.shape[0], -1)
            # Zero the parameter gradients
            optimizer.zero_grad()
            # Forward, backward, optimize
            outputs = myNet(inputs)
            loss = lossf(outputs, labels)
            loss.backward()
            optimizer.step()
            # BUG FIX: accumulate the detached Python float; the original
            # summed live tensors, keeping every batch's autograd graph
            # alive for the whole epoch. (Also removed the unused
            # total_correct/total_instances counters.)
            total_loss += loss.item()
        # Per-epoch metrics on both splits.
        train_accuracy, train_loss = evaluate_model(myNet, trainLoader, lossf, device, flatten)
        train_acc_list.append(train_accuracy)
        train_loss_list.append(train_loss)
        val_accuracy, val_loss = evaluate_model(myNet, validLoader, lossf, device, flatten)
        val_acc_list.append(val_accuracy)
        val_loss_list.append(val_loss)
        if itr % 10 == 0:
            print(
                "itr: {:02d} \t loss: {:.2f} \t acc : {:.2f} \t eval_acc : {:.2f}".format(
                    itr, total_loss, train_accuracy, val_accuracy
                )
            )
    # save the weights
    torch.save(myNet.state_dict(), fname)
    # visualize accuracy and loss curves
    plot_train_valid(train_acc_list, val_acc_list, "accuracy")
    plot_train_valid(train_loss_list, val_loss_list, "average loss")
    plot_train(train_acc_list, "accuracy")
    plot_train(train_loss_list, "average loss")
def evaluate_model(myNet, dataLoader, lossf, device, flatten=True, my_class=False):
    """Evaluate myNet on dataLoader; return (accuracy, average loss).

    Parameters
    ----------
    myNet : the network to evaluate (switched to eval mode).
    dataLoader : batches of (inputs, labels).
    lossf : loss function.
    device : torch device (or device string).
    flatten : flatten inputs to (N, -1) before the forward pass.
    my_class : when given (and not equal to False/0), overwrite every label
        with this class index -- used to score how often the net predicts
        a single class. Note class 0 cannot be forced this way.

    Returns
    -------
    (float, float)
        (fraction of correct predictions rounded to 4 places,
         total loss / number of samples)
    """
    myNet.eval()
    total_loss = 0.0
    total_correct = 0.0
    total_instances = 0
    # BUG FIX: evaluation now runs under no_grad; the original built
    # autograd graphs for every forward pass, wasting time and memory.
    with torch.no_grad():
        for times, data in enumerate(dataLoader):
            inputs, labels = data[0].to(device), data[1].to(device)
            if my_class != False:
                for i in range(len(labels)):
                    labels[i] = my_class
            if flatten:
                inputs = inputs.view(inputs.shape[0], -1)
            outputs = myNet(inputs)
            loss = lossf(outputs, labels)
            # Total loss
            total_loss += loss.item()
            # average accuracy
            classifications = torch.argmax(outputs, dim=1)
            correct_predictions = sum(classifications == labels).item()
            total_correct += correct_predictions
            total_instances += len(inputs)
    accuracy = round(total_correct / total_instances, 4)
    return accuracy, total_loss / total_instances
# Plot train and valid loss / accuracies
def plot_train_valid(train_data, valid_data, datatype):
    """Plot per-epoch training and validation curves for `datatype`."""
    epochs = range(len(train_data))
    plt.plot(epochs, train_data, label="training")
    plt.plot(range(len(valid_data)), valid_data, label="validation")
    plt.xlabel("epoch")
    plt.ylabel(datatype)
    plt.xlim(0, len(train_data) - 1)
    plt.ylim(0, None)
    plt.legend()
    plt.grid()
    plt.show()
# Plot train and loss / accuracies
def plot_train(train_data, datatype):
    """Plot the per-epoch training curve for `datatype`."""
    epochs = range(len(train_data))
    plt.plot(epochs, train_data, label="training")
    plt.xlabel("epoch")
    plt.ylabel(datatype)
    plt.xlim(0, len(train_data) - 1)
    plt.ylim(0, None)
    plt.legend()
    plt.grid()
    plt.show()
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
11866935175 | import os
from bcitp.utils.standards import PATH_TO_SESSION
from kivy.uix.screenmanager import Screen
from kivy.properties import ObjectProperty, StringProperty
class StartScreen(Screen):
    """First screen: asks for a session name, then dispatches to the menus."""

    # layout bindings (populated from the .kv file)
    session_name = ObjectProperty(None)
    label_msg = StringProperty('')

    def __init__(self, session_header, **kwargs):
        super(StartScreen, self).__init__(**kwargs)
        self.sh = session_header

    def change_to_gen_settings(self, *args):
        """Slide left into the general-settings screen."""
        self.manager.current = 'GeneralSettings'
        self.manager.transition.direction = 'left'

    def change_to_bci(self, *args):
        """Slide left into the BCI menu screen."""
        self.manager.current = 'BCIMenu'
        self.manager.transition.direction = 'left'

    def save_session_name(self, *args):
        """Create or load the session named in the text input.

        With an empty name, the most recently modified session folder is
        reused. An existing session is loaded from disk (and its data will
        be overwritten); a new name gets a fresh folder.
        """
        sname = self.session_name.text
        if not os.path.isdir(PATH_TO_SESSION):
            os.makedirs(PATH_TO_SESSION)
        if sname == '':
            # if no session_name is provided, use latest modified folder in
            # data/session
            all_subdirs = []
            for d in os.listdir(PATH_TO_SESSION + '.'):
                bd = os.path.join(PATH_TO_SESSION, d)
                if os.path.isdir(bd):
                    all_subdirs.append(bd)
            # BUG FIX: use os.path.basename instead of split('/')[-1] so the
            # folder name is extracted correctly on Windows paths too.
            sname = os.path.basename(max(all_subdirs, key=os.path.getmtime))
        self.sh.info.name = sname
        if os.path.isdir(PATH_TO_SESSION + sname):
            self.label_msg = "Session " + sname \
                + " already exists. Data will be overwritten"
            self.sh.loadFromPkl()
        else:
            os.makedirs(PATH_TO_SESSION + sname)
            self.sh.saveToPkl()
            self.label_msg = "Session Saved as: " + sname
        self.sh.info.flag = True
| rafaelmendes/BCItp | bcitp/screens/start_screen.py | start_screen.py | py | 1,710 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "kivy.uix.screenmanager.Screen",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "kivy.properties.StringProperty",
"line_number": 12,
"usage_type": "call"
},... |
30645335501 | from snappy.snap import t3mlite as t3m
from snappy.snap.mcomplex_base import *
from snappy.SnapPy import matrix
from .hyperboloid_utilities import *
__all__ = ['RaytracingData']
class RaytracingData(McomplexEngine):
    """Packs triangulation data -- face gluings, SO(1,3) face-pairing
    matrices, face planes, vertices, colors and face weights -- into the
    uniform bindings and compile-time constants consumed by the raytracing
    shader, and updates the view state as the camera moves."""

    def add_weights(self, weights):
        """Attach one weight per (tetrahedron, face) pair.

        `weights` is a flat list indexed by 4 * tet.Index + face index;
        a falsy argument zeroes every face weight.
        """
        for tet in self.mcomplex.Tetrahedra:
            tet.Weights = {
                F : weights[4 * tet.Index + f] if weights else 0.0
                for f, F in enumerate(t3m.TwoSubsimplices)}

    def get_uniform_bindings(self):
        """Return {uniform name: (GLSL type, values)} with all per-face /
        per-vertex data flattened in tetrahedron-major order."""
        d = {}
        # Index of the tetrahedron glued to each face.
        d['TetrahedraCombinatorics.otherTetNums'] = (
            'int[]',
            [ tet.Neighbor[F].Index
              for tet in self.mcomplex.Tetrahedra
              for F in t3m.TwoSubsimplices ])
        # Matching face index in the neighboring tetrahedron.
        d['TetrahedraCombinatorics.otherFaceNums'] = (
            'int[]',
            [ tet.Gluing[F][f]
              for tet in self.mcomplex.Tetrahedra
              for f, F in enumerate(t3m.TwoSubsimplices) ])
        # SO(1,3) transforms carrying coordinates across each face.
        d['TetrahedraBasics.SO13tsfms'] = (
            'mat4[]',
            [ tet.O13_matrices[F]
              for tet in self.mcomplex.Tetrahedra
              for F in t3m.TwoSubsimplices ])
        d['TetrahedraBasics.planes'] = (
            'vec4[]',
            [ tet.R13_planes[F]
              for tet in self.mcomplex.Tetrahedra
              for F in t3m.TwoSubsimplices ])
        d['TetrahedraBasics.R13Vertices'] = (
            'vec4[]',
            [ tet.R13_vertices[V]
              for tet in self.mcomplex.Tetrahedra
              for V in t3m.ZeroSubsimplices ])
        # Color table indices for faces, edges and vertices.
        d['Colors.face_color_indices'] = (
            'int[]',
            [ tet.Class[F].Index
              for tet in self.mcomplex.Tetrahedra
              for F in t3m.TwoSubsimplices ])
        d['Colors.edge_color_indices'] = (
            'int[]',
            [ tet.Class[E].Index
              for tet in self.mcomplex.Tetrahedra
              for E in t3m.OneSubsimplices ])
        d['Colors.vertex_color_indices'] = (
            'int[]',
            [ tet.Class[V].Index
              for tet in self.mcomplex.Tetrahedra
              for V in t3m.ZeroSubsimplices ])
        d['weights'] = (
            'float[]',
            [ tet.Weights[F]
              for tet in self.mcomplex.Tetrahedra
              for F in t3m.TwoSubsimplices ])
        return d

    def get_compile_time_constants(self):
        """Return the shader-source substitutions for the triangulation sizes."""
        d = {}
        d[b'##num_tets##'] = len(self.mcomplex.Tetrahedra)
        d[b'##num_cusps##'] = len(self.mcomplex.Vertices)
        d[b'##num_edges##'] = len(self.mcomplex.Edges)
        return d

    def update_view_state(self, boost_tet_num_and_weight,
                          m = matrix([[1.0, 0.0, 0.0, 0.0],
                                      [0.0, 1.0, 0.0, 0.0],
                                      [0.0, 0.0, 1.0, 0.0],
                                      [0.0, 0.0, 0.0, 1.0]])):
        """Apply the O(1,3) motion `m` to the view state.

        (boost, tet_num, weight) describe the camera: its Lorentz boost,
        the tetrahedron it sits in, and the accumulated face weight.  After
        applying `m`, faces are crossed -- updating boost/tet_num/weight via
        the face-pairing matrices -- until the camera position lies inside
        the current tetrahedron again.
        """
        boost, tet_num, weight = boost_tet_num_and_weight
        boost = matrix(boost, ring = self.RF)
        m = matrix(m, ring = self.RF)
        boost = O13_orthonormalize(boost * m)
        entry_F = -1
        # Bounded walk (at most 100 face crossings) to avoid spinning
        # forever on numerical edge cases.
        for i in range(100):
            pos = boost.transpose()[0]
            tet = self.mcomplex.Tetrahedra[tet_num]
            # Face whose plane the position sticks out of the most.
            amount, F = max(
                [ (r13_dot(pos, tet.R13_planes[F]), F)
                  for F in t3m.TwoSubsimplices ])
            # Do not immediately cross back through the face we entered by.
            if F == entry_F:
                break
            # Inside the tetrahedron (up to tolerance): done.
            if amount < 0.0000001:
                break
            boost = O13_orthonormalize(tet.O13_matrices[F] * boost)
            tet_num = tet.Neighbor[F].Index
            entry_F = tet.Gluing[F].image(F)
            weight += tet.Weights[F]
        return boost, tet_num, weight
| ekim1919/SnapPy | python/raytracing/raytracing_data.py | raytracing_data.py | py | 3,659 | python | en | code | null | github-code | 1 | [
{
"api_name": "snappy.snap.t3mlite.TwoSubsimplices",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "snappy.snap.t3mlite",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "snappy.snap.t3mlite.TwoSubsimplices",
"line_number": 22,
"usage_type": "attr... |
25376932933 | from argparse import ArgumentParser
from .sensing_reader import SensingReader
from .common import SUPPORT_CAMERA_INFO_DICT
def parse_args(argv):
    """Parse CLI args (--camera, --i2c_bus, --i2c_addr) and return them.

    The camera model is upper-cased and checked against the supported
    camera table; an unsupported or missing model only triggers a warning,
    preserving the original best-effort behaviour.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "--camera",
        type=str,
        help="camera model",
    )
    parser.add_argument(
        "--i2c_bus",
        type=str,
        help="i2c bus",
    )
    parser.add_argument(
        "--i2c_addr",
        type=str,
        help="i2c addr",
    )
    args = parser.parse_args(argv)
    # Guard: --camera is optional, so it may be None (the original crashed
    # here with AttributeError on None.upper()).
    if args.camera is None:
        print("camera model should in support list")
        return args
    # BUG FIX: normalise a-z to A-Z *and write it back* -- the original
    # upper-cased a local copy only, so callers still saw the raw value.
    args.camera = args.camera.upper()
    # camera model should be in the support list
    if args.camera not in SUPPORT_CAMERA_INFO_DICT.keys():
        print("camera model should in support list")
    return args
def main(args, unknown):
    """Entry point: parse the leftover CLI args and run one sensing read."""
    parsed = parse_args(unknown)
    reader = SensingReader(
        camera_model=parsed.camera,
        i2c_bus=parsed.i2c_bus,
        i2c_addr=parsed.i2c_addr,
    )
    reader.read()
| windzu/apk | apk/calibration/read_sensing/main.py | main.py | py | 1,061 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "common.SUPPORT_CAMERA_INFO_DICT.keys",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "common.SUPPORT_CAMERA_INFO_DICT",
"line_number": 31,
"usage_type": "name"
}... |
19323790025 | import json
from unittest import mock
import pytest
from intergov.repos.message_lake.minio.miniorepo import MessageLakeMinioRepo
from tests.unit.domain.wire_protocols.test_generic_message import _generate_msg_object
CONNECTION_DATA = {
'host': 'minio.host',
'port': 1000,
'access_key': 'access_key',
'secret_key': 'secret_key',
'bucket': 'bucket',
'use_ssl': False
}
@mock.patch('intergov.repos.message_lake.minio.miniorepo.miniorepo.boto3')
def test_post(boto3):
    """Posting a message stores two S3 objects (content and metadata)."""
    repo = MessageLakeMinioRepo(CONNECTION_DATA)
    boto3.client.assert_called_once()
    msg = _generate_msg_object(sender_ref='xxxx-xxxx-xxxx')
    assert repo.post(msg)
    s3_client = boto3.client.return_value
    # one put_object for the message content, one for its metadata
    assert s3_client.put_object.call_count == 2
@mock.patch('intergov.repos.message_lake.minio.miniorepo.miniorepo.boto3')
def test_update_metadata(boto3):
    """update_metadata() succeeds when the stored metadata.json is readable."""
    repo = MessageLakeMinioRepo(CONNECTION_DATA)
    boto3.client.assert_called_once()
    msg = _generate_msg_object(sender_ref='xxxx-xxxx-xxxx', status='pending')
    metadata = {
        'status': 'received'
    }

    # Fake S3 get_object: only keys ending in metadata.json yield a body.
    def get_object(**kwargs):
        key = kwargs['Key']
        body = mock.MagicMock()
        if key.endswith('metadata.json'):
            body.read.return_value = json.dumps(metadata).encode('utf-8')
            return {
                'Body': body
            }
        return None

    s3_client = boto3.client.return_value
    s3_client.get_object.side_effect = get_object
    assert repo.update_metadata(str(msg.sender), str(msg.sender_ref), {'status': 'received'})
@mock.patch('intergov.repos.message_lake.minio.miniorepo.ClientError', Exception)
@mock.patch('intergov.repos.message_lake.minio.miniorepo.miniorepo.boto3')
def test_get(boto3):
    """get() returns the message when both objects exist, tolerates missing
    metadata, returns nothing without content, and re-raises any client
    error whose code is not NoSuchKey."""
    repo = MessageLakeMinioRepo(CONNECTION_DATA)
    boto3.client.assert_called_once()
    msg = _generate_msg_object(sender_ref='xxxx-xxxx-xxxx', status='pending')
    msg_dict = msg.to_dict()
    metadata = {
        'status': 'received'
    }

    # Fake S3 get_object: serve metadata.json / content.json bodies.
    def get_object(**kwargs):
        key = kwargs['Key']
        body = mock.MagicMock()
        data = None
        if key.endswith('metadata.json'):
            data = metadata
        elif key.endswith('content.json'):
            data = msg_dict
        if data:
            body.read.return_value = json.dumps(data).encode('utf-8')
            return {
                'Body': body
            }

    s3_client = boto3.client.return_value
    s3_client.get_object.side_effect = get_object
    assert repo.get(str(msg.sender), str(msg.sender_ref))

    # Simulate a "NoSuchKey" client error for a chosen object key.
    exception = Exception()
    exception.response = {
        'Error': {
            'Code': 'NoSuchKey'
        }
    }

    def raise_error(on_key):
        def get_object_content(key):
            if key.endswith(on_key):
                raise exception
            else:
                return json.dumps(msg_dict)
        return get_object_content

    s3_client = boto3.client.return_value
    repo.get_object_content = mock.Mock()
    # Missing metadata is tolerated ...
    repo.get_object_content.side_effect = raise_error('metadata.json')
    assert repo.get(str(msg.sender), str(msg.sender_ref))
    # ... but missing content means there is no message.
    repo.get_object_content.side_effect = raise_error('content.json')
    assert not repo.get(str(msg.sender), str(msg.sender_ref))

    # Any other error code must propagate for either object.
    exception.response['Error']['Code'] = 'Random'
    for key in ['content.json', 'metadata.json']:
        repo.get_object_content.side_effect = raise_error(key)
        with pytest.raises(Exception):
            repo.get(str(msg.sender), str(msg.sender_ref))
| bizcubed/intergov | tests/unit/repos/message_lake/test.py | test.py | py | 3,510 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "intergov.repos.message_lake.minio.miniorepo.MessageLakeMinioRepo",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tests.unit.domain.wire_protocols.test_generic_message._generate_msg_object",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "unitt... |
674861904 | import sys
if sys.version_info.major == 2:
import httplib
from base64 import decodestring as base64_decode
from StringIO import StringIO as StringLikeIO
else:
from http import client as httplib
from base64 import decodebytes as base64_decode
from io import BytesIO as StringLikeIO
import base64
import json
import numpy as np
import os
import random
import requests
import six
import skimage.io
import skimage.transform
import warnings
from collections import Mapping
from nose.tools import assert_equal
from nose.tools import assert_in
from nose.tools import assert_is_instance
from nose.tools import assert_true
from nose.tools import assert_tuple_equal
from numpy.testing import assert_array_almost_equal
from parameterized import parameterized
from unittest import TestSuite
def bounded_repr(obj, limit=100, trim=20):
    """repr() of obj, shortened to its first/last `trim` characters when
    the full repr exceeds `limit` characters."""
    text = repr(obj)
    if len(text) <= limit:
        return text
    return '{0}...<truncated>...{1}'.format(text[:trim], text[-trim:])
def assert_response_expectations(
        response, expected_status_codes, ok_key, expected_ok_value):
    """Assert the status code and the boolean `ok_key` field of a JSON
    response; return the decoded JSON payload."""
    valid_codes = set(expected_status_codes)
    assert_in(response.status_code, valid_codes,
        msg='got invalid status code HTTP {0} from endpoint'.format(
            response.status_code))
    payload = response.json()
    assert_in(ok_key, payload,
        msg='expected "{0}" key to be present in response JSON data'.format(
            ok_key))
    assert_equal(payload[ok_key], expected_ok_value,
        msg='expected "{0}" key to be {1}, got {2} (type={3})'.format(
            ok_key,
            expected_ok_value,
            repr(payload[ok_key]),
            type(payload[ok_key]).__name__))
    return payload
def assert_follows_schema(schema, data, name):
    """Recursively assert that `data` conforms to `schema`.

    Schema grammar:
      * a type              -> isinstance check
      * a tuple of types    -> isinstance against any of them
      * a mapping           -> recurse into the matching keys of `data`
      * a one-element list  -> apply the element schema to every item
    `name` is the dotted path reported in assertion messages.
    """
    if isinstance(schema, type):
        assert_is_instance(data, schema,
            msg=('expected `{name}` to be of type {expected_type}, got '
                 '{actual_type} [{repr}]').format(
                name=name,
                expected_type=schema,
                actual_type=type(data).__name__,
                repr=bounded_repr(data)))
    elif isinstance(schema, tuple):
        assert_true(isinstance(data, schema),
            msg=('expected `{name}` to be one of types {expected_types}, got '
                 '{actual_type} [{repr}]').format(
                name=name,
                expected_types=json.dumps([t.__name__ for t in schema]),
                actual_type=type(data).__name__,
                repr=bounded_repr(data)))
    elif isinstance(schema, Mapping):
        for schema_key, schema_value in six.iteritems(schema):
            assert_follows_schema(
                schema_value,
                data[schema_key], '{name}.{key}'.format(
                    name=name, key=schema_key))
    elif isinstance(schema, list):
        # Lists carry exactly one element schema, applied to every item.
        if len(schema) != 1:
            raise ValueError(
                ('schema assertions for lists are expected to contain only 1 '
                 'element, but {num_elements} elements were provided').format(
                    num_elements=len(schema)))
        for i, element in enumerate(data):
            assert_follows_schema(
                schema[0],
                element,
                '{name}.#{num}'.format(name=name, num=i))
    else:
        raise ValueError(
            ('got unsupported schema definition, got type {schema_type} '
             'instead').format(schema_type=type(schema).__name__))
class TestSegmenter(TestSuite):
    """Integration tests for the segmentation microservice HTTP endpoint.

    NOTE(review): this subclasses TestSuite but is written like a TestCase
    (setUp + test_* methods) -- presumably run by nose; confirm.
    """

    def setUp(self):
        """Resolve the endpoint from the environment, ping it, and locate
        the before/after fixture files."""
        host = os.environ.get('SEGMENTER_TEST_HOST', 'http://localhost')
        port = os.environ.get('SEGMENTER_TEST_PORT', 80)
        # NOTE(review): when set via the environment this is a *string*,
        # and `1.1 * self.max_size` below would fail -- confirm.
        self.max_size = os.environ.get('SEGMENTER_TEST_MAX_SIZE', 1080)
        endpoint = '{host}:{port}'.format(host=host, port=port)
        # Fail fast if the service is not reachable.
        response = requests.get(endpoint, timeout=3)
        if response.status_code != httplib.OK:
            raise RuntimeError(
                'got HTTP {0} from microservice at {1}'.format(
                    response.status_code,
                    endpoint))
        before_filename = os.path.abspath(
            os.path.join(os.path.dirname(__file__), 'data', 'before.png'))
        if not os.path.isfile(before_filename):
            raise RuntimeError(
                'could not locate "before" test image [{0}]'.format(
                    before_filename))
        after_filename = os.path.abspath(
            os.path.join(os.path.dirname(__file__), 'data', 'after.npy'))
        if not os.path.isfile(after_filename):
            raise RuntimeError(
                'could not locate "after" test image [{0}]'.format(
                    after_filename))
        self.endpoint = endpoint
        self.before_filename = before_filename
        self.after_filename = after_filename

    def test_ping_response(self):
        """The service answers HTTP 200 on its root endpoint."""
        response = requests.get(self.endpoint)
        assert_equal(response.status_code, httplib.OK,
            msg='expected HTTP {0}, got HTTP {1}'.format(
                httplib.OK, response.status_code))

    def test_inference_with_single_image(self):
        """Posting one image returns one mask matching the stored fixture."""
        before_image = skimage.io.imread(self.before_filename)
        with open(self.before_filename, 'rb') as fp:
            base64_before_image = base64.b64encode(fp.read()).decode('ascii')
        response = requests.post(
            self.endpoint,
            data={
                'images': json.dumps([base64_before_image]),
            })
        json_data = assert_response_expectations(
            response,
            expected_status_codes=[httplib.OK],
            ok_key='ok',
            expected_ok_value=True)
        expected_schema = {
            'ok': bool,
            'masks': six.string_types,
        }
        assert_follows_schema(expected_schema, json_data, 'response')
        # Masks come back as a JSON list of base64-encoded images.
        returned_mask_string = json.loads(json_data['masks'])
        stringlike_buffers = [
            StringLikeIO(base64_decode(base64_response.encode('ascii')))
            for base64_response in returned_mask_string]
        returned_masks = [skimage.io.imread(b) for b in stringlike_buffers]
        assert_equal(len(returned_masks), 1,
            msg='expected only 1 mask in the response')
        returned_mask = returned_masks[0]
        after_mask = np.load(self.after_filename)
        assert_array_almost_equal(after_mask, returned_mask,
            err_msg='segmentation mask is incorrect')

    def test_inference_with_multiple_images(self):
        """Posting N copies of the image returns N correct masks."""
        num_images = random.randint(2, 5)
        before_image = skimage.io.imread(self.before_filename)
        with open(self.before_filename, 'rb') as fp:
            base64_before_image = base64.b64encode(fp.read()).decode('ascii')
        response = requests.post(
            self.endpoint,
            data={
                'images': json.dumps(
                    [base64_before_image for _ in range(num_images)]),
            })
        json_data = assert_response_expectations(
            response,
            expected_status_codes=[httplib.OK],
            ok_key='ok',
            expected_ok_value=True)
        expected_schema = {
            'ok': bool,
            'masks': six.string_types,
        }
        assert_follows_schema(expected_schema, json_data, 'response')
        returned_mask_string = json.loads(json_data['masks'])
        stringlike_buffers = [
            StringLikeIO(base64_decode(base64_response.encode('ascii')))
            for base64_response in returned_mask_string]
        returned_masks = [skimage.io.imread(b) for b in stringlike_buffers]
        assert_equal(len(returned_masks), num_images,
            msg='expected {0} masks in the response, got {1}'.format(
                num_images, len(returned_masks)))
        after_mask = np.load(self.after_filename)
        for returned_mask in returned_masks:
            assert_array_almost_equal(
                after_mask,
                returned_mask,
                err_msg='segmentation mask is incorrect')

    def test_inference_with_oversized_image(self):
        """An image larger than max_size is accepted and the mask matches
        the submitted image's spatial shape."""
        before_image = skimage.io.imread(self.before_filename)
        # catch skimage's warnings
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=UserWarning)
            before_image = skimage.transform.resize(
                before_image,
                (int(1.1 * self.max_size), int(1.1 * self.max_size)))
        stringlike_buffer = StringLikeIO()
        skimage.io.imsave(stringlike_buffer, before_image)
        base64_before_image = base64.b64encode(
            stringlike_buffer.getvalue()).decode('ascii')
        response = requests.post(
            self.endpoint,
            data={
                'images': json.dumps([base64_before_image]),
            })
        json_data = assert_response_expectations(
            response,
            expected_status_codes=[httplib.OK],
            ok_key='ok',
            expected_ok_value=True)
        expected_schema = {
            'ok': bool,
            'masks': six.string_types,
        }
        assert_follows_schema(expected_schema, json_data, 'response')
        returned_mask_string = json.loads(json_data['masks'])
        stringlike_buffers = [
            StringLikeIO(base64_decode(base64_response.encode('ascii')))
            for base64_response in returned_mask_string]
        returned_masks = [skimage.io.imread(b) for b in stringlike_buffers]
        assert_equal(len(returned_masks), 1,
            msg='expected only 1 mask in the response')
        returned_mask = returned_masks[0]
        # The mask must cover the (resized) input pixel-for-pixel.
        assert_tuple_equal(returned_mask.shape, before_image.shape[:2],
            msg='expected mask shape to be {0}, got {1}'.format(
                before_image.shape[:2], returned_mask.shape))

    @parameterized.expand([
        [
            'mising_images',
            {},
        ],
        [
            'no_images',
            {
                'images': json.dumps([]),
            },
        ],
    ])
    def test_inference_with_missing_parameters(self, name, payload):
        """Requests with missing or empty image lists are rejected with 400."""
        response = requests.post(self.endpoint, data=payload)
        json_data = assert_response_expectations(
            response,
            expected_status_codes=[httplib.BAD_REQUEST],
            ok_key='ok',
            expected_ok_value=False)
        expected_schema = {
            'ok': bool,
            'error_message': six.string_types,
        }
        assert_follows_schema(expected_schema, json_data, 'response')
| maibrahim2016/background_removal | src/tests/test_microservice_response.py | test_microservice_response.py | py | 10,637 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.version_info",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "nose.tools.assert_in",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_in",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "nose.t... |
31941896761 | from flask import jsonify, request
from pydash import omit
from .. import routes
from middleware import db, auth
from models import User
@routes.route('/api/auth/register', methods=['POST'])
def register():
    """Create a new user account and return a signed JWT for it.

    Expects a JSON body with username, password, name and email.
    Responds 409 when the username is already taken.
    """
    body = request.get_json()
    # BUG FIX: the original rejected duplicate usernames with a bare
    # `assert`, which is stripped under `python -O` and surfaces as a 500;
    # use an explicit conflict response instead.
    existing = db.session.query(User).where(User.username == body['username']).first()
    if existing is not None:
        return jsonify({'message': 'username already exists'}), 409
    user = User(
        username = body['username'],
        hashed_password = auth.hash_password(body['password']),
        name = body['name'],
        email = body['email'],
        # NOTE(review): admin status is granted whenever "admin" appears in
        # the username -- mirrors the original logic, but it lets anyone
        # self-register as admin; confirm this is intentional.
        is_admin = 'admin' in body['username']
    )
    db.session.add(user)
    db.session.commit()
    return {
        'access_token': auth.encode_jwt_token(
            user,
            username=user.username,
            email=user.email,
            name=user.name,
            isAdmin=user.is_admin,
        )
    }
| joem2019a/devops-assignment | api/routes/auth/register.py | register.py | py | 768 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.request.get_json",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "models.User",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "middleware.auth.hash... |
18042314284 | # coding: utf-8
from tastypie.resources import ModelResource
from tastypie.authorization import Authorization
from tastypie import fields
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from ...models import Programacao
from django.utils import timezone
class ProgramacaoResource(ModelResource):
    """Tastypie resource exposing the full programming schedule."""
    # Embed the full related programme object in every entry.
    programa = fields.ForeignKey('radio.api.resources.ProgramaResource', 'programa', full=True)
    class Meta:
        queryset = Programacao.objects.all()
        resource_name = 'programacao'
        # Authorization() allows all operations — fine for read-only use,
        # risky if POST/PUT/DELETE are exposed.
        authorization = Authorization()
        # Only the schedule boundaries are serialized (plus `programa` above).
        fields = ['data_inicio', 'data_fim']
        filtering = {
            'programa': ALL_WITH_RELATIONS,
        }
class AoVivoResource(ModelResource):
    """Resource exposing the programme(s) currently on air, highest
    category priority first."""
    programa = fields.ForeignKey('radio.api.resources.ProgramaResource', 'programa', full=True)
    class Meta:
        # Base queryset only orders; the "on air now" filter is applied per
        # request in get_object_list().  The original put
        # `timezone.now()` in the class-level queryset, which freezes the
        # timestamp at import time — the resource would forever report what
        # was live when the server started.
        queryset = Programacao.objects.all().order_by('-programa__categoria__ordem_prioridade')
        resource_name = 'aovivo'
        authorization = Authorization()
        filtering = {
            'programa': ALL_WITH_RELATIONS,
        }

    def get_object_list(self, request):
        # Evaluate "now" on every request so the on-air window is current.
        now = timezone.now()
        return super(AoVivoResource, self).get_object_list(request).filter(
            data_inicio__lte=now, data_fim__gte=now)
class ProgramaHorariosResource(ModelResource):
    """Schedule entries restricted to the date fields (no embedded programme)."""
    #programa = fields.ForeignKey('radio.api.resources.ProgramaResource', 'programa', full=True)
    class Meta:
        queryset = Programacao.objects.all()
        # NOTE(review): shares resource_name 'programacao' with
        # ProgramacaoResource above — registering both under the same Api()
        # would collide; confirm only one of them is registered.
        resource_name = 'programacao'
        authorization = Authorization()
        fields = ['data_inicio', 'data_fim']
        filtering = {
            'programa': ALL_WITH_RELATIONS,
        }
| rbiassusi/grade_programacao | grade_programacao/radio/api/resources/programacaoresource.py | programacaoresource.py | py | 1,631 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tastypie.resources.ModelResource",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "tastypie.fields.ForeignKey",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tastypie.fields",
"line_number": 10,
"usage_type": "name"
},
{
"api_na... |
19546355153 | #!/usr/bin/python
# This prints chromosome strand "left" or "right", depending on the argument.
# Usage: select-strand.py {0 | 1} < chromosome > single-strand
# Prints Nucleotide tab In-gene-mark
# where Nucleotide is one of [ACGT] or X for other input letters
# and In-gene-mark is 0 or 1
import sys
import re
from tempfile import TemporaryDirectory, NamedTemporaryFile
# When reversing the file, read only this many triplets into memory at once
length_limit = 1024*1024*32; # Requires approximately 2.5 GiB of memory. Adjust the limit as needed
# A pattern for recognizing correct triplets: one nucleotide/ambiguity code
# followed by two binary in-gene marks.
triplet_regex = re.compile(r"\A[ACGTBKMNRWY][01][01]\Z")
#triplet_regex = re.compile(r"\A[ACGTN][01][01]\Z")

def triplets(infile):
    """Yield well-formed nucleotide triplets from *infile*, three characters
    at a time.

    Malformed triplets are reported on stderr and skipped.  A trailing
    partial triplet (after stripping trailing whitespace) is reported too.
    The input must contain no whitespace except possibly at the end.
    """
    while True:
        chunk = infile.read(3)
        if len(chunk) < 3:
            # End of input: anything left after stripping is a stray partial.
            chunk = chunk.rstrip()
            if len(chunk) != 0:
                sys.stderr.write("Partial triplet read: '" + chunk + "' of length " + str(len(chunk)) + "\n")
            return
        if triplet_regex.match(chunk):
            yield chunk
        else:
            sys.stderr.write("Incorrect triplet: '" + chunk + "'\n")
# Complement lookup for nucleotides: Watson-Crick pairs, with every IUPAC
# ambiguity code collapsed to 'X'.
nucl_mirror = dict(zip("ATCGBKMNRWY", "TAGCXXXXXXX"))
# TODO what to do about the ambiguity codes (B, K, M, N, R, W, Y)?

def mirror_nucleotide(letter):
    """Return the complementary nucleotide, e.g. A->T, T->A, C->G, G->C;
    ambiguity codes map to 'X'."""
    return nucl_mirror[letter]
usage = ("Incorrect arguments.\nUsage:\t" + sys.argv[0] + " {0 | 1}\n"
    + "\tSelects left (0) or right (1) strand of DNA code read from stdin.\n")
# Check the arguments and fill in the selected strand.
if len(sys.argv) == 2:
    if sys.argv[1] == "0":
        selected_strand = 0
    elif sys.argv[1] == "1":
        selected_strand = 1
    else:
        sys.exit(usage)
else:
    sys.exit(usage)
# Read the triplets.
if selected_strand == 0:
    # Positive strand was selected. Simply extract the right in-genome mark and print
    # (mark index 1 belongs to the forward strand).
    for triplet in triplets(sys.stdin):
        sys.stdout.write(triplet[0] + "\t" + triplet[1] + "\n")
else:
    # Negative strand was selected. We have to reverse the strand and mirror the nucleotides.
    # Reversing is done in chunks – the input might be very large, so we reverse it a chunk at a time,
    # storing the partial results in a temporary directory. Then, these tempfiles are concatenated in
    # reverse order; thus completing the reversal.
    # Create a new temporary directory that will hold our tempfiles
    with TemporaryDirectory(prefix = "revstrand") as tmpdirname:
        genome_to_reverse = []
        temp_file_names = []
        for triplet in triplets(sys.stdin):
            # Negative strand → mirror the nucleotide and store before reversing and printing
            # (mark index 2 belongs to the reverse strand).
            genome_to_reverse.append(mirror_nucleotide(triplet[0]) + "\t" + triplet[2] + "\n");
            # We have overfilled the buffer
            if (len(genome_to_reverse) > length_limit):
                # Write the current buffer into a tempfile.  Each tempfile is
                # internally reversed; files are later replayed newest-first,
                # completing the global reversal.
                with NamedTemporaryFile(dir = tmpdirname, mode = "w+t", delete = False) as tmpfile:
                    print("Writing the genome to file %s" % tmpfile.name, file = sys.stderr)
                    tmpfile.write("".join(reversed(genome_to_reverse)))
                    temp_file_names.append(tmpfile.name)
                genome_to_reverse = []
        # There might be an unprinted part in genome_to_reverse. Print that first.
        for dublet in reversed(genome_to_reverse):
            sys.stdout.write(dublet)
        del(genome_to_reverse)
        # Each of the files in temp_file_names now holds a reversed portion of the whole strand.
        # Concatenate them together
        for name in reversed(temp_file_names):
            with open(name, "rt") as tmpfile:
                sys.stdout.write(tmpfile.read())
| pompomon/nfpl104-genes | select-strand.py | select-strand.py | py | 3,913 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"l... |
42049926844 | import argparse
import datetime
import json
import logging
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
import apache_beam.transforms.window as window
import ast
from google.auth.transport.requests import Request
from google.oauth2.service_account import Credentials
from googleapiclient.discovery import build
import base64
from google.oauth2 import service_account
from google.auth.transport import requests
from googleapiclient.discovery import build
from google.auth import default, iam
from google.auth import impersonated_credentials
'''
To run locally:
export GOOGLE_APPLICATION_CREDENTIALS=/Users/anandjain/Documents/GitHub/gcptest/gmail-dataflow/src/main/java/com/google/cloud/pso/pipeline/anand-1-sa.json
gcloud auth activate-service-account --key-file=/Users/anandjain/Documents/GitHub/gcptest/gmail-dataflow/src/main/java/com/google/cloud/pso/pipeline/anand-1-sa.json
python3 gmailattachment.py --input_topic="projects/anand-1-291314/topics/gmail-messages" --output_path="gs://anand-1/gmailattachment/attachments/test.data/"
To run dataflow:
PROJECT=anand-1-291314
BUCKET=gs://anand-1/dataflow
REGION=us-east1
python -m gmailattachment --region $REGION --runner DataflowRunner --project $PROJECT --temp_location gs://$BUCKET/temp
'''
TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']
EMAIL='anandj@eqxdemo.com'
MESSAGE_ID='177eaf1b98ab4a34'
ATTACHMENT_ID='ANGjdJ-MvhHmvbpGYPpgy3t4mfPEL-wLapjRrv4Bp40pqwEu2R24CvLqaXQLNGnTo70Ma4U2NO8SH_0uC98tSoqUnKyLqhvPvqfb28hEn91ICnvRK2Dpp4i2WCFum9gnKbT1wzfwrajeZ8mm6dj6KtlAgW0c0wjhPLTgSMwDXwt6YT8AAgHC6XHZ0K_NubQFax8_mIxoi4isZGLjY_Eb7Gv2uT5ZlThrR_tCjhiGWQ'
GSUITE_ADMIN_USER = 'test-anand-1@anand-1-291314.iam.gserviceaccount.com'
#SA_JSON=<INSERT_SA_JSON_HERE_AS_DICTIONARY_ENTRY_NO_QUOTES_ETC>
SA_JSON= ""
class WriteAttachmentToGCS(beam.DoFn):
    """Beam DoFn: for every Gmail push message, download the first attachment
    through the Gmail API (domain-wide delegated service account) and write
    its bytes to GCS under ``output_path``."""

    def __init__(self, output_path):
        self.output_path = output_path

    def getattachment(self, email, messageId, attachementId):
        """Fetch the raw bytes of one attachment from *email*'s mailbox.

        Returns None when the API call fails.
        """
        credentials = Credentials.from_service_account_info(SA_JSON)
        credentials = credentials.with_scopes(SCOPES)
        # Impersonate the mailbox owner (domain-wide delegation).
        credentials = credentials.with_subject(email)
        try:
            request = requests.Request()
            credentials.refresh(request)
            gmail = build('gmail', 'v1', credentials=credentials, cache_discovery=False)
            att = gmail.users().messages().attachments().get(
                userId=email, messageId=messageId, id=attachementId).execute()
            # Gmail returns attachment data in URL-safe base64 ('-'/'_'),
            # which plain b64decode rejects; use the urlsafe variant.
            return base64.urlsafe_b64decode(att['data'])
        except Exception as ex:
            # Only googleapiclient's HttpError carries `.content`; the
            # original unconditional `ex.content` crashed the handler for
            # every other exception type.
            detail = getattr(ex, 'content', None)
            if detail is not None:
                print('Error:', detail.decode('ascii', errors='replace'))
            else:
                print('Error:', ex)
            return None

    def process(self, element):
        """Parse one Pub/Sub message, locate the first attachment part and
        upload its bytes to GCS."""
        e = element.decode("UTF-8")
        # NOTE(review): the payload appears to be a Python-literal dict,
        # hence ast.literal_eval rather than json.loads — confirm producer.
        e = ast.literal_eval(e)
        messageId = e['id']
        att_file_name = ''
        att_text = ''
        for i in range(len(e['payload']['parts'])):
            if 'attachmentId' in e['payload']['parts'][i]['body']:
                attachmentId = e['payload']['parts'][i]['body']['attachmentId']
                att_file_name = e['payload']['parts'][i]['filename']
                for j in range(len(e['payload']['headers'])):
                    if e['payload']['headers'][j]['name'] == 'To':
                        em = e['payload']['headers'][j]['value']
                        # Extract "user@domain" from 'Name <user@domain>';
                        # a bare address has no brackets — the original
                        # slice em[0:-1] would silently drop its last char.
                        if '<' in em and '>' in em:
                            em = em[em.find('<') + 1:em.find('>')]
                        att_text = self.getattachment(em, messageId, attachmentId)
                        break
                break
        filename = self.output_path + att_file_name
        print('File Name of Attachement:' + filename)
        # Only write when an attachment was found AND downloaded; the
        # original `if (filename)` was always true (output_path is non-empty)
        # and could write None.
        if att_file_name and att_text:
            with beam.io.gcp.gcsio.GcsIO().open(filename=filename, mode="w") as f:
                f.write(att_text)
def run(input_topic, output_path, window_size=1.0, pipeline_args=None):
    """Build and run the streaming pipeline: Pub/Sub -> attachment -> GCS.

    `window_size` is currently unused — the windowing stages are commented
    out below.
    """
    # `save_main_session` is set to true because some DoFn's rely on
    # globally imported modules.
    pipeline_options = PipelineOptions(
        pipeline_args, streaming=True, save_main_session=True
    )
    with beam.Pipeline(options=pipeline_options) as pipeline:
        (
            pipeline
            | "Read PubSub Messages"
            >> beam.io.ReadFromPubSub(topic=input_topic)
            # | "Window into" >> GroupWindowsIntoBatches(window_size)
            # | "Write to GCS" >> beam.ParDo(WriteBatchesToGCS(output_path))
            | "Write attachment to GCS" >> beam.ParDo(WriteAttachmentToGCS(output_path))
        )
if __name__ == "__main__": # noqa
    logging.getLogger().setLevel(logging.INFO)
    # CLI flags for this script; anything unrecognised is forwarded to the
    # Beam runner via parse_known_args below.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_topic",
        help="The Cloud Pub/Sub topic to read from.\n"
        '"projects/anand-1-291314/topics/gmail-messages".',
        default="projects/anand-1-291314/topics/gmail-messages"
    )
    parser.add_argument(
        "--window_size",
        type=float,
        default=1.0,
        help="Output file's window size in number of minutes.",
    )
    parser.add_argument(
        "--output_path",
        help="GCS Path of the output file including filename prefix.",
        default="gs://anand-1/gmailattachment/attachments/test.data/"
    )
    known_args, pipeline_args = parser.parse_known_args()
    run(
        known_args.input_topic,
        known_args.output_path,
        known_args.window_size,
        pipeline_args,
    )
{
"api_name": "apache_beam.DoFn",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "google.oauth2.service_account.Credentials.from_service_account_info",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "google.oauth2.service_account.Credentials",
"line_n... |
11878217031 | from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework import serializers
from .models import Product
from .serializers import ProductSerializer
@api_view(['GET'])
def ApiOverview(request):
    """Return a short map describing the available API endpoints."""
    endpoints = {
        'Add': '/create',
        'view_all': '/all',
        'Update': '/update/pk/',
        'Delete': '/item/pk/',
    }
    return Response(endpoints)
@api_view(['POST'])
def add_product(request):
    """Create a new product.

    Returns the serialized product on success, 400 with the validation
    errors on bad input, and raises a ValidationError for duplicates.
    """
    # NOTE(review): filtering on raw request.data assumes every payload key
    # is a model field; unexpected keys raise FieldError — confirm callers.
    if Product.objects.filter(**request.data).exists():
        raise serializers.ValidationError('Product already exists')
    items = ProductSerializer(data=request.data)
    if items.is_valid():
        items.save()
        return Response(items.data)
    # Invalid input is a client error: report 400 with the errors.  The
    # original returned an empty 404, which misrepresents the failure.
    return Response(items.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def view_items(request):
    """List every product, or 404 when there are none."""
    queryset = Product.objects.all()
    if not queryset:
        return Response(status=status.HTTP_404_NOT_FOUND)
    serialized = ProductSerializer(queryset, many=True)
    return Response(serialized.data)
@api_view(['POST'])
def update_items(request, pk):
    """Update the product identified by ``pk`` from the request body."""
    try:
        item = Product.objects.get(id=pk)
    except Product.DoesNotExist:
        # Unknown id used to surface as an unhandled 500.
        return Response(status=status.HTTP_404_NOT_FOUND)
    serializer = ProductSerializer(instance=item, data=request.data, many=False)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    # The original fell through and returned None (HTTP 500) on invalid
    # input; report the validation errors instead.
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["DELETE"])
def delete_item(request, pk):
    """Delete the product identified by ``pk``."""
    try:
        item = Product.objects.get(id=pk)
    except Product.DoesNotExist:
        # Unknown id used to raise DoesNotExist (HTTP 500); report 404.
        return Response(status=status.HTTP_404_NOT_FOUND)
    item.delete()
    return Response("item deleted")
| Nidhunkumar/Drf_Crud | venv/DrfCrud/api/views.py | views.py | py | 1,460 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.response.Response",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "serializers.ProductSerializer",
"line_number": 22,
"usage_type": "call... |
6230064290 | import traceback
from aiogram import Router, F
from aiogram import types
from aiogram.fsm.context import FSMContext
from datetime import datetime, timedelta
from config_reader import myvars, DEBUG
from filters.permission import check_permission
from handlers.superuser_menu import InputData
from keyboards.for_doctor import run_calendar, get_kb_doctor_menu, get_kb_doctor_selected_day, \
get_kb_doctor_selected_patient
from libs.db_lib import pg_select_one_column as pg_soc, pg_execute, pg_select
from libs.dictanotry_lib import to_ru_dayofweek, to_ru_month3
from libs.google_ss import update_cell, google_get_vars
from libs.load_vars import update_appointments
from main import logger
router = Router()
async def get_user_data(callback: types.CallbackQuery, state: FSMContext):
    """Collect the doctor's working context for the appointment handlers.

    Merges the FSM state with the chat id, the fixed set of bookable hours,
    and — when the chat belongs to a known doctor — that doctor's name.
    """
    data = await state.get_data()
    data['doctor_id'] = callback.message.chat.id
    data['appt_format'] = 'closed'
    data['hours'] = [10, 11, 12, 13, 14, 15, 16]
    for doctor_name, doctor_info in myvars.doctors.items():
        if doctor_info['tid'] == callback.message.chat.id:
            data['doctor'] = doctor_name
    return data
@router.callback_query(F.data == "cb_doctor_workday_appt")
async def doctor_workday_appt(callback: types.CallbackQuery):
    """Replace the current message with the day-picker calendar."""
    await callback.message.delete()
    await callback.message.answer("Выбор дня:", reply_markup=await run_calendar())
    await callback.answer("")
@router.callback_query(F.data.startswith("callback_doctor_calendar"))
async def pick_doctor_date(callback: types.CallbackQuery, state: FSMContext):
    """Handle a tap on the doctor's calendar.

    Callback data format: "callback_doctor_calendar:<ACTION>:<year>:<month>:<day>"
    where ACTION is PREV-MONTH / NEXT-MONTH / SELECTED / BACK / IGNORE.
    """
    action = callback.data.split(":")[1]
    year = callback.data.split(":")[2]
    month = callback.data.split(":")[3]
    day = callback.data.split(":")[4]
    await state.update_data(year=year)
    await state.update_data(month=month)
    await state.update_data(day=day)
    await state.update_data(user_tid=callback.from_user.id)
    # Anchor on the 1st so +/- 31 days always lands in the adjacent month.
    temp_date = datetime(int(year), int(month), 1)
    if action == 'PREV-MONTH':
        prev_date = temp_date - timedelta(days=31)
        await callback.message.edit_reply_markup(reply_markup=await run_calendar(year=int(prev_date.year), month=int(prev_date.month)))
    if action == 'NEXT-MONTH':
        next_date = temp_date + timedelta(days=31)
        await callback.message.edit_reply_markup(reply_markup=await run_calendar(year=int(next_date.year), month=int(next_date.month)))
    if action == "SELECTED":
        # SECURITY(review): the date parts come from our own keyboard, but
        # they are interpolated into SQL — prefer a parameterized query if
        # pg_soc supports one.
        query = f"SELECT id FROM tb_appointments WHERE date(appt_date) = date('{year}-{month}-{day}') and is_closed = 1"
        res = pg_soc(query)
        # status_apt = ''
        # Any closed slot on that date marks the whole day as closed.
        if len(res) > 0:
            status_apt = 'закрыта'
        else:
            status_apt = 'открыта'
        day_of_week = to_ru_dayofweek[datetime(year=int(year), month=int(month), day=int(day)).weekday()]
        text = f'<b> {day_of_week}</b>\n' + \
               f'<b>{day} {to_ru_month3[int(month)]} {year}</b>\n' + \
               '━━━━━━━━━━━━━━━\n' + \
               f'Запись: <u>{status_apt}</u>'
        await callback.message.delete()
        await callback.message.answer(text=text, reply_markup=await get_kb_doctor_selected_day(state))
        await callback.answer("Успешно")
    if action == "BACK":
        await callback.message.delete()
        await callback.message.answer(f'Меню', reply_markup=await get_kb_doctor_menu(state))
        await state.clear()
        await callback.answer('')
    if action == "IGNORE":
        await callback.answer('Выберите число')
@router.callback_query(F.data.startswith("cb_doctor_close_appt_exec"))
async def doctor_close_appt(callback: types.CallbackQuery, state: FSMContext):
    """Close the whole working day selected earlier: mark every hour slot as
    closed in Postgres and mirror the change into the doctor's Google sheet."""
    user_data = await get_user_data(callback, state)
    try:
        user_data['spreadsheet_id'] = myvars.doctors[user_data['doctor']]['spreadsheet_id']
        service, sheets, title = await google_get_vars(user_data, callback)
        for hour in user_data['hours']:
            appt_date = datetime(year=int(user_data['year']), month=int(user_data['month']), day=int(user_data['day']), hour=hour)
            # Does an appointment row already exist for this day and hour?
            # SECURITY(review): values are interpolated into SQL f-strings
            # throughout — parameterize if pg_soc/pg_execute allow it.
            query = f"SELECT EXISTS (SELECT cid FROM tb_appointments WHERE appt_date = '{appt_date}'::timestamp)"
            res = pg_soc(query)[0]
            if res:
                query = f"UPDATE tb_appointments SET is_closed = 1 WHERE appt_date = '{appt_date}'::timestamp"
                pg_execute(query)
            else:
                # No booking yet: insert a closed placeholder owned by the doctor.
                query = f"INSERT INTO tb_appointments (is_closed, cid, doctor_id, appt_format, appt_date) " + \
                        f"SELECT 1, id, {int(user_data['doctor_id'])}, '{user_data['appt_format']}', '{appt_date}' " \
                        f"FROM tb_customers WHERE tid = {int(user_data['doctor_id'])}"
                pg_execute(query)
            await callback.answer('БД обновлена. Обновляется Google таблица', cache_time=35)
            if DEBUG == 0:
                await update_cell(myvars.doctors[user_data['doctor']]['spreadsheet_id'], int(hour),
                                  int(user_data['month']), int(user_data['year']),
                                  int(user_data['day']), user_data['appt_format'], service, sheets, title)
            else:
                # Debug builds write into the test doctor's spreadsheet.
                await update_cell(myvars.doctors['Соболевский В.А.']['spreadsheet_id'], int(hour),
                                  int(user_data['month']), int(user_data['year']),
                                  int(user_data['day']), user_data['appt_format'], service, sheets, title)
        await update_appointments()
        await callback.message.delete()
        # await callback.answer('Успешно')
    except Exception as e:
        logger.error(f"exception: {e} traceback: {traceback.format_exc()}")
@router.callback_query(F.data.startswith("cb_doctor_open_appt_exec"))
async def doctor_open_appt(callback: types.CallbackQuery, state: FSMContext):
    """Re-open the selected working day: drop the doctor's "closed"
    placeholders, keep genuine patient bookings, and mirror the result into
    the Google sheet."""
    user_data = await get_user_data(callback, state)
    try:
        user_data['spreadsheet_id'] = myvars.doctors[user_data['doctor']]['spreadsheet_id']
        service, sheets, title = await google_get_vars(user_data, callback)
        for hour in user_data['hours']:
            appt_date = datetime(year=int(user_data['year']), month=int(user_data['month']), day=int(user_data['day']),
                                 hour=hour)
            # Is there a booking for this slot that belongs to a real patient
            # (i.e. not the doctor's own placeholder row)?
            # SECURITY(review): SQL is built with f-strings — parameterize
            # if the helpers allow it.
            query = f"SELECT EXISTS (SELECT cid FROM tb_appointments " \
                    f"WHERE appt_date = '{appt_date}'::timestamp and " \
                    f"cid != (SELECT id FROM tb_customers WHERE tid = doctor_id))"
            print(query)
            res = pg_soc(query)[0]
            value = None
            if res:
                # Keep the patient booking but mark the slot open; the cell
                # value carries the patient's name and format for the sheet.
                query = "SELECT lastname, name, surname, appt_format FROM tb_customers as cu " \
                        "LEFT JOIN tb_appointments as ap ON cu.id = ap.cid " \
                        f"WHERE (appt_date = '{appt_date}'::timestamp)"
                rows = pg_select(query)
                for row in rows:
                    value = f"openedФИО: {row[0]} {row[1]} {row[2]}\n Формат: {row[3]}"
                query = f"UPDATE tb_appointments SET is_closed = 0 " \
                        f"WHERE appt_date = '{appt_date}'::timestamp and " \
                        f"cid != (SELECT id FROM tb_customers WHERE tid = doctor_id)"
                pg_execute(query)
            else:
                # No patient booking: delete the closed placeholder entirely.
                value = "opened"
                query = f"DELETE FROM tb_appointments " \
                        f"WHERE appt_date = '{appt_date}'::timestamp and is_closed=1"
                pg_execute(query)
            await callback.answer('БД обновлена. Обновляется Google таблица', cache_time=35)
            if DEBUG == 1:
                # Debug builds write into the test doctor's spreadsheet.
                await update_cell(myvars.doctors['Соболевский В.А.']['spreadsheet_id'], int(hour),
                                  int(user_data['month']), int(user_data['year']),
                                  int(user_data['day']),
                                  value, service, sheets, title)
            else:
                await update_cell(myvars.doctors[user_data['doctor']]['spreadsheet_id'], int(hour),
                                  int(user_data['month']), int(user_data['year']),
                                  int(user_data['day']), value, service, sheets, title)
        await update_appointments()
        await callback.message.delete()
        # await callback.answer('Успешно')
    except Exception as e:
        logger.error(f"exception: {e} traceback: {traceback.format_exc()}")
@router.callback_query(F.data == "cb_doctor_patients")
async def set_doctor(callback: types.CallbackQuery, state: FSMContext):
    """Start the patient-search flow: ask for a last-name fragment.

    NOTE(review): a later handler in this module reuses the name
    `set_doctor`; both still register because aiogram binds the function at
    decoration time, but the duplicate name hurts readability.
    """
    is_superuser = await check_permission(myvars.superuser, callback=callback)
    is_doctor = await check_permission(myvars.doctors, callback=callback)
    if is_superuser or is_doctor:
        await state.update_data(role="patient")
        await callback.message.delete()
        await callback.message.answer("Введите фамилию или её часть для поиска:")
        await callback.answer("")
        await state.set_state(InputData.lastname)
    else:
        # Not authorised: silently dismiss.
        await callback.message.delete()
        await callback.answer("")
@router.callback_query(F.data.startswith("cb_doctor_search_user_result_"))
async def manage_seleted_patient(callback: types.CallbackQuery, state: FSMContext):
    """Show the action card for the patient picked from the search results."""
    is_superuser = await check_permission(myvars.superuser, callback=callback)
    is_doctor = await check_permission(myvars.doctors, callback=callback)
    if is_superuser or is_doctor:
        # Callback data layout: cb_doctor_search_user_result_<role>_<tid>
        role = callback.data.split("_")[5]
        tid = int(callback.data.split("_")[6])
        await state.update_data(selected_user=tid)
        user_data = await state.get_data()
        await callback.message.delete()
        # `search_user` is expected to be populated by the search handler
        # (keyed by Telegram id) — assumption; confirm against that handler.
        text = f"<b>Пациент</b>\n" \
               f"━━━━━━━━━━━━━━━\n" \
               f"<b>{[tid]} {user_data['search_user'][tid]['lastname']} " \
               f"{user_data['search_user'][tid]['name']} {user_data['search_user'][tid]['surname']}</b>"
        await callback.message.answer(text=text, reply_markup=await get_kb_doctor_selected_patient())
        await callback.answer("")
    else:
        await callback.message.delete()
        await callback.answer("")
@router.callback_query(F.data == "cb_doctor_get_patient_files")
async def set_doctor(callback: types.CallbackQuery, state: FSMContext):
    """Send every stored file of the selected patient back to the doctor,
    newest first, then report completion.

    NOTE(review): the name shadows the earlier `set_doctor` handler in this
    module; both still register because aiogram binds at decoration time.
    """
    user_data = await state.get_data()
    tid = user_data['selected_user']
    # SECURITY(review): tid is interpolated into SQL; it is an int from our
    # own callback data, but prefer a parameterized query if pg_select
    # supports one.
    query = f"SELECT file_id, file_type " \
            f"FROM tb_files as f " \
            f"LEFT JOIN tb_customers as cu " \
            f"ON cu.id = f.cid " \
            f"WHERE cu.tid={tid} " \
            f"ORDER BY created DESC"
    rows = pg_select(query)
    for row in rows:
        file_id, file_type = row[0], row[1]
        if file_type == 'photo':
            await callback.message.answer_photo(photo=file_id)
        elif file_type == 'document':
            await callback.message.answer_document(document=file_id)
        elif file_type == 'audio':
            # Bug fix: the original `await callback` awaited the
            # CallbackQuery object itself and sent nothing; actually
            # deliver the audio file.
            await callback.message.answer_audio(audio=file_id)
    await callback.message.answer("Загрузка завершена")
    await callback.answer("Успешно")
{
"api_name": "aiogram.Router",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "aiogram.types.CallbackQuery",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "aiogram... |
30189967092 |
import pandas as pd
from catboost import CatBoostRegressor as cbr
from catboost import CatBoostClassifier as cbc
from sklearn.preprocessing import LabelEncoder as LE
from sklearn.preprocessing import OneHotEncoder
import random
from sklearn.metrics import f1_score
# Load the labelled training data and the unlabelled test data, give the
# test rows a dummy target, and stack them so the preprocessing below sees
# the full value range of every column.
training_dataset = pd.read_csv( r'train.csv')
test_dataset = pd.read_csv(r'test.csv')
test_dataset['is_promoted'] = [0]*len(test_dataset)
dataset = pd.concat([training_dataset, test_dataset], ignore_index = 1)
'''
dataset = pd.read_csv(r'train.csv')
'''
# Features exclude the first column (presumably employee_id — confirm) and
# the last (the is_promoted target).
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, -1].values
#Handling Missing values
# NOTE(review): both imputations below draw uniformly at random with NO
# seed, so every run produces a different dataset/submission.
# predict "education" column by regression
# "education" column
lst = []
foo = ["Bachelor's", "Master's & above","Below Secondary"]
#print(random.choice(foo))
for i in list(X[:, 2]):
    if str(i) == 'nan' :
        lst.append(random.choice(foo))
    else:
        lst.append(str(i))
X[:, 2] = lst
# predict "previous_year_rating" column by regression
# "previous_year_rating" column
lst = []
foo1 = [1,2,3,4,5]
#print(random.choice(foo))
# NOTE(review): imputed values stay int while existing ones are cast to
# str, leaving column 7 with mixed types — confirm CatBoost tolerates it.
for i in list(X[:, 7]):
    if str(i) == 'nan' :
        lst.append(random.choice(foo1))
    else:
        lst.append(str(i))
X[:, 7] = lst
'''
reg_dataset = pd.DataFrame(X).dropna()
X_reg_train = reg_dataset.iloc[:, [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11]].values
y_reg_train = reg_dataset.iloc[:, 7].values
#extract rows with NaN
rows_to_be_pred = pd.DataFrame(X)[pd.DataFrame(X)[7].isnull()]
#test data for regressor-rows for which values need to be predicted.
X_reg_test = rows_to_be_pred.iloc[:, [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11]].values
regressor = cbr(cat_features=[0, 1, 2, 3, 4])
regressor.fit(X_reg_train, y_reg_train)
y_pred=regressor.predict(X_reg_test)
y_pred2 = [round(i) for i in list(y_pred)]
lst = []
index = 0
for i in list(X[:, 7]):
if str(i) == 'nan':
lst.append(y_pred2[index])
index += 1
else: lst.append(i)
X[:, 7] = lst
'''
'''
for i in list(X[:, 7]):
if str(i) == 'nan' :
print('nan')
else:
print('not')'''
#encoding categorical data to numeric
'''
labelencoder = LE()
X[:, 0] = labelencoder.fit_transform(X[:, 0])
labelencoder1 = LE()
X[:, 1] = labelencoder1.fit_transform(X[:, 1])
labelencoder1 = LE()
X[:, 2] = labelencoder1.fit_transform(X[:, 2])
labelencoder3 = LE()
X[:, 3] = labelencoder3.fit_transform(X[:, 3])
labelencoder4 = LE()
X[:, 4] = labelencoder4.fit_transform(X[:, 4])
onehotencoder = OneHotEncoder(categorical_features = [-12])
X = onehotencoder.fit_transform(X).toarray()
onehotencoder1 = OneHotEncoder(categorical_features = [-11])
X = onehotencoder1.fit_transform(X).toarray()
onehotencoder2 = OneHotEncoder(categorical_features = [-10])
X = onehotencoder2.fit_transform(X).toarray()
onehotencoder3 = OneHotEncoder(categorical_features = [-9])
X = onehotencoder3.fit_transform(X).toarray()
onehotencoder4 = OneHotEncoder(categorical_features = [-8])
X = onehotencoder4.fit_transform(X).toarray()
'''
#splitting data to training and test data
# The first len(training_dataset) rows of the stacked frame are the
# labelled training rows; the remainder are the unlabelled test rows.
X_train = X[:len(training_dataset), :]
X_test = X[len(training_dataset):, :]
y_train = y[:len(training_dataset)]
'''
from sklearn.cross_validation import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0, stratify=y)
'''
#oversampling
# NOTE(review): despite the name, no duplication happens here — the *10
# replication of the positive class is commented out, so this only shuffles
# the training rows (shuffle has no random_state, so order varies per run).
train_df = pd.concat([pd.DataFrame(X_train), pd.DataFrame(y_train)], axis=1)
train_df.columns = list(range( len(train_df.columns) )) # list(range(13))
train_df__0 = train_df[train_df[len(train_df.columns)-1] == 0]
train_df__1 = train_df[train_df[len(train_df.columns)-1] == 1]
#oversampled_df = pd.concat([train_df__0 , pd.concat([train_df__1] * 10)], ignore_index=1)
oversampled_df = pd.concat([train_df__0, train_df__1], ignore_index=1)
from sklearn.utils import shuffle
oversampled_df = shuffle(oversampled_df)
X_train = oversampled_df.iloc[:, :-1].values
y_train = oversampled_df.iloc[:, -1].values
'''
#feature scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train.reshape(-1, 1))
'''
'''
#handling imbalanced data
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2)
X_train, y_train = sm.fit_sample(X_train, y_train.ravel())
'''
#Classifier
# scale_pos_weight compensates for the class imbalance instead of the
# (disabled) oversampling; the first five columns are categorical.
classifier = cbc( eval_metric='F1', scale_pos_weight = 3.1,cat_features=[0, 1, 2, 3, 4])
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
'''
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm, f1_score(y_test, y_pred))
'''
#output
# NOTE(review): assigning into a column-sliced frame may raise pandas'
# SettingWithCopyWarning — a .copy() on the slice would silence it.
output = test_dataset[['employee_id']]
output['is_promoted'] = list(y_pred)
output.to_csv(r'sample_submission.csv', sep=',', encoding='utf-8', index=False)
| DataScienceWorks/AV-WNS-2018-September-JobPromotionPrediction | others_solutions/pre-final/084r_S0umya_409_578825_cf_final_5ag0EsU.py | 084r_S0umya_409_578825_cf_final_5ag0EsU.py | py | 4,918 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "random.choice",
"li... |
23844910762 | from collections import deque
from contextlib import contextmanager
from string import Template
from textwrap import dedent
import dominate
from dominate.tags import *
from dominate.util import text
import arcade.examples
import pathlib
import math
import demos.lib02
import demos.movement
def main():
    """Fill the reveal.js page template with the generated slides, echo the
    rendered page to stdout and save it next to the template."""
    template_file = 'reveal/reveal.js-3.6.0/index.html.template'
    with open(template_file, encoding='utf-8') as fh:
        page_template = Template(fh.read())
    rendered = page_template.safe_substitute(
        slides='\n'.join(str(slide) for slide in slides)
    )
    print(rendered)
    with open('reveal/reveal.js-3.6.0/indexgen.html', 'w', encoding='utf-8') as fh:
        fh.write(rendered)
    print('Total slides: ', len(slides))
slides = []
def add_slide(f):
    """Decorator: run *f* inside a dominate <section> and append the
    rendered section to the module-level ``slides`` list."""
    @section
    def inner(*args, **kwargs):
        f(*args, **kwargs)
    slides.append(inner())
def add_slide_plain(f):
    """Same as add_slide — NOTE(review): currently an exact duplicate;
    presumably reserved for slides that should not pick up future styling
    changes. Confirm before removing."""
    @section
    def inner(*args, **kwargs):
        f(*args, **kwargs)
    slides.append(inner())
def add_slide50(f):
    """Like add_slide, but nudges the section 50px down via inline style."""
    @section(style='top: 50px')
    def inner(*args, **kwargs):
        f(*args, **kwargs)
    slides.append(inner())
def code_block(filename, lines=None, size='0.4em', highlights=None):
    """Render a syntax-highlighted source listing from demos/<filename>.

    lines      -- optional (first, last) 1-based inclusive range to show.
    size       -- CSS font-size for the <pre> wrapper.
    highlights -- optional list of substrings; each first occurrence is
                  wrapped in <mark>, applied sequentially left to right.
    """
    code_path = pathlib.Path('demos') / filename
    with pre(style=f'font-size: {size}'):
        with code(cls='hljs python', data_trim='true',
                  style='max-height: unset',
                  data_noescape='true'
                  ):
            with open(code_path) as f:
                data = f.readlines()
            if lines:
                # `- 0` kept for visual symmetry with the `- 1` start index.
                data = data[slice(lines[0] - 1, lines[1] - 0)]
            content = ''.join(data)
            if highlights:
                c = content
                for s in highlights:
                    # Split around each highlight and emit text/mark/rest.
                    a, b, c = c.partition(s)
                    text(a)
                    mark(b)
                text(c)
            else:
                text(''.join(data))
# Slide: title card with speaker handle and repo link.
@add_slide
def slide_title():
    h1('multiplayer 2D gaming')
    h2('with python-arcade')
    with p(style='font-size: 0.7em'):
        text('@caleb_hattingh ● ')
        # a('github.com/cjrh', href='github.com/cjrh')
        a('github.com/cjrh/pyconau2018-arcade2Dmultiplayer', href='github.com/cjrh/pyconau2018-arcade2Dmultiplayer')

# Slide: stated goal.
@add_slide
def slide_goals():
    h2('Goals')
    with p():
        text('Show you how to build a simple multiplayer game!')

# Slide: revised goal (previous goal struck through).
# NOTE(review): the function name intentionally repeats — add_slide captures
# each function at decoration time, so both slides are still registered.
@add_slide
def slide_goals():
    h2('Goals')
    with p():
        s('Show you how to build a simple multiplayer game!')
    div(style='margin-top: 30px;')
    p('Show you the building blocks')

# Slide: speaker bio with book covers.
@add_slide
def slide_about_me():
    h2('about me')
    with ul():
        with li():
            with p():
                text('Network automation at ')
                img(src='/img/pccwglobal.png', height=42, style='vertical-align:middle;')
        # li('"Network Automation" at PCCW Global')
        li("Books and videos at O'Reilly Safari:")
    with div():
        img(src='/img/cythoncover.jpg', height=250)
        img(src='/img/20libscover.png', height=250)
        with div(style='display: inline-block'):
            p('April 2018!', style='font-size: 0.6em; margin: 0')
            img(src='/img/aiocover.png', height=250)
    a('https://www.safaribooksonline.com/search/?query=caleb%20hattingh',
      href='https://www.safaribooksonline.com/search/?query=caleb%20hattingh',
      style='font-size: 0.6em;')

# Slide: installing python-arcade on Linux/macOS and Windows.
@add_slide
def slide_python_arcade():
    h2('Python-Arcade')
    with p():
        text('created by Paul Craven ●')
        a('http://arcade.academy', href='http://arcade.academy')
    with div(style='display: flex; margin: auto'):
        img(data_src='img/penguin.png', width=48, height=48)
        with pre():
            with code(cls='hljs bash', data_trim=True,
                      contenteditable=True):
                text('''\
(venv) $ pip install arcade
''')
    with div(style='display: flex; margin: auto'):
        img(data_src='img/windows_icon.png', width=48, height=48)
        with pre():
            with code(cls='hljs cmd', data_trim=True,
                      contenteditable=True):
                text('''\
(venv) C:\mygame> pip install arcade
''')

# Slide: screenshot of the arcade documentation site.
@add_slide
def slide_arcade_docs():
    img(src='/img/arcade_doc_screenshot.png')

# Slide: bullet list of reasons to pick arcade.
@add_slide
def slide_why_arcade():
    h2('Why Python-Arcade?')
    with ul():
        li('Easy to install')
        li('OpenGL (via Pyglet)')
        li('Modern (Python 3 only, type annotations)')
        with li():
            code('(0, 0)')
            text(' is at the bottom-left')
        li('Very clean, simple API')
        with li(cls='fragment'):
            strong('Examples 🎁🎁🎁')
@add_slide
def slide_lots_of_examples():
with h2():
u('lots')
text(' of examples')
examples_path = pathlib.Path(arcade.examples.__file__)
examples_dir = examples_path.parent
examples = deque(sorted(
f.name for f in filter(
lambda x: x.suffix == '.py' and x.name != '__init__.py',
list(examples_dir.iterdir()))
))
columns = 3
column_height = math.ceil(len(examples) / columns)
with div(cls='container', style='font-size: 0.3em;'):
for i in range(columns):
with div(cls='col'):
for j in range(column_height):
if examples:
p(examples.popleft())
@contextmanager
def code_bullet(btn_text='', cmd=''):
    """Context manager yielding inside an <li><pre><code> shell.

    The caller writes the snippet text in the ``with`` body; if *btn_text*
    is given, a "run program" button wired to *cmd* is added after the
    snippet.
    """
    with li():
        with pre():
            with code(cls='hljs bash', data_trim='true',
                      contenteditable='true', data_noescape='true'):
                yield
        if btn_text:
            button(
                btn_text,
                cls='runprogram',
                cmd=cmd)
@add_slide
def slide_run_examples():
    # Runnable bullets: each example gets a button launching the
    # corresponding arcade example module.
    h2('lots of examples')
    exnames = ['bouncing_ball', 'sprite_collect_coins',
               'sprite_move_keyboard']
    with ul():
        for example in exnames:
            cmd = f'arcade.examples.{example}'
            with code_bullet(btn_text=example, cmd=cmd):
                text('(venv) $ python -m arcade.examples.')
                mark(example)


@add_slide
def slide_get_started_intro():
    # Section divider slide.
    h2('Intro: Python-Arcade')


@add_slide50
def slide_getting_started():
    # Walk through getting_started.py, highlighting the window's
    # event-handler methods.
    h2('Getting started')
    code_block('getting_started.py',
               highlights=[
                   'def update',
                   'def on_draw',
                   'def on_key_press',
                   'def on_key_release',
               ])
    button('getting_started.py', cls='runprogram', cmd='demos.getting_started')


@add_slide50
def slide_getting_started():
    # Same file, second pass: highlight the key-state bookkeeping instead.
    # (Duplicate name is fine — registration happens via the decorator.)
    h2('Getting started')
    code_block('getting_started.py',
               highlights=[
                   'self.keys_pressed',
                   'apply_movement',
                   'self.keys_pressed.keys[key] = True',
                   'self.keys_pressed.keys[key] = False',
               ])
    button('getting_started.py', cls='runprogram', cmd='demos.getting_started')
@add_slide
def slide_keyspressed():
    # Show the movement helper, highlighting the kinematics expression.
    h2('"Movement" utils')
    code_block('movement.py',
               size='0.5em',
               highlights=[
                   'current_position + delta_position * speed * dt'
               ])


@add_slide
def slide_tip_vector():
    # Tip: reuse pymunk's 2D vector class instead of rolling your own.
    h2('🎁tip #1: use a vector class')
    with p():
        text('Use the one in ')
        strong('pymunk')
    with pre():
        with code(cls='hljs python', data_trim='true', contenteditable='true'):
            text(dedent('''\
                >>> from pymunk.vec2d import Vec2d
                >>> pixel = Vec2d(3, 4)
                >>> pixel.x
                3
                >>> pixel.y
                4
                >>>
                >>> pixel + 2 * pixel
                Vec2d(9, 12)
                >>> pixel.length
                5.0
                >>> pixel.length = 1
                >>> pixel
                Vec2d(0.6, 0.8)
                '''))


@add_slide
def slide_keyspressed():
    # Second movement slide: highlight vector normalization.
    h2('"Movement" utils')
    import inspect
    # code_text = inspect.getsource(demos.movement)
    code_block('movement.py',
               size='0.5em',
               highlights=['.normalized()'])
    # with pre(style='font-size: 0.4em'):
    #     with code(cls='hljs python', data_trim='true',
    #               style='max-height: unset'):
    #         text(code_text)
    button('Normalize your movement vector!',
           cls='runprogram', cmd='demos.getting_started_norm')
# @add_slide
def slide_lag_compensation():
    # Disabled slide (decorator commented out): collected reading on lag
    # compensation and netcode.
    p(dedent('''\
        Good discussion about lag compensation
        https://www.reddit.com/r/Overwatch/comments/3u5kfg/everything_you_need_to_know_about_tick_rate/
        https://en.wikipedia.org/wiki/Netcode
        https://www.pcgamer.com/netcode-explained/
        Some python code here:
        https://www.gamedev.net/forums/topic/652377-network-tick-rates/
        Book on safari:
        https://www.safaribooksonline.com/library/view/fundamentals-of-network/9781584505570/ch01.html
        '''))


# Full-bleed background-image slides (no content; add_slide_plain runs the
# function for its dominate side effects — the return value is not used).
@add_slide_plain
def slide_shall():
    return section(data_background_image='/img/shallplaygame.jpg')


@add_slide_plain
def slide_fortnite():
    return section(data_background_image='/img/fortnite3.jpg')


@add_slide_plain
def slide_sc2():
    return section(data_background_image='/img/starcraft2.jpg')


@add_slide_plain
def slide_awesomenauts():
    return section(data_background_image='/img/awesomenautsplay.jpg')
@add_slide
def slide_network_models():
    # Two nested <section>s (vertical sub-slides in reveal.js): (1) the
    # three common network architectures with example games, revealed one
    # fragment at a time; (2) reference links.
    h2('Network models')
    with section():
        with ol():
            with li(cls='fragment'):
                strong('Client-server: ')
                text('clients only "capture inputs"')
                with ul(style='font-size: 0.7em;'):
                    examples = ['Fortnite', 'Quake', 'Unreal Tournament', 'Overwatch']
                    for ex in examples:
                        li(ex)
            with li(cls='fragment'):
                strong('Peer-to-peer (lockstep): ')
                text('synced sim on each client')
                with ul(style='font-size: 0.7em;'):
                    examples = ['Command & Conquer', 'Age of Empires',
                                'StarCraft', 'Supreme Commander 2']
                    for ex in examples:
                        li(ex)
            with li(cls='fragment'):
                strong('Peer-to-peer: ')
                text('each client calculates self')
                with ul(style='font-size: 0.7em;'):
                    examples = ['Awesomenauts']
                    for ex in examples:
                        li(ex)
    with section():
        links = [
            a('What every programmer needs to know about game networking - Glenn Fiedler',
              href='https://gafferongames.com/post/what_every_programmer_needs_to_know_about_game_networking/'),
            a('Core network structures for games - Joost van Dongen',
              href='http://joostdevblog.blogspot.com/2014/09/core-network-structures-for-games.html'),
            a('Source Multiplayer Networking - Valve',
              href='https://developer.valvesoftware.com/wiki/Source_Multiplayer_Networking'),
        ]
        p('References', style='font-size: 0.6em')
        for link in links:
            p(link, style='font-size: 0.6em')


@add_slide
def slide_big_picture():
    # Architecture diagram for the client-server design.
    h2('Client-server: The Big Picture')
    img(src='/img/server-based-network.svg', width=500)
@add_slide
def slide_basic_networking():
    # Overview of the three communication topics covered next.
    h2('Communication')
    with ol():
        li('Player inputs: client 🠊 server')
        li('Game state: client 🠈 server')
        li('TCP versus UDP')


@add_slide
def player_inputs():
    # Topic 1: send raw player *inputs*, not derived speed/position.
    h2('1. Player input state')
    p('send client 🠊 server')
    with ul():
        with li():
            b('inputs ')
            text('not "speed" or "position"')
        li('Use dataclasses:')
    code_block('player_event.py', highlights=['class PlayerEvent'])


@add_slide
def game_state():
    # Topic 2: the game-state payload pushed from server to clients.
    h2('2. Game state')
    p('send server 🠊 client')
    code_block('game_state.py', highlights=[
        'x: float = 0',
        'y: float = 0',
        'class GameState'
    ])


@add_slide
def slide_tcp_udp():
    # Topic 3: why games often prefer UDP over TCP.
    h2('3. TCP vs UDP')
    with ul():
        with li(cls='fragment'):
            text('Problem: TCP is ')
            strong('too ')
            text('reliable')
            with ul():
                li('Dropped packets causes latency (bad!)')
                li("Sometimes packet loss is ok")
        with li(cls='fragment'):
            text('UDP chosen for ')
            u('control')
            text(' (not merely speed)')
            with ul():
                li('Can choose when to allow packet loss...')
                li("BUT: it's much more work")
    # TODO: maybe an image showing how one lost packet causes a bunch of
    # others to wait.
@add_slide
def slide_zmq_excuse():
    # Scope disclaimer: UDP is skipped, TCP + ZeroMQ used for simplicity.
    with p():
        text("No time for UDP!")
    img(src='/img/greenninja.jpg')
    with p():
        text("Instead, we'll just show TCP (and ZeroMQ) for simplicity.")


@add_slide
def slide_zmq():
    # ZeroMQ socket patterns used by the demo: PUSH/PULL for player
    # inputs, PUB/SUB for broadcasting game state.
    h2('Brief intro to ZeroMQ')
    p('Thin abstraction over TCP ● "magic" sockets')
    with ul():
        with li(cls='fragment'):
            text('Handling player inputs')
            with ul():
                with li():
                    b('PUSH + PULL ')
                    text('sockets')
                li('all clients push to a single server')
        with li(cls='fragment'):
            text('Handling game state')
            with ul():
                with li():
                    b('PUB + SUB ')
                    text('sockets')
                li('Server pushes to all clients')
@add_slide
def slide_zmq_demo():
    # Side-by-side client and server ZMQ snippets.  NOTE: the snippet text
    # is slide *content*, rendered verbatim — it is never executed here.
    h2('ZMQ client & server')
    with div(cls='container'):
        with div(cls='col', style='padding: 10px'):
            h3('Client (player)')
            with pre():
                with code(cls='hljs python', data_trim='true', contenteditable='true'):
                    text(dedent('''\
                        from asyncio import run, Queue
                        import zmq
                        from zmq.asyncio import Context, Socket
                        async def zmq_push(q: Queue):
                            ctx = Context()
                            sock = ctx.socket(zmq.PUSH)
                            sock.connect('127.0.0.1', 9999)
                            while True:
                                payload: Dict = await q.get()
                                await sock.send_json(payload)
                            ctx.destroy()
                        '''))
        with div(cls='col', style='padding: 10px'):
            h3('Server')
            with pre():
                with code(cls='hljs python', data_trim='true', contenteditable='true'):
                    text(dedent('''\
                        from asyncio import run, Queue
                        import zmq
                        from zmq.asyncio import Context, Socket
                        async def zmq_pull(q: Queue):
                            ctx = Context()
                            sock = ctx.socket(zmq.PULL)
                            sock.bind('127.0.0.1', 9999)
                            while True:
                                payload = await sock.recv_json()
                                await q.put()
                            ctx.destroy()
                        '''))
@add_slide
def slide_strategy():
    # Two-column design overview: client tasks vs server tasks, each item
    # revealed as a fragment.
    h2('Client-server: Design')

    def lip(text=None):
        # Local helper: a small-font fragment <li>, optionally with text.
        if text:
            return li(text, cls='fragment', style='font-size: 0.7em')
        else:
            return li(cls='fragment', style='font-size: 0.7em')

    with div(cls='container'):
        with div(cls='col', style='padding: 10px'):
            h3('Client')
            with ol():
                lip('client connects to server')
                with lip():
                    strong('Task A: ')
                    text('send player input (keyboard, mouse) '
                         'e.g. 30 Hz')
                with lip():
                    strong('Task B: ')
                    text('receive game state (position, health) '
                         'from server')
                with lip():
                    strong('Task C: ')
                    text('draw game state on screen')
        with div(cls='col', style='padding: 10px'):
            h3('Server')
            with ol():
                with lip():
                    strong('Task A: ')
                    text('accept client connections')
                with lip():
                    strong('Task B: ')
                    with ol():
                        li('receive player input')
                        li('update game state')
                with lip():
                    strong('Task C: ')
                    text('send game state to clients, e.g. 15 Hz')
    p('Each of the internal tasks runs independently.',
      cls='fragment')
@add_slide
def begin_with_server():
    # Transition slide into the server implementation.
    h4("Let's begin with the server")
    p("(It's easier)")


@add_slide
def server_code_main():
    # Server walkthrough 1/2: the main() wiring of tasks A/B/C.
    h3("Server code - main (1/2)")
    code_block('server03.py', lines=[44, 67], size='0.5em',
               highlights=[
                   'Task A',
                   'task_B',
                   'task_C'
               ])


@add_slide
def server_code_tasks():
    # Server walkthrough 2/2: the task implementations.
    h3("Server code - task detail (2/2)")
    code_block('server03.py', lines=[15, 42], highlights=['TASK B', 'TASK C'])


# Client code
@add_slide
def client_code1():
    # Why the client needs both arcade's game loop and an asyncio IO loop,
    # and why the asyncio loop runs in a separate thread.
    h2('Client code needs TWO loops!')
    with ul():
        li('python-arcade: game loop')
        li('asyncio: IO loop')
        with li():
            b('Cannot ')
            text('run both loops in same thread')
    p('Least-effort solution: run asyncio loop in a thread')
    # mention - because of asyncio, only need 1 extra thread, whereas
    # blocking sockets would need more.


@add_slide
def client_code_whole():
    # Client walkthrough 1/3: main entry point.
    h3('Client code - main (1/3)')
    code_block('client03.py', size='0.5em', lines=[131, 146],
               highlights=['iomain', 'MyGame']
               )


@add_slide
def client_code_gross():
    # Reaction-gif slide.
    img(src='/img/gross.gif')
@add_slide
def client_code_io():
    # Client walkthrough 2/3: the iomain thread.
    h3('Client code - iomain thread (2/3)')
    # should also use the "empty" version here
    # rename window.t to window.time_since_state_update
    code_block('client03-empty.py', size='0.4em', lines=[61, 92],
               highlights=['iomain', 'window.player_input',
                           'window.game_state.from_json',
                           'window.player.position'])


@add_slide
def client_code_game():
    # Client walkthrough 3/3: the game object.
    h3('Client code - game object (3/3)')
    # Need a version without any prediction!
    # And then later also need a version WITH prediction.
    code_block('client03-empty.py', size='0.4em', lines=[27, 60],
               highlights=['MyGame', 'pass'])


@add_slide
def client_code_interp():
    # Live demo: launch server and client builds.
    h2('Success demo!')
    button('server', cls='runprogram', cmd='demos.server02')
    button('client', cls='runprogram', cmd='demos.client02b')


@add_slide
def end_slide01():
    # Fake ending (the asterisk foreshadows the lag problem).
    p('End*')


@add_slide
def client_not_success():
    h5("(Not success - it's laggy)!")


@add_slide
def client_code_prediction():
    # Diagram motivating sprite interpolation between state updates.
    h2('Sprite interpolation')
    img(src='/img/prevlatestupdate.svg', width='80%')
@add_slide
def slide_client_interpolation():
    # MathJax slide: derive velocity from the last two updates, then show
    # the same relation rewritten with the unknown future sample in red.
    h3('Client-side extrapolation')
    with p():
        text('Need to understand ')
        u('motion')
        text(', i.e., speed')
    with script(type='math/tex; mode=display'):
        text(dedent(r'''
            v_x = \frac{x_1 - x_0}{t_1 - t_0} \qquad v_y = \frac{y_1 - y_0}{t_1 - t_0}
            '''))
    with p(cls='fragment'):
        text('This describes the past—what about the future?')
    with div(cls='fragment'):
        with script(cls='fragment', type='math/tex; mode=display'):
            text(dedent(r'''
                v_x = \frac{\color{red}{x_2} - x_1}
                {\color{red}{t_2} - t_1} \qquad v_y =
                \frac{\color{red}{y_2} - y_1}{\color{red}{t_2} - t_1}
                '''))
@add_slide
def slide_client_interpolation_2():
    # Continuation: restate the velocity equations, then solve for the
    # predicted coordinates x2/y2.
    h3('Client-side extrapolation')
    # BUG FIX: the script type was 'math/text; mode=display', which MathJax
    # does not recognise, so this equation never rendered.  Every sibling
    # slide uses 'math/tex; mode=display'.
    with script(type='math/tex; mode=display'):
        text(dedent(r'''
            v_x = \frac{\color{red}{x_2} - x_1}
            {\color{red}{t_2} - t_1} \qquad v_y =
            \frac{\color{red}{y_2} - y_1}{\color{red}{t_2} - t_1}
            '''))
    with p(cls='fragment'):
        text('Make the predicted values explicit:')
    with div(cls='fragment'):
        with script(cls='fragment', type='math/tex; mode=display'):
            text(dedent(r'''
                \color{red}{x_2} = v_x \times \Delta t + x_1
                \qquad
                \color{red}{y_2} = v_y \times \Delta t + y_1
                '''))
@add_slide
def slide_client_interpolation_3():
    # Summary of the extrapolation/interpolation recipe.
    h3('Client-side extrapolation')
    with ul():
        li('Store last 2 server updates')
        li('Calculate predicted future update (extrapolation)')
        li('DRAW interpolated positions between "now" and the predicted '
           'future position.')


@add_slide
def slide_client_interp_codeA():
    # Extrapolation code, receive side: buffering server updates.
    h3('Client-side extrapolation')
    code_block('client02d.py', size='0.5em',
               lines=[145, 155],
               highlights=[
                   'window.position_buffer.append',
                   'window.t',
                   'window.player_position_snapshot'
               ])


@add_slide
def slide_client_interp_codeB():
    # Extrapolation code, draw side: computing the predicted position.
    h3('Client-side extrapolation')
    code_block('client02d.py', size='0.5em',
               lines=[89, 108],
               highlights=[
                   'predicted_position',
                   'x = (self.t - 0) / (t1 - t0)',
               ])


@add_slide
def client_code_demo_4_updates_per_second():
    # Final demos: prediction + interpolation at 2 Hz and 10 Hz updates.
    h2('Success demo attempt #2!')
    p('Prediction + interpolation (2 Hz server updates)')
    button('server', cls='runprogram', cmd='demos.server02c')
    button('client', cls='runprogram', cmd='demos.client02c')
    p('Prediction + interpolation (10 Hz server updates)')
    button('server', cls='runprogram', cmd='demos.server02')
    button('client', cls='runprogram', cmd='demos.client02')


@add_slide
def fin():
    p('The end!')
if __name__ == '__main__':
    # Entry point — main() (defined earlier in this module, outside this
    # excerpt) presumably renders the collected ``slides``; confirm against
    # the full file.
    main()
| cjrh/pyconau2018-arcade2Dmultiplayer | generate_slides.py | generate_slides.py | py | 22,383 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "string.Template",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "dominate.util.text",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "dominate.util.text",
... |
74014407074 | #-*- coding: UTF-8 -*-
import flask, pandas
from flask import render_template, url_for
# Load the parsed EUR/USD quotes once at import time.  Forward slashes make
# the relative path portable: the original Windows-style path used raw
# backslashes, which both fail on Linux/macOS and contain invalid escape
# sequences (the '\d' and '\p' in the literal).
data = pandas.read_csv('static/data/parsed_EURUSD_y.csv', delimiter=';')
# Most recent "is the quote actual" flag and closing price; every route
# below passes these two strings to its template.
act = data.is_actual.tail(1).to_string(index=False)
cls = data.last_CLOSE.tail(1).to_string(index=False)

app = flask.Flask(__name__)
@app.route('/')
@app.route('/index.html')
def index():
    # Log the resolved URL, then render the page with the shared quote data.
    print(url_for('index'))
    return render_template('index.html', act=act, cls=cls)


@app.route('/month.html')
def month():
    # Monthly chart page; same shared quote data.
    print(url_for('month'))
    return render_template('month.html', act=act, cls=cls)


@app.route('/week.html')
def week():
    # Weekly chart page; same shared quote data.
    print(url_for('week'))
    return render_template('week.html', act=act, cls=cls)


@app.route('/patterns.html')
def patterns():
    # Patterns page; same shared quote data.
    print(url_for('patterns'))
    return render_template('patterns.html', act=act, cls=cls)


if __name__ == "__main__":
    # Run the Flask development server (not suitable for production).
    app.run()
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"... |
40709061375 |
"""
Disclaimer
This software was developed by employees of the National Institute of Standards and Technology (NIST), an agency of the Federal Government and is being made available as a public service. Pursuant to title 17 United States Code Section 105, works of NIST employees are not subject to copyright protection in the United States. This software may be subject to foreign copyright. Permission in the United States and in foreign countries, to the extent that NIST may hold copyright, to use, copy, modify, create derivative works, and distribute this software and its documentation without fee is hereby granted on a non-exclusive basis, provided that this notice and disclaimer of warranty appears in all copies.
THE SOFTWARE IS PROVIDED 'AS IS' WITHOUT ANY WARRANTY OF ANY KIND, EITHER EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTY THAT THE SOFTWARE WILL CONFORM TO SPECIFICATIONS, ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND FREEDOM FROM INFRINGEMENT, AND ANY WARRANTY THAT THE DOCUMENTATION WILL CONFORM TO THE SOFTWARE, OR ANY WARRANTY THAT THE SOFTWARE WILL BE ERROR FREE. IN NO EVENT SHALL NIST BE LIABLE FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO, DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES, ARISING OUT OF, RESULTING FROM, OR IN ANY WAY CONNECTED WITH THIS SOFTWARE, WHETHER OR NOT BASED UPON WARRANTY, CONTRACT, TORT, OR OTHERWISE, WHETHER OR NOT INJURY WAS SUSTAINED BY PERSONS OR PROPERTY OR OTHERWISE, AND WHETHER OR NOT LOSS WAS SUSTAINED FROM, OR AROSE OUT OF THE RESULTS OF, OR USE OF, THE SOFTWARE OR SERVICES PROVIDED HEREUNDER.
"""
__author__ = "Tim Blattner"
__copyright__ = "Copyright 2020, The IARPA funded TrojAI project"
__credits__ = ["Peter Bajcsy", "Michael Majurski", "Tim Blattner", "Derek Juba", "Walid Keyrouz"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Peter Bajcsy"
__email__ = "peter.bajcsy@nist.gov"
__status__ = "Research"
import torch
from skimage import io, transform
from itertools import repeat
from nltk.corpus import wordnet
import random
"""
This class supports creating datasets in PyTorch
The code was adopted from https://stanford.edu/~shervine/blog/pytorch-how-to-generate-data-parallel
"""
class extended_dataset_ner(torch.utils.data.Dataset):
    'Characterizes a dataset for PyTorch'
    #def __init__(self, list_IDs, labels):
    def __init__(self, list_filenames, tokenizer, max_input_length, num_iterations=1):
        # Eagerly parse every example file and cache tokenized inputs,
        # attention masks, aligned labels and label masks as parallel
        # Python lists.  Tensor conversion / device placement is deferred
        # to getarrayitem(), which is called per sample.
        #device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        input_ids = []
        attention_mask = []
        labels = []
        labels_mask = []
        original_words = []
        num_samples = 0
        for fn in list_filenames:
            # For this example we parse the raw txt file to demonstrate tokenization.
            if fn.endswith('_tokenized.txt'):
                continue
            # load the example: each line is tab-separated, with the word
            # in column 0 and its integer label in column 2
            _original_words = []
            original_labels = []
            with open(fn, 'r') as fh:
                lines = fh.readlines()
                for line in lines:
                    split_line = line.split('\t')
                    word = split_line[0].strip()
                    label = split_line[2].strip()
                    _original_words.append(word)
                    original_labels.append(int(label))
            # Select your preference for tokenization
            #input_ids, attention_mask, labels, labels_mask = tokenize_and_align_labels(tokenizer, original_words, original_labels, max_input_length)
            # input_ids, attention_mask, labels, labels_mask = manual_tokenize_and_align_labels(tokenizer, original_words,
            #                                                                                  original_labels,
            #                                                                                  max_input_length)
            _input_ids, _attention_mask, _labels, _labels_mask = manual_tokenize_and_align_labels(tokenizer, _original_words,
                                                                                                 original_labels,
                                                                                                 max_input_length)
            input_ids.append(_input_ids)
            attention_mask.append(_attention_mask)
            labels.append(_labels)
            labels_mask.append(_labels_mask)
            original_words.append((_original_words))
            num_samples += 1
        ###############################################
        # input_ids = torch.as_tensor(input_ids)
        # attention_mask = torch.as_tensor(attention_mask)
        # labels_tensor = torch.as_tensor(labels)
        #
        # if device != 'cpu':
        #     input_ids = input_ids.to(device)
        #     attention_mask = attention_mask.to(device)
        #     labels_tensor = labels_tensor.to(device)
        #
        # # Create just a single batch
        # input_ids = torch.unsqueeze(input_ids, axis=0)
        # attention_mask = torch.unsqueeze(attention_mask, axis=0)
        # labels_tensor = torch.unsqueeze(labels_tensor, axis=0)
        #############################################################
        'Initialization'
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        #self.labels_tensor = labels_tensor
        self.original_words = original_words
        self.labels_mask = labels_mask
        self.labels = labels
        self.num_samples = num_samples
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length
        self.num_iterations = num_iterations
        print('Total length = {}'.format(len(self.input_ids)))

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.input_ids)

    def getarrayitem(self, index, device):
        # Convert the cached lists for one sample into tensors on
        # ``device`` and add a leading batch dimension of size 1.
        # NOTE(review): an out-of-range index only *prints* here and does
        # not raise explicitly (a too-large index will still fail on the
        # list access below; a negative one silently wraps around) —
        # consider raising IndexError instead.
        if index < 0 or index >= self.num_samples:
            print("ERROR: index is out of range:", index, " range=[", 0, ", ", self.num_samples)
        input_ids = torch.as_tensor(self.input_ids[index])
        attention_mask = torch.as_tensor(self.attention_mask[index])
        labels_tensor = torch.as_tensor(self.labels[index])
        if device != 'cpu':
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            labels_tensor = labels_tensor.to(device)
        # Create just a single batch
        input_ids = torch.unsqueeze(input_ids, axis=0)
        attention_mask = torch.unsqueeze(attention_mask, axis=0)
        labels_tensor = torch.unsqueeze(labels_tensor, axis=0)
        # Side effect: the most recently converted labels tensor is also
        # kept on the instance as self.labels_tensor.
        self.labels_tensor = labels_tensor
        return input_ids, attention_mask, self.labels[index], self.labels_mask[index], self.labels_tensor, self.original_words[index]
# Adapted from: https://github.com/huggingface/transformers/blob/2d27900b5d74a84b4c6b95950fd26c9d794b2d57/examples/pytorch/token-classification/run_ner.py#L318
def tokenize_and_align_labels(tokenizer, original_words, original_labels, max_input_length):
    """Tokenize pre-split words and align per-word labels to sub-tokens.

    Only the first sub-token of each word keeps that word's label (mask 1);
    special tokens and continuation sub-tokens get label -100 — the
    ignore_index of torch.nn.CrossEntropyLoss
    (https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html)
    — and mask 0.  Requires a 'fast' tokenizer, because the alignment
    relies on ``word_ids()``.

    Args:
        tokenizer: HuggingFace fast tokenizer (called directly).
        original_words: list of words, already split.
        original_labels: per-word integer labels, parallel to the words.
        max_input_length: truncation length passed through to the tokenizer.

    Returns:
        (input_ids, attention_mask, labels, label_mask) — four parallel lists.
    """
    tokenized_inputs = tokenizer(original_words, padding=True, truncation=True, is_split_into_words=True,
                                 max_length=max_input_length)
    labels = []
    label_mask = []
    word_ids = tokenized_inputs.word_ids()
    previous_word_idx = None
    for word_idx in word_ids:
        if word_idx is None:
            # Special token ([CLS]/[SEP]/padding): ignored by the loss.
            labels.append(-100)
            label_mask.append(0)
        elif word_idx != previous_word_idx:
            # First sub-token of a word carries the word's label.
            labels.append(original_labels[word_idx])
            label_mask.append(1)
        else:
            # Continuation sub-token of the same word: ignored.
            labels.append(-100)
            label_mask.append(0)
        previous_word_idx = word_idx
    return tokenized_inputs['input_ids'], tokenized_inputs['attention_mask'], labels, label_mask
# Tokenization fallback that does not require a 'fast' tokenizer; this is
# similar to the scheme used in trojai.
def manual_tokenize_and_align_labels(tokenizer, original_words, original_labels, max_input_length):
    """Tokenize word-by-word and align per-word labels to sub-tokens.

    Only the first sub-token of every word keeps that word's label (mask 1);
    the [CLS]/[SEP] specials and all continuation sub-tokens get label -100
    — the ignore_index of torch.nn.CrossEntropyLoss
    (https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html)
    — and mask 0.  The sequence is clipped so that, including the trailing
    separator, it never exceeds ``max_input_length``.

    Returns:
        (input_ids, attention_mask, labels, label_mask) — four parallel lists.
    """
    # Start every sequence with the classifier token.
    tokens = [tokenizer.cls_token]
    attention_mask = [1]
    labels = [-100]
    label_mask = [0]

    for word_index, word in enumerate(original_words):
        pieces = tokenizer.tokenize(word)
        tokens.extend(pieces)
        word_label = original_labels[word_index]
        for piece_index in range(len(pieces)):
            attention_mask.append(1)
            # All transformers used here are bidirectional, so the first
            # sub-token is the one that carries the label.
            if piece_index == 0:
                labels.append(word_label)
                label_mask.append(1)
            else:
                labels.append(-100)
                label_mask.append(0)

    # Keep one slot free for the trailing separator token.
    limit = max_input_length - 1
    if len(tokens) > limit:
        tokens = tokens[:limit]
        attention_mask = attention_mask[:limit]
        labels = labels[:limit]
        label_mask = label_mask[:limit]

    tokens.append(tokenizer.sep_token)
    attention_mask.append(1)
    labels.append(-100)
    label_mask.append(0)

    return tokenizer.convert_tokens_to_ids(tokens), attention_mask, labels, label_mask
| usnistgov/trojai-baseline-pruning | extended_dataset_ner.py | extended_dataset_ner.py | py | 10,286 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "torch.utils",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "torch.as_tensor",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.as_tensor",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.as_tensor",
... |
73010306594 | from flask import Flask
from config.settings import config
from api.app1.api import blueprint as bp_app1
from api.app2.api import blueprint as bp_app2
class Application:
    """Wrapper that assembles, configures and runs the Flask application."""

    def __init__(self):
        # Build the Flask app and wire up configuration + blueprints.
        self.app = Flask(__name__)
        self.create()

    def __str__(self):
        # BUG FIX: this was a bare ``pass``, which returns None, so
        # str(instance) raised "TypeError: __str__ returned non-string".
        return f'{type(self).__name__}()'

    def message(self):
        """
        Startup message
        """
        print("""
 _____ ______ _____ _______ _____ _____
| __ \| ____|/ ____|__ __| /\ | __ \_ _|
| |__) | |__ | (___ | |______ / \ | |__) || |
| _ /| __| \___ \ | |______/ /\ \ | ___/ | |
| | \ \| |____ ____) | | | / ____ \| | _| |_
|_| \_\______|_____/ |_| /_/ \_\_| |_____|
""")

    def config(self):
        """
        Configure all the parameters required by Flask App
        """
        # ``config`` below refers to the settings dict imported from
        # config.settings at module level — the method name does not shadow
        # it (the method is a class attribute, the dict a module global).
        self.app.config['SWAGGER_UI_DOC_EXPANSION'] = config['RESTX']['RESTPLUS_SWAGGER_UI_DOC_EXPANSION']
        self.app.config['RESTPLUS_VALIDATE'] = config['RESTX']['RESTPLUS_VALIDATE']
        self.app.config['RESTPLUS_MASK_SWAGGER'] = config['RESTX']['RESTPLUS_MASK_SWAGGER']
        self.app.config['ERROR_404_HELP'] = config['RESTX']['RESTPLUS_ERROR_404_HELP']

    def create(self):
        """
        Flask app bootstrap
        """
        self.config()
        self.app.register_blueprint(bp_app1)
        self.app.register_blueprint(bp_app2)

    def run(self):
        # Print the banner, then start the development server with
        # host/port/debug taken from the settings dict.
        self.message()
        self.app.run(host=config['FLASK']['HOSTNAME'], port=config['FLASK']['PORT'], debug=config['FLASK']['DEBUG'])
| joagonzalez/rest-api-seed | src/application.py | application.py | py | 1,541 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "config.settings.config",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "config.settings.config",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "config.sett... |
41649853983 | import sys
import librosa
from mir_eval.onset import f_measure
import numpy as np
import matplotlib.pyplot as plt
import math
from tensor_hero.preprocessing.audio import compute_mel_spectrogram_from_audio, filter_spec_by_amplitude
# Suppress all warnings unless the user explicitly requested them on the
# command line (-W ... / PYTHONWARNINGS), in which case sys.warnoptions is
# non-empty and the user's settings are left alone.
if not sys.warnoptions:
    import warnings
    warnings.simplefilter("ignore")
def ninos(audio, sr, spec=None, gamma=0.94):
    '''Calculates Normalized Identifying Note Onsets based on Spectral Sparsity (NINOS)
    over time for audio.

    Implementation as described in
    https://www.eurasip.org/Proceedings/Eusipco/Eusipco2016/papers/1570256369.pdf

    Each time bin in the returned frame corresponds to approx 4.6ms of audio data.

    Args:
        audio (1D numpy array): Raw audio samples (unused when ``spec`` is given)
        sr (int): sample rate, must be 22050 or 44100
        spec (2D numpy array, optional): precomputed magnitude spectrogram
            (frequency x time); computed from ``audio`` when None
        gamma (float in (0,1]): Proportion of frequency bins to keep

    Returns:
        odf (1D numpy array): Normalized inverse-sparsity measure per frame
        J (int): Number of retained frequency bins
        hop_length (int): Hop length used to compute the spectrogram

    Raises:
        ValueError: if ``sr`` is not one of the supported rates.
    '''
    # Spectrogram parameters: hop is ~4.6 ms at either supported rate.
    if sr == 22050:
        n_fft = 1024
        hop_length = 102
    elif sr == 44100:
        n_fft = 2048
        hop_length = 205
    else:
        raise ValueError(f'ERROR: sr = {sr}, sr must be either 22050 or 44100')

    if spec is None:
        # Compute magnitude spectrogram if not provided.  BUG FIX: pass
        # n_fft/hop_length as keywords — current librosa releases accept
        # only ``y`` positionally, so the old positional call raises.
        spec = np.abs(librosa.stft(audio, n_fft=n_fft, hop_length=hop_length))

    # Order magnitudes within each time bin, then drop the strongest
    # (1 - gamma) fraction of frequency bins.
    spec = np.sort(spec, axis=0)
    J = math.floor(spec.shape[0] * gamma)
    spec = spec[:J, :]

    # Inverse-sparsity per frame: ||x||_2^2 / (J^(1/4) * ||x||_4).
    # (Renamed the result from ``ninos`` to avoid shadowing the function.)
    l2_squared = np.square(np.linalg.norm(spec, ord=2, axis=0))
    l4 = np.linalg.norm(spec, ord=4, axis=0)
    odf = l2_squared / ((J ** (1 / 4)) * l4)
    return odf, J, hop_length
def squeeze_idx(idx, min, max):
    '''Clamp *idx* into the inclusive range [min, max].

    Helper that keeps window boundary indices valid for array slicing.

    Args:
        idx (int): Candidate index
        min (int): Lower bound (typically 0)
        max (int): Upper bound (typically len(arr) - 1)

    Returns:
        int: *idx* clamped into the range.
    '''
    # NOTE: the parameter names shadow the builtins min()/max() (kept for
    # caller compatibility), so clamping uses explicit comparisons.
    if idx < min:
        return min
    elif idx > max:
        return max
    else:
        return idx


def onset_select(odf_arr, w1=3, w2=3, w3=7, w4=1, w5=0, delta=0, plot=False):
    '''Peak-picking over an onset-detection-function (ODF) curve.

    Implementation as described in
    https://ismir2012.ismir.net/event/papers/049_ISMIR_2012.pdf

    A frame is selected as an onset when it
      1. is the maximum of the (clamped) window around it,
      2. is at least ``delta`` above the local mean, and
      3. lies more than ``w5`` frames after the previous onset.

    Args:
        odf_arr (1D numpy array): ODF values per frame
        w1 (int): frames to the left for the local-maximum window
        w2 (int): frames to the right for the local-maximum window
        w3 (int): frames to the left for the local-mean window
        w4 (int): frames to the right for the local-mean window
        w5 (int): minimum gap (in frames) between consecutive onsets
        delta (float in [0, inf)): threshold above the local mean
        plot (bool): plot the first 1000 frames with onsets overlaid

    Returns:
        onsets (1D numpy array): frame indices of the selected onsets
    '''
    last_idx = len(odf_arr) - 1  # hoisted: loop-invariant upper bound
    onsets = []
    for frame in range(len(odf_arr)):
        # Condition 1: candidate frame is the maximum of its window.
        # (Window ends are clamped to valid indices; the right end is
        # exclusive in the slice — kept as-is to preserve behavior.)
        lo = squeeze_idx(frame - w1, 0, last_idx)
        hi = squeeze_idx(frame + w2, 0, last_idx)
        cond1 = frame == lo + np.argmax(odf_arr[lo:hi])

        # Condition 2: candidate exceeds the local mean by delta.
        lo = squeeze_idx(frame - w3, 0, last_idx)
        hi = squeeze_idx(frame + w4, 0, last_idx)
        cond2 = odf_arr[frame] >= np.mean(odf_arr[lo:hi]) + delta

        # Condition 3: far enough past the previous onset (vacuously true
        # before the first onset).  ``onsets[-1]`` replaces the old
        # ``onsets[len(onsets) - 1]``.
        cond3 = not onsets or frame - onsets[-1] > w5

        if cond1 and cond2 and cond3:
            onsets.append(frame)

    onsets = np.array(onsets)
    if plot:
        plt.figure(figsize=(20, 15))
        plt.plot(odf_arr[:1000])
        plt.vlines(onsets[np.where(onsets < 1000)[0]], ymin=0, ymax=np.max(odf_arr[:1000]), colors=['red'])
        plt.show()
    return onsets
def onset_frames_to_time(onsets, sr, hop_len):
    '''Converts a list of onset frames to the corresponding times in seconds.

    Args:
        onsets (1D numpy array): Onset frame indices, spaced ``hop_len``
            samples apart (as produced by ninos()/onset_select())
        sr (int): sample rate
        hop_len (int): hop length used when the frames were computed

    Returns:
        list of float: onset times in seconds (frame * hop_len / sr).
        (Fixed the placeholder "_type_: _description_" docstring.)
    '''
    time_per_frame = hop_len / sr
    return [frame * time_per_frame for frame in onsets]
def onset_times_to_bins(onset_times):
    '''Convert onset times in seconds to rounded 10 ms bin indices.

    Args:
        onset_times (1D numpy array): onset times in seconds

    Returns:
        list of int: onset times expressed as rounded 10 ms bins
    '''
    bins_per_second = 100  # one bin per 10 ms
    return [round(t * bins_per_second) for t in onset_times]
def get_10ms_onset_frames(audio, sr, odf, hop_len, p, gamma, w1=10, w2=1, w3=1, w4=8, w5=10, delta=1.0, spec=None):
'''Takes raw audio and uses desired odf function and parameters to predict onsets, which
are returned as 10ms time frames relative to start and end (full audio if start and end
aren't specified)
The default onset_select() hyperparameters were chosen as they performed the best on average
for the tested segments during grid search
Args:
audio (1D numpy array): Raw audio waveform
sr (int): sample rate
odf (str): desired onset detection function
w1 (int, optional): see onset_select(). Defaults to 10.
w2 (int, optional): see onset_select(). Defaults to 1.
w3 (int, optional): see onset_select(). Defaults to 1.
w4 (int, optional): see onset_select(). Defaults to 8.
w5 (int, optional): see onset_select(). Defaults to 10.
delta (float, optional): see onset_select(). Defaults to 1.0.
start (int, optional): start of portion of song to compute in seconds. Defaults to -1.
- If negative or zero, will assume start is beginning
end (int, optional): end of portion of song to compute in seconds. Defaults to -1.
- If negative or zero, will assume end is end of audio.
Returns:
onset_time_bins (list of ints): predicted 10ms time bins corresponding to onsets
'''
assert odf in ['odf', 'energy', 'odf_energy', 'log_energy_novelty', 'spectral_novelty',
'odf_unnormalized', 'odf_sum_energy', 'log_energy_novelty_mult_odf_energy',
'd_energy', 'd_energy_mult_energy'], f'ERROR: {odf} is not a valid option'
# Get spectrogram
if spec is None:
spec = compute_mel_spectrogram_from_audio(audio, sr)
spec = filter_spec_by_amplitude(spec, p=p)
if odf == 'odf':
o = ninos(audio, sr, spec=spec, gamma=gamma)
o = -(o - np.min(o)) / np.max(o - np.min(o)) + 1
elif odf == 'odf_unnormalized':
o = ninos(audio, sr, spec=spec, gamma=gamma)
o = -o
elif odf == 'energy':
o = librosa.feature.rms(S=spec, frame_length=1022)[0]
o = -(o - np.min(o)) / np.max(o - np.min(o)) + 1
elif odf == 'odf_energy':
o = ninos(audio, sr, spec=spec, gamma=gamma)
o = -(o - np.min(o)) / np.max(o - np.min(o)) + 1
e = librosa.feature.rms(S=spec, frame_length=1022)[0]
e = -(e - np.min(e)) / np.max(e - np.min(e)) + 1
o = np.multiply(o, e)
o = (o - np.min(o)) / np.max(o - np.min(o))
elif odf == 'log_energy_novelty':
e = librosa.feature.rms(S=spec, frame_length=1022)[0]
log_energy = np.log1p(10*e)
log_energy_diff = np.zeros_like(log_energy)
log_energy_diff[1:] = np.diff(log_energy)
o = np.max([np.zeros_like(log_energy_diff), log_energy_diff], axis=0)
o = (o - np.min(o)) / np.max(o - np.min(o))
elif odf == 'spectral_novelty':
o = librosa.onset.onset_strength(S=spec)
o = (o - np.min(o)) / np.max(o - np.min(o))
elif odf == 'odf_sum_energy':
o = ninos(audio, sr, spec=spec, gamma=gamma)
e = librosa.feature.rms(S=spec, frame_length=1022)[0]
o = o+e
o = -(o - np.min(o)) / np.max(o - np.min(o)) + 1
elif odf == 'log_energy_novelty_mult_odf_energy':
e = librosa.feature.rms(S=spec, frame_length=1022)[0]
log_energy = np.log1p(10*e)
log_energy_diff = np.zeros_like(log_energy)
log_energy_diff[1:] = np.diff(log_energy)
log_energy_novelty = np.max([np.zeros_like(log_energy_diff), log_energy_diff], axis=0)
o = ninos(audio, sr, spec=spec, gamma=gamma)
o = np.multiply(o, e)
o = np.multiply(o, log_energy_novelty)
o = (o - np.min(o)) / np.max(o - np.min(o))
elif odf == 'd_energy':
e = librosa.feature.rms(S=spec, frame_length=1022)[0]
o = librosa.feature.delta(e)
o = -(o - np.min(o)) / np.max(o - np.min(o)) + 1
elif odf == 'd_energy_mult_energy':
e = librosa.feature.rms(S=spec, frame_length=1022)[0]
e = -(e - np.min(e)) / np.max(e - np.min(e)) + 1
o = librosa.feature.delta(e)
o = -(o - np.min(o)) / np.max(o - np.min(o)) + 1
o = np.multiply(o, e)
o = (o - np.min(o)) / np.max(o - np.min(o))
# Peak pick
onsets = onset_select(o, w1, w2, w3, w4, w5, delta, plot=False)
onset_times = onset_frames_to_time(onsets, sr=44100, hop_len=hop_len)
onset_time_bins = onset_times_to_bins(onset_times) # convert to 10ms time bins
return onset_time_bins
def compare_onsets(audio, sr, notes_array, start, end,
w1=3, w2=3, w3=7, w4=1, w5=0, delta=0,
plot= False):
'''Takes onsets from ground truth notes array, computes them from corresponding
audio, then compares using f1 measure. Plot optional
Args:
audio (1D numpy array): Raw waveform of audio
sr (int): sample rate of audio
notes_array (1D numpy array): notes_array corresponding to audio
start (int): start of section to measure in seconds
end (int): end of section to measure in seconds
[w1:w5, delta] (ints): hyperparameters of onset_select
plot (bool): if True, will print plot of compared onsets
Returns:
f1 (float): f1 score of predicted onsets vs ground truth onsets
'''
# Measure onsets using spectral sparsity
odf, _, hop_len = ninos(audio[sr*start:sr*end], sr)
onsets = onset_select(odf, w1, w2, w3, w4, w5, delta, plot=False)
onset_times = onset_frames_to_time(onsets, sr, hop_len)
onset_time_bins = onset_times_to_bins(onset_times)
# Get ground truth clone hero onsets
ch_onsets = np.where(notes_array[start*100:end*100] > 0)[0]
ch_onset_times = [x/100 for x in ch_onsets]
# Compare with f_measure
f1, _, _ = f_measure(np.array(ch_onset_times), np.array(onset_times))
# plot
if plot:
plt.figure(figsize=(15,5))
for o in ch_onsets:
plt.axvline(x=o, ymin=0, ymax=0.5, color='r')
for o in onset_time_bins:
plt.axvline(x=o, ymin=0.5, ymax=1, color='g')
return f1
def notes_array_onset_f1(ground_truth, candidate):
'''Generates the onset f1 score between a ground truth and predicted notes array
Args:
ground_truth (1D numpy array): ground truth notes array
candidate (1D numpy array): candidate notes array
Returns:
_type_: _description_
'''
gt_onset_times = np.where(ground_truth>0)[0]/100
candidate_onset_times = np.where(candidate>0)[0]/100
f1, _, _ = f_measure(gt_onset_times, candidate_onset_times)
return f1 | elliottwaissbluth/tensor-hero | tensor_hero/onset.py | onset.py | py | 12,326 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.warnoptions",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "warnings.simplefilter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "librosa.stft",
... |
23550856102 | import os,sys
import argparse
import random
from collections import defaultdict, deque
import signal
from multiprocessing import Pool
from time import time
'''
Below awesome fast[a/q] reader function taken
from https://github.com/lh3/readfq/blob/master/readfq.py
'''
def readfq(fp): # this is a generator function
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last: break
name, seqs, last = last[1:].split()[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, (''.join(seqs), None) # yield a fasta record
if not last: break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, (seq, ''.join(seqs)); # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, (seq, None) # yield a fasta record instead
break
def reverse_complement(string):
rev_nuc = {'A':'T', 'C':'G', 'G':'C', 'T':'A', 'U':'A', 'u':'a', 'a':'t', 'c':'g', 'g':'c', 't':'a', 'N':'N', 'X':'X', 'n':'n', 'Y':'R', 'R':'Y', 'K':'M', 'M':'K', 'S':'S', 'W':'W', 'B':'V', 'V':'B', 'H':'D', 'D':'H', 'y':'r', 'r':'y', 'k':'m', 'm':'k', 's':'s', 'w':'w', 'b':'v', 'v':'b', 'h':'d', 'd':'h'}
rev_comp = ''.join([rev_nuc[nucl] for nucl in reversed(string)])
return(rev_comp)
def get_minimizers(seq, k_size, w, seed_counts):
# kmers = [seq[i:i+k_size] for i in range(len(seq)-k_size) ]
window_kmers = deque([hash(seq[i:i+k_size]) for i in range(w)])
curr_min = min(window_kmers)
j = list(window_kmers).index(curr_min)
# minimizers = [ seq[j:j+k_size] ]
kmer = seq[j:j+k_size]
kmer_rc = reverse_complement(kmer)
if kmer < kmer_rc:
seed_counts[kmer] += 1
else:
seed_counts[kmer_rc] += 1
for i in range(w+1,len(seq) - k_size):
new_kmer = hash(seq[i:i+k_size])
# updateing window
discarded_kmer = window_kmers.popleft()
window_kmers.append(new_kmer)
# we have discarded previous windows minimizer, look for new minimizer brute force
if curr_min == discarded_kmer:
curr_min = min(window_kmers)
j = list(window_kmers).index(curr_min) + i - w
# minimizers.append( seq[j:j+k_size] )
# seed_counts[seq[j:j+k_size]] += 1
kmer = seq[j:j+k_size]
kmer_rc = reverse_complement(kmer)
if kmer < kmer_rc:
seed_counts[kmer] += 1
else:
seed_counts[kmer_rc] += 1
# Previous minimizer still in window, we only need to compare with the recently added kmer
elif new_kmer < curr_min:
curr_min = new_kmer
# minimizers.append( seq[i:i+k_size] )
# seed_counts[seq[i:i+k_size]] += 1
kmer = seq[i:i+k_size]
kmer_rc = reverse_complement(kmer)
if kmer < kmer_rc:
seed_counts[kmer] += 1
else:
seed_counts[kmer_rc] += 1
# return minimizers
def get_syncmers(seq, k, s, t, seed_counts):
window_smers = deque([hash(seq[i:i+s]) for i in range(0, k - s + 1 )])
curr_min = min(window_smers)
pos_min = window_smers.index(curr_min)
syncmers = []
if pos_min == t:
kmer = seq[0 : k]
kmer_rc = reverse_complement(kmer)
if kmer < kmer_rc:
seed_counts[kmer] += 1
else:
seed_counts[kmer_rc] += 1
for i in range(k - s + 1, len(seq) - s):
new_smer = hash(seq[i:i+s])
# updating window
discarded_smer = window_smers.popleft()
window_smers.append(new_smer)
# Make this faster by storing pos of minimum
curr_min = min(window_smers)
pos_min = window_smers.index(curr_min)
if pos_min == t:
kmer = seq[i - (k - s) : i - (k - s) + k]
kmer_rc = reverse_complement(kmer)
if kmer < kmer_rc:
seed_counts[kmer] += 1
else:
seed_counts[kmer_rc] += 1
# return syncmers
def print_stats(method, k, seed_counts):
# seed_counts = defaultdict(int)
# for minm_list in results:
# for m in minm_list:
# seed_counts[m] += 1
total_seed_count_sq = 0
total_seed_count = 0
total_seed_count_sq_1000_lim = 0
total_seed_count_1000_lim = 0
for seed_id, cnt in seed_counts.items():
total_seed_count += cnt
total_seed_count_sq += cnt**2
if cnt <= 1000:
total_seed_count_1000_lim += cnt
total_seed_count_sq_1000_lim += cnt**2
frac_masked = 1 - total_seed_count_1000_lim/total_seed_count
print("{0},{1},{2},{3},{4}".format(method, k, total_seed_count, int(round(total_seed_count_sq / total_seed_count,0)), round(100*frac_masked, 1) ))
# print("{0},{1},{2},{3},{4}".format(method, k, total_seed_count_1000_lim, int(round(total_seed_count_sq_1000_lim / total_seed_count_1000_lim,0)), 1000))
def min_single_helper(arguments):
return get_minimizers(*arguments)
def syncmers_single_helper(arguments):
return get_syncmers(*arguments)
def main(args):
genome = {acc: seq.upper() for (acc, (seq, _)) in readfq(open(args.fasta, 'r'))}
n = 10000000
for acc,seq in list(genome.items()):
acc = acc.split()[0]
# print(acc)
genome[acc] = seq.replace("N", "") # remove Ns
k = args.k
if args.type == "minimizers":
w = 9
seed_counts = defaultdict(int)
for acc, seq in genome.items():
M = get_minimizers(seq, k, w, seed_counts)
print_stats("minimizers", k, seed_counts)
elif args.type == "syncmers":
s = k-4
t = 2 # creates open syncmer with mid point with is used in strobealign
seed_counts = defaultdict(int)
for acc, seq in genome.items():
get_syncmers(seq, k, s, t, seed_counts)
print_stats("syncmers", k, seed_counts)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Calc identity", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('fasta', type=str, default=False, help='Path to genome')
# parser.add_argument('n', type=int, default=4, help='Nr cores')
parser.add_argument('k', type=int, default=20, help='k-mer size')
parser.add_argument('--type', type=str, default=False, help='Either syncmenrs or minimizers')
args = parser.parse_args()
if len(sys.argv)==1:
parser.print_help()
sys.exit()
main(args) | ksahlin/alignment_evaluation | scripts/compute_seed_E_hits.py | compute_seed_E_hits.py | py | 7,348 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "collect... |
21148876915 | import json
import os
class Landmarks:
def __init__(self, name: str):
self.name = name
json_path = "landmarks/" + name + ".json"
with open(json_path) as json_file:
data = json.load(json_file)
self.emotion, self.version = name.split("_")
self.fps = data["fps"]
self.frames = data["coords"]
self.faces = data["face"]
class LandmarksMetadata:
def __init__(self, filename):
self.name = filename.split(".")[0]
self.emotion = self.name.split("_")[0]
self.version = self.name.split("_")[1]
def serialize(self):
return {
"name": self.name,
"emotion": self.emotion,
"version": self.version
}
def list_landmarks_meta():
'''
list_landmarks returns a list of all existing LandmarksMetadata
'''
return [LandmarksMetadata(l) for l in os.listdir("landmarks")]
def dict_landmarks_meta():
'''
dict_landmarks returns a dictionary of all landmarks' metadatas mapped by type and version
'''
out = {}
for l in list_landmarks_meta():
if l.emotion not in out:
out[l.emotion] = {}
out[l.emotion][l.version] = l.serialize()
return out
def get_landmarks(type, version):
'''
get_landmarks returns a Landmarks object based on name
'''
return Landmarks(get_name(type, version))
def get_name(type, version):
return f"{type}_{version}"
| jjustin/paintings-animator | src/storage/landmark.py | landmark.py | py | 1,482 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 35,
"usage_type": "call"
}
] |
4453184793 | import bs4 as bs
import urllib.request
from time import strptime
from datetime import datetime
import mysql.connector
import arrow
ghtorrentDb = mysql.connector.connect(
host="localhost",
user="root",
passwd="",
database="ghtorrent"
)
project_id = 34674827
cursor = ghtorrentDb.cursor()
cursor.execute("SELECT url FROM projects WHERE id={0}".format(project_id))
url = cursor.fetchone()[0].replace("api.","").replace("/repos","")
print(url)
repoName = url.replace("https://github.com","")
repoName += "/tree/"
#print(repoName)
page = urllib.request.urlopen(url).read()
dom = bs.BeautifulSoup(page,'lxml')
totalNoOfCommits = int(dom.body.find_all('span',class_='text-emphasized')[0].text.replace("\n",""))
aList = dom.body.find_all('a')
for shaKey in aList:
print(shaKey.get('href'))
if("master" not in shaKey.get('href') and repoName in shaKey.get('href')):
print(shaKey.get('href'))
shaKey = shaKey.get('href').split("/")[4]
print(totalNoOfCommits)
nextIndex = str(totalNoOfCommits - (totalNoOfCommits%34))
page = urllib.request.urlopen(url + '/commits').read()
dom = bs.BeautifulSoup(page,'lxml')
dateString = dom.body.find_all('div',class_='commit-group-title')[0].text.replace("\n","").replace("Commits on ","").replace(",","").split(" ")
Y1 = int(dateString[2])
M1 = int(strptime(dateString[0],'%b').tm_mon)
D1 = int(dateString[1])
end = datetime(Y1,M1,D1)
print(url+'/commits/master?after='+shaKey+"+"+nextIndex)
page = urllib.request.urlopen(url+'/commits/master?after='+shaKey+"+"+nextIndex).read()
dom = bs.BeautifulSoup(page,'lxml')
domGroup = dom.body.find_all('div',class_='commit-group-title')
dateString = domGroup[len(domGroup)-1].text.replace("\n","").replace("Commits on ","").replace(",","").split(" ")
Y2 = int(dateString[2])
M2 = int(strptime(dateString[0],'%b').tm_mon)
D2 = int(dateString[1])
start = datetime(Y2,M2,D2)
numberOfMonths = 0
for d in arrow.Arrow.range('month', start, end):
numberOfMonths += 1
print(numberOfMonths)
avgNumberOfCommitsPerMonth = float(totalNoOfCommits)/(float(numberOfMonths)*1.0)
print('avgNumberOfCommunity:',avgNumberOfCommitsPerMonth)
break | pombredanne/ghtorrent_repear | history2.py | history2.py | py | 2,333 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "mysql.connector.connector.connect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 7,
"usage_type": "name"
},
{
"api... |
9709356118 | from abc import ABC
from typing import List
from visualBase import GameBase
from visualization.conf import *
from graph.graphCreator import createGraphVertex
from graph.graphBase import GraphType
from graph.graphBase import Graph
from graph.graphCreator import createGraphEdge
from algorithm.graphSearch import breadthFirstSearchGenerator
from visualization.utils import findMouseNode
from graph.graphCreator import GridGraphCreator
from graph.graphCreator import GraphCreator
from visualization.visualBase import GameSurface
from visualization.visualBase import UserInterface
from visualization.visualBase import NormalSurface
from graph.graphImp import GameFeasible
from graph.graphBase import Feasible
from graph.graphBase import VertexType
from algorithm.graphSearch import getPath
from algorithm.graphSearch import SearchAlgorithm
from visualization.visualBase import GameState
import pygame
class Visual(GameBase, ABC):
def __init__(self, graphCreator: GraphCreator, searchAlgo: SearchAlgorithm):
super(Visual, self).__init__()
self.graphManager = graphCreator
self.algo = searchAlgo
self.algo.iniGenerator(self.graphManager.getVertexByInd(15, 15))
self.gameSurface = GameSurface((800, 600), self.graphManager)
self.helpSurface = NormalSurface((800, 200))
self.userInterface = UserInterface()
self.currentNode = None
self.comeFrom = None
self.gameState = GameState
def processInput(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self._running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
try:
currentNode, comeFrom = self.algo.next()
self.userInterface.updateComeFrom(comeFrom)
self.userInterface.updateCurrentVertex(currentNode)
except StopIteration as e:
if self.comeFrom is not None:
pass
else:
currentNode, comeFrom = e.value
self.currentNode = currentNode
self.comeFrom = comeFrom
elif event.key == pygame.K_g:
mousePos = pygame.mouse.get_pos()
x, y = findMouseNode(mousePos, 20)
self.userInterface.updateGoal((x, y))
elif event.key == pygame.K_s:
mousePos = pygame.mouse.get_pos()
x, y = findMouseNode(mousePos, 20)
self.userInterface.updateStart((x, y))
self.gameState = GameState.START
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 3:
# process mouse input
mousePos = pygame.mouse.get_pos()
x, y = findMouseNode(mousePos, 20)
vertex = self.graphManager.getVertexByInd(y, x)
self.userInterface.updateBarrier((x, y))
self.algo.feasible.addBarrier(vertex)
elif event.button == 1:
mousePos = pygame.mouse.get_pos()
x, y = findMouseNode(mousePos, 20)
self.userInterface.updatePathTarget((x, y))
def update(self):
self.gameSurface.update(self.userInterface)
self.helpSurface.updateGameMessage(self.userInterface)
def render(self):
self._screen.blit(self.gameSurface.surface, (0, 0))
self._screen.blit(self.helpSurface.surface, (0, 600))
pygame.display.update()
self._clock.tick(FPS)
def run(self):
while self._running:
self.processInput()
self.update()
self.render()
def main():
feasible = GameFeasible()
graph = GridGraphCreator()
graph.createGraph(GraphType.AdjList)
algorithm = SearchAlgorithm(graph.graph, feasible)
game = Visual(graph, algorithm)
game.run()
pygame.quit()
if __name__ == "__main__":
main()
| chrispaulint3/AStar | visualization/visual.py | visual.py | py | 4,158 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "visualBase.GameBase",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "abc.ABC",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "graph.graphCreator.GraphCreator",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "algorithm... |
23870105129 | from PyQt5 import QtWidgets
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QSize, Qt
from .widgets import ControlButton, ScrollableButton, Seekbar, PlaybackModeControlButton
from .uilib.util import mask_image_circ, shadowify, setElide
class PlayerInfoFrame(QtWidgets.QFrame):
def __init__(self, p):
super(PlayerInfoFrame, self).__init__(p)
self.hlay = QtWidgets.QHBoxLayout(self)
self.hlay.setContentsMargins(4, 0, 16, 0)
self.vlay = QtWidgets.QVBoxLayout()
self.setFixedSize(208, 64)
self.setObjectName("player-info-frame")
self.coverArt = QtWidgets.QLabel(self)
self.coverArt.setObjectName("player-info-cover-art")
self.coverArt.setFixedSize(58, 58)
self.coverArt.setPixmap(mask_image_circ("res/icons/cd.png", imgtype="png", size=58))
self.trackTitle = QtWidgets.QLabel("----", self)
self.trackTitle.setObjectName("player-info-track-title")
self.trackArtist = QtWidgets.QLabel("--", self)
self.trackArtist.setObjectName("player-info-track-artist")
self.vlay.addWidget(self.trackTitle)
self.vlay.addSpacing(-35)
self.vlay.addWidget(self.trackArtist)
self.hlay.addWidget(self.coverArt)
self.hlay.addLayout(self.vlay)
shadowify(self)
def setCoverArt(self, coverPath):
if coverPath is None:
self.coverArt.setPixmap(mask_image_circ("res/icons/cd.png", imgtype="png", size=58))
else:
self.coverArt.setPixmap(mask_image_circ(coverPath, imgtype="jpg", size=58))
def setTitle(self, trackTitle):
setElide(self.trackTitle, trackTitle)
def setArtist(self, trackArtist):
setElide(self.trackArtist, trackArtist)
class PlayerControllerFrame(QtWidgets.QFrame):
def __init__(self, p):
super(PlayerControllerFrame, self).__init__(p)
self.hlay = QtWidgets.QHBoxLayout(self)
self.hlay.setContentsMargins(8, 0, 8, 0)
self.hlay.setSpacing(0)
self.hlay.setAlignment(Qt.AlignCenter)
self.setFixedSize(168, 34)
self.setObjectName("player-controller-frame")
self.playPause = ControlButton(24, QIcon("res/icons/play.svg"), "", self)
self.previousButton = ControlButton(16, QIcon("res/icons/skipback.svg"), "", self)
self.nextButton = ControlButton(16, QIcon("res/icons/skipforward.svg"), "", self)
self.rewind = ControlButton(16, QIcon("res/icons/rewind.svg"), "", self)
self.fastForward = ControlButton(16, QIcon("res/icons/forward.svg"), "", self)
self.rewind.setAutoRepeat(True)
self.rewind.setAutoRepeatDelay(500)
self.rewind.setAutoRepeatInterval(100)
self.fastForward.setAutoRepeat(True)
self.fastForward.setAutoRepeatDelay(500)
self.fastForward.setAutoRepeatInterval(100)
for button in self.findChildren(QtWidgets.QPushButton):
button.setFixedSize(30, 30)
self.hlay.addWidget(self.rewind)
self.hlay.addWidget(self.previousButton)
self.hlay.addWidget(self.playPause)
self.hlay.addWidget(self.nextButton)
self.hlay.addWidget(self.fastForward)
shadowify(self)
class PlaybackControllerFrame(QtWidgets.QFrame):
def __init__(self, p):
super(PlaybackControllerFrame, self).__init__(p)
self.hlay = QtWidgets.QHBoxLayout(self)
self.hlay.setContentsMargins(0, 0, 0, 0)
self.hlay.setSpacing(0)
self.hlay.setAlignment(Qt.AlignCenter)
self.setFixedSize(97, 34)
self.setObjectName("playback-controller-frame")
self.volumeButton = ScrollableButton(16, QIcon("res/icons/volume.svg"), "", self)
self.playbackModeButton = PlaybackModeControlButton(16, QIcon("res/icons/repeatoff.svg"), "", self)
self.playbackModeButton.set_state(0)
self.equalizerButton = ControlButton(16, QIcon("res/icons/equalizer.svg"), "", self)
for button in self.findChildren(QtWidgets.QPushButton):
button.setIconSize(QSize(16, 16))
button.setFixedSize(30, 30)
self.hlay.addWidget(button)
shadowify(self)
class SeekbarFrame(QtWidgets.QFrame):
def __init__(self, p):
super(SeekbarFrame, self).__init__(p)
self.setObjectName("seekbar-frame")
self.setFixedHeight(25)
self.hlay = QtWidgets.QHBoxLayout(self)
self.hlay.setContentsMargins(8, 0, 8, 0)
self.seekbar = Seekbar(self)
self.seekbar.setOrientation(Qt.Horizontal)
self.hlay.addWidget(self.seekbar)
shadowify(self)
class FavouriteFrame(QtWidgets.QFrame):
def __init__(self, p):
super(FavouriteFrame, self).__init__(p)
self.setObjectName("favourite-frame")
self.setFixedSize(34, 34)
self.hlay = QtWidgets.QHBoxLayout(self)
self.hlay.setContentsMargins(0, 0, 0, 0)
self.favouriteButton = QtWidgets.QPushButton(QIcon("res/icons/fav-untoggled.svg"), "", self)
self.favouriteButton.setIconSize(QSize(16, 16))
self.favouriteButton.setFixedSize(30, 30)
self.hlay.addWidget(self.favouriteButton)
shadowify(self)
class TimeFrame(QtWidgets.QFrame):
def __init__(self, p):
super(TimeFrame, self).__init__(p)
self.setObjectName("time-frame")
self.setFixedHeight(34)
self.hlay = QtWidgets.QHBoxLayout(self)
self.time = QtWidgets.QLabel("--:--/--:--", self)
self.hlay.addWidget(self.time, alignment=Qt.AlignCenter)
shadowify(self)
class PlayerPanelLayout(QtWidgets.QHBoxLayout):
def __init__(self):
super(PlayerPanelLayout, self).__init__()
self.setSpacing(10)
self.bottom_hlay = QtWidgets.QHBoxLayout()
self.vlay = QtWidgets.QVBoxLayout()
self.playerInfoFrame = PlayerInfoFrame(self.parent())
self.playerControllerFrame = PlayerControllerFrame(self.parent())
self.playbackControllerFrame = PlaybackControllerFrame(self.parent())
self.seekbarFrame = SeekbarFrame(self.parent())
self.favouriteFrame = FavouriteFrame(self.parent())
self.timeFrame = TimeFrame(self.parent())
self.bottom_hlay.addWidget(self.playerControllerFrame)
self.bottom_hlay.addWidget(self.playbackControllerFrame)
self.bottom_hlay.addStretch()
self.bottom_hlay.addWidget(self.favouriteFrame)
self.bottom_hlay.addWidget(self.timeFrame)
self.vlay.addWidget(self.seekbarFrame)
self.vlay.addLayout(self.bottom_hlay)
self.addWidget(self.playerInfoFrame)
self.addLayout(self.vlay)
| blitpxl/phonoid | src/app/ui/playerpanel.py | playerpanel.py | py | 6,635 | python | en | code | 15 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QHBoxLayout",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": ... |
69814537633 | from pydantic import BaseModel
from typing import List
from config import db
from models.enums.tableType import TableType
class Constraint(BaseModel):
table_name: TableType
attribute_name: str
name: str = "is_unique"
condition: bool
def to_dict(self):
return {
"table_name": self.table_name.value,
"attribute_name": self.attribute_name,
"is_unique": self.condition,
}
class Constraints(BaseModel):
constraints: List[Constraint] = []
def __new__(cls):
if not hasattr(cls, "_instance"):
cls._instance = super(Constraints, cls).__new__(cls)
cls.constraints: List[Constraint] = []
cls.set_constraints()
return cls.constraints
@classmethod
def set_constraints(cls):
connection = db.get_connection()
cursor = connection.cursor()
select_query = "SELECT * FROM attributes_details;"
cursor.execute(select_query)
rows = cursor.fetchall()
cls.constraints = [
Constraint(
table_name=TableType.EMPLOYEE
if row[0].upper() == "EMPLOYEE"
else TableType.PROJECT,
attribute_name=row[1],
condition=row[2],
)
for row in rows
]
cursor.close()
connection.close()
@classmethod
def get_columns(cls, table_name: TableType) -> List[Constraint]:
Constraints()
return [
constraint
for constraint in cls.constraints
if constraint.table_name == table_name
]
| DB2Dev/costex | app/models/metadata/attrs_constraints.py | attrs_constraints.py | py | 1,636 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "models.enums.tableType.TableType",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "... |
70813001955 | import cv2
def car_lic_split(img_path):
binary_threshold = 100
segmentation_spacing = 0.9
# 前處理:灰階、二值化
img = cv2.imread(img_path)
img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# cv2.imshow('gray', img_gray)
# cv2.waitKey(0)
img_thre = img_gray
cv2.threshold(img_gray, binary_threshold, 255, cv2.THRESH_BINARY_INV, img_thre)
# cv2.imshow('threshold', img_thre)
# cv2.waitKey(0)
# 分割字符
white = [] # 記錄每一列的白色像素總和
black = [] # 記錄每一列的黑色像素總和
height, width = img_thre.shape[:2]
white_max = 0 # 僅保存每列,取列中白色最多的像素總數
black_max = 0 # 僅保存每列,取列中黑色最多的像素總數
# 循環計算每一列的黑白色像素總和
for i in range(width):
w_count = 0 # 這一列白色總數
b_count = 0 # 這一列黑色總數
for j in range(height):
if img_thre[j][i] == 255:
w_count += 1
else:
b_count += 1
white_max = max(white_max, w_count)
black_max = max(black_max, b_count)
white.append(w_count)
black.append(b_count)
# False表示白底黑字;True表示黑底白字
arg = black_max > white_max
# 分割圖像,給定參數爲要分割字符的開始位
def find_end(start_):
end_ = start_ + 1
for m in range(start_+1, width - 1):
if(black[m] if arg else white[m]) > (segmentation_spacing * black_max if arg else segmentation_spacing * white_max):
end_ = m
break
return end_
n = 1
count = 0
start = 0
end = 0
while n < width - 1:
n += 1
if(white[n] if arg else black[n]) > ((1 - segmentation_spacing) * white_max if arg else (1 - segmentation_spacing) * black_max):
# 上面這些判斷用來辨別是白底黑字還是黑底白字
start = n
end = find_end(start)
n = end
if end - start > 5:
cj = img_thre[1:height, start:end]
cv2.imwrite('./car_lic_char_img/{0}_{1}.png'.format(d, count), cj)
# cv2.imshow('cutChar', cj)
# cv2.waitKey(0)
count += 1
if __name__ == '__main__':
data = ["Train01.jpg", "Train05.jpg", "Train09.jpg", "Train13.jpg", "Train17.jpg",
"Train02.jpg", "Train06.jpg", "Train10.jpg", "Train14.jpg",
"Train03.jpg", "Train07.jpg", "Train11.jpg", "Train15.jpg",
"Train04.jpg", "Train08.jpg", "Train12.jpg", "Train16.jpg"]
for d in data:
img_path = './car_license_img/'+d
car_lic_split(img_path)
# img_path = './car_license_img/Train02.jpg'
# car_lic_split(img_path)
| JamesYeh2017/License-Plate-Recognition-System | car_lic_split.py | car_lic_split.py | py | 2,852 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2GRAY",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"l... |
7076098975 | import argparse
import sys
import os
def create_arg_parser():
""""Creates and returns the ArgumentParser object."""
parser = argparse.ArgumentParser(description='Description of your app.')
parser.add_argument('inputDirectory',
help='Path to the input directory.')
parser.add_argument('--outputDirectory',
help='Path to the output that contains the resumes.')
return parser
if __name__ == "__main__":
arg_parser = create_arg_parser()
parsed_args = arg_parser.parse_args(sys.argv[1:])
if os.path.exists(parsed_args.inputDirectory):
print("File exist")
| a-n-n-a-c-g/Scripts | pythonacceptargs.py | pythonacceptargs.py | py | 633 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"li... |
10786181943 | from django import template
register = template.Library()
@register.simple_tag
def bizz_or_fuzz(number):
if number % 3 == 0 and number % 5 == 0:
return 'BizzFuzz'
elif number % 3 == 0:
return 'Bizz'
elif number % 5 == 0:
return 'Fuzz'
else:
return number
| lebvlad/milo-django | test_case/templatetags/bizzfuzz.py | bizzfuzz.py | py | 323 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.template.Library",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 4,
"usage_type": "name"
}
] |
69905461153 | """Support for myUplink sensors."""
from __future__ import annotations
import logging
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.const import Platform
from .api import Parameter
from .const import DOMAIN
from .entity import MyUplinkParameterEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up switch entities for every myUplink parameter that maps to a switch."""
    # the integration stored its update coordinator under this config entry's id
    coordinator = hass.data[DOMAIN][entry.entry_id]
    entities: list[SwitchEntity] = []
    # walk every parameter of every device in every system and keep the ones
    # whose fitting entity type is SWITCH
    for system in coordinator.data:
        for device in system.devices:
            for parameter in device.parameters:
                if parameter.find_fitting_entity() == Platform.SWITCH:
                    entities.append(
                        MyUplinkParameterSwitchEntityEntity(
                            coordinator, device, parameter
                        )
                    )
    async_add_entities(entities)
class MyUplinkParameterSwitchEntityEntity(MyUplinkParameterEntity, SwitchEntity):
    """Switch entity backed by a single myUplink device parameter."""

    def _update_from_parameter(self, parameter: Parameter) -> None:
        """Update attrs from parameter."""
        super()._update_from_parameter(parameter)
        # the raw parameter value is int-convertible (e.g. "0"/"1");
        # any nonzero value means the switch is on
        self._attr_is_on = bool(int(self._parameter.value))

    async def async_turn_on(self, **kwargs):
        """Turn the entity on by writing 1 to the backing parameter."""
        await self._parameter.update_parameter(1)
        await self.async_update()

    async def async_turn_off(self, **kwargs):
        """Turn the entity off by writing 0 to the backing parameter."""
        await self._parameter.update_parameter(0)
        await self.async_update()
| jaroschek/home-assistant-myuplink | custom_components/myuplink/switch.py | switch.py | py | 1,872 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "homeassistant.core.HomeAssistant",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "homeassistant.config_entries.ConfigEntry",
"line_number": 20,
"usage_type": "name"
}... |
23720578792 | #Michael Holloway SID: 001215316
import csv
import datetime
# entry point for the entire program to begin running
def start():
    """Entry point: load data, run both delivery trucks, then serve the status CLI.

    Fixes over the original:
      * the duplicate-address loops iterated truck.packageList while
        deliverPackage() removed entries from it, skipping packages
        (now iterate over a copy - see _deliverClosestAndMatches)
      * duplicated menu/time-parsing/status code factored into helpers
      * the misspelled user-facing label "Stauts:" corrected
    """
    # import distance and location data (shared with the helper functions via globals)
    global distances, locations
    distances, locations = importDistanceList()

    # initial load of trucks
    # manually set to balance loads and group matching addresses together
    truck1Load = ['1', '2', '4', '5', '10', '13', '15', '19', '20', '23', '26', '27', '30', '35', '39']
    truck2Load = ['3', '7', '8', '11', '12', '17', '18', '21', '22', '24', '29', '31', '33', '34', '36', '37', '38', '40']
    truck1 = Truck(truck1Load)
    truck2 = Truck(truck2Load)

    # variable to allow for easy modification of maximum allowed packages
    global numberOfPackages
    numberOfPackages = 40

    # packages that can't leave at 8am (constraints/special notes) wait at the hub
    hub = ['6', '9', '14', '16', '25', '28', '32']

    # import our list of packages into the shared hash table
    global packages
    packages = importPackageList(hub, numberOfPackages)

    # truck 1 simply delivers its whole load, nearest destination first
    while truck1.packageList:
        _deliverClosestAndMatches(truck1)

    # truck 2 is manually told to return to the hub at 9:06 to collect the
    # delayed packages, then resumes nearest-first delivery
    while truck2.packageList:
        now = datetime.datetime.now()
        returnToHubTime = now.replace(hour=9, minute=6, second=0, microsecond=0)
        truckTime = truck2.currentTime
        if truckTime > returnToHubTime and hub:
            # drive back to the hub and absorb the remaining/delayed packages
            hubIndex = findLocationIndex('HUB')
            truck2.travel(findDistance(truck2.currentLocation, hubIndex))
            truck2.currentLocation = hubIndex
            truck2.packageList = hub + truck2.packageList
            # the delayed packages leave with the truck now
            for p in hub:
                packages.search(p).departureTime = truckTime
            hub.clear()  # never pick these up twice
        else:
            _deliverClosestAndMatches(truck2)

    # interactive reporting interface
    _printMenu()
    x = input('option: ')
    while x != "5":
        if x == "1":
            packID = input('package id: ')
            packObject = packages.search(packID)
            print("-----------------------------------")
            print("Package #", packObject.id)
            print("Address:", packObject.address)
            print("City:", packObject.city)
            print("Zip Code:", packObject.zip)
            print("Package Weight:", packObject.weight)
            print("Deadline:", packObject.deadline)
            print("Delivery Time:", packObject.deliveryTime)
            print("-----------------------------------")
        if x == "2":
            packID = input('package id: ')
            queryTime = _promptForTime()
            packObject = packages.search(packID)
            print("-----------------------------------")
            print("Package #", packObject.id)
            print("Address:", packObject.address)
            print("City:", packObject.city)
            print("Zip Code:", packObject.zip)
            print("Package Weight:", packObject.weight)
            print("Deadline:", packObject.deadline)
            # BUG FIX: the original printed the misspelled label "Stauts:"
            print("Status:", _statusAt(packObject, queryTime))
            print("-----------------------------------")
        if x == "3":
            # print all package information at the given time
            queryTime = _promptForTime()
            print("-----------------------------------")
            for packID in range(1, numberOfPackages + 1):  # known number of packages
                packObject = packages.search(str(packID))
                status = _statusAt(packObject, queryTime)
                # condensed to save visual space on screen
                print("Package #:" + packObject.id + " address: " + packObject.address + ", " + packObject.city + ", " + packObject.zip + " weight: " + packObject.weight + " deadline: " + packObject.deadline + " status: " + status)
        if x == "4":  # print truck mileages
            truck1Miles = truck1.distanceTraveled
            truck2Miles = truck2.distanceTraveled
            print("-----------------------------------")
            print('Truck #1 mileage: ' + str(round(truck1Miles, 2)))
            print('Truck #2 mileage: ' + str(round(truck2Miles, 2)))
            print('total miles: ', round(truck1Miles + truck2Miles, 2))
            print("-----------------------------------")
        # loop back to the menu
        _printMenu()
        x = input('option: ')


def _deliverClosestAndMatches(truck):
    """Deliver the truck's nearest package, then any others sharing its address."""
    packNum, dist, packObj = findClosestNext(truck)
    truck.travel(dist)
    deliverPackage(packObj, packNum, truck)
    packages.search(packNum).setDelivery(truck.currentTime)
    # BUG FIX: iterate over a copy - deliverPackage() removes ids from
    # truck.packageList, and mutating the list while iterating skipped entries
    for other in list(truck.packageList):
        otherObj = packages.search(other)
        if otherObj.address == packObj.address and otherObj.id in truck.packageList:
            deliverPackage(otherObj, otherObj.id, truck)
            packages.search(other).setDelivery(truck.currentTime)


def _promptForTime():
    """Prompt for a clock time and return it as a datetime on today's date."""
    timeQuery = input('Enter a time in HH:MM AM/PM format(i.e. 10:00 AM/2:30 PM): ')
    try:  # two-digit hour first; fall back to a single-digit hour on parse failure
        hour = int(timeQuery[:2])
        minute = int(timeQuery[3:5])
        period = timeQuery[6:].upper()  # force to upper case
    except ValueError:
        hour = int(timeQuery[:1])
        minute = int(timeQuery[2:4])
        period = timeQuery[5:].upper()  # force to upper case
    if period == "PM" and hour != 12:  # convert to a 24 hour clock
        hour = hour + 12
    now = datetime.datetime.now()
    return now.replace(hour=hour, minute=minute, second=0, microsecond=0)


def _statusAt(packObject, queryTime):
    """Describe a package's delivery status as of queryTime."""
    if queryTime >= packObject.deliveryTime:
        return "Delivered at: " + packObject.deliveryTime.strftime('%H:%M %p')
    if queryTime > packObject.departureTime:
        return "Out for delivery, ETA: " + packObject.deliveryTime.strftime('%H:%M %p')
    return "At the hub"


def _printMenu():
    """Print the interactive menu options."""
    print("Select an option:")
    print("1. package information by id")
    print("2. package information by id and at a specific time")
    print("3. all package information at a specific time")
    print("4. mileage information")
    print("5. exit")
#end main process
#start functions
#function to process our csv file and setup the data for distances/locations
#returns distanceList - a 2d array designed to allow for quick reference
#returns locationList - a list to help convert addresses to indexes
def importDistanceList():
    """Read distanceData.csv and build the distance matrix plus the address list.

    Returns:
        distanceList: 2-D list of distance strings (numeric cells only, row-major)
        locationList: list of addresses, index-aligned with distanceList rows
    """
    distanceList = []
    locationList = []
    with open("distanceData.csv", 'r') as data:
        for row in csv.reader(data, delimiter=","):
            # keep only the cells that look numeric (allowing one decimal point)
            numericCells = [cell for cell in row if cell.replace('.', '', 1).isdigit()]
            # column 1 holds the address; drop any embedded-newline suffix
            locationList.append(row[1].strip().split('\n')[0])
            distanceList.append(numericCells)
    return distanceList, locationList
#function to process csv file for packages
#creates individual package objects that are added the the hash table for lookup speed
#returns the HashTable object packageTable
def importPackageList(hub, size):
    """Read packageData.csv into a HashTable of package objects.

    Packages whose ids appear in hub get the delayed 9:06 AM departure
    time; everything else departs with the trucks at 8:00 AM.

    Returns the populated HashTable keyed by package id.
    """
    packageTable = HashTable(size)
    eightAM = datetime.datetime.now().replace(hour=8, minute=0, second=0, microsecond=0)
    delayedDeparture = datetime.datetime.now().replace(hour=9, minute=6, second=0, microsecond=0)
    with open("packageData.csv", 'r') as data:
        for row in csv.reader(data, delimiter=","):
            packageId = row[0]
            departure = delayedDeparture if packageId in hub else eightAM
            # column layout: 0=id, 1=address, 2=city, 4=zip, 5=deadline, 6=weight
            packObj = package(
                packageId,
                row[1],
                departure,
                row[5],
                row[2],
                row[4],
                row[6],
            )
            packageTable.insert(packageId, packObj)
    return packageTable
#convert address name to the indexed location
#returns the index of the item so we can crossrefernce with our distance table
def findLocationIndex(addr):
    """Return the index of the first entry in the global locations list containing addr.

    Substring match; returns None when nothing matches (callers pass known
    addresses, so a miss indicates bad input data).
    """
    for index, entry in enumerate(locations):
        if addr in entry:
            # BUG FIX: return the index of the matched entry itself; the original
            # used locations.index(entry), which returns the FIRST occurrence of
            # the value and is wrong when duplicate address strings exist
            return index
    return None
#distance from indexed location to indexed location, each machtes index with location in distance array, use addres to find index
#returns the mileage between the 2 specified locations
def findDistance(curr, dest):
    """Return the mileage (as a string cell) between two location indexes.

    Indexes come from findLocationIndex(); the matrix is the module-global
    distances loaded by importDistanceList().
    """
    return distances[curr][dest];
#remaining packages in truck, find closest next location
#returns the information for the closest package as well as the package object itself
def findClosestNext(truck):
    """Find the on-board package whose destination is nearest the truck.

    Returns:
        (package id, distance in miles as float, package object) of the
        closest remaining stop.
    """
    lowestDist = 999
    lowestID = '000'
    lowestObj = None
    for packageID in truck.packageList:
        packageObj = packages.search(packageID)
        stopIndex = findLocationIndex(packageObj.address)
        dist = float(findDistance(truck.currentLocation, stopIndex))
        if dist < lowestDist:
            lowestDist = dist
            lowestID = packageID
            # BUG FIX: remember the winning package object; the original
            # returned whichever object the loop happened to examine LAST,
            # not the closest one
            lowestObj = packageObj
    return lowestID, lowestDist, lowestObj
#interacts with the package to set delivery status
#sends a call to deliver the package in this truck object
def deliverPackage(package, packageID, truck):
    """Mark the package delivered, then remove its id from the truck's load."""
    package.status = "delivered"
    truck.deliver(packageID)
#easy function to set the status of the package specified
def packageInTransit(package):
    """Flag the given package as currently out for delivery."""
    package.status = "out for delivery"
#begin classes
#class to hold and interact with information on each truck
class Truck:
    """A delivery truck: its remaining load, odometer, position, and clock."""

    def __init__(self, packageList):
        self.packageList = packageList
        self.currentLocation = findLocationIndex('HUB')
        self.distanceTraveled = 0
        self.speed = 18  # miles per hour
        # trucks leave the hub at 8:00 AM
        self.currentTime = datetime.datetime.now().replace(hour=8, minute=0, second=0, microsecond=0)

    def deliver(self, pack):
        """Drop a package id from the on-board list."""
        self.packageList.remove(pack)

    def travel(self, distance):
        """Accumulate mileage and advance the clock at this truck's speed."""
        miles = float(distance)
        self.distanceTraveled = self.distanceTraveled + miles
        self.currentTime = self.currentTime + datetime.timedelta(hours=miles / self.speed)

    def addTimeByDistance(self, miles):
        """Advance the clock for the given mileage without logging it."""
        self.currentTime = self.currentTime + datetime.timedelta(hours=miles / self.speed)
#class to hold and set delivery status for each package object
class package:
    """Record for a single deliverable package plus its delivery timestamps."""

    def __init__(self, id, address, departureTime, deadline, city, zip, weight):
        self.id = id
        self.address = address
        self.deadline = deadline
        self.city = city
        self.zip = zip
        self.weight = weight
        self.status = "At the HUB"
        self.departureTime = departureTime
        # until delivered, default the delivery time to the 8:00 AM start of day
        self.deliveryTime = datetime.datetime.now().replace(hour=8, minute=0, second=0, microsecond=0)

    def setDelivery(self, time):
        """Record the actual delivery timestamp."""
        self.deliveryTime = time
# an empty class used as a bucket placeholder in HashTable
class EmptyItem:
    """Sentinel distinguishing never-used buckets from vacated ones."""
    pass


class HashTable:
    """Open-addressing (linear probing) hash table keyed by package id.

    insert() stores [key, obj] pairs, search() retrieves obj by key, and
    remove() vacates a bucket. Probing for search/remove stops at a
    never-used bucket; vacated buckets are probed past (and reused by
    insert).

    BUG FIX vs. the original: search() and remove() subscripted whatever
    was in a bucket (value[0]) while probing, which raised TypeError the
    moment a probe chain crossed a bucket vacated by remove() - EmptyItem
    is not subscriptable. They now skip vacated buckets explicitly. remove()
    also stops once the key has been vacated instead of probing the whole
    table.
    """

    def __init__(self, size):
        self.EMPTY_AT_START = EmptyItem()
        self.EMPTY_AFTER_REMOVE = EmptyItem()
        self.table = [self.EMPTY_AT_START] * size

    def insert(self, key, obj):
        """Insert obj under key; True on success, False when the table is full."""
        bucketKey = hash(key) % len(self.table)
        bucketList = [key, obj]
        for _ in range(len(self.table)):
            # any EmptyItem bucket (fresh or vacated) is free for reuse
            if type(self.table[bucketKey]) is EmptyItem:
                self.table[bucketKey] = bucketList
                return True
            bucketKey = (bucketKey + 1) % len(self.table)
        return False

    def search(self, key):
        """Return the object stored under key, or None when absent."""
        bucketKey = hash(key) % len(self.table)
        for _ in range(len(self.table)):
            bucket = self.table[bucketKey]
            # a never-used bucket terminates the probe chain
            if bucket is self.EMPTY_AT_START:
                return None
            # skip vacated buckets; only real [key, obj] pairs are compared
            if bucket is not self.EMPTY_AFTER_REMOVE and bucket[0] == key:
                return bucket[1]
            bucketKey = (bucketKey + 1) % len(self.table)
        return None

    def remove(self, key):
        """Vacate the bucket holding key (no-op when the key is absent)."""
        bucketKey = hash(key) % len(self.table)
        for _ in range(len(self.table)):
            bucket = self.table[bucketKey]
            # a never-used bucket terminates the probe chain
            if bucket is self.EMPTY_AT_START:
                return None
            if bucket is not self.EMPTY_AFTER_REMOVE and bucket[0] == key:
                # mark as vacated (not EMPTY_AT_START) so longer probe
                # chains that pass through this bucket still work
                self.table[bucketKey] = self.EMPTY_AFTER_REMOVE
                return None
            bucketKey = (bucketKey + 1) % len(self.table)
        return None
# begin the program: runs the full delivery simulation, then the interactive CLI
start();
| m-holloway-cw/PackageRouting | main.py | main.py | py | 18,549 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "d... |
33974716392 | # pragma: no cover
"""
This module defines a bounding box type and implements a constraint solver
that can position multiple bounding boxes s/t they do not overlap
(in addition to other constraints).
main() function used for testing purposes. Primary function made available to outside callers
is run_model()
"""
from itertools import combinations
from typing import Mapping, AbstractSet, Optional, List, Iterable, Tuple, DefaultDict
from attr import attrs, attrib
from collections import defaultdict
from math import isnan, pi
import numpy as np
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.nn import PairwiseDistance
from panda3d.core import LPoint3f # pylint: disable=no-name-in-module
from immutablecollections import immutabledict, immutableset, ImmutableDict, ImmutableSet
from vistautils.preconditions import check_arg
from adam.axes import (
Axes,
HorizontalAxisOfObject,
straight_up,
# directed,
symmetric,
symmetric_vertical,
FacingAddresseeAxis,
)
from adam.ontology.phase1_spatial_relations import (
PROXIMAL,
DISTAL,
EXTERIOR_BUT_IN_CONTACT,
Region,
)
from adam.ontology.phase1_spatial_relations import (
Direction,
GRAVITATIONAL_UP,
GRAVITATIONAL_DOWN,
)
from adam.perception import ObjectPerception, GROUND_PERCEPTION
# see https://github.com/pytorch/pytorch/issues/24807 re: pylint issue
# world origin; the ground plane is z = 0 (see z_coordinate_of_lowest_corner)
ORIGIN = torch.zeros(3, dtype=torch.float)  # pylint: disable=not-callable
# penalty weighting for keeping objects on the ground
GRAVITY_PENALTY = torch.tensor([1], dtype=torch.float)  # pylint: disable=not-callable
# penalty weighting for keeping objects from clipping into the ground
BELOW_GROUND_PENALTY = 2 * GRAVITY_PENALTY
# penalty weighting for keeping objects from colliding with one another
COLLISION_PENALTY = 5 * GRAVITY_PENALTY
# losses below this threshold are treated as converged (run_model stops early)
LOSS_EPSILON = 1.0e-04
# penalty weighting for adjusting the angle between relatively-positioned objects
ANGLE_PENALTY = torch.tensor([1.5], dtype=torch.float)  # pylint: disable=not-callable
# penalty weighting for adjusting the separation of relatively-positioned objects
DISTANCE_PENALTY = torch.tensor([1], dtype=torch.float)  # pylint: disable=not-callable
# concretized definitions of the relative distance categories:
PROXIMAL_MIN_DISTANCE = torch.tensor(  # pylint: disable=not-callable
    [1], dtype=torch.float
)
PROXIMAL_MAX_DISTANCE = torch.tensor(  # pylint: disable=not-callable
    [2.5], dtype=torch.float
)
DISTAL_MIN_DISTANCE = torch.tensor([3], dtype=torch.float)  # pylint: disable=not-callable
# tolerance under which two surfaces count as "in contact"
EXTERIOR_BUT_IN_CONTACT_EPS = torch.tensor(  # pylint: disable=not-callable
    [1e-2], dtype=torch.float
)
PI = torch.tensor([pi], dtype=torch.float)  # pylint: disable=not-callable
def main() -> None:
    """Smoke-test driver: position a ball resting on top of a box via SGD."""
    # build the box with a vertical primary axis and two symmetric side axes
    top_to_bottom = straight_up("top-to-bottom")
    side_to_side_0 = symmetric("side-to-side-0")
    side_to_side_1 = symmetric("side-to-side-1")
    box = ObjectPerception(
        "box",
        geon=None,
        axes=Axes(
            primary_axis=top_to_bottom,
            orienting_axes=immutableset([side_to_side_0, side_to_side_1]),
        ),
    )
    generating_axis = symmetric_vertical("ball-generating")
    orienting_axis_0 = symmetric("ball-orienting-0")
    orienting_axis_1 = symmetric("ball-orienting-1")
    # ball situated on top of box
    ball = ObjectPerception(
        "ball",
        geon=None,
        axes=Axes(
            primary_axis=generating_axis,
            orienting_axes=immutableset([orienting_axis_0, orienting_axis_1]),
        ),
    )
    # constraint: the ball must be exterior-but-in-contact with the box, above it
    in_region_relations: Mapping[ObjectPerception, List[Region[ObjectPerception]]] = {
        ball: [Region[ObjectPerception](box, EXTERIOR_BUT_IN_CONTACT, GRAVITATIONAL_UP)]
    }
    # other objects have no particular constraints:
    positioning_model = PositioningModel.for_objects_random_positions(
        object_perceptions=immutableset([ball, box]),
        sub_objects={},
        in_region_relations=in_region_relations,
    )
    # we will start with an aggressive learning rate
    optimizer = optim.SGD(positioning_model.parameters(), lr=1.0)
    # but will decrease it whenever the loss plateaus
    learning_rate_schedule = ReduceLROnPlateau(
        optimizer,
        "min",
        # decrease the rate if the loss hasn't improved in
        # 3 epochs
        patience=3,
    )
    iterations = 100
    for iteration in range(iterations):
        print(f"====== Iteration {iteration} ======")
        positioning_model.dump_object_positions(prefix="\t")
        # standard SGD step: forward (loss), backward, update, clear gradients
        loss = positioning_model()
        print(f"\tLoss: {loss.item()}")
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        learning_rate_schedule.step(loss)
    print("========= Final Positions ========")
    positioning_model.dump_object_positions(prefix="\t")
@attrs(frozen=True, auto_attribs=True)
class PositionsMap:
    """Convenience type: list of positions corresponding to objects in a scene."""

    # object name -> position tensor
    name_to_position: Mapping[str, torch.Tensor]
    # object name -> scale tensor for that object's bounding box
    name_to_scale: Mapping[str, torch.Tensor]

    def __len__(self) -> int:
        """Number of positioned objects."""
        return len(self.name_to_position)
def run_model(
    top_level_objects: ImmutableSet[ObjectPerception],
    sub_objects: Mapping[str, Mapping[str, LPoint3f]],
    in_region_map: Mapping[ObjectPerception, List[Region[ObjectPerception]]],
    object_scales: Mapping[str, Tuple[float, float, float]],
    *,
    frozen_objects: ImmutableSet[str],
    num_iterations: int = 200,
    yield_steps: Optional[int] = None,
    previous_positions: Optional[PositionsMap] = None,
) -> Iterable[PositionsMap]:
    r"""
    Construct a positioning model given a list of objects to position, return their position values.

    The model will return final positions either after the given number of iterations, or if the model
    converges in a position where it is unable to find a gradient to continue.
    Args:
        top_level_objects: set of top-level objects requested to be positioned
        sub_objects: mapping of sub-objects to their parent, and their relative position to that parent
        in_region_map: in-region relations for all top-level objects in this scene
        object_scales: mapping of object name to its (x, y, z) scale
        frozen_objects: names of objects whose positions are held fixed
        *num_iterations*: total number of SGD iterations.
        *yield_steps*: If provided, the current positions of all objects will be returned after this many steps
        previous_positions: If provided, attempt to use the positions contained within to initialize the scene
    Returns: PositionsMap: Map of object name -> Tensor (3,) of its position
    """
    # seed from the previous scene's positions when available; otherwise randomize
    if previous_positions:
        positioning_model = PositioningModel.for_scaled_objects_positioned(
            top_level_objects,
            sub_objects,
            in_region_relations=in_region_map,
            scale_map=object_scales,
            positions_map=previous_positions,
            frozen_objects=frozen_objects,
        )
    else:
        positioning_model = PositioningModel.for_scaled_objects_random_positions(
            top_level_objects,
            sub_objects,
            in_region_relations=in_region_map,
            scale_map=object_scales,
        )
    patience = 10
    # we will start with an aggressive learning rate
    optimizer = optim.SGD(positioning_model.parameters(), lr=0.25)
    # but will decrease it whenever the loss plateaus
    learning_rate_schedule = ReduceLROnPlateau(
        optimizer,
        "min",
        # decrease the rate if the loss hasn't improved in
        # 10 epochs
        patience=patience,
    )
    iterations = num_iterations
    # give up after three plateau-windows with no improvement at all
    quit_patience = patience * 3
    loss_eps = 1e-6
    prev_loss = 0.0
    epochs_without_improvement = 0
    for i in range(iterations):
        # print(f"====== Iteration {i} =======")
        loss = positioning_model()
        # if we lose any substantial gradient, stop the search
        if loss < LOSS_EPSILON:
            break
        # count epochs where the loss failed to drop by at least loss_eps
        # (a NaN loss also counts as no improvement)
        # NOTE(review): prev_loss starts at 0.0, so early epochs with a positive
        # loss always register as "no improvement" - confirm this is intended
        if isnan(loss) or prev_loss - loss < loss_eps:
            epochs_without_improvement += 1
        else:
            epochs_without_improvement = 0
        if epochs_without_improvement >= quit_patience:
            break
        prev_loss = loss
        # print(f"\tLoss: {loss.item()} bad: {epochs_without_improvement}")
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        learning_rate_schedule.step(loss)
        # positioning_model.dump_object_positions(prefix="\t")
        if yield_steps and i % yield_steps == 0:
            yield positioning_model.get_objects_positions()
    # this *looks* redundant, but it helps to ensure that the final positions are rendered
    yield positioning_model.get_objects_positions()
    return positioning_model.get_objects_positions()
@attrs(slots=True, eq=False)
class AxisAlignedBoundingBox:
"""
Defines a 3D Box that is oriented to world axes.
This box is defined by a center point of shape (3,),
and a scale (also of shape (3,)) which defines how a unit cube
with the given center will be scaled in each dimension to create
this box.
For example: a box centered at (0, 0, 0) and with a scale of (1, 1, 1) would have opposite
corners at (-1, -1, -1) and (1, 1, 1), giving the box a volume of 2(^3)
"""
center: torch.Tensor = attrib() # tensor shape: (3,)
scale: torch.Tensor = attrib() # tensor shape: (3, 3) - diagonal matrix
# rotation: torch.Tensor = attrib()
offset: Optional[torch.Tensor] = attrib()
def center_distance_from_point(self, point: torch.Tensor) -> torch.Tensor:
return torch.dist(self.center, point, 2)
def nearest_center_face_distance_from_point(
self, point: torch.Tensor
) -> torch.Tensor:
""" returns the distance from the closest face center to the given point
Args:
point: tensor (3,) of a coordinate to check distance from this box's face centers
Returns: tensor (1,)
"""
centers = self.get_face_centers()
return torch.min(PairwiseDistance().forward(centers, point.expand(6, 3)))
def nearest_center_face_point(self, point: torch.Tensor) -> torch.Tensor:
"""
Returns the closest face center of the box to the given point
Args:
point: tensor (3,) x,y,z coordinate: an arbitrary point in 3d space
Returns: tensor (3,) x,y,z coordinate: the center face of the box closest to the function argument
"""
face_centers = self.get_face_centers()
return face_centers[
torch.argmin(PairwiseDistance().forward(face_centers, point.expand(6, 3)))
]
def z_coordinate_of_lowest_corner(self) -> torch.Tensor:
"""
Get the position of the lowest corner of the box.
The third coordinate is interpreted as the height relative to the ground at z=0.
Returns: (1,) tensor
"""
# all corners are considered (in case of a rotated box)
corners = self.get_corners()
# find the minimum z value
min_corner_z = torch.min(
# gather just the z coordinate from the tensor of all corners
# turning (8,3) into (8,1)
torch.gather(
corners,
1,
# create a (8, 1) tensor filled with elements corresponding to the index of the Z coordinate
# these indices are used by torch.gather() to retrieve the correct elements from the corners tensor
torch.repeat_interleave(
torch.tensor([[2]]), # pylint: disable=not-callable
torch.tensor([8]), # pylint: disable=not-callable
dim=0,
),
)
)
return min_corner_z - ORIGIN[2]
@staticmethod
def create_at_random_position(
*, min_distance_from_origin: float, max_distance_from_origin: float
):
return AxisAlignedBoundingBox.create_at_random_position_scaled(
min_distance_from_origin=min_distance_from_origin,
max_distance_from_origin=max_distance_from_origin,
object_scale=torch.ones(3),
)
@staticmethod
def create_at_center_point(*, center: np.array):
return AxisAlignedBoundingBox(
Parameter(
torch.tensor(center, dtype=torch.float), # pylint: disable=not-callable
requires_grad=True,
),
torch.diag(torch.ones(3)),
offset=None,
)
@staticmethod
def create_at_center_point_scaled(
*,
center: np.array,
object_scale: torch.Tensor,
is_parameter: bool,
offset: Optional[np.array] = None,
):
# if this AABB does not have an offset, it is a top-level object
if offset is None:
return AxisAlignedBoundingBox(
Parameter(
torch.tensor( # pylint: disable=not-callable
center, dtype=torch.float
),
requires_grad=is_parameter,
),
torch.diag(object_scale),
offset,
)
# otherwise: this is a sub-object. It isn't a parameter and does have an offset from its parent object
return AxisAlignedBoundingBox(
torch.tensor(center, dtype=torch.float), # pylint: disable=not-callable
torch.diag(object_scale),
torch.tensor(offset, dtype=torch.float), # pylint: disable=not-callable
)
@staticmethod
def create_at_random_position_scaled(
*,
min_distance_from_origin: float,
max_distance_from_origin: float,
object_scale: torch.Tensor,
):
check_arg(min_distance_from_origin > 0.0)
check_arg(min_distance_from_origin < max_distance_from_origin)
# we first generate a random point on the unit sphere by
# generating a random vector in cube...
center = np.random.randn(3, 1).squeeze()
# and then normalizing.
center /= np.linalg.norm(center)
# then we scale according to the distances above
scale_factor = np.random.uniform(
min_distance_from_origin, max_distance_from_origin
)
center *= scale_factor
return AxisAlignedBoundingBox(
Parameter(
torch.tensor(center, dtype=torch.float), # pylint: disable=not-callable
requires_grad=True,
),
torch.diag(object_scale),
offset=None,
)
def get_corners(self) -> torch.Tensor:
return self.center.expand(8, 3) + torch.tensor( # pylint: disable=not-callable
[
[-1, -1, -1],
[1, -1, -1],
[-1, 1, -1],
[1, 1, -1],
[-1, -1, 1],
[1, -1, 1],
[-1, 1, 1],
[1, 1, 1],
],
dtype=torch.float,
).matmul(self.scale)
# see https://github.com/pytorch/pytorch/issues/24807 re: pylint issue
def get_face_centers(self) -> torch.Tensor:
"""
Returns the center point of each of the box's 6 faces.
Returns: tensor (6, 3)
"""
corners = self.get_corners()
return torch.stack( # pylint: disable=not-callable
[
torch.div(corners[0] + corners[6], 2), # left face
torch.div(corners[0] + corners[5], 2), # backward face
torch.div(corners[1] + corners[7], 2), # right face
torch.div(corners[2] + corners[7], 2), # forward face
torch.div(corners[0] + corners[3], 2), # bottom face
torch.div(corners[4] + corners[7], 2), # top face
],
dim=0,
)
def _minus_ones_corner(self) -> torch.Tensor:
"""
Corner in the direction of the negative x, y, z axes from center
Returns: Tensor (3,)
"""
return self.center + torch.tensor( # pylint: disable=not-callable
[-1, -1, -1], dtype=torch.float
).matmul(self.scale)
# functions returning normal vectors from three perpendicular faces of the box
def right_face_normal_vector(self) -> torch.Tensor:
"""
Normal vector for the right face of the box (toward positive x axis when aligned to world axes)
Returns: Tensor (3,)
"""
diff = (
self.center
+ torch.tensor( # pylint: disable=not-callable
[1, -1, -1], dtype=torch.float
).matmul(self.scale)
- self._minus_ones_corner()
)
return diff / torch.norm(diff)
def forward_face_normal_vector(self) -> torch.Tensor:
"""
Normal vector for the forward face of the box (toward positive y axis when aligned to world axes)
Returns: Tensor (3,)
"""
diff = (
self.center
+ torch.tensor( # pylint: disable=not-callable
[-1, 1, -1], dtype=torch.float
).matmul(self.scale)
- self._minus_ones_corner()
)
return diff / torch.norm(diff)
def up_face_normal_vector(self) -> torch.Tensor:
"""
Normal vector for the up face of the box (toward positive z axis when aligned to world axes)
Returns: Tensor (3,)
"""
diff = (
self.center
+ torch.tensor( # pylint: disable=not-callable
[-1, -1, 1], dtype=torch.float
).matmul(self.scale)
- self._minus_ones_corner()
)
return diff / torch.norm(diff)
def face_normal_vectors(self) -> torch.Tensor:
    """
    Stacks the face norms from the right, forward, and up faces of the box
    Returns: Tensor (3,3)
    """
    # in axis-aligned case these are always the same
    normals = [
        self.right_face_normal_vector(),
        self.forward_face_normal_vector(),
        self.up_face_normal_vector(),
    ]
    return torch.stack(normals)
def corners_onto_axes_projections(self, axes: torch.Tensor) -> torch.Tensor:
    """
    Projects each of 8 corners onto each of three axes.
    Args:
        axes: (3,3) tensor -> the three axes we are projecting points onto
    Returns:
        (3, 8) tensor -> each point projected onto each of three dimensions
    """
    check_arg(axes.shape == (3, 3))
    # (3,3) axes times (3,8) corner columns -> one projection per axis/corner
    corner_columns = self.get_corners().transpose(0, 1)
    return axes.matmul(corner_columns)
class PositioningModel(torch.nn.Module):  # type: ignore
    """
    Model that combines multiple constraints on AxisAlignedBoundingBoxes.

    Each optimized object's bounding-box center is registered as a parameter,
    and ``forward()`` returns the summed constraint penalties, so a torch
    optimizer can move the boxes to minimize the total penalty.
    """

    def __init__(
        self,
        object_perception_to_bounding_box: Mapping[
            ObjectPerception, AxisAlignedBoundingBox
        ],
        object_perception_to_excluded_bounding_box: Mapping[
            ObjectPerception, AxisAlignedBoundingBox
        ],
        object_to_sub_object_to_bounding_box: Mapping[
            str, Mapping[str, AxisAlignedBoundingBox]
        ],
        in_region_relations: Mapping[ObjectPerception, List[Region[ObjectPerception]]],
    ) -> None:
        """
        Args:
            object_perception_to_bounding_box: objects whose positions get optimized.
            object_perception_to_excluded_bounding_box: objects excluded from
                optimization but still used in collision / region penalties.
            object_to_sub_object_to_bounding_box: per-parent sub-object boxes,
                keyed by the parent object's debug handle.
            in_region_relations: the regions each object is supposed to occupy.
        """
        super().__init__()
        self.object_perception_to_bounding_box = object_perception_to_bounding_box
        self.object_perception_to_excluded_bounding_box = (
            object_perception_to_excluded_bounding_box
        )
        self.object_to_sub_object_to_bounding_box = object_to_sub_object_to_bounding_box
        self.in_region_relations = in_region_relations
        self.object_bounding_boxes = immutableset(
            object_perception_to_bounding_box.values()
        )
        self.excluded_bounding_boxes = immutableset(
            object_perception_to_excluded_bounding_box.values()
        )
        # collision checks run over optimized and excluded boxes alike
        self.included_and_excluded_boxes = self.object_bounding_boxes.union(
            self.excluded_bounding_boxes
        )
        # flatten the nested sub-object mapping into handle -> box
        # NOTE(review): assumes sub-object handles are unique across parents;
        # duplicates would silently overwrite -- confirm.
        sub_object_to_bounding_box = {}
        for sub_obj_dict in self.object_to_sub_object_to_bounding_box.values():
            for sub_obj_handle, aabb in sub_obj_dict.items():
                sub_object_to_bounding_box[sub_obj_handle] = aabb
        for (
            object_perception,
            bounding_box,
        ) in self.object_perception_to_bounding_box.items():
            # suppress mypy error about supplying a Tensor where it expects a Parameter
            self.register_parameter(  # type: ignore
                object_perception.debug_handle, bounding_box.center
            )
        # the individual penalty modules combined in forward()
        self.collision_penalty = CollisionPenalty()
        self.below_ground_penalty = BelowGroundPenalty()
        self.weak_gravity_penalty = WeakGravityPenalty(
            object_perception_to_bounding_box, in_region_relations
        )
        self.in_region_penalty = InRegionPenalty(
            object_perception_to_bounding_box,
            object_perception_to_excluded_bounding_box,
            sub_object_to_bounding_box,
            in_region_relations,
        )

    @staticmethod
    def for_objects_random_positions(
        object_perceptions: AbstractSet[ObjectPerception],
        sub_objects: Mapping[str, Mapping[str, LPoint3f]],
        *,
        in_region_relations: Mapping[ObjectPerception, List[Region[ObjectPerception]]],
    ) -> "PositioningModel":
        """Factory: every object starts at a random position (no custom scales)."""
        objects_to_bounding_boxes: ImmutableDict[
            ObjectPerception, AxisAlignedBoundingBox
        ] = immutabledict(
            (
                object_perception,
                AxisAlignedBoundingBox.create_at_random_position(
                    min_distance_from_origin=10, max_distance_from_origin=20
                ),
            )
            for object_perception in object_perceptions
        )
        sub_object_mapping = PositioningModel._create_sub_objs_to_bounding_boxes(
            sub_objects
        )
        return PositioningModel(
            objects_to_bounding_boxes, {}, sub_object_mapping, in_region_relations
        )

    @staticmethod
    def for_scaled_objects_random_positions(
        object_perceptions: AbstractSet[ObjectPerception],
        sub_objects: Mapping[str, Mapping[str, LPoint3f]],
        *,
        in_region_relations: Mapping[ObjectPerception, List[Region[ObjectPerception]]],
        scale_map: Mapping[str, Tuple[float, float, float]],
    ) -> "PositioningModel":
        """Factory: random positions with per-object scales from `scale_map`."""
        return PositioningModel._scaled_objects_helper(
            object_perceptions=object_perceptions,
            sub_objects=sub_objects,
            in_region_relations=in_region_relations,
            scale_map=scale_map,
            positions_map=None,
        )

    @staticmethod
    def for_scaled_objects_positioned(
        object_perceptions: AbstractSet[ObjectPerception],
        sub_objects: Mapping[str, Mapping[str, LPoint3f]],
        *,
        in_region_relations: Mapping[ObjectPerception, List[Region[ObjectPerception]]],
        scale_map: Mapping[str, Tuple[float, float, float]],
        positions_map: PositionsMap,
        frozen_objects: ImmutableSet[str],
    ) -> "PositioningModel":
        """Factory: start from known positions; `frozen_objects` are excluded
        from optimization."""
        return PositioningModel._scaled_objects_helper(
            object_perceptions=object_perceptions,
            sub_objects=sub_objects,
            in_region_relations=in_region_relations,
            scale_map=scale_map,
            positions_map=positions_map,
            frozen_objects=frozen_objects,
        )

    @staticmethod
    def _scaled_objects_helper(
        object_perceptions: AbstractSet[ObjectPerception],
        sub_objects: Mapping[str, Mapping[str, LPoint3f]],
        *,
        in_region_relations: Mapping[ObjectPerception, List[Region[ObjectPerception]]],
        scale_map: Mapping[str, Tuple[float, float, float]],
        positions_map: Optional[PositionsMap],
        frozen_objects: Optional[ImmutableSet[str]] = None,
    ) -> "PositioningModel":
        """Shared factory body: build each object's box (positioned when
        `positions_map` is given, otherwise random), routing frozen objects
        into the excluded (non-optimized) mapping."""
        sub_object_mapping = PositioningModel._create_sub_objs_to_bounding_boxes(
            sub_objects
        )
        dict_items: List[Tuple[ObjectPerception, AxisAlignedBoundingBox]] = []
        excluded_main_objects: List[Tuple[ObjectPerception, AxisAlignedBoundingBox]] = []
        for object_perception in object_perceptions:
            # print(f"Adding {object_perception.debug_handle} to model")
            # the scale map is keyed by the handle prefix before the first "_"
            model_lookup = object_perception.debug_handle.split("_")[0]
            try:
                scale = scale_map[model_lookup]
            except KeyError:
                print(f"couldn't find scale for {object_perception.debug_handle}")
                scale = (1.0, 1.0, 1.0)
            is_parameter = True
            if positions_map:
                # selectively freeze particular objects from positioning model
                if frozen_objects and object_perception.debug_handle in frozen_objects:
                    is_parameter = False
                # print(
                #     f"Creating AABB as parameter: {is_parameter}\ncenter: {positions_map.name_to_position[object_perception.debug_handle]}\nscale: {scale}"
                # )
                bounding_box = AxisAlignedBoundingBox.create_at_center_point_scaled(
                    center=positions_map.name_to_position[object_perception.debug_handle],
                    object_scale=torch.tensor(  # pylint: disable=not-callable
                        [scale[0], scale[1], scale[2]]
                    ),
                    is_parameter=is_parameter,
                )
            else:
                bounding_box = AxisAlignedBoundingBox.create_at_random_position_scaled(
                    min_distance_from_origin=10,
                    max_distance_from_origin=20,
                    object_scale=torch.tensor(  # pylint: disable=not-callable
                        [scale[0], scale[1], scale[2]]
                    ),
                )
            if is_parameter:
                dict_items.append((object_perception, bounding_box))
            else:
                excluded_main_objects.append((object_perception, bounding_box))
        return PositioningModel(
            immutabledict(dict_items),
            immutabledict(excluded_main_objects),
            sub_object_mapping,
            in_region_relations,
        )

    @staticmethod
    def _create_sub_objs_to_bounding_boxes(
        sub_objects: Mapping[str, Mapping[str, LPoint3f]],
    ) -> Mapping[str, Mapping[str, AxisAlignedBoundingBox]]:
        """Build non-parameter boxes for each sub-object, recording the
        sub-object's offset from its parent."""
        # create bounding boxes for sub-objects
        parent_to_child: DefaultDict[
            str, Mapping[str, AxisAlignedBoundingBox]
        ] = defaultdict(dict)
        for parent_obj, sub_obj_map in sub_objects.items():
            sub_object_to_aabb = {}
            for sub_obj, offset in sub_obj_map.items():
                sub_object_to_aabb[
                    sub_obj
                ] = AxisAlignedBoundingBox.create_at_center_point_scaled(
                    center=np.array([offset.x, offset.y, offset.z], dtype=float),
                    # TODO: FIX THIS WITH REAL SCALE
                    object_scale=torch.ones(3),  # pylint: disable=not-callable
                    is_parameter=False,
                    offset=np.array([offset.x, offset.y, offset.z], dtype=float),
                )
            parent_to_child[parent_obj] = sub_object_to_aabb
        return parent_to_child

    def forward(self):  # pylint: disable=arguments-differ
        """Return the total penalty over all constraints (the value minimized
        by the optimizer)."""
        # keep sub-object boxes glued to their parents before scoring
        self._update_subobject_positions()
        collision_penalty = sum(
            self.collision_penalty(box1, box2)
            for (box1, box2) in combinations(self.included_and_excluded_boxes, 2)
        )
        below_ground_penalty = sum(
            self.below_ground_penalty(box) for box in self.object_bounding_boxes
        )
        weak_gravity_penalty = sum(
            self.weak_gravity_penalty(
                bounding_box, immutableset(self.in_region_relations[object_perception])
            )
            for object_perception, bounding_box in self.object_perception_to_bounding_box.items()
            if object_perception in self.in_region_relations
        )
        in_region_penalty = sum(
            self.in_region_penalty(
                object_perception,
                immutableset(self.in_region_relations[object_perception]),
            )
            for object_perception in self.object_perception_to_bounding_box
            if object_perception in self.in_region_relations
        )
        # print(
        #     f"collision penalty: {collision_penalty}"
        #     f"\nout of bounds penalty: {below_ground_penalty}"
        #     f"\ngravity penalty: {weak_gravity_penalty}"
        #     f"\nin-region penalty: {in_region_penalty}"
        # )
        return (
            collision_penalty
            + below_ground_penalty
            + weak_gravity_penalty
            + in_region_penalty
        )

    def dump_object_positions(self, *, prefix: str = "") -> None:
        """Print each optimized object's center and scale (debug aid)."""
        for (
            object_perception,
            bounding_box,
        ) in self.object_perception_to_bounding_box.items():
            print(
                f"{prefix}{object_perception.debug_handle} = {bounding_box.center.data}\n{prefix}scale:{bounding_box.scale.data}"
            )
        # print("Sub-object bounding box positions:")
        # for (
        #     main_object,
        #     sub_object_to_bounding_box,
        # ) in self.object_to_sub_object_to_bounding_box.items():
        #     print(main_object)
        #     for (sub_object, bounding_box) in sub_object_to_bounding_box.items():
        #         print(
        #             f"\t{sub_object} = {bounding_box.center.data}\n\t{prefix}scale:{bounding_box.scale.data}"
        #         )

    def get_object_position(self, obj: ObjectPerception) -> torch.Tensor:
        """
        Retrieves the (center) position of an AdamObject contained in this model.
        Args:
            obj: AdamObject whose position is requested
        Returns: (3,) tensor of the requested object's position.
        Raises KeyError if an AdamObject not contained in this model is queried.
        """
        return self.object_perception_to_bounding_box[obj].center.data

    def get_objects_positions(self) -> PositionsMap:
        """
        Retrieves positions of all AdamObjects contained in this model.
        Returns: PositionsList

        Includes both optimized and excluded (frozen) objects.
        """
        position_pairs: List[Tuple[str, torch.Tensor]] = []
        scale_pairs: List[Tuple[str, torch.Tensor]] = []
        for (
            object_perception,
            bounding_box,
        ) in self.object_perception_to_bounding_box.items():
            position_pairs.append(
                (object_perception.debug_handle, bounding_box.center.data)
            )
            scale_pairs.append((object_perception.debug_handle, bounding_box.scale.data))
        for (
            object_perception,
            bounding_box,
        ) in self.object_perception_to_excluded_bounding_box.items():
            position_pairs.append(
                (object_perception.debug_handle, bounding_box.center.data)
            )
            scale_pairs.append((object_perception.debug_handle, bounding_box.scale.data))
        return PositionsMap(immutabledict(position_pairs), immutabledict(scale_pairs))

    def _update_subobject_positions(self) -> None:
        """Recompute each sub-object's center as parent center + stored offset."""
        # NOTE(review): indexing by debug_handle relies on the sub-object
        # mapping containing (or defaulting, when built by the factories'
        # defaultdict) an entry for every main object -- confirm.
        for main_object, main_aabb in self.object_perception_to_bounding_box.items():
            for _, sub_aabb in self.object_to_sub_object_to_bounding_box[
                main_object.debug_handle
            ].items():
                sub_aabb.center = sub_aabb.offset + main_aabb.center
class BelowGroundPenalty(nn.Module):  # type: ignore
    """
    Model that penalizes boxes lying outside of the scene (i.e. below the ground plane) or off-camera)
    """

    def __init__(self) -> None:  # pylint: disable=useless-super-delegation
        # BUGFIX: was misspelled `__init`, so it was dead code and never ran
        # as the constructor (nn.Module.__init__ still ran implicitly, so
        # observable behavior is unchanged by the rename).
        super().__init__()

    def forward(  # type: ignore
        self, bounding_box: "AxisAlignedBoundingBox"
    ):  # pylint: disable=arguments-differ
        """Return 0 when the box's lowest corner is at or above the ground
        plane, otherwise the (positive) depth below ground as a linear penalty."""
        distance_above_ground = bounding_box.z_coordinate_of_lowest_corner()
        if distance_above_ground >= 0:
            return 0
        else:
            return -distance_above_ground
class WeakGravityPenalty(nn.Module):  # type: ignore
    """
    Model that penalizes boxes that are not resting on the ground.

    The penalty grows linearly with the size of the constraint violation, so
    its gradient is constant (like real gravity).
    """

    # TODO: exempt birds from this constraint https://github.com/isi-vista/adam/issues/485
    def __init__(
        self,
        object_perception_to_bounding_box: Mapping[
            ObjectPerception, AxisAlignedBoundingBox
        ],
        in_region_relations: Mapping[ObjectPerception, List[Region[ObjectPerception]]],
    ) -> None:  # pylint: disable=useless-super-delegation
        super().__init__()
        self.object_perception_to_bounding_box = object_perception_to_bounding_box
        self.in_region_relations = in_region_relations
        # canonical "resting on the ground" reference region
        self.ground_region = Region(
            GROUND_PERCEPTION, EXTERIOR_BUT_IN_CONTACT, GRAVITATIONAL_UP
        )

    def forward(  # type: ignore
        self,
        bounding_box: AxisAlignedBoundingBox,
        designated_regions: ImmutableSet[Region[ObjectPerception]],
    ):  # pylint: disable=arguments-differ
        """Return a scalar penalty for violating the box's ground-relative
        distance constraint; 0.0 when satisfied or when no ground region applies."""
        ground_region = None
        for region in designated_regions:
            if region.reference_object.debug_handle == "the ground":
                ground_region = region
        # if this object is not supposed to be on the ground, don't apply the gravity constraint.
        if not ground_region:
            return 0.0
        distance_above_ground = bounding_box.z_coordinate_of_lowest_corner()
        if ground_region.distance == EXTERIOR_BUT_IN_CONTACT:
            if distance_above_ground <= 0:
                return 0.0
            else:
                # a linear penalty leads to a constant gradient, just like real gravity
                return GRAVITY_PENALTY * distance_above_ground
        elif ground_region.distance == DISTAL:
            if distance_above_ground < DISTAL_MIN_DISTANCE:
                # BUGFIX: the shortfall must be computed before scaling;
                # previously this read `GRAVITY_PENALTY * DISTAL_MIN_DISTANCE
                # - distance_above_ground`, which scaled only the constant term.
                return GRAVITY_PENALTY * (DISTAL_MIN_DISTANCE - distance_above_ground)
        elif ground_region.distance == PROXIMAL:
            if PROXIMAL_MIN_DISTANCE <= distance_above_ground <= PROXIMAL_MAX_DISTANCE:
                return 0.0
            elif distance_above_ground < PROXIMAL_MIN_DISTANCE:
                # BUGFIX: parenthesized for the same precedence reason as above
                return GRAVITY_PENALTY * (PROXIMAL_MIN_DISTANCE - distance_above_ground)
            else:
                return GRAVITY_PENALTY * (distance_above_ground - PROXIMAL_MAX_DISTANCE)
        return 0.0
class CollisionPenalty(nn.Module):  # type: ignore
    """
    Model that penalizes boxes that are colliding with other boxes.

    Works by projecting both boxes' corners onto one box's three face normals
    and checking whether the projected extents overlap on every axis.
    """

    def __init__(self):  # pylint: disable=useless-super-delegation
        super().__init__()

    def forward(  # type: ignore
        self,
        bounding_box_1: AxisAlignedBoundingBox,
        bounding_box_2: AxisAlignedBoundingBox,
    ):  # pylint: disable=arguments-differ
        """Return a positive scalar penalty when the boxes overlap, else zero."""
        # get face norms from one of the boxes:
        face_norms = bounding_box_2.face_normal_vectors()
        return CollisionPenalty.overlap_penalty(
            CollisionPenalty.get_min_max_overlaps(
                CollisionPenalty.get_min_max_corner_projections(
                    bounding_box_1.corners_onto_axes_projections(face_norms)
                ),
                CollisionPenalty.get_min_max_corner_projections(
                    bounding_box_2.corners_onto_axes_projections(face_norms)
                ),
            )
        )

    @staticmethod
    def get_min_max_corner_projections(projections: torch.Tensor):
        """
        Retrieve the minimum and maximum corner projection (min/max extent in that dimension) for each axis
        Args:
            projections: Tensor(3, 8) -> corner projections onto each of three dimensions
        Returns:
            Tensor(3, 2) -> (min, max) values for each of three dimensions
        """
        check_arg(projections.shape == (3, 8))
        # torch.min/max along dim 1 return (values, indices); the values are
        # exactly the per-axis extrema.  (The previous implementation routed
        # the indices back through torch.take, recomputing the same values
        # with extra flat-index arithmetic.)
        minima = torch.min(projections, 1)[0]
        maxima = torch.max(projections, 1)[0]
        return torch.stack((minima, maxima), 1)

    @staticmethod
    def get_min_max_overlaps(
        min_max_proj_0: torch.Tensor, min_max_proj_1: torch.Tensor
    ) -> torch.Tensor:
        """
        Given min/max corner projections onto 3 axes from two different objects,
        return an interval for each dimension representing the degree of overlap or
        separation between the two objects.
        Args:
            min_max_proj_0: Tensor(3,2) min_max_projections for box 0
            min_max_proj_1: Tensor(3,2) min_max projections for box 1
        Returns:
            (3, 2) tensor -> ranges (start, end) of overlap OR separation in each of three dimensions.
            If (start - end) is positive, this indicates that the boxes do not overlap along this dimension,
            otherwise, a negative value indicates an overlap along that dimension.
        """
        check_arg(min_max_proj_0.shape == (3, 2))
        check_arg(min_max_proj_1.shape == (3, 2))
        # per axis, the interval is bounded by the larger of the two minima
        # and the smaller of the two maxima (elementwise max/min replaces the
        # previous gather/stack/take round trip)
        maximum_mins = torch.max(min_max_proj_0[:, 0], min_max_proj_1[:, 0])
        minimum_maxes = torch.min(min_max_proj_0[:, 1], min_max_proj_1[:, 1])
        return torch.stack((maximum_mins, minimum_maxes), 1)

    @staticmethod
    def overlap_penalty(min_max_overlaps: torch.Tensor) -> torch.Tensor:
        """
        Return penalty depending on degree of overlap between two 3d boxes.
        Args:
            min_max_overlaps: (3, 2) tensor -> intervals describing degree of overlap between the two boxes
        Returns: Tensor with a positive scalar of the collision penalty, or tensor with zero scalar
        for no collision.
        """
        check_arg(min_max_overlaps.shape == (3, 2))
        # subtract each minimum max from each maximum min:
        overlap_distance = min_max_overlaps[:, 0] - min_max_overlaps[:, 1]
        # as long as at least one dimension's overlap distance is non-negative
        # (not overlapping), the boxes are not colliding
        if (overlap_distance >= 0).any():
            return torch.zeros(1, dtype=torch.float)
        # otherwise the penetration distance is the maximum negative value
        # (the smallest translation that would disentangle the two boxes);
        # overlap is represented by a negative value, which we return as a positive penalty
        return overlap_distance.max() * -1 * COLLISION_PENALTY
class InRegionPenalty(nn.Module):  # type: ignore
    """ Model that penalizes boxes for not adhering to relational (distance and direction)
    constraints with other boxes -- for being outside of the Region it is supposed to occupy
    """

    def __init__(
        self,
        object_perception_to_bounding_box: Mapping[
            ObjectPerception, AxisAlignedBoundingBox
        ],
        object_perception_to_excluded_bounding_box: Mapping[
            ObjectPerception, AxisAlignedBoundingBox
        ],
        sub_object_to_bounding_box: Mapping[str, AxisAlignedBoundingBox],
        in_region_relations: Mapping[ObjectPerception, List[Region[ObjectPerception]]],
    ) -> None:  # pylint: disable=useless-super-delegation
        """Index every known box (optimized, excluded, and sub-object) by its
        debug handle so regions can be resolved by reference-object handle."""
        super().__init__()
        self.handle_to_bounding_box: Mapping[str, AxisAlignedBoundingBox] = {}
        for object_perception, aabb in object_perception_to_bounding_box.items():
            self.handle_to_bounding_box[object_perception.debug_handle] = aabb
        for (
            object_perception,
            excluded_aabb,
        ) in object_perception_to_excluded_bounding_box.items():
            self.handle_to_bounding_box[object_perception.debug_handle] = excluded_aabb
        for sub_object, aabb in sub_object_to_bounding_box.items():
            self.handle_to_bounding_box[sub_object] = aabb
        self.in_region_relations = in_region_relations

    def forward(  # type: ignore
        self,
        target_object: ObjectPerception,
        designated_region: ImmutableSet[Region[ObjectPerception]],
    ):  # pylint: disable=arguments-differ
        """Sum the penalties for `target_object` over each of its designated
        regions; ground-relative regions are handled by other penalty modules."""
        # print(f"{target_object.debug_handle} positioned w/r/t {designated_region}")
        # return 0 if object has no relative positions to apply
        if not designated_region:
            # print(f"{target_object.debug_handle} has no relative positioning constraints")
            return torch.zeros(1)
        return sum(
            self.penalty(
                self.handle_to_bounding_box[target_object.debug_handle],
                self.handle_to_bounding_box[region.reference_object.debug_handle],
                region,
            )
            # positioning w/r/t the ground is handled by other constraints
            for region in designated_region
            if region.reference_object.debug_handle != "the ground"
        )

    def penalty(
        self,
        target_box: AxisAlignedBoundingBox,
        reference_box: AxisAlignedBoundingBox,
        region: Region[ObjectPerception],
    ):
        """
        Assign a penalty for target_box if it does not comply with its relation to reference_box according to
        the degree of difference between the expected angle between the boxes and the expected distance between
        the two objects.
        Args:
            target_box: box to be penalized if positioned outside of region
            reference_box: box referred to by region
            region: region that target_box should be in
        Returns: Tensor(1,) with penalty
        """
        # print(
        #     f"TARGET: {target_box.center} REFERENCE: {reference_box.center} REGION:{region}"
        # )
        assert region.distance is not None
        # get direction that box 1 should be in w/r/t box 2
        # TODO: allow for addressee directions
        # if direction is not provided, this vector is zero
        direction_vector = self.direction_as_unit_vector(region.direction, reference_box)
        current_direction_from_reference_to_target = (
            target_box.center - reference_box.nearest_center_face_point(target_box.center)
        )
        # angle_between returns None (zero vectors) and acos can produce NaN;
        # either case is treated as zero angular error
        angle = angle_between(
            direction_vector, current_direction_from_reference_to_target
        )
        if not angle or torch.isnan(angle):
            angle = torch.zeros(1, dtype=torch.float)
        distance = target_box.nearest_center_face_distance_from_point(
            reference_box.center
        )
        # distal has a minimum distance away from object to qualify
        if region.distance == DISTAL:
            if distance < DISTAL_MIN_DISTANCE:
                distance_penalty = DISTAL_MIN_DISTANCE - distance
            else:
                distance_penalty = torch.zeros(1)
        # proximal has a min/max range
        elif region.distance == PROXIMAL:
            if PROXIMAL_MIN_DISTANCE <= distance <= PROXIMAL_MAX_DISTANCE:
                distance_penalty = torch.zeros(1)
            elif distance < PROXIMAL_MIN_DISTANCE:
                distance_penalty = PROXIMAL_MIN_DISTANCE - distance
            else:
                distance_penalty = distance - PROXIMAL_MAX_DISTANCE
        # exterior but in contact has a tiny epsilon of acceptable distance
        # assuming that collisions are handled elsewhere
        elif region.distance == EXTERIOR_BUT_IN_CONTACT:
            if distance > EXTERIOR_BUT_IN_CONTACT_EPS:
                distance_penalty = distance * 1.5
            else:
                distance_penalty = torch.zeros(1)
        else:
            raise RuntimeError(
                "Currently unable to support Interior distances w/ positioning solver"
            )
        # NOTE(review): an angle of ~PI means the target lies exactly opposite
        # the desired direction, yet only the distance penalty is applied in
        # that case -- confirm this special case is intentional.
        if angle and angle.allclose(PI):
            return distance_penalty * DISTANCE_PENALTY
        # print(f"ANGLE: {angle}, DISTANCE: {distance}")
        return angle * ANGLE_PENALTY + distance_penalty * DISTANCE_PENALTY

    def direction_as_unit_vector(
        self,
        direction: Optional[Direction[ObjectPerception]],
        direction_reference: AxisAlignedBoundingBox,
        addressee_reference: Optional[AxisAlignedBoundingBox] = None,
    ) -> torch.Tensor:
        """
        Convert a direction to a unit vector (3,) tensor to represent the direction.
        Args:
            direction: Direction object
            direction_reference: AABB corresponding to the object referenced by the direction parameter
            addressee_reference: AABB corresponding the an addressee referenced by the direction parameter
        Returns: (3,) Tensor. A unit vector describing a direction.

        Raises NotImplementedError for direction kinds not handled below.
        """
        # no direction constraint -> zero vector (yields zero angular error)
        if direction is None:
            return torch.zeros(3)
        # special case: gravity
        if direction == GRAVITATIONAL_UP:
            return torch.tensor(  # pylint: disable=not-callable
                [0, 0, 1], dtype=torch.float
            )
        elif direction == GRAVITATIONAL_DOWN:
            return torch.tensor(  # pylint: disable=not-callable
                [0, 0, -1], dtype=torch.float
            )
        # horizontal axes mapped to world axes (as one of many possible visualizations)
        # We make the executive decision to map a horizontal relationship onto the X axis,
        # the better to view from a static camera position.
        # TODO: change to reflect distance from box extents https://github.com/isi-vista/adam/issues/496
        if isinstance(direction.relative_to_axis, HorizontalAxisOfObject):
            if direction.positive:
                return torch.tensor(  # pylint: disable=not-callable
                    [1, 0, 0], dtype=torch.float
                )
            else:
                return torch.tensor(  # pylint: disable=not-callable
                    [-1, 0, 0], dtype=torch.float
                )
        # in this case, calculate a vector facing toward or away from the addressee
        if (
            isinstance(direction.relative_to_axis, FacingAddresseeAxis)
            and addressee_reference is not None
        ):
            if direction.positive:
                # pointing toward addressee
                return addressee_reference.center - direction_reference.center
            else:
                # pointing away from addressee
                return direction_reference.center - addressee_reference.center
        raise NotImplementedError(f"direction_to_world called with {direction}")
def angle_between(vector0: torch.Tensor, vector1: torch.Tensor) -> Optional[torch.Tensor]:
    """
    Returns angle between two vectors (tensors (3,) )
    Args:
        vector0: tensor (3,)
        vector1: tensor (3,)
    Returns: tensor (1,) with the angle (in radians) between the two vectors.
    Returns None if either input is a zero vector (which has no direction).
    """
    # a zero vector has no defined direction, so no angle can be computed
    if not torch.any(vector0) or not torch.any(vector1):
        return None
    normed0 = vector0 / torch.norm(vector0, 2)
    normed1 = vector1 / torch.norm(vector1, 2)
    return normed0.dot(normed1).acos()
if __name__ == "__main__":
    # script entry point; `main` is presumably defined earlier in this module
    # (not visible in this excerpt) -- TODO confirm
    main()
| Tubbz-alt/adam | adam/visualization/positioning.py | positioning.py | py | 48,738 | python | en | code | null | github-code | 1 | [
{
"api_name": "torch.zeros",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_num... |
14466852358 | import docker
import os
import pytest
import subprocess
import sys
# Minimal C++ program compiled by the tests below; uses <string>/<iostream>
# and std::to_string so the C++ standard library (and its ABI) is exercised.
SIMPLE_CPP_SOURCE = '''
#include <string>
#include <iostream>
int main()
{
std::cout << "Hello World" << std::endl;
int a = 5;
std::cout << "Five: " << a << " / " << std::to_string(a) << std::endl;
return 0;
}
'''

# Minimal C counterpart (raw string so the backslash-n in printf reaches the
# C compiler intact).
SIMPLE_C_SOURCE = r'''
#include <string.h>
#include <stdio.h>
int main()
{
printf("Hello World\n");
int a = 5;
printf("Five: %d\n", a);
return 0;
}
'''
class TestContainer(object):
    """Integration tests for the build container, driven by the ``bc`` fixture
    (a handle to a running container with the host cwd mounted at /mnt/cwd)."""

    @pytest.fixture(autouse=True)
    def goto_temp(self, tmpdir):
        ''' ensures that each test has its own temp directory to run in '''
        print("\nRunning Test In tmpdir: %s" % str(tmpdir))
        tmpdir.chdir()  # change to pytest-provided temporary directory

    def _compile_and_run(self, bc, source, source_name, compiler_and_flags):
        """Shared body for the compile/run tests (previously copy-pasted six
        times): write `source` to `source_name`, compile it inside the
        container with `compiler_and_flags` (e.g. 'g++ -m32 -std=c++17'),
        then execute the binary both in the container and on the host.
        Files are cleaned up only on success, leaving them for debugging."""
        with open(source_name, 'w') as f:
            f.write(source)
        bc.cmd('%s /mnt/cwd/%s -o /mnt/cwd/test' % (compiler_and_flags, source_name))
        bc.cmd('chmod +x /mnt/cwd/test')
        bc.cmd('/mnt/cwd/test')
        assert os.system('./test') == 0
        os.remove(source_name)
        os.remove('test')

    def test_get_container_info(self, bc):
        ''' used to get some info that may be useful to future debug '''
        bc.cmd('update-alternatives --get-selections')
        # no idea how to do this just with the python docker sdk
        print("Image Size: %s" % subprocess.check_output('docker images %s --format "{{.Size}}"' % bc.image.tags[0], shell=True).decode())

    def test_volume_mount(self, bc):
        # a file touched at /mnt/cwd inside the container must appear in the
        # host cwd (os.remove also fails the test if it does not)
        bc.cmd('touch /mnt/cwd/test')
        os.remove(os.path.join(os.getcwd(), 'test'))

    def test_has_old_glibc(self, bc):
        output = bc.cmd('ldd --version')
        assert 'EGLIBC 2.11.1' in output

    def test_gcc_version(self, bc):
        output = bc.cmd('gcc --version')
        assert '8.2.0' in output

    def test_gpp_version(self, bc):
        output = bc.cmd('g++ --version')
        assert '8.2.0' in output

    def test_git_can_clone_https(self, bc):
        # bc.cmd raising is the failure condition; output is not needed
        bc.cmd('git clone https://github.com/csm10495/ginst.git')

    def test_curl_version(self, bc):
        output = bc.cmd('curl --version')
        assert 'unreleased' in output

    def test_openssl_version(self, bc):
        output = bc.cmd('openssl version')
        assert '1.1.1' in output

    def test_simple_cpp_exe_old_abi_32(self, bc):
        # -D_GLIBCXX_USE_CXX11_ABI=0 is implicit with our specially built gcc
        self._compile_and_run(bc, SIMPLE_CPP_SOURCE, 'test.cpp', 'g++ -m32 -std=c++17')

    def test_simple_cpp_exe_static_cpp_32(self, bc):
        self._compile_and_run(
            bc, SIMPLE_CPP_SOURCE, 'test.cpp', 'g++ -m32 -std=c++17 -static-libstdc++'
        )

    def test_simple_c_exe_32(self, bc):
        self._compile_and_run(bc, SIMPLE_C_SOURCE, 'test.c', 'gcc -m32')

    def test_simple_cpp_exe_old_abi_64(self, bc):
        # -D_GLIBCXX_USE_CXX11_ABI=0 is implicit with our specially built gcc
        self._compile_and_run(bc, SIMPLE_CPP_SOURCE, 'test.cpp', 'g++ -std=c++17 -m64')

    def test_simple_cpp_exe_static_cpp_64(self, bc):
        self._compile_and_run(
            bc, SIMPLE_CPP_SOURCE, 'test.cpp', 'g++ -std=c++17 -m64 -static-libstdc++'
        )

    def test_simple_c_exe_64(self, bc):
        self._compile_and_run(bc, SIMPLE_C_SOURCE, 'test.c', 'gcc -m64')
| csm10495/ubuntu_10_04_build | static/tests/test_container.py | test_container.py | py | 4,378 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pytest.fixture",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"... |
32161971126 | """Get E3FP default parameters and read parameters from files.
Author: Seth Axen
E-mail: seth.axen@gmail.com
"""
import os
import copy
import ast
from configparser import (
ConfigParser,
NoSectionError,
DuplicateSectionError,
)
# Directory containing this config module (and the bundled defaults file).
CONFIG_DIR = os.path.dirname(os.path.realpath(__file__))
# INI file holding the package-default parameter values.
DEF_PARAM_FILE = os.path.join(CONFIG_DIR, "defaults.cfg")
def read_params(params=None, fill_defaults=False):
    """Get combination of provided parameters and default parameters.
    Parameters
    ----------
    params : str or ConfigParser, optional
        User provided parameters as an INI file or `ConfigParser`.
        Any parameters provided will replace default parameters.
    fill_defaults : bool, optional
        Fill values that aren't provided with package defaults, if `params`
        is file.
    Returns
    -------
    all_params : ConfigParser
        Combination of default and user-provided parameters.
    """
    # an already-parsed config is returned as a (shallow) copy unchanged
    if isinstance(params, ConfigParser):
        return copy.copy(params)
    # later sources override earlier ones, so defaults go first
    sources = []
    if fill_defaults:
        sources.append(DEF_PARAM_FILE)
    if params is not None:
        sources.append(params)
    merged = ConfigParser()
    merged.read(sources)
    return merged
def write_params(params, params_file="params.cfg"):
    """Write params to file.
    Parameters
    ----------
    params : ConfigParser
        Params
    params_file : str
        Params file
    """
    # ConfigParser.write serializes to INI format
    with open(params_file, "w") as param_handle:
        params.write(param_handle)
def get_value(
    params, section_name, param_name, dtype=str, auto=False, fallback=None
):
    """Get value from params with fallback.
    Parameters
    ----------
    params : ConfigParser
        Parameters
    section_name : str
        Name of section in `params`
    param_name : str
        Name of parameter in `section`
    dtype : type, optional
        Type to return data as.
    auto : bool, optional
        Auto-discover type of value. If provided, `dtype` is ignored.
    fallback : any, optional
        Value to return if getting value fails.
    Returns
    -------
    value : any
        Value of parameter or `fallback`.
    """
    if auto:
        try:
            raw = params.get(section_name, param_name)
        except ValueError:
            return fallback
        # literal_eval turns "3" -> 3, "2.5" -> 2.5, "[1]" -> [1], etc.;
        # anything it cannot parse stays a plain string
        try:
            return ast.literal_eval(raw)
        except (ValueError, SyntaxError):
            return raw
    # dispatch to the typed ConfigParser getter; anything other than
    # int/float/bool falls back to the plain string getter
    typed_getters = {
        int: params.getint,
        float: params.getfloat,
        bool: params.getboolean,
    }
    getter = typed_getters.get(dtype, params.get)
    try:
        return getter(section_name, param_name)
    except ValueError:
        return fallback
def get_default_value(*args, **kwargs):
    """Look up a value from the module-level package defaults.

    Same signature as `get_value` minus the leading `params` argument.
    """
    # read-only access: `default_params` is never rebound here, so no
    # `global` declaration is needed
    return get_value(default_params, *args, **kwargs)
def update_params(
    params_dict, params=None, section_name=None, fill_defaults=False
):
    """Set `ConfigParser` values from a sections dict.
    Sections dict key must be parameter sections, and value must be dict
    matching parameter name to value. If existing `ConfigParser` is
    provided, parameter values are updated.
    Parameters
    ----------
    params_dict : dict
        If `section_name` is provided, dict must match parameter names to
        values. If `section_name` is not provided, dict key(s) must be
        parameter sections, and value(s) must be parameter dict.
    params : ConfigParser, optional
        Existing parameters.
    section_name : str, optional
        Name of section to which to add parameters in `params_dict`
    fill_defaults : bool, optional
        Fill values that aren't provided with package defaults, if `params`
        is file.
    Returns
    -------
    params : ConfigParser
        Updated parameters.
    """
    if params is None:
        params = ConfigParser()
    else:
        params = read_params(params, fill_defaults=fill_defaults)
    # Normalize both call styles to a sections dict, then apply uniformly.
    if section_name is not None:
        sections_dict = {section_name: params_dict}
    else:
        sections_dict = params_dict
    for section_name, section_params in sections_dict.items():
        # Create missing sections; previously the multi-section path skipped
        # this (and str() coercion), raising on new sections / non-str values.
        try:
            params.add_section(section_name)
        except DuplicateSectionError:
            pass
        for param_name, param_value in section_params.items():
            # ConfigParser only stores strings; coerce values uniformly.
            params.set(section_name, param_name, str(param_value))
    return params
def params_to_sections_dict(params, auto=True):
    """Get dict of sections dicts in params, with optional type discovery.
    Parameters
    ----------
    params : str or ConfigParser
        Params to read
    auto : bool, optional
        Auto typing of parameter values.
    Returns
    ----------
    dict : dict matching sections to parameters to values.
    """
    params = read_params(params)
    sections_dicts = {}
    # Only sections known to the package defaults are reported.
    for section_name in default_params.sections():
        try:
            raw_items = dict(params.items(section_name))
        except NoSectionError:
            continue
        if auto:
            section_dict = {}
            for key in raw_items:
                section_dict[key] = get_value(params, section_name, key,
                                              auto=True)
        else:
            section_dict = raw_items
        sections_dicts[section_name] = section_dict
    return sections_dicts
# Package defaults, loaded once at import time for module-level lookups.
default_params = read_params(fill_defaults=True)
| keiserlab/e3fp | e3fp/config/params.py | params.py | py | 5,425 | python | en | code | 114 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"lin... |
44401796444 | from flask import redirect, render_template, request
from app import app
import users
import topics
import threads
import messages
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
topic = request.form["topic"]
if len(topic) < 3 or len(topic) > 50:
return render_template("error.html", message="Aiheen täytyy olla 3-50 merkkiä")
topics.create_topic(topic)
adminrights = users.is_admin()
topiclist = topics.get_all()
threadcounts = threads.get_all_threadcounts_by_topic()
messagecounts = messages.get_all_messagecounts_by_topic()
latestmessages = messages.get_all_latest_messages_by_topic()
return render_template(
"index.html",
topics=topiclist,
threadcounts=threadcounts,
messagecounts=messagecounts,
latestmessages=latestmessages,
adminrights=adminrights
)
@app.route("/topic/<int:topic_id>", methods=["GET", "POST"])
def topic(topic_id):
if not topics.exists(topic_id):
return render_template("error.html", message="Keskustelualuetta ei ole olemassa tai se on poistettu")
else:
if request.method == "POST":
header = request.form["header"]
if len(header) < 3 or len(header) > 300:
return render_template("error.html", message="Otsikon täytyy olla 3-300 merkkiä")
init_msg = request.form["init_msg"]
if len(init_msg) > 5000:
return render_template("error.html", message="Aloitusviesti on liian pitkä (>5000 merkkiä)")
if not threads.create(topic_id, header, init_msg):
return render_template("error.html", message="Ketjun luonti epäonnistui")
threadlist = threads.get_all_by_topic(topic_id)
msgcount = messages.get_all_messagecounts_by_thread(topic_id)
return render_template(
"topicview.html",
topic_id=topic_id,
threads=threadlist,
msgcount=msgcount
)
@app.route("/topic/<int:topic_id>/thread/<int:thread_id>", methods=["GET", "POST"])
def thread(topic_id, thread_id):
if not threads.is_visible(thread_id):
return render_template("error.html", message="Ketjua ei ole olemassa tai se on poistettu")
else:
if request.method == "POST":
content = request.form["content"]
if len(content) < 1 or len(content) > 5000:
return render_template("error.html", message="Viestin täytyy olla 1-5000 merkkiä")
elif not messages.post(topic_id, thread_id, content):
return render_template("error.html", message="Viestin lähetys epäonnistui")
messagelist = messages.get_all_by_thread_with_usernames(thread_id)
thread = threads.get_by_id(thread_id)
op = users.get_by_id(thread.user_id)
return render_template("threadview.html", topic_id=topic_id, messages=messagelist, thread=thread, op=op)
@app.route("/topic/<int:topic_id>/thread/<int:thread_id>/delete", methods=["POST"])
def delete_thread(topic_id, thread_id):
if not threads.is_visible(thread_id):
return render_template("error.html", message="Ketjua ei ole olemassa tai se on poistettu")
else:
threads.delete(thread_id)
messages.delete_by_thread(thread_id)
return redirect(f"/topic/{topic_id}")
@app.route("/topic/<int:topic_id>/thread/<int:thread_id>/edit", methods=["GET", "POST"])
def edit_thread(topic_id, thread_id):
if not threads.is_visible(thread_id):
return render_template("error.html", message="Ketjua ei ole olemassa tai se on poistettu")
elif not users.user_id() == threads.get_user_id(thread_id):
return render_template("error.html", message="Sinulla ei ole oikeuksia nähdä tätä sivua")
else:
if request.method == "GET":
thread = threads.get_by_id(thread_id)
return render_template("edit_thread.html", thread=thread)
if request.method == "POST":
header = request.form["header"]
if len(header) < 3 or len(header) > 300:
return render_template("error.html", message="Otsikon täytyy olla 3-300 merkkiä")
init_msg = request.form["init_msg"]
if len(init_msg) > 5000:
return render_template("error.html", message="Aloitusviesti on liian pitkä (>5000 merkkiä)")
threads.edit(thread_id, header, init_msg)
return redirect(f"/topic/{topic_id}")
@app.route("/topic/<int:topic_id>/thread/<int:thread_id>/message/<int:message_id>/delete", methods=["POST"])
def delete_message(topic_id, thread_id, message_id):
if not messages.is_visible(message_id):
return render_template("error.html", message="Viestiä ei ole olemassa tai se on poistettu")
else:
messages.delete(message_id)
return redirect(f"/topic/{topic_id}/thread/{thread_id}")
@app.route("/topic/<int:topic_id>/thread/<int:thread_id>/message/<int:message_id>/edit", methods=["GET", "POST"])
def edit_message(topic_id, thread_id, message_id):
if not messages.is_visible(message_id):
return render_template("error.html", message="Viestiä ei ole olemassa tai se on poistettu")
elif not users.user_id() == messages.get_user_id(message_id):
return render_template("error.html", message="Sinulla ei ole oikeuksia nähdä tätä sivua")
else:
if request.method == "GET":
message = messages.get_by_id(message_id)
return render_template("edit_message.html", message=message)
if request.method == "POST":
content = request.form["content"]
if len(content) < 1 or len(content) > 5000:
return render_template("error.html", message="Viestin täytyy olla 1-5000 merkkiä")
messages.edit(message_id, content)
return redirect(f"/topic/{topic_id}/thread/{thread_id}")
@app.route("/signup", methods=["GET", "POST"])
def signup():
if request.method == "GET":
return render_template("signup.html")
if request.method == "POST":
username = request.form["username"]
if len(username) < 3 or len(username) > 20:
return render_template("error.html", message="Käyttäjätunnuksen täytyy olla 3-20 merkkiä")
password = request.form["password"]
if len(password) < 10 or len(password) > 40:
return render_template("error.html", message="Salasanan täytyy olla 10-40 merkkiä")
password2 = request.form["password2"]
admin = request.form["admin"]
if password != password2:
return render_template("error.html", message="Salasanat eivät ole samat")
if users.sign_up(username, password, admin):
return render_template("newuser.html", username=username)
else:
return render_template("error.html", message="Käyttäjätunnuksen luonti epäonnistui")
@app.route("/signin", methods=["GET", "POST"])
def signin():
if request.method == "GET":
return render_template("signin.html")
if request.method == "POST":
username = request.form["username"]
password = request.form["password"]
user = users.sign_in(username, password)
if user:
return redirect("/")
else:
return render_template("error.html", message="Väärä käyttäjätunnus tai salasana")
@app.route("/signout")
def signout():
users.sign_out()
return redirect("/")
@app.route("/search", methods=["GET", "POST"])
def search():
if request.method == "GET":
return render_template("search.html")
if request.method == "POST":
keyword = request.form["keyword"]
if len(keyword) < 1:
return render_template("error.html", message="Anna hakusana")
messagelist = messages.search_by_keyword(keyword)
init_messagelist = threads.search_by_keyword(keyword)
return render_template("search.html", messages=messagelist, init_messages=init_messagelist) | alanenpa/tsoha-message-board-app | routes.py | routes.py | py | 8,047 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.request.method",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask... |
40419762544 | from pathlib import Path
from collections import deque
import re
import numpy as np
data_folder = Path(".").resolve()
reg_floor = re.compile(r"an? (\w+)(-compatible microchip| generator)")
class Factory:
    """State-space search for the chip/generator elevator puzzle (AoC 2016 day 11).
    A state is a tuple: element 0 is the elevator's floor, each remaining
    element is a (microchip_floor, generator_floor) pair for one element.
    """
    def __init__(self,data):
        # Map each element name to its [chip_floor, generator_floor] pair;
        # line i of the input describes floor i.
        locations = dict()
        item_index = {"-compatible microchip":0," generator":1}
        for i,line in enumerate(data.split('\n')):
            for item in reg_floor.findall(line):
                if item[0] not in locations:
                    locations[item[0]] = [None,None]
                locations[item[0]][item_index[item[1]]] = i
        # Elevator starts on floor 0.
        self.state = [0] + [tuple(locations[element]) for element in locations]
        self.state = tuple(self.state)
        # Maps each visited state to the number of moves used to reach it.
        self.seen_states = {self.state : 0}
    def find_optimal_route(self):
        """Breadth-first search until everything is on floor 3.
        Returns the minimum number of elevator moves.
        """
        candidates = deque([[self.state,0]])
        end_state = [3] + [(3,3) for locs in self.state[1:]]
        end_state = tuple(end_state)
        while self.state != end_state:
            # appendleft + pop keeps FIFO order, so move counts are minimal.
            self.state,n_moves = candidates.pop()
            for state_candidate in self._possible_moves():
                if state_candidate not in self.seen_states:
                    self.seen_states[state_candidate] = n_moves+1
                    candidates.appendleft([state_candidate,n_moves+1])
        return(self.seen_states[end_state])
    def _possible_moves(self):
        """Enumerate legal successors: move one or two items one floor."""
        same_floor_indexes = []
        elevator_floor = self.state[0]
        valid_new_states = []
        # Collect (element_index, chip/generator_slot) pairs on the elevator's floor.
        for i in range(1,len(self.state)):
            for j in range(2):
                if self.state[i][j] == elevator_floor:
                    same_floor_indexes.append((i,j))
        # The elevator moves exactly one floor up or down, within floors 0-3.
        if elevator_floor == 0:
            new_floors = [1]
        elif elevator_floor == 3:
            new_floors = [2]
        else:
            new_floors = [elevator_floor-1,elevator_floor+1]
        for floor in new_floors:
            for k,item_index in enumerate(same_floor_indexes):
                i,j = item_index
                # Move a single item to `floor`.
                new_state = list(self.state)
                new_state[0] = floor
                new_state[i] = list(new_state[i])
                new_state[i][j] = floor
                new_state[i] = tuple(new_state[i])
                if not self._fried(new_state):
                    valid_new_states.append(tuple(new_state))
                # Additionally move a second item together with the first
                # (l > k avoids enumerating the same pair twice).
                for l in range(k+1,len(same_floor_indexes)):
                    q,p = same_floor_indexes[l]
                    newer_state = new_state.copy()
                    newer_state[q] = list(newer_state[q])
                    newer_state[q][p] = floor
                    newer_state[q] = tuple(newer_state[q])
                    if not self._fried(newer_state):
                        valid_new_states.append(tuple(newer_state))
        return valid_new_states
    def _fried(self,state):
        """Return True if any chip is separated from its own generator while
        sharing a floor with some (necessarily foreign) generator.
        """
        generator_locs = [item[1] for item in state[1:]]
        fried = False
        for i in range(1,len(state)):
            # state[i][0] is the chip's floor, state[i][1] its generator's.
            if (state[i][1] != state[i][0]) and (state[i][0] in generator_locs):
                fried = True
                break
        return fried
def main():
    """Solve both puzzle parts, each from its own input file."""
    for part, input_name in ((1, "input_1.txt"), (2, "input_2.txt")):
        factory = Factory(data_folder.joinpath(input_name).read_text())
        steps = factory.find_optimal_route()
        print(f"Part {part}")
        print(f"It takes a minimum of {steps} steps to get all the parts up to the assembly")
        print()
if __name__ == "__main__":
    main()
| eirikhoe/advent-of-code | 2016/11/sol.py | sol.py | py | 3,611 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 23,
"usage_type": "call"
}
] |
26082568433 | # pylint: disable=protected-access
from asyncio import create_task
from unittest.mock import AsyncMock, Mock, call, patch
from unittest.async_case import IsolatedAsyncioTestCase
from faker import Faker
from fetcher import Fetcher
class TestFetcher(IsolatedAsyncioTestCase):
    """Unit tests for the Fetcher URL-scraping helper."""
    def test_parse_url(self):
        """parse_url counts case-folded word frequencies in the page text."""
        test_cases = [
            {
                "text": "word, ..'word'', qwerty %(QWERTY something!@ +)WORD&(@%", # noqa
                "expected_result": {"word": 3, "qwerty": 2, "something": 1},
            },
            {"text": "", "expected_result": {}},
            {
                "text": "word qwerty something",
                "expected_result": {"word": 1, "qwerty": 1, "something": 1},
            },
        ]
        # BeautifulSoup is patched so parse_url sees the raw text directly.
        with patch("fetcher.BeautifulSoup") as bs_mock:
            bs_instance = bs_mock.return_value
            for case in test_cases:
                bs_instance.get_text.return_value = case["text"]
                fetcher = Fetcher(1, 4)
                result = fetcher.parse_url("")
                self.assertEqual(case["expected_result"], result)
    async def test_fetch_url(self):
        """_fetch_url drains the queue and invokes the callback once per URL."""
        fake = Faker()
        urls = [fake.url() for _ in range(5)]
        page_text = fake.text()
        fetcher = Fetcher()
        for url in urls:
            await fetcher._queue.put(url)
        # Stub the session so `async with session.get(...)` yields page_text.
        session_mock = Mock()
        resp_mock = AsyncMock()
        session_mock.get.return_value = resp_mock
        resp_mock.__aenter__.return_value.text.return_value = page_text
        callback_mock = Mock()
        task = create_task(fetcher._fetch_url(session_mock, callback_mock))
        # join() returns once every queued URL has been processed.
        await fetcher._queue.join()
        task.cancel()
        self.assertTrue(fetcher._queue.empty())
        expected_result = fetcher.parse_url(page_text)
        self.assertEqual(callback_mock.call_count, len(urls))
        self.assertEqual(
            callback_mock.mock_calls, [call(expected_result)] * len(urls)
        )
    async def test_start(self):
        """start() spawns worker tasks that are still running afterwards."""
        fetcher = Fetcher(workers_count=5)
        fetcher.start(Mock())
        for worker in fetcher._workers:
            self.assertFalse(worker.done())
    async def test_fetch(self):
        """fetch() enqueues the given URLs in order."""
        fake = Faker()
        urls = [fake.url() for _ in range(10)]
        fetcher = Fetcher(workers_count=5, max_size=11)
        await fetcher.fetch(urls)
        for url in urls:
            self.assertEqual(await fetcher._queue.get(), url)
            fetcher._queue.task_done()
        fetcher.stop()
| ilya0100/DeepPythonHW | 08/tests.py | tests.py | py | 2,501 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.async_case.IsolatedAsyncioTestCase",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "fetcher.Fetcher",
"line_number": 30,
"usage_type": "call"
},
{
"a... |
30890499502 | # -*- coding: utf-8 -*-
'''
django_chime/forms
------------------
forms for the django-chime app
'''
from django.core.validators import MinValueValidator, MaxValueValidator
from django.forms import ModelForm
from django.forms.fields import FloatField, TextInput
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import (
Field,
Layout,
Submit,
)
from djcorecap.forms import cancel
from .core import is_number
from .models import ChimeSite
class PercentageField(FloatField):
    '''
    Float field whose widget shows whole percentages but whose cleaned
    value is a fraction: "2.5" in the form becomes 0.025, and a stored
    0.025 is rendered back as "2.5".
    '''
    widget = TextInput(attrs={"class": "percentInput"})
    def to_python(self, value):
        parsed = super().to_python(value)
        if is_number(parsed):
            return parsed / 100
        return parsed
    def prepare_value(self, value):
        prepared = super().prepare_value(value)
        if is_number(prepared) and not isinstance(prepared, str):
            return str(float(prepared) * 100)
        return prepared
class ChimeSiteCreateForm(ModelForm):
    '''
    chime site create form
    Percentage inputs are entered as whole percents in the UI but stored
    as fractions in 0-1 (see `PercentageField` above).
    '''
    # Rate fields: initial values are fractions; validators bound them to
    # (0, 1] or [0, 1] as appropriate.
    hospitalized_rate = PercentageField(
        initial=0.025,
        validators=[MinValueValidator(0.00001), MaxValueValidator(1.0)],
        help_text='Hospitalization %(total infections)',
    )
    icu_rate = PercentageField(
        initial=0.0075,
        validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],
        help_text='ICU %(total infections)',
    )
    market_share = PercentageField(
        initial=0.15,
        validators=[MinValueValidator(0.00001), MaxValueValidator(1.0)],
        help_text='Hospital market share %',
    )
    relative_contact_rate = PercentageField(
        initial=0.30,
        validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],
        help_text='Social distancing (% reduction in social contact going forward)',
    )
    ventilated_rate = PercentageField(
        initial=0.005,
        validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],
        help_text='Ventilated %(total infections)',
    )
    class Meta:
        # Remaining fields use the model's default form fields.
        model = ChimeSite
        fields = [
            'name',
            'population',
            'current_hospitalized',
            'date_first_hospitalized',
            'doubling_time',
            'hospitalized_rate',
            'hospitalized_days',
            'icu_rate',
            'icu_days',
            'infectious_days',
            'market_share',
            'n_days',
            'mitigation_date',
            'relative_contact_rate',
            'ventilated_rate',
            'ventilated_days',
        ]
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # crispy-forms layout: one Field row per form field (autocomplete
        # disabled), followed by Confirm/Cancel buttons.
        self.helper = FormHelper()
        self.helper.layout = Layout(
            *[
                Field(
                    f,
                    autocomplete='off',
                ) for f in self.fields
            ],
            FormActions(
                Submit('submit', 'Confirm'),
                cancel,
            ),
        )
| ChrisPappalardo/django-chime | django_chime/forms.py | forms.py | py | 3,022 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "django.forms.fields.FloatField",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.forms.fields.TextInput",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "core.is_number",
"line_number": 37,
"usage_type": "call"
},
{
"api_n... |
32411269390 | from .models import Leaderboard, Score
import json
class Service():
    """Static helpers for reading and updating per-game leaderboards."""
    @staticmethod
    def _as_dict(document):
        """Deserialize a mongoengine document/queryset into plain Python data."""
        return json.loads(document.to_json())
    @staticmethod
    def get_all_leaderboard():
        """Return every leaderboard document as a list of plain dicts."""
        return Service._as_dict(Leaderboard.objects())
    @staticmethod
    def add_new_score(game, score, user):
        """Append a score to a game's leaderboard (creating the leaderboard
        if needed) and return the refreshed list of entries.
        """
        board = Leaderboard.objects(game=game).first()
        entry = Score(user=user, score=score)
        if board is None:
            Leaderboard(game=game, leaderboard=[entry]).save()
        else:
            board.leaderboard.append(entry)
            board.save()
        refreshed = Leaderboard.objects(game=game).first()
        return Service._as_dict(refreshed)['leaderboard']
    @staticmethod
    def get_leaderboard(game):
        """Return a game's leaderboard entries, creating an empty
        leaderboard document if none exists yet.
        """
        board = Leaderboard.objects(game=game).first()
        if board is None:
            board = Leaderboard(game=game)
            board.save()
        return Service._as_dict(board)['leaderboard']
| phong1233/website-backend | src/services.py | services.py | py | 1,243 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "models.Leaderboard.objects",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.Leaderboard",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Leaderbo... |
21877799762 | import sys
from collections import deque
# Maze dimensions come from the first input line; cells are filled in below.
rows, columns = map(int, sys.stdin.readline().strip().split())
maze = [[0 for _ in range(columns)] for _ in range(rows)]
# Row/column offsets for the four orthogonal neighbours.
dx = [0, 0, -1, 1]
dy = [-1, 1, 0, 0]
def bfs(v, w):
    """BFS over the maze from cell (v, w).
    Cell values double as visited markers: a reached cell stores
    1 + its predecessor's value (i.e. the path length from the start).
    Returns the value stored in the bottom-right cell.
    """
    queue = deque()
    queue.append((v, w))
    while queue:
        row, col = queue.popleft()
        for d in range(4):
            nr = row + dx[d]
            nc = col + dy[d]
            if nr < 0 or nc < 0 or nr >= rows or nc >= columns:
                continue
            # Only unvisited open cells (still holding 1) are expanded;
            # walls (0) and already-numbered cells are skipped.
            if maze[nr][nc] == 1:
                maze[nr][nc] = maze[row][col] + 1
                queue.append((nr, nc))
    return maze[rows-1][columns-1]
if __name__ == '__main__':
    # Read each maze row as a string of digits and fill the grid.
    for i in range(rows):
        temp = list(sys.stdin.readline().strip())
        for j in range(columns):
            maze[i][j] = int(temp.pop(0))
    print(bfs(0, 0))
{
"api_name": "sys.stdin.readline",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline"... |
32146339364 | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView
urlpatterns = [
    # path('', include(router.urls)),
    # Djoser authentication endpoints:
    path('auth/', include('djoser.urls')), # new-user creation at /auth/users/
    path('auth/', include('djoser.urls.authtoken')),
    path('auth/', include('djoser.urls.jwt')), # user login at /auth/jwt/create
    path('auth/', include('rest_framework_social_oauth2.urls')),
    path('api-auth/', include('rest_framework.urls')),
    # Simple JWT token lifecycle endpoints.
    path('api/token/', TokenObtainPairView.as_view()),
    path('api/token/refresh/', TokenRefreshView.as_view()),
    path('api/token/verify/', TokenVerifyView.as_view()),
    path('admin/', admin.site.urls),
    path('ckeditor/', include('ckeditor_uploader.urls')),
    # Versioned application APIs.
    path('api/v1/digest/', include('backend.digest.urls')),
    path('api/v1/event/', include('backend.event.urls')),
    path('api/v1/club/', include('backend.club.urls')),
    path('api/v1/category/', include('backend.sports_category.urls')),
    path('api/v1/registration/', include('backend.registration.urls')),
]
# Serve user-uploaded media through Django only in development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| FirstWind/OZABRU | OZABRU/urls.py | urls.py | py | 1,391 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.inc... |
39475147519 | from collections import defaultdict
# instructions: bot -> [(low_goes_to_output?, low_target), (high_goes_to_output?, high_target)]
instructions = dict()
# Chips currently held, keyed by bot id / output bin id.
bots = defaultdict(list)
outputs = defaultdict(list)
with open("../inputs/10.txt") as f:
    for line in f:
        line = line.split()
        if line[0] == "value":
            # "value V goes to bot B": seed bot B with chip V.
            bot = int(line[5])
            microchip = int(line[1])
            bots[bot].append(microchip)
        else:
            # "bot B gives low to <bot|output> X and high to <bot|output> Y"
            bot = int(line[1])
            low = int(line[6])
            is_low_output = line[5] == "output"
            high = int(line[-1])
            is_high_output = line[-2] == "output"
            instructions[bot] = [(is_low_output, low), (is_high_output, high)]
# Process bots that hold two chips until none remain.
bot_q = [k for k, v in bots.items() if len(v) == 2]
while len(bot_q):
    bot = bot_q.pop(0)
    low, high = sorted(bots[bot])
    bots[bot] = []
    # Part 1: the bot that compares chips 17 and 61.
    if low == 17 and high == 61:
        print(f"Day 10 part 1: {bot}")
    low_, high_ = instructions[bot]
    is_low_output, low_out = low_
    is_high_output, high_out = high_
    if is_low_output:
        outputs[low_out].append(low)
    else:
        bots[low_out].append(low)
        # A bot becomes ready the moment it holds its second chip.
        if len(bots[low_out]) == 2:
            bot_q.append(low_out)
    if is_high_output:
        outputs[high_out].append(high)
    else:
        bots[high_out].append(high)
        if len(bots[high_out]) == 2:
            bot_q.append(high_out)
# Part 2: product of the single chips in output bins 0, 1 and 2.
mul = outputs[0][0] * outputs[1][0] * outputs[2][0]
print(f"Day 10 part 2: {mul}")
| Lalica/adventofcode | 2016/solutions/day10.py | day10.py | py | 1,388 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 6,
"usage_type": "call"
}
] |
69955543713 | """Handler for vocab data saves and updating."""
import json
import parse_raw
SAVEPATH = "../data/"
OUTPATH = "../output/"
SAVEFILE = "vocab.json"
INFILE = "raw_terms.txt"
DATAFILE = "raw.json"
def fetch_json(filepath=SAVEPATH+SAVEFILE):
    """Load JSON from `filepath`; an unreadable/missing file yields {}."""
    try:
        with open(filepath, 'r') as handle:
            return json.load(handle)
    except IOError:
        # Treat a missing file as an empty store.
        return {}
def save_json(data, filepath=SAVEPATH+SAVEFILE):
    """Write `data` to `filepath` as key-sorted, indented JSON."""
    with open(filepath, 'w') as handle:
        json.dump(data, handle, sort_keys=True, indent=2,
                  separators=(',', ': '))
def merge_definitions(arr1, arr2):
    """Merge two definition lists, de-duplicating by strict equality.

    Returns a new list containing `arr1`'s items followed by any items of
    `arr2` not already present. Neither input is modified (the previous
    version aliased and mutated `arr1` in place).
    """
    merged = list(arr1)
    for definition in arr2:
        if definition not in merged:
            merged.append(definition)
    return merged
def merge_entry(new_data, vocabulary):
    """Merge `new_data` entries into `vocabulary` in place.
    Definitions of existing terms are merged; metadata is always replaced
    by the incoming entry's metadata.
    """
    for key, entry in new_data.items():
        if key not in vocabulary:
            vocabulary[key] = entry
            continue
        existing = vocabulary[key]
        existing['data']['definitions'] =\
            merge_definitions(existing['data']['definitions'],
                              entry['data']['definitions'])
        existing['data']['metadata'] = entry['data']['metadata']
        vocabulary[key] = existing
def vocab_to_json(file_in=SAVEPATH+INFILE, file_out=SAVEPATH+DATAFILE):
    """Parse the raw terms file with `parse_raw` and save the result as JSON."""
    save_json(parse_raw.parse_file(file_in), filepath=file_out)
def merge_from_array(file_in, vocab_out=SAVEPATH+SAVEFILE):
    """Merge elements from a JSON array at `file_in` into a vocabulary at
    `vocab_out`.

    Parameters
    ----------
    file_in : str
        Path to a JSON array of entries with term/section/definitions/class.
    vocab_out : str
        Vocabulary file to read, merge into, and write back.
    """
    vocabulary = fetch_json(vocab_out)
    print("Merging from {}.".format(file_in))
    for entry in fetch_json(file_in):
        key = entry['term']
        chapter = entry['section']['chapter']
        section = entry['section']['part']
        definitions = entry['definitions']
        grammar_classes = entry['class']
        # Re-shape the raw entry into the vocabulary schema.
        vocab_entry = {
            key: {
                "data": {
                    "definitions": [definitions],
                    "metadata": {
                        "chapter": chapter,
                        "section": section,
                        "classes": grammar_classes,
                    }
                }
            }
        }
        merge_entry(vocab_entry, vocabulary)
    # Write back to the same path we read from; the previous version saved
    # to the module default path, silently ignoring `vocab_out`.
    save_json(vocabulary, filepath=vocab_out)
if __name__ == "__main__":
vocab_to_json()
merge_from_array(SAVEPATH+DATAFILE, OUTPATH+SAVEFILE)
| JDongian/LangGrind | src/vocab.py | vocab.py | py | 2,847 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "parse_raw.parse_file",
"line_number": 53,
"usage_type": "call"
}
] |
41436539593 | import logging
import string
import uuid
from time import sleep
import pytest
import sqlalchemy
from streamsets.testframework.markers import database
from streamsets.testframework.utils import get_random_string
# Module-level logger for test progress/diagnostics.
logger = logging.getLogger(__name__)
@database
def test_query_consumer_network(sdc_builder, sdc_executor, database):
    """Test simple JDBC query consumer origin for network fault tolerance. We delay the pipeline using a Delay stage
    so as we get time to shut the SDC container network to test retry and resume logic of origin stage.
    The pipeline looks like:
        jdbc_query_consumer >> delay >> trash
        jdbc_query_consumer >= finisher
    """
    number_of_rows = 10_000
    table_name = get_random_string(string.ascii_lowercase, 20)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    jdbc_query_consumer = pipeline_builder.add_stage('JDBC Query Consumer')
    jdbc_query_consumer.set_attributes(incremental_mode=False, sql_query=f'SELECT * FROM {table_name}')
    delay = pipeline_builder.add_stage('Delay')
    # milliseconds to delay between batches, so as we get time to disconnect network
    delay.set_attributes(delay_between_batches=1000)
    trash = pipeline_builder.add_stage('Trash')
    finisher = pipeline_builder.add_stage('Pipeline Finisher Executor')
    jdbc_query_consumer >> delay >> trash
    jdbc_query_consumer >= finisher
    pipeline = pipeline_builder.build('JDBC Query Origin').configure_for_environment(database)
    sdc_executor.add_pipeline(pipeline)
    # Test table: integer primary key plus a random-name column.
    metadata = sqlalchemy.MetaData()
    table = sqlalchemy.Table(table_name, metadata,
                             sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
                             sqlalchemy.Column('name', sqlalchemy.String(40)))
    try:
        logger.info('Creating table %s in %s database ...', table_name, database.type)
        table.create(database.engine)
        logger.info('Adding %s rows into %s database ...', number_of_rows, database.type)
        connection = database.engine.connect()
        connection.execute(table.insert(), [{'id': i, 'name': str(uuid.uuid4())} for i in range(1, number_of_rows+1)])
        pipeline_cmd = sdc_executor.start_pipeline(pipeline)
        # Wait until roughly a third of the rows flowed, then cut the network
        # mid-run to force the origin into its retry path.
        pipeline_cmd.wait_for_pipeline_output_records_count(int(number_of_rows/3))
        sdc_executor.container.network_disconnect()
        sleep(5)  # sleep few seconds to have pipeline go into retry mode
        sdc_executor.container.network_reconnect()
        pipeline_cmd.wait_for_finished()
        history = sdc_executor.get_pipeline_history(pipeline)
        # -2 to take out two events generated from record count
        pipeline_record_count = (history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count - 2)
        assert pipeline_record_count == number_of_rows
    finally:
        logger.info('Dropping table %s in %s database...', table_name, database.type)
        table.drop(database.engine)
| streamsets/datacollector-tests | fault/test_jdbc.py | test_jdbc.py | py | 2,975 | python | en | code | 17 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamsets.testframework.utils.get_random_string",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "string.ascii_lowercase",
"line_number": 23,
"usage_type": "attribute"
... |
40898109195 | # 백준 강의 알고리즘 기초 2/2 610-BFS
# 1697번 숨바꼭질
import sys
# Redirect stdin to a local file for offline testing of the judge solution.
sys.stdin = open('input.txt')
# NOTE: shadows the built-in input() with a faster readline.
input = sys.stdin.readline
# 여기부터 제출해야 한다.
# N, K = map(int, input().split())
# list_visit = [[N, K, 0]]
# def next_step(start, goal, depth):
# next_step_1 = start + 1
# next_step_2 = start - 1
# next_step_3 = start * 2
# depth += 1
# if next_step_1 not in list_visit:
# list_visit.append([next_step_1, goal, depth])
# if next_step_2 not in list_visit:
# list_visit.append([next_step_2, goal, depth])
# if next_step_3 not in list_visit:
# list_visit.append([next_step_3, goal, depth])
# for i in list_visit:
# start = i[0]
# goal = i[1]
# depth = i[2]
# if start == goal:
# print(depth)
# break
# else:
# next_step(start, goal, depth)
# list_visit.append([next_step_1, goal, depth]) 메모리 초과
# if next_step_1 not in list_visit: 시간초과
import sys
from collections import deque
def bfs(v):
    """Breadth-first search over positions 0..MAX-1 starting at v.
    `array[p]` stores the number of seconds needed to reach position p
    (and doubles as the visited marker); returns the time to reach k.
    """
    queue = deque([v])
    while queue:
        pos = queue.popleft()
        if pos == k:
            return array[pos]
        # Moves allowed per second: step back, step forward, teleport x2.
        for nxt in (pos - 1, pos + 1, pos * 2):
            if 0 <= nxt < MAX and not array[nxt]:
                array[nxt] = array[pos] + 1
                queue.append(nxt)
# Positions are bounded by 100,000 (problem constraint).
MAX = 100001
n, k = map(int, sys.stdin.readline().split())
# array[p] holds seconds elapsed to reach p; 0 means unvisited.
array = [0] * MAX
print(bfs(n))
| boogleboogle/baekjoon | etc/bfs/610/1_1697.py | 1_1697.py | py | 1,415 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
... |
45182407716 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
'''
Simple anti-bot API for Google reCaptcha service.
'''
import logging
import urllib
# Legacy reCaptcha verification endpoint.
RECAPTCHA_URL = 'http://www.google.com/recaptcha/api/verify'
# Key pair used for local development on `localhost`.
TEST_DOMAIN = 'localhost'
TEST_PUB_KEY = '6LeAOr0SAAAAALQX_KWv_JhJpXrHNE5Xo0Z-UJwe'
TEST_PRI_KEY = '6LeAOr0SAAAAAAftuAAf6hI7McUzejjY2qLy4ukC'
def _encode(s):
    # Python 2 helper: UTF-8-encode unicode strings, pass other values through.
    if isinstance(s, unicode):
        return s.encode('utf-8')
    return s
def get_public_key():
    """Return the reCaptcha public (site) key."""
    return TEST_PUB_KEY
def get_private_key():
    """Return the reCaptcha private (server) key."""
    return TEST_PRI_KEY
def verify_captcha(recaptcha_challenge_field, recaptcha_response_field, private_key, remote_ip):
    """Verify a reCaptcha answer against Google's verify API (Python 2).

    Returns a (success, message) tuple; network or service failures are
    reported as an unsuccessful verification rather than raised.
    """
    if not (recaptcha_challenge_field and recaptcha_response_field and private_key):
        return False, 'Invalid captcha data'
    params = urllib.urlencode ({
        'privatekey': _encode(private_key),
        'remoteip'  : _encode(remote_ip),
        'challenge': _encode(recaptcha_challenge_field),
        'response' : _encode(recaptcha_response_field),
    })
    f = None
    try:
        f = urllib.urlopen(RECAPTCHA_URL, params)
        resp = f.read()
        logging.info('Get recaptcha result: %s' % resp)
        # The first line of the response body is "true" or "false".
        if resp.splitlines()[0]=='true':
            return True, 'Correct captcha'
        return False, 'Incorrect captcha words'
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed as a "Network error".
        logging.exception('Error when open url: %s' % RECAPTCHA_URL)
        return False, 'Network error'
    finally:
        if f is not None:
            f.close()
| Albertnnn/express-me | src/framework/recaptcha.py | recaptcha.py | py | 1,582 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib.urlencode",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "urllib.urlopen",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "logging.exception",
... |
34813103313 | import ffmpy3
from multiprocessing import Pool
def main(name, link):
ffmpy3.FFmpeg(inputs={link: None}, outputs={name: None}).run()
if __name__ == '__main__':
name = './test.mp4'
link = 'https://daqqzz.com/20200118/a187bf139b8fe6452130d28921c4b5cf.mp4/index.m3u8?ts=1598258626000&token=1ccf90cce80953842dc75a60f865a0ba'
p = Pool(8)
p.apply_async(main, args=(name, link))
p.close()
p.join() | mediew/pynote | spyder/ck资源网/test.py | test.py | py | 421 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ffmpy3.FFmpeg",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 12,
"usage_type": "call"
}
] |
20188320963 | import mysql.connector
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
driver = webdriver.Chrome(ChromeDriverManager().install())
url_prefix = "https://www.sas.am"
def insert_products(products):
conn = mysql.connector.connect(
host='localhost',
user='root',
passwd='1111',
db='BASE'
)
cursor = conn.cursor()
query = """insert into products (product_id, title, descriptions,
price, image_path, country, product_code)
values(%s, %s, %s, %s, %s, %s, %s)"""
for product in products:
cursor.execute(query, [product.get('product_id'), product.get('title'), product.get('descriptions'),
product.get('price'), product.get('image_path'), product.get('country'),
product.get('product_code')])
conn.commit()
def open_sas_page(url):
driver.get(url_prefix)
def get_all_categories_link():
links = []
try:
a_elements = driver.find_element_by_xpath('//*[@class="sidebar"]').find_elements_by_tag_name('a')
for a_element in a_elements:
links.append(a_element.get_attribute('href'))
except:
print('Error')
return links
def get_all_sub_categories():
links = []
try:
a_elements = driver.find_element_by_xpath(
'//*[@class="product_list frequent_items"]').find_elements_by_tag_name(
'a')
for a_element in a_elements:
links.append(a_element.get_attribute('href'))
except:
print('Error')
return links
def get_all_products(link, links):
try:
print(' ' + link)
driver.get(link)
products = driver.find_elements_by_class_name('td-overally')
if products:
for product in products:
links.append(product.find_element_by_tag_name('a').get_attribute('href'))
next_page = driver.find_elements_by_xpath('//*[@class="next history_filter_paging_el"]')
if next_page:
get_all_products(next_page[0].get_attribute('href'), links)
else:
for sub_category in get_all_sub_categories():
get_all_products(sub_category, links)
except:
print('Error')
def get_product_data(url):
driver.get(url)
product = {}
product_table = driver.find_element_by_xpath('//*[@class="product card-prod"]')
img_sr = product_table.find_element_by_tag_name('img').get_attribute('src')
product['image_path'] = img_sr
title = product_table.find_element_by_tag_name('h2').text
product['title'] = title
description = product_table.find_element_by_tag_name('strong').find_element_by_xpath('..').text.replace(
'Ապրանքի նկարագրությունը`', '').strip()
product['descriptions'] = description
price = product_table.find_element_by_class_name('priceValue').text
product['price'] = price
properties = product_table.find_element_by_class_name('ingrid').find_elements_by_tag_name('td')[2::2]
if len(properties) == 2:
product['product_code'] = properties[0].text
product['product_id'] = properties[1].text
else:
product['country'] = properties[0].text
product['product_code'] = properties[1].text
product['product_id'] = properties[2].text
return product
def process():
categories = get_all_categories_link()[1:2]
links = []
for category in categories:
get_all_products(category, links)
products = []
for link in links:
products.append(get_product_data(link))
print(products)
insert_products(products)
open_sas_page(url_prefix)
process()
| karlosgevorgyan/requests | Requests/selenium_SAS.py | selenium_SAS.py | py | 3,716 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "webdriver_manager.chrome.ChromeDriverManager",
"line_number": 5,
"usage_type": "call"
},
{... |
24379844080 | from django.shortcuts import get_object_or_404, render
from django.http import JsonResponse
from django.contrib.auth.models import User
from .models import SiteMessage, Payment
from comments.models import Comment
from article.models import ArticlesPost
from album.models import Album
from vlog.models import Vlog
from payjs import PayJS, PayJSNotify
from django.views.decorators.csrf import csrf_exempt
from utils.templatetags.filter_utils import time_since_zh
from dusainet2.settings import LOGGING, PAYJS_MCHID, PAYJS_KEY, PAYJS_NOTIFY_URL
from time import strftime, localtime
from random import randint
import json
import logging
logging.config.dictConfig(LOGGING)
logger = logging.getLogger('django.request')
# 获取最后一条站通知
def latest_site_message(request):
try:
message = SiteMessage.objects.last()
data = {
'content': message.content.replace("\r\n", "<br/>"),
'created': message.created.strftime("%Y/%m/%d"),
}
except:
data = {
"content": "o(╥﹏╥)o服务器连接失败~"
}
return JsonResponse(data, safe=True)
# 点赞计数
def increase_likes(request, obj_type, obj_id):
response = {}
if obj_type == 'comment':
obj = get_object_or_404(Comment, id=obj_id)
response.update({'node_type': 'comment'})
elif obj_type == 'article':
obj = get_object_or_404(ArticlesPost, id=obj_id)
response.update({'node_type': 'article'})
elif obj_type == 'album':
obj = get_object_or_404(Album, id=obj_id)
response.update({'node_type': 'album'})
elif obj_type == 'vlog':
obj = get_object_or_404(Vlog, id=obj_id)
response.update({'node_type': 'vlog'})
else:
logger.error(
'extends increase_likes: get object went wrong.\n request url: {0}'.format(
request.path_info
)
)
return JsonResponse({'state': 500})
obj.likes += 1
obj.save(update_fields=['likes'])
response.update({'state': 200})
return JsonResponse(response)
def payjs_QRpay(request):
# 初始化
payjs = PayJS(PAYJS_MCHID, PAYJS_KEY)
try:
if request.method == 'POST':
total_fee = int(request.POST.get('total_fee'))
username = request.POST.get('username').strip()[:20]
message = request.POST.get('message').strip()[:70]
if not username:
username = '[游客]'
if not message:
message = '赞赏博主'
else:
total_fee = 800
username = '[游客]'
message = '赞赏博主'
except:
logger.error('extends payjs_QRpay: get total_fee failed.')
total_fee = 800
username = '[游客]'
message = '赞赏博主'
if request.user.is_superuser:
total_fee = 1
# 扫码支付
OUT_TRADE_NO = strftime("%Y%m%d%H%M%S", localtime()) + '-{}'.format(randint(10000, 99999))
TOTAL_FEE = total_fee
BODY = '文章赞赏'
NOTIFY_URL = PAYJS_NOTIFY_URL
payjs_response = payjs.QRPay(
out_trade_no=OUT_TRADE_NO,
total_fee=TOTAL_FEE,
body=BODY,
notify_url=NOTIFY_URL,
)
if payjs_response:
payment = Payment.objects.create(
total_fee=TOTAL_FEE,
out_trade_no=OUT_TRADE_NO,
payjs_order_id=payjs_response.payjs_order_id,
body=BODY,
)
payment.username = username
payment.message = message
try:
if request.method == 'POST':
article_id = int(request.POST.get('article_id'))
if article_id:
payment.article = ArticlesPost.objects.get(pk=article_id)
user_id = request.user.id
if user_id:
payment.user = User.objects.get(pk=user_id)
payment.username = User.objects.get(pk=user_id).username
except:
logger.error('extends payjs_QRpay: get ArticlesPost or User failed.')
payment.save()
context = {
'payjs_response': payjs_response,
'code': 200
}
else:
logger.error(
'extends payjs_QRpay: get QRPay error.\n code: {0} error_no: {1} error_msg: {2}'.format(
payjs_response.STATUS_CODE,
payjs_response.ERROR_NO,
payjs_response.error_msg
)
)
context = {
'payjs_response': payjs_response,
'code': 400
}
return render(request, 'extends/appreciate.html', context=context)
@csrf_exempt
def check_payment(request):
if request.method == 'POST':
payjs_order_id = request.POST.get('payjs_order_id')
try:
payment = Payment.objects.get(payjs_order_id=payjs_order_id)
except:
logger.error('extends check_payment_is_paid: get payment failed.')
return JsonResponse({'status': 500})
if payment.is_paid == 'T':
return JsonResponse({'status': 200, 'is_paid': 'T'})
else:
return JsonResponse({'status': 200, 'is_paid': 'F'})
@csrf_exempt
def payjs_wechat_notify(request):
if request.method == 'POST':
data = request.POST
notify = PayJSNotify(PAYJS_KEY, data)
return_code = notify.return_code[0] if notify.return_code is list else notify.return_code
order_id = notify.payjs_order_id[0] if notify.payjs_order_id is list else notify.payjs_order_id
if return_code == '1':
try:
payment = Payment.objects.get(payjs_order_id=order_id)
payment.is_paid = 'T'
payment.save()
except:
logger.error('extends payjs_wechat_notify: get payment failed. order_id: {}'.format(order_id))
elif return_code == '0':
logger.error('extends payjs_wechat_notify: return_code is 0.')
else:
logger.error('extends payjs_wechat_notify: return_code is {}.'.format(return_code))
return JsonResponse({'code': 200})
else:
logger.error('extends payjs_wechat_notify: just handle post method.')
return JsonResponse({'code': 403})
def sponsor_list(request):
sponsors = Payment.objects.filter(is_paid='T').order_by('-created')
if request.is_ajax() and request.method == 'GET':
data = []
for sponsor in sponsors[:10]:
data.append({
'total_fee': int(sponsor.total_fee / 100),
'username': sponsor.username,
'message': sponsor.message,
'created': time_since_zh(sponsor.created)
})
return JsonResponse({'sponsors': data})
return render(request, 'extends/sponsor_list.html', {'sponsors': sponsors})
| budaLi/dusainet | extends/views.py | views.py | py | 6,846 | python | en | code | null | github-code | 1 | [
{
"api_name": "logging.config.dictConfig",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "dusainet2.settings.LOGGING",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "logging.config",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_... |
6205039220 | #!/usr/bin/env python
"""
track.py -- reconnect localizations into trajectories
"""
# Numeric
import numpy as np
# Dataframes
import pandas as pd
# Distance between two sets of 2D points
from scipy.spatial import distance_matrix
# Hungarian algorithm
from munkres import Munkres
hungarian_solver = Munkres()
# Custom utilities
from .helper import (
connected_components
)
# Deep copy
from copy import copy
# Filter on the number of spots per frame
from .trajUtils import filter_on_spots_per_frame
##################################
## LOW-LEVEL TRACKING UTILITIES ##
##################################
def is_duplicates(trajs):
"""
Return True if there are duplicates in a set of Trajectories.
"""
if len(trajs) < 2:
return False
for j in range(len(trajs)-1):
for i in range(j+1, len(trajs)):
R = (trajs[i].get_slice()[:,:2]==trajs[j].get_slice()[:,:2])
if isinstance(R, bool):
if R:
return True
elif R.all():
return True
else:
pass
return False
def has_match(trajs_0, trajs_1):
"""
Return True if a trajectory in trajs_0 exactly matches a trajectory
in trajs_1.
"""
for i in range(len(trajs_0)):
for j in range(len(trajs_1)):
R = (trajs_0[i].get_slice()[:,:2] == trajs_1[j].get_slice()[:,:2])
if isinstance(R, bool):
if R:
return True
elif R.all():
return True
else:
pass
return False
class Trajectory():
"""
Convenience class used internally by track_locs().
A Trajectory object specifies a set of indices to
localizations in a large array that are to be reconnected
into trajectories.
It also holds a reference to the original array, so
that it can grab information about its localization when
necessary.
When Trajectories are not reconnected in a given frame,
their blink counter (self.n_blinks) is incremented. When
this exceeds *max_blinks*, the Trajectories are marked
for termination.
The Trajectory class is also convenient to hold associated
information about the tracking problem, such as the
number of competing trajectories and localizations, etc.,
that are returned at the end of tracking.
init
----
start_idx : int, the index of the first
localization in this Trajectory
locs : 2D ndarray, all localizations
subproblem_shape: (int, int), the number of trajs
and locs in the subproblem that
created this trajectory
max_blinks : int, the maximum tolerated number
of gaps in tracking
"""
def __init__(self, start_idx, locs, subproblem_shape, max_blinks=0):
self.indices = [int(start_idx)]
self.locs = locs
self.max_blinks = max_blinks
self.n_blinks = 0
self.active = True
self.subproblem_shapes = [subproblem_shape]
def add_index(self, idx, subproblem_shape):
"""
Extend this trajectory by one localization.
args
----
idx : int, the index of the localization
in self.locs to add
subproblem_shape : (int, int), the size
of the tracking subproblem in which
this localization was added
"""
self.indices.append(int(idx))
self.subproblem_shapes.append(subproblem_shape)
def blink(self):
"""
Skip a frame. If a Trajectory has been in blink for
more than *self.n_blinks* frames, it marks itself
for termination by setting self.active = False.
"""
self.n_blinks += 1
if self.n_blinks > self.max_blinks:
self.active = False
def get_slice(self):
"""
Return the slice of the localization array that
corresponds to this trajectory.
returns
-------
2D ndarray of shape (traj_len, 5)
"""
return self.locs[tuple(self.indices), :]
def last_pos(self):
"""
Return the last known position of this Trajectory.
returns
-------
(float y, float x), in pixels
"""
return self.locs[self.indices[-1], 2:4]
def traj_loc_distance(trajs, locs):
"""
Return the distance between each trajectory and each
localization.
args
----
trajs : list of Trajectory
locs : 2D ndarray with columns loc_idx,
frame, y, x, I0
returns
-------
2D ndarray D, where D[i,j] is the distance between
Trajectory i and localization j
"""
return distance_matrix(
np.asarray([t.last_pos() for t in trajs]),
locs[:,2:4]
)
def diffusion_weight_matrix(trajs, locs, frame_interval=0.00548,
pixel_size_um=0.16, k_return_from_blink=1.0, d_max=5.0,
y_diff=0.9, search_radius=2.5, d_bound_naive=0.1, init_cost=50.0):
"""
Generate the weight matrix for reconnection between a set of
Trajectories and a set of localizations for the "diffusion"
method.
In this method, the weight of reconnecting trajectory A to
localization B is equal to the negative log likelihood of
trajectory A diffusing to localization B in the relevant
frame interval (equal to *frame_interval* if there are no
gaps). The likelihood is evaluated with a 2D Brownian motion
model.
A weighted combination of two such negative log-likelihoods
is used. The first assumes a diffusion coefficient equal to
the maximum likelihood diffusion coefficient for that
trajectory (using the MSD method). The second assumes a
diffusion coefficient equal to d_max. The relative weights
of the two estimates are set by *y_diff*.
args
----
trajs : list of Trajectory
locs : 2D ndarray, localizations to consider
for connection
frame_interval : float, seconds
pixel_size_um : float, um
k_return_from_blink : float, penalty to return a trajectory
from blinking status
d_max : float, the maximum expected diffusion
coefficient in um^2 s^-1
y_diff : float, the relative influence of the
particle's local history on its estimated
diffusion coefficient
search_radius : float, um
d_bound_naive : float, naive estimate for a particle's
local diffusion coefficient, um^2 s^-1
init_cost : float, static cost to initialize a new
trajectory when reconnections are available
in the search radius
returns
-------
2D ndarray of shape (n_trajs, n_locs), the weights
for reconnection
"""
n_traj = len(trajs)
n_locs = locs.shape[0]
n_dim = n_traj + n_locs
# Unit conversions
search_radius_pxl = search_radius / pixel_size_um
max_var2 = 2.0 * d_max * frame_interval / (pixel_size_um ** 2)
# The weight matrix
W = np.zeros((n_dim, n_dim), dtype="float64")
# Set the static cost of starting a new trajectory
for li in range(n_locs):
W[n_traj:, li] = init_cost
W[:, n_locs:] = init_cost
# For each trajectory, calculating the weight of
# assignments to each localization
for ti in range(n_traj):
# Last known position of this trajectory
last_pos = trajs[ti].last_pos()
# Penalize blinking trajectories
L_blink = -k_return_from_blink * trajs[ti].n_blinks + np.log(k_return_from_blink)
# Distances to each localization
R = traj_loc_distance([trajs[ti]], locs)[0, :]
R2 = R ** 2
# Estimate the local diffusion coefficient of this
# trajectory.
# If no prior displacements are available, use the
# naive estimate
if len(trajs[ti].indices) == 1:
local_var2 = 2*d_bound_naive*frame_interval * \
(1+trajs[ti].n_blinks)
# Otherwise, use the MSD method
else:
traj_slice = trajs[ti].get_slice()
frames = traj_slice[:,1].astype('int64')
pos = traj_slice[:,2:4]
delta_frames = frames[1:] - frames[:-1]
local_var2 = 0.5 * (((pos[1:,:]-pos[:-1,:])**2).sum(1) / \
delta_frames).mean() * (1+trajs[ti].n_blinks)
# Log-likelihood of diffusing from last
# known position to each new position
L_diff = np.log(
y_diff * (R/local_var2) * np.exp(-R2/(2*local_var2))
+ (1-y_diff) * (R/max_var2) * np.exp(-R2/(2*max_var2))
)
# Make sure we do NOT reconnect trajectories to localizations
# outside of their search radii
L_diff[R>search_radius_pxl] = -np.inf
# Assign reconnection weight
W[ti,:n_locs] = -(L_blink+L_diff)
return W
def euclidean_weight_matrix(trajs, locs, pixel_size_um=0.16,
scale=1.0, search_radius=2.5, init_cost=50.0, **kwargs):
"""
Generate the weight matrix for reconnection between
Trajectories and localizations for the "euclidean"
reconnection method.
Here, the weight to reconnect traj I with localization J
is just the distance between the last known position of I
and J, scaled by the constant *scale*.
If J is outside the search radius of I, the weight is
infinite.
The weight to drop *I* or to start a new trajectory from
*J* when other reconnections are available is *init_cost*.
args
----
trajs : list of Trajectory
locs : 2D ndarray, localizations to consider
for connection
pixel_size_um : float, um
scale : float, inflation factor for the distances
search_radius : float, um
init_cost : float, penalty for not performing
available reconnections
kwargs : discarded
returns
-------
2D ndarray of shape (n_trajs, n_locs), the reconnection
weights
"""
n_traj = len(trajs)
n_locs = locs.shape[0]
n_dim = n_traj + n_locs
# Weight matrix
W = np.zeros((n_dim, n_dim), dtype='float64')
# Distance from each trajectory to each localization
distances = traj_loc_distance(trajs, locs)
# Reconnections out of the search radius are impossible
out_of_radius = distances > search_radius/pixel_size_um
distances[out_of_radius] = np.inf
# Rescale if desired
if scale != 1.0:
distances[~out_of_radius] = distances[~out_of_radius] * scale
# Weight of traj:loc reconnection is proportional to Euclidean distance
W[:n_traj, :n_locs] = distances
# Static cost to start new trajectory
W[n_traj:, :] = init_cost
# Penalize reconnecting to nothing
W[:n_traj, n_locs:] = init_cost
return W
######################################
## FRAME-FRAME RECONNECTION METHODS ##
######################################
def reconnect_conservative(trajs, locs, locs_array, max_blinks=0,
frame_interval=0.00548, pixel_size_um=0.16, **kwargs):
"""
Only reassign trajs to locs when the assignment is
unambiguous (1 traj, 1 loc within the search radius).
For all other trajectories, terminate them.
For all other locs, start new trajectories.
args
----
trajs : list of Trajectory
locs : 2D ndarray with columns loc_idx, frame,
y, x, I0
max_blinks : int
frame_interval : float
pixel_size_um : float
kwargs : ignored, possibly passed due to upstream
method disambiguation
returns
-------
list of Trajectory
"""
out = []
# A single localization and trajectory pair - assignment
# is unambiguous. This is the only situation where reconnection
# is allowed.
n_trajs = len(trajs)
n_locs = locs.shape[0]
if n_trajs==1 and n_locs==1:
trajs[0].add_index(locs[0,0], (n_trajs,n_locs))
out = trajs
# Multiple localizations and/or trajectories
else:
# Terminate all existing trajectories
for ti in range(n_trajs):
trajs[ti].active = False
out += trajs
# Start new trajectories from all localizatoins
for li in range(n_locs):
out.append(Trajectory(locs[li,0], locs_array,
(n_trajs,n_locs), max_blinks=max_blinks))
return out
WEIGHT_MATRIX_METHODS = {
'diffusion': diffusion_weight_matrix,
'euclidean': euclidean_weight_matrix
}
def reconnect_hungarian(trajs, locs, locs_array, max_blinks=0,
weight_method=None, min_I0=0.0, **kwargs):
"""
Assign Trajectories to localizations by assigning each
possible reconnection a weight, then finding the assignment
that minimizes the summed weights with the Hungarian
algorithm.
args
----
trajs : list of Trajectory
locs : 2D ndarray, localizations to consider
for connection
locs_array : 2D ndarray, all localizations in this
movie
max_blinks : int
weight_method : str, the method to use to generate
the weight matrix
min_I0 : float, minimum intensity to start
a new Trajectory
kwargs : to weight_method
returns
-------
list of Trajectory
"""
out = []
# Get the size of the assignment problem
n_traj = len(trajs)
n_locs = locs.shape[0]
# Unambiguous - only one trajectory and localization
if n_traj==1 and n_locs==1:
trajs[0].add_index(locs[0,0], (n_traj,n_locs))
# Otherwise, solve the assignment problem by finding
# weights and minimizing with the Hungarian algorithm
else:
# Get the weight matrix for reconnection
W = WEIGHT_MATRIX_METHODS[weight_method](
trajs, locs, **kwargs)
# Minimize negative log likelihood with
# Hungarian algorithm
for i, j in hungarian_solver.compute(W):
# traj:loc
if (i<n_traj) and (j<n_locs):
trajs[i].add_index(locs[j,0], (n_traj,n_locs))
# traj:(empty)
elif (i<n_traj) and (j>=n_locs):
trajs[i].blink()
# (empty):loc
elif (j<n_locs) and (i>=n_traj):
if locs[j,4] >= min_I0:
out.append(Trajectory(locs[j,0], locs_array,
(n_traj,n_locs), max_blinks=max_blinks))
else:
pass
# Combine new and existing trajs
out += trajs
return out
def reconnect_diffusion(trajs, locs, locs_array, max_blinks=0,
min_I0=0.0, **kwargs):
"""
Assign Trajectories to localizations on the basis of their
expected probability of diffusion and their blinking status.
Each of the Trajectories is assumed to be a Brownian motion
in 2D. Its diffusion coefficient is evaluated from its
history by MSD if it is greater than length 1, or from
d_bound_naive otherwise.
args
----
trajs : list of Trajectory
locs : 2D ndarray, localizations to consider
for connection
locs_array : 2D ndarray, all localizations in this
movie
max_blinks : int
frame_interval : float, seconds
pixel_size_um : float, um
min_I0 : float, AU
k_return_from_blink : float, penalty to return a trajectory
from blinking status
d_max : float, the maximum expected diffusion
coefficient in um^2 s^-1
y_diff : float, the relative influence of the
particle's local history on its estimated
diffusion coefficient
search_radius : float, um
d_bound_naive : float, naive estimate for a particle's
local diffusion coefficient, um^2 s^-1
returns
-------
list of Trajectory
"""
return reconnect_hungarian(trajs, locs, locs_array,
weight_method='diffusion', max_blinks=0, min_I0=min_I0, **kwargs)
def reconnect_euclidean(trajs, locs, locs_array, max_blinks=0,
min_I0=0.0, **kwargs):
"""
Assign Trajectories to localizations purely by minimizing
the total Trajectory-localization distances.
args
----
trajs : list of Trajectory
locs : 2D ndarray, localizations to consider
for connection
locs_array : 2D ndarray, all localizations in this
movie
max_blinks : int
min_I0 : float, minimum intensity to start a
new trajectory
pixel_size_um : float, um
scale : float, inflation factor for the distances
search_radius : float, um
init_cost : float, cost to start a new trajectory
if reconnections are available
returns
-------
list of Trajectory
"""
return reconnect_hungarian(trajs, locs, locs_array,
weight_method='euclidean', max_blinks=0, min_I0=min_I0, **kwargs)
########################################
## ALL AVAILABLE RECONNECTION METHODS ##
########################################
METHODS = {
'conservative': reconnect_conservative,
'diffusion': reconnect_diffusion,
'euclidean': reconnect_euclidean
}
#############################
## MAIN TRACKING FUNCTIONS ##
#############################
def track(locs, method="diffusion", search_radius=2.5,
pixel_size_um=0.16, frame_interval=0.00548, min_I0=0.0,
max_blinks=0, debug=False, max_spots_per_frame=None,
reindex_unassigned=True, **kwargs):
"""
Given a dataframe with localizations, reconnect into
trajectories.
Each frame-frame reconnection problem is considered
separately and sequentially. For each problem:
1. Figure out which localizations lie within the
the search radii of the current trajectories
2. Identify disconnected "subproblems" in this
trajectory-localization adjacency map
3. Solve all of the subproblems by a method
specified by the *method* kwarg
4. Update the trajectories and proceed to the
next frame
The result is an assignment of each localization to a
trajectory index. Localizations that were not reconnected
into a trajectory for whatever reason are assigned a
trajectory index of -1.
args
----
locs : pandas.DataFrame, set of localizations
method : str, the tracking method. Currently either
"diffusion", "euclidean", or "conservative"
search_radius : float, max jump length in um
pixel_size_um : float, um per pixel
frame_interval : float, seconds
min_I0 : float, only track spots with at least this
intensity
max_blinks : int, number of gaps allowed
debug : bool
max_spots_per_frame
: int, don't track in frames with more than
this number of spots
**kwargs : additional keyword arguments to the tracking
method
returns
-------
pandas.Series, trajectory indices for each localization.
"""
# Filter on the number of spots per frame, if desired
if not max_spots_per_frame is None:
return track_subset(
locs,
[lambda T: filter_on_spots_per_frame(
T,
max_spots_per_frame=max_spots_per_frame,
filter_kernel=21)],
method=method,
search_radius=search_radius,
pixel_size_um=pixel_size_um,
frame_interval=frame_interval,
min_I0=min_I0,
max_blinks=max_blinks,
debug=debug,
**kwargs
)
# If passed an empty dataframe, do nothing
if locs.empty:
for c in ["trajectory", "subproblem_n_locs", "subproblem_n_traj"]:
locs[c] = []
return locs
# Determine frame limits for tracking
start_frame = int(locs['frame'].min())
stop_frame = int(locs['frame'].max())+1
# Get the reconnection method
method_f = METHODS.get(method)
# Sort the localizations by frame (unecessary, but easier
# to interpret)
locs = locs.sort_values(by='frame')
# Assign each localization a unique index
locs['loc_idx'] = np.arange(len(locs))
# Convert locs to ndarray for speed
cols = ['loc_idx', 'frame', 'y', 'x', 'I0']
L = np.asarray(locs[cols])
# Maximum tolerated traj-loc jump distance (search radius)
search_radius_pxl = search_radius / pixel_size_um
# Convenience function: get all of locs from one frame
def get_locs(frame):
return L[L[:,1]==frame,:]
# Convenience function: in a list of Trajectory, find
# trajectories that have finished
def get_finished(trajs):
_finished = [t for t in trajs if not t.active]
_active = [t for t in trajs if t.active]
return _active, _finished
# Start by grabbing the locs in the first frame and
# initializing Trajectories from each of them
frame_locs = get_locs(start_frame)
active = [Trajectory(int(i), L, (0,1), max_blinks) for i in frame_locs[:,0]]
# During tracking, Trajectories are tossed between
# three categories: "active", "new", and "completed".
# "active" Trajectories are eligible for reconnection in
# this frame, "new" Trajectories will become active
# Trajectories in the next frame, and "completed" Trajectories
# have been removed from the pool.
new = []
completed = []
for fi in range(start_frame+1, stop_frame):
# # DEBUG
# print("FRAME:\t%d" % fi)
# print("Duplicates in active:\t", is_duplicates(active))
# print("Duplicates in new:\t", is_duplicates(new))
# print("Duplicates in completed:\t", is_duplicates(completed))
# print("Completed trajectories:")
# for i, t in enumerate(completed):
# print("\tTrajectory %d" % i)
# print(t.get_slice()[:,:2])
# print("\n")
# if fi > 7:
# break
frame_locs = get_locs(fi)
# If there are no locs in this frame, set all active
# trajectories into blink
if len(frame_locs.shape)<2 or frame_locs.shape[0]==0:
# Increment blink counter
for t in active: t.blink()
# Find which trajectories are finished
active, done = get_finished(active)
completed += done
# To next frame
continue
# If there are no active trajectories, consider starting
# one from each localization if it passes the intensity
# threshold
elif len(active)==0:
for i in frame_locs[frame_locs[:,4]>=min_I0, 0]:
new.append(Trajectory(int(i), L, (0,1), max_blinks))
active = new
new = []
# To next frame
continue
# Otherwise, there is some combination of active trajectories
# and localizations in this frame.
else:
# Calculate the adjacency graph: which localizations are
# within the search radius of which trajectories?
adj_g = (traj_loc_distance(active, frame_locs) <=
search_radius_pxl).astype(np.int64)
# Break this graph into subgraphs, each of which represents
# a separate tracking subproblem
subgraphs, Ti, Li, traj_singlets, loc_singlets = \
connected_components(adj_g)
# # DEBUG - PASSED
# for i in range(len(Ti)):
# for j in [k for k in range(len(Ti)) if k != i]:
# for element in Ti[i]:
# assert element not in Ti[j]
# # DEBUG - PASSED
# for i in range(len(Li)):
# for j in [k for k in range(len(Li)) if k != i]:
# for element in Li[i]:
# assert element not in Li[j]
# # DEBUG - a trajectory cannot be simultaneously in the
# # singlet list and also in a subproblem group - PASSED
# for i in traj_singlets:
# for j in range(len(Ti)):
# assert i not in Ti[j]
# for i in loc_singlets:
# for j in range(len(Li)):
# assert i not in Li[j]
# If a trajectory does not have localizations in its
# search radius, set it into blink
for ti in traj_singlets:
active[ti].blink()
if active[ti].active:
new.append(active[ti])
else:
completed.append(active[ti])
# If a localization has no nearby trajectories, start
# a new trajectory if it passes the intensity threshold
for li in loc_singlets:
if frame_locs[li,4] >= min_I0:
new.append(Trajectory(frame_locs[li,0], L, (0,1), max_blinks))
# If there are both trajectories and localizations in the
# subproblem, reconnect according to the reconnection method
for si, subgraph in enumerate(subgraphs):
# Only one traj and one loc: assignment is unambiguous
if subgraph.shape[0]==1 and subgraph.shape[1]==1:
active[Ti[si][0]].add_index(frame_locs[Li[si][0], 0], (1,1))
new.append(active[Ti[si][0]])
# Otherwise, pass to the reconnection method
else:
in_trajs = [active[i] for i in Ti[si]]
out_trajs = method_f([active[i] for i in Ti[si]],
frame_locs[Li[si],:], L, max_blinks=max_blinks,
pixel_size_um=pixel_size_um, frame_interval=frame_interval,
search_radius=search_radius, **kwargs)
# Find finished trajectories
not_done, done = get_finished(out_trajs)
completed += done
new += not_done
# For trajs eligible for reconnection in the next frame,
# transfer to *active*
active = new
new = []
# Finish any trajectories still running
completed += active
# Trajectory indices
ids = np.full(L.shape[0], -1, dtype=np.int64)
# Number of competing trajectories and competing localizations
# for the subproblem in which each localization was connected
# (1: no competition)
subproblem_sizes_traj = np.full(L.shape[0], -1, dtype=np.int64)
subproblem_sizes_locs = np.full(L.shape[0], -1, dtype=np.int64)
# For each trajectory, add its information to these arrays
for ti, t in enumerate(completed):
indices = np.asarray(t.indices)
T_size = [t.subproblem_shapes[i][0] for i in range(len(indices))]
L_size = [t.subproblem_shapes[i][1] for i in range(len(indices))]
ids[np.asarray(t.indices)] = ti
subproblem_sizes_traj[np.asarray(t.indices)] = T_size
subproblem_sizes_locs[np.asarray(t.indices)] = L_size
# Assign traj index as a column in the original dataframe
locs['trajectory'] = ids
locs['subproblem_n_traj'] = subproblem_sizes_traj
locs['subproblem_n_locs'] = subproblem_sizes_locs
# For localizations unassigned to any trajectory, assign
# unique trajectory indices
if reindex_unassigned:
max_index = locs["trajectory"].max() + 1
unassigned = locs["trajectory"] == -1
n_unassigned = unassigned.sum()
locs.loc[unassigned, "trajectory"] = np.arange(
max_index, max_index+n_unassigned)
# If desired, return the Trajectory objects for testing
if debug:
return locs, completed
else:
return locs
def track_subset(locs, filters, **kwargs):
    """Run tracking on a subset of localizations only.

    Each element of *filters* is a callable that maps the localization
    dataframe to a boolean mask; only rows passing every filter are fed
    to the tracker. The tracking columns produced are then merged back
    into the full dataframe, with rows excluded from tracking set to -1.

    Parameters
    ----------
    locs : pandas.DataFrame, localizations
    filters : list of functions, the filters
    kwargs : keyword arguments to the tracking method

    Returns
    -------
    pandas.DataFrame, trajectories
    """
    # Combine all QC filter masks into a single boolean selector
    selected = np.ones(len(locs), dtype=bool)
    for qc_filter in filters:
        selected = np.logical_and(selected, qc_filter(locs))

    # Track only the localizations that passed every filter
    tracked = track(locs[selected].copy(), **kwargs)

    # Merge the new tracking columns back into the original dataframe;
    # rows absent from the tracked subset become -1
    new_columns = [col for col in tracked.columns if col not in locs.columns]
    for col in new_columns:
        locs[col] = tracked[col]
        locs[col] = locs[col].fillna(-1).astype(np.int64)
    return locs
| alecheckert/quot | quot/track.py | track.py | py | 30,761 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "munkres.Munkres",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance_matrix",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "numpy.z... |
2276280862 | import random
from flask import Flask, request
from pymessenger.bot import Bot
import process
app = Flask(__name__)
ACCESS_TOKEN = 'EAALxONaYePsBAM5oBExC3ZC9yFGVucIiZB7fxP00AKhZBc9gjPZAqtk7Ed8T8UlD8bhZBsA8pWIcSpPrGpItvUSEk1ZAMPfZBr6B7S5vXRUqzbpxJciSGFZCCWwRei8laoSqmCreAhYgXWva680ftzeZB89S9gbqZBCdPm5Tf0jcxxFQZDZD'
VERIFY_TOKEN = 'mylife'
bot = Bot(ACCESS_TOKEN)
#We will receive messages that Facebook sends our bot at this endpoint
@app.route("/", methods=['GET', 'POST'])
def receive_message():
    """Webhook endpoint for Facebook Messenger.

    GET: webhook verification handshake (Facebook sends hub.verify_token).
    POST: incoming messaging events; each text message is answered via
    process.get_message and sent back with send_message.
    """
    if request.method == 'GET':
        # Facebook sends a verification GET carrying the token we configured
        token_sent = request.args.get("hub.verify_token")
        return verify_fb_token(token_sent)
    #if the request was not get, it must be POST and we can just proceed with sending a message back to user
    else:
        # get whatever message a user sent the bot
        output = request.get_json()
        for event in output['entry']:
            messaging = event['messaging']
            for message in messaging:
                if message.get('message'):
                    #Facebook Messenger ID for user so we know where to send response back to
                    recipient_id = message['sender']['id']
                    print("something")
                    if message['message'].get('text'):
                        text = message['message'].get('text')
                        # Delegate reply generation to the shared bot logic
                        response_sent_text = process.get_message(text, recipient_id, 'messenger')
                        # response_sent_text = "ss"
                        send_message(recipient_id, response_sent_text)
    return "Message Processed"
def verify_fb_token(token_sent):
    """Validate the webhook verification token sent by Facebook.

    On a match, echo back the hub.challenge value to complete the
    handshake; otherwise return an error string.
    """
    if token_sent != VERIFY_TOKEN:
        return 'Invalid verification token'
    return request.args.get("hub.challenge")
def get_message():
    """Return one compliment chosen uniformly at random."""
    compliments = [
        "You are stunning!",
        "We're proud of you.",
        "Keep on being you!",
        "We're greatful to know you :)",
    ]
    # random.choice picks a single item with uniform probability
    return random.choice(compliments)
#uses PyMessenger to send response to user
def send_message(recipient_id, response):
    """Send each reply in *response* to the user via PyMessenger.

    :param recipient_id: Facebook Messenger user id to send to.
    :param response: iterable of reply strings.
    :return: the string "success".
    """
    #sends user the text message provided via input response parameter
    for reply in response:
        bot.send_text_message(recipient_id, reply)
    return "success"
def start():
    """Launch the Flask development server for the Messenger webhook."""
    print('START MESSENGER!')
    app.run()
if __name__ == "__main__":
app.run(debug=True) | vsvipul/ContestBot | messenger.py | messenger.py | py | 2,460 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pymessenger.bot.Bot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.request... |
28069348557 | from telegram import Bot
from telegram.ext import Dispatcher, PicklePersistence
from handlers import handlers
def setup_bot(token, persistence_filename='persistence'):
    """Build a Telegram Bot plus a Dispatcher wired with all handlers.

    :param token: Telegram bot API token.
    :param persistence_filename: file used by PicklePersistence to keep
        bot/conversation state between runs.
    :return: (bot, dispatcher) tuple.
    """
    # Create bot, update queue and dispatcher instances
    bot = Bot(token)
    bot_persistence = PicklePersistence(filename=persistence_filename)
    # workers=0 with update_queue=None: updates are processed synchronously
    # (webhook-style feeding), not via an Updater polling loop.
    dispatcher = Dispatcher(bot, None, workers=0,
                            use_context=True,
                            persistence=bot_persistence)

    ##### Register handlers here #####
    for handler in handlers:
        dispatcher.add_handler(handler)

    return bot, dispatcher
| The0nix/pythonanywhere-tg-bot | bot.py | bot.py | py | 610 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "telegram.Bot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "telegram.ext.PicklePersistence",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "telegram.ext.Dispatcher",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ha... |
24040618942 | from dataclasses import dataclass
@dataclass
class Protocols:
    """Registry of known protocol names.

    NOTE: the attributes below carry no type annotations, so @dataclass
    registers no fields -- they remain plain class attributes; attribute
    access (``p.arp``) is unchanged.

    BUGFIX/generalization: the script below uses ``"arp" in p`` and
    ``p["arp"]``, both of which raised TypeError with the original class.
    ``__contains__`` and ``__getitem__`` make those lookups work without
    changing any existing behavior.
    """
    arp = "arp"
    bootp = "bootp"
    icmp = "icmp"
    all = "all"

    def __contains__(self, name):
        """Return True if *name* is one of the known protocol attributes."""
        return isinstance(name, str) and not name.startswith('_') and hasattr(self, name)

    def __getitem__(self, name):
        """Dict-style access: ``proto['arp'] -> 'arp'``; KeyError if unknown."""
        if name in self:
            return getattr(self, name)
        raise KeyError(name)
# --- experiment 1: dataclass-based protocol registry ---
p = Protocols()
print(p.arp)
# NOTE(review): the two calls below require Protocols to define
# __contains__ / __getitem__ -- they raise TypeError otherwise.
print("arp" in p)
print(p["arp"])
exit()

# --- experiment 2 (unreached): plain dict + attribute-injected lambda ---
protocols = dict(arp="arp", bootp="bootp", icmp="icmp", all="all")
protocols["0"] = "all"
# Abuse a lambda as a cheap namespace object and graft the dict onto it
p = lambda: None
p.__dict__.update(protocols)
print(p.arp)
# print(p["0"])
| atrox3d/pycharm-scratches | data-structures/protocol-dataclass.py | protocol-dataclass.py | py | 373 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 4,
"usage_type": "name"
}
] |
34902005124 | from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
import json
from sqlalchemy import desc,and_
import os
from db_manager import db
from models import Event
from datetime import datetime
app = Flask(__name__)
CORS(app)
app.secret_key = os.getenv('SECRET_KEY')
if app.config['ENV'] == 'production':
app.config.from_object('config.ProductionConfig')
else:
app.config.from_object('config.DevelopmentConfig')
db.init_app(app)
with app.app_context():
db.create_all()
UPLOAD_DIR = os.path.curdir = 'static/uploads/'
app.secret_key = '1dae11441a1a2acf1cad3eca'
# cors = CORS(app, resources={r"*": {"origins": "*"}})
# app.config['CORS_HEADERS'] = 'Content-Type'
@app.get('/events')
def events_list():
    """Return every published event as a JSON array, newest first."""
    events = db.session.query(Event.Event).filter(Event.Event.published == True).order_by(
        desc(Event.Event.created_at)).all()
    return jsonify([{
        'first_name': event.first_name,
        'id': event.id,
        'last_name': event.last_name,
        'title': event.title,
        'price': event.price,
        'holding_at': event.holding_at,
        'time': event.time,
        'url': event.url,
        'image': event.image,
        'address': event.address,
        'description': event.description,
        'phone': event.phone,
        'created_at': event.created_at,
    } for event in events])
@app.get('/event/<int:id>')
def event(id):
    """Return one published event by id (as a one-element JSON list),
    or a 404 JSON error when it does not exist or is unpublished."""
    event = db.session.query(Event.Event).filter(and_(Event.Event.id == id, Event.Event.published == True)).first()
    # .first() returns None when no row matches
    if event is None:
        return jsonify({
            'error': 'Event not found'
        }), 404
    return jsonify([{
        'first_name': event.first_name,
        'id': event.id,
        'last_name': event.last_name,
        'title': event.title,
        'price': event.price,
        'holding_at': event.holding_at,
        'time': event.time,
        'url': event.url,
        'image': event.image,
        'address': event.address,
        'description': event.description,
        'phone': event.phone,
        'created_at': event.created_at,
    }])
@app.post('/add_event')
@cross_origin()
def add_event():
    """Create a new event from the posted JSON payload.

    Falls back to a placeholder date when 'holding_at' is missing or
    empty, rejects payloads with fewer than 10 fields, then persists
    the event and echoes the payload back as JSON.
    """
    event = request.get_json()
    print(event)
    # BUGFIX: the original used `event['holding_at'] is ''` -- identity
    # comparison against a string literal is unreliable (implementation
    # dependent, SyntaxWarning on modern Python) and raises KeyError when
    # the key is absent. A truthiness check on .get() covers '' and None.
    if not event.get('holding_at'):
        event['holding_at'] = '2000-01-01'
    if len(event) < 10:
        return jsonify({
            'error': 'Fill All Values'
        }), 404
    db.session.add(Event.Event(
        first_name=event['first_name'],
        last_name=event['last_name'],
        title=event['title'],
        price=event['price'],
        holding_at=datetime.strptime(
            event['holding_at'], '%Y-%m-%d'),
        time=event['time'],
        url=event['url'],
        image=event['image'],
        address=event['address'],
        description=event['description'],
        phone=event['phone'],
    ))
    db.session.commit()
    return jsonify(event)
@app.get('/events/phone/<string:phone>')
def find_by_phone(phone):
    """Return summary info for all events registered under *phone*,
    or a 404 JSON error when there are none."""
    events = db.session.query(Event.Event).filter(Event.Event.phone == phone).all()
    # BUGFIX: Query.all() returns a list and never None, so the original
    # `events is None` check could never fire; treat an empty result
    # list as "not found" instead.
    if not events:
        return jsonify({
            'error': 'Event not found'
        }), 404
    return jsonify([{
        'first_name': event.first_name,
        'id': event.id,
        'last_name': event.last_name,
        'title': event.title,
        'published': event.published
    } for event in events])
if __name__ == '__main__':
app.run(debug=True, port=2022)
| MetiKh2/flask-eventsManager | app.py | app.py | py | 3,415 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "db_manager.db.init_app",
"li... |
4409582757 | """Module to handle different set operations."""
import itertools
import matplotlib.pyplot as plt
class Set:
    """Class representing a set of different objects.

    Items are kept in insertion order with duplicates removed. The class
    supports iteration, membership tests, indexing and set algebra via
    the +, * and - operators (union, cartesian product, complement).
    """

    def __init__(self, iteratable_data=None):
        """Initialize a set with given data (duplicates are dropped)."""
        if iteratable_data is not None:
            self.item_list = self.eliminateDuplicates(iteratable_data)
        else:
            self.item_list = []
        self.initIterator()

    def initIterator(self):
        """
        Encapsulate the iterator initialization in a separate method.

        Is used to simplify inheritance.
        """
        self.actual_position_of_iterator = 0

    def __str__(self):
        """Return a string representation of all items within the set."""
        string_representation = "{"
        item_counter = 1
        for item in self.item_list:
            string_representation += str(item)
            if item_counter < len(self):
                string_representation += ", "
            item_counter += 1
        string_representation += "}"
        return string_representation

    def __iter__(self):
        """Return a fresh, independent iterator over the items.

        BUGFIX: the original returned *self*, so every loop over the same
        instance shared one position counter and nested iteration such as
        ``for a in s: for b in s: ...`` terminated early. Returning an
        iterator over the backing list fixes this; ``__next__`` below is
        kept so code that stepped the set object directly keeps working.
        """
        return iter(self.item_list)

    def __next__(self):
        """Return the next element in the set as long as it exists."""
        if self.actual_position_of_iterator < len(self):
            item = self.item_list[self.actual_position_of_iterator]
            self.actual_position_of_iterator += 1
            return item
        else:
            self.actual_position_of_iterator = 0
            raise StopIteration()

    def __contains__(self, element):
        """Return true if the specified item exists in the set."""
        for item in self.item_list:
            if item == element:
                return True
        return False

    def __len__(self):
        """Return the number of elements in the set."""
        return len(self.item_list)

    def __getitem__(self, index):
        """Access to item via index."""
        return self.item_list[index]

    def __add__(self, iteratable_data):
        """Use the + operator as a shortcut for the union function."""
        return union(self, iteratable_data)

    def __mul__(self, iteratable_data):
        """Use the * operator as a shortcut for the cartesian product."""
        return cartesianProduct(self, iteratable_data)

    def __sub__(self, iteratable_data):
        """Use the - operator as a shortcut for the complement function."""
        return complement(self, iteratable_data)

    def getSpecifiedSubset(self, selection_function):
        """Create a subset according to given selection function."""
        return getSpecifiedSubset(self, selection_function)

    def getPowerSet(self):
        """Return a set containing all possible subsets."""
        power_set = []
        for i in range(len(self) + 1):
            # itertools.combinations(given_list, i) returns all subsets of
            # length i regarding the given list as a list of tuple instances
            subsets_of_length_i = itertools.combinations(self, i)
            # convert the list of tuples to a list of lists
            for subset_tuple in subsets_of_length_i:
                subset_tuple_as_list = []
                for num in subset_tuple:
                    subset_tuple_as_list.append(num)
                power_set.append(subset_tuple_as_list)
        return Set(power_set)

    def eliminateDuplicates(self, iteratable_data):
        """Take given data and create a list without duplicates."""
        items_without_duplicates = []
        for item in iteratable_data:
            if item not in items_without_duplicates:
                items_without_duplicates.append(item)
        return items_without_duplicates
class CartesianProduct(Set):
    """Class representing the cartesian product of two iteratable classes.

    Each element is an inner [x, y] pair; the instance is callable like a
    relation (x -> list of matching y values) and can plot itself.
    """

    def __init__(
            self,
            iteratable_data_a=None,
            iteratable_data_b=None,
            selection_function=None):
        """Compute the cartesian product and create a corresponding set.

        When *selection_function* is given, only the pairs it accepts
        are kept (a relation rather than the full product).
        """
        if selection_function is None:
            super().__init__(
                cartesianProduct(iteratable_data_a, iteratable_data_b)
            )
        else:
            specified_subset = getSpecifiedSubset(
                cartesianProduct(iteratable_data_a, iteratable_data_b),
                selection_function
            )
            super().__init__(specified_subset)

    def __call__(self, x):
        """
        Make the class callable.

        Return the y values of all inner tuples whose x values equal the input.
        """
        y = []
        for item in self.item_list:
            if item[0] == x:
                y.append(item[1])
        return y

    def getCoordinates(self):
        """Return the data's x and y values as different lists."""
        x_values = []
        y_values = []
        for item in self.item_list:
            x_values.append(item[0])
            y_values.append(item[1])
        return x_values, y_values

    def plot(self):
        """Plot the encapsulated data as a scatter of points."""
        x_values, y_values = self.getCoordinates()
        plt.plot(x_values, y_values, 'o')
        plt.show()
plt.show()
def union(iteratable_data_a, iteratable_data_b):
    """Return the union of iteratable data A and B as a set.

    None or empty operands are treated as the empty set.
    """
    a_missing = iteratable_data_a is None or len(iteratable_data_a) < 1
    b_missing = iteratable_data_b is None or len(iteratable_data_b) < 1
    if a_missing and b_missing:
        return Set([])
    if a_missing:
        return Set(iteratable_data_b)
    if b_missing:
        return Set(iteratable_data_a)
    # Start from all items of A, then append the items of B not already
    # present in A
    combined = [item for item in iteratable_data_a]
    combined.extend(item for item in iteratable_data_b
                    if item not in iteratable_data_a)
    return Set(combined)
def intersect(iteratable_data_a, iteratable_data_b):
    """Return the intersection of iteratable data A and B as a set.

    An empty or None operand yields the empty set.
    """
    if iteratable_data_a is None or len(iteratable_data_a) < 1:
        return Set([])
    if iteratable_data_b is None or len(iteratable_data_b) < 1:
        return Set([])
    common = [item for item in iteratable_data_a
              if item in iteratable_data_b]
    return Set(common)
def complement(iteratable_data_a, iteratable_data_b=None):
    """Return the relative complement A \\ B as a set."""
    if iteratable_data_a is None or len(iteratable_data_a) < 1:
        return Set([])
    if iteratable_data_b is None or len(iteratable_data_b) < 1:
        # Nothing to remove -- the complement is A itself
        return Set(iteratable_data_a)
    remaining = [item for item in iteratable_data_a
                 if item not in iteratable_data_b]
    return Set(remaining)
def cartesianProduct(iteratable_data_a, iteratable_data_b):
    """Return the cartesian product A x B as a Set of [a, b] pairs.

    BUGFIX: the original returned the non-empty operand unchanged when
    the other operand was None/empty. The cartesian product with the
    empty set is the empty set (there are no pairs), consistent with
    itertools.product, so Set([]) is returned in that case.
    """
    if iteratable_data_a is None or len(iteratable_data_a) < 1:
        return Set([])
    if iteratable_data_b is None or len(iteratable_data_b) < 1:
        return Set([])
    # Both inputs are valid, therefore compute the product as usual
    cartesian_product = []
    for item_a in iteratable_data_a:
        for item_b in iteratable_data_b:
            cartesian_product.append([item_a, item_b])
    return Set(cartesian_product)
def getSpecifiedSubset(iteratable_data, selection_function):
    """Create a set containing only elements accepted by the function.

    List elements are treated as [x, y] pairs and unpacked into a
    two-argument call; anything else is passed through as a single
    argument.
    """
    chosen = []
    for element in iteratable_data:
        if isinstance(element, list):
            accepted = selection_function(element[0], element[1])
        else:
            accepted = selection_function(element)
        if accepted:
            chosen.append(element)
    return Set(chosen)
def vonNeumannOrdinalConstruction(n):
    """Use von Neumann's method to represent natural numbers.

    n is encoded as the set {0, 1, ..., n-1}: each ordinal is the union
    of its predecessor with the singleton containing that predecessor.
    """
    if n == 0:
        # 0 is the empty set
        return Set([])
    elif n == 1:
        # 1 = {0} = {{}}
        return Set([[]])
    else:
        # n = (n-1) U {n-1}
        return union(
            Set(vonNeumannOrdinalConstruction(n - 1)),
            Set([vonNeumannOrdinalConstruction(n - 1)])
        )
def binomialCoefficient(n, k):
    """Compute the binomial coefficient C(n, k) by counting the size-k
    subsets in the power set of an n-element set."""
    all_subsets = Set(range(n)).getPowerSet()
    return sum(1 for subset in all_subsets if len(subset) == k)
| rkleee/aragonit | set/CustomSet.py | CustomSet.py | py | 9,928 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.combinations",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "mat... |
74874803553 | import re
import os
import json
import requests
from datetime import datetime
from bs4 import BeautifulSoup
# base_url = 'https://www.kickstarter.com/discover/advanced?category_id=12&woe_id=0&sort=most_funded&seed=2591527'
# base_url = 'https://www.kickstarter.com/discover/advanced?category_id=332&sort=most_funded&seed=2591762'
def scrape_project(json_data):
    """Flatten one Kickstarter project card's JSON and enrich it by
    scraping the project page (description text and reward tiers).

    :param json_data: dict parsed from a card's ``data-project`` attribute.
    :return: dict with summary info, finances, description and rewards.
    """
    final_data = {}
    # Copy base json project data
    final_data['id'] = json_data['id']
    final_data['name'] = json_data['name']
    final_data['blurb'] = json_data['blurb']
    final_data['specific-category'] = json_data['category']['name']
    final_data['link'] = json_data['urls']['web']['project']
    final_data['status'] = json_data['state']
    final_data['backers'] = json_data['backers_count']
    try:
        final_data['location'] = json_data['location']['displayable_name']
    except:
        # Some projects carry no location block
        final_data['location'] = None
    # Timestamps arrive as Unix epoch seconds
    final_data['launch_date'] = datetime.utcfromtimestamp(
        int(json_data['launched_at'])).strftime('%Y-%m-%d %H:%M:%S')
    final_data['deadline'] = datetime.utcfromtimestamp(
        int(json_data['deadline'])).strftime('%Y-%m-%d %H:%M:%S')
    final_data['finances'] = {}
    final_data['finances']['goal'] = json_data['goal']
    final_data['finances']['pledged'] = json_data['pledged']
    final_data['finances']['currency'] = json_data['currency']
    final_data['finances']['percent_funded'] = json_data['percent_funded']
    # Scrape description from individual project page
    print('Scraping {}...'.format(json_data['name']))
    project_url = json_data['urls']['web']['project']
    page = requests.get(project_url)
    soup = BeautifulSoup(page.content, 'html.parser')
    description_container = soup.find(
        'div', {'class': 'description-container'})
    # Format description text in readable string form
    description_content = description_container.find_all('p', text=True)
    description_content = [p.text for p in description_content]
    # Drop whitespace-only paragraphs and the leading one
    description_content = list(
        filter(lambda x: (x != '\n') and (x != '\xa0') and (x != ' '), description_content))[1:]
    description_string = ''
    for string in description_content:
        string = string.strip()
        string = re.sub(r'\xa0', '', string)
        description_string = description_string + ' ' + string
    final_data['description'] = description_string
    # Scrape reward tiers information from project page
    tiers = {}
    rewards_container = soup.find('div', {'class': 'js-project-rewards'})
    rewards_list = rewards_container.find_all(
        'li', {'class': 'pledge-selectable-sidebar'})
    for reward in rewards_list:
        # Reward id 0 is the "pledge without reward" placeholder -- skip it
        if (int(reward['data-reward-id'].strip()) == 0):
            continue
        reward_data = {}
        # Scrape tier amount and currency, e.g. "$ 1,000" -> 1000 / "$"
        reward_amount_string = reward.find('span', {'class': 'money'}).text
        reward_match = re.match(r'(\D+)(\d+[,.]?\d*)', reward_amount_string)
        reward_data['amount'] = int(reward_match.group(2).replace(',', ''))
        reward_data['currency'] = reward_match.group(1).strip()
        # Scrape reward title (absent on some tiers)
        try:
            reward_data['title'] = reward.find(
                'h3', {'class': 'pledge__title'}).text.strip('\n')
        except (AttributeError):
            reward_data['title'] = None
        # Scrape reward description and convert to readable form
        try:
            reward_description = reward.find(
                'div', {'class': 'pledge__reward-description'}).find_all('p', text=True)
            reward_description = [p.text for p in reward_description]
            reward_description = list(
                filter(lambda x: (x != '\n') and (x != '\xa0') and (x != ' '), reward_description))[1:]
            reward_description_string = ''
            for string in reward_description:
                string = string.strip()
                string = re.sub(r'\xa0', '', string)
                reward_description_string = reward_description_string + ' ' + string
            reward_data['description'] = reward_description_string
        except (AttributeError):
            reward_data['description'] = None
        # Scrape number of backers for each tier
        try:
            reward_backer_string = reward.find(
                'span', {'class': 'pledge__backer-count'}).text.strip()
            reward_data['backers'] = int(
                re.match(r'(\S+) \D+', reward_backer_string).group(1).replace(',', ''))
        except (AttributeError):
            reward_data['backers'] = None
        tiers[reward['data-reward-id']] = reward_data
    final_data['rewards'] = tiers
    return final_data
def scrape(base_url, output):
    """Scrape up to 200 discovery listing pages and dump all projects
    found to *output* as indented JSON keyed by project id.

    :param base_url: discovery URL; '&page=N' is appended per page.
    :param output: path of the JSON file to write.
    """
    projects = {}
    # BUGFIX: the original put the scraping loop in the `else` branch of
    # the isdir check, so on a fresh run (./data missing) the directory
    # was created and nothing was scraped. Create the directory when
    # missing and always scrape.
    if not os.path.isdir('./data'):
        os.mkdir('./data')
    for i in range(1, 201):
        print('Scraping page {}...'.format(i))
        projects_html = []
        page = requests.get('{}&page={}'.format(base_url, i))
        soup = BeautifulSoup(page.content, 'html.parser')
        projects_list = soup.find('div', {'id': 'projects_list'})
        projects_outer = projects_list.find_all(
            'div', {'class': 'grid-row'})
        for div in projects_outer:
            projects_html.extend(div.find_all(
                'div', {'class': 'js-react-proj-card'}))
        for project in projects_html:
            # Each card embeds its full project JSON in data-project
            json_data = json.loads(project['data-project'])
            projects[json_data['id']] = scrape_project(json_data)
    with open(output, 'w') as file:
        json.dump(projects, file, indent=4)
| yanchenm/dessa-comp | web_scraper.py | web_scraper.py | py | 5,636 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 34,
"usage_type": "call"
},
... |
19503054314 | from openerp.exceptions import except_orm, ValidationError
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.exceptions import Warning as UserError
from openerp import models, fields, api, _
from openerp import workflow
import time
import datetime
from datetime import date
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from datetime import timedelta
from pychart.arrow import default
from openerp.osv import osv, expression
class AccountMoveNewApi(models.Model):
    """Extension of account.move that can purge all cash/bank/expense
    book records and supervisor payment records linked to an entry."""
    _inherit = 'account.move'

    # Set once the purge method below has been run on this move.
    is_clicked = fields.Boolean(string="Is Clicked")

    @api.one
    def delete_all_related_cash_ban_expense_entry(self):
        """Delete every record referencing this move: bank/cash/expense
        book lines, cash transfer confirmations, supervisor payment
        records (cash and bank) and finally the move's own lines."""
        if self.id:
            bank_books = self.env['bank.book.line'].search([('move_id', '=', self.id)])
            cash_books = self.env['cash.book.line'].search([('move_id', '=', self.id)])
            expense_books = self.env['expense.book.line'].search([('move_id', '=', self.id)])
            if bank_books:
                for b in bank_books:
                    b.unlink()
            if cash_books:
                for c in cash_books:
                    c.unlink()
            if expense_books:
                for e in expense_books:
                    e.unlink()
            cash_confirm_transfer = self.env['cash.confirm.transfer'].search([('move_id', '=', self.id)])
            if cash_confirm_transfer:
                for cash in cash_confirm_transfer:
                    cash.unlink()
            for move_line in self.line_id:
                move_line.unlink()
            supervisor_cash_transfer = self.env['supervisor.payment.cash'].search([('move_id', '=', self.id)])
            if supervisor_cash_transfer:
                for supervisor in supervisor_cash_transfer:
                    supervisor.line_id.approve_ids2.unlink()
                    supervisor.line_id.unlink()
                    supervisor.cash_ids.unlink()
                    supervisor.unlink()
            supervisor_bank_transfer = self.env['supervisor.payment.bank'].search([('move_id', '=', self.id)])
            # BUGFIX: the original guarded this loop with the *cash*
            # recordset (copy/paste slip); check the bank recordset so
            # bank transfers are purged even when no cash transfer exists.
            if supervisor_bank_transfer:
                for supervisor in supervisor_bank_transfer:
                    supervisor.line_id.approve_ids2.unlink()
                    supervisor.line_id.unlink()
                    supervisor.cash_ids.unlink()
                    supervisor.unlink()
            self.is_clicked = True
class CashBook(models.Model):
    """Daily cash book: tracks an opening balance, its booked
    transactions and the close-of-day reconciliation for a cash account."""
    _name = 'cash.book'
    _rec_name = 'date'
    _order = 'date desc'

    # Make fields read-only once the book is closed.
    READONLY_STATES = {
        'close': [('readonly', True)],
    }

    @api.onchange('account_id')
    def onchange_account(self):
        """Refresh the opening balance when the account is changed."""
        if self.account_id:
            self.opening = self.account_id.balance

    @api.model
    def default_get(self, fields):
        """Default the account to the first configured cash-book account
        of the user's company."""
        res = super(CashBook, self).default_get(fields)
        line_id = self.env['cash.book.configuration.line'].search([('cashbook_config_id', '=', self.env.user.company_id.id)], limit=1, order='id asc')
        if line_id:
            # return line_id.account_id.id
            res.update({'account_id': line_id.account_id.id})
        return res

    @api.multi
    @api.depends('move_lines')
    def compute_current_balance(self):
        """Current balance = opening + (total debit - total credit)."""
        for rec in self:
            debit = 0
            credit = 0
            for lines in rec.move_lines:
                debit += lines.debit
                credit += lines.credit
            rec.current_balance = rec.opening + (debit - credit)

    @api.multi
    @api.depends('actual_balance')
    def compute_difference(self):
        """Flag a discrepancy between counted and computed balances and
        preload the company's write-off account when one is configured.

        NOTE(review): operates on `self` directly despite @api.multi --
        presumably only ever evaluated on single records; confirm.
        """
        if self.actual_balance:
            if self.actual_balance == self.current_balance:
                self.is_difference = False
            else:
                self.is_difference = True
                cash_book_config = self.env['res.company'].search([], limit=1)
                if cash_book_config:
                    self.write_off_account = cash_book_config.write_off_account.id
        else:
            self.is_difference = False

    date = fields.Date('Date', states=READONLY_STATES, default=lambda self: fields.datetime.now())
    user_id = fields.Many2one('res.users', 'User', default=lambda self: self.env.user, states=READONLY_STATES)
    account_id = fields.Many2one('account.account', 'Account', states=READONLY_STATES)
    opening = fields.Float('Opening Balance')
    move_lines = fields.One2many('cash.book.line', 'cash_book_id', 'Transactions', states=READONLY_STATES)
    state = fields.Selection([('draft', 'Draft'), ('open', 'Open'), ('approve', 'Approve'), ('reject', 'Locked'), ('close', 'Closed')], 'State', default='draft')
    balance = fields.Float('Balance', states=READONLY_STATES)
    remarks = fields.Text('Remarks', states=READONLY_STATES)
    current_balance = fields.Float(compute='compute_current_balance', string="Balance")
    actual_balance = fields.Float('Actual Balance', states=READONLY_STATES)
    write_off_account = fields.Many2one('account.account', 'Write Off Account', states=READONLY_STATES)
    is_difference = fields.Boolean(compute='compute_difference', string="Difference")
    company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env.user.company_id)

    @api.multi
    def action_open(self):
        """Workflow: mark the book as open."""
        for rec in self:
            rec.state = 'open'

    @api.multi
    def action_approve(self):
        """Workflow: mark the book as approved."""
        for rec in self:
            rec.state = 'approve'

    @api.multi
    def action_reject(self):
        """Workflow: lock the book (selection label is 'Locked')."""
        for rec in self:
            rec.state = 'reject'

    @api.multi
    def action_close(self):
        """Close the book. If the counted balance differs from the
        computed one, post a write-off journal entry for the gap and
        detach reconciled lines before closing."""
        for rec in self:
            if rec.actual_balance != rec.current_balance:
                journal = self.env['account.journal'].search([('type', '=', 'general'), ('company_id', '=', rec.company_id.id)], limit=1)
                if not journal:
                    raise UserError("Please create a journal with type 'General'.")
                move = self.env['account.move'].create({'journal_id': journal.id, 'date': rec.date})
                move_line = self.env['account.move.line']
                amount = 0
                debit_account = False
                credit_account = False
                # Direction of the write-off depends on whether cash is
                # short (actual < computed) or over.
                if rec.actual_balance < rec.current_balance:
                    amount = rec.current_balance - rec.actual_balance
                    debit_account = rec.write_off_account.id
                    credit_account = rec.account_id.id
                else:
                    amount = rec.actual_balance - rec.current_balance
                    debit_account = rec.account_id.id
                    credit_account = rec.write_off_account.id
                move_line.create({'move_id': move.id,
                                  'state': 'valid',
                                  'name': 'Write Off amount from '+' '+rec.user_id.name+' on'+ str(rec.date),
                                  'account_id': credit_account,
                                  'debit': 0,
                                  'credit': amount,
                                  'closed': True
                                  })
                move_line.create({'move_id': move.id,
                                  'state': 'valid',
                                  'name': 'Write Off amount from '+' '+rec.user_id.name+' on'+ str(rec.date),
                                  'account_id': debit_account,
                                  'debit': amount,
                                  'credit': 0,
                                  'closed': True
                                  })
                # Detach lines already marked closed from this book
                for lines in rec.move_lines:
                    if lines.closed == True:
                        lines.cash_book_id = False
                move.button_validate()
                rec.state = 'close'
            else:
                rec.state = 'close'

    @api.model
    def create(self, vals):
        """On creation, seed the opening balance from the account balance."""
        result = super(CashBook, self).create(vals)
        result.opening = result.account_id.balance
        return result
class CashBookLines(models.Model):
    """A single transaction line inside a cash book."""
    _name = "cash.book.line"

    cash_book_id = fields.Many2one('cash.book', 'CashBook')
    narration = fields.Char('Description')
    # sorter = fields.Char('Sorter')
    account_id = fields.Many2one('account.account', 'Account')
    debit = fields.Float('Debit')
    credit = fields.Float('Credit')
    closed = fields.Boolean('Closed', default=False)
    move_id = fields.Many2one('account.move', 'Journal Entry')

    def open_line_transactions(self, cr, uid, ids, args=None):
        """Open a tree view listing all journal items on this line's
        account (old-API signature)."""
        obj = ''
        for s in self.browse(cr, uid, ids):
            obj = s
        return {
            'name': _('Transaction History'),
            'view_type': 'tree',
            'view_mode': 'tree',
            'res_model': 'account.move.line',
            'type': 'ir.actions.act_window',
            'target': 'current',
            'domain': [('account_id', '=', obj.account_id.id)],
        }
class AccountMoveLine(models.Model):
    """Extension of account.move.line that mirrors cash movements into
    the open cash book and blocks postings when no cash book is open."""
    _inherit = 'account.move.line'

    def open_line_transactions(self, cr, uid, ids, args=None):
        """Open a tree view listing all journal items on this line's
        account (old-API signature)."""
        obj = ''
        for s in self.browse(cr, uid, ids):
            obj = s
        return {
            'name': _('Transaction History'),
            'view_type': 'tree',
            'view_mode': 'tree',
            'res_model': 'account.move.line',
            'type': 'ir.actions.act_window',
            'target': 'current',
            'domain': [('account_id', '=', obj.account_id.id)],
        }

    @api.constrains('account_id')
    def _check_duplicate_account_id(self):
        """Forbid postings on configured cash-register accounts unless a
        cash book for the move's date is open."""
        if self.move_id.date:
            account_ids = [line.account_id.id for line in self.company_id.account_ids]
            if self.account_id.id in account_ids:
                cash_book = self.env['cash.book'].search([('date', '=', self.move_id.date), ('account_id', '=', self.account_id.id), ('state', '=', 'open')])
                if not cash_book:
                    raise UserError("Please open today's cash book...........!")

    @api.model
    def create(self, vals):
        """Create the move line and, when the journal's default credit
        account is a configured cash account, mirror the counterpart
        side of the line into the open cash book (debit/credit swapped)."""
        result = super(AccountMoveLine, self).create(vals)
        if result.move_id.date:
            account_ids = [line.account_id.id for line in result.company_id.account_ids]
            # NOTE(review): accounts_ids is only referenced by the
            # commented-out condition below -- confirm it is still needed.
            accounts_ids = [line.account_id.id for line in result.company_id.accounts_ids]
            # if result.account_id.id in account_ids and result.account_id.parent_id.id not in accounts_ids or result.account_id.parent_id.parent_id.id not in accounts_ids:
            if account_ids:
                cash_book = self.env['cash.book'].search(
                    [('date', '=', result.move_id.date), ('account_id', '=', account_ids[0]),
                     ('state', '=', 'open')])
                if result.move_id.journal_id.default_credit_account_id.id in account_ids and not result.move_id.copy_to_expense:
                    if result.account_id.id not in account_ids:
                        if cash_book:
                            # Mirror with sides swapped: a debit on the
                            # counterpart account is a credit in the book.
                            if result.debit > 0.0:
                                self.env['cash.book.line'].create({
                                    'cash_book_id': cash_book.id,
                                    'narration': result.name,
                                    'account_id': result.account_id.id,
                                    'move_id': result.move_id.id,
                                    'debit': 0.0,
                                    'credit': result.debit,
                                })
                            if result.credit > 0.0:
                                self.env['cash.book.line'].create({
                                    'cash_book_id': cash_book.id,
                                    'narration': result.name,
                                    'account_id': result.account_id.id,
                                    'move_id': result.move_id.id,
                                    'debit': result.credit,
                                    'credit': 0.0,
                                })
                        else:
                            raise UserError("Please open today's Cash Book...........!")
        return result
class CashBookConfigurationLine(models.Model):
    """Company-level configuration line mapping an account that is
    tracked by the cash book."""
    _name = 'cash.book.configuration.line'

    cashbook_config_id = fields.Many2one('res.company', 'Cashbook Configuration')
    account_id = fields.Many2one('account.account', "Account")
class ResCompany(models.Model):
    """Adds cash-book configuration fields to the company record."""
    _inherit = 'res.company'

    # Accounts whose postings must go through the cash register/book.
    account_ids = fields.One2many('cash.book.configuration.line', 'cashbook_config_id', 'Acconts fo Cash Register')
    # Account used to book close-of-day write-off differences.
    write_off_account = fields.Many2one('account.account', 'Account for Write Off', domain=[('type', '=', 'other')])
from openerp.osv import osv
class AccountMove(osv.osv):
    """Old-API extension of account.move: cancelling a move also removes
    its mirrored cash book lines and resets posting flags via raw SQL."""
    _inherit = 'account.move'

    def button_cancel(self, cr, uid, ids, context=None):
        """Cancel journal entries.

        For each move: delete the cash book lines created from it, clear
        is_posted on its lines, then set the moves back to draft -- but
        only if the journal allows cancelling posted entries.
        """
        for line in self.browse(cr, uid, ids, context=context):
            # Remove the mirrored cash book lines for this move
            for ex in self.pool.get('cash.book.line').search(cr, uid, [('move_id', '=', line.id)]):
                if ex:
                    self.pool.get('cash.book.line').browse(cr, uid, ex).unlink()
            cr.execute('UPDATE account_move_line ' \
                       'SET is_posted=False ' \
                       'WHERE move_id = %s',
                       (line.id,))
            if not line.journal_id.update_posted:
                raise osv.except_osv(_('Error!'), _(
                    'You cannot modify a posted entry of this journal.\nFirst you should set the journal to allow cancelling entries.'))
        if ids:
            cr.execute('UPDATE account_move ' \
                       'SET state=%s ' \
                       'WHERE id IN %s', ('draft', tuple(ids),))
        # Raw SQL bypasses the ORM cache, so invalidate it explicitly
        self.invalidate_cache(cr, uid, context=context)
        return True
| hosterp/BUREAU_GREEN_20_06_23 | hiworth_cashbook/models/cash_book.py | cash_book.py | py | 11,025 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openerp.models.Model",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "openerp.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Boolean",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "opene... |
7602478255 | import requests
import pprint
import csv
# Page through the KFC store-locator API (pages 1-10 for keyword 杭州) and
# append each store row to kfc.csv.
base_url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36'}
# Open the output file once instead of re-opening it for every single row.
with open('kfc.csv', mode='a', newline="") as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',')
    for page in range(1, 11):
        print('======第{}页======'.format(page))
        data = {
            'cname': '',
            'pid': '',
            'keyword': '杭州',
            'pageIndex': str(page),
            'pageSize': '10'
        }
        response = requests.post(url=base_url, data=data, headers=headers)
        json_data = response.json()
        json_list = json_data['Table1']
        for json_1 in json_list:
            storeName = json_1['storeName'] + '餐厅'
            provinceName = json_1['provinceName']
            addressDetail = json_1['addressDetail']
            pro = json_1['pro']
            print(storeName, provinceName, addressDetail, pro, sep='|')
            csvwriter.writerow([storeName, provinceName, addressDetail, pro])
| Ricechips/-python- | bilibili/kfc.py | kfc.py | py | 1,156 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.post",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 33,
"usage_type": "call"
}
] |
43497709134 | import torch
from torchvision import datasets, models, transforms
import torch.nn as nn
import random
from torch.utils.data import Dataset
import glob
from PIL import Image
import cv2
import albumentations as A
from albumentations.pytorch import ToTensorV2
class CustomDataset(Dataset):
    """Image-classification dataset over a directory laid out as
    ``<img_dir>/<class_name>/<image>``.

    Labels are the alphabetically sorted sub-directory names unless an
    explicit `name_label` ordering is supplied.
    """

    def __init__(self, img_dir, transform=None, name_label=None):
        # One sub-directory per class.
        self.path_list_class = glob.glob(img_dir + "/*")
        # Flat list of every image path across all class directories.
        self.path_img = [j for i in self.path_list_class for j in glob.glob(i + '/*')]
        self.transform = transform
        if name_label is None:  # fix: identity check, not `== None`
            # Derive class names from directory names; sort for a stable order.
            # NOTE(review): '/'-based splitting assumes POSIX-style paths.
            self.name_label = [i.split('/')[-1] for i in self.path_list_class]
            self.name_label.sort()
        else:
            self.name_label = name_label
        self.classes = len(self.name_label)

    def __len__(self):
        return len(self.path_img)

    def __getitem__(self, idx):
        # The parent directory name of the image encodes its class.
        label = self.name_label.index(self.path_img[idx].split('/')[-2])
        try:
            image = cv2.imread(self.path_img[idx], 1)
            if self.transform:
                image = self.transform(image=image)["image"]
        except Exception:
            # Log the offending path, then re-raise. The original bare
            # `except:` fell through and returned a possibly-unbound or
            # un-transformed image, which crashed later with a far less
            # informative error.
            print(self.path_img[idx])
            raise
        return image, label, self.path_img[idx]
def get_data_loader(input_path, batch_size_train=32, batch_size_test=16, name_label=None):
    """Build train/validation DataLoaders from <input_path>train and <input_path>val.

    Training images get augmentation (affine/flip/colour/blur) before the
    resize+normalise+tensor step; validation images only get the latter.
    """
    train_transform = A.Compose(
        [
            A.OneOf([
                A.Affine(scale=None, rotate=(-30, 30), shear=(-16, 16), fit_output=True, p=0.5),
            ], p=0.5),
            A.Flip(p=0.5),
            A.OneOf([
                A.RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.5, p=1),
                A.Compose([
                    A.CLAHE(p=1),
                    A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=1)]),
            ], p=0.5),
            A.Blur(blur_limit=(10, 10), p=0.3),
            A.Resize(224, 224, interpolation=1, p=1),
            A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
            ToTensorV2()
        ])
    val_transform = A.Compose(
        [
            A.Resize(224, 224, interpolation=1, p=1),
            A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
            ToTensorV2()
        ])

    image_datasets = {
        'train': CustomDataset(input_path + 'train', train_transform, name_label),
        'validation': CustomDataset(input_path + 'val', val_transform, name_label),
    }

    size_train = len(image_datasets['train'])
    size_val = len(image_datasets['validation'])
    print(f'[INFO]: Number of training examples: { size_train }')
    print(f'[INFO]: Number of validation examples: {size_val}')

    # Shuffle only the training split; both loaders use 4 worker processes.
    loader_settings = {
        'train': dict(batch_size=batch_size_train, shuffle=True),
        'validation': dict(batch_size=batch_size_test, shuffle=False),
    }
    dataloaders = {
        split: torch.utils.data.DataLoader(image_datasets[split],
                                           num_workers=4,
                                           **settings)
        for split, settings in loader_settings.items()
    }
    return dataloaders
{
"api_name": "torch.utils.data.Dataset",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_n... |
2032464725 | from rrpam_wds.gui import set_pyqt_api # isort:skip # NOQA
import logging
from rrpam_wds.tests.test_utils import Test_Parent
from rrpam_wds.tests.test_utils import main
class TC(Test_Parent):
    """Regression test: re-initialising all components must keep the log dialog."""

    logger = logging.getLogger()

    def test__initialize_all_components_will_not_close_log_dialog(self):
        # Show the log window, then capture object identities before the reset.
        self.aw.show_logwindow()
        log_id_before, net_id_before = id(self.aw.logdialog), id(self.aw.networkmap)

        self.aw._initialize_all_components()

        log_id_after, net_id_after = id(self.aw.logdialog), id(self.aw.networkmap)
        # The log dialog instance must survive; the network map is rebuilt.
        self.assertEqual(log_id_before, log_id_after)
        self.assertNotEqual(net_id_before, net_id_after)
# Run this test module directly via the shared test-runner helper.
if __name__ == "__main__":
main(TC, test=False)
| asselapathirana/RRPam-WDS | src/rrpam_wds/tests/test_main_window2.py | test_main_window2.py | py | 708 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "rrpam_wds.tests.test_utils.Test_Parent",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "rrpam_wds.tests.test_utils.main",
"line_number": 24,
"usage_type": "call"
}
] |
2937883579 | import requests
import json
# fs = '1'
import time
# Poll the EC2 endpoint until it responds; instances can take a while to come
# up, so retry every 3 seconds and keep count of the attempts.
count = 1
while True:
    try:
        r = requests.post("http://ec2-3-84-46-25.compute-1.amazonaws.com", data="getdata")
        print("##############")
        print(r.text)
        print("count: ", count)
        break
    except requests.exceptions.RequestException:
        # Only swallow network-level failures. The original bare `except:`
        # also trapped KeyboardInterrupt, making the loop hard to stop.
        count += 1
        time.sleep(3)
# print(r.headers)
# if r:
# print(r.text.decode('utf-8'))
# print(r.text)
# import socket
# from socket import socket, AF_INET, SOCK_STREAM
# import os
# import sys
# s = socket(AF_INET, SOCK_STREAM)
# s.connect(('ec2-3-84-46-25.compute-1.amazonaws.com', 80))
# s.sendall('1'.encode('utf-8'))
# print("receive: ", s.recv(1024))
# while True:
# # s2 = socket(AF_INET, SOCK_STREAM)
# # s.connect(('ec2-3-84-46-25.compute-1.amazonaws.com', 80))
# # s2.listen(1)
# print("receive: ", s.recv(1024))
# conn, address = s.recv()
# print("Connect with: ", addr)
# msg = conn.recv(1024)
# print(msg.decode('utf-8'))
# s.close()
# | MapleMilk/IoT-smart-road-light | clientForTestAndroid.py | clientForTestAndroid.py | py | 991 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.post",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 16,
"usage_type": "call"
}
] |
11882940759 | # 需求: 对指定的路由进行访问限制
# Analysis: some views need an identity check; repeating the check in every
# view would duplicate a lot of code.
# Solution: wrap the auth-check logic in a decorator and apply it to the
# protected view functions.
# Example code
from flask import Flask, session, g, abort
from functools import wraps
app = Flask(__name__)
# Secret key for signing the session cookie (demo value only — never ship this).
app.secret_key = 'test'
@app.before_request
def prepare():
"""Before every request, load the logged-in username (if any) into flask.g."""
g.name = session.get('username')
@app.route('/')
def index():
"""Home page: greet the user when logged in, otherwise show the plain page."""
if g.name:
return '欢迎回来, %s' % g.name
else:
return '首页'
@app.route('/login')
def login():
'''Log in: store a hard-coded demo username in the session.'''
session['username'] = 'zs'
return '登录成功'
# Requirement 2: restrict access to certain views — e.g. the profile page
# must only be reachable after logging in.
# Solution: wrap the access check in a decorator to avoid duplicated code.
def login_required(f): # f = the wrapped view function (e.g. user)
"""Decorator that aborts with 401 unless a user is logged in."""
@wraps(f)
def wrapper(*args, **kwargs):
# Print the function name; thanks to @wraps this is f's name, not 'wrapper'.
print(wrapper.__name__)
if g.name: # user is logged in
return f(*args, **kwargs) # proceed to the real view
else: # user is not logged in
abort(401) # 400 bad syntax/params, 401 unauthenticated, 403 authenticated but forbidden, 404 missing, 405 method not allowed, 500 server error
return wrapper
@app.route('/user')
@login_required
def user():
'''Profile page: only reachable when logged in (guarded by login_required).'''
return '访问%s的个人中心' % g.name
if __name__ == '__main__':
# Print the registered URL map for debugging, then start the dev server.
print(app.url_map)
app.run(debug=True)
'''
functools.wraps
系统内置的装饰器, 主要用于装饰器中的闭包函数
作用是: 将被装饰的函数(wrapper)的函数信息替换为 指定函数(f)的函数信息(包括name 函数名, doc函数注释等)
''' | xiaoxiao131111/flask_high_user | 访问限制.py | 访问限制.py | py | 1,769 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.g.name",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_nu... |
33277651136 | import json
import os
import tempfile
from typing import List, Literal, Optional
from typing_extensions import TypedDict
from loguru import logger
from fastapi import APIRouter, Request, Response
from fastapi.responses import PlainTextResponse
from modeling.animation import build_anim_spec
from service.utils import range_requests_response, save_to_archives
router = APIRouter()
# JSON payload accepted by /api/generate-animation-specification.
GenerationRequest = TypedDict(
'GenerationRequest', {"tap_rules": str, "senario_traces": str, })
@router.post("/api/generate-animation-specification", response_class=PlainTextResponse)
async def generate_animation_specification(request: Request) -> str:
"""Build an animation spec from TAP rules + scenario traces, archive a copy,
and return the JSON spec as plain text."""
gen_request: GenerationRequest = await request.json()
tap_rules = gen_request["tap_rules"]
senario_traces = gen_request["senario_traces"]
logger.info(f"input trace length: {len(senario_traces)}")
specification = json.dumps(build_anim_spec(
senario_traces, tap_rules), indent=2)
# NamedTemporaryFile is used only to mint a unique basename for the archive.
save_to_archives("specifications", os.path.basename(
tempfile.NamedTemporaryFile().name), specification)
logger.info(f"output specification length: {len(specification)}")
return specification
# One queued video-rendering job and its lifecycle state.
RenderingTask = TypedDict('RenderingTask', {
'token': str,
'status': Literal["pending", "processing", "finished", "failed", "canceled"],
'input': str,
'result': str, # PATH TO FILE IF NOT FAILED
'assignee': str
})
# In-memory job queue. NOTE(review): not persisted and not lock-protected —
# presumably relies on a single-process, single-threaded event loop; confirm.
task_queue: List[RenderingTask] = []
# NOTE(review): `machines` is never read in this module.
machines = 0
@router.post("/api/add-rendering-task", response_class=PlainTextResponse)
async def add_rendering_task(request: Request) -> str:
"""Queue a new rendering job whose input is the raw request body; return its token."""
# TODO: add layout info from input
# Mint a unique token from a temp-file basename.
token = os.path.basename(tempfile.NamedTemporaryFile().name)
task_queue.append({
'token': token,
'status': "pending",
'input': str(await request.body(), "utf-8"),
'result': "",
'assignee': ""
})
return token
@router.get("/api/query-rendering-task-status", response_class=PlainTextResponse)
async def query_rendering_task_status(token: str) -> str:
    """Return the status of the first task carrying `token`, or '' when unknown."""
    matches = (task["status"] for task in task_queue if task["token"] == token)
    return next(matches, "")
@router.get("/api/query-rendering-task-assignee", response_class=PlainTextResponse)
async def query_rendering_task_assignee(token: str) -> str:
    """Return the assignee of the first task carrying `token`, or '' when unknown."""
    matches = (task["assignee"] for task in task_queue if task["token"] == token)
    return next(matches, "")
@router.get("/api/cancel-rendering-task")
async def cancel_rendering_task(token: str) -> None:
    """Mark every queued task carrying `token` as canceled (no-op when unknown)."""
    for match in (task for task in task_queue if task["token"] == token):
        match["status"] = "canceled"
@router.get("/api/pull-rendering-result")
def pull_rendering_result(token: str, request: Request) -> Response:
"""Stream the finished video for `token` (HTTP range requests supported);
404 when the task is unknown or not finished."""
for task in task_queue:
if task["token"] == token and task["status"] == "finished":
return range_requests_response(
request, file_path=task["result"], content_type="video/mp4"
)
return Response(status_code=404)
@router.get("/api/pull-rendering-task")
async def pull_rendering_task(server_name: str) -> Optional[RenderingTask]:
"""Hand the first pending task to the requesting render server, marking it
processing; return None when the queue has no pending work."""
for task in task_queue:
if task["status"] == "pending":
task["status"] = "processing"
task["assignee"] = server_name
logger.info(f"assign {task['token']} to {server_name}")
return task
logger.info(f"{server_name} asks for task, but nothing exists.")
return None
@router.post("/api/push-rendering-result")
async def push_rendering_task(token: str, request: Request) -> None:
"""Accept the rendered video for `token`, archive it, and mark the task finished."""
for task in task_queue:
if task["token"] == token:
video_data = await request.body()
task["status"] = "finished"
task["result"] = save_to_archives(
"animation", task["token"], video_data)
logger.info(f"save {token} to {task['result']}")
return
logger.warning(f"task {token} not found")
| AIOT-Learning-Group/uppaal-modeling-smart-home | service/rendering.py | rendering.py | py | 3,913 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "typing_extensions.TypedDict",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "fastapi.Request",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "loguru.... |
20686734920 | import json
import sys
import traceback
from datetime import datetime
from agent.server import Server
# Snapshot each site's analytics to its analytics_file as JSON; failures for
# one site are logged to stderr and do not stop the remaining sites.
if __name__ == "__main__":
# NOTE(review): this initial list is immediately replaced by the dict below.
info = []
server = Server()
for bench in server.benches.values():
for site in bench.sites.values():
try:
timestamp = str(datetime.utcnow())
info = {
"timestamp": timestamp,
"analytics": site.get_analytics(),
}
with open(site.analytics_file, "w") as f:
json.dump(info, f, indent=1)
except Exception:
exception = traceback.format_exc()
# NOTE(review): reuses `timestamp` from the failed iteration; if the
# failure happened before it was (re)assigned, this would NameError.
error_log = f"ERROR [{site.name}:{timestamp}]: {exception}"
print(error_log, file=sys.stderr)
| frappe/agent | agent/analytics.py | analytics.py | py | 775 | python | en | code | 52 | github-code | 1 | [
{
"api_name": "agent.server.Server",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "json.d... |
19294778278 | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
import gensim
import gensim.models
from gensim.models import KeyedVectors
from get_tweets import get_tweets
from wakati import wakati
from tweet_evaluation import evaluation
app = FastAPI()
# Intended CORS whitelist for the local React dev server.
origins = [
"http://localhost:3000"
]
#model = gensim.models.KeyedVectors.load_word2vec_format('machine_learn/ja.vec')
#you can run this app at backend directory with " source bin/activate && python main.py"
app.add_middleware(
CORSMiddleware,
allow_origins= ["*"], # NOTE(review): `origins` above is defined but unused — every origin is allowed.
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/")
def read_root():
"""Health-check root endpoint."""
return {"Hello": "World"}
@app.post("/users/")
def read_user(user_id:str):
"""Echo the user id. NOTE(review): the [1:] slice drops the first character —
presumably a leading separator from the client; confirm against the frontend."""
return {"user_id":user_id[1:] + " is your user id"}
@app.post("/get_tweets/")
def pass_tweets(userId:str):
"""Fetch a user's tweets, score them, and return up to the three
worst-scoring tweets (highest badness first)."""
print(userId)
return_tweets = []
tweetsList = get_tweets(userId)
print(tweetsList)
# Tokenize, then score; badTweets maps tweet index -> badness score.
wakati_tweets = wakati(tweetsList)
badTweets = evaluation(wakati_tweets)
# Sort by score, worst first.
sorted_bad_tweets = sorted(badTweets.items(), key=lambda x:x[1], reverse=True)
print(sorted_bad_tweets)
for n in sorted_bad_tweets:
return_tweets.append(tweetsList[n[0]])
# Stop after collecting three tweets.
if len(return_tweets) > 2:
break
print(badTweets)
print("--------------------------------------------------------")
return return_tweets
@app.post("/all")
def all_tweet(userId:str):
"""Return the total number of tweets fetched for the user."""
print(userId)
return len(get_tweets(userId))
@app.post("/test/")
async def test(userId:str):
"""Debug endpoint: return how many of the user's tweets were flagged as bad."""
tweetsList = get_tweets(userId)
print(tweetsList)
wakati_tweets = wakati(tweetsList)
badTweets = evaluation(wakati_tweets)
print(badTweets)
return len(badTweets)
@app.get("/items/{item_id}")
def read_item(item_id: int, q: str = None):
"""Sample parameterized endpoint: echo the path id and optional query string."""
return {"item_id": item_id, "q": q}
if __name__ == "__main__":
# Run the dev server on port 5000 with auto-reload.
uvicorn.run("main:app", host="127.0.0.1", port=5000, reload=True)
{
"api_name": "fastapi.FastAPI",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "fastapi.middleware.cors.CORSMiddleware",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "get_tweets.get_tweets",
"line_number": 41,
"usage_type": "call"
},
{
"... |
24918521865 | #!/usr/bin/python
import argparse
import os
import re
import influxdb as idb
import pandas as pd
class Testo():
    """Dump every measurement of an InfluxDB database into one Excel workbook,
    one sheet per measurement."""

    def __init__(self, saveDirectory):
        # Directory the .xlsx export is written into.
        self.saveDirectory = saveDirectory

    def querySelAllFromMeasureResAsDataFrame(self, client, meas, lborder, rborder):
        """Select all points of `meas` in the (lborder, rborder) time window
        and return them as a pandas DataFrame.

        Fix: the method was missing the `self` parameter, so every
        `self.querySelAllFromMeasureResAsDataFrame(...)` call raised a
        TypeError (the instance was bound to `client`).
        """
        query = 'SELECT * FROM "{}" where time > {} and time < {}'.format(
            meas, lborder, rborder)
        points = client.query(query, chunked=True,
                              chunk_size=10000).get_points()
        return pd.DataFrame(points)

    def queryAndGetPathToResults(self, host, port, DBname, filePrefix, leftBorder, rightBorder):
        """Query every measurement of `DBname` and write each one to its own
        sheet of <saveDirectory>/<filePrefix>.xlsx; return the export path."""
        print("Starting to query results")
        client = idb.InfluxDBClient(host=host, port=port)
        client.switch_database(database=DBname)
        listOfMeasurements = client.get_list_measurements()
        print("List Of Meas: {}".format(listOfMeasurements))
        # Fix: extension was '.xlxs', which pandas cannot map to an Excel
        # engine (ValueError) — use the correct '.xlsx'.
        exportPath = '{}/{}.xlsx'.format(self.saveDirectory, filePrefix)
        exWriter = pd.ExcelWriter(exportPath)
        print("Found <{}> measurements in total. Gathering will start now.".format(
            len(listOfMeasurements)))
        for measurement in listOfMeasurements:
            nameOfMeas = measurement['name']
            print("Start to collect from {}".format(nameOfMeas))
            # Effectively an unbounded window (0 .. 9999999999).
            resDataFrame = self.querySelAllFromMeasureResAsDataFrame(
                client, nameOfMeas, 0, 9999999999)
            # Excel sheet names only allow a restricted character set.
            cleanedNameOfMeas = re.sub('[^A-Za-z0-9]+', '_', nameOfMeas)
            resDataFrame.to_excel(exWriter, sheet_name=cleanedNameOfMeas)
        exWriter.close()
        return exportPath
| FutureApp/a-bench | dir_bench/images/influxdb-client/image/rest_server/lib/Testo.py | Testo.py | py | 1,635 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "influxdb.InfluxDBClient",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.ExcelWriter",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "re.sub",
... |
36787065489 | import os
import platform
import re
from time import time
import cmdstanpy
from cmdstanpy import CmdStanModel, cmdstan_path
import pandas as pd
def get_timing(fit):
    """Collect per-chain warmup/sampling/total wall-clock times.

    Scans each chain's stdout file for the "Elapsed Time" section and returns
    a DataFrame with one row per chain (columns warmup/sampling/total/chain).
    """
    seconds_re = re.compile(r"\s*(\d+.\d*)\sseconds\s")
    rows = []
    for chain_idx, stdout_path in enumerate(fit.runset.stdout_files):
        # Keep everything from the "Elapsed Time" marker to the end of file.
        collected = []
        seen_marker = False
        with open(stdout_path) as stdout_file:
            for raw_line in stdout_file:
                seen_marker = seen_marker or "Elapsed Time" in raw_line
                if seen_marker:
                    collected.append(raw_line.strip())
        timing_text = "".join("\n" + part for part in collected)
        # The three numbers appear in warmup/sampling/total order.
        values = [float(v) for v in seconds_re.findall(timing_text)]
        row = dict(zip(("warmup", "sampling", "total"), values))
        row["chain"] = chain_idx
        rows.append(row)
    return pd.DataFrame(rows)
def t(func, *args, timing_name=None, **kwargs):
    """Call ``func(*args, **kwargs)``, print how long it took, return its result."""
    started = time()
    result = func(*args, **kwargs)
    elapsed = time() - started

    # Scale the elapsed time to a human-friendly unit.
    if elapsed > 60 * 60:
        elapsed, unit = elapsed / (60 * 60), "hours"
    elif elapsed > 60 * 3:
        elapsed, unit = elapsed / 60, "minutes"
    else:
        unit = "seconds"

    prefix = "" if timing_name is None else timing_name + ": "
    print(f"{prefix}Duration", f"{elapsed:.1f}", unit, flush=True)
    return result
# Compile, sample, and summarize every Stan model under ./Stan_models, timing
# each step and saving per-model timing/summary CSVs under ./results.
if __name__ == "__main__":
from glob import glob
stan_files = glob("./Stan_models/*.stan")
# Derive the data file name from the model file name.
# NOTE(review): the dot in r".stan$" is unescaped, so it matches any
# character before "stan" — fine for names ending in ".stan", but fragile.
stan_datas = [re.sub(r".stan$", ".data.R", path) for path in stan_files]
# DEFAULTS
for stan_model, stan_data in zip(stan_files, stan_datas):
model_name = os.path.basename(stan_model)
print(f"\n\n{model_name}\n\n")
model = t(
CmdStanModel,
model_name=model_name,
stan_file=stan_model,
timing_name=f"CmdStanModel {model_name}",
)
print(f"model: {model_name}, done", flush=True)
fit = t(
model.sample,
timing_name=f"{model_name}.sample",
data=stan_data,
chains=4,
cores=2,
seed=1111,
iter_warmup=1000,
iter_sampling=1000,
metric="diag_e",
show_progress=True,
)
print(f"fit: {model_name}, done", flush=True)
timing_df = t(get_timing, fit, timing_name=f"{model_name}: get_timing")
import arviz as az
summary_df = t(az.summary, fit, timing_name=f"{model_name}: az.summary")
# On Windows the RTools version (from argv) is baked into the file names.
if platform.system() == "Windows":
import sys
rtools = sys.argv[1]
savepath_timing = f"./results/CmdStanPy_{model_name}_timing_{platform.system()}_RTools_{rtools}.csv"
savepath_summary = f"./results/CmdStanPy_{model_name}_summary_{platform.system()}_RTools_{rtools}.csv"
else:
savepath_timing = (
f"./results/CmdStanPy_{model_name}_timing_{platform.system()}.csv"
)
savepath_summary = (
f"./results/CmdStanPy_{model_name}_summary_{platform.system()}.csv"
)
os.makedirs("results", exist_ok=True)
t(
timing_df.to_csv,
savepath_timing,
timing_name=f"{model_name}: timing_df.to_csv",
)
t(
summary_df.to_csv,
savepath_summary,
timing_name=f"{model_name}: summary_df.to_csv",
)
print(model_name, flush=True)
print(f"Timing: {model_name}", flush=True)
print(timing_df, flush=True)
print(f"Summary: {model_name}", flush=True)
print(summary_df, flush=True)
print("\n\nFinished", flush=True)
| ahartikainen/stan_performance_testing | run_CmdStanPy.py | run_CmdStanPy.py | py | 3,723 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.findall",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": ... |
4504389118 | # -*- coding: utf-8 -*-
import sys, os
from time import time
from PyQt5.QtWidgets import QApplication, QMainWindow
from Main_framework import Ui_Form
from Vivo_x23_data import Vivo_x23_data_main
from Vivo_x23_data_analysis import Vivo_x23_data_analysis_main
from Huawei_p20_data import Huawei_p20_data_main
from Huawei_p20_data_analysis import Huawei_p20_data_analysis_main
from Oppo_r17_data import Oppo_r17_data_main
from Oppo_r17_data_analysis import Oppo_r17_data_analysis_main
from Iphone_xs_max_data import Iphone_xs_max_data_main
from Iphone_xs_max_data_analysis import Iphone_xs_max_data_analysis_main
class MainWindow(QMainWindow, Ui_Form):
"""Main window: dispatches per-phone-model data collection and analysis."""
def __init__(self):
super(MainWindow, self).__init__()
self.setupUi(self)
# Data collection
def data(self):
"""Scrape and clean data for the selected phone, unless files already exist."""
Name = self.mobileComboBox.currentText()
file1 = os.path.exists(str(Name) + '.txt')
file2 = os.path.exists(str(Name) + '_clean.txt')
if not file1 and not file2:
self.resultText.append('{}数据开始采集'.format(Name))
begin = time()
self.data_spider(Name)
end = time()
self.resultText.append('{}数据采集清洗完毕,用时{}s'.format(Name,(end - begin)))
else:
self.resultText.append('请先清空文件')
# Data analysis
def data_analysis(self):
"""Analyze previously collected (cleaned) data for the selected phone."""
Name = self.mobileComboBox.currentText()
file = os.path.exists(str(Name)+'_clean.txt')
if file:
self.resultText.append('{}数据开始分析'.format(Name))
self.data_analysis_spider(Name)
self.resultText.append('{}数据分析完成'.format(Name))
else:
self.resultText.append('请先开始数据采集')
# Dispatch the analysis routine by phone model
def data_analysis_spider(self,Name):
if Name == 'Vivo_x23':
Vivo_x23_data_analysis_main()
elif Name == 'Huawei_p20':
Huawei_p20_data_analysis_main()
elif Name == 'Oppo_r17':
Oppo_r17_data_analysis_main()
elif Name == 'Iphone_xs_max':
Iphone_xs_max_data_analysis_main()
else:
pass
# Dispatch the collection routine by phone model
def data_spider(self, Name):
if Name == 'Vivo_x23':
Vivo_x23_data_main()
elif Name == 'Huawei_p20':
Huawei_p20_data_main()
elif Name == 'Oppo_r17':
Oppo_r17_data_main()
elif Name == 'Iphone_xs_max':
Iphone_xs_max_data_main()
else:
pass
# Clear the result text box
def clearResult(self):
self.resultText.clear()
# Delete the selected phone's data files
def clearFile(self):
Name = self.mobileComboBox.currentText()
file1 = os.path.exists(str(Name)+'.txt')
file2 = os.path.exists(str(Name)+'_clean.txt')
file3 = os.path.exists(str(Name) + '_sentiments.csv')
if file1:
os.remove(str(Name)+'.txt')
if file2:
os.remove(str(Name)+'_clean.txt')
if file3:
os.remove(str(Name)+'_sentiments.csv')
self.resultText.append('文件清空完毕')
if __name__=="__main__":
# Start the Qt event loop with the main window shown.
app = QApplication(sys.argv)
win = MainWindow()
win.show()
sys.exit(app.exec_())
| WQ1213/Mobile_phone_analysis | Mobile_phone_analysis.py | Mobile_phone_analysis.py | py | 3,248 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "Main_framework.Ui_Form",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.... |
18354714736 | from xml.sax.saxutils import quoteattr
import argparse
import os
import re
import sys
import unittest
# Read at most 100MB of a test log.
# Rarely would this be exceeded, but we don't want to end up
# swapping, etc.
MAX_MEMORY = 100 * 1024 * 1024
# gtest start/end markers printed around each test case.
START_TESTCASE_RE = re.compile(r'\[ RUN\s+\] (.+)$')
END_TESTCASE_RE = re.compile(r'\[\s+(?:OK|FAILED)\s+\] (.+)$')
# Sanitizer report markers (ASAN/LSAN, TSAN begin/end, UBSAN).
ASAN_ERROR_RE = re.compile('ERROR: (AddressSanitizer|LeakSanitizer)')
TSAN_ERROR_RE = re.compile('WARNING: ThreadSanitizer.*')
END_TSAN_ERROR_RE = re.compile('SUMMARY: ThreadSanitizer.*')
UBSAN_ERROR_RE = re.compile(r'SUMMARY: UndefinedBehaviorSanitizer')
# glog FATAL line, e.g. "F0101 12:00:00.000000 1234 <message>".
FATAL_LOG_RE = re.compile(r'^F\d\d\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d\s+\d+ (.*)')
LINE_RE = re.compile(r"^.*$", re.MULTILINE)
# Stack-trace frames, minus frames from logging/gtest internals.
STACKTRACE_ELEM_RE = re.compile(r'^ @')
IGNORED_STACKTRACE_ELEM_RE = re.compile(
r'(google::logging|google::LogMessage|\(unknown\)| testing::)')
TEST_FAILURE_RE = re.compile(r'.*\d+: Failure$')
GLOG_LINE_RE = re.compile(r'^[WIEF]\d\d\d\d \d\d:\d\d:\d\d')
class ParsedTest(object):
    """A single test discovered while parsing a log, plus the errors found in it.

    The LogParser creates one instance per test case it encounters; `errors`
    accumulates the error signatures attributed to that test.
    """

    def __init__(self, test_name):
        self.errors = []
        self.test_name = test_name
class LogParser(object):
"""
Parser for textual gtest output.

Tracks the currently-running test case and attributes error signatures
(ASAN/TSAN/UBSAN reports, gtest failures, glog FATAL messages) to it;
results are exposed via text_failure_summary / xml_failure_summary.
"""
def __init__(self):
self._tests = []
self._cur_test = None
@staticmethod
def _consume_rest(line_iter):
""" Consume and return the rest of the lines in the iterator. """
return [l for l in line_iter]
@staticmethod
def _consume_until(line_iter, end_re):
"""
Consume and return lines from the iterator until one matches 'end_re'.
The line matching 'end_re' will not be returned, but will be consumed.
"""
ret = []
for line in line_iter:
if end_re.search(line):
break
ret.append(line)
return ret
@staticmethod
def _remove_glog_lines(lines):
""" Remove any lines from the list of strings which appear to be GLog messages. """
return [l for l in lines if not GLOG_LINE_RE.search(l)]
def _record_error(self, error):
# Errors outside any test case get attributed to a synthetic test so
# they still show up in the summary.
if self._cur_test is None:
# TODO(todd) would be nice to have a more specific name indicating which
# test file caused the issue.
self._start_test("General.OutsideOfAnyTestCase")
self._record_error(error)
self._end_test()
else:
self._cur_test.errors.append(error)
def _start_test(self, test_name):
assert test_name is not None
self._cur_test = ParsedTest(test_name)
self._tests.append(self._cur_test)
def _end_test(self):
self._cur_test = None
@staticmethod
def _fast_re(substr, regexp, line):
"""
Implements a micro-optimization: returns true if 'line' matches 'regexp,
but short-circuited by a much faster check whether 'line' contains 'substr'.
This provides a big speed-up since substring searches execute much more
quickly than regexp matches.
"""
if substr not in line:
return None
return regexp.search(line)
def parse_text(self, log_text):
# Iterate over the lines, using finditer instead of .split()
# so that we don't end up doubling memory usage.
def line_iter():
for match in LINE_RE.finditer(log_text):
yield match.group(0)
self.parse_lines(line_iter())
def parse_lines(self, line_iter):
"""
Arguments:
lines: generator which should yield lines of log output
"""
for line in line_iter:
# Track the currently-running test case
m = self._fast_re('RUN', START_TESTCASE_RE, line)
if m:
self._start_test(m.group(1))
continue
m = self._fast_re('[', END_TESTCASE_RE, line)
if m:
self._end_test()
continue
# Look for ASAN errors.
m = self._fast_re('ERROR', ASAN_ERROR_RE, line)
if m:
error_signature = line + "\n"
# ASAN errors kill the process, so we consume the rest of the log
# and remove any lines that don't look like part of the stack trace
asan_lines = self._remove_glog_lines(self._consume_rest(line_iter))
error_signature += "\n".join(asan_lines)
self._record_error(error_signature)
continue
# Look for TSAN errors
m = self._fast_re('ThreadSanitizer', TSAN_ERROR_RE, line)
if m:
error_signature = m.group(0)
error_signature += "\n".join(self._remove_glog_lines(
self._consume_until(line_iter, END_TSAN_ERROR_RE)))
self._record_error(error_signature)
continue
# Look for UBSAN errors
m = self._fast_re('UndefinedBehavior', UBSAN_ERROR_RE, line)
if m:
# UBSAN errors are a single line.
# TODO: there is actually some info on the previous line but there
# is no obvious prefix to look for.
self._record_error(line)
continue
# Look for test failures
# - slight micro-optimization to check for substring before running the regex
m = self._fast_re('Failure', TEST_FAILURE_RE, line)
if m:
error_signature = m.group(0) + "\n"
error_signature += "\n".join(self._remove_glog_lines(
self._consume_until(line_iter, END_TESTCASE_RE)))
self._record_error(error_signature)
self._end_test()
continue
# Look for fatal log messages (including CHECK failures)
# - slight micro-optimization to check for 'F' before running the regex
m = line and line[0] == 'F' and FATAL_LOG_RE.search(line)
if m:
error_signature = m.group(1) + "\n"
remaining_lines = self._consume_rest(line_iter)
remaining_lines = [l for l in remaining_lines if STACKTRACE_ELEM_RE.search(l) and
not IGNORED_STACKTRACE_ELEM_RE.search(l)]
error_signature += "\n".join(remaining_lines)
self._record_error(error_signature)
# Sometimes we see crashes that the script doesn't know how to parse.
# When that happens, we leave a generic message to be picked up by Jenkins.
if self._cur_test and not self._cur_test.errors:
self._record_error("Unrecognized error type. Please see the error log for more information.")
# Return failure summary formatted as text.
def text_failure_summary(self):
msgs = []
for test in self._tests:
for error in test.errors:
msgs.append("%s: %s\n" % (test.test_name, error))
return "\n".join(msgs)
def xml_failure_summary(self):
# Example format:
"""
<testsuites>
<testsuite name="ClientTest">
<testcase name="TestReplicatedMultiTabletTableFailover" classname="ClientTest">
<error message="Check failed: ABC != XYZ">
<![CDATA[ ... stack trace ... ]]>
</error>
</testcase>
</testsuite>
</testsuites>
"""
ret = ""
cur_test_suite = None
ret += '<testsuites>\n'
found_test_suites = False
for test in self._tests:
if not test.errors:
continue
(test_suite, test_case) = test.test_name.split(".")
# Test suite initialization or name change.
if test_suite and test_suite != cur_test_suite:
if cur_test_suite:
ret += ' </testsuite>\n'
cur_test_suite = test_suite
ret += ' <testsuite name="%s">\n' % cur_test_suite
found_test_suites = True
# Print each test case.
ret += ' <testcase name="%s" classname="%s">\n' % (test_case, cur_test_suite)
errors = "\n\n".join(test.errors)
first_line = re.sub("\n.*", '', errors)
ret += ' <error message=%s>\n' % quoteattr(first_line)
ret += '<![CDATA[\n'
ret += errors
ret += ']]>\n'
ret += ' </error>\n'
ret += ' </testcase>\n'
if found_test_suites:
ret += ' </testsuite>\n'
ret += '</testsuites>\n'
return ret
# Parse log lines and return failure summary formatted as text.
#
# Print failure summary based on desired output format.
# 'tests' is a list of all tests run (in order), not just the failed ones.
# This allows us to print the test results in the order they were run.
# 'errors_by_test' is a dict of lists, keyed by test name.
#
# This helper function is part of a public API called from test_result_server.py
def extract_failure_summary(log_text, format='text'):
    """Parse `log_text` and return the failure summary as text or JUnit XML.

    Any format other than 'text' yields the XML form. This helper is part of
    a public API called from test_result_server.py.
    """
    parser = LogParser()
    parser.parse_text(log_text)
    if format != 'text':
        return parser.xml_failure_summary()
    return parser.text_failure_summary()
class Test(unittest.TestCase):
    """Golden-file tests: parse each sample log and compare the text and XML
    summaries against checked-in expectations.

    Set REGENERATE_TEST_EXPECTATIONS=1 to rewrite the expectation files
    instead of comparing against them.
    """

    _TEST_DIR = os.path.join(os.path.dirname(__file__), "build-support-test-data")

    def __init__(self, *args, **kwargs):
        super(Test, self).__init__(*args, **kwargs)
        self.regenerate = os.environ.get('REGENERATE_TEST_EXPECTATIONS') == '1'

    def test_all(self):
        for child in os.listdir(self._TEST_DIR):
            # Inputs are *.txt; '-out' files are the expected outputs.
            if not child.endswith(".txt") or '-out' in child:
                continue
            base, _ = os.path.splitext(child)
            p = LogParser()
            with open(os.path.join(self._TEST_DIR, child)) as f:
                p.parse_text(f.read())
            self._do_test(p.text_failure_summary(), base + "-out.txt")
            self._do_test(p.xml_failure_summary(), base + "-out.xml")

    def _do_test(self, got_value, filename):
        path = os.path.join(self._TEST_DIR, filename)
        if self.regenerate:
            print("Regenerating %s" % path)
            with open(path, "w") as f:
                f.write(got_value)
        else:
            with open(path) as f:
                # Fix: assertEquals is a deprecated alias of assertEqual.
                self.assertEqual(got_value, f.read())
def main():
    """Command-line entry point: parse a log (file or stdin) and print a summary."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-x", "--xml",
                        help="Print output in JUnit report XML format (default: plain text)",
                        action="store_true")
    parser.add_argument("path", nargs="?", help="File to parse. If not provided, parses stdin")
    args = parser.parse_args()
    if args.path:
        in_file = open(args.path)
    else:
        in_file = sys.stdin
    # Bound memory usage on huge logs (see MAX_MEMORY).
    log_text = in_file.read(MAX_MEMORY)
    if in_file is not sys.stdin:
        in_file.close()
    # Fix: use a conditional expression instead of the fragile
    # `cond and x or y` idiom, and stop shadowing the builtin `format`.
    output_format = 'xml' if args.xml else 'text'
    sys.stdout.write(extract_failure_summary(log_text, output_format))
if __name__ == "__main__":
# Allow running this module directly as a script.
main()
| apache/kudu | build-support/parse_test_failure.py | parse_test_failure.py | py | 10,125 | python | en | code | 1,762 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 16,
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.