seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25208897980 | from dataclasses import dataclass, asdict, field, make_dataclass
from typing import List, Union, Any, Dict
from enum import Enum
from sigrok.core.classes import ConfigKey
__all__ = [
"SrBlankState",
"SrDeviceState",
#"SrFileState",
#"AnalogChannel",
#"LogicChannel",
]
# 32-entry hex color palette; make_ch_container assigns a color to each
# channel by its position in the input list.
colorsArray = [
    '#fce94f', '#fcaf3e', '#e9b96e', '#8ae234', '#729fcf', '#ad7fa8', '#cf72c3', '#ef2929',
    '#edd400', '#f57900', '#c17d11', '#73d216', '#3465a4', '#75507b', '#a33496', '#cc0000',
    '#c4a000', '#ce5c00', '#8f5902', '#4e9a06', '#204a87', '#5c3566', '#87207a', '#a40000',
    '#16191a', '#2e3436', '#555753', '#888a8f', '#babdb6', '#d3d7cf', '#eeeeec', '#ffffff'
]
def factory(data):
    """dict_factory for dataclasses.asdict that drops None-valued entries."""
    result = {}
    for key, value in data:
        if value is not None:
            result[key] = value
    return result
def build_opts(opts: list):
    """Turn raw sigrok option dicts into (keyName, dataclass-instance) pairs.

    Each dict in *opts* must have a 'key' entry (a sigrok ConfigKey id) and
    may have a 'caps' collection of capability names. A dedicated dataclass
    is generated per option whose fields mirror the dict's keys, plus a
    'keyName' field resolved through sigrok's ConfigKey table.
    """
    #NOTE: key, id, name, desc, keyName, caps
    data = []
    for opt in opts:
        bases = []
        opt_fields = []
        if 'caps' in opt:
            # 'LIST' takes precedence over 'GET' when both caps are present.
            if 'LIST' in opt['caps']:
                bases.append(ConfList)
            elif 'GET' in opt['caps']:
                bases.append(ConfValue)
        for k, v in opt.items():
            # Field types are inferred from the runtime types of the values.
            opt_fields.append((k, type(v)))
        opt_fields.append( ('keyName', str))
        keyName=ConfigKey.get(opt['key']).name
        SrOpt = make_dataclass(cls_name='SrOpt', fields=opt_fields, bases=tuple(bases))
        srOpt = SrOpt(**opt, keyName = keyName)
        data.append( (keyName, srOpt) )
    return data
@dataclass
class ConfValue:
    """Mixin base for GET-capable options: carries a single current value."""
    value: Any
@dataclass
class ConfList:
    """Mixin base for LIST-capable options: carries the candidate values."""
    values: List[Any] = field(default_factory=list)
@dataclass
class Channel:
    """State common to every channel kind (identity, display, enablement)."""
    name: str
    text: str
    color: str
    enabled: bool
    index: int
    type: str
    def update(self, opts: Dict):
        """Copy each entry of *opts* onto a same-named existing attribute.

        Entries whose key does not match an attribute are silently ignored.
        """
        for attr_name in opts:
            if hasattr(self, attr_name):
                setattr(self, attr_name, opts[attr_name])
@dataclass
class LogicChannel(Channel):
    """Channel state specific to logic (digital) channels."""
    traceHeight: int = 34  # trace display height; presumably pixels -- TODO confirm
@dataclass
class AnalogChannel(Channel):
    """Channel state specific to analog channels (vertical scale/division setup)."""
    pVertDivs: int = 1    # presumably positive vertical division count -- TODO confirm
    nVertDivs: int = 1    # presumably negative vertical division count -- TODO confirm
    divHeight: int = 51   # division display height; presumably pixels -- TODO confirm
    vRes: float = 20.0
    autoranging: bool = True
    conversion: str = ''
    convThres: str = ''
    showTraces: str = ''
class ChTypesEnum(Enum):
    """Maps a channel 'type' string to the dataclass used to instantiate it
    (see make_ch_container)."""
    analog = AnalogChannel
    logic = LogicChannel
@dataclass
class ChnlsContBase:
    """Base for dynamically generated channel containers (make_ch_container).

    Each channel object is stored as an attribute named after the channel.
    """
    def set(self, chOpts: List[Dict] ):
        """Apply per-channel option dicts; return the channel names updated.

        Each dict should carry a 'chName' entry naming the target channel.
        Dicts without 'chName' (or naming an unknown channel) are skipped:
        the original code passed None to hasattr, which raised TypeError.
        """
        result = []
        for opts in chOpts:
            chName = opts.pop('chName', None)
            # Guard: hasattr(self, None) raises TypeError, so only look up
            # attributes when a channel name was actually supplied.
            if chName is not None and hasattr(self, chName):
                attr = getattr(self, chName)
                attr.update(opts)
                result.append(chName)
        return result
    def get(self):
        """Return the per-channel dicts (None-valued entries dropped)."""
        data = asdict(self, dict_factory=factory)
        return data.values()
    def get_list(self):
        """Return the distinct channel 'type' names present in the container."""
        data = asdict(self, dict_factory=factory)
        return list( { item['type'] for item in data.values() })
@dataclass
class OptsContBase:
    """Base for dynamically generated option containers (make_opts_container)."""
    def set(self, opts: List[Dict] ):
        """Write the 'value' of each entry onto the matching option attribute.

        Returns the list of option key-names that were actually updated.
        """
        updated = []
        for entry in opts:
            key_name = entry['keyName']
            if not hasattr(self, key_name):
                continue
            getattr(self, key_name).value = entry['value']
            updated.append(key_name)
        return updated
    def get(self):
        """Return the options as a dict, dropping None-valued entries."""
        return asdict(self, dict_factory=factory)
    def get_list(self):
        """Return the raw sigrok 'key' of every option in the container."""
        return [opt['key'] for opt in asdict(self).values()]
def make_ch_container(channels: List[Dict]):
    """Build a dataclass instance holding one channel object per input dict.

    Channel classes are chosen via ChTypesEnum from each dict's 'type',
    colors are assigned from colorsArray by position, and each channel's
    display text defaults to its name.
    """
    pairs = []
    for idx, ch in enumerate(channels):
        instance = ChTypesEnum[ch['type']].value(**ch, color=colorsArray[idx], text=ch['name'])
        pairs.append((ch['name'], instance))
    container_cls = make_dataclass(
        'ChnlsContainer',
        [name for name, _ in pairs],
        bases=(ChnlsContBase,),
    )
    return container_cls(**dict(pairs))
def make_opts_container(opts: List[Dict]):
    """Build a dataclass instance holding one SrOpt object per option dict."""
    pairs = build_opts(opts)
    container_cls = make_dataclass(
        'OptsContainer',
        [name for name, _ in pairs],
        bases=(OptsContBase,),
    )
    return container_cls(**dict(pairs))
#----------------- STATE TYPES -----------------#
@dataclass
class SrBlankState:
    """Minimal session state: an id/name/sourcename triple of type 'BLANK'."""
    id: str
    name: str
    sourcename: str
    # Not an __init__ parameter; set in __post_init__ (subclasses override it).
    type: str = field(init=False)
    def __post_init__(self):
        self.type = "BLANK"
    def get(self):
        """Return the state as a dict, dropping None-valued entries."""
        data = asdict(self, dict_factory=factory)
        return data
    def copy(self):
        """Return a fresh instance built from the same identity fields.

        The original passed the whole get() dict as the single positional
        'id' argument, which raised TypeError (name and sourcename missing);
        it also could not round-trip 'type', which is init=False.
        """
        return self.__class__(self.id, self.name, self.sourcename)
@dataclass
class SrDeviceState(SrBlankState):
    """Device session state: blank state plus option and channel containers.

    Although declared as a dataclass, __init__ is hand-written and converts
    the incoming option/channel lists into dynamically generated container
    dataclasses.
    """
    drvopts: Dict[ str, Any ]
    devopts: Dict[ str, Any ]
    channels: Dict[ str, Union[ AnalogChannel, LogicChannel ] ]  # container built by make_ch_container
    def __init__(self, id, name, sourcename, drvopts: List, devopts: List, channels: List[ChTypesEnum]):
        # Explicit base call mirrors SrBlankState's generated __init__;
        # its __post_init__ sets type='BLANK', overwritten just below.
        SrBlankState.__init__(self, id, name, sourcename)
        self.type = 'DEVICE'
        self.drvopts = make_opts_container(drvopts)
        self.devopts = make_opts_container(devopts)
        self.channels = make_ch_container(channels)
    def get(self):
        """Return the base dict plus summarized option/channel listings."""
        data = super().get()
        data['drvopts'] = self.drvopts.get_list()
        data['devopts'] = self.devopts.get_list()
        data['channels'] = self.channels.get_list()
        return data
| drdbrr/webrok | pysigrok/srtypes.py | srtypes.py | py | 5,361 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "sigrok.core.classes.ConfigKey.get",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sigrok.core.classes.ConfigKey",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "dataclasses.make_dataclass",
"line_number": 43,
"usage_type": "call"
},... |
10158111328 | from django.shortcuts import render, redirect
from app14.models import ProductModel
def openmainpage(request):
    """Render the main page; on POST, persist a new ProductModel and redirect."""
    if request.method == "POST":
        product_name = request.POST.get("product_name")
        product_price = request.POST.get("product_price")
        product_photo = request.FILES["product_photo"]
        product = ProductModel(name=product_name, price=product_price, photo=product_photo)
        product.save()
        return redirect('main')
    return render(request, 'main.html')
| suchishree/django_assignment1 | project14/app14/views.py | views.py | py | 443 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "app14.models.ProductModel",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 13,
"usage_type": "call"
}
] |
277437358 | import torch
import numpy as np
from shapmagn.global_variable import DATASET_POOL
from shapmagn.utils.obj_factory import partial_obj_factory
# todo reformat the import style
class DataManager(object):
    """Builds per-phase (train/val/test/debug) torch DataLoaders."""
    def __init__(
        self,
    ):
        """
        The data-management class.

        build_data_loaders returns a dict in which each phase
        (train/val/test/debug) has its own dataloader.
        """
        self.data_path = None
        self.data_opt = None
        self.phases = []
    def set_data_path(self, data_path):
        """
        Set the root path the datasets are read from.
        :param data_path: dataset root path
        :return: None
        """
        self.data_path = data_path
    def set_data_opt(self, data_opt):
        """
        Set the data options.
        :param data_opt: project option object; supports plain key lookup and
            (key, default, description) tuple lookup -- see build_data_loaders
        :return: None
        """
        self.data_opt = data_opt
    def init_dataset_loader(self, transformed_dataset, batch_size):
        """
        Initialize the dataloaders: set the per-phase worker count and the
        iteration order (shuffled for training, fixed for the other phases).
        :param transformed_dataset: dict mapping phase name -> dataset
        :param batch_size: int shared by all phases, or a 4-element list
            ordered [train, val, test, debug]
        :return: dict of dataloaders for train|val|test|debug
        """
        def _init_fn(worker_id):
            # Deterministic per-worker seed for reproducible data loading.
            np.random.seed(12 + worker_id)
        num_workers_reg = {
            "train": 8,
            "val": 8,
            "test": 8,
            "debug": 4,
        }  # alternatives tried: {'train':0,'val':0,'test':0,'debug':0}, {'train':8,'val':4,'test':4,'debug':4}
        shuffle_list = {"train": True, "val": False, "test": False, "debug": False}
        # Normalize a scalar batch size into one entry per phase.
        batch_size = (
            [batch_size] * 4 if not isinstance(batch_size, list) else batch_size
        )
        batch_size = {
            "train": batch_size[0],
            "val": batch_size[1],
            "test": batch_size[2],
            "debug": batch_size[3],
        }
        dataloaders = {
            x: torch.utils.data.DataLoader(
                transformed_dataset[x],
                batch_size=batch_size[x],
                shuffle=shuffle_list[x],
                num_workers=num_workers_reg[x],
                worker_init_fn=_init_fn,
                pin_memory=True,
            )
            for x in self.phases
        }
        return dataloaders
    def build_data_loaders(self, batch_size=20, is_train=True):
        """
        Build the dataloaders for the train phases or the test phase.
        :param batch_size: batch size per iteration (int or 4-element list)
        :param is_train: if True build train/val/debug loaders, else test only
        :return: dict with one dataloader per phase plus a 'data_size' entry
            mapping each phase to its number of batches
        """
        if is_train:
            self.phases = ["train", "val", "debug"]
        else:
            self.phases = ["test"]
        name = self.data_opt["name"]
        # Tuple indexing appears to follow the project's option convention:
        # (key, default_value, description) -- TODO confirm against data_opt's type.
        dataset_opt = self.data_opt[(name, {}, "settings for {} dataset".format(name))]
        assert name in DATASET_POOL, "{} not in dataset pool {}".format(
            name, DATASET_POOL
        )
        if name!="custom_dataset":
            transformed_dataset = {
                phase: DATASET_POOL[name](self.data_path, dataset_opt, phase=phase)
                for phase in self.phases
            }
        else:
            # Custom datasets are resolved dynamically from the option's own
            # "name" entry (an object-factory spec string).
            transformed_dataset = {
                phase: partial_obj_factory(dataset_opt["name"])(self.data_path, dataset_opt, phase=phase)
                for phase in self.phases
            }
        dataloaders = self.init_dataset_loader(transformed_dataset, batch_size)
        dataloaders["data_size"] = {
            phase: len(dataloaders[phase]) for phase in self.phases
        }
        print("dataloader is ready")
        return dataloaders
| uncbiag/shapmagn | shapmagn/datasets/data_manager.py | data_manager.py | py | 3,588 | python | en | code | 94 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch... |
12799024256 | import webapp2, jinja2, os
# Import requests with app engine adapter
import requests, requests_toolbelt.adapters.appengine
from bs4 import BeautifulSoup
import datetime, time
from google.appengine.ext import ndb
# Patch adapter
requests_toolbelt.adapters.appengine.monkeypatch()
# os.path.dirname(__file__) is the current location of the file
# os.path.join joins the current location with templates
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
# Image source locations and filtering configuration.
baseUrl = "https://woodroffehs.ocdsb.ca/Home%20Page%20Images/"
srcLocation = 'http://woodroffehs.ocdsb.ca/Home%20Page%20Images/Forms/AllItems.aspx'

# Images to exclude, expressed relative to srcLocation; expanded below.
garbageImages = [
    '',
]
acceptableFormats = [
    '.png',
    '.PNG',
    '.jpg',
    '.JPG'
]

# Expand every garbage entry to its absolute (srcLocation-prefixed) form.
garbageImages = [srcLocation + entry for entry in garbageImages]
def GetImages():
    """Scrape the SharePoint listing page and return absolute image URLs.

    Keeps only <img> tags whose alt text looks like an accepted image file
    (per acceptableFormats) and is not in the garbage list.
    """
    images = []
    # NOTE(review): BeautifulSoup is constructed without an explicit parser,
    # which emits a warning and may pick different parsers per machine.
    page = BeautifulSoup(requests.get(srcLocation).content)
    for image in page.findAll('img'):
        #images.append(image)
        try:
            # URL-escape spaces in the alt text (used as the file name).
            alt = str(image['alt']).replace(" ", '%20')
            # NOTE(review): after module init, garbageImages holds
            # srcLocation-prefixed URLs while alt is a bare escaped name, so
            # this exclusion may never match -- verify intended comparison.
            if alt not in garbageImages and any(acceptable in alt for acceptable in acceptableFormats):
                images.append(baseUrl + alt)
        except KeyError:
            # <img> without an alt attribute -- skip it.
            pass
    return images
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
# MainPage is a child of Handler, therefore it has all the functions and variables of Handler
class MainPage(Handler):
    '''
    def get(self):
        now = datetime.datetime.now()
        latestUpdateObj = Timeout.query().fetch()[0]
        latestUpdate = latestUpdateObj.updateTime
        images = Image.query().fetch()
        self.render('slideshow.html', images=images)
        if now >= now:
            latestUpdate.updateTime = now + datetime.timedelta(hours=2)
            ndb.delete_multi(Image.query().fetch())
            for image in GetImages():
                img = Image(link = image)
                img.put()
        #self.render('slideshow.html', images=GetImages())
    '''
    def get(self):
        # NOTE(review): this handler only computes the current time and
        # renders nothing, so '/' returns an empty response body; the cached
        # implementation kept in the class docstring above looks like the
        # intended behavior -- confirm before relying on this route.
        now = datetime.datetime.now()
class GenerateInitial(Handler):
    """Seed handler: stores a Timeout marker and caches all scraped images."""
    def get(self):
        # NOTE(review): Timeout is not defined anywhere in this file; unless
        # it is provided elsewhere, hitting /init raises NameError -- confirm.
        timeout = Timeout(updateTime = datetime.datetime.now())
        timeout.put()
        # Cache every currently published image as an Image entity.
        for image in GetImages():
            img = Image(link = image)
            img.put()
class EscapeCache(Handler):
    """Renders the slideshow from a fresh scrape, bypassing any stored cache."""
    def get(self):
        fresh_images = GetImages()
        self.render('slideshow.html', images=fresh_images)
class Image(ndb.Model):
    """Datastore entity caching a single slideshow image."""
    link = ndb.StringProperty(required = True)  # absolute image URL
    uuid = ndb.StringProperty()
    updateTime = ndb.DateTimeProperty()
# URL routes: slideshow at '/', datastore seeding at '/init',
# cache-bypassing render at '/escape'.
app = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/init', GenerateInitial),
    ('/escape', EscapeCache)
], debug=True)
| jQwotos/better-shareproint-slides | main.py | main.py | py | 3,061 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests_toolbelt.adapters.appengine.adapters.appengine.monkeypatch",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests_toolbelt.adapters.appengine.adapters",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "requests_toolbelt.adapters... |
36689801955 | from ..nmf import (PreambleEndRecord, ViaRecord, VersionRecord,
ModeRecord, KnownEncodingRecord, UpgradeRequestRecord,
UpgradeResponseRecord, Record, register_types,
SizedEnvelopedMessageRecord, PreambleAckRecord, EndRecord)
from .gssapi import GSSAPIStream
class NMFStream:
    """.NET Message Framing (NMF) framing layer over a raw byte stream.

    Performs the NMF preamble handshake (optionally upgrading the transport
    to GSSAPI via 'application/negotiate') and wraps payloads in
    SizedEnvelopedMessage records on read/write.
    """
    def __init__(self, stream, url, server_name=None):
        # server_name, when given, enables the negotiate upgrade and GSSAPI
        # wrapping performed in preamble().
        self._inner = stream
        self._server_name = server_name
        self.url = url
        register_types()
    def preamble(self):
        """Send the NMF preamble (version/mode/via/encoding) and complete it.

        Raises IOError if the peer refuses the negotiate upgrade or does not
        acknowledge the preamble end.
        """
        data = [
            VersionRecord(MajorVersion=1, MinorVersion=0),
            # NOTE(review): Mode=2 and Encoding=8 are protocol constants; see
            # the MC-NMF record definitions for their exact meaning.
            ModeRecord(Mode=2),
            ViaRecord(ViaLength=len(self.url), Via=self.url),
            KnownEncodingRecord(Encoding=8),
        ]
        self._inner.write(b''.join(d.to_bytes() for d in data))
        if self._server_name:
            msg = UpgradeRequestRecord(UpgradeProtocolLength=21,
                                       UpgradeProtocol='application/negotiate').to_bytes()
            self._inner.write(msg)
            # A single UpgradeResponse byte acknowledges the upgrade.
            d = self._inner.read(1)
            if d != UpgradeResponseRecord().to_bytes():
                raise IOError('Negotiate not supported')
            self._inner = GSSAPIStream(self._inner, self._server_name)
        self._inner.write(PreambleEndRecord().to_bytes())
        if self._inner.read(1) != PreambleAckRecord().to_bytes():
            raise IOError('Preamble end not acked')
    def write(self, data):
        """Frame *data* in a SizedEnvelopedMessage record and send it."""
        msg = SizedEnvelopedMessageRecord(Size=len(data), Payload=data)
        self._inner.write(msg.to_bytes())
    def read(self, count=None):
        """Read one framed payload.

        With *count*, reads that many raw bytes and slices out the payload;
        otherwise parses a whole record directly from the stream.
        """
        if count:
            data = self._inner.read(count)
            s, msg = Record.parse(data)
            # s appears to be the parsed header length; the payload is
            # assumed to follow immediately after it -- TODO confirm.
            return data[s:msg.Size+s]
        else:
            msg = Record.parse_stream(self._inner)
            return msg.Payload
    def close(self):
        """Send the End record and close the underlying stream."""
        self._inner.write(EndRecord().to_bytes())
        self._inner.close()
| ernw/net.tcp-proxy | nettcp/stream/nmf.py | nmf.py | py | 1,954 | python | en | code | 54 | github-code | 6 | [
{
"api_name": "nmf.register_types",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "nmf.VersionRecord",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "nmf.ModeRecord",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nmf.ViaRecord",
... |
36011235958 | # encoding: utf8
# Import local files:
import colors as COLORS
import margins as MARGINS
import roi as ROI
# External:
external = ROI.ROI('External', 'External', COLORS.external)
body = ROI.ROI('Body', 'Organ', COLORS.external)
# Support:
couch = ROI.ROI('Couch', 'Support', COLORS.couch)
# Target volumes:
gtv = ROI.ROI('GTV', 'Gtv', COLORS.gtv)
ctv = ROI.ROIExpanded('CTV', 'Ctv', COLORS.ctv, gtv, margins = MARGINS.uniform_5mm_expansion)
ptv = ROI.ROIExpanded('PTV', 'Ptv', COLORS.ptv, ctv, margins = MARGINS.uniform_5mm_expansion)
ctv_ext = ROI.ROIAlgebra('CTV', 'Ctv', COLORS.ctv, sourcesA = [gtv], sourcesB = [external], operator = 'Intersection', marginsA = MARGINS.uniform_5mm_expansion, marginsB = MARGINS.uniform_5mm_contraction)
ctv_underived = ROI.ROI('CTV', 'Ctv', COLORS.ctv)
igtv = ROI.ROI('IGTV', 'Gtv', COLORS.gtv)
igtv1 = ROI.ROI('IGTV1', 'Gtv', COLORS.gtv)
igtv2 = ROI.ROI('IGTV2', 'Gtv', COLORS.gtv)
igtv3 = ROI.ROI('IGTV3', 'Gtv', COLORS.gtv)
ictv = ROI.ROIExpanded('ICTV', 'Ctv', COLORS.ctv, igtv, margins = MARGINS.uniform_5mm_expansion)
ictv1 = ROI.ROIExpanded('ICTV1', 'Ctv', COLORS.ctv, igtv1, margins = MARGINS.uniform_5mm_expansion)
ictv2 = ROI.ROIExpanded('ICTV2', 'Ctv', COLORS.ctv, igtv2, margins = MARGINS.uniform_5mm_expansion)
ictv3 = ROI.ROIExpanded('ICTV3', 'Ctv', COLORS.ctv, igtv3, margins = MARGINS.uniform_5mm_expansion)
iptv = ROI.ROIExpanded('PTV', 'Ptv', COLORS.ptv, ictv, margins = MARGINS.uniform_5mm_expansion)
iptv_gtv = ROI.ROIExpanded('PTV', 'Ptv', COLORS.ptv, igtv, margins = MARGINS.uniform_5mm_expansion)
iptv1 = ROI.ROIExpanded('PTV1', 'Ptv', COLORS.ptv, ictv1, margins = MARGINS.uniform_5mm_expansion)
iptv2 = ROI.ROIExpanded('PTV2', 'Ptv', COLORS.ptv, ictv2, margins = MARGINS.uniform_5mm_expansion)
iptv3 = ROI.ROIExpanded('PTV3', 'Ptv', COLORS.ptv, ictv3, margins = MARGINS.uniform_5mm_expansion)
gtv_p = ROI.ROI('GTVp','Gtv', COLORS.gtv)
gtv_n = ROI.ROI('GTVn','Gtv', COLORS.gtv)
gtv_n1 = ROI.ROI('GTVn','Gtv', COLORS.gtv)
ctv_e = ROI.ROI('CTVe','Ctv', COLORS.ctv_med)
ptv_e = ROI.ROI('PTVe','Ptv', COLORS.ptv)
gtv_groin_l = ROI.ROI('GTV_Groin_L','Gtv', COLORS.gtv)
gtv_groin_r = ROI.ROI('GTV_Groin_R','Gtv', COLORS.gtv)
ctv_groin_l = ROI.ROI('CTV_Groin_L','Ctv', COLORS.ctv_med)
ctv_groin_r = ROI.ROI('CTV_Groin_R','Ctv', COLORS.ctv_med)
ptv_groin_l = ROI.ROI('PTV_Groin_L','Ptv', COLORS.ptv)
ptv_groin_r = ROI.ROI('PTV_Groin_R','Ptv', COLORS.ptv)
ctv_p = ROI.ROI('CTVp', 'Ctv', COLORS.ctv)
ctv_n = ROI.ROI('CTVn','Ctv', COLORS.ctv)
igtv_p = ROI.ROI('IGTVp','Gtv', COLORS.gtv)
igtv_n = ROI.ROI('IGTVn','Gtv', COLORS.gtv)
ictv_p = ROI.ROI('ICTVp', 'Ctv', COLORS.ctv)
ictv_n = ROI.ROI('ICTVn', 'Ctv', COLORS.ctv)
gtv1 = ROI.ROI('GTV1', 'Gtv', COLORS.gtv)
gtv2 = ROI.ROI('GTV2', 'Gtv', COLORS.gtv)
gtv3 = ROI.ROI('GTV3', 'Gtv', COLORS.gtv)
gtv4 = ROI.ROI('GTV4', 'Gtv', COLORS.gtv)
ctv1 = ROI.ROI('CTV1', 'Ctv', COLORS.ctv)
ctv2 = ROI.ROI('CTV2', 'Ctv', COLORS.ctv)
ctv3 = ROI.ROI('CTV3', 'Ctv', COLORS.ctv)
ctv4 = ROI.ROI('CTV4', 'Ctv', COLORS.ctv)
gtv_sb = ROI.ROI('GTVsb', 'Gtv', COLORS.gtv)
ctv_sb = ROI.ROI('CTVsb', 'Ctv', COLORS.ctv)
vb = ROI.ROI('VB','Ctv', COLORS.ctv_med)
ctv_l = ROI.ROI('CTV_L', 'Ctv', COLORS.ctv)
ctv_r = ROI.ROI('CTV_R', 'Ctv', COLORS.ctv)
ptvc_l = ROI.ROI('PTVc_L', 'Ptv', COLORS.ptv)
ptvc_r = ROI.ROI('PTVc_R', 'Ptv', COLORS.ptv)
# OARs: Empty (will be delineated manually):
# Head:
eye_l = ROI.ROI('Eye_L', 'Organ', COLORS.eye)
eye_r = ROI.ROI('Eye_R', 'Organ', COLORS.eye)
lens_l = ROI.ROI('Lens_L', 'Organ', COLORS.lens)
lens_r = ROI.ROI('Lens_R', 'Organ', COLORS.lens)
optic_nrv_l = ROI.ROI('OpticNerve_L', 'Organ', COLORS.optic_nrv)
optic_nrv_r = ROI.ROI('OpticNerve_R', 'Organ', COLORS.optic_nrv)
optic_chiasm = ROI.ROI('OpticChiasm','Organ', COLORS.chiasma)
lacrimal_l =ROI.ROI('LacrimalGland_L', 'Organ', COLORS.lacrimal)
lacrimal_r =ROI.ROI('LacrimalGland_R', 'Organ', COLORS.lacrimal)
cochlea_l = ROI.ROI('Cochlea_L','Organ', COLORS.cochlea)
cochlea_r = ROI.ROI('Cochlea_R','Organ', COLORS.cochlea)
hippocampus_l = ROI.ROI('Hippocampus_L','Organ', COLORS.hippocampus)
hippocampus_r = ROI.ROI('Hippocampus_R','Organ', COLORS.hippocampus)
brainstem = ROI.ROI('Brainstem', 'Organ', COLORS.brainstem)
nasal_cavity = ROI.ROI('NasalCavity', 'Organ', COLORS.nasal_cavity)
oral_cavity = ROI.ROI('OralCavity', 'Organ', COLORS.oral_cavity)
pituitary = ROI.ROI('Pituitary', 'Organ', COLORS.pituitary)
submand_l = ROI.ROI('SubmandGland_L', 'Organ', COLORS.submand)
submand_r = ROI.ROI('SubmandGland_R', 'Organ', COLORS.submand)
cornea_l = ROI.ROI('Cornea_L', 'Organ', COLORS.cornea)
cornea_r = ROI.ROI('Cornea_R', 'Organ', COLORS.cornea)
retina_l = ROI.ROI('Retina_L', 'Organ', COLORS.retina)
retina_r = ROI.ROI('Retina_R', 'Organ', COLORS.retina)
brainstem_core = ROI.ROIExpanded('BrainstemCore', 'Organ', COLORS.brainstem_core, brainstem, margins = MARGINS.uniform_2mm_contraction)
brainstem_surface = ROI.ROIAlgebra('BrainstemSurface', 'Organ', COLORS.brainstem_surface, sourcesA = [brainstem], sourcesB = [brainstem_core], operator = 'Subtraction')
# Thorax:
esophagus = ROI.ROI('Esophagus', 'Organ', COLORS.esophagus)
spinal_cord = ROI.ROI('SpinalCord', 'Organ', COLORS.spinal_cord)
heart = ROI.ROI('Heart', 'Organ', COLORS.heart)
# Breast:
thyroid = ROI.ROI('ThyroidGland','Organ', COLORS.thyroid)
a_lad = ROI.ROI('A_LAD','Organ', COLORS.lad)
# Lung, stereotactic:
chestwall = ROI.ROI('Chestwall', 'Organ', COLORS.chestwall)
greatves = ROI.ROI('GreatVessel','Organ', COLORS.heart)
trachea = ROI.ROI('Trachea','Organ', COLORS.trachea)
spleen = ROI.ROI('Spleen','Organ', COLORS.spleen)
stomach = ROI.ROI('Stomach','Organ', COLORS.stomach)
liver = ROI.ROI('Liver','Organ', COLORS.liver)
rib_x_l = ROI.ROI('Ribx_L','Organ', COLORS.rib)
rib_x_r = ROI.ROI('Ribx_R','Organ', COLORS.rib)
rib_y_l = ROI.ROI('Riby_L','Organ', COLORS.rib)
rib_y_r = ROI.ROI('Riby_R','Organ', COLORS.rib)
ribs = ROI.ROI('Ribs','Organ', COLORS.ribs)
main_bronchus_l = ROI.ROI('BronchusMain_L','Organ', COLORS.main_bronchus)
main_bronchus_r = ROI.ROI('BronchusMain_R','Organ', COLORS.main_bronchus)
# Spine SBRT:
cauda_equina = ROI.ROI('CaudaEquina','Organ', COLORS.cauda)
small_bowel = ROI.ROI('BowelSmall','Organ', COLORS.small_bowel)
colon = ROI.ROI('Colon','Organ', COLORS.colon)
brachial = ROI.ROI('BrachialPlexus','Organ', COLORS.brachial)
# Pelvis, prostate:
bowel_space = ROI.ROI('BowelBag', 'Organ', COLORS.bowel_space)
rectum = ROI.ROI('Rectum', 'Organ', COLORS.rectum)
pelvic_nodes = ROI.ROI('LN_Iliac', 'Ctv', COLORS.pelvic_nodes)
prostate = ROI.ROI('Prostate', 'Ctv', COLORS.prostate)
prostate_bed = ROI.ROI('SurgicalBed', 'Ctv', COLORS.prostate_bed)
urethra = ROI.ROI('Urethra', 'Organ', COLORS.urethra)
vesicles = ROI.ROI('SeminalVes', 'Ctv', COLORS.vesicles)
penile_bulb = ROI.ROI('PenileBulb', 'Organ', COLORS.penile_bulb)
anal_canal = ROI.ROI('AnalCanal','Organ', COLORS.anal_canal)
levator_ani = ROI.ROI('LevatorAni', 'Organ', COLORS.levator_ani)
# Bone ROIs:
humeral_l = ROI.ROI('HumeralHead_L', 'Organ', COLORS.bone_color1)
humeral_r = ROI.ROI('HumeralHead_R', 'Organ', COLORS.bone_color1)
sternum = ROI.ROI('Sternum', 'Organ', COLORS.bone_color3)
l2 = ROI.ROI('L2', 'Organ', COLORS.bone_color1)
l3 = ROI.ROI('L3', 'Organ', COLORS.bone_color2)
l4 = ROI.ROI('L4', 'Organ', COLORS.bone_color1)
l5 = ROI.ROI('L5', 'Organ', COLORS.bone_color2)
sacrum = ROI.ROI('Sacrum', 'Organ', COLORS.bone_color1)
coccyx = ROI.ROI('Coccyx', 'Organ', COLORS.bone_color2)
pelvic_girdle_l = ROI.ROI('PelvicGirdle_L', 'Organ', COLORS.bone_color3)
pelvic_girdle_r = ROI.ROI('PelvicGirdle_R', 'Organ', COLORS.bone_color3)
femur_head_neck_l = ROI.ROI('FemurHeadNeck_L', 'Organ', COLORS.bone_color1)
femur_head_neck_r = ROI.ROI('FemurHeadNeck_R', 'Organ', COLORS.bone_color1)
# Vessels:
a_descending_aorta = ROI.ROI('A_DescendingAorta', 'Organ', COLORS.artery_color1)
a_common_iliac_l = ROI.ROI('A_CommonIliac_L', 'Organ', COLORS.artery_color2)
a_common_iliac_r = ROI.ROI('A_CommonIliac_R', 'Organ', COLORS.artery_color2)
a_internal_iliac_l = ROI.ROI('A_InternalIliac_L', 'Organ', COLORS.artery_color3)
a_internal_iliac_r = ROI.ROI('A_InternalIliac_R', 'Organ', COLORS.artery_color3)
a_external_iliac_l = ROI.ROI('A_ExternalIliac_L', 'Organ', COLORS.artery_color4)
a_external_iliac_r = ROI.ROI('A_ExternalIliac_R', 'Organ', COLORS.artery_color4)
v_inferior_vena_cava = ROI.ROI('V_InferiorVenaCava', 'Organ', COLORS.vein_color1)
v_common_iliac_l = ROI.ROI('V_CommonIliac_L', 'Organ', COLORS.vein_color2)
v_common_iliac_r = ROI.ROI('V_CommonIliac_R', 'Organ', COLORS.vein_color2)
v_internal_iliac_l = ROI.ROI('V_InternalIliac_L', 'Organ', COLORS.vein_color3)
v_internal_iliac_r = ROI.ROI('V_InternalIliac_R', 'Organ', COLORS.vein_color3)
v_external_iliac_l = ROI.ROI('V_ExternalIliac_L', 'Organ', COLORS.vein_color4)
v_external_iliac_r = ROI.ROI('V_ExternalIliac_R', 'Organ', COLORS.vein_color4)
# Undefined / Other ROIs
# Breast organs:
surgical_bed_l = ROI.ROI('SurgicalBed_L','Undefined', COLORS.breast_draft)
surgical_bed_r = ROI.ROI('SurgicalBed_R','Undefined', COLORS.breast_draft)
imn_l = ROI.ROI('LN_IMN_L', 'Undefined', COLORS.imn)
imn_r = ROI.ROI('LN_IMN_R', 'Undefined', COLORS.imn)
breast_l_draft = ROI.ROI('Breast_L_Draft', 'Undefined', COLORS.contralat_draft)
breast_r_draft = ROI.ROI('Breast_R_Draft', 'Undefined', COLORS.contralat_draft)
level4_l = ROI.ROI('LN_Ax_L4_L', 'Undefined', COLORS.level4)
level3_l = ROI.ROI('LN_Ax_L3_L', 'Undefined', COLORS.level3)
level2_l = ROI.ROI('LN_Ax_L2_L', 'Undefined', COLORS.level2)
level1_l = ROI.ROI('LN_Ax_L1_L', 'Undefined', COLORS.level1)
level_l = ROI.ROI('LN_Ax_Pectoral_L', 'Undefined', COLORS.level)
level4_r = ROI.ROI('LN_Ax_L4_R', 'Undefined', COLORS.level4)
level3_r = ROI.ROI('LN_Ax_L3_R', 'Undefined', COLORS.level3)
level2_r = ROI.ROI('LN_Ax_L2_R', 'Undefined', COLORS.level2)
level1_r = ROI.ROI('LN_Ax_L1_R', 'Undefined', COLORS.level1)
level_r = ROI.ROI('LN_Ax_Pectoral_R', 'Undefined', COLORS.level)
artery1_l = ROI.ROI('A_Subclavian_L+A_Axillary_L', 'Undefined', COLORS.artery2)
artery2_l = ROI.ROI('A_Carotid_L', 'Undefined', COLORS.artery2)
vein1_l = ROI.ROI('V_Brachioceph', 'Undefined', COLORS.vein2)
vein2_l = ROI.ROI('V_Subclavian_L+V_Axillary_L','Undefined', COLORS.vein2)
vein3_l = ROI.ROI('V_Jugular_L','Undefined', COLORS.vein2)
scalene_muscle_l = ROI.ROI('ScaleneMusc_Ant_L', 'Undefined', COLORS.muscle)
scalene_muscle_r = ROI.ROI('ScaleneMusc_Ant_R', 'Undefined', COLORS.muscle)
artery1_r = ROI.ROI('A_Brachioceph', 'Undefined', COLORS.artery2)
artery2_r = ROI.ROI('A_Subclavian_R+A_Axillary_R', 'Undefined', COLORS.artery2)
artery3_r = ROI.ROI('A_Carotid_R', 'Undefined', COLORS.artery2)
vein1_r = ROI.ROI('V_Brachioceph_R', 'Undefined', COLORS.vein2)
vein2_r = ROI.ROI('V_Subclavian_R+V_Axillary_R', 'Undefined', COLORS.vein2)
vein3_r = ROI.ROI('V_Jugular_R','Undefined', COLORS.vein2)
prosthesis = ROI.ROI('Prosthesis','Undefined', COLORS.prosthesis)
prosthesis_l = ROI.ROI('Prosthesis_L','Undefined', COLORS.prosthesis)
prosthesis_r = ROI.ROI('Prosthesis_R','Undefined', COLORS.prosthesis)
# Markers:
markers = ROI.ROI('Markers', 'Marker', COLORS.clips)
seed1 = ROI.ROI('Marker1', 'Marker', COLORS.seed)
seed2 = ROI.ROI('Marker2', 'Marker', COLORS.seed)
seed3 = ROI.ROI('Marker3', 'Marker', COLORS.seed)
seed4 = ROI.ROI('Marker4', 'Marker', COLORS.seed)
marker1 = ROI.ROI('Marker1', 'Marker', COLORS.seed)
marker2 = ROI.ROI('Marker2', 'Marker', COLORS.seed)
marker3 = ROI.ROI('Marker3', 'Marker', COLORS.seed)
marker4 = ROI.ROI('Marker4', 'Marker', COLORS.seed)
# OARs: MBS (delineated by model based segmentation):
# Each entry carries the MBS case ('HeadNeck', 'Thorax', ...) and the model
# name used by the segmentation engine.
brain = ROI.ROI('Brain', 'Organ', COLORS.brain, case ='HeadNeck', model = 'Brain')
#brainstem = ROI.ROI('Brainstem', 'Organ', COLORS.brainstem, case ='HeadNeck', model = 'Brainstem')
spinal_canal = ROI.ROI('SpinalCanal', 'Organ', COLORS.spinal_canal, case ='Thorax', model = 'SpinalCord (Thorax)')
spinal_canal_head = ROI.ROI('SpinalCanal', 'Organ', COLORS.spinal_canal, case ='HeadNeck', model = 'SpinalCord')
parotid_l = ROI.ROI('Parotid_L', 'Organ', COLORS.parotid, case ='HeadNeck', model = 'ParotidGland (Left)')
parotid_r = ROI.ROI('Parotid_R', 'Organ', COLORS.parotid, case ='HeadNeck', model = 'ParotidGland (Right)')
lung_l = ROI.ROI('Lung_L', 'Organ', COLORS.lung, case ='Thorax', model = 'Lung (Left)')
lung_r = ROI.ROI('Lung_R', 'Organ', COLORS.lung, case ='Thorax', model = 'Lung (Right)')
kidney_l = ROI.ROI('Kidney_L', 'Organ', COLORS.kidney, case ='Abdomen', model = 'Kidney (Left)')
kidney_r = ROI.ROI('Kidney_R', 'Organ', COLORS.kidney, case ='Abdomen', model = 'Kidney (Right)')
bladder = ROI.ROI('Bladder', 'Organ', COLORS.bladder, case ='PelvicMale', model = 'Bladder')
femoral_l = ROI.ROI('FemoralHead_L', 'Organ', COLORS.femoral, case = 'PelvicMale', model = 'FemoralHead (Left)')
femoral_r = ROI.ROI('FemoralHead_R', 'Organ', COLORS.femoral, case = 'PelvicMale', model = 'FemoralHead (Right)')
# OARs: Unions:
# ROIAlgebra combines sourcesA and sourcesB (default operator is a union;
# overridden where stated). submand_l/_r, rib_x_*/rib_y_*, igtv,
# breast_*_draft and external are defined earlier in this module.
parotids = ROI.ROIAlgebra('Parotids', 'Organ', COLORS.parotid, sourcesA=[parotid_l], sourcesB=[parotid_r])
submands = ROI.ROIAlgebra('SubmandGlands', 'Organ', COLORS.submand, sourcesA=[submand_l], sourcesB=[submand_r])
lungs = ROI.ROIAlgebra('Lungs', 'Organ', COLORS.lungs, sourcesA=[lung_l], sourcesB=[lung_r])
kidneys = ROI.ROIAlgebra('Kidneys', 'Organ', COLORS.kidneys, sourcesA=[kidney_l], sourcesB=[kidney_r])
ribs_l = ROI.ROIAlgebra('Ribs_L','Organ', COLORS.ribs, sourcesA=[rib_x_l], sourcesB=[rib_y_l])
ribs_r = ROI.ROIAlgebra('Ribs_R','Organ', COLORS.ribs, sourcesA=[rib_x_r], sourcesB=[rib_y_r])
lungs_igtv = ROI.ROIAlgebra('Lungs-IGTV', 'Organ', COLORS.lungs, sourcesA=[lungs], sourcesB=[igtv], operator = 'Subtraction')
breast_l = ROI.ROIAlgebra('Breast_L', 'Organ', COLORS.contralat, sourcesA = [breast_l_draft], sourcesB = [external], operator = 'Intersection', marginsA = MARGINS.zero, marginsB = MARGINS.uniform_5mm_contraction)
breast_r = ROI.ROIAlgebra('Breast_R', 'Organ', COLORS.contralat, sourcesA = [breast_r_draft], sourcesB = [external], operator = 'Intersection', marginsA = MARGINS.zero, marginsB = MARGINS.uniform_5mm_contraction)
# OARs: Target subtracted
# Other:
other_ptv = ROI.ROI('Other_PTV', 'Organ', COLORS.other_ptv)
# PRVs:
spinal_cord_prv = ROI.ROIExpanded('SpinalCord_PRV', 'Avoidance', COLORS.prv, source=spinal_cord, margins=MARGINS.uniform_2mm_expansion)
# Walls:
# ROIWall(name, type, color, source, outward_cm, inward_cm) — source ROIs
# (body, external, iptv*) are defined earlier in this module.
skin_srt = ROI.ROIWall('Skin','Organ', COLORS.skin, body, 0, 0.3)
skin = ROI.ROIWall('Skin','Organ', COLORS.skin, external, 0, 0.3)
skin_brain_5 = ROI.ROIWall('Skin','Organ', COLORS.skin, body, 0, 0.5)
skin_brain = ROI.ROIWall('Skin','Organ', COLORS.skin, external, 0, 0.5)
wall_ptv = ROI.ROIWall('zPTV_Wall', 'Undefined', COLORS.wall, iptv, 1, 0)
wall_ptv1 = ROI.ROIWall('zPTV1_Wall', 'Undefined', COLORS.wall, iptv1, 1, 0)
wall_ptv2 = ROI.ROIWall('zPTV2_Wall', 'Undefined', COLORS.wall, iptv2, 1, 0)
wall_ptv3 = ROI.ROIWall('zPTV3_Wall', 'Undefined', COLORS.wall, iptv3, 1, 0)
# ROIs for optimization:
z_water = ROI.ROI('zWater', 'Undefined', COLORS.other_ptv)
z_heart = ROI.ROI('zHeart', 'Undefined', COLORS.heart)
z_esophagus = ROI.ROI('zEsophagus', 'Undefined', COLORS.esophagus)
z_bladder = ROI.ROI('zBladder','Undefined', COLORS.bladder)
z_spc_bowel = ROI.ROI('zBowelBag','Undefined', COLORS.bowel_space)
z_rectum = ROI.ROI('zRectum', 'Undefined', COLORS.rectum)
# NOTE(review): dorso_rectum and z_rectum_p both resolve to the ROI name
# 'zRectum_P' — presumably a legacy alias; confirm before consolidating.
dorso_rectum = ROI.ROI('zRectum_P', 'Undefined', COLORS.dorso_rectum)
z_rectum_p = ROI.ROI('zRectum_P', 'Undefined', COLORS.dorso_rectum)
z_ptv_77_wall = ROI.ROI('zPTV_77_Wall', 'Undefined', COLORS.wall)
z_ptv_70_77_wall = ROI.ROI('zPTV_70+77_Wall', 'Undefined', COLORS.wall)
z_ptv_67_5_wall = ROI.ROI('zPTV_67.5_Wall', 'Undefined', COLORS.wall)
z_ptv_62_5_67_5_wall = ROI.ROI('zPTV_62.5+67.5_Wall', 'Undefined', COLORS.wall)
z_ptv_50_62_5_67_5_wall = ROI.ROI('zPTV_50+62.5+67.5_Wall', 'Undefined', COLORS.wall)
z_ptv_60_wall = ROI.ROI('zPTV_60_Wall', 'Undefined', COLORS.wall)
z_ptv_50_wall = ROI.ROI('zPTV_50_Wall', 'Undefined', COLORS.wall)
z_ptv_47_50_wall = ROI.ROI('zPTV_47+50_Wall', 'Undefined', COLORS.wall)
z_ptv_57_60_wall = ROI.ROI('zPTV_57+60_Wall', 'Undefined', COLORS.wall)
z_ptv_70_wall = ROI.ROI('zPTV_70_Wall', 'Undefined', COLORS.wall)
z_ptv_62_5_wall = ROI.ROI('zPTV_62.5_Wall', 'Undefined', COLORS.wall)
z_ptv_56_temp = ROI.ROI('zPTV_56_Temp', 'Undefined', COLORS.wall)
z_ptv_56_wall = ROI.ROI('zPTV_56_Wall', 'Undefined', COLORS.wall)
z_ptv_50_temp = ROI.ROI('zPTV_50_Temp', 'Undefined', COLORS.wall)
# BUGFIX: a second, byte-identical assignment to z_ptv_50_wall used to
# follow here; the duplicate was removed (the definition above stands).
z_ptv_wall = ROI.ROI('zPTV_Wall', 'Undefined', COLORS.wall)
z_ptv1_wall = ROI.ROI('zPTV1_Wall', 'Undefined', COLORS.wall)
z_ptv2_wall = ROI.ROI('zPTV2_Wall', 'Undefined', COLORS.wall)
z_ptv3_wall = ROI.ROI('zPTV3_Wall', 'Undefined', COLORS.wall)
z_ptv4_wall = ROI.ROI('zPTV4_Wall', 'Undefined', COLORS.wall)
ctv_oars = ROI.ROI('zCTV-OARs', 'Ctv', COLORS.ctv)
ptv_oars = ROI.ROI('zPTV-OARs', 'Ptv', COLORS.ptv)
ptv_and_oars = ROI.ROI('zPTV_AND_OARs', 'Ptv', COLORS.other_ptv)
z_eye_l = ROI.ROIWall('zEye_L','Undefined', COLORS.wall, eye_l, 0, 0.2)
z_eye_r = ROI.ROIWall('zEye_R','Undefined', COLORS.wall, eye_r, 0, 0.2)
# Substitute ROI objects (only used for naming):
# Targets:
# Naming convention visible below: 'PTV_<dose>' / 'CTV_<dose>', '!' marks a
# SIB (exclusive) volume, '+' joins dose levels into a union name.
ptv_77 = ROI.ROI('PTV_77', 'Ptv', COLORS.ptv_high)
ptv_70 = ROI.ROI('PTV_70', 'Ptv', COLORS.ptv_med)
ptv_70_sib = ROI.ROI('PTV!_70', 'Ptv', COLORS.ptv_med)
ctv_77 = ROI.ROI('CTV_77', 'Ctv', COLORS.ctv_high)
ctv_70 = ROI.ROI('CTV_70', 'Ctv', COLORS.ctv_med)
ctv_70_sib = ROI.ROI('CTV!_70', 'Ctv', COLORS.ctv_med)
ptv_56 = ROI.ROI('PTV!_56', 'Ptv', COLORS.ptv_low)
ctv_56 = ROI.ROI('CTV!_56', 'Ctv', COLORS.ctv_low)
ptv_70_77 = ROI.ROI('PTV_70+77', 'Ptv', COLORS.ptv_low)
ctv_70_77 = ROI.ROI('CTV_70+77', 'Ctv', COLORS.ctv_low)
ptv_56_70_77 = ROI.ROI('PTV_56+70+77', 'Ptv', COLORS.ptv_low)
ptv_56_70 = ROI.ROI('PTV_56+70', 'Ptv', COLORS.ptv_low)
ptv_67_5 = ROI.ROI('PTV_67.5', 'Ptv', COLORS.ptv_high)
ptv_62_5 = ROI.ROI('PTV_62.5', 'Ptv', COLORS.ptv_med)
ptv_62_5_sib = ROI.ROI('PTV!_62.5', 'Ptv', COLORS.ptv_med)
ctv_67_5 = ROI.ROI('CTV_67.5', 'Ctv', COLORS.ctv_high)
ctv_62_5 = ROI.ROI('CTV_62.5', 'Ctv', COLORS.ctv_med)
ctv_62_5_sib = ROI.ROI('CTV!_62.5', 'Ctv', COLORS.ctv_med)
ptv__50 = ROI.ROI('PTV!_50', 'Ptv', COLORS.ptv_low)
ctv__50 = ROI.ROI('CTV!_50', 'Ctv', COLORS.ctv_low)
ptv_62_5_67_5 = ROI.ROI('PTV_62.5+67.5', 'Ptv', COLORS.ptv_low)
ctv_62_5_67_5 = ROI.ROI('CTV_62.5+67.5', 'Ctv', COLORS.ctv_low)
ptv_50_62_5_67_5 = ROI.ROI('PTV_50+62.5+67.5', 'Ptv', COLORS.ptv_low)
ptv_50_62_5 = ROI.ROI('PTV_50+62.5', 'Ptv', COLORS.ptv_low)
ptv_57_60 = ROI.ROI('PTV_57+60', 'Ptv', COLORS.ptv_low)
ctv_57_60 = ROI.ROI('CTV_57+60', 'Ctv', COLORS.ctv_low)
ptv_60 = ROI.ROI('PTV_60', 'Ptv', COLORS.ptv_high)
ctv_60 = ROI.ROI('CTV_60', 'Ctv', COLORS.ctv_high)
ptv_57 = ROI.ROI('PTV!_57', 'Ptv', COLORS.ptv_med)
ctv_57 = ROI.ROI('CTV!_57', 'Ctv', COLORS.ctv_med)
ptv_semves = ROI.ROI('PTV_SeminalVes', 'Ptv', COLORS.ptv_med)
ptv_pc = ROI.ROI('PTVpc', 'Ptv', COLORS.ptv)
ptv_pc_l = ROI.ROI('PTVpc_L', 'Ptv', COLORS.ptv)
ptv_pc_r = ROI.ROI('PTVpc_R', 'Ptv', COLORS.ptv)
ptv_p = ROI.ROI('PTVp', 'Ptv', COLORS.ptv)
ptv_n = ROI.ROI('PTVn', 'Ptv', COLORS.ptv)
ptv_nc = ROI.ROI('PTVnc', 'Ptv', COLORS.ptv)
ptv1 = ROI.ROI('PTV1', 'Ptv', COLORS.ptv)
ptv2 = ROI.ROI('PTV2', 'Ptv', COLORS.ptv)
ptv3 = ROI.ROI('PTV3', 'Ptv', COLORS.ptv)
ptv4 = ROI.ROI('PTV4', 'Ptv', COLORS.ptv)
ptv_c = ROI.ROI('PTVc', 'Ptv', COLORS.ptv)
ptv_sb = ROI.ROI('PTVsb', 'Ptv', COLORS.ptv)
ptv_sbc = ROI.ROI('PTVsbc', 'Ptv', COLORS.ptv)
ctv_47_50 = ROI.ROI('CTV_47+50', 'Ctv', COLORS.ctv_low)
ptv_47_50 = ROI.ROI('PTV_47+50', 'Ptv', COLORS.ptv_low)
ptv_50 = ROI.ROI('PTV_50', 'Ptv', COLORS.ptv_high)
ptv_50c = ROI.ROI('PTV_50c', 'Ptv', COLORS.ptv_high)
ctv_50 = ROI.ROI('CTV_50', 'Ctv', COLORS.ctv_high)
ctv_47 = ROI.ROI('CTV!_47', 'Ctv', COLORS.ctv_low)
ctv_47_tot = ROI.ROI('CTV_47', 'Ctv', COLORS.ctv_low)
ptv_47 = ROI.ROI('PTV!_47', 'Ptv', COLORS.ptv_med)
ptv_47_tot = ROI.ROI('PTV_47', 'Ptv', COLORS.ptv_med)
ptv_47c = ROI.ROI('PTV!_47c', 'Ptv', COLORS.ptv_med)
# Miscellaneous:
# Subtraction-style naming ('A-B') and optimization helper volumes.
brain_gtv = ROI.ROI('Brain-GTV','Organ', COLORS.brain)
brain_ptv = ROI.ROI('Brain-PTV','Organ', COLORS.other_ptv)
lungs_gtv = ROI.ROI('Lungs-GTV', 'Organ', COLORS.lungs)
lungs_ctv = ROI.ROI('Lungs-CTV', 'Organ', COLORS.lungs)
ctv_p_ctv_sb = ROI.ROI('CTVp-CTVsb', 'Ctv', COLORS.ctv)
ctv_ctv_sb = ROI.ROI('CTV-CTVsb', 'Ctv', COLORS.ctv)
ptv_pc_ptv_sbc = ROI.ROI('PTVpc-PTVsbc', 'Ptv', COLORS.ptv)
ptv_c_ptv_sbc = ROI.ROI('PTVc-PTVsbc', 'Ptv', COLORS.ptv)
ptv_gtv = ROI.ROI('PTV-GTV', 'Ptv', COLORS.ptv_med)
ptv_spinal = ROI.ROI('PTV-SpinalCord_PRV', 'Ptv', COLORS.ptv_med)
# Mask/box helper volumes ('z' prefix marks non-clinical helper ROIs):
mask_ptv = ROI.ROI('Mask_PTV','Undefined', COLORS.mask_ptv)
mask_ptv1 = ROI.ROI('Mask_PTV1','Undefined', COLORS.mask_ptv)
mask_ptv2 = ROI.ROI('Mask_PTV2','Undefined', COLORS.mask_ptv)
mask_ptv3 = ROI.ROI('Mask_PTV3','Undefined', COLORS.mask_ptv)
box = ROI.ROI('zBox','Undefined', COLORS.mask_ptv)
box1 = ROI.ROI('zBox1','Undefined', COLORS.mask_ptv)
box_l = ROI.ROI('zBox_L','Undefined', COLORS.mask_ptv)
box_r = ROI.ROI('zBox_R','Undefined', COLORS.mask_ptv)
box3 = ROI.ROI('zBox3','Undefined', COLORS.mask_ptv)
box4 = ROI.ROI('zBox4','Undefined', COLORS.mask_ptv)
| dicom/raystation-scripts | settings/rois.py | rois.py | py | 21,183 | python | en | code | 40 | github-code | 6 | [
{
"api_name": "roi.ROI",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "colors.external",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "roi.ROI",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "colors.external",
"line_numb... |
#!/usr/bin/env python
# coding: utf-8
# YouTube Data API Collector
# Mat Morrison @mediaczar
# Last updated: 2020-03-31
''' Query the YouTube Data API for an individual channel URL or a file list of URLs.
You may also specify which 'page' you'd like to start on (useful when the script
breaks during a long data capture.)
You _must_ supply a URL or file handle at the command line.
Data collected:
* publication date
* title
* description
* duration (ISO8601 duration format - parsed with [isodate](https://pypi.python.org/pypi/isodate))
* view count
* comment count
* likes & dislikes
To run it, you'll need:
1. a very basic knowledge of Python
2. some ability to install Python libraries (for legacy reasons,
I'm using a somewhat non-standard library called
[scraperwiki](https://pypi.python.org/pypi/scraperwiki) to save the data.)
3. (to read the output) some basic knowledge of SQL and
[SQLite](https://www.sqlite.org/) (it comes as standard on OS X,
and I use the excellent [Base 2](https://menial.co.uk/base/) to manage and
query the files that this script produces.)
4. an API Key from the Google Developers site
(get it here: [Google Developer Console](https://console.developers.google.com/)) -
add to SET UP. Take care not to share this publicly.
'''
import requests
import json
import scraperwiki
import isodate
import sys
import argparse
def get_file_contents(filename):
    """Return the whitespace-stripped contents of *filename*.

    Prints a notice and returns None when the file does not exist
    (the caller is expected to keep a single-line API key in the file).
    """
    try:
        with open(filename, 'r') as handle:
            contents = handle.read()
    except FileNotFoundError:
        print("'%s' file not found" % filename)
        return None
    return contents.strip()
def load_data(request):
    """GET the given URL and return its body decoded as JSON."""
    response = requests.get(request)
    return json.loads(response.text)
def get_author(url):
    """Extract the channel identifier and its API lookup type from *url*.

    Returns (author, authortype) where authortype is the query parameter
    name the YouTube Data API expects ('id' for /channel/ URLs,
    'forUsername' for /user/ URLs, 'channel' for bare vanity URLs).
    Exits via sys.exit on an unrecognized long-form URL.
    """
    parts = [piece for piece in url.split("/") if piece]
    if len(parts) <= 3:
        # Short form, e.g. youtube.com/<name>: third element is the handle.
        return parts[2], "channel"
    author = parts[3]
    print(author)
    kind = parts[2]
    if kind == "channel":
        return author, "id"
    if kind == "user":
        return author, "forUsername"
    sys.exit('malformed URL: %s' % url)
def build_channel_request(author, authortype):
    """Build the channels-endpoint URL for *author*, keyed by *authortype*.

    Uses the module-level API_key set during start-up.
    """
    part = 'snippet,contentDetails,statistics,id'
    wanted_fields = ','.join([
        'snippet(title, publishedAt, description)',
        'contentDetails/relatedPlaylists/uploads',
        'statistics(subscriberCount,videoCount,viewCount)',
        'id',
    ])
    return ('https://www.googleapis.com/youtube/v3/' +
            'channels?part=' + part +
            '&fields=items(' + wanted_fields + ')&' +
            authortype + '=' + author +
            '&key=' + API_key)
def write_channel(json):
    """Persist channel-level metadata to the 'channel' sqlite table.

    Reads the module-level `title` and `uploadsId` set by gather_channel.
    """
    item = json['items'][0]
    record = {
        'id': item['id'],
        'title': title,
        'uploads': uploadsId,
        'subscriberCount': item['statistics']['subscriberCount'],
        'videoCount': item['statistics']['videoCount'],
        'publishedAt': item['snippet']['publishedAt'],
        'description': item['snippet']['description'],
    }
    scraperwiki.sqlite.save(unique_keys=['id'], table_name='channel', data=record)
def initialise_playlist(uploadsId):
    """Build the playlistItems-endpoint URL for the uploads playlist.

    The caller appends '&pageToken=...' to page through results.
    Uses the module-level API_key set during start-up.

    Cleanup: the unused `field_items` list and the commented-out `fields`
    clause that referenced it were removed — the request intentionally
    fetches full items.
    """
    part = 'snippet,contentDetails'
    max_results = '50'
    return ('https://www.googleapis.com/youtube/v3/' +
            'playlistItems?playlistId=' + uploadsId +
            '&part=' + part +
            '&maxResults=' + max_results +
            '&key=' + API_key)
def gather_channel(url):
    """Fetch channel metadata for *url*, persist it, then collect its uploads.

    Sets the module-level `title` and `uploadsId` used by the write helpers.
    Prints "bad URL" and returns early if the API response has no items.
    """
    global title
    global uploadsId
    # BUGFIX: this log line used to sit at module level, where it raised a
    # NameError on startup because `url` is only defined inside the main loop.
    print('getting channel data for: %s' % url)  # log progress
    author, authortype = get_author(url)
    API_request = build_channel_request(author, authortype)
    channel_data = load_data(API_request)
    # verify data in case bad url
    try:
        title = channel_data['items'][0]['snippet']['title']
    except (KeyError, IndexError, TypeError):  # was a bare except
        print("bad URL")
        return
    # need following to get playlistId
    uploadsId = channel_data['items'][0]['contentDetails']['relatedPlaylists']['uploads']
    write_channel(channel_data)
    print('...complete')  # log progress
    # now create a list of the videos for that channel
    print('collecting uploads for: %s (playlist ID: %s)' % (title, uploadsId))  # log progress
    API_request_base = initialise_playlist(uploadsId)
    # Resume from a page token if one was supplied on the command line.
    page_token = args.page[0] if args.page else ''
    # loop through the playlist collecting video data
    gather_video_data(API_request_base, page_token)
def gather_video_data(API_request_base, page_token):
    """Walk the uploads playlist page by page, saving every video's stats.

    :param API_request_base: playlistItems URL without a page token.
    :param page_token: '' for the first page, or a token to resume from.
    Reads the module-level `title`; rebinds the module-level `videodata`.
    """
    global videodata
    while True:
        paging_request = API_request_base + '&pageToken=' + page_token
        if page_token == '':
            print('gathering first page of %s' % title)
        else:
            print('gathering %s page %s' % (title, page_token))
        videos = load_data(paging_request)
        vcount = 1
        videodata = {}
        videodata['channel'] = title
        for video in videos['items']:
            write_video(video)
            if vcount > 1:
                # Overwrite the previous progress line (ANSI cursor-up).
                print('\033[1A...completed ' + str(vcount))
            else:
                print('...completed ' + str(vcount))
            vcount += 1
        try:
            print('...gathering next page of videos')
            page_token = videos['nextPageToken']
        except KeyError:  # was a bare except; only a missing token ends the loop
            print('...last page reached')
            page_token = ''
            break
def write_video(json):
    """Fetch per-video statistics and save one row to the 'videos' table.

    :param json: one playlistItems item (snippet + contentDetails).
    Mutates the module-level `videodata` dict, then persists it.
    Counts that are unavailable (e.g. comments disabled) are stored as None.
    """
    videodata['videoId'] = json['contentDetails']['videoId']
    videodata['title'] = json['snippet']['title']
    videodata['publishedAt'] = json['snippet']['publishedAt']
    videodata['description'] = json['snippet']['description']
    video_request = ('https://www.googleapis.com/youtube/v3/' +
                     'videos?part=statistics,contentDetails&id=' +
                     videodata['videoId'] +
                     '&key=' + API_key)
    stats_json = load_data(video_request)
    for stat in ['viewCount', 'likeCount', 'dislikeCount', 'commentCount']:
        try:
            videodata[stat] = int(stats_json['items'][0]['statistics'][stat])
        except (KeyError, IndexError, ValueError, TypeError):  # was a bare except
            videodata[stat] = None
    # ISO 8601 duration (e.g. 'PT4M13S') -> seconds.
    duration = isodate.parse_duration(stats_json['items'][0]['contentDetails']['duration']).total_seconds()
    videodata['duration'] = duration
    scraperwiki.sqlite.save(unique_keys=['videoId'], table_name='videos', data=videodata)
# SET UP
# credentials
API_key = get_file_contents("api_key")  # obtain from Google Developer Console, store in api_key locally
# parse arguments at command line
parser = argparse.ArgumentParser(description="""Query the YouTube Data API for an individual channel URL or a file list of URLs.
You may also specify which 'page' you'd like to start on (useful when the script breaks during a long data capture.)
You *must* supply a URL or file handle at the command line.""")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-u", "--url", nargs=1, type=str, help="url of YouTube channel")
group.add_argument("-f", "--file", nargs=1, type=str, help="file list of URLs")
parser.add_argument("-p", "--page", nargs=1, type=str, help="page code")
args = parser.parse_args()
if args.file:
    # BUGFIX: the original called `f.close` without parentheses, so the file
    # was never closed; a context manager closes it deterministically.
    with open(args.file[0]) as f:
        urls = [url.rstrip('\n') for url in f]
elif args.url:
    urls = args.url
# iterate through the URLs collecting basic channel data
for url in urls:
    print("gathering %s" % url)
    gather_channel(url)
| DigitalWhiskey/youtube_collector | youtube_collector.py | youtube_collector.py | py | 8,081 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.loads",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "scraperwiki.sqlite.save",
"line_... |
19772432260 | #!/usr/bin/env python
# coding: utf-8
# In[40]:
import math
import pandas as pd
import numpy as np
from pyspark.sql import SparkSession
import pyspark.sql.functions as fc
import json
import requests
# Spark session (local warehouse dir); only used to read the two CSV inputs.
spark = SparkSession.builder.config("spark.sql.warehouse.dir", "file:///C:/temp").appName("readCSV").getOrCreate()
Data = spark.read.csv('Realtor_info.csv', header=True)
Data1 = spark.read.csv('School_location1.csv', header=True)
# All downstream processing is row-wise pandas work, so convert immediately.
Data = Data.toPandas()
Data1 = Data1.toPandas()
def distance(origin, destination):
    """Great-circle distance in km between two (lat, lon) points (haversine)."""
    earth_radius_km = 6371
    lat1, lon1 = origin
    lat2, lon2 = destination
    half_dlat = math.radians(lat2 - lat1) / 2
    half_dlon = math.radians(lon2 - lon1) / 2
    a = (math.sin(half_dlat) ** 2 +
         math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *
         math.sin(half_dlon) ** 2)
    arc = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return earth_radius_km * arc
# Clean the frame and compute the average square footage per postal code.
Data = Data.dropna()
Data = Data.reset_index(drop=True)
# In[41]:
Data['Avg_sqft(PCode)'] = np.nan
# Deduplicate so each postal code is processed once.
postal_codes = list(set(Data['postal_code'].tolist()))
for postal in postal_codes:
    postal_values = Data[Data['postal_code'] == postal]
    sqft_numbers = []
    for raw_sqft in postal_values['sqft'].tolist():
        try:
            # Keep only digits, e.g. '1,200 sqft' -> 1200.
            sqft_numbers.append(int(''.join(filter(str.isdigit, raw_sqft))))
        except (TypeError, ValueError):  # non-string or no digits at all (was a bare except)
            sqft_numbers.append(0)
    if len(sqft_numbers) != 0:
        avg = sum(sqft_numbers) / len(sqft_numbers)
        # BUGFIX: write through .loc on the frame itself; the original chained
        # Data['col'].loc[idx] = ... assignment is not guaranteed to stick.
        Data.loc[postal_values.index, 'Avg_sqft(PCode)'] = avg
# For every listing, compare against every school in the same city and record
# a representative distance plus the matching school name.
# NOTE(review): max(distance1) picks the FARTHEST same-city school, yet the
# result column is named 'Nearly_school' and the 10 km test below reads like
# a nearest-school check — confirm whether min() was intended.
Distance = []
School = []
for i in range(len(Data)):
    distance1 = []
    school1 = []
    for j in range(len(Data1)):
        if Data['city'][i] == Data1['CITY'][j]:
            lat1 = (float(Data['lat'][i]),float((Data['lon'][i])))
            lat2 = (float(Data1['LAT'][j]),float((Data1['LON'][j])))
            dist = round(distance(lat1, lat2), 1)
            distance1.append(dist)
            school1.append(Data1['NAME'][j])
        else:
            continue
    if len(distance1) !=0:
        Distance.append(max(distance1))
        School.append(school1[distance1.index(max(distance1))])
    else:
        # No school shares the listing's city: sentinel distance.
        Distance.append(10000)
        School.append('No-school')
# In[45]:
'''
Distance_1 = Distance
School_1 = School
print(Distance_1)
print(School_1)
Distance_1.extend(Distance)
School_1.extend(School)
'''
# In[49]:
# Flag listings whose recorded school distance is under 10 km.
school_distinct = []
for i in Distance:
    if i < 10:
        school_distinct.append('Yes')
    else:
        school_distinct.append('No')
# In[50]:
Data['School_district'] = school_distinct
Data['Nearly_school'] = School
# In[59]:
# Average listing price per postal code (same pattern as Avg_sqft above).
Data['Avg_price(PCode)'] = np.nan
# Deduplicate the postal codes (the original iterated the full column and
# recomputed the same average once per row).
postal_codes = list(set(Data['postal_code'].tolist()))
for postal in postal_codes:
    postal_values = Data[Data['postal_code'] == postal]
    price_numbers = []
    for raw_price in postal_values['price'].tolist():
        try:
            # Keep only digits, e.g. '$350,000' -> 350000.
            price_numbers.append(int(''.join(filter(str.isdigit, raw_price))))
        except (TypeError, ValueError):  # non-string or no digits at all (was a bare except)
            price_numbers.append(0)
    if len(price_numbers) != 0:
        avg1 = sum(price_numbers) / len(price_numbers)
        # BUGFIX: write through .loc on the frame itself; the original chained
        # Data['col'].loc[idx] = ... assignment is not guaranteed to stick.
        Data.loc[postal_values.index, 'Avg_price(PCode)'] = avg1
# In[61]:
# Persist the enriched frame locally as CSV and JSON.
Data.to_csv("Realtor_info1.csv", index=False, sep=',')
with open('Realtor_info1.json', 'w') as f:
    f.write(Data.to_json(orient='records'))
# In[62]:
# Upload the records to Firebase (a PUT replaces the entire /Realtor node).
result = Data.to_json(orient="records")
parsed = json.loads(result)
data = json.dumps(parsed)
url = 'https://house-9f5c0-default-rtdb.firebaseio.com/Realtor.json'
response = requests.put(url,data)
# In[58]:
| Lucas0717/house-search | spark_py/process_distinct_school.py | process_distinct_school.py | py | 3,500 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyspark.sql.SparkSession.builder.config",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 15,
"usage_type":... |
26043231137 | import cv2
import pickle
import numpy as np
# Load the list of region rectangles (x, y, w, h) saved by the marking tool.
espacios = []
with open('prueba.pkl', 'rb') as file:
    espacios = pickle.load(file)
video = cv2.VideoCapture('video.mp4')
# Initialize the occupied-region counter
full = 0
# Initialize the per-region occupancy state
estado = [False] * len(espacios)
# Main loop: threshold each frame and count how many marked regions are occupied.
while True:
    check, img = video.read()
    if not check:
        # BUGFIX: stop cleanly at end of stream (or read failure) instead of
        # crashing when cvtColor receives a None frame.
        break
    imgBN = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgTH = cv2.adaptiveThreshold(imgBN, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 25, 16)
    imgMedian = cv2.medianBlur(imgTH, 5)
    # NOTE(review): np.uint8 is the conventional kernel dtype for dilate —
    # confirm int8 is intentional here.
    kernel = np.ones((5, 5), np.int8)
    imgDil = cv2.dilate(imgMedian, kernel)
    for i, (x, y, w, h) in enumerate(espacios):
        espacio = imgDil[y:y+h, x:x+w]
        count = cv2.countNonZero(espacio)
        cv2.putText(img, str(count), (x, y+h-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        if count >= 600:
            if not estado[i]:
                full += 1
                estado[i] = True
            # Blue rectangle for an occupied region
            cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        else:
            if estado[i]:
                full -= 1
                estado[i] = False
            # Green rectangle for an empty region
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
    # Show the number of occupied regions in the overlay text
    texto = f"Personas: {full}"
    cv2.putText(img, texto, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
    cv2.imshow('video', img)
    cv2.waitKey(10)
{
"api_name": "pickle.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"lin... |
43424744194 | import os
from aiohttp import web
from cdtz import config
LIST_TZ = []
def create_list_tz():
    """Populate the module-level LIST_TZ with zone names found on disk.

    Walks /usr/share/zoneinfo, keeps files whose first four bytes are the
    TZif magic, strips the directory prefix to get the zone name, skips a
    few non-zone entries, and finally sorts the list in place.
    """
    dir_zoneinfo = '/usr/share/zoneinfo'
    for dirpath, dirnames, filenames in os.walk(dir_zoneinfo):
        for file in filenames:
            filepath = os.path.join(dirpath, file)
            # BUGFIX: the original opened each file without ever closing it,
            # leaking one handle per candidate file.
            with open(filepath, 'rb') as tz_file:
                magic = tz_file.read(4)
            if magic == b'TZif':
                filename = filepath.replace(f'{dir_zoneinfo}/', '')
                if filename not in ['Factory', 'localtime', 'posixrules', 'Riyadh8']:
                    LIST_TZ.append(filename)
    LIST_TZ.sort()
async def get_list_tz(request):
    """HTTP handler: return the cached timezone list as a JSON array.

    The *request* argument is required by aiohttp's handler signature
    but is not used here.
    """
    return web.json_response(LIST_TZ)
async def main():
    """Application factory: build the aiohttp app and register routes."""
    app = web.Application()
    app.router.add_get('/tz/list_time_zones', get_list_tz)
    return app
if __name__ == '__main__':
    # Build the timezone list once at start-up, then serve it forever.
    create_list_tz()
    web.run_app(main(), host=config.HOST, port=config.PORT)
| IrovoyVlad/cusdeb-tz | bin/server.py | server.py | py | 967 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.walk",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "aiohttp.web.json_response",
"li... |
73944509946 | import os
import numpy as np
import cv2
from config import cfg
from numpy.linalg import inv
import sys
class RawData(object):
    """Base accessor for a per-frame data directory.

    Subclasses set `path_prefix`/`ext` and choose one of the two mapping
    builders to fill `files_path_mapping` (frame tag -> absolute file path).
    """
    def __init__(self, use_raw = False):
        # Paths to the tag->index mapping files (from project config).
        self.mapping_file = cfg.MAPPING_FILE
        self.rand_map = cfg.RAND_MAP
        self.path_prefix = ""
        self.ext = ""
        self.files_path_mapping = {}
        self.use_raw = use_raw
        self.train_val_list = cfg.TRAIN_VAL_LIST
    def get_trainval_mapping(self):
        """Map each line of the train/val list file to <prefix>/<line><ext>."""
        trainval_list_dict = {}
        with open(self.train_val_list, 'r') as f:
            lines = f.read().splitlines()
            line_list = [[line, os.path.join(self.path_prefix, "%s" % (line)) + self.ext] for line in lines]
            trainval_list_dict = dict(line_list)
        return trainval_list_dict
    def get_paths_mapping(self):
        """
        :return: frame_tag_mapping (key: frame_tag val: file_path)
        """
        # First pass: the rand-map file's single comma-separated line maps a
        # shuffled index (the field value) to the zero-padded file number i.
        reverse_file_dict = {}
        with open(self.rand_map, "r") as f:
            line = f.read().splitlines()[0]
            for i, field in enumerate(line.split(',')):
                reverse_file_dict[int(field)] = os.path.join(self.path_prefix, "%06d" % (i)) + self.ext
        # Second pass: each mapping-file line's whitespace fields joined by
        # '/' form the frame tag; its line number (1-based via +1 below)
        # indexes into the reverse dict.
        with open(self.mapping_file, "r") as f:
            lines = f.read().splitlines()
        lines_splitted = [line.split() for line in lines]
        frame_tag_lines = [('/'.join(line), index) for index, line in enumerate(lines_splitted)]
        frame_tag_map = {}
        for one_frame_tag_line in frame_tag_lines:
            frame_tag, index = one_frame_tag_line
            frame_tag_map[frame_tag] = reverse_file_dict[int(index) + 1]
        return frame_tag_map
    def get_tags(self):
        """Return all known frame tags, sorted."""
        tags = [tag for tag in self.files_path_mapping]
        tags.sort()
        return tags
class Image(RawData):
    """Accessor for camera images (KITTI-style image_2 layout, .png files)."""
    def __init__(self, use_raw=False):
        RawData.__init__(self, use_raw)
        self.path_prefix = os.path.join(
            cfg.RAW_DATA_SETS_DIR, "data_object_image_2", "training", "image_2")
        self.ext = ".png"
        mapping = self.get_paths_mapping() if use_raw else self.get_trainval_mapping()
        self.files_path_mapping = mapping
    def load(self, frame_tag):
        """Read and return the image for *frame_tag* via OpenCV."""
        image_path = self.files_path_mapping[frame_tag]
        return cv2.imread(image_path)
class ObjectAnnotation(RawData):
    """Accessor for object label files (KITTI-style label_2 layout, .txt)."""
    def __init__(self, use_raw = False):
        RawData.__init__(self, use_raw)
        self.path_prefix = os.path.join(cfg.RAW_DATA_SETS_DIR, "data_object_label_2", "training", "label_2")
        self.ext = ".txt"
        if use_raw:
            self.files_path_mapping= self.get_paths_mapping()
        else:
            self.files_path_mapping= self.get_trainval_mapping()
    def parseline(self, line):
        """Parse one whitespace-separated label line into an ad-hoc object.

        Fields, in order: type, trunc, occlu, alpha, 2D box (left, top,
        right, bottom), 3D size (h, w, l), location (x, y, z), rot_y.
        All but the type string are converted to float.
        """
        # type() builds a throwaway namespace object; attributes are set below.
        obj = type('object_annotation', (), {})
        fields = line.split()
        obj.type, obj.trunc, obj.occlu = fields[0], float(fields[1]), float(fields[2])
        obj.alpha, obj.left, obj.top, obj.right, obj.bottom = float(fields[3]), float(fields[4]), float(fields[5]), \
            float(fields[6]), float(fields[7])
        obj.h, obj.w, obj.l, obj.x, obj.y, obj.z, obj.rot_y = float(fields[8]), float(fields[9]), float(fields[10]), \
            float(fields[11]), float(fields[12]), float(fields[13]), float(fields[14])
        return obj
    def load(self, frame_tag):
        """
        load object annotation file, including bounding box, and object label
        :param frame_tag:
        :return: list of parsed annotation objects (empty lines are skipped)
        """
        annot_path = self.files_path_mapping[frame_tag]
        objs = []
        with open(annot_path, 'r') as f:
            lines = f.read().splitlines()
            for line in lines:
                if not line:
                    continue
                objs.append(self.parseline(line))
        return objs
class Calibration(RawData):
    """Accessor for per-frame calibration files (KITTI-style calib layout)."""
    def __init__(self, use_raw = False):
        RawData.__init__(self, use_raw)
        self.path_prefix = os.path.join(cfg.RAW_DATA_SETS_DIR, "data_object_calib", "training", "calib")
        self.ext = ".txt"
        if use_raw:
            self.files_path_mapping= self.get_paths_mapping()
        else:
            self.files_path_mapping= self.get_trainval_mapping()
    def load(self, frame_tag):
        """
        load P2 (for rgb camera 2), R0_Rect, Tr_velo_to_cam, and compute the velo_to_rgb
        :param frame_tag: e.g.,2011_09_26/2011_09_26_drive_0009_sync/0000000021
        :return: calibration matrix
        """
        calib_file_path = self.files_path_mapping[frame_tag]
        # Ad-hoc namespace object; matrices are attached as attributes.
        obj = type('calib', (), {})
        with open(calib_file_path, 'r') as f:
            lines = f.read().splitlines()
            for line in lines:
                if not line:
                    continue
                # Each line is 'NAME: v1 v2 ...'.
                fields = line.split(':')
                name, calib_str = fields[0], fields[1]
                # NOTE(review): np.fromstring text mode is deprecated in
                # newer NumPy; np.array(calib_str.split(), dtype=...) is the
                # modern equivalent — confirm before upgrading.
                calib_data = np.fromstring(calib_str, sep=' ', dtype=np.float32)
                if name == 'P2':
                    # Pad the 3x4 projection with [0,0,0,1] to a 4x4.
                    obj.P2 = np.hstack((calib_data, [0, 0, 0, 1])).reshape(4, 4)
                elif name == 'R0_rect':
                    # Embed the 3x3 rectification into a 4x4 homogeneous matrix.
                    obj.R0_rect = np.zeros((4, 4), dtype = calib_data.dtype)
                    obj.R0_rect[:3, :3] = calib_data.reshape((3, 3))
                    obj.R0_rect[3, 3] = 1
                elif name == 'Tr_velo_to_cam':
                    obj.velo_to_cam = np.hstack((calib_data, [0, 0, 0, 1])).reshape(4, 4)
        # Composite transforms (assumes all three entries were present above).
        obj.velo_to_rgb = np.dot(obj.P2, np.dot(obj.R0_rect, obj.velo_to_cam))
        obj.cam_to_rgb = np.dot(obj.P2, obj.R0_rect)
        obj.cam_to_velo = inv(obj.velo_to_cam)
        return obj
class Lidar(RawData):
    """Accessor for velodyne point clouds stored as flat float32 .bin files."""
    def __init__(self, use_raw=False):
        RawData.__init__(self, use_raw)
        self.path_prefix = os.path.join(
            cfg.RAW_DATA_SETS_DIR, "data_object_velodyne", "training", "velodyne")
        self.ext = ".bin"
        mapping = self.get_paths_mapping() if use_raw else self.get_trainval_mapping()
        self.files_path_mapping = mapping
    def load(self, frame_tag):
        """Return the scan for *frame_tag* as an (N, 4) float32 array."""
        raw = np.fromfile(self.files_path_mapping[frame_tag], np.float32)
        return raw.reshape((-1, 4))
if __name__ == '__main__':
    # No standalone behaviour; this module is intended to be imported.
    pass
{
"api_name": "config.cfg.MAPPING_FILE",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "config.cfg",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "config.cfg.RAND_MAP",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "conf... |
29584667291 | # -*- coding: utf-8 -*-
from datetime import datetime
import calendar
from openerp import models, fields, api, sql_db
from openerp.addons.avancys_orm import avancys_orm as orm
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DSDF, DEFAULT_SERVER_DATETIME_FORMAT as DSTF, float_compare
from openerp.exceptions import Warning
from dateutil.relativedelta import relativedelta
import unicodedata
import base64
import math
import calendar as cal
# PILA form types; only the 'E' planilla is currently enabled, the rest are
# kept commented out for future support.
FORM_TYPES = [
    ('E', '[E] Planilla empleados empresas'),
    # ('Y', '[Y] Planilla independientes empresas'),
    # ('A', '[A] Planilla cotizantes con novedad de ingreso'),
    # ('S', '[S] Planilla empleados de servicio domestico'),
    # ('M', '[M] Planilla mora'),
    # ('N', '[N] Planilla correcciones'),
    # ('H', '[H] Planilla madres sustitutas'),
    # ('T', '[T] Planilla empleados entidad beneficiaria del sistema general de participaciones'),
    # ('F', '[F] Planilla pago aporte patronal faltante'),
    # ('J', '[J] Planilla para pago seguridad social en cumplimiento de sentencia digital'),
    # ('X', '[X] Planilla para pago empresa liquidada'),
    # ('U', '[U] Planilla de uso UGPP para pagos por terceros'),
    # ('K', '[K] Planilla estudiantes')
]
# Workflow states for the contribution form.
FORM_STATES = [
    ('draft', 'Borrador'),
    ('closed', 'Cerrada')
]
# Wage-type codes according to Resolution 454.
TYPE_WAGE = [
    ('X', 'Integral'),
    ('F', 'Fijo'),
    ('V', 'Variable'),
    (' ', 'Aprendiz')
]
def monthrange(year=None, month=None):
    """Return (year, month, last_day_of_month), defaulting to the current month."""
    today = datetime.today()
    y = year if year else today.year
    m = month if month else today.month
    last_day = cal.monthrange(y, m)[1]
    return y, m, last_day
def strip_accents(s):
    """Return *s* with combining accents removed and remaining non-ASCII
    characters replaced by spaces.

    NOTE(review): written for Python 2 (OpenERP): `encode('ascii',
    'replace')` must return a str for the subsequent `.replace('?', ' ')`
    to work; under Python 3 it returns bytes and this raises TypeError.
    Also note that any literal '?' already present in the input is turned
    into a space as a side effect — confirm both points before porting.
    """
    new_string = ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
    new_string = new_string.encode('ascii', 'replace').replace('?', ' ')
    return new_string
def prep_field(s, align='left', size=0, fill=' ', date=False):
    """Format *s* as a fixed-width flat-file field.

    False/None become the empty string (note: 0 == False, so a numeric 0 is
    also blanked). With date=True, *s* is formatted as YYYY-MM-DD first.
    The value is truncated to *size* and padded with *fill* on the side
    opposite *align*; an unrecognized *align* returns *s* unchanged.
    """
    if s in [False, None]:
        s = ''
    if date:
        s = datetime.strftime(s, "%Y-%m-%d")
    truncated = str(s)[0:size]
    pad_char = str(fill)
    if align == 'right':
        return truncated.rjust(size, pad_char)
    if align == 'left':
        return truncated.ljust(size, pad_char)
    return s
def rp(value):
    """Round a monetary value up to the next multiple of 100 when the
    remainder is at least 0.01; otherwise round to zero decimals."""
    remainder = value % 100.0
    if remainder >= 0.01:
        return int(math.ceil(value / 100.0)) * 100
    return round(value, 0)
def rp1(value):
    """Round *value*, bumping the result up by one whenever the excess over
    round() is greater than 1e-4."""
    base = round(value)
    return base + 1 if value - base > 0.0001 else base
class HrContributionFormLine(models.Model):
    """One detail line of a PILA contribution form.

    Each line represents either the main worked-days record of a contract
    ("linea principal") or one leave/novelty record, with its days, IBC
    bases, rates and contributions per subsystem (pension/EPS/ARL/CCF and
    parafiscals). Field labels/help texts follow the official PILA layout.
    """
    _name = 'hr.contribution.form.line'
    contribution_id = fields.Many2one('hr.contribution.form', 'Autoliquidacion', ondelete="cascade")
    employee_id = fields.Many2one('hr.employee', 'Empleado')
    contract_id = fields.Many2one('hr.contract', 'Contrato')
    leave_id = fields.Many2one('hr.holidays', 'Ausencia')
    main = fields.Boolean('Linea principal')
    # PILA novelty flags
    ing = fields.Selection([('X', 'X'), ('R', 'R'), ('C', 'C')], 'ING', help='Ingreso')
    ret = fields.Selection([('P', 'P'), ('R', 'R'), ('C', 'C'), ('X', 'X')], 'RET', help='Retiro')
    tde = fields.Boolean('TDE', help='Traslado desde otra EPS o EOC')
    tae = fields.Boolean('TAE', help='Traslado a otra EPS o EOC')
    tdp = fields.Boolean('TDP', help='Traslado desde otra administradora de pensiones')
    tap = fields.Boolean('TAP', help='Traslado a otra administradora de pensiones')
    vsp = fields.Boolean('VSP', help='Variacion permanente de salario')
    fixes = fields.Selection([('A', 'A'), ('C', 'C')], 'Correcciones')
    vst = fields.Boolean('VST', help='Variacion transitoria de salario')
    sln = fields.Boolean('SLN', help='Licencia no remunerada o suspension temporal del contrato')
    ige = fields.Boolean('IGE', help='Incapacidad general')
    lma = fields.Boolean('LMA', help='Licencia de maternidad o paternidad')
    vac = fields.Selection([('X', 'X'), ('L', 'L')], 'VAC', help='Vacaciones/LR')
    avp = fields.Boolean('AVP', help='Aporte voluntario de pension')
    vct = fields.Boolean('VCT', help='Variacion de centros de trabajo')
    irl = fields.Float('IRL', help='Dias de incapacidad por accidente de trabajo o enfermedad laboral')
    # Administrator codes (pension fund, health, compensation fund)
    afp_code = fields.Char('Codigo AFP')
    afp_to_code = fields.Char('Codigo AFP a la cual se traslada')
    eps_code = fields.Char('Codigo EPS')
    eps_to_code = fields.Char('Codigo EPS a la cual se traslada')
    ccf_code = fields.Char('Codigo CCF')
    # Contributed days per subsystem
    pens_days = fields.Integer('Dias cotizados pension')
    eps_days = fields.Integer('Dias cotizados EPS')
    arl_days = fields.Integer('Dias cotizados ARL')
    ccf_days = fields.Integer('Dias cotizados CCF')
    wage = fields.Integer('Salario basico')
    int_wage = fields.Boolean('Salario integral')
    wage_type = fields.Selection(string='Tipo de salario', selection=TYPE_WAGE)
    # Contribution bases (IBC) per subsystem
    pens_ibc = fields.Float('IBC pension')
    eps_ibc = fields.Float('IBC EPS')
    arl_ibc = fields.Float('IBC ARL')
    ccf_ibc = fields.Float('IBC CCF')
    global_ibc = fields.Float('IBC Global')
    # Rates and computed contributions
    pens_rate = fields.Float('Tarifa pension')
    pens_cot = fields.Float('Cotizacion pension')
    ap_vol_contributor = fields.Float('Aportes voluntarios del afiliado')
    ap_vol_company = fields.Float('Aportes voluntarios del aportante')
    pens_total = fields.Float('Aportes totales de pension')
    fsol = fields.Float('Aportes a fondo de solidaridad')
    fsub = fields.Float('Aportes a fondo de subsistencia')
    ret_cont_vol = fields.Float('Valor no retenido por aportes voluntarios')
    eps_rate = fields.Float('Tarifa EPS')
    eps_cot = fields.Float('Cotizacion EPS')
    ups = fields.Float('Total UPS')
    aus_auth = fields.Char('Numero de autorizacion de incapacidad')
    gd_amount = fields.Float('Valor de la incapacidad EG')
    mat_auth = fields.Char('Numero de autorizacion de licencia')
    mat_amount = fields.Float('Valor de licencia')
    arl_rate = fields.Float('Tarifa ARL')
    work_center = fields.Char('Centro de trabajo')
    arl_cot = fields.Float('Cotizacion ARL')
    ccf_rate = fields.Float('Tarifa CCF')
    ccf_cot = fields.Float('Cotizacion CCF')
    sena_rate = fields.Float('Tarifa SENA')
    sena_cot = fields.Float('Cotizacion SENA')
    icbf_rate = fields.Float('Tarifa ICBF')
    icbf_cot = fields.Float('Cotizacion ICBF')
    esap_rate = fields.Float('Tarifa ESAP')
    esap_cot = fields.Float('Cotizacion ESAP')
    men_rate = fields.Float('Tarifa MEN')
    men_cot = fields.Float('Cotizacion MEN')
    exonerated = fields.Boolean('Exonerado de aportes')
    arl_code = fields.Char('Codigo ARL')
    arl_risk = fields.Char('Clase de riesgo')
    # Contract and novelty dates
    k_start = fields.Date('Fecha de ingreso')
    k_end = fields.Date('Fecha de retiro')
    vsp_start = fields.Date('Fecha de inicio de VSP')
    sln_start = fields.Date('Inicio licencia no remunerada')
    sln_end = fields.Date('Fin licencia no remunerada')
    ige_start = fields.Date('Inicio incapacidad EG')
    ige_end = fields.Date('Fin incapacidad EG')
    lma_start = fields.Date('Inicio licencia maternidad')
    lma_end = fields.Date('Fin licencia maternidad')
    vac_start = fields.Date('Inicio vacaciones')
    vac_end = fields.Date('Fin vacaciones')
    vct_start = fields.Date('Inicio cambio centro de trabajo')
    vct_end = fields.Date('Fin cambio de centro de trabajo')
    atep_start = fields.Date('Inicio ATEP')
    atep_end = fields.Date('Fin ATEP')
    other_ibc = fields.Float('IBC otros parafiscales')
    w_hours = fields.Integer('Horas laboradas')
class HrContributionForm(models.Model):
    """PILA contribution (self-assessment) form for one monthly period.

    Holds the set of contracts to liquidate, the generated detail lines,
    the accounting move and the exported flat file.
    """
    _name = 'hr.contribution.form'
    name = fields.Char('Nombre')
    # Only monthly periods are valid for PILA.
    period_id = fields.Many2one('payslip.period', 'Periodo', domain=[('schedule_pay', '=', 'monthly')])
    group_id = fields.Many2one('hr.contract.group', 'Grupo de contratos')
    form_type = fields.Selection(FORM_TYPES, 'Tipo de planilla', default='E')
    branch_code = fields.Char('Codigo de sucursal')
    # Presentation mode; 'U' = single ("unica").
    presentation = fields.Char('Presentacion', size=1, default='U')
    contract_ids = fields.Many2many('hr.contract', 'pila_contract_rel', 'pila_id', 'contract_id')
    state = fields.Selection(FORM_STATES, 'Estado', default='draft')
    # Generated PILA flat file (base64).
    file = fields.Binary('Archivo plano', readonly=True)
    journal_id = fields.Many2one('account.journal', "Diario contable")
    move_id = fields.Many2one('account.move', 'Asiento')
    move_id_name = fields.Char('Move Name')
    form_line_ids = fields.One2many('hr.contribution.form.line', 'contribution_id', string='Detalle')
    error_log = fields.Text('Reporte de errores')
    @api.multi
    def fix_o_rights(self, rights, start_p, end_p, contract):
        """Return the IBC derived from the employee's 'other rights' income.

        Adjusts *rights* (a money amount for the period) according to the
        general-illness leave days registered for *contract* between
        *start_p* and *end_p*, weighting each day by the recognition
        percentage configured on its leave type per day-sequence bracket
        (1-2, 3-90, 91-180, 181+). Days of leave types flagged
        ``sub_wd and no_payable`` instead add one day of base wage each.

        :param rights: amount from the 'o_rights' payslip category; <= 0
                       short-circuits to 0.
        :param start_p: period start date (string 'YYYY-MM-DD').
        :param end_p: period end date (string 'YYYY-MM-DD').
        :param contract: hr.contract database id (int).
        :raises Warning: when a leave is its own extension, or when leaves
                         in the same bracket carry different percentages.
        """
        if rights <= 0:
            return 0
        query="""select HH.id, HH.absence_id, HHD.sequence, HHS.gi_b2, HHS.gi_b90, HHS.gi_b180, HHS.gi_a180, HHS.sub_wd, HHS.no_payable
                 from hr_holidays_days as HHD
                 inner join hr_holidays as HH
                 on HH.id = HHD.holiday_id
                 inner join hr_holidays_status as HHS
                 on HHS.id = HHD.holiday_status_id and HHS.active and (HHS.general_illness or (sub_wd and no_payable))
                 where HHD.contract_id = {contrato} and
                 HHD.name BETWEEN '{s_p}' and '{e_p}'""".format(
            contrato=contract,
            s_p = start_p,
            e_p=end_p)
        holiday_days = orm.fetchall(self._cr, query)
        # Bucket the day counts by general-illness discount bracket
        # (or extension); each bucket is [day_count, percentage].
        days_b2, days_b90, days_b180, days_a180, otros = [0,None],[0,None],[0,None],[0,None], 0  # [day_count, percentage]
        for day in holiday_days:
            leave_id = self.env['hr.holidays'].browse(day[0])
            if day[0] == day[1]:
                raise Warning("La ausencia {aus} no puede tener una prorroga a si misma, se sugire borrar y crear una nueva ausencia".format(aus=leave_id.name))
            if day[7] and day[8]:  # First check leave types that modify the IBC (sub_wd + no_payable)
                otros += 1
            elif day[2] <= 2:  # General-illness days, sequence 1-2
                days_b2[0] += 1
                if not days_b2[1]:
                    days_b2[1] = day[3]
                if days_b2[1] and days_b2[1] != day[3]:
                    raise Warning("La ausencia {aus} tiene <Porcentaje a reconocer por enfermedad de 1 y 2 dias> diferente a otras ausencias reportadas en el periodo de {P}, revisar ausencias del contrato con id = {C}".format(aus=leave_id.name,P=start_p[:-2],C=contract))
            elif 2 < day[2] <= 90:
                days_b90[0] += 1
                if not days_b90[1]:
                    days_b90[1] = day[4]
                if days_b90[1] and days_b90[1] != day[4]:
                    raise Warning("La ausencia {aus} tiene <Porcentaje a reconocer por enfermedad de 3 a 90 dias> diferente a otras ausencias reportadas en el periodo de {P}, revisar ausencias del contrato con id = {C}".format(aus=leave_id.name,P=start_p[:-2],C=contract))
            elif 90 < day[2] <= 180:
                days_b180[0] += 1
                if not days_b180[1]:
                    days_b180[1] = day[5]
                if days_b180[1] and days_b180[1] != day[5]:
                    raise Warning("La ausencia {aus} tiene <Porcentaje a reconocer por enfermedad de 91 a 180 dias> diferente a otras ausencias reportadas en el periodo de {P}, revisar ausencias del contrato con id = {C}".format(aus=leave_id.name,P=start_p[:-2],C=contract))
            else:
                days_a180[0] += 1
                if not days_a180[1]:
                    days_a180[1] = day[6]
                if days_a180[1] and days_a180[1] != day[6]:
                    raise Warning("La ausencia {aus} tiene <Porcentaje a reconocer por enfermedad de 181 días en adelante> diferente a otras ausencias reportadas en el periodo de {P}, revisar ausencias del contrato con id = {C}".format(aus=leave_id.name,P=start_p[:-2],C=contract))
        # --------------------- Compute the IBC
        # Accumulate day_count * percentage across brackets.
        DiasPorcentaje = [days_b2, days_b90, days_b180, days_a180]
        total = [0,0]  # [weighted days (count * pct / 100), raw leave days]
        for DP in DiasPorcentaje:
            if not DP[1]:
                continue
            total[0] += float(DP[0] * DP [1])/100
            total[1] += DP[0]
        # IBC from other rights: scale by raw days over weighted days.
        # NOTE(review): the guard reads 'total[0] and total[0]' — possibly
        # 'total[0] and total[1]' was intended; confirm before changing.
        rights = float(rights * total[1])/ total[0] if total[0] and total[0] else rights
        rights += float(self.env['hr.contract'].browse(contract).wage)/30 * otros if otros > 0 else 0
        return rights
    @api.multi
    def compute_ibc(self, contract, month, main):
        """Compute the contribution base (IBC) of *contract* for *month*.

        :param contract: hr.contract record.
        :param month: 'YYYY-MM' string of the month to evaluate.
        :param main: 'main' for the principal line; otherwise a leave id,
                     in which case 'other rights' income is also included
                     (via :meth:`fix_o_rights`).
        :return: ``([amount, worked_days], salary_earnings)`` where
                 *amount* is the capped IBC and *salary_earnings* the total
                 salary income considered.
        """
        sdt = month + '-01'
        edt = month + "-" + str(monthrange(int(month[0:4]), int(month[5:7]))[2])
        plp = self.env['hr.payslip']
        earnings = plp.get_interval_category('earnings', sdt, edt, contract=contract.id)  # base earnings
        o_salarial_earnings = plp.get_interval_category('o_salarial_earnings', sdt, edt, contract=contract.id)  # other salary earnings
        comp_earnings = plp.get_interval_category('comp_earnings', sdt, edt, contract=contract.id)  # complementary income
        if main != 'main':
            # Leave lines also consider 'other rights' income, excluding
            # vacation and bonus settlement concepts.
            orig_exc = ('VAC_PAG', 'VAC_LIQ', 'PRIMA', 'PRIMA_LIQ')
            o_rights = plp.get_interval_category('o_rights', sdt, edt, exclude=orig_exc, contract=contract.id)
            if o_rights:
                o_rights = self.fix_o_rights(o_rights[0][1], sdt, edt, contract.id)
            else:
                o_rights = 0
        else:
            o_rights = 0
        sal_earnings_itv = earnings + o_salarial_earnings + comp_earnings
        sal_earnings = sum([x[1] for x in sal_earnings_itv]) + o_rights
        o_earnings_itv = plp.get_interval_category('o_earnings', sdt, edt, contract=contract.id)
        o_earnings = sum([x[1] for x in o_earnings_itv])
        # Non-salary earnings above 40% of total income are added back to
        # the base (presumably the Law 1393 40% rule — confirm).
        top40 = (sal_earnings + o_earnings) * 0.4
        if o_earnings > top40:
            amount = sal_earnings + o_earnings - top40
            sal_earnings += o_earnings - top40
        else:
            amount = sal_earnings
        # Integral salaries contribute over 70% of the amount.
        if contract.type_id.type_class == 'int':
            amount = amount * 0.7
            sal_earnings = sal_earnings * 0.7
        e_v = self.env['variables.economicas']
        smmlv = e_v.getValue('SMMLV', sdt + " 05:00:00") or 0.0
        # Cap the base at 25 minimum legal wages (SMMLV).
        if amount > 25 * smmlv:
            amount = 25 * smmlv
        days = self.get_wd(contract, month=month, main=main)[0]
        sal_days = plp.get_interval_concept_qty('BASICO', sdt, edt, contract=contract.id)
        if sal_days:
            sal_days = sal_days[0][2]
        else:
            sal_days = 0
        # Days worked but not yet paid through the BASICO concept.
        days_to_add = days - sal_days if days != sal_days else 0
        if main == 'main':
            if amount < contract.wage and contract.wage != 0:
                amount += contract.wage * days_to_add / 30
                sal_earnings += contract.wage * days_to_add / 30
        return [amount, days], sal_earnings
    @api.multi
    def get_wd(self, contract, period=False, month="", main=False):
        """Consolidate worked days and leaves of *contract* in a period.

        :param contract: hr.contract record.
        :param period: payslip.period record; when falsy, *month*
                       ('YYYY-MM') defines the range instead.
        :param main: 'main' to subtract leave days from the worked days.
        :return: ``(w102, leaves, day31)`` where *w102* is the worked days
                 (30-day base, net of leaves and contract start/end),
                 *leaves* maps holiday_id -> [day_count, type_code,
                 min_sequence, max_sequence], and *day31* is truthy when a
                 paid leave day exists on the 31st of a 31-day month.
        """
        if period:
            start_period = period.start_period
            end_period = period.end_period
        else:
            start_period = month + "-01"
            max_day = monthrange(int(month[0:4]), int(month[5:7]))[2]
            end_period = month + "-" + str(max_day)
        # Clamp to 30 days or fewer and ignore disabilities on day 31.
        max_day = int(end_period[8:10])
        max_day = 30 if max_day > 30 else max_day
        end_period = end_period[0:7] + "-" + str(max_day)
        ld_query = ("SELECT hhd.name, hhd.holiday_id, hhs.code, hhd.sequence "
                    "FROM hr_holidays_days hhd "
                    "INNER JOIN hr_holidays_status hhs ON hhs.id = hhd.holiday_status_id "
                    "WHERE hhd.name BETWEEN '{sd}' AND '{ed}' "
                    "AND hhd.contract_id = {k} "
                    "AND hhd.state in ('paid','validate') ".format(
                        sd=start_period, ed=end_period, k=contract.id))
        # The state filter must keep both 'paid' and 'validate':
        # if an employee goes on disability after payroll is posted,
        # the self-assessment must still be paid on actual figures.
        ld_data = orm.fetchall(self._cr, ld_query)
        year = int(end_period[:4])
        month = int(end_period[5:7])
        # NOTE(review): uses 'calendar.monthrange' while this chunk only
        # imports 'calendar as cal' — relies on a full 'calendar' import
        # earlier in the file; confirm.
        end_day_month = calendar.monthrange(year,month)[1]
        day31 = end_day_month == 31
        if day31:
            query_day31 = """ select HHD.holiday_id
                              from hr_holidays_days as HHD
                              inner join hr_holidays_status as HHS
                              on HHS.id = HHD.holiday_status_id
                              where HHD.contract_id = {contrato}
                              and HHD.state in ('paid', 'validate')
                              and HHD.name = '{day31}' """.format(
                contrato=contract.id,
                day31=end_period[:-2] + '31')
            day31 = orm.fetchall(self._cr, query_day31)
        # Group the day rows by leave (holiday_id).
        leaves, total_leaves = {}, 0
        for ld in ld_data:
            if ld[1] not in leaves:
                leaves[ld[1]] = [1, ld[2], ld[3], ld[3]]
            else:
                leaves[ld[1]][0] += 1
                leaves[ld[1]][2] = ld[3] if leaves[ld[1]][2] > ld[3] else leaves[ld[1]][2]
                leaves[ld[1]][3] = ld[3] if leaves[ld[1]][3] < ld[3] else leaves[ld[1]][3]
            total_leaves += 1
        if total_leaves > 30:
            total_leaves = 30
        w102 = 30 - total_leaves if main == 'main' else 30
        # Parse period and contract boundary dates.
        dt_sp = datetime.strptime(start_period, DSDF).date()
        dt_ep = datetime.strptime(end_period, DSDF).date()
        dt_ksd = datetime.strptime(contract.date_start, DSDF).date()
        dt_ked = datetime.strptime(contract.date_end, DSDF).date() if contract.date_end else False
        # Deduct days when the contract starts or ends inside the period.
        ded_start_days, ded_end_days = 0, 0
        if dt_ksd > dt_sp:
            if dt_ep >= dt_ksd:
                ded_start_days = (dt_ksd - dt_sp).days
            else:
                ded_start_days = 30
        if dt_ked and dt_ked <= dt_ep:
            ded_end_days = (dt_ep - dt_ked).days
            if dt_ep.day == 31 and ded_end_days:
                ded_end_days -= 1
            if dt_ked.month == 2:  # Handle settlement on Feb 28/29
                ded_end_days += 2 if end_day_month == 28 else 1
        w102 -= ded_start_days
        w102 -= ded_end_days
        w102 = 0 if w102 < 0 else w102
        return w102, leaves, day31
@api.multi
def calculate_pila(self):
self.get_contract_repeated()
error_log = ""
self._cr.execute("DELETE FROM hr_contribution_form_line where contribution_id = %s" % self.id)
emp_lsq = ("SELECT hc.employee_id, hc.id FROM pila_contract_rel rel "
"INNER JOIN hr_contract hc ON rel.contract_id = hc.id "
"WHERE rel.pila_id = {pila} "
"GROUP BY hc.employee_id, hc.id "
"ORDER BY hc.employee_id asc, hc.id asc".format(pila=self.id))
emp_ls = orm.fetchall(self._cr, emp_lsq)
payslip_obj = self.env['hr.payslip']
start_period = self.period_id.start_period
end_period = self.period_id.end_period
i, j = 0, len(emp_ls)
bar = orm.progress_bar(i, j)
lines = []
e_v = self.env['variables.economicas']
smmlv = e_v.getValue('SMMLV', end_period) or 0.0
for emp in emp_ls:
contract_id = self.env['hr.contract'].browse(emp[1])
cot_type = prep_field(contract_id.fiscal_type_id.code, size=2)
subcot_type = prep_field(contract_id.fiscal_subtype_id.code or '00', size=2)
retired = True if contract_id.fiscal_subtype_id.code not in ['00', False] \
or contract_id.fiscal_type_id.code in ('12', '19') else False
apr = contract_id.fiscal_type_id.code in ('12', '19')
apr_lect = contract_id.fiscal_type_id.code == '12'
# Consolidacion de dias de ausencia pagas del contrato en el periodo definido
w102, leaves, day31 = self.get_wd(contract_id, period=self.period_id, main='main')
# Generacion de lineas
fl = []
if w102:
fl.append(['main', w102, 'WORK102', 0, 0])
fl += [[k,
leaves[k][0] if leaves[k][0] <= 30 else 30,
leaves[k][1],
leaves[k][2],
leaves[k][3]]
for k in leaves]
total_days = sum([x[1] for x in fl])
if total_days > 30:
error_log += "Hay mas de 30 dias reportados en contrato {k} \n".format(k=contract_id.name)
# Asignacion de IBC GLOBAL en lineas
# ref_wage = contract_id.wage if contract_id.wage >= smmlv else smmlv
ref_wage = smmlv
for line in fl:
if line[0] == 'main':
current_comp_ibc, total_ingreso = self.compute_ibc(contract_id, self.period_id.start_period[0:7], line[0])
line_ibc = current_comp_ibc[0]
else:
leave_id = self.env['hr.holidays'].browse(line[0])
line_ibc, total_ingreso = 0, 0
if leave_id.holiday_status_id.general_illness:
#{code_concept: [start,end, vaue]}
concepts_to_eval = {'EG_B2':[1,2,0], 'EG_B90':[3,90,0], 'EG_B180':[91,180,0],'EG_A180':[181,-1,0]}
leave_days_ids = filter(lambda z: start_period <= z.name <= end_period, leave_id.line_ids)
for cte in concepts_to_eval:
gis = payslip_obj.get_interval_concept_qty(cte, start_period, end_period, contract_id.id)
leave_total, leave_qty = 0, 0
for gi in gis:
leave_total += gi[1] if gi[1] else 0
leave_qty += gi[2] if gi[2] else 0
unit_value = leave_total / leave_qty if leave_qty else 0
concepts_to_eval[cte][2] = unit_value
for leave_day in leave_days_ids:
if not leave_day.days_payslip:
continue
for dc in concepts_to_eval.values():
if dc[2] and dc[0] <= leave_day.sequence <= (dc[1] if dc[1] > 0 else leave_day.sequence):
line_ibc += dc[2]
total_ingreso += dc[2]
elif leave_id.holiday_status_id.maternal_lic:
ml = payslip_obj.get_interval_concept_qty('MAT_LIC', start_period, end_period, contract_id.id)
line_ibc = total_ingreso = sum([x[1] for x in ml])
elif leave_id.holiday_status_id.paternal_lic:
pl = payslip_obj.get_interval_concept_qty('PAT_LIC', start_period, end_period, contract_id.id)
line_ibc = total_ingreso = sum([x[1] for x in pl])
elif leave_id.holiday_status_id.atep:
atep = payslip_obj.get_interval_concept_qty('ATEP', start_period, end_period, contract_id.id)
atep_p2 = payslip_obj.get_interval_concept_qty('ATEP_P2', start_period, end_period, contract_id.id)
line_ibc = total_ingreso = sum([x[1] for x in atep + atep_p2])
else:
ref_date = datetime.strptime(leave_id.date_from[0:10], "%Y-%m-%d") - relativedelta(months=1)
month = datetime.strftime(ref_date, "%Y-%m")
leave_ibc, total_ingreso = self.compute_ibc(contract_id, month, line[0])
if leave_ibc[0] == 0 or leave_ibc[1] == 0:
line_ibc = total_ingreso = contract_id.wage * line[1] / 30
else:
line_ibc = leave_ibc[0] * line[1] / leave_ibc[1]
line.append(line_ibc)
line.append(total_ingreso) if total_ingreso else line.append(line_ibc)
total_ibc = sum([x[5] for x in fl])
ingreso = start_period <= contract_id.date_start <= end_period
retiro = (start_period <= contract_id.date_end <= end_period) and contract_id.state == 'done'
#Ajuste de tope minimo por linea, donde la sumatoria de lineas no debe ser menor a un SMMLV
if total_ibc < smmlv and not (retiro or ingreso):
for x in fl:
x[5] = float(smmlv * x[1])/30
x[6] = float(smmlv * x[1])/30
#Ajuste de tope maximo por linea, donde la sumatoria de lineas no debe ser mayor a 25 SMMLV
for x in fl:
x.append(x[5])
x.append(x[6])
if total_ibc > smmlv * 25:
for x in fl:
x[5] = (smmlv * 25 * x[1])/30
x[6] = (smmlv * 25 * x[1])/30
total_ibc = sum([x[5] for x in fl])
if total_days and total_ibc * 30 / total_days < ref_wage and not contract_id.type_id.type_class == 'int':
ibc_to_adj = ref_wage * total_days / 30 - total_ibc
else:
ibc_to_adj = 0
if ibc_to_adj:
fl[0][5] += ibc_to_adj
# ITERACION PRINCIPAL----
pay_vac_comp = True
apply_ret = True
wage_type_main_line = False
for line in fl:
if isinstance(line[0], basestring) and line[0] == 'main':
leave_id = False
main = True
else:
leave_id = self.env['hr.holidays'].browse(line[0])
leave_type = leave_id.holiday_status_id
lstart = leave_id.date_from[0:10]
if lstart < start_period:
lstart = start_period
lend = max([x.name for x in leave_id.line_ids])
if lend > end_period:
lend = end_period
main = False
# Novedad de ingreso
ing = "X" if start_period <= contract_id.date_start <= end_period and main else ''
# Novedad de retiro
wm = fl[0][0] == 'main'
ret = (start_period <= contract_id.date_end <= end_period) and contract_id.state == 'done'#((main and wm) or (not main and leave_type.vacaciones))
ret = ret and apply_ret
ret = 'X' if ret else ''
apply_ret = False
# Variacion salario permanente
wage_change_q = ("SELECT id, date "
"FROM hr_contract_salary_change "
"WHERE contract_id = {c} "
"AND date BETWEEN '{df}' AND '{dt}'".format(
c=contract_id.id, df=start_period, dt=end_period))
wage_change = orm.fetchall(self._cr, wage_change_q)
vsp = False
if wage_change:
for wc in wage_change:
if not ing:
vsp = True and main
vsp_date = wc[1]
# Variacion transitoria de salario
is_itv = payslip_obj.get_interval_category('earnings', start_period, end_period,
exclude=('BASICO',),
contract=contract_id.id)
comp_itv = payslip_obj.get_interval_category('comp_earnings', start_period, end_period,
contract=contract_id.id)
os_itv = payslip_obj.get_interval_category('o_salarial_earnings', start_period, end_period,
contract=contract_id.id)
devibc = line[5] * 30 / line[1] > contract_id.wage
if ((is_itv or comp_itv or os_itv or devibc) and main and not cot_type in ('12', '19')) or contract_id.part_time:
vst = True
else:
vst = False
# Indicador de licencia no remunerada
sln = not main and leave_type.no_payable
# Indicador novedad por incapacidad eg
ige = not main and not sln and leave_type.general_illness
# Indicador novedad por licencia de maternidad o paternidad
lma = not main and (leave_type.maternal_lic or leave_type.paternal_lic) and not sln
# Indicador por vacaciones
vac = 'X' if not main and leave_type.vacaciones and not sln \
else 'L' if not main and not leave_type.vacaciones \
and not (leave_type.maternal_lic or leave_type.paternal_lic) \
and not leave_type.general_illness and not leave_type.atep and not sln else ''
# Indicador aporte voluntario pension
avp_itv = payslip_obj.get_interval_avp(start_period, end_period, contract=contract_id.id)
if avp_itv and not retired:
avp = True
else:
avp = False
# Dias de incapacidad ATEP
if not main and leave_type.atep and not sln:
irl = leaves[line[0]][0]
else:
irl = 0
# Codigos administradoras
afp_code = contract_id.pensiones.codigo_afp if not retired else False
eps_code = contract_id.eps.codigo_eps
ccf_code = contract_id.cajacomp.codigo_ccf if not apr else False
# Validacion de ciudad de caja y ciudad de desempeño contrato
if contract_id.cajacomp and contract_id.cajacomp.city_id.provincia_id != contract_id.cuidad_desempeno.provincia_id:
error_log += u"La caja asignada en el contrato {k} " \
u"no corresponde al departamento de desempeño \n".format(k=contract_id.name)
# Dias de pension, siempre van full excepto si esta pensionado
pens_days = line[1] if not retired else 0
# Dias de EPS, ARL y CCF siempre van full excepto caja en aprendices
eps_days = line[1]
arl_days = line[1] if not (cot_type in ('12') and subcot_type in ('00')) else 0
ccf_days = line[1] if not apr else 0
# Salario
wage_actual_q = ("SELECT id, date "
"FROM hr_contract_salary_change "
"WHERE contract_id = {c} "
"AND date >= '{dt}'".format(
c=contract_id.id, dt=end_period))
wage_actual = orm.fetchall(self._cr, wage_actual_q)
if not wage_actual:
wage = contract_id.wage if contract_id.wage >= smmlv else smmlv
else:
wages = contract_id.wage_historic_ids.sorted(key=lambda r: r.date, reverse=True)
if len(wages) > 1:
wage = wages[-2].wage
int_wage = contract_id.type_id.type_class == 'int'
#Resolucion 454
if not main and wage_type_main_line:
wage_type = wage_type_main_line
elif int_wage:
wage_type = 'X'
elif vst:
wage_type = 'V'
elif apr:
wage_type = ' '
else:
wage_type = 'F'
if not wage_type_main_line:
wage_type_main_line = wage_type
# IBC
if (cot_type == '01' and subcot_type in ('01', '03', '06', '04')) or \
(cot_type in ('12', '19') and subcot_type in ('00')):
pens_ibc = 0
else:
pens_ibc = rp1(25 * smmlv if line[5] > 25 * smmlv else line[5])
eps_ibc = rp1(25 * smmlv if line[5] > 25 * smmlv else line[5])
if line[0] != 'main':
pens_ibc = rp1(25 * smmlv if line[5] > 25 * smmlv else line[5])
eps_ibc = rp1(25 * smmlv if line[5] > 25 * smmlv else line[5])
arl_ibc = rp1(line[5]) if not (cot_type in ('12') and subcot_type in ('00')) else 0
arl_ibc = rp1(arl_ibc if arl_ibc <= 25 * smmlv else 25 * smmlv)
vac_pag = payslip_obj.get_interval_concept('VAC_PAG', start_period, end_period, contract_id.id)
vac_disf_data = payslip_obj.get_interval_concept_qty('VAC_DISF', start_period, end_period, contract_id.id)
vac_liq = payslip_obj.get_interval_concept('VAC_LIQ', start_period, end_period, contract_id.id)
vac_money = sum([x[1] for x in vac_pag + vac_liq])
vac_disf = 0 if not vac_disf_data else vac_disf_data[0][1] if vac_disf_data[0][1] else 0
vac_dist_qty = 0 if not vac_disf_data else vac_disf_data[0][2] if vac_disf_data[0][2] else 0
ccf_ibc = 0
if main and not apr:
ccf_ibc = line[8]
if vac_money > 0:
ccf_ibc += vac_money
pay_vac_comp = False
else:
if not apr:
leave_id = self.env['hr.holidays'].browse(line[0])
if leave_id.holiday_status_id.vacaciones:
if self.env.user.company_id.fragment_vac:
leave_days_ids = len(filter(lambda z: start_period <= z.name <= end_period, leave_id.line_ids))
else:
leave_days_ids = leave_id.number_of_days_in_payslip
ccf_ibc += (vac_disf * leave_days_ids / vac_dist_qty) if vac_dist_qty else 0
elif leave_id.holiday_status_id.general_illness or leave_id.holiday_status_id.no_payable or leave_id.holiday_status_id.atep:
ccf_ibc = 0#Se pone para que no entre al else, como control de q configuren bien las ausencias
elif (leave_id.holiday_status_id.maternal_lic or leave_id.holiday_status_id.paternal_lic) and leave_id.holiday_status_id.ibc:
ccf_ibc += line[5]
else:
ccf_ibc += float(line[8]*line[1])/30
#Intenta arreglar el problema de las vacaciones liquidadas negativas
#Se debe poner en cero si definitivamente no hay como compensarlo
#Se debe intentar pagar con otras vacaciones disfrutadas
if pay_vac_comp and (ccf_ibc + vac_money) > 0:
ccf_ibc += vac_money
pay_vac_comp = False
else:
ccf_ibc = 0
ccf_ibc = rp1(ccf_ibc)
global_ibc = total_ibc
# Indicador de exonerabilidad
exonerated = global_ibc < 10 * smmlv and not int_wage and not apr
# IBC de otros parafiscales
other_ibc = ccf_ibc if not exonerated else 0
# Tarifa de pension van en cero solo si es pensionado y 12 si es no remunerasdo
pens_rate = self.env.user.company_id.percentage_total/100
if contract_id.high_risk:
pens_rate = 0.26
if not main and leave_type.no_payable:
if contract_id.high_risk:
pens_rate = 0.22
else:
percentage = 3.0 if self.env.user.company_id.percentage_total == 3.0 else self.env.user.company_id.percentage_employer
pens_rate = percentage/100
pens_rate = pens_rate if not retired and not apr else 0
# Cotizacion de pension
pens_cot = rp(pens_ibc * pens_rate)
# Aporte voluntario
if avp:
ap_vol_contributor = rp(sum([x[1] for x in avp_itv]) if not retired else 0)
else:
ap_vol_contributor = 0
# Total pensiones
pens_total = rp(pens_cot + ap_vol_contributor)
# Fondo de solidaridad
fsol = rp(pens_ibc * 0.005 if global_ibc >= 4 * smmlv and not retired and not sln else 0)
fsol = fsol if self.env.user.company_id.cal_fond_sol_sub else 0
# Fondo de subsistencia
fsrate = 0
if global_ibc > 4 * smmlv:
fsrate += 0.005
if 16 * smmlv <= global_ibc <= 17 * smmlv:
fsrate += 0.002
elif 17 * smmlv <= global_ibc <= 18 * smmlv:
fsrate += 0.004
elif 18 * smmlv <= global_ibc <= 19 * smmlv:
fsrate += 0.006
elif 19 * smmlv <= global_ibc <= 20 * smmlv:
fsrate += 0.008
elif global_ibc > 20 * smmlv:
fsrate += 0.01
fsub = rp(pens_ibc * fsrate if not retired and not sln else 0)
fsub = fsub if self.env.user.company_id.cal_fond_sol_sub else 0
ret_cont_vol_itv = payslip_obj.get_interval_concept('RET_CTG_DIF_FVP', start_period, end_period,
contract=contract_id.id)
ret_cont_vol = sum([x[1] for x in ret_cont_vol_itv]) if avp else 0
if ret_cont_vol < 0:
ret_cont_vol = 0
# Tarifa EPS Todas pagan
eps_rate = 0.04
if global_ibc >= 10 * smmlv or int_wage or apr:
eps_rate = 0.125
if not main and leave_type.no_payable:
eps_rate = 0
# Cotizacion EPS
eps_cot = rp(eps_ibc * eps_rate)
# Autorizacion de incapacidad
# aus_auth = line.no_incapacidad if not main and leave_type.general_illness else False
aus_auth, mat_auth = False, False # Campo exclusivo de aportes en linea.
# mat_auth = line.no_incapacidad if not main and (leave_type.maternal_lic or leave_type.paternal_lic) \
# else False
# Tarifa ARL
arl_rate = contract_id.pct_arp / 100 if main and not apr_lect else 0
# Cotizacion ARL
arl_cot = rp(arl_ibc * arl_rate)
work_center = contract_id.workcenter
# Tarifa CCF
if (main or (self.env.user.company_id.quote_rate_ibc_ccf_lics and (leave_type.paternal_lic or leave_type.maternal_lic)) or leave_type.vacaciones or (not main and ret == 'X')) and not apr and ccf_ibc:
ccf_rate = 0.04
else:
ccf_rate = 0
# Cotizacion CCF
ccf_cot = rp(ccf_ibc * ccf_rate)
# Tarifa SENA
sena_rate = 0.02 if global_ibc >= 10 * smmlv or int_wage else 0
if sln:
sena_rate = 0
# Cotizacion SENA
sena_cot = rp(other_ibc * sena_rate)
# Tarifa ICBF
icbf_rate = 0.03 if global_ibc >= 10 * smmlv or int_wage else 0
if sln:
icbf_rate = 0
# Cotizacion ICBF
icbf_cot = rp(other_ibc * icbf_rate)
# Codigo ARL
arl_code = contract_id.arl.codigo_arl if not apr_lect else False
# Riesgo ARL
arl_risk = contract_id.riesgo.name if not apr_lect else False
# Datos de contrato
k_start = contract_id.date_start if ing else False
k_end = contract_id.date_end if ret else False
# Fechas de novedades
vsp_start = vsp_date if vsp else False
sln_start = lstart if not main and sln else False
sln_end = lend if not main and sln else False
ige_start = lstart if not main and ige else False
ige_end = lend if not main and ige else False
lma_start = lstart if not main and lma else False
lma_end = lend if not main and lma else False
vac_start = lstart if not main and vac else False
vac_end = lend if not main and vac else False
atep = leave_type.atep if not main else False
atep_start = lstart if not main and atep else False
atep_end = lend if not main and atep else False
w_hours = line[1] * 8
data = {
'contribution_id': self.id,
'employee_id': emp[0],
'contract_id': contract_id.id,
'leave_id': leave_id.id if leave_id else False,
'main': main,
'ing': ing,
'ret': ret,
'tde': False, # TODO
'tae': False, # TODO
'tdp': False, # TODO
'tap': False, # TODO
'vsp': vsp,
'fixes': False, # TODO
'vst': vst,
'sln': sln,
'ige': ige,
'lma': lma,
'vac': vac,
'avp': avp,
'vct': False, # TODO
'irl': irl,
'afp_code': afp_code,
'afp_to_code': False, # TODO
'eps_code': eps_code,
'eps_to_code': False, # TODO
'ccf_code': ccf_code,
'pens_days': pens_days,
'eps_days': eps_days,
'arl_days': arl_days,
'ccf_days': ccf_days,
'wage': wage,
'int_wage': int_wage,
'pens_ibc': pens_ibc,
'eps_ibc': eps_ibc,
'arl_ibc': arl_ibc,
'ccf_ibc': ccf_ibc,
'global_ibc': global_ibc,
'pens_rate': pens_rate,
'pens_cot': pens_cot,
'ap_vol_contributor': ap_vol_contributor,
'ap_vol_company': 0, # TODO
'pens_total': pens_total,
'fsol': fsol,
'fsub': fsub,
'ret_cont_vol': ret_cont_vol,
'eps_rate': eps_rate,
'eps_cot': eps_cot,
'ups': 0, # TODO
'aus_auth': aus_auth,
'gd_amohnt': False, # TODO
'mat_auth': mat_auth,
'arl_rate': arl_rate,
'work_center': work_center,
'arl_cot': arl_cot,
'ccf_rate': ccf_rate,
'ccf_cot': ccf_cot,
'sena_rate': sena_rate,
'sena_cot': sena_cot,
'icbf_rate': icbf_rate,
'icbf_cot': icbf_cot,
'esap_rate': 0, # TODO
'esap_cot': 0, # TODO
'men_rate': 0, # TODO
'men_cot': 0, # TODO
'exonerated': exonerated,
'arl_code': arl_code,
'arl_risk': arl_risk,
'k_start': k_start,
'k_end': k_end,
'vsp_start': vsp_start,
'sln_start': sln_start,
'sln_end': sln_end,
'ige_start': ige_start,
'ige_end': ige_end,
'lma_start': lma_start,
'lma_end': lma_end,
'vac_start': vac_start,
'vac_end': vac_end,
'vct_start': False, # TODO
'vct_end': False, # TODO
'atep_start': atep_start,
'atep_end': atep_end,
'other_ibc': other_ibc,
'w_hours': w_hours,
'wage_type':wage_type,
}
lines.append(data)
i += 1
bar = orm.progress_bar(i, j, bar, emp[0])
orm.direct_create(self._cr, self._uid, 'hr_contribution_form_line', lines)
self.error_log = error_log
    @api.multi
    def generate_pila(self):
        """Render this contribution form as a PILA flat text file.

        Builds one fixed-width header record ('01') followed by one body
        record ('02') per form line, padding/aligning every field with
        ``prep_field``.  The result is base64-encoded into ``self.file``
        and also returned for inspection.

        :return: the generated flat-file content as a string.
        :raises Warning: for form types other than 'E' (not yet supported).
        """
        total_text = ''
        break_line = '\r\n'
        # ----- HEADER ----- #
        # 1-based field indexing: slot 0 is never written and stays ''.
        hl = [''] * (22 + 1)
        # 1: Record type
        hl[1] = '01'
        # 2: Form modality
        hl[2] = '1'
        # 3: Sequence # TODO emits 0001; must validate it is always the same
        hl[3] = '0001'
        # 4: Contributor name or business name
        hl[4] = prep_field(self.env.user.company_id.partner_id.name, size=200)
        # 5: Contributor document type # TODO hard-coded to document type NIT
        hl[5] = 'NI'
        # 6: Contributor identification number
        hl[6] = prep_field(self.env.user.company_id.partner_id.ref, size=16)
        # 7: Check digit
        hl[7] = str(self.env.user.company_id.partner_id.dev_ref)
        # 8: Form type
        hl[8] = self.form_type
        # 9: Number of the payment form associated with this one # TODO review form types N and F
        if self.form_type in ['E']:
            hl[9] = prep_field(" ", size=10)
        else:
            raise Warning("Tipo de planilla no soportada temporalmente")
        # 10: Date of the payment form associated with this one
        if self.form_type not in ['N', 'F']:
            hl[10] = prep_field(" ", size=10)
        else:
            raise Warning("Tipo de planilla no soportada temporalmente")
        # 11: Presentation mode # TODO temporarily single presentation mode
        hl[11] = prep_field(self.presentation, size=1)
        # 12: Branch code # TODO see field 11
        hl[12] = prep_field(self.branch_code, size=10)
        # 13: Branch name
        # NOTE(review): fills the *name* field with branch_code — confirm intended.
        hl[13] = prep_field(self.branch_code, size=40)
        # 14: Code of the ARL the contributor is affiliated to
        hl[14] = prep_field(self.env.user.company_id.arl_id.codigo_arl, size=6)
        # 15: Payment period for systems other than health (YYYY-MM)
        hl[15] = prep_field(self.period_id.start_period[0:7], size=7)
        # 16: Payment period for the health system (one month ahead)
        pay_ref_date = datetime.strptime(self.period_id.start_period, "%Y-%m-%d") + relativedelta(months=1)
        pay_month = datetime.strftime(pay_ref_date, "%Y-%m")
        hl[16] = prep_field(pay_month, size=7)
        # 17: PILA filing number (assigned by the operator's system)
        hl[17] = prep_field(" ", size=10)
        # 18: Payment date (yyyy-mm-dd) (assigned by the operator's system)
        hl[18] = prep_field(" ", size=10)
        # 19: Total number of employees (one row per distinct employee)
        emp_count_q = ("SELECT count(hc.employee_id) FROM pila_contract_rel rel "
                       "INNER JOIN hr_contract hc on hc.id = rel.contract_id "
                       "INNER JOIN hr_employee he on he.id = hc.employee_id "
                       "WHERE rel.pila_id = {pila} "
                       "GROUP by hc.employee_id".format(pila=self.id))
        emp_count = orm.fetchall(self._cr, emp_count_q)
        hl[19] = prep_field(len(emp_count), align='right', fill='0', size=5)
        # 20: Total payroll value
        ibp_sum = sum([x.ccf_ibc for x in self.form_line_ids])
        hl[20] = prep_field(int(ibp_sum), align='right', fill='0', size=12)
        # 21: Contributor type
        hl[21] = prep_field("1", size=2)
        # 22: Information operator code
        hl[22] = prep_field(" ", size=2)
        for x in hl:
            total_text += x
        total_text += break_line
        # ----- BODY ----- #
        i, j = 0, len(self.form_line_ids)
        bar = orm.progress_bar(i, j)
        seq = 0
        for l in self.form_line_ids:
            seq += 1
            employee = l.employee_id
            ref_type = employee.partner_id.ref_type.code
            # 1-based field indexing per record; slot 0 stays ''.
            bl = [''] * (98 + 1)
            # 1: Record type
            bl[1] = '02'
            # 2: Sequence
            bl[2] = prep_field(seq, align='right', fill='0', size=5)
            # 3: Contributor document type
            bl[3] = prep_field(ref_type, size=2)
            # 4: Contributor identification number
            bl[4] = prep_field(employee.partner_id.ref, size=16)
            # 5: Contributor type (code '51' is reported as '01')
            bl[5] = prep_field(l.contract_id.fiscal_type_id.code if l.contract_id.fiscal_type_id.code != '51' else '01',
                               size=2)
            # 6: Contributor subtype
            bl[6] = prep_field(l.contract_id.fiscal_subtype_id.code or '00', size=2)
            # 7: Foreigner not required to contribute to pension
            foreign = False
            # foreign = employee.partner_id.country_id.code != 'CO' and ref_type in ('CE', 'PA', 'CD')
            bl[7] = 'X' if foreign else ' '
            # 8: Colombian working abroad
            is_col = True if ref_type in ('CC', 'TI') and employee.partner_id.country_id.code == 'CO' else False
            in_ext = False
            if l.contract_id.cuidad_desempeno:
                in_ext = True if l.contract_id.cuidad_desempeno.provincia_id.country_id.code != 'CO' else False
            bl[8] = 'X' if is_col and in_ext else ' '
            # 9: Department code of the work location
            bl[9] = prep_field(l.contract_id.cuidad_desempeno.provincia_id.code, size=2)
            # 10: Municipality code of the work location
            bl[10] = prep_field(l.contract_id.cuidad_desempeno.code, size=3)
            # 11: First surname (upper-cased, accents and dots stripped)
            if employee.partner_id.primer_apellido:
                pap = strip_accents(employee.partner_id.primer_apellido.upper()).replace(".", "")
                bl[11] = prep_field(pap, size=20)
            else:
                bl[11] = prep_field(' ', size=20)
            # 12: Second surname
            if employee.partner_id.segundo_apellido:
                sap = strip_accents(employee.partner_id.segundo_apellido.upper()).replace(".", "")
                bl[12] = prep_field(sap, size=30)
            else:
                bl[12] = prep_field(' ', size=30)
            # 13: First name
            if employee.partner_id.primer_nombre:
                pno = strip_accents(employee.partner_id.primer_nombre.upper()).replace(".", "")
                bl[13] = prep_field(pno, size=20)
            else:
                bl[13] = prep_field(' ', size=20)
            # 14: Middle name(s)
            if employee.partner_id.otros_nombres:
                sno = strip_accents(employee.partner_id.otros_nombres.upper()).replace(".", "")
                bl[14] = prep_field(sno, size=30)
            else:
                bl[14] = prep_field(' ', size=30)
            # 15: Hire (ING)
            bl[15] = 'X' if l.ing else ' '
            # 16: Termination (RET)
            bl[16] = 'X' if l.ret else ' '
            # 17: Transfer from another EPS
            bl[17] = 'X' if l.tde else ' '
            # 18: Transfer to another EPS
            bl[18] = 'X' if l.tae else ' '
            # 19: Transfer from another pension fund
            bl[19] = 'X' if l.tdp else ' '
            # 20: Transfer to another pension fund
            bl[20] = 'X' if l.tap else ' '
            # 21: Permanent salary variation
            bl[21] = 'X' if l.vsp else ' '
            # 22: Corrections
            bl[22] = 'X' if l.fixes else ' '
            # 23: Temporary salary variation
            bl[23] = 'X' if l.vst else ' '
            # 24: Temporary contract suspension
            bl[24] = 'X' if l.sln else ' '
            # 25: Temporary disability due to general illness
            bl[25] = 'X' if l.ige else ' '
            # 26: Maternity or paternity leave
            bl[26] = 'X' if l.lma else ' '
            # 27: Vacation / paid leave
            # NOTE(review): writes the raw ``vac`` value rather than 'X',
            # unlike the sibling flags — confirm this matches the layout.
            bl[27] = l.vac if l.vac else ' '
            # 28: Voluntary contribution
            bl[28] = 'X' if l.avp else ' '
            # 29: Work center change
            bl[29] = 'X' if l.vct else ' '
            # 30: Days of disability due to occupational illness
            bl[30] = prep_field("{:02.0f}".format(l.irl), align='right', fill='0', size=2)
            # 31: Pension fund administrator code
            bl[31] = prep_field(l.afp_code, size=6)
            # 32: Code of the pension fund the member transfers to #TODO
            bl[32] = prep_field(l.afp_to_code, size=6)
            # 33: Code of the EPS the member belongs to
            bl[33] = prep_field(l.eps_code, size=6)
            # 34: Code of the EPS the member transfers to
            bl[34] = prep_field(l.eps_to_code, size=6)
            # 35: Code of the CCF the member belongs to
            bl[35] = prep_field(l.ccf_code, size=6)
            # 36: Days contributed to pension
            bl[36] = prep_field("{:02.0f}".format(l.pens_days), align='right', fill='0', size=2)
            # 37: Days contributed to health
            bl[37] = prep_field("{:02.0f}".format(l.eps_days), align='right', fill='0', size=2)
            # 38: Days contributed to ARL
            bl[38] = prep_field("{:02.0f}".format(l.arl_days), align='right', fill='0', size=2)
            # 39: Days contributed to CCF
            bl[39] = prep_field("{:02.0f}".format(l.ccf_days), align='right', fill='0', size=2)
            # 40: Base salary
            bl[40] = prep_field("{:09.0f}".format(l.wage), align='right', fill='0', size=9)
            # 41: Integral salary indicator, resolution 454
            bl[41] = l.wage_type
            # 42: Pension contribution base (IBC)
            bl[42] = prep_field("{:09.0f}".format(l.pens_ibc), align='right', fill='0', size=9)
            # 43: Health IBC
            bl[43] = prep_field("{:09.0f}".format(l.eps_ibc), align='right', fill='0', size=9)
            # 44: ARL IBC
            bl[44] = prep_field("{:09.0f}".format(l.arl_ibc), align='right', fill='0', size=9)
            # 45: CCF IBC
            bl[45] = prep_field("{:09.0f}".format(l.ccf_ibc), align='right', fill='0', size=9)
            # 46: Pension contribution rate
            bl[46] = prep_field("{:01.5f}".format(l.pens_rate), align='right', fill='0', size=7)
            # 47: Pension contribution
            bl[47] = prep_field("{:09.0f}".format(l.pens_cot), align='right', fill='0', size=9)
            # 48: Member voluntary contributions
            bl[48] = prep_field("{:09.0f}".format(l.ap_vol_contributor), align='right', fill='0', size=9)
            # 49: Employer voluntary contributions
            bl[49] = prep_field("{:09.0f}".format(l.ap_vol_company), align='right', fill='0', size=9)
            # 50: Total pension contribution
            bl[50] = prep_field("{:09.0f}".format(l.pens_total), align='right', fill='0', size=9)
            # 51: Solidarity fund contributions
            bl[51] = prep_field("{:09.0f}".format(l.fsol), align='right', fill='0', size=9)
            # 52: Subsistence fund contributions
            bl[52] = prep_field("{:09.0f}".format(l.fsub), align='right', fill='0', size=9)
            # 53: Amount not withheld due to voluntary contributions
            bl[53] = prep_field("{:09.0f}".format(l.ret_cont_vol), align='right', fill='0', size=9)
            # 54: Health contribution rate
            bl[54] = prep_field("{:01.5f}".format(l.eps_rate), align='right', fill='0', size=7)
            # 55: Health contributions
            bl[55] = prep_field("{:09.0f}".format(l.eps_cot), align='right', fill='0', size=9)
            # 56: Total additional UPC
            bl[56] = prep_field("{:09.0f}".format(l.ups), align='right', fill='0', size=9)
            # 57: Disability authorization number
            bl[57] = prep_field(l.aus_auth, size=15)
            # 58: General-illness disability amount
            # NOTE(review): reads ``gd_amount`` while the compute step fills a
            # key spelled 'gd_amohnt' — confirm the field name on the line model.
            bl[58] = prep_field("{:09.0f}".format(l.gd_amount), align='right', fill='0', size=9)
            # 59: Maternity leave authorization number
            bl[59] = prep_field(l.mat_auth, size=15)
            # 60: Maternity leave amount
            bl[60] = prep_field("{:09.0f}".format(l.mat_amount), align='right', fill='0', size=9)
            # 61: Occupational risk (ARL) contribution rate
            bl[61] = prep_field("{:01.5f}".format(l.arl_rate), align='right', fill='0', size=9)
            # 62: Work center
            bl[62] = prep_field(l.work_center, align='right', fill='0', size=9)
            # 63: Mandatory ARL contribution
            bl[63] = prep_field("{:09.0f}".format(l.arl_cot), align='right', fill='0', size=9)
            # 64: CCF contribution rate
            bl[64] = prep_field("{:01.5f}".format(l.ccf_rate), align='right', fill='0', size=7)
            # 65: CCF contributions
            bl[65] = prep_field("{:09.0f}".format(l.ccf_cot), align='right', fill='0', size=9)
            # 66: SENA rate
            bl[66] = prep_field("{:01.5f}".format(l.sena_rate), align='right', fill='0', size=7)
            # 67: SENA contributions
            bl[67] = prep_field("{:09.0f}".format(l.sena_cot), align='right', fill='0', size=9)
            # 68: ICBF rate
            bl[68] = prep_field("{:01.5f}".format(l.icbf_rate), align='right', fill='0', size=7)
            # 69: ICBF contributions
            bl[69] = prep_field("{:09.0f}".format(l.icbf_cot), align='right', fill='0', size=9)
            # 70: ESAP rate
            bl[70] = prep_field("{:01.5f}".format(l.esap_rate), align='right', fill='0', size=7)
            # 71: ESAP contributions
            bl[71] = prep_field("{:09.0f}".format(l.esap_cot), align='right', fill='0', size=9)
            # 72: MEN rate
            bl[72] = prep_field("{:01.5f}".format(l.men_rate), align='right', fill='0', size=7)
            # 73: MEN contributions
            bl[73] = prep_field("{:09.0f}".format(l.men_cot), align='right', fill='0', size=9)
            # 74: Main contributor document type
            bl[74] = prep_field(' ', size=2)
            # 75: Main contributor document number
            bl[75] = prep_field(' ', size=16)
            # 76: Exonerated from parafiscal and health contributions
            bl[76] = 'S' if l.exonerated else 'N'
            # 77: Occupational risk administrator (ARL) code
            bl[77] = prep_field(l.arl_code, size=6)
            # 78: Risk class of the member
            bl[78] = prep_field(l.arl_risk, size=1)
            # 79: Special pension rate indicator
            bl[79] = prep_field(' ', size=1)
            # 80: Hire date
            bl[80] = prep_field(l.k_start, size=10)
            # 81: Termination date
            bl[81] = prep_field(l.k_end, size=10)
            # 82: VSP start date
            bl[82] = prep_field(l.vsp_start, size=10)
            # 83: SLN start date
            bl[83] = prep_field(l.sln_start, size=10)
            # 84: SLN end date
            bl[84] = prep_field(l.sln_end, size=10)
            # 85: IGE start date
            bl[85] = prep_field(l.ige_start, size=10)
            # 86: IGE end date
            bl[86] = prep_field(l.ige_end, size=10)
            # 87: LMA start date
            bl[87] = prep_field(l.lma_start, size=10)
            # 88: LMA end date
            bl[88] = prep_field(l.lma_end, size=10)
            # 89: VAC start date
            bl[89] = prep_field(l.vac_start, size=10)
            # 90: VAC end date
            bl[90] = prep_field(l.vac_end, size=10)
            # 91/92: VCT start/end dates
            bl[91] = prep_field(l.vct_start, size=10)
            bl[92] = prep_field(l.vct_end, size=10)
            # 93: ATEP start date
            bl[93] = prep_field(l.atep_start, size=10)
            # 94: ATEP end date
            bl[94] = prep_field(l.atep_end, size=10)
            # 95: IBC other parafiscals
            bl[95] = prep_field("{:09.0f}".format(l.other_ibc), align='right', fill='0', size=9)
            # 96: Number of worked hours
            bl[96] = prep_field("{:03.0f}".format(l.w_hours), align='right', fill='0', size=3)
            # 97: filler. NOTE(review): slot 98 is allocated but never written — confirm against the layout.
            bl[97] = prep_field('', size=10)
            i += 1
            bar = orm.progress_bar(i, j, bar)
            for x in bl:
                total_text += x
            total_text += break_line
        # decode and generate txt
        final_content = strip_accents(total_text.encode('utf-8', 'replace').decode('utf-8'))
        file_text = base64.b64encode(final_content)
        self.write({'file': file_text})
        return total_text
@api.multi
def load_contract(self):
self._cr.execute("DELETE FROM pila_contract_rel where pila_id = %s" % self.id)
if self.group_id:
groupwh = " AND hc.group_id = {group} ".format(group=self.group_id.id)
else:
groupwh = " "
active = """
SELECT hc.id FROM hr_contract hc
INNER JOIN hr_payslip hp ON hp.contract_id = hc.id
WHERE hp.liquidacion_date BETWEEN '{date_from}' AND '{date_to}'
{groupwh}
and hc.id not in (
select contract_id
from pila_contract_rel
where pila_id in (select id from hr_contribution_form where period_id = {periodo}) )
GROUP BY hc.id""".format(date_from=self.period_id.start_period,
date_to=self.period_id.end_period,
groupwh=groupwh,
periodo=self.period_id.id)
ca = [x[0] for x in orm.fetchall(self._cr, active)]
for contract in ca:
self._cr.execute("INSERT into pila_contract_rel (pila_id, contract_id) VALUES ({pila}, {contract})".format(
pila=self.id, contract=contract))
return True
@api.multi
def load_pending(self):
self._cr.execute("DELETE FROM pila_contract_rel where pila_id = %s" % self.id)
if self.group_id:
groupwh = " AND hc.group_id = {group} ".format(group=self.group_id.id)
else:
groupwh = " "
calculated = ("SELECT hcfl.contract_id from hr_contribution_form_line hcfl "
"LEFT JOIN hr_contribution_form hcf on hcf.id = hcfl.contribution_id "
"WHERE hcf.period_id = {period} "
"group by hcfl.contract_id".format(period=self.period_id.id))
clc = tuple([x[0] for x in orm.fetchall(self._cr, calculated)] + [0])
active = ("SELECT hc.id FROM hr_contract hc "
"INNER JOIN hr_payslip hp ON hp.contract_id = hc.id "
"WHERE hp.liquidacion_date BETWEEN '{date_from}' AND '{date_to}' "
"AND hc.id not in {clc} "
"{groupwh} GROUP BY hc.id".format(date_from=self.period_id.start_period,
date_to=self.period_id.end_period,
clc=clc,
groupwh=groupwh))
ca = [x[0] for x in orm.fetchall(self._cr, active)]
for contract in ca:
self._cr.execute("INSERT into pila_contract_rel (pila_id, contract_id) VALUES ({pila}, {contract})".format(
pila=self.id, contract=contract))
return True
@api.multi
def get_acc_type(self, contract_id):
kt = contract_id.type_id
acc = kt.type_class + "_" + kt.section[0:3]
return acc
    @api.multi
    def draft_contform(self):
        """Reset the form to draft, deleting its posted accounting entry.

        Removes, via low-level fast deletes, the analytic lines and move
        lines attached to this form's account move, then the move itself,
        and finally clears ``move_id`` on the form.
        """
        self.state = 'draft'
        if self.move_id:
            account_move_line_sel ="""
            select id from account_move_line where move_id = {asiento}
            """.format(asiento=self.move_id.id)
            account_move_line = [x[0] for x in orm.fetchall(self._cr, account_move_line_sel)]
            if account_move_line:
                # Pad a one-element list with a dummy 0 so the tuple renders
                # as valid SQL (avoids the trailing-comma "(id,)" form).
                account_move_line_tuple = tuple(account_move_line if len(account_move_line) > 1 else [account_move_line[0],0])
                analytic_lines_sel = """
                select id from account_analytic_line where move_id in {moves}
                """.format(moves=account_move_line_tuple)
                analytic_lines = [x[0] for x in orm.fetchall(self._cr, analytic_lines_sel)]
                if analytic_lines:
                    orm.fast_delete(self._cr, 'account_analytic_line', ('id', analytic_lines))
                orm.fast_delete(self._cr, 'account_move_line', ('id', account_move_line))
            orm.fast_delete(self._cr, 'account_move', ('id', self.move_id.id))
            self._cr.execute('update hr_contribution_form set move_id = null where id = {pila}'.format(pila=self.id))
@api.multi
def close_contform(self):
liquid_date = self.period_id.end_period
start_date = self.period_id.start_period
start_date_tmp = datetime.strftime(
datetime.strptime(start_date, "%Y-%m-%d") - relativedelta(months=1),
"%Y-%m-%d")
account_period = self.env['account.period'].find(liquid_date)[0]
po = self.env['hr.payslip']
smmlv = self.env['variables.economicas'].getValue('SMMLV', liquid_date) or 0.0
if not self.move_id_name:
journal_seq = self.journal_id.sequence_id
name = self.env['ir.sequence'].next_by_id(journal_seq.id)
self.move_id_name = name
else:
name = self.move_id_name
move_data = {
'narration': "APORTES {p}".format(p=self.period_id.name),
'date': liquid_date,
'name': name,
'ref': self.name,
'journal_id': self.journal_id.id,
'period_id': account_period.id,
'partner_id': self.env.user.company_id.partner_id.id,
'state': 'posted'
}
move_id = orm.direct_create(self._cr, self._uid, 'account_move', [move_data], company=True)[0][0]
self.move_id = self.env['account.move'].browse(move_id)
p_query = ("SELECT contract_id, "
"sum(pens_total) + sum(fsol) + sum(fsub) - sum(ap_vol_contributor) - sum(ap_vol_company) as pens, sum(eps_cot) as eps, sum(arl_cot) as arl, "
"sum(ccf_cot) as ccf, sum(sena_cot) as sena, sum(icbf_cot) as icbf "
"from hr_contribution_form_line "
"WHERE contribution_id = {cont} "
"GROUP BY contract_id".format(cont=self.id))
hcfl = orm.fetchall(self._cr, p_query)
ap_template = {
'reg_adm_debit': False, 'reg_com_debit': False, 'reg_ope_debit': False,
'int_adm_debit': False, 'int_com_debit': False, 'int_ope_debit': False,
'apr_adm_debit': False, 'apr_com_debit': False, 'apr_ope_debit': False,
'reg_adm_credit': False, 'reg_com_credit': False, 'reg_ope_credit': False,
'int_adm_credit': False, 'int_com_credit': False, 'int_ope_credit': False,
'apr_adm_credit': False, 'apr_com_credit': False, 'apr_ope_credit': False,
'partner_type': False,
}
ap_concepts = {'AP_PENS': ap_template.copy(),
'AP_EPS': ap_template.copy(),
'AP_ARL': ap_template.copy(),
'AP_CCF': ap_template.copy(),
'AP_SENA': ap_template.copy(),
'AP_ICBF': ap_template.copy()
}
for apc in ap_concepts:
concept_id = self.env['hr.concept'].search([('code', '=', apc)])
if not concept_id:
raise Warning("No se ha encontrado el concepto {c} necesario para "
"la consulta de cuentas para la causacion de aportes".format(c=apc))
for acc in ap_concepts[apc]:
ap_concepts[apc][acc] = getattr(concept_id, '{a}'.format(a=acc))
ap_concepts[apc]['concept_id'] = concept_id
aml_data = []
for kdata in hcfl:
index = 1
contract_id = self.env['hr.contract'].browse(kdata[0])
aa_id = contract_id.analytic_account_id
employee_id = contract_id.employee_id
for apc in ap_concepts:
partner_type = ap_concepts[apc]['partner_type']
if partner_type == 'eps':
c_partner = contract_id.eps
elif partner_type == 'arl':
c_partner = contract_id.arl
elif partner_type == 'caja':
c_partner = contract_id.cajacomp
elif partner_type == 'cesantias':
c_partner = contract_id.cesantias
elif partner_type == 'pensiones':
c_partner = contract_id.pensiones
elif partner_type == 'other':
c_partner = ap_concepts[apc]['concept_id'].partner_other
else:
c_partner = employee_id.partner_id
apc_amount = kdata[index]
acc_type = self.get_acc_type(contract_id)
debit_account = ap_concepts[apc][acc_type+'_debit']
credit_account = ap_concepts[apc][acc_type+'_credit']
pyg = [4, 5, 6, 7, 8]
tot_ded = 0
if index == 1: # PENSION
ded_pens = po.get_interval_concept('DED_PENS', start_date, liquid_date, contract_id.id)
fsol = po.get_interval_concept('FOND_SOL', start_date, liquid_date, contract_id.id)
fsub = po.get_interval_concept('FOND_SUB', start_date, liquid_date, contract_id.id)
tot_pens = ded_pens + fsol + fsub
tot_ded = sum([x[1] for x in tot_pens])
elif index == 2: # EPS
ded_eps = po.get_interval_concept('DED_EPS', start_date, liquid_date, contract_id.id)
tot_ded = sum([x[1] for x in ded_eps])
if apc_amount and apc_amount - rp(tot_ded) > 0:
global_ibc = orm.fetchall(self._cr, "select global_ibc from hr_contribution_form_line where contract_id = {contract} and contribution_id = {contribution} limit 1".format(
contract=contract_id.id, contribution=self.id))
if not global_ibc:
raise Warning("Como putas el contrato {contract} en esta PILA no tiene ibc global ????? Sea serio calcule primero y luego cause".format(contract=contract_id.name))
if not (global_ibc[0][0] >= 10 * smmlv or contract_id.type_id.type_class == 'int' or contract_id.fiscal_type_id.code in ('12', '19')):
ded_eps = po.get_interval_concept('DED_EPS', start_date_tmp, liquid_date, contract_id.id)
tot_ded_previos = sum([rp(x[1]) for x in ded_eps])
ap_previos = """
select sum(HCFL.eps_cot) from hr_contribution_form_line as HCFL
inner join hr_contribution_form as HCF on HCF.id = HCFL.contribution_id
inner join payslip_period as PP on PP.id = HCF.period_id
where HCFL.contract_id = {contract}
and (HCF.state = 'closed' or HCF.id = {HCF_id})
and PP.start_period >= '{sp}' and PP.end_period <= '{ep}'
""".format(contract=contract_id.id, HCF_id=self.id,sp=start_date_tmp, ep=liquid_date)
ap_previos = sum([rp(x[0]) for x in orm.fetchall(self._cr, ap_previos) if x[0]])
if tot_ded_previos == ap_previos:
apc_amount, tot_ded = tot_ded_previos, tot_ded_previos
else:
apc_amount, tot_ded = ap_previos, tot_ded_previos
amount = apc_amount - tot_ded
if amount > 0:
# DEBIT - GASTOS
if not debit_account:
raise Warning(u"No se ha definido una cuenta debito para el "
u"concepto {c}".format(c=ap_concepts[apc]['concept_id'].name))
aml_data.append({
'name': ap_concepts[apc]['concept_id'].name,
'ref1': ap_concepts[apc]['concept_id'].code,
'date': liquid_date,
'ref': employee_id.partner_id.ref,
'partner_id': c_partner.id,
'account_id': debit_account.id,
'journal_id': self.journal_id.id,
'period_id': account_period.id,
'debit': amount,
'credit': 0,
'analytic_account_id': aa_id.id if debit_account.code[0] in pyg else False,
'tax_code_id': False,
'tax_amount': 0,
'move_id': self.move_id.id,
'state': 'valid',
'date_maturity': liquid_date,
'contract_id': contract_id.id
})
# CREDIT CxC 23
if not credit_account:
raise Warning(u"No se ha definido una cuenta credito para el "
u"concepto {c}".format(c=ap_concepts[apc]['concept_id'].name))
aml_data.append({
'name': ap_concepts[apc]['concept_id'].name,
'ref1': ap_concepts[apc]['concept_id'].code,
'date': liquid_date,
'ref': employee_id.partner_id.ref,
'partner_id': c_partner.id,
'account_id': credit_account.id,
'journal_id': self.journal_id.id,
'period_id': account_period.id,
'debit': 0,
'credit': amount,
'analytic_account_id': aa_id.id if credit_account.code[0] in pyg else False,
'tax_code_id': False,
'tax_amount': 0,
'move_id': self.move_id.id,
'state': 'valid',
'date_maturity': liquid_date,
'contract_id': contract_id.id
})
index += 1
orm.direct_create(self._cr, self._uid, 'account_move_line', aml_data, company=True, progress=True)
self.state = 'closed'
self.create_distribition_analytic(self.move_id.id)
return
    def create_distribition_analytic(self, move_id):
        """Create the analytic lines for every line of ``move_id``.

        For each move line: if the employee has roster-close distribution
        records in the line's period (hr_roster module installed), one
        analytic line is created per distribution record, weighted by its
        rate; otherwise a single analytic line is created on the contract's
        analytic account and the move line is updated to point at it.
        Balance-sheet accounts (class <= 3) are skipped unless the company
        enables ``config_analytic_global``.

        :param move_id: id of the posted account move to distribute.
        :return: True
        :raises Warning: if a move line lacks a contract, or the contract
            lacks an employee or analytic account.
        """
        move_line_ids = self.env['account.move.line'].search([('move_id','=',move_id)])
        # Optional-module detection done straight against ir_module_module.
        is_hr_roster = orm.fetchall(self._cr,"select id from ir_module_module where state = 'installed' and name = 'hr_roster'")
        is_analytic_cvs = orm.fetchall(self._cr,"select id from ir_module_module where state = 'installed' and name = 'account_analytic_cvs'")
        distribucion_analitica = self.env['hr.roster.close.distribution'] if is_hr_roster else False
        # Some databases carry an extra partner_aaa column on analytic lines.
        partner_aaa = orm.fetchall(self._cr, "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'account_analytic_line' and column_name = 'partner_aaa'")
        analytic_lines_data = []
        for move_line in move_line_ids:
            # Skip balance-sheet accounts unless global analytics is enabled.
            if int(move_line.account_id.code[0]) <= 3 and not self.env.user.company_id.config_analytic_global:
                continue
            if not move_line.contract_id:
                raise Warning("El movimieto < {m} > no tiene un contrato asociado".format(m=move_line.name))
            contrato = move_line.contract_id
            if not contrato.employee_id:
                raise Warning("El contrato < {c} > no tiene un empleado asociado".format(c=move_line.contract_id.name))
            employee_id = contrato.employee_id
            if distribucion_analitica:
                distri_employee = distribucion_analitica.search([('employee_id','=',employee_id.id), ('date', '>=', move_line.period_id.date_start),('date', '<=', move_line.period_id.date_stop)])
            else:
                distri_employee = []  # empty list so the loop over distri_employee below is a no-op
            if not distri_employee:
                # No distribution: one analytic line on the contract's account.
                if not contrato.analytic_account_id:
                    raise Warning("El contrato < {c} > no tiene una cuenta analitica asociada".format(c=contrato.name))
                self._cr.execute('update account_move_line set analytic_account_id = {AA} where id = {AML}'.format(
                    AA=contrato.analytic_account_id.id, AML=move_line.id))
                analytic_line = {
                    'name': move_line.name,
                    'account_id': contrato.analytic_account_id.id,
                    'journal_id': move_line.journal_id.analytic_journal_id.id,
                    'user_id': self._uid,
                    'date': move_line.date,
                    'ref': move_line.ref,
                    'amount': (move_line.credit - move_line.debit),
                    'general_account_id': move_line.account_id.id,
                    'move_id': move_line.id,
                    'cc1': contrato.analytic_account_id.cc1 if not is_analytic_cvs else contrato.analytic_account_id.regional_id.name,
                    'cc2': contrato.analytic_account_id.cc2 if not is_analytic_cvs else contrato.analytic_account_id.city_id.name,
                    'cc3': contrato.analytic_account_id.cc3 if not is_analytic_cvs else contrato.analytic_account_id.linea_servicio_id.name,
                    'cc4': contrato.analytic_account_id.cc4 if not is_analytic_cvs else contrato.analytic_account_id.sede,
                    'cc5': contrato.analytic_account_id.cc5 if not is_analytic_cvs else contrato.analytic_account_id.puesto,
                }
                if partner_aaa:
                    analytic_line['partner_aaa'] = contrato.analytic_account_id.partner_id.id
                analytic_lines_data.append(analytic_line)
            # Distribution present: one analytic line per record, rate-weighted.
            for dis_emp in distri_employee:
                analytic_line = {
                    'name': move_line.name,
                    'account_id': dis_emp.analytic_account_id.id,
                    'journal_id': move_line.journal_id.analytic_journal_id.id,
                    'user_id': self._uid,
                    'date': move_line.date,
                    'ref': move_line.ref,
                    'amount': (move_line.credit - move_line.debit)*dis_emp.rate/100,
                    'general_account_id': move_line.account_id.id,
                    'move_id': move_line.id,
                    'cc1': dis_emp.analytic_account_id.cc1 if not is_analytic_cvs else dis_emp.analytic_account_id.regional_id.name,
                    'cc2': dis_emp.analytic_account_id.cc2 if not is_analytic_cvs else dis_emp.analytic_account_id.city_id.name,
                    'cc3': dis_emp.analytic_account_id.cc3 if not is_analytic_cvs else dis_emp.analytic_account_id.linea_servicio_id.name,
                    'cc4': dis_emp.analytic_account_id.cc4 if not is_analytic_cvs else dis_emp.analytic_account_id.sede,
                    'cc5': dis_emp.analytic_account_id.cc5 if not is_analytic_cvs else dis_emp.analytic_account_id.puesto,
                }
                if partner_aaa:
                    analytic_line['partner_aaa'] = dis_emp.analytic_account_id.partner_id.id
                analytic_lines_data.append(analytic_line)
        orm.direct_create(self._cr, self._uid, 'account_analytic_line', analytic_lines_data, company=True)
        return True
    def get_contract_repeated(self):
        """Raise if any contract appears in several forms of this period.

        Looks in ``pila_contract_rel`` for contracts linked to more than
        one contribution form of ``period_id`` (optionally restricted to
        this form's ``contract_ids``) and raises a Warning listing their
        contract names.
        """
        if self.contract_ids.ids:
            # Pad a one-element list with a dummy 0 so the tuple renders as
            # valid SQL (avoids the trailing-comma "(id,)" form).
            contracts_ids = tuple( self.contract_ids.ids if len(self.contract_ids.ids) > 1 else [self.contract_ids.ids[0],0])
            contracts_ids = 'and contract_id in ' + str(contracts_ids)
        else:
            contracts_ids = ""
        get_contract_repeated_sel = """
        select name
        from hr_contract
        where id in (
            select contract_id from pila_contract_rel where pila_id in (select id from hr_contribution_form where period_id = {periodo})
            {contracts}
            group by contract_id
            having count(pila_id) > 1) """.format(periodo=self.period_id.id, pila=self.id, contracts=contracts_ids)
        contract_repeated = [str(x[0]) for x in orm.fetchall(self._cr, get_contract_repeated_sel)]
        if contract_repeated:
            raise Warning ('Error, hay contratos que estan en varias autoliquidaciones en el mismo periodo, por favor validar los siguientes nombres de contratos: {ids}'.format(ids=contract_repeated))
| odoopruebasmp/Odoo_08 | v8_llevatelo/hr_payroll_extended/models/hr_contribution_form.py | hr_contribution_form.py | py | 82,426 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.today",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "calendar.monthrange",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "unicode... |
from gtts import gTTS
import speech_recognition as sr
import os
import time
import webbrowser

# Voice-controlled player: speaks a prompt, listens for the phrase
# "play music", then asks for a Coldplay song title and opens the
# matching video on YouTube.

r = sr.Recognizer()

order = "what can i do for you?"
tts = gTTS(order)
tts.save("order.mp3")
os.startfile(r"C:\Users\Yodi\PycharmProjects\try\order.mp3")
time.sleep(3)

# Song titles paired positionally with their YouTube video ids.
arr = [["Paradise", "someone"], ["kZ2xL_Nuzag", "XsHWQdriEO8"]]
with sr.Microphone() as source:
    print(order)
    audio = r.listen(source)
    try:
        text = r.recognize_google(audio)
        print("command: {}".format(text))
        # FIX: the original ``any(word in "play music" for word in text)``
        # iterated the *characters* of the command and was true for almost
        # any input; match the intended phrase instead.
        if "play music" in text.lower():
            print("Music Option is Activated")
            songorder = "which coldplay song should i play?"
            tts = gTTS(songorder)
            tts.save("indonesia.mp3")
            os.startfile(r"C:\Users\Yodi\PycharmProjects\try\indonesia.mp3")
            time.sleep(3)
            try:
                print("mention the song")
                audio = r.listen(source)
                song = r.recognize_google(audio)
                print("Song Selected: {}".format(song))
                # FIX: pair titles with ids via zip (the original indexed
                # both rows with a loop over len(arr)) and compare
                # case-insensitively (``song.lower() in "Paradise"`` could
                # never match).
                for title, video_id in zip(arr[0], arr[1]):
                    if song.lower() in title.lower():
                        base_url = "https://www.youtube.com/watch?v="
                        main_url = base_url + video_id
                        webbrowser.open(main_url, new=2)
                        time.sleep(3)
                        print("music option exit")
            except (sr.UnknownValueError, sr.RequestError):
                # recognize_google raises these on unintelligible audio or
                # API failure; don't swallow unrelated errors.
                print("sorry could not hear your request")
    except (sr.UnknownValueError, sr.RequestError):
        print("sorry could not hear from you")
| yodifm/SpeechRecognition | SR/main.py | main.py | py | 1,657 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "speech_recognition.Recognizer",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "gtts.gTTS",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.startfile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
24213199857 | import pygame
import serial
import time
import sys
class GUI:
    """Pygame control panel driving pan/tilt servos over a serial port.

    Arrow buttons nudge the two servo values within hard-coded limits and
    send the new value as a text line to the microcontroller on COM6; the
    wide/narrow buttons pulse a third channel around its 7500 neutral.
    """
    def __init__(self):
        # screen
        pygame.init()
        self.screen = pygame.display.set_mode((350, 400))
        # Step applied to an angle value per button press.
        self.DELTA = 40
        # Neutral pulse value for the wide/narrow channel.
        # NOTE(review): never read back — the pulses below use literals.
        self.wide_value = 7500
        self.angle_y = 3890
        self.angle_x = 1230
        self.ser = serial.Serial('COM6', baudrate=9600, timeout=0.01)
        time.sleep(1)  # let the serial link settle before writing
        # creating buttons
        button_up_surface = pygame.transform.scale(pygame.image.load("up.jpg"), (50, 50))
        button_down_surface = pygame.transform.scale(pygame.image.load("down.jpg"), (50, 50))
        button_right_surface = pygame.transform.scale(pygame.image.load("right.png"), (50, 50))
        button_left_surface = pygame.transform.scale(pygame.image.load("left.jpg"), (50, 50))
        button_wide_surface = pygame.transform.scale(pygame.image.load("wide.jpg"), (100, 50))
        button_narrow_surface = pygame.transform.scale(pygame.image.load("narrow.png"), (100, 50))
        self.button_up = Button(button_up_surface, 175, 50)
        self.button_down = Button(button_down_surface, 175, 150)
        self.button_right = Button(button_right_surface, 250, 100)
        self.button_left = Button(button_left_surface, 100, 100)
        self.button_wide = Button(button_wide_surface, 175, 225)
        self.button_narrow = Button(button_narrow_surface, 175, 280)
    def listen(self):
        """Run the event loop forever: handle clicks, redraw the panel."""
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == pygame.MOUSEBUTTONDOWN:
                    # Each branch nudges an angle (the conditional expression
                    # clamps it at the hard-coded limit) and sends it as text.
                    if self.button_down.checking_input(pygame.mouse.get_pos()):
                        self.angle_y -= self.DELTA if self.angle_y >= 3880 else 0
                        self.ser.write((str(self.angle_y) + '\n').encode('utf-8'))
                    elif self.button_up.checking_input(pygame.mouse.get_pos()):
                        self.angle_y += self.DELTA if self.angle_y <= 4110 else 0
                        self.ser.write((str(self.angle_y) + '\n').encode('utf-8'))
                    elif self.button_right.checking_input(pygame.mouse.get_pos()):
                        self.angle_x -= self.DELTA if self.angle_x >= 1230 else 0
                        self.ser.write((str(self.angle_x) + '\n').encode('utf-8'))
                    elif self.button_left.checking_input(pygame.mouse.get_pos()):
                        self.angle_x += self.DELTA if self.angle_x <= 2230 else 0
                        self.ser.write((str(self.angle_x) + '\n').encode('utf-8'))
                    elif self.button_wide.checking_input(pygame.mouse.get_pos()):
                        # wider: pulse the channel to 7750, then back to 7500
                        self.ser.write((str(7750) + '\n').encode('utf-8'))
                        time.sleep(0.2)
                        self.ser.write((str(7500) + '\n').encode('utf-8'))
                    elif self.button_narrow.checking_input(pygame.mouse.get_pos()):
                        # narrower: pulse the channel to 7300, then back to 7500
                        self.ser.write((str(7300) + '\n').encode('utf-8'))
                        time.sleep(0.2)
                        self.ser.write((str(7500) + '\n').encode('utf-8'))
            self.screen.fill("white")
            # updating buttons
            self.screen.fill("white")  # NOTE(review): duplicate fill — harmless
            self.button_up.update(self.screen)
            self.button_down.update(self.screen)
            self.button_right.update(self.screen)
            self.button_left.update(self.screen)
            self.button_wide.update(self.screen)
            self.button_narrow.update(self.screen)
            pygame.display.update()
    def close_port(self):
        """Release the serial port."""
        self.ser.close()
class Button:
    """Clickable image button positioned by its center point."""

    def __init__(self, image, x_pos, y_pos):
        """Store the surface and derive its bounding rect centered at (x_pos, y_pos)."""
        self.image = image
        self.x_pos = x_pos
        self.y_pos = y_pos
        self.rect = self.image.get_rect(center=(self.x_pos, self.y_pos))

    def update(self, screen):
        """Draw the button onto *screen* at its bounding rect."""
        screen.blit(self.image, self.rect)

    def checking_input(self, position):
        """Return True if *position* (x, y) lies inside the button, else False.

        Bug fix: the original implicitly returned None on a miss; callers use
        the result in boolean context, so an explicit bool is returned now.
        The half-open bounds [left, right) x [top, bottom) match the original
        ``in range()`` membership tests.
        """
        x, y = position
        return (self.rect.left <= x < self.rect.right
                and self.rect.top <= y < self.rect.bottom)
class Controller:
    """PS-style joystick front-end that sends raw servo target values over serial."""

    def __init__(self):
        # controller
        pygame.init()
        pygame.joystick.init()
        # Raises pygame.error when no joystick is attached; the __main__
        # block relies on that to fall back to the GUI.
        self.controller = pygame.joystick.Joystick(0)
        self.DELTA = 35  # step, in raw servo units, per button poll
        # NOTE(review): same clamp ranges as GUI (1230..2230 / 3880..4110)
        # but a different starting angle_y (3880 vs 3890) - confirm intent.
        self.angle_y = 3880
        self.angle_x = 1230
        self.controller.init()
        self.ser = serial.Serial('COM6', baudrate=9600, timeout=0.01)
        time.sleep(1)  # give the serial link a moment to settle after open

    def listen(self):
        """Poll joystick buttons/triggers forever and forward targets over serial."""
        while True:
            pygame.event.get()  # pump the event queue so button state updates
            if self.controller.get_button(1):
                # right
                self.angle_x -= self.DELTA if self.angle_x >= 1230 else 0
                self.ser.write((str(self.angle_x) + '\n').encode('utf-8'))
                time.sleep(0.1)
            elif self.controller.get_button(2):
                # left
                self.angle_x += self.DELTA if self.angle_x <= 2230 else 0
                self.ser.write((str(self.angle_x) + '\n').encode('utf-8'))
                time.sleep(0.1)
            elif self.controller.get_button(3):
                # up
                self.angle_y += self.DELTA if self.angle_y <= 4110 else 0
                self.ser.write((str(self.angle_y) + '\n').encode('utf-8'))
                time.sleep(0.1)
            elif self.controller.get_button(0):
                # down
                self.angle_y -= self.DELTA if self.angle_y >= 3880 else 0
                self.ser.write((str(self.angle_y) + '\n').encode('utf-8'))
                time.sleep(0.1)
            elif round(self.controller.get_axis(5), 2) != -1.0:
                # wider: analog trigger, rest position reads -1.0
                axis_data = round(self.controller.get_axis(5), 2)
                new_value = int(self.map_data(axis_data, -1.0, 1.0, 7500, 7750))
                self.ser.write((str(new_value) + '\n').encode('utf-8'))
                time.sleep(0.1)
            elif round(self.controller.get_axis(4), 2) != -1.0:
                # narrower: the other analog trigger
                axis_data = round(self.controller.get_axis(4), 2)
                new_value = int(self.map_data(axis_data, -1.0, 1.0, 7500, 7280))
                self.ser.write((str(new_value) + '\n').encode('utf-8'))
                time.sleep(0.1)

    def close_port(self):
        """Release the serial port."""
        self.ser.close()

    def map_data(self, x, in_min, in_max, out_min, out_max):
        # converting one interval to another (linear interpolation)
        return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
if __name__ == "__main__":
    # Prefer the physical controller; when no joystick is attached
    # pygame.joystick.Joystick(0) raises pygame.error and we fall back
    # to the on-screen GUI. Ctrl-C cleanly closes the serial port.
    try:
        try:
            ps4 = Controller()
            ps4.listen()
        except KeyboardInterrupt:
            ps4.close_port()
    except pygame.error:
        try:
            gui = GUI()
            gui.listen()
        except KeyboardInterrupt:
            gui.close_port()
| chinchilla2019/bebentos | main_and_pictures/main.py | main.py | py | 7,165 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "serial.Serial... |
3666745897 | from flask import Flask, request
from flask_cors import CORS
from pingback import ping_urls
app = Flask(__name__)
cors = CORS(app, resources=r'/pingback/', methods=['POST'])
@app.route('/pingback/', methods=['GET', 'POST'])
def api():
    """Pingback endpoint: accept a JSON POST and dispatch it to handle_request.

    Non-POST requests get 405, non-JSON bodies get 400, and a malformed
    payload (ValueError from handle_request) also gets 400.
    """
    if request.method != 'POST':
        return {"error": "method don't support."}, 405
    if not request.is_json:
        return {"error": "Request not json"}, 400
    try:
        return handle_request(request.get_json())
    except ValueError:
        return {"error": "Bad Request"}, 400
def handle_request(object):  # noqa: A002 - parameter name kept for compatibility
    """Validate a pingback payload and fan the ping out to every target URL.

    *object* must be a mapping with:
      - "source_url": str, the page containing the links
      - "target_url_list": list of URLs to be pinged

    Returns whatever ping_urls() returns.
    Raises ValueError when either key is missing or has the wrong type, so
    the caller can translate it into an HTTP 400 (previously a missing key
    raised KeyError, which surfaced as an unhandled 500).
    """
    source_url = object.get("source_url")
    target_url_list = object.get("target_url_list")
    if isinstance(source_url, str) and isinstance(target_url_list, list):
        return ping_urls(source_url, target_url_list)
    raise ValueError(
        "payload must contain a str 'source_url' and a list 'target_url_list'"
    )
if __name__ == '__main__':
app.run(debug=True)
| yingziwu/pingback | api.py | api.py | py | 924 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
... |
29979204168 | import gensim
from gensim.models import Word2Vec
import gradio as gr
# Load your trained Word2Vec model
model = Word2Vec.load("word2vecsg2.model")
def recommend_ingredients(*ingredients):
    """Return up to 8 ingredients similar to the selected ones, one per line.

    Empty/None selections (unused dropdowns) are ignored; each line is
    formatted as "<ingredient>: %<similarity in percent>".
    """
    selected = [item for item in ingredients if item]
    matches = model.wv.most_similar(positive=selected, topn=8)
    lines = []
    for name, similarity in matches:
        lines.append(f"{name}: %{round(similarity*100, 2)}")
    return "\n".join(lines)
# Get the vocabulary of the model and sort it alphabetically
vocab = sorted(model.wv.index_to_key)

# Allow user to select multiple ingredients: six dropdowns sharing the
# same vocabulary; unselected ones arrive as empty values and are filtered
# out inside recommend_ingredients.
ingredient_selections = [gr.inputs.Dropdown(choices=vocab, label=f"Ingredients {i+1}") for i in range(6)]

# Create the interface
iface = gr.Interface(
    fn=recommend_ingredients,
    inputs=ingredient_selections,
    outputs="text",
    title="Ingredient Recommender",
    description="Select up to 6 ingredients to get recommendations for similar ingredients.",
    layout="vertical"
)

iface.launch()
| egecandrsn/FoodPair.v0 | app.py | app.py | py | 1,134 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "gensim.models.Word2Vec.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "gradio.inputs.Dropdown",
"line_number": 24,
"usage_type": "call"
},
{
"api_name"... |
529377311 | from email.Utils import formataddr
from zope import interface, component
from zope.app.component.hooks import getSite
from zope.traversing.browser import absoluteURL
from zojax.principal.profile.interfaces import IPersonalProfile
message = u"""
Your invitation code: %s
Or use link %s
"""
class InvitationMail(object):
    """Invitation e-mail view: fills in From/To headers and the join link.

    NOTE(review): this class has no base beyond object but calls
    super().update() and self.addHeader() - it is presumably mixed into a
    zojax mail-template view that provides both; confirm the registration.
    """

    def update(self):
        super(InvitationMail, self).update()
        context = self.context
        # Link the recipient can follow instead of typing the code manually.
        self.url = u'%s/join.html?invitationCode=%s'%(
            absoluteURL(getSite(), self.request), context.id)

        # Use the sender's profile (if any) for a friendly From address.
        profile = IPersonalProfile(self.request.principal, None)
        if profile is not None and profile.email:
            self.addHeader(u'From', formataddr((profile.title, profile.email),))
        self.addHeader(u'To', formataddr((context.name, context.principal),))

    @property
    def subject(self):
        return self.context.subject

    def render(self):
        # Invitation body followed by the code and the join URL.
        return self.context.message + message%(self.context.id, self.url)
| Zojax/zojax.personal.invitation | src/zojax/personal/invitation/template.py | template.py | py | 990 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "zope.traversing.browser.absoluteURL",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "zope.app.component.hooks.getSite",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "zojax.principal.profile.interfaces.IPersonalProfile",
"line_number": 26,... |
40322657669 | # i have created this file prajakta ...
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Render the landing page of the text analyzer."""
    template = 'index.html'
    return render(request, template)
def analyze(request):
    """Apply the text transformations selected via checkboxes to POSTed text.

    Reads 'text' plus the option flags from request.POST. Each enabled
    option is applied in sequence, the output of one feeding the next.
    Renders error.html when the text is empty or no option was selected,
    otherwise analyze.html with the last transformation's result.
    """
    # get the text ('default' when the key is absent)
    djtext = request.POST.get('text', 'default')
    # checkbox values - 'on' when ticked
    removepunc = request.POST.get('removepunc', 'off')
    fullcaps = request.POST.get('fullcaps', 'off')
    newlineremover = request.POST.get('newlineremover', 'off')
    extraspaceremover = request.POST.get('extraspaceremover', 'off')
    charcount = request.POST.get('charcount', 'off')

    if len(djtext) == 0:
        return render(request, 'error.html')

    if removepunc == "on":
        punctuations = '''!()-[];:'"\,<>./?@#$%^&*_~'''
        # Keep every character that is not punctuation.
        analyzed = "".join(char for char in djtext if char not in punctuations)
        params = {'purpose': 'Removed Punctuations', 'analyzed_text': analyzed}
        djtext = analyzed

    if fullcaps == 'on':
        analyzed = djtext.upper()
        params = {'purpose': 'CHANGED STATEMENT IN UPPERCASE', 'analyzed_text': analyzed}
        djtext = analyzed

    if newlineremover == 'on':
        analyzed = "".join(char for char in djtext if char != "\n" and char != "\r")
        params = {'purpose': 'removed newline', 'analyzed_text': analyzed}
        djtext = analyzed

    if extraspaceremover == 'on':
        analyzed = ""
        for index, char in enumerate(djtext):
            # Collapse runs of spaces by dropping a space whose successor is
            # also a space. Bug fix: guard index + 1 so text ending in a
            # space no longer raises IndexError.
            if index + 1 < len(djtext) and djtext[index] == " " and djtext[index + 1] == " ":
                pass
            else:
                analyzed = analyzed + char
        params = {'purpose': 'removed space', 'analyzed_text': analyzed}
        djtext = analyzed

    if charcount == 'on':
        # Count the non-space characters.
        analyzed = sum(1 for char in djtext if char != " ")
        params = {'purpose': 'Character counted are', 'analyzed_text': analyzed}
        djtext = analyzed

    if charcount != "on" and extraspaceremover != "on" and removepunc != "on" and newlineremover != "on" and fullcaps != "on":
        return render(request, 'error.html')
    return render(request, 'analyze.html', params)
{
"api_name": "django.shortcuts.render",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 63,
"usage_type": "call"
},
{
"api_name":... |
70779220028 | import os
from accelerate.utils import is_tpu_available
from ..dataset import FlattenDataset, ReprDataset
from ..models import CachedS4, FlattenS4
from ..utils.trainer_utils import PredLoss, PredMetric
from . import register_trainer
from .base import Trainer
@register_trainer("flatten_s4")
class FlattenS4Trainer(Trainer):
    """Trainer wiring for the flat (end-to-end) S4 model on raw features."""

    def __init__(self, args):
        # NOTE(review): presumably the S4 kernels have no TPU support,
        # hence the hard guard - confirm.
        assert not is_tpu_available(), "TPU is not supported for this task"
        super().__init__(args)
        self.dataset = FlattenDataset
        self.architecture = FlattenS4
        self.criterion = PredLoss(self.args)
        self.metric = PredMetric(self.args)
        # Raw inputs live under <input_path>/<pred_time>h/<src_data>.h5
        self.data_path = os.path.join(
            args.input_path, f"{args.pred_time}h", f"{args.src_data}.h5"
        )
@register_trainer("cached_s4")
class CachedS4Trainer(Trainer):
    """Trainer wiring for S4 on pre-encoded (cached) representations."""

    def __init__(self, args):
        # NOTE(review): presumably the S4 kernels have no TPU support,
        # hence the hard guard - confirm.
        assert not is_tpu_available(), "TPU is not supported for this task"
        super().__init__(args)
        self.dataset = ReprDataset
        self.architecture = CachedS4
        self.criterion = PredLoss(self.args)
        self.metric = PredMetric(self.args)
        # Encoded features: prefer an explicit encoded_dir, otherwise fall
        # back to the pretrained checkpoint's save directory.
        if args.encoded_dir:
            self.data_path = os.path.join(
                args.encoded_dir, f"{args.src_data}_encoded.h5"
            )
        else:
            self.data_path = os.path.join(
                args.save_dir, args.pretrained, f"{args.src_data}_encoded.h5"
            )
| starmpcc/REMed | src/trainer/s4.py | s4.py | py | 1,418 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "base.Trainer",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "accelerate.utils.is_tpu_available",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "dataset.FlattenDataset",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": ... |
18921275912 | """Test the codeclimate JSON formatter."""
from __future__ import annotations
import json
import os
import pathlib
import subprocess
import sys
from tempfile import NamedTemporaryFile
import pytest
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.formatters import SarifFormatter
from ansiblelint.rules import AnsibleLintRule, RulesCollection
class TestSarifFormatter:
    """Unit test for SarifFormatter."""

    # Two shared throwaway rules; their attributes are populated once in
    # setup_class below, and all tests read the same `matches` fixture.
    rule1 = AnsibleLintRule()
    rule2 = AnsibleLintRule()
    matches: list[MatchError] = []
    formatter: SarifFormatter | None = None
    collection = RulesCollection()
    collection.register(rule1)
    collection.register(rule2)

    # NOTE(review): pytest passes the class object to setup_class; the
    # parameter is conventionally named `cls`, though `self` works here.
    def setup_class(self) -> None:
        """Set up few MatchError objects."""
        self.rule1.id = "TCF0001"
        self.rule1.severity = "VERY_HIGH"
        self.rule1.description = "This is the rule description."
        self.rule1.link = "https://rules/help#TCF0001"
        self.rule1.tags = ["tag1", "tag2"]
        self.rule2.id = "TCF0002"
        self.rule2.severity = "MEDIUM"
        self.rule2.link = "https://rules/help#TCF0002"
        self.rule2.tags = ["tag3", "tag4"]
        # Three matches: with column, without column (ignored), and a
        # second rule - covers both optional-field branches below.
        self.matches.extend(
            [
                MatchError(
                    message="message1",
                    lineno=1,
                    column=10,
                    details="details1",
                    lintable=Lintable("filename1.yml", content=""),
                    rule=self.rule1,
                    tag="yaml[test1]",
                    ignored=False,
                ),
                MatchError(
                    message="message2",
                    lineno=2,
                    details="",
                    lintable=Lintable("filename2.yml", content=""),
                    rule=self.rule1,
                    tag="yaml[test2]",
                    ignored=True,
                ),
                MatchError(
                    message="message3",
                    lineno=666,
                    column=667,
                    details="details3",
                    lintable=Lintable("filename3.yml", content=""),
                    rule=self.rule2,
                    tag="yaml[test3]",
                    ignored=False,
                ),
            ],
        )
        self.formatter = SarifFormatter(pathlib.Path.cwd(), display_relative_path=True)

    def test_sarif_format_list(self) -> None:
        """Test if the return value is a string."""
        assert isinstance(self.formatter, SarifFormatter)
        assert isinstance(self.formatter.format_result(self.matches), str)

    def test_sarif_result_is_json(self) -> None:
        """Test if returned string value is a JSON."""
        assert isinstance(self.formatter, SarifFormatter)
        output = self.formatter.format_result(self.matches)
        json.loads(output)
        # https://github.com/ansible/ansible-navigator/issues/1490
        assert "\n" not in output

    def test_sarif_single_match(self) -> None:
        """Test negative case. Only lists are allowed. Otherwise, a RuntimeError will be raised."""
        assert isinstance(self.formatter, SarifFormatter)
        with pytest.raises(RuntimeError):
            self.formatter.format_result(self.matches[0])  # type: ignore[arg-type]

    def test_sarif_format(self) -> None:
        """Test if the return SARIF object contains the expected results."""
        assert isinstance(self.formatter, SarifFormatter)
        sarif = json.loads(self.formatter.format_result(self.matches))
        assert len(sarif["runs"][0]["results"]) == 3
        for result in sarif["runs"][0]["results"]:
            # Ensure all reported entries have a level
            assert "level" in result
            # Ensure reported levels are either error or warning
            assert result["level"] in ("error", "warning")

    def test_validate_sarif_schema(self) -> None:
        """Test if the returned JSON is a valid SARIF report."""
        assert isinstance(self.formatter, SarifFormatter)
        sarif = json.loads(self.formatter.format_result(self.matches))
        assert sarif["$schema"] == SarifFormatter.SARIF_SCHEMA
        assert sarif["version"] == SarifFormatter.SARIF_SCHEMA_VERSION
        driver = sarif["runs"][0]["tool"]["driver"]
        assert driver["name"] == SarifFormatter.TOOL_NAME
        assert driver["informationUri"] == SarifFormatter.TOOL_URL
        rules = driver["rules"]
        assert len(rules) == 3
        assert rules[0]["id"] == self.matches[0].tag
        assert rules[0]["name"] == self.matches[0].tag
        assert rules[0]["shortDescription"]["text"] == self.matches[0].message
        assert rules[0]["defaultConfiguration"][
            "level"
        ] == SarifFormatter.get_sarif_rule_severity_level(self.matches[0].rule)
        assert rules[0]["help"]["text"] == self.matches[0].rule.description
        assert rules[0]["properties"]["tags"] == self.matches[0].rule.tags
        assert rules[0]["helpUri"] == self.matches[0].rule.url
        results = sarif["runs"][0]["results"]
        assert len(results) == 3
        for i, result in enumerate(results):
            assert result["ruleId"] == self.matches[i].tag
            assert (
                result["locations"][0]["physicalLocation"]["artifactLocation"]["uri"]
                == self.matches[i].filename
            )
            assert (
                result["locations"][0]["physicalLocation"]["artifactLocation"][
                    "uriBaseId"
                ]
                == SarifFormatter.BASE_URI_ID
            )
            assert (
                result["locations"][0]["physicalLocation"]["region"]["startLine"]
                == self.matches[i].lineno
            )
            # startColumn is emitted only when the match carries a column.
            if self.matches[i].column:
                assert (
                    result["locations"][0]["physicalLocation"]["region"]["startColumn"]
                    == self.matches[i].column
                )
            else:
                assert (
                    "startColumn"
                    not in result["locations"][0]["physicalLocation"]["region"]
                )
            assert result["level"] == SarifFormatter.get_sarif_result_severity_level(
                self.matches[i],
            )
        assert sarif["runs"][0]["originalUriBaseIds"][SarifFormatter.BASE_URI_ID]["uri"]
        assert results[0]["message"]["text"] == self.matches[0].details
        assert results[1]["message"]["text"] == self.matches[1].message
def test_sarif_parsable_ignored() -> None:
    """Test that -p option does not alter SARIF format."""
    # NOTE(review): cmd already contains "-p", so the second run passes "-p"
    # twice; effectively this compares "-p" vs "-p -p" - confirm the intent
    # was not "no -p" vs "-p".
    cmd = [
        sys.executable,
        "-m",
        "ansiblelint",
        "-v",
        "-p",
    ]
    file = "examples/playbooks/empty_playbook.yml"
    result = subprocess.run([*cmd, file], check=False)
    result2 = subprocess.run([*cmd, "-p", file], check=False)

    assert result.returncode == result2.returncode
    # NOTE(review): without capture_output both stdout attributes are None,
    # so this comparison is trivially true - confirm capture was intended.
    assert result.stdout == result2.stdout
@pytest.mark.parametrize(
    ("file", "return_code"),
    (
        pytest.param("examples/playbooks/valid.yml", 0, id="0"),
        pytest.param("playbook.yml", 2, id="1"),
    ),
)
def test_sarif_file(file: str, return_code: int) -> None:
    """Test ability to dump sarif file (--sarif-file)."""
    # NOTE(review): the subprocess re-opens the NamedTemporaryFile by name
    # while it is still open here - this fails on Windows; consider
    # delete=False or tmp_path if Windows support matters.
    with NamedTemporaryFile(mode="w", suffix=".sarif", prefix="output") as output_file:
        cmd = [
            sys.executable,
            "-m",
            "ansiblelint",
            "--sarif-file",
            str(output_file.name),
        ]
        result = subprocess.run([*cmd, file], check=False, capture_output=True)
        assert result.returncode == return_code
        assert os.path.exists(output_file.name)  # noqa: PTH110
        assert pathlib.Path(output_file.name).stat().st_size > 0
@pytest.mark.parametrize(
    ("file", "return_code"),
    (pytest.param("examples/playbooks/valid.yml", 0, id="0"),),
)
def test_sarif_file_creates_it_if_none_exists(file: str, return_code: int) -> None:
    """Test ability to create sarif file if none exists and dump output to it (--sarif-file)."""
    sarif_file_name = "test_output.sarif"
    cmd = [
        sys.executable,
        "-m",
        "ansiblelint",
        "--sarif-file",
        sarif_file_name,
    ]
    try:
        result = subprocess.run([*cmd, file], check=False, capture_output=True)
        assert result.returncode == return_code
        assert os.path.exists(sarif_file_name)  # noqa: PTH110
        assert pathlib.Path(sarif_file_name).stat().st_size > 0
    finally:
        # Robustness fix: remove the generated file even when an assertion
        # fails, so a failed run does not leak state into later tests.
        pathlib.Path(sarif_file_name).unlink(missing_ok=True)
| ansible/ansible-lint | test/test_formatter_sarif.py | test_formatter_sarif.py | py | 8,597 | python | en | code | 3,198 | github-code | 6 | [
{
"api_name": "ansiblelint.rules.AnsibleLintRule",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "ansiblelint.rules.AnsibleLintRule",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "ansiblelint.errors.MatchError",
"line_number": 24,
"usage_type": "nam... |
5503988928 | # https://www.hackerrank.com/challenges/validating-named-email-addresses/problem
import email.utils
import re
# Matches <user>@<site>.<ext>: user starts with a letter followed by word
# chars, '.' or '-' (equivalent to the original (\w|\d|_|-|[.])* group);
# site is ONE OR MORE letters - the original '*' wrongly accepted an empty
# site such as "a@.com"; ext is 1-3 letters.
email_pattern = r'[a-zA-Z][\w.-]*@[a-zA-Z]+[.][a-zA-Z]{1,3}'


def is_valid_email_address(person):
    """Return True when the (name, address) pair holds a valid address."""
    address = person[1]
    return re.fullmatch(email_pattern, address) is not None
# Read n "Name <address>" lines from stdin, keep only the pairs whose
# address matches the pattern, and echo them back formatted.
people = []
n = int(input())
for _ in range(n):
    line = input()
    people.append(email.utils.parseaddr(line))

for element in (filter(is_valid_email_address, people)):
    print(email.utils.formataddr(element))
| Nikit-370/HackerRank-Solution | Python/validating-parsing-email-address.py | validating-parsing-email-address.py | py | 524 | python | en | code | 10 | github-code | 6 | [
{
"api_name": "email.utils",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "re.fullmatch",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "email.utils",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "email.utils.utils.parseaddr"... |
37131271257 | #!/usr/bin/env python
#_*_coding:utf-8_*_
import numpy as np
from sklearn.decomposition import PCA
def pca(encodings, n_components = 2):
    """Project feature encodings to *n_components* dimensions with PCA.

    Each row of *encodings* is [name, feature_1, ..., feature_k]; the first
    column is carried through unchanged while the remaining columns (which
    may be strings) are cast to float before fitting.

    Returns a numpy array of rows [name, component_1, ..., component_n].
    """
    encodings = np.array(encodings)
    # Cast the feature columns to float in one step - the original did an
    # equivalent flatten / per-element float() / reshape round-trip.
    data = encodings[:, 1:].astype(float)
    new_data = PCA(n_components = n_components).fit_transform(data)
    # Re-attach the identifier column (also avoids shadowing this function's
    # own name with a local, as the original did).
    reduced = [[encodings[i][0]] + list(new_data[i]) for i in range(len(data))]
    return np.array(reduced)
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
... |
17763602061 | '''
UserList objects¶
This class acts as a wrapper around list objects. It is a useful base class for your own list-like classes which can inherit
from them and override existing methods or add new ones. In this way, one can add new behaviors to lists.
The need for this class has been partially supplanted by the ability to subclass directly from list; however,
this class can be easier to work with because the underlying list is accessible as an attribute.
class collections.UserList([list])
Class that simulates a list. The instance’s contents are kept in a regular list, which is accessible via the
`data` attribute of UserList instances. The instance’s contents are initially set to a copy of list, defaulting to the empty list [].
list can be any iterable, for example a real Python list or a UserList object.
In addition to supporting the methods and operations of mutable sequences, UserList instances provide the following attribute:
data
A real list object used to store the contents of the UserList class
'''
'''
# Python program to demonstrate
# userstring
from collections import UserString
d = 12344
# Creating an UserDict
userS = UserString(d)
print(userS.data)
# Creating an empty UserDict
userS = UserString("")
print(userS.data)
'''
# Python program to demonstrate
# userstring
from collections import UserString
d = 12344
# Creating an UserDict
userS = UserString(d)
print(userS.data)
# Creating an empty UserDict
userS = UserString("")
print(userS.data)
'''
= RESTART: F:/python/python_programs/5competetive programming/collections module demo/userlist and usersting.py
12344
'''
# Python program to demonstrate
# userstring
from collections import UserString
# Creating a Mutable String
class Mystring(UserString):
    """A mutable string: UserString exposes the backing str as `self.data`,
    so in-place edits just rebind that attribute."""

    def append(self, s):
        """Concatenate *s* onto the end of the stored string."""
        self.data = self.data + s

    def remove(self, s):
        """Delete every occurrence of *s* from the stored string."""
        without = self.data.replace(s, "")
        self.data = without
# Driver's code
s1 = Mystring("Geeks")
print("Original String:", s1.data)
# Appending to string
s1.append("s")
print("String After Appending:", s1.data)
# Removing from string
s1.remove("e")
print("String after Removing:", s1.data)
'''
Original String: Geeks
String After Appending: Geekss
String after Removing: Gkss
>>>
'''
| aparna0/competitive-programs | 1collections module/userlist and usersting.py | userlist and usersting.py | py | 2,368 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.UserString",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "collections.UserString",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "collections.UserString",
"line_number": 74,
"usage_type": "name"
}
] |
5131563876 | from django.contrib.auth import get_user_model
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .validators import validate_year
NUMBER_OF_SYMBOLS = 20
User = get_user_model()
class Category(models.Model):
    """Category a title belongs to (slug-addressable, ordered by name)."""

    name = models.CharField(
        verbose_name='Название',
        max_length=256,
        db_index=True
    )
    slug = models.SlugField(
        verbose_name='Slug',
        max_length=50,
        unique=True,
        db_index=True
    )

    class Meta:
        ordering = ('name',)
        verbose_name = 'Категория'
        verbose_name_plural = 'Категории'

    def __str__(self):
        # Truncated for readable admin/list displays.
        return self.name[:NUMBER_OF_SYMBOLS]
class Genre(models.Model):
    """Genre tag for titles (slug-addressable, ordered by name)."""

    name = models.CharField(
        verbose_name='Название',
        max_length=256,
        db_index=True
    )
    slug = models.SlugField(
        verbose_name='Slug',
        max_length=50,
        unique=True,
        db_index=True
    )

    class Meta:
        ordering = ('name',)
        verbose_name = 'Жанр'
        verbose_name_plural = 'Жанры'

    def __str__(self):
        # Truncated for readable admin/list displays.
        return self.name[:NUMBER_OF_SYMBOLS]
class Title(models.Model):
    """A reviewable work: name, release year, genres and one category."""

    name = models.CharField(
        verbose_name='Название',
        max_length=256,
        db_index=True
    )
    year = models.IntegerField(
        verbose_name='Год выпуска',
        db_index=True,
        validators=[validate_year]  # custom check, see .validators
    )
    description = models.TextField(
        verbose_name='Описание'
    )
    genre = models.ManyToManyField(
        Genre,
        verbose_name='Жанр',
        related_name='titles'
    )
    category = models.ForeignKey(
        Category,
        verbose_name='Категория',
        related_name='titles',
        # PROTECT: a category with titles cannot be deleted.
        on_delete=models.PROTECT
    )

    class Meta:
        ordering = ('name',)
        verbose_name = 'Произведение'
        verbose_name_plural = 'Произведения'

    def __str__(self):
        # Truncated for readable admin/list displays.
        return self.name[:NUMBER_OF_SYMBOLS]
class Review(models.Model):
    """User review of a title with a 1-10 score; one review per (title, author)."""

    text = models.TextField(
        verbose_name='Текст'
    )
    author = models.ForeignKey(
        User,
        verbose_name='Автор',
        related_name='reviews',
        on_delete=models.CASCADE
    )
    score = models.PositiveSmallIntegerField(
        verbose_name='Оценка',
        # Score is constrained to the inclusive range 1..10.
        validators=[MinValueValidator(1), MaxValueValidator(10)],
        db_index=True
    )
    pub_date = models.DateTimeField(
        verbose_name='Дата публикации',
        auto_now_add=True,
        db_index=True
    )
    title = models.ForeignKey(
        Title,
        verbose_name='Произведение',
        related_name='reviews',
        on_delete=models.CASCADE
    )

    class Meta:
        ordering = ('-pub_date',)
        verbose_name = 'Отзыв'
        verbose_name_plural = 'Отзывы'
        # Each author may review a given title at most once.
        constraints = [
            models.UniqueConstraint(
                fields=['title', 'author'], name='unique review'
            )
        ]

    def __str__(self):
        # Truncated for readable admin/list displays.
        return self.text[:NUMBER_OF_SYMBOLS]
class Comments(models.Model):
    """Comment left by a user under a review, newest first."""

    text = models.TextField(
        verbose_name='Текст'
    )
    author = models.ForeignKey(
        User,
        verbose_name='Автор',
        related_name='comments',
        on_delete=models.CASCADE
    )
    pub_date = models.DateTimeField(
        verbose_name='Дата публикации',
        auto_now_add=True,
        db_index=True
    )
    review = models.ForeignKey(
        Review,
        verbose_name='Отзыв',
        related_name='comments',
        on_delete=models.CASCADE
    )

    class Meta:
        ordering = ('-pub_date',)
        verbose_name = 'Комментарий'
        verbose_name_plural = 'Комментарии'

    def __str__(self):
        # Truncated for readable admin/list displays.
        return self.text[:NUMBER_OF_SYMBOLS]
| Toksi86/yamdb_final | api_yamdb/reviews/models.py | models.py | py | 3,953 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"ap... |
13969225316 | import tweepy
import math
from io import open
#consumer key, consumer secret, access token, access secret.
# Twitter API credentials (redacted placeholders).
ckey="x"
csecret="x"
atoken="x-x"
asecret="x"

auth = tweepy.OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)

api = tweepy.API(auth)

# Search parameters: fetch `total` tweets for `hashtag`, 100 per API call.
hashtag = 'dilma'
result_type = 'recent'
total = 350
iteracoes = int( math.ceil( total / 100.0 ) )
concatena = ''  # accumulates "id: text" lines for the whole run
max_id = 0  # lower bound for the next page (oldest id seen minus one)

for x in range(0, iteracoes):
    print ('Iteracao: ' + str(x+1) + ' de ' + str(iteracoes))
    # After the first page, request only tweets older than those seen.
    if max_id > 0:
        public_tweets = api.search(count='100', result_type=result_type, q=hashtag, max_id=max_id)
        #public_tweets = api.search(count='100', result_type=result_type, q=hashtag, until='2015-08-23', max_id=max_id)
    else:
        public_tweets = api.search(count='100', result_type=result_type, q=hashtag)
    for tweet in public_tweets:
        concatena += tweet.id_str + ': '
        concatena += tweet.text.replace('\n', '')
        concatena += '\n'
        # Track the smallest id so paging moves strictly backwards in time.
        if max_id == 0 or tweet.id < max_id:
            max_id = (tweet.id - 1)

with open("Output.txt", "w", encoding='utf-8') as text_file:
    text_file.write(concatena)
| ZackStone/PUC_6_RI | Test/twitter_api_search_3.py | twitter_api_search_3.py | py | 1,072 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tweepy.OAuthHandler",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number":... |
21379769063 | import test, gui, wx, config
from unittests import dummy
from domain import task, effort, category
class ViewerContainerTest(test.wxTestCase):
    """Tests for gui.viewercontainer.ViewerNotebook holding one dummy viewer."""

    def setUp(self):
        self.settings = config.Settings(load=False)
        self.taskList = task.sorter.Sorter(task.TaskList(),
            settings=self.settings)
        self.container = gui.viewercontainer.ViewerNotebook(self.frame,
            self.settings, 'mainviewer')
        # Register a dummy viewer backed by the shared task list.
        self.container.addViewer(dummy.ViewerWithDummyWidget(self.container,
            self.taskList, gui.uicommand.UICommands(self.frame, None, None,
            self.settings, self.taskList, effort.EffortList(self.taskList),
            category.CategoryList()), self.settings), 'Dummy')

    def testCreate(self):
        # A fresh container reports zero items.
        self.assertEqual(0, self.container.size())

    def testAddTask(self):
        # NOTE(review): size() presumably reflects the active viewer's item
        # count, so appending a task should bump it to 1 - confirm semantics.
        self.taskList.append(task.Task())
        self.assertEqual(1, self.container.size())
| HieronymusCH/TaskCoach | branches/Release0_62_Branch/taskcoach/tests/unittests/guiTests/ViewerContainerTest.py | ViewerContainerTest.py | py | 928 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "test.wxTestCase",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "config.Settings",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "domain.task.sorter.Sorter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "domain.ta... |
29474328826 | """ Hand-not-Hand creator """
""" this code is complete and ready to use """
import random
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
import helpers
#utility funtcion to compute area of overlap
def overlapping_area(detection_1, detection_2):
    """Return the intersection-over-union of two detections.

    Each detection is (x_topleft, y_topleft, confidence, width, height).
    The result is overlap_area / union_area, in [0, 1].
    """
    x1_tl = detection_1[0]
    x2_tl = detection_2[0]
    x1_br = detection_1[0] + detection_1[3]
    x2_br = detection_2[0] + detection_2[3]
    y1_tl = detection_1[1]
    y2_tl = detection_2[1]
    y1_br = detection_1[1] + detection_1[4]
    y2_br = detection_2[1] + detection_2[4]
    # Calculate the overlapping Area (zero when the boxes are disjoint).
    x_overlap = max(0, min(x1_br, x2_br)-max(x1_tl, x2_tl))
    y_overlap = max(0, min(y1_br, y2_br)-max(y1_tl, y2_tl))
    overlap_area = x_overlap * y_overlap
    # Bug fix: area_1 previously mixed the two boxes
    # (detection_1[3] * detection_2[4]); each area must use its own box.
    area_1 = detection_1[3] * detection_1[4]
    area_2 = detection_2[3] * detection_2[4]
    total_area = area_1 + area_2 - overlap_area
    return overlap_area / float(total_area)
#loads data for binary classification (hand/not-hand)
def load_binary_data(user_list, data_directory):
    """Load images/annotations for *user_list* and build the hand/not-hand HOG set.

    Returns (images, boxes_df, hog_vectors, labels).
    """
    data1,df = helpers.getHandSet(user_list, data_directory) # data 1 - actual images , df is actual bounding box
    # third return, i.e., z is a list of hog vecs, labels
    z = buildhandnothand_lis(df,data1)
    return data1,df,z[0],z[1]
#Creates dataset for hand-not-hand classifier to train on
#This function randomly generates bounding boxes
#Return: hog vector of those cropped bounding boxes along with label
#Label : 1 if hand ,0 otherwise
def buildhandnothand_lis(frame,imgset):
    """Build the hand/not-hand HOG training set from annotated frames.

    For every annotated image the ground-truth crop becomes a positive
    sample, and one random crop overlapping the truth by <= 0.5 becomes a
    negative sample. Returns (hog_vectors, labels), label 1 = hand, 0 = not.
    Random bounds 320x240 suggest that is the frame size - confirm.
    """
    poslis =[]
    neglis =[]

    for nameimg in frame.image:
        tupl = frame[frame['image']==nameimg].values[0]
        x_tl = tupl[1]
        y_tl = tupl[2]
        side = tupl[5]
        conf = 0

        dic = [0, 0]

        arg1 = [x_tl,y_tl,conf,side,side]
        # Positive sample: HOG of the annotated hand crop.
        poslis.append(helpers.convertToGrayToHOG(helpers.crop(imgset['Dataset/'+nameimg],x_tl,x_tl+side,y_tl,y_tl+side)))
        # NOTE(review): dic[1] is never incremented, so this loop only exits
        # through the dic[0] == 1 break below - i.e. exactly one negative per
        # image; confirm whether more than one was intended.
        while dic[0] <= 1 or dic[1] < 1:
            x = random.randint(0,320-side)
            y = random.randint(0,240-side)
            crp = helpers.crop(imgset['Dataset/'+nameimg],x,x+side,y,y+side)
            hogv = helpers.convertToGrayToHOG(crp)
            arg2 = [x,y, conf, side, side]

            z = overlapping_area(arg1,arg2)
            if dic[0] <= 1 and z <= 0.5:
                neglis.append(hogv)
                dic[0] += 1
            if dic[0]== 1:
                break

    label_1 = [1 for i in range(0,len(poslis)) ]
    label_0 = [0 for i in range(0,len(neglis))]
    label_1.extend(label_0)
    poslis.extend(neglis)

    return poslis,label_1
# Does hard negative mining and returns list of hog vectors, label list and no_of_false_positives after sliding
def do_hardNegativeMining(cached_window, frame, imgset, model, step_x, step_y):
    """Slide a window over every training image and collect false positives.

    A cached window counts as a hard negative when the current model
    classifies it as "hand" (prediction == 1) although it overlaps the
    ground-truth box by at most 50%.

    Args:
        cached_window: dict of precomputed HOG vectors keyed by
            '<image name><x><y>' (built by cacheSteps).
        frame: annotation dataframe (top-left in columns 1/2, side in 5).
        imgset: unchanged pass-through; kept for interface compatibility.
        model: classifier exposing predict().
        step_x, step_y: sliding-window strides in pixels.

    Returns:
        (hard_negatives, labels, count): false-positive HOG vectors, an
        all-zero label list of the same length, and the count.
    """
    lis = []
    no_of_false_positives = 0
    for nameimg in frame.image:
        tupl = frame[frame['image'] == nameimg].values[0]
        x_tl, y_tl, side = tupl[1], tupl[2], tupl[5]
        conf = 0
        truth_box = [x_tl, y_tl, conf, side, side]
        for x in range(0, 320 - side, step_x):
            for y in range(0, 240 - side, step_y):
                z = overlapping_area(truth_box, [x, y, conf, side, side])
                window = cached_window[str(nameimg) + str(x) + str(y)]
                prediction = model.predict([window])[0]
                if prediction == 1 and z <= 0.5:
                    lis.append(window)
                    no_of_false_positives += 1
    label = [0] * len(lis)
    return lis, label, no_of_false_positives
# Modifying to cache image values before hand so as to not redo that again and again
def cacheSteps(imgset, frame, step_x, step_y):
    """Precompute the HOG vector of every sliding-window crop.

    Keys are '<image name><x><y>' so do_hardNegativeMining can look windows
    up instead of recomputing HOG on every mining iteration. Progress is
    printed every 10 images.
    """
    dic = {}
    for i, img in enumerate(frame.image, start=1):
        # Only the box side (column 5) is needed to size the windows.
        side = frame[frame['image'] == img].values[0][5]
        if i % 10 == 0:
            print(i, " images cached")
        image = imgset['Dataset/' + img]
        for x in range(0, 320 - side, step_x):
            for y in range(0, 240 - side, step_y):
                dic[str(img) + str(x) + str(y)] = helpers.convertToGrayToHOG(
                    helpers.crop(image, x, x + side, y, y + side))
    return dic
def improve_Classifier_using_HNM(hog_list, label_list, frame, imgset, threshold=50, max_iterations=25): # frame - bounding boxes-df; yn_df - yes_or_no df
    """Iteratively retrain a MultinomialNB with hard negatives.

    Each pass fits the current sample set, mines new false positives from
    the cached sliding windows, and feeds them back in. Stops when no false
    positives remain, when their count drops to `threshold` or below, or
    after `max_iterations` passes.
    """
    step_x, step_y = 32, 24
    mnb = MultinomialNB()
    cached_wind = cacheSteps(imgset, frame, step_x, step_y)
    iteration = 0
    while True:
        iteration += 1
        model = mnb.partial_fit(hog_list, label_list, classes=[0, 1])
        hog_list, label_list, no_of_false_positives = do_hardNegativeMining(
            cached_wind, frame, imgset, model, step_x=step_x, step_y=step_y)
        if no_of_false_positives == 0:
            return model
        print("Iteration", iteration, "- No_of_false_positives:", no_of_false_positives)
        if no_of_false_positives <= threshold or iteration > max_iterations:
            return model
def trainHandDetector(train_list, threshold, max_iterations):
    """Train the binary hand/not-hand detector for the given users.

    Loads the binary dataset from 'Dataset/', runs hard-negative mining,
    and returns the fitted classifier.
    """
    imageset, boundbox, hog_list, label_list = load_binary_data(train_list, 'Dataset/')
    print('Binary data loaded')
    handDetector = improve_Classifier_using_HNM(
        hog_list, label_list, boundbox, imageset,
        threshold=threshold, max_iterations=max_iterations)
    print('Hand Detector Trained')
    return handDetector
{
"api_name": "helpers.getHandSet",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "helpers.convertToGrayToHOG",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "helpers.crop",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "random.ran... |
39069895078 | import time
import serial
import re
from .haversine import haversine
from threading import Thread
from datetime import datetime, timedelta
class Gps:
    """Background NMEA GPS reader.

    Opens the given serial port and starts a worker thread that parses
    incoming NMEA sentences (xxGGA, xxGLL, xxRMC, xxVTG) and keeps the most
    recent fix data, exposed through read-only properties.
    """

    def __init__(self, serial_port: str, timezone_hours: int = 0, serial_baudrate: int = 9600, round_number: int = 2):
        # Blocking serial connection to the GPS receiver.
        self.__serial = serial.Serial(serial_port, serial_baudrate)
        # self.file = open("aaa.txt", "a+")
        # Decimal places used when rounding speed/distance values.
        self._round_number = round_number
        # NOTE(review): stored but never applied — `timestamp` returns UTC
        # (the conversion below is commented out); confirm intent.
        self.__timezone_offset = timedelta(hours=timezone_hours)
        self.__quality = 0  # 0 not fixed / 1 standard GPS / 2 differential GPS / 3 estimated (DR) fix
        self.__speed = 0  # speed over ground km/h
        self.__altitude = 0  # altitude (m)
        self.__latitude = 0
        self.__travelled_distance = 0
        self.__longitude = 0
        self.__satellites = 0  # number of satellites in use
        self.__time = 0  # UTC time hhmmss.ss
        self.__date = 0  # date in day, month, year format ddmmyy es. 091219
        # Previous position used by add_distance; '' means "no fix yet".
        self.__pos_0 = ('', '')
        # Non-daemon reader thread: keeps the process alive while reading.
        self._worker = Thread(target=self._run, daemon=False)
        self._worker.start()

    @property
    def fixed(self):
        """True once the receiver reports any fix quality other than 0."""
        return self.__quality != 0

    @property
    def position(self):
        """Current (latitude, longitude) in decimal degrees."""
        position = (self.latitude, self.longitude)
        return position

    @property
    def altitude(self):
        """Altitude in metres (0.0 until a GGA sentence is parsed)."""
        altitude = safe_cast(self.__altitude, float, .0)
        return altitude

    @property
    def latitude(self):
        """Latitude in decimal degrees (0.0 while unknown)."""
        latitude = safe_cast(self.__latitude, float, .0)
        return latitude

    @property
    def longitude(self):
        """Longitude in decimal degrees (0.0 while unknown)."""
        longitude = safe_cast(self.__longitude, float, .0)
        return longitude

    @property
    def speed(self):
        """Speed over ground in km/h, rounded to `round_number` decimals."""
        speed = safe_cast(self.__speed, float, .0)
        return round(speed, self._round_number)

    @property
    def satellites(self):
        """Number of satellites currently in use."""
        satellites = safe_cast(self.__satellites, int, 0)
        return satellites

    @property
    def time(self):
        """UTC time formatted as 'hh:mm:ss'.

        Minutes/seconds fall back to '00' until a sentence carrying the raw
        hhmmss.ss string has been parsed.
        """
        # UTC time hhmmss.ss
        p_hours = str(self.__time)[0:2]
        p_minutes = str(self.__time)[2:4] if isinstance(self.__time, str) else "00"
        p_seconds = str(self.__time)[4:6] if isinstance(self.__time, str) else "00"
        return '{}:{}:{}'.format(p_hours, p_minutes, p_seconds)

    @property
    def date(self):
        """UTC date as 'YYYY-MM-DD' ('2000-01-01' until an RMC fix arrives)."""
        # date in day, month, year format ddmmyy es. 091219
        self.__date = "010100" if self.__date == 0 else str(self.__date)
        p_day = self.__date[0:2]
        p_month = self.__date[2:4]
        p_year = self.__date[4:6]
        return '20{}-{}-{}'.format(p_year, p_month, p_day)

    @property
    def timestamp(self):
        """UTC timestamp string 'YYYY-MM-DD hh:mm:ss'."""
        # timestamp = '{} {}'.format(self.date, self.time)
        # utc = datetime.strptime(timestamp, '%y-%m-%d %H:%M:%S')
        # timestamp_f = utc + self.timezone_offset
        return '{} {}'.format(self.date, self.time)

    def distance(self, latitude: float, longitude: float):
        """Haversine distance from the current position to the given point."""
        position_distance = (latitude, longitude)
        return round(haversine(self.position, position_distance), self._round_number)

    @property
    def travelled_distance(self):
        """Total distance accumulated by add_distance since start-up."""
        return round(self.__travelled_distance, self._round_number)

    def _run(self):
        """Worker-thread loop: read NMEA lines forever, feed parse_line."""
        last_print = time.monotonic()  # NOTE(review): unused leftover
        while True:
            try:
                # serial read line by line
                line = self.__serial.read_until('\n'.encode()).decode()
                # print(line)
                self.parse_line(line)
            except UnicodeDecodeError:
                print("Invalid line format")
            time.sleep(0.1)

    def parse_line(self, line: str):
        """Dispatch one raw NMEA sentence to the matching parser by its
        talker-agnostic sentence id ($..GGA, $..GLL, $..RMC, $..VTG)."""
        splitted_line = line.split(',')
        name = splitted_line[0]
        if re.match("^\$..GGA$", name):
            self.parse_xxGGA(splitted_line)
        elif re.match("^\$..GLL$", name):
            self.parse_xxGLL(splitted_line)
        elif re.match("^\$..RMC$", name):
            self.parse_xxRMC(splitted_line)
        elif re.match("^\$..VTG$", name):
            self.parse_xxVTG(splitted_line)

    def parse_xxGGA(self, line: list):
        """GGA: time, position, fix quality, satellites in use, altitude."""
        if line.__len__() < 15:
            return
        self.__time = line[1]
        self.nmea_cord_to_decimal(line[3], line[5])
        self.add_distance(self.latitude, self.longitude)
        self.__quality = line[6]
        self.__satellites = line[7]
        self.__altitude = line[9]

    def parse_xxGLL(self, line):
        """GLL: position only."""
        if line.__len__() < 8:
            return
        self.nmea_cord_to_decimal(line[1], line[3])

    def parse_xxRMC(self, line):
        """RMC: time, position and date."""
        if line.__len__() < 13:
            return
        self.__time = line[1]
        self.nmea_cord_to_decimal(line[3], line[5])
        self.__date = line[9]

    def parse_xxVTG(self, line):
        """VTG: speed over ground (field 7 carries km/h)."""
        if line.__len__() < 10:
            return
        self.__speed = line[7]

    @travelled_distance.setter
    def travelled_distance(self, value):
        # Allows callers to reset/override the accumulated distance.
        self.__travelled_distance = value

    def add_distance(self, latitude, longitude):
        """Accumulate haversine distance from the previous fix.

        Distances of 1 or less (in haversine's unit) are ignored as noise;
        empty-string coordinates (no fix yet) are skipped.
        """
        if self.fixed:
            pos_1 = (latitude, longitude)
            if longitude != '' and latitude != '' and self.__pos_0[0] != '' and self.__pos_0[1] != '':
                d = haversine(self.__pos_0, pos_1)
                if d > 1:
                    self.__travelled_distance += d
            self.__pos_0 = pos_1

    def nmea_cord_to_decimal(self, latitude, longitude):
        """Convert NMEA ddmm.mmmm / dddmm.mmmm strings to decimal degrees.

        Malformed fields are silently ignored. NOTE(review): the N/S and
        E/W hemisphere fields are never applied, so southern/western
        coordinates keep a positive sign — confirm intended coverage.
        """
        if not re.match("[0-9]{4}.[0-9]+", latitude) or not re.match("[0-9]{5}.[0-9]+", longitude):
            return
        self.__latitude = float(latitude[0:2]) + float(latitude[2:])/60
        self.__longitude = float(longitude[0:3]) + float(longitude[3:])/60
def safe_cast(val, to_type, default=None):
    """Convert *val* with *to_type*, returning *default* when the
    conversion raises ValueError or TypeError."""
    try:
        converted = to_type(val)
    except (ValueError, TypeError):
        return default
    return converted
| policumbent/bob | modules/gps/src/steGPS/gps.py | gps.py | py | 5,732 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "serial.Serial",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "haversine.haversine... |
40687962293 | import json
import logging
import threading
import traceback
from enum import Enum
from typing import Any, Dict, List, Optional, Set, Tuple
from lte.protos.policydb_pb2 import FlowMatch
from magma.common.redis.client import get_default_client
from magma.configuration.service_configs import load_service_config
from magma.pipelined.qos.qos_meter_impl import MeterManager
from magma.pipelined.qos.qos_tc_impl import TCManager, TrafficClass
from magma.pipelined.qos.types import (
QosInfo,
get_data,
get_data_json,
get_key,
get_key_json,
get_subscriber_data,
get_subscriber_key,
)
from magma.pipelined.qos.utils import QosStore
from redis import ConnectionError # pylint: disable=redefined-builtin
LOG = logging.getLogger("pipelined.qos.common")
# LOG.setLevel(logging.DEBUG)
def normalize_imsi(imsi: str) -> str:
    """Lower-case an IMSI and strip a leading 'imsi' prefix if present."""
    lowered = imsi.lower()
    return lowered[4:] if lowered.startswith("imsi") else lowered
class QosImplType(Enum):
    """Supported QoS backend implementations."""

    LINUX_TC = "linux_tc"
    OVS_METER = "ovs_meter"

    @staticmethod
    def list():
        """Return the configured string values of all implementation types."""
        return [impl.value for impl in QosImplType]
class SubscriberSession(object):
    """Per-IP session state: AMBR handles (root + leaf) for each traffic
    direction, plus the set of rule numbers attached to the session."""

    def __init__(self, ip_addr: str):
        self.ip_addr = ip_addr
        self.ambr_ul = 0
        self.ambr_ul_leaf = 0
        self.ambr_dl = 0
        self.ambr_dl_leaf = 0
        self.rules: Set = set()

    def set_ambr(self, d: FlowMatch.Direction, root: int, leaf: int) -> None:
        """Record the root/leaf AMBR handles for direction *d*."""
        if d == FlowMatch.UPLINK:
            self.ambr_ul, self.ambr_ul_leaf = root, leaf
        else:
            self.ambr_dl, self.ambr_dl_leaf = root, leaf

    def get_ambr(self, d: FlowMatch.Direction) -> int:
        """Return the root AMBR handle for direction *d*."""
        return self.ambr_ul if d == FlowMatch.UPLINK else self.ambr_dl

    def get_ambr_leaf(self, d: FlowMatch.Direction) -> int:
        """Return the leaf AMBR handle for direction *d*."""
        return self.ambr_ul_leaf if d == FlowMatch.UPLINK else self.ambr_dl_leaf
class SubscriberState(object):
    """
    Map subscriber -> sessions

    Tracks, per IMSI, the rules (rule number -> [(direction, qos data)])
    and the sessions (IP -> SubscriberSession), mirroring every rule into
    the shared redis-backed store passed in as `qos_store`.
    """

    def __init__(self, imsi: str, qos_store: Dict):
        self.imsi = imsi
        self.rules: Dict = {} # rule -> qos handle
        self.sessions: Dict = {} # IP -> [sessions(ip, qos_argumets, rule_no), ...]
        self._redis_store = qos_store

    def check_empty(self) -> bool:
        """True when no rules and no sessions remain for this subscriber."""
        return not self.rules and not self.sessions

    def get_or_create_session(self, ip_addr: str):
        """Return the session for ip_addr, creating it on first use."""
        session = self.sessions.get(ip_addr)
        if not session:
            session = SubscriberSession(ip_addr)
            self.sessions[ip_addr] = session
        return session

    def remove_session(self, ip_addr: str) -> None:
        """Drop the session for ip_addr (KeyError if absent)."""
        del self.sessions[ip_addr]

    def find_session_with_rule(self, rule_num: int) -> Optional[SubscriberSession]:
        """Return the first session that has rule_num attached, else None."""
        for session in self.sessions.values():
            if rule_num in session.rules:
                return session
        return None

    def _update_rules_map(
        self, ip_addr: str, rule_num: int, d: FlowMatch.Direction,
        qos_data,
    ) -> None:
        # Attach the rule to its session and record (direction, data) in the
        # in-memory rules map; one rule may carry both directions.
        if rule_num not in self.rules:
            self.rules[rule_num] = []
        session = self.get_or_create_session(ip_addr)
        session.rules.add(rule_num)
        self.rules[rule_num].append((d, qos_data))

    def update_rule(
        self, ip_addr: str, rule_num: int, d: FlowMatch.Direction,
        qos_handle: int, ambr: int, leaf: int,
    ) -> None:
        """Persist one rule's qos handles to redis and mirror them in memory."""
        k = get_key_json(get_subscriber_key(self.imsi, ip_addr, rule_num, d))
        qos_data = get_data_json(get_subscriber_data(qos_handle, ambr, leaf))
        LOG.debug("Update: %s -> %s", k, qos_data)
        self._redis_store[k] = qos_data
        self._update_rules_map(ip_addr, rule_num, d, qos_data)

    def remove_rule(self, rule_num: int) -> None:
        """Delete a rule from redis, from its session, and from the map.

        The in-memory entry is removed unconditionally; KeyError if the
        rule is unknown.
        """
        session_with_rule = self.find_session_with_rule(rule_num)
        if session_with_rule:
            for (d, _) in self.rules[rule_num]:
                k = get_subscriber_key(
                    self.imsi, session_with_rule.ip_addr,
                    rule_num, d,
                )
                if get_key_json(k) in self._redis_store:
                    del self._redis_store[get_key_json(k)]
            session_with_rule.rules.remove(rule_num)
        del self.rules[rule_num]

    def find_rule(self, rule_num: int):
        """Return the [(direction, qos data)] list for rule_num, or None."""
        return self.rules.get(rule_num)

    def get_all_rules(self) -> Dict[int, List[Tuple[Any, str]]]:
        """Return the full rule map (the live dict, not a copy)."""
        return self.rules

    def get_all_empty_sessions(self) -> List:
        """Return sessions that no longer have any rules attached."""
        return [s for s in self.sessions.values() if not s.rules]

    def get_qos_handle(self, rule_num: int, direction: FlowMatch.Direction) -> int:
        """Return the stored qos handle for (rule, direction), or 0."""
        rule = self.rules.get(rule_num)
        if rule:
            for d, qos_data in rule:
                if d == direction:
                    _, qid, _, _ = get_data(qos_data)
                    return qid
        return 0
class QosManager(object):
    """
    Qos Manager -> add/remove subscriber qos

    Front-end for per-subscriber QoS: owns the backend impl (linux tc or
    OVS meters), the redis-backed persistent store, and the in-memory
    SubscriberState map. All mutations are serialized through self._lock.
    """

    def init_impl(self, datapath):
        """
        Takes in datapath, and initializes the appropriate QoS manager
        (MeterManager or TCManager) based on config, then runs setup().
        """
        if not self._qos_enabled:
            return
        if self._initialized:
            return
        try:
            impl_type = QosImplType(self._config["qos"]["impl"])
            if impl_type == QosImplType.OVS_METER:
                self.impl = MeterManager(datapath, self._loop, self._config)
            else:
                self.impl = TCManager(datapath, self._config)
            self.setup()
        except ValueError:
            # NOTE(review): if QosImplType(...) itself raised, impl_type is
            # unbound here and this log call raises UnboundLocalError.
            LOG.error("%s is not a valid qos impl type", impl_type)
            raise

    @classmethod
    def debug(cls, _, __, ___):
        """Dump the redis-backed QoS state and the matching backend state
        (meters or tc classes) to stdout; CLI debugging hook."""
        config = load_service_config('pipelined')
        qos_impl_type = QosImplType(config["qos"]["impl"])
        qos_store = QosStore(cls.__name__, client=get_default_client())
        for k, v in qos_store.items():
            _, imsi, ip_addr, rule_num, d = get_key(k)
            _, qid, ambr, leaf = get_data(v)
            print('imsi :', imsi)
            print('ip_addr :', ip_addr)
            print('rule_num :', rule_num)
            print('direction :', d)
            print('qos_handle:', qid)
            print('qos_handle_ambr:', ambr)
            print('qos_handle_ambr_leaf:', leaf)
            if qos_impl_type == QosImplType.OVS_METER:
                MeterManager.dump_meter_state(v)
            else:
                # Uplink classes live on the NAT interface; downlink on the
                # GTP tunnel device.
                dev = config['nat_iface'] if d == FlowMatch.UPLINK else 'gtpu_sys_2152'
                print("Dev: ", dev)
                TrafficClass.dump_class_state(dev, qid)
                if leaf and leaf != qid:
                    print("Leaf:")
                    TrafficClass.dump_class_state(dev, leaf)
                if ambr:
                    print("AMBR (parent):")
                    TrafficClass.dump_class_state(dev, ambr)
        if qos_impl_type == QosImplType.LINUX_TC:
            dev = config['nat_iface']
            print("Root stats for: ", dev)
            TrafficClass.dump_root_class_stats(dev)
            dev = 'gtpu_sys_2152'
            print("Root stats for: ", dev)
            TrafficClass.dump_root_class_stats(dev)

    def _is_redis_available(self):
        """Return True when the redis backing store answers a PING."""
        try:
            self._redis_store.client.ping()
        except ConnectionError:
            return False
        return True

    def __init__(self, loop, config, client=get_default_client()):
        # NOTE(review): the default client is created once at import time
        # (mutable default argument); callers normally pass their own.
        self._initialized = False
        self._clean_restart = config["clean_restart"]
        self._subscriber_state = {}
        self._loop = loop
        self._redis_conn_retry_secs = 1
        self._config = config
        # protect QoS object create and delete across a QoSManager Object.
        self._lock = threading.Lock()
        if 'qos' not in config.keys():
            LOG.error("qos field not provided in config")
            return
        self._qos_enabled = config["qos"]["enable"]
        if not self._qos_enabled:
            return
        self._apn_ambr_enabled = config["qos"].get("apn_ambr_enabled", True)
        LOG.info("QoS: apn_ambr_enabled: %s", self._apn_ambr_enabled)
        self._redis_store = QosStore(self.__class__.__name__, client)
        self.impl = None

    def setup(self):
        """Run _setupInternal once redis is reachable, retrying on a timer
        scheduled on the event loop otherwise."""
        with self._lock:
            if not self._qos_enabled:
                return
            if self._is_redis_available():
                self._setupInternal()
            else:
                LOG.info(
                    "failed to connect to redis..retrying in %d secs",
                    self._redis_conn_retry_secs,
                )
                self._loop.call_later(self._redis_conn_retry_secs, self.setup)

    def _setupInternal(self):
        """Initialize backend state.

        Clean restart: wipe both the backend and the redis store.
        Otherwise: reconcile the redis entries against the backend's actual
        state, purging whichever side has dangling references, and rebuild
        the in-memory subscriber/session maps. Any failure falls back to a
        clean restart.
        """
        if self._initialized:
            return
        if self._clean_restart:
            LOG.info("Qos Setup: clean start")
            self.impl.setup()
            self.impl.destroy()
            self._redis_store.clear()
            self._initialized = True
        else:
            # read existing state from qos_impl
            LOG.info("Qos Setup: recovering existing state")
            self.impl.setup()
            cur_qos_state, apn_qid_list = self.impl.read_all_state()
            LOG.debug("Initial qos_state -> %s", json.dumps(cur_qos_state, indent=1))
            LOG.debug("apn_qid_list -> %s", apn_qid_list)
            LOG.debug("Redis state: %s", self._redis_store)
            try:
                # populate state from db
                in_store_qid = set()
                in_store_ambr_qid = set()
                purge_store_set = set()
                for rule, sub_data in self._redis_store.items():
                    _, qid, ambr, leaf = get_data(sub_data)
                    # Drop redis entries whose qids no longer exist in the
                    # backend, or whose ambr/leaf linkage is inconsistent.
                    if qid not in cur_qos_state:
                        LOG.warning("missing qid: %s in TC", qid)
                        purge_store_set.add(rule)
                        continue
                    if ambr and ambr != 0 and ambr not in cur_qos_state:
                        purge_store_set.add(rule)
                        LOG.warning("missing ambr class: %s of qid %d", ambr, qid)
                        continue
                    if leaf and leaf != 0 and leaf not in cur_qos_state:
                        purge_store_set.add(rule)
                        LOG.warning("missing leaf class: %s of qid %d", leaf, qid)
                        continue
                    if ambr:
                        qid_state = cur_qos_state[qid]
                        if qid_state['ambr_qid'] != ambr:
                            purge_store_set.add(rule)
                            LOG.warning("Inconsistent amber class: %s of qid %d", qid_state['ambr_qid'], ambr)
                            continue
                    in_store_qid.add(qid)
                    if ambr:
                        in_store_qid.add(ambr)
                        in_store_ambr_qid.add(ambr)
                    in_store_qid.add(leaf)
                    # Rebuild the in-memory subscriber/session maps.
                    _, imsi, ip_addr, rule_num, direction = get_key(rule)
                    subscriber = self._get_or_create_subscriber(imsi)
                    subscriber.update_rule(ip_addr, rule_num, direction, qid, ambr, leaf)
                    session = subscriber.get_or_create_session(ip_addr)
                    session.set_ambr(direction, ambr, leaf)
                # purge entries from qos_store
                for rule in purge_store_set:
                    LOG.debug("purging qos_store entry %s", rule)
                    del self._redis_store[rule]
                # purge unreferenced qos configs from system
                # Step 1. Delete child nodes
                lost_and_found_apn_list = set()
                for qos_handle in cur_qos_state:
                    if qos_handle not in in_store_qid:
                        if qos_handle in apn_qid_list:
                            lost_and_found_apn_list.add(qos_handle)
                        else:
                            LOG.debug("removing qos_handle %d", qos_handle)
                            self.impl.remove_qos(
                                qos_handle,
                                cur_qos_state[qos_handle]['direction'],
                                recovery_mode=True,
                            )
                if len(lost_and_found_apn_list) > 0:
                    # Step 2. delete qos ambr without any leaf nodes
                    for qos_handle in lost_and_found_apn_list:
                        if qos_handle not in in_store_ambr_qid:
                            LOG.debug("removing apn qos_handle %d", qos_handle)
                            self.impl.remove_qos(
                                qos_handle,
                                cur_qos_state[qos_handle]['direction'],
                                recovery_mode=True,
                                skip_filter=True,
                            )
                final_qos_state, _ = self.impl.read_all_state()
                LOG.info("final_qos_state -> %s", json.dumps(final_qos_state, indent=1))
                LOG.info("final_redis state -> %s", self._redis_store)
            except Exception as e:  # pylint: disable=broad-except
                # in case of any exception start clean slate
                LOG.error("error %s. restarting clean %s", e, traceback.format_exc())
                self._clean_restart = True
            self._initialized = True

    def _get_or_create_subscriber(self, imsi):
        """Return the SubscriberState for imsi, creating it on first use."""
        subscriber_state = self._subscriber_state.get(imsi)
        if not subscriber_state:
            subscriber_state = SubscriberState(imsi, self._redis_store)
            self._subscriber_state[imsi] = subscriber_state
        return subscriber_state

    def add_subscriber_qos(
        self,
        imsi: str,
        ip_addr: str,
        apn_ambr: int,
        units: str,
        rule_num: int,
        direction: FlowMatch.Direction,
        qos_info: QosInfo,
        cleanup_rule=None,
    ):
        """Create (or reuse) the QoS objects for one subscriber rule.

        Returns the backend's action/instruction tuple for the resulting
        qos handle, or (None, None, None) when QoS is disabled or any step
        of the creation fails.
        """
        with self._lock:
            if not self._qos_enabled or not self._initialized:
                LOG.debug("add_subscriber_qos: not enabled or initialized")
                return None, None, None
            LOG.debug(
                "adding qos for imsi %s rule_num %d direction %d apn_ambr %d, %s",
                imsi, rule_num, direction, apn_ambr, qos_info,
            )
            imsi = normalize_imsi(imsi)
            # ip_addr identifies a specific subscriber session, each subscriber session
            # must be associated with a default bearer and can be associated with dedicated
            # bearers. APN AMBR specifies the aggregate max bit rate for a specific
            # subscriber across all the bearers. Queues for dedicated bearers will be
            # children of default bearer Queues. In case the dedicated bearers exceed the
            # rate, then they borrow from the default bearer queue
            subscriber_state = self._get_or_create_subscriber(imsi)
            qos_handle = subscriber_state.get_qos_handle(rule_num, direction)
            if qos_handle:
                # Rule already provisioned — reuse the existing handle.
                LOG.debug(
                    "qos exists for imsi %s rule_num %d direction %d",
                    imsi, rule_num, direction,
                )
                return self.impl.get_action_instruction(qos_handle)
            ambr_qos_handle_root = 0
            ambr_qos_handle_leaf = 0
            if self._apn_ambr_enabled and apn_ambr > 0:
                # Ensure the per-session AMBR root/leaf pair exists.
                session = subscriber_state.get_or_create_session(ip_addr)
                ambr_qos_handle_root = session.get_ambr(direction)
                LOG.debug("existing root rec: ambr_qos_handle_root %d", ambr_qos_handle_root)
                if not ambr_qos_handle_root:
                    ambr_qos_handle_root = self.impl.add_qos(
                        direction, QosInfo(gbr=None, mbr=apn_ambr),
                        cleanup_rule, units=units,
                        skip_filter=True,
                    )
                    if not ambr_qos_handle_root:
                        LOG.error(
                            'Failed adding root ambr qos mbr %u direction %d',
                            apn_ambr, direction,
                        )
                        return None, None, None
                    else:
                        LOG.debug(
                            'Added root ambr qos mbr %u direction %d qos_handle %d ',
                            apn_ambr, direction, ambr_qos_handle_root,
                        )
                ambr_qos_handle_leaf = session.get_ambr_leaf(direction)
                LOG.debug("existing leaf rec: ambr_qos_handle_leaf %d", ambr_qos_handle_leaf)
                if not ambr_qos_handle_leaf:
                    ambr_qos_handle_leaf = self.impl.add_qos(
                        direction,
                        QosInfo(gbr=None, mbr=apn_ambr),
                        cleanup_rule, units=units,
                        parent=ambr_qos_handle_root,
                    )
                    if ambr_qos_handle_leaf:
                        session.set_ambr(direction, ambr_qos_handle_root, ambr_qos_handle_leaf)
                        LOG.debug(
                            'Added ambr qos mbr %u direction %d qos_handle %d/%d ',
                            apn_ambr, direction, ambr_qos_handle_root, ambr_qos_handle_leaf,
                        )
                    else:
                        LOG.error(
                            'Failed adding leaf ambr qos mbr %u direction %d',
                            apn_ambr, direction,
                        )
                        # Roll back the freshly created root on leaf failure.
                        self.impl.remove_qos(ambr_qos_handle_root, direction, skip_filter=True)
                        return None, None, None
                qos_handle = ambr_qos_handle_leaf
            if qos_info:
                # Dedicated-bearer queue, parented under the AMBR root.
                qos_handle = self.impl.add_qos(
                    direction, qos_info, cleanup_rule, units=units,
                    parent=ambr_qos_handle_root,
                )
                LOG.debug("Added ded brr handle: %d", qos_handle)
                if qos_handle:
                    LOG.debug(
                        'Adding qos %s direction %d qos_handle %d ',
                        qos_info, direction, qos_handle,
                    )
                else:
                    LOG.error('Failed adding qos %s direction %d', qos_info, direction)
                    return None, None, None
            if qos_handle:
                subscriber_state.update_rule(
                    ip_addr, rule_num, direction,
                    qos_handle, ambr_qos_handle_root, ambr_qos_handle_leaf,
                )
                return self.impl.get_action_instruction(qos_handle)
            return None, None, None

    def remove_subscriber_qos(self, imsi: str = "", del_rule_num: int = -1):
        """Remove QoS objects for a subscriber.

        del_rule_num == -1 removes every rule for the IMSI; otherwise only
        the given rule is removed (keeping qids still referenced by other
        rules). Sessions left without rules, and subscriber state left
        without sessions/rules, are purged as well.
        """
        with self._lock:
            if not self._qos_enabled or not self._initialized:
                LOG.debug("remove_subscriber_qos: not enabled or initialized")
                return
            LOG.debug("removing Qos for imsi %s del_rule_num %d", imsi, del_rule_num)
            if not imsi:
                LOG.error('imsi %s invalid, failed removing', imsi)
                return
            imsi = normalize_imsi(imsi)
            subscriber_state = self._subscriber_state.get(imsi)
            if not subscriber_state:
                LOG.debug('imsi %s not found, nothing to remove ', imsi)
                return
            to_be_deleted_rules = set()
            qid_to_remove = {}
            qid_in_use = set()
            if del_rule_num == -1:
                # deleting all rules for the subscriber
                rules = subscriber_state.get_all_rules()
                for (rule_num, rule) in rules.items():
                    for (d, qos_data) in rule:
                        _, qid, ambr, leaf = get_data(qos_data)
                        if ambr != qid:
                            qid_to_remove[qid] = d
                            if leaf and leaf != qid:
                                qid_to_remove[leaf] = d
                        to_be_deleted_rules.add(rule_num)
            else:
                rule = subscriber_state.find_rule(del_rule_num)
                if rule:
                    rules = subscriber_state.get_all_rules()
                    for (rule_num, rule) in rules.items():
                        for (d, qos_data) in rule:
                            _, qid, ambr, leaf = get_data(qos_data)
                            if rule_num == del_rule_num:
                                if ambr != qid:
                                    qid_to_remove[qid] = d
                                    if leaf and leaf != qid:
                                        qid_to_remove[leaf] = d
                            else:
                                # qid referenced by a surviving rule: keep it.
                                qid_in_use.add(qid)
                    LOG.debug("removing rule %s %s ", imsi, del_rule_num)
                    to_be_deleted_rules.add(del_rule_num)
                else:
                    LOG.debug("unable to find rule_num %d for imsi %s", del_rule_num, imsi)
            for (qid, d) in qid_to_remove.items():
                if qid not in qid_in_use:
                    self.impl.remove_qos(qid, d)
            for rule_num in to_be_deleted_rules:
                subscriber_state.remove_rule(rule_num)
            # purge sessions with no rules
            for session in subscriber_state.get_all_empty_sessions():
                for d in (FlowMatch.UPLINK, FlowMatch.DOWNLINK):
                    ambr_qos_handle = session.get_ambr(d)
                    if ambr_qos_handle:
                        LOG.debug("removing root ambr qos handle %d direction %d", ambr_qos_handle, d)
                        self.impl.remove_qos(ambr_qos_handle, d, skip_filter=True)
                LOG.debug("purging session %s %s ", imsi, session.ip_addr)
                subscriber_state.remove_session(session.ip_addr)
            # purge subscriber state with no rules
            if subscriber_state.check_empty():
                LOG.debug("purging subscriber state for %s, empty rules and sessions", imsi)
                del self._subscriber_state[imsi]
| magma/magma | lte/gateway/python/magma/pipelined/qos/common.py | common.py | py | 22,178 | python | en | code | 1,605 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "lte.protos.policydb_pb2.FlowMa... |
18648883751 | #!/usr/bin/env python3.7
import matplotlib
matplotlib.use('Agg')
import pylab as pl
from matplotlib import rc
import matplotlib.cm as cm
from matplotlib.colors import Normalize
rc('text', usetex=True)
import numpy as np
import numpy.linalg as nl
import numpy.random as nr
import os.path
from numpy import cos, sin
import sys
# Output file names derived from the script's own name.
basename = os.path.splitext(sys.argv[0])[0]
pdfname = basename + '.pdf'
pngname = basename + '.png'
# Number of random sample points on the strip.
n = 4000
# Decay rate of the exponential similarity kernel exp(-r * distance).
r=25.000
# Euler rotation angles (radians) used to orient the 3-D view.
tx=np.pi*0
ty=np.pi/3
tz=np.pi
# Elementary rotation matrices about the x, y and z axes.
Rx = np.array([[1, 0, 0], [0, cos(tx), -sin(tx)], [0, sin(tx), cos(tx)]])
Ry = np.array([[cos(ty), 0, sin(ty)], [0, 1, 0], [-sin(ty), 0, cos(ty)]])
Rz = np.array([[cos(tz), -sin(tz), 0],[sin(tz), cos(tz), 0], [0,0,1]])
def mkmat(x, decay=25.000):
    """Build the similarity (kernel) matrix K[i, j] = exp(-decay * ||xi - xj||).

    Args:
        x: sequence of coordinate vectors (anything numpy can subtract).
        decay: exponential decay rate. Defaults to 25.0, the value of the
            script's module-level constant ``r`` that this function used to
            read implicitly, so existing ``mkmat(X)`` calls are unchanged.

    Returns:
        A len(x) x len(x) numpy array of pairwise kernel values (1.0 on the
        diagonal).
    """
    ret = np.zeros((len(x), len(x)))
    for i, xi in enumerate(x):
        for j, xj in enumerate(x):
            ret[i, j] = np.exp(-decay * nl.norm(xi - xj))
    return ret
# Sample n points uniformly in parameter space: s in [0, 2*pi), t in [-1, 1].
nr.seed(3)
X0 = nr.random((n,2))
X0[:,0]*=2*np.pi
X0[:,1]-=0.5
X0[:,1]*=2
# Embed the parameters as a Mobius strip in R^3, then rotate for the view.
X = np.array([((1+(t/2)*cos(s/2))*cos(s), (1+(t/2)*cos(s/2))*sin(s), t/2*sin(s/2)) for s,t in X0])
X = X@Rx@Ry@Rz
# Kernel matrix and its inverse; the printed value sum(K^{-1}) is the
# magnitude of the sampled point set.
M = mkmat(X)
IM = nl.inv(M)
print(sum(sum(IM)))
# Per-axis coordinate lists.
# NOTE(review): only referenced by the commented-out ax.plot below.
Xx = [_[0] for _ in X]
Xy = [_[1] for _ in X]
Xz = [_[2] for _ in X]
# Left panel: 3-D scatter of the strip; right panel: parameter-space view.
fig = pl.figure(figsize=(6,3))
ax = fig.add_subplot(121, projection='3d')
# ax = fig.add_subplot(121)
bx = fig.add_subplot(122)
# Hide tick labels and thin the grid lines on the 3-D axes.
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
ax.axes.zaxis.set_ticklabels([])
ax.xaxis._axinfo["grid"]['linewidth'] = 0.1
ax.yaxis._axinfo["grid"]['linewidth'] = 0.1
ax.zaxis._axinfo["grid"]['linewidth'] = 0.1
# Weight vector w = K^{-1} 1; marker size/colour encode 50*|w|^3.
C = IM.dot(np.ones(len(X)))
sa = 50*abs(C)**3
sb = sa
cmap = pl.get_cmap()
norm = Normalize(vmin=min(sa), vmax=max(sa))
for idx, row in enumerate(X):
    # NOTE(review): only row[0]/row[1] are passed, so points land at z=0 on
    # the 3-D axes — row[2] may have been intended as well; confirm.
    ax.plot(row[0], row[1], '.', color = cmap(norm(sa[idx])), markersize=sa[idx], markeredgecolor='none')
# ax.plot(Xx, Xy, Xz, c=C, markersize=sa/10, lw=0)
sc = bx.scatter(X0[:,0], X0[:,1], c=C, s=sb, lw=0)
# Arrows mark the identified (glued, orientation-reversed) edges of the
# parameter square.
xoffset=0.08
bx.arrow(0-xoffset, -1, 0, 2, length_includes_head=True, head_width=0.2, head_length=0.1, fc='0')
bx.arrow(2*np.pi+xoffset, 1, 0, -2, length_includes_head=True, head_width=0.2, head_length=0.1, fc='0')
bx.axis('off')
pl.colorbar(sc, shrink=0.9)
pl.savefig(pngname, dpi=300)
| AmFamMLTeam/metric-space-magnitude | src/mobius.py | mobius.py | py | 2,198 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "matplotlib.rc",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.path.splitext",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"... |
36696903435 | from typing import Dict, List
from tips.framework.actions.sql_action import SqlAction
from tips.framework.actions.sql_command import SQLCommand
from tips.framework.metadata.additional_field import AdditionalField
from tips.framework.metadata.table_metadata import TableMetaData
from tips.framework.metadata.column_info import ColumnInfo
from tips.framework.utils.sql_template import SQLTemplate
from tips.framework.actions.clone_table_action import CloneTableAction
from tips.framework.utils.globals import Globals
class AppendAction(SqlAction):
    """Builds the SQL commands that append (or overwrite) rows from a source
    table into a target table, optionally creating a same-named temporary
    table first and wrapping positional bind variables in quotes."""

    _source: str
    _target: str
    _whereClause: str
    _isOverwrite: bool
    _metadata: TableMetaData
    _binds: List[str]
    _additionalFields: List[AdditionalField]
    _isCreateTempTable: bool

    def __init__(
        self,
        source: str,
        target: str,
        whereClause: str,
        metadata: TableMetaData,
        binds: List[str],
        additionalFields: List[AdditionalField],
        isOverwrite: bool,
        isCreateTempTable: bool,
    ) -> None:
        self._source = source
        self._target = target
        self._whereClause = whereClause
        self._isOverwrite = isOverwrite
        self._metadata = metadata
        self._binds = binds
        self._additionalFields = additionalFields
        self._isCreateTempTable = isCreateTempTable

    def getBinds(self) -> List[str]:
        """Return the positional bind values for the generated command."""
        return self._binds

    def getCommands(self) -> List[object]:
        """Assemble the action list: an optional CloneTableAction (temp
        table) followed by the rendered INSERT SQLCommand."""
        globalsInstance = Globals()
        cmd: List[object] = []
        ## if temp table flag is set on metadata, than create a temp table with same name as target
        ## in same schema
        if (
            self._isCreateTempTable
            and globalsInstance.isNotCalledFromNativeApp() ## Native apps don't allow create table or create temporary table privilege
        ):
            cmd.append(
                CloneTableAction(
                    source=self._target,
                    target=self._target,
                    tableMetaData=self._metadata,
                    isTempTable=True,
                )
            )
        # Columns shared by source and target drive both clauses.
        commonColumns: List[ColumnInfo] = self._metadata.getCommonColumns(
            self._source, self._target
        )
        fieldLists: Dict[str, List[str]] = self._metadata.getSelectAndFieldClauses(
            commonColumns, self._additionalFields
        )
        selectClause: List[str] = fieldLists.get("SelectClause")
        fieldClause: List[str] = fieldLists.get("FieldClause")
        selectList = self._metadata.getCommaDelimited(selectClause)
        fieldList = self._metadata.getCommaDelimited(fieldClause)
        ## append quotes with bind variable
        # Wrap each positional bind (:1, :2, ...) in single quotes wherever
        # it appears in the where/select/field clauses. The counter stops at
        # the first number found in none of the three strings.
        # NOTE(review): this assumes bind numbers are used contiguously; a
        # gap (e.g. :1 and :3 but no :2) stops the rewriting early. It also
        # mutates self._whereClause in place — confirm both are intended.
        cnt = 0
        while True:
            cnt += 1
            if (
                (self._whereClause is not None and f":{cnt}" in self._whereClause)
                or (selectList is not None and f":{cnt}" in selectList)
                or (fieldList is not None and f":{cnt}" in fieldList)
            ):
                self._whereClause = (
                    self._whereClause.replace(f":{cnt}", f"':{cnt}'")
                    if self._whereClause is not None
                    else None
                )
                selectList = (
                    selectList.replace(f":{cnt}", f"':{cnt}'")
                    if selectList is not None
                    else None
                )
                fieldList = (
                    fieldList.replace(f":{cnt}", f"':{cnt}'")
                    if fieldList is not None
                    else None
                )
            else:
                break
        # Render the final INSERT from the shared template.
        cmdStr = SQLTemplate().getTemplate(
            sqlAction="insert",
            parameters={
                "isOverwrite": self._isOverwrite,
                "target": self._target,
                "fieldList": fieldList,
                "selectList": selectList,
                "source": self._source,
                "whereClause": self._whereClause,
            },
        )
        cmd.append(SQLCommand(sqlCommand=cmdStr, sqlBinds=self.getBinds()))
        return cmd
| ProjectiveGroupUK/tips-snowpark | tips/framework/actions/append_action.py | append_action.py | py | 4,223 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "tips.framework.actions.sql_action.SqlAction",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "tips.framework.metadata.table_metadata.TableMetaData",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 19,
"usage_... |
38029553646 | # 分数/概率视图
from rest_framework import generics, filters, status
from rest_framework.response import Response
from ..models import User, Score, Probability, Question, History, Detail
from ..serializer.results import ScoreSerializer, ProbabilitySerialzer
from ..serializer.history import HistorySerializer, DetailSerializer
class CalcProbability(generics.CreateAPIView):
    """
    Compute the cancer probability for a submitted questionnaire and
    persist the result (History) plus the individual answers (Detail).
    """
    serializer_class = ScoreSerializer

    def smoking_status(self):
        """Classify the respondent as NEVER / LIGHT / HEAVY from the
        'smoking' and 'packYear' answers in the request payload."""
        answers = self.request.data['answers']
        # print(answers)
        if (answers['smoking'] == 1):
            smoking = 'NEVER'
        else:
            if (answers['packYear'] == 3):
                smoking = 'HEAVY'
            else:
                smoking = 'LIGHT'
        return smoking

    def get_queryset(self):
        """Score rows applicable to the respondent's smoking status."""
        query_set = Score.objects.filter(smoke=self.smoking_status())
        return query_set

    def create(self, request, *args, **kwargs):
        # Accumulate the score, then look up the matching probability.
        answers = request.data['answers']
        queryset = self.filter_queryset(self.get_queryset())
        score = 0
        # NOTE(review): only the five-year table is ever queried here,
        # despite the class handling 5/10-year probabilities — confirm.
        year = 'five'
        prob_dict = {}
        for query in queryset.iterator():
            serializer = ScoreSerializer(query)
            for k, v in answers.items():
                if (serializer.data['questionid'] == k and serializer.data['choice'] == v):
                    score += serializer.data['score']
                    # print(serializer.data)
                    # Running probability after this question's contribution.
                    prob_queryset = Probability.objects.filter(
                        year=year, smoke=self.smoking_status(), point=score)
                    prob_serializer = ProbabilitySerialzer(
                        prob_queryset, many=True)
                    prob_dict[k] = prob_serializer.data[0]['probability']
        # Final probability for the total score.
        prob_queryset = Probability.objects.filter(
            year=year, smoke=self.smoking_status(), point=score)
        prob_serializer = ProbabilitySerialzer(prob_queryset, many=True)
        # Persist the poll result.
        userid = request.data['userid']
        user = User.objects.get(userid=userid)
        history_serializer = HistorySerializer()
        history = history_serializer.create(validated_data={
            'smoke': self.smoking_status(),
            'probability': prob_serializer.data[0]['probability'],
            'userid': user
        })
        # print({
        #     'smoke': self.smoking_status(),
        #     'probability': prob_serializer.data[0]['probability'],
        #     'userid': user
        # })
        history = HistorySerializer(history)
        # print(history.data)
        # Persist the individual answers.
        detail_serializer = DetailSerializer()
        for k, v in answers.items():
            question = Question.objects.get(questionid=k)
            # NOTE(review): `item` is never used; kept for byte-compat.
            item = detail_serializer.create(validated_data={
                'pollid_id': history.data['pollid'],
                'choice': v,
                'questionid': question,
                'probability': prob_dict[k]})
        return Response({
            "smoking": self.smoking_status(),
            "probability": prob_dict,
            "pollid": history.data['pollid']
        })
| Frank-LSY/LungCancerModel | back/lungcancer/polls/view/results.py | results.py | py | 3,205 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.generics.CreateAPIView",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "serializer.results.ScoreSerializer",
"line_number": 13,
"usage_type": "n... |
70454780987 | #Libraries----------------------------------------------------------------------
"""
Dependencies and modules necessary for analytical functions to work
"""
#Cheminformatics
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
from rdkit.Chem.Draw import IPythonConsole
from rdkit.Chem import Descriptors
from rdkit.Chem import PandasTools
from rdkit import DataStructs
#Data processing
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import gridspec
# Clustering
from scipy.cluster.hierarchy import dendrogram, linkage
#Similarity analysis----------------------------------------------------------------------
def similarity_dendogram(data):
    """
    Plot a dendrogram of compound similarity and return the similarity matrix.

    Fingerprinting uses Morgan fingerprints (radius 2, 1024 bits); pairwise
    similarity is Tanimoto similarity. Single-linkage clustering is drawn as a
    horizontal dendrogram labelled with compound CIDs.

    :param data: data frame produced by data_prep; must contain the columns
        'Rdkit_mol' (RDKit molecule objects) and 'CID' (compound identifiers).
        A 'Morgan_fpt' column is added as a side effect.
    :return: pandas DataFrame of pairwise similarity values indexed by CID.
    """
    # Fix: np.empty below was used without numpy ever being imported,
    # which raised NameError on every call.
    import numpy as np

    radius = 2
    nBits = 1024
    # Generate fingerprints for the whole compound database.
    ECFP6_data = [AllChem.GetMorganFingerprintAsBitVect(mol, radius=radius, nBits=nBits)
                  for mol in data['Rdkit_mol']]
    data["Morgan_fpt"] = ECFP6_data
    # Build the similarity matrix; hoist the per-iteration list() conversions
    # out of the O(n^2) loop.
    length = len(ECFP6_data)
    cids = list(data.CID)
    fpts = list(data.Morgan_fpt)
    array = pd.DataFrame(index=range(length), columns=range(length))
    array.columns = cids
    array.index = cids
    linkage_array = np.empty(shape=(length, length))
    for i in range(length):
        for j in range(length):
            # Tanimoto similarity between fingerprints i and j.
            similarity = DataStructs.FingerprintSimilarity(fpts[i], fpts[j])
            array.iloc[i, j] = similarity
            linkage_array[i, j] = similarity
    linked = linkage(linkage_array, 'single')
    plt.figure(figsize=(8, 15))
    # Draw the dendrogram.
    ax1 = plt.subplot()
    dendrogram(linked,
               orientation='left',
               labels=cids,
               distance_sort='descending',
               show_leaf_counts=True)
    ax1.spines['left'].set_visible(False)
    ax1.spines['top'].set_visible(False)
    ax1.spines['right'].set_visible(False)
    plt.title('Similarity clustering', fontsize=20, weight='bold')
    plt.tick_params('both', width=2, labelsize=8)
    plt.tight_layout()
    plt.show()
    return (array)
{
"api_name": "rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "rdkit.Chem.AllChem",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 49,
"usage_type": "call"
},
{
... |
34519324302 |
''' Pydantic Models '''
from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel
class User(BaseModel):
    # Demo pydantic model: incoming values are coerced/validated on construction.
    id: int
    name = "Yebin Lee"  # untyped field with a default (pydantic v1 infers str from the default)
    signup_ts : Optional[datetime]=None  # parsed from ISO-like strings, None allowed
    friends: List[int]=[]  # each element is coerced to int where possible
# Raw input with "wrong" types on purpose: pydantic coerces them on construction.
external_data = {
    "id":"123",
    "signup_ts":"2017-06-01 12:22",
    "friends":[1, "2", b"3"], # only int-coercible values are accepted
}
user = User(**external_data) # pass the whole dict as keyword arguments
print(user)
print(user.id)
{
"api_name": "pydantic.BaseModel",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.List",
... |
3981451688 | '''
Given a sorted list of numbers, change it into a balanced binary search tree. You can assume there will be no duplicate numbers in the list.
Here's a starting point:
'''
from collections import deque
class Node:
    """Binary-tree node holding a value and optional left/right children."""

    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right

    def __str__(self):
        """Concatenate node values in breadth-first (level) order."""
        pieces = []
        queue = deque([self])
        while queue:
            current = queue.popleft()
            if current is None:
                continue
            pieces.append(str(current.value))
            queue.extend((current.left, current.right))
        return ''.join(pieces)
def createBalancedBST(nums):
    """Build a balanced BST from a sorted list of unique numbers.

    The midpoint becomes the root; each half recursively becomes a subtree.

    :param nums: sorted list of numbers (no duplicates).
    :return: root Node, or None for an empty list (the original raised
        IndexError on []).
    """
    if not nums:
        return None
    mid = len(nums) // 2
    root = Node(nums[mid])
    # Recurse directly instead of duplicating the midpoint logic in build_BST.
    root.left = createBalancedBST(nums[:mid])
    root.right = createBalancedBST(nums[mid + 1:])
    return root
def build_BST(nums, root, side):
    """Attach a balanced subtree built from *nums* to *root* on *side* ('left'/'right')."""
    if not nums:
        return
    mid = len(nums) // 2
    child = Node(nums[mid])
    if side == 'left':
        root.left = child
    else:
        root.right = child
    # Each half of the remaining values becomes a subtree of the new child.
    build_BST(nums[:mid], child, 'left')
    build_BST(nums[mid + 1:], child, 'right')
# Demo: level-order print of the balanced tree built from 1..7.
print(createBalancedBST([1, 2, 3, 4, 5, 6, 7]))
# Expected output: 4261357, i.e. the tree
#      4
#     / \
#    2   6
#   / \ / \
#  1  3 5  7
{
"api_name": "collections.deque",
"line_number": 17,
"usage_type": "call"
}
] |
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
# Input data locations (path2/path3 are defined but unused in this script).
path1 = 'project_data/DC_Crime.csv'
path2 = 'project_data/DC_Properties.csv'
path3 = 'project_data/DC_crime_test.csv'
data = pd.read_csv(path1)
# Columns excluded from the feature matrix (identifiers, free text and
# target-derived fields that would leak the label).
Features =['SHIFT', 'OFFENSE', 'METHOD','BID',"NEIGHBORHOOD_CLUSTER",'ucr-rank',\
           'sector','ANC','BLOCK_GROUP','BLOCK', 'DISTRICT','location','offensegroup',\
           'PSA','WARD','VOTING_PRECINCT','CCN','END_DATE','OCTO_RECORD_ID','offense-text',\
           'offensekey', 'XBLOCK', 'YBLOCK', 'START_DATE','REPORT_DAT','CENSUS_TRACT']
X = data.drop(columns=Features)
y= data['offensegroup']
# Stratified split preserves the class distribution between train and test.
X_train,X_test,y_train,y_test = train_test_split(X,y,stratify =y)
# Mean-impute missing values; fit on train only to avoid leakage into test.
imp= SimpleImputer()
X_train = imp.fit_transform(X_train)
X_test = imp.transform(X_test)
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier(max_depth=50, random_state=40)
dtree.fit(X_train,y_train)
y_pred=dtree.predict(X_test)
treefeatures=dtree.feature_importances_
# NOTE: this prints the *training* accuracy, not test accuracy.
print(dtree.score(X_train,y_train))
print(list(y_pred))
# Use dtree.predict for predictions; `data` is the loaded dataset.
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.impute.SimpleImputer",
"line_number": 18,
"usage_type": "call"
},
{
... |
17779473058 | #!/usr/bin/python3
"""Query reddit API for work count in hot list using recusion"""
import requests
def count_words(subreddit, word_list, after=None, count=None):
    """Recursively count occurrences of each word in a subreddit's hot titles.

    Pages through the hot listing via `after`; when the last page is reached,
    prints the non-zero counts sorted by count desc, then word asc.

    :param subreddit: subreddit name to query.
    :param word_list: words to tally (case-insensitive).
    :param after: pagination cursor for the recursive calls.
    :param count: accumulator dict threaded through the recursion.
    """
    # Fix: the original used a mutable default (count={}) which is shared
    # across top-level calls, so counts leaked from one invocation to the next.
    if count is None:
        count = {}
    if after is None:
        subred_URL = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)
    else:
        subred_URL = 'https://www.reddit.com/r/{}/hot.json?after={}'.format(
            subreddit, after)
    subreddit_req = requests.get(subred_URL,
                                 headers={"user-agent": "user"},
                                 allow_redirects=False)
    try:
        data = subreddit_req.json().get("data")
    except ValueError:
        # Non-JSON response (invalid subreddit / redirect): print nothing.
        return
    if data is None:
        # Valid JSON but no "data" key (e.g. error payload): nothing to count.
        return
    for word in word_list:
        word = word.lower()
        if word not in count:
            count[word] = 0
    children = data.get("children")
    for child in children:
        title = (child.get("data").get("title").lower())
        title = title.split(' ')
        for word in word_list:
            word = word.lower()
            count[word] += title.count(word)
    after = data.get("after")
    if after is not None:
        return count_words(subreddit, word_list, after, count)
    else:
        # Last page: print "word: n" sorted by count desc, then alphabetically.
        sorted_subs = sorted(count.items(), key=lambda x: (-x[1], x[0]))
        for i in sorted_subs:
            if i[1] != 0:
                print(i[0] + ": " + str(i[1]))
| robertrowe1013/holbertonschool-interview | 0x13-count_it/0-count.py | 0-count.py | py | 1,364 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
}
] |
35471156635 | # -*- coding: utf-8 -*-
from torch import cuda
import transformers
from transformers import AutoTokenizer
from transformers import DataCollatorForTokenClassification, AutoConfig
from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer
from datasets import load_metric, Dataset
import pandas as pd
import numpy as np
import re
import argparse
import csv
import sys
import time
from os import path
import json
device = 'cuda' if cuda.is_available() else 'cpu'
print(device)
seed = 22
transformers.set_seed(seed)
"""
Models Used are:
model_checkpoint = "dbmdz/bert-base-italian-uncased" -> italian
model_checkpoint = "bert-base-uncased" -> english
model_checkpoint = "camembert-base" -> french
model_checkpoint = "GroNLP/bert-base-dutch-cased" -> dutch
model_checkpoint = "deepset/gbert-base -> german
model_checkpoint = "EMBEDDIA/sloberta-> slovene
"""
def sentence_num(row):
    """Return the sentence index part of a 'Sentence-Token' value (e.g. '3-12' -> '3')."""
    token_ref = row['Sentence-Token']
    return token_ref.partition("-")[0]
def to_label_id(row, id_dict):
    """Map a row's Tag to its numeric id; tags missing from id_dict fall back to 'O'."""
    tag = row['Tag']
    return id_dict[tag] if tag in id_dict else id_dict['O']
def to_clean_label(row):
    """Normalize a raw CONLL tag: strip backslashes, keep only the part before
    the first '|', and collapse the 'B-I-' prefix to 'B-'."""
    tag = row['Tag'].replace("\\", "").replace("\\_", "_")
    tag = tag.partition('|')[0]
    return tag.replace("B-I-", "B-")
def replace_punctuation(row):
    """Reduce a token to its alphanumeric core for stable word alignment.

    BERT-style tokenizers also split on punctuation even with
    is_split_into_words=True, so a CONLL word like 'bianco-gialliccio' cannot
    be reconstructed exactly after classification. Stripping every
    non-[a-zA-Z0-9] character from multi-character tokens avoids that.
    Tokens that end up empty (or the literal string 'nan') become a single
    space so the CONLL line count is preserved.
    """
    token = row['Word'].strip()
    if len(token) > 1:
        token = re.sub(r'[^a-zA-Z0-9]', '', token)
    if not token or token == "nan":
        return " "
    return token
"""The script will extract the label list from the data itself. Please be sure your data and labels are clean.
3 Labels: ['Smell_Word', 'Smell_Source', 'Quality']
7 Labels: ['Smell_Word', 'Smell_Source', 'Quality', 'Location', 'Odour_Carrier', 'Evoked_Odorant', 'Time']"""
def read_split_fold(split='train', fold="0", lang="english", label_dict=None):
    """Load one CONLL-style fold file and group it into sentences.

    :param split: 'train', 'dev' or 'test'.
    :param fold: fold identifier used in the file name.
    :param lang: language directory suffix.
    :param label_dict: existing label->id mapping; when None it is derived
        from the file's own tags (training data).
    :return: (grouped_dataframe, labels_to_ids, ids_to_labels); on a missing
        non-train file returns (None, None, None); on a missing train file
        exits the process.
    """
    # Change the path template as needed. Renamed from `path` to avoid
    # shadowing the module-level `from os import path` import.
    data_path = 'data_{}/folds_{}_{}.tsv'.format(lang, fold, split)
    try:
        data = pd.read_csv(data_path, sep='\t', skip_blank_lines=True,
                           encoding='utf-8', engine='python', quoting=csv.QUOTE_NONE,
                           names=['Document', 'Sentence-Token', 'Chars', 'Word', 'Tag', 'Empty'], header=None)
    except Exception:  # narrowed from bare `except:` (no longer swallows SystemExit/KeyboardInterrupt)
        print(f"Cannot read the file {data_path}")
        if split == "train":
            sys.exit()
        # Fix: callers unpack three values; the original returned only two,
        # which raised ValueError at every call site on a missing dev/test file.
        return None, None, None
    time.sleep(5)
    data.drop('Empty', inplace=True, axis=1)
    # For reusability, the label ids are still extracted from the data itself.
    data['Tag'] = data.apply(lambda row: to_clean_label(row), axis=1)
    print("Number of tags: {}".format(len(data.Tag.unique())))
    frequencies = data.Tag.value_counts()
    print(frequencies)
    if not label_dict:
        labels_to_ids = {k: v for v, k in enumerate(data.Tag.unique())}
    else:
        labels_to_ids = label_dict
    ids_to_labels = {v: k for v, k in enumerate(data.Tag.unique())}
    data = data.astype({"Word": str})
    data['Word'] = data.apply(lambda row: replace_punctuation(row), axis=1)
    data['Tag'] = data.apply(lambda row: to_label_id(row, labels_to_ids), axis=1)
    data['Num'] = data.apply(lambda row: sentence_num(row), axis=1)
    # A sentence is uniquely identified by Document + sentence number.
    data = data.astype({"Num": int})
    data.set_index(['Document', 'Num'])
    df = data.groupby(['Document', 'Num'])['Word'].apply(list)
    df2 = data.groupby(['Document', 'Num'])['Tag'].apply(list)
    mergeddf = pd.merge(df, df2, on=['Document', 'Num'])
    mergeddf.rename(columns={'Word': 'sentence', 'Tag': 'word_labels'}, inplace=True)
    print("Number of unique sentences: {}".format(len(mergeddf)))
    return mergeddf, labels_to_ids, ids_to_labels
def tokenize_and_align_labels(examples, tokenizer, label_all_tokens=True):
    """Tokenize pre-split sentences and align word labels to sub-word tokens.

    Special tokens get label -100 (ignored by the loss). The first sub-word of
    each word gets the word's label; later sub-words get the same label when
    label_all_tokens is True, otherwise -100.
    """
    encoded = tokenizer(examples["sentence"], max_length=512, truncation=True, is_split_into_words=True)
    all_labels = []
    for idx, word_labels in enumerate(examples["word_labels"]):
        prev_wid = None
        aligned = []
        for wid in encoded.word_ids(batch_index=idx):
            if wid is None:
                # Special token: ignored by the loss function.
                aligned.append(-100)
            elif wid != prev_wid:
                # First sub-word token of this word.
                aligned.append(word_labels[wid])
            else:
                # Continuation sub-word of the same word.
                aligned.append(word_labels[wid] if label_all_tokens else -100)
            prev_wid = wid
        all_labels.append(aligned)
    encoded["labels"] = all_labels
    return encoded
def cn_hp_space(trial):
    """Optuna search space: learning rate, per-device batch size and epoch count."""
    lr = trial.suggest_categorical("learning_rate", [1e-5, 2e-5, 3e-5, 4e-5, 5e-5])
    batch = trial.suggest_categorical("per_device_train_batch_size", [4, 8])
    epochs = trial.suggest_int("num_train_epochs", 3, 10, log=True)
    return {
        "learning_rate": lr,
        "per_device_train_batch_size": batch,
        "num_train_epochs": epochs,
    }
def main():
    """CLI entry point: hyperparameter search, training, or evaluation of a
    BERT-style token-classification model on one language/fold of the data.

    Exactly one of --hypsearch / --do_train should be given; --do_test may be
    combined with --do_train or run alone against a saved checkpoint.
    """
    parser = argparse.ArgumentParser(description='Training with Folds')
    parser.add_argument("--lang", help="Languages: english,german, slovene, dutch, multilingual, french, italian",
                        default="english")
    parser.add_argument("--fold", help="Fold Name", default="0")
    parser.add_argument("--hypsearch", help="Flag for Hyperparameter Search", action='store_true')
    parser.add_argument("--do_train", help="To train the model", action='store_true')
    parser.add_argument("--do_test", help="To test the model", action='store_true')
    parser.add_argument("--learning_rate", type=float, help="Learning Rate for training.", default=2e-5)
    parser.add_argument("--train_batch_size", type=int, help="Training batch size.", default=4)
    parser.add_argument("--train_epochs", type=int, help="Training epochs.", default=3)
    parser.add_argument("--model", action='store', default="bert-base-multilingual-uncased",
                        help="Model Checkpoint to fine tune. If none is given, bert-base-multilingual-uncased will be used.")
    args = parser.parse_args()
    model_checkpoint = args.model
    fold = str(args.fold)
    language = str(args.lang).strip().lower()
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
    # A *fast* tokenizer is required: label alignment relies on word_ids().
    assert isinstance(tokenizer, transformers.PreTrainedTokenizerFast)
    if language not in ['english', 'german', 'italian', 'slovene', 'dutch', 'french']:
        raise Exception(f"Language error: {language} is not among the project languages.")
    if args.do_train and args.hypsearch:
        raise Exception(f"Action error: Cannot do hyperparameter search and train in a single run. Please first run"
                        f"hypsearch and with the parameters obtained as the best, run do_train.")
    config = AutoConfig.from_pretrained(model_checkpoint)
    labels_to_ids = config.label2id
    ids_to_labels = config.id2label
    def model_init():
        # Fresh model per call so each hyperparameter-search trial starts clean.
        m = AutoModelForTokenClassification.from_pretrained(model_checkpoint, config=config)
        m.to(device)
        return m
    if args.hypsearch or args.do_train:
        # Derive the label maps from the training fold and tokenize train/dev.
        trn, labels_to_ids, ids_to_labels = read_split_fold(fold=fold, lang=language)
        train_dataset = Dataset.from_pandas(trn, split="train")
        val, _, _ = read_split_fold(fold=fold, lang=language, split="dev", label_dict=labels_to_ids)
        val_dataset = Dataset.from_pandas(val, split="validation")
        print(labels_to_ids)
        tokenized_train = train_dataset.map(lambda x: tokenize_and_align_labels(x, tokenizer), batched=True)
        tokenized_val = val_dataset.map(lambda x: tokenize_and_align_labels(x, tokenizer), batched=True)
        label_list = list(labels_to_ids.values())
        config.label2id = labels_to_ids
        config.id2label = ids_to_labels
        config.num_labels = len(label_list)
    model_name = model_checkpoint.split("/")[-1]
    if args.hypsearch:
        # Hyperparameter search: lr/batch/epochs come from cn_hp_space per trial.
        tr_args = TrainingArguments(
            f"{model_name}-{language}-{fold}-hyp",
            evaluation_strategy="epoch",
            save_strategy="epoch",
            per_device_eval_batch_size=8,
            warmup_ratio=0.1,
            seed=22,
            weight_decay=0.01
        )
    elif args.do_train:
        # Plain training with the CLI-provided hyperparameters.
        tr_args = TrainingArguments(
            f"{model_name}-{language}-{fold}",
            evaluation_strategy="epoch",
            save_strategy="epoch",
            learning_rate=args.learning_rate,
            per_device_train_batch_size=args.train_batch_size,
            per_device_eval_batch_size=8,
            num_train_epochs=args.train_epochs,
            warmup_ratio=0.1,
            seed=22,
            weight_decay=0.01
        )
    data_collator = DataCollatorForTokenClassification(tokenizer)
    metric = load_metric("seqeval")
    def compute_metrics(p):
        # Convert logits to label ids and drop the -100 positions before
        # scoring with seqeval (entity-level P/R/F1).
        predictions, labels = p
        predictions = np.argmax(predictions, axis=2)
        # Remove ignored index (special tokens)
        true_predictions = [
            [ids_to_labels[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        true_labels = [
            [ids_to_labels[l] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        results = metric.compute(predictions=true_predictions, references=true_labels)
        return {
            "precision": results["overall_precision"],
            "recall": results["overall_recall"],
            "f1": results["overall_f1"],
            "accuracy": results["overall_accuracy"],
        }
    if args.do_train or args.hypsearch:
        trainer = Trainer(
            model_init=model_init,
            args=tr_args,
            train_dataset=tokenized_train,
            eval_dataset=tokenized_val,
            data_collator=data_collator,
            tokenizer=tokenizer,
            compute_metrics=compute_metrics
        )
    elif args.do_test:
        #for testing: restore the label maps saved next to the checkpoint, if any
        if path.exists(f"{model_checkpoint}/{language}-id2label.json"):
            ids_to_labels = json.load(open(f"{model_checkpoint}/{language}-id2label.json", "r"))
            ids_to_labels = {int(k): v for k, v in ids_to_labels.items()}
            labels_to_ids = {v: int(k) for k, v in ids_to_labels.items()}
        config.label2id = labels_to_ids
        config.id2label = ids_to_labels
        label_list = list(labels_to_ids.values())
        config.num_labels = len(label_list)
        m = AutoModelForTokenClassification.from_pretrained(model_checkpoint, config=config)
        m.to(device)
        trainer = Trainer(m, data_collator=data_collator, tokenizer=tokenizer)
    if args.hypsearch:
        # hyperparam search with compute_metrics: default maximization is through the sum of all the metrics returned
        best_run = trainer.hyperparameter_search(n_trials=10, direction="maximize", hp_space=cn_hp_space)
        best_params = best_run.hyperparameters
        print(f"Best run is with the hyperparams:{best_params}. You either have to find the right run and checkpoint "
              f"from the models saved or retrain with the correct parameters: referring to "
              f"https://discuss.huggingface.co/t/accessing-model-after-training-with-hyper-parameter-search/20081")
    elif args.do_train:
        trainer.train()
    if args.do_test:
        print("TEST RESULTS")
        test, _, _ = read_split_fold(split="test", label_dict=labels_to_ids, lang=language, fold=fold)
        test_dataset = Dataset.from_pandas(test, split="test")
        tokenized_test = test_dataset.map(lambda x: tokenize_and_align_labels(x, tokenizer),
                                          batched=True)
        predictions, labels, _ = trainer.predict(tokenized_test)
        predictions = np.argmax(predictions, axis=2)
        # Same -100 filtering as in compute_metrics, applied to the test split.
        true_predictions = [
            [ids_to_labels[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        true_labels = [
            [ids_to_labels[l] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        results = metric.compute(predictions=true_predictions, references=true_labels)
        print("\n")
        print(results)
# Script entry point.
if __name__ == "__main__":
    main()
| Odeuropa/wp3-information-extraction-system | SmellClassifier/training/train.py | train.py | py | 13,483 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "transformers.set_seed",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.sub",
... |
41734453103 | from __future__ import absolute_import, unicode_literals
from lol_stats_api.helpers.redis import db_metadata, db_matchlist
from celery.decorators import task, periodic_task
from celery.schedules import crontab
from redis import Redis
from celery_singleton import Singleton, clear_locks
import os
from datetime import datetime as dt, timedelta as td
from lol_stats_api.helpers.variables import LAST_IMPORTANT_PATCH, DAYS_TO_REMOVE_DATA
from stats import get_players, get_matches, calculations
from assets.load_data import load_data
from lol_stats_api.helpers.mongodb import get_mongo_stats
from celery.signals import worker_ready
from assets.ddragon_routes import get_current_version
from lol_stats_api.helpers.mongodb import get_saved_version, get_last_calculated_patch
from stats.models import *
from lol_stats_api.celeryApp import app
import json
db_stats = get_mongo_stats()
@worker_ready.connect
def unlock_all(**kwargs):
    """Clear any stale Singleton task locks when the Celery worker boots."""
    print("Test")  # NOTE(review): leftover debug print; consider removing
    clear_locks(app)
# Jugadores
@app.task(name='periodically_update_player_list', base=Singleton)
def periodically_update_player_list():
    """Periodic trigger: enqueue a refresh of the player list."""
    update_player_list.delay()
@app.task(base=Singleton, name="update_player_list")
def update_player_list():
print("Inicio el updateo de jugadores")
get_players.update_player_list()
@task(base=Singleton, name="update_players")
def update_player_detail_in_celery(current_player):
"""
Actualiza la informacion de un jugador
"""
get_players.update_player_detail(current_player)
# Limpieza periodica
@app.task(base=Singleton, name="clear_old_data")
def clear_old_data():
"""
Elimina los datos viejos
"""
timestamp = get_matches.x_days_ago(DAYS_TO_REMOVE_DATA)
more_time_ago = get_matches.x_days_ago(DAYS_TO_REMOVE_DATA + 2)
# Parche 10.23 en adelante
timestamp = max(LAST_IMPORTANT_PATCH, timestamp)
timestamp = max(LAST_IMPORTANT_PATCH, more_time_ago)
print("Eliminando datos anteriores a {}".format(timestamp))
# Timelines
print("Eliminando timelines")
Timeline.objects.filter(gameTimestamp__lt=more_time_ago).delete()
print("Eliminando skill_ups")
SkillUp.objects.filter(timestamp__lt=more_time_ago).delete()
# Bans
print("Eliminando bans")
Ban.objects.filter(timestamp__lt=timestamp).delete()
# Champ data
print("Eliminando champ data")
ChampData.objects.filter(timestamp__lt=timestamp).delete()
# Playstyle
print("Eliminando champ playstyle")
ChampPlaystyle.objects.filter(timestamp__lt=timestamp).delete()
print("Eliminando first buy")
FirstBuy.objects.filter(timestamp__lt=more_time_ago).delete()
@app.task(name='clear_redis_old_data', base=Singleton)
def clear_redis_old_data():
    """Trim each server's Redis match list, dropping entries older than the
    retention window; lists are checked from the tail (oldest first)."""
    for server in db_matchlist.keys():
        print("Revisando server - {}".format(server))
        while True:
            # Pop the oldest match from the tail of this server's list.
            match = db_matchlist.rpop(server)
            if match is None:
                break
            data_match = json.loads(match)
            timestamp = get_matches.x_days_ago(DAYS_TO_REMOVE_DATA)
            # Still inside the retention window: push it back and stop scanning.
            if data_match['timestamp'] > timestamp:
                db_matchlist.rpush(server, match)
                break
            print("Elimino match: {}".format(match))
# Matches
@app.task(base=Singleton, name="process_match")
def process_match_with_celery(match):
"""
Procesa una partida con celery
"""
get_matches.process_match(match)
# Estadisticas
@app.task(base=Singleton, name="periodically_generate_new_stats")
def periodically_generate_new_stats():
"""
Ejecuta periodicamente el calculo de estadisticas
"""
generate_new_stats.delay()
@task(base=Singleton, name="generate_stats")
def generate_new_stats():
"""
Genera las estadisticas
"""
calculations.generate_builds_stats_by_champ()
# Assets
@app.task(base=Singleton, name="periodically_update_assets")
def periodically_update_assets():
"""
Ejecuta periodicamente el update de assets
"""
update_assets.delay()
@app.task(base=Singleton, name="update_assets")
def update_assets():
"""
Actualiza los assets
"""
saved_version = get_saved_version()
game_version = get_current_version()
# Si la version es distinta actualizo
if saved_version != game_version:
load_data()
last_calculated_patch = get_last_calculated_patch()
if last_calculated_patch != game_version:
# Recalculo las estadisticas
generate_new_stats.delay()
| fabran99/LolStatisticsBackend | bard_app_api/lol_stats_api/tasks.py | tasks.py | py | 4,723 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "lol_stats_api.helpers.mongodb.get_mongo_stats",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "celery_singleton.clear_locks",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "lol_stats_api.celeryApp.app",
"line_number": 28,
"usage_type":... |
20763005925 | from datetime import datetime
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import Text
from aiogram.types import Message, CallbackQuery, LabeledPrice, PreCheckoutQuery, ContentType, ShippingQuery
from data.config import PAYMENT_TOKEN
from data.shop_config import IS_PREPAYMENT, CURRENCY, NEED_NAME, NEED_EMAIL, NEED_PHONE_NUMBER, NEED_SHIPPING_ADDRESS, \
RUSSIAN_POST_SHIPPING_OPTION, PICKUP_SHIPPING_OPTION
from keyboards.inline.general import confirmation_cancel_kb
from loader import dp, bot
from states.ordering import Ordering
from utils.db_api.api import db_api as db
@dp.callback_query_handler(text="order", state='*')
async def order(call: CallbackQuery, state: FSMContext):
await confirm_address(call.message, state)
@dp.message_handler(Text(ignore_case=True, contains=['оформить заказ']), state='*')
async def confirm_address(message: Message, state: FSMContext):
    """Summarize the cart, stash invoice prices in FSM state and ask the user
    to confirm the order. Aborts with a message when the cart is empty."""
    if not await db.count_cart():
        await message.answer("Ваша корзина пуста!")
        return
    user = await db.get_current_user()
    cart = await db.get_cart_items_by_user(user.id)
    text = "===============ЗАКАЗ===============\n\n"
    to_pay = 0
    prices = []
    for record in cart:
        product = await db.get_product(record.product_id)
        text += f"{product.name} x {record.amount} \t\t\t\t\t {product.price * record.amount}р.\n"
        to_pay += product.price * record.amount
        # Telegram expects prices in the minimal currency unit, hence * 100.
        prices.append(
            LabeledPrice(label=product.name + f" x {record.amount}", amount=product.price * record.amount * 100))
    async with state.proxy() as data:
        data["prices"] = prices
    text += f"\nСумма: {to_pay}р.\n" \
            f"Оформить заказ?"
    await message.answer(text, reply_markup=confirmation_cancel_kb)
    await Ordering.OrderConfirmation.set()
@dp.callback_query_handler(text="yes", state=Ordering.OrderConfirmation)
async def create_order(call: CallbackQuery, state: FSMContext):
async with state.proxy() as data:
prices = data["prices"]
if IS_PREPAYMENT:
await call.message.answer("Оплатите сумму заказа")
await bot.send_invoice(chat_id=call.from_user.id, title=f"ЗАКАЗ ОТ {datetime.today()}",
description='Или введите "Отмена"',
payload=0, start_parameter=0, currency=CURRENCY,
prices=prices,
provider_token=PAYMENT_TOKEN,
need_name=NEED_NAME,
need_email=NEED_EMAIL,
need_phone_number=NEED_PHONE_NUMBER,
need_shipping_address=NEED_SHIPPING_ADDRESS,
is_flexible=True)
await Ordering.Payment.set()
else:
await db.create_order()
await call.message.answer("Заказ оформлен!")
await state.finish()
@dp.pre_checkout_query_handler(lambda query: True, state=Ordering.Payment)
async def checkout(query: PreCheckoutQuery):
    """Approve every pre-checkout query (no extra stock/price validation)."""
    await bot.answer_pre_checkout_query(query.id, True)
@dp.shipping_query_handler(lambda query: True, state=Ordering.Payment)
async def process_shipping_query(query: ShippingQuery):
    """Offer the two configured shipping options (post delivery or pickup)."""
    await bot.answer_shipping_query(query.id, ok=True, shipping_options=[
        RUSSIAN_POST_SHIPPING_OPTION,
        PICKUP_SHIPPING_OPTION])
@dp.message_handler(content_types=ContentType.SUCCESSFUL_PAYMENT, state=Ordering.Payment)
async def proceed_successful_payment(message: Message, state: FSMContext):
    """After a successful payment: persist the order, thank the user, reset FSM."""
    await db.create_order()
    await bot.send_message(chat_id=message.from_user.id, text="Спасибо за покупку!")
    await state.finish()
| shehamane/kuriBot | src/handlers/users/ordering.py | ordering.py | py | 3,797 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "aiogram.types.CallbackQuery",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "aiogram.dispatcher.FSMContext",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "loader.dp.callback_query_handler",
"line_number": 16,
"usage_type": "call"
},... |
43266445764 | from urllib.request import urlopen
import json
import matplotlib.pyplot as plt
url = "http://cyrilserver.ddns.net:8080/hardware/esp32/all"
# store the response of URL
response = urlopen(url)
arrData = []
# storing the JSON response
# from url in data
data_json = json.loads(response.read())
for i in range(len(data_json)):
arrData.append(data_json[i]['data_esp32'])
plt.plot(arrData)
plt.show()
# print the json response | Monest-eco/Tools | graphData/allData.py | allData.py | py | 434 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.py... |
40025821479 | from django.urls import path
from . import views
from .views import (
TicketCreateView,
AssignCreateView,
StatusCreateView,
StatusLstCreateView,
CgyCreateView,
CgyListView,
TicketListView
)
# Route table for the sticket app; `name=` enables reverse() / {% url %} lookups.
urlpatterns = [
    path('', views.home, name='sticket-home'),
    path('categories', CgyListView.as_view(), name='categories'),
    path('new_stkt', TicketCreateView.as_view(), name='new-stkt'),
    path('new_assgn', AssignCreateView.as_view(), name='new-assgn'),
    path('new_status', StatusCreateView.as_view(), name='new-status'),
    path('add_status', StatusLstCreateView.as_view(), name='add-status'),
    path('add_cgy', CgyCreateView.as_view(), name='add-cgy'),
    path('ticket_list', TicketListView.as_view(), name='ticket_list')
]
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.home",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.CgyListView.a... |
32328440349 | #--------------------------------- 라이브러리 ---------------------------------
# 소켓 관련 라이브러리
from PIL import Image, ImageFile
from io import BytesIO
import socket
from PIL import Image
import pybase64
# 모델 관련 라이브러리
from PIL import Image
import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing import image
import numpy as np
model = tf.keras.models.load_model('./model/ResNet50_Adadelta_Patience10.h5')
# DB 관련 라이브러리
# !pip3 install influxdb-client
# pip install tensorflow
import influxdb_client, os, time
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
#--------------------------------- 변수 설정 ---------------------------------
# 소켓 관련 변수
global buf  # NOTE(review): `global` at module scope is a no-op statement
buf = b''
global data
global result
# Model-related variables
fish_weight = 0
fish_img = './fish.jpg'
img_size = 224
fish_id=0
small_cnt=0 # count of undersized (fry) fish caught
# DB-related variables (InfluxDB connection settings)
bucket="SeaProject"
org = "mint3024@daum.net"
# SECURITY(review): credentials are hard-coded in source; move the token to
# an environment variable / secrets store and rotate it.
token = "Q7-n7NN5Bf-1tTgpr2eOs6-hi6e7S7g8_z2vYR98KsQXM-1j75-ytnnSOue8dMm_cWSjMMGDzqXMTWTa0xU1NA=="
url = "https://europe-west1-1.gcp.cloud2.influxdata.com"
client = influxdb_client.InfluxDBClient(url=url, token=token, org=org)
write_api = client.write_api(write_options=SYNCHRONOUS) # write API: synchronous (real-time) writes
# --------------------------------- 소켓 함수 1 (클라이언트의 데이터 수신) ---------------------------------
# 클라이언트 측의 메시지를 받기 위한 함수
def _get_bytes_stream(sock, length):
    """Receive up to `length` bytes from `sock` into the module-level `buf`.

    NOTE(review): `buf` is global and never reset, so bytes accumulate across
    calls; the return value is always the *first* `length` bytes ever
    received on this process. Confirm this is intentional.
    """
    global buf
    global data
    data = b''
    # Handles client messages larger than a single recv() buffer.
    try:
        step = length
        while True:
            # Receive the next chunk from the client.
            data = sock.recv(step)
            buf += data
            # An empty bytes object means the peer closed the connection.
            if data==b'':
                break
            # More data still expected: shrink the next read to what is missing.
            elif len(buf) < length:
                step = length - len(buf)
    except Exception as e:
        print(e)
    return buf[:length]
#--------------------------------- 소켓 함수 2 ( Model에 서버를 오픈 ) ---------------------------------
def PC_server(HOST,PORT) :
    """Open a one-shot TCP server, accept a single client and read its payload.

    The received bytes end up in the global ``buf`` via _get_bytes_stream.
    """
    server_HOST = HOST # hostname, IP address, or "" (empty string) for all interfaces
    server_PORT = PORT # port to listen on, 1-65535
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # IPv4 / TCP socket
    # Needed to avoid WinError 10048 ("port already in use") on quick restarts.
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Bind the socket to the given interface and port;
    # an empty host accepts connections on every network interface.
    server_socket.bind((server_HOST, server_PORT))
    # Start listening for client connections.
    server_socket.listen()
    # accept() blocks until a client connects, then returns a new socket.
    client_socket, addr = server_socket.accept()
    # Address of the connected client.
    print('Connected by', addr)
    _get_bytes_stream(client_socket,10000)
    client_socket.close()
    server_socket.close()
# -------------------------------- 소켓 함수 3 ( RaspberryPi => Model 데이터값 전송 ) ---------------------------------
def receive_data(data):
    """Decode one payload from the Raspberry Pi: base64 JPEG + 2-byte weight.

    Side effects: updates the globals ``fish_weight`` (grams, little-endian
    from the last two bytes) and ``fish_img`` (PIL image resized for the
    model), and saves the decoded frame to fish.jpg.
    """
    global fish_img
    # BUGFIX: fish_weight was only a local before, so the decoded weight
    # never reached AI_check (which read the module-level 0).
    global fish_weight
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    try:
        fish_weight=int.from_bytes(data[-2:],"little") # fish weight from the 2-byte suffix
        # Pad to a multiple of 4 so base64 decoding tolerates a short chunk.
        buf_new = data + bytes('=','utf-8') * (4-len(data) % 4)
        img = Image.open(BytesIO(pybase64.b64decode(buf_new)))
        img = img.convert('RGB')
    finally:
        ImageFile.LOAD_TRUNCATED_IMAGES = False
    # NOTE(review): saves to 'fish.jpg' (cwd) but fish_img is './fish.jpg' —
    # equivalent only when run from the script directory; confirm.
    img.save('fish.jpg',"JPEG") # persist the decoded frame
    fish_img = image.load_img(fish_img, target_size=(img_size,img_size)) # reload at model input size
# -------------------------------- 소켓 함수 4 ( Model => RaspberryPi 데이터값 전송 ) ---------------------------------
def PC_client(HOST, PORT):
    """Send the model result back to the Raspberry Pi over TCP.

    Reads the global ``result`` ([species_code, size_code]) produced by
    AI_check and transmits both strings as UTF-8 bytes.
    """
    global result
    client_HOST = HOST
    client_PORT = PORT
    fish_type = bytes(result[0],'utf-8')
    fish_check = bytes(result[1],'utf-8')
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect((client_HOST, client_PORT))
    client_socket.send(fish_type)
    client_socket.send(fish_check)
    client_socket.close()
#--------------------------------- Model 함수 ( Input : 이미지, 무게 => Output: 어종, 치어여부 ) ---------------------------------
def AI_check(fish_img, fish_weight):
    """Classify the fish image and decide juvenile/adult from its weight.

    Returns (and stores in the global ``result``) [species, size] where
    species is 'BP'/'RB'/'RS' and size is 'small' (juvenile) or 'adult'.
    """
    global result
    # Preprocess the sample image for the ResNet50 model.
    model_fish_img = image.img_to_array(fish_img)
    # BUGFIX: expand the converted array, not the raw image, so the
    # img_to_array result is not silently discarded.
    model_fish_img = np.expand_dims(model_fish_img, axis=0)
    model_fish_img = preprocess_input(model_fish_img)
    # Run the model.
    result_img = model.predict(model_fish_img)
    # Map the predicted class to a species code and its juvenile weight
    # threshold (presumably grams — TODO confirm units).
    class_id = np.argmax(result_img)
    if class_id == 0:       # black porgy
        fish_type='BP'
        standard_weight= 392
    elif class_id == 1:     # rock bream
        fish_type= 'RB'
        standard_weight= 331
    else:                   # class 2: red seabream (also avoids unbound vars
        fish_type='RS'      # if the model ever emits an unexpected class)
        standard_weight= 210
    # Juvenile check against the species threshold; count handled elsewhere.
    if fish_weight < standard_weight:
        fish_check = 'small'
    else:
        fish_check = 'adult'
    # Result sent back to the Raspberry Pi: species / juvenile flag.
    result = [fish_type, fish_check]
    return result
#--------------------------------- DB 전송을 위한 데이터 가공 함수 ( Input : 어종, 치어여부 => Output: ID, 치어비율, 어종, 치어여부 ) ---------------------------------
def DB_preprocess(fish_type, fish_check):
    """Prepare one classification result for the DB.

    Translates the species/size codes to Korean labels, bumps the global
    fish counters and recomputes the juvenile ratio.

    Returns [fish_id, small_rate, fish_type, fish_check].
    """
    global fish_id
    global small_rate
    global small_cnt
    # Species code -> Korean species name (unknown codes pass through).
    species_names = {'BP': '감성돔', 'RB': '돌돔', 'RS': '참돔'}
    fish_type = species_names.get(fish_type, fish_type)
    # Size code -> Korean label; juveniles also bump the juvenile counter.
    if fish_check == 'adult':
        fish_check = '성어'
    elif fish_check == 'small':
        fish_check = '치어'
        small_cnt += 1
    fish_id += 1                              # total fish counter
    small_rate = (small_cnt / fish_id) * 100  # juvenile percentage
    # id, juvenile ratio, species, juvenile flag
    return [fish_id, small_rate, fish_type, fish_check]
#--------------------------------- DB 전송 함수 ---------------------------------
def send_to_DB(id, small_rate, fish_type, fish_check):
    """Build the InfluxDB points for one classified fish.

    Returns a tuple of three Points (species, juvenile flag, juvenile
    ratio); the caller writes them with the module-level write_api.
    """
    points = (
        Point("어종4") # Point1: ID, species
        .tag(key="id", value=id)
        .field(fish_type, value=int(1)),
        Point("치어여부4") # Point2: ID, juvenile flag
        .tag(key="id", value=id)
        .field(fish_check, value=int(1)),
        Point("치어비율4" ) # Point3: juvenile ratio
        .field("치어_비율", value=small_rate)
    )
    # NOTE(review): this local write_api is never used — the module-level one
    # performs the actual write; this line looks redundant.
    write_api = client.write_api(write_options=SYNCHRONOUS) # configure write API: synchronous writes
    return points
def final ():
    """Run one receive -> classify -> reply -> log-to-DB cycle."""
    global buf
    global fish_img
    global fish_weight
    PC_server('',9999) # open the model-side server; fills the global buf
    while True:
        # BUGFIX: buf is bytes, so the old `not buf==''` guard was always
        # true (b'' != ''); compare against b'' instead. If no data ever
        # arrives this now spins until data appears, matching the loop's
        # apparent wait-for-data intent.
        if buf != b'':
            receive_data(buf) # decode image + weight from the Raspberry Pi
            AI_result = AI_check(fish_img, fish_weight) # run the model
            PC_client('192.168.1.44', 9999) # send the result back to the Pi
            DB_result= DB_preprocess(AI_result[0], AI_result[1]) # shape data for the DB
            points = send_to_DB(DB_result[0], DB_result[1], DB_result[2], DB_result[3])
            write_api.write(bucket=bucket, org=org, record=points) # push to InfluxDB
            # BUGFIX: reset as bytes (was str ''), so _get_bytes_stream's
            # `buf += data` keeps working on the next cycle.
            buf = b''
            break
# final() # 무한반복
# 이러면 특정 measurement
# client = InfluxDBClient(url=url, token=token, org=org)
# delete_api = client.delete_api()
# delete_api.delete('1970-01-01T00:00:00Z', '2022-11-11T00:00:00Z', '_measurement="여기다가 measurement 이름을 넣으세요"',bucket=bucket )
final() | MultiFinal/Fish_Project | PC.py | PC.py | py | 8,991 | python | ko | code | 0 | github-code | 6 | [
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "influxdb_client.InfluxDBClient",
"line_number": 55,
"usage_type": "call"
},
... |
10430336572 | from pathlib import Path
import argparse
import sys
import random
from lib.conll import CoNLLReader
def main():
    """Split a treebank into files keyed by tokens found in sentence comments.

    Reads a whitespace-separated mapping file (comment-token -> split name),
    loads the input treebank, and writes one conll2006 file per split plus a
    "various" bucket for sentences whose comments match no mapping entry.
    """
    parser = argparse.ArgumentParser(description="""Extract data based on comments info""")
    parser.add_argument('input', help="conllu file")
    parser.add_argument('output', help="target file", type=Path)
    parser.add_argument('--input-format', choices=['conll2006', 'conll2006dense', 'conllu'], default="conllu")
    parser.add_argument('--mapping', help="mapping file", required=True)
    args = parser.parse_args()
    # Mapping file: one "commentToken targetSplit" pair per line.
    lines=[line.strip() for line in open(args.mapping)]
    mapping={}
    for line in lines:
        commentpart, target = line.split()
        mapping[commentpart] = target
    print("loaded mapping:", mapping, file=sys.stderr)
    cio = CoNLLReader()
    # Pick the reader matching the declared input format.
    if args.input_format == "conllu":
        orig_treebank = cio.read_conll_u(args.input)
    elif args.input_format == "conll2006":
        orig_treebank = cio.read_conll_2006(args.input)
    elif args.input_format == "conll2006dense":
        orig_treebank = cio.read_conll_2006_dense(args.input)
    num_trees = len(orig_treebank)
    print("Loaded treebank {} with {} sentences".format(args.input,num_trees), file=sys.stderr)
    # One bucket per mapping target, plus a catch-all bucket.
    split = {mapping[k] : [] for k in mapping.keys()}
    default = "various"
    split[default] = []
    for tree in orig_treebank:
        found_mapping=False
        # Scan every whitespace token of the sentence's comment lines.
        for token in " ".join(tree.graph['comment']).strip().split():
            if token in mapping:
                split[mapping[token]].append(tree)
                found_mapping=True
                # NOTE(review): `continue` keeps scanning, so a sentence whose
                # comment matches several mapping tokens is appended several
                # times — if one bucket per sentence is intended, this should
                # probably be `break`. Confirm before changing.
                continue
        if not found_mapping:
            split[default].append(tree)
    # Write each split next to the requested output path, suffixed by its key.
    for key in split:
        print(key, len(split[key]), file=sys.stderr)
        cio.write_conll(split[key], Path(args.output.name + "_" + key), "conll2006")
    #sample = orig_treebank[0:args.k]
    #print("sampled {} trees. seed: {}".format(len(sample), args.seed))
    #cio.write_conll(sample, args.output, "conll2006")
if __name__ == "__main__":
main()
| coastalcph/ud-conversion-tools | extract.py | extract.py | py | 2,049 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sys.stderr",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "lib.conll.CoNLLRe... |
74740608828 | from tkinter import *
from PIL import Image,ImageTk
from tkinter import messagebox
import pymysql
def bookRegister():
    """Read the Add-Book form fields and insert a new row into the book table.

    Triggered by the SUBMIT button built in addBook(); relies on the
    module-level widgets (bookInfo1-4) and DB handles (con, cur, bookTable)
    that addBook() sets up. Shows a messagebox on success/failure and then
    closes the window.
    """
    bookid = bookInfo1.get()
    title = bookInfo2.get()
    title = title.upper()
    author = bookInfo3.get()
    author = author.upper()
    status = bookInfo4
    # SECURITY FIX: use a parameterized query instead of string-concatenated
    # SQL, which was vulnerable to SQL injection via the form fields.
    # (bookTable is an internal constant, not user input, so interpolating
    # the table name here is safe.)
    insertBook = "insert into "+bookTable+" values(%s, %s, %s, %s)"
    print(insertBook) ### debug purpose
    try:
        cur.execute(insertBook, (bookid, title, author, status))
        con.commit()
        messagebox.showinfo("Success","Added the book successfully")
    except pymysql.Error:
        # Narrowed from a bare except so programming errors are not hidden.
        messagebox.showinfo("Error","Cant add to Database, errors occurred")
    print(bookid)
    print(title)
    print(author) ###### debug purposes
    print(status)
    root.destroy()
def addBook():
    """Build the Add-Book window: DB connection, form fields and buttons.

    Publishes the widgets and DB handles as module-level globals so that
    bookRegister() (the SUBMIT callback) can read them.

    SECURITY NOTE(review): database password is hardcoded — move it to a
    config file or environment variable.
    """
    global bookInfo1,bookInfo2,bookInfo3,bookInfo4,Canvas1,con,cur,bookTable,root
    root = Tk() ##this creates a gui window
    root.title("Library")
    root.minsize(width=400,height=400)
    root.geometry("600x500")
    # Add your own database name and password here to reflect in the code
    mypass = "abc_123"
    mydatabase="library_db"
    con = pymysql.connect(host="localhost",user="librarian",password=mypass,database=mydatabase)
    cur = con.cursor()
    # Enter Table Names here
    bookTable = "books" # Book Table
    # Background canvas and heading banner.
    Canvas1 = Canvas(root)
    Canvas1.config(bg="#ff6e40")
    Canvas1.pack(expand=True,fill=BOTH)
    headingFrame1 = Frame(root,bg="#050300",bd=5)
    headingFrame1.place(relx=0.25,rely=0.1,relwidth=0.5,relheight=0.13)
    headingLabel = Label(headingFrame1, text="Add Books", bg='yellow', fg='blue', font=('Courier',15))
    headingLabel.place(relx=0,rely=0, relwidth=1, relheight=1)
    labelFrame = Frame(root,bg='black')
    labelFrame.place(relx=0.1,rely=0.4,relwidth=0.8,relheight=0.4)
    # Book ID
    lb1 = Label(labelFrame,text="Book ID : ", bg='black', fg='white')
    lb1.place(relx=0.05,rely=0.2, relheight=0.08)
    bookInfo1 = Entry(labelFrame) ## creates the text entry box
    bookInfo1.place(relx=0.3,rely=0.2, relwidth=0.62, relheight=0.08)
    # Title
    lb2 = Label(labelFrame,text="Title : ", bg='black', fg='white')
    lb2.place(relx=0.05,rely=0.35, relheight=0.08)
    bookInfo2 = Entry(labelFrame)
    bookInfo2.place(relx=0.3,rely=0.35, relwidth=0.62, relheight=0.08)
    # Book Author
    lb3 = Label(labelFrame,text="Author : ", bg='black', fg='white')
    lb3.place(relx=0.05,rely=0.50, relheight=0.08)
    bookInfo3 = Entry(labelFrame)
    bookInfo3.place(relx=0.3,rely=0.50, relwidth=0.62, relheight=0.08)
    # New books always start out available.
    bookInfo4 = 'avail'
    #Submit Button
    SubmitBtn = Button(root,text="SUBMIT",bg='#d1ccc0', fg='red',command=bookRegister)
    SubmitBtn.place(relx=0.28,rely=0.9, relwidth=0.18,relheight=0.08)
    quitBtn = Button(root,text="Quit",bg='#f7f1e3', fg='red', command=root.destroy)
    quitBtn.place(relx=0.53,rely=0.9, relwidth=0.18,relheight=0.08)
root.mainloop() | DarkCodeOrg/library_management_system | AddBook.py | AddBook.py | py | 3,270 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 30,
"usage_type": "call"
},
{
"api_na... |
30906484831 | from reportlab.platypus import (SimpleDocTemplate, Paragraph, PageBreak, Image, Spacer, Table, TableStyle)
from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
from reportlab.pdfgen import canvas
from reportlab.graphics.shapes import Line, LineShape, Drawing
from reportlab.lib.pagesizes import LETTER, inch
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.lib.colors import Color
from scripts.python.pdfGen.table_generation import table_handler
from datetime import datetime
pdfmetrics.registerFont(TTFont('Poppins-Bold', '/home/eggzo/airflow/scripts/python/pdfGen/fonts/Poppins-Bold.ttf'))
pdfmetrics.registerFont(TTFont('Poppins-Light', '/home/eggzo/airflow/scripts/python/pdfGen/fonts/Poppins-Light.ttf'))
pdfmetrics.registerFont(TTFont('Poppins-Medium', '/home/eggzo/airflow/scripts/python/pdfGen/fonts/Poppins-Medium.ttf'))
pdfmetrics.registerFont(TTFont('Poppins-Regular', '/home/eggzo/airflow/scripts/python/pdfGen/fonts/Poppins-Regular.ttf'))
pdfmetrics.registerFont(TTFont('Poppins-SemiBold', '/home/eggzo/airflow/scripts/python/pdfGen/fonts/Poppins-SemiBold.ttf'))
class page_format_handler(canvas.Canvas):
    """reportlab canvas that adds a front page, header and page-count footer.

    Pages are buffered in showPage() and replayed in save(), when the total
    page count is finally known (standard numbered-canvas technique).
    """
    def __init__(self, *args, **kwargs):
        canvas.Canvas.__init__(self, *args, **kwargs)
        self.pages = []  # snapshots of canvas state, one per finished page
        self.width, self.height = LETTER
    def showPage(self):
        # Buffer the page state instead of emitting it immediately.
        self.pages.append(dict(self.__dict__))
        self._startPage()
    def save(self):
        """Replay buffered pages, decorating each, then emit the PDF."""
        page_count = len(self.pages)
        for page in self.pages:
            self.__dict__.update(page)
            if (self._pageNumber > 1):
                self.draw_canvas(page_count)
            elif (self._pageNumber <= 1):
                self.draw_front_page()
            canvas.Canvas.showPage(self)
            # NOTE(review): the standard numbered-canvas recipe calls
            # Canvas.save once *after* the loop; verify this placement.
            canvas.Canvas.save(self)
    # Function to handle header and footer sans front page
    def draw_canvas(self, page_count):
        page = "Page %s of %s" % (self._pageNumber, page_count)
        copyright = f'© 2022 - {datetime.today().year} Holmly Ltd. All Rights Reserved'
        x = 128
        self.saveState()
        self.setStrokeColorRGB(0, 0, 0)
        self.setLineWidth(0.5)
        self.setFont('Poppins-Bold', 16)
        self.drawImage("/home/eggzo/airflow/scripts/python/pdfGen/sps_logo.png", self.width - inch * 9 + 30, self.height - 45, width=100, height=35,
                       preserveAspectRatio=True, mask='auto')
        self.drawString(66, 755, "Scottish Property Sourcing")
        self.line(30, 740, LETTER[0] - 50, 740)
        self.line(66, 78, LETTER[0] - 66, 78)
        self.setFont('Poppins-Light', 10)
        self.drawString(LETTER[0] - x, 65, page)
        self.drawString(66, 65, copyright)
        self.restoreState()
    # Function to handle formatting for the front page
    def draw_front_page(self):
        self.saveState()
        self.setFont('Poppins-Light', 10)
        self.drawImage("/home/eggzo/airflow/scripts/python/pdfGen/sps_logo.png", inch * 4 - 20, -inch * 0.3, width=700, height=700,
                       preserveAspectRatio=True, mask='auto')
        self.restoreState()
class colour_handler():
    """Tiny registry mapping colour names to reportlab Color objects."""

    def __init__(self):
        # name -> Color lookup, populated via add_colour().
        self.colour_theme = {}

    def add_colour(self, colour_name, r, g, b, alpha_val):
        """Register a colour from 0-255 RGB components plus an alpha value."""
        self.colour_theme[colour_name] = Color(r / 255, g / 255, b / 255, alpha=alpha_val)

    def gc(self, colour_name):
        """Return a previously registered colour by name (KeyError if absent)."""
        return self.colour_theme[colour_name]
def front_page(elements):
    """Append the report cover page flowables to *elements* and return it."""
    title_style = ParagraphStyle('title', fontName='Poppins-Bold', fontSize=70, leading=72,
                                 alignment=TA_LEFT, leftIndent=0)
    subtitle_style = ParagraphStyle('title', fontName='Poppins-SemiBold', fontSize=36, leading=72,
                                    alignment=TA_LEFT, leftIndent=0)
    summary_style = ParagraphStyle('summary', fontName='Poppins-Light', fontSize=12, leading=20, justifyBreaks=1,
                                   alignment=TA_LEFT, justifyLastLine=1)
    title_text = 'Scottish Property Sourcing'
    subtitle_text = 'Daily Report'
    # Publication date is evaluated at call time (today's date).
    summary_text = f"""
    Report Type: Top Properties For Sale<br/>
    Publication Date: {datetime.today().strftime("%b %d %Y")}<br/>
    """
    title = Paragraph(title_text, title_style)
    elements.append(title)
    spacer = Spacer(10, 280)
    elements.append(spacer)
    subtitle = Paragraph(subtitle_text, subtitle_style)
    elements.append(subtitle)
    spacer = Spacer(10, 10)
    elements.append(spacer)
    paragraph_report_summary = Paragraph(summary_text, summary_style)
    elements.append(paragraph_report_summary)
    # Everything after the cover starts on a new page.
    elements.append(PageBreak())
    return elements
def information_page(elements, colours, title, description, input_dataframe):
    """Append a titled data page (heading, rule, description, table).

    colours is a colour_handler; input_dataframe is rendered by
    table_handler. Returns the (mutated) elements list.
    """
    page_title_style = ParagraphStyle('Hed0', fontSize=16, alignment=TA_LEFT, borderWidth=3,
                                      textColor=colours.gc('colorGreen0'))
    normal_style = ParagraphStyle('summary', fontName='Poppins-Light', fontSize=12, leading=20, justifyBreaks=1,
                                  alignment=TA_LEFT, justifyLastLine=1)
    page_title = Paragraph(title, page_title_style)
    page_description = Paragraph(description, normal_style)
    elements.append(page_title)
    spacer = Spacer(10, 10)
    elements.append(spacer)
    # Double horizontal rule under the heading: thick line then thin line.
    d = Drawing(500, 1)
    line = Line(-15, 0, 483, 0)
    line.strokeColor = colours.gc('colorBlue0')
    line.strokeWidth = 2
    d.add(line)
    elements.append(d)
    spacer = Spacer(10, 1)
    elements.append(spacer)
    d = Drawing(500, 1)
    line = Line(-15, 0, 483, 0)
    line.strokeColor = colours.gc('colorBlue0')
    line.strokeWidth = 0.5
    d.add(line)
    elements.append(d)
    spacer = Spacer(10, 10)
    elements.append(spacer)
    elements.append(page_description)
    # Render the dataframe as a styled table.
    elements = table_handler(elements, input_dataframe, colours)
    return elements
| GregorMonsonFD/holmly_sourcing_legacy | scripts/python/pdfGen/page_format_handler.py | page_format_handler.py | py | 5,949 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "reportlab.pdfbase.pdfmetrics.registerFont",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "reportlab.pdfbase.pdfmetrics",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "reportlab.pdfbase.ttfonts.TTFont",
"line_number": 13,
"usage_type"... |
30804251376 | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
# NOTE(review): PhantomJS support was removed from recent Selenium releases;
# consider a headless Chrome/Firefox driver when upgrading.
driver = webdriver.PhantomJS()
driver.get('http://fund.eastmoney.com/fund.html')
# Read the pager label and keep only its digits as the total page count.
page_text = driver.find_element_by_id('pager').find_element_by_xpath('span[@class="nv"]').text
page_count = ''.join(filter(str.isdigit, page_text))
# Fetch page data in a loop.
def get_data(start, end):
    """Jump to pages start..end via the site's pager and save each table's HTML.

    Writes one ./htmls/page_<n>.txt file per page (directory must exist).
    """
    for x in range(start, end+1):
        tonum = driver.find_element_by_id('tonum') # page-number text box
        btn_jump = driver.find_element_by_id('btn_jump') # jump button
        tonum.clear()
        tonum.send_keys(str(x))
        btn_jump.click()
        # Wait until page x is marked active in the pager (class contains "at").
        WebDriverWait(driver, 20).until(lambda driver:driver.find_element_by_id('pager').find_element_by_xpath('span[@value="{0}" and @class != "end page"]'.format(x))\
                .get_attribute("class").find("at") != -1)
        with open('./htmls/page_{0}.txt'.format(x), 'wb') as f:
            f.write(driver.find_element_by_id('tableDiv').get_attribute('innerHTML').encode('utf-8'))
            # NOTE(review): f.close() is redundant inside the `with` block.
            f.close()
get_data(1, 5)
| bobchi/learn_py | 14.py | 14.py | py | 1,125 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.PhantomJS",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 21,
"usage_type": "call"
}
] |
3847347050 | import matplotlib
matplotlib.use('Agg')
import numpy as np
import tinyarray
import matplotlib.pyplot as plt
from scipy.sparse import spdiags
from scipy.sparse import eye
from scipy.sparse import kron
from scipy.sparse.linalg import inv
from scipy.sparse import csr_matrix
import adaptive
from functools import partial
from scipy.interpolate import griddata
import sys
from mpi4py.futures import MPIPoolExecutor
import argparse
# Pauli matrices (identity, x, y, z) used to build the BdG Hamiltonian.
s0 = tinyarray.array([[1, 0], [0, 1]]);
sx = tinyarray.array([[0, 1], [1, 0]]);
sy = tinyarray.array([[0, -1j], [1j, 0]]);
sz = tinyarray.array([[1, 0], [0, -1]]);
def hdis(a,mu,delta,vz,alpha_R,dim,vimp):
    """Sparse BdG Hamiltonian of a disordered Rashba nanowire (4*dim x 4*dim).

    a: lattice spacing; mu: chemical potential; delta: superconducting gap;
    vz: Zeeman field; alpha_R: Rashba coupling; dim: number of lattice
    sites; vimp: per-site disorder potential (length dim).
    """
    t=25/a**2            # hopping amplitude — presumably sets the bandwidth; confirm units
    alpha=alpha_R/(2*a)  # discretized Rashba strength
    # Nearest-neighbour stencils: symmetric (+1/+1) and antisymmetric (+1/-1).
    band11sm=spdiags(np.vstack([np.ones(dim),np.ones(dim)]),np.array([-1,1]),dim,dim,format = 'csr')
    band1m1sm=spdiags(np.vstack([np.ones(dim),-np.ones(dim)]),np.array([-1,1]),dim,dim,format = 'csr')
    eyesm=eye(dim)
    # Effective onsite potential: chemical potential minus disorder.
    mulist=mu*np.ones(dim)-vimp
    diagmulist=spdiags(mulist,0,dim,dim)
    # Assemble kinetic+Rashba, Zeeman and pairing terms in particle-hole space.
    return kron(sz,(kron(eye(2),-t*band11sm+(2*t)*eyesm-diagmulist)+kron(sy,1j*alpha*band1m1sm)))\
            +kron(eye(2),kron(sz,vz*eyesm))+kron(sx,kron(eye(2),delta*eyesm))
def ldosall_dis(a,mu,Delta,Vz,alpha_R,mulist,dim,omega,delta):
    """Local density of states at energy *omega* for every lattice site.

    Computes the retarded Green's function G = [(omega + i*delta) - H]^-1
    and returns -Im(tr_4 G)/pi per site (length-dim array). delta is the
    small imaginary broadening.
    """
    ham=hdis(a,mu,Delta,Vz,alpha_R,dim,mulist);
    hh=csr_matrix((omega+1j*delta)*eye(4*dim)-ham)
    # NOTE(review): full sparse inverse is expensive; only the diagonal is
    # used — a selected-inversion or solve-based approach could be cheaper.
    G=inv(hh)
    Gdiag=(G).diagonal()
    # Sum the 4 Nambu/spin components per site, take the spectral part.
    return -np.sum((np.reshape(Gdiag,(4,-1))),0).imag/np.pi
def LDOS_dis(p,a,mu,Delta,alpha_R,mulist,dim,delta):
    """Adaptive-sampler objective: p = (Vz, energy).

    Returns [mean LDOS, LDOS at left end, middle, right end].
    """
    Vz,energy=p
    z=ldosall_dis(a,mu,Delta,Vz,alpha_R,mulist,dim,energy,delta)
    return np.array([z.mean(),z[0],z[int(dim/2)],z[-1]])
def main():
    """Adaptively sample LDOS over the (Vz, bias) plane with MPI workers,
    then interpolate to a regular grid and save plots plus .dat files.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--loss', default=0.1)
    parser.add_argument('--dim', default=100)
    parser.add_argument('--mu', default=1)
    parser.add_argument('--Delta', default=0.2)
    parser.add_argument('--alpha_R', default=5)
    parser.add_argument('--muVar', default=0)
    parser.add_argument('--mulist', default=0)
    parser.add_argument('--NmuVar', default=0)
    parser.add_argument('--Vzmax', default=2.048)
    parser.add_argument('--Vbiasmax', default=0.3)
    args = parser.parse_args();
    # Echo the run parameters (useful in batch-job logs).
    print("loss = %s" % args.loss)
    print("dim = %s" % args.dim)
    print("mu = %s" % args.mu)
    print("Delta = %s" % args.Delta)
    print("alpha_R = %s" % args.alpha_R)
    print("muVar = %s" % args.muVar)
    print("mulist = %s" % args.mulist)
    print("NmuVar = %s" % args.NmuVar)
    print("Vzmax = %s" % args.Vzmax)
    print("Vbiasmax = %s" % args.Vbiasmax)
    loss=float(args.loss)
    dim=int(args.dim)
    mu=float(args.mu)
    Delta=float(args.Delta)
    alpha_R=float(args.alpha_R)
    muVar=float(args.muVar)
    NmuVar=float(args.NmuVar)
    Vzmax=float(args.Vzmax)
    Vbiasmax=float(args.Vbiasmax)
    # Disorder: load from file, draw fresh from N(0, muVar), or use as-is.
    if isinstance(args.mulist,str):
        muVarfn=args.mulist
        print('Use disorder file:',muVarfn)
        try:
            mulist=np.loadtxt(muVarfn)
        # NOTE(review): bare except hides real errors and leaves `mulist`
        # undefined on failure; narrowing to OSError would be safer.
        except:
            print('Cannot find disorder file: ',muVarfn)
    elif muVar!=0:
        # Downsample NmuVar random values onto the dim lattice sites.
        mulist=np.random.normal(0,muVar,int(NmuVar))
        mulist=[mulist.flatten()[int(NmuVar/dim*x)] for x in range(dim)]
    else:
        mulist=args.mulist
    # Run-name stem used for the save file and all outputs.
    fn='loss'+str(loss)+'m'+str(mu)+'D'+str(Delta)+'muVar'+str(muVar)+'L'+str(dim)
    fname=fn+'.sav'
    learner = adaptive.Learner2D(partial(LDOS_dis,a=1,mu=mu,Delta=Delta,alpha_R=alpha_R,mulist=mulist,dim=dim,delta=1e-3),\
            bounds=[(0., Vzmax), (-Vbiasmax, Vbiasmax)])
    learner.load(fname)  # resume a previous run if the save file exists
    runner = adaptive.Runner(learner, executor=MPIPoolExecutor(),shutdown_executor=True,\
        goal=lambda l: l.loss() < loss)
    runner.start_periodic_saving(dict(fname=fname), interval=600)
    runner.ioloop.run_until_complete(runner.task)
    learner.save(fname)
    # Unpack the sampled (Vz, bias) -> [DOS, L, M, R] points.
    dd=np.array(list(learner.data.items()))
    dz=dd[:,1]
    dx=np.empty(dd.shape[0])
    dy=np.empty(dd.shape[0])
    for i in range(dd.shape[0]):
        dx[i],dy[i]=dd[i,0]
    dz=np.vstack(dz)
    # Interpolate the scattered samples onto a 401x401 regular grid.
    dxx, dyy = np.meshgrid(np.linspace(0,Vzmax,401),np.linspace(-Vbiasmax,Vbiasmax,401))
    dzz0 = griddata((dx,dy),dz[:,0],(dxx,dyy), method='linear')
    dzz1 = griddata((dx,dy),dz[:,1],(dxx,dyy), method='linear')
    dzz2 = griddata((dx,dy),dz[:,2],(dxx,dyy), method='linear')
    dzz3 = griddata((dx,dy),dz[:,3],(dxx,dyy), method='linear')
    # Colormap figures: total DOS plus left/middle/right LDOS.
    fig,ax=plt.subplots()
    ax.pcolormesh(dxx,dyy,dzz0)
    fig.savefig(fn+'_DOS.png')
    fig,ax=plt.subplots()
    ax.pcolormesh(dxx,dyy,dzz1)
    fig.savefig(fn+'_LDOS_L.png')
    fig,ax=plt.subplots()
    ax.pcolormesh(dxx,dyy,dzz2)
    fig.savefig(fn+'_LDOS_M.png')
    fig,ax=plt.subplots()
    ax.pcolormesh(dxx,dyy,dzz3)
    fig.savefig(fn+'_LDOS_R.png')
    # Raw gridded data for downstream analysis.
    np.savetxt(fn+'_Vz.dat',dxx)
    np.savetxt(fn+'_Vbias.dat',dyy)
    np.savetxt(fn+'_DOS.dat',dzz0)
    np.savetxt(fn+'_LDOS_L.dat',dzz1)
    np.savetxt(fn+'_LDOS_M.dat',dzz2)
    np.savetxt(fn+'_LDOS_R.dat',dzz3)
    # Also keep the un-gridded scatter points.
    scatterpts=np.vstack([dx,dy,dz.T]).T
    np.savetxt(fn+'_s.dat',scatterpts)
    if muVar!=0:
        np.savetxt(fn+'_randlist.dat',mulist)
if __name__=="__main__":
main()
| hainingpan/nanowire_matlab | Ldos_dis.py | Ldos_dis.py | py | 5,110 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "tinyarray.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tinyarray.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tinyarray.array",
"... |
3029439171 | #!/usr/bin/env python3
import os
import csv
import sys
import matplotlib.pyplot as plt
# Get theta values from file (if it exists)
def readTheta (thetaFile):
    """Read theta0, theta1 and the data-file path from a key,value CSV.

    Missing file or missing keys fall back to (0, 0, ""). Exits with a
    message if a theta value is not numeric.
    """
    theta0 = 0
    theta1 = 0
    dataFile = ""
    if os.path.isfile(thetaFile):
        with open(thetaFile, newline='') as csvfile:
            spamreader = csv.reader(csvfile)
            for row in spamreader:
                if row[0] == 'dataFile':
                    dataFile = row[1]
                if (row[0] == 'theta0'):
                    try:
                        theta0 = float(row[1])
                    except ValueError:
                        # BUGFIX: `filename` was undefined here (NameError);
                        # report the actual path that was read.
                        print(thetaFile, "is invalid.")
                        sys.exit()
                if (row[0] == 'theta1'):
                    try:
                        theta1 = float(row[1])
                    except ValueError:
                        print(thetaFile, "is invalid.")
                        sys.exit()
    return theta0, theta1, dataFile
# Reading data CSV for x and y values
def readData (dataFile):
    """Load the mileage (x) and price (y) columns from the training CSV.

    Rows that do not parse as two integers (e.g. the header) are skipped.
    Exits with a message if the file does not exist.
    """
    if not os.path.isfile(dataFile):
        print("Error : data file doesn't exist")
        sys.exit()
    mileages, prices = [], []
    with open(dataFile, newline='') as handle:
        for record in csv.reader(handle):
            try:
                mileages.append(int(record[0]))
                prices.append(int(record[1]))
            except ValueError:
                pass  # header or malformed row
    return mileages, prices
# Loop until user supplies mileage value
def getMileage ():
    """Prompt on stdin until the user types an integer mileage; return it."""
    waiting_for_input = True
    while (waiting_for_input):
        mileage = input("Type a mileage to see the estimated price : ")
        try:
            mileage = int(mileage)
            waiting_for_input = False
        except ValueError:
            # Non-numeric input: ask again.
            print("Please enter an integer !\n")
    return mileage
def displayEstimate (x, y, theta0, theta1, mileage):
    """Plot the data, the fitted line, and the estimate at *mileage* (blocking).

    x/y are the training mileages/prices; theta0/theta1 the fitted
    intercept/slope.
    """
    price = theta0 + (theta1 * mileage)
    plt.title('Relationship between a car mileage and its price', fontdict = {'family':'serif','color':'black','size':16})
    plt.xlabel('Mileage in km', fontdict = {'family':'serif','color':'green','size':13})
    plt.ylabel('Price in $', fontdict = {'family':'serif','color':'green','size':13})
    # Fitted line drawn between the extreme training mileages.
    plt.plot([min(x), max(x)], [theta0 + theta1 * min(x), theta0 + theta1 * max(x)], color='C1', label="f(x) = {0}*x + {1}".format(round(theta1, 2), round(theta0, 2)))
    plt.plot(x, y, 'o', color='C0')
    # Dashed guides marking the queried mileage and the estimated price.
    plt.stem([mileage], [price], bottom=(theta0 + theta1 * max(x)), orientation='vertical', linefmt='--C2', markerfmt='oC2')
    plt.stem([price], [mileage], bottom=min(x), orientation='horizontal', linefmt='--C2', markerfmt='oC2')
    plt.legend()
    plt.show()
##########################################################################
################## MAIN ################
##########################################################################
# Theta file (written by the training script)
thetaFile = './theta.csv'
# Get data
theta0, theta1, dataFile = readTheta(thetaFile)
mileage = getMileage()
# Output estimation based on theta values: price = theta0 + theta1 * km
print("\nBased on current predictions, a car with a mileage of", mileage, "kilometers would be worth :")
print("$", int(theta0 + (theta1 * mileage)))
if (theta0 == 0 and theta1 == 0):
    print("\n(Without a trained model, estimating won't get us far...)")
# Optional plot when invoked as: estimate.py --visualize
if len(sys.argv) == 2 and sys.argv[1] == '--visualize':
    x, y = readData(dataFile)
    displayEstimate(x, y, theta0, theta1, mileage)
{
"api_name": "os.path.isfile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": ... |
30571932153 | import asyncio
import datetime
from queue import PriorityQueue
import validators
import youtube_dl
class Player:
    """Discord music player: queues youtube-dl streams and plays them in order.

    NOTE(review): queue / play_next_song / next_song are *class-level*
    attributes, so they are shared across all Player instances — confirm a
    single shared queue is intended.
    """
    queue = asyncio.Queue()
    # queue = PriorityQueue()
    play_next_song = asyncio.Event()
    next_song = None  # title of the upcoming track, or None when queue is empty
    youtube_dl.utils.bug_reports_message = lambda: ''
    # youtube-dl options: best audio stream only, no download, no playlists.
    ytdl_format_options = {
        'format': 'bestaudio/best',
        'restrictfilenames': True,
        'noplaylist': True,
        'nocheckcertificate': True,
        'ignoreerrors': False,
        'logtostderr': False,
        'quiet': False,
        'no_warnings': True,
        'default_search': 'auto',
        'source_address': '0.0.0.0',
        'verbose': True,
        'skip_download': True
    }
    # ffmpeg flags: auto-reconnect on stream drops, audio only.
    ffmpeg_options = {
        'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
        'options': '-vn'
    }
    ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
    def __init__(self, discord, client):
        # discord module and bot client are injected by the caller.
        self.discord = discord
        self.client = client
    async def add_song_to_queue(self, ctx, url, voice_client):
        """Resolve *url* (direct link or search term) and enqueue it.

        Returns the youtube-dl info dict of the resolved track.
        """
        is_valid_url = validators.url(url)
        if is_valid_url:
            info = self.ytdl.extract_info(url, download=False)
        else:
            # Not a URL: treat it as a YouTube search and take the first hit.
            info = self.ytdl.extract_info('ytsearch:{0}'.format(url), download=False)['entries'][0]
        audio = self.discord.FFmpegPCMAudio(info['url'], **self.ffmpeg_options)
        if self.queue.empty():
            self.next_song = info['title']
        await self.queue.put({
            'audio': audio,
            'info': info,
            'ctx': ctx,
            'voice_client': voice_client
        })
        return info
    async def player_queue_task(self):
        """Consumer loop: play queued songs one at a time, forever."""
        while True:
            self.play_next_song.clear()
            current = await self.queue.get()
            if self.queue.empty():
                self.next_song = None
            await self.play_song(current['ctx'], current['info'], current['voice_client'], current['audio'])
            # Wait until toggle_next fires when playback finishes.
            await self.play_next_song.wait()
    async def play_song(self, ctx, info, voice_client, audio):
        """Announce the track with an embed and start voice playback."""
        async with ctx.typing():
            embed = self.discord.Embed(title=':notes: Teraz leci ta piosenka :notes:',
                                       colour=self.discord.Color.green(),
                                       description='```css\n{0}\n```'.format(info['title']),
                                       url=info['webpage_url'])
            embed.set_image(url=info['thumbnail'])
            embed.add_field(name='Czas trwania:', value=datetime.timedelta(seconds=info['duration']), inline=True)
            embed.add_field(name='Dodał:', value='<@{0}>'.format(ctx.message.author.id), inline=True)
            embed.add_field(name='Następne w kolejce:', value=self.get_next_song())
            await ctx.send(embed=embed)
            voice_client.play(audio, after=self.toggle_next)
    def get_next_song(self):
        """Return the upcoming track title, or the Polish 'queue is empty' text."""
        if not self.next_song:
            return 'Kolejka jest pusta'
        else:
            return self.next_song
    def toggle_next(self, idk_why_i_need_this):
        # discord.py invokes the `after` callback with an error argument,
        # hence the unused parameter. Wake the queue loop thread-safely.
        self.client.loop.call_soon_threadsafe(self.play_next_song.set)
| MKA01/grajdelko | pl/savera/grajdelko/player/Player.py | Player.py | py | 3,155 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "asyncio.Queue",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "asyncio.Event",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "youtube_dl.utils",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "youtube_dl.YoutubeD... |
#!/usr/bin/python
# This program sends the string 12345678 to TTN (The Things Network)
# Import libraries
import serial
import time
# Status flags
is_join = False # network can be joined
is_exist = False # the Grove LoRa E5 board was detected
# Poll interval while waiting for serial data (seconds)
read_timeout = 0.2
# Serial port instance used to drive the board with AT commands
lora = serial.Serial(
    port='/dev/serial0',
    baudrate=9600,
    bytesize=8,
    parity='N',
    timeout=1,
    stopbits=1,
    xonxoff=False,
    rtscts=False,
    dsrdtr=False
)
def envoi_test_reponse(chaine_verif, timeout_ms, commande):
    """Send an AT command and check the board's reply within a timeout.

    chaine_verif: substring expected in the reply; timeout_ms: how long to
    collect serial output; commande: AT command (CRLF appended).
    Returns 1 when the expected substring appears, 0 otherwise.
    """
    startMillis = int(round(time.time() * 1000))
    # Nothing to check for: bail out.
    if chaine_verif == "":
        return 0
    # Send the command, CRLF-terminated.
    fin_ligne = "\r\n"
    cmd = "%s%s" % (commande, fin_ligne)
    print ("Commande envoyée = ",cmd)
    lora.write(cmd.encode())
    # Collect the reply.
    reponse = ""
    quantity = lora.in_waiting
    # Read from the board until the timeout elapses.
    while int(round(time.time() * 1000)) - startMillis < timeout_ms:
        # Data available?
        if quantity > 0:
            # Append it to the reply string.
            reponse += lora.read(quantity).decode('utf-8')
            print("Reponse1 de la carte : ", reponse)
        else:
            # Otherwise wait a moment.
            time.sleep(read_timeout)
        # Check whether new data has arrived.
        quantity = lora.in_waiting
    print("Reponse de la carte : ", reponse)
    # Did the expected substring show up?
    if chaine_verif in reponse :
        print("La chaine réponse existe", reponse)
        return 1
    else:
        return 0
# Board configuration sequence
if envoi_test_reponse("+AT: OK", 1000,"AT"):
    # Board detected: remember it
    is_exist = True
    # Configure the board (OTAA mode, EU868 plan, channels 0-2)
    envoi_test_reponse("+ID: AppEui", 1000,"AT+ID")
    envoi_test_reponse("+MODE: LWOTAA", 1000,"AT+MODE=LWOTAA")
    envoi_test_reponse("+DR: EU868", 1000,"AT+DR=EU868")
    envoi_test_reponse("+CH: NUM", 1000, "AT+CH=NUM,0-2")
    # SECURITY NOTE(review): the application key is hardcoded in source.
    envoi_test_reponse("+KEY: APPKEY", 1000, "AT+KEY=APPKEY,\"6224041874374E3E85B93F635AB5E774\"")
    envoi_test_reponse("+CLASS: C", 1000, "AT+CLASS=A")
    envoi_test_reponse("+PORT: 8", 1000, "AT+PORT=8")
    # Join the network (OTAA join can take several seconds)
    envoi_test_reponse("+JOIN: Network joined", 12000, "AT+JOIN")
    # Send a test data string as a confirmed hex message
    data = "AT+CMSGHEX=\"12345678\""
    envoi_test_reponse("Done", 5000, data)
| framboise314/Seeedstudio-Grove-E5-LoRa | programmes/lora-E5.py | lora-E5.py | py | 2,569 | python | fr | code | 3 | github-code | 6 | [
{
"api_name": "serial.Serial",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 54,... |
18100621514 | """
1971. Find if Path Exists in Graph
https://leetcode.com/problems/find-if-path-exists-in-graph/
"""
from typing import List, Tuple
from unittest import TestCase, main
class UnionFind:
    """Disjoint-set forest with path compression; the smaller index wins as root."""

    def __init__(self, n: int) -> None:
        # Every node starts as its own representative.
        self.root = list(range(n))

    def find(self, a: int) -> int:
        """Return the representative of *a*, flattening the path on the way."""
        rep = a
        while self.root[rep] != rep:
            rep = self.root[rep]
        # Second pass: point every visited node straight at the representative.
        while a != rep:
            parent = self.root[a]
            self.root[a] = rep
            a = parent
        return rep

    def union(self, a: int, b: int):
        """Merge the groups containing *a* and *b* (smaller root becomes parent)."""
        root_a, root_b = self.find(a), self.find(b)
        if root_b < root_a:
            root_a, root_b = root_b, root_a
        self.root[root_b] = root_a
class Solution:
    def validPath(
        self, n: int, edges: List[List[int]], source: int, destination: int
    ) -> bool:
        """Return True when *destination* is reachable from *source*.

        Unions the endpoints of every edge, then compares representatives.
        """
        dsu = UnionFind(n)
        for u, v in edges:
            dsu.union(u, v)
        return dsu.find(source) == dsu.find(destination)
class Test(TestCase):
    """Unit tests for Solution.validPath."""

    # Each case: (n, edges, source, destination, expected result).
    data: List[Tuple[int, List[List[int]], int, int, bool]] = [
        (
            10,
            [[0, 7], [0, 8], [6, 1], [2, 0], [0, 4],
             [5, 8], [4, 7], [1, 3], [3, 5], [6, 5]],
            7,
            5,
            True,
        ),
        (3, [[0, 1], [1, 2], [2, 0]], 0, 2, True),
        (6, [[0, 1], [0, 2], [3, 5], [5, 4], [4, 3]], 0, 5, False),
    ]

    def test_solution(self):
        solver = Solution()
        for n, edges, source, destination, expected in self.data:
            self.assertEqual(
                solver.validPath(n, edges, source, destination), expected
            )
if __name__ == "__main__":
    # unittest.main (imported at the top) discovers and runs the tests.
    main()
| hirotake111/leetcode_diary | leetcode/1971/solution.py | solution.py | py | 2,023 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_nu... |
30324581341 | import os, sys, re, pickle, json
import numpy as np
import cv2
import pandas as pd
def get_seq(seq_dir, seq_name):
    """Load a pickled 3DPW sequence.

    Args:
        seq_dir: Directory containing the sequence pickle files.
        seq_name: Sequence name without the ``.pkl`` extension.

    Returns:
        The unpickled sequence object (a dict of per-sequence data).
    """
    seq_file = os.path.join(seq_dir, seq_name + ".pkl")
    # Context manager closes the handle even on error (the original
    # left it open); latin1 keeps Python-2-era pickles loadable.
    with open(seq_file, "rb") as fh:
        seq = pickle.load(fh, encoding='latin1')
    return seq
def get_3dkeypoints(seq, frame_id, model_id):
    """Return the SMPL 3D joint positions for one model at one frame.

    The flat ``jointPositions`` vector is reshaped to rows of (x, y, z).
    SMPL joint order:
        0 pelvis, 1 left_hip, 2 right_hip, 3 spine1, 4 left_knee,
        5 right_knee, 6 spine2, 7 left_ankle, 8 right_ankle, 9 spine3,
        10 left_foot, 11 right_foot, 12 neck, 13 left_collar,
        14 right_collar, 15 head, 16 left_shoulder, 17 right_shoulder,
        18 left_elbow, 19 right_elbow, 20 left_wrist, 21 right_wrist,
        22 left_hand, 23 right_hand

    :param seq: sequence dict with a ``jointPositions`` entry
    :param frame_id: frame index
    :param model_id: model (person) index
    :return: array of shape (n_joints, 3)
    """
    flat = seq["jointPositions"][model_id][frame_id]
    return flat.reshape(-1, 3)
def get_cam_params(seq, frame_id):
    """Split a sequence's camera data into intrinsics and extrinsics.

    :param seq: sequence dict with ``cam_intrinsics`` and ``cam_poses``
    :param frame_id: frame index into ``cam_poses``
    :return: tuple (K, R, t, extrinsic) — R is 3x3, t is a 3x1 column
    """
    K = seq["cam_intrinsics"]
    pose = seq["cam_poses"][frame_id]
    R = pose[:3, :3]
    # Translation: last column of the pose, reshaped to a column vector.
    t = np.expand_dims(pose[:-1, -1], axis=1)
    return K, R, t, pose
def estimate_from_3d(seq, frame_id, model_id):
    """Project a model's 3D SMPL joints into the image plane.

    Uses the frame's camera intrinsics/extrinsics via
    ``cv2.projectPoints`` with no lens distortion (``None``).

    :param seq: sequence dict (see get_3dkeypoints / get_cam_params)
    :param frame_id: frame index
    :param model_id: model (person) index
    :return: array of shape (n_joints, 2) of pixel coordinates
    """
    keypoints3d = get_3dkeypoints(seq, frame_id, model_id)
    intrinsic, R, t, extrinsic = get_cam_params(seq, frame_id)
    estimated_keypoints_2d, _ = cv2.projectPoints(keypoints3d, R, t, intrinsic, None)
    # projectPoints returns shape (N, 1, 2); drop the middle axis.
    estimated_keypoints_2d = np.squeeze(estimated_keypoints_2d, axis=1)
    return estimated_keypoints_2d
def approximate_bb(keypoints, x_offset=30, y_offset_max=80, y_offset_min=130):
    """Approximate a bounding box around a set of 2D keypoints.

    The box is the axis-aligned extent of the keypoints, padded by fixed
    margins; the padding constants are now parameters (same defaults as
    before) so other body proportions can be accommodated.

    Args:
        keypoints: array of shape (n, 2) with (x, y) pixel coordinates.
        x_offset: horizontal padding in pixels, applied to both sides.
        y_offset_max: padding below the lowest keypoint.
        y_offset_min: padding above the highest keypoint (larger, since
            the head keypoint sits below the top of the head).

    Returns:
        ([x_min, y_min], [x_max, y_max]) integer corner coordinates.
    """
    xs = keypoints.T[0]
    ys = keypoints.T[1]
    top_left = [int(xs.min()) - x_offset, int(ys.min()) - y_offset_min]
    bottom_right = [int(xs.max()) + x_offset, int(ys.max()) + y_offset_max]
    return top_left, bottom_right
def smpl2coco(smpl_pose):
    """Map SMPL 2D joint positions onto the 19-joint COCO-style layout.

    smpl order (24): pelvis, hips, spines, knees, ankles, feet, neck,
        collars, head, shoulders, elbows, wrists, hands.
    coco order (19): right/left shoulder-elbow-wrist,
        right/left hip-knee-ankle, head, neck, ears, nose, eyes.

    Face joints (ears, nose, eyes) have no SMPL counterpart and are all
    approximated with the head position.

    :param smpl_pose: array-like of shape (n_models, 24, 2)
    :return: ndarray of shape (n_models, 19, 2)
    """
    HEAD = 12  # COCO index of the head joint
    FACE = (14, 15, 16, 17, 18)  # right_ear, left_ear, nose, right_eye, left_eye
    # (smpl index, coco index) pairs present in both layouts
    joint_map = [(1, 9), (2, 6), (4, 10), (5, 7), (7, 11), (8, 8), (12, 13),
                 (15, 12), (16, 3), (17, 0), (18, 4), (19, 1), (20, 5), (21, 2)]
    num_models = len(smpl_pose)
    coco_poses = np.zeros((num_models, 19, 2))
    for m in range(num_models):
        for smpl_idx, coco_idx in joint_map:
            coco_poses[m][coco_idx] = smpl_pose[m][smpl_idx]
        for face_idx in FACE:
            coco_poses[m][face_idx] = coco_poses[m][HEAD]
    return coco_poses
def dump_sorted(filename_list, index_list, occ_status, subset_name, scene_name="courtyard_basketball_00", folder_name="./3dpw/selected_frames"):
    """Write frame selections to a text file, sorted by occlusion value.

    Each output line is ``<filename> <occlusion_index> #<status>``,
    ordered by descending occlusion. The file is written to
    ``<folder_name>/<scene_name>/<subset_name>.txt`` (directories are
    created as needed).

    Args:
        filename_list: frame file names.
        index_list: occlusion value per frame (sort key).
        occ_status: status annotation per frame.
        subset_name: output file base name (without extension).
        scene_name: scene sub-directory name.
        folder_name: root output directory.
    """
    # Pair up the parallel lists and sort by occlusion, highest first.
    selected_sorted = sorted(
        zip(filename_list, index_list, occ_status), key=lambda x: x[1], reverse=True
    )
    scene_dir = os.path.join(folder_name, scene_name)
    os.makedirs(scene_dir, exist_ok=True)
    out_path = os.path.join(scene_dir, subset_name + ".txt")
    # "w" (not "w+") — the file is only written; the context manager
    # guarantees the handle is closed.
    with open(out_path, "w") as dump_file:
        for filename, occ_value, status in selected_sorted:
            dump_file.write("{} {} #{}\n".format(filename, occ_value, status))
{
"api_name": "pickle.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "cv2.projectPoints",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"l... |
4669064041 | from Bio.Seq import Seq
# Read the Rosalind input: line 1 is the text, line 2 the k-mer length.
with open('rosalind_ba1b.txt') as file:
    text = file.readline().rstrip()
    k = int(file.readline().rstrip())
def get_pattern_count_dict(text, length=3):
    """Count every length-``length`` substring of *text*, overlaps included.

    A single sliding-window pass replaces the original per-pattern
    ``Seq.count_overlap`` scans (which rescanned the whole text for
    every distinct pattern), cutting the work to one O(n) pass.

    Args:
        text: the string to scan.
        length: k-mer size (default 3).

    Returns:
        dict mapping each k-mer to its (overlapping) occurrence count,
        keyed in order of first occurrence.
    """
    pattern_dict = {}
    for i in range(len(text) - length + 1):
        pattern = text[i:i + length]
        pattern_dict[pattern] = pattern_dict.get(pattern, 0) + 1
    return pattern_dict
def get_most_freq_patterns(text, length=3):
    """Return every k-mer of *text* that occurs the maximum number of times.

    Rewritten to avoid shadowing the builtin ``dict`` and to avoid a
    full sort when only the maximum count is needed. Ties are returned
    in first-occurrence order, matching the original (its stable sort
    preserved dict insertion order among equal counts).

    Args:
        text: string to scan (must be at least ``length`` characters).
        length: k-mer size (default 3).

    Returns:
        list of the most frequent k-mers (all ties included).
    """
    counts = get_pattern_count_dict(text, length)
    best = max(counts.values())
    return [pattern for pattern, count in counts.items() if count == best]
# Solve, print the space-separated answer, and also write it to output.txt.
k_mers = get_most_freq_patterns(text, k)
output = ' '.join(k_mers)
print(output)
with open('output.txt', 'w') as file:
    file.write(output)
{
"api_name": "Bio.Seq.Seq",
"line_number": 10,
"usage_type": "call"
}
] |
7485160594 | from datetime import datetime
from os.path import basename
from types import SimpleNamespace
import math
import numpy as np
__version__ = "2020.10.06"
def solve(length, supports, loads, EI, GA, top, bottom, shear): # {{{
"""Solve the beam problem.
Arguments:
length: The length of the beam in mm. This will be rounded to
an integer value.
supports: Either None or a 2-tuple of numbers between 0 and length.
If None, the beam will be assumed to be clamped at the origin.
loads: Either a Load or an iterable of Loads.
EI: An iterable of size length+1 or a float containing the bending
stiffenss in every mm of the cross-section of the beam.
GA: An iterable of size length+1 or a float containing the shear
stiffenss in every mm of the cross-section of the beam.
top: An iterable of size length+1 or a float containing the height
above the neutral line in every mm of the cross-section of the beam.
bottom: An iterable of size length+1 or a float containing the height
under the neutral line in every mm of the cross-section of the beam.
shear: A boolean indication if shear deformations should be
included. Will be added and set to 'True' if not provided.
Returns:
This function returns a types.SimpleNamespace with following items:
* D: A numpy array containing the shear force in the cross-section
at each mm of the beam.
* M: A numpy array containing the bending moment in the cross-section
at each mm of the beam.
* dy: A numpy array containing the deflection angle at each mm
of the beam.
* y: A numpy array containing the vertical displacement at each mm
of the beam.
* a: A numpy array containing angle between the tangent line of the beam
and the x-axis in radians at each mm of the beam.
* etop: A numpy array containing the strain at the top of the
cross-section at each mm of the beam.
* ebot: A numpy array containing the strain at the bottom of the
cross-section at each mm of the beam.
* R: If 'supports' was provided, R is a 2-tuple of the reaction
forces at said supports. Else R[0] is the reaction force at the
clamped x=0 and R[1] is the reaction moment at that point.
* length: Length in mm.
"""
length, s1, s2 = _check_length_supports(length, supports)
loads = _check_loads(loads)
loads = [ld for ld in loads] # make a copy since we modifiy it!
EI, GA, top, bot = _check_arrays(length, EI, GA, top, bottom)
if shear not in (True, False):
raise ValueError("shear should be a boolean")
# Calculate support loads.
moment = sum([ld.moment(s1) for ld in loads])
if s2:
R2 = Load(force=-moment / (s2 - s1), pos=s2)
loads.append(R2)
else: # clamped at x = 0
R2 = -moment
# Force equilibrium
R1 = Load(force=-sum([ld.size for ld in loads]), pos=s1)
loads.append(R1)
# Calculate shear force
D = np.sum(np.array([ld.shear(length) for ld in loads]), axis=0)
# Calculate bending moment
M = np.cumsum(D)
Mstep = np.sum(
np.array(
[ld.moment_array(length) for ld in loads if isinstance(ld, MomentLoad)]
),
axis=0,
)
M += Mstep
if s2 is None:
M -= M[-1]
ddy_b = M / EI
etop, ebot = -top * ddy_b, -bot * ddy_b
dy = np.cumsum(ddy_b)
if shear:
dy += -1.5 * D / GA # shear
y = np.cumsum(dy)
if s2:
# First, translate the whole list so that the value at the
# index anchor is zero.
y = y - y[s1]
# Then rotate around the anchor so that the deflection at the other
# support is also 0.
delta = -y[s2] / math.fabs(s1 - s2)
slope = (
np.concatenate((np.arange(-s1, 1, 1), np.arange(1, len(y) - s1))) * delta
)
dy += delta
y = y + slope
results = SimpleNamespace()
results.length = length
results.D, results.M = D, M
results.dy, results.y, results.R = dy, y, (R1, R2)
results.a = np.arctan(dy)
results.etop, results.ebot = etop, ebot
return results # }}}
def save(results, path): # {{{
"""
Save the data from a solved results to a file as columns of numbers.
It writes the following columns to the file:
* position
* shear force
* bending moment
* displacement
* strain at top
* strain at bottom
* deflection angle
Arguments:
results: Results dictionary.
path: Location where the data should be solved
Raises:
AttributeError if the results have not been solved yet.
"""
data = np.vstack(
(
np.arange(results.length + 1),
results.D,
results.M,
results.y,
results.etop,
results.ebot,
results.dy,
)
).T
p = basename(path)
d = str(datetime.now())[:-7]
h = f"file: {p}\ngenerated: {d}\nx D M y et eb dy"
np.savetxt(path, data, fmt="%g", header=h) # }}}
def EI(sections, normal=None): # {{{
"""Calculate the bending stiffnes of a cross-section.
The cross-section is composed out of rectangular nonoverlapping sections
that can have different Young's moduli.
Each section is represented by a 4-tuple (width, height, offset, E).
The offset is the distance from the top of the section to the top of the
highest section. This should always be a positive value.
E is the Young's modulus of the material of this section.
Arguments:
sections: Iterable of section properties.
normal: The Young's modulus to which the total cross-section will be
normalized. (Not used anymore, retained for compatibility.)
Returns:
Tuple of EI, top and bottom. Top and bottom are with respect to the
neutral line.
Examples:
>>> E = 210000
>>> B = 100
>>> H = 20
>>> sections = ((B, H, 0, E),)
>>> EI(sections)
(14000000000.0, 10.0, -10.0)
>>> B = 100
>>> h = 18
>>> t = 1
>>> H = h + 2 * t
>>> E = 210000
>>> sections = ((B, t, 0, E), (B, t, h+t, E))
>>> EI(sections)
(3794000000.0, 10.0, -10.0)
>>> E1, E2 = 200000, 71000
>>> t1, t2 = 1.5, 2.5
>>> H = 31
>>> B = 100
>>> sections = ((B, t1, 0, E1), (B, t2, H-t2, E2))
>>> EI(sections)
(9393560891.143106, 11.530104712041885, -19.469895287958117)
"""
normal = sections[0][-1]
normalized = tuple((w * E / normal, h, offs) for w, h, offs, E in sections)
A = sum(w * h for w, h, _ in normalized)
S = sum(w * h * (offs + h / 2) for w, h, offs in normalized)
yn = S / A
# Find any geometry that straddles yn.
to_split = tuple(g for g in sections if g[2] < yn and g[1] + g[2] > yn)
geom = tuple(g for g in sections if g not in to_split)
# split that geometry.
# The new tuple has the format (width, height, top, bottom)
new_geom = []
for w, h, offs, E in to_split:
h1 = yn - offs
h2 = h - h1
new_geom.append((w, h1, h1, 0, E))
new_geom.append((w, h2, 0, -h2, E))
# Convert the remaining geometry to reference yn.
for w, h, offs, E in geom:
new_geom.append((w, h, yn - offs, yn - offs - h, E))
EI = sum(E * w * (top ** 3 - bot ** 3) / 3 for w, h, top, bot, E in new_geom)
top = max(g[-3] for g in new_geom)
bot = min(g[-2] for g in new_geom)
return EI, top, bot # }}}
def interpolate(tuples): # {{{
"""
Creates a numpy array and fills it by interpolation.
Arguments:
tuples: A list of 2-tuples (n, v). Note that the n values will be
rounded and converted to integers.
Returns:
A numpy array with interpolated values so that at index n the array has
the value v.
Examples:
>>> import numpy as np
>>> interpolate([(0,0), (3,3)])
array([0., 1., 2., 3.])
>>> interpolate([(0,0), (4,3), (6,-1)])
array([ 0. , 0.75, 1.5 , 2.25, 3. , 1. , -1. ])
>>> interpolate([(1,1), (4,4), (6,-3)])
array([ 1. , 2. , 3. , 4. , 0.5, -3. ])
"""
x = np.array([int(round(x)) for x, _ in tuples])
y = np.array([y for _, y in tuples])
startx, starty = x[0], y[0]
arrays = []
for dx, dy in zip(x[1:] - x[:-1], y[1:] - y[:-1]):
if dx > 0:
a = np.linspace(starty, starty + dy, num=dx + 1, endpoint=True)
arrays.append(a[:-1])
startx += dx
starty += dy
arrays.append(np.array([y[-1]]))
return np.concatenate(arrays) # }}}
def patientload(**kwargs): # {{{
"""
Returns a list of DistLoads that represent a patient
load according to IEC 60601 specs. For this calculation the patient is
assumed to be lying with his feet pointing to the origin.
Named arguments:
kg: Mass of the patient in kg.
force: The gravitational force of the patient in N. Note that this
should be a *negative* number.
feet: Location of the patient's feet in mm.
head: Location of the patient's head in mm. This is an alternative for
'feet'. Either 'feet' or 'head' must be present or a ValueError
will be raised.
Returns:
A list of DistLoads.
"""
f = _force(**kwargs)
if "feet" in kwargs:
s = round(float(kwargs["feet"]))
elif "head" in kwargs:
s = round(float(kwargs["head"])) - 1900
else:
raise ValueError("No 'feet' nor 'head' given.")
fractions = [
(0.148 * f, (s + 0, s + 450)), # l. legs, 14.7% from 0--450 mm
(0.222 * f, (s + 450, s + 1000)), # upper legs
(0.074 * f, (s + 1000, s + 1180)), # hands
(0.408 * f, (s + 1000, s + 1700)), # torso
(0.074 * f, (s + 1200, s + 1700)), # arms
(0.074 * f, (s + 1220, s + 1900)),
] # head
return [DistLoad(force=i[0], pos=i[1]) for i in fractions] # }}}
class Load(object): # {{{
"""Point load."""
def __init__(self, **kwargs):
"""
Create a point load.
Named arguments:
force: Force in Newtons. N.B: downwards force should be a
*negative* number.
kg: Weight of a mass in kg, alternative for force. N.B: a weight
of 1 kg will translate into a force of -9.81 N.
pos: Distance from the origin to the location of the force in mm.
Examples:
>>> str(Load(kg=150, pos=100))
'point load of -1471.5 N @ 100 mm.'
"""
self.size = _force(**kwargs)
self.pos = round(float(kwargs["pos"]))
def __str__(self):
return f"point load of {self.size} N @ {self.pos} mm."
def moment(self, pos):
"""
Returns the bending moment that the load exerts at pos.
"""
return (self.pos - pos) * self.size
def shear(self, length):
"""
Return the contribution of the load to the shear.
Arguments:
length: length of the array to return.
Returns:
An array that contains the contribution of this load.
"""
rv = np.zeros(length + 1)
rv[self.pos :] = self.size
return rv # }}}
class MomentLoad(Load): # {{{
def __init__(self, moment, pos):
"""Create a local bending moment load.
Arguments:
moment: bending moment in Nmm
pos: position of the bending moment.
"""
self.m = float(moment)
Load.__init__(self, force=0, pos=pos)
def __str__(self):
return f"moment of {self.m} Nmm @ {self.pos}"
def moment(self, pos):
"""
Returns the bending moment that the load exerts at pos.
"""
return self.m
def shear(self, length):
"""
Return the contribution of the load to the shear.
Arguments:
length: length of the array to return.
Returns:
An array that contains the contribution of this load.
"""
return np.zeros(length + 1)
def moment_array(self, length):
"""
Return the contribution of the load to the bending moment.
Arguments:
length: length of the array to return.
Returns:
An array that contains the contribution of this load.
"""
rv = np.zeros(length + 1)
rv[self.pos :] = -self.m
return rv # }}}
class DistLoad(Load): # {{{
"""Evenly distributed load."""
def __init__(self, **kwargs):
"""
Create an evenly distributed load.
Named arguments:
force: Force in Newtons. N.B: downwards force should be a
*negative* number.
kg: Weight of a mass in kg, alternative for force. N.B: a weight
of 1 kg will translate into a force of -9.81 N.
start: Begin of the distributed load. Must be used in combination
with the 'end' argument.
end: End of the distributed load.
pos: 2-tuple containing the borders of the distributed load.
You can use this instead of start and end.
"""
size = _force(**kwargs)
self.start, self.end = _start_end(**kwargs)
if self.start > self.end:
self.start, self.end = self.end, self.start
Load.__init__(self, force=size, pos=float(self.start + self.end) / 2)
def __str__(self):
return (
f"constant distributed load of {self.size} N @ {self.start}--{self.end} mm."
)
def shear(self, length):
rem = length + 1 - self.end
d = self.end - self.start
q = self.size
parts = (np.zeros(self.start), np.linspace(0, q, d), np.ones(rem) * q)
return np.concatenate(parts) # }}}
class TriangleLoad(DistLoad): # {{{
"""Linearly rising distributed load."""
def __init__(self, **kwargs):
"""
Create an linearly rising distributed load.
Named arguments:
force: Force in Newtons. N.B: downwards force should be a
*negative* number.
kg: Weight of a mass in kg, alternative for force. N.B: a weight
of 1 kg will translate into a force of -9.81 N.
start: Begin of the distributed load. Must be used in combination
with the 'end' argument.
end: End of the distributed load.
"""
DistLoad.__init__(self, **kwargs)
length = abs(self.start - self.end)
pos = (self.start, self.end)
self.pos = round(min(pos)) + 2.0 * length / 3.0
self.q = 2 * self.size / length
def __str__(self):
if self.start < self.end:
d = "ascending"
else:
d = "descending"
return f"linearly {d} distributed load of {self.size} N @ {self.start}--{self.end} mm."
def shear(self, length):
rem = length + 1 - self.end
parts = (
np.zeros(self.start),
np.linspace(0, self.q, self.end - self.start),
np.ones(rem) * self.q,
)
dv = np.concatenate(parts)
return np.cumsum(dv) # }}}
# Everything below is internal to the module.
def _force(**kwargs): # {{{
"""
Determine the force. See Load.__init__()
Returns:
The force as a float.
Examples:
>>> _force(kg=1)
-9.81
"""
if "force" in kwargs:
force = float(kwargs["force"])
elif "kg" in kwargs:
force = -9.81 * float(kwargs["kg"])
else:
raise KeyError("No 'force' or 'kg' present")
return force # }}}
def _start_end(**kwargs): # {{{
"""
Validate the position arguments. See DistLoad.__init_()
Returns:
Postition as a (start, end) tuple
Examples:
>>> _start_end(pos=(100, 200))
(100, 200)
>>> _start_end(start=100, end=200)
(100, 200)
"""
if "pos" in kwargs:
p = kwargs["pos"]
if not isinstance(p, tuple) and len(p) != 2:
raise ValueError("'pos' should be a 2-tuple")
pos = (round(float(kwargs["pos"][0])), round(float(kwargs["pos"][1])))
elif "start" in kwargs and "end" in kwargs:
pos = (round(float(kwargs["start"])), round(float(kwargs["end"])))
else:
raise KeyError("Neither 'pos' or 'start' and 'end' present")
return pos # }}}
def _check_length_supports(length, supports): # {{{
"""
Validate the length and supports. See solve().
Returns:
A tuple (length, support1, support2)
"""
length = int(round(length))
if length < 1:
raise ValueError("length must be ≥1")
if supports is not None:
if len(supports) != 2:
t = "The problem definition must contain exactly two supports."
raise ValueError(t)
s = (int(round(supports[0])), int(round(supports[1])))
if s[0] == s[1]:
raise ValueError("Two identical supports found!")
elif s[0] > s[1]:
s = (s[1], s[0])
if s[0] < 0 or s[1] > length:
raise ValueError("Support(s) outside of the beam!")
else:
s = (0, None)
return (length, s[0], s[1]) # }}}
def _check_loads(loads): # {{{
"""
Validate the loads in the problem. See solve().
Returns:
A list of Loads
"""
if isinstance(loads, Load):
loads = [loads]
if loads is None or len(loads) == 0:
raise ValueError("No loads specified")
for ld in loads:
if not isinstance(ld, Load):
raise ValueError("Loads must be Load instances")
return list(loads) # }}}
def _check_arrays(L, EI, GA, top, bottom): # {{{
"""
Validate the length of the EI, GA, top and bot iterables and converts
them into numpy arrays. See solve().
Returns:
The modified EI, GA, top and bottom arrays.
"""
rv = []
for name, ar in zip(("EI", "GA", "top", "bottom"), (EI, GA, top, bottom)):
# Convert single number to an ndarray.
if isinstance(ar, (int, float)):
ar = np.ones(L + 1) * ar
# Convert list/tuple to ndarray.
elif isinstance(ar, (list, tuple)):
ar = np.array(ar)
elif isinstance(ar, np.ndarray):
pass
else:
raise ValueError(
f"{name} is not a int, float, list, tuple or numpy.ndarray"
)
la = len(ar)
if la != L + 1:
raise ValueError(
f"Length of {name} ({la}) doesn't match beam length ({L}) + 1 ."
)
rv.append(ar)
return rv # }}}
| rsmith-nl/beammech | beammech.py | beammech.py | py | 18,941 | python | en | code | 26 | github-code | 6 | [
{
"api_name": "numpy.sum",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 71,... |
36261973545 | from collections import deque
def solution1(graph):
queue = deque([(0,0,0)])
n = len(graph)
m = len(graph[0])
while queue:
x,y,v = queue.popleft()
if x>=n or y>=m or x<0 or y<0:
continue
if graph[x][y] == 1:
graph[x][y] += v
queue.append((x+1, y, graph[x][y]))
queue.append((x, y+1, graph[x][y]))
queue.append((x-1, y, graph[x][y]))
queue.append((x, y-1, graph[x][y]))
return graph[n-1][m-1]
def solution2(graph):
# 이동할 네 방향 정의(상, 하, 좌, 우)
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
# graph 크기 정의
n = len(graph)
m = len(graph[0])
# BFS 소스코드 구현
def bfs(x, y):
# 큐(Queue) 구현을 위해 deque 라이브러리 사용
queue = deque()
queue.append((x, y))
# 큐가 빌 때까지 반복
while queue:
x, y = queue.popleft()
# 현재 위치에서 네 방향으로의 위치 확인
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
# 미로 찾기 공간을 벗어난 경우 무시
if nx < 0 or ny < 0 or nx >= n or ny >= m:
continue
# 벽인 경우 무시
if graph[nx][ny] == 0:
continue
# 해당 노드를 처음 방문하는 경우에만 최단 거리 기록
if graph[nx][ny] == 1:
graph[nx][ny] = graph[x][y] + 1
queue.append((nx, ny))
# 가장 오른쪽 아래까지의 최단 거리 반환
return graph[n - 1][m - 1]
return bfs(0,0)
if __name__ == "__main__":
graph = [
[1,0,1,0,1,0],
[1,0,1,1,1,1],
[1,0,1,1,1,0],
[1,0,1,0,1,0],
[1,1,1,0,1,1]
]
print(solution1(graph)) | hon99oo/PythonAlgorithmStudy | 이코테/DFS_BFS/예제_미로 탈출/solution.py | solution.py | py | 1,924 | python | ko | code | 0 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 35,
"usage_type": "call"
}
] |
34042177473 | import datetime
import logging
from django.contrib import auth
from django.http import HttpResponseRedirect, HttpResponseNotFound
from django.utils.translation import check_for_language
from django.shortcuts import render
from blueapps.account.components.bk_token.forms import AuthenticationForm
from gcloud.core.signals import user_enter
from gcloud.conf import settings
# Module-level logger; "root" matches the project-wide logging configuration.
logger = logging.getLogger("root")
def page_not_found(request, exception):
    """Custom 404 handler.

    Static-file misses get a plain 404. For page URLs, an anonymous
    user is redirected to the site root (carrying the originally
    requested URL in a query parameter) so the login flow can run; an
    authenticated user gets the SPA shell so the frontend router can
    render its own not-found view.
    """
    if request.path.startswith(settings.STATIC_URL):
        return HttpResponseNotFound()
    user = _user_authenticate(request)
    # Not logged in: redirect to the home page (triggers login),
    # preserving the requested URL for post-login navigation.
    if not user:
        return HttpResponseRedirect(
            settings.SITE_URL + "?{}={}".format(settings.PAGE_NOT_FOUND_URL_KEY, request.build_absolute_uri())
        )
    request.user = user
    # Record that the user entered the site via a non-home URL.
    user_enter.send(username=user.username, sender=user.username)
    return render(request, "core/base_vue.html", {})
def _user_authenticate(request):
    """Authenticate the request from its ``bk_token`` cookie.

    Returns the authenticated user, or None when the cookie is missing,
    invalid, or authentication fails.
    """
    # Validate the cookie data before acting on it.
    form = AuthenticationForm(request.COOKIES)
    if not form.is_valid():
        return None
    bk_token = form.cleaned_data["bk_token"]
    # Check whether the bk_token in the cookie matches the one cached in
    # the session. After logout the cookie is deleted while the session
    # may remain, in which case is_match is False and we re-authenticate.
    is_match = bk_token == request.session.get("bk_token")
    if is_match and request.user.is_authenticated:
        return request.user
    user = auth.authenticate(request=request, bk_token=bk_token)
    if user:
        # Login succeeded: establish the session and cache the token for
        # the fast-path check above.
        auth.login(request, user)
        request.session["bk_token"] = bk_token
    return user
def home(request):
    """Render the SPA shell for the home page.

    Emits the ``user_enter`` signal so the visit is recorded; signal
    failures are logged but never block the page render.
    """
    try:
        username = request.user.username
        # Record that the user entered via the home URL.
        user_enter.send(username=username, sender=username)
    except Exception:
        logger.exception("user_enter signal send failed.")
    return render(request, "core/base_vue.html")
def set_language(request):
    """Switch the UI language, then redirect back.

    The language code comes from the ``language`` GET parameter; when
    valid it is stored in the session and in the language cookie (one
    year lifetime) before redirecting to ``next`` or the referrer.
    """
    request_params = getattr(request, request.method)
    next_url = request_params.get("next", None) or request.META.get("HTTP_REFERER", "/")
    response = HttpResponseRedirect(next_url)
    if request.method == "GET":
        lang_code = request.GET.get("language", None)
        # Only accept language codes Django recognises.
        if lang_code and check_for_language(lang_code):
            if hasattr(request, "session"):
                request.session["blueking_language"] = lang_code
            # Cookie lifetime: one year, with a matching Expires header.
            max_age = 60 * 60 * 24 * 365
            expires = datetime.datetime.strftime(
                datetime.datetime.utcnow() + datetime.timedelta(seconds=max_age), "%a, %d-%b-%Y %H:%M:%S GMT",
            )
            response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code, max_age, expires)
    return response
| caiyj/bk-sops | gcloud/core/views.py | views.py | py | 2,776 | python | en | code | null | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "gcloud.conf.settings.STATIC_URL",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "gcloud.conf.settings",
"line_number": 17,
"usage_type": "name"
},
{
"api_n... |
19265168130 | import smtplib
from email.message import EmailMessage, MIMEPart
import time
from typing import Tuple
class SendAMessage():
    """Send a notification via Gmail SMTP as MMS and/or e-mail.

    Depending on the configuration, the message body goes to the MMS
    gateway recipients, the e-mail recipients, or (when ``config.csv``
    is set) a single recipient with the file in ``attach`` attached.
    Sending happens as a side effect of construction.
    """

    def __init__(self, action, msg_body, config, attach=None):
        self.config = config
        self.msg_body = msg_body
        self.attach = attach
        self.action = action
        self.setup_message_subject()
        self.recipients = self.config.mms_recipients
        self.user = self.config.email
        self.password = self.config.token
        # Default to including a subject line; the MMS branch below may
        # disable it. Previously this flag was never set on the CSV
        # path, making build_and_send_msg() raise AttributeError there.
        self.build_subject = True
        if self.config.csv:
            self.recipients = [self.config.csv]
            with open(self.attach, 'rb') as content_file:
                self.content = content_file.read()
            self.build_and_send_msg()
        else:
            if self.config.mms_enabled:
                self.recipients = self.config.mms_recipients
                if not self.config.mms_subject:
                    self.build_subject = False
                self.build_and_send_msg()
            if self.config.email_enabled:
                self.build_subject = True
                if self.config.mms_enabled:
                    # Brief pause between the two sends when both
                    # channels are enabled.
                    time.sleep(5)
                self.recipients = self.config.email_recipients
                self.build_and_send_msg()

    def build_and_send_msg(self):
        """Build an EmailMessage for the current recipients and send it."""
        self.emailObj = EmailMessage()
        if self.config.csv:
            self.emailObj.add_attachment(self.content, maintype='application', subtype='pdf', filename=self.attach)
        if self.build_subject:
            self.emailObj['subject'] = self.subject
        self.emailObj['from'] = self.user
        self.emailObj.set_content(self.msg_body)
        self.emailObj['to'] = self.user
        # Recipients go in BCC so they don't see each other's addresses.
        self.emailObj['bcc'] = self.recipients
        # print(f"Sending MMS to: {to}") # console debugging, informative.
        self.enable_smtp_server()
        self.server.send_message(self.emailObj)
        self.quit_server()

    def setup_message_subject(self):
        """Derive the subject line from the requested action."""
        # Fallback so self.subject always exists, even for actions not
        # listed below (previously those left the attribute unset).
        self.subject = f"CONSTELLATION {self.config.node_name}"
        if self.action == "normal" or self.action == "auto" or self.action == "error":
            self.subject = f"CONSTELLATION {self.config.node_name}"
        elif self.action == "health":
            self.subject = f"HEALTH CHECK {self.config.node_name}"
        # come back to re-enable this later...
        # if self.action == "error":
        #     self.subject = "ERROR CONST DARSTAR"

    def enable_smtp_server(self):
        """Open an authenticated, STARTTLS-secured Gmail SMTP session."""
        self.server = smtplib.SMTP("smtp.gmail.com", 587)
        self.server.starttls()
        self.server.login(self.user, self.password)

    def quit_server(self):
        """Terminate the SMTP session."""
        self.server.quit()
# This module is meant to be imported; warn on direct execution.
if __name__ == "__main__":
    print("This class module is not designed to be run independently, please refer to the documentation")
{
"api_name": "time.sleep",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "email.message.EmailMessage",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "smtplib.SMTP",
"line_number": 69,
"usage_type": "call"
}
] |
6836155009 | # -*- coding: utf-8 -*-
from windows import DSFWindow, PlateResWindow
from epyparser import viia_parser, exparser
from matplotlib.pyplot import figure, show
from optim import linmelt
from scipy import array, sqrt
import csv
def info(args):
    """Print a summary of the experiments listed in the wells CSV file.

    Does nothing unless ``args.csv_wells`` is provided.
    """
    if args.csv_wells is None:
        return
    well_info = exparser(args.csv_wells, args.csvexp, args.csvregexp)
    expnames = well_info.get_experiments()
    print('Listed experiments:')
    for name, count in expnames.items():
        print('%30s (%3d wells)' % (name, count))
def plot(args):
    """Open an interactive DSF window showing melt curves for the wells.

    Wells come from ``args.wells`` or, when a wells CSV is given, from
    the experiment layout parsed out of it. Keyboard navigation between
    wells is handled by the DSFWindow figure class.
    """
    dataset = viia_parser(args.input_file)
    fig = figure(FigureClass=DSFWindow)
    well_info = None
    if args.csv_wells is not None:
        well_info = exparser(args.csv_wells,args.csvexp,args.csvregexp)
        # The CSV layout overrides any explicit well selection.
        args.wells = well_info.get_wells()
    fig.set_data(dataset.get_all_readings(args.wells), well_info, tm_guess=args.tm_guess, kfac=args.basespan)
    fig.plot_well()
    fig.canvas.mpl_connect('key_press_event', fig.onkeypress)
    show()
def fit(args):
    """Fit a melt curve for each requested well and report Tm / deltaT.

    Wells are taken from ``args.wells`` (comma-separated), from the
    wells CSV when given, or default to every well in the dataset.
    With ``args.csv_output`` the results are also written to
    ``args.output_file`` as ``Well,Tm,deltaT`` rows.
    """
    dataset = viia_parser(args.input_file)
    wtf = dataset.get_all_readings(args.wells)
    if args.wells is not None:
        wells = list(map(int, args.wells.split(',')))
    elif args.csv_wells is not None:
        wells = exparser(args.csv_wells, args.csvexp, args.csvregexp).get_wells()
    else:
        wells = sorted(wtf)
    # Ensure the CSV sink is closed even if a fit raises (the original
    # left the handle open in that case).
    fout = None
    try:
        if args.csv_output:
            fout = open(args.output_file, 'w')
            fout.write('Well,Tm,deltaT\n')
        for well in wells:
            wtfit = linmelt(wtf[well][0], wtf[well][1], tm=args.tm_guess)
            wtfit.fmin()
            if fout is not None:
                fout.write('%d,%f,%f\n' % (well, wtfit.tm(), wtfit.dt()))
            print('Well #%d: %s' % (well, wtfit.report()))
    finally:
        if fout is not None:
            fout.close()
def plate(args):
    """Fit melting curves for a whole plate, optionally normalized to control wells.

    Control wells (from ``args.cwells`` or the CSV control description) are
    fitted first to get a reference Tm; sample wells are then fitted, reported
    (with a Z-score when controls exist), optionally written to a file, and
    plotted interactively.
    """
    dataset = viia_parser(args.input_file)
    # --- control-well pass: only when controls are specified ---
    if args.cwells is not None or (args.csvcontrol is not None and args.csv_wells is not None):
        if args.cwells is not None:
            cwells = list(map(int, args.cwells.split(',')))
        else:
            cwells = exparser(args.csv_wells,args.csvcontrol).get_wells()
        cwtf = dataset.get_all_readings(cwells)
        c_parms = []
        for well in cwells:
            wtfit = linmelt(cwtf[well][0], cwtf[well][1], tm=args.tm_guess)
            p = wtfit.fmin()
            # iterative re-weighting/refit passes
            for i in range(args.nbro):
                wtfit.w2delta(kfac=args.basespan)
                p = wtfit.fmin()
            print('Control well #%03d: %s' % (well,wtfit.report()))
            c_parms.append([wtfit.tm(),wtfit.dt()])
        c_parms = array(c_parms)
        # control-well mean and std of (Tm, deltaT)
        mtm, mdt = c_parms.mean(0)
        stm, sdt = c_parms.std(0,ddof=1)
        print('--------\nAverages:')
        print(' Tm = %.1f +- %.1f' % (mtm, stm))
        print(' deltaT = %.1f +- %.1f' % (mdt, sdt))
        contrflag = True
    else:
        contrflag = False
    print('--------\nResults:')
    wtf = dataset.get_all_readings(args.wells)
    # sample-well selection: CLI list, CSV description, or everything read
    if args.wells is not None:
        wells = list(map(int, args.wells.split(',')))
    elif args.csv_wells is not None:
        well_info = exparser(args.csv_wells,args.csvexp,args.csvregexp)
        wells = well_info.get_wells()
    else:
        wells = sorted(wtf)
    wtfits = {}
    a_parms = []
    for well in wells:
        if contrflag:
            # seed the fit with the control Tm (converted C -> K)
            wtfit = linmelt(wtf[well][0], wtf[well][1], tm=273.15+mtm)
        else:
            wtfit = linmelt(wtf[well][0], wtf[well][1], tm=args.tm_guess)
        p = wtfit.fmin()
        for i in range(args.nbro):
            wtfit.w2delta(kfac=args.basespan)
            p = wtfit.fmin()
        outline = 'Well #%03d: %s' % (well,wtfit.report())
        if contrflag:
            # Z-score of this well's Tm against the control distribution
            outline += ' ZTm=%8.1f' % ((wtfit.tm()-mtm)/stm/sqrt(1+len(cwells)))
        if args.csv_wells is not None:
            outline += ' : ' + well_info.get_well_value(well, 'info')
            well_info.set_well_value(well, 'tm', wtfit.tm())
            well_info.set_well_value(well, 'dt', wtfit.dt())
        print(outline)
        wtfits[well] = wtfit
        a_parms.append([wtfit.tm(),wtfit.dt()])
    a_parms = array(a_parms)
    # NOTE(review): this overwrites the *control* mtm/stm computed above; the
    # 'my'/'sy' passed to fig.set_data below therefore use the sample averages,
    # not the control averages — confirm this is intentional.
    mtm, mdt = a_parms.mean(0)
    stm, sdt = a_parms.std(0,ddof=1)
    print('--------\nAverages:')
    print(' Tm = %.1f +- %.1f' % (mtm, stm))
    print(' deltaT = %.1f +- %.1f' % (mdt, sdt))
    if args.csv_wells is not None:
        # NOTE(review): well_info.iteritems() looks like a Python-2-style API;
        # verify exparser provides it under Python 3.
        x,tm,dt,fmt,wellnum = list(zip(*[(float(v.get('x')),v.get('tm'),v.get('dt'),v.get('format','ro'),k) for k,v in well_info.iteritems()]))
        if args.output_file is not None:
            with open(args.output_file,'w') as fout:
                if args.csv_output:
                    fout.write('Concentration,Tm,deltaT\n')
                    for xx,yy,zz in zip(*(x,tm,dt)):
                        fout.write('%f,%f,%f\n' % (xx,yy,zz))
                else:
                    for xx,yy in zip(*(x,tm)):
                        fout.write('%f %f\n' % (xx,yy))
        fig = figure(FigureClass=PlateResWindow)
        if contrflag:
            fig.set_data({'tm':{'x':x,'y':tm,'format':fmt,'my':mtm,'sy':stm}, 'wells':wellnum})
        else:
            fig.set_data({'tm':{'x':x,'y':tm,'format':fmt}, 'wells':wellnum})
        fig.plot('tm', args.logplot)
        if args.ylabel is None:
            args.ylabel = "Tm, °C"
        fig.set_axlabels(args.xlabel, args.ylabel)
        fig.canvas.mpl_connect('key_press_event', fig.onkeypress)
        fig.canvas.mpl_connect('button_press_event', fig.onmouseclick)
        figCurve = figure(FigureClass=DSFWindow)
        figCurve.set_data(dataset.get_all_readings(wells), well_info, wtfit=wtfits, kfac=args.basespan)
        figCurve.canvas.mpl_connect('key_press_event', figCurve.onkeypress)
        fig.attach_curves(figCurve)
        show()
| pozharski/epydsf | dsfactions.py | dsfactions.py | py | 5,760 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "epyparser.exparser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "epyparser.viia_parser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "win... |
26008064699 | import json
from pathlib import Path
def get_average_mark_student(student):
    """Add an 'average' key (mean mark over all subjects) to the report card."""
    total = sum(value for key, value in student.items() if key in subjects)
    student['average'] = total / len(subjects)
    return student  # the same report card, now carrying its average mark
# add mark for subject to dictionary of individual subject marks
def get_average_mark_subject(student):
    """Accumulate this student's marks into the module-level subject_marks totals."""
    for mark in student:
        if mark in subjects:
            # dict.get with a default of 0 collapses the original
            # first-seen / already-seen branches into one statement
            subject_marks[mark] = subject_marks.get(mark, 0) + student[mark]
# add average mark for subject to dictionary of individual grade marks
def get_average_mark_grade(student):
    """Accumulate this student's average mark into the module-level grade_marks totals."""
    student_grade = student.get('grade')
    # dict.get with a default of 0 replaces the explicit membership check
    grade_marks[student_grade] = (grade_marks.get(student_grade, 0)
                                  + student.get('average'))
# Aggregate per-student JSON report cards from ./students into summary stats.
files = Path('./students').glob('*')
subjects = ['math', 'science', 'history', 'english', 'geography']
report_cards = []    # all report cards, each augmented with an 'average' key
subject_marks = {}   # subject name -> summed marks across students
grade_marks = {}     # grade -> summed student averages
average_grade = 0    # running total of all student averages
for file in files: # iterate through all files
    with open(file, 'r') as f: # open file
        data = json.load(f) # load data
        # append student report card to list of report cards
        report_cards.append(get_average_mark_student(data))
for card in report_cards: # iterate through report cards
    # add mark to dictionary of individual subject's marks
    get_average_mark_subject(card)
    # add student average mark to dictionary of individual grade's marks
    get_average_mark_grade(card)
    # add student average to the running total of average marks
    average_grade += card.get('average')
# find the student card with the lowest average mark
worst_student = min(report_cards, key=lambda card: card['average'])
# find the student card with the highest average mark
best_student = max(report_cards, key=lambda card: card['average'])
print(f'''
Average Student Grade: {(average_grade / len(report_cards)):.2f}
Hardest Subject: {min(subject_marks, key=subject_marks.get)}
Easiest Subject: {max(subject_marks, key=subject_marks.get)}
Best Performing Grade: {max(grade_marks, key=grade_marks.get)}
Worst Performing Grade: {min(grade_marks, key=grade_marks.get)}
Best Student ID: {best_student['id']}
Worst Student ID: {worst_student['id']}
''')
| 1lubo/Student-Performance | main.py | main.py | py | 2,482 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 43,
"usage_type": "call"
}
] |
19990663885 | from django.conf.urls import url
from . import views
# NOTE: anchor each URL regex carefully (especially the trailing `$`);
# un-anchored patterns overlap with each other and do not match as intended.
urlpatterns = [
    url(r'^$', views.index),               # index view
    url(r'^(\d+)/$', views.detail),        # detail view, captures a numeric id
    url(r'^grades/$', views.grades),
    url(r'^students/$', views.students),
    url(r'^grades/(\d+)$', views.gradeStudents),  # students of one grade
    url(r'^addstudent/$', views.addStudent)
]
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.co... |
15757093517 | from flask import Flask, request, abort
import os
import face_detect as f # face_detect.py
import base64
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage, ImageMessage #ImageMessageを追加
)
app = Flask(__name__)

# Fetch LINE credentials from the environment.
# BUG FIX: os.environ was previously indexed with the literal token/secret
# *values* (also leaking them in source); indexing must use the environment
# variable *names*. Assumed names match the local variables — confirm the
# deployment (e.g. Heroku config vars) defines them.
YOUR_CHANNEL_ACCESS_TOKEN = os.environ["YOUR_CHANNEL_ACCESS_TOKEN"]
YOUR_CHANNEL_SECRET = os.environ["YOUR_CHANNEL_SECRET"]

line_bot_api = LineBotApi(YOUR_CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(YOUR_CHANNEL_SECRET)


@app.route("/")
def hello_world():
    """Health-check endpoint."""
    return "hello world!"
@app.route("/callback", methods=['POST'])
def callback():
    """LINE webhook endpoint: verify the request signature and dispatch events."""
    # get X-Line-Signature header value
    signature = request.headers['X-Line-Signature']
    # get request body as text
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)
    # handle webhook body
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        # Signature mismatch: reject with 400 so LINE treats delivery as failed.
        abort(400)
    return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Echo any received text message back to the sender."""
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text=event.message.text),
    )
@handler.add(MessageEvent, message=ImageMessage)
def handle_image_message(event):
    """Run face detection on a posted image and reply with the result text."""
    push_img_id = event.message.id # ID of the posted image
    # fetch the image content that LINE auto-saved on its servers
    message_content = line_bot_api.get_message_content(push_img_id)
    push_img = b""
    for chunk in message_content.iter_content():
        push_img += chunk # accumulate the image bytes chunk by chunk
    push_img = base64.b64encode(push_img) # base64-encode for the detection API
    msg = f.face_detect(push_img)
    line_bot_api.reply_message(event.reply_token, TextSendMessage(text=msg))
if __name__ == "__main__":
    # BUG FIX: os.getenv("PORT") returns None when PORT is unset, making
    # int(None) raise TypeError. Default to 5000 (Flask's usual dev port).
    port = int(os.environ.get("PORT", "5000"))
    app.run(host="0.0.0.0", port=port)
| kentamseisyou/myahutest | main.py | main.py | py | 2,201 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "linebot.LineBotApi",
... |
13415308592 | from pickletools import uint8
import time
import numpy as np
from onnxruntime import InferenceSession
import cv2
import numpy as np
# Load the ONNX model
sess = InferenceSession('output.onnx')
image = cv2.imread('38.jpg')
image=cv2.resize(image,(1024,512))
# in-place min-max normalization to the 0..255 range
cv2.normalize(image,image,0,255,cv2.NORM_MINMAX)
#print(image)
# HWC -> CHW, as the model expects channel-first input
image=image.transpose((2,0,1))
img=np.array(image).astype('float32')
# Prepare the input/output tensor names
inputname=sess.get_inputs()[0].name
outputname=sess.get_outputs()[0].name
# Run inference (img[None, ...] adds the batch dimension)
start = time.time()
ort_outs = sess.run(None, input_feed={inputname: img[None, :, :, :]})
pred = ort_outs[0].astype('uint8')
pred = pred.flatten()
print(pred)
#ret,thres = cv2.threshold(array,1,255,cv2.THRESH_BINARY)
# NOTE(review): pred was just flattened to 1-D, so imshow cannot render it as
# an image — it likely needs reshaping to the 2-D mask shape first; confirm.
cv2.imshow('t',pred)
cv2.waitKey()
end = time.time()
| Tommy-Bie/Logistics-Package-Separation-Software | DatasetUtils/test.py | test.py | py | 763 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "onnxruntime.InferenceSession",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.normalize",
... |
22949757973 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# k2hat.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - 07/15
# License: MIT. See the LICENCE file for license text.
'''
This contains functions for reading K2 CSV light-curves produced by the HAT
Project into a Python dictionary. Requires numpy.
The only external function here is::
read_csv_lightcurve(lcfile)
Example:
Reading the best aperture LC for EPIC201183188 = UCAC4-428-055298 (see
http://k2.hatsurveys.org to search for this object and download the light
curve):
>>> import k2hat
>>> lcdict = k2hat.read_csv_lightcurve('UCAC4-428-055298-75d3f4357b314ff5ac458e917e6dfeb964877b60affe9193d4f65088-k2lc.csv.gz')
The Python dict lcdict contains the metadata and all columns.
>>> lcdict.keys()
['decl', 'objectid', 'bjdoffset', 'qualflag', 'fovchannel', 'BGV',
'aperpixradius', 'IM04', 'TF17', 'EP01', 'CF01', 'ra', 'fovmodule', 'columns',
'k2campaign', 'EQ01', 'fovccd', 'FRN', 'IE04', 'kepid', 'YCC', 'XCC', 'BJD',
'napertures', 'ucac4id', 'IQ04', 'kepmag', 'ndet','kernelspec']
The columns for the light curve are stored in the columns key of the dict. To
get a list of the columns:
>>> lcdict['columns']
['BJD', 'BGV', 'FRN', 'XCC', 'YCC', 'IM04', 'IE04', 'IQ04', 'EP01', 'EQ01',
'TF17', 'CF01']
To get columns:
>>> bjd, epdmags = lcdict['BJD'], lcdict['EP01']
>>> bjd
array([ 2456808.1787283, 2456808.1991608, 2456808.2195932, ...,
2456890.2535691, 2456890.274001 , 2456890.2944328])
>>> epdmags
array([ 16.03474, 16.02773, 16.01826, ..., 15.76997, 15.76577,
15.76263])
'''
# put this in here because k2hat can be used as a standalone module
__version__ = '0.5.3'
#############
## LOGGING ##
#############
# the basic logging styles common to all astrobase modules
log_sub = '{'
log_fmt = '[{levelname:1.1} {asctime} {module}:{lineno}] {message}'
log_date_fmt = '%y%m%d %H:%M:%S'
import logging
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import os.path
import gzip
import numpy as np
########################
## COLUMN DEFINITIONS ##
########################
# LC column definitions
# the first elem is the column description, the second is the format to use when
# writing a CSV LC column, the third is the type to use when parsing a CSV LC
# column
COLUMNDEFS = {
'BJD':['time in Baryocentric Julian Date','%.7f',float],
'BGV':['Background value (ADU)','%.5f',float],
'BGE':['Background value (ADU)','%.5f',float],
'FRN':['cadence number of observation','%i',int],
'XCC':['x coordinate on module', '%.3f',float],
'YCC':['y coordinate on module', '%.3f',float],
'ARC':['arc length parameter', '%.3f', float],
# APERture 00
'IM00':['K2 instrumental magnitude (aperture 00)','%.5f',float],
'IE00':['K2 instrumental mag. error (aperture 00)','%.5f',float],
'IQ00':['K2 instrumental mag. quality flag (aperture 00)','%s',str],
'EP00':['detrended magnitude (aperture 00)','%.5f',float],
'EQ00':['detrended mag. quality flag (aperture 00)','%i',int],
'TF00':['TFA magnitude (aperture 00)','%.5f',float],
'CF00':['Cosine filtered magnitude (aperture 00)','%.5f',float],
# APERture 01
'IM01':['K2 instrumental magnitude (aperture 01)','%.5f',float],
'IE01':['K2 instrumental mag. error (aperture 01)','%.5f',float],
'IQ01':['K2 instrumental mag. quality flag (aperture 01)','%s',str],
'EP01':['detrended magnitude (aperture 01)','%.5f',float],
'EQ01':['detrended mag. quality flag (aperture 01)','%i',int],
'TF01':['TFA magnitude (aperture 01)','%.5f',float],
'CF01':['Cosine filtered magnitude (aperture 01)','%.5f',float],
# APERture 02
'IM02':['K2 instrumental magnitude (aperture 02)','%.5f',float],
'IE02':['K2 instrumental mag. error (aperture 02)','%.5f',float],
'IQ02':['K2 instrumental mag. quality flag (aperture 02)','%s',str],
'EP02':['detrended magnitude (aperture 02)','%.5f',float],
'EQ02':['detrended mag. quality flag (aperture 02)','%i',int],
'TF02':['TFA magnitude (aperture 02)','%.5f',float],
'CF02':['Cosine filtered magnitude (aperture 02)','%.5f',float],
# APERture 03
'IM03':['K2 instrumental magnitude (aperture 03)','%.5f',float],
'IE03':['K2 instrumental mag. error (aperture 03)','%.5f',float],
'IQ03':['K2 instrumental mag. quality flag (aperture 03)','%s',str],
'EP03':['detrended magnitude (aperture 03)','%.5f',float],
'EQ03':['detrended mag. quality flag (aperture 03)','%i',int],
'TF03':['TFA magnitude (aperture 03)','%.5f',float],
'CF03':['Cosine filtered magnitude (aperture 03)','%.5f',float],
# APERture 04
'IM04':['K2 instrumental magnitude (aperture 04)','%.5f',float],
'IE04':['K2 instrumental mag. error (aperture 04)','%.5f',float],
'IQ04':['K2 instrumental mag. quality flag (aperture 04)','%s',str],
'EP04':['detrended magnitude (aperture 04)','%.5f',float],
'EQ04':['detrended mag. quality flag (aperture 04)','%i',int],
'TF04':['TFA magnitude (aperture 04)','%.5f',float],
'CF04':['Cosine filtered magnitude (aperture 04)','%.5f',float],
# APERture 05
'IM05':['K2 instrumental magnitude (aperture 05)','%.5f',float],
'IE05':['K2 instrumental mag. error (aperture 05)','%.5f',float],
'IQ05':['K2 instrumental mag. quality flag (aperture 05)','%s',str],
'EP05':['detrended magnitude (aperture 05)','%.5f',float],
'EQ05':['detrended mag. quality flag (aperture 05)','%i',int],
'TF05':['TFA magnitude (aperture 05)','%.5f',float],
'CF05':['Cosine filtered magnitude (aperture 05)','%.5f',float],
# APERture 06
'IM06':['K2 instrumental magnitude (aperture 06)','%.5f',float],
'IE06':['K2 instrumental mag. error (aperture 06)','%.5f',float],
'IQ06':['K2 instrumental mag. quality flag (aperture 06)','%s',str],
'EP06':['detrended magnitude (aperture 06)','%.5f',float],
'EQ06':['detrended mag. quality flag (aperture 06)','%i',int],
'TF06':['TFA magnitude (aperture 06)','%.5f',float],
'CF06':['Cosine filtered magnitude (aperture 06)','%.5f',float],
# APERture 07
'IM07':['K2 instrumental magnitude (aperture 07)','%.5f',float],
'IE07':['K2 instrumental mag. error (aperture 07)','%.5f',float],
'IQ07':['K2 instrumental mag. quality flag (aperture 07)','%s',str],
'EP07':['detrended magnitude (aperture 07)','%.5f',float],
'EQ07':['detrended mag. quality flag (aperture 07)','%i',int],
'TF07':['TFA magnitude (aperture 07)','%.5f',float],
'CF07':['Cosine filtered magnitude (aperture 07)','%.5f',float],
# APERture 08
'IM08':['K2 instrumental magnitude (aperture 08)','%.5f',float],
'IE08':['K2 instrumental mag. error (aperture 08)','%.5f',float],
'IQ08':['K2 instrumental mag. quality flag (aperture 08)','%s',str],
'EP08':['detrended magnitude (aperture 08)','%.5f',float],
'EQ08':['detrended mag. quality flag (aperture 08)','%i',int],
'TF08':['TFA magnitude (aperture 08)','%.5f',float],
'CF08':['Cosine filtered magnitude (aperture 08)','%.5f',float],
# APERture 09
'IM09':['K2 instrumental magnitude (aperture 09)','%.5f',float],
'IE09':['K2 instrumental mag. error (aperture 09)','%.5f',float],
'IQ09':['K2 instrumental mag. quality flag (aperture 09)','%s',str],
'EP09':['detrended magnitude (aperture 09)','%.5f',float],
'EQ09':['detrended mag. quality flag (aperture 09)','%i',int],
'TF09':['TFA magnitude (aperture 09)','%.5f',float],
'CF09':['Cosine filtered magnitude (aperture 09)','%.5f',float],
# APERture 10
'IM10':['K2 instrumental magnitude (aperture 10)','%.5f',float],
'IE10':['K2 instrumental mag. error (aperture 10)','%.5f',float],
'IQ10':['K2 instrumental mag. quality flag (aperture 10)','%s',str],
'EP10':['detrended magnitude (aperture 10)','%.5f',float],
'EQ10':['detrended mag. quality flag (aperture 10)','%i',int],
'TF10':['TFA magnitude (aperture 10)','%.5f',float],
'CF10':['Cosine filtered magnitude (aperture 10)','%.5f',float],
# APERture 11
'IM11':['K2 instrumental magnitude (aperture 11)','%.5f',float],
'IE11':['K2 instrumental mag. error (aperture 11)','%.5f',float],
'IQ11':['K2 instrumental mag. quality flag (aperture 11)','%s',str],
'EP11':['detrended magnitude (aperture 11)','%.5f',float],
'EQ11':['detrended mag. quality flag (aperture 11)','%i',int],
'TF11':['TFA magnitude (aperture 11)','%.5f',float],
'CF11':['Cosine filtered magnitude (aperture 11)','%.5f',float],
# APERture 12
'IM12':['K2 instrumental magnitude (aperture 12)','%.5f',float],
'IE12':['K2 instrumental mag. error (aperture 12)','%.5f',float],
'IQ12':['K2 instrumental mag. quality flag (aperture 12)','%s',str],
'EP12':['detrended magnitude (aperture 12)','%.5f',float],
'EQ12':['detrended mag. quality flag (aperture 12)','%i',int],
'TF12':['TFA magnitude (aperture 12)','%.5f',float],
'CF12':['Cosine filtered magnitude (aperture 12)','%.5f',float],
# APERture 13
'IM13':['K2 instrumental magnitude (aperture 13)','%.5f',float],
'IE13':['K2 instrumental mag. error (aperture 13)','%.5f',float],
'IQ13':['K2 instrumental mag. quality flag (aperture 13)','%s',str],
'EP13':['detrended magnitude (aperture 13)','%.5f',float],
'EQ13':['detrended mag. quality flag (aperture 13)','%i',int],
'TF13':['TFA magnitude (aperture 13)','%.5f',float],
'CF13':['Cosine filtered magnitude (aperture 13)','%.5f',float],
# APERture 14
'IM14':['K2 instrumental magnitude (aperture 14)','%.5f',float],
'IE14':['K2 instrumental mag. error (aperture 14)','%.5f',float],
'IQ14':['K2 instrumental mag. quality flag (aperture 14)','%s',str],
'EP14':['detrended magnitude (aperture 14)','%.5f',float],
'EQ14':['detrended mag. quality flag (aperture 14)','%i',int],
'TF14':['TFA magnitude (aperture 14)','%.5f',float],
'CF14':['Cosine filtered magnitude (aperture 14)','%.5f',float],
# APERture 15
'IM15':['K2 instrumental magnitude (aperture 15)','%.5f',float],
'IE15':['K2 instrumental mag. error (aperture 15)','%.5f',float],
'IQ15':['K2 instrumental mag. quality flag (aperture 15)','%s',str],
'EP15':['detrended magnitude (aperture 15)','%.5f',float],
'EQ15':['detrended mag. quality flag (aperture 15)','%i',int],
'TF15':['TFA magnitude (aperture 15)','%.5f',float],
'CF15':['Cosine filtered magnitude (aperture 15)','%.5f',float],
# APERture 16
'IM16':['K2 instrumental magnitude (aperture 16)','%.5f',float],
'IE16':['K2 instrumental mag. error (aperture 16)','%.5f',float],
'IQ16':['K2 instrumental mag. quality flag (aperture 16)','%s',str],
'EP16':['detrended magnitude (aperture 16)','%.5f',float],
'EQ16':['detrended mag. quality flag (aperture 16)','%i',int],
'TF16':['TFA magnitude (aperture 16)','%.5f',float],
'CF16':['Cosine filtered magnitude (aperture 16)','%.5f',float],
# APERture 17
'IM17':['K2 instrumental magnitude (aperture 17)','%.5f',float],
'IE17':['K2 instrumental mag. error (aperture 17)','%.5f',float],
'IQ17':['K2 instrumental mag. quality flag (aperture 17)','%s',str],
'EP17':['detrended magnitude (aperture 17)','%.5f',float],
'EQ17':['detrended mag. quality flag (aperture 17)','%i',int],
'TF17':['TFA magnitude (aperture 17)','%.5f',float],
'CF17':['Cosine filtered magnitude (aperture 17)','%.5f',float],
# APERture 18
'IM18':['K2 instrumental magnitude (aperture 18)','%.5f',float],
'IE18':['K2 instrumental mag. error (aperture 18)','%.5f',float],
'IQ18':['K2 instrumental mag. quality flag (aperture 18)','%s',str],
'EP18':['detrended magnitude (aperture 18)','%.5f',float],
'EQ18':['detrended mag. quality flag (aperture 18)','%i',int],
'TF18':['TFA magnitude (aperture 18)','%.5f',float],
'CF18':['Cosine filtered magnitude (aperture 18)','%.5f',float],
# APERture 19
'IM19':['K2 instrumental magnitude (aperture 19)','%.5f',float],
'IE19':['K2 instrumental mag. error (aperture 19)','%.5f',float],
'IQ19':['K2 instrumental mag. quality flag (aperture 19)','%s',str],
'EP19':['detrended magnitude (aperture 19)','%.5f',float],
'EQ19':['detrended mag. quality flag (aperture 19)','%i',int],
'TF19':['TFA magnitude (aperture 19)','%.5f',float],
'CF19':['Cosine filtered magnitude (aperture 19)','%.5f',float],
# APERture 20
'IM20':['K2 instrumental magnitude (aperture 20)','%.5f',float],
'IE20':['K2 instrumental mag. error (aperture 20)','%.5f',float],
'IQ20':['K2 instrumental mag. quality flag (aperture 20)','%s',str],
'EP20':['detrended magnitude (aperture 20)','%.5f',float],
'EQ20':['detrended mag. quality flag (aperture 20)','%i',int],
'TF20':['TFA magnitude (aperture 20)','%.5f',float],
'CF20':['Cosine filtered magnitude (aperture 20)','%.5f',float],
# APERture 20
'IM21':['K2 instrumental magnitude (aperture 21)','%.5f',float],
'IE21':['K2 instrumental mag. error (aperture 21)','%.5f',float],
'IQ21':['K2 instrumental mag. quality flag (aperture 21)','%s',str],
'EP21':['detrended magnitude (aperture 21)','%.5f',float],
'EQ21':['detrended mag. quality flag (aperture 21)','%i',int],
'TF21':['TFA magnitude (aperture 21)','%.5f',float],
'CF21':['Cosine filtered magnitude (aperture 21)','%.5f',float],
# APERture 21
'IM22':['K2 instrumental magnitude (aperture 22)','%.5f',float],
'IE22':['K2 instrumental mag. error (aperture 22)','%.5f',float],
'IQ22':['K2 instrumental mag. quality flag (aperture 22)','%s',str],
'EP22':['detrended magnitude (aperture 22)','%.5f',float],
'EQ22':['detrended mag. quality flag (aperture 22)','%i',int],
'TF22':['TFA magnitude (aperture 22)','%.5f',float],
'CF22':['Cosine filtered magnitude (aperture 22)','%.5f',float],
# APERture 22
'IM23':['K2 instrumental magnitude (aperture 23)','%.5f',float],
'IE23':['K2 instrumental mag. error (aperture 23)','%.5f',float],
'IQ23':['K2 instrumental mag. quality flag (aperture 23)','%s',str],
'EP23':['detrended magnitude (aperture 23)','%.5f',float],
'EQ23':['detrended mag. quality flag (aperture 23)','%i',int],
'TF23':['TFA magnitude (aperture 23)','%.5f',float],
'CF23':['Cosine filtered magnitude (aperture 23)','%.5f',float],
# APERture 23
'IM24':['K2 instrumental magnitude (aperture 24)','%.5f',float],
'IE24':['K2 instrumental mag. error (aperture 24)','%.5f',float],
'IQ24':['K2 instrumental mag. quality flag (aperture 24)','%s',str],
'EP24':['detrended magnitude (aperture 24)','%.5f',float],
'EQ24':['detrended mag. quality flag (aperture 24)','%i',int],
'TF24':['TFA magnitude (aperture 24)','%.5f',float],
'CF24':['Cosine filtered magnitude (aperture 24)','%.5f',float],
# APERture 24
'IM25':['K2 instrumental magnitude (aperture 25)','%.5f',float],
'IE25':['K2 instrumental mag. error (aperture 25)','%.5f',float],
'IQ25':['K2 instrumental mag. quality flag (aperture 25)','%s',str],
'EP25':['detrended magnitude (aperture 25)','%.5f',float],
'EQ25':['detrended mag. quality flag (aperture 25)','%i',int],
'TF25':['TFA magnitude (aperture 25)','%.5f',float],
'CF25':['Cosine filtered magnitude (aperture 25)','%.5f',float],
# APERture 25
'IM26':['K2 instrumental magnitude (aperture 26)','%.5f',float],
'IE26':['K2 instrumental mag. error (aperture 26)','%.5f',float],
'IQ26':['K2 instrumental mag. quality flag (aperture 26)','%s',str],
'EP26':['detrended magnitude (aperture 26)','%.5f',float],
'EQ26':['detrended mag. quality flag (aperture 26)','%i',int],
'TF26':['TFA magnitude (aperture 26)','%.5f',float],
'CF26':['Cosine filtered magnitude (aperture 26)','%.5f',float],
# APERture 26
'IM27':['K2 instrumental magnitude (aperture 27)','%.5f',float],
'IE27':['K2 instrumental mag. error (aperture 27)','%.5f',float],
'IQ27':['K2 instrumental mag. quality flag (aperture 27)','%s',str],
'EP27':['detrended magnitude (aperture 27)','%.5f',float],
'EQ27':['detrended mag. quality flag (aperture 27)','%i',int],
'TF27':['TFA magnitude (aperture 27)','%.5f',float],
'CF27':['Cosine filtered magnitude (aperture 27)','%.5f',float],
# APERture 27
'IM28':['K2 instrumental magnitude (aperture 28)','%.5f',float],
'IE28':['K2 instrumental mag. error (aperture 28)','%.5f',float],
'IQ28':['K2 instrumental mag. quality flag (aperture 28)','%s',str],
'EP28':['detrended magnitude (aperture 28)','%.5f',float],
'EQ28':['detrended mag. quality flag (aperture 28)','%i',int],
'TF28':['TFA magnitude (aperture 28)','%.5f',float],
'CF28':['Cosine filtered magnitude (aperture 28)','%.5f',float],
# APERture 28
'IM29':['K2 instrumental magnitude (aperture 29)','%.5f',float],
'IE29':['K2 instrumental mag. error (aperture 29)','%.5f',float],
'IQ29':['K2 instrumental mag. quality flag (aperture 29)','%s',str],
'EP29':['detrended magnitude (aperture 29)','%.5f',float],
'EQ29':['detrended mag. quality flag (aperture 29)','%i',int],
'TF29':['TFA magnitude (aperture 29)','%.5f',float],
'CF29':['Cosine filtered magnitude (aperture 29)','%.5f',float],
# APERture 29
'IM30':['K2 instrumental magnitude (aperture 30)','%.5f',float],
'IE30':['K2 instrumental mag. error (aperture 30)','%.5f',float],
'IQ30':['K2 instrumental mag. quality flag (aperture 30)','%s',str],
'EP30':['detrended magnitude (aperture 30)','%.5f',float],
'EQ30':['detrended mag. quality flag (aperture 30)','%i',int],
'TF30':['TFA magnitude (aperture 30)','%.5f',float],
'CF30':['Cosine filtered magnitude (aperture 30)','%.5f',float],
# APERture 30
'IM31':['K2 instrumental magnitude (aperture 31)','%.5f',float],
'IE31':['K2 instrumental mag. error (aperture 31)','%.5f',float],
'IQ31':['K2 instrumental mag. quality flag (aperture 31)','%s',str],
'EP31':['detrended magnitude (aperture 31)','%.5f',float],
'EQ31':['detrended mag. quality flag (aperture 31)','%i',int],
'TF31':['TFA magnitude (aperture 31)','%.5f',float],
'CF31':['Cosine filtered magnitude (aperture 31)','%.5f',float],
# APERture 31
'IM32':['K2 instrumental magnitude (aperture 32)','%.5f',float],
'IE32':['K2 instrumental mag. error (aperture 32)','%.5f',float],
'IQ32':['K2 instrumental mag. quality flag (aperture 32)','%s',str],
'EP32':['detrended magnitude (aperture 32)','%.5f',float],
'EQ32':['detrended mag. quality flag (aperture 32)','%i',int],
'TF32':['TFA magnitude (aperture 32)','%.5f',float],
'CF32':['Cosine filtered magnitude (aperture 32)','%.5f',float],
# APERture 33
'IM33':['K2 instrumental magnitude (aperture 33)','%.5f',float],
'IE33':['K2 instrumental mag. error (aperture 33)','%.5f',float],
'IQ33':['K2 instrumental mag. quality flag (aperture 33)','%s',str],
'EP33':['detrended magnitude (aperture 33)','%.5f',float],
'EQ33':['detrended mag. quality flag (aperture 33)','%i',int],
'TF33':['TFA magnitude (aperture 33)','%.5f',float],
'CF33':['Cosine filtered magnitude (aperture 33)','%.5f',float],
# APERture 34
'IM34':['K2 instrumental magnitude (aperture 34)','%.5f',float],
'IE34':['K2 instrumental mag. error (aperture 34)','%.5f',float],
'IQ34':['K2 instrumental mag. quality flag (aperture 34)','%s',str],
'EP34':['detrended magnitude (aperture 34)','%.5f',float],
'EQ34':['detrended mag. quality flag (aperture 34)','%i',int],
'TF34':['TFA magnitude (aperture 34)','%.5f',float],
'CF34':['Cosine filtered magnitude (aperture 34)','%.5f',float],
# APERture 35
'IM35':['K2 instrumental magnitude (aperture 35)','%.5f',float],
'IE35':['K2 instrumental mag. error (aperture 35)','%.5f',float],
'IQ35':['K2 instrumental mag. quality flag (aperture 35)','%s',str],
'EP35':['detrended magnitude (aperture 35)','%.5f',float],
'EQ35':['detrended mag. quality flag (aperture 35)','%i',int],
'TF35':['TFA magnitude (aperture 35)','%.5f',float],
'CF35':['Cosine filtered magnitude (aperture 35)','%.5f',float],
}
##################################
## FUNCTIONS TO READ K2 HAT LCS ##
##################################
def _parse_csv_header(header):
    '''This parses a CSV header from a K2 CSV LC.

    Returns a dict that can be used to update an existing lcdict with the
    relevant metadata info needed to form a full LC.

    '''

    # first, break into lines
    headerlines = header.split('\n')
    headerlines = [x.lstrip('# ') for x in headerlines]

    # next, find the indices of the '# COLUMNS' line and '# LIGHTCURVE' line
    metadatastart = headerlines.index('METADATA')
    columnstart = headerlines.index('COLUMNS')
    lcstart = headerlines.index('LIGHTCURVE')

    # get the lines for the metadata and columndefs
    metadata = headerlines[metadatastart+1:columnstart-1]
    columndefs = headerlines[columnstart+1:lcstart-1]

    # parse the metadata: each line holds comma-separated 'key = value' pairs;
    # the last metadata line is the aperture pixel radius list
    metainfo = [x.split(',') for x in metadata][:-1]
    aperpixradius = metadata[-1]

    # line 0: object identifiers and magnitude
    objectid, kepid, ucac4id, kepmag = metainfo[0]
    objectid, kepid, ucac4id, kepmag = (objectid.split(' = ')[-1],
                                        kepid.split(' = ')[-1],
                                        ucac4id.split(' = ')[-1],
                                        kepmag.split(' = ')[-1])
    kepmag = float(kepmag) if kepmag else None

    # line 1: coordinates, detection count, campaign number
    ra, decl, ndet, k2campaign = metainfo[1]
    ra, decl, ndet, k2campaign = (ra.split(' = ')[-1],
                                  decl.split(' = ')[-1],
                                  int(ndet.split(' = ')[-1]),
                                  int(k2campaign.split(' = ')[-1]))

    # line 2: focal-plane location of the object
    fovccd, fovchannel, fovmodule = metainfo[2]
    fovccd, fovchannel, fovmodule = (int(fovccd.split(' = ')[-1]),
                                     int(fovchannel.split(' = ')[-1]),
                                     int(fovmodule.split(' = ')[-1]))

    # line 3: older headers have three fields; newer ones add a kernelspec
    try:
        qualflag, bjdoffset, napertures = metainfo[3]
        qualflag, bjdoffset, napertures = (int(qualflag.split(' = ')[-1]),
                                           float(bjdoffset.split(' = ')[-1]),
                                           int(napertures.split(' = ')[-1]))
        kernelspec = None
    except Exception:
        qualflag, bjdoffset, napertures, kernelspec = metainfo[3]
        qualflag, bjdoffset, napertures, kernelspec = (
            int(qualflag.split(' = ')[-1]),
            float(bjdoffset.split(' = ')[-1]),
            int(napertures.split(' = ')[-1]),
            str(kernelspec.split(' = ')[-1])
        )

    aperpixradius = aperpixradius.split(' = ')[-1].split(',')
    aperpixradius = [float(x) for x in aperpixradius]

    # parse the columndefs: each line is '<index> - <column name> - <desc>'
    columns = [x.split(' - ')[1] for x in columndefs]

    metadict = {'objectid':objectid,
                'objectinfo':{
                    'objectid':objectid,
                    'kepid':kepid,
                    'ucac4id':ucac4id,
                    'kepmag':kepmag,
                    'ra':ra,
                    'decl':decl,
                    'ndet':ndet,
                    'k2campaign':k2campaign,
                    'fovccd':fovccd,
                    'fovchannel':fovchannel,
                    'fovmodule':fovmodule,
                    'qualflag':qualflag,
                    'bjdoffset':bjdoffset,
                    'napertures':napertures,
                    'kernelspec':kernelspec,
                    'aperpixradius':aperpixradius,
                },
                'columns':columns}

    return metadict
def read_csv_lightcurve(lcfile):
    '''
    This reads in a K2 lightcurve in CSV format. Transparently reads gzipped
    files.

    Parameters
    ----------

    lcfile : str
        The light curve file to read.

    Returns
    -------

    dict
        Returns an lcdict.

    '''

    # read in the file first; detect gzip by the actual extension instead of a
    # substring test ('.gz' in ...) which would also match names such as
    # 'foo.gz.bak' or 'data.gzip' and then fail inside gzip.open
    if os.path.basename(lcfile).endswith('.gz'):
        LOGINFO('reading gzipped K2 LC: %s' % lcfile)
        infd = gzip.open(lcfile, 'rb')
    else:
        LOGINFO('reading K2 LC: %s' % lcfile)
        infd = open(lcfile, 'rb')

    # close the handle even if read/decode raises
    try:
        lctext = infd.read().decode()
    finally:
        infd.close()

    # figure out the header and get the LC columns; the header ends with the
    # 12-char '# LIGHTCURVE' marker line, data rows start after its newline
    lcstart = lctext.index('# LIGHTCURVE\n')
    lcheader = lctext[:lcstart+12]
    lccolumns = lctext[lcstart+13:].split('\n')
    lccolumns = [x.split(',') for x in lccolumns if len(x) > 0]

    # initialize the lcdict and parse the CSV header
    lcdict = _parse_csv_header(lcheader)

    # transpose the LC rows into columns
    lccolumns = list(zip(*lccolumns))

    # write the columns to the dict; COLUMNDEFS[col][2] is the caster for the
    # column as defined in the lcutils.COLUMNDEFS dictionary
    for colind, col in enumerate(lcdict['columns']):
        lcdict[col.lower()] = np.array([COLUMNDEFS[col][2](x)
                                        for x in lccolumns[colind]])

    lcdict['columns'] = [x.lower() for x in lcdict['columns']]

    return lcdict
| waqasbhatti/astrobase | astrobase/hatsurveys/k2hat.py | k2hat.py | py | 25,449 | python | en | code | 50 | github-code | 6 | [
{
"api_name": "logging.DEBUG",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "logging.basicC... |
28199024080 | import logging
import os
import json
from flask import Flask
from flask_ask import Ask, request, session, question, statement
import datetime as DT
# refresh the default transaction dump once at import time
os.system('sh transactions.sh > output.json')
# NOTE(review): this module-level `data` is shadowed by the per-user loads
# inside each helper below and appears otherwise unused -- confirm
data = json.load(open('output.json'))
app = Flask(__name__)
# mount the Alexa skill endpoint at the site root
ask = Ask(app, "/")
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
# function that converts an amount of money into a string that can be said
def say_money(value):
    """Convert a numeric dollar amount to a speakable sentence fragment.

    Parameters
    ----------
    value : int or float
        Amount in dollars, e.g. -12.34.

    Returns
    -------
    str
        e.g. 'negative 12 dollars and 34 cents'.  A leading space is kept
        for non-negative amounts to match the concatenation format the
        callers rely on.

    Bug fix: the original sliced str(value) to split dollars from cents,
    which kept the decimal point in the dollar part, broke on integers
    (str(100)[:-2] == '1') and on one-decimal floats (cents == '.5').
    """
    neg = 'negative' if value < 0 else ''
    # round to whole cents first so float artifacts (e.g. 0.1 + 0.2) vanish
    total_cents = int(round(abs(value) * 100))
    dollars, cents = divmod(total_cents, 100)
    return neg + ' ' + str(dollars) + ' dollars and ' + str(cents) + ' cents'
# check balance
def check_bal(idn):
    """Return a speakable sentence with the user's available balance.

    Runs the per-user accounts<idn>.sh script, which is expected to write
    JSON to output.json containing an 'account_balances' list whose first
    entry has an 'available' amount.
    """
    idn = str(idn)
    os.system('sh accounts'+idn+'.sh > output.json')
    data = json.load(open('output.json'))
    return ('your current available balance is: ' + say_money(data['account_balances'][0]['available']))
# most recent transaction
def most_recent_transaction(idn):
    """Speak the amount of the user's most recent transaction.

    Assumes transactions<idn>.sh writes output.json with a 'transactions'
    list ordered newest-first (index 0) -- confirm against the script.
    """
    idn = str(idn)
    os.system('sh transactions'+idn+'.sh > output.json')
    data = json.load(open('output.json'))
    val = data['transactions'][0]['amount']
    return ('Your most recent transaction was: ' + say_money(val))
# how much did i spend total
def total_yr_spend_value(idn):
    """Return the user's total transaction amount over the last year.

    Runs the per-user transactions<idn>.sh script to refresh output.json,
    sums the 'amount' field of every transaction, prints a speakable
    summary (side effect kept from the original) and returns the number.
    """
    idn = str(idn)
    os.system('sh transactions'+idn+'.sh > output.json')
    data = json.load(open('output.json'))
    # idiomatic sum over the amounts instead of a manual accumulator loop
    total = sum(x['amount'] for x in data['transactions'])
    print('Your total spending over the last year was: ' + say_money(total))
    return total
def total_yr_spend(idn):
    """Return a speakable sentence with the user's total yearly spending.

    Same aggregation as total_yr_spend_value, but returns the sentence
    instead of printing it and returning the number.
    """
    idn = str(idn)
    os.system('sh transactions'+idn+'.sh > output.json')
    data = json.load(open('output.json'))
    # idiomatic sum over the amounts instead of a manual accumulator loop
    total = sum(x['amount'] for x in data['transactions'])
    return ('Your total spending over the last year was: ' + say_money(total))
# how much did i spend over the last two weeks
def week_spend(idn):
    """Summarize the user's spending over the past two weeks.

    Sums transactions settled in the last 14 days and reports them as a
    percentage of the past year's spending.

    Bug fix: the original called total_yr_spend_value() without the
    required account id, which raised TypeError at runtime.  Also drops
    the stray per-transaction debug print of the date string.
    """
    idn = str(idn)
    os.system('sh transactions'+idn+'.sh > output.json')
    data = json.load(open('output.json'))
    total = 0
    today = DT.date.today()
    # despite the name, the window is 14 days (two weeks)
    week_ago = today - DT.timedelta(days=14)
    for x in data['transactions']:
        # settled_at starts with an ISO date; keep just YYYY-MM-DD
        strdate = str(x['settled_at'])[0:10]
        curr_day = DT.datetime.strptime(strdate, '%Y-%m-%d').date()
        if curr_day >= week_ago:
            total = total + x['amount']
    ret_str = ('Your total spending over the past two weeks was: ' +
               say_money(total) + '. ')
    past_two = total
    past_year = total_yr_spend_value(idn)
    # NOTE(review): this divides by the yearly total -- a user with zero
    # yearly spending would raise ZeroDivisionError
    ret_str = ret_str + ('The percentage from the past two weeks is ' +
                         str(round(past_two*100/past_year, 2)) +
                         ' percent of your spending over the past year')
    return ret_str
@ask.launch
def launch():
    """Greet the user when the EchoDog skill is launched."""
    # fixed typo in the spoken greeting: 'fincancial' -> 'financial'
    speech_text = 'Welcome to EchoDog, your loyal financial companion.'
    return question(speech_text).reprompt(speech_text).simple_card('HelloWorld', speech_text)
# ELLEN's
# intents below query live data for account id 1
#
@ask.intent('BiWeeklyPercentage')
def BiWeekPercent():
    """Alexa intent: two-week spending summary for account 1."""
    speech_text = week_spend(1) + 'That is pretty good, keep it up'
    return statement(speech_text).simple_card('BiWeeklyPercentage', speech_text)
@ask.intent('YearTotal')
def year_total():
    """Alexa intent: yearly spending total for account 1."""
    speech_text = total_yr_spend(1) + 'That is a lot of money.'
    return statement(speech_text).simple_card('YearTotal', speech_text)
@ask.intent('CheckBalance')
def chk_bal():
    """Alexa intent: available balance for account 1."""
    speech_text = check_bal(1) + ' You are doing pretty well for yourself'
    return statement(speech_text).simple_card('CheckBalance', speech_text)
@ask.intent('MostRecent')
def most_recent():
    """Alexa intent: most recent transaction for account 1."""
    speech_text = most_recent_transaction(1)
    return statement(speech_text).simple_card('MostRecent', speech_text)
# JACOB's
# same four intents as above, but for account id 2
#
@ask.intent('BiWeeklyPercentagetwo')
def BiWeekPercent2():
    """Alexa intent: two-week spending summary for account 2."""
    speech_text = week_spend(2)
    return statement(speech_text).simple_card('BiWeeklyPercentage', speech_text)
@ask.intent('YearTotaltwo')
def year_total2():
    """Alexa intent: yearly spending total for account 2."""
    speech_text = total_yr_spend(2)
    return statement(speech_text).simple_card('YearTotal', speech_text)
@ask.intent('CheckBalancetwo')
def chk_bal2():
    """Alexa intent: available balance for account 2."""
    speech_text = check_bal(2)
    return statement(speech_text).simple_card('CheckBalance', speech_text)
@ask.intent('MostRecenttwo')
def most_recent2():
    """Alexa intent: most recent transaction for account 2."""
    speech_text = most_recent_transaction(2)
    return statement(speech_text).simple_card('MostRecent', speech_text)
# MIKE
#
#
@ask.intent('BiWeeklyPercentagethree')
def BiWeekPercent3():
    """Alexa intent: canned (hard-coded) two-week summary for the demo user."""
    speech_text = 'The percentage over the past two weeks that you have' +'spent is 50 percent of your spending over the past year. ' + 'Boy you need to save more and stop being so yolo swag. Dabs'
    return statement(speech_text).simple_card('BiWeeklyPercentage', speech_text)
@ask.intent('YearTotalthree')
def year_total3():
    """Alexa intent: canned yearly-total response for the demo user."""
    speech_text = 'You did alright this year but you could use a while lot of improvement. Your killing me Mike'
    return statement(speech_text).simple_card('YearTotal', speech_text)
@ask.intent('CheckBalancethree')
def chk_bal3():
    """Alexa intent: canned balance response for the third demo user.

    Renamed from chk_bal: the original redefined the chk_bal name already
    bound by the 'CheckBalance' intent above (flake8 F811 shadowing).
    flask-ask captured both functions at decoration time, so behavior is
    unchanged; only the module-level name is fixed.
    """
    speech_text = 'I am not sure if you want to know your balance, but you have 5 dollars and 37 cents in your account,'
    return statement(speech_text).simple_card('CheckBalance', speech_text)
@ask.intent('MostRecentthree')
def most_recent3():
    """Alexa intent: canned most-recent-transaction response (demo user).

    Renamed from most_recent: the original redefined the most_recent name
    already bound by the 'MostRecent' intent above (flake8 F811 shadowing).
    """
    speech_text = 'You spent 50 dollars on garlic bread maybe you need to rethink your life choices'
    return statement(speech_text).simple_card('MostRecent', speech_text)
############################
@ask.intent('Unhandled')
def unhandled():
    """Fallback for unrecognized commands; reprompts the user."""
    unhandled_response="Sorry, I did not understand that command. Say help for assitance"
    return question().reprompt(unhandled_response)
@ask.intent('HelpFunc')
def help_func():
    """List the commands this skill understands."""
    helplist="You are able to ask for most recent transaction, check your balance, spending stats for two weeks, and weekly total spending"
    return question(helplist).simple_card('HelpFunc', helplist)
@ask.intent('AMAZON.HelpIntent')
def help():
    # NOTE(review): this shadows the builtin help(); consider renaming.
    # Also note 'assitance' typo in the spoken string (left as-is here).
    unhandled_response="Sorry, I did not understand that command. Say help for assitance."
    return question().reprompt(unhandled_response)
@ask.session_ended
def session_ended():
    """Return an empty JSON body when the Alexa session ends."""
    return "{}", 200
if __name__ == '__main__':
    # allow disabling request signature verification via the environment
    # (useful for local testing with ngrok etc.)
    if 'ASK_VERIFY_REQUESTS' in os.environ:
        verify = str(os.environ.get('ASK_VERIFY_REQUESTS', '')).lower()
        if verify == 'false':
            app.config['ASK_VERIFY_REQUESTS'] = False
    app.run(debug=True)
# how much did i spend last week
# how much did i spend last week compared to the entire
| Interplay/HoyaHacks-18 | main.py | main.py | py | 6,607 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.system",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask_ask.Ask",
"line_number": 12,
... |
28075967300 | import numpy as np
import matplotlib
matplotlib.use("Qt5Agg")
print("BACKEND: ", matplotlib.get_backend())
from matplotlib import pyplot as plt
import utility as ut
import network as nt
from tqdm import tqdm as tqdm
import plot as pt
# integration timestep for the spiking simulation (seconds)
delta_T = 1e-3
# bars
spiking_input = False
dim = 8
# one output unit per horizontal and per vertical bar
n_outputs = 2*dim
n_inputs = dim*dim
r_net = 2 # 0.5
# uniform mixture weight per output unit
m_k = 1.0/n_outputs
# 10000 bar images, flattened to n_inputs-dimensional vectors
X = ut.generate_bars(10000, dim, dim, p=1.7/8.0)
X = np.reshape(X, (-1, dim*dim))
if spiking_input:
    # scale pixels to rate values before sampling spike trains
    # (presumably Hz -- see ut.generate_spike_trains)
    X = X * 70.0 + 20.0
    X_spikes = ut.generate_spike_trains(X, 1000, delta_T=delta_T)
else:
    X_spikes = ut.generate_constant_trains(X, 1000, delta_T=delta_T)
"""
# visualize spike trains
test_spikes = list(X_spikes)[0]
pt.plot_spiketrain(test_spikes, delta_T, tmax=2)
plt.show()
"""
net = nt.BinaryWTANetwork(n_inputs=n_inputs, n_outputs=n_outputs,
                        delta_T=delta_T, r_net=r_net, m_k=m_k, eta_v=1e2, eta_b=1e5)
# train
from plot import WeightPCAPlotter, WeightPlotter
pca_plotter = WeightPCAPlotter(X, np.zeros(X.shape[0]), n_outputs, [0, 0], annotations=True)
weights_plotter = WeightPlotter(ut.sigmoid(net._V).reshape((-1, dim, dim)))
from collections import deque
# moving-average window for the reported log-likelihood
average_length_likelihood = 500
pbar = tqdm(enumerate(X_spikes))
for batch_index, sample_batch in pbar:
    # update figure here
    log_likelihoods = deque([])
    for sample in sample_batch:
        net.step(sample)
        # log likelihood
        # NOTE(review): Ak is computed but never used
        Ak = np.sum(np.log(1+np.exp(net._V)), -1)
        pi = ut.sigmoid(net._V)
        log_likelihoods.append(np.log(1.0/n_outputs) + np.log(np.sum(np.prod(sample * pi + (1-sample) * (1-pi), axis=-1))))
        if len(log_likelihoods) > average_length_likelihood:
            log_likelihoods.popleft()
    weights = ut.sigmoid(net._V)
    pca_plotter.update(weights)
    weights_plotter.update(weights)
    pbar.set_description(f'<sigma(V)> = {np.mean(weights):.4f}, <b> = {np.mean(net._b):.4f}, <L(y)> = {np.mean(log_likelihoods)}')
| zimmerrol/spiking-bayesian-networks | bars_binary.py | bars_binary.py | py | 1,960 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "matplotlib.get_backend",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "utility.generate_bars",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.resha... |
70267336189 |
import config
from epyk.core.Page import Report
# Create a basic report object
# Create a basic report object
page = Report()
# markdown-rendered heading
page.ui.text("#This is a text", options={"markdown": True})
# button with a client-side JS alert on click
page.ui.button("This is a test").click([
  page.js.alert("test")
])
page.outs.publish(server="node", app_path=config.OUTPUT_PATHS_LOCALS_TS, module=config.OUT_FILENAME) | epykure/epyk-templates | web/app_nodejs.py | app_nodejs.py | py | 328 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "epyk.core.Page.Report",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "config.OUTPUT_PATHS_LOCALS_TS",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "config.OUT_FILENAME",
"line_number": 14,
"usage_type": "attribute"
}
] |
13389188618 | from __future__ import print_function
import torch
import torchvision
from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
class Vgg16c(torch.nn.Module):
    """VGG16-based encoder/decoder with non-linear convolution (NLC) blocks.

    A frozen, pretrained VGG16 feature stack is decoded down to a
    1-channel output map via repeated `nlcn` blocks plus PixelShuffle
    upsampling, with skip connections taken from intermediate VGG stages
    (c1..c4).  The inline '# N' comments record channel counts.
    """
    def __init__(self):
        super(Vgg16c, self).__init__()
        # pretrained VGG16 convolutional features (weights frozen below)
        vgg_pretrained_features = models.vgg16(pretrained=True).features
        modified_pretrained = nn.Sequential(*list(vgg_pretrained_features.children())[:-1])
        for param in modified_pretrained.parameters():
            param.requires_grad = False
        self.features = modified_pretrained
        # PixelShuffle(2): trades 4x channels for 2x spatial resolution
        self.up = nn.PixelShuffle(2)
        # two parallel NLC chains (normalized-positive and signed variants)
        self.filter1a = NL_Conv3(ksize=3, in_ch=8, out_ch=8)
        self.filter1b = NL_Conv3(ksize=3, in_ch=16, out_ch=4)
        self.filter1c = NL_Conv3(ksize=3, in_ch=20, out_ch=16)
        self.filter3a = NL_Conv3N(ksize=3, in_ch=8, out_ch=8)
        self.filter3b = NL_Conv3N(ksize=3, in_ch=16, out_ch=4)
        self.filter3c = NL_Conv3N(ksize=3, in_ch=20, out_ch=16)
        # 1x1 convs squeezing to / expanding from the 8-channel NLC width
        self.sk1 = nn.Conv2d(512, 8, 1)
        self.sk2 = nn.Conv2d(8, 512, 1)
        self.classifier2 = nn.Conv2d(512, 8, 1)
        self.skclassifier2 = nn.Conv2d(8, 256, 1)
        self.classifier3 = nn.Conv2d(256, 8, 1)
        self.skclassifier3 = nn.Conv2d(8, 128, 1)
        self.classifier4 = nn.Conv2d(128, 8, 1)
        self.skclassifier4 = nn.Conv2d(8, 64, 1)
        self.classifier5 = nn.Conv2d(64, 1, 1, 1)
        # truncated VGG prefixes used as skip-connection feature extractors
        self.c1 = nn.Sequential(*list(vgg_pretrained_features.children())[:-8])
        self.c2 = nn.Sequential(*list(vgg_pretrained_features.children())[:-15])
        self.c3 = nn.Sequential(*list(vgg_pretrained_features.children())[:-22])
        self.c4 = nn.Sequential(*list(vgg_pretrained_features.children())[:-27])
    def nlcn(self,x):
        """Run both NLC chains on x, concatenate and upsample (8->8 ch, 2x)."""
        x1 = self.filter1a(x) # 8
        x1t = torch.cat((x, x1), dim=1) # 16
        x1 = self.filter1b(x1t) # 4
        x1t = torch.cat((x1t, x1), dim=1) # 20
        x1 = self.filter1c(x1t) # 16
        # x1t = torch.cat((x1t,x1),dim=1) # 16
        # x1 = self.up(x1t) #4
        x2 = self.filter3a(x) # 8
        x2t = torch.cat((x, x2), dim=1) # 16
        x2 = self.filter3b(x2t) # 4
        x2t = torch.cat((x2t, x2), dim=1) # 20
        x2 = self.filter3c(x2t) # 16
        # x2t = torch.cat((x2t,x2),dim=1) # 16
        # x2 = self.up(x2t) #4
        x = torch.cat((x1, x2), dim=1) # 32
        x = self.up(x) # 8
        return x
    def forward(self, x):
        """Encode x with frozen VGG features, decode with NLC + skips."""
        # skip features from progressively earlier VGG stages
        xc1 = self.c1(x)
        xc2 = self.c2(x)
        xc3 = self.c3(x)
        xc4 = self.c4(x)
        # print('xc1:',xc1.shape)
        # print('xc2:',xc2.shape)
        # print('xc3:',xc3.shape)
        # print('xc4:',xc4.shape)
        # print("........")
        # print('Input:',x.shape)
        x = self.features(x)
        # print('Features:',x.shape)
        x = self.sk1(x)
        # print('after sk1:',x.shape)
        x = self.nlcn(x)
        # print('after nlcn:',x.shape)
        x = self.sk2(x)
        # print('after sk2:',x.shape)
        # x = self.classifier1(x)
        # each stage: add the matching skip, squeeze, NLC-upsample, expand
        x = self.classifier2(x + xc1)
        x = self.nlcn(x)
        # print('after classifier2(xc1 added) and nlcn:',x.shape)
        x = self.skclassifier2(x)
        # print('after skclassifier2:',x.shape)
        x = self.classifier3(x + xc2)
        x = self.nlcn(x)
        # print('after classifier3(xc2 added) and nlcn:',x.shape)
        x = self.skclassifier3(x)
        # print('after skclassifier3:',x.shape)
        x = self.classifier4(x + xc3)
        x = self.nlcn(x)
        # print('after classifier4(xc3 added) and nlcn:',x.shape)
        x = self.skclassifier4(x)
        # print('after skclassifier4:',x.shape)
        x = self.classifier5(x + xc4)
        # print('after classifier5(xc4 added) :',x.shape)
        # return x1+x2
        return x
class NL_Conv3(nn.Module):
    """Non-linear convolution layer with ReLU-positive, normalized weights.

    A standard conv predicts, per pixel, one non-negative weight for every
    (kernel position, input channel, output channel) triple.  forward()
    builds shifted copies of the input (via roll) covering the ksize*ksize
    neighborhood, normalizes each output's weight slab to sum to 1 along
    the channel axis, and produces each output channel as the weighted sum.
    """
    def __init__(self,ksize,in_ch,out_ch):
        super(NL_Conv3, self).__init__()
        # predicts ksize*ksize*in_ch weights per output channel, per pixel
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch,ksize*ksize*in_ch*out_ch , kernel_size=ksize, padding=ksize//2, bias=False),
            nn.ReLU()
        )#ksize*ksize*out_ch*in_ch
        self.ksize= ksize
        self.in_ch= in_ch
        self.out_ch= out_ch
        # size of one output channel's weight slab
        self.por= ksize*ksize*in_ch
    def forward(self, x):
        dims=x.shape
        xc=torch.clone(x) # Initialize xc as several copy of x
        for i in range(self.ksize*self.ksize-1):
            xc=torch.cat((xc,x),dim=1)
        ind=0
        # overwrite each copy with the input rolled to one kernel offset,
        # so xc stacks the whole ksize*ksize neighborhood channel-wise
        for i in range(-(self.ksize//2),self.ksize//2+1):
            for j in range(-(self.ksize//2),self.ksize//2+1):
                # tmp=x.roll(i,-1).roll(j,-2).view(dims[0],1,dims[2],dims[3])
                # xc[:,ind,:,:]=tmp[:,0,:,:]
                xc[:,ind*self.in_ch:(ind+1)*self.in_ch,:,:]=\
                x.roll(i,-1).roll(j,-2).view(dims[0],self.in_ch,dims[2],dims[3])\
                [:,0:self.in_ch,:,:]
                ind=ind+1
        # small epsilon keeps the normalization below away from 0/0
        w=self.conv(x)+.0001
        # out=torch.clone(xc).narrow(1,0,self.out_ch)
        out=torch.empty(dims[0],self.out_ch,dims[2],dims[3]).to(xc.device)
        for i in range(self.out_ch):
            w_por=w[:,i*self.por:(i+1)*self.por,:,:]
            w_sum=torch.sum(w_por,dim=1).view(-1,1,dims[2],dims[3])
            w_norm=w_por/w_sum # normalization along Dim=1
            xp=w_norm*xc
            x1=torch.sum(xp,dim=1).view(-1,1,dims[2],dims[3])
            out[:,i:i+1,:,:]=x1.view(-1,1,dims[2],dims[3])
        return out
class NL_Conv3N(nn.Module):
    """Signed variant of NL_Conv3.

    Same neighborhood-stacking scheme, but the predicted weights keep
    their sign (no ReLU); normalization divides by the sum of absolute
    weights, so each output is a signed convex-like combination.
    """
    def __init__(self,ksize,in_ch,out_ch):
        super(NL_Conv3N, self).__init__()
        # signed weights: no ReLU here, unlike NL_Conv3
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch,ksize*ksize*out_ch*in_ch , kernel_size=ksize, padding=ksize//2, bias=False)
            # nn.Hardtanh()
        )#ksize*ksize*out_ch*in_ch
        self.ksize = ksize
        self.in_ch = in_ch
        self.out_ch = out_ch
        # size of one output channel's weight slab
        self.por = ksize*ksize*in_ch
    def forward(self, x):
        dims=x.shape
        xc=torch.clone(x) # Initialize xc as several copy of x
        for i in range(self.ksize*self.ksize-1):
            xc=torch.cat((xc,x),dim=1)
        ind=0
        # stack the rolled neighborhood copies channel-wise (see NL_Conv3)
        for i in range(-(self.ksize//2),self.ksize//2+1):
            for j in range(-(self.ksize//2),self.ksize//2+1):
                xc[:,ind*self.in_ch:(ind+1)*self.in_ch,:,:]=\
                x.roll(i,-1).roll(j,-2).view(dims[0],self.in_ch,dims[2],dims[3])\
                [:,0:self.in_ch,:,:]
                ind=ind+1
        w=self.conv(x)
        # push magnitudes away from zero while preserving sign
        w=torch.sign(w)*(torch.abs(w)+.0001)
        out=torch.empty(dims[0],self.out_ch,dims[2],dims[3]).to(xc.device)
        for i in range(self.out_ch):
            w_por=w[:,i*self.por:(i+1)*self.por,:,:]
            w_sum=torch.sum(torch.abs(w_por),dim=1).view(-1,1,dims[2],dims[3])
            w_norm=w_por/w_sum # normalization along Dim=1
            xp=w_norm*xc
            x1=torch.sum(xp,dim=1).view(-1,1,dims[2],dims[3])
            out[:,i:i+1,:,:]=x1.view(-1,1,dims[2],dims[3])
        return out
| jhilikb/NLBM | model/vgg_nlbm_cuhk.py | vgg_nlbm_cuhk.py | py | 7,190 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torchvision.models.vgg16",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.Se... |
74918964666 | import re
from collections import defaultdict
XMIN = -2
def find(rules, current):
    """Evolve one generation of the pot string, memoizing into *rules*.

    Strings shorter than the 5-pot window produce ""; an exact 5-pot
    window not present in *rules* produces ".".  Longer strings are
    solved recursively and the result is cached back into *rules*.
    """
    if len(current) < 5:
        return ""
    if current in rules:
        return rules[current]
    if len(current) == 5:
        return "."
    # evolve the prefix missing its last pot, then the final 5-pot window,
    # and cache the concatenation for reuse
    size = len(current)
    left = find(rules, current[:size - 1])
    right = find(rules, current[size - 5:])
    combined = left + right
    rules[current] = combined
    return combined
def read_file(file):
    """Parse an AoC day-12 input file into (initial_state, rules).

    The first line holds 'initial state: <pots>'; rule lines follow a
    blank line.  Unlisted patterns default to '.' via the defaultdict.
    """
    rules = defaultdict(lambda: ".")
    pattern = re.compile(r"([.#]+) => ([.#])")
    with open(file) as handle:
        lines = handle.readlines()
    state = lines[0].split(": ")[1].strip()
    for rule_line in lines[2:]:
        match = pattern.match(rule_line.strip())
        rules[match.group(1)] = match.group(2)
    return state, rules
def print_state(state):
    # debugging helper: dump the current pot string to stdout
    print(state)
def sum_pots(state):
    """Sum the pot numbers of all planted pots ('#') in *state*.

    Index 0 of *state* corresponds to pot number XMIN (module global).
    """
    return sum(i + XMIN for i, c in enumerate(state) if c == "#")
def day12(file):
    """AoC 2018 day 12 part 2: pot-number sum after 50 billion generations.

    Evolves the state until the generation-to-generation sum delta becomes
    constant, then extrapolates linearly.  Mutates the module-global XMIN
    as the visible pot window is trimmed/padded.
    """
    global XMIN
    state,rules = read_file(file)
    # NOTE(review): XMAX is computed but never used
    XMAX = len(state)+1
    state = "..%s.." % state
    sums = list()
    i = 0
    # loop until the sum increases by the same amount twice in a row
    while len(sums) < 3 or sums[-1]-sums[-2] != sums[-2]-sums[-3]:
        state = find(rules,"..%s.." % state)
        # trim an empty leading window and advance the pot-number offset
        if state[0] == "." and state[1] == "." and state[2] == "." and state[3] == ".":
            state = state[2:]
            XMIN += 2
        # re-pad when plants reach either edge of the window
        if state[0] == "#" or state[1] == "#":
            state = "..%s" % state
            XMIN -= 2
        if state[-1] == "#" or state[-2] == "#":
            state = "%s.." % state
        sums.append(sum_pots(state))
        i += 1
    # the delta is now constant: extrapolate to 50 billion generations
    diff = sums[-1]-sums[-2]
    missing = 50000000000 - i
    n = missing*diff + sums[-1]
    return n
| aarroyoc/advent-of-code-2018 | python/day12/day12_2.py | day12_2.py | py | 1,670 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 22,
"usage_type": "call"
}
] |
7074661101 | import pandas as pd
import pandas_datareader as web
import matplotlib.pyplot as plt
import datetime as dt
start = dt.datetime(2021,1,1)
end = dt.datetime.now()
ticker_symbol = input('Enter the stock ticker which you wish to analyse: ')
data = web.DataReader(ticker_symbol, 'yahoo', start, end)
#print(data)
delta = data['Adj Close'].diff(1) #Calculate difference to the day before that
delta.dropna(inplace = True) # Keep the DataFrame with valid entries in the same variable.
positive = delta.copy()
negative = delta.copy()
positive[positive < 0] = 0
negative[negative > 0] = 0
days = 14 # Standard, but can be lowered to increase sensitivity or raised to decrease sensitivity.
average_gain = positive.rolling(window = days).mean()
average_loss = abs(negative.rolling(window = days).mean())
relative_strength = average_gain/average_loss
RSI = 100.0 - (100.0 / (1.0 + relative_strength)) # Formula
combined = pd.DataFrame()
combined['Adj Close'] = data['Adj Close']
combined['RSI'] = RSI
plt.figure(figsize=(12,8))
ax1 = plt.subplot(211) # subplot(nrows, ncols, plot_number) hence nrows=2, ncols=1, plot_number=1
ax1.plot(combined.index, combined['Adj Close'], color = 'lightgray')
ax1.set_title("{} Adjusted Close Price".format(ticker_symbol), color = 'white')
ax1.grid(True, color = "#555555")
ax1.set_axisbelow(True)
ax1.set_facecolor('black')
ax1.figure.set_facecolor('#121212')
ax1.tick_params(axis = 'x', colors = 'white')
ax1.tick_params(axis = 'y', colors = 'white')
# RSI Values of 70 or above indicate an overbought or overvalued condition.
# RSI Values of 30 or below indicates an oversold or undervalued condition.
ax2 = plt.subplot(212, sharex = ax1) # Share same x axis.
ax2.plot(combined.index, combined['RSI'], color = 'lightgray')
ax2.axhline(0, linestyle='--',alpha=0.5, color = '#ff0000')
ax2.axhline(10, linestyle='--',alpha=0.5, color = '#ffaa00')
ax2.axhline(20, linestyle='--',alpha=0.5, color = '#00ff00')
ax2.axhline(30, linestyle='--',alpha=0.5, color = '#cccccc')
ax2.axhline(70, linestyle='--',alpha=0.5, color = '#cccccc')
ax2.axhline(80, linestyle='--',alpha=0.5, color = '#00ff00')
ax2.axhline(90, linestyle='--',alpha=0.5, color = '#ffaa00')
ax2.axhline(100, linestyle='--',alpha=0.5, color = '#ff0000')
ax2.set_title('{} RSI Value'.format(ticker_symbol), color = 'white')
ax2.grid(False)
ax2.set_axisbelow(True)
ax2.set_facecolor('black')
ax2.tick_params(axis = 'x', colors = 'white')
ax2.tick_params(axis = 'y', colors = 'white')
plt.show() | amanpanditap/Python_Projects | finance_python/technical_stock_analysis/technical_stock_analysis.py | technical_stock_analysis.py | py | 2,507 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pandas_da... |
5510824333 | from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
import cv2
import numpy as np
from detectron2 import model_zoo
# build a Mask R-CNN (R50-FPN) config for single-class instance segmentation
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
cfg.MODEL.WEIGHTS = 'weights/model_segmentation.pth'  # path to the model we just trained
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.95   # set a custom testing threshold
predictor = DefaultPredictor(cfg)
def get_segment_crop(img, tol=0, mask=None):
    """Crop *img* to the rows and columns where *mask* has any truthy entry.

    When *mask* is None it is derived as ``img > tol``.  For a contiguous
    mask this yields the mask's bounding box.
    """
    if mask is None:
        mask = img > tol
    rows = mask.any(1)
    cols = mask.any(0)
    return img[np.ix_(rows, cols)]
def segment_single_images(image, save_img=False):
    """Segment the main object in *image* and suppress the background.

    Runs the module-level Detectron2 `predictor`, keeps the largest
    predicted instance mask when several exist, and multiplies the image
    channels by the mask before converting back to RGB.

    NOTE(review): `save_img`, `error_ims`, `segmen_info` and `mask_check`
    are unused, and `crop_mask` is computed but discarded.  The final
    per-channel multiply scales by the 0..255 mask *and* by 255 again,
    which will saturate/wrap uint8 pixels -- confirm whether intended.
    """
    error_ims = []
    segmen_info = []
    # image = cv2.imread(path)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    output_predictor = predictor(image)
    if output_predictor['instances'].pred_masks.shape[0] > 1:
        mask_check = output_predictor['instances'].pred_masks.cpu().numpy()
        masks = output_predictor['instances'].pred_masks.cpu().numpy()
        # keep the mask with the largest pixel area
        mask_binary = masks[np.argmax(np.sum(masks, axis=(1, 2))) ,:,:]
    else:
        mask_binary = np.squeeze(output_predictor['instances'].pred_masks.permute(1, 2, 0).cpu().numpy())
    try:
        crop_mask = get_segment_crop(img = image, mask = mask_binary)
    except ValueError:
        print("error")
    origin_mask = cv2.cvtColor(np.float32(mask_binary) * 255.0, cv2.COLOR_GRAY2RGB)
    for j in range(image.shape[2]):
        image[:,:,j] = image[:,:,j] * origin_mask[:,:,j] * 255
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
| hoainv99/mc-ocr | modules/image_segmentation/predict.py | predict.py | py | 1,630 | python | en | code | 26 | github-code | 6 | [
{
"api_name": "detectron2.config.get_cfg",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "detectron2.model_zoo.get_config_file",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "detectron2.model_zoo",
"line_number": 7,
"usage_type": "name"
},
{
"... |
18886739040 | import re
import ast
from tkinter import Tk, Button, Text, Scrollbar, END
from pathlib import Path
from retroperm.project import RetropermProject
from retroperm.rules import Rule
from retroperm.rules.filesystem_rule import FilesystemRule
from retroperm.rules.ban_library_function_rule import BanLibraryFunctionRule
from retroperm.rules.ban_category_rule import BanCategoryRule
# TEST_BINARIES = Path("test_binaries")
TEST_BINARIES = Path(__file__).parent.parent / "tests" / "executables"
class bcolors:
    """ANSI escape sequences for colorizing terminal output."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKCYAN = '\033[96m'     # bright cyan
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class VstTester:
    """Runs retroperm rule validation against a clean and a malicious VST.

    Loads two prebuilt test binaries and reports per-rule pass/fail with
    ANSI coloring; filesystem-rule failures also dump the offending call
    arguments.
    """
    def __init__(self):
        # one project per binary: a benign and a deliberately bad arpeggiator
        self.retro_proj_clean = RetropermProject(TEST_BINARIES / "GoodArpeggiator.so.o")
        self.retro_proj_mal = RetropermProject(TEST_BINARIES / "BadArpeggiator.so.o")

    def iterprint(self, header: str, payload: dict):
        """Format *header* plus per-rule results, colorized by pass/fail."""
        result = bcolors.HEADER + header + bcolors.ENDC + "\n"
        for key, v in payload.items():
            # validator messages beginning with 'Failed' are warnings
            if v.startswith("Failed"):
                result += f'{bcolors.WARNING}{key}: {v}{bcolors.ENDC}\n'
            else:
                result += f'{bcolors.OKGREEN}{key}: {v}{bcolors.ENDC}\n'
        return result

    def eval_flow(self, proj: RetropermProject, header: str):
        """Install the rule set on *proj*, validate, and render a report."""
        ban_filesystem = BanCategoryRule('filesystem')
        ban_network = BanCategoryRule('network')
        my_rule_good = FilesystemRule("/home/mahaloz/.global.bsconf", 'filename', is_whitelist=True, is_dir=False)
        my_rule_bad = FilesystemRule("/etc/passwd", 'filename', is_whitelist=False, is_dir=False)
        rule_list = [ban_filesystem, ban_network, my_rule_good, my_rule_bad]
        proj.init_rules(rule_list, override_default=True)
        output = proj.validate_rules()
        result = self.iterprint(header, output)
        result += "\n"
        if output[ban_filesystem].startswith("Failed"):
            # NOTE(review): this always resolves from the *malicious* project,
            # even when proj is the clean one -- looks unintended; confirm
            resolved_data = self.retro_proj_mal.resolve_abusable_functions()
            rfo = resolved_data['resolved_function_data']
            # the failure message embeds a Python list literal of matches
            match_list = ast.literal_eval(re.findall(r'\[.*\]', output[my_rule_bad])[0])
            for match in match_list:
                if match not in rfo:
                    continue
                match_rfo = rfo[match]
                vals = list(match_rfo.args_by_location.values())
                result += f'{bcolors.OKCYAN}{str(vals)}{bcolors.ENDC}\n'
        return result

    def run_test(self):
        """Validate both binaries and return the combined colored report."""
        # NOTE(review): these two resolve results are unused here
        resolved_data_clean = self.retro_proj_clean.resolve_abusable_functions()
        resolved_data_mal = self.retro_proj_mal.resolve_abusable_functions()
        results = []
        results.append(self.eval_flow(self.retro_proj_clean, '`CleanVST` Rule Validation'))
        results.append(self.eval_flow(self.retro_proj_mal, '`MalVST` Rule Validation'))
        return "\n".join(results)
def run_test():
    """Button callback: run the VST tests and show the report in the text box."""
    tester = VstTester()
    result_text.delete(1.0, END)
    result_text.insert(END, tester.run_test())
# build the minimal Tk UI: a run button above a scrollable results pane
root = Tk()
root.title("VST Tester")

test_button = Button(root, text="Run Test", command=run_test)
test_button.pack()

result_text = Text(root, wrap="word", bg="white", fg="black")
result_text.pack(expand=True, fill="both")

scrollbar = Scrollbar(root, command=result_text.yview)
scrollbar.pack(side="right", fill="y")
result_text.config(yscrollcommand=scrollbar.set)

root.geometry("800x600")
root.mainloop()
| SpiritSeal/retroperm | ui/gui2.py | gui2.py | py | 3,484 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "retroperm.project.RetropermProject",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "retroperm.project.RetropermProject",
"line_number": 29,
"usage_type": "call"
},
{
... |
39508101385 | import numpy as np
import matplotlib.pyplot as plt
baseline = np.loadtxt('sub-AD4009_ses-baseline_acq-AP_date-2011-07-07_trc-av45_pet.csv', delimiter=',')
followup = np.loadtxt('sub-AD4009_ses-followup_acq-AP_date-2013-07-03_trc-av45_pet.csv', delimiter=',')
prediction = followup + np.random.normal(0, .025, size=followup.shape)
plt.figure(figsize=(20,10))
#plt.plot(baseline, '-', marker='o', c='#390099', label='baseline', linewidth=1)
plt.plot(followup, '-', marker='o', c='#00A6FB', linewidth=1)
plt.plot(prediction, '-', marker='o', c='#FF0054', linewidth=6, alpha = 0.2)
#plt.legend(bbox_to_anchor=(0, 1), loc='upper left', fontsize=22)
#plt.ylabel('Regional Error', fontsize=18)
#plt.yticks(fontsize=14)
plt.xlim(-1, len(baseline))
plt.tick_params(
axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
#plt.xticks(np.arange(len(baseline), step=2), fontsize=12, rotation=40)
#plt.xlabel('ROI (166 AAL3 regions)', fontsize=18)
plt.grid(True)
plt.tight_layout()
plt.savefig('followup.png')
#plt.savefig('baseline.png') | SanoScience/MP-spreading-prediction | pictures/Graphical_abstract/plot.py | plot.py | py | 1,090 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "numpy.loadtxt",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line... |
31490583356 | from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.shortcuts import render, redirect
from django.http import JsonResponse
import PathFinder.PathFinderModels.pathfinder_chat_bot as qamodel
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from .forms import EditProfileForm, UpdateProfile
from .models import Notes
from .forms import NotesForm, ContactForm
from django.contrib.auth.models import User
from .forms import ContactForm
from django.conf import settings
from django.core.mail import EmailMessage
import os
import json
# from PathFinder.PathFinderApp.forms import RegisterUserForm, ChatBotForm
# from django.contrib.auth.forms import UserCreationForm
# from django.core.exceptions import ObjectDoesNotExist
# from django.contrib.auth.decorators import login_required
# import smtplib
# from django.core.mail import send_mail
# import pickle
# import warnings
# import openai
import pinecone
from django.urls import reverse_lazy
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.views import PasswordChangeView
# NOTE(review): module-level api_key appears unused below -- presumably
# OpenAIEmbeddings reads OPENAI_API_KEY from the environment itself; confirm
api_key = os.environ.get("OPENAI_API_KEY")
# warnings.filterwarnings("ignore")
# SECURITY(review): the Pinecone API key is hard-coded here; it should be
# moved to an environment variable / settings and this key rotated
pinecone.init(
    api_key="5bf2927b-0fb7-423b-b8f1-2f6a3347a15d", environment="asia-northeast1-gcp"
)
# connect to the existing Pinecone index and build the QA chain once at import
vectorstore = Pinecone.from_existing_index("teamprojindex", OpenAIEmbeddings())
pathfinder_chatbot = qamodel.make_chain(vectorstore)
def render_chatbotview(request):
    """Render the chatbot window page."""
    return render(request, "chatwindow.html")
def send_chat_response(request):
    """AJAX endpoint: forward the user's message to the chatbot chain.

    Expects a POST whose JSON body has a 'user_message' key; returns
    {'pathfinder_response': <answer>} (empty string for non-POST or a
    null message).  No chat history is threaded through -- each call
    sends an empty history pair.
    """
    pathfinder_response = ""
    if request.method == "POST":
        # NOTE(review): json.loads / ["user_message"] raise on a malformed
        # body, producing a 500 -- consider returning a 400 instead
        json_user_input = json.loads(request.body)
        user_message = json_user_input["user_message"]
        if user_message is not None:
            response = pathfinder_chatbot(
                {
                    "question": user_message,
                    # [("Q","A")]
                    "chat_history": [("", "")],
                }
            )  # query the chatbot
            # print(user_message)
            pathfinder_response = response["answer"]
    # context = {'pathfinder_response': pathfinder_response}
    # jsondata = json.dumps(jsonresp)
    # reverse('/chatbox/')
    return JsonResponse({"pathfinder_response": pathfinder_response})
# return JsonResponse({'pathfinder_response': pathfinder_response})
# return render(request, 'chatwindow.html', {'pathfinder_response': pathfinder_response, 'pathfinder_api_url': reverse('chatbot')})
def index(request):
    """Render the landing page."""
    return render(request, "index.html")

def gdpr(request):
    """Render the GDPR information page."""
    return render(request, "gdpr.html")

def about(request):
    """Render the about page."""
    return render(request, "about.html")

### For the Games Page ###
def games(request):
    """Render the games index page."""
    return render(request, "games.html")

def brickbreaker(request):
    """Render the brick-breaker game page."""
    return render(request, "brickbreaker.html")

def remembergame(request):
    """Render the memory game page."""
    return render(request, "remem.html")

def rockps(request):
    """Render the rock-paper-scissors game page."""
    return render(request, "rockps.html")

def tictakpro(request):
    """Render the tic-tac-toe game page."""
    return render(request, "tictakpro.html")
### For the Notemaker Page ###
def noteindex(request):
    """List every saved note."""
    notes = Notes.objects.all()
    return render(request, "noteindex.html", {"notes": notes})
def new_note(request):
    """Create a new note via NotesForm; redirect to the index on success.

    A GET shows an empty form; an invalid POST re-renders the bound form
    with its validation errors.
    """
    if request.method == "POST":
        form = NotesForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect("noteindex")
    else:
        form = NotesForm()
    return render(request, "noteupdate.html", {"form": form})
def note_detail(request, pk):
    """Show one note and allow editing it in place.

    A GET renders the note's current values; a valid POST saves them and
    redirects to the index, while an invalid POST re-renders the bound
    form with errors.
    """
    note = Notes.objects.get(id=pk)
    if request.method == "POST":
        form = NotesForm(request.POST, instance=note)
        if form.is_valid():
            form.save()
            return redirect("noteindex")
    else:
        form = NotesForm(instance=note)
    return render(request, "noteupdate.html", {"note": note, "form": form})
def delete_note(request, pk):
    """Confirm (GET) and perform (POST) deletion of a note."""
    # 404 on unknown id instead of an unhandled Notes.DoesNotExist.
    note = get_object_or_404(Notes, id=pk)
    form = NotesForm(instance=note)
    if request.method == "POST":
        note.delete()
        messages.info(request, "The note has been deleted")
        return redirect("noteindex")
    return render(request, "notedelete.html", {"note": note, "form": form})
def search_page(request):
    """Search notes whose heading or body contains the posted text.

    Bug fix: a GET request previously raised UnboundLocalError because
    ``notes`` was only assigned inside the POST branch; it now defaults
    to an empty queryset.
    """
    notes = Notes.objects.none()
    if request.method == "POST":
        # .get() avoids a MultiValueDictKeyError when "search" is absent.
        search_text = request.POST.get("search", "")
        notes = Notes.objects.filter(
            heading__icontains=search_text
        ) | Notes.objects.filter(text__icontains=search_text)
    return render(request, "notesearch.html", {"notes": notes})
# m = openai.Model.list()
# print([m['id'] for m in m['data'] if m['id'].startswith('gpt')])
# models= list(openai.Model.list().values())[1]
# print(models)
# print(list(filter(lambda x: re.match('*gpt*', x) , models)))
# *ASIDE: Function for token counting queries.
# def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301"):
# try:
# encoding = tiktoken.encoding_for_model(model)
# except KeyError:
# encoding = tiktoken.get_encoding("cl100k_base")
# if model == "gpt-3.5-turbo-0301": # note: future models may deviate from this
# num_tokens = 0
# for message in messages:
# num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n
# for key, value in message.items():
# num_tokens += len(encoding.encode(value))
# if key == "name": # if there's a name, the role is omitted
# num_tokens += -1 # role is always required and always 1 token
# num_tokens += 2 # every reply is primed with <im_start>assistant
# return num_tokens
# else:
# raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.
# See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
# def login_user(request):
# if request.method == "POST":
# username = request.POST['username']
# password = request.POST['password']
# user = authenticate(request, username=username, password=password)
# if user is not None:
# login(request, user)
# return redirect('index')
# else:
# messages.success(
# request, ("There was an error logging in, try again..."))
# return redirect('login')
# else:
# return render(request, 'login.html', {})
# def logout_user(request):
# logout(request)
# messages.success(request, ("You have successfully logged out."))
# return redirect('index')
# def register_user(request):
# if request.method == "POST":
# form = RegisterUserForm(request.POST)
# if form.is_valid():
# form.save()
# username = form.cleaned_data['username']
# password = form.cleaned_data['password1']
# user = authenticate(username=username, password=password)
# login(request, user)
# messages.success(request, ("Account successfuly created!"))
# return redirect('index')
# else:
# form = RegisterUserForm()
# return render(request, 'signup.html', {'form': form, })
### Google sign in ###
# def logout_view(request):
# logout(request)
# return redirect("index")
# def signup_redirect(request):
# messages.error(
# request, "Something wrong here, it may be that you already have account!")
# return redirect("index")
# from django.contrib.auth import authenticate, login
# from django.shortcuts import render, redirect
# def login_view(request):
# # Handle user login
# if request.method == 'POST':
# username = request.POST['username']
# password = request.POST['password']
# user = authenticate(request, username=username, password=password)
# if user is not None:
# login(request, user)
# print(user.username)
# request.session['username'] = user.username
# return redirect('home')
# else:
# # Handle login failure
# pass
# else:
# # Display login page
# return render(request, 'bloglogin.html')
def edit_profile(request):
    """Let the logged-in user edit their account and profile details.

    Anonymous visitors are redirected to the login page. A valid POST
    saves both forms and redirects to the profile page; an invalid POST
    falls through so the bound forms re-render with their errors.
    """
    if not request.user.is_authenticated:
        return redirect("/login")
    if request.method == "POST":
        user_form = EditProfileForm(request.POST, instance=request.user)
        # NOTE(review): UpdateProfile is bound to request.user here, not to a
        # related profile object — confirm that is the intended instance.
        profile_form = UpdateProfile(request.POST, request.FILES, instance=request.user)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, "Your profile is updated successfully")
            return redirect("/profile")
    else:
        user_form = EditProfileForm(instance=request.user)
        profile_form = UpdateProfile(instance=request.user)
    # The duplicated is_authenticated check from the original was redundant:
    # anonymous users were already redirected above.
    return render(
        request, "edit.html", {"user_form": user_form, "profile_form": profile_form}
    )
# def profile(request):
# context = {}
# return render(request, 'profile.html', context)
def contact(request):
    """Handle the contact form and e-mail the submission to the site owner.

    Bug fix: the original rebound the local name ``email`` (the visitor's
    address) to the EmailMessage object and then executed
    ``email.from_email = email`` — setting from_email to the message object
    itself. Distinct names keep the configured sender intact and use the
    visitor's address only as the Reply-To header.
    """
    if request.method == "POST":
        form = ContactForm(request.POST)
        if form.is_valid():
            name = form.cleaned_data["name"]
            sender_email = form.cleaned_data["email"]
            message = form.cleaned_data["message"]
            email_subject = "New Contact Form Submission"
            email_body = f"Name: {name}\nEmail: {sender_email}\nMessage: {message}"
            email_message = EmailMessage(
                email_subject,
                email_body,
                settings.DEFAULT_FROM_EMAIL,
                [settings.CONTACT_EMAIL],
                reply_to=[sender_email],
            )
            email_message.send(fail_silently=False)
            return render(request, "thanks.html")
    else:
        form = ContactForm()
    return render(request, "form.html", {"form": form})
# For Blog
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from .models import Profile, Meep
from .forms import MeepForm, SignUpForm, ProfilePicForm, EditProfileForm
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.contrib.auth.models import User
def bloghome(request):
    """Blog front page: newest meeps first; members may also post.

    Anonymous visitors get a read-only listing. For members, a valid POST
    saves the meep under the current user and redirects back here.
    """
    meeps = Meep.objects.all().order_by("-created_at")
    if not request.user.is_authenticated:
        return render(request, "home.html", {"meeps": meeps})
    form = MeepForm(request.POST or None)
    if request.method == "POST" and form.is_valid():
        meep = form.save(commit=False)
        meep.user = request.user
        meep.save()
        messages.success(request, ("Your Discovery Has Been Posted!"))
        return redirect("bloghome")
    return render(request, "home.html", {"meeps": meeps, "form": form})
def profilelist(request):
    """Show every profile except the viewer's own; members only."""
    if not request.user.is_authenticated:
        messages.success(request, ("You Must Be Logged In To View This Page..."))
        return redirect("bloghome")
    other_profiles = Profile.objects.exclude(user=request.user)
    return render(request, "profile_list.html", {"profiles": other_profiles})
def profile(request, pk):
    """Show a member profile and their meeps; POST toggles follow state.

    Robustness fixes: an unknown user id now yields a 404 instead of an
    unhandled Profile.DoesNotExist, and a POST without a "follow" field no
    longer raises MultiValueDictKeyError.
    """
    if not request.user.is_authenticated:
        messages.success(request, ("You Must Be Logged In To View This Page..."))
        return redirect("bloghome")
    profile = get_object_or_404(Profile, user_id=pk)
    meeps = Meep.objects.filter(user_id=pk).order_by("-created_at")
    if request.method == "POST":
        # The viewer's own profile holds the "follows" relation.
        current_user_profile = request.user.profile
        action = request.POST.get("follow")
        if action == "unfollow":
            current_user_profile.follows.remove(profile)
        elif action == "follow":
            current_user_profile.follows.add(profile)
        current_user_profile.save()
    return render(request, "profile.html", {"profile": profile, "meeps": meeps})
def login_user(request):
    """Authenticate a user from the login form.

    GET renders the login page; POST checks the credentials and either
    starts a session or sends the visitor back to the login view.
    """
    if request.method != "POST":
        return render(request, "login.html", {})
    username = request.POST["username"]
    password = request.POST["password"]
    user = authenticate(request, username=username, password=password)
    if user is None:
        messages.success(
            request, ("There was an error logging in. Please Try Again...")
        )
        return redirect("login")
    login(request, user)
    messages.success(request, ("You Have Been Logged In!"))
    return redirect("index")
# def logout_user(request):
# return render(request, 'profile_list.html')
def logout_user(request):
    """End the session and send the visitor back to the landing page."""
    logout(request)
    messages.success(request, ("You Have Been Logged Out. Till we meet again..."))
    return redirect("index")
def register_user(request):
    """Sign up a new account and log the user straight in.

    Invalid submissions re-render the bound form with its errors.
    """
    if request.method == "POST":
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            # Re-authenticate with the just-chosen credentials so the new
            # member lands on the site already signed in.
            user = authenticate(
                username=form.cleaned_data["username"],
                password=form.cleaned_data["password1"],
            )
            login(request, user)
            messages.success(request, ("You have successfully registered! Welcome!"))
            return redirect("index")
    else:
        form = SignUpForm()
    return render(request, "signup.html", {"form": form})
def update_user(request):
    """Edit the signed-in user's account details and profile picture."""
    if not request.user.is_authenticated:
        messages.success(request, ("You Must Be Logged In To View That Page..."))
        return redirect("bloghome")
    current_user = User.objects.get(id=request.user.id)
    profile_user = Profile.objects.get(user__id=request.user.id)
    # Bound on POST, unbound on GET (request.POST is falsy then), so the
    # same two form objects serve both render and save paths.
    user_form = EditProfileForm(
        request.POST or None, request.FILES or None, instance=current_user
    )
    profile_form = ProfilePicForm(
        request.POST or None, request.FILES or None, instance=profile_user
    )
    if user_form.is_valid() and profile_form.is_valid():
        user_form.save()
        profile_form.save()
        messages.success(request, ("Your Profile Has Been Updated!"))
        return redirect("bloghome")
    return render(
        request,
        "update_user.html",
        {"user_form": user_form, "profile_form": profile_form},
    )
def meep_like(request, pk):
    """Toggle the current user's like on a meep, then return to the referrer.

    Bug fix: when the Referer header is missing, the original called
    ``redirect(None)`` and crashed; we now fall back to the blog home.
    """
    if not request.user.is_authenticated:
        messages.success(request, ("You Must Be Logged In To View That Page..."))
        return redirect("bloghome")
    meep = get_object_or_404(Meep, id=pk)
    # .exists() makes the membership test explicit (and avoids fetching rows).
    if meep.likes.filter(id=request.user.id).exists():
        meep.likes.remove(request.user)
    else:
        meep.likes.add(request.user)
    return redirect(request.META.get("HTTP_REFERER", "bloghome"))
def meep_show(request, pk):
    """Display a single meep.

    get_object_or_404 already raises Http404 for an unknown id, so the
    original falsy-check else branch was unreachable and has been removed.
    """
    meep = get_object_or_404(Meep, id=pk)
    return render(request, "show_meep.html", {"meep": meep})
def my_view(request):
    """Render a template greeting the current user by first name."""
    context = {"first_name": request.user.first_name}
    return render(request, "my_template.html", context)
class ChangePasswordView(SuccessMessageMixin, PasswordChangeView):
    """Password-change page that flashes a confirmation message.

    Fix: corrected the user-facing typo "pasword" -> "password".
    """

    template_name = "password.html"
    success_message = "Successfully changed your password"
    success_url = reverse_lazy("profile")
| Susa0823/PathFinderProject | PathFinder/PathFinderApp/views.py | views.py | py | 16,953 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "pinecone.init",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "langchain.vectorstores.P... |
5683288254 | import torch.nn as nn
from torch.nn.parameter import Parameter
import torch
import torch.nn.functional as F
class DNN(nn.Module):
    """Three stacked linear layers with a sigmoid on the output.

    NOTE(review): there is no non-linearity between the linear layers, so
    the stack collapses to a single affine map before the sigmoid — confirm
    this is intentional.
    """

    def __init__(self, n_input, n_hidden, n_output, real):
        """Build the layers: n_input -> n_hidden -> n_output -> real."""
        super(DNN, self).__init__()
        self.loss = 0  # placeholder; not used inside this class
        self.hidden1 = nn.Linear(n_input, n_hidden, True)
        self.hidden2 = nn.Linear(n_hidden, n_output, True)
        self.o = nn.Linear(n_output, real, True)
        self.sig = nn.Sigmoid()

    def forward(self, x):
        """Run the affine stack and squeeze the trailing output dimension."""
        out = self.hidden1(x)
        out = self.hidden2(out)
        out = self.sig(self.o(out))
        return out.squeeze(1)
| asd1354403003/NON | DNN.py | DNN.py | py | 637 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
23978857817 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def compute_dct_coeffs(blockSize):
    """Return the blockSize x blockSize orthonormal DCT-II basis matrix.

    Row 0 is the constant vector sqrt(1/N); row i > 0 holds
    sqrt(2/N) * cos(pi * (2j + 1) * i / (2N)) for column j.

    The original nested Python loops are replaced by one vectorized
    broadcasted expression — same values, O(N^2) work done in NumPy.
    """
    n = blockSize
    j = np.arange(n)                      # column indices, shape (n,)
    i = np.arange(1, n).reshape(-1, 1)    # row indices 1..n-1, shape (n-1, 1)
    T = np.empty((n, n))
    T[0, :] = np.sqrt(1.0 / n)
    T[1:, :] = np.sqrt(2.0 / n) * np.cos(np.pi * (2.0 * j + 1.0) * i / (2.0 * n))
    return T
def viewing_dct_matrix(dct_matrix, out_dir):
    """Render the DCT coefficient matrix as an annotated heat map PNG."""
    fig, ax = plt.subplots()
    ax.matshow(dct_matrix, cmap='viridis')
    for (row, col), coeff in np.ndenumerate(dct_matrix):
        # Dark cells (strongly negative) get white text so the label stays readable.
        text_color = 'white' if coeff < -0.35 else 'black'
        ax.text(col, row, np.round(coeff, 2), ha='center', va='center', color=text_color)
    plt.title("The 64 DCT coefficients")
    plt.savefig(out_dir + "dct_matrix.png")
def viewing_dct_for_a_random_selected_block(yDCT, crDCT, cbDCT, h_luma, w_luma, h_chroma, w_chroma, blockSize, out_dir):
    """Plot the DCT of one randomly chosen block per plane (Y, Cb, Cr).

    Top row: 2-D heat maps of the selected blocks; bottom row: matching
    3-D surfaces. One random block is drawn from the luma plane and one
    (shared) from the chroma planes, then the figure is saved as
    ``dct_for_a_random_selected_block.png`` in *out_dir*.

    NOTE(review): tick labels are hard-coded 0..7, so the plot assumes
    blockSize == 8 — confirm before reusing with other block sizes.
    """
    xlabels=[0,1,2,3,4,5,6,7]
    ylabels=[0,1,2,3,4,5,6,7]
    # Number of blocks per dimension in the luma plane.
    nbh_luma = np.ceil(h_luma / blockSize)
    nbw_luma = np.ceil(w_luma / blockSize)
    # Pick a random luma block and derive its pixel-index window.
    i = np.random.randint(0, nbh_luma-1)
    j = np.random.randint(0, nbw_luma-1)
    row_ind_1 = i*blockSize
    row_ind_2 = row_ind_1+blockSize
    col_ind_1 = j*blockSize
    col_ind_2 = col_ind_1+blockSize
    fig=plt.figure(figsize=(15,7.5))
    fig.suptitle("DCT for randoms selected Y,Cb,Cr blocks")
    # --- luma (Y) block: 2-D heat map ---
    ax = fig.add_subplot(2,3,1)
    plt.title('yDCT')
    plt.imshow(yDCT[row_ind_1:row_ind_2, col_ind_1:col_ind_2],cmap='jet')
    plt.colorbar(shrink=1)
    ax.set_xticks(xlabels, xlabels)
    ax.set_yticks(ylabels, ylabels)
    # --- luma (Y) block: 3-D surface ---
    ax = fig.add_subplot(234, projection='3d')
    x, y = np.meshgrid(np.arange(blockSize), np.arange(blockSize)) # creating a 3D grid from the size of one block
    ax.plot_surface(x, y, yDCT[row_ind_1:row_ind_2, col_ind_1:col_ind_2], cmap='jet') # drawing of the 3D-surface
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('yDCT Coefficient')
    ax.set_xticks(xlabels, xlabels)
    ax.set_yticks(ylabels, ylabels)
    # Pick a second random block from the (typically subsampled) chroma
    # planes; the same window is reused for both Cb and Cr below.
    nbh_chroma = np.ceil(h_chroma / blockSize)
    nbw_chroma = np.ceil(w_chroma / blockSize)
    i = np.random.randint(0, nbh_chroma-1)
    j = np.random.randint(0, nbw_chroma-1)
    row_ind_1 = i*blockSize
    row_ind_2 = row_ind_1+blockSize
    col_ind_1 = j*blockSize
    col_ind_2 = col_ind_1+blockSize
    # --- chroma (Cb) block: 2-D heat map ---
    ax = fig.add_subplot(2,3,2)
    plt.title('cbDCT')
    plt.imshow(cbDCT[row_ind_1:row_ind_2, col_ind_1:col_ind_2],cmap='jet')
    plt.colorbar(shrink=1)
    ax.set_xticks(xlabels, xlabels)
    ax.set_yticks(ylabels, ylabels)
    # --- chroma (Cr) block: 2-D heat map ---
    ax = fig.add_subplot(2,3,3)
    plt.title('crDCT')
    plt.imshow(crDCT[row_ind_1:row_ind_2, col_ind_1:col_ind_2],cmap='jet')
    plt.colorbar(shrink=1)
    ax.set_xticks(xlabels, xlabels)
    ax.set_yticks(ylabels, ylabels)
    # --- chroma (Cb) block: 3-D surface (reuses the meshgrid from above) ---
    ax = fig.add_subplot(235, projection='3d')
    ax.plot_surface(x, y, cbDCT[row_ind_1:row_ind_2, col_ind_1:col_ind_2], cmap='jet')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('cbDCT Coefficient')
    ax.set_xticks(xlabels, xlabels)
    ax.set_yticks(ylabels, ylabels)
    # --- chroma (Cr) block: 3-D surface ---
    ax = fig.add_subplot(236, projection='3d')
    ax.plot_surface(x, y, crDCT[row_ind_1:row_ind_2, col_ind_1:col_ind_2], cmap='jet')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('crDCT Coefficient')
    ax.set_xticks(xlabels, xlabels)
    ax.set_yticks(ylabels, ylabels)
    plt.savefig(out_dir+"dct_for_a_random_selected_block.png")
| vince-robin/Image-compression | soft/functions/dct.py | dct.py | py | 3,672 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 14,
... |
45342937416 | import torch
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.autograd import Variable
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.modules.input_variational_dropout import InputVariationalDropout
class Char_RNN(nn.Module):
    # Character-level encoder: embeds each token's characters, runs an LSTM
    # over them, combines an attention-weighted sum of the hidden states with
    # the final cell state, and projects the result to output_size per token.
    def __init__(self, char_to_index, char_embed_size, hidden_size,output_size,dropout,cuda_flag, batch_first=True):
        """
        Args:
            char_to_index: dict mapping characters to integer ids; must
                contain the special key '__PADDING__'.
            char_embed_size: char embeddings dim
            hidden_size: lstm recurrent dim
            output_size: per-token embedding dim produced by forward()
            dropout: dropout probability (also used as recurrent dropout)
            cuda_flag: move tensors to GPU when True
            batch_first: batch first option
        """
        super(Char_RNN, self).__init__()
        self.char_to_index = char_to_index
        self.char_embed_size = char_embed_size
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.output_size = output_size
        self.batch_first = batch_first
        self.padding_index = self.char_to_index['__PADDING__']
        self.cuda_flag = cuda_flag
        self.char_encoder = nn.Embedding(len(self.char_to_index), self.char_embed_size, sparse=True, padding_idx= self.padding_index)
        torch.nn.init.xavier_uniform_(self.char_encoder.weight.data)
        self.char_rnn = AugmentedLstm(input_size= self.char_embed_size, hidden_size = self.hidden_size,go_forward = True, recurrent_dropout_probability = self.dropout,
                                      use_highway = False, use_input_projection_bias = False)
        self.char_rnn.state_linearity.bias.data.fill_(0.0)
        self.var_drop = InputVariationalDropout(self.dropout)
        # Scores each LSTM hidden state for the attention pooling below.
        self.w_atten = nn.Linear(self.hidden_size,1,bias=False)
        self.w_atten.weight.data.fill_(0.0)
        # Projects [attention summary ; final cell state] -> output_size.
        self.char_projection = nn.Linear(self.hidden_size*2,self.output_size,bias=True)
        self.char_projection.weight.data.fill_(0.0)
        self.char_projection.bias.data.fill_(0.0)
        self.drp = nn.Dropout(self.dropout)
    def forward(self,char_ids,seq_lengths):
        """Encode every token's character sequence.

        Args:
            char_ids: per-sentence lists of per-token character-id lists.
            seq_lengths: tensor of sentence lengths; only its max is used
                to size the padded output.
        Returns:
            Tensor of shape (num_sentences, max_seq_len, output_size) with
            one embedding per token (padding positions stay zero).
        """
        # Collect the unique token character sequences across the batch so
        # each distinct token is encoded only once.
        tokenIdChars = []
        for sent in char_ids:
            tokenIdChars.extend([idChars for idChars in sent])
        tokenIdChars_set = set(map(tuple,tokenIdChars))
        tokenIdChars = list(map(list,tokenIdChars_set))
        # Sort longest-first as required by pack_padded_sequence.
        tokenIdChars.sort(key=lambda x: -len(x))
        max_len = len(max(tokenIdChars,key=len))
        batch_size = len(tokenIdChars)
        # Build the padded (num_unique_tokens, max_len) char-id matrix.
        char_tensor = torch.zeros(batch_size,max_len).long()
        char_tensor.fill_(self.padding_index)
        for idx in range(len(tokenIdChars)):
            for jdx in range(len(tokenIdChars[idx])):
                char_tensor[idx,jdx] = tokenIdChars[idx][jdx]
        if self.cuda_flag:
            char_tensor = char_tensor.cuda()
        char_embed = self.char_encoder(char_tensor)
        char_embed = self.var_drop(char_embed)
        char_seq_lengths = np.array([len(char) for char in tokenIdChars])
        packed_input = pack_padded_sequence(char_embed, char_seq_lengths,batch_first=True)
        packed_output, (ht,cell) = self.char_rnn(packed_input, None)
        out_rnn, lengths = pad_packed_sequence(packed_output, batch_first=True)
        out_rnn = self.var_drop(out_rnn)
        # Attention scores per time step; padded positions are masked to a
        # large negative value so softmax assigns them ~zero weight.
        w_att = self.w_atten(out_rnn)
        if self.cuda_flag:
            mask = torch.ones(w_att.size()).cuda()
        else:
            mask = torch.ones(w_att.size())
        for i, l in enumerate(lengths):
            if l < out_rnn.size()[1]:
                mask[i, l:] = 0
        w_att = w_att.masked_fill(mask == 0, -1e9)
        #compute and apply attention
        attentions = F.softmax(w_att.squeeze(),dim=1)
        weighted = torch.mul(out_rnn, attentions.unsqueeze(-1).expand_as(out_rnn))
        char_att = weighted.sum(1).squeeze()
        # Concatenate the attention summary with the final LSTM cell state.
        char_embs = torch.cat((char_att,cell.squeeze(0)),1)
        char_embs = self.drp(char_embs)
        proj_char_embs = self.char_projection(char_embs)
        # Map each unique char sequence (keyed by its str()) to its embedding.
        RNN_embs = {}
        for idx in range(len(tokenIdChars)):
            RNN_embs[str(tokenIdChars[idx])] = proj_char_embs[idx,:]
        max_seq = torch.max(seq_lengths).cpu().numpy().tolist()
        # Scatter the per-token embeddings back into sentence order.
        if self.cuda_flag:
            char_emb_tensor = Variable(torch.zeros(len(char_ids),max_seq,self.output_size)).cuda()
        else:
            char_emb_tensor = Variable(torch.zeros(len(char_ids),max_seq,self.output_size))
        for idx in range(len(char_ids)):
            for jdx in range(len(char_ids[idx])):
                char_emb_tensor[idx,jdx,:] = RNN_embs[str(char_ids[idx][jdx])]
        return char_emb_tensor
| makyr90/DL_Syntax_Models | Biaffine_parser_PyTorch/char_lstm.py | char_lstm.py | py | 4,586 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
8747023453 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 13:34:12 2019
@author: ADMIN
"""
import AllFunctions as af
import pandas as pd
import numpy as np
import pandas_profiling
#import H2OHandler as hh
df=pd.read_csv('train.csv')
orgCC = df['cc_cons'].copy()
df['isTrain']=True
df2=pd.read_csv('test.csv')
df2['isTrain']=False
df2['cc_cons']=-1
fillCountMinPer = 50
idCols=['id']
distCatPer=2
onehotColumns=[]
pred_variable_type='regression'
target_variable = 'cc_cons'
TrainCleanVars={}
TrainCleanVars['dropCols']=[]
# Account Desc
dfDescT=af.getDFDesc(df)
dfDescT2=af.getDFDesc(df2)
df3=pd.concat([df,df2],ignore_index=True)
dfDescT3=af.getDFDesc(df3)
df=df3.reset_index(drop=True)
dfDescT=af.getDFDesc(df)
#profile = df.profile_report(title='Pandas Profiling Report')
#profile.to_file(output_file="output.html")
#rejected_variables = profile.get_rejected_variables(threshold=0.9)
#age has some unusual values like 224 which are quite invalid hence we will trim all such values to 75
df.loc[df['age'] > 75, 'age'] = 75
#Many amount columns are skewed lets take log and profile the results
cols=['card_lim', 'cc_cons_apr',
'cc_cons_jun', 'cc_cons_may', 'cc_count_apr', 'cc_count_jun',
'cc_count_may', 'credit_amount_apr', 'credit_amount_jun',
'credit_amount_may', 'credit_count_apr', 'credit_count_jun',
'credit_count_may', 'dc_cons_apr', 'dc_cons_jun', 'dc_cons_may',
'dc_count_apr', 'dc_count_jun', 'dc_count_may', 'debit_amount_apr',
'debit_amount_jun', 'debit_amount_may', 'debit_count_apr',
'debit_count_jun', 'debit_count_may', 'emi_active',
'max_credit_amount_apr', 'max_credit_amount_jun',
'max_credit_amount_may']
#for col in cols:
# df[col]=np.log(df[col]+1)
#profile = df.profile_report(title='Pandas Profiling Report after amount log')
#profile.to_file(output_file="output_log.html")
"""import matplotlib.pyplot as plt
plt.matshow(df.corr())
plt.show()
f = plt.figure(figsize=(19, 15))
plt.matshow(df.corr(), fignum=f.number)
plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=45)
plt.yticks(range(df.shape[1]), df.columns, fontsize=14)
cb = plt.colorbar()
cb.ax.tick_params(labelsize=14)
plt.title('Correlation Matrix', fontsize=16)
f.savefig('CorrMatrix.png')
"""
"""columns=['personal_loan_active','personal_loan_closed','vehicle_loan_active', 'vehicle_loan_closed','investment_1', 'investment_2', 'investment_3', 'investment_4']
df[columns]=df[columns].fillna(0)
df['loan_enq']=df['loan_enq'].fillna('N')
dfDescT=af.getDFDesc(df)"""
TrainCleanVars['dropCols'].extend(idCols)
df.drop(columns=idCols,inplace=True)
print("Dropping cols as declared as id cols in config : ",idCols)
#Missing Value Imputation
# Now here many columns have missing values especially debit ones related, we have to fill them using data dictionary
# Derived credit-card consumption features aggregated over the three
# observed months (April, May, June).
df['cc_cons_highest'] = df[['cc_cons_apr','cc_cons_may','cc_cons_jun']].max(axis=1)
df['cc_cons_lowest'] = df[['cc_cons_apr','cc_cons_may','cc_cons_jun']].min(axis=1)
df['cc_cons_total'] = df[['cc_cons_apr','cc_cons_may','cc_cons_jun']].sum(axis=1)
df['cc_cons_average'] = df[['cc_cons_apr','cc_cons_may','cc_cons_jun']].mean(axis=1)
# Average spend per transaction across the three months.
df['cc_cons_trans_avg']=df['cc_cons_total']/df[['cc_count_apr','cc_count_may','cc_count_jun']].sum(axis=1)
df['cc_cons_high_low_range']=df['cc_cons_highest']-df['cc_cons_lowest']
# Whether the peak monthly spend exceeded the card limit.
df['cc_cons_limit_crossed']=df['cc_cons_highest']>df['card_lim']
# Mean monthly spend relative to the card limit (utilization-style ratio).
df['cc_cons_total_lim_ratio']=(df['cc_cons_total']/3)/df['card_lim']
"""df['dc_cons_highest'] = df[['dc_cons_apr','dc_cons_may','dc_cons_jun']].max(axis=1)
df['dc_cons_lowest'] = df[['dc_cons_apr','dc_cons_may','dc_cons_jun']].min(axis=1)
df['dc_cons_total'] = df[['dc_cons_apr','dc_cons_may','dc_cons_jun']].sum(axis=1)
df['dc_cons_average'] = df[['dc_cons_apr','dc_cons_may','dc_cons_jun']].mean(axis=1)
df['dc_cons_trans_avg']=df['dc_cons_total']/df[['dc_count_apr','dc_count_may','dc_count_jun']].sum(axis=1)
df['dc_cons_high_low_range']=df['dc_cons_highest']-df['dc_cons_lowest']
df['debit_amount_highest'] = df[['debit_amount_apr','debit_amount_may','debit_amount_jun']].max(axis=1)
df['debit_amount_lowest'] = df[['debit_amount_apr','debit_amount_may','debit_amount_jun']].min(axis=1)
df['debit_amount_total'] = df[['debit_amount_apr','debit_amount_may','debit_amount_jun']].sum(axis=1)
df['debit_amount_average'] = df[['debit_amount_apr','debit_amount_may','debit_amount_jun']].mean(axis=1)
df['debit_amount_trans_avg']=df['debit_amount_total']/df[['dc_count_apr','dc_count_may','dc_count_jun']].sum(axis=1)
df['debit_amount_high_low_range']=df['debit_amount_highest']-df['debit_amount_lowest']
df['credit_amount_highest'] = df[['credit_amount_apr','credit_amount_may','credit_amount_jun']].max(axis=1)
df['credit_amount_lowest'] = df[['credit_amount_apr','credit_amount_may','credit_amount_jun']].min(axis=1)
df['credit_amount_total'] = df[['credit_amount_apr','credit_amount_may','credit_amount_jun']].sum(axis=1)
df['credit_amount_average'] = df[['credit_amount_apr','credit_amount_may','credit_amount_jun']].mean(axis=1)
df['credit_amount_trans_avg']=df['credit_amount_total']/df[['dc_count_apr','dc_count_may','dc_count_jun']].sum(axis=1)
df['credit_amount_high_low_range']=df['credit_amount_highest']-df['credit_amount_lowest']
df['max_credit_amount_highest'] = df[['max_credit_amount_apr','max_credit_amount_may','max_credit_amount_jun']].max(axis=1)
df['max_credit_amount_lowest'] = df[['max_credit_amount_apr','max_credit_amount_may','max_credit_amount_jun']].min(axis=1)
df['max_credit_amount_total'] = df[['max_credit_amount_apr','max_credit_amount_may','max_credit_amount_jun']].sum(axis=1)
df['max_credit_amount_average'] = df[['max_credit_amount_apr','max_credit_amount_may','max_credit_amount_jun']].mean(axis=1)
df['max_credit_amount_trans_avg']=df['max_credit_amount_total']/df[['dc_count_apr','dc_count_may','dc_count_jun']].sum(axis=1)
df['max_credit_amount_high_low_range']=df['max_credit_amount_highest']-df['max_credit_amount_lowest']
df['cc_dc_cons_ratio'] = df['cc_cons_total'] / df['dc_cons_total']
df['credit_debit_ratio'] = df['credit_amount_total'] / df['debit_amount_total']
df['dc_count_total']=df[['dc_count_apr','dc_count_may','dc_count_jun']].sum(axis=1)
df['cc_count_total']=df[['cc_count_apr','cc_count_may','cc_count_jun']].sum(axis=1)
df['cc_dc_count_ratio']=df['cc_count_total']/df['cc_count_total']"""
# Ratios above can produce +/-inf on zero denominators; treat them as missing.
df=df.replace([np.inf, -np.inf], np.nan)
dfDescT=af.getDFDesc(df)
#lets drop cols which are less than minimum fillcount for now. We can later revisit them if required
# Drop columns that are mostly empty (fill % below threshold), constant
# (single unique value), or zero-variance — they carry no signal.
dropFlag=dfDescT[(dfDescT['fillCount']<fillCountMinPer) | (dfDescT['unique']==1) | (dfDescT['std']==0)]
dropCols=list(dropFlag.index)
# Remember the dropped columns so the same cleanup can be replayed later.
TrainCleanVars['dropCols'].extend(dropCols)
print("Dropping cols as unique count less or fillcount less or std is zero : ",dropCols)
df.drop(columns=dropCols,inplace=True)
# Snapshot the engineered dataset for inspection/debugging.
df.to_csv('AfterFeature.csv',index=False)
"""plt.subplot(1, 2, 1)
plt.scatter(df['cc_cons_highest'],df['card_lim'],c="b")
plt.xlabel("highest spend")
plt.ylabel("card_lim")
plt.subplot(1, 2, 2)
plt.scatter(df['cc_cons_lowest'],df['card_lim'],c="r")
plt.xlabel("lowest spend")
plt.ylabel("card_lim")
plt.show()
div_val=10000
sc=plt.scatter(df['cc_cons_lowest']/div_val,df['cc_cons_highest']/div_val,c=df['card_lim']/div_val)
plt.colorbar(sc)
plt.xlabel("lowest spend")
plt.ylabel("highest spend")
plt.show()
plt.scatter(df['cc_cons_highest']/div_val,df['card_lim']/div_val,c=df['cc_cons_limit_crossed'])
plt.xlabel("highest spend")
plt.ylabel("card_lim")
plt.hist(df['card_lim'].dropna())
plt.show()
plt.hist(np.log(df.loc[df['isTrain']==True,'cc_cons']+1))
plt.show()
"""
#df.loc[df['isTrain']==True,'cc_cons']=np.log(df.loc[df['isTrain']==True,'cc_cons']+1)
dfDescT=af.getDFDesc(df)
catFlag=dfDescT[(dfDescT['distCount']<=distCatPer)]
catCols=list(catFlag.index)
df=af.categorizeCols(df,catCols)
catCols=list(set(catCols)-set(onehotColumns))
df=af.LabelEncodeCols(df.copy(),catCols,onehotColumns)
zeroOneCols=df.apply(lambda x: af.ChkZeroOne(x))
standarizeCols=list(zeroOneCols[zeroOneCols==False].index)
#standarizeCols.remove(target_variable)
"""profile = df.profile_report(title='Pandas Profiling Report')
profile.to_file(output_file="outputFeature.html")
rejected_variables = profile.get_rejected_variables(threshold=0.9)
df.drop(columns=rejected_variables,inplace=True)
standarizeCols = list(set(standarizeCols) - set(rejected_variables))
"""
X=df
X_trainVal=X[X['isTrain']==True]
X_test=X[X['isTrain']==False]
X_trainVal.reset_index(inplace=True,drop=True)
X_test.reset_index(inplace=True,drop=True)
X_trainVal.drop(columns=['isTrain'],inplace=True)
X_test.drop(columns=['isTrain'],inplace=True)
X_trainVal,misDict=af.missing_value(X_trainVal)
X_test,_=af.missing_value(X_test,misDict=misDict)
outlierlist=af.getOutliers(X_trainVal)
y_pred_outliers=np.array(outlierlist[0][1])
df_outliers=X_trainVal[y_pred_outliers==1]
dfDescT=af.getDFDesc(df_outliers)
X_trainVal=X_trainVal[y_pred_outliers==0]
dfDescT2=af.getDFDesc(X_trainVal)
X_trainVal,scaler=af.normalize(X_trainVal,standarizeCols)
#standarizeCols.remove(target_variable)
X_test=af.normalize(X_test,standarizeCols,scaler)
X_test.drop(columns=[target_variable],inplace=True)
dfDesc=X_test.describe(include='all')
dfDescT=dfDesc.T
trainVal_frame=X_trainVal
x_cols=list(X_trainVal.columns)
y_col=target_variable
import H2OHandler as hh
print("Start H2O model training")
res,PredDF,predtrain=hh.GetBestH2OModel(trainVal_frame,x_cols,y_col,pred_variable_type == "categorical",X_test)
TrainCleanVars['H2OBestModel']=res.leader
X_test[target_variable]=PredDF['predict']
X_test[standarizeCols]=scaler.inverse_transform(X_test[standarizeCols])
ts=af.GetTimeStamp()
af.PickleWrite(TrainCleanVars,"TrainCleanVars"+str(ts)+".pkl")
X_test[X_test < 0]=0 #Need to fix this
X_test['id']=df2['id']
final_sub=X_test[['id',target_variable]]
final_sub.to_csv('samplesubmission'+str(ts)+'.csv',index=False)
lb=res.leaderboard
lbres=lb[:5,"model_id"]
import h2o
m = h2o.get_model(lb[0,"model_id"])
varimpres=m.varimp(use_pandas=True)
trainVal_frameCopy=trainVal_frame.copy()
trainVal_frameCopy.reset_index(inplace=True,drop=True)
trainVal_frameCopy['cc_cons']=predtrain
trainVal_frameCopy[standarizeCols]=scaler.inverse_transform(trainVal_frameCopy[standarizeCols])
trainVal_frameCopy[trainVal_frameCopy < 0]=0
orgCC=orgCC[y_pred_outliers==0]
trainVal_frameCopy['cc_cons_org']=orgCC
trainVal_frameCopy['diff']=trainVal_frameCopy['cc_cons_org']-trainVal_frameCopy['cc_cons']
trainCompare=trainVal_frameCopy[['cc_cons_org','cc_cons','diff']]
from sklearn.metrics import mean_squared_log_error
rmsle=np.sqrt(mean_squared_log_error(orgCC, trainVal_frameCopy['cc_cons']))
print(rmsle)
| kinjaldand/MLProjects | CreditCardConsumptionPatternAMEX/InitialExplore.py | InitialExplore.py | py | 10,737 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "AllFunctions.getDFDesc",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "AllFunctions.g... |
72231186747 | from __future__ import print_function, division, unicode_literals
import os
import yaml
from pymatgen.io.vasp.inputs import Kpoints, Incar
from pymatgen.io.vasp.outputs import Vasprun
import twod_materials.utils as utl
from pymatgen.matproj.rest import MPRester
from monty.serialization import loadfn
import twod_materials
PACKAGE_PATH = twod_materials.__file__.replace('__init__.pyc', '')
PACKAGE_PATH = PACKAGE_PATH.replace('__init__.py', '')
PACKAGE_PATH = '/'.join(PACKAGE_PATH.split('/')[:-2])
try:
config_vars = loadfn(os.path.join(os.path.expanduser('~'), 'config.yaml'))
except:
print('WARNING: No config.yaml file was found. please configure the '\
'config.yaml and put it in your home directory.')
# Still set them for testing purposes.
config_vars = loadfn(os.path.join(PACKAGE_PATH, 'config.yaml'))
if 'MP_API' in os.environ: # Also for testing purposes.
MPR = MPRester(os.environ['MP_API'])
else:
MPR = MPRester(config_vars['mp_api'])
VASP = config_vars['normal_binary']
VASP_2D = config_vars['twod_binary']
if 'queue_system' in config_vars:
QUEUE = config_vars['queue_system'].lower()
elif '/ufrc/' in os.getcwd():
QUEUE = 'slurm'
elif '/scratch/' in os.getcwd():
QUEUE = 'pbs'
class Calibrator():
    """Prepares and post-processes the VASP calculations used to calibrate
    elemental ion corrections against a fixed framework of INCAR
    parameters, k-point density, and POTCAR hashes.
    """

    def __init__(self, incar_dict, potcar_dict, n_kpts_per_atom=500,
                 ncores=1, nprocs=16, pmem='600mb', walltime='6:00:00',
                 binary='vasp'):
        """
        Args:
            incar_dict (dict): dictionary of all input parameters
                used in the given framework.
            potcar_dict (dict): dictionary of all species to be
                calibrated and the potcar hashes used in the
                given framework, e.g. {'Mo': 'pv', 'S': ''}.
            n_kpts_per_atom (int): Create kpoints at specified
                density per atom. Defaults to 500.
            ncores, nprocs, pmem, walltime, binary: runjob
                parameters. Defaults established for a regular
                sized job on hipergator.
        """
        self._incar_dict = incar_dict
        self._n_kpts_per_atom = n_kpts_per_atom
        self._potcar_dict = potcar_dict
        self._ncores = ncores
        self._nprocs = nprocs
        self._pmem = pmem
        self._walltime = walltime
        self._binary = binary
        # NOTE(review): hard-coded absolute path, presumably only valid on
        # the original author's machine -- consider making it configurable.
        self._config = loadfn('/home/mashton/cal_config.yaml')

    def _write_input_files(self, mpid):
        """Write POSCAR, KPOINTS, INCAR, and POTCAR for the Materials
        Project structure ``mpid`` into the current working directory.
        """
        s = MPR.get_structure_by_material_id(mpid)
        s.to('POSCAR', 'POSCAR')
        # Element symbols on POSCAR line 6 fix the POTCAR ordering.
        elements = open('POSCAR').readlines()[5].split()
        kp = Kpoints.automatic_density(s, self._n_kpts_per_atom)
        kp.write_file('KPOINTS')
        incar = Incar.from_dict(self._incar_dict)
        incar.write_file('INCAR')
        utl.write_potcar(types=[self._potcar_dict[el] for el in elements])

    def _write_runjob(self, name, submit):
        """Write a queue-appropriate submission script (and optionally
        submit it). Centralizing the PBS/SLURM branching fixes a bug in
        the original code, where the reference-oxide subdirectories had
        the two queue systems swapped (a PBS script written under SLURM
        and submitted with ``qsub``, and vice versa).
        """
        if QUEUE == 'pbs':
            utl.write_pbs_runjob(name, self._ncores, self._nprocs,
                                 self._pmem, self._walltime, self._binary)
            submission_command = 'qsub runjob'
        elif QUEUE == 'slurm':
            utl.write_slurm_runjob(name, self._nprocs, self._pmem,
                                   self._walltime, self._binary)
            submission_command = 'sbatch runjob'
        if submit:
            os.system(submission_command)

    def prepare(self, submit=False):
        """
        Set up calculation directories to calibrate
        the ion corrections to match a specified framework of INCAR
        parameters, k-points, and potcar hashes.

        Args:
            submit (bool): whether or not to submit each job
                after preparing it.
        """
        for elt in self._potcar_dict:
            # Reference directory for the pure element.
            if not os.path.isdir(elt):
                os.mkdir(elt)
            os.chdir(elt)
            self._write_input_files(self._config['Mpids'][elt]['self'])
            self._write_runjob('{}_cal'.format(elt), submit)

            # Reference oxide compound subdirectory. Anion elements are
            # referenced directly and get no compound reference.
            if elt not in ['O', 'S', 'F', 'Cl', 'Br', 'I']:
                if not os.path.isdir('ref'):
                    os.mkdir('ref')
                os.chdir('ref')
                self._write_input_files(self._config['Mpids'][elt]['ref'])
                self._write_runjob('{}_cal'.format(elt), submit)
                os.chdir('../')
            os.chdir('../')

    def get_corrections(self, parent_dir=None, write_yaml=False,
                        oxide_corr=0.708):
        """
        Pulls the corrections to be added for each element.

        Args:
            parent_dir (str): path to parent directory containing
                subdirectories created by prepare(). Defaults to the
                current working directory *at call time* (the original
                ``os.getcwd()`` default was frozen at import time).
            write_yaml (bool): whether or not to write the
                corrections to ion_corrections.yaml and the mu0
                values to end_members.yaml.
            oxide_corr (float): additional correction added for oxygen
                to get water's formation energy right.

        Returns:
            dict. elements as keys and their corrections as values,
            in eV per atom, e.g. {'Mo': 0.135, 'S': -0.664}.
        """
        if parent_dir is None:
            parent_dir = os.getcwd()
        mu0 = dict()
        corrections = dict()
        os.chdir(parent_dir)
        special_cases = ['O', 'S', 'F', 'Cl', 'Br', 'I']
        elts = [elt for elt in self._potcar_dict if elt not in special_cases]
        # Add entropic correction for special elements (S * 298K)
        specials = [elt for elt in self._potcar_dict if elt in special_cases]
        for elt in specials:
            os.chdir(elt)
            vasprun = Vasprun('vasprun.xml')
            composition = vasprun.final_structure.composition
            n_formula_units = composition.get_integer_formula_and_factor()[1]
            mu0[elt] = (
                round(vasprun.final_energy / n_formula_units
                      + self._config['OtherCorrections'][elt], 3)
            )
            os.chdir(parent_dir)
        # Oxide correction from Materials Project. Guarded so frameworks
        # that do not calibrate oxygen no longer raise a KeyError.
        if 'O' in mu0:
            mu0['O'] += oxide_corr
        for elt in elts:
            os.chdir(elt)
            vasprun = Vasprun('vasprun.xml')
            composition = vasprun.final_structure.composition
            n_formula_units = composition.get_integer_formula_and_factor()[1]
            mu0[elt] = round(vasprun.final_energy / n_formula_units, 3)
            # Nitrogen needs both kinds of corrections
            if elt == 'N':
                mu0[elt] -= 0.296
            os.chdir(parent_dir)
        for elt in elts:
            os.chdir('{}/ref'.format(elt))
            vasprun = Vasprun('vasprun.xml')
            composition = vasprun.final_structure.composition
            n_formula_units = composition.get_integer_formula_and_factor()[1]
            fH_exp = self._config['Experimental_fH'][elt]
            # NOTE(review): the original caught UnboundLocalError here;
            # it is unclear which name could be unbound -- presumably a
            # leftover from incomplete runs. Kept for compatibility.
            try:
                fH_dft = vasprun.final_energy / n_formula_units
                # Parse composition straight from the POSCAR header.
                plines = open('POSCAR').readlines()
                elements = plines[5].split()
                stoichiometries = plines[6].split()
                comp_as_dict = {}
                for element in elements:
                    comp_as_dict[element] = 0
                for i, element in enumerate(elements):
                    comp_as_dict[element] += int(stoichiometries[i])
                n_elt_per_fu = (
                    int(comp_as_dict[elt]) / n_formula_units
                )
                # Subtract the chemical potentials of every constituent
                # to obtain the DFT formation enthalpy per formula unit.
                for el in comp_as_dict:
                    fH_dft -= (
                        mu0[el] * int(comp_as_dict[el])
                        / n_formula_units
                    )
                corrections[elt] = round((fH_dft - fH_exp) / n_elt_per_fu, 3)
            except UnboundLocalError:
                corrections[elt] = 'Not finished'
            os.chdir(parent_dir)
        if write_yaml:
            with open('ion_corrections.yaml', 'w') as icy:
                icy.write(yaml.dump(corrections, default_flow_style=False))
            with open('end_members.yaml', 'w') as emy:
                emy.write(yaml.dump(mu0, default_flow_style=False))
        return corrections
| ashtonmv/twod_materials | twod_materials/pourbaix/startup.py | startup.py | py | 9,663 | python | en | code | 18 | github-code | 6 | [
{
"api_name": "twod_materials.__file__.replace",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "twod_materials.__file__",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "monty.serialization.loadfn",
"line_number": 22,
"usage_type": "call"
},
... |
21253269122 | from django.urls import path
from .views import TagContentView #导入TagContentView
from .views import XieyiConfigDateView
from .views import NodeConfigMakeDevRequest,NodeConfigCopyRequest,NodeConfigReadAndSaveRequest,NodeConfigDeleteRequest
from .views import XieyiConfigDateOrderView,XieyiTestCaseView,SenderHexDataOrderView,RecriminatDataOrderView
urlpatterns = [
    # URL for the node-configuration (TagContent) page.
    path('tagcontent/<path:tagcontent_id>/', TagContentView.as_view(), name="tag_content_id"),
    # Copy-and-create test-case URL; the app namespace qualifies the route name.
    # URL for the protocol test-case page.
    path('xieyiconfigdate/<path:xieyiconfigdate_id>/', XieyiConfigDateView.as_view(), name="xie_yi_config_date_id"),
    # Copy-and-create test-case URL; the app namespace qualifies the route name.
    # NodeConfig page URL: generate the dev configuration.
    path('nodeconfigmakedev/<path:nodeconfig_id>/', NodeConfigMakeDevRequest, name="node_config_make_dev_id"),
    # Copy-and-create test-case URL; the app namespace qualifies the route name.
    # NodeConfig page URL: make a full copy of a configuration.
    path('nodeconfigallcopy/<path:nodeconfig_id>/', NodeConfigCopyRequest, name="node_config_all_copy_id"),
    # Copy-and-create test-case URL; the app namespace qualifies the route name.
    # NodeConfig page URL: read the uploaded file and save it to the database.
    path('nodeconfigreadandsave/<path:nodeconfig_id>/', NodeConfigReadAndSaveRequest, name="node_config_read_and_save_id"),
    # Copy-and-create test-case URL; the app namespace qualifies the route name.
    # NodeConfig page URL: delete this record.
    path('nodeconfigalldelete/<path:nodeconfig_id>/', NodeConfigDeleteRequest, name="node_config_all_delete_id"),
    # Copy-and-create test-case URL; the app namespace qualifies the route name.
    # Protocol test cases: dependency-configuration URL.
    path('xieyiconfigdateorder/<path:xieyiconfigdateorder_id>/', XieyiConfigDateOrderView.as_view(), name="xie_yi_config_date_order_id"),
    # Copy-and-create test-case URL; the app namespace qualifies the route name.
    # Protocol test cases: test-case URL.
    path('xieyitestcase/<path:xieyitestcase_id>/', XieyiTestCaseView.as_view(),
         name="new_xie_yi_test_case_id"),
    # Copy-and-create test-case URL; the app namespace qualifies the route name.
    # Protocol test cases: serial-port send/receive data URL.
    path('senderhexdataorder/<path:senderhexdataorder_id>/', SenderHexDataOrderView.as_view(),name="sender_hex_date_order_id"),
    # Copy-and-create test-case URL; the app namespace qualifies the route name.
    # Protocol test cases: reverse-control send/receive data URL.
    path('recriminatdataorder/<path:recriminatdataorder_id>/', RecriminatDataOrderView.as_view(),
         name="recriminat_data_order_id"),
    # Copy-and-create test-case URL; the app namespace qualifies the route name.
]
# Application namespace used with reverse()/url lookups, e.g. 'shucaiyidate:tag_content_id'.
app_name = 'shucaiyidate'
| wawj901124/shangbaogongju | apps/shucaiyidate/urls.py | urls.py | py | 3,118 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.TagContentView.as_view",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.TagContentView",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "dj... |
6428170708 | # File: LangChainchatOpenAI.py
# Author: Denys L
# Date: October 8, 2023
# Description:
import os
import sys
import hashlib
from typing import Any
import streamlit as st
from dotenv import load_dotenv
from langchain.callbacks.base import BaseCallbackHandler
from fundamentals.langchain_utils import StuffSummarizerByChapter
class StreamingStdOutCallbackHandlerPersonal(BaseCallbackHandler):
    """LangChain callback that streams LLM tokens into the Streamlit chat
    placeholder while also echoing them to stdout.
    """

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Append *token* to the accumulated response and re-render it.

        The trailing "▌" acts as a typing cursor while streaming.
        Requires ``st.session_state.full_response`` and
        ``st.session_state.placeholder`` to have been initialized by the
        caller before the LLM starts emitting tokens.
        """
        st.session_state.full_response = st.session_state.full_response + token
        st.session_state.placeholder.markdown(
            st.session_state.full_response + "▌")
        # Mirror the stream to the terminal for debugging.
        sys.stdout.write(token)
        sys.stdout.flush()
def process_book(uploaded_file):
    """Persist the uploaded book to a temporary file, stream a summary
    into the chat UI, and record it in the message history.

    Args:
        uploaded_file: a Streamlit ``UploadedFile`` (has ``.name`` and
            ``.read()``).

    Side effects: writes and then always deletes ``.trash/<name>``;
    mutates ``st.session_state`` (placeholder, full_response, messages).
    """
    temp_dir = '.trash'
    # Fix: the original assumed '.trash' already existed.
    os.makedirs(temp_dir, exist_ok=True)
    temp_file_path = os.path.join(temp_dir, uploaded_file.name)
    with open(temp_file_path, 'wb') as file:
        file.write(uploaded_file.read())
    st.session_state.full_response = ""
    st.session_state.handler_ia_message = st.chat_message(
        "assistant", avatar="🤖")
    st.session_state.placeholder = st.session_state.handler_ia_message.empty()
    try:
        # The streaming callback handler fills the placeholder token by
        # token while the summarizer runs.
        st.session_state.llm.summarize(temp_file_path)
        # Final render without the typing cursor.
        st.session_state.placeholder.markdown(st.session_state.full_response)
        st.session_state.messages.append(
            {"role": "assistant", "content": st.session_state.full_response,
             "avatar": "🤖"})
        st.session_state.full_response = ""
    finally:
        # Fix: remove the temp copy even when summarization raises.
        os.remove(temp_file_path)
def main():
    """Streamlit entry point: render the chat history and the sidebar
    uploader, and kick off summarization when 'Process' is clicked."""
    load_dotenv()
    st.title("Storyteller")
    # First run of the session: initialize history, the streaming
    # callback handler, and the summarizer that uses it.
    if "messages" not in st.session_state:
        st.session_state.messages = []
        st.session_state.handler = StreamingStdOutCallbackHandlerPersonal()
        st.session_state.llm = StuffSummarizerByChapter(
            st.session_state.handler)
    # Replay the stored conversation on every rerun.
    for message in st.session_state.messages:
        with st.chat_message(message["role"], avatar=message["avatar"]):
            st.markdown(message["content"])
    st.sidebar.subheader("Your books")
    uploaded_file = st.sidebar.file_uploader(
        "Upload your Books here and click on 'Process' to start the story", accept_multiple_files=False)
    # NOTE(review): process_book is called even if no file was uploaded
    # (uploaded_file is None) -- confirm intended handling.
    if st.sidebar.button("Process"):
        with st.spinner("Processing"):
            process_book(uploaded_file)


if __name__ == '__main__':
    main()
| lyepustin/bookNLP | app.py | app.py | py | 2,293 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "langchain.callbacks.base.BaseCallbackHandler",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "streamlit.session_state",
"line_number": 19,
"usage_type": "attribute"
},
{
... |
23138969943 |
import argparse
from random import sample
def load_data(fr_file, fw_file, n_candidates=300):
    """Pair every user with a random sample of candidate movies.

    Reads node ids (one per line) from ``fr_file``: lines starting with
    'u' are users, lines starting with 'i' are movies. For each user a
    random sample of movies is written to ``fw_file`` as one
    ``user,movie`` line per pair.

    Args:
        fr_file: readable file-like object of node ids.
        fw_file: writable file-like object for the candidate pairs.
        n_candidates (int): movies sampled per user. Defaults to 300,
            matching the previously hard-coded value.

    Raises:
        ValueError: propagated from ``random.sample`` when fewer than
            ``n_candidates`` movies are present.
    """
    users = []
    movies = []
    for line in fr_file:
        node = line.rstrip('\n')
        if node.startswith('i'):
            movies.append(node)
        elif node.startswith('u'):
            users.append(node)
    for user in users:
        for item in sample(movies, n_candidates):
            fw_file.write('{},{}\n'.format(user, item))
if __name__ == '__main__':
    # CLI entry point: read all node ids and write user/movie candidate
    # pairs to the output file.
    parser = argparse.ArgumentParser(description=''' mine all paths''')
    parser.add_argument('--all_nodes', type=str, dest='all_nodes', default='data/all_nodes.txt')
    parser.add_argument('--candidate_user_items', type=str, dest='candidate_user_items', default='data/candidate_user_items.txt')
    parsed_args = parser.parse_args()
    all_nodes = parsed_args.all_nodes
    candidate_user_items = parsed_args.candidate_user_items
    fr_file = open(all_nodes, 'r')
    fw_file = open(candidate_user_items, 'w')
    load_data(fr_file, fw_file)
    fr_file.close()
    fw_file.close()
| 55TFSI/RKGE | all_paths.py | all_paths.py | py | 1,138 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "random.sample",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 29,
"usage_type": "call"
}
] |
def plot_data_with_fit(data, fit_curve, format_x, format_y):
    """Scatter-plot raw data together with a fitted curve and show it.

    Args:
        data: two-row sequence ``(x_values, y_values)`` of raw points.
        fit_curve: two-row sequence ``(x_values, y_values)`` of the fit.
        format_x, format_y: accepted for interface compatibility.
            NOTE(review): the original evaluated these as bare no-op
            expressions, so they had no effect; confirm the intended
            axis formatting with the caller before applying them.

    Returns:
        The result of ``plt.show()`` (None) after displaying the figure.
    """
    # Local import kept from the original. Fixed: pyplot was previously
    # imported twice under two aliases, and numpy was imported unused.
    import matplotlib.pyplot as plt

    plt.title('Final Curve Plot')
    plt.scatter(data[0], data[1], label='Data', s=1)
    plt.plot(fit_curve[0], fit_curve[1], 'blue')
    return plt.show()
| UW-ParksidePhysics/Delgado-Omar | plot_data_with_fit.py | plot_data_with_fit.py | py | 352 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.title",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mat... |
10909511760 | from datetime import datetime
from settings import ORDER_TTL, TCS_ACCOUNT_ID
from tinkoff.invest import OrderDirection, OrderType
from tinkoff.invest.schemas import StopOrderDirection as SODir
from tinkoff.invest.schemas import StopOrderExpirationType as SType
from tinkoff.invest.schemas import StopOrderType as SOType
from tools.utils import delta_minutes_to_utc
class OrderAdapter:
    """Builds the keyword arguments for a Tinkoff Invest order request
    from an asset object and an order type ('market' or 'limit')."""

    # Maps the internal direction flag to the Tinkoff API enum.
    DIRECTIONS = {
        'sell': OrderDirection.ORDER_DIRECTION_SELL,
        'buy': OrderDirection.ORDER_DIRECTION_BUY,
    }
    # Maps the constructor's order_type string to the Tinkoff API enum.
    ORDER_TYPES = {
        'market': OrderType.ORDER_TYPE_MARKET,
        'limit': OrderType.ORDER_TYPE_LIMIT,
    }

    def __init__(self, asset, order_type: str) -> None:
        self._asset = asset
        self._order_type = order_type

    @property
    def order_params(self):
        """Assemble the order-request kwargs.

        Direction comes from ``asset.sell``; a price is attached only
        for limit orders. The utcnow timestamp serves as a unique
        client-side order id.
        NOTE(review): ``datetime.utcnow()`` is naive and deprecated in
        Python 3.12 -- consider ``datetime.now(UTC)``.
        """
        params = {
            'account_id': TCS_ACCOUNT_ID,
            'order_type': self.ORDER_TYPES[self._order_type],
            'order_id': str(datetime.utcnow().timestamp()),
            'figi': self._asset.figi,
            'quantity': self._asset.get_lots(self._asset.next_order_amount),
        }
        params['direction'] = (
            self.DIRECTIONS['sell']
            if self._asset.sell
            else self.DIRECTIONS['buy']
        )
        if self._order_type == 'limit':
            params['price'] = self._asset.price
        return params
class StopOrderAdapter:
    """Builds the keyword arguments for a Tinkoff Invest stop-order
    request from a stop-order description object."""

    # Internal stop-type strings -> Tinkoff API stop-order enums.
    ORDER_TYPES = {
        'stop_loss': SOType.STOP_ORDER_TYPE_STOP_LOSS,
        'take_profit': SOType.STOP_ORDER_TYPE_TAKE_PROFIT,
        'stop_limit': SOType.STOP_ORDER_TYPE_STOP_LIMIT,
    }
    # 'gtd' = good-till-date (expires), 'gtc' = good-till-cancel.
    EXPIRATION_TYPES = {
        'gtd': SType.STOP_ORDER_EXPIRATION_TYPE_GOOD_TILL_DATE,
        'gtc': SType.STOP_ORDER_EXPIRATION_TYPE_GOOD_TILL_CANCEL,
    }
    DIRECTIONS = {
        'sell': SODir.STOP_ORDER_DIRECTION_SELL,
        'buy': SODir.STOP_ORDER_DIRECTION_BUY,
    }

    def __init__(self, stop_order):
        self._asset = stop_order.asset
        # Snap the requested price to the instrument's price increment.
        self._price = self._asset.get_correct_price(stop_order.price)
        self._params = {
            'figi': self._asset.figi,
            # Trigger price and execution price are set equal here.
            'price': self._price,
            'stop_price': self._price,
            'quantity': self._asset.get_lots(
                int(stop_order.sum / stop_order.price)
            ),
            'account_id': TCS_ACCOUNT_ID,
            'direction': self.DIRECTIONS[stop_order.params.direction],
            'stop_order_type': self.ORDER_TYPES[stop_order.params.stop_type],
            'expiration_type': self.EXPIRATION_TYPES[
                stop_order.params.expiration
            ],
        }
        # Good-till-date orders expire ORDER_TTL minutes from now (UTC).
        if stop_order.params.expiration == 'gtd':
            self._params['expire_date'] = delta_minutes_to_utc(ORDER_TTL)

    @property
    def order_params(self):
        """Return the assembled stop-order request kwargs."""
        return self._params
class SpreadToJsonAdapter:
    """Serializes a spread's two-leg execution state into a
    JSON-friendly dict."""

    def __init__(self, spread) -> None:
        self._spread = spread

    @staticmethod
    def _leg_as_dict(leg):
        # Prices are stringified so the payload survives JSON encoding
        # without losing precision.
        return {
            'executed': leg.executed,
            'avg_exec_price': str(leg.avg_exec_price),
        }

    @property
    def output(self):
        """Execution summary for both legs of the spread."""
        return {
            'far_leg': self._leg_as_dict(self._spread.far_leg),
            'near_leg': self._leg_as_dict(self._spread.near_leg),
        }
class SellBuyToJsonAdapter:
    """Serializes a sell/buy task's execution state into a
    JSON-friendly dict."""

    def __init__(self, sellbuy) -> None:
        self._sellbuy = sellbuy

    @property
    def output(self):
        """Execution summary; the price is stringified for JSON safety."""
        task = self._sellbuy
        return {
            'executed': task.executed,
            'avg_exec_price': str(task.avg_exec_price),
        }
| holohup/trademan-1.0-alpha-public | bot/tools/adapters.py | adapters.py | py | 3,580 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "tinkoff.invest.OrderDirection.ORDER_DIRECTION_SELL",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tinkoff.invest.OrderDirection",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "tinkoff.invest.OrderDirection.ORDER_DIRECTION_BUY",
"li... |
37366648878 | import configparser
# Task selection: each entry pairs a working directory with a display name.
working_dir_list = ['./examples/test-ex1-50d/', './examples/test-ex2']
task_name_list = ['example 1', 'example 2']
task_id = 1
conjugated_eigvec_flag = 0
with_FVD_solution = False
#with_FVD_solution = True
working_dir_name = working_dir_list[task_id]
task_name = task_name_list[task_id]
# read parameters from config file
config = configparser.ConfigParser()
# Fix: use a context manager instead of a bare open() so the config
# file handle is closed deterministically.
with open('../%s/params.cfg' % working_dir_name) as _cfg_file:
    config.read_file(_cfg_file)
md_flag = config['default'].getboolean('md_data_flag')
num_k = config['Training'].getint('eig_k')
eig_file_name_prefix = config['default'].get('eig_file_name_prefix')
log_filename = config['default'].get('log_filename')
# Data location depends on whether the task uses MD data or SDE samples.
if md_flag:
    data_filename_prefix = config['MD'].get('data_filename_prefix')
    data_filename_prefix_validation = config['MD'].get('data_filename_prefix_validation')
else :
    dim = config['SDE'].getint('dim')
    data_filename_prefix = config['SDE'].get('data_filename_prefix')
| zwpku/EigenPDE-NN | plot_scripts/common.py | common.py | py | 954 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 16,
"usage_type": "call"
}
] |
410815561 | from typing import Tuple
import jax
import jax.numpy as jnp
import jax.scipy.linalg as linalg
from numpy.typing import ArrayLike
def transition_function(F: jnp.array, u: jnp.array, L: jnp.array, h: float, n_linspace=10000) -> Tuple[
ArrayLike, ArrayLike,
ArrayLike]:
r"""
A prior of the form
\mathrm{d}X(t) = (FX(t) + u)\mathrm{d}t + L \mathrm{d}W_t,
has the following strong solution:
X(t+h) = \exp{Fh}(X(t) + \int_0^h \exp{-Fs}L \mathrm{d}W_s),
where
X(t+h) \mid X(t) ~ \mathcal{N}(A(h)X(t) + \xi(h), Q(h)).
----------------------------
Return \xi(h), Q(h), A(h).
"""
linspace = jnp.linspace(0, h, n_linspace)
A = linalg.expm(F * h)
@jax.vmap
def integrand_xi(s):
return linalg.expm(F * s) @ u
integrand_xi_values = integrand_xi(linspace)
xi = jnp.trapz(integrand_xi_values, linspace, axis=0)
@jax.vmap
def integrand_Q(s):
B = linalg.expm(F * s) @ L
return B @ B.T
integrand_Q_values = integrand_Q(linspace)
Q = jnp.trapz(integrand_Q_values, linspace, axis=0)
return xi, Q, A
| hallelujahylefay/bayesianSDEsolver | bayesian_sde_solver/ode_solvers/probnum/transition_function.py | transition_function.py | py | 1,114 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "jax.numpy.array",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "jax.numpy",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "jax.numpy.linspace",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line... |
17689667482 | import torch
# Use the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Number of residual blocks in the generator.
no_resBlocks = 16
# High-resolution crop size (pixels) used during training.
HR_shape = 96
# Dataset locations.
train_data_path = '../data/train'
val_data_path = '../data/val'
# Loss weighting for the GAN phase (adversarial and VGG feature terms;
# presumably SRGAN-style weights -- confirm against the training code).
advLossFactor = 0.001
VGGLossFactor = 0.006
# MSE-only pre-training phase.
mse_lr = 0.0001
mse_epochs = 700
# GAN phase: initial learning rate and the rate after decay.
initial_lr = 0.0001
second_lr = 0.00001
gan_epochs = 140
batch_size = 16
# Number of validation images rendered during evaluation.
images_to_eval = 10
# DataLoader worker processes.
no_workers = 8
| abed11326/Training-a-Super-Resolution-GAN-for-4x-image-upscaling | hypParam.py | hypParam.py | py | 365 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.device",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 3,
"usage_type": "attribute"
}
] |
75112397948 | from flask import Flask, render_template, request, redirect, url_for
import requests
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def index():
    """Home page: on a POST with the contact fields filled in, redirect
    to the /test greeting page; otherwise render the navigation menu.
    """
    try:
        # Missing form fields raise werkzeug's BadRequestKeyError, a
        # KeyError subclass -- so a plain GET (or incomplete POST) falls
        # through to the menu. Fix: narrowed from the original bare
        # ``except:`` so unrelated errors are no longer swallowed.
        nom = request.form['NOM']
        prenom = request.form['PRENOM']
        email = request.form['EMAIL']
    except KeyError:
        return render_template("index.html", linkbr="/brnews",
                               linkstw="/stwnews", linkmap="/map",
                               linkbanners="/banners",
                               linkgetplayer="/getplayer")
    return redirect(url_for(".test", nom=nom, prenom=prenom, email=email))
@app.route("/getplayer", methods=["GET", "POST"])
def getplayer():
    """Player-lookup form: map the checked box to a Fortnite account
    platform and redirect to the stats page, or re-render the form.
    """
    # Checkbox value -> fortnite-api accountType.
    account_types = {1: 'epic', 2: 'psn', 3: 'xbl'}
    try:
        name = request.form['name']
        # int(None) raises TypeError when no box was checked; a
        # non-numeric value raises ValueError. Fix: narrowed from the
        # original bare ``except:``; also removed a leftover debug print.
        selected = int(request.form.get('checkbox'))
    except (KeyError, TypeError, ValueError):
        return render_template("getplayer.html")
    accounttype = account_types.get(selected)
    if accounttype is None:
        # Fix: an unrecognized checkbox value previously fell off the
        # end of the function, returning None and causing a 500.
        return render_template("getplayer.html")
    return redirect(url_for(".playerstats", name=name, type=accounttype))
@app.route("/playerstats", methods=["GET", "POST"])
def playerstats():
    """Fetch lifetime Battle Royale stats for a player from
    fortnite-api.com and render them, or show an error page on any
    failure (missing query args, network error, hidden stats).
    """
    try:
        name = request.args['name']
        accounttype = request.args['type']
        url = 'https://fortnite-api.com/v2/stats/br/v2'
        # SECURITY(review): API key hard-coded in source control --
        # move it to configuration or an environment variable.
        headers = {
            'Authorization': 'd1341b3c-4723-4ff6-a667-153f6c9f238d'
        }
        params = {
            'name': name,
            'accountType': accounttype
        }
        # Fix: added a timeout so a stalled API call cannot hang the
        # request forever (a timeout lands in the except below).
        rep = requests.get(url, headers=headers, params=params, timeout=10)
        jsonn = rep.json()
        all_stats = jsonn['data']['stats']['all']
        minutesPlayed = all_stats['overall']["minutesPlayed"]
        hoursPlayed = minutesPlayed / 60
        daysPlayed = hoursPlayed / 24
        idcount = jsonn["data"]
        # NOTE(review): the 'solo' template variable is fed trio stats,
        # as in the original -- confirm which mode the template expects.
        return render_template(
            "playerstats.html", idcount=idcount, name=name, all=all_stats,
            solo=jsonn['data']['stats']['all']['trio'],
            hoursPlayed=round(hoursPlayed, 1),
            daysPlayed=round(daysPlayed, 1),
            battlePass=jsonn['data']["battlePass"])
    except Exception:
        # Fix: narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit).
        return render_template("errorplayerstats.html",
                               linkgetplayer="/getplayer")
@app.route("/test", methods=["GET", "POST"])
def test():
    """Greeting page fed by the index form via query-string parameters.

    Missing parameters raise werkzeug's BadRequestKeyError (HTTP 400),
    exactly as with direct ``request.args[...]`` access.
    """
    fields = {key: request.args[key] for key in ("nom", "prenom", "email")}
    return render_template("test.html", **fields)
@app.route("/map")
def map():
    """Render the current Fortnite map image (French locale).

    Note: the function name shadows the builtin ``map`` within this
    module, kept because it doubles as the Flask endpoint name.
    """
    response = requests.get('https://fortnite-api.com/v1/map',
                            params={'language': 'fr'})
    payload = response.json()
    return render_template("map.html",
                           link_image=payload['data']['images']['pois'])
@app.route("/banners")
def banners():
    """Render the list of in-game banners (French locale)."""
    response = requests.get('https://fortnite-api.com/v1/banners',
                            params={'language': 'fr'})
    return render_template("banner.html", data=response.json()["data"])
@app.route("/stwnews")
def stwNews():
    """Render the Save the World news feed (French locale)."""
    response = requests.get('https://fortnite-api.com/v2/news/stw',
                            params={'language': 'fr'})
    messages = response.json()["data"]["messages"]
    return render_template("stwnews.html", data=messages,
                           len_data=len(messages))
@app.route("/brnews")
def brNews():
    """Render the Battle Royale news feed (French locale)."""
    response = requests.get('https://fortnite-api.com/v2/news/br',
                            params={'language': 'fr'})
    motds = response.json()["data"]["motds"]
    return render_template("brnews.html", data=motds, len_data=len(motds))
if __name__ == "__main__":
    # NOTE(review): debug=True enables the interactive debugger and
    # auto-reload -- development only, never in production.
    app.run(debug=True)
| Foodjubi/fortnite-news | app.py | app.py | py | 4,379 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.request.form",... |
12026015047 | import pygame
# Global Consts
# Colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
BLUE = ( 0, 0, 255)
RED = ( 255, 0, 0)
GREEN = ( 0, 255, 0)
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
class Player(pygame.sprite.Sprite):
    """The player-controlled sprite: a red rectangle with horizontal
    movement, gravity, jumping, and platform collision."""

    # -- Attributes
    # Current speed vector (pixels per frame).
    change_x = 0
    change_y = 0

    # The Level whose platform_list we collide against (set by main()).
    level = None

    # -- Methods
    def __init__(self):
        # Call the parent (Sprite) constructor.
        super().__init__()
        # Create the player's image: a solid red block.
        width = 40
        height = 60
        self.image = pygame.Surface([width, height])
        self.image.fill(RED)
        # Set a reference to the image rect; position is set by main().
        self.rect = self.image.get_rect()

    def update(self):
        """Move the player one frame: apply gravity, then resolve the
        horizontal and vertical axes separately (order matters: each
        axis is moved and collision-corrected before the other)."""
        # Gravity
        self.calc_grav()
        # Horizontal movement first.
        self.rect.x += self.change_x
        # Check for horizontal collision with platforms.
        block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
        for block in block_hit_list:
            # If moving right, snap our right side to the block's left side.
            if self.change_x > 0:
                self.rect.right = block.rect.left
            elif self.change_x < 0:
                # Mirror-image when moving left.
                self.rect.left = block.rect.right
        # Then vertical movement.
        self.rect.y += self.change_y
        # Check for vertical collision with platforms.
        block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
        for block in block_hit_list:
            # Land on top of the block, or bump our head on its bottom.
            if self.change_y > 0:
                self.rect.bottom = block.rect.top
            elif self.change_y < 0:
                self.rect.top = block.rect.bottom
            # Stop vertical movement after any vertical collision.
            self.change_y = 0

    def calc_grav(self):
        """Apply gravity: constant downward acceleration, clamped when
        standing on the bottom edge of the screen."""
        if self.change_y == 0:
            self.change_y = 1
        else:
            self.change_y += .35
        # If on the ground (screen bottom) and falling, stop there.
        if self.rect.y >= SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:
            self.change_y = 0
            self.rect.y = SCREEN_HEIGHT - self.rect.height

    def jump(self):
        """Start a jump if standing on a platform or the screen bottom."""
        # Move down a bit and see if there is a platform below us.
        # Move down 2 pixels because it doesn't work well if we only move down 1
        # when working with a platform moving down.
        self.rect.y += 2
        platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
        self.rect.y -= 2
        # If it is ok to jump, set speed upwards (negative y is up).
        if len(platform_hit_list) > 0 or self.rect.bottom >= SCREEN_HEIGHT:
            self.change_y = -10

    # Player-controlled horizontal movement.
    def go_left(self):
        self.change_x = -6

    def go_right(self):
        self.change_x = 6

    def stop(self):
        self.change_x = 0
class Platform(pygame.sprite.Sprite):
    """A static green rectangle the player can stand on."""

    def __init__(self, width, height):
        """Create a platform surface of the given pixel dimensions;
        position is assigned by the Level that owns it."""
        super().__init__()
        surface = pygame.Surface([width, height])
        surface.fill(GREEN)
        self.image = surface
        self.rect = surface.get_rect()
class Level():
    """Generic super-class for a level. Subclasses populate the sprite
    groups with level-specific platforms and enemies."""

    # Sprite groups used in each level (created per-instance in __init__).
    platform_list = None
    enemy_list = None

    # How far the world has been scrolled left/right, in pixels.
    world_shift = 0

    def __init__(self, player):
        """Keep a reference to the player (needed when moving platforms
        collide with the player) and create empty sprite groups."""
        self.platform_list = pygame.sprite.Group()
        self.enemy_list = pygame.sprite.Group()
        self.player = player

    # Update everything on the level.
    def update(self):
        """Update every platform and enemy in the level."""
        self.platform_list.update()
        self.enemy_list.update()

    def draw(self, screen):
        """Draw the background and every sprite in the level."""
        screen.fill(BLUE)
        self.platform_list.draw(screen)
        self.enemy_list.draw(screen)

    def shift_world(self,shift_x):
        """Scroll the world horizontally as the player moves."""
        # Keep track of the cumulative shift amount.
        self.world_shift += shift_x
        # Shift every sprite by the same amount.
        for platform in self.platform_list:
            platform.rect.x += shift_x
        for enemy in self.enemy_list:
            enemy.rect.x += shift_x
#Create platforms
#Create platforms
class Level_01(Level):
    """Definition for level 1."""

    def __init__(self, player):
        # Call the parent constructor.
        Level.__init__(self,player)
        # Scroll distance (negative x) at which the next level starts.
        self.level_limit = -1000
        # Array with width, height, x and y of each platform.
        # NOTE(review): the last three entries are identical, so three
        # platforms stack at the same spot -- presumably a copy-paste
        # slip; confirm the intended layout.
        level = [[210, 70, 500, 500],
                 [210, 70, 800, 400],
                 [210, 70, 800, 400],
                 [210, 70, 800, 400],
                 ]
        # Go through the array above and add platforms.
        for platform in level:
            block = Platform(platform[0], platform[1])
            block.rect.x = platform[2]
            block.rect.y = platform[3]
            block.player = self.player
            self.platform_list.add(block)
#Create platforms for level 2
#Create platforms for level 2
class Level_02(Level):
    """Definition for level 2."""

    def __init__(self, player):
        Level.__init__(self,player)
        # Scroll distance (negative x) at which the level ends.
        self.level_limit = -1000
        # Array with width, height, x and y of each platform.
        level = [[210, 30, 450, 570],
                 [210, 30, 850, 420],
                 [210, 30, 1000, 520],
                 [210, 30, 1120, 280],
                 ]
        # Go through the array above and add platforms.
        for platform in level:
            block = Platform(platform[0], platform[1])
            block.rect.x = platform[2]
            block.rect.y = platform[3]
            block.player = self.player
            self.platform_list.add(block)
def main():
    """Main program: set up the window, player and levels, then run the
    event/update/draw loop at 60 fps until the window is closed."""
    pygame.init()
    # Set height and width of the screen.
    size = [SCREEN_WIDTH, SCREEN_HEIGHT]
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption("Side-Scrolling platformer")
    # Create the player.
    player = Player()
    # Create all levels.
    level_list = []
    level_list.append(Level_01(player))
    level_list.append(Level_02(player))
    # Set the current level.
    current_level_no = 0
    current_level = level_list[current_level_no]
    active_sprite_list = pygame.sprite.Group()
    player.level = current_level
    # Starting position: on the ground, 340 px from the left.
    player.rect.x = 340
    player.rect.y = SCREEN_HEIGHT - player.rect.height
    active_sprite_list.add(player)
    # Loop until the user clicks the close button.
    done = False
    # Used to manage how fast the screen updates.
    clock = pygame.time.Clock()
    # ------ Main Program Loop --------
    while not done:
        for event in pygame.event.get(): # User did something
            if event.type == pygame.QUIT: # If user clicked close
                done = True # Flag that we are done so we exit this loop
            # Key presses start movement / jumping.
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    player.go_left()
                if event.key == pygame.K_RIGHT:
                    player.go_right()
                if event.key == pygame.K_UP:
                    player.jump()
            # Key releases stop movement only if still moving that way.
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT and player.change_x < 0:
                    player.stop()
                if event.key == pygame.K_RIGHT and player.change_x > 0:
                    player.stop()
        # Update the player.
        active_sprite_list.update()
        # Update items in the level.
        current_level.update()
        # If the player nears the right side, shift the world left.
        if player.rect.right >= 500:
            diff = player.rect.right -500
            player.rect.right = 500
            current_level.shift_world(-diff)
        # If the player nears the left side, shift the world right.
        if player.rect.left <= 120:
            diff = 120 - player.rect.left
            player.rect.left = 120
            current_level.shift_world(diff)
        # If the player reaches the end of the level, go to the next one.
        current_position = player.rect.x + current_level.world_shift
        if current_position < current_level.level_limit:
            player.rect.x =120
            if current_level_no < len(level_list)-1:
                current_level_no += 1
                current_level = level_list[current_level_no]
                player.level = current_level
        current_level.draw(screen)
        active_sprite_list.draw(screen)
        # Limit to 60 fps.
        clock.tick(60)
        # Update the screen with what we've drawn.
        pygame.display.flip()
    pygame.quit()
# Script entry point.
if __name__ == "__main__":
    main()
| danielp28/Python-Testing | platformer.py | platformer.py | py | 8,838 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.sprite",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pygame.sprite.spritecollide",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pygame.... |
72531854269 | from typing import Final
import sqlalchemy as sa
def column_created_datetime(*, timezone: bool = True) -> sa.Column:
    """Build the standard ``created`` timestamp column.

    The value is filled in by the database (``now()``) at insert time
    and is never updated afterwards.
    """
    column_kwargs = {
        "nullable": False,
        "server_default": sa.sql.func.now(),
        "doc": "Timestamp auto-generated upon creation",
    }
    return sa.Column("created", sa.DateTime(timezone=timezone), **column_kwargs)
def column_modified_datetime(*, timezone: bool = True) -> sa.Column:
    """Build the standard ``modified`` timestamp column.

    Defaults to ``now()`` on insert and is refreshed via ``onupdate``
    whenever SQLAlchemy issues an UPDATE for the row.
    """
    column_kwargs = {
        "nullable": False,
        "server_default": sa.sql.func.now(),
        "onupdate": sa.sql.func.now(),
        "doc": "Timestamp with last row update",
    }
    return sa.Column("modified", sa.DateTime(timezone=timezone), **column_kwargs)
_TRIGGER_NAME: Final[str] = "auto_update_modified_timestamp"
def register_modified_datetime_auto_update_trigger(table: sa.Table) -> None:
    """registers a trigger/procedure couple in order to ensure auto
    update of the 'modified' timestamp column when a row is modified.

    NOTE: Add a *hard-coded* version in the alembic migration code!!!
    see [this example](https://github.com/ITISFoundation/osparc-simcore/blob/78bc54e5815e8be5a8ed6a08a7bbe5591bbd2bd9/packages/postgres-database/src/simcore_postgres_database/migration/versions/e0a2557dec27_add_services_limitations.py)

    Arguments:
        table -- the table to add the auto-trigger to
    """
    assert "modified" in table.columns  # nosec

    # NOTE: scoped on database
    # The trailing "()" is intentional: the same string is used both in
    # CREATE FUNCTION and in EXECUTE PROCEDURE, where the parentheses are
    # part of the call syntax.
    procedure_name: Final[str] = f"{table.name}_auto_update_modified_timestamp()"

    # TRIGGER: fires the procedure before every INSERT or UPDATE on the table
    modified_timestamp_trigger = sa.DDL(
        f"""
    DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {table.name};
    CREATE TRIGGER {_TRIGGER_NAME}
    BEFORE INSERT OR UPDATE ON {table.name}
    FOR EACH ROW EXECUTE PROCEDURE {procedure_name};
        """
    )
    # PROCEDURE: plpgsql function that stamps NEW.modified with the current time
    update_modified_timestamp_procedure = sa.DDL(
        f"""
    CREATE OR REPLACE FUNCTION {procedure_name}
    RETURNS TRIGGER AS $$
    BEGIN
        NEW.modified := current_timestamp;
        RETURN NEW;
    END;
    $$ LANGUAGE plpgsql;
        """
    )
    # REGISTER THEM PROCEDURES/TRIGGERS
    # Registration order matters: the procedure must be created before the
    # trigger that references it.
    sa.event.listen(table, "after_create", update_modified_timestamp_procedure)
    sa.event.listen(table, "after_create", modified_timestamp_trigger)
# Shared kwargs for sa.Numeric columns: two decimal places of scale.
NUMERIC_KWARGS = {"scale": 2}
| ITISFoundation/osparc-simcore | packages/postgres-database/src/simcore_postgres_database/models/_common.py | _common.py | py | 2,235 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "sqlalchemy.Column",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.sql.func.now",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchem... |
24177027496 | import pygame
from pygame.locals import *
class MyPlane(pygame.sprite.Sprite):
    """The player-controlled plane sprite.

    Handles WASD/arrow-key movement with screen clamping, a two-frame
    engine animation while alive, and a four-frame blow-up animation
    after being hit.
    """

    def __init__(self, bg_size, screen):
        pygame.sprite.Sprite.__init__(self)
        self.screen = screen
        # Two alternating frames produce the engine-flame animation.
        self.image1 = pygame.image.load('../img/hero1.png').convert_alpha()
        self.image2 = pygame.image.load('../img/hero2.png').convert_alpha()
        self.active = True
        self.image = self.image1
        self.rect = self.image.get_rect()
        # Start horizontally centered near the bottom of the background.
        self.rect.left = (bg_size[0] - 102) / 2
        self.rect.top = 550
        self.speed = 4
        self.HP = 5
        # Destruction animation frames, played in order after a hit.
        self.destroy_images = [
            pygame.image.load('../img/hero_blowup_n1.png').convert_alpha(),
            pygame.image.load('../img/hero_blowup_n2.png').convert_alpha(),
            pygame.image.load('../img/hero_blowup_n3.png').convert_alpha(),
            pygame.image.load('../img/hero_blowup_n4.png').convert_alpha(),
        ]
        self.destroy_index = 0
        self.timer = 0
        # Pixel mask for precise collision detection.
        self.mask = pygame.mask.from_surface(self.image)
        self.bomb = 5
        self.double_fire = False
        self.f = 20

    def move(self):
        """Advance one frame: handle input and animation."""
        self.timer += 1
        if not self.active:
            # Destroyed: play the blow-up frames, advancing every 25 ticks.
            if self.destroy_index < 4:
                self.image = self.destroy_images[self.destroy_index]
                if self.timer % 25 == 0:
                    self.destroy_index += 1
            return
        pressed = pygame.key.get_pressed()
        if pressed[K_w] or pressed[K_UP]:
            self.rect.top -= self.speed
        if pressed[K_s] or pressed[K_DOWN]:
            self.rect.top += self.speed
        if pressed[K_a] or pressed[K_LEFT]:
            self.rect.left -= self.speed
        if pressed[K_d] or pressed[K_RIGHT]:
            self.rect.left += self.speed
        # Clamp the plane to the playable area.
        self.rect.left = max(0, min(self.rect.left, 378))
        self.rect.top = max(0, min(self.rect.top, 574))
        # Swap sprite frames for the engine-flame effect.
        self.image = self.image2 if self.image is self.image1 else self.image1

    def draw(self):
        self.screen.blit(self.image, [self.rect.left, self.rect.top])

    def hit(self):
        """Mark the plane as destroyed; move() then plays the animation."""
        self.active = False

    def reset(self):
        """Revive the plane after a destruction, restoring bombs and frames."""
        self.active = True
        self.image = self.image1
        self.destroy_index = 0
        self.bomb = 5
class Bullet(pygame.sprite.Sprite):
    """A single bullet fired by the player's plane.

    ``pos`` selects the muzzle: 0 = left wing, 1 = center, 2 = right wing.
    Wing bullets use the secondary sprite image.
    """

    def __init__(self, plane, pos = 1):
        pygame.sprite.Sprite.__init__(self)  # pos: 0 - left, 1 - middle, 2 - right
        self.plane = plane
        self.active = True
        self.pos = pos
        self.img1 = pygame.image.load('../img/bullet1.png').convert_alpha()
        self.img2 = pygame.image.load('../img/bullet2.png').convert_alpha()
        self.img = self.img1
        #self.sound = pygame.mixer.music.load('bullet.mp3')
        self.rect = self.img.get_rect()
        # Horizontal offset from the plane's left edge for each muzzle.
        x_offset = None
        if pos == 0:
            x_offset = 25
            self.img = self.img2
        elif pos == 1:
            x_offset = 50
        elif pos == 2:
            x_offset = 75
            self.img = self.img2
        if x_offset is not None:
            self.rect.left = plane.rect.left + x_offset
            self.rect.top = plane.rect.top + 50

    def move(self):
        """Fly straight up; deactivate once off the top of the screen."""
        self.rect.top -= 10
        if self.rect.top < 0:
            self.active = False

    def draw(self, screen):
        screen.blit(self.img, [self.rect.left, self.rect.top])
        # Refresh the collision mask to match the drawn image.
        self.mask = pygame.mask.from_surface(self.img)
| daniel-yaoyuan/paperplane | src/hero.py | hero.py | py | 4,088 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.sprite",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pyga... |
71723953788 | # coding:utf-8
import datetime
from sqlalchemy import Column, Integer, DateTime, Numeric, create_engine, VARCHAR
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from config import DB_CONFIG, DEFAULT_SCORE
'''
sql操作的基类
包括ip,端口,types类型(0高匿名,1透明),protocol(0 http,1 https http),country(国家),area(省市),updatetime(更新时间)
speed(连接速度)
'''
BaseModel = declarative_base()
class Proxy(BaseModel):
    """ORM row for a single proxy server: address, anonymity type,
    protocol, geo info, last update time, connection speed and score.

    NOTE: the declaration order of the columns defines the generated
    table's column order.
    """
    __tablename__ = 'proxy'
    id = Column(Integer, primary_key=True, autoincrement=True)
    ip = Column(VARCHAR(16), nullable=False)
    types = Column(Integer, nullable=False)  # 0 = high anonymity, 1 = transparent (see module docstring)
    port = Column(Integer, nullable=False)
    protocol = Column(Integer, nullable=False, default=0)  # 0 = http, 1 = https
    country = Column(VARCHAR(100), nullable=False)
    area = Column(VARCHAR(100), nullable=False)
    updatetime = Column(DateTime(), default=datetime.datetime.utcnow)
    speed = Column(Numeric(5, 2), nullable=False)  # connection speed
    score = Column(Integer, nullable=False, default=DEFAULT_SCORE)

    def get_proxy(self):
        """Return the proxy as a URL string, or None when protocol < 0."""
        if self.protocol < 0:
            return None
        return ("http://%s:%d" if self.protocol == 0 else "https://%s:%d") % (self.ip, self.port)
class SqlHelper(object):
    """Thin CRUD wrapper around the ``proxy`` table.

    All public methods accept ``conditions`` as a plain dict that maps the
    column names listed in ``params`` to the value the column must equal.
    Keys not present in ``params`` are silently ignored.
    """

    # Maps externally-used column names to the ORM column attributes.
    params = {'id': Proxy.id, 'ip': Proxy.ip, 'port': Proxy.port, 'types': Proxy.types, 'protocol': Proxy.protocol,
              'country': Proxy.country, 'area': Proxy.area, 'score': Proxy.score}

    def __init__(self):
        if 'sqlite' in DB_CONFIG['DB_CONNECT_STRING']:
            # SQLite refuses cross-thread use of one connection by default;
            # the spider shares the session across threads, so disable it.
            connect_args = {'check_same_thread': False}
            self.engine = create_engine(DB_CONFIG['DB_CONNECT_STRING'], echo=False, connect_args=connect_args)
        else:
            self.engine = create_engine(DB_CONFIG['DB_CONNECT_STRING'], echo=False)
        DB_Session = sessionmaker(bind=self.engine)
        self.session = DB_Session()

    def init_db(self):
        """Create all tables declared on BaseModel if they do not exist."""
        BaseModel.metadata.create_all(self.engine)

    # def drop_db(self):
    #     BaseModel.metadata.drop_all(self.engine)

    def _build_filters(self, conditions):
        """Translate a {column-name: value} dict into a list of SQLAlchemy
        equality clauses, skipping keys that are not in ``params``."""
        if not conditions:
            return []
        filters = []
        for key in conditions:
            if self.params.get(key, None):
                filters.append(self.params.get(key) == conditions.get(key))
        return filters

    def delete(self, conditions=None):
        """Delete rows matching ``conditions``.

        WARNING (preserved behavior): a truthy ``conditions`` dict whose keys
        are all unknown produces no filters and deletes *every* row.
        Returns a ``('deleteNum', n)`` tuple — a historical quirk; note that
        ``update`` returns a dict instead.
        """
        if conditions:
            query = self.session.query(Proxy)
            for condition in self._build_filters(conditions):
                query = query.filter(condition)
            deleteNum = query.delete()
            self.session.commit()
        else:
            deleteNum = 0
        return ('deleteNum', deleteNum)

    def update(self, conditions=None, value=None):
        """Update rows matching ``conditions`` with the columns in ``value``.

        :param conditions: dict like ``{'ip': '192.168.0.1'}``
        :param value: dict of column-name -> new value
        :return: ``{'updateNum': n}`` where n is the number of rows changed
        """
        if conditions and value:
            query = self.session.query(Proxy)
            for condition in self._build_filters(conditions):
                query = query.filter(condition)
            updatevalue = {}
            for key in value:
                if self.params.get(key, None):
                    updatevalue[self.params.get(key, None)] = value.get(key)
            updateNum = query.update(updatevalue)
            self.session.commit()
        else:
            updateNum = 0
        return {'updateNum': updateNum}

    def select(self, count=None, conditions=None):
        """Return (id, ip, port, score, protocol) rows ordered by score
        descending, then speed ascending; limited to ``count`` if given.

        :param count: optional row limit
        :param conditions: optional filter dict (see class docstring)
        """
        query = self.session.query(Proxy.id, Proxy.ip, Proxy.port, Proxy.score, Proxy.protocol)
        for condition in self._build_filters(conditions):
            query = query.filter(condition)
        query = query.order_by(Proxy.score.desc(), Proxy.speed)
        if count:
            query = query.limit(count)
        return query.all()

    def select_valid(self, count=None, conditions=None):
        """Like ``select`` but restricted to proxies with score > 0.

        NOTE (preserved behavior): when ``count`` is given the rows are
        additionally ordered newest-id-first and speed is sorted descending;
        without ``count`` only score/speed (descending) are used.
        """
        query = self.session.query(Proxy.id, Proxy.ip, Proxy.port, Proxy.score, Proxy.protocol)
        query = query.filter(Proxy.score > 0)
        for condition in self._build_filters(conditions):
            query = query.filter(condition)
        if count:
            return query.order_by(Proxy.id.desc(), Proxy.score.desc(), Proxy.speed.desc()).limit(count).all()
        return query.order_by(Proxy.score.desc(), Proxy.speed.desc()).all()

    def close(self):
        # Kept as a no-op for interface compatibility with other helpers.
        pass
if __name__ == '__main__':
    # Ad-hoc smoke test: select domestic proxies, print their URLs, then
    # zero out the score of the first one found.
    sqlhelper = SqlHelper()
    condition = {"country": "国内"}
    ips = sqlhelper.select(conditions=condition)
    print(len(ips))
    for ip in ips:
        # Re-wrap the row tuple in a Proxy instance to reuse get_proxy().
        proxy = Proxy(id=ip.id, ip=ip.ip, port=ip.port, protocol=ip.protocol, score=ip.score)
        print(proxy.get_proxy())
    print(ips[0].id)
    sqlhelper.update(conditions={"id": ips[0].id}, value={"score": 0})
    proxy = Proxy(ip="127.0.0.1", port=8080, protocol=0)
    print(proxy.get_proxy())
| xindemeng/python-projects | jd_spider/jd_spider/db/SqlHelper.py | SqlHelper.py | py | 6,761 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 20,
"usage_type": "argument"
},
{
... |
18573808607 | from Utilities import say
import Utilities
import json
import Room
class Thing:
    """The basic class for all non-Room objects in the game.

    Provides default (mostly "you can't do that") handlers for every player
    action; subclasses override the handlers they support and flip the
    corresponding ``can_*`` capability flags.
    """
    def __init__(self, id, name):
        # id is the unique key used in the game's thing_list registry.
        self.id = id
        self.name = name
        self.adjectives = []
        self.alternate_names = []
        # how the item should appear in a list. A book. An apple. A piece of cheese.
        self.list_name = "a " + name
        # Starting Location is a Room
        self.starting_location = None
        # Capability flags; subclasses flip these to enable actions.
        self.can_be_taken = False
        self.can_be_read = False
        self.can_be_dropped = False
        self.has_been_taken = False
        self.can_go = False
        self.has_contents = False
        self.clock = False
        self.can_be_opened = False
        self.can_receive = False
        self.has_dynamic_description = False
        self.is_listed = True
        self.is_accessible = True
        ## room or storage of current location
        self.current_location = None # Room
        self.description = "This is a thing."
        self.dynamic_description_text = "There is a thing."
        # Default Text for all messages
        self.msg_take_first = "You take the {}.".format(self.name)
        self.msg_take = "You take the {}.".format(self.name)
        self.msg_cannot_take = "You cannot take the {}.".format(self.name)
        self.msg_already_in_inventory = "You already have the {}.".format(self.name)
        self.msg_cannot_read = "There is nothing to read on the {}.".format(self.name)
        self.msg_cannot_be_opened = "The {} cannot be opened".format(self.name)
        self.msg_cannot_be_closed = "The {} cannot be closed".format(self.name)
        self.msg_drop = "You drop the {}.".format(self.name)
        self.msg_cannot_drop = "You cannot drop the {}".format(self.name)
        self.msg_cannot_go = "That is not a way you can go."
        self.msg_go = "You go that way."
        self.msg_cannot_pull = "You cannot pull that."
        self.msg_has_no_contents = "The {} can't store anything.".format(self.name)
        self.msg_cannot_look_in = "You cannot look in the {}".format(self.name)
        self.msg_nothing_happens = "Nothing happens"
        self.msg_cannot_eat = "You cannot eat that."
        self.msg_cannot_drink = "You cannot drink that."
        self.msg_cannot_play = "You cannot play that."
        self.msg_cannot_dance = "You cannot dance with that."
        self.msg_cannot_spray = "You cannot spray that."
        self.msg_cannot_talk = "You cannot talk to that."
        self.msg_cannot_sit = "You cannot sit there."

    def get_status(self, type):
        """returns the status of a thing in JSON format

        Rooms and Things are serialized as "<R:id>" / "<T:id>" reference
        strings so the object graph can be rebuilt by set_status().
        """
        # Returns the appropriate export value based on whether value is a Room or Thing
        def get_export_value(value):
            if isinstance(value, Room.Room):
                return "<R:" + value.id + ">"
            elif isinstance(value, Thing):
                return "<T:" + value.id + ">"
            else:
                return value
        # Copy so the live attribute dict is not mutated while exporting.
        str_dict = self.__dict__.copy()
        # print ("str_dict before: " + str(str_dict))
        for attr in str_dict:
            if isinstance(str_dict[attr], list):
                new_list = list()
                for x in str_dict[attr]:
                    new_list.append(get_export_value(x))
                str_dict[attr] = new_list
            elif isinstance(str_dict[attr], dict):
                new_dict = dict()
                for x in str_dict[attr]:
                    new_dict[x] = get_export_value(str_dict[attr][x])
                str_dict[attr] = new_dict
            else:
                str_dict[attr] = get_export_value(str_dict[attr])
        # print ("str_dict after: " + str(str_dict))
        ret_val = dict()
        ret_val["type"] = type
        ret_val["data"] = str_dict
        return json.dumps(ret_val)

    def set_status(self, status, thing_list, room_list):
        """uses the JSON data in status to update the thing

        "<R:id>" / "<T:id>" reference strings are resolved back into the
        live Room/Thing objects via the supplied registries.
        """
        # Returns the appropriate import value based on whether value is Room or Thing
        def get_import_value(value, thing_list, room_list):
            # NOTE(review): the local name shadows the builtin ``list`` inside
            # this helper; harmless here but worth renaming eventually.
            list = None
            if isinstance(value, str):
                if value.find("<R:") == 0:
                    list = room_list
                elif value.find("<T:") == 0:
                    list = thing_list
            if list is not None:
                id = value[3:(value.find(">"))]
                return list[id]
            return value
        status_obj = json.loads(status)
        for attr in status_obj:
            if isinstance(status_obj[attr], list):
                imp_val = list()
                for x in status_obj[attr]:
                    imp_val.append(get_import_value(x, thing_list, room_list))
            elif isinstance(status_obj[attr], dict):
                imp_val = dict()
                for i in status_obj[attr]:
                    imp_val[i] = get_import_value(status_obj[attr][i], thing_list, room_list)
            else:
                imp_val = get_import_value(status_obj[attr], thing_list, room_list)
            setattr(self, attr, imp_val)

    def get_desc(self):
        """returns the description to be used when looking at the room"""
        return self.description

    def get_dynamic_description(self):
        """returns the description to be used when looking at the room"""
        return self.dynamic_description_text

    def get_list_name(self):
        return self.list_name

    # ACTION for look (and look at)
    def look(self, game, actionargs):
        # Special case: room I is dark; nothing but the cobwebs is visible
        # until it is lit.
        cannot_see = False
        if game.player.current_room.id == "roomI":
            if not game.player.current_room.is_lit\
                    and self.name != "cobwebs":
                cannot_see = True
        if cannot_see:
            say("You don't see {}.".format(self.list_name))
            say("But then again you can't really see much of anything...")
        else:
            say(self.get_desc())

    def look_in(self, game, actionargs):
        say(self.msg_cannot_look_in)

    # ACTION for read
    def read(self, game, actionargs):
        # Readable things just show their description (same as look).
        if self.can_be_read:
            self.look(game, actionargs)
        else:
            say(self.msg_cannot_read)

    def open(self, game, actionargs):
        say(self.msg_cannot_be_opened)

    def close(self, game, actionargs):
        say(self.msg_cannot_be_closed)

    # ACTION for take
    def take(self, game, actionargs):
        if self.can_be_taken:
            if game.player.is_in_inventory(self):
                say(self.msg_already_in_inventory)
            else:
                game.player.take(self)
                # First pickup may carry a special message.
                if not self.has_been_taken:
                    self.has_been_taken = True
                    say(self.msg_take_first)
                else:
                    say(self.msg_take)
        else:
            say(self.msg_cannot_take)

    # ACTION for "drop"
    def drop(self, game, actionargs):
        if self.can_be_dropped:
            say(self.msg_drop)
            # TODO make sure game function is used properly
            game.player.drop(self)
        else:
            say(self.msg_cannot_drop)

    def put_down(self, game, actionargs):
        self.drop(game, actionargs)

    def give_to(self, game, actionargs):
        # Generic giving: the shifty man refuses everything but his special
        # item; other receivers have no default reaction.
        if self.can_be_dropped:
            thing_to_receive = Utilities.find_by_name(actionargs["iobj"], game.thing_list)
            if thing_to_receive is game.thing_list["shiftyMan"]:
                say("The shifty man does not want the {}.".format(self.name))
            elif thing_to_receive.can_receive:
                # TODO better define default action?
                say("")
            else:
                say("You cannot give anything to the {}".format(thing_to_receive.name))
        else:
            say("You cannot give the {}.".format(self.name))

    def go(self, game, actionargs):
        """Default response for "cannot go" """
        say(self.msg_cannot_go)

    def put_in(self, game, actionargs):
        # Look for the container in the room first, then in the inventory.
        if not self.can_be_dropped:
            say(self.msg_cannot_drop)
        else:
            storage_object = Utilities.find_by_name(actionargs["iobj"], game.player.current_room.get_all_accessible_contents())
            if storage_object == None:
                storage_object = Utilities.find_by_name(actionargs["iobj"],game.player.inventory)
            storage_object.receive_item(game, self, "in")

    def put_on(self, game, actionargs):
        # Same lookup as put_in, but with the "on" preposition.
        if not self.can_be_dropped:
            say(self.msg_cannot_drop)
        else:
            storage_object = Utilities.find_by_name(actionargs["iobj"], game.player.current_room.get_all_accessible_contents())
            if storage_object == None:
                storage_object = Utilities.find_by_name(actionargs["iobj"],game.player.inventory)
            storage_object.receive_item(game, self, "on")

    def pull(self, game, actionargs):
        say(self.msg_cannot_pull)

    def receive_item(self, game, item, prep):
        say("You can't put things {} the {}.".format(prep, self.name))

    def use(self, game, actionargs):
        say(self.msg_nothing_happens)

    def dance(self, game, actionargs):
        say(self.msg_cannot_dance)

    def eat(self, game, actionargs):
        say(self.msg_cannot_eat)

    def drink(self, game, actionargs):
        say(self.msg_cannot_drink)

    def play(self, game, actionargs):
        say(self.msg_cannot_play)

    def spray(self, game, actionargs):
        say(self.msg_cannot_spray)

    def spray_with(self, game, actionargs):
        say(self.msg_cannot_spray)

    def talk(self, game, actionargs):
        say(self.msg_cannot_talk)

    def hit(self, game, actionargs):
        say(self.msg_nothing_happens)

    def sit(self, game, actionargs):
        say(self.msg_cannot_sit)

    # Special Functions
    # RAM/KIN/TIC are the game's "call" incantations; no-ops by default,
    # overridden by the things that react to them.
    def ram(self, game, actionargs):
        say(self.msg_nothing_happens)

    def kin(self, game, actionargs):
        say(self.msg_nothing_happens)

    def tic(self, game, actionargs):
        say(self.msg_nothing_happens)
class Exit(Thing):
    """A Thing that moves the player to ``destination`` when gone through."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_go = True
        self.is_listed = False
        self.destination = None  # Room this exit leads to

    def go(self, game, actionargs):
        if not self.can_go:
            say(self.msg_cannot_go)
            return
        say(self.msg_go)
        # TODO make sure game function is used properly
        game.player.go(self.destination)
        game.new_room = True

    def use(self, game, actionargs):
        # Using an exit means going through it.
        self.go(game, actionargs)

    def get_status(self, type=None):
        return super().get_status("Exit" if type is None else type)
class Door(Exit):
    """An Exit representing a doorway; opening it simply walks through."""

    def get_status(self, type=None):
        return super().get_status("Door" if type is None else type)

    def open(self, game, actionargs):
        # Opening a door is treated as going through it.
        self.go(game, actionargs)
class BlockedDoor(Door):
    """A Door that starts out impassable; game events flip ``can_go``."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_go = False
        self.locked = False
        self.msg_unlock = "The door is unlocked."
        self.msg_cannot_go = "You cannot go through the door."
        self.alt_msg_cannot_go = None  # optional special refusal text
        self.msg_go = "You go through the door."

    def get_status(self):
        return super().get_status("BlockedDoor")

    def go(self, game, actionargs):
        # The clock-room door shows a special refusal while the shifty man
        # still occupies room J. Short-circuit keeps the roomJ lookup
        # restricted to that one door.
        shifty_man_blocks = (
            self.id == "clockRoomDoor"
            and game.room_list["roomJ"].shifty_man in game.room_list["roomJ"].contents
            and not self.can_go
            and self.alt_msg_cannot_go is not None
        )
        if shifty_man_blocks:
            say(self.alt_msg_cannot_go)
        else:
            super().go(game, actionargs)
class MetaDoor(Door):
    """The final ornate door; it unlocks once all five orbs are lit."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_go = False
        self.locked = True
        self.num_lights = 0
        self.msg_unlock = "The fifth and final orb along the top of the ornate door begins to glow. " \
                          "You hear clanging and whirring sounds as if some internal mechanism is " \
                          "operating inside the door."
        self.msg_cannot_go = "You try to open the door, but it will not budge."
        self.msg_go = "You approach the door, and with the slightest touch, it slowly swings open. " \
                      "You walk through."

    def get_status(self):
        return super().get_status("MetaDoor")

    def add_light(self):
        """Light one more orb; at five, the door unlocks itself."""
        self.num_lights += 1
        if self.num_lights == 1:
            say("One of the orbs along the top of the ornate door suddenly begins to glow bright white.")
        elif self.num_lights == 5:
            say(self.msg_unlock)
            self.can_go = True
            self.locked = False
        else:
            say("Another orb on the door begins to glow. Now {} of the five orbs are shining "
                "bright.".format(self.num_to_word(self.num_lights)))

    def num_to_word(self, num):
        """takes an integer 1 through 5 and returns it spelled out"""
        words = {0: "none", 1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}
        return words.get(num)

    def get_desc(self):
        """Say a description reflecting how many orbs are currently lit."""
        base = "A towering ornate door. It is intricately decorated, and seems to be connected via " \
               "various cords to the large computer. "
        if self.num_lights == 5:
            tail = "All five orbs along the top of the door are glowing brightly."
        elif self.num_lights == 0:
            tail = "There are five dark orbs along the top of the door."
        else:
            tail = "There are five orbs along the top of the door, {} of which are " \
                   "glowing white.".format(self.num_to_word(self.num_lights))
        say(base + tail)
# class Door(Exit):
# is_lockable = False
# is_locked = False
# is_open = True
# will_unlock = []
# message_unlocked = "The door is already unlocked."
# message_locked = "The door is locked"
# message_cant_unlock = "That can't unlock the door."
# message_unlocked = "The door is unlocked."
# message_not_lockable = "This door does not lock."
#
# def __init__(self, id, name):
# self.id = id
# self.name= name
# self.adjectives = []
# self.alternate_names = []
# self.actions = {}
#
# def unlock(self, object_id):
# if not self.is_lockable:
# say(self.message_not_lockable)
# elif not self.is_locked:
# say(self.message_unlocked)
# elif object not in self.will_unlock:
# say(self.message_cant_unlock)
# else:
# say(self.message_unlocked)
# self.is_locked = False
#
# def go(self):
# if self.is_locked:
# say(self.message_locked)
#
# class MultiKeyDoor(Door):
# number_of_keys = 0
# keys_to_unlock = 5
#
#
# def get_description(self):
# if
#
#
class Item(Thing):
    """A Thing the player can both pick up and put down."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_be_taken = True
        self.can_be_dropped = True

    def get_status(self, type=None):
        return super().get_status("Item" if type is None else type)
class RubberDuck(Item):
    """The rubber duck; giving it to the shifty man unlocks the clock-room door."""

    def __init__(self, id, name):
        super().__init__(id, name)

    def get_status(self, type=None):
        if type is None:
            type = "RubberDuck"
        return super().get_status(type)

    def give_to(self, game, actionargs):
        """Hand the duck to something. Only the shifty man accepts it; doing
        so unlocks the clock room door and consumes the duck."""
        recipient = game.get_thing_by_name(actionargs["iobj"], False)
        if recipient is not game.thing_list["shiftyMan"]:
            recipient = Utilities.find_by_name(actionargs["iobj"], game.thing_list)
            if recipient.can_receive:
                say("The {} doesn't want the rubber duck.".format(recipient.name))
            else:
                say("You cannot give anything to the {}".format(recipient.name))
        else:
            door = game.thing_list["clockRoomDoor"]
            # Use say() like every other player-facing message (was print(),
            # which bypassed the game's output channel).
            say(door.msg_unlock)
            door.locked = False
            door.can_go = True
            game.player.remove_from_inventory(self)
class Book(Item):
    """The "Tome of Documentation": shows per-room notes plus any learned
    special functions when read. It cannot be dropped once taken."""
    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_be_read = True
        self.can_be_dropped = False
        self.msg_cannot_drop = "This book of documentation seems too important to leave behind."
    def get_status(self, type=None):
        if type is None:
            type = "Book"
        return super().get_status(type)
    def read(self, game, actionargs):
        # Reading the book while it lies in the room implicitly picks it up.
        if not game.player.is_in_inventory(self):
            say("You pick up the book, and flip through its pages.")
            game.player.current_room.remove_thing(self)
            game.player.add_to_inventory(self)
        else:
            say("You flip through the \"Tome of Documentation\"...")
        # Build the page: room notes first, then any learned special functions.
        book_text = "<WRITTEN_TEXT>"
        book_text += "Notes on the " + game.player.current_room.name + "\n"
        book_text += game.player.current_room.documentation + "\n"
        at_least_one_func = False
        for func in game.player.special_functions.values():
            if func["learned"] == True:
                if at_least_one_func == False:
                    at_least_one_func = True
                    # Header is emitted once, before the first learned function.
                    book_text += "Special functions (used with 'call'): \n"
                book_text += func["name"].upper() + ": " + func["description"] + "\n"
        book_text += "</>"
        say(book_text)
    def open(self, game, actionargs):
        # Opening the book is the same as reading it.
        self.read(game, actionargs)
class Cheese(Item):
    """A piece of cheese; feeding it to the hungry mouse (by giving or by
    dropping it in room D) distracts the mouse and exposes the lever."""

    def __init__(self, id, name):
        super().__init__(id, name)
        # FIX: added the missing space between the two concatenated sentences.
        self.msg_cannot_eat = "As you bring the cheese to your lips, the smell makes you gag. " \
                              "You decide it isn't fit for human consumption."

    def get_status(self, type=None):
        if type is None:
            type = "Cheese"
        return super().get_status(type)

    def give_to(self, game, actionargs):
        """Offer the cheese; only the hungry mouse takes it."""
        thing_to_receive = Utilities.find_by_name(actionargs["iobj"], game.thing_list)
        # Dark-room special case: in unlit room I only the cobwebs are visible.
        # FIX: `id is not "cobwebs"` compared identity with a literal; use !=.
        if game.player.current_room.id == "roomI" and \
                game.player.current_room.is_lit == False and \
                thing_to_receive.id != "cobwebs":
            # FIX: route through say() like all other player-facing messages.
            say("You don't see {}.".format(thing_to_receive.list_name))
            say("Then again you can't really see much of anything...")
            return
        if thing_to_receive is game.thing_list["hungryMouse"]:
            # FIX: "mosue's" -> "mouse's".
            message = "As you hold out the cheese, the mouse's eyes widen. " \
                      "It snatches it from your hand, and runs to the opposite corner of the room. " \
                      "It begins nibbling away."
            say(message)
            self.mouse_eats_cheese(game, actionargs)
        elif thing_to_receive.can_receive:
            say("The {} doesn't want the cheese.".format(thing_to_receive.name))
        else:
            say("You cannot give anything to the {}".format(thing_to_receive.name))

    def drop(self, game, actionargs):
        """Dropping the cheese in the mouse's room also feeds the mouse."""
        if game.player.current_room is game.room_list["roomD"]:
            # FIX: "mouses's" -> "mouse's".
            message = "You drop the cheese, and the mouse's eyes widen. " \
                      "It quickly darts over, grabs the cheese, and runs to the opposite corner of the room. " \
                      "It begins nibbling away."
            say(message)
            self.mouse_eats_cheese(game, actionargs)
        else:
            Thing.drop(self, game, actionargs)

    def mouse_eats_cheese(self, game, actionargs):
        """Consume the cheese, swap the mouse sprite, and expose the lever."""
        game.player.remove_from_inventory(self)
        game.room_list["roomD"].remove_thing(game.thing_list["hungryMouse"])
        game.room_list["roomD"].add_thing(game.thing_list["eatingMouse"])
        game.thing_list["lever"].become_reachable()
class Ticket(Item):
    """A bus ticket dispensed by a machine; giving it to the driver DAEMON
    unlocks access to the bus."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.dispensed = False
        # FIX: take() says msg_blocked before the ticket is dispensed, but the
        # attribute was never initialized (AttributeError). Default to the
        # generic cannot-take text; scenario data may overwrite it.
        self.msg_blocked = self.msg_cannot_take

    def get_status(self, type=None):
        if type is None:
            type = "Ticket"
        return super().get_status(type)

    def take(self, game, actionargs):
        # The ticket can only be taken after the machine has dispensed it.
        if not self.dispensed:
            say(self.msg_blocked)
        else:
            super().take(game, actionargs)

    def use(self, game, actionargs):
        """Using the ticket near the DAEMON is shorthand for giving it to him."""
        accessible = game.player.current_room.get_all_accessible_contents()
        if game.thing_list["driverDaemon"] in accessible:
            args = actionargs.copy()
            args["iobj"] = "daemon"
            self.give_to(game, args)
        else:
            super().use(game, actionargs)

    def give_to(self, game, actionargs):
        """Hand the ticket over; only the driver DAEMON accepts it."""
        thing_to_receive = game.get_thing_by_name(actionargs["iobj"], False)
        if thing_to_receive is game.thing_list["driverDaemon"]:
            message = "The DAEMON nods, takes your ticket, barely looking at you, and steps aside, granting access to the bus."
            say(message)
            self.grant_bus_access(game, actionargs)
        else:
            # FIX: the recipient is the indirect object ("iobj"), not "dobj"
            # (matches the pattern used by RubberDuck and Cheese).
            thing_to_receive = Utilities.find_by_name(actionargs["iobj"], game.thing_list)
            if thing_to_receive.can_receive:
                say("The {} doesn't want the ticket.".format(thing_to_receive.name))
            else:
                say("You cannot give anything to the {}".format(thing_to_receive.name))

    def grant_bus_access(self, game, actionargs):
        """Consume the ticket and swap the locked bus for the usable one."""
        accessible = game.player.current_room.get_all_accessible_contents()
        if self in accessible:
            game.player.current_room.remove_thing(self)
        elif game.player.is_in_inventory(self):
            game.player.remove_from_inventory(self)
        game.room_list["roomG"].remove_thing(game.thing_list["busLocked"])
        game.room_list["roomG"].add_thing(game.thing_list["bus"])
        game.room_list["roomG"].bus = game.thing_list["bus"]
class Key(Item):
    """An Item that operates exactly one Lock (``self.lock``)."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.msg_no_lock = "There is nothing to use the " + self.name + " with!"
        self.lock = None  # the Lock this key operates; wired up by game data

    def get_status(self, type=None):
        return super().get_status("Key" if type is None else type)

    def use(self, game, actionargs):
        """Using the key means putting it into its lock, when reachable."""
        reachable = game.player.current_room.get_all_accessible_contents()
        if self.lock not in reachable:
            say("There isn't anything that works with the " + self.name + "!")
            return
        args = actionargs.copy()
        args["iobj"] = self.lock.name
        self.put_in(game, args)
class Drink(Item):
    """A drinkable Item; using it is the same as drinking it."""

    def __init__(self, id, name):
        super().__init__(id, name)

    def get_status(self, type=None):
        return super().get_status("Drink" if type is None else type)

    def use(self, game, actionargs):
        self.drink(game, actionargs)

    def drink(self, game, actionargs):
        say("You take a sip of the {}.".format(self.name))
class Wine(Drink):
    """Wine: drinking it in room E (before tipping the piano player) makes
    the player drunk; anywhere else it behaves like any other Drink."""

    def __init__(self, id, name):
        super().__init__(id, name)

    def get_status(self, type=None):
        return super().get_status("Wine" if type is None else type)

    def drink(self, game, actionargs):
        # Loosen up only in room E and only before the tip has been given
        # (short-circuit keeps the piano lookup restricted to room E).
        if game.player.current_room is game.room_list["roomE"] \
                and not game.thing_list["piano"].tip_received:
            say("You drink some wine and start to loosen up...")
            game.player.drunk = True
        else:
            super().drink(game, actionargs)
class Newspaper(Item):
    """A readable item; its article text lives in ``description`` and is
    shown through the inherited Thing.read -> look path."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_be_read = True

    def get_status(self, type=None):
        return super().get_status("Newspaper" if type is None else type)

    def open(self, game, actionargs):
        # "Opening" a newspaper just reads it.
        self.read(game, actionargs)
class Debugger(Item):
    """A sprayable can of Debugger; spraying it into the air does nothing
    useful on its own."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_be_sprayed = True

    def get_status(self, type=None):
        return super().get_status("Debugger" if type is None else type)

    def spray(self, game, actionargs):
        say("You spray the Debugger in the air. Nothing happens.")
class Feature(Thing):
    """A fixed Thing that can be neither taken nor dropped."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_be_taken = False
        self.can_be_dropped = False
        self.msg_cannot_take = "The {} is fixed in place.".format(self.name)

    def get_status(self, type=None):
        return super().get_status("Feature" if type is None else type)
class Seat(Feature):
    """A Feature the player may sit on for a purely narrative rest."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.msg_sit = "You sit on the {}. After resting for some time "\
                       "on the comfortable {}, you get back up, ready "\
                       "to continue exploring.".format(self.name, self.name)

    def get_status(self, type=None):
        return super().get_status("Seat" if type is None else type)

    def sit(self, game, actionargs):
        say(self.msg_sit)
class Lock(Feature):
    """A Feature that reacts to one specific key item.

    Depending on its flags it either opens a blocked exit (``door_lock``)
    or dispenses an item (``item_dispenser``) the first time its key is
    inserted. Key, exits, messages and item are wired up by game data
    after construction.
    """
    def __init__(self, id, name):
        super().__init__(id, name)
        self.item_dispenser = False
        self.door_lock = False
        # True once the lock has been successfully operated; it only works once.
        self.toggled = False
        self.controlled_exit = None
        self.open_exit = None
        self.key = None
        # Prepositions accepted by receive_item (e.g. ["in"], ["on"]).
        self.receive_preps = []
        self.already_used_msg = "The " + self.name + " has already been used!"
        # NOTE(review): incompatible_msg is defined but never said anywhere
        # in this class — inserting a non-key item is currently silent.
        self.incompatible_msg = "The " + self.name + " can not receive that item!"
        self.msg_toggled = ""
        self.key_consumed = False
    def get_status(self, type=None):
        if type is None:
            type = "Lock"
        return super().get_status(type)
    def receive_item(self, game, item, prep):
        if prep in self.receive_preps:
            if self.toggled:
                say(self.already_used_msg)
            elif item == self.key:
                say("You put the {} {} the {}.".format(item.name, prep, self.name))
                self.toggle(game)
                # Optionally consume the key from wherever it currently is.
                if self.key_consumed:
                    accessible = game.player.current_room.get_all_accessible_contents()
                    if item in accessible:
                        game.player.current_room.remove_thing(item)
                    elif game.player.is_in_inventory(item):
                        game.player.remove_from_inventory(item)
        else:
            say("You can't put things {} the {}.".format(prep, self.name))
    # use
    def use(self, game, actionargs):
        # Determine if the key is accessible in the Room or in the Player's inventory
        accessible = game.player.current_room.get_all_accessible_contents()
        if self.key in accessible or game.player.is_in_inventory(self.key):
            self.receive_item(game, self.key, "in")
        else:
            say("You don't have anything that works with the " + self.name + "!")
    def unlock_exit(self, game):
        """Swap the blocked exit for the open one in every direction it occupies."""
        # Find direction(s) of the Exit
        directions = list()
        for dir in game.player.current_room.exits:
            if game.player.current_room.exits[dir] == self.controlled_exit:
                directions.append(dir)
        # Remove the Exit from the current room
        game.player.current_room.remove_exit(self.controlled_exit)
        # Add the "opened" Exit to the current room in the directions of the previous Exit
        for dir in directions:
            game.player.current_room.add_exit(self.open_exit, dir)
    def toggle(self, game):
        """Perform the lock's one-time effect and announce it."""
        if self.door_lock:
            self.unlock_exit(game)
        elif self.item_dispenser:
            self.dispense_item(game)
        self.toggled = True
        say(self.msg_toggled)
    def dispense_item(self, game):
        # NOTE(review): self.item is not set in __init__ — presumably wired
        # up by game data for dispenser locks; verify against the loader.
        game.player.add_to_inventory(self.item)
class Input(Feature):
    """A feature that you can input text into (like a keyboard).

    By default there is a single correct answer which, when entered,
    triggers carry_out_action() exactly once.
    """

    def __init__(self, id, name):
        super().__init__(id, name)
        # True if the input only functions once
        self.one_time_use = True
        self.triggers_once = True
        # True if the correct answer has already been triggered
        self.triggered = False
        self.msg_prompt = "What do you input?"
        self.msg_yn_prompt = "Would you like to input something? (y/n)"
        self.answer = "ANSWER"
        self.msg_correct_answer = "Correct!"
        self.msg_incorrect_answer = "Nothing happens."
        self.msg_already_triggered = "Nothing happens."
        self.msg_already_used = "There is nothing left to use it for."

    def _spent(self):
        # A one-shot input that has already fired is spent.
        return self.one_time_use and self.triggered

    def look(self, game, actionargs):
        say(self.get_desc())
        # Only offer the prompt while the input can still do something.
        if not self._spent() and game.get_yn_answer(self.msg_yn_prompt):
            self.get_input(game, actionargs)

    def use(self, game, actionargs):
        say(self.get_desc())
        self.get_input(game, actionargs)

    def get_input(self, game, actionargs):
        if self._spent():
            say(self.msg_already_used)
            return
        if not game.get_word_answer(self.msg_prompt, self.answer):
            say(self.msg_incorrect_answer)
            return
        if self.triggers_once and self.triggered:
            say(self.msg_already_triggered)
        else:
            self.triggered = True
            say(self.msg_correct_answer)
            self.carry_out_action(game, actionargs)

    def carry_out_action(self, game, actionargs):
        # Hook for subclasses; called once on a correct answer.
        print("[[default action...]]")
class InputBalconyWindow(Input):
    """the class for the input device on the balcony that opens the window"""

    def get_status(self, type=None):
        # Accept an optional type tag like the sibling Input subclasses do
        # (the old no-argument signature was flagged as "not working").
        if type is None:
            type = "InputBalconyWindow"
        return super().get_status(type)

    # This is the function called on a successful answer
    def carry_out_action(self, game, actionargs):
        # Open the window by swapping the closed exit for the open one.
        game.room_list["roomA"].remove_exit(game.thing_list["balconyWindowClosed"])
        game.room_list["roomA"].add_exit(game.thing_list["balconyWindowOpen"], "north")
        # If the player left the book behind, hand it over so it is not lost.
        if not game.player.is_in_inventory(game.thing_list["book"]):
            book_message = "The force of the window opening has left the book on the floor. " \
                           "Curious, you decide to pick it up."
            say(book_message)
            game.player.current_room.remove_thing(game.thing_list["book"])
            game.player.add_to_inventory(game.thing_list["book"])
class InputPuzzle1(Input):
    """the class for the input device in puzzle 1"""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.msg_prompt = "What do you input into the control panel?"
        # Answer to the hex-color path riddle displayed by get_desc().
        self.answer = "gone"
        self.msg_correct_answer = \
            "The Control Panel displays this message: \n" \
            "<DIGITAL_TEXT>Crystals are gone, shutting down and switching to manual monitoring system.</>\n" \
            "All of the monitors in the room turn off, and it is now pitch black. \n" \
            "Your Documentation Tome begins to glow. Opening it, you see a new function appear: LED. " \
            "To use this function, input \"call LED\". You try it, and the lights in the room turn back on."
        # "The light from the tome fades away, and the room is again completely dark."

    def get_status(self):
        return super().get_status("InputPuzzle1")

    def get_desc(self):
        # The puzzle clue is shown only until the correct answer is given.
        if not self.triggered:
            desc = \
                "The Control Panel has a large screen and a keyboard below it. On the screen is this message: \n" \
                "<DIGITAL_TEXT>Error: Crystals have disappeared without a TRACE. " \
                "Before turning offline, the monitoring system detected " \
                "four distinct paths the crystals took: \n" \
                "Path 1: FFFFF0 -> FFFF99 -> FF69B4 -> C19A6B -> 2A3439 -> 614051\n" \
                "Path 2: FFFF99 -> FF69B4 -> C19A6B -> FFFFF0 -> FFFF99\n" \
                "Path 3: 007FFF -> 800020 -> C19A6B -> FFFFF0\n" \
                "Path 4: 800020 -> FFFF99 -> 228B22 -> 614051 -> 228B22 -> FF69B4 -> 007FFF\n" \
                "Please input the current location of the crystals...</>"
        else:
            desc = "The control panel's screen is now blank."
        say(desc)

    # This is the function called on a successful answer
    def carry_out_action(self, game, actionargs):
        # learn function
        game.player.learn_function("led")
        # Swap the broken monitors for the fixed ones in the room.
        game.player.current_room.remove_thing(game.thing_list["puzzle1MonitorsBroken"])
        game.player.current_room.add_thing(game.thing_list["puzzle1MonitorsFixed"])
class InputPuzzle2(Input):
    """the class for the input device in puzzle 2"""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.msg_prompt = "What do you type to complete the email?"
        # The missing final word of the on-screen apology email.
        self.answer = "alone"
        self.msg_correct_answer = \
            "You type the final word and send the email. You feel better now that this apology " \
            "has been sent to those who deserve to hear it. As you reflect on your bizarre adventure, " \
            "and dream of being free of this tower, you wonder when you will next see your own family. " \
            "Just then, your Tome of Documentation starts to get very warm. You open it and see a new function " \
            "has appeared: KIN. You can use it by saying \"call KIN on _____\". It will reconnect something with " \
            "it's relatives."
        self.msg_incorrect_answer = "You think harder, and realize that is not the right word to complete the email."

    def get_status(self):
        return super().get_status("InputPuzzle2")

    def get_desc(self):
        # Show the unfinished email until solved, then a blank screen.
        if not self.triggered:
            desc = "The computer is on, and on the screen it appears as though someone was composing an email. " \
                   "Here is what is says: \n" \
                   "<DIGITAL_TEXT>Dear family, \n" \
                   "I'm sorry for disrupting our relationship database. " \
                   "My actions have caught up to me. Now I must confess that my fears have become reality, " \
                   "for, now I am ...</> \n" \
                   "The email is missing a final word. What should you type before sending the email?"
        else:
            desc = "The computer's screen is now blank."
        say(desc)

    # This is the function called on a successful answer
    def carry_out_action(self, game, actionargs):
        # learn function
        game.player.learn_function("kin")
class InputPuzzle3(Input):
    """the class for the input device in puzzle 3"""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.msg_prompt = "What book do you search for?"
        # Title of the book the library search terminal must find.
        self.answer = "hackers assets"
        self.msg_correct_answer = \
            "After entering the book title, you hear a buzzing as a drone flys from behind the table. " \
            "It zooms through the shelves before a small claw extends out and grasps a book. " \
            "The drone clumsily delivers the book onto the table. You flip through \"Hackers Assets\" " \
            "and learn about many tools for manipulating hardware and software. " \
            "You discover that sometimes, the best answer is brute force. " \
            "Just then your Tome of Documentation begins to thrash wildly. " \
            "It falls on the table and opens, and you see a new function appear: RAM. " \
            "This will apply a great force to something. " \
            "To use it, say \"call RAM on _____\". The drone is startled, grabs the hackers book and flies away."
        self.msg_incorrect_answer = "The search comes up empty, there are no books by that name."

    def get_status(self):
        return super().get_status("InputPuzzle3")

    # This is the function called on a successful answer
    def carry_out_action(self, game, actionargs):
        # learn function
        game.player.learn_function("ram")

    def get_desc(self):
        # Clue until solved; afterwards the terminal refuses more rentals.
        if not self.triggered:
            desc = \
                "This touchscreen is used to search for books in the library. " \
                "Most are separated into two categories: <CLUE>hardware</> or <CLUE>software</>."
        else:
            desc = \
                "This touchscreen is used to search for books in the library. " \
                "A message comes up saying you have reached your maximum rentals."
        say(desc)
class InputPuzzle4(Input):
    """the class for the input device in puzzle 4"""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.msg_prompt = "What status do you enter?"
        # System status deduced from the processors' error messages.
        self.answer = "bricked"
        self.msg_correct_answer = \
            "The computer reads: \n" \
            "<DIGITAL_TEXT>\"BRICKED\" status detected. Initializing troubleshooting procedure.</> \n" \
            "Just then a small robot on wheels emerges from the computer! Startled, you jump back. " \
            "The tiny robot rolls over towards a large plug which seems to be powering the entire system. " \
            "With it's mechanical arms, it grabs the plug and yanks it from the wall. " \
            "The processors and computer shut down, and all of the awful noises and smoke stop. " \
            "After five seconds, the robot plugs the computer back in, and rolls back into the computer." \
            "The computer screen reads: <DIGITAL_TEXT>Booting up...</>. It appears you've fixed the system! " \
            "You suddenly feel a jolt, as if your Tome of Documentation just shocked you. " \
            "Opening its pages you see a new function appear: TIC. This function will make a machine malfunction; " \
            "to use it, say \"call TIC on _____\". "
        self.msg_incorrect_answer = "The computer reads: <DIGITAL_TEXT>You've entered an unknown status. " \
                                    "Please enter correct status.</>"

    def get_status(self):
        return super().get_status("InputPuzzle4")

    # This is the function called on a successful answer
    def carry_out_action(self, game, actionargs):
        # learn function
        game.player.learn_function("tic")
        # Swap the broken processors for the fixed ones in the room.
        game.player.current_room.remove_thing(game.thing_list["puzzle4ProcessorsBroken"])
        game.player.current_room.add_thing(game.thing_list["puzzle4ProcessorsFixed"])

    def get_desc(self):
        if not self.triggered:
            # The closing </> tag was missing here, leaving the
            # DIGITAL_TEXT markup unbalanced in the rendered output.
            desc = \
                "This large computer seems to be controlling the whole mechanical system in this room. It has keyboard, " \
                "and a large screen displaying this message: \n" \
                "<DIGITAL_TEXT>ERROR! Processing system is malfunctioning. Must diagnose problem. " \
                "Check error messages from processors, and input system status below to initiate troubleshooting.</>"
        else:
            desc = \
                "This large computer seems to be controlling the whole mechanical system in this room. It has keyboard, " \
                "and a large screen displaying this message: \n" \
                "<DIGITAL_TEXT>Booting up...</>"
        say(desc)
class InputPuzzle5(Input):
    """the class for the input device in puzzle 5"""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.msg_prompt = "What password do you enter?"
        # Derived from the off-by-one incorrect attempts shown in get_desc().
        self.answer = "finesse"
        self.msg_correct_answer = \
            "The computer unlocks! looking at the monitor, it appears an online course was in progress, " \
            "for increasing coordination. After skimming through a few pages, " \
            "you notice your Tome of Documentation begin to vibrate. " \
            "You open it up and see a new function has been added: PRO. " \
            "To use it, say \"call PRO\". This will temporarilty increase your skill and dexterity. " \
            "You exit the course and turn off the computer."
        self.msg_incorrect_answer = "You've entered the incorrect password."

    def get_status(self):
        return super().get_status("InputPuzzle5")

    # This is the function called on a successful answer
    def carry_out_action(self, game, actionargs):
        # learn function
        game.player.learn_function("pro")

    def get_desc(self):
        # Show the password clue until solved.
        if not self.triggered:
            desc = \
                "This is a fancy computer with a large monitor and a standard US QWERTY keyboard. " \
                "The computer is on, but it appears to be stuck on a log-in screen. I" \
                "t looks like there have been a few incorrect password attempts, " \
                "and you can read them! What kind of security is that? " \
                "Who ever previously tried to log in experienced <CLUE>off-by-one-errors</> " \
                "with every key! The incorrect passwords were: \n" \
                "<DIGITAL_TEXT>COMDEXS \nGUMWEED \nROBSZED \nTUBREAR</>\n" \
                "You wonder what the actual password might be."
        else:
            desc = "The computer is turned off."
        say(desc)
class MetaPuzzleInput(Input):
    """the class for the input device in the meta puzzle"""

    def __init__(self, id, name):
        super().__init__(id, name)
        # Two-stage prompt: first the function sequence, then the riddle.
        self.msg_prompt = "What do you type?"
        self.msg_prompt2 = "What do you type?"
        self.answer = "pro ram kin tic led"
        self.answer2 = "geek"
        self.msg_correct_answer = \
            "Correct! More text appears on the screen: \n" \
            "<DIGITAL_TEXT>Now you should have learned something about yourself. What are you?</>"
        self.msg_correct_answer2 = \
            "The Mother DAEMON looks at you and silently nods. She comes over and guides your hands on the keyboard. " \
            "You press Control... then Alt... then Delete. Everything goes black.\n" \
            "When you come to, you find yourself sitting in front of your computer at home. " \
            "You have escaped the tower! And now you have a craving for cheese..."
        self.msg_incorrect_answer = "<DIGITAL_TEXT>Incorrect sequence.</>"
        self.msg_incorrect_answer2 = "<DIGITAL_TEXT>Incorrect. Look deeper.</>"
        self.description = \
            "This computer is identical to your computer at home. It is displaying a simple message: \n" \
            "<DIGITAL_TEXT>Enter in all five special functions (separated by spaces) in the correct order.</>"

    def get_status(self):
        return super().get_status("MetaPuzzleInput")

    # This is the function called on a successful answer
    def carry_out_action(self, game, actionargs):
        # Both answers were correct: end the game.
        game.game_over = True
        game.end()

    def get_input(self, game, actionargs):
        # Overrides Input.get_input with a two-question sequence; the
        # ending only triggers when both answers are correct, in order.
        response1 = game.get_word_answer(self.msg_prompt, self.answer)
        if (response1):
            say(self.msg_correct_answer)
            response2 = game.get_word_answer(self.msg_prompt2, self.answer2)
            if (response2):
                say(self.msg_correct_answer2)
                self.carry_out_action(game, actionargs)
            else:
                say(self.msg_incorrect_answer2)
        else:
            say(self.msg_incorrect_answer)
class Sign(Feature):
    """A fixed feature whose text can be read."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_be_read = True

    def get_status(self):
        return super().get_status("Sign")
class Lever(Feature):
    """A wall lever that opens the secret passage once it can be reached."""

    def __init__(self, id, name):
        super().__init__(id, name)
        # The mouse blocks the lever until become_reachable() is called.
        self.is_reachable = False

    def get_status(self):
        return super().get_status("Lever")

    def use(self, game, actionargs):
        self.pull(game, actionargs)

    def pull(self, game, actionargs):
        if not self.is_reachable:
            say("You cannot reach the lever, the mouse is in the way.")
            return
        say("You pull the lever. You hear a rumbling sound behind you and turn as a section of "
            "the west wall slides away, revealing a tunnel leading off to the west.")
        # Replace the secret wall with the tunnel exit to the west.
        room = game.player.current_room
        room.remove_exit(game.thing_list["secretWall"])
        room.add_exit(game.thing_list["mousepadTunnel"], "west")

    def become_reachable(self):
        self.is_reachable = True

    def get_desc(self):
        if self.is_reachable:
            say("A large lever is attatched to the wall. It is not clear what it is connected to.")
        else:
            say("Some kind of lever is attached to the wall. You can't get a closer look with the mouse in the way.")
class Computer(Feature):
    """The giant lobby machine that accepts the five key storage items.

    Each successfully inserted key item lights another light on the
    ornate lobby door.
    """

    def __init__(self, id, name):
        super().__init__(id, name)
        # Item ids that may be inserted into the machine.
        self.key_items = ["floppyDisk", "cartridge", "tape", "cd", "flashdrive"]
        self.inserted_things = list()
        self.description_text = \
            "This massive machine takes up most of the east wall. It is some sort of system of large rectangular devices all " \
            "connected with various wires. There are lights blinking, and you hear whirring and clicking sounds. " \
            "You can only assume it functions as some type of computer. " \
            "There appears to be a handful of unique ports in the machine where something could be inserted."

    def get_status(self):
        return super().get_status("Computer")

    def receive_item(self, game, item, prep):
        """Insert a key item "in" the computer; reject anything else."""
        if prep == "in":
            if item.id in self.key_items:
                game.player.remove_from_inventory(item)
                self.inserted_things.append(item)
                # The message only needs the item name; the extra format
                # arguments previously passed here were unused.
                say("You find an appropriate looking place to insert the {} into the "
                    "computer.".format(item.name))
                game.thing_list["lobbyOrnateDoor"].add_light()
            else:
                say("You can't find anywhere in the computer to put the {}.".format(item.name))
        else:
            say("You can't put things {} the computer.".format(prep))

    def get_desc(self):
        # Append the list of inserted items, if any, to the description.
        text = self.description_text
        if self.inserted_things:
            text += " You have inserted"
            text += Utilities.list_to_words([o.get_list_name() for o in self.inserted_things])
            text += "."
        say(text)
class Clock(Feature):
    """A readable clock that reports the current game time."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_be_read = True

    def get_status(self):
        return super().get_status("Clock")

    def look(self, game, actionargs):
        say(f"The time is <DIGITAL_TEXT>t={game.game_time!s}</>")
class ComplexClock(Feature):
    """The ornate Clock Room clock; shows its description plus the time."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_be_read = True

    def get_status(self, type=None):
        return super().get_status("ComplexClock" if type is None else type)

    def look(self, game, actionargs):
        say(self.description)
        say(f"The time is <DIGITAL_TEXT>t={game.game_time!s}</>")
class Piano(Feature):
    """Playable piano; performance quality depends on the player's state."""

    def __init__(self, id, name):
        super().__init__(id, name)
        # One-shot rewards for good/great performances.
        self.tip_received = False
        self.daemon_summoned = False
        self.msg_good = "You play the piano. Thanks to the wine, you're really groovin'. It sounds good!"
        self.msg_great = "You play the piano. Thanks to the PRO effects, you're unstoppable! It sounds great!"
        self.msg_bad = "You play the piano, but you feel a little stiff. It doesn't sound great. Maybe you'll play better if you loosen up somehow..."

    def get_status(self, type=None):
        return super().get_status("Piano" if type is None else type)

    def use(self, game, actionargs):
        self.play(game, actionargs)

    def play(self, game, actionargs):
        # PRO beats drunk beats sober.
        if game.player.pro:
            say(self.msg_great)
            self.play_great(game)
            return
        if game.player.drunk:
            say(self.msg_good)
            self.play_good(game)
            return
        say(self.msg_bad)

    def play_good(self, game):
        if self.tip_received:
            return
        self.tip_received = True
        game.thing_list["tipJar"].add_item(game.thing_list["coin"])
        print()
        say("You received a tip! A coin has appeared in the tip jar.")

    def play_great(self, game):
        # A great performance also earns the tip.
        self.play_good(game)
        if self.daemon_summoned:
            return
        self.daemon_summoned = True
        game.room_list["roomE"].add_thing(game.thing_list["DancingDaemon"])
        print()
        say("Your playing has attracted one of the tower's DAEMONs!")
        say(game.thing_list["DancingDaemon"].description)
class DancingDaemon(Feature):
    """A DAEMON summoned by great piano playing; dancing earns its floppy."""

    def __init__(self, id, name):
        super().__init__(id, name)
        # The reward item is handed over only once.
        self.floppy_received = False
        self.floppy = None
        self.msg_dance = "You dance with the DAEMON!"

    def get_status(self, type=None):
        return super().get_status("DancingDaemon" if type is None else type)

    def dance(self, game, actionargs):
        say(self.msg_dance)
        if not self.floppy_received:
            reward_msg = "The DAEMON gives you a " + self.floppy.name + "!"
            game.player.add_to_inventory(self.floppy)
            self.floppy_received = True
            say(reward_msg)
class Moth(Feature):
    """The giant moth guarding the web door.

    Every interaction is suppressed while the player stands in the
    unlit web room (roomI). Spraying it with the debugger drives it
    into the web; calling KIN while it is trapped frees it and yields
    the cartridge.
    """

    def __init__(self, id, name):
        super().__init__(id, name)
        # Set once the moth's cartridge has been handed over.
        self.floppy_received = False
        self.floppy = None
        self.been_sprayed = False
        self.msg_spray = "You spray the moth with the Debugger."
        self.msg_first_spray = "The moth flies into the opening, taking whatever is in its mouth with it " \
                               ", but leaving the door unguarded."
        self.msg_been_sprayed = "The moth flaps its wings in an attempt to get away."
        # True after the first spray chases the moth into the spider web.
        self.in_web = False
        self.msg_kin_not_in_web = "Hundreds of other moths appear. They appear to check on the " \
                                  "giant moth before flying away."
        self.msg_kin_in_web = "Hundreds of other moths appear. They work to free the giant moth " \
                              "from the web. The giant moth comes loose from the web, dropping a " \
                              "cartridge in your hand before flying away with its family."

    def get_status(self, type=None):
        if type is None:
            type = "Moth"
        return super().get_status(type)

    def err_message(self, game):
        """Say a "too dark" message and return True when roomI is unlit."""
        if game.player.current_room.id == "roomI" and \
                game.player.current_room.is_lit == False:
            say("You don't see a moth.")
            say("But then again you don't really see much of anything...")
            return True
        else:
            return False

    def look(self, game, actionargs):
        if self.err_message(game):
            return
        self.get_desc(game)

    def get_desc(self, game):
        message = self.description
        message += " It seems to be calling out for help... but to who?"
        say(message)

    def spray(self, game, actionargs):
        if self.err_message(game):
            return
        # Spraying requires the debugger item in the inventory
        # (idiomatic any() replaces the old manual flag-and-break loop).
        if any(item.id == "debugger" for item in game.player.inventory):
            say(self.msg_spray)
            if self.been_sprayed:
                say(self.msg_been_sprayed)
            else:
                say(self.msg_first_spray)
                self.been_sprayed = True
                # The moth retreats from roomH into the web room (roomI),
                # leaving the web door passable.
                game.room_list["roomH"].remove_thing(self)
                game.room_list["roomI"].add_thing(self)
                game.room_list["roomI"].contains_moth = True
                self.in_web = True
                game.thing_list["webDoor"].can_go = True
        else:
            say("You don't have anything to spray the moth with.")

    def spray_with(self, game, actionargs):
        if self.err_message(game):
            return
        obj = Utilities.find_by_name(actionargs["iobj"], game.thing_list)
        if obj.id == "debugger":
            self.spray(game, actionargs)
        else:
            say("You cannot spray the moth with that.")

    def kin(self, game, actionargs):
        if self.err_message(game):
            return
        if not self.in_web:
            say(self.msg_kin_not_in_web)
        else:
            say(self.msg_kin_in_web)
            if not self.floppy_received:
                # Freeing the moth yields the cartridge and removes the
                # moth from play.
                message = "You received a " + self.floppy.name + "!"
                game.player.add_to_inventory(self.floppy)
                self.floppy_received = True
                self.in_web = False
                game.room_list["roomI"].remove_thing(self)
                game.room_list["roomI"].contains_moth = False
                say(message)

    # --- Standard actions, each gated on the darkness check above. ---
    def look_in(self, game, actionargs):
        if not self.err_message(game):
            super().look_in(game, actionargs)

    def read(self, game, actionargs):
        if not self.err_message(game):
            super().read(game, actionargs)

    def open(self, game, actionargs):
        if not self.err_message(game):
            super().open(game, actionargs)

    def close(self, game, actionargs):
        if not self.err_message(game):
            super().close(game, actionargs)

    def take(self, game, actionargs):
        if not self.err_message(game):
            super().take(game, actionargs)

    def drop(self, game, actionargs):
        if not self.err_message(game):
            super().drop(game, actionargs)

    def put_down(self, game, actionargs):
        if not self.err_message(game):
            super().put_down(game, actionargs)

    def give_to(self, game, actionargs):
        if not self.err_message(game):
            super().give_to(game, actionargs)

    def go(self, game, actionargs):
        if not self.err_message(game):
            super().go(game, actionargs)

    def put_in(self, game, actionargs):
        if not self.err_message(game):
            super().receive_item(game, actionargs, "in")

    def put_on(self, game, actionargs):
        if not self.err_message(game):
            super().receive_item(game, actionargs, "on")

    def pull(self, game, actionargs):
        if not self.err_message(game):
            super().pull(game, actionargs)

    def receive_item(self, game, actionargs, prep):
        if not self.err_message(game):
            super().receive_item(game, actionargs, prep)

    def use(self, game, actionargs):
        if not self.err_message(game):
            super().use(game, actionargs)

    def dance(self, game, actionargs):
        if not self.err_message(game):
            super().dance(game, actionargs)

    def eat(self, game, actionargs):
        if not self.err_message(game):
            super().eat(game, actionargs)

    def drink(self, game, actionargs):
        if not self.err_message(game):
            super().drink(game, actionargs)

    def play(self, game, actionargs):
        if not self.err_message(game):
            super().play(game, actionargs)

    def talk(self, game, actionargs):
        if not self.err_message(game):
            super().talk(game, actionargs)

    def hit(self, game, actionargs):
        if not self.err_message(game):
            super().hit(game, actionargs)

    def ram(self, game, actionargs):
        if not self.err_message(game):
            super().ram(game, actionargs)

    def tic(self, game, actionargs):
        if not self.err_message(game):
            super().tic(game, actionargs)

    def sit(self, game, actionargs):
        if not self.err_message(game):
            super().sit(game, actionargs)
class Tape(Item):
    """The tape key item; unusable while the web room (roomI) is dark."""

    def __init__(self, id, name):
        super().__init__(id, name)

    def get_status(self, type=None):
        return super().get_status("Tape" if type is None else type)

    def err_message(self, game):
        """Say a "too dark" message and return True when roomI is unlit."""
        room = game.player.current_room
        if room.id == "roomI" and room.is_lit == False:
            say("You don't see a tape.")
            say("Then again you can't really see much of anything...")
            return True
        return False

    # --- Standard actions, each gated on the darkness check above. ---
    def spray(self, game, actionargs):
        if self.err_message(game):
            return
        super().spray(game, actionargs)

    def spray_with(self, game, actionargs):
        if self.err_message(game):
            return
        super().spray_with(game, actionargs)

    def look_in(self, game, actionargs):
        if self.err_message(game):
            return
        super().look_in(game, actionargs)

    def read(self, game, actionargs):
        if self.err_message(game):
            return
        super().read(game, actionargs)

    def open(self, game, actionargs):
        if self.err_message(game):
            return
        super().open(game, actionargs)

    def close(self, game, actionargs):
        if self.err_message(game):
            return
        super().close(game, actionargs)

    def take(self, game, actionargs):
        if self.err_message(game):
            return
        super().take(game, actionargs)

    def drop(self, game, actionargs):
        if self.err_message(game):
            return
        super().drop(game, actionargs)

    def put_down(self, game, actionargs):
        if self.err_message(game):
            return
        super().put_down(game, actionargs)

    def give_to(self, game, actionargs):
        if self.err_message(game):
            return
        super().give_to(game, actionargs)

    def go(self, game, actionargs):
        if self.err_message(game):
            return
        super().go(game, actionargs)

    def put_in(self, game, actionargs):
        if self.err_message(game):
            return
        super().receive_item(game, actionargs, "in")

    def put_on(self, game, actionargs):
        if self.err_message(game):
            return
        super().receive_item(game, actionargs, "on")

    def pull(self, game, actionargs):
        if self.err_message(game):
            return
        super().pull(game, actionargs)

    def receive_item(self, game, actionargs, prep):
        if self.err_message(game):
            return
        super().receive_item(game, actionargs, prep)

    def use(self, game, actionargs):
        if self.err_message(game):
            return
        super().use(game, actionargs)

    def dance(self, game, actionargs):
        if self.err_message(game):
            return
        super().dance(game, actionargs)

    def eat(self, game, actionargs):
        if self.err_message(game):
            return
        super().eat(game, actionargs)

    def drink(self, game, actionargs):
        if self.err_message(game):
            return
        super().drink(game, actionargs)

    def play(self, game, actionargs):
        if self.err_message(game):
            return
        super().play(game, actionargs)

    def talk(self, game, actionargs):
        if self.err_message(game):
            return
        super().talk(game, actionargs)

    def hit(self, game, actionargs):
        if self.err_message(game):
            return
        super().hit(game, actionargs)

    def ram(self, game, actionargs):
        if self.err_message(game):
            return
        super().ram(game, actionargs)

    def kin(self, game, actionargs):
        if self.err_message(game):
            return
        super().kin(game, actionargs)

    def tic(self, game, actionargs):
        if self.err_message(game):
            return
        super().tic(game, actionargs)

    def sit(self, game, actionargs):
        if self.err_message(game):
            return
        super().sit(game, actionargs)
class ShiftyMan(Feature):
    """An NPC who hints at where the five DAEMONs can be found."""

    def __init__(self, id, name):
        super().__init__(id, name)

    def get_status(self, type=None):
        return super().get_status("ShiftyMan" if type is None else type)

    def talk(self, game, actionargs):
        hints = (
            "<SPOKEN_TEXT>There are five DAEMONS in the Tower who stole some very important things from my computer:</>",
            "<SPOKEN_TEXT>One likes to play pranks.</>",
            "<SPOKEN_TEXT>One likes to dance - but only likes very fast music.</>",
            "<SPOKEN_TEXT>One got a job as a bus driver.</>",
            "<SPOKEN_TEXT>One has been hanging out with a spider.</>",
            "<SPOKEN_TEXT>One is the Tower custodian, and keeps a strange pet.</>",
        )
        for line in hints:
            say(line)
class Spider(Feature):
    """The spider in the web room.

    All interaction is blocked while the room is dark, and spraying it
    with the debugger only angers it.
    """

    def __init__(self, id, name):
        super().__init__(id, name)
        self.msg_spray = "You spray the spider with the Debugger. " \
                         "The spider angrily lunges toward you, and " \
                         "you fall backwards, narrowly avoiding being bitten. "

    def get_status(self, type=None):
        if type is None:
            type = "Spider"
        return super().get_status(type)

    def err_message(self, game):
        """Say a "too dark" message and return True when roomI is unlit."""
        if game.player.current_room.id == "roomI" and \
                game.player.current_room.is_lit == False:
            say("You don't see a spider.")
            say("But then again you don't really see much of anything...")
            return True
        else:
            return False

    def spray(self, game, actionargs):
        if self.err_message(game):
            return
        # Spraying requires the debugger item in the inventory
        # (idiomatic any() replaces the old manual flag-and-break loop).
        if any(item.id == "debugger" for item in game.player.inventory):
            say(self.msg_spray)
        else:
            say("You don't have anything to spray the spider with.")

    def spray_with(self, game, actionargs):
        if self.err_message(game):
            return
        obj = Utilities.find_by_name(actionargs["iobj"], game.thing_list)
        if obj.id == "debugger":
            self.spray(game, actionargs)
        else:
            say("You cannot spray the spider with that.")

    # --- Standard actions, each gated on the darkness check above. ---
    def look_in(self, game, actionargs):
        if not self.err_message(game):
            super().look_in(game, actionargs)

    def read(self, game, actionargs):
        if not self.err_message(game):
            super().read(game, actionargs)

    def open(self, game, actionargs):
        if not self.err_message(game):
            super().open(game, actionargs)

    def close(self, game, actionargs):
        if not self.err_message(game):
            super().close(game, actionargs)

    def take(self, game, actionargs):
        if not self.err_message(game):
            super().take(game, actionargs)

    def drop(self, game, actionargs):
        if not self.err_message(game):
            super().drop(game, actionargs)

    def put_down(self, game, actionargs):
        if not self.err_message(game):
            super().put_down(game, actionargs)

    def give_to(self, game, actionargs):
        if not self.err_message(game):
            super().give_to(game, actionargs)

    def go(self, game, actionargs):
        if not self.err_message(game):
            super().go(game, actionargs)

    def put_in(self, game, actionargs):
        if not self.err_message(game):
            super().receive_item(game, actionargs, "in")

    def put_on(self, game, actionargs):
        if not self.err_message(game):
            super().receive_item(game, actionargs, "on")

    def pull(self, game, actionargs):
        if not self.err_message(game):
            super().pull(game, actionargs)

    def receive_item(self, game, actionargs, prep):
        if not self.err_message(game):
            super().receive_item(game, actionargs, prep)

    def use(self, game, actionargs):
        if not self.err_message(game):
            super().use(game, actionargs)

    def dance(self, game, actionargs):
        if not self.err_message(game):
            super().dance(game, actionargs)

    def eat(self, game, actionargs):
        if not self.err_message(game):
            super().eat(game, actionargs)

    def drink(self, game, actionargs):
        if not self.err_message(game):
            super().drink(game, actionargs)

    def play(self, game, actionargs):
        if not self.err_message(game):
            super().play(game, actionargs)

    def talk(self, game, actionargs):
        if not self.err_message(game):
            super().talk(game, actionargs)

    def hit(self, game, actionargs):
        if not self.err_message(game):
            super().hit(game, actionargs)

    def ram(self, game, actionargs):
        if not self.err_message(game):
            super().ram(game, actionargs)

    def kin(self, game, actionargs):
        if not self.err_message(game):
            super().kin(game, actionargs)

    def tic(self, game, actionargs):
        if not self.err_message(game):
            super().tic(game, actionargs)

    def sit(self, game, actionargs):
        # BUG FIX: previously delegated to super().tic() instead of
        # super().sit() (copy-paste error).
        if not self.err_message(game):
            super().sit(game, actionargs)
class Fireplace(Feature):
    """A burning fireplace; looking inside reveals only the fire."""

    def __init__(self, id, name):
        super().__init__(id, name)
        self.look_in_msg = ("You look in the fireplace, but "
                            "you don't see anything other than the "
                            "burning fire.")

    def get_status(self, type=None):
        return super().get_status("Fireplace" if type is None else type)

    def look_in(self, game, actionargs):
        say(self.look_in_msg)
class Freezer(Feature):
    """Freezer that keeps the chunk of ice (and the laptop inside it) frozen.

    A single "tic" makes it malfunction: the ice melts, frozenLaptop is
    swapped for brokenLaptop in the room, and a flashdrive is dropped.
    """
    def __init__(self, id, name):
        super().__init__(id, name)
        self.is_malfunctioned = False  # becomes True after the one-shot "tic" event
    def get_status(self, type=None):
        """Serialize state; the type tag defaults to "Freezer"."""
        if type is None:
            type = "Freezer"
        return super().get_status(type)
    def tic(self, game, actionargs):
        """One-shot event: break the freezer and rearrange the room's things."""
        if not self.is_malfunctioned:
            self.is_malfunctioned = True
            malfunction_text = \
                "The freezer buzzes and groans. It begins to shake before finally turning off. " \
                "The chunk of ice begins to drip, and then crack. Finally, the ice falls apart, and the laptop " \
                "comes crashing down. The screen cracks and the frame gets severely bent upon impact. " \
                "A flashdrive pops out and slides across the floor."
            say(malfunction_text)
            # laptop frozenLaptop -> brokenLaptop -> drop flash drive
            game.player.current_room.remove_thing(game.thing_list["frozenLaptop"])
            game.player.current_room.add_thing(game.thing_list["brokenLaptop"])
            game.player.current_room.add_thing(game.thing_list["flashdrive"])
        else:
            say("Nothing happens.")
    def get_desc(self):
        """Describe the freezer according to its malfunction state."""
        if self.is_malfunctioned:
            desc_text = "This strange device is labeled as a \"<WRITTEN_TEXT>freezer</>\". " \
                        "It is turned off, and there is now a puddle of water next to it."
        else:
            # Typo fix in the player-facing text: "Is is making" -> "It is making".
            desc_text = "This strange device is labeled as a \"<WRITTEN_TEXT>freezer</>\". " \
                        "It is making a grumbling sound, and cold air is pouring from it. " \
                        "It is connected to a small platform on which is a chunk of ice. " \
                        "It seems to be keeping it frozen."
        say(desc_text)
class MotherDaemon(Feature):
    """End-game NPC; talking to it delivers the riddle-laden monologue."""
    def __init__(self, id, name):
        super().__init__(id, name)
        self.talk_msg = (
            "<SPOKEN_TEXT>Greetings. I am the mother of the DAEMON's. "
            "You've arrived here <CLUE>due to motions<SPOKEN_TEXT> out of your control. "
            "It is surprising you have made it this far- a great <CLUE>application<SPOKEN_TEXT> of your skills. "
            "I hope you are <CLUE>quite pleased<SPOKEN_TEXT>. "
            "You may be upset, and you might think that revenge is a <CLUE>small dish<SPOKEN_TEXT> best served cold. "
            "But hopefully by now you have at least learned what you truly are...</>"
        )
    def get_status(self, type=None):
        """Serialize state, tagging it as a "MotherDaemon" unless told otherwise."""
        return super().get_status("MotherDaemon" if type is None else type)
    def talk(self, game, actionargs):
        """Deliver the monologue."""
        say(self.talk_msg)
class Storage(Feature):
    """A Feature that can hold other things ("in" it or "on" it).

    Behavior is driven by flags set in __init__ and overridden by subclasses
    (Container, Surface, ...): whether the storage can be opened, whether it
    currently is open, and whether its contents are visible only while open.
    """
    def __init__(self, id, name):
        super().__init__(id, name)
        # Capability/state flags; subclasses adjust these in their own __init__.
        self.has_contents = True
        self.contents = []
        self.contents_accessible = True
        self.receive_preps = []  # prepositions accepted by receive_item, e.g. ["in"] or ["on"]
        self.contents_accessible_iff_open = True
        self.can_be_opened = False
        self.is_open = True
        # Canned player-facing messages.
        self.msg_already_opened = "The {} is already open.".format(self.name)
        self.msg_already_closed = "The {} is already closed.".format(self.name)
        self.msg_open = "You open the {}.".format(self.name)
        self.msg_close = "You close the {}.".format(self.name)
        self.msg_is_closed = "The {} is closed.".format(self.name)
        self.msg_look_in = "You look in the {}.".format(self.name)
    def get_status(self, type=None):
        # Serialize state; the type tag defaults to "Storage".
        if type is None:
            type = "Storage"
        return super().get_status(type)
    def receive_item(self, game, item, prep):
        # Move `item` from the player's inventory into this storage when the
        # preposition matches and the storage is not a closed openable.
        if self.has_contents and prep in self.receive_preps:
            if self.can_be_opened and not self.is_open:
                say(self.msg_is_closed)
            else:
                game.player.remove_from_inventory(item)
                self.add_item(item)
                say("You put the {} {} the {}.".format(item.name, prep, self.name))
        else:
            say("You can't put things {} the {}.".format(prep, self.name))
    def add_item(self, item):
        # Internal helper: append without checks or messages.
        self.contents.append(item)
    def remove_item(self, item):
        # Internal helper: remove without checks; raises ValueError if absent.
        self.contents.remove(item)
    def list_contents(self):
        # Build the " In it you see ..." suffix, or "" when nothing is visible.
        if self.receive_preps:
            prep = self.receive_preps[0]
        else:
            prep = "in"
        extra_sentence = ""
        # if contents is not empty it returns "True"
        if self.contents and self.contents_accessible:
            extra_sentence = "{} it you see".format(prep).capitalize()
            extra_sentence = " " + extra_sentence
            extra_sentence += Utilities.list_to_words([o.get_list_name() for o in self.contents])
            extra_sentence += "."
        return extra_sentence
    def get_desc(self):
        # Description plus the visible-contents sentence.
        desc_string = self.description
        desc_string += self.list_contents()
        say(desc_string)
    def get_list_name(self):
        # Listing name with visible contents appended in parentheses.
        list_string = self.list_name
        if self.receive_preps:
            prep = self.receive_preps[0]
        else:
            prep = "in"
        # if contents is not empty it returns "True"
        if self.contents and self.contents_accessible:
            list_string += " ({} which is".format(prep)
            list_string += Utilities.list_to_words([o.get_list_name() for o in self.contents])
            list_string += ")"
        return list_string
    def open(self, game, actionargs):
        # NOTE(review): msg_cannot_be_opened is not set in this class —
        # presumably inherited from Feature; verify.
        if not self.can_be_opened:
            say(self.msg_cannot_be_opened)
        else:
            if self.is_open:
                say(self.msg_already_opened)
            else:
                self.set_open()
                open_text = self.msg_open
                open_text += self.list_contents()
                say(open_text)
    def set_open(self):
        # Flip to open; expose contents when visibility is tied to openness.
        self.is_open = True
        if self.contents_accessible_iff_open:
            self.contents_accessible = True
    def close(self, game, actionargs):
        # Mirror of open(); messages come from the canned strings above.
        if not self.can_be_opened:
            say(self.msg_cannot_be_opened)
        else:
            if not self.is_open:
                say(self.msg_already_closed)
            else:
                self.set_closed()
                say(self.msg_close)
    def set_closed(self):
        # Flip to closed; hide contents when visibility is tied to openness.
        self.is_open = False
        if self.contents_accessible_iff_open:
            self.contents_accessible = False
    def look_in(self, game, actionargs):
        # NOTE(review): msg_cannot_look_in is also defined outside this class.
        # A closed openable is opened instead of merely looked into.
        if "in" not in self.receive_preps or not self.can_be_opened:
            say(self.msg_cannot_look_in)
        else:
            if self.is_open:
                look_text = self.msg_look_in
                look_text += self.list_contents()
                say(look_text)
            else:
                self.open(game, actionargs)
class Container(Storage):
    """Storage whose contents go IN it and are reachable only while open
    (e.g. a fridge or a chest)."""
    def __init__(self, id, name):
        super().__init__(id, name)
        self.receive_preps = ["in"]
        self.can_be_opened = True
        self.is_open = False
        self.contents_accessible = False
        self.contents_accessible_iff_open = True
    def get_status(self, type=None):
        """Serialize state, tagging it as a "Container" unless told otherwise."""
        return super().get_status("Container" if type is None else type)
class Surface(Storage):
    """Things are stored ON the surface
    Things ARE accessible when on the surface
    EXAMPLES: Table, Shelf"""
    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_be_opened = False
        self.receive_preps = ["on"]
    def get_status(self, type=None):
        """Serialize state; the type tag defaults to "Surface".

        Consistency fix: every sibling class accepts an optional `type`
        override; this one previously hard-coded it. Existing callers of
        get_status() are unaffected.
        """
        if type is None:
            type = "Surface"
        return super().get_status(type)
class VendingTerminal(Container):
    """An always-open container holding a stuck ticket.

    Hitting it is not enough; RAMming it dispenses the ticket (a one-shot
    state change flipping `dispensed` and swapping descriptions).
    """
    def __init__(self, id, name):
        super().__init__(id, name)
        # Always open and contents always visible; items cannot be put in.
        self.can_receive = False
        self.can_be_opened = False
        self.is_open = True
        self.contents_accessible = True
        self.contents_accessible_iff_open = True
        self.is_listed = True
        self.dispensed = False  # True once the ticket has been rammed loose
        self.ticket = None  # NOTE(review): assigned during world setup — verify against the game builder
    def get_status(self, type=None):
        # Serialize state; the type tag defaults to "VendingTerminal".
        if type is None:
            type = "VendingTerminal"
        return super().get_status(type)
    def hit(self, game, actionargs):
        # Before dispensing, hitting only hints at needing more force.
        if self.dispensed:
            super().hit(game, actionargs)
        else:
            say(
                "You give the vending terminal a smack, but it's not enough to dislodge the ticket. Maybe something with more force...")
    def ram(self, game, actionargs):
        # One-shot: dislodge the ticket and swap to the "after" descriptions.
        # NOTE(review): alt_description / msg_rammed are set outside this class — verify.
        say("You RAM the vending terminal with tremendous force.")
        if self.dispensed:
            super().ram(game, actionargs)
        else:
            self.ticket.dispensed = True
            self.ticket.description = self.ticket.alt_description
            self.dispensed = True
            self.description = self.alt_description
            say(self.msg_rammed)
    def receive_item(self, game, item, prep):
        # Override: the terminal never accepts items.
        say("You can't put things {} the {}.".format(prep, self.name))
class Bus(Container):
    """A container fixed in place: it can be neither taken nor dropped."""
    def __init__(self, id, name):
        super().__init__(id, name)
        self.can_be_dropped = False
        self.can_be_taken = False
        self.msg_cannot_take = "The {} is fixed in place.".format(self.name)
    def get_status(self, type=None):
        """Serialize state, tagging it as a "Bus" unless told otherwise."""
        return super().get_status("Bus" if type is None else type)
    def go(self, game, actionargs):
        """Leave the bus, hinting at the floppy disk if it is still aboard."""
        say(self.msg_go)
        floppy = game.thing_list["floppyDisk"]
        if floppy in self.contents:
            say("On your way out of the bus, you notice a floppy disk sitting on the driver's seat...")
class Document(Item):
    """A takable, droppable item whose readable text is backed by a file on disk."""
    def __init__(self, id, name):
        super().__init__(id, name)
        self.file = "binary"  # path of the backing text file
    def get_status(self, type=None):
        """Serialize state; the type tag defaults to "Document"."""
        if type is None:
            type = "Document"
        return super().get_status(type)
    # ACTION for read
    def read(self, game, actionargs):
        """Stream the backing file to the player, ten words at a time.

        Every 100 words the player is asked whether to continue; reading is
        force-stopped at 1000 words. Fixes: the file handle was leaked when
        say()/get_yn_answer() raised (now a context manager), the local name
        `str` shadowed the builtin, and the always-true `file.mode == 'r'`
        check is gone.
        """
        with open(self.file, "r") as fh:
            words = fh.read().split(" ")
        count = 0
        display_str = ""
        for word in words:
            # NOTE: words are concatenated without separators, as before.
            display_str += word
            count += 1
            if count % 10 == 0:
                say(display_str)
                display_str = ""
            if count % 100 == 0:
                ans = game.get_yn_answer("...Are you sure you want to keep reading? (y/n)")
                if not ans:
                    say("That was... useful.")
                    break
            if count % 1000 == 0:
                say("You stop reading in spite of yourself, lest you go mad...")
                break
| LindseyL610/CS467-AdventureProject | Thing.py | Thing.py | py | 66,406 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "Room.Room",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "Utilities.say",
"line_numb... |
14712071511 | from fastapi import APIRouter, Header
from fastapi.exceptions import HTTPException
from client import get_ccxt_client
router = APIRouter()
@router.get("/info/")
async def list_markets(x_connection_id: str = Header()):
    """Return every market known to the connected exchange."""
    try:
        return get_ccxt_client(x_connection_id).load_markets()
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
@router.get("/info/{symbol:path}/")
async def retrieve_markets(symbol: str, x_connection_id: str = Header()):
    """Return the market entry for `symbol`, or 400 if unknown.

    Bug fix: the "symbol not found" HTTPException raised below was caught by
    the blanket `except Exception` and re-wrapped with its repr as the
    detail; it is now re-raised untouched.
    """
    try:
        markets = await list_markets(x_connection_id)
        if symbol not in markets:
            raise HTTPException(status_code=400, detail=f"symbol {symbol} not found")
        return markets[symbol]
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
@router.post("/info/sync/")
async def sync_markets(x_connection_id: str = Header()):
    """Force a reload of the market list from the exchange."""
    try:
        return get_ccxt_client(x_connection_id).load_markets(reload=True)
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
@router.get("/ticker/")
async def list_ticker(x_connection_id: str = Header()):
    """Return tickers for all symbols on the connected exchange."""
    try:
        return get_ccxt_client(x_connection_id).fetch_tickers()
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
@router.get("/ticker/{symbol:path}/")
async def retrieve_ticker(symbol: str, x_connection_id: str = Header()):
    """Return the ticker for a single symbol."""
    try:
        return get_ccxt_client(x_connection_id).fetch_ticker(symbol=symbol)
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
@router.get("/kline/{symbol:path}/interval/{interval:path}/")
async def retrieve_kline(symbol: str, interval: str, x_connection_id: str = Header()):
    """Return OHLCV candles for `symbol` at `interval`.

    Bug fix: the "invalid value for interval" HTTPException was swallowed by
    the blanket `except Exception` and re-wrapped, mangling its detail; it is
    now re-raised untouched.
    """
    try:
        client = get_ccxt_client(x_connection_id)
        client_properties = client.describe()
        if interval not in client_properties["timeframes"]:
            raise HTTPException(status_code=400, detail="invalid value for interval")
        return client.fetch_ohlcv(symbol=symbol, timeframe=interval)
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
| masked-trader/raccoon-exchange-service | src/server/routes/market.py | market.py | py | 2,242 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "fastapi.Header",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "client.get_ccxt_client",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "client.load_ma... |
5539453892 | import cv2
import numpy as np
"""
YUV란?
빛의 밝기를 나타내는 휘도(Y)와 색상신호 2개(U, V)로 표현하는 방식이다.
The Y′UV model defines a color space in terms of one luma component (Y′) and
two chrominance components, called U (blue projection) and V (red projection) respectively.
Y = 0.299R + 0.587G + 0.114B
"""
src = cv2.imread('../resources/Lena.png')
(h, w, c) = src.shape
# Reference conversion via OpenCV; channel 0 of a YUV image is the luma (Y).
yuv = cv2.cvtColor(src, cv2.COLOR_BGR2YUV)
# Manual luma: Y = 0.299R + 0.587G + 0.114B. OpenCV stores BGR, hence the
# channel order below. (The dead `np.zeros((h, w))` pre-allocation that was
# immediately overwritten has been removed.)
my_y = (src[:, :, 0] * 0.114) + (src[:, :, 1] * 0.587) + (src[:, :, 2] * 0.299)
my_y = np.round(my_y).astype(np.uint8)
print(yuv[0:5, 0:5, 0])
print(my_y[0:5, 0:5])
cv2.imshow('original', src)
cv2.imshow('cvtColor', yuv[:, :, 0])  # first YUV channel is Y
cv2.imshow('my_y', my_y)
cv2.waitKey()
cv2.destroyAllWindows()
| daebakk/Image-Processing | Assignment/Fundamentals of Image processing/BGRToY.py | BGRToY.py | py | 794 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2YUV",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"lin... |
7755753027 | from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
from .. import api, utils
from ..command import Command, CMD_SUCCESS, HELP_LIST
from ..exceptions import InvalidDateError, NotFoundError, WrappedValueError
class Log(Command):
    """List enrollment information available in the registry.
    The command list a set of enrollments. Some searching parameters
    to filter the results are available. Parameters <uuid> and <organization>
    filter by unique identity and organization name. Enrollments between a
    period can also be listed using <from> and <to> parameters, where
    <from> must be less or equal than <to>. Default values for these dates
    are '1900-01-01' and '2100-01-01'.
    Dates may follow several patterns. The most common and recommended
    is 'YYYY-MM-DD'. Optionally, time information can be included using
    patters like 'YYYY-MM-DD hh:mm:ss'.
    """
    def __init__(self, **kwargs):
        super(Log, self).__init__(**kwargs)
        self.parser = argparse.ArgumentParser(description=self.description,
                                              usage=self.usage)
        # Enrollments search options
        self.parser.add_argument('--uuid', default=None,
                                 help="unique identity to withdraw")
        self.parser.add_argument('--organization', default=None,
                                 help="organization where the uuid is enrolled")
        self.parser.add_argument('--from', dest='from_date', default=None,
                                 help="date (YYYY-MM-DD:hh:mm:ss) when the enrollment starts")
        self.parser.add_argument('--to', dest='to_date', default=None,
                                 help="date (YYYY-MM-DD:hh:mm:ss) when the enrollment ends")
        # Exit early if help is requested
        if 'cmd_args' in kwargs and [i for i in kwargs['cmd_args'] if i in HELP_LIST]:
            return
        self._set_database(**kwargs)
    @property
    def description(self):
        return """List enrollments."""
    @property
    def usage(self):
        return "%(prog)s log [--uuid <uuid>] [--organization <organization>] [--from <date>] [--to <date>]"
    def run(self, *args):
        """List enrollments using search parameters."""
        params = self.parser.parse_args(args)
        uuid = params.uuid
        organization = params.organization
        try:
            # Parse the raw CLI strings into datetimes; str_to_datetime raises
            # InvalidDateError for unparsable values.
            from_date = utils.str_to_datetime(params.from_date)
            to_date = utils.str_to_datetime(params.to_date)
            code = self.log(uuid, organization, from_date, to_date)
        except InvalidDateError as e:
            self.error(str(e))
            return e.code
        return code
    def log(self, uuid=None, organization=None, from_date=None, to_date=None):
        """List enrollment information available in the registry.
        Method that returns a list of enrollments. If <uuid> parameter is set,
        it will return the enrollments related to that unique identity;
        if <organization> parameter is given, it will return the enrollments
        related to that organization; if both parameters are set, the function
        will return the list of enrollments of <uuid> on the <organization>.
        Enrollments between a period can also be listed using <from_date> and
        <to_date> parameters. When these are set, the method will return
        all those enrollments where Enrollment.start >= from_date AND
        Enrollment.end <= to_date. Defaults values for these dates are
        1900-01-01 and 2100-01-01.
        :param db: database manager
        :param uuid: unique identifier
        :param organization: name of the organization
        :param from_date: date when the enrollment starts
        :param to_date: date when the enrollment ends
        """
        try:
            enrollments = api.enrollments(self.db, uuid, organization,
                                          from_date, to_date)
            self.display('log.tmpl', enrollments=enrollments)
        except (NotFoundError, WrappedValueError) as e:
            self.error(str(e))
            return e.code
        return CMD_SUCCESS
| timhayduk/glusterDashboard | gitlab/lib/python3.5/site-packages/sortinghat/cmd/log.py | log.py | py | 4,198 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "command.Command",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "command.HELP_LIST",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "exceptions.... |
70096885309 | from collections import deque
def solution(progresses, speeds):
    """Simulate daily progress; each day, every finished task at the front of
    the queue is released, and the number released that day is recorded."""
    remaining = deque(progresses)
    rates = deque(speeds)
    answer = []
    while remaining:
        # Advance every task by one day's work.
        for idx in range(len(rates)):
            remaining[idx] += rates[idx]
        # Release all completed tasks at the front of the queue.
        released = 0
        while remaining and remaining[0] >= 100:
            remaining.popleft()
            rates.popleft()
            released += 1
        if released:
            answer.append(released)
    return answer
if __name__ == "__main__":
    # Smoke-run with the sample case; the return value is discarded (no
    # print) — NOTE(review): confirm whether output was intended here.
    progresses = [93, 30, 55]
    speeds = [1, 30, 5]
    solution(progresses, speeds)
| YooGunWook/coding_test | 프로그래머스_복습/스택_기능개발.py | 스택_기능개발.py | py | 697 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 6,
"usage_type": "call"
}
] |
39671957475 | """ Класс для работы с контестами """
import os
import re
import json
import string
import shutil
import configparser, itertools
from collections import OrderedDict
from bs4 import BeautifulSoup
from mysite import settings
from .models import Cntsregs, Logins, Contests, Problems
from problems.classes import ProblemsCreator
# Helper class for parsing contest settings
class MultiDict(OrderedDict):
    # Counter used to suffix duplicate section names so repeated INI sections
    # are preserved instead of overwritten by configparser.
    _unique = 0  # class variable
    def __setitem__(self, key, val):
        # configparser inserts each new section as a dict value; give it a
        # unique "_N" suffix so a later section with the same name cannot
        # clobber it. NOTE(review): `self._unique += 1` rebinds the counter
        # as an instance attribute after the first write (per-instance count).
        if isinstance(val, dict):
            self._unique += 1
            key += "_" + str(self._unique)
        OrderedDict.__setitem__(self, key, val)
# Class for parsing contest settings
class SettingParser(object):
    # Removes the "_N" numeric suffix that MultiDict appends to key names
    def delete_number_in_key(self, keyvalue):
        """Return `keyvalue` as a string with any "_<digits>" run removed."""
        keyname = str(keyvalue)
        clear_keyname = re.sub(r'_[\d]+', '', keyname)
        return clear_keyname
    # Converts a string from Windows-1251 to UTF-8
    def convert_from_windows1251_to_utf8(self, value):
        """Best-effort re-decode; returns the input unchanged on failure.

        Fixes: the local variable used to be named `string`, shadowing the
        imported `string` module; the bare `except:` is narrowed to the two
        codec errors this round-trip can raise; a dead pre-assignment of
        decoded_string is removed.
        """
        text = str(value)
        try:
            decoded_string = text.encode('windows-1251').decode('utf-8')
        except (UnicodeEncodeError, UnicodeDecodeError):
            decoded_string = text
        return decoded_string
    # Parses a contest config file
    # Returns a dict
    def parse_config(self, filepath):
        """Read an ejudge serve.cfg, prepending a synthetic [general] section,
        and return {section: {key: value}} with keys/values re-decoded."""
        config_data = dict()
        config = configparser.RawConfigParser(strict=False, allow_no_value=True, dict_type=MultiDict)
        with open(filepath) as fp:
            config.read_file(itertools.chain(['[general]'], fp), source=filepath)
        for key in config:
            config_data[key] = dict()
            for i in config.items(key):
                item_key = self.convert_from_windows1251_to_utf8(i[0])
                item_value = self.convert_from_windows1251_to_utf8(i[1])
                config_data[key][item_key] = item_value
        return config_data
# Manager class for handling contests
class ContestsManager(object):
    # NOTE(review): class-level mutables shared by all instances — verify
    # that accumulating errors across instances is intended.
    _errors = list()
    _problems_folder = "problems/"
    # Returns the accumulated list of errors
    def get_errors(self):
        return self._errors
    # Root directory holding contests
    @property
    def main_dir(self):
        return settings.EJUDGE_CONTEST_PATH
    # Directory with contest XML files
    @property
    def xml_contests_dir(self):
        return settings.EJUDGE_CONTEST_SETTINGS_PATH
    # Suffix appended to a contest dir to reach its config file
    @property
    def conf_prefix(self):
        return '/conf/serve.cfg'
    # Creates every directory a contest needs; returns True on success
    def create_contest_dirs(self, full_id):
        contest_folder = self.main_dir + str(full_id) + "/"
        if os.path.isdir(contest_folder):
            self._errors.append("Contest dir already exist")
            return False
        conf_folder = contest_folder + "conf/"
        problems_folder = contest_folder + "problems/"
        var_folder = contest_folder + "var/"
        include_var_folders = ["archive", "run", "status", "team_extra", "work"]
        try:
            os.mkdir(contest_folder)
            os.mkdir(conf_folder)
            os.mkdir(problems_folder)
            os.mkdir(var_folder)
            for folder in include_var_folders:
                path = var_folder + folder + "/"
                os.mkdir(path)
        except:
            self._errors.append("Cannot create contest folders")
            return False
        return True
    # Loads the contest ids a user is registered for
    def upload_user_contests(self, user_id):
        user_contests = list()
        user_contests_obj = Cntsregs.objects.all().filter(user=user_id)
        for contest_object in user_contests_obj:
            contest_id = contest_object.contest_id
            user_contests.append(contest_id)
        return user_contests
    # Registers a user for a contest; returns an error string or False on success
    def reg_user_to_contest(self, user_id, contest_id):
        error = ""
        try:
            user = Logins.objects.get(user_id=user_id)
        except:
            error = "Cannot get User"
            return error
        try:
            is_register_exist = Cntsregs.objects.filter(user=user, contest_id=contest_id).exists()
        except:
            error = "Cannot check if record exist"
            return error
        if not is_register_exist:
            try:
                Cntsregs.objects.create(user=user, contest_id=contest_id, status=0)
            except:
                error = "Cannot add User to Contest"
                return error
        else:
            error = "Record already exist"
            return error
        return False
    # Builds the path to the contest config file
    def get_config_path(self, full_id):
        return self.main_dir + str(full_id) + self.conf_prefix
    # Builds the path to the contest directory
    def get_contest_dir(self, full_id):
        return self.main_dir + str(full_id) + "/"
    # Builds the path to the contest XML config file
    def get_xml_config_path(self, full_id):
        return self.xml_contests_dir + str(full_id) + ".xml"
    # Does the config file exist?
    def is_config_exist(self, full_id):
        return os.path.isfile(self.main_dir + str(full_id) + self.conf_prefix)
    # Does the XML config file exist?
    def is_xml_config_exist(self, full_id):
        return os.path.isfile(self.xml_contests_dir + str(full_id) + ".xml")
    # Does the contest directory exist?
    def is_contest_dir_exist(self, full_id):
        return os.path.isdir(self.main_dir + str(full_id) + "/")
    # Lists all contest XML files in the settings directory
    def get_contests_xml_list(self):
        directory = self.xml_contests_dir
        if not os.path.isdir(directory):
            raise Exception("Директория с контестами не обнаружена")
        files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and re.search('.xml', f)]
        return files
    # Extracts contest data (name, sched_time) from its XML file
    def parse_contest_xml(self, filepath):
        if not os.path.isfile(filepath):
            return False
        try:
            file = open(filepath, 'r', encoding="utf-8")
        except IOError:
            return False
        soup = BeautifulSoup(file, 'xml')
        try:
            name = soup.find('name').get_text()
        except:
            name = "Unknown"
        try:
            sched_time = soup.find('sched_time').get_text()
        except:
            sched_time = "Unknown"
        info = {
            "name": name,
            "sched_time": sched_time,
        }
        file.close()
        return info
    # Extracts the contest ID from an XML file name (leading digits)
    def parse_contest_id_from_xml(self, xmlname):
        search = re.match(r'\d+', str(xmlname))
        if search is not None:
            return search.group(0)
        else:
            return None
    # Parses the contest settings
    # Returns a dict or False
    def parse_contest_settings(self, contest_full_id):
        if not self.is_config_exist(contest_full_id):
            return False
        config_path = self.get_config_path(contest_full_id)
        setting_parser = SettingParser()
        config_data = setting_parser.parse_config(config_path)
        return config_data
    # Collects the ids of all contests from their XML file names
    def get_contests_ids(self, filenames):
        ids = list()
        for filename in filenames:
            contest_id = self.parse_contest_id_from_xml(filename)
            ids.append(contest_id)
        return ids
    # Assembles all data about one contest into a dict
    def get_contest(self, contest_full_id):
        contest = dict()
        contest_id = int(contest_full_id)
        contest_settings = ""
        contest_config_path = ""
        contest_xml_config_path = ""
        contest_info = dict()
        # Full path to the contest's config file
        if (self.is_config_exist(contest_full_id)):
            contest_config_path = self.get_config_path(contest_full_id)
            try:
                contest_settings = self.parse_contest_settings(contest_full_id)
            except:
                contest_settings = dict()
                #self._errors.append("Cannot parse contest settings")
        # Full path to the contest's XML config file
        if (self.is_xml_config_exist(contest_full_id)):
            contest_xml_config_path = self.get_xml_config_path(contest_full_id)
            try:
                contest_info = self.parse_contest_xml(contest_xml_config_path)
            except:
                contest_info = dict()
                self._errors.append("Cannot parse contest XML")
        # Contest data
        contest["full_id"] = contest_full_id
        contest["id"] = contest_id
        contest["dir"] = self.get_contest_dir(contest_full_id)
        contest["problems_dir"] = self.get_contest_dir(contest_full_id) + self._problems_folder
        if "name" in contest_info:
            contest["name"] = contest_info["name"]
        else:
            contest["name"] = "Unknown"
        if "sched_time" in contest_info:
            contest["sched_time"] = contest_info["sched_time"]
        else:
            contest["sched_time"] = "Unknown"
        contest["xml_config_path"] = contest_xml_config_path
        contest["config_path"] = contest_config_path
        contest["settings"] = contest_settings
        return contest
    # Assembles data about every contest found on disk
    def get_contests(self):
        contests = list()
        contest_xmls = self.get_contests_xml_list()
        for xml in contest_xmls:
            contest_full_id = self.parse_contest_id_from_xml(xml)
            contest = self.get_contest(contest_full_id)
            contests.append(contest)
        return contests
    # Computes the next unique zero-padded (6-digit) FULL ID
    def get_next_full_id(self):
        contests = Contests.objects.all()
        ids = list()
        for contest in contests:
            ids.append(int(contest.full_id))
        if len(ids):
            last_id = int(max(ids))
            next_id = last_id + 1
        else:
            next_id = 1
        full_id = str(next_id)
        while len(full_id) != 6:
            full_id = "0" + full_id
        # Keep incrementing until the id has no directory yet (give up after 100 tries)
        count = 0
        while self.is_contest_dir_exist(full_id):
            full_id = str(int(full_id) + 1)
            while len(full_id) != 6:
                full_id = "0" + full_id
            count = count + 1
            if count > 100:
                break
        return full_id
    # Saves a new contest to the DB
    def save_contest(self, form_data):
        name = form_data.get('name')
        sched_time = form_data.get('sched_time')
        problems = form_data.get('tasks')
        duration = form_data.get('duration')
        try:
            full_id = self.get_next_full_id()
        except:
            self._errors.append("Не могу получить следующий FULL_ID")
            return False
        contest_dir = self.get_contest_dir(full_id)
        xml_config_path = self.get_xml_config_path(full_id)
        config_path = self.get_config_path(full_id)
        try:
            Contests.objects.create(name=name,
                                    sched_time=sched_time,
                                    problems=problems,
                                    full_id=full_id,
                                    contest_dir=contest_dir,
                                    duration=duration,
                                    xml_config_path=xml_config_path,
                                    config_path=config_path)
        except:
            self._errors.append("Не удалось создать контест")
            return False
        return True
    # Updates an existing contest in the DB
    def update_contest(self, form_data):
        contest_id = form_data.get('contest_id')
        try:
            contest_object = Contests.objects.get(pk=contest_id)
        except:
            return False
        name = form_data.get('name')
        sched_time = form_data.get('sched_time')
        problems = form_data.get('tasks')
        duration = form_data.get('duration')
        try:
            contest_object.name = name
            contest_object.sched_time = sched_time
            contest_object.problems = problems
            contest_object.duration = duration
            contest_object.save()
        except:
            self._errors.append("Не удалось обновить контест")
            return False
        return True
    # Creates the XML file for a contest from the bundled template
    def create_contest_xml(self, contest):
        filepath = self.get_xml_config_path(contest.full_id)
        xml_template = settings.EJUDGE_FILE_EXAMPLES_FOLDER + "config.xml"
        if os.path.isfile(filepath):
            self._errors.append("XML файл для контеста уже существует")
            return False
        if not os.path.isfile(xml_template):
            self._errors.append("Шаблон XML для контеста не существует")
            return False
        if contest.sched_time != "":
            sched_time = '<sched_time>' + contest.sched_time + '</sched_time>'
        else:
            sched_time = ""
        if contest.name != "":
            name = contest.name
        else:
            name = ""
        try:
            with open(xml_template, encoding="utf-8") as fp:
                xml_example_data = fp.read()
                xml_example_data = xml_example_data.replace("{{ name }}", name)
                xml_example_data = xml_example_data.replace("{{ sched_time }}", sched_time)
        except:
            self._errors.append("Не могу прочитать XML шаблон для контеста")
            return False
        try:
            with open(filepath, mode="w", encoding="utf-8") as fp2:
                fp2.write(xml_example_data)
        except:
            self._errors.append("Не могу создать XML для контеста")
            return False
        return True
    # Tears a contest down: removes configs, contest dir, and registrations
    def undeploy_contest(self, contest_id):
        try:
            contest_id = int(contest_id)
            contest = Contests.objects.get(pk=contest_id)
        except:
            self._errors.append("Ошибка получения контеста")
            return False
        if os.path.isfile(contest.config_path):
            os.remove(contest.config_path)
        if os.path.isfile(contest.xml_config_path):
            os.remove(contest.xml_config_path)
        if os.path.isdir(contest.contest_dir):
            shutil.rmtree(contest.contest_dir)
        ejudge_contest_id = int(contest.full_id)
        Cntsregs.objects.filter(contest_id=ejudge_contest_id).delete()
        return True
    # Deploys a contest: dirs, XML, per-problem folders/tests, serve.cfg
    def deploy_contest(self, contest_id):
        # Step 1. Fetch the contest
        try:
            contest_id = int(contest_id)
            contest = Contests.objects.get(pk=contest_id)
        except:
            self._errors.append("Ошибка получения контеста")
            return False
        # Step 2. Fetch the problems attached to it
        try:
            tasks = json.loads(contest.problems)
        except:
            self._errors.append("Не могу распарсить JSON с задачами")
            return False
        problems = list()
        for task in tasks:
            try:
                item = Problems.objects.get(pk=task["id"])
                problems.append(item)
            except:
                self._errors.append("Не могу получить задачу с ID " + task["id"])
                continue
        # Step 3. Create all directories and config files
        create_dir_success = self.create_contest_dirs(contest.full_id)
        if not create_dir_success:
            self._errors.append("Ошибка создания директорий контеста")
            return False
        create_xml_success = self.create_contest_xml(contest)
        if not create_xml_success:
            self._errors.append("Ошибка создания XML для контеста")
            return False
        problemsManager = ProblemsCreator()
        # Give each problem its own short letter id (A, B, C, ...); at most 26
        problemsShortIds = list(string.ascii_uppercase)
        max_length = len(problemsShortIds)
        i = 0
        problems_dir = contest.contest_dir + "problems/"
        problems_configs = ""
        for problem in problems:
            if i >= max_length:
                break
            problem_id = problemsShortIds[i]
            create_problem_dir_success = problemsManager.create_problem_folder(problems_dir, problem_id)
            if not create_problem_dir_success:
                self._errors.append("Не могу создать директорию для задачи " + problem.title)
                return False
            create_xml_success = problemsManager.create_xml(create_problem_dir_success, problem.id, problem_id)
            if not create_xml_success:
                self._errors.append("Не могу создать XML для задачи " + problem.title)
                return False
            problem_dir = create_problem_dir_success + "/tests/"
            create_tests_success = problemsManager.create_tests(problem_dir, problem.tests)
            if not create_tests_success:
                self._errors.append("Не могу создать тесты для задачи " + problem.title)
                return False
            problems_config = problemsManager.get_problem_config(problem, i + 1, problem_id)
            problems_configs = problems_configs + problems_config + "\n"
            i = i + 1
        # Render serve.cfg from the template with duration and problem configs
        contest_config_template = settings.EJUDGE_FILE_EXAMPLES_FOLDER + "serve.cfg"
        with open(contest_config_template, mode="r", encoding="utf-8") as fp:
            serveCfg = fp.read()
            serveCfg = serveCfg.replace("{{ duration }}", str(contest.duration))
            serveCfg = serveCfg.replace("{{ problems }}", problems_configs)
        with open(contest.config_path, mode="w", encoding="utf-8") as fp2:
            fp2.write(serveCfg)
        # Register the administrator (user 1); failure is recorded but not fatal
        try:
            self.reg_user_to_contest(1, int(contest.full_id))
        except:
            self._errors.append("Не могу зарегистрировать администратора")
        return True
{
"api_name": "collections.OrderedDict",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict.__setitem__",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 23,
"usage_type": "name"
},
{
... |
42435364768 | import threading
import time
import PySimpleGUI as sg
import psutil
from pathlib import Path
import subprocess
def is_running(process_name):
    """Return True if any running process name contains *process_name*.

    Case-sensitive substring match over psutil's process list, expressed with
    any() instead of the manual flag-and-break loop.
    """
    return any(process_name in proc.name() for proc in psutil.process_iter())
def the_thread(window: sg.Window, logtext, values, gtav_exe, dll_name):
    """Background worker: wait for *gtav_exe* to appear, then inject *dll_name*.

    Runs as a daemon thread so the GUI stays responsive. Once the game
    process is detected it waits the user-configured delay, invokes the
    bundled Injector.exe, appends the injector's output to the GUI log,
    and re-enables the START button.
    """
    injected = False
    while not injected:
        if not is_running(gtav_exe):
            # Poll once per second; the previous tight loop busy-waited at
            # 100% of one CPU core until the game started.
            time.sleep(1)
            continue
        logtext += f"\n{gtav_exe} is running..."
        window["log"].update(logtext)
        delay = int(values["delay"])
        logtext += f"\nInjecting DLL in {delay} seconds..."
        window["log"].update(logtext)
        # Give the game time to start up and decrypt memory before injecting.
        time.sleep(delay)
        inj_path = Path("Injector.exe").resolve()
        # shell=False: we pass an explicit argv list, so no shell is needed
        # (shell=True combined with a list is error-prone and a needless
        # command-injection surface).
        inj_output = subprocess.run(
            [inj_path, "--process-name", gtav_exe, "--inject", dll_name],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
        )
        inj_output_out = inj_output.stdout.decode("UTF-8")
        logtext += f"\n{inj_output_out}"
        window["log"].update(logtext)
        injected = True
    window["start"].update(disabled=False)
def main():
    """Build the injector GUI, restore saved settings, and run the event loop.

    Settings are persisted to settings.ini as ad-hoc "[KEY]=value" lines and
    re-read on startup. The START button launches PlayGTAV.exe and hands the
    actual injection off to a daemon thread (the_thread) so the window stays
    responsive while waiting for the game.
    """
    # Defaults used when settings.ini is absent or incomplete.
    delay = 10
    gta_path = ""
    dll_name = "GTAO_Booster.dll"
    gtav_exe = "GTA5.exe"
    play_gta = "PlayGTAV.exe"
    # Restore last-used values. NOTE(review): the parsed values keep their
    # trailing newline from readlines(); they are only stripped later with
    # .strip("\n") when the START handler builds paths.
    if Path.exists(Path("settings.ini")):
        with open("settings.ini", 'r') as file:
            content = file.readlines()
            for line in content:
                if "[GTA5_LOC]=" in line:
                    gta_path = line.split("=")[1]
                if "[DELAY]=" in line:
                    delay = int(line.split("=")[1])
                if "[DLL]=" in line:
                    dll_name = line.split("=")[1]
    sg.theme('Dark')
    layout = [
        [sg.Text('Select PlayGTAV.exe:', size=(16,1)), sg.Input(gta_path, key="gta_exe"), sg.FileBrowse(file_types=((play_gta, play_gta),))],
        [sg.Text('Select DLL:', size=(16,1)), sg.Input(dll_name, key="dll"), sg.FileBrowse(file_types=(("", "*.dll"),))],
        [sg.Text('Injection Delay:', size=(16,1), tooltip="Delay to allow GTA to start & decrypt memory. Depending on PC performance and storage media you can decrease/increase this value."), sg.Input(delay, size=(5, 1), enable_events=True, tooltip="Delay to allow GTA to start & decrypt memory. Depending on PC performance and storage media you can decrease/increase this value.", key="delay")],
        [sg.Multiline(size=(70, 12), enable_events=True, key="log", autoscroll=True, disabled=True)],
        [sg.Button('START GTAV & Inject DLL', key="start", disabled=False), sg.Button('EXIT', key="exit", button_color=("white", "red"))],
        [sg.Text('© ThatOldGrumpyMan', size=(16, 1))],
    ]
    window = sg.Window('GTAV Auto DLL Injector', layout, finalize=True)
    # Hard requirements checked up front: the helper injector binary must be
    # next to this script, and the game must not already be running.
    if not Path.exists(Path("Injector.exe")):
        logtext = "Injector.exe is missing! Place it in the same directory as this file!\nInjector.exe is required to inject DLL in process.\nGet it here: https://github.com/nefarius/Injector\nRestart application when done."
        window["log"].update(logtext)
        window["start"].update(disabled=True)
    if is_running(gtav_exe):
        logtext = "GTA V is already running! Close it and restart this application!"
        window["log"].update(logtext)
        window["start"].update(disabled=True)
    while True:
        event, values = window.read()
        # values is None when the window is closed, so indexing raises
        # TypeError — fall back to the last known delay.
        try:
            delay = str(values["delay"])
        except TypeError:
            delay = str(delay)
        # Live input sanitization: keep the delay field to 1-99 by trimming
        # the last typed character whenever the text starts with 0/non-digit,
        # contains a non-digit, or grows beyond two characters.
        if len(delay) > 0 and delay[0] not in ('123456789'):
            window["delay"].update(values["delay"][:-1])
        if len(delay) > 0 and delay[-1] not in ('0123456789'):
            window["delay"].update(values["delay"][:-1])
        if len(delay) > 2:
            window["delay"].update(values["delay"][:-1])
        if event == "start":
            # Reset log and debounce the button while a launch is in flight.
            logtext = ""
            window["log"].update(logtext)
            window["start"].update(disabled=True)
            gta_path = Path(str(values["gta_exe"]).strip("\n")).resolve()
            try:
                logtext = "Starting GTA V..."
                window["log"].update(logtext)
                subprocess.Popen([gta_path])
            # NOTE(review): WindowsError is the Windows-only alias of OSError;
            # on non-Windows platforms this name does not exist — the app is
            # presumably Windows-only, but confirm.
            except WindowsError:
                logtext += "\nInvalid GTA Path!"
                window["log"].update(logtext)
                window["start"].update(disabled=False)
                continue
            # Persist the current selections in the same "[KEY]=value" format
            # parsed at startup.
            with open("settings.ini", 'w') as file:
                file.write("[GTA5_LOC]=" + str(values["gta_exe"]) + "\n" + "[DELAY]=" + str(values["delay"]) + "\n" + "[DLL]=" + str(values["dll"]))
            logtext += "\nWaiting for GTA V to start..."
            window["log"].update(logtext)
            dll_name = Path(str(values["dll"]).strip("\n")).resolve()
            # Daemon thread: dies with the process, keeps the GUI loop free.
            threading.Thread(target=the_thread, args=(window, logtext, values, gtav_exe, dll_name), daemon=True).start()
        if event == "exit" or event == sg.WIN_CLOSED:
            break
    window.close()
# Entry point guard: launch the GUI only when executed as a script.
if __name__ == '__main__':
main() | activatedtmx/GTAV-Auto-DLL-Injector | injector.py | injector.py | py | 5,140 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "psutil.process_iter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.Window",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",... |
6642507334 | from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializer import perfil_usuarioSerializer
from .models import perfil_usuario
@api_view(['GET', 'POST'])
def lista_usuarios(request):
    """List all user profiles (GET) or create a new one (POST).

    GET  -> 200 with every perfil_usuario serialized.
    POST -> 400 if a profile with the same ``correo`` (e-mail) already
            exists or the payload fails serializer validation; 201 with
            the stored data on success.
    """
    if request.method == 'GET':
        users = perfil_usuario.objects.all()
        serializer = perfil_usuarioSerializer(users, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        data = request.data
        correo = data.get('correo')
        # Manual uniqueness pre-check on the e-mail before hitting the
        # serializer, so the client gets a targeted error message.
        existing_user = perfil_usuario.objects.filter(correo=correo).first()
        if existing_user:
            return Response({'error': 'Ya existe un usuario con este correo electrónico.'}, status=status.HTTP_400_BAD_REQUEST)
        serializer = perfil_usuarioSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | MarcoToloza/Caso_A_Parte-2-API | Api_qr/AppPi/views.py | views.py | py | 1,057 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.perfil_usuario.objects.all",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.perfil_usuario.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "models.perfil_usuario",
"line_number": 10,
"usage_type": "name"
},... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.