seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32909477589 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import time
import random
import decimal
from PySide2.QtWidgets import QApplication, QMessageBox, QWidget
from PySide2 import QtCore
from PySide2.QtUiTools import QUiLoader
from PyQt5.QtCore import QTimer
# Reference dates (marriage date, child's birth) used as defaults by the tool.
marry_date = '2020-07-06'
birth_date = '2022-01-22 11:01'
# Pre-parsed datetime counterparts of the string constants above.
birth_datetime = datetime.datetime.strptime(birth_date, '%Y-%m-%d %H:%M')
marry_datetime = datetime.datetime.strptime(marry_date, '%Y-%m-%d')
def decimal_float_number(number):
    """Round *number* half-up to two decimal places and return it as a string."""
    # Note: this mutates the process-wide decimal context rounding mode,
    # exactly as the original implementation did.
    decimal.getcontext().rounding = "ROUND_HALF_UP"
    quantized = decimal.Decimal(str(number)).quantize(decimal.Decimal("0.00"))
    return str(quantized)
def format_date(input_date):
    """Parse a 'YYYY-MM-DD' string into a naive datetime at midnight."""
    parsed = datetime.datetime.strptime(input_date, '%Y-%m-%d')
    return parsed
def format_datetime(input_date):
    """Parse a 'YYYY-MM-DD HH:MM' string into a naive datetime."""
    parsed = datetime.datetime.strptime(input_date, '%Y-%m-%d %H:%M')
    return parsed
class ComputeTools(QWidget):
    """Small Qt tool that shows the elapsed seconds/days since a chosen date."""

    def __init__(self):
        # BUG FIX: the QWidget base class was never initialised, which is
        # unsafe for any QWidget subclass.
        super().__init__()
        self.ui = QUiLoader().load('compute_day.ui')
        # BUG FIX: use the PySide2 QTimer (via the already-imported QtCore)
        # instead of mixing in PyQt5's QTimer with a PySide2 application.
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.compute_day_second)

    def compute_day_second(self):
        """Recompute elapsed seconds and days from the date in the UI."""
        input_date = self.ui.dateEdit.text()
        input_date = format_date(input_date)
        compute_second = datetime.datetime.now().timestamp() - input_date.timestamp()
        compute_day = decimal_float_number(compute_second / 3600 / 24)
        self.ui.secondEdit.setText(str(compute_second))
        self.ui.dayEdit.setText(str(compute_day))

    def msg_notifaction(self, compute_day, compute_second):
        """Show an informational message box with the computed values.

        NOTE(review): the (misspelled) method name is kept so any external
        callers are unaffected.
        """
        msg_box = QMessageBox()
        msg_box.setIcon(QMessageBox.Information)
        msg_box.setText('{}秒, {}天'.format(compute_second, compute_day))
        msg_box.setDetailedText('{}秒, {}天'.format(compute_second, compute_day))
        msg_box.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
        msg_box.setDefaultButton(QMessageBox.Ok)
        msg_box.show()
        msg_box.exec()

    def compute_realtime_day_second(self):
        """Start periodic recomputation and disable the start button."""
        self.timer.start(random.randint(10, 1000))
        self.ui.computeRealtime.setEnabled(False)

    def stop_realtime_compute(self):
        """Stop periodic recomputation and re-enable the start button."""
        self.timer.stop()
        self.ui.computeRealtime.setEnabled(True)

    def open_page(self):
        """Wire up the buttons and show the loaded UI."""
        self.ui.computeRealtime.clicked.connect(self.compute_realtime_day_second)
        self.ui.stopCompute.clicked.connect(self.stop_realtime_compute)
        self.ui.show()
if __name__ == '__main__':
    # Must be set before the QApplication is constructed.
    QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
    app = QApplication([])
    qt_tools = ComputeTools()
    qt_tools.open_page()
    app.exec_()
| id10tttt/tools | qt_tools/compute_day.py | compute_day.py | py | 2,564 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 15,
"usage_type": "call"
},
{
"api_... |
72532696509 | """ Subsystem to communicate with catalog service
"""
import logging
from aiohttp import web
from pint import UnitRegistry
from servicelib.aiohttp.application_setup import ModuleCategory, app_module_setup
from . import _handlers
_logger = logging.getLogger(__name__)
@app_module_setup(
    __name__,
    ModuleCategory.ADDON,
    settings_name="WEBSERVER_CATALOG",
    depends=["simcore_service_webserver.rest"],
    logger=_logger,
)
def setup_catalog(app: web.Application):
    """Register the catalog REST routes and shared helpers on *app*."""
    # Routes must be named after their handler functions so reverse
    # lookups by route name keep working.
    for route_def in _handlers.routes:
        assert route_def.kwargs["name"] == route_def.handler.__name__  # nosec
    app.add_routes(_handlers.routes)
    # prepares units registry, shared under the class name as key
    app[UnitRegistry.__name__] = UnitRegistry()
| ITISFoundation/osparc-simcore | services/web/server/src/simcore_service_webserver/catalog/plugin.py | plugin.py | py | 801 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "aiohttp.web.Application",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "aiohttp.web",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pint.UnitR... |
1293512231 | import inspect
from onnx import defs
from onnx.backend.test.runner import BackendIsNotSupposedToImplementIt
from onnx_jax.logger import logger
class Handler(object):
    """Base class for ONNX op handlers.

    Subclasses register the op they implement via the decorators below and
    provide ``version_<N>`` classmethods for each supported opset version.
    """

    ONNX_OP = None              # name of the ONNX op this handler covers
    DOMAIN = defs.ONNX_DOMAIN   # operator-set domain of the op
    VERSION = 0
    SINCE_VERSION = 0           # opset version selected for the current model

    @classmethod
    def check_cls(cls):
        """Warn when a subclass forgot to register its ONNX_OP."""
        if cls.ONNX_OP:
            return
        logger.warning(
            f"{cls.__name__} doesn't have ONNX_OP. "
            "Please use Handler.onnx_op decorator to register ONNX_OP."
        )

    @classmethod
    def args_check(cls, node, **kwargs):
        """Hook for subclasses to validate node arguments; no-op by default."""
        pass

    @classmethod
    def handle(cls, node, **kwargs):
        """Dispatch *node* to the ``version_<SINCE_VERSION>`` implementation."""
        ver_handle = getattr(cls, f"version_{cls.SINCE_VERSION}", None)
        if ver_handle is None:
            raise BackendIsNotSupposedToImplementIt(
                "{} version {} is not implemented.".format(node.op_type, cls.SINCE_VERSION)
            )
        cls.args_check(node, **kwargs)
        return ver_handle(node, **kwargs)

    @classmethod
    def get_versions(cls):
        """Return the opset versions implemented by this handler class."""
        prefix = "version_"
        return [
            int(name[len(prefix):])
            for name, _member in inspect.getmembers(cls, inspect.ismethod)
            if name.startswith(prefix)
        ]

    @staticmethod
    def onnx_op(op):
        return Handler.property_register("ONNX_OP", op)

    @staticmethod
    def domain(d):
        return Handler.property_register("DOMAIN", d)

    @staticmethod
    def property_register(name, value):
        """Return a class decorator that sets ``cls.<name> = value``."""
        def deco(cls):
            setattr(cls, name, value)
            return cls
        return deco
# Module-level aliases so op implementations can use the registration
# decorators without referencing the Handler class directly.
domain = Handler.domain
onnx_op = Handler.onnx_op
property_register = Handler.property_register
| gglin001/onnx-jax | onnx_jax/handlers/handler.py | handler.py | py | 1,679 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "onnx.defs.ONNX_DOMAIN",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "onnx.defs",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "onnx_jax.logger.logger.warning",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "o... |
1584126381 | # -*- coding: utf-8 -*-
import os
from django.utils.translation import ugettext_lazy as _
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from filer.settings import FILER_STATICMEDIA_PREFIX
from cmsplugin_filer_html5video.models import FilerHTML5Video
class FilerHTML5VideoPlugin(CMSPluginBase):
    """django-cms plugin rendering a filer-backed HTML5 <video> element."""

    model = FilerHTML5Video
    name = _("HTML5 Video (Filer)")
    render_template = "cmsplugin_filer_html5video/video.html"
    text_enabled = True

    general_fields = [
        'title',
        ('width', 'height'),
        'auto_play',
        'auto_hide',
        'fullscreen',
        'loop',
    ]
    fieldsets = [
        (None, {
            'fields': general_fields,
        }),
        (_('formats'), {
            'fields': ('video_mp4', 'video_webm', 'video_ogv', 'image')
        })
    ]

    def render(self, context, instance, placeholder):
        """Add a MIME-type -> URL map of the uploaded formats to the context."""
        formats = {}
        for field_name in ('video_mp4', 'video_webm', 'video_ogv'):
            # Only include formats that actually have a file attached
            # (checking the FK id avoids a database hit for empty fields).
            if getattr(instance, field_name + '_id'):
                mime_type = field_name.replace('_', '/')
                formats[mime_type] = getattr(instance, field_name).url
        context.update({
            'object': instance,
            'placeholder': placeholder,
            'formats': formats,
        })
        return context

    def icon_src(self, instance):
        """Return the static icon shown for this plugin inside text editors."""
        return os.path.normpath(u"%s/icons/video_%sx%s.png" % (FILER_STATICMEDIA_PREFIX, 32, 32,))
plugin_pool.register_plugin(FilerHTML5VideoPlugin) | beniwohli/cmsplugin-filer-html5video | cmsplugin_filer_html5video/cms_plugins.py | cms_plugins.py | py | 1,464 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "cms.plugin_base.CMSPluginBase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "cmsplugin_filer_html5video.models.FilerHTML5Video",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 15,
... |
36223514660 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 24 11:01:22 2023
@author: brand
"""
import math
import matplotlib.pyplot as plt
import numpy as np
timeStep = 0.01  # simulation step, seconds
# NOTE(review): the original comment said "20 seconds to take off" but the
# range below simulates 0-5 s.
timeRange = np.arange(0,5,timeStep).tolist()
# reynolds is 200k (the Cl/Cd tables in Aircraft are tabulated at this Re)
class Aircraft():
    """Point-mass aircraft model for a simple 2-D take-off simulation.

    Lift/drag coefficients are tabulated for angles of attack of 0, 5 and
    10 degrees (measured at Re ~ 200k); any other angle yields zero.
    """

    _CL_TABLE = {0: 0.4304, 5: 0.9118, 10: 1.2591}
    _CD_TABLE = {0: 0.01004, 5: 0.01213, 10: 0.02158}

    def __init__(self, weight, wingAvgChord, wingSpan):
        self.wingArea = wingAvgChord * wingSpan  # square meters
        self.xVel = 0
        self.yVel = 0
        self.xPos = 0
        self.yPos = 0
        self.weight = weight  # kilograms

    def calculateLift(self, AoA, curSpeed):
        """Set self.lift (N) from the tabulated Cl at angle of attack *AoA*."""
        try:
            Cl = self._CL_TABLE[AoA]
        except KeyError:
            Cl = 0
            print("Incorrect Angle")
        self.lift = (0.5) * 1.225 * curSpeed * curSpeed * Cl * self.wingArea  # Newtons

    def calculateDrag(self, AoA, curSpeed):
        """Set self.drag (N) from the tabulated Cd at angle of attack *AoA*."""
        try:
            Cd = self._CD_TABLE[AoA]
        except KeyError:
            Cd = 0
            print("Incorrect Angle")
        self.drag = (0.5) * 1.225 * curSpeed * curSpeed * Cd * self.wingArea  # Newtons

    def calculatePropThrust(self, curSpeed, propDia, propPitch, propRPM):
        """Empirical propeller thrust model (N) from diameter/pitch/RPM."""
        self.thrust = (4.392e-8)*propRPM*(pow(propDia,3.5)/pow(propPitch,0.5))*((4.233e-4)*propRPM*propPitch-curSpeed)

    def calculateForces(self, AoA):
        """Resolve net x/y forces (N) including gravity and ground friction."""
        # Rolling friction applies only while the wheels are on the ground.
        fric_force = self.weight * 9.81 * 0.1 if self.yPos == 0 else 0
        angle = math.radians(AoA)
        self.xForces = math.cos(angle) * (self.thrust - self.drag) - fric_force
        self.yForces = math.cos(angle) * (self.lift) - self.weight * 9.81

    def calcVel(self, timeStep):
        """Euler-integrate velocity (treats force values as accelerations)."""
        self.xVel += self.xForces * timeStep
        self.yVel += self.yForces * timeStep

    def calcPos(self, timeStep):
        """Euler-integrate position, clamping altitude at ground level."""
        self.xPos += self.xVel * timeStep
        self.yPos += self.yVel * timeStep
        if self.yPos < 0:
            self.yPos = 0
            self.yVel = 0
class PID():
    """Discrete PID controller whose output is clamped to [0, 22000] (prop RPM)."""

    def __init__(self, P, I, D, step):
        self.pGain = P
        self.iGain = I
        self.dGain = D
        self.step = step      # controller sample time, seconds
        self.p = 0
        self.i = 0
        self.d = 0
        self.errSum = 0       # running integral of the error
        self.errPrev = 0      # previous error, used by the derivative term

    def gain(self, curAlt, tarAlt):
        """Update the controller with a (current, target) altitude pair."""
        err = tarAlt - curAlt
        self.i = self.errSum + err * self.step
        self.d = (err - self.errPrev) / self.step
        raw = err * self.pGain + self.iGain * self.i + self.dGain * self.d
        self.errPrev = err
        self.errSum = self.i
        # Saturate the command to the actuator's usable RPM range.
        self.output = max(min(22000, raw), 0)
# Fixed-AoA (0 degrees) take-off run: integrate the aircraft forward for
# the whole timeRange, recording state for plotting.
plane = Aircraft(1.3, 0.2, 1)
control = PID(700,140/2,140/8,timeStep)
xPos=[]
yPos=[]
xVel=[]
yVel=[]
lift=[]
thrust=[]
curSpeed=0
AoA=0
RPM_l=[]
RPM=22000  # propeller speed held constant (PID controller currently unused)
for x in timeRange:
    # Recompute aerodynamic forces at the current airspeed, then integrate.
    plane.calculateLift(AoA,curSpeed)
    plane.calculateDrag(AoA,curSpeed)
    plane.calculatePropThrust(curSpeed,7,3,RPM)
    plane.calculateForces(AoA)
    plane.calcVel(timeStep)
    plane.calcPos(timeStep)
    xPos.append(plane.xPos)
    yPos.append(plane.yPos)
    curSpeed=plane.xVel  # airspeed for the next step is the new ground speed
    xVel.append(plane.xVel)
    yVel.append(plane.yVel)
    lift.append(plane.yForces)
    thrust.append(plane.thrust)
    #RPM_l.append(control.output/1000)
# Alternative traces left for quick experimentation:
#plt.plot(timeRange,yPos/199)
#plt.plot(timeRange,thrust)
#plt.plot(timeRange,lift)
#plt.plot(timeRange,xPos)
plt.plot(timeRange,yPos)
#plt.plot(timeRange,thrust)
#plt.plot(timeRange, RPM_l)
#plt.plot(timeRange,yVel)
plt.legend(['alt'])
plt.xlabel('Time (s)')
plt.ylabel(['Meters'])
{
"api_name": "numpy.arange",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 56,
... |
6701340278 | import cv2
import numpy as np
import imgaug.augmenters as iaa
import imgaug as ia
import torchvision
from torchvision import transforms
from PIL import Image, ImageEnhance, ImageOps
from RandAugment.augmentations import Lighting, RandAugment
class ResizeImage(object):
    """Resize a PIL image so its shorter side matches the target size,
    preserving the aspect ratio (train-time preprocessing)."""

    def __init__(self, height=256, width=256):
        self.height = height
        self.width = width

    def __call__(self, img):
        pixels = np.array(img)
        h, w = pixels.shape[:2]
        if h < w:
            # Height is the short side: scale it to self.height.
            w = int(self.height * w * 1.0 / h)
            h = self.height
        else:
            # Width is the short side: scale it to self.width.
            h = int(self.width * h * 1.0 / w)
            w = self.width
        resized = cv2.resize(pixels, (w, h), interpolation=cv2.INTER_CUBIC)
        return Image.fromarray(resized)
class ResizeImageVal(object):
    """Resize so the longer side fits inside the target box, then pad up to a
    fixed width x height (deterministic validation-time preprocessing)."""

    def __init__(self, height=256, width=256):
        self.height = height
        self.width = width
        self.pad_fix = iaa.PadToFixedSize(width=width, height=height)

    def __call__(self, img):
        pixels = np.array(img)
        h, w = pixels.shape[:2]
        if h > w:
            # Height is the long side: shrink it to self.height.
            w = int(self.height * w * 1.0 / h)
            h = self.height
        else:
            # Width is the long (or equal) side: shrink it to self.width.
            h = int(self.width * h * 1.0 / w)
            w = self.width
        resized = cv2.resize(pixels, (w, h), interpolation=cv2.INTER_CUBIC)
        padded = self.pad_fix.augment_image(resized)
        return Image.fromarray(padded)
def sometimes(aug): return iaa.Sometimes(0.5, aug)
class imgaugAugment(object):
    """Heavy train-time augmentation pipeline built from imgaug.

    Applies flips, crop/pad and affine warps, plus a random subset of 0-5
    photometric/geometric distortions, all in random order.
    """

    def __init__(self):
        super(imgaugAugment, self).__init__()
        self.seq = iaa.Sequential(
            [
                # apply the following augmenters to most images
                iaa.Fliplr(0.5),  # horizontally flip 50% of all images
                iaa.Flipud(0.2),  # vertically flip 20% of all images
                # crop images by -5% to 10% of their height/width
                sometimes(iaa.CropAndPad(
                    percent=(-0.05, 0.1),
                    pad_mode=ia.ALL,
                    pad_cval=(0, 255)
                )),
                sometimes(iaa.Affine(
                    # scale images to 80-120% of their size, individually per axis
                    scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
                    # translate by -20 to +20 percent (per axis)
                    translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
                    rotate=(-45, 45),  # rotate by -45 to +45 degrees
                    shear=(-16, 16),  # shear by -16 to +16 degrees
                    # use nearest neighbour or bilinear interpolation (fast)
                    order=[0, 1],
                    # if mode is constant, use a cval between 0 and 255
                    cval=(0, 255),
                    # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                    mode=ia.ALL
                )),
                # execute 0 to 5 of the following (less important) augmenters per image
                # don't execute all of them, as that would often be way too strong
                iaa.SomeOf((0, 5),
                    [
                        # convert images into their superpixel representation
                        sometimes(iaa.Superpixels(
                            p_replace=(0, 1.0), n_segments=(20, 200))),
                        iaa.OneOf([
                            # blur images with a sigma between 0 and 3.0
                            iaa.GaussianBlur((0, 3.0)),
                            # blur image using local means with kernel sizes between 2 and 7
                            iaa.AverageBlur(k=(2, 7)),
                            # blur image using local medians with kernel sizes between 2 and 7
                            iaa.MedianBlur(k=(3, 11)),
                        ]),
                        iaa.Sharpen(alpha=(0, 1.0), lightness=(
                            0.75, 1.5)),  # sharpen images
                        iaa.Emboss(alpha=(0, 1.0), strength=(
                            0, 2.0)),  # emboss images
                        # search either for all edges or for directed edges,
                        # blend the result with the original image using a blobby mask
                        iaa.SimplexNoiseAlpha(iaa.OneOf([
                            iaa.EdgeDetect(alpha=(0.5, 1.0)),
                            iaa.DirectedEdgeDetect(
                                alpha=(0.5, 1.0), direction=(0.0, 1.0)),
                        ])),
                        # add gaussian noise to images
                        iaa.AdditiveGaussianNoise(loc=0, scale=(
                            0.0, 0.05*255), per_channel=0.5),
                        iaa.OneOf([
                            # randomly remove up to 10% of the pixels
                            iaa.Dropout((0.01, 0.1), per_channel=0.5),
                            iaa.CoarseDropout((0.03, 0.15), size_percent=(
                                0.02, 0.05), per_channel=0.2),
                        ]),
                        # invert color channels
                        iaa.Invert(0.05, per_channel=True),
                        # change brightness of images (by -10 to 10 of original value)
                        iaa.Add((-10, 10), per_channel=0.5),
                        # change hue and saturation
                        iaa.AddToHueAndSaturation((-20, 20)),
                        # either change the brightness of the whole image (sometimes
                        # per channel) or change the brightness of subareas
                        iaa.OneOf([
                            iaa.Multiply((0.5, 1.5), per_channel=0.5),
                            iaa.FrequencyNoiseAlpha(
                                exponent=(-4, 0),
                                first=iaa.Multiply((0.5, 1.5), per_channel=True),
                                second=iaa.LinearContrast((0.5, 2.0))
                            )
                        ]),
                        # improve or worsen the contrast
                        iaa.LinearContrast((0.5, 2.0), per_channel=0.5),
                        iaa.Grayscale(alpha=(0.0, 1.0)),
                        # move pixels locally around (with random strengths)
                        sometimes(iaa.ElasticTransformation(
                            alpha=(0.5, 3.5), sigma=0.25)),
                        # sometimes move parts of the image around
                        sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),
                        sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
                    ],
                    random_order=True
                )
            ],
            random_order=True
        )

    def __call__(self, img):
        # Expects a numpy array (imgaug convention); returns a PIL image.
        img = self.seq.augment_image(img)
        return Image.fromarray(img)
# Eigen-decomposition of ImageNet RGB pixel covariance, used by the
# AlexNet-style PCA "Lighting" noise transform below.
_IMAGENET_PCA = {
    'eigval': [0.2175, 0.0188, 0.0045],
    'eigvec': [
        [-0.5675, 0.7192, 0.4009],
        [-0.5808, -0.0045, -0.8140],
        [-0.5836, -0.6948, 0.4203],
    ]
}
class Augment(object):
    """Dispatches an image to the train or eval preprocessing pipeline.

    NOTE(review): `width` is stored but the crops/resizes below only use
    `height` (square outputs) -- confirm whether width is intentionally unused.
    """

    def __init__(self, width=320, height=320, phase='train'):
        super(Augment, self).__init__()
        self.phase = phase
        self.widht = width  # (sic) attribute name kept as-is for compatibility
        self.height = height
        # self.transform_train = torchvision.transforms.Compose([
        #     imgaugAugment(),
        # ])
        self.transform_train = transforms.Compose([
            imgaugAugment(),
            RandAugment(n=3, m=9),
            transforms.RandomResizedCrop(self.height, scale=(0.08, 1.0), interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
            ),
            transforms.ToTensor(),
            # PCA-based lighting noise using the ImageNet eigen-decomposition.
            Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
            transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225)),
        ])
        self.transform_test = transforms.Compose([
            transforms.Resize(self.height+32, interpolation=Image.BICUBIC),
            transforms.CenterCrop(self.height),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225)),
        ])

    def __call__(self, image):
        # Validation and test share the deterministic pipeline.
        if self.phase == 'train':
            image = self.transform_train(image)
        elif self.phase == 'valid' or self.phase == 'test':
            image = self.transform_test(image)
        return image
| toandaominh1997/ProductDetectionShopee | datasets/augment.py | augment.py | py | 8,295 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.fromarray",
... |
41118007213 | import logging
import time
import traceback
from pathlib import Path
from requests_tracker.request import WebRequestType
from requests_tracker.session import WebSessionFactory
from requests_tracker.storage import convert_HAR_to_markdown, write_HAR_to_local_file, CookiesFileStorage
from requests_tracker.util import LogHelper
if __name__ == '__main__':
    logger = LogHelper.configure(logging.DEBUG)

    # Session state (cookies, HAR capture) lives next to the project root.
    session_cache_path = Path(__file__).parent.parent.joinpath('session_cache')
    cookies_storage = CookiesFileStorage(session_cache_path)
    # creates session and pre-loads cookies from persisted local cookie storage
    web_session = WebSessionFactory.create(
        cookies_storage,
        default_referer='https://www.jet2holidays.com',
        sensitive_values=[],
        sensitive_params=[],
        retry_count=1,
        timeout=10.00
    )
    try:
        response1 = web_session.get('https://www.jet2holidays.com')
        # print(response1.text)
        time.sleep(1)
        response2 = web_session.post(
            url='https://www.jet2holidays.com/api/jet2/sitesearch/HotelAndRegionList',
            request_type=WebRequestType.XHR,
            data={
                'term': 'radi',
                'maxResults': 30
            }
        )
        print(response2.text)
    except Exception:
        # BUG FIX: traceback.print_exc() writes to stderr and returns None,
        # so the original call logged the literal string "None".
        # format_exc() returns the traceback text for the logger.
        logger.error(traceback.format_exc())
    finally:
        # persists cookies to local file
        cookies_storage.save(web_session.cookies)
        # writes to 'session-cache/session-DD-MM-YYYY HH-MM-SS.har' file
        write_HAR_to_local_file(session_cache_path, web_session.request_session_context)
        # converts HAR file to markdown file + response files in folder 'session-cache/session-DD-MM-YYYY HH-MM-SS/'
        convert_HAR_to_markdown(session_cache_path, web_session.request_session_context)
| eladeon/requests-tracker-python | examples/scraper.py | scraper.py | py | 1,851 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests_tracker.util.LogHelper.configure",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests_tracker.util.LogHelper",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "logging.DEBUG",
"line_number": 13,
"usage_type": "attribute"
... |
7166024264 | import pathlib
from typing import Any
import pytest
from competitive_verifier.models import (
AddtionalSource,
CommandVerification,
ConstVerification,
ResultStatus,
VerificationFile,
)
# Parametrised cases for VerificationFile parsing: each tuple is
# (expected model, raw input dict, expected .dict() serialisation).
test_parse_VerificationFile_params: list[
    tuple[VerificationFile, dict[str, Any], dict[str, Any]]
] = [
    (
        VerificationFile(),
        {},
        {
            "dependencies": set(),
            "document_attributes": {},
            "verification": [],
            "additonal_sources": [],
        },
    ),
    (
        VerificationFile(
            dependencies=set(
                [
                    pathlib.Path("bar1"),
                    pathlib.Path("bar2"),
                ]
            ),
        ),
        {
            "dependencies": [
                "bar1",
                "bar2",
            ],
        },
        {
            "dependencies": set(
                [
                    pathlib.Path("bar1"),
                    pathlib.Path("bar2"),
                ]
            ),
            "document_attributes": {},
            "verification": [],
            "additonal_sources": [],
        },
    ),
    (
        VerificationFile(
            document_attributes={
                "title": "Bar bar",
            },
        ),
        {
            "document_attributes": {
                "title": "Bar bar",
            },
        },
        {
            "dependencies": set(),
            "document_attributes": {
                "title": "Bar bar",
            },
            "verification": [],
            "additonal_sources": [],
        },
    ),
    (
        VerificationFile(
            verification=[ConstVerification(status=ResultStatus.SUCCESS)],
        ),
        {
            "verification": [
                {
                    "type": "const",
                    "status": "success",
                }
            ],
        },
        {
            "dependencies": set(),
            "document_attributes": {},
            "verification": [ConstVerification(status=ResultStatus.SUCCESS)],
            "additonal_sources": [],
        },
    ),
    (
        VerificationFile(
            verification=[ConstVerification(status=ResultStatus.SUCCESS)],
        ),
        {
            # A bare dict (not wrapped in a list) is also accepted.
            "verification": {
                "type": "const",
                "status": "success",
            },
        },
        {
            "dependencies": set(),
            "document_attributes": {},
            "verification": [ConstVerification(status=ResultStatus.SUCCESS)],
            "additonal_sources": [],
        },
    ),
    (
        VerificationFile(
            additonal_sources=[
                AddtionalSource(name="dummy", path=pathlib.Path("tmp/dummy.sh"))
            ]
        ),
        {
            "additonal_sources": [{"name": "dummy", "path": "tmp/dummy.sh"}],
        },
        {
            "dependencies": set(),
            "document_attributes": {},
            "verification": [],
            "additonal_sources": [
                {"name": "dummy", "path": pathlib.Path("tmp/dummy.sh")}
            ],
        },
    ),
]


@pytest.mark.parametrize(
    "obj, raw_dict, output_dict",
    test_parse_VerificationFile_params,
)
def test_parse_VerificationFile(
    obj: VerificationFile,
    raw_dict: dict[str, Any],
    output_dict: dict[str, Any],
):
    # Round trip: parsing the raw dict yields the model, and serialising
    # the model yields the expected output dict.
    assert obj == VerificationFile.parse_obj(raw_dict)
    assert obj.dict() == output_dict
# Cases for is_verification()/is_skippable_verification():
# (file, expected is_verification, expected is_skippable_verification).
# A file is "skippable" only when every verification entry is a const.
test_is_verification_params = [
    (
        VerificationFile(
            verification=[ConstVerification(status=ResultStatus.SUCCESS)],
        ),
        True,
        True,
    ),
    (
        VerificationFile(
            verification=[
                ConstVerification(status=ResultStatus.SUCCESS),
                ConstVerification(status=ResultStatus.FAILURE),
            ],
        ),
        True,
        True,
    ),
    (
        VerificationFile(
            verification=[CommandVerification(command="true")],
        ),
        True,
        False,
    ),
    (
        VerificationFile(
            verification=[
                ConstVerification(status=ResultStatus.SUCCESS),
                CommandVerification(command="true"),
            ],
        ),
        True,
        False,
    ),
    (
        VerificationFile(
            verification=[],
        ),
        False,
        False,
    ),
]


@pytest.mark.parametrize(
    "obj, is_verification, is_skippable_verification", test_is_verification_params
)
def test_is_verification(
    obj: VerificationFile,
    is_verification: bool,
    is_skippable_verification: bool,
):
    assert obj.is_verification() == is_verification
    assert obj.is_skippable_verification() == is_skippable_verification
| competitive-verifier/competitive-verifier | tests/models/test_file.py | test_file.py | py | 4,735 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "competitive_verifier.models.VerificationFile",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "competitive_verifier.models.VerificationFile",
"line_number": 18,
"usage_type": "... |
2077956577 |
from odoo import models, api, fields, _
# from odoo.exceptions import UserError
from datetime import datetime
from dateutil.relativedelta import relativedelta
import json
import xlsxwriter
_
from odoo.exceptions import ValidationError
from odoo.exceptions import UserError
import base64
import io
try:
import xlwt
except ImportError:
xlwt = None
class billingMonthModel(models.Model):
    """Master data model: a named billing month."""
    _name = 'billing.month'
    _description = 'Billing Month Model'

    name = fields.Char(string='Name', required=True)
    description = fields.Text(string='Description')
class AccountMoveReport(models.TransientModel):
    """One row of the recovery report (transient; rebuilt on each wizard run)."""
    _name = 'account.recovery.report.move.line'

    billing_cycle = fields.Char('Billing Cycle')
    total_issuance = fields.Integer('Total Billing (Bills Issuance)')
    no_of_std = fields.Integer('#No of Students')
    total_recovery = fields.Integer('Recovery')
    recovery_percentage = fields.Char('Percentage of Recovery on Amount')
class RecoveryReportWizard(models.TransientModel):
    """Wizard that builds a billing-cycle-wise fee recovery report and
    exports it to an Excel (.xls) attachment."""
    _name = "recovery.report.wizard"
    _description = 'Print Recovery Wizard'

    # selected_month = fields.Many2many('billing.month', string='Select Month')
    from_date = fields.Date(string='From')
    to_date = fields.Date(string='To')
    all_branch = fields.Boolean(string=" Select All Branches")
    one_branch = fields.Many2one('school.school', string='Select any one branch')
    account_recovery_report_line = fields.Many2many('account.recovery.report.move.line', string='Account report Line')
    # groups_ids = fields.Many2many('aging.invoice.group', string='Groups')

    def _branch_constrains(self):
        """Validate wizard input: exactly one branch option and both dates set."""
        if self.all_branch and self.one_branch:
            raise ValidationError(_('Sorry, You Must select only one option.'))
        elif not self.one_branch and not self.all_branch:
            raise ValidationError(_('Sorry, You Must select atleast one option.'))
        if not self.to_date or not self.from_date:
            raise ValidationError(_('Please Select the both dates.'))

    def list_months(self):
        """Return every month covered by [from_date, to_date] as 'Mon-YY' strings."""
        next_month = self.to_date + relativedelta(months=1)
        first_day_of_next_month = next_month.replace(day=1)
        # Subtract one day from the first day of the next month to get the
        # last day of the month containing to_date.
        last_day_of_month = first_day_of_next_month - relativedelta(days=1)
        covered_months = []
        current_month = self.from_date
        while current_month <= last_day_of_month:
            # Format the month as "Mon-YY" (e.g. Feb-22).
            covered_months.append(current_month.strftime("%b-%y"))
            current_month += relativedelta(months=1)
        return covered_months

    def action_print_report(self):
        """Aggregate issuance/recovery per billing month and store the
        resulting report lines on the wizard."""
        lines = []
        selected_month = self.list_months()
        # PERF: the invoice search does not depend on the month, so run it
        # once instead of once per month (it was previously inside the loop).
        if self.all_branch:
            inv_ids = self.env['account.move'].search([
                ('move_type', '=', 'out_invoice'),
                ('journal_id', '=', 125),
                ('state', '=', 'posted'),
                ('invoice_date', ">=", self.from_date),
                ('invoice_date', "<=", self.to_date)])
        else:
            inv_ids = self.env['account.move'].search([
                ('move_type', '=', 'out_invoice'),
                ('state', '=', 'posted'),
                ('journal_id', '=', 125),
                ('x_studio_current_branchschool', '=', self.one_branch.id),
                ('invoice_date', ">=", self.from_date),
                ('invoice_date', "<=", self.to_date)])
        for month in selected_month:
            stud_lst = []
            month_issuance = 0
            month_recovery = 0
            perc = 0
            for rec in inv_ids:
                invoice_month = rec.invoice_date.strftime("%b-%y")
                if invoice_month == month:
                    # Count each student at most once per month (UDID de-dup).
                    if rec.x_studio_udid_monthly_bills not in stud_lst:
                        stud_lst.append(rec.x_studio_udid_monthly_bills)
                    month_issuance = month_issuance + rec.amount_total
                    if rec.payment_state == 'paid':
                        month_recovery = month_recovery + rec.amount_total
            nostd = len(stud_lst)
            if month_issuance != 0:
                perc = round((month_recovery / month_issuance) * 100, 2)
            mvl = self.env['account.recovery.report.move.line'].create({
                "billing_cycle": month,
                "total_issuance": month_issuance,
                "no_of_std": nostd,
                "total_recovery": month_recovery,
                "recovery_percentage": str(perc) + '%',
            })
            lines.append(mvl.id)
        self.write({
            "account_recovery_report_line": [(6, 0, lines)],
        })

    def action_print_excel_recovery_report(self):
        """Validate input, build the report lines and export them as an
        .xls file shown in a popup window."""
        self._branch_constrains()
        self.action_print_report()
        if not xlwt:
            # BUG FIX: the original raised the builtin Warning class; use the
            # imported Odoo UserError so the message reaches the user properly.
            raise UserError(_(""" You Don't have xlwt library.\n Please install it by executing this command : sudo pip3 install xlwt"""))
        branch = "All Branches" if self.all_branch else self.one_branch.name
        filename = str(branch) + "-" + str(self.from_date) + "-" + str(self.to_date) + ".xls"
        workbook = xlwt.Workbook()
        worksheet = workbook.add_sheet('Recovery Report')
        # Only the two styles actually used below are kept.
        style_title = xlwt.easyxf(
            "font:bold on,; align: vertical center,horiz center; border: top thin, bottom thin, right thin, left thin")
        red_style_title = xlwt.easyxf('pattern: pattern solid, fore_colour pale_blue;'
            "font:bold on,; align: vertical center,horiz center; border: top thin, bottom thin, right thin, left thin")
        # Header row, merged over two rows.
        worksheet.write_merge(0, 1, 0, 0, "Billing Cycle.", style=red_style_title)
        worksheet.write_merge(0, 1, 1, 1, "Total Billing (Bills Issuance)", style=red_style_title)
        worksheet.write_merge(0, 1, 2, 2, "No of Std", style=red_style_title)
        worksheet.write_merge(0, 1, 3, 3, "Recovery", style=red_style_title)
        worksheet.write_merge(0, 1, 4, 4, "Percentage of Recovery on Amount", style=red_style_title)
        row = 2
        for rec in self.account_recovery_report_line:
            if rec:
                worksheet.write_merge(row, row, 0, 0, rec.billing_cycle, style=style_title)
                worksheet.write_merge(row, row, 1, 1, rec.total_issuance, style=style_title)
                worksheet.write_merge(row, row, 2, 2, rec.no_of_std, style=style_title)
                worksheet.write_merge(row, row, 3, 3, rec.total_recovery, style=style_title)
                worksheet.write_merge(row, row, 4, 4, rec.recovery_percentage, style=style_title)
                row += 1
        fp = io.BytesIO()
        workbook.save(fp)
        # BUG FIX: base64.encodestring was removed in Python 3.9;
        # encodebytes is the drop-in replacement.
        export_id = self.env['sale.day.book.report.excel'].create({
            'excel_file': base64.encodebytes(fp.getvalue()),
            'file_name': filename,
        })
        return {
            'view_mode': 'form',
            'res_id': export_id.id,
            'res_model': 'sale.day.book.report.excel',
            'type': 'ir.actions.act_window',
            'target': 'new',
        }
{
"api_name": "odoo._",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "odoo.models.Model",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line... |
43536075224 | import requests
import json
import os
import sys
import logging
logger = logging.getLogger(__name__)
def gdc_read_file(file_id="11443f3c-9b8b-4e47-b5b7-529468fec098"):
    """Fetch a BRCA1/BRCA2 slice of the given file from the GDC slicing API.

    Reads the GDC auth token from the file named by the GDC_TOKEN
    environment variable; exits the process if the variable is unset.
    Returns the raw response body as bytes.
    """
    data_endpt = "https://api.gdc.cancer.gov/slicing/view/{}".format(file_id)

    token_path = os.environ.get('GDC_TOKEN')
    if not token_path:
        logger.warning("GDC_TOKEN environment variable should point to GDC token file")
        sys.exit(1)

    with open(token_path, "r") as token:
        token_string = str(token.read().strip())

    params = {"gencode": ["BRCA1", "BRCA2"]}
    response = requests.post(
        data_endpt,
        data=json.dumps(params),
        headers={
            "Content-Type": "application/json",
            "X-Auth-Token": token_string,
        },
    )
    return response.content
| neksa/mutagene | mutagene/io/gdc.py | gdc.py | py | 807 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_... |
32724523948 | import tornado.ioloop
import tornado.web
import hashlib
import uuid
import json
from time import mktime
from datetime import datetime
from email.utils import formatdate
# UpYun account / bucket configuration.  Credentials are intentionally
# blank here and must be filled in before deployment.
up_user = ''
up_password = ''
up_method = 'PUT'
up_host = 'v1.api.upyun.com'
up_path = '/bucket/'
up_base_url = "http://bucket.b0.upaiyun.com/%s"
class MainHandler(tornado.web.RequestHandler):
    """Issue a signed header set for a direct-to-UpYun PUT upload."""

    def get(self):
        # Client-supplied metadata about the file it wants to upload.
        content_md5 = self.get_argument('md5', '')
        content_len = self.get_argument('len', '')
        content_type = self.get_argument('type', '')

        stamp = mktime(datetime.now().timetuple())
        date = formatdate(timeval=stamp, localtime=False, usegmt=True)

        # Random object key: md5 of a time-based UUID.
        # BUG FIX: hashlib.md5 requires bytes on Python 3, so the string
        # inputs below are encoded before hashing (three call sites).
        filename = hashlib.md5(uuid.uuid1().hex.encode('utf-8')).hexdigest()

        # UpYun signature: md5 over "METHOD&PATH&DATE&LENGTH&md5(password)".
        base_string = "%s&%s&%s&%s&%s" % (
            up_method,
            up_path + filename,
            date,
            content_len,
            hashlib.md5(up_password.encode('utf-8')).hexdigest())
        signature = hashlib.md5(base_string.encode('utf-8')).hexdigest()

        headers = {"Authorization": "UpYun %s:%s" % (up_user, signature),
                   "Content-Type": content_type,
                   "Content-MD5": content_md5,
                   "Date": date,
                   "Expect": ""}

        self.write(json.dumps({
            "headers": headers,
            "method": up_method,
            "host": up_host,
            "path": up_path + filename,
            "url": up_base_url % filename
        }))
# Route table: /storage returns signed upload credentials as JSON.
application = tornado.web.Application([
    (r"/storage", MainHandler),
])

if __name__ == "__main__":
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
| zhicheng/storage | main.py | main.py | py | 1,410 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "tornado.ioloop.web",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "tornado.ioloop",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "time.mktime",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.... |
16838640248 | import pytest
import requests_mock
from csvcubed.utils.cache import session
from csvcubed.definitions import ROOT_DIR_PATH
@pytest.fixture(scope="package", autouse=True)
def mock_http_session_qube_config_schema():
    """
    Package-scoped, auto-used fixture that mocks the HTTP response for the
    v1.0 qube-config JSON schema so tests never fetch it over the network.
    """
    schema_path = (
        ROOT_DIR_PATH
        / "csvcubed"
        / "schema"
        / "cube-config"
        / "v1_0"
        / "schema.json"
    )
    with session.cache_disabled(), requests_mock.Mocker(
        session=session, real_http=True
    ) as mocker:
        with open(schema_path) as schema_file:
            schema_text = schema_file.read()
        mocker.register_uri(
            "GET",
            "//purl.org/csv-cubed/qube-config/v1.0",
            text=schema_text,
        )
        yield session
| GDonRanasinghe/csvcubed-models-test-5 | csvcubed/tests/unit/readers/cubeconfig/v1_0/conftest.py | conftest.py | py | 844 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "csvcubed.utils.cache.session.cache_disabled",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "csvcubed.utils.cache.session",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "requests_mock.Mocker",
"line_number": 14,
"usage_type": "call"
... |
33875335541 | import torch
import wandb
from .utils import matrix_to_dict
class Logger(object):
    """Thin wrapper around Weights & Biases experiment tracking."""

    def __init__(self, hparams, model) -> None:
        super().__init__()
        self.hparams = hparams
        self._setup_exp_management(model)
        self.total_loss_values = None

    def _setup_exp_management(self, model):
        # Only talk to wandb when explicitly enabled in the hyperparameters.
        if self.hparams.use_wandb is True:
            wandb.init(
                entity="causal-representation-learning",
                project=self.hparams.project,
                notes=self.hparams.notes,
                config=self.hparams,
                tags=self.hparams.tags,
            )
            wandb.watch(model, log_freq=self.hparams.n_log_steps, log="all")

            # Register summary metrics so wandb keeps the best value seen.
            for metric_name, summary_kind in (
                ("total_loss", "min"),
                ("lin_dis_score", "max"),
                ("perm_dis_score", "max"),
            ):
                wandb.define_metric(metric_name, summary=summary_kind)

    def log_jacobian(
        self, dep_mat, name="gt_decoder", inv_name="gt_encoder", log_inverse=True
    ):
        """Log a Jacobian (and optionally its inverse) as wandb tables."""
        jacobian = dep_mat.detach().cpu()
        columns = [f"a_{i}" for i in range(dep_mat.shape[1])]

        decoder_table = wandb.Table(columns=columns, data=jacobian.tolist())
        self.log_summary(**{f"{name}_jacobian": decoder_table})

        if log_inverse is True:
            encoder_table = wandb.Table(columns=columns, data=jacobian.inverse().tolist())
            self.log_summary(**{f"{inv_name}_jacobian": encoder_table})
| rpatrik96/nl-causal-representations | care_nl_ica/logger.py | logger.py | py | 1,456 | python | en | code | 12 | github-code | 6 | [
{
"api_name": "wandb.init",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "wandb.watch",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "wandb.define_metric",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "wandb.define_metric",
... |
10368808313 | from . import get_help
__doc__ = get_help("help_autoban")
from telethon import events
from pyUltroid.dB.base import KeyManager
from . import LOGS, asst, ultroid_bot, ultroid_cmd
# Persistent list of chat ids with "do not disturb" (auto-kick) enabled.
Keym = KeyManager("DND_CHATS", cast=list)
def join_func(e):
    # Event filter: fire only for join events in chats marked as DND.
    return e.user_joined and Keym.contains(e.chat_id)
async def dnd_func(event):
    """Kick every user who just joined a DND chat, then clean up messages."""
    for user in event.users:
        try:
            # Kick the joiner, then delete the service message the kick produced.
            await (await event.client.kick_participant(event.chat_id, user)).delete()
        except Exception as ex:
            LOGS.error("Error in DND:")
            LOGS.exception(ex)
    # Remove the original join notification as well.
    await event.delete()
@ultroid_cmd(
    pattern="autokick (on|off)$",
    admins_only=True,
    manager=True,
    require="ban_users",
    fullsudo=True,
)
async def _(event):
    """`autokick on|off` — toggle do-not-disturb (auto-kick joiners) for this chat."""
    match = event.pattern_match.group(1)
    if match == "on":
        if Keym.contains(event.chat_id):
            return await event.eor("`Chat already in do not disturb mode.`", time=3)
        Keym.add(event.chat_id)
        # Register the join handler immediately so the setting takes effect
        # without restarting the bot.
        event.client.add_handler(dnd_func, events.ChatAction(func=join_func))
        await event.eor("`Do not disturb mode activated for this chat.`", time=3)
    elif match == "off":
        if not Keym.contains(event.chat_id):
            return await event.eor("`Chat is not in do not disturb mode.`", time=3)
        Keym.remove(event.chat_id)
        await event.eor("`Do not disturb mode deactivated for this chat.`", time=3)
# On startup, re-attach the join handler on both clients if any chat already
# has DND enabled.
if Keym.get():
    ultroid_bot.add_handler(dnd_func, events.ChatAction(func=join_func))
    asst.add_handler(dnd_func, events.ChatAction(func=join_func))
| TeamUltroid/Ultroid | plugins/autoban.py | autoban.py | py | 1,550 | python | en | code | 2,615 | github-code | 6 | [
{
"api_name": "pyUltroid.dB.base.KeyManager",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "telethon.events.ChatAction",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "telethon.events",
"line_number": 41,
"usage_type": "name"
},
{
"api_name"... |
19882697980 | import os
import trio
import ssl
from async_generator import asynccontextmanager
from structlog import get_logger
from typing import Optional, Union
from guardata.crypto import SigningKey
from guardata.api.transport import Transport, TransportError, TransportClosedByPeer
from guardata.api.protocol import (
DeviceID,
ProtocolError,
HandshakeError,
BaseClientHandshake,
AuthenticatedClientHandshake,
InvitedClientHandshake,
APIV1_AnonymousClientHandshake,
APIV1_AdministrationClientHandshake,
)
from guardata.client.types import (
BackendAddr,
BackendOrganizationAddr,
BackendOrganizationBootstrapAddr,
BackendInvitationAddr,
)
from guardata.client.backend_connection.exceptions import (
BackendConnectionError,
BackendNotAvailable,
BackendConnectionRefused,
BackendProtocolError,
)
logger = get_logger()
TIMEOUT_SERVER_CONNECT = 8
async def apiv1_connect(
    addr: Union[BackendAddr, BackendOrganizationBootstrapAddr, BackendOrganizationAddr],
    device_id: Optional[DeviceID] = None,
    signing_key: Optional[SigningKey] = None,
    administration_token: Optional[str] = None,
    keepalive: Optional[int] = None,
) -> Transport:
    """
    Open an APIv1 transport to the backend, choosing the handshake flavour
    from the supplied credentials: an administration token, or anonymous
    access to an organization (bootstrap or regular organization address).

    Note: passing a `device_id` raises — authenticated access goes through
    `connect_as_authenticated` instead (`signing_key` is unused here).

    Raises:
        BackendConnectionError
    """
    handshake: BaseClientHandshake
    if administration_token:
        # Administration handshake works on a plain backend address only.
        if not isinstance(addr, BackendAddr):
            raise BackendConnectionError(f"Invalid url format `{addr}`")
        handshake = APIV1_AdministrationClientHandshake(administration_token)
    elif not device_id:
        if isinstance(addr, BackendOrganizationBootstrapAddr):
            # Bootstrapping an organization: no root verify key exists yet.
            handshake = APIV1_AnonymousClientHandshake(addr.organization_id)
        elif isinstance(addr, BackendOrganizationAddr):
            handshake = APIV1_AnonymousClientHandshake(addr.organization_id, addr.root_verify_key)
        else:
            raise BackendConnectionError(
                f"Invalid url format `{addr}` "
                "(should be an organization url or organization bootstrap url)"
            )
    else:
        raise BackendConnectionError("Invalid v1 auth method")
    return await _connect(addr.hostname, addr.port, addr.use_ssl, keepalive, handshake)
async def connect_as_invited(addr: BackendInvitationAddr, keepalive: Optional[int] = None):
    """Open a backend transport using the invited (invitation-token) handshake."""
    invited_handshake = InvitedClientHandshake(
        organization_id=addr.organization_id,
        invitation_type=addr.invitation_type,
        token=addr.token,
    )
    return await _connect(addr.hostname, addr.port, addr.use_ssl, keepalive, invited_handshake)
async def connect_as_authenticated(
    addr: BackendOrganizationAddr,
    device_id: DeviceID,
    signing_key: SigningKey,
    keepalive: Optional[int] = None,
):
    """Open a backend transport authenticated as an existing device."""
    auth_handshake = AuthenticatedClientHandshake(
        organization_id=addr.organization_id,
        device_id=device_id,
        user_signkey=signing_key,
        root_verify_key=addr.root_verify_key,
    )
    return await _connect(addr.hostname, addr.port, addr.use_ssl, keepalive, auth_handshake)
async def _connect(
    hostname: str,
    port: int,
    use_ssl: bool,
    keepalive: Optional[int],
    handshake: BaseClientHandshake,
) -> Transport:
    """Open the TCP (optionally TLS) stream, build the transport, run the handshake.

    Raises:
        BackendNotAvailable: connection or transport-level failure
        plus whatever `_do_handshake` raises on protocol/handshake errors
    """
    try:
        # Bound the connection attempt so an unreachable backend fails fast.
        with trio.fail_after(TIMEOUT_SERVER_CONNECT):
            stream = await trio.open_tcp_stream(hostname, port)
    except (OSError, trio.TooSlowError) as exc:
        logger.debug("Impossible to connect to backend", reason=exc)
        raise BackendNotAvailable(exc) from exc
    if use_ssl:
        stream = _upgrade_stream_to_ssl(stream, hostname)
    try:
        transport = await Transport.init_for_client(stream, host=hostname)
        transport.handshake = handshake
        transport.keepalive = keepalive
    except TransportError as exc:
        logger.debug("Connection lost during transport creation", reason=exc)
        raise BackendNotAvailable(exc) from exc
    try:
        await _do_handshake(transport, handshake)
    except Exception as exc:
        # Don't leak the socket when the handshake fails, then re-raise.
        transport.logger.debug("Connection lost during handshake", reason=exc)
        await transport.aclose()
        raise
    return transport
def _upgrade_stream_to_ssl(raw_stream, hostname):
    """Wrap *raw_stream* in TLS, trusting SSL_CAFILE when set, else system CAs."""
    # The ssl context should be generated once and stored into the config,
    # however this is tricky (should ssl configuration be stored per device?)
    ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
    cafile = os.environ.get("SSL_CAFILE")
    if cafile:
        ssl_context.load_verify_locations(cafile)
    else:
        ssl_context.load_default_certs()
    return trio.SSLStream(raw_stream, ssl_context, server_hostname=hostname)
async def _do_handshake(transport: Transport, handshake):
    """Run the challenge/response handshake over *transport*.

    Raises:
        BackendNotAvailable: transport dropped mid-handshake
        BackendConnectionRefused: server rejected the handshake
        BackendProtocolError: malformed or unexpected protocol data
    """
    try:
        # Server sends a challenge; we answer and wait for the verdict.
        challenge_req = await transport.recv()
        answer_req = handshake.process_challenge_req(challenge_req)
        await transport.send(answer_req)
        result_req = await transport.recv()
        handshake.process_result_req(result_req)
    except TransportError as exc:
        raise BackendNotAvailable(exc) from exc
    except HandshakeError as exc:
        raise BackendConnectionRefused(str(exc)) from exc
    except ProtocolError as exc:
        transport.logger.exception("Protocol error during handshake")
        raise BackendProtocolError(exc) from exc
class TransportPool:
    """Bounded pool of reusable backend transports.

    At most `max_pool` transports may be in use concurrently (enforced via a
    semaphore); idle transports are recycled in FIFO order.
    """

    def __init__(self, connect_cb, max_pool):
        # `connect_cb` is an async factory producing a fresh connected transport.
        self._connect_cb = connect_cb
        self._transports = []
        self._closed = False
        self._lock = trio.Semaphore(max_pool)

    @asynccontextmanager
    async def acquire(self, force_fresh=False):
        """
        Yield a transport, reusing a pooled one unless `force_fresh` is set.

        Raises:
            BackendConnectionError
            trio.ClosedResourceError: if used after having being closed
        """
        async with self._lock:
            transport = None
            if not force_fresh:
                try:
                    # Fifo style to retrieve oldest first
                    transport = self._transports.pop(0)
                except IndexError:
                    pass
            if not transport:
                if self._closed:
                    raise trio.ClosedResourceError()
                transport = await self._connect_cb()
            try:
                yield transport
            except TransportClosedByPeer:
                # Peer already closed it: nothing to recycle, just propagate.
                raise
            except Exception:
                # Transport state is unknown after an error: drop it.
                await transport.aclose()
                raise
            else:
                # Healthy transport: return it to the pool for reuse.
                self._transports.append(transport)
| bitlogik/guardata | guardata/client/backend_connection/transport.py | transport.py | py | 6,416 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "structlog.get_logger",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "guardata.client.types.BackendAddr",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "g... |
72532004029 | # pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
from copy import deepcopy
import pytest
from pytest import MonkeyPatch
from settings_library.docker_registry import RegistrySettings
# Baseline registry environment shared by all tests; each test layers the one
# URL/path variable it exercises on top of this.
MOCKED_BASE_REGISTRY_ENV_VARS: dict[str, str] = {
    "REGISTRY_AUTH": "False",
    "REGISTRY_USER": "usr",
    "REGISTRY_PW": "pwd",
    "REGISTRY_SSL": "False",
}
def _add_parameter_to_env(env: dict[str, str], key: str, value: str) -> dict[str, str]:
registry_env = deepcopy(env)
registry_env[key] = value
return registry_env
def _mock_env_vars(monkeypatch: MonkeyPatch, env_vars: dict[str, str]) -> None:
    """Export every entry of *env_vars* into the (patched) process environment."""
    for name in env_vars:
        monkeypatch.setenv(name, env_vars[name])
@pytest.mark.parametrize(
    "env_key, env_var",
    [
        ("REGISTRY_PATH", "some_dev_path"),
        ("REGISTRY_URL", "some_prod_url"),
    ],
)
def test_model_ok(env_key: str, env_var: str, monkeypatch: MonkeyPatch) -> None:
    """The settings model resolves the registry URL from either env variable."""
    env = _add_parameter_to_env(MOCKED_BASE_REGISTRY_ENV_VARS, env_key, env_var)
    _mock_env_vars(monkeypatch, env)

    settings = RegistrySettings()

    assert settings
    assert settings.resolved_registry_url == env_var
def test_registry_path_none_string(monkeypatch: MonkeyPatch) -> None:
    """A literal "None" REGISTRY_PATH must fall back to REGISTRY_URL."""
    env = _add_parameter_to_env(MOCKED_BASE_REGISTRY_ENV_VARS, "REGISTRY_PATH", "None")
    env = _add_parameter_to_env(env, "REGISTRY_URL", "some_prod_url")
    _mock_env_vars(monkeypatch, env)

    settings = RegistrySettings()

    assert settings
    assert settings.resolved_registry_url == env["REGISTRY_URL"]
| ITISFoundation/osparc-simcore | packages/settings-library/tests/test_docker_registry.py | test_docker_registry.py | py | 1,754 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "copy.deepcopy",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pytest.MonkeyPatch",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pytest.MonkeyPatch",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "settings_library.... |
32060115586 | import torch
from torch import nn
from torchvision import models, transforms
class VGG16Extractor(nn.Module):
    """Frozen VGG-16 feature extractor.

    Forward returns the activations after relu1_2, relu2_2, relu3_3 and
    relu4_3 (submodule names keep the original VGG feature indices so
    state-dict keys stay compatible).
    """

    def __init__(self):
        super(VGG16Extractor, self).__init__()
        backbone = models.vgg16(pretrained=True).features
        self.relu_1_2 = nn.Sequential()
        self.relu_2_2 = nn.Sequential()
        self.relu_3_3 = nn.Sequential()
        self.relu_4_3 = nn.Sequential()
        stages = (
            (0, 4, self.relu_1_2),
            (4, 9, self.relu_2_2),
            (9, 16, self.relu_3_3),
            (16, 23, self.relu_4_3),
        )
        for start, stop, stage in stages:
            for idx in range(start, stop):
                stage.add_module(str(idx), backbone[idx])
        # Feature extractor only — never update the pretrained weights.
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, input):
        h_relu_1_2 = self.relu_1_2(input)
        h_relu_2_2 = self.relu_2_2(h_relu_1_2)
        h_relu_3_3 = self.relu_3_3(h_relu_2_2)
        h_relu_4_3 = self.relu_4_3(h_relu_3_3)
        return h_relu_1_2, h_relu_2_2, h_relu_3_3, h_relu_4_3
def gram(x):
    """Batched Gram matrix of feature maps, normalised by ch*h*w.

    x: tensor of shape (batch, channels, height, width);
    returns shape (batch, channels, channels).
    """
    batch, channels, height, width = x.size()
    feats = x.view(batch, channels, height * width)
    return feats.bmm(feats.transpose(1, 2)) / (channels * height * width)
def vgg_tensor_transformer():
    """Input pipeline matching VGG expectations: ImageNet normalisation
    followed by a resize to 224x224."""
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    resize = transforms.Resize(size=(224, 224))
    return transforms.Compose([normalize, resize])
| harsh020/image-colorization | colorizer/utils.py | utils.py | py | 1,588 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torchvision.models.vgg16",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torchvision.mode... |
9384835860 | import argparse
from click import secho
import sys
from DNScanner.DNScanner import DNScanner
savesys = sys.stdout
# Flags
parser = argparse.ArgumentParser(description='\t Scan domains https://github.com/ChinadaCam/DNScanner')
parser.add_argument('-d', '--domain', required=True, type=str, help='Set domain (example.com)')
parser.add_argument('-cS', '--checkSubdomains', const='Others\wordlists\subdomainlist.txt', nargs='?' , help='Check subdomains and give an output if founded. (Default path: Others\wordlists\10000-dnswords.txt) ')
parser.add_argument('-O', '--Output', const='Others\Discovers',nargs='?', help='Output to file.\n Default is Other/Discovers, change directory with --directory ')
parser.add_argument('-D', '--Directory', const='Others\Discovers',nargs='?', help='Define a directory to output.\n Default is Discovers')
parser.add_argument('-mx', '--mxrecords', nargs='?', const='True' ,help='Show Mail Exanger Records (MX RECORDS)')
parser.add_argument('-ns', '--Nameserver', nargs='?', const='True' ,help='Show Nameserver Records (NS RECORDS)')
parser.add_argument('-A', '--all', nargs='?', const='True' ,help='Run all parameters (output not included)')
parser.add_argument('-cn', '--cname', nargs='?', const='True' ,help='Show Canonical Name Records(CN Records)')
parser.add_argument('-W', '--whois', nargs='?', const='True' ,help='Who is (Clean format)')
parser.add_argument('-WJ', '--whoisJ', nargs='?', const='True' ,help='Who is (JSON)')
#parser.add_argument('-geo', '--geolocation', nargs='?', const='True' ,help='Try to get coordinates')
args = parser.parse_args()
def main():
    """Entry point: print the banner, build a DNScanner for --domain and run
    every scan selected by the parsed CLI flags (module-level `args`)."""
    print('------------------------------------------------')
    print('\t DNScanner '
          '\n\tMade by Tiago Faustino'
          '\n Project link: https://github.com/ChinadaCam/DNScanner ' )
    print('------------------------------------------------\n')
    Scanner = DNScanner(args.domain)
    if args.all:
        # -A: enable every lookup with the default subdomain wordlist.
        # Note getMX() runs here AND args.mxrecords is set, so MX is queried
        # twice under -A (kept as-is).
        Scanner.getNS()
        Scanner.whoIsJson()
        Scanner.subdomainspath = 'DNScanner\Others\wordlists\subdomainlist.txt'
        Scanner.subdomainbool = True
        Scanner.getCN()
        Scanner.getMX()
        args.mxrecords = True
    # check if output is used
    if args.Output:
        if args.Directory:
            Scanner.output(args.Directory)
        else:
            Scanner.output(args.Output)
    Scanner.start()
    if args.checkSubdomains:
        Scanner.subdomainspath = args.checkSubdomains
        Scanner.subdomainbool = True
    # Toggle mx
    if args.mxrecords:
        Scanner.getMX()
    if args.Nameserver:
        Scanner.getNS()
    if args.whois:
        Scanner.whoIs()
    if args.whoisJ:
        Scanner.whoIsJson()
    if args.cname:
        Scanner.getCN()
    # Restore stdout in case Scanner.output() redirected it to a file.
    sys.stdout = savesys
    secho("\n[+] Finished ", fg="green")
#Scanner.whoIs()
if __name__ == '__main__':
main()
| ChinadaCam/DNScanner | start.py | start.py | py | 2,894 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "sys.stdout",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "DNScanner.DNScanner.DNScanner",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "... |
11474271839 | '''
Created on Jan 9, 2010
@author: eric
'''
import asyncore
import socket
import time
from ParsedMessage import ParsedMessage
class Connection(asyncore.dispatcher):
    '''
    maintains the connection to the server

    asyncore-based socket wrapper: outgoing lines accumulate in `buffer`
    and are flushed by the event loop; incoming data is split into lines
    and dispatched as ParsedMessage commands.
    '''
    # Pending outgoing data, flushed by handle_write when the socket is writable.
    buffer = ""
    # Running byte counters for received/sent traffic.
    bytesIn = 0
    bytesOut = 0
    connectionAttempts = 0
    # Retry settings (only used by the disabled legacy code kept below).
    reconnectWait = 3
    maxAttempts = 100

    def __init__(self, Pyibber):
        '''
        ctor, pass in the Pyibber instance; immediately attempts to connect
        using the server address/port from the Pyibber config.
        '''
        asyncore.dispatcher.__init__(self)
        self.Pyibber = Pyibber
        self.omgPoniesConnect()
    """
    def omgPoniesConnect(self):
        print "omgPoniesConnect called"
        count = 0
        while (count < self.maxAttempts):
            config = self.Pyibber.config;
            server = str(config.get("pyib", "serverAddress"))
            port = int(config.get("pyib", "serverPort"))
            self.Pyibber.logger.info('attempt %d connecting to: %s:%d' % (count, server, port))
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            x = self.connect((server, port))
            print "x: (%s)" % x
            count = count + 1
            time.sleep(self.reconnectWait)
        self.Pyibber.logger.error('Unable to connect to server after %d tries' % self.maxAttempts)
        self.Pyibber.stop()
    """
    def omgPoniesConnect(self):
        # Single non-blocking connection attempt using host/port from config.
        config = self.Pyibber.config;
        server = str(config.get("pyib", "serverAddress"))
        port = int(config.get("pyib", "serverPort"))
        self.Pyibber.logger.info('attempt %d connecting to: %s:%d' % (self.connectionAttempts, server, port))
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((server, port))

    def write(self, message):
        # Queue a CRLF-terminated line for sending via the event loop.
        self.Pyibber.logger.debug("socket.send: [%s]" % message)
        self.buffer = self.buffer + message + "\r\n"
        # NOTE(review): this adds the whole buffer length, not just the new
        # message, so bytesOut over-counts when writes coalesce — confirm.
        self.bytesOut += len(self.buffer)

    def handle_connect(self):
        pass

    def handle_close(self):
        self.Pyibber.logger.debug("connection.handle_close")
        self.close()

    def handle_error(self):
        self.Pyibber.logger.debug("connection.handle_error")
        self.close()

    def handle_read(self):
        # Read a chunk, split into lines and dispatch each as a command.
        data = self.recv(4096)
        self.Pyibber.logger.debug('socket.recv: [%s]' % data)
        self.bytesIn += len(data)
        lines = data.splitlines()
        for line in lines:
            message = ParsedMessage(line)
            self.Pyibber.Commandx.createFromMessage(message)

    def writable(self):
        # asyncore hook: only request write events when data is pending.
        return (len(self.buffer) > 0)

    def handle_write(self):
        # Send as much as the socket accepts; keep the unsent remainder.
        sent = self.send(self.buffer)
        self.buffer = self.buffer[sent:]
    """
    def arghconnect(self, server, port):
        count = 0
        while (count < self.maxAttempts):
            server = str(config.get("pyib", "serverAddress"))
            port = int(config.get("pyib", "serverPort"))
            self.Pyibber.logger.info('attempt %d connecting to: %s:%d' % (count, server, port))
            try:
                self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.socket.connect((server, port))
                return
            except Exception, e:
                #self.socket.close()
                #self.socket = None
                self.Pyibber.logger.warning('socket fail: %s' % e)
                #time.sleep(self.reconnectWait)
                sys.exit(1)
            count = count + 1
        if self.socket is None:
            self.Pyibber.logger.error('unable to connect to server after %d tries' % self.maxAttempts)
            self.Pyibber.stop()
            return
    """
| ericbutera/pyib | src/Pyib/Connection.py | Connection.py | py | 3,734 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "asyncore.dispatcher",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "asyncore.dispatcher.__init__",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "asyncore.dispatcher",
"line_number": 29,
"usage_type": "attribute"
},
{
"ap... |
74883082427 | from collections import defaultdict
class UnionFind():
    """Disjoint-set (union-find) with path compression.

    Vertices are assumed to be 0-indexed integers in range(n).
    """

    def __init__(self, n):
        # par[i] == i means i is a root.
        self.par = [i for i in range(n)]

    def root(self, x):
        # Representative of x's set, compressing the path along the way.
        if self.par[x] != x:
            self.par[x] = self.root(self.par[x])
        return self.par[x]

    def same(self, x, y):
        # True when x and y belong to the same set.
        return self.root(x) == self.root(y)

    def unite(self, x, y):
        # Attach x's root beneath y's root (no-op when already joined).
        root_x = self.root(x)
        root_y = self.root(y)
        if root_x != root_y:
            self.par[root_x] = root_y
def main():
    """For each vertex, count the vertices connected to it in BOTH graphs
    (roads uf1, railways uf2), reading the edge lists from stdin."""
    N, K, L = map(int, input().split())
    uf1 = UnionFind(N)
    uf2 = UnionFind(N)
    # O(K)
    for _ in range(K):
        p, q = map(int, input().split())
        uf1.unite(p-1, q-1)
    # O(L)
    for _ in range(L):
        r, s = map(int, input().split())
        uf2.unite(r-1, s-1)
    # O(N) x O(log_N)
    # Count vertices per (root-in-graph1, root-in-graph2) pair: vertices
    # sharing the pair are connected in both graphs, so from a root's view
    # this counts the connected vertices (including the vertex itself).
    cnts = defaultdict(int)
    for i in range(N):
        pos = (uf1.root(i), uf2.root(i))
        cnts[pos] += 1
    ans = []
    for i in range(N):
        pos = (uf1.root(i), uf2.root(i))
        ans.append(cnts[pos])
    print(*ans)
if __name__ == '__main__':
main()
| kazuo-mu/at_coder_answers | ABC049/d_connectivity.py | d_connectivity.py | py | 1,393 | python | ja | code | 0 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 49,
"usage_type": "call"
}
] |
38831023014 | from hyperopt import hp, STATUS_OK
import numpy as np
from mne.filter import resample
from crossvalidate import crossvalidate,test_ensamble,test_naive, run_a_trial
from keras.utils import to_categorical
import keras.backend as K
import uuid
from utils import save_results,get_subj_split
from my_models import ShallowConvNet
import os
import sys
sys.path.append(os.path.join(os.path.split(os.getcwd())[0],'data_loader'))
from data import DataBuildClassifier,EEG_SAMPLE_RATE
RESULTS_DIR = "results_shallow/"
WEIGHTS_DIR = "weights_shallow/"
# Hyperopt search space: EEG resampling rate, dropout probability, and a
# log-uniform learning rate in [1e-5, 1e-3].
space = {'resample_to': hp.choice('resample_to', range(128, 501)),
         'dropoutRate': hp.uniform('dropoutRate', 0, 1),
         'lr': hp.loguniform('lr', -5 * np.log(10), -3 * np.log(10))
         }
def build_and_train_all_subjects(params,subjects,subj_tr_val_ind,subj_tst_ind):
    """Hyperopt objective: cross-validate ShallowConvNet per subject with the
    sampled hyperparameters, save weights/results under a unique id, and
    return -median(validation AUC) as the loss to minimise."""
    params_uuid = str(uuid.uuid4())[:5]
    subj_val_aucs,subj_tst_aucs_ens,subj_tst_aucs_naive = {},{},{}
    tmp_weights_res_path = os.path.join(WEIGHTS_DIR,params_uuid)
    # for subj in subjects.keys():
    # NOTE(review): loop is restricted to subjects 25 and 26 — presumably a
    # debugging leftover; confirm before relying on "all subjects" semantics.
    for subj in [25,26]:
        K.clear_session()
        tr_val_ind = subj_tr_val_ind[subj]
        tst_ind = subj_tst_ind[subj]
        x_tr_val,y_tr_val = subjects[subj][0][tr_val_ind], to_categorical(subjects[subj][1][tr_val_ind],2)
        x_tst, y_tst = subjects[subj][0][tst_ind], to_categorical(subjects[subj][1][tst_ind],2)
        # Downsample the EEG to the candidate rate from the search space.
        x_tr_val = resample(x_tr_val, up=1., down=EEG_SAMPLE_RATE/params['resample_to'], npad='auto', axis=1)
        x_tst = resample(x_tst, up=1., down=EEG_SAMPLE_RATE / params['resample_to'], npad='auto', axis=1)
        model_path = os.path.join(tmp_weights_res_path,str(subj))
        model = ShallowConvNet(params,Chans=x_tr_val.shape[2], Samples=x_tr_val.shape[1])
        # Reorder to (trials, 1, channels, samples) as the network expects.
        x_tr_val = x_tr_val.transpose(0, 2, 1)[:,np.newaxis,:,:]
        x_tst = x_tst.transpose(0, 2, 1)[:, np.newaxis, :, :]
        val_aucs, val_aucs_epochs,_ = crossvalidate(x_tr_val, y_tr_val, model, model_path)
        test_auc_ensemble = test_ensamble(x_tst,y_tst,model_path)
        # Retrain on all train+val data for the mean best epoch count, then test.
        test_naive_history = test_naive(x_tr_val, y_tr_val, x_tst, y_tst, model, int(np.mean(val_aucs_epochs)), model_path)
        test_auc_naive = test_naive_history['val_auc'][-1]
        subj_val_aucs[subj] = np.mean(val_aucs)
        subj_tst_aucs_ens[subj] = test_auc_ensemble
        subj_tst_aucs_naive[subj] = test_auc_naive
    median_val_aucs = np.median(list(subj_val_aucs.values()))
    # Rename the weights folder so its name encodes the achieved median AUC.
    weights_res_path = os.path.join(WEIGHTS_DIR, '%.2f_%s' % (median_val_aucs,params_uuid))
    os.rename(tmp_weights_res_path,weights_res_path)
    params_res_path = os.path.join(RESULTS_DIR, '%.2f_%s' % (median_val_aucs,params_uuid))
    save_results(params_res_path, subj_val_aucs,subj_tst_aucs_naive, subj_tst_aucs_ens, params)
    result= {
        'loss': -median_val_aucs,  # hyperopt minimises, so negate the AUC
        'real_loss': np.mean(list(subj_tst_aucs_naive.values())),
        'subj_tst_aucs_naive':subj_tst_aucs_naive,
        'subj_tst_aucs_ens':subj_tst_aucs_ens,
        'subj_val_aucs':subj_val_aucs,
        'status': STATUS_OK
    }
    return result
if __name__ == '__main__':
    # Ensure output folders exist before any trial writes to them.
    if not os.path.exists(RESULTS_DIR):
        os.makedirs(RESULTS_DIR)
    if not os.path.exists(WEIGHTS_DIR):
        os.makedirs(WEIGHTS_DIR)
    data = DataBuildClassifier('/home/likan_blk/BCI/NewData')
    subjects, subj_tr_val_ind, subj_tst_ind = get_subj_split(data)
    # split_subj = lambda x, ind: {key: (x[key][0][ind[key]], x[key][1][ind[key]]) for key in x}
    # subj_train_val = split_subj(subjects,subj_tr_val_ind)
    # subj_test = split_subj(subjects, subj_tst_ind)
    # Run three hyperopt trials, persisting results after each one.
    for t in range(3):
        run_a_trial(subjects, subj_tr_val_ind, subj_tst_ind,RESULTS_DIR,build_and_train_all_subjects,space)
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
34225982423 | #This file is part of Chess-game-tracker.
#Chess-game-tracker is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#Chess-game-tracker is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with Chess-game-tracker. If not, see <https://www.gnu.org/licenses/>.
from threading import Thread
import imutils
import cv2
import sys
import time
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue
class Capturer:
    """Threaded video-frame grabber.

    A daemon thread reads frames from cv2.VideoCapture into a bounded queue
    so the consumer thread never blocks on camera/file I/O.
    """

    def __init__(self, src=0):
        # src: camera index or video file path, passed straight to OpenCV.
        self.stream = cv2.VideoCapture(src)
        self.stopped = False
        # Bounded queue decouples the producer thread from the consumer.
        self.Q = Queue(maxsize=200)
        self.t = Thread(target=self.get, args=())
        self.t.daemon = True

    def start(self):
        # Launch the background capture thread; returns self for chaining.
        self.t.start()
        return self

    def running(self):
        # Still "running" while frames remain queued or capture is active.
        return self.more() or not self.stopped

    def get(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                break
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                # read the next frame from the file
                (grabbed, frame) = self.stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.stopped = True
                # if there are transforms to be done, might as well
                # do them on producer thread before handing back to
                # consumer thread. ie. Usually the producer is so far
                # ahead of consumer that we have time to spare.
                #
                # Python is not parallel but the transform operations
                # are usually OpenCV native so release the GIL.
                #
                # Really just trying to avoid spinning up additional
                # native threads and overheads of additional
                # producer/consumer queues since this one was generally
                # idle grabbing frames.
                # add the frame to the queue
                self.Q.put(frame)
            else:
                time.sleep(0.1)  # Rest for 10ms, we have a full queue
        # Release the capture device once the loop exits.
        self.stream.release()

    def stop(self):
        # Signal the producer loop to exit and wait for the thread to finish.
        self.stopped = True
        self.t.join()

    def more(self):
        # return True if there are still frames in the queue. If stream is not stopped, try to wait a moment
        tries = 0
        while self.Q.qsize() == 0 and not self.stopped and tries < 5:
            time.sleep(0.1)
            tries += 1
        return self.Q.qsize() > 0

    def read(self):
        # return next frame in the queue
        return self.Q.get()
| nandovm/chess-game-tracker | Chess-game-tracker/multithread/Capturer.py | Capturer.py | py | 2,793 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sys.version_info",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "Queue.Queue",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "threading.Thread",... |
26626826683 | # Import the qrcode library
import qrcode
# Create a qr code instance
qr = qrcode.QRCode(
    version = 1,  # smallest symbol size (21x21 modules)
    error_correction = qrcode.constants.ERROR_CORRECT_L,  # ~7% damage recovery
    box_size = 10,  # pixels rendered per module
    border = 4,  # quiet-zone width in modules (spec minimum)
)
# The data that you want to encode
data = "192.168.1.19:8765"
# Add the data
qr.add_data(data)
qr.make(fit=True)
# Create an image from the QR code instance
img = qr.make_image(fill_color="black", back_color="white")
# Save it somewhere, change the extension as needed:
img.save("./image_name.png")
# img.save("image_name.bmp")
# img.save("image_name.jpeg")
| Gex-devs/val_overlay | ts/BackUp_Local_Api_py/QRcode.py | QRcode.py | py | 564 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "qrcode.QRCode",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "qrcode.constants",
"line_number": 7,
"usage_type": "attribute"
}
] |
73573190267 | import pymysql
from dbutils.pooled_db import PooledDB
class MysqlPool:
    """Context manager over a process-wide PyMySQL connection pool.

    ``with MysqlPool() as db:`` yields an object exposing ``db.conn`` and
    ``db.cursor``; on clean exit the transaction is committed, on exception
    it is rolled back (printing the failing statement), and in both cases
    the connection is returned to the pool.
    """

    # Pool configuration; DictCursor makes rows come back as dicts.
    config = {
        'creator': pymysql,
        'host': "127.0.0.1",
        'port': 3306,
        'user': "tron",
        'password': "123456",
        'db': "vecrv_sun_airdrop_claimed",
        'charset': 'utf8',
        'maxconnections': 70,
        'cursorclass': pymysql.cursors.DictCursor
    }
    # One shared pool for every MysqlPool instance in the process.
    pool = PooledDB(**config)

    def __enter__(self):
        self.conn = MysqlPool.pool.connection()
        self.cursor = self.conn.cursor()
        return self

    def __exit__(self, type, value, trace):
        # `type` is the exception class, None (or falsy) on clean exit.
        if type == None or type == 0:
            self.conn.commit()
        else:
            # `_last_executed` is a private pymysql attribute holding the
            # exact SQL that failed — handy for debugging.
            print(f"mysql exec failed: \"{self.cursor._last_executed}\"\n"
                  f"{type.__name__}{value}\n"
                  f"{trace}")
            self.conn.rollback()
        self.cursor.close()
        self.conn.close()
def db_conn(func):
    """Decorator that runs *func* inside a pooled DB transaction.

    The wrapped function receives a MysqlPool context object as its first
    argument; commit/rollback is handled by MysqlPool.__exit__.
    """
    from functools import wraps

    # functools.wraps preserves the wrapped function's name/docstring so
    # logging and introspection still see the real function.
    @wraps(func)
    def wrapper(*args, **kw):
        with MysqlPool() as db:
            result = func(db, *args, **kw)
        return result
    return wrapper
class Mysql:
    """Static convenience wrappers for common MySQL operations.

    Every public method is wrapped with @db_conn, which injects a pooled
    connection wrapper as the first argument and commits/rolls back around
    the call.
    """

    @staticmethod
    @db_conn
    def getAll(db, sql, param=None):
        """
        @summary: execute a query and fetch the entire result set
        @param sql: query SQL; use placeholders for conditions and pass the
            actual values through [param]
        @param param: optional tuple/list of condition values
        @return: list of dict rows, or False when nothing matched
        """
        if param is None:
            count = db.cursor.execute(sql)
        else:
            count = db.cursor.execute(sql, param)
        if count>0:
            result = db.cursor.fetchall()
        else:
            result = False
        return result

    @staticmethod
    @db_conn
    def getOne(db, sql, param=None):
        """
        @summary: execute a query and fetch only the first row
        @param sql: query SQL with placeholders; values go through [param]
        @param param: optional tuple/list of condition values
        @return: dict row, or False when nothing matched
        """
        if param is None:
            count = db.cursor.execute(sql)
        else:
            count = db.cursor.execute(sql, param)
        if count>0:
            result = db.cursor.fetchone()
        else:
            result = False
        return result

    @staticmethod
    @db_conn
    def getMany(db, sql, num, param=None):
        """
        @summary: execute a query and fetch at most num rows
        @param sql: query SQL with placeholders; values go through [param]
        @param num: maximum number of rows to return
        @param param: optional tuple/list of condition values
        @return: list of dict rows, or False when nothing matched
        """
        if param is None:
            count = db.cursor.execute(sql)
        else:
            count = db.cursor.execute(sql, param)
        if count>0:
            result = db.cursor.fetchmany(num)
        else:
            result = False
        return result

    @staticmethod
    @db_conn
    def insertOne(db, sql, value):
        """
        @summary: insert a single record
        @param sql: INSERT statement with placeholders
        @param value: tuple/list of values for the record
        @return: id generated by the insert (see __getInsertId)
        """
        db.cursor.execute(sql, value)
        return Mysql.__getInsertId(db)

    @staticmethod
    @db_conn
    def insertMany(db, sql, values):
        """
        @summary: insert multiple records
        @param sql: INSERT statement with placeholders
        @param values: sequence of value tuples/lists, one per record
        @return: count of affected rows
        """
        count = db.cursor.executemany(sql,values)
        return count

    @staticmethod
    def __getInsertId(db):
        """
        Return the id generated by this connection's last insert operation,
        or 0 when there was none (relies on MySQL's @@IDENTITY variable).
        """
        db.cursor.execute("SELECT @@IDENTITY AS id")
        result = db.cursor.fetchall()
        return result[0]['id']

    @staticmethod
    def __query(db, sql, param=None):
        # Shared implementation for update/delete: returns affected row count.
        if param is None:
            count = db.cursor.execute(sql)
        else:
            count = db.cursor.execute(sql, param)
        return count

    @staticmethod
    @db_conn
    def update(db, sql, param=None):
        """
        @summary: update records
        @param sql: UPDATE statement using (%s, %s) style placeholders
        @param param: tuple/list with the new values
        @return: count of affected rows
        """
        return Mysql.__query(db, sql, param)

    @staticmethod
    @db_conn
    def delete(db, sql, param=None):
        """
        @summary: delete records
        @param sql: DELETE statement using (%s, %s) style placeholders
        @param param: tuple/list of condition values
        @return: count of affected rows
        """
        return Mysql.__query(db, sql, param)
| dpneko/pyutil | mysql_client.py | mysql_client.py | py | 5,065 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "pymysql.cursors",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "dbutils.pooled_db.PooledDB",
"line_number": 16,
"usage_type": "call"
}
] |
4510504745 | from tkinter import *
import tkinter as tk
import sqlite3
import sys
print("Imported")
# Single shared SQLite connection and Tk root used by every handler below.
con = sqlite3.connect("project.db")
print("Connected")
root = tk.Tk()
# Radiobutton group variables: v = gender (btnRegister), v2 = rent-a-driver
# (buttonClick3), v3 = payment method (buttonClick6).  v1 appears unused in
# this script.
v = tk.IntVar()
v1 = tk.IntVar()
v2 = tk.IntVar()
v3 = tk.IntVar()
def createtable():
    """Create the `vehicle` bookings table if it does not exist yet."""
    ddl = (
        "CREATE TABLE IF NOT EXISTS vehicle(NAME VARCHAR(200),"
        "CONTACT VARCHAR(200),"
        "VEHICLE VARCHAR(100),"
        "DATEOFBOOKING VARCHAR(100),"
        "DURATION VARCHAR(100),"
        "RENT VARCHAR(100))"
    )
    con.execute(ddl)
    print("CREATED SUCCESSFULLY")
def insertt(aname, acon, aveh, adob, adur, arent):
    """Read the six entry widgets and insert one booking row into `vehicle`."""
    values = tuple(widget.get() for widget in (aname, acon, aveh, adob, adur, arent))
    print(*values)
    print("Value is", values[0])
    insert = ("INSERT INTO vehicle(NAME,CONTACT,VEHICLE,DATEOFBOOKING,DURATION,RENT)"
              + " VALUES(?,?,?,?,?,?)")
    con.execute(insert, values)
    con.commit()
    print("INSERTED SUCCESSFULLY")
def btnClickdis():
    """Open a window listing every booking stored in the `vehicle` table."""
    winddis = tk.Toplevel(root)
    winddis.geometry('500x500+50+50')
    winddis.title("BOOKING RECORD")
    curs = con.cursor()
    curs.execute('SELECT * FROM vehicle')
    # Header row, then one grid row per booking record.
    captions = ("NAME", "CONTACT", "VEHICLE", "DATE OF BOOKING", "DURATION", "RENT")
    for col, caption in enumerate(captions):
        Label(winddis, text=caption, font=('arial', 10, 'bold')).grid(row=0, column=col)
    for i, row in enumerate(curs.fetchall()):
        for col in range(6):
            Label(winddis, text=row[col]).grid(row=i + 1, column=col)
def searching(e):
    """Look up bookings by the name typed in entry `e` and show them in a new window."""
    windsearch = tk.Toplevel(root, width=500, height=500)
    windsearch.title("SEARCH")
    windsearch.geometry('500x500+50+50')
    sname = e.get()
    print("Record of", sname, "is to be deleted")
    data = con.execute('SELECT NAME,CONTACT,VEHICLE,DATEOFBOOKING,DURATION,RENT FROM vehicle where NAME=?;', (sname,))
    # Values in column 4 (one row per matching record's fields) ...
    for row in data:
        for idx in range(6):
            Label(windsearch, text=row[idx], font=('Verdana', 12, 'bold')).grid(row=idx + 1, column=4)
    # ... captions in column 1.
    captions = ("NAME", "CONTACT", "VEHICLE", "DATE OF BOOKING", "DURATION", "RENT")
    for idx, caption in enumerate(captions):
        Label(windsearch, text=caption, font=('Verdana', 15, 'bold')).grid(row=idx + 1, column=1)
def delcnfrm(e):
    """Ask the user to confirm deletion of the record named in entry `e`."""
    winddelcnfrm = tk.Toplevel(root)
    winddelcnfrm.geometry('250x250+50+50')
    winddelcnfrm.title("DELETE?")
    print(e)
    delname = e.get()
    print(delname)
    Label(winddelcnfrm, font=('timesnewroman', 10, 'bold'), text="CONFIRM?",
          fg="black").grid(columnspan=2)
    # YES really deletes; NO just reopens the delete dialog.
    Button(winddelcnfrm, fg="black", font=('arial', 10, 'bold'), text="YES",
           command=lambda: deleterec(delname), relief="raise", width=10,
           height=3, bg="cyan").grid(row=2, column=0)
    Button(winddelcnfrm, fg="black", font=('arial', 10, 'bold'), text="NO",
           command=btnClickdel, relief="raise", width=10,
           height=3, bg="cyan").grid(row=2, column=1)
def deleterec(delname):
    """Delete every booking whose NAME equals `delname` and commit."""
    con.execute("DELETE from vehicle where NAME=?;", (delname,))
    con.commit()
    print("DELETED SUCCESSFULLY")
def btnLogin():
    # TODO: login flow is not implemented; the LOGIN button is a no-op.
    pass
def btnClickdel():
    """Open the delete dialog: an entry for the name plus a DELETE button."""
    winddel = tk.Toplevel(root)
    winddel.geometry('700x500+50+50')
    winddel.title("DELETE A RECORD")
    Label(winddel, font=('timesnewroman', 10, 'bold'),
          text="Enter the name whose vehicle booking details you want to delete.",
          fg="black").grid(row=0, column=0)
    e = Entry(winddel, font=(20), bd=6)
    e.place(x=75, y=75)
    Button(winddel, text="DELETE", font=(20), bg="aquamarine", relief="raise",
           command=lambda: delcnfrm(e), width=10, height=1).place(x=150, y=150)
def btnClickLoginRegister():
    """Show the customer entry window with LOGIN and REGISTER choices."""
    windlgrg = tk.Toplevel(root)
    windlgrg.geometry('500x500+500+150')
    windlgrg.title("LOGIN OR REGISTER")
    for caption, handler, y in (("LOGIN", btnLogin, 100),
                                ("REGISTER", btnRegister, 230)):
        Button(windlgrg, text=caption, width=25, height=2, command=handler,
               bg="gold", bd=7, relief="raise", font=(30)).place(x=110, y=y)
def buttonClickA():
    """Admin menu: view all bookings, search a record, or delete a record."""
    winda = tk.Toplevel(root, width=500, height=500)
    winda.geometry('1000x1000+50+50')
    winda.title("I'M ADMIN")
    tk.Label(winda, text="CHOOSE YOUR OPTION :", font=('Verdana', 15, 'bold')).place(x=350, y=50)
    actions = (("VIEW THE BOOKINGS!", btnClickdis, 100),
               ("SEARCH A RECORD!", buttonClick, 230),
               ("DELETE A RECORD!", btnClickdel, 360))
    for caption, handler, y in actions:
        Button(winda, text=caption, width=40, height=3, command=handler,
               bg="gold", bd=7, relief="raise", font=(30)).place(x=350, y=y)
def buttonClickB():
    """Customer menu: book a vehicle, search a record, or leave a review."""
    windb = tk.Toplevel(root)
    windb.geometry('1000x1000+50+50')
    windb.title("I'M CUSTOMER")
    tk.Label(windb, text="CHOOSE YOUR OPTION :", font=('Verdana', 15, 'bold')).place(x=350, y=100)
    # NOTE(review): `buttonClick1` is not defined anywhere in this script, so
    # opening this menu raises NameError at runtime -- confirm intended target.
    actions = (("BOOK A VEHICLE!", buttonClick1, 150),
               ("SEARCH YOUR RECORD!", buttonClick, 280),
               ("GIVE YOUR REVIEWS!", buttonClick2, 410))
    for caption, handler, y in actions:
        Button(windb, text=caption, width=40, height=3, command=handler,
               bg="maroon1", bd=7, relief="raise", font=(30)).place(x=350, y=y)
def buttonClick():
    """Search window shared by admin and customer: name entry plus SEARCH button."""
    winds = tk.Toplevel(root, width=500, height=500)
    winds.title("SEARCH WINDOW")
    winds.geometry('1000x700+50+50')
    tk.Label(winds, font=('timesnewroman', 10, 'bold'),
             text="Enter the name whose vehicle booking details you are looking for!",
             fg="black").place(x=100, y=75)
    e = Entry(winds, font=(20), bd=6)
    e.place(x=100, y=125)
    Button(winds, text="SEARCH", font=(20), bg="tomato", relief="raise",
           command=lambda: searching(e)).place(x=450, y=200)
def btnRegister():
    """Open the personal-details registration form."""
    windc = tk.Toplevel(root)
    windc.geometry('1200x800+50+50')
    windc.title("PERSONAL DETAILS")
    tk.Label(windc, text="PERSONAL DETAILS", font=('arial', 40, 'bold'), bd=6,
             fg="magenta2", anchor='center').grid(row=0, column=1, columnspan=4, pady=5)
    captions = ("NAME:", "CONTACT_NO:", "ADDRESS:", "EMAIL_ID:", "GENDER:")
    for r, caption in enumerate(captions, start=1):
        tk.Label(windc, text=caption, font=(20)).grid(row=r, column=0, pady=5)
    # NOTE(review): REGISTER reopens the login/register chooser instead of
    # passing `aname`/`acon` on (e.g. to buttonClick3) -- confirm intended.
    Button(windc, text="REGISTER", font=(20), command=btnClickLoginRegister,
           bg="yellow", relief="raise").grid(row=7, column=1, rowspan=2)
    aname = Entry(windc, width=80, font=(20), bd=6)
    aname.place(x=150, y=90)
    acon = Entry(windc, width=80, font=(20), bd=6)
    acon.place(x=150, y=125)
    Text(windc, font=(20), height=5, bd=6).grid(row=3, column=1, pady=5)
    Text(windc, font=(20), height=0, bd=6).grid(row=4, column=1, pady=5)
    tk.Radiobutton(windc, text="Male", variable=v, value=1, font=(20)).grid(row=5, column=1, sticky=W)
    tk.Radiobutton(windc, text="Female", variable=v, value=2, font=(20)).grid(row=6, column=1, sticky=W)
def buttonClick2():
    """Review window: free-text boxes for name, review, complaints, suggestions."""
    windr = tk.Toplevel(root)
    windr.geometry('1000x1000+50+50')
    windr.title("REVIEW")
    tk.Label(windr, text="WELCOME TO THE REVIEW SECTION", font=('arial', 40, 'bold'),
             bd=6, fg="magenta2", anchor='center').place(x=30, y=5)
    prompts = (("Your Name here.", 80),
               ("Give your reviews here.", 125),
               ("If you have any complaints regarding our rental agency ,enter them here.", 250),
               ("Enter your suggestions ,if any.", 375))
    for text, y in prompts:
        tk.Label(windr, text=text, font=('System', 15)).place(x=30, y=y)
    Text(windr, height=0, width=60, font=(20), bd=6).place(x=200, y=80)
    for y in (175, 300, 425):
        Text(windr, height=2, font=(20), bd=6).place(x=30, y=y)
    # DONE has no command wired up -- reviews are not persisted anywhere.
    Button(windr, text="DONE", font=(20), bg="yellow", relief="raise").place(x=425, y=500)
def buttonClick3(aname, acon):
    """Vehicle-booking form; `aname`/`acon` are entry widgets passed through."""
    windn = tk.Toplevel(root)
    windn.geometry('1200x700+50+50')
    windn.title("VEHICLE BOOKING DETAILS")
    tk.Label(windn, text="VEHICLE BOOKING DETAILS", font=('arial', 40, 'bold'),
             bd=6, fg="magenta2", anchor='center').place(x=200, y=5)
    Button(windn, text="VEHICLES FOR RENT", font=(20), command=buttonClick4,
           bg="orange", relief="raise").place(x=250, y=75)
    Button(windn, text="VIEW RATES", font=(20), command=buttonClick5,
           bg="orange", relief="raise").place(x=700, y=75)
    tk.Label(windn, text="ENTER THE NAME OF THE VEHICLE YOU WANT ON RENT :", font=(20)).place(x=30, y=125)
    for offset, vehicle in enumerate(("* BIKE", "* CAR", "* JEEP", "* BUS", "* TRUCK")):
        tk.Label(windn, text=vehicle, font=(20)).place(x=850, y=125 + 25 * offset)
    tk.Label(windn, text="DATE OF BOOKING:", font=(20)).place(x=30, y=300)
    tk.Label(windn, text="DURATION:", font=(20)).place(x=30, y=350)
    tk.Label(windn, text="RENT A DRIVER?", font=(20)).place(x=30, y=400)
    # NEXT closes over the entries created just below (bound at click time).
    Button(windn, text="NEXT", font=(20),
           command=lambda: buttonClick6(aname, acon, aveh, adob, adur),
           bg="orange", relief="raise").place(x=515, y=570)
    aveh = Entry(windn, width=80, font=(20), bd=6)
    aveh.place(x=30, y=250)
    adob = Entry(windn, width=80, font=(20), bd=6)
    adob.place(x=225, y=300)
    adur = Entry(windn, width=80, font=(20), bd=6)
    adur.place(x=225, y=350)
    tk.Radiobutton(windn, text="Yes", variable=v2, value=1, font=(20)).place(x=225, y=400)
    tk.Radiobutton(windn, text="No", variable=v2, value=2, font=(20)).place(x=225, y=450)
def buttonClick4():
    """List the vehicle categories available for rent."""
    windv = tk.Toplevel(root)
    windv.geometry('700x500+50+50')
    windv.title("VEHICLES FOR RENT")
    lines = ("THE VEHICLES WHICH ARE AVAILABLE FOR RENT WITH US ARE:",
             "BIKES", "CARS ", "JEEPS", "BUSES", "TRUCKS")
    for line in lines:
        tk.Label(windv, text=line, font=(20)).grid()
def buttonClick5():
    """Show the fixed per-day rent for each vehicle category."""
    windr = tk.Toplevel(root)
    windr.geometry('500x500+50+50')
    windr.title("RENTS OF VEHICLES")
    rates = ("RENT OF BIKE IS RS. 300/DAY",
             "RENT OF CAR IS RS. 1500/DAY",
             "RENT OF JEEP IS RS. 2000/DAY",
             "RENT OF BUS IS RS. 9000/DAY",
             "RENT OF TRUCK IS RS. 10000/DAY")
    for rate in rates:
        tk.Label(windr, text=rate, font=(20)).grid()
def buttonClick6(aname, acon, aveh, adob, adur):
    """Payment page; SUBMIT finally writes the collected entries to the DB."""
    windp = tk.Toplevel(root)
    windp.geometry('1200x700+50+50')
    windp.title("PAYMENT DETAILS")
    tk.Label(windp, text="PAYMENT DETAILS", font=('arial', 40, 'bold'), bd=6,
             fg="magenta2", anchor='center').place(x=300, y=5)
    tk.Label(windp, text="TOTAL RENT:", font=(20)).place(x=30, y=100)
    tk.Label(windp, text="Enter the rent as per the vehicle chosen and the number of days for which the vehcile is rented",
             font=(15)).place(x=225, y=140)
    arent = Entry(windp, font=(20), bd=6)
    arent.place(x=200, y=100)
    tk.Label(windp, text="PAYMENT VIA:", font=(20)).place(x=30, y=175)
    warnings = (("*IN CASE OF ANY DAMAGE DONE TO THE VEHICLE,DAMAGE FINE WILL BE CHARGED.", 275),
                ("Damage amount is 50% of the rent!!!", 300),
                ("*IF VEHICLE IS NOT RETURNED BACK ON TIME,LATE FINE WILL BE CHARGED.", 350),
                ("The late fine is 25% of the rent(if late by a day)!!!", 375))
    for text, y in warnings:
        tk.Label(windp, text=text, font=('Verdana', 15, 'bold'), fg="firebrick1").place(x=30, y=y)
    tk.Radiobutton(windp, text="Credit Card", variable=v3, value=1, font=(20)).place(x=200, y=175)
    tk.Radiobutton(windp, text="Cash", variable=v3, value=2, font=(20)).place(x=200, y=225)
    Button(windp, text="SUBMIT", font=('arial', 20, 'bold'), fg="black", bg="cyan2",
           relief="raise",
           command=lambda: insertt(aname, acon, aveh, adob, adur, arent)).place(x=525, y=500)
# --- application bootstrap -------------------------------------------------
createtable()  # make sure the bookings table exists before any UI action
root.title('Vehicle Rental Agency')
root.geometry('1350x700+100+50')
root.config(bg="sky blue")
# Two stacked frames: the banner on top, the role buttons below it.
backgrnd = Frame(root, width=1600, height=300, relief="raise", bg="sky blue")
backgrnd.pack(side = TOP)
backgrnd1 = Frame(root, width=1600, height=400, relief="raise", bg="sky blue")
backgrnd1.pack(side = TOP)
label1 = Label(backgrnd, font=('times',35,'bold'), text="***VEHICLE RENTAL AGENCY***", fg='black', bd=10, bg="plum1").grid()
Button(backgrnd1, text="ADMIN", width=40, height=3, command=buttonClickA, bg="blue", bd=7, relief="raise", font=(30)).place(x=450,y=150)
Button(backgrnd1, text="CUSTOMER", width=40, height=3, command=btnClickLoginRegister, bg="blue", bd=7, relief="raise", font=(30)).place(x=450,y=280)
root.mainloop()  # blocks until the main window is closed
| karankhat/Vehicle_Rental_Agency | python.py | python.py | py | 14,870 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tkinter.IntVar",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tkinter.IntVar",
"line_n... |
33317389259 | import pandas as pd
import streamlit as st
import folium
from streamlit_folium import st_folium
# Page header.
st.title("Peta Nilai Properti Kota Tangerang")
st.markdown("Sumber Data: Pusat Pembinaan Profesi Keuangan")
st.markdown("")
# Load data
df_tangcity = pd.read_csv('df_property.csv')
# Set up map centred on the mean coordinate of the whole dataset.
tangcity_map = folium.Map(location=[df_tangcity['Latitude'].mean(), df_tangcity['Longitude'].mean()], zoom_start=14)
# Create sidebar for filtering; each widget narrows the previous selection
# (Kecamatan -> Desa/Kelurahan -> valuation objective -> property types).
st.sidebar.subheader("")
st.sidebar.subheader("Filter Atribut Data Properti")
selected_subdistrict = st.sidebar.selectbox("Pilih Kecamatan", df_tangcity['Kecamatan'].unique())
filtered_subdistrict = df_tangcity[df_tangcity['Kecamatan']== selected_subdistrict]
selected_village = st.sidebar.selectbox("Pilih Desa/Kelurahan", filtered_subdistrict['Desa'].unique())
filtered_village = filtered_subdistrict[filtered_subdistrict['Desa']== selected_village]
selected_valuation_objectives = [st.sidebar.selectbox("Pilih Tujuan Penilaian", df_tangcity['Tujuan Penilaian'].unique())]
filtered_valuation_objectives = filtered_village[filtered_village['Tujuan Penilaian'].isin(selected_valuation_objectives)]
selected_property_types = st.sidebar.multiselect("Pilih Jenis Properti (Bisa >1)", df_tangcity['Jenis_Objek'].unique())
filtered_data = filtered_valuation_objectives[filtered_valuation_objectives['Jenis_Objek'].isin(selected_property_types)]
selected_display = st.sidebar.multiselect("Pilih Nilai untuk Ditampilkan (Bisa >1)", options=["Nilai Tanah/m2", "Nilai Objek", "Total Nilai"], default=[])
# Re-centre the map: fall back to the full-dataset view when no rows match.
if len(filtered_data) == 0:
    tangcity_map = folium.Map(location=[df_tangcity['Latitude'].mean(), df_tangcity['Longitude'].mean()], zoom_start=14)
else:
    tangcity_map= folium.Map(location=[filtered_data['Latitude'].mean(), filtered_data['Longitude'].mean()], zoom_start=16)
# Loop over filtered data and add markers to map
for index, row in filtered_data.iterrows():
    lat = row['Latitude']
    lon = row['Longitude']
    nilai_tanah = row['Indikasi Nilai Tanah']
    nilai_objek = row['Nilai Objek']
    total_nilai = row['Total Nilai']
    tanggal_penilaian = row['Tgl Penilaian']
    # Construct html string based on selected values
    html = ""
    if "Nilai Tanah/m2" in selected_display:
        html += f"Nilai Tanah: {nilai_tanah}/m<sup>2</sup><br>"
    if "Nilai Objek" in selected_display:
        html += f"Nilai Objek: {nilai_objek}<br>"
    if "Total Nilai" in selected_display:
        html += f"Total Nilai: {total_nilai}<br>"
    # Always add Tanggal Penilaian as hover information
    html += f"Tgl. Penilaian: {tanggal_penilaian}"
    # Add marker to map with hover information
    folium.Marker(
        [lat, lon],
        tooltip=html
    ).add_to(tangcity_map)
# Display the map
tangcity_data = st_folium(tangcity_map, width=725, height=450)
| danarssidig/propertymap | property_map.py | property_map.py | py | 2,935 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "streamlit.title",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
9345123435 | from datetime import datetime
from elasticsearch_dsl import DocType, Date, Nested, Boolean, \
analyzer, InnerObjectWrapper, Completion, Keyword, Text
from elasticsearch_dsl.analysis import CustomAnalyzer as _CustomAnalyzer
from elasticsearch_dsl.connections import connections
# Global elasticsearch-dsl connection (hard-coded ES host).
connections.create_connection(hosts=["140.143.211.106"])


class CustomAnalyzer(_CustomAnalyzer):
    """Analyzer wrapper whose analysis definition is never sent to the server.

    Returning {} keeps elasticsearch-dsl from (re)defining the analyzer on the
    index -- presumably because `ik_max_word` (IK plugin) already exists on
    the ES server; confirm before reusing elsewhere.
    """

    def get_analysis_definition(self):
        return {}


# Chinese word segmentation (IK, finest-grained mode) plus lowercase filter.
ik_analyzer = CustomAnalyzer("ik_max_word", filter=["lowercase"])
class BaiduType(DocType):
    """Mapping for one Baidu Baike entry in the `baidu` index (`baike` doc type)."""

    suggest = Completion(analyzer=ik_analyzer)  # search-as-you-type suggestions
    url = Keyword()                             # exact match only, not analyzed
    title = Text(analyzer="ik_max_word")
    summary = Text(analyzer="ik_max_word")
    content = Text(analyzer="ik_max_word")

    class Meta:
        index = "baidu"
        doc_type = "baike"
def gen_suggest(index, info_tuple):
    """Build the elasticsearch-dsl ``suggest`` payload for one document.

    index: ES index name (currently unused -- the analyze call hard-codes
        "baidu"; presumably they are the same index, confirm).
    info_tuple: iterable of (text, weight) pairs, ordered from the most to the
        least important field.
    Returns a list of {"input": [tokens], "weight": w} dicts; a token that
    already appeared in a higher-weight field is not emitted again.
    """
    # Connect to elasticsearch using the doc type's configured alias.
    es = connections.create_connection(BaiduType._doc_type.using, hosts=["140.143.211.106"])
    used_words = set()
    suggests = []
    for text, weight in info_tuple:
        if text:
            # Tokenise via the server-side IK analyzer; keep tokens longer than 1 char.
            words = es.indices.analyze(index="baidu", analyzer="ik_max_word",
                                       params={'filter': ["lowercase"]}, body=text)
            analyzed_words = set(r["token"] for r in words["tokens"] if len(r["token"]) > 1)
            new_words = analyzed_words - used_words
            # Fix: record the tokens we have consumed so lower-weight fields do
            # not repeat them (previously `used_words` was never updated, so the
            # intended cross-field dedupe never happened).
            used_words |= analyzed_words
        else:
            new_words = set()
        if new_words:
            suggests.append({"input": list(new_words), "weight": weight})
    return suggests
if __name__ == "__main__":
    # Create/refresh the index mapping on the ES server when run directly.
    BaiduType.init()
{
"api_name": "elasticsearch_dsl.connections.connections.create_connection",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "elasticsearch_dsl.connections.connections",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "elasticsearch_dsl.analysis.CustomAnalyzer",
... |
3774041977 | #!/usr/bin/env python
import argparse
import yaml
import sys
import http.server
import http.client
import requests
# Default config file path; can be overridden with -c/--config.
configFile = 'config.yaml'
# Fallback values used to backfill any keys missing from the YAML config.
configDefault = {'server': {'host': "127.0.0.1", 'port': 2323},
                 'proxies': None,
                 'forwarder': {'host': "127.0.0.1", 'headers': ["Content-Type"]}}
# Populated from the YAML file in the __main__ block; read by the handlers.
config = {}
def GetRealHost(headers: http.client.HTTPMessage) -> str:
    """Return the host the client originally requested.

    Prefers ``X-Forwarded-Host`` (set by an upstream reverse proxy) over the
    plain ``Host`` header.  The previous version returned whichever of the two
    appeared first in header order, which could pick the proxy's own Host even
    when the original host was forwarded.  Returns None when neither exists.
    """
    plain_host = None
    for header, value in headers.items():
        name = header.lower()
        if name == 'x-forwarded-host':
            return value  # the forwarded (original) host always wins
        if name == 'host' and plain_host is None:
            plain_host = value
    return plain_host
def GetForwarder(forwardHost: str) -> dict:
    """Return the forwarder config entry whose ``host`` matches, or None."""
    matches = (fw for fw in config['forwarders'] if fw['host'] == forwardHost)
    return next(matches, None)
def TransformProxies(proxies: list) -> dict:
    """Flatten [{'target': t, 'proxy': p}, ...] into a {t: p} lookup dict."""
    mapping = {}
    for entry in proxies:
        mapping[entry['target']] = entry['proxy']
    return mapping
class RequestHandler(http.server.BaseHTTPRequestHandler):
    """Relays each incoming request to the forwarder target matched by Host."""

    def badresponse(self, msg: str):
        # Reply 400 with a plain-text error body and ask the client to close.
        self.send_response(http.HTTPStatus.BAD_REQUEST)
        self.send_header('Content-type', 'text/plain')
        self.send_header('Connection', 'close')
        self.end_headers()
        self.wfile.write(msg.encode())

    def do_request(self, method: str):
        # Resolve which forwarder applies from the (possibly X-Forwarded-) Host.
        targetPath = self.path
        forwardHost = GetRealHost(self.headers)
        forwarder = GetForwarder(forwardHost)
        if forwarder is None:
            self.badresponse('no matching forwarder')
            return
        forwardTarget = forwarder['target']
        proxies = None
        if not config['proxies'] is None:
            proxies = TransformProxies(config['proxies'])
        # Copy through only the request headers this forwarder whitelists.
        forwarderHeaders = {}
        for header, value in self.headers.items():
            if header.lower() in [x.lower() for x in forwarder['headers']]:
                forwarderHeaders[header] = value
        # Read the request body from the client (Content-Length may be absent).
        contentLength = int(self.headers.get('Content-Length', 0))
        forwardBody = self.rfile.read(contentLength)
        # Replay the request against the target (optionally through a proxy).
        try:
            response = None
            response = requests.request(
                method=method, url=f'{forwardTarget}{targetPath}', headers=forwarderHeaders, data=forwardBody, proxies=proxies)
            # Relay the response, dropping hop-by-hop / recomputed headers.
            self.send_response(response.status_code)
            for header, value in response.headers.items():
                if not header.lower() in ('transfer-encoding', 'content-encoding', 'content-length', 'connection', 'date', 'server'):
                    self.send_header(header, value)
            self.send_header('Connection', 'close')
            self.end_headers()
            r = response.content
            self.wfile.write(r)
        except Exception as e:
            self.badresponse(str(e))
        finally:
            # Always release the upstream connection.
            if not response is None:
                response.close()

    # One thin wrapper per HTTP verb, all funnelled through do_request().
    def do_GET(self):
        self.do_request("GET")

    def do_POST(self):
        self.do_request("POST")

    def do_PUT(self):
        self.do_request("PUT")

    def do_DELETE(self):
        self.do_request("DELETE")

    def do_PATCH(self):
        self.do_request("PATCH")

    def do_HEAD(self):
        self.do_request("HEAD")
if __name__ == "__main__":
    # Parse the command line; only the config-file path is configurable.
    parser = argparse.ArgumentParser(description='http forwarder')
    parser.add_argument('-c', '--config', default=configFile,
                        help=f'config file default is {configFile}')
    args = parser.parse_args()
    configFile = args.config
    # Load the YAML config and backfill every missing field with its default.
    with open(configFile) as file:
        config = yaml.safe_load(file)
        if config is None:
            config = {}
        config['server'] = config.get('server', configDefault['server'])
        config['server']['host'] = config['server'].get(
            'host', configDefault['server']['host'])
        config['server']['port'] = config['server'].get(
            'port', configDefault['server']['port'])
        config['proxies'] = config.get('proxies', configDefault['proxies'])
        if type(config['proxies']) == list:
            # Every proxy entry must name both its target scheme and proxy URL.
            for i in range(len(config['proxies'])):
                if not 'target' in config['proxies'][i]:
                    print(f"proxies[{i}].target is not defined",
                          file=sys.stderr)
                    exit(1)
                if not 'proxy' in config['proxies'][i]:
                    print(f"proxies[{i}].proxy is not defined",
                          file=sys.stderr)
                    exit(1)
        config['forwarders'] = config.get('forwarders', [])
        # Validate each forwarder target URL and fill in optional fields.
        for i in range(len(config['forwarders'])):
            if (not 'target' in config['forwarders'][i]) or (not type(config['forwarders'][i]['target']) is str):
                print(f"forwarder[{i}].target is not defined", file=sys.stderr)
                exit(1)
            target = config['forwarders'][i]['target']
            if (not target.startswith('http://')) and (not target.startswith('https://')):
                print(
                    f"forwarder[{i}].target not startswith http:// or https://", file=sys.stderr)
                exit(1)
            elif target.endswith('/'):
                print(
                    f"forwarder[{i}].target can not endswith /", file=sys.stderr)
                exit(1)
            config['forwarders'][i]['description'] = config['forwarders'][i].get(
                'description', f'forward {target}')
            config['forwarders'][i]['host'] = config['forwarders'][i].get(
                'host', configDefault['forwarder']['host'])
            config['forwarders'][i]['headers'] = config['forwarders'][i].get(
                'headers', configDefault['forwarder']['headers'])
    print(config)
    host = config['server']['host']
    port = config['server']['port']
    # Start the (single-threaded) HTTP server and serve until interrupted.
    serverAddress = (host, port)
    httpd = http.server.HTTPServer(serverAddress, RequestHandler)
    print(f'Starting HTTP Forward server on {host}:{port}...', flush=True)
    httpd.serve_forever()
| ecator/http-forwarder | http-forwarder.py | http-forwarder.py | py | 6,296 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "http.server.client",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "http.server",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "http.server.server",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "http.serv... |
33562211468 | import cv2
import numpy as np
def contraste(inp):
    """Stretch the image contrast so pixel values span the full 0..255 range.

    inp: HxWxC uint8 image; modified in place and also returned.
    Each value v becomes round((v - lo) * 255 / (hi - lo)) where lo/hi are the
    global min/max over the whole array (all channels together), matching the
    original per-pixel loop.
    Fix: a constant image (hi == lo) is returned unchanged instead of dividing
    by zero; the per-pixel Python loop is replaced by a vectorised expression.
    """
    lo = np.min(inp)
    hi = np.max(inp)
    if hi == lo:
        return inp
    scaled = np.round((inp.astype(np.float64) - lo) * (255.0 / (hi - lo)))
    inp[:] = scaled.astype(inp.dtype)
    return inp
def multi(img1, constante):
    """Scale every channel of the image by `constante`, clamping to [0, 255].

    img1: HxWxC uint8 image; modified in place and also returned.
    constante: scalar multiplier.  Negative products clamp to 0, products above
    255 clamp to 255, and fractional results are truncated by the uint8 cast --
    the same semantics as the original element-wise triple loop, but computed
    as one vectorised expression instead of O(H*W) Python iterations.
    """
    scaled = img1.astype(np.float64) * constante
    img1[:] = np.clip(scaled, 0, 255).astype(img1.dtype)
    return img1
# --- demo driver -----------------------------------------------------------
img1 = cv2.imread('tigre.jpeg')
img1 = cv2.resize(img1, (400, 400))
# Fix: contraste() modifies its argument in place, so stretch a copy -- the
# original code displayed the very same (already stretched) array twice.
img2 = contraste(img1.copy())
cv2.imshow('res1', img1)
cv2.imshow('res2', img2)
# Fix: without waitKey the windows are torn down as soon as the script exits;
# block until a key is pressed, then clean up.
cv2.waitKey(0)
cv2.destroyAllWindows()
| renzovc987/CG | multipliacion.py | multipliacion.py | py | 1,313 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.min",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 41,
... |
277948458 | import torch
import torch.nn as nn
from shapmagn.global_variable import Shape
from shapmagn.utils.obj_factory import obj_factory
from shapmagn.modules_reg.module_gradient_flow import gradient_flow_guide
from shapmagn.shape.point_sampler import point_fps_sampler
class GradFlowPreAlign(nn.Module):
def __init__(self, opt):
    """Configure the gradient-flow pre-alignment solver.

    `opt` follows the project's ParameterDict convention: opt[(key, default,
    help)] reads a value while registering its default and help text.
    """
    super(GradFlowPreAlign, self).__init__()
    self.opt = opt
    # Number of (feature-extract -> gradient-flow -> transform-fit) rounds.
    self.niter = opt[("niter", 10, "self iteration")]
    self.rel_ftol = opt[("rel_ftol", 1e-2, "relative tolerance")]
    self.plot = opt[("plot", False, "plot the shape")]
    self.method_name = opt[("method_name", "affine", "affine or rigid")]
    self.eval_scale_for_rigid = opt[
        (
            "eval_scale_for_rigid",
            True,
            "evaluate scale for the rigid transformation",
        )
    ]
    self.control_points = opt[
        (
            "control_points",
            -1,
            "compute prealign with # control point, points are sampled from farthest point sampling",
        )
    ]
    self.sampler = point_fps_sampler(self.control_points)
    self.use_barycenter_weight = opt[
        (
            "use_barycenter_weight",
            False,
            "use barycenter weight for partial registration",
        )
    ]
    pair_feature_extractor_obj = self.opt[
        ("pair_feature_extractor_obj", "", "feature extraction function")
    ]
    # Optional feature extractor built from its factory string; an empty
    # string disables it (raw point coordinates are used instead).
    self.pair_feature_extractor = (
        obj_factory(pair_feature_extractor_obj)
        if pair_feature_extractor_obj
        else None
    )
    self.get_correspondence_shape = self.solve_correspondence_via_gradflow()
    # Pick the transform solver once, based on the configured method.
    self.solver = (
        self.solve_affine if self.method_name == "affine" else self.solve_rigid
    )
def set_mode(self, mode):
    # `mode` is accepted for interface compatibility but ignored; this module
    # always operates in pre-alignment mode.
    self.prealign = True
def solve_affine(self, x, y, w):
    """Solve the weighted least-squares affine transform mapping x onto y.

    :param x: BxNxD source points
    :param y: BxNxD target points
    :param w: BxNx1 per-point weights
    :return: (A, transformed) where A is Bx(D+1)xD (linear part stacked over
        the translation row, homogeneous convention [x, 1] @ A) and
        transformed = [x, 1] @ A has shape BxNxD.
    """
    # Weighted normal equations: A = (X^T W X)^{-1} (X^T W y) with X = [x | 1].
    X = torch.cat((x, torch.ones_like(x[:, :, :1])), dim=2)  # (B, N, D+1)
    Xt_wX = X.transpose(2, 1) @ (w * X)  # (B, D+1, D+1)
    Xt_wy = X.transpose(2, 1) @ (w * y)  # (B, D+1, D)
    # Fix: torch.solve was deprecated in 1.9 and removed in 1.13;
    # torch.linalg.solve(A, B) returns A^{-1} B directly (note the swapped
    # argument order relative to the old torch.solve(B, A)).
    A = torch.linalg.solve(Xt_wX, Xt_wy)  # (B, D+1, D)
    return A, X @ A
def solve_rigid(self, x, y, w):
    """Solve the weighted rigid (optionally scaled) transform mapping x onto y.

    Weighted Procrustes: centre both clouds, SVD the weighted cross-covariance,
    flip the last singular vector's sign when needed so the rotation has
    det = +1, then optionally recover an isotropic scale.

    :param x: BxNxD source points
    :param y: BxNxD target points
    :param w: BxNx1 per-point weights
    :return: (A, transformed) with A Bx(D+1)xD, same convention as solve_affine.
    """
    B, N, D = x.shape[0], x.shape[1], x.shape[2]
    device = x.device
    # Weighted centroids of both clouds.
    sum_w = w.sum(1, keepdim=True)
    mu_x = (x * w).sum(1, keepdim=True) / sum_w
    mu_y = (y * w).sum(1, keepdim=True) / sum_w
    x_hat = x - mu_x
    wx_hat = x_hat * w
    y_hat = y - mu_y
    wy_hat = y_hat * w
    # Weighted cross-covariance (weights enter on both sides, i.e. w^2).
    a = wy_hat.transpose(2, 1) @ wx_hat  # BxDxN @ BxNxD -> BxDxD
    # NOTE(review): torch.svd is deprecated in favour of torch.linalg.svd,
    # which returns V^T instead of V -- migrate carefully when upgrading.
    u, s, v = torch.svd(a)
    # Sign correction: force det(R) = +1 (proper rotation, no reflection).
    c = torch.ones(B, D).to(device)
    c[:, -1] = torch.det(u @ v)
    r = (u * (c[..., None])) @ v.transpose(2, 1)
    # Optimal isotropic scale: tr(A^T R) / tr(X^T W^T W X).
    tr_atr = torch.diagonal(a.transpose(2, 1) @ r, dim1=-2, dim2=-1).sum(-1)
    tr_xtwx = torch.diagonal(wx_hat.transpose(2, 1) @ wx_hat, dim1=-2, dim2=-1).sum(
        -1
    )
    s = (
        (tr_atr / tr_xtwx)[..., None][..., None]
        if self.eval_scale_for_rigid
        else 1.0
    )
    # Translation aligning the (scaled, rotated) source centroid onto target's.
    t = mu_y - s * (r @ mu_x.transpose(2, 1)).transpose(2, 1)
    A = torch.cat([r.transpose(2, 1) * s, t], 1)
    X = torch.cat((x, torch.ones_like(x[:, :, :1])), dim=2)  # (B, N, D+1)
    return A, X @ A
def compose_transform(self, A_prev, A_cur):
    """Compose two Bx(D+1)xD affine transforms: apply A_prev first, then A_cur."""
    dim = A_cur.shape[-1]
    linear_prev, trans_prev = A_prev[:, :dim, :], A_prev[:, dim:, :]
    linear_cur, trans_cur = A_cur[:, :dim, :], A_cur[:, dim:, :]
    composed_linear = linear_prev @ linear_cur            # BxDxD
    composed_trans = trans_prev @ linear_cur + trans_cur  # Bx1xD
    return torch.cat([composed_linear, composed_trans], 1)
def solve_correspondence_via_gradflow(self):
    """Return a partial that computes soft correspondences via gradient flow.

    Reads the gradflow mode and geomloss settings from `opt` and binds them
    into the guide function provided by module_gradient_flow.
    """
    from functools import partial

    self.gradflow_mode = self.opt[
        (
            "gradflow_mode",
            "grad_forward",
            " 'grad_forward' if only use position info otherwise 'ot_mapping'",
        )
    ]
    self.search_init_transform = self.opt[
        (
            "search_init_transform",
            False,
            " the 16(2D)/64(3D) initial transforms (based on position and ot similarity) would be searched and return the best one ",
        )
    ]
    self.geomloss_setting = self.opt[("geomloss", {}, "settings for geomloss")]
    # local_iter is a mutable tensor counter shared with the guide function.
    return partial(
        gradient_flow_guide(self.gradflow_mode),
        geomloss_setting=self.geomloss_setting,
        local_iter=torch.tensor([0]),
    )
def _solve_transform(self, source, flowed):
    # Fit the configured transform (affine or rigid, per self.solver) that
    # maps the source points onto their gradient-flowed correspondences.
    return self.solver(source.points, flowed.points, source.weights)
def extract_point_fea(self, flowed, target, iter=-1):
    """Default feature extractor: use the raw point coordinates as features."""
    for shape in (flowed, target):
        shape.pointfea = shape.points.clone()
    return flowed, target
def extract_fea(self, flowed, target, iter):
    """Extract per-point features, preferring the configured pair extractor."""
    extractor = self.pair_feature_extractor
    if extractor:
        return extractor(flowed, target, iter)
    return self.extract_point_fea(flowed, target, iter)
def find_initial_transform(self, source, target):
    """Grid-search a coarse initial transform between source and target.

    Tries all axis-aligned rotations in 90-degree steps (16 candidates in 2D,
    64 in 3D), scaled by the clouds' diameter ratio and nudged towards the
    target centre, and keeps the candidate with the smallest geometric (OT)
    distance per batch element.
    Returns (best_transform Bx(D+1)xD, transformed source Shape).
    """
    import numpy as np
    from scipy.spatial.transform import Rotation as R

    source_center = source.points.mean(dim=1, keepdim=True)
    target_center = target.points.mean(dim=1, keepdim=True)
    # Largest bounding-box edge, used to normalise the two clouds' scales.
    max_diameter = lambda x: (x.points.max(1)[0] - x.points.min(1)[0]).max(1)[0]
    scale = max_diameter(target) / max_diameter(source)
    bias_center = (
        target_center - source_center
    ) / 10  # avoid fail into the identity local minimum
    D = source.points.shape[-1]
    n_init = 16 if D == 2 else 64
    r = None
    # Candidate Euler angles: every 90-degree combination per axis (0..270).
    if D == 2:
        angle_comp = np.mgrid[0:271:90, 0:271:90].transpose(1, 2, 0).reshape(-1, D)
        r = R.from_euler("yx", angle_comp, degrees=True)
    elif D == 3:
        angle_comp = (
            np.mgrid[0:271:90, 0:271:90, 0:271:90]
            .transpose(1, 2, 3, 0)
            .reshape(-1, D)
        )
        r = R.from_euler("zyx", angle_comp, degrees=True)
    init_rotation_matrix = torch.tensor(r.as_matrix().astype(np.float32)).to(
        source.points.device
    )
    init_best_transformed = []
    init_best_transform = []
    # Evaluate all candidates batch-element by batch-element.
    for i, (
        b_source_points,
        b_target_points,
        b_source_weights,
        b_target_weights,
    ) in enumerate(
        zip(source.points, target.points, source.weights, target.weights)
    ):
        # Tile this element n_init times so all candidates run in one batch.
        b_source_points = b_source_points.repeat(n_init, 1, 1)
        b_target_points = b_target_points.repeat(n_init, 1, 1)
        b_source_weights = b_source_weights.repeat(n_init, 1, 1)
        b_target_weights = b_target_weights.repeat(n_init, 1, 1)
        b_init_rotation_bias = bias_center[i].repeat(n_init, 1, 1)
        b_transform = torch.cat(
            [init_rotation_matrix * scale[i], b_init_rotation_bias], 1
        )
        geo_dist = obj_factory(self.geomloss_setting["geom_obj"])
        # Apply every candidate transform in homogeneous coordinates.
        b_init_transformed = (
            torch.cat(
                (b_source_points, torch.ones_like(b_source_points[:, :, :1])), dim=2
            )
            @ b_transform
        )
        bdist = geo_dist(
            b_source_weights[..., 0],
            b_init_transformed,
            b_target_weights[..., 0],
            b_target_points,
        )
        # Keep the candidate with the lowest geometric distance.
        min_val, min_index = bdist.min(0)
        b_init_best_transformed = b_init_transformed[min_index]
        b_init_best_transform = b_transform[min_index]
        print("the best init transform is {}".format(b_init_best_transform))
        init_best_transformed.append(b_init_best_transformed)
        init_best_transform.append(b_init_best_transform)
    return torch.stack(init_best_transform, 0), Shape().set_data_with_refer_to(
        torch.stack(init_best_transformed, 0), source
    )
def sampling_input(self, toflow, target):
    """Downsample both shapes when control points are enabled; otherwise pass through."""
    if self.control_points > 0:
        return self.sampler(toflow), self.sampler(target)
    return toflow, target
def __call__(self, source, target, init_A=None):
    """
    Iteratively estimate an affine transform aligning source to target.

    Each iteration extracts features, finds soft correspondences, solves
    for an affine update, composes it with the previous estimate, and
    re-applies the composed transform to the original source points,
    stopping at the iteration limit or a relative tolerance.

    :param source: Shape with points BxNxD
    :param target_batch: Shape with points BxMxD
    :return: Bx(D+1)xD transform matrix
    """
    source, target = self.sampling_input(source, target)
    toflow = source
    A_prev = init_A if init_A is not None else None
    A = None
    if self.search_init_transform:
        # Grid-search an initial transform; overrides any provided init_A.
        A_prev, toflow = self.find_initial_transform(source, target)
    for i in range(self.niter):
        toflow, target = self.extract_fea(toflow, target, i)
        flowed, weight_map_ratio = self.get_correspondence_shape(toflow, target)
        if not self.use_barycenter_weight:
            A, transforme_points = self._solve_transform(toflow, flowed)
        else:
            # Temporarily swap in barycenter weights while solving.
            toflow_weights = toflow.weights
            toflow.weights = weight_map_ratio
            A, transforme_points = self._solve_transform(toflow, flowed)
            toflow.weights = toflow_weights
        A = self.compose_transform(A_prev, A) if A_prev is not None else A
        # Re-apply the composed transform to the ORIGINAL source points.
        transformed_points = (
            torch.cat(
                (source.points, torch.ones_like(source.points[:, :, :1])), dim=2
            )
            @ A
        )
        toflow = Shape().set_data_with_refer_to(transformed_points, source)
        if i > 0 and torch.norm(A - A_prev) < self.rel_ftol:
            print(
                "reach relative tolerance {}".format(torch.norm(A - A_prev).item())
            )
            break
        A_prev = A
    if self.plot:
        self.visualize(
            source, toflow, target, weight_map_ratio, self.geomloss_setting, i
        )
    return A
def visualize(
    self, source, transformed, target, weight_map_ratio, geomloss_setting, iter
):
    """Render source / transformed / target overlap; the correspondence
    weight ratio (log-scaled, min-max normalized) colors the middle pane."""
    from shapmagn.utils.visualizer import visualize_source_flowed_target_overlap, default_plot
    from shapmagn.demos.demo_utils import get_omt_mapping

    # mapped_fea = get_omt_mapping(geomloss_setting,source, target,
    #                              source.points[0], p=2, mode="hard", confid=0.0)
    # Log scale then min-max normalize for a readable color range.
    weight_map_ratio = torch.log10(weight_map_ratio + 1e-8)
    weight_map_ratio = (weight_map_ratio - weight_map_ratio.min()) / (
        weight_map_ratio.max() - weight_map_ratio.min()
    ).repeat(1, 1, 1)
    visualize_source_flowed_target_overlap(
        source.points,
        transformed.points,
        target.points,
        source.points,
        weight_map_ratio,
        target.points,
        "source",
        "attention",
        "target",
        source_plot_func=default_plot(cmap="viridis", rgb=True),
        flowed_plot_func=default_plot(cmap="magma", rgb=False),
        target_plot_func=default_plot(cmap="magma", rgb=True),
        opacity=(0.1, "linear", 0.02),
        show=True,
        add_bg_contrast=False,
    )
| uncbiag/shapmagn | shapmagn/modules_reg/module_gradflow_prealign.py | module_gradflow_prealign.py | py | 12,227 | python | en | code | 94 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "shapmagn.shape.point_sampler.point_fps_sampler",
"line_number": 31,
"usage_type": "call"
},
{
"api_na... |
73726701627 | from django.shortcuts import render
from .models import Post, Categories
# Create your views here.
def blog(request):
    """Render the blog index with all posts and the distinct set of each
    post's first category.

    Posts without any category are skipped; the original indexing with
    ``.all()[0]`` raised IndexError for uncategorised posts.
    """
    posts = Post.objects.all()
    categories = list({cats[0] for cats in (p.categories.all() for p in posts) if cats})
    return render(request, 'blog/blog.html',
                  {'posts': posts, 'categories': categories})
def category(request, category_id):
    """Render all posts belonging to a single category.

    Raises Categories.DoesNotExist for an unknown id (unchanged behavior).
    """
    cat = Categories.objects.get(id=category_id)
    posts = Post.objects.filter(categories=cat)
    # Leftover debug print removed.
    return render(request,
                  'blog/categories.html',
                  {'categories': cat, 'posts': posts})
| rebecalvarezc/django_clases | firstWeb/blogApp/views.py | views.py | py | 596 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.Post.objects.all",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.Post.objects",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "models.Post",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.shor... |
12877884293 | from sklearn import datasets
from sklearn.preprocessing import MaxAbsScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.exceptions import ConvergenceWarning
from numpy import mean, array
from articles_to_features import vectorize, get_feature_mappings
from newspaper import Article
import warnings
import pickle
import io
import os
import requests
def load_training_data():
    """Load and preprocess the training set from 'features_file.txt'.

    Returns:
        (X, y): max-abs scaled feature matrix and labels discretized to
        -1 (left), 0 (center) or 1 (right) around +/-7.5.
    """
    # 'with' closes the file handle, which the original leaked.
    with open('features_file.txt', 'rb') as fh:
        X, y = datasets.load_svmlight_file(fh)

    left_threshold = 7.5
    right_threshold = 7.5

    def discretize(val):
        # Map the continuous bias score onto {-1, 0, 1}.
        if val < -left_threshold:
            return -1
        elif val < right_threshold:
            return 0
        return 1

    return MaxAbsScaler().fit(X).transform(X), [discretize(val) for val in y]
def load_test_data():
    """Load and preprocess the AllSides test set from 'allsides_vectors.txt'.

    Returns:
        (X, y): max-abs scaled feature matrix and labels discretized to
        -1 (left), 0 (center) or 1 (right) at thresholds -2 / 2.
    """
    # 'with' closes the file handle, which the original leaked.
    with open('allsides_vectors.txt', 'rb') as fh:
        X, y = datasets.load_svmlight_file(fh)

    def discretize(val):
        if val <= -2:
            return -1
        elif val < 2:
            return 0
        return 1

    return MaxAbsScaler().fit(X).transform(X), [discretize(val) for val in y]
def load_model():
    """Create a fresh, untrained logistic-regression classifier."""
    classifier = LogisticRegression(solver='saga', random_state=0)
    return classifier
def load_trained_model():
    """Return a trained model, fitting and caching it on first use.

    The fitted model is pickled to 'left_right_model.pkl'; subsequent
    calls load it from disk instead of retraining.
    """
    model_path = 'left_right_model.pkl'
    if os.path.exists(model_path):
        # NOTE: unpickling is only safe because this file is produced by
        # us below — never point it at untrusted data.
        with open(model_path, 'rb') as fh:
            return pickle.load(fh)
    X, y = load_training_data()
    model = load_model()
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=ConvergenceWarning)
        model.fit(X, y)
    # 'with' closes the handles the original left dangling.
    with open(model_path, 'wb') as fh:
        pickle.dump(model, fh)
    return model
class Left_right_classifier(object):
    """Classifies articles as left (-1), center (0) or right (1) leaning."""

    def __init__(self):
        # Loads (training + pickling on first use) the underlying model.
        self.__model = load_trained_model()

    def classify_article_from_url(self, x_article_url):
        """Fetch an article over HTTP and classify its political leaning."""
        return self.classify_html_article(requests.get(x_article_url).content)

    def classify_html_article(self, x_article_html):
        """Extract text and title from raw HTML, then classify."""
        article = Article(url='')
        article.download(input_html=x_article_html)
        article.parse()
        return self.classify_article(article.text, article.title)

    def classify_article(self, x_article_text, x_article_title=''):
        """Vectorize title + text into features, then classify."""
        vectorized = vectorize(get_feature_mappings(), x_article_title + '\n' + x_article_text, 0)
        return self.classify_vectorized_article(vectorized)

    def classify_vectorized_article(self, x_vec):
        """Classify a feature vector (svmlight-format string or sparse matrix)."""
        if isinstance(x_vec, str):
            x_vec, _ = datasets.load_svmlight_file(io.BytesIO(x_vec.encode()), n_features=len(self.__model.coef_[0]))
        return self.__model.predict(x_vec)[0]
if __name__ == '__main__':
    # Hold out 25% of the training data to report test metrics.
    X, y = load_training_data()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=ConvergenceWarning)
        model = load_model()
        model.fit(X_train, y_train)
    y_predictions = model.predict(X_test)
    print(f'Model test accuracy_score={accuracy_score(y_test, y_predictions)}')
    print(classification_report(y_test, y_predictions, target_names=['left', 'center', 'right']))
    conf_matrix = confusion_matrix(y_test, y_predictions)
    print('Confusion Matrix')
    print(conf_matrix)
    # Cross-leaning confusion rates — the worst kind of error for this task.
    print(f' Left marked as right = {conf_matrix[0][2]/sum(conf_matrix[0])}')
    print(f' Right marked as left = {conf_matrix[2][0]/sum(conf_matrix[2])}')
    print()
    print(f' Center marked as right = {conf_matrix[1][2]/sum(conf_matrix[1])}')
    print(f' Center marked as left = {conf_matrix[1][0]/sum(conf_matrix[1])}')
    print()
    # Smoke-test on a handful of live articles (requires network access).
    classifier = Left_right_classifier()
    print(classifier.classify_article_from_url('https://www.vox.com/2020/4/20/21225016/protests-stay-at-home-orders-trump-conservative-group-michigan'))
    print(classifier.classify_article_from_url('https://www.cnn.com/2020/04/20/politics/aoc-2022-senate-schumer/index.html'))
    print(classifier.classify_article_from_url('https://www.vox.com/covid-19-coronavirus-us-response-trump/2020/4/19/21227175/coronavirus-trump-who-information-china-embeds-december'))
    print(classifier.classify_article_from_url('https://www.vice.com/en_us/article/4agzpn/texas-anti-lockdown-protesters-are-coming-for-fauci-now'))
    print(classifier.classify_article_from_url('https://www.infowars.com/trump-to-press-you-and-the-obama-administration-were-duped-for-years-by-china/'))
    print(classifier.classify_article_from_url('https://www.dailywire.com/news/poll-people-have-no-idea-joe-biden-is-talking-about-coronavirus'))
    print(classifier.classify_article_from_url('https://www.louderwithcrowder.com/opinion-sorry-democrats-its-not-the-republicans-who-are-nazis/'))
print(classifier.classify_article_from_url('https://dailycaller.com/2020/04/20/alexandria-ocasio-cortez-oil-drop-tweet-lost-jobs/')) | abhi-baireddy/IRProject | left_right_classifier.py | left_right_classifier.py | py | 5,085 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.datasets.load_svmlight_file",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sklea... |
18244520374 | import json
from datetime import datetime, timedelta
from news_utilities import add_news
cache_file = '/home/pi/AlarmClockProject/AlarmClock/cache/calendars/' + 'cal_ed.json'
homeworks = None
calendar = None
notes = None
datetime_now = datetime.now()
def str_to_datetime(date_str):
    """Parse 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM' into a datetime.

    Hour and minute are taken modulo 24 and 60, mirroring the original
    tolerance for out-of-range values such as '24:05'.
    """
    year = int(date_str[:4])
    month = int(date_str[5:7])
    day = int(date_str[8:10])
    if len(date_str) == 16:
        hour = int(date_str[11:13]) % 24
        minute = int(date_str[14:16]) % 60
        return datetime(year, month, day, hour, minute)
    return datetime(year, month, day)
def float_to_str(floating_number):
    """Format a number as a string, dropping the trailing '.0' for whole values."""
    if floating_number != float(int(floating_number)):
        return str(floating_number)
    return str(int(floating_number))
def get_ed_data():
    """Log in to EcoleDirecte and populate the module-level homeworks,
    calendar and notes caches.

    Credentials are read from a two-line file: 'id:user' then 'pwd:password'.
    """
    global homeworks, calendar, notes
    from EcoleDirect import EcoleDirect
    file_path = "/home/pi/credentials/EcoleDirecte/credentials.txt"
    # file as following : 'id:user\npwd:password
    creds = open(file_path, "r").read().split("\n")
    user = creds[0].split(':')[1]
    pwd = creds[1].split(':')[1]
    ed = EcoleDirect(user, pwd)
    homeworks = ed.getHW()  # Get HomeWork
    calendar = ed.getWT()  # Get WorkTime
    notes = ed.getNotes()  # Get Notes
def store_calendar():
    """Build calendar events from the EcoleDirecte timetable and cache them
    as JSON, flagging lessons whose subject still has pending homework."""
    if any(data is None for data in [homeworks, calendar, notes]):
        get_ed_data()
    home_works = []
    # Collect (subject code, day) pairs for homework not yet done.
    for work_day in homeworks:
        for work in homeworks[work_day]:
            code = work['codeMatiere']
            if not work['effectue']:
                home_works.append([code, work_day])
    events = []
    for lesson in calendar:
        dtstart = lesson['start_date']
        dtend = lesson['end_date']
        summary = lesson['codeMatiere']
        # TODO-flag a lesson when pending homework matches its day + subject.
        if any(dtstart[:10] == work[1] and summary == work[0] for work in home_works):
            todo = True
        else:
            todo = False
        if summary == '':
            continue
        event = {'DTSTART': dtstart,
                 'DTEND': dtend,
                 'SUMMARY': summary,
                 'TODO': todo,
                 'CAL_ID': '200'}
        events.append(event)
    # Store the events in a new calendar file
    with open(cache_file, 'w', encoding='utf-8') as jsonfile:
        json.dump(events, jsonfile, ensure_ascii=False, indent=4)
def get_calendar():
    """Read back the cached calendar events from the JSON cache file."""
    with open(cache_file, "r") as handle:
        return json.load(handle)
def get_latest_notes():
    """Summarize the last 10 days of notes per subject and publish them as
    a news entry via add_news.

    Each note renders as 'value[+|-][max] ': '+' means above class average,
    and the max is shown only when it is not the default /20.
    """
    if any(data is None for data in [homeworks, calendar, notes]):
        get_ed_data()
    last_n_days = 10
    notes_ = sorted(notes['notes'], key=lambda i: i['dateSaisie'])
    news_desc = ''
    notes_by_subject = {}
    for note in notes_:
        saisie_time = str_to_datetime(note['dateSaisie'])
        # Skip notes older than the reporting window.
        if saisie_time < datetime_now-timedelta(days=last_n_days):
            continue
        # French decimal commas -> dots before parsing.
        individual_note = float(note['valeur'].replace(",", "."))
        note_max = float(note['noteSur'].replace(",", "."))
        class_avg = float(note['moyenneClasse'].replace(",", "."))
        better_than_class = individual_note > class_avg
        note_display = (float_to_str(individual_note)
                        + ('+' if better_than_class else '-')
                        + (float_to_str(note_max) if note_max != 20.0 else "")
                        + " ")
        if not note['codeMatiere'] in notes_by_subject.keys():
            notes_by_subject[note['codeMatiere']] = ""
        notes_by_subject[note['codeMatiere']] += note_display
    for note_subject in notes_by_subject.keys():
        note = notes_by_subject[note_subject]
        news_desc += f"\n{note_subject} : {note}"
    add_news(300, datetime_now, 'Latest notes', news_desc)
if __name__ == "__main__":
store_calendar()
get_latest_notes()
| cg-Kdaf/RPIAlarmClock | src/ED_utilities.py | ED_utilities.py | py | 4,112 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.da... |
43085107371 | #!/bin/env python
import numpy as np
from matplotlib import pyplot as plt
import argparse
import sys
import parse_ats
def load(fname, density):
    """Load a runoff observation file and convert its units.

    Input columns are (time [s], flux [mol/s]); output columns are
    (time [days], runoff [m^3/day]) using the given molar density.
    """
    data = np.loadtxt(fname)  # units s, mol/s
    data[:, 0] = data[:, 0] / 86400.  # convert to days
    data[:, 1] = data[:, 1] / density * 86400  # convert to m^3/d
    return data
def plot(data, format='-', color='b', name=None, ax=None):
    """Plot runoff vs. time, creating a new figure/axes when none is given."""
    if ax is None:
        ax = plt.figure().add_subplot(111)
    ax.plot(data[:, 0], data[:, 1], format, color=color, label=name)
    ax.set_xlabel("time [days]")
    ax.set_ylabel("runoff [m^3 / day]")
    return ax
def load_area_rain(args):
    """Load total surface area and the rainfall time series from ATS output.

    :return: (area, time in days, rain series)
        NOTE(review): the rain series is multiplied by 86400, so despite
        the trailing comment it looks like m^3/day rather than m^3/s —
        confirm against the plotting code.
    """
    k, t, d = parse_ats.readATS(args.directory, args.filename)
    cv = d[args.area_key][k[0]][:]
    area = cv.sum()
    # Integrate rainfall rate over all surface cells at every time step.
    rain = np.array([(d[args.rainfall_rate_key][key][:] * cv).sum() for key in k]) * 86400
    return area, t*365.25, rain  # units m^2, days, m^3/s
def plot_rain(area, t, rain, format='--', color='k', ax=None):
    """Plot the rainfall rate as a reference curve for the runoff plot."""
    target_ax = ax if ax is not None else plt.figure().add_subplot(111)
    target_ax.plot(t, rain, format, color=color, label="rainfall rate")
    return target_ax
if __name__ == "__main__":
parser = argparse.ArgumentParser("Plot discharge observation from ATS run")
parser.add_argument("runoff_filename", type=str, help="Runoff observation filename.")
parser.add_argument("-p", "--plot-rainfall", action="store_true", help="Plot rainfall rate as an asymptotic limit.")
parser.add_argument("-d", "--directory", type=str, help="Simulation output directory", default='.')
parser.add_argument("-f", "--filename", type=str, help="Simulation surface output filename", default="visdump_surface_data.h5")
parser.add_argument("-r", "--rainfall-rate-key", type=str, help="Rainfall rate variable name", default="surface-mass_source.cell.0")
parser.add_argument("-a", "--area-key", type=str, help="Surface cell area variable name", default="surface-cell_volume.cell.0")
parser.add_argument("--density", type=float, help="Density of water", default=55000.)
args = parser.parse_args()
ax = None
if args.plot_rainfall:
area, time, rain = load_area_rain(args)
ax = plot_rain(area, time, rain)
plot(load(args.runoff_filename, args.density), ax=ax)
plt.show()
sys.exit(0)
| amanzi/ats | tools/utils/plot_runoff.py | plot_runoff.py | py | 2,293 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "numpy.loadtxt",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "parse_ats.re... |
34408932008 | from flask_restplus import Resource
from flask import current_app as cur_app
from flask import request
from app.main.services.story.brand_story_services import duplicate_story, get_all_draft_or_published_story, get_story, issue_story_template_before_save, post_story_publish_and_draft, remove_story_from_search, update_story_by_id, get_all_system_story
from app.main.utils.api_story_dto import BrandStoryDTO
api = BrandStoryDTO.api
_res_issue_template_before_save = BrandStoryDTO.res_issue_template_before_save
_post_req_for_draft_publish = BrandStoryDTO.post_req_for_draft_publish
_res_for_draft_publish = BrandStoryDTO.res_for_draft_publish
_req_for_draft_publish_update = BrandStoryDTO.post_req_for_draft_publish_update
_res_story_by_id = BrandStoryDTO.res_story_by_id
_res_get_all_draft_and_publish_story = BrandStoryDTO.res_get_all_draft_and_publish_story
_res_get_all_system = BrandStoryDTO.res_all_Storye_pages
@api.route("api/v1.0/brand/story/<story_id>")
class BrandStoryOpe(Resource):
@api.marshal_with(_res_story_by_id)
def get(self, story_id):
return get_story(story_id)
def delete(self, story_id):
return remove_story_from_search(story_id=story_id)
@api.route("api/v1.0/story/template/<template_id>/data")
class IssueStoryTemplate(Resource):
@api.marshal_with(_res_issue_template_before_save)
def get(self, template_id):
return issue_story_template_before_save(template_id)
@api.route("api/v1.0/brand/<brand_id>/story")
class BrandStoryOperation(Resource):
@api.expect(_post_req_for_draft_publish)
@api.marshal_with(_res_for_draft_publish)
def post(self, brand_id):
return post_story_publish_and_draft(brand_id, data=request.json)
@api.route("api/v1.0/brand/<brand_id>/story/<story_id>")
class BrandStoryOperationUpdate(Resource):
@api.expect(_req_for_draft_publish_update)
@api.marshal_with(_res_for_draft_publish)
def put(self, brand_id, story_id):
return update_story_by_id(brand_id, story_id, data=request.json)
@api.route("api/v1.0/brand/<brand_id>/storys")
class FetchStatusStory(Resource):
@api.marshal_with(_res_get_all_draft_and_publish_story)
# @token_required
def get(self, brand_id):
status = request.args.get('status')
active = request.args.get('active')
category = request.args.get('category')
args = request.args
search = args.get('search', '')
page = int(args.get('page', cur_app.config['PAGE']))
limit = int(args.get('limit', cur_app.config['LIMIT']))
return get_all_draft_or_published_story(brand_id, status, active, search, category, page, limit)
@api.route("api/v1.0/brand/<brand_id>/story/<story_id>/duplicate")
class BrandDuplicateStory(Resource):
@api.marshal_with(_res_for_draft_publish)
def get(self, brand_id, story_id):
return duplicate_story(brand_id=brand_id, story_id=story_id)
@api.route("api/v1.0/story")
class SystemBrandPages(Resource):
@api.marshal_with(_res_get_all_system)
def get(self):
args = request.args
page = int(args.get('page', cur_app.config['PAGE']))
limit = int(args.get('limit', cur_app.config['LIMIT']))
category = request.args.get('category')
return get_all_system_story(category, page, limit)
| deepakarya09/cureas_reads | app/main/controllers/api_story_controller.py | api_story_controller.py | py | 3,300 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "app.main.utils.api_story_dto.BrandStoryDTO.api",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "app.main.utils.api_story_dto.BrandStoryDTO",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "app.main.utils.api_story_dto.BrandStoryDTO.res_issue... |
29806901602 | import os
from dataclasses import dataclass
from datetime import datetime
from fastapi.encoders import jsonable_encoder
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload
from MenuApp.src.models import Menu, Submenu
@dataclass
class ReportService:
    """Collects menu data and formats it into a spreadsheet-ready layout."""

    db: AsyncSession  # async DB session injected by the caller

    async def get_data(self) -> list:
        """Generate a list of all menus, submenus and dishes.

        Returns:
            report: a JSON-serializable list of all menus with their
            submenus and dishes eagerly loaded.
        """
        stmt = select(Menu).options(
            joinedload(Menu.submenus).joinedload(Submenu.dishes)
        )
        data = await self.db.execute(stmt)
        return jsonable_encoder(data.scalars().unique().all())

    @staticmethod
    def formate_data(report_data) -> dict:
        """Generate a dict-template to write in xlsx-file.

        Parameters:
            report_data: JSON-like list of menus with nested submenus/dishes.
        Returns:
            template: mapping of column letters 'A'..'F' to cell values.
        """
        # Take the timestamp once so the formatted date and the timezone
        # name cannot disagree (the original called datetime.now() twice).
        now = datetime.now().astimezone()
        generated_date = now.strftime("%d %B %Y at %H:%M")
        description = f"Report generated {generated_date} ({now.tzinfo})"

        template = {
            "A": [description],
            "B": [""],
            "C": [""],
            "D": [""],
            "E": [""],
            "F": [""],
        }

        # Layout: menu number in column A, submenu number in B, dish number
        # in C, each row carrying title/description (and price for dishes).
        for i, menu in enumerate(report_data, 1):
            template["A"].append(str(i))
            template["B"].append(menu["title"])
            template["C"].append(menu["description"])
            template["D"].append("")
            template["E"].append("")
            template["F"].append("")
            for j, submenu in enumerate(menu["submenus"], 1):
                template["A"].append("")
                template["B"].append(str(j))
                template["C"].append(submenu["title"])
                template["D"].append(submenu["description"])
                template["E"].append("")
                template["F"].append("")
                for k, dish in enumerate(submenu["dishes"], 1):
                    template["A"].append("")
                    template["B"].append("")
                    template["C"].append(str(k))
                    template["D"].append(dish["title"])
                    template["E"].append(dish["description"])
                    template["F"].append(dish["price"])
        return template

    @staticmethod
    def is_exist(file_path):
        """Return True when *file_path* exists on disk."""
        return os.path.exists(file_path)
| Aliakseeva/MenuApp | MenuApp/src/services/tasks/report_service.py | report_service.py | py | 2,538 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlalchemy.ext.asyncio.AsyncSession",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.select",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "MenuApp.src.models.Menu",
"line_number": 23,
"usage_type": "argument"
},
{
... |
72908363068 | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from dateutil.relativedelta import relativedelta
from odoo.exceptions import ValidationError, UserError
from datetime import datetime, timedelta
from odoo.http import request
class OpAdmissionRegisterCustom(models.Model):
    """Extend the admission register: relabel the batch as 'Term' and make
    the course-fee product optional, editable only in draft state."""
    _inherit = "op.admission.register"

    batch_id = fields.Many2one(
        'op.batch', 'Term', required=True)
    product_id = fields.Many2one(
        'product.product', 'Course Fees', required=False,
        domain=[('type', '=', 'service')], readonly=True,
        states={'draft': [('readonly', False)]}, track_visibility='onchange')
class OpAdmission(models.Model):
_inherit = 'op.admission'
batch_id = fields.Many2one('op.batch', 'Term', domain=[], required=True, readonly=False)
name = fields.Char(
'Name', size=128, required=False, translate=False)
readonly = fields.Boolean(compute="_compute_read_only")
class_id = fields.Many2one('op.classroom', 'Class', required=False)
birth_place = fields.Many2one('res.country.state', 'Birth Place')
payment_option = fields.Selection([('normal', 'Normal'),
('exempted', 'Exempted'),
('haft_scholarship', '50% Scholarship'),
('full_scholarship', '100% Scholarship'),
('installment', 'Installment')], default='normal')
fill_application = fields.Boolean('Fill Application')
marital_status = fields.Selection([('single', 'Single'),
('married', 'Married')])
constrains = fields.Text('Special Wishes')
shoe_size_id = fields.Many2one('pm.shoe.size')
uniform_size_id = fields.Many2one('pm.uniform.size')
shoe_size = fields.Selection([
('xxs', 'XXS'),
('xs', 'XS'),
('s', 'S'),
('m', 'M'),
('l', 'L'),
('xl', 'Xl'),
('xxl', 'XXL'),
], 'Shoe Size')
khmer_name = fields.Char('Name in Khmer')
uniform_size = fields.Selection([
('xxs', 'XXS'),
('xs', 'XS'),
('s', 'S'),
('m', 'M'),
('l', 'L'),
('xl', 'Xl'),
('xxl', 'XXL'),
], 'Uniform Size')
nationality = fields.Many2one('res.country', 'Nationality')
primary_language = fields.Many2one('pm.student.language', string='Other Language')
other_language = fields.Many2many('pm.student.language', string='Other languages', help="Other languages")
english_score = fields.Float('English Score (%)')
high_school_id = fields.Many2one('pm.high_school', 'High School')
highest_education = fields.Selection([('HS', 'High School'),
('BA', 'Bachelor Degree'),
('MA', 'Master Degree'),
('PHD', 'Doctoral Degree')])
working_experience = fields.Text('Working Experience')
job_position = fields.Text('Job Position')
enroll_reason_id = fields.Many2one('pm.enroll_reason', string='Reason to Enroll')
not_enroll_reason_id = fields.Many2one('pm.not_enroll_reason', string='Reason not to Enroll')
current_address = fields.Char('Current Address')
hobby = fields.Char('Hobby')
family_size = fields.Integer('Family Size')
family_status = fields.Selection([('p', 'Poor'),
('n', 'Normal'),
('r', 'Rich')])
campaign_id = fields.Many2one('utm.campaign', 'Campaign')
source_id = fields.Many2one('utm.source', 'Source')
referred = fields.Char('Referred By')
passport_number = fields.Char('Passport Number')
id_card = fields.Char('ID Card')
medical_checkup = fields.Boolean('Medical Check up', default=True)
motivational_letter = fields.Boolean('Motivational Letter')
special_medical = fields.Text('Special Medical Condition')
is_scholarship = fields.Boolean('Scholarship')
lead_id = fields.Integer()
p_street = fields.Char('Street...')
p_street2 = fields.Char('Street...')
p_city = fields.Char('City', size=64)
p_zip = fields.Char('Zip', size=8)
p_state_id = fields.Many2one(
'res.country.state', 'States')
p_country_id = fields.Many2one(
'res.country', 'Country', )
application_number = fields.Char(
'Application Number', copy=False, readonly=True, store=True)
# new fields
application_date = fields.Datetime(
'Application Date', required=True, copy=False,
default=lambda self: fields.Datetime.now())
application_fee = fields.Boolean('Application Fee', required=True, default=True)
scholarship_status = fields.Many2one('pm.scholarship.status', string='Scholarship Status')
status = fields.Selection([('1st_follow_up', '1st follow-up'),
('2nd_follow_up', '2nd follow-up'),
('3rd_follow_up', '3rd follow-up'),
('visited_and_toured', 'Visited & toured academy'),
('live_student', 'Live of a student'),
('pick_up_application', 'Pick up application'),
('submitted_application', 'Submitted application incomplete'),
('schedule_for_interview', 'Schedule for interview'),
('interviewed', 'interviewed'),
('acceptance_letter', 'Acceptance letter issued')])
status_detail = fields.Char('Status detail')
lead_source = fields.Selection([('social_media', 'Social media'),
('facebook', 'Facebook'),
('website', 'Website'),
('school_visit', 'School Visit'),
('acac_student', 'By ACAC student'),
('friend', 'Friend'),
('school_councelor', 'School councelor'),
('family', 'Family'),
('open_day', 'Open day'),
('fair_exhibition', 'Fair/exhibition'),
('nea', 'NEA'),
('other', 'other')])
lead_participation = fields.One2many('pm.lead.participation',
inverse_name='admission_id',
string='Participation',
help="Participation")
additional_source = fields.Char('Additional Source Info')
parents = fields.Char('Parents')
siblings = fields.Integer('Siblings')
other_depends = fields.Char('Other dependents')
application_form = fields.Boolean('Application Form', default=True, required=True)
pictures = fields.Boolean('Pictures')
schooling_year = fields.Char('No. Schooling years')
lead_educational_achievement = fields.One2many('pm.lead.educational.achievement',
inverse_name="admission_id",
string='Educational Achievements',
help="Educational Achievements")
lead_working_experience = fields.One2many('pm.lead.working.experience',
inverse_name="admission_id",
string='Working Experience',
help="Working Experience")
contact_name = fields.Many2one('res.partner', string='Emergency Contact')
email_from = fields.Char('Email', help="Email address of the contact", tracking=40, index=True)
user_id = fields.Many2one('res.users', string='ACAC Contact', index=True, tracking=True,
default=lambda self: self.env.user)
acac_contact = fields.Char('ACAC Contact')
scholar_application = fields.Boolean('Scholar Application')
financial_status = fields.Boolean('Proof of Financial Status')
family_income = fields.Float('Source of Family income')
rank = fields.Selection([('first_contact', 'First Contact'),
('potential', 'Potential'),
('high_potential', 'High Potential')])
facebook = fields.Char('Facebook')
phone = fields.Char('Mobile 1')
admission_url = fields.Char('Link', compute="_compute_admission_url", store=True)
visa_number = fields.Char('Visa Number')
visa_expiry = fields.Date('Expiry Date')
product_id = fields.Many2one(
'product.product', 'Course Fees', required=False,
domain=[('type', '=', 'service')],track_visibility='onchange')
@api.depends('state')
def _compute_read_only(self):
    """Mark the admission read-only once it reaches the 'done' state."""
    for rec in self:
        # Single boolean expression replaces the original if/else branches.
        rec.readonly = rec.state == 'done'
@api.onchange('register_id')
def onchange_register(self):
    """Propagate course and term from the selected admission register."""
    # Leftover debug prints removed.
    self.course_id = self.register_id.course_id
    self.batch_id = self.register_id.batch_id
@api.onchange('course_id')
def onchange_course(self):
    """Default the fees term from the chosen course, clearing it otherwise."""
    course = self.course_id
    if course and course.fees_term_id:
        self.fees_term_id = course.fees_term_id.id
    else:
        self.fees_term_id = False
@api.onchange('product_id')
def onchange_product(self):
    """Default the fee amount from the selected product's list price."""
    # Leftover debug print removed.
    self.fees = self.product_id.lst_price
@api.depends('name')
def _compute_admission_url(self):
    """Build a backend deep-link URL to this admission's form view."""
    for record in self:
        base_url = request.env['ir.config_parameter'].get_param('web.base.url')
        base_url += '/web#id=%d&view_type=form&model=op.admission' % (record.id)
        record.admission_url = base_url
def submit_form(self):
    """Move the admission to 'submit', e-mail the submission template to
    the applicant, and return the CRM leads list action."""
    ir_model_data = self.env['ir.model.data']
    try:
        template_id = ir_model_data.get_object_reference('pm_admission', 'student_admission_submission')[1]
    except ValueError:
        # Template not installed: browse(False) makes the mail a no-op.
        template_id = False
    self.env['mail.template'].browse(template_id).send_mail(self.id, force_send=True)
    self.state = 'submit'
    action = self.env.ref("crm.crm_lead_all_leads").read()[0]
    return action
def confirm_in_progress(self):
    """Move the admission to 'confirm', e-mail the payment-confirmation
    template, and return the CRM leads list action."""
    ir_model_data = self.env['ir.model.data']
    try:
        template_id = ir_model_data.get_object_reference('pm_admission', 'student_payment_confirm')[1]
    except ValueError:
        # Template not installed: browse(False) makes the mail a no-op.
        template_id = False
    self.env['mail.template'].browse(template_id).send_mail(self.id, force_send=True)
    self.state = 'confirm'
    action = self.env.ref("crm.crm_lead_all_leads").read()[0]
    return action
def admission_confirm(self):
    """Move the admission to 'admission', e-mail the admission-confirmation
    template, and return the CRM leads list action."""
    ir_model_data = self.env['ir.model.data']
    try:
        template_id = ir_model_data.get_object_reference('pm_admission', 'student_admission_confirm')[1]
    except ValueError:
        # Template not installed: browse(False) makes the mail a no-op.
        template_id = False
    self.env['mail.template'].browse(template_id).send_mail(self.id, force_send=True)
    self.state = 'admission'
    action = self.env.ref("crm.crm_lead_all_leads").read()[0]
    return action
def confirm_cancel(self):
    """Cancel the admission: demote the linked CRM record back to a lead,
    delete this admission, and return the CRM leads list action."""
    lead = self.env['crm.lead'].browse(self.lead_id)
    lead.type = 'lead'
    self.unlink()
    action = self.env.ref("crm.crm_lead_all_leads").read()[0]
    return action
@api.onchange('student_id')
def onchange_student_id(self):
    """Copy profile data from the chosen existing student onto this admission.

    Only runs when an existing student is selected and 'is_student' is set.
    """
    student = self.env['op.student'].search(
        [('id', '=', self.student_id.id)])
    if not (self.student_id and self.is_student):
        return
    # Fields mirrored 1:1 from the student record (replaces 30 duplicated
    # assignments; the debug print was removed).
    mirrored = (
        'prev_course_id', 'high_school_id', 'english_score',
        # additional information
        'khmer_name', 'id_card', 'passport_number', 'marital_status',
        'nationality', 'primary_language', 'other_language',
        'shoe_size', 'uniform_size', 'job_position', 'working_experience',
        'constrains', 'hobby', 'facebook', 'visa_number', 'visa_expiry',
        # family info
        'family_status', 'family_business', 'family_income', 'family_size',
        # lead tracking
        'campaign_id', 'source_id', 'referred',
        # extra
        'medical_checkup', 'special_medical', 'motivational_letter',
    )
    for field_name in mirrored:
        self[field_name] = student[field_name]
    # The admission stores the student's image under a different field name.
    self['image'] = student['image_1920']
@api.onchange('is_student')
def onchange_is_student(self):
    """Clear all student-derived fields when "is_student" is unticked.

    NOTE(review): this method name is redefined later in the class; in
    Python the later definition wins, so this copy is inactive.  The
    field set here (which includes facebook/visa fields) is preserved
    verbatim, only collapsed into a loop.
    """
    if not self.is_student:
        cleared_fields = [
            'prev_course_id', 'high_school_id', 'english_score',
            # additional information
            'khmer_name', 'id_card', 'passport_number', 'marital_status',
            'nationality', 'primary_language', 'other_language',
            'shoe_size', 'uniform_size', 'job_position',
            'working_experience', 'constrains', 'hobby', 'facebook',
            'visa_number', 'visa_expiry',
            # family info
            'family_status', 'family_business', 'family_income',
            'family_size',
            # marketing
            'campaign_id', 'source_id', 'referred',
            # extra
            'medical_checkup', 'special_medical', 'motivational_letter',
        ]
        for field_name in cleared_fields:
            self[field_name] = False
@api.model
def create(self, val):
    """Attempt to prefill student data at creation time.

    NOTE(review): this override looks broken and is dead code in practice:
    - it never calls super() and returns None, so it would not create a
      record if it were the active definition;
    - it reads ``self.student_id`` inside an ``@api.model`` method, where
      ``self`` is an empty recordset, so the condition is never truthy;
    - a second ``create`` defined later in this class shadows it anyway.
    Left byte-identical pending a decision to delete it.
    """
    student = self.env['op.student'].search(
        [('id', '=', self.student_id.id)])
    if self.student_id and self.is_student:
        self['prev_course_id'] = student['prev_course_id']
        self['high_school_id'] = student['high_school_id']
        self['english_score'] = student['english_score']
        # additional information
        self['khmer_name'] = student['khmer_name']
        self['id_card'] = student['id_card']
        self['passport_number'] = student['passport_number']
        self['marital_status'] = student['marital_status']
        self['nationality'] = student['nationality']
        self['primary_language'] = student['primary_language']
        self['other_language'] = student['other_language']
        self['shoe_size'] = student['shoe_size']
        self['uniform_size'] = student['uniform_size']
        self['job_position'] = student['job_position']
        self['working_experience'] = student['working_experience']
        self['constrains'] = student['constrains']
        self['hobby'] = student['hobby']
        self['facebook'] = student['facebook']
        self['visa_number'] = student['visa_number']
        self['visa_expiry'] = student['visa_expiry']
        # family info
        self['family_status'] = student['family_status']
        self['family_business'] = student['family_business']
        self['family_income'] = student['family_income']
        self['family_size'] = student['family_size']
        #
        self['campaign_id'] = student['campaign_id']
        self['source_id'] = student['source_id']
        self['referred'] = student['referred']
        # Extra
        self['medical_checkup'] = student['medical_checkup']
        self['special_medical'] = student['special_medical']
        self['motivational_letter'] = student['motivational_letter']
@api.onchange('is_student')
def onchange_is_student(self):
    """Reset student-derived fields when the record is no longer marked
    as an existing student.

    This is the active definition (it shadows an earlier duplicate).
    NOTE(review): unlike the earlier duplicate, this list does not clear
    facebook / visa_number / visa_expiry -- preserved as-is.
    """
    if not self.is_student:
        cleared_fields = [
            'prev_course_id', 'high_school_id', 'english_score',
            # additional information
            'khmer_name', 'id_card', 'passport_number', 'marital_status',
            'nationality', 'primary_language', 'other_language',
            'shoe_size', 'uniform_size', 'job_position',
            'working_experience', 'constrains', 'hobby',
            # family info
            'family_status', 'family_business', 'family_income',
            'family_size',
            # marketing
            'campaign_id', 'source_id', 'referred',
            # extra
            'medical_checkup', 'special_medical', 'motivational_letter',
        ]
        for field_name in cleared_fields:
            self[field_name] = False
# @api.onchange('batch_id')
# def onchange_batch_id(self):
# if self.batch_id and self.batch_id.state != 'active':
# msg = 'The selected term is not active: (%s) state: (%s)' % (self.batch_id.name,
# self.batch_id.state)
# raise ValidationError(_(msg))
@api.model
def create(self, val):
    """Create the admission after validating the chosen batch.

    - Rejects creation when the selected batch (term) is not active.
    - Marks the originating CRM lead as an admission.
    - Re-parents attachments from the lead onto the new admission.

    Fixes a KeyError when 'batch_id' is absent from the creation values
    (``val['batch_id']`` -> ``val.get('batch_id')``) and removes debug
    prints.
    """
    batch_id = val.get('batch_id')  # may legitimately be missing
    if batch_id:
        batch = self.env['op.batch'].browse(batch_id)
        if batch.state != 'active':
            msg = 'The selected term is not active:- (%s)' % (
                batch.name)
            raise ValidationError(_(msg))
    lead_id = val.get('lead_id')
    if lead_id:
        lead_ref = self.env['crm.lead'].browse(lead_id)
        lead_ref.type = "admission"
    res = super(OpAdmission, self).create(val)
    # Move attachments uploaded on the lead over to the new admission.
    attachment = self.env['ir.attachment'].search(
        [('res_model', '=', 'crm.lead'), ('res_id', '=', lead_id)])
    for att in attachment:
        att.write({
            'res_model': 'op.admission',
            'res_id': res.id
        })
    return res
def enroll_student(self):
    """Finalise the admission.

    Per record: validate required fields, enforce the register's maximum
    admission count, create (or update) the op.student record, clone the
    admission's attachments onto the student, generate fee detail lines
    from the fees term, and mark the admission as done.

    Returns a client notification action (and aborts) when required
    fields are missing; otherwise returns None.
    """
    for record in self:
        # Collect names of missing mandatory fields for the notification.
        messages = ''
        if not record.class_id:
            messages += 'Class | '
        if not record.contact_name:
            messages += 'Emergency Contact | '
        if len(messages):
            notification = {
                'type': 'ir.actions.client',
                'tag': 'display_notification',
                'params': {
                    'title': 'Please fill in the following fields:',
                    'message': _(messages),
                    'type': 'danger',  # types: success,warning,danger,info
                    'sticky': True,  # True/False will display for few seconds if false
                },
            }
            return notification
        # Enforce the register's admission cap against done admissions.
        if record.register_id.max_count:
            total_admission = self.env['op.admission'].search_count(
                [('register_id', '=', record.register_id.id),
                 ('state', '=', 'done')])
            if not total_admission < record.register_id.max_count:
                msg = 'Max Admission In Admission Register :- (%s)' % (
                    record.register_id.max_count)
                raise ValidationError(_(msg))
        if not record.student_id:
            # New student: build values (also creates the portal user).
            vals = record.get_student_vals()
            record.partner_id = vals.get('partner_id')
            record.student_id = student_id = self.env[
                'op.student'].create(vals).id
        else:
            # Existing student: deactivate old course lines, add this one.
            record.student_id.course_detail_ids.p_active = False
            student_id = record.student_id.id
            record.student_id.write({
                'course_detail_ids': [[0, False, {
                    'course_id':
                        record.course_id and record.course_id.id or False,
                    'batch_id':
                        record.batch_id and record.batch_id.id or False,
                    'p_active': True,
                }]],
            })
        # Copy admission attachments onto the student record.
        # NOTE(review): the print() calls below are leftover debugging.
        attachment = self.env['ir.attachment'].search([('res_model', '=', 'op.admission'), ('res_id', '=', record.id)])
        print(attachment)
        if attachment:
            for att in attachment:
                attchment_clone = att.copy()
                print('******')
                print(attchment_clone)
                attchment_clone.write({
                    'res_model': 'op.student',
                    'res_id': student_id
                })
                print('true true')
        if record.fees_term_id:
            # Build fee detail lines; due/alert dates offset from today.
            val = []
            # NOTE(review): product looked up by the magic barcode
            # '168@168' -- product.id raises if no match; confirm fixture.
            product = self.env['product.product'].search([('barcode', '=', '168@168')])
            print('...........product............')
            product_id = product.id
            for line in record.fees_term_id.line_ids:
                no_days = line.due_days
                no_alert_days = no_days - 7  # alert one week before due
                state = 'draft'
                price = line.total
                print(price)
                amount = price
                date = (datetime.today() + relativedelta(
                    days=no_days)).date()
                alert_date = (datetime.today() + relativedelta(
                    days=no_alert_days)).date()
                dict_val = {
                    'semester': line.semester,
                    'fees_line_id': line.id,
                    'amount': amount,
                    'date': date,
                    'alert_date': alert_date,
                    'product_id': product_id,
                    'state': state,
                }
                val.append([0, False, dict_val])
            print(val)
            record.student_id.write({
                'fees_detail_ids': val
            })
        # Mark the admission done and link the (possibly new) student.
        record.write({
            'nbr': 1,
            'state': 'done',
            'admission_date': fields.Date.today(),
            'student_id': student_id,
            'is_student': True,
        })
def get_student_vals(self):
    """Build the op.student creation values from this admission.

    Side effects: creates a portal res.users account for the student,
    writes the contact details onto its partner, and triggers the
    password-reset (invitation) e-mail.

    NOTE(review): iterates ``self`` but returns inside the first pass,
    so only the first record is ever processed.
    """
    for student in self:
        # [6, False, ids] command triples: replace many2many lists wholesale.
        langs = [[6, False, student.other_language.mapped('id')]]
        educat = [[6, False, student.lead_educational_achievement.mapped('id')]]
        working = [[6, False, student.lead_working_experience.mapped('id')]]
        partition = [[6, False, student.lead_participation.mapped('id')]]
        # Portal user for the student; login is the admission e-mail.
        student_user = self.env['res.users'].with_context(no_reset_password=False).create({
            'name': student.name,
            'login': student.email,
            'image_1920': self.image or False,
            'is_student': True,
            'company_id': self.env.ref('base.main_company').id,
            'groups_id': [
                (6, 0,
                 [self.env.ref('base.group_portal').id])]
        })
        # Contact details, written to the user's partner first.
        details = {
            'phone': student.phone,
            'mobile': student.mobile,
            'email': student.email,
            'street': student.street,
            'street2': student.street2,
            'city': student.city,
            'country_id':
                student.country_id and student.country_id.id or False,
            'state_id': student.state_id and student.state_id.id or False,
            # 'image_1920': student.image,
            'zip': student.zip
        }
        student_user.partner_id.write(details)
        # Sends the signup / password-reset e-mail to the new portal user.
        student_user.with_context(create_user=True).action_reset_password()
        details.update({
            'title': student.title and student.title.id or False,
            'first_name': student.first_name,
            'birth_place': student.birth_place.id,
            'middle_name': student.middle_name,
            'khmer_name': student.khmer_name,
            'last_name': student.last_name,
            'birth_date': student.birth_date,
            'gender': student.gender,
            # 'image_1920': student.image or False,
            'course_detail_ids': [[0, False, {
                'course_id':
                    student.course_id and student.course_id.id or False,
                'batch_id':
                    student.batch_id and student.batch_id.id or False,
                'class_ids': [[6, 0, [student.class_id.id]]],
            }]],
            'user_id': student_user.id,
            'partner_id': student_user.partner_id.id,
            'batch_id':
                student.batch_id and student.batch_id.id or False,
            # NOTE(review): 'fill_application' is filled from marital_status,
            # same as the next key -- looks like a copy/paste slip; confirm.
            'fill_application': student.marital_status,
            'marital_status': student.marital_status,
            'constrains': student.constrains,
            'shoe_size': student.shoe_size,
            'shoe_size_id': student.shoe_size_id.id,
            'uniform_size': student.uniform_size,
            'uniform_size_id': student.uniform_size_id.id,
            'primary_language': student.primary_language.id,
            'other_language': langs,
            'english_score': student.english_score,
            'highest_education': student.highest_education,
            'working_experience': student.working_experience,
            'job_position': student.job_position,
            'current_address': student.current_address,
            'hobby': student.hobby,
            'family_size': student.family_size,
            'family_status': student.family_status,
            'passport_number': student.passport_number,
            'id_card': student.id_card,
            'campaign_id': student.campaign_id.id,
            'source_id': student.source_id.id,
            'referred': student.referred,
            'medical_checkup': student.medical_checkup,
            'is_scholarship': student.is_scholarship,
            'scholarship_status': student.scholarship_status.id,
            'motivational_letter': student.motivational_letter,
            'special_medical': student.special_medical,
            'enroll_reason_id': student.enroll_reason_id.id,
            'high_school_id': student.high_school_id.id,
            'facebook': student.facebook,
            'visa_number': student.visa_number,
            'visa_expiry': student.visa_expiry,
            'nationality': student.nationality.id,
            'rank': student.rank,
            'status_detail': student.status_detail,
            'lead_source': student.lead_source,
            'additional_source': student.additional_source,
            'parents': student.parents,
            'siblings': student.siblings,
            'other_depends': student.other_depends,
            'application_form': student.application_form,
            'pictures': student.pictures,
            'schooling_year': student.schooling_year,
            'lead_educational_achievement': educat,
            'lead_working_experience': working,
            'lead_participation': partition,
            'family_income': student.family_income,
            'scholar_application': student.scholar_application,
            'financial_status': student.financial_status,
            'contact_name': student.contact_name.id,
            'application_fee': student.application_fee,
            'p_street': student.p_street,
            'p_street2': student.p_street2,
            'p_city': student.p_city,
            'p_zip': student.p_zip,
            'p_state_id': student.p_state_id.id,
            'p_country_id': student.p_country_id.id,
        })
        # NOTE(review): leftover debug output -- remove before production.
        print('*&(7e2132')
        print(details)
        return details
def write(self, vals):
    """Persist changes; works around a stale-image caching issue.

    When the image field is updated, the previously stored attachment
    for that field is removed first so the new value is not shadowed by
    the old cached one.

    Security fix: the DELETE now uses a parameterised query instead of
    %-string interpolation (SQL-injection prone and quoting-fragile).
    """
    if 'image' in vals and vals['image']:
        # NOTE(review): self.id assumes a single record; a multi-record
        # write would raise -- confirm callers always pass one record.
        self.env.cr.execute(
            "DELETE FROM ir_attachment "
            "WHERE res_model = %s AND res_field = %s AND res_id = %s",
            (self._name, 'image', self.id),
        )
    return super(OpAdmission, self).write(vals)
{
"api_name": "odoo.models.Model",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Many2one",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
... |
41119379113 | import random
random.seed(1)
import numpy as np
np.random.seed(1)
import tensorflow.compat.v1 as tf
tf.random.set_random_seed(1)
import gym
import os
tf.disable_v2_behavior()
env = gym.make('CartPole-v1')
class PolicyNetwork:
    """Actor network: maps a state to a softmax distribution over actions.

    One hidden layer of 12 ReLU units.  Trained with the actor-critic
    policy-gradient loss  mean(I * neg_log_prob * td_error), where I is
    the accumulated discount factor fed in at run time.
    """

    def __init__(self, state_size, action_size, learning_rate, name='policy_network'):
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate

        with tf.variable_scope(name):
            # Run-time inputs.
            self.state = tf.placeholder(tf.float32, [None, self.state_size], name="state")
            # NOTE(review): fed with a one-hot vector despite the int32
            # dtype -- works, but float32 would be the conventional type.
            self.action = tf.placeholder(tf.int32, [self.action_size], name="action")
            self.td_error = tf.placeholder(tf.float32, name="td_error")
            self.I = tf.placeholder(tf.float32, name="I")

            # Two-layer MLP: state -> 12 ReLU -> action logits.
            self.W1 = tf.get_variable("W1", [self.state_size, 12], initializer=tf.keras.initializers.glorot_normal(seed=0))
            self.b1 = tf.get_variable("b1", [12], initializer=tf.zeros_initializer())
            self.W2 = tf.get_variable("W2", [12, self.action_size], initializer=tf.keras.initializers.glorot_normal(seed=0))
            self.b2 = tf.get_variable("b2", [self.action_size], initializer=tf.zeros_initializer())

            self.Z1 = tf.add(tf.matmul(self.state, self.W1), self.b1)
            self.A1 = tf.nn.relu(self.Z1)
            self.output = tf.add(tf.matmul(self.A1, self.W2), self.b2)

            # Softmax probability distribution over actions
            self.actions_distribution = tf.squeeze(tf.nn.softmax(self.output))
            # Loss with negative log probability
            self.neg_log_prob = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output, labels=self.action)
            self.loss = tf.reduce_mean(self.I * self.neg_log_prob * self.td_error)
            self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)
class ValueNetwork:
    """Critic network: estimates the state value V(s).

    Three-layer MLP (256 -> 64 -> 1 with ReLU activations).  The loss
    mean(-I * V(s) * td_error) is a pseudo-loss: its gradient reproduces
    the semi-gradient TD(0) critic update (td_error is a fed constant).
    """

    def __init__(self, state_size, learning_rate, name='value_network'):
        self.state_size = state_size
        self.learning_rate = learning_rate

        with tf.variable_scope(name):
            # Run-time inputs; I is the accumulated discount gamma**t.
            self.state = tf.placeholder(tf.float32, [None, self.state_size], name="state")
            self.td_error = tf.placeholder(tf.float32, name='td_error')
            self.I = tf.placeholder(tf.float32, name="I")

            self.W1 = tf.get_variable("W1", [self.state_size, 256], initializer=tf.keras.initializers.glorot_normal(seed=0))
            self.b1 = tf.get_variable("b1", [256], initializer=tf.zeros_initializer())
            self.W2 = tf.get_variable("W2", [256, 64], initializer=tf.keras.initializers.glorot_normal(seed=0))
            self.b2 = tf.get_variable("b2", [64], initializer=tf.zeros_initializer())
            self.W3 = tf.get_variable("W3", [64, 1], initializer=tf.keras.initializers.glorot_normal(seed=0))
            self.b3 = tf.get_variable("b3", [1], initializer=tf.zeros_initializer())

            self.Z1 = tf.add(tf.matmul(self.state, self.W1), self.b1)
            self.A1 = tf.nn.relu(self.Z1)
            self.Z2 = tf.add(tf.matmul(self.A1, self.W2), self.b2)
            self.A2 = tf.nn.relu(self.Z2)
            # Scalar state-value estimate (per batch row).
            self.output = tf.add(tf.matmul(self.A2, self.W3), self.b3)

            self.loss = tf.reduce_mean(-self.I * self.output * self.td_error)
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
# Define hyperparameters
state_size = 4  # length of the CartPole observation vector
action_size = env.action_space.n
max_episodes = 5000
max_steps = 501  # CartPole-v1 episodes are capped at 500 steps
discount_factor = 0.99
critic_learning_rate = 0.002
actor_learning_rate = 0.0004
render = False  # set True to visualise episodes (much slower)

# Initialize the actor network
tf.reset_default_graph()
actor = PolicyNetwork(state_size, action_size, actor_learning_rate)
critic = ValueNetwork(state_size, critic_learning_rate)
# tensorboard logs: per-episode scalars for both losses and rewards.
actor_loss_placeholder = tf.compat.v1.placeholder(tf.float32)
tf.compat.v1.summary.scalar(name="policy_losses", tensor=actor_loss_placeholder)
critic_loss_placeholder = tf.compat.v1.placeholder(tf.float32)
# BUG FIX: "value_losses" previously pointed at actor_loss_placeholder,
# so the critic loss was never logged (it duplicated the policy loss).
tf.compat.v1.summary.scalar(name="value_losses", tensor=critic_loss_placeholder)
reward_placeholder = tf.compat.v1.placeholder(tf.float32)
tf.compat.v1.summary.scalar(name="reward", tensor=reward_placeholder)
avg_reward_placeholder = tf.compat.v1.placeholder(tf.float32)
tf.compat.v1.summary.scalar(name="avg_reward", tensor=avg_reward_placeholder)
# Write event files next to this script, under ./logs.
log_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs')
if not os.path.isdir(log_path):
    os.mkdir(log_path)
writer = tf.compat.v1.summary.FileWriter(log_path)
summaries = tf.compat.v1.summary.merge_all()
print('saving logs to: %s' % log_path)
# Start training the agent with REINFORCE algorithm
# (one-step actor-critic: the TD error replaces the Monte-Carlo return).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    solved = False
    episode_rewards = np.zeros(max_episodes)
    average_rewards = 0.0
    # NOTE(review): these accumulate across episodes without being reset,
    # so the logged "per-episode" averages are running averages -- confirm
    # whether that is intended.
    episode_critic_loss = []
    episode_actor_loss = []
    for episode in range(max_episodes):
        state = env.reset()
        # state = np.concatenate([state, np.asarray([0])])
        state = state.reshape([1, state_size])
        episode_transitions = []
        I=1  # accumulated discount factor gamma**t
        for step in range(max_steps):
            # Critic value for the current state, actor's action probs.
            value = sess.run(critic.output, {critic.state: state})
            actions_distribution = sess.run(actor.actions_distribution, {actor.state: state})
            action = np.random.choice(np.arange(len(actions_distribution)), p=actions_distribution)
            next_state, reward, done, _ = env.step(action)
            # next_state = np.concatenate([next_state, np.asarray([(step + 1) / max_steps])])
            next_state = next_state.reshape([1, state_size])
            # Bootstrap target value; terminal states are worth 0.
            next_value = sess.run(critic.output, {critic.state: next_state}) if not done else 0
            if render:
                env.render()
            action_one_hot = np.zeros(action_size)
            action_one_hot[action] = 1
            episode_rewards[episode] += reward
            # One-step TD target and error drive both networks' updates.
            target = reward + discount_factor * next_value
            td_error = target - value
            value_feed_dict = {critic.state: state, critic.td_error: td_error, critic.I: I}
            _, critic_loss = sess.run([critic.optimizer, critic.loss], value_feed_dict)
            policy_feed_dict = {actor.state: state, actor.td_error: td_error, actor.action: action_one_hot,actor.I: I}
            _, actor_loss = sess.run([actor.optimizer, actor.loss], policy_feed_dict)
            state = next_state
            episode_critic_loss.append(critic_loss)
            episode_actor_loss.append(actor_loss)
            if done:
                if episode > 98:
                    # Check if solved
                    average_rewards = np.mean(episode_rewards[(episode - 99):episode+1])
                print("Episode {} Reward: {} Average over 100 episodes: {}".format(episode, episode_rewards[episode], round(average_rewards, 2)))
                # CartPole-v1 is considered solved at avg reward > 475.
                if average_rewards > 475:
                    print(' Solved at episode: ' + str(episode))
                    solved = True
                break
            I = I * discount_factor
        if solved:
            break
        avg_actor_loss = np.mean(episode_actor_loss)
        avg_critic_loss = np.mean(episode_critic_loss)
        summery = sess.run(summaries, feed_dict={actor_loss_placeholder: avg_actor_loss,
                                                 critic_loss_placeholder: avg_critic_loss,
                                                 reward_placeholder: episode_rewards[episode],
                                                 avg_reward_placeholder: average_rewards if episode > 98 else 0})
        writer.add_summary(summery, global_step=episode)
{
"api_name": "random.seed",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.ran... |
14875088196 | import bpy
import bmesh
import sys
import time
import argparse
# blender -b -P Resize.py -- --height 0.8 --inm Objects/Bed.obj --outm oBed2.obj
def get_args():
    """Parse the script arguments that follow Blender's ``--`` separator.

    Blender passes its own options before ``--``; everything after it
    belongs to this script (--height, --inm, --outm).
    """
    parser = argparse.ArgumentParser()
    # First pass with no rules: collect every raw CLI token.
    _, raw_tokens = parser.parse_known_args()
    # Keep only the tokens after the '--' separator.
    script_args = raw_tokens[raw_tokens.index('--') + 1:]
    # add parser rules
    parser.add_argument('-hei', '--height', help="Final Height Dimension")
    parser.add_argument('-in', '--inm', help="Original Model")
    parser.add_argument('-out', '--outm', help="Rescaled output file")
    parsed, _ = parser.parse_known_args(script_args)
    return parsed
args = get_args()

# CLI inputs: target height, source .obj path, destination .obj path.
height = float(args.height)
print(height)
input_model = str(args.inm)
print(input_model)
output_model = str(args.outm)
print(output_model)

print('\n Clearing blender scene (default garbage...)')
# deselect all
bpy.ops.object.select_all(action='DESELECT')

print('\n Beginning the process of import & export using Blender Python API ...')
bpy.ops.import_scene.obj(filepath=input_model)
print('\n Obj file imported successfully ...')
### just imported obj

print('\n Starting Resize...')
print('\n Z Dimension of the object is')
# Take the Z dimension of the (last) mesh object in the scene.
# NOTE(review): if the imported file contains no mesh, ``z`` is never
# assigned and print(z) raises NameError; also only the last mesh's
# height is used when several meshes are present -- confirm intent.
for o in bpy.data.objects:
    if o.type == 'MESH':
        z=o.dimensions.z
#x= bpy.data.objects[0].dimensions.x
#y=bpy.data.objects[0].dimensions.y
#z=bpy.data.objects[0].dimensions.z
# Resize the object
newscale=1
print(z)
if z != 0 :
    # Uniform scale so that the mesh height becomes the requested one.
    newscale= height/z
    bpy.ops.transform.resize(value=(newscale,newscale,newscale))
print('\n new scale is',newscale ,'\n')
#bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY')
bpy.ops.export_scene.obj(filepath=output_model)
print('\n Ending Resize...')
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bpy.ops.object.select_all",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "bpy.op... |
18298344467 | import argparse
import socket
import struct
import codecs
import dns.resolver
import dns.message
import dns.query
import base64
from aes import aes
# Address of the DNS server
#dns_server = "8.8.8.8"
# DNS query message format
#dns_query = struct.pack("!6H", 0x1234, 1, 1, 0, 0, 0) + b"\x03foo\x03bar\x00\x00\x01\x00\x01"
def encrypt_message(message, key):
    """AES-ECB encrypt *message* (bytes) and return it base64-encoded (str).

    NOTE(review): padding uses spaces, which is lossy (trailing spaces in
    the plaintext disappear after decrypt+rstrip, and an already-aligned
    message still gains 16 pad bytes).  ECB mode also leaks plaintext
    structure -- confirm this is acceptable for the covert channel.
    """
    # Pad the message to a multiple of 16 bytes
    message = message + b' ' * (16 - len(message) % 16)
    # Create an AES cipher object and encrypt the message
    cipher = aes.new(key, aes.MODE_ECB)
    encrypted_message = cipher.encrypt(message)
    # Return the base64 encoded encrypted message
    return base64.b64encode(encrypted_message).decode()
def encode_message(message, key):
    """Encrypt *message* with *key*, then encode each ciphertext character
    as its doubled ordinal value, joined with dots (e.g. 'ab' -> '194.196').
    """
    ciphertext = encrypt_message(message.encode(), key)
    return '.'.join(str(ord(symbol) * 2) for symbol in ciphertext)
#def encode_message(message):
# # Map of characters to binary
# mapping = {chr(97 + i): format(i, '05b') for i in range(26)}
# mapping['EOF'] = '11111'
#
# # Encode message as binary
# message = ''.join(mapping[c] for c in message)
#
# # Split message into 10-bit chunks
# message = [message[i:i + 10] for i in range(0, len(message), 10)]
#
# # Convert 10-bit chunks to integer values
# message = [int(chunk, 2) for chunk in message]
#
# return message
#
def decode_message(encoded_message):
    """Reverse of encode_message: halve each dot-separated value back to a
    character, then AES-decrypt the reassembled ciphertext.

    NOTE(review): relies on a module-level ``key`` that is only bound in
    the ``__main__`` block -- calling this from an import raises
    NameError.  Passing the key explicitly (as decrypt_message does)
    would be safer; confirm before changing the signature.
    """
    # Split the encoded message into individual values
    values = encoded_message.split('.')
    # Convert the values back into characters
    decoded_message = ''
    for value in values:
        decoded_message += chr(int(value) // 2)
    # Decrypt the message using AES encryption
    decrypted_message = decrypt_message(decoded_message.encode(), key)
    # Return the decrypted message
    return decrypted_message.rstrip()
def decrypt_message(encrypted_message, key):
    """AES-ECB decrypt a base64-encoded ciphertext and strip the trailing
    space padding that encrypt_message added."""
    raw_ciphertext = base64.b64decode(encrypted_message)
    plaintext = aes.new(key, aes.MODE_ECB).decrypt(raw_ciphertext)
    return plaintext.rstrip()
def send_payload_to_target(message, domain, source):
    """Exfiltrate *message* via DNS TTL values.

    Encodes the message into 10-bit chunks, sends one DNS A query per
    chunk to *source*, and expects the covert-channel server to echo the
    chunk back as the answer's TTL.  Returns True on success, raises on
    any mismatch.

    NOTE(review): the validation below allows digits ('0'-'4' via the
    mapping), but the encoding uses ``ord(char) - ord('a')``, which goes
    negative for digits and makes format() raise -- confirm the intended
    alphabet.  Also, the "multiply by 5" pass iterates per *binary digit*
    ('0'/'1'), re-expanding each to 5 bits, which quintuples the payload
    length; verify against the server-side decoder.
    """
    mapping = {'00000': 'a', '00001': 'b', '00010': 'c', '00011': 'd',
               '00100': 'e', '00101': 'f', '00110': 'g', '00111': 'h',
               '01000': 'i', '01001': 'j', '01010': 'k', '01011': 'l',
               '01100': 'm', '01101': 'n', '01110': 'o', '01111': 'p',
               '10000': 'q', '10001': 'r', '10010': 's', '10011': 't',
               '10100': 'u', '10101': 'v', '10110': 'w', '10111': 'x',
               '11000': 'y', '11001': 'z', '11011': '0', '11100': '1',
               '11101': '2', '11110': '3', '11111': '4'}
    # Check if message is a string
    if not isinstance(message, str):
        raise ValueError("Message must be a string")
    # Check if message contains only lowercase letters and numbers
    for char in message:
        if char not in mapping.values():
            raise ValueError("Message must contain only lowercase letters and numbers")
    # Convert message to binary
    message = ''.join(format(ord(char) - ord('a'), '05b') for char in message)
    # Pad message with EOF character to make its length a multiple of 10
    message += '11011' * (10 - len(message) % 10)
    # Multiply binary values by 5 to obtain larger TTL values
    message = ''.join(format(int(char, 2) * 5, '05b') for char in message)
    # Split data into 10-bit chunks
    chunks = [message[i:i+10] for i in range(0, len(message), 10)]
    # Convert 10-bit chunks to integer values
    chunks = [int(chunk, 2) for chunk in chunks]
    # Send DNS requests with TTL values
    for chunk in chunks:
        request = dns.message.make_query(domain, dns.rdatatype.A)
        response = dns.query.udp(request, source, timeout=1)
        if response.rcode() != dns.rcode.NOERROR:
            raise Exception("DNS query failed")
        # The covert server is expected to echo the chunk as the TTL.
        ttl = response.answer[0].ttl
        if ttl != chunk:
            raise Exception("Unexpected TTL value")
    return True
# Function to decode the covert message from the DNS reply
#def decode_message(data):
# # Map of binary to characters
# mapping = {format(i, '05b'): chr(97 + i) for i in range(26)}
# mapping['11111'] = 'EOF'
#
# # Split data into 10-bit chunks
# chunks = [data[i:i + 10] for i in range(0, len(data), 10)]
#
# # Convert 10-bit chunks to integer values
# chunks = [int(chunk, 2) for chunk in chunks]
#
# # Divide integer values by 5 to obtain original message
# chunks = [chunk // 5 for chunk in chunks]
#
# # Convert integer values to binary
# chunks = [format(chunk, '05b') for chunk in chunks]
#
# # Join binary values to form the message
# message = ''.join(chunks)
#
# # Split message into character codes
# message = [message[i:i + 5] for i in range(0, len(message), 5)]
#
# # Convert character codes to characters
# message = ''.join(mapping[code] for code in message)
#
# return message
def dns_spoof(target, source_ip, source_port, payload, aes_key=None):
    """Send a hand-built DNS query with the payload hidden in answer TTLs,
    from a spoofed source address, then passively wait for a reply.

    NOTE(review): several likely defects to confirm:
    - ``encrypt_message(aes_key, encoded_message)`` passes the arguments
      in the opposite order of its definition (message, key);
    - if socket creation raises, ``s`` is unbound and the ``finally``
      block itself raises UnboundLocalError;
    - with IPPROTO_RAW the kernel expects a caller-built IP header --
      this packet starts at the DNS layer, so it is presumably malformed
      on the wire; verify against a capture;
    - the response TTL extraction assigns ``ttl`` but never uses or
      returns it (parsing appears unfinished).
    """
    try:
        # Encode the message using the text only scheme
        encoded_message = ''
        for char in payload:
            encoded_message += str((ord(char) - 97) * 26 ** 2)
        # AES encryption implementation here
        encrypted_message = encrypt_message(aes_key, encoded_message) if aes_key else encoded_message
        # Construct the DNS packet
        packet = b''
        packet += struct.pack("!H", 0x1234)  # Transaction ID
        packet += struct.pack("!H", 0x0100)  # Flags
        packet += struct.pack("!H", 1)  # Questions
        packet += struct.pack("!H", 0)  # Answer RRs
        packet += struct.pack("!H", 0)  # Authority RRs
        packet += struct.pack("!H", 0)  # Additional RRs
        packet += b'\x03\x77\x77\x77\x06\x67\x6f\x6f\x67\x6c\x65\x03\x63\x6f\x6d\x00'  # Domain name
        packet += struct.pack("!H", 0x0001)  # Query type
        packet += struct.pack("!H", 0x0001)  # Query class
        # Split the message into 4 character segments
        message_segments = [encrypted_message[i:i+4] for i in range(0, len(encrypted_message), 4)]
        # Encode the message segments into TTL values
        ttl_values = []
        for segment in message_segments:
            ttl = 0
            for char in segment:
                # Base-26 accumulation of the segment, scaled by 5 below.
                ttl = ttl * 26 + ord(char) - 97
            ttl_values.append(ttl * 5)
        # Add the TTL values to the packet as answers
        for ttl in ttl_values:
            packet += b'\xc0\x0c'  # Pointer to domain name
            packet += struct.pack("!H", 0x0001)  # Query type
            packet += struct.pack("!H", 0x0001)  # Query class
            packet += struct.pack("!I", ttl)  # TTL
        # Create a raw socket
        s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
        # Set the source IP and source port for spoofing
        s.bind((source_ip, source_port))
        # Send the packet
        s.sendto(packet, (target, 53))
        # Passive listening for a reply
        response, addr = s.recvfrom(1024)
        # Verify that the reply is from the expected target
        if addr[0] == target:
            # Extract the TTL values from the response
            ttl_values = []
            for i in range(len(response)):
                if response[i:i+2] == b'\x00\x01':
                    ttl = struct.unpack("!I", response[i+10:i+14])[0]
    except socket.error as e:
        print(f"Error: {e}")
    finally:
        s.close()
def parse_arguments():
    """Build and evaluate the command-line interface of the covert client."""
    parser = argparse.ArgumentParser(description='Send payload over a covert DNS channel.')
    # Required positional arguments, in order.
    positionals = [
        ('payload', 'The message to send.'),
        ('target', 'The target to send the message to.'),
        ('source', 'The true client to receive the message'),
    ]
    for arg_name, help_text in positionals:
        parser.add_argument(arg_name, type=str, help=help_text)
    parser.add_argument('-s', '--spoof', dest='spoof', action='store_true',
                        help='Spoof the source address on the request.')
    parser.add_argument('--key', type=str, default='1234567890abcdef',
                        help='Encryption key')
    return parser.parse_args()
#python covert_channel_client.py <payload> <target> [--key <key>]
if __name__ == '__main__':
    args = parse_arguments()
    payload = args.payload
    target = args.target
    source = args.source
    key = args.key  # also read as a global by decode_message
    spoof = args.spoof
    if spoof:
        print("Spoofing address on request...")
        # NOTE(review): ``spoof`` is a boolean but is passed as the
        # source_ip positional of dns_spoof -- bind() will fail; the
        # actual spoofed address appears to be missing from the CLI.
        dns_spoof(target, spoof, 53, payload, key)
    # Encode the payload
    encoded_payload = encode_message(payload, key)
    # Send the encoded payload to the target domain
    send_payload_to_target(encoded_payload, target, source)
| unicycling-amphibian/CovertDNS | DNSCovert_Client.py | DNSCovert_Client.py | py | 9,310 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "aes.aes.new",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "aes.aes",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "aes.aes.MODE_ECB",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "base64.b64encode",
"lin... |
19052604441 | import json
import snappy
from structlog import get_logger
from jwcrypto.common import base64url_decode
from app.data_model.app_models import QuestionnaireState
from app.storage import data_access
from app.storage.storage_encryption import StorageEncryption
logger = get_logger()
class EncryptedQuestionnaireStorage:
    """Persists a user's questionnaire state, compressed (snappy) and
    encrypted per user, via the data_access layer."""

    def __init__(self, user_id, user_ik, pepper, stateless_updates_enabled=False):
        """Create a storage handle for one user.

        :param user_id: identifies the stored QuestionnaireState row
        :param user_ik: user-specific key material for encryption
        :param pepper: additional secret mixed into the encryption key
        :param stateless_updates_enabled: when True, writes build a fresh
            QuestionnaireState instead of read-modify-write
        :raises ValueError: if user_id is None
        """
        if user_id is None:
            raise ValueError('User id must be set')

        self._user_id = user_id
        self.encrypter = StorageEncryption(user_id, user_ik, pepper)
        self.stateless_updates_enabled = stateless_updates_enabled

    def add_or_update(self, data, version):
        """Compress, encrypt and persist *data* under this user's id."""
        compressed_data = snappy.compress(data)
        encrypted_data = self.encrypter.encrypt_data(compressed_data)
        if self.stateless_updates_enabled:
            # Stateless path: skip the read; put() overwrites by key.
            logger.debug('saving questionnaire data', user_id=self._user_id)
            questionnaire_state = QuestionnaireState(self._user_id, encrypted_data, version)
        else:
            questionnaire_state = self._find_questionnaire_state()
            if questionnaire_state:
                logger.debug('updating questionnaire data', user_id=self._user_id)
                questionnaire_state.state_data = encrypted_data
                questionnaire_state.version = version
            else:
                logger.debug('creating questionnaire data', user_id=self._user_id)
                questionnaire_state = QuestionnaireState(self._user_id, encrypted_data, version)
        data_access.put(questionnaire_state)

    def get_user_data(self):
        """Return (decrypted_state_json, version) or (None, None) if absent.

        Handles both storage formats: legacy rows (JSON dict wrapper,
        base64-encoded, uncompressed) and current rows (snappy-compressed
        ciphertext stored directly).
        """
        questionnaire_state = self._find_questionnaire_state()
        if questionnaire_state:
            version = questionnaire_state.version or 0
            try:
                # legacy data was stored in a dict, base64-encoded, and not compressed
                data = json.loads(questionnaire_state.state_data)['data']
                is_legacy_data = True
            except ValueError:
                # Not JSON -> current format: raw encrypted blob.
                data = questionnaire_state.state_data
                is_legacy_data = False

            decrypted_data = self.encrypter.decrypt_data(data)
            if is_legacy_data:
                decrypted_data = base64url_decode(decrypted_data.decode()).decode()
            else:
                decrypted_data = snappy.uncompress(decrypted_data).decode()
            return decrypted_data, version
        return None, None

    def delete(self):
        """Remove this user's stored questionnaire state, if any."""
        logger.debug('deleting users data', user_id=self._user_id)
        questionnaire_state = self._find_questionnaire_state()
        if questionnaire_state:
            data_access.delete(questionnaire_state)

    def _find_questionnaire_state(self):
        """Fetch the QuestionnaireState row for this user (or None)."""
        logger.debug('getting questionnaire data', user_id=self._user_id)
        return data_access.get_by_key(QuestionnaireState, self._user_id)
| ONSdigital/census-survey-runner | app/storage/encrypted_questionnaire_storage.py | encrypted_questionnaire_storage.py | py | 2,887 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "structlog.get_logger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "app.storage.storage_encryption.StorageEncryption",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "snappy.compress",
"line_number": 25,
"usage_type": "call"
},
{
... |
36181808903 | """
gradient descent 연습
"""
import matplotlib.pyplot as plt
from scratch08.ex01 import difference_quotient, tangent, move
def g(x):
    """y = (1/3)x**3 - x"""
    cubic_term = (x ** 3) / 3
    return cubic_term - x
if __name__ == '__main__':
    # Plot the graph of g(x) using the helper functions written in ex01,
    # then locate the extrema (local minimum/maximum) via gradient descent.
    xs = [x / 10 for x in range(-30, 31)]  # x values from -3.0 to 3.0
    ys = [g(x) for x in xs]
    plt.plot(xs, ys)
    plt.axhline(y=0, color='0.3')  # x-axis
    plt.axvline(x=0, color='0.3')  # y-axis
    # g'(x) = x**2 - 1 = 0 at x = -1 and x = 1: mark both candidates.
    plt.axvline(x=-1, color='0.75')
    plt.axvline(x=1, color='0.75')
    plt.ylim(bottom=-2, top=2)

    # Descend toward the local minimum near x = 1, starting from 1.9.
    x_init = 1.9
    tolerance = 0.00001  # stop once successive x values differ by less than this
    count = 0
    while True:
        count += 1
        # difference_quotient approximates g'(x_init) with step h = 0.0001
        # (defined in scratch08.ex01 — not visible here).
        gradient = difference_quotient(g, x_init, 0.0001)
        # Negative step: move against the gradient (minimise).
        # NOTE(review): move() presumably returns x + step * gradient — see ex01.
        x_next = move(x_init, gradient, -0.1)
        print(f'{count} x: {x_next}')
        # ys_next = [tangent(x, gradient, x_next, g(x_next)) for x in xs]
        # plt.plot(xs, ys_next)
        if abs(x_init - x_next) < tolerance:
            break
        else:
            x_init = x_next

    # Ascend toward the local maximum near x = -1, starting from -1.9.
    x_init = -1.9
    count = 0
    while True:
        count += 1
        gradient = difference_quotient(g, x_init, 0.0001)
        # Positive step: move along the gradient (maximise).
        x_next = move(x_init, gradient, 0.1)
        print(f'{count} x: {x_next}')
        # ys_next = [tangent(x, gradient, x_next, g(x_next)) for x in xs]
        # plt.plot(xs, ys_next)
        if abs(x_init - x_next) < tolerance:
            break
        else:
            x_init = x_next

    plt.show()
| lee-saint/lab-python | scratch08/ex02.py | ex02.py | py | 1,510 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axhline",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "ma... |
14349515929 | import numpy as np
# import packages
from PIL import Image
import pytesseract
import argparse
import cv2
import os
import re
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("--image", required=True,
                help="path to input image to be OCR'd")
ap.add_argument("-p", "--preprocess", type=str, default="thresh",
                help="type of preprocessing to be done")
args = vars(ap.parse_args())

# load the example image
image = cv2.imread(args["image"])
# NOTE(review): despite the name, `gray` is still the BGR image here;
# the actual grayscale conversion happens after the filter below.
gray = image
#gray = gray.resize((500, 500 * height / width), Image.ANTIALIAS)

# Optional preprocessing step chosen on the command line.
if args["preprocess"] == "thresh":
    # THRESH_TOZERO: pixels below the threshold are zeroed, others kept.
    gray = cv2.threshold(gray, 0, 255,
                         cv2.THRESH_TOZERO)[1]
elif args["preprocess"] == "blur":
    gray = cv2.medianBlur(gray, 3)

# Hand-tuned convolution kernel (high centre weight, negative surround;
# weights sum to 3 — presumably a sharpening filter, TODO confirm).
kernel = np.array([[0,-3,-3],
                   [-1, 14,-1],
                   [-2,1,-2]])
gray = cv2.filter2D(gray, -1, kernel)
gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
#gray = cv2.threshold(gray, 0, 255,
# cv2.THRESH_TOZERO | cv2.THRESH_OTSU)[1]

# Write the processed image to a temporary file named after this PID.
filename = "{}.jpg".format(os.getpid())
cv2.imwrite(filename, gray)

# load the image as a PIL/Pillow image, apply OCR (German model), and then
# delete the temporary file
text = pytesseract.image_to_string(Image.open(filename), lang="deu")
os.remove(filename)
# Collapse every non-letter run into a single space.
text = re.sub("[^a-zA-Z]+", " ", text)
print(text)

# show the output images
#cv2.imshow("Image", image)
#cv2.imshow("Output", gray)
#cv2.waitKey(0)

from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials

# Azure Computer Vision credentials are taken from the environment.
endpoint = os.environ['ACCOUNT_ENDPOINT']
key = os.environ['ACCOUNT_KEY']

# Set credentials
credentials = CognitiveServicesCredentials(key)

# Create client
client = ComputerVisionClient(endpoint, credentials)

url = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4b/Bündnis_90_-_Die_Grünen_Logo.svg/2560px-Bündnis_90_-_Die_Grünen_Logo.svg.png"

# Tag the remote image and print each detected tag.
image_analysis = client.analyze_image(url,visual_features=[VisualFeatureTypes.tags])
for tag in image_analysis.tags:
    print(tag)
| guessthepartei/App | magic/parse.py | parse.py | py | 2,186 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_TOZERO",
... |
29876271313 | # pylint: disable=invalid-name, unused-argument, unspecified-encoding, missing-function-docstring
"""
Implements all winreg functions
https://docs.python.org/3/library/winreg.html#functions
"""
import os
from typing import Union
from re import findall
from tempfile import TemporaryDirectory
from warnings import warn
from typing import Optional, List
from unixreg.key import RegKey
from unixreg.constants import STANDARD_RIGHTS_REQUIRED, KEY_WOW64_64KEY, KEY_WRITE, KEY_READ
# Accepted key/sub-key argument types for the winreg-compatible API below.
KEY_TYPE = Union[str, RegKey]
SUBKEY_TYPE = Union[str, RegKey, None]

# Keys handed out by Create/Open are tracked here so CloseKey can forget them.
_KEY_CACHE: List[RegKey] = []

# Windows-specific environment variable names mapped to unix equivalents,
# applied by ExpandEnvironmentStrings before %VAR% expansion.
_ENV_REPLACE = {
    "USERPROFILE": "HOME"
}

# Resolve the directory backing the fake registry:
# $XDG_CONFIG_HOME, else ~/.config, else a temporary directory (warning
# suppressed under tox via the TOX env var).
_CONFIG_DIR = os.getenv("XDG_CONFIG_HOME")
if not _CONFIG_DIR:
    home = os.getenv("HOME")
    if home:
        _CONFIG_DIR = os.path.join(home, ".config")
    else:
        _CONFIG_DIR = TemporaryDirectory().name
        if not os.getenv("TOX"):
            warn(f"Could not find directory to put registry in. Falling back to {_CONFIG_DIR}")
_CONFIG_DIR = os.path.join(_CONFIG_DIR, "unixreg")
def __init_values(key: KEY_TYPE, sub_key: SUBKEY_TYPE = None, access=STANDARD_RIGHTS_REQUIRED):
    """Normalise (key, sub_key, access) into a single RegKey.

    Plain strings are promoted to RegKey, a non-None sub_key is appended
    via RegKey.__add__, and the requested access mask is stored on the
    resulting key, which is returned.
    """
    if isinstance(key, str):
        key = RegKey(key)

    if sub_key is not None:
        # BUG FIX: removed a stray debug print(sub_key) that leaked
        # sub-key names to stdout on every call.
        key = key + sub_key

    key.access = access
    return key
def __create_key(key: RegKey):
    """Materialise the key as a directory tree under _CONFIG_DIR."""
    if not (_CONFIG_DIR and key and key.key):
        return
    os.makedirs(os.path.join(_CONFIG_DIR, key.key), exist_ok=True)
def CloseKey(key: RegKey):
    """
    Closes a previously opened registry key.

    The key argument specifies a previously opened key.
    """
    key.Close()
    # Forget the key if we were tracking it; unknown keys are ignored.
    if key in _KEY_CACHE:
        _KEY_CACHE.remove(key)
def ConnectRegistry(computer: Union[str, None], key: RegKey):
    """
    Opens a registry handle on another computer and returns the handle.

    If computer is falsy the local registry is opened; any remote
    computer name raises OSError, as remote registries are unsupported.
    """
    if computer:
        raise OSError("Not Implemented")
    return OpenKey(key, None)
def OpenKeyEx(key: RegKey, sub_key: SUBKEY_TYPE, reserved=0, access=KEY_READ):
    """Open the given sub-key; in this shim opening creates on demand."""
    return CreateKeyEx(key, sub_key, reserved, access)

# winreg exposes OpenKey as an alias of OpenKeyEx.
OpenKey = OpenKeyEx
def CreateKey(key: RegKey, sub_key: SUBKEY_TYPE):
    """Create (or open) the given sub-key with default KEY_WRITE access."""
    return CreateKeyEx(key, sub_key)
def CreateKeyEx(key: RegKey, sub_key: SUBKEY_TYPE, reserved=0, access=KEY_WRITE):
    """Create (or open) key/sub_key, back it with a directory, and cache it.

    `reserved` is accepted for winreg signature compatibility but unused.
    """
    key = __init_values(key, sub_key, access)
    __create_key(key)
    _KEY_CACHE.append(key)
    return key
def DeleteKey(key: KEY_TYPE, sub_key: SUBKEY_TYPE):
    """Delete the given sub-key; delegates to DeleteKeyEx with defaults."""
    return DeleteKeyEx(key, sub_key)
def DeleteKeyEx(key: KEY_TYPE, sub_key: SUBKEY_TYPE, access=KEY_WOW64_64KEY, reserved=0):
    """Delete the backing file of the given sub-key, if it exists.

    NOTE(review): keys are materialised as directories (os.makedirs in
    __create_key) while this removes only a plain *file* at the key path,
    so deleting a key that owns sub-keys/values looks like a no-op —
    confirm the intended semantics.
    """
    kkey = __init_values(key, sub_key, access)

    if _CONFIG_DIR:
        path = os.path.join(_CONFIG_DIR, kkey.key)
        if os.path.isfile(path):
            os.remove(path)
def DeleteValue(key: KEY_TYPE, value: str):
    """Remove the named value from the given key; missing values are ignored."""
    opened_key = __init_values(key)
    if not _CONFIG_DIR:
        return
    value_path = os.path.join(_CONFIG_DIR, opened_key.key, value)
    try:
        os.remove(value_path)
    except FileNotFoundError:
        pass
def EnumKey(key: KEY_TYPE, index: int):
    """Enumerate sub-keys of an open key (not implemented in this shim)."""
    raise NotImplementedError("Not Implemented")

def EnumValue(key: KEY_TYPE, index: int):
    """Enumerate values of an open key (not implemented in this shim)."""
    raise NotImplementedError("Not Implemented")
def ExpandEnvironmentStrings(env: str):
    """Expand %VAR% references and convert backslashes to the host separator.

    Windows-specific variable names listed in _ENV_REPLACE are first mapped
    to their unix equivalents; variables that do not resolve in the current
    environment are left untouched.
    """
    for key, val in _ENV_REPLACE.items():
        env = env.replace(f"%{key}%", f"%{val}%")

    match = findall(r"%(.+?)%", env)
    for val in match:
        valenv = os.getenv(val)
        if valenv:
            env = env.replace(f"%{val}%", valenv)

    # BUG FIX: str.replace returns a new string (strings are immutable);
    # the original discarded the result, so backslashes were never converted.
    env = env.replace("\\", os.path.sep)
    return env
def FlushKey(key: KEY_TYPE):
    """Flush a key's data to disk (not implemented in this shim)."""
    raise NotImplementedError("Not Implemented")

def QueryInfoKey(key: KEY_TYPE):
    """Return metadata about a key (not implemented in this shim)."""
    raise NotImplementedError("Not Implemented")
def QueryValueEx(key: KEY_TYPE, sub_key: SUBKEY_TYPE) -> str:
    """Read and return the value stored at key/sub_key.

    Returns an empty string when no config directory is configured;
    open() raises FileNotFoundError when the value does not exist.
    """
    kkey = __init_values(key, sub_key)

    if _CONFIG_DIR:
        filepath = os.path.join(_CONFIG_DIR, kkey.key)
        with open(filepath, "r") as file:
            return file.read()
    return ""

# winreg exposes QueryValue alongside QueryValueEx; here they are identical.
QueryValue = QueryValueEx
def LoadKey(key: RegKey, sub_key: SUBKEY_TYPE, file_name: str):
    """winreg.LoadKey equivalent; unsupported on unix."""
    # this requires a win32 permission compatibility layer
    raise OSError("Not Implemented")

def SaveKey(key: RegKey, file_name: str) -> None:
    """winreg.SaveKey equivalent; unsupported on unix."""
    # this requires a win32 permission compatibility layer
    raise OSError("Not Implemented")
def SetValue(key: KEY_TYPE, sub_key: SUBKEY_TYPE, typearg: int, value: str) -> None:
    """Store `value` under `key`, using the sub-key name as the value name.

    Silently does nothing when sub_key is None/empty.
    """
    if isinstance(sub_key, RegKey):
        sub_key = sub_key.key

    if sub_key:
        return SetValueEx(key, sub_key, 0, typearg, value)
def SetValueEx(key: KEY_TYPE, value_name: str, reserved: int, typearg: int, value: str) -> None:
    """Write `value` to the file <_CONFIG_DIR>/<key>/<value_name>.

    `reserved` and `typearg` are accepted for winreg compatibility only.
    """
    kkey = __init_values(key)
    if _CONFIG_DIR:
        filepath = os.path.join(_CONFIG_DIR, kkey.key, value_name)
        with open(filepath, "w") as file:
            file.write(value)
def DisableReflectionKey(key: KEY_TYPE):
    """WOW64 reflection control (not implemented in this shim)."""
    raise NotImplementedError("Not Implemented")

def EnableReflectionKey(key: KEY_TYPE):
    """WOW64 reflection control (not implemented in this shim)."""
    raise NotImplementedError("Not Implemented")

def QueryReflectionKey(key: KEY_TYPE):
    """WOW64 reflection query (not implemented in this shim)."""
    raise NotImplementedError("Not Implemented")
# Non winreg functions
def LoadRegFile(file_name: str) -> Optional[str]:
    """Import a Windows .reg file into the on-disk registry.

    Section lines ("[Some\\Key]") select the current key; "name"="value"
    lines write the value as a file inside that key's directory under
    _CONFIG_DIR. Always returns None.

    NOTE(review): values containing '=' are still skipped (len(split) != 2),
    matching the original behaviour.
    """
    def _strip_quotes(val) -> str:
        # Remove one layer of surrounding quotes, if present.
        _QUOTE_LIST = ("\"", '\'')
        if val.startswith(_QUOTE_LIST) and val.endswith(_QUOTE_LIST):
            val = val[1:-1]
        return val

    def _section_name(val) -> Optional[str]:
        # BUG FIX: the old _strip_brackets returned non-bracketed lines
        # unchanged, so any stray line (e.g. the .reg header) was mistaken
        # for a key. Only genuine "[...]" lines name a section now.
        if val.startswith("[") and val.endswith("]"):
            return val[1:-1]
        return None

    with open(file_name, "r") as reg:
        nextline = reg.readline()
        key: Optional[str] = None
        while nextline:
            line = nextline.strip()
            nextline = reg.readline()

            if len(line) == 1:
                continue

            split = line.split("=")
            keyline = _section_name(line)
            if keyline is not None:
                key = keyline
            elif key and len(split) == 2:
                name, value = split
                name = _strip_quotes(name)
                value = _strip_quotes(value)

                if _CONFIG_DIR:
                    # BUG FIX: create the key directory under _CONFIG_DIR —
                    # the original ran os.makedirs(key, ...) relative to the
                    # current working directory while writing the value file
                    # under _CONFIG_DIR, which could then fail.
                    key_dir = os.path.join(_CONFIG_DIR, key)
                    os.makedirs(key_dir, exist_ok=True)
                    with open(os.path.join(key_dir, name), "w") as regvalue:
                        regvalue.write(value)
                print(f"[{key}] {name}={value}")
    return None
{
"api_name": "typing.Union",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "unixreg.key.RegKey",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "unixreg.key.RegKey",
... |
35920335524 | import requests
from bifocal import utils, models
from polo import Polo
from coindesk import Coindesk
class Blockscan(object):
    """Thin wrapper around the blockscan.com Counterparty (XCP) HTTP API."""

    @staticmethod
    def _request(**kwargs):
        # Encode the keyword arguments as the query string and GET it.
        uri = 'http://xcp.blockscan.com/api2?%s' % utils.encode_args(kwargs)
        ret = requests.get(uri)
        return utils.parse_json(ret)

    @staticmethod
    def get_tx_by_id(txid):
        """Fetch a single transaction record by its hash."""
        return Blockscan._request(
            module='transaction',
            action='info',
            txhash=txid
        )

    @staticmethod
    def get_address_transactions(address, asset):
        """Return parsed credit/debit transactions for `address` in `asset`.

        NOTE(review): `map` is lazy on Python 3 — callers expecting a list
        should wrap the result. Confirm which Python this project targets.
        """
        data = Blockscan._request(
            module='address',
            action='credit_debit',
            btc_address=address,
            asset=asset
        )
        transactions = data['data']
        return map(Blockscan._parse_tx, transactions)

    @staticmethod
    def get_tx_source(txid):
        """Return the source address of the given transaction."""
        tx = Blockscan.get_tx_by_id(txid)
        return tx['data'][0]['source']

    @staticmethod
    def get_tx_destination(txid):
        """Return the destination address of the given transaction."""
        tx = Blockscan.get_tx_by_id(txid)
        return tx['data'][0]['destination']

    @staticmethod
    def _parse_tx(tx):
        """Convert a raw API record into a models.Transaction.

        Pricing combines the Poloniex daily close (BTC rate for the asset)
        with the Coindesk BTC price at the same timestamp.
        """
        stamp = int(tx['block_time'])
        pair = "BTC_%s" % tx['asset']
        btc_rate = Polo.get_daily_close_price(pair, stamp)
        return models.Transaction(
            timestamp=stamp,
            quantity=int(tx['quantity']),
            asset=tx['asset'],
            id=tx['event'],
            price=btc_rate * Coindesk.get_price_by_timestamp(stamp),
            price_in_btc=btc_rate,
            # NOTE(review): these two calls are an extra API round-trip each,
            # per transaction.
            source=Blockscan.get_tx_source(tx['event']),
            destination=Blockscan.get_tx_destination(tx['event'])
        )
| super3/bifocal | bifocal/apis/blockscan.py | blockscan.py | py | 1,672 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "bifocal.utils.encode_args",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bifocal.utils",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bifocal.utils.pa... |
16606388338 | from rich import print
from napalm import get_network_driver
from my_devices import arista1, arista2, arista3, arista4
def main():
    """Merge vlans.cfg into each Arista device, committing only when it changes config."""
    for device_params in (arista1, arista2, arista3, arista4):
        driver = get_network_driver('eos')
        # The napalm context manager opens the connection on entry and
        # closes it on exit, so the explicit device.open() the original
        # called was redundant. (The loop variable is also renamed so it
        # no longer shadows the opened device object.)
        with driver(**device_params) as device:
            host = device.hostname
            # BUG FIX: dropped an unused `vlans = device.get_vlans()` call —
            # the result was never read, costing one RPC per device.
            device.load_merge_candidate(filename='vlans.cfg')
            diff = device.compare_config()
            print(f'diff for host {host}:')
            print(diff)
            print('-'*70)
            if diff:
                print('committing')
                device.commit_config()
            else:
                print('no changes')
            print()


if __name__ == '__main__':
    main()
| caseymorris87/pynet_test2 | napalm/ex2.py | ex2.py | py | 839 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "my_devices.arista1",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "my_devices.arista2",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "my_devices.arista3",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "my_devices.aris... |
6547068714 | """
Tests for Randomized Reconstruction Commands
"""
import unittest
import requests
import sys
import os
import importlib
from pathlib import Path
import json
# Add the client folder to sys.path
CLIENT_DIR = os.path.join(os.path.dirname(__file__), "..", "client")
if CLIENT_DIR not in sys.path:
sys.path.append(CLIENT_DIR)
from fusion360gym_client import Fusion360GymClient
# Address of the locally running Fusion 360 Gym server these tests exercise.
HOST_NAME = "127.0.0.1"
PORT_NUMBER = 8080


class TestFusion360GymRandomizedReconstruction(unittest.TestCase):
    """Integration tests for the randomized-reconstruction endpoints.

    Requires a running Fusion 360 Gym server and a dataset directory
    configured in test_config.json next to this file.
    """

    @classmethod
    def setUpClass(cls):
        # One client shared by every test; all fixture paths are derived
        # from the dataset directory named in test_config.json.
        cls.client = Fusion360GymClient(f"http://{HOST_NAME}:{PORT_NUMBER}")
        current_dir = Path(__file__).parent
        test_config_file = current_dir / "test_config.json"
        if not test_config_file.exists():
            print("Error: test_config.json file not found in the test directory")
        with open(test_config_file, encoding="utf8") as f:
            test_config = json.load(f)
        dataset_dir = Path(test_config["dataset_dir"])
        if not dataset_dir.exists():
            print("Error: dataset_dir does not exist")
        cls.data_dir = dataset_dir
        # Deliberately nonexistent paths used by the negative tests below.
        cls.void_data_dir = dataset_dir.parent / "void"
        cls.split_file = dataset_dir.parent / "train_test.json"
        cls.void_split_file = dataset_dir.parent / "void.json"
        cls.distributions_json = dataset_dir.parent / "d7_distributions.json"
        cls.distributions_training_only_json = dataset_dir.parent / "d7_training_distributions.json"

    def test_sample_design(self):
        # Sample the whole dataset
        r = self.client.sample_design(self.data_dir, filter=False)
        # Sample the training data
        r = self.client.sample_design(self.data_dir, filter=True, split_file=self.split_file)

    def test_sample_design_invalid_data_dir(self):
        # Sample from a non-existent directory
        r = self.client.sample_design(self.void_data_dir, filter=False)
        # Sample from a non-existent directory with the split file
        r = self.client.sample_design(self.void_data_dir, filter=True, split_file=self.split_file)
        # Sample from a non-existent string
        r = self.client.sample_design("random_data_dir", filter=False)

    def test_sample_design_invalid_split_file(self):
        # the split file is void
        r = self.client.sample_design(self.data_dir, filter=True, split_file=self.void_split_file)

    def test_get_distributions_from_dataset(self):
        # BUG FIX: removed a redundant local `import json` that shadowed
        # the module-level import for no reason.
        # distributions of the whole dataset
        r = self.client.get_distributions_from_dataset(self.data_dir, filter=False)
        # with open('d7_distributions.json', 'w') as outfile:
        #     json.dump(r, outfile)
        # distributions of the training dataset
        r = self.client.get_distributions_from_dataset(self.data_dir, filter=True, split_file=self.split_file)
        # with open('d7_training_distributions.json', 'w') as outfile:
        #     json.dump(r, outfile)

    def test_get_distributions_from_json(self):
        # distributions of the whole dataset
        r = self.client.get_distributions_from_json(self.distributions_json)
        # distributions of the training dataset
        r = self.client.get_distributions_from_json(self.distributions_training_only_json)
        # invalid input file
        r = self.client.get_distributions_from_json("void")

    def test_distribution_sampling(self):
        # test invalid distributions
        distributions = {"invalid": "testing"}
        r = self.client.distribution_sampling(distributions)
        # sample all parameters
        distributions = self.client.get_distributions_from_json(self.distributions_training_only_json)
        r = self.client.distribution_sampling(distributions)
        # test invalid parameters
        r = self.client.distribution_sampling(distributions, ["invalid"])
        # sample a list of selected parameters
        r = self.client.distribution_sampling(distributions, ["num_faces", "num_bodies"])

    def test_sample_sketch(self):
        json_data, _ = self.client.sample_design(self.data_dir, filter=True, split_file=self.split_file)
        # test invalid sampling type
        r = self.client.sample_sketch(json_data, "invalid")
        # random sampling
        r = self.client.sample_sketch(json_data, sampling_type = "random")
        # deterministic sampling
        r = self.client.sample_sketch(json_data, sampling_type = "deterministic")
        # distributive sampling
        distributions = self.client.get_distributions_from_json(self.distributions_training_only_json)
        r = self.client.sample_sketch(json_data, sampling_type = "distributive", area_distribution=distributions["sketch_areas"])
        # test invalid area distribution
        r = self.client.sample_sketch(json_data, sampling_type = "distributive", area_distribution=["invalid"])

    def test_sample_profiles(self):
        json_data, _ = self.client.sample_design(self.data_dir, filter=True, split_file=self.split_file)
        sketch_data = self.client.sample_sketch(json_data, sampling_type = "random")
        # test invalid sketch data
        r = self.client.sample_profiles({"data":"invalid"}, max_number_profiles = 1, sampling_type = "random")
        # test invalid max number of profiles
        r = self.client.sample_profiles(sketch_data, max_number_profiles = -1, sampling_type = "random")
        # random sampling
        r = self.client.sample_profiles(sketch_data, max_number_profiles = 2, sampling_type = "random")
        # deterministic sampling
        r = self.client.sample_profiles(sketch_data, max_number_profiles = 2, sampling_type = "deterministic")
        # distributive sampling
        distributions = self.client.get_distributions_from_json(self.distributions_training_only_json)
        r = self.client.sample_sketch(json_data, sampling_type = "distributive", area_distribution=distributions["profile_areas"])
        # test invalid area distribution
        r = self.client.sample_sketch(json_data, sampling_type = "distributive", area_distribution=["invalid"])


if __name__ == "__main__":
    unittest.main()
| albertotono/Fusion360GalleryDataset | tools/fusion360gym/test/test_fusion360gym_randomized_reconstruction.py | test_fusion360gym_randomized_reconstruction.py | py | 6,122 | python | en | code | null | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number... |
26306025238 | #!/usr/bin/python3
"""Use reddit api to get info about subredit subscribers"""
def number_of_subscribers(subreddit):
    """Return number of subscribers in subreddit given as argument"""
    import requests

    about_url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)
    response = requests.get(about_url,
                            headers={'user-agent': 'andy'},
                            allow_redirects=False)
    # Anything other than 200 (404 unknown subreddit, 302 redirect, ...)
    # is reported as zero subscribers.
    if response.status_code == 200:
        return (response.json()['data']['subscribers'])
    return (0)
| AndyMSP/holbertonschool-system_engineering-devops | 0x16-api_advanced/0-subs.py | 0-subs.py | py | 496 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
}
] |
20105217581 | #https://towardsdatascience.com/how-to-perform-lasso-and-ridge-regression-in-python-3b3b75541ad8
import numpy as np
import pandas as pd
# We only have three advertising mediums; sales is the target variable.
DATAPATH = 'Advertising.csv'

data = pd.read_csv(DATAPATH)
print(data.head())
data.drop(['Unnamed: 0'], axis=1, inplace=True)  # remove first column, which holds the record number

# Least-squares regression baseline, scored by 5-fold cross-validation.
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression

Xs = data.drop(['sales'], axis=1)
y = data['sales'].values.reshape(-1,1)

lin_reg = LinearRegression()
# NOTE: 'neg_mean_squared_error' yields negated MSE, so these values are <= 0.
MSEs = cross_val_score(lin_reg, Xs, y, scoring='neg_mean_squared_error', cv=5)
mean_MSE = np.mean(MSEs)
print("Least square MSE ",mean_MSE)

# Lasso regression with a grid search over the regularisation strength.
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso

lasso = Lasso()
parameters = {'alpha': [1e-15, 1e-10, 1e-8, 1e-4, 1e-3,1e-2, 1, 5, 10, 20]}
# GridSearchCV automatically performs 5-fold cross-validation over the
# candidate regularisation parameters to find the optimal value of alpha.
lasso_regressor = GridSearchCV(lasso, parameters, scoring='neg_mean_squared_error', cv = 5)
lasso_regressor.fit(Xs, y)

# BUG FIX: printed label said "alpth" instead of "alpha".
print("Lasso best alpha value ",lasso_regressor.best_params_)
print("Lasso MSE score ",lasso_regressor.best_score_)  # negated MSE
| eyadwin/Machine_Learning | regularization_lasso_regression.py | regularization_lasso_regression.py | py | 1,396 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.cross_val_score",
"line_number": 21,
"usage_type": "call"
... |
1584185561 | # This is mostly lifted from django-storages' sftp backend: Their license:
#
# SFTP storage backend for Django.
# Author: Brent Tubbs <brent.tubbs@gmail.com>
# License: MIT
#
# Modeled on the FTP storage by Rafal Jonca <jonca.rafal@gmail.com>
from __future__ import print_function
try:
import ssh
except ImportError:
import paramiko as ssh
import os
import posixpath
import warnings
from django.conf import settings
from django.core.files.base import File
try:
from io import StringIO
except ImportError:
# Python 2 fallbacks
from cStringIO import StringIO
from localdevstorage.base import BaseStorage
class SftpStorage(BaseStorage):
    """Django storage backend that lazily fetches files over SFTP.

    Explicitly unsupported/untested (a warning is emitted on construction).
    """

    def __init__(self, location=None, base_url=None, user=None, host=None, root_path=None):
        warnings.warn(
            'The SFTP backend is unsupported and untested. '
            'Usage is not recommended!'
        )
        self._host = host or settings.LOCALDEVSTORAGE_SFTP_HOST
        self._root_path = root_path or settings.LOCALDEVSTORAGE_SFTP_ROOT_PATH
        # if present, settings.SFTP_STORAGE_PARAMS should be a dict with params
        # matching the keyword arguments to paramiko.SSHClient().connect(). So
        # you can put username/password there. Or you can omit all that if
        # you're using keys.
        self._params = getattr(settings, 'SFTP_STORAGE_PARAMS', {})
        self._params['username'] = user or settings.LOCALDEVSTORAGE_SFTP_USER
        # for now it's all posix paths. Maybe someday we'll support figuring
        # out if the remote host is windows.
        self._pathmod = posixpath
        super(SftpStorage, self).__init__(location, base_url)

    def _connect(self):
        """Open the SSH connection and the SFTP channel."""
        self._ssh = ssh.SSHClient()
        # automatically add host keys from current user.
        self._ssh.load_host_keys(os.path.expanduser(os.path.join("~", ".ssh", "known_hosts")))
        # and automatically add new host keys for hosts we haven't seen before.
        self._ssh.set_missing_host_key_policy(ssh.AutoAddPolicy())
        try:
            self._ssh.connect(self._host, **self._params)
        except ssh.AuthenticationException as e:
            raise
        except Exception as e:
            # NOTE(review): non-auth connection errors are only printed and
            # swallowed; open_sftp() below will then likely fail anyway.
            print(e)
        if not hasattr(self, '_sftp'):
            self._sftp = self._ssh.open_sftp()

    @property
    def sftp(self):
        """Lazy SFTP connection"""
        if not hasattr(self, '_sftp'):
            self._connect()
        return self._sftp

    def _get(self, name):
        # Return a lazily-reading file object, or None on IOError.
        try:
            return SFTPStorageFile(name, self, 'rb')
        except IOError:
            pass

    def _exists_upstream(self, name):
        """Return True if the remote file can be opened."""
        try:
            f = SFTPStorageFile(name, self, 'rb')
            f.close()
            return True
        except Exception:
            return False

    def _read(self, name):
        """Open the remote file for binary reading."""
        remote_path = self._remote_path(name)
        return self.sftp.open(remote_path, 'rb')

    def _remote_path(self, name):
        # Remote location is always rooted at self._root_path.
        return self._join(self._root_path, name)

    def _join(self, *args):
        # Use the path module for the remote host type to join a path together
        return self._pathmod.join(*args)
class SFTPStorageFile(File):
    """File wrapper over SftpStorage with lazy remote reads.

    NOTE(review): `_is_dirty` is never set to True and `write` raises, so
    the save branch in `close` is currently dead code — effectively
    read-only.
    """

    def __init__(self, name, storage, mode):
        self._name = name
        self._storage = storage
        self._mode = mode
        self._is_dirty = False
        self.file = StringIO()
        # Remote content is fetched on first read() call only.
        self._is_read = False

    @property
    def size(self):
        # Cache the remote size after the first lookup.
        if not hasattr(self, '_size'):
            self._size = self._storage.size(self._name)
        return self._size

    def read(self, num_bytes=None):
        # Open the remote file lazily, then delegate to it.
        if not self._is_read:
            self.file = self._storage._read(self._name)
            self._is_read = True
        return self.file.read(num_bytes)

    def write(self, content):
        # Writing through this backend is not supported.
        raise NotImplementedError

    def close(self):
        if self._is_dirty:
            self._storage._save(self._name, self.file.getvalue())
        self.file.close()
| beniwohli/django-localdevstorage | localdevstorage/sftp.py | sftp.py | py | 3,964 | python | en | code | 50 | github-code | 6 | [
{
"api_name": "localdevstorage.base.BaseStorage",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "warnings.warn",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.LOCALDEVSTORAGE_SFTP_HOST",
"line_number": 35,
"usage_type": "attribu... |
6827571219 | """ https://adventofcode.com/2020/day/17 """
from typing import List
from copy import deepcopy
from functools import lru_cache
def part1(data: List[str]) -> int:
    """Run CYCLES steps of the 3D Conway-cubes automaton; return active count.

    `data` is the initial 2D slice ('#' = active). CYCLES is a module
    global assigned in the __main__ block.
    """
    # Pad by CYCLES on every side: the active region can grow by at most
    # one cell per axis per cycle.
    size_x = len(data[0]) + 2 * CYCLES
    size_y = len(data) + 2 * CYCLES
    size_z = CYCLES * 2 + 1
    # pocket[z][y][x] is True for an active cube.
    pocket = [[[False] * size_x for _ in range(size_y)] for _ in range(size_z)]

    # Seed the starting slice in the centre plane z == CYCLES.
    for y in range(len(data)):
        for x in range(len(data[y])):
            pocket[CYCLES][CYCLES + y][CYCLES + x] = data[y][x] == "#"

    for _ in range(CYCLES):
        # Update into a copy so all cell transitions are simultaneous.
        temp = deepcopy(pocket)
        for z, depth in enumerate(pocket):
            for y, row in enumerate(depth):
                for x, cube in enumerate(row):
                    position = (z, y, x)
                    adjacents = find_3d_adjacents(
                        position, len(pocket), len(pocket[0]))
                    active = sum([(pocket[i][j][k]) for i, j, k in adjacents])
                    # Rules: active survives with 2-3 active neighbours;
                    # inactive activates with exactly 3.
                    if cube and active not in (2, 3):
                        temp[z][y][x] = False
                    elif not cube and active == 3:
                        temp[z][y][x] = True
        pocket = deepcopy(temp)

    # Count all active cubes after the final cycle.
    return sum([x for z in pocket for y in z for x in y])
def part2(data: List[str]) -> int:
    """Same automaton as part1 extended to 4D (w, z, y, x); return active count."""
    # Pad by CYCLES on every side of each spatial axis; the two extra
    # dimensions (z and w) start as single planes and grow one per cycle.
    size_x = len(data[0]) + 2 * CYCLES
    size_y = len(data) + 2 * CYCLES
    size_z = CYCLES * 2 + 1
    size_w = CYCLES * 2 + 1
    # pocket[w][z][y][x] is True for an active hypercube.
    pocket = [[[[False] * size_x for _ in range(size_y)]
               for _ in range(size_z)] for _ in range(size_w)]

    # Seed the 2D slice at the centre of both extra dimensions.
    for y, _ in enumerate(data):
        for x, _ in enumerate(data[y]):
            pocket[CYCLES][CYCLES][CYCLES +
                                   y][CYCLES + x] = data[y][x] == "#"

    for _ in range(CYCLES):
        # Update into a copy so all cell transitions are simultaneous.
        temp = deepcopy(pocket)
        for w, time in enumerate(pocket):
            for z, depth in enumerate(time):
                for y, row in enumerate(depth):
                    for x, cube in enumerate(row):
                        position = (w, z, y, x)
                        adjacents = find_4d_adjacents(position, len(
                            pocket), len(pocket[0]), len(pocket[0][0]))
                        active = sum([(pocket[i][j][k][l])
                                      for i, j, k, l in adjacents])
                        # Same survival/birth rules as the 3D version.
                        if cube and active not in (2, 3):
                            temp[w][z][y][x] = False
                        elif not cube and active == 3:
                            temp[w][z][y][x] = True
        pocket = deepcopy(temp)

    # Count all active hypercubes after the final cycle.
    return sum([x for w in pocket for z in w for y in z for x in y])
@lru_cache(maxsize=None)
def find_3d_adjacents(pos, depth, width):
    """Return the in-bounds 26-connected neighbour coordinates of `pos`.

    Bounds are 0 <= z < depth and 0 <= y, x < width; results are memoised.
    """
    z, y, x = pos
    return [
        (i, j, k)
        for i in range(z - 1, z + 2)
        for j in range(y - 1, y + 2)
        for k in range(x - 1, x + 2)
        if (i, j, k) != pos
        and 0 <= i < depth and 0 <= j < width and 0 <= k < width
    ]
@lru_cache(maxsize=None)
def find_4d_adjacents(pos, time, depth, width):
    """Return the in-bounds 80-connected 4D neighbour coordinates of `pos`.

    Bounds are 0 <= w < time, 0 <= z < depth and 0 <= y, x < width;
    results are memoised.
    """
    w, z, y, x = pos
    return [
        (i, j, k, l)
        for i in range(w - 1, w + 2)
        for j in range(z - 1, z + 2)
        for k in range(y - 1, y + 2)
        for l in range(x - 1, x + 2)
        if (i, j, k, l) != pos
        and 0 <= i < time and 0 <= j < depth
        and 0 <= k < width and 0 <= l < width
    ]
if __name__ == "__main__":
    # Puzzle input: one line per row of the initial 2D slice.
    TEST = [line.strip() for line in open("tests/d17.txt", "r")]
    PUZZLE = [line.strip() for line in open("puzzles/d17.txt", "r")]
    CYCLES = 6  # number of boot cycles, fixed by the puzzle statement

    # Known answers for the sample input from the puzzle description.
    assert part1(TEST) == 112
    assert part2(TEST) == 848

    print(f"Part 1: {part1(PUZZLE)}")
    print(f"Part 2: {part2(PUZZLE)}")
| pozhega/AoC | 2020/d17.py | d17.py | py | 3,804 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number... |
21837055614 | """Parsing url to check its SEO and availability"""
from datetime import date
from bs4 import BeautifulSoup
def get_page_data(response):
    """Extract SEO fields (h1, title, meta description) from a fetched page.

    Returns a dict with status_code, h1, title, description (each '' when
    absent) and created_at set to today's date.
    """
    result = {'status_code': response.status_code}
    page = BeautifulSoup(response.text, 'html.parser')
    result['h1'] = page.h1.get_text() if page.h1 else ''
    result['title'] = page.title.get_text() if page.title else ''
    # Look the meta tag up once instead of parsing the document twice.
    description_tag = page.find('meta', {'name': 'description'})
    result['description'] = description_tag.get('content') if description_tag else ''
    result['created_at'] = date.today()
    return result
| GunGalla/python-project-83 | page_analyzer/parse_url.py | parse_url.py | py | 628 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 21,
"usage_type": "name"
}
] |
73819284349 | import numpy as np
import dill
import math
import sys
sys.path.append('../')
sys.path.append('./')
from src.graph import Graph
from src.evolution_strategies import one_plus_lambda, tournament_selection
from src.population import Population
from src.arg_parser import parse_args
import cProfile
import pstats
def bool_and(x, y):
    """Logical AND of two boolean inputs."""
    return x and y


def bool_or(x, y):
    """Logical OR of two boolean inputs."""
    return x or y


def bool_nand(x, y):
    """Negated AND of two boolean inputs."""
    return not (x and y)


def bool_nor(x, y):
    """Negated OR of two boolean inputs."""
    return not (x or y)
# Register the boolean primitives as the CGP node function set.
Population.add_operation(arity=2, func=bool_and, string="AND")
Population.add_operation(arity=2, func=bool_or, string="OR")
Population.add_operation(arity=2, func=bool_nand, string="NAND")
Population.add_operation(arity=2, func=bool_nor, string="NOR")
def eleven_multiplexer(arr):
    """Evaluate an 11-line multiplexer on '0'/'1' characters.

    arr[0:8] are the data lines and arr[8:11] the address lines; returns
    True when the addressed data line is '1'.

    Raises:
        ValueError: if arr does not contain exactly 11 elements.
    """
    if len(arr) != 11:
        # BUG FIX: the original printed a debug string and then executed a
        # bare `raise` with no active exception, which crashes with
        # "RuntimeError: No active exception to re-raise".
        raise ValueError("eleven_multiplexer expects exactly 11 inputs")
    data = arr[0:8]
    address = arr[8:11]
    # Address lines are most-significant-bit first.
    index = (int(address[0]) * 4) + (int(address[1]) * 2) + (int(address[2]) * 1)
    return data[index] == "1"
def create_tests(n):
    """Build the full truth table for the n-input even-parity problem.

    Returns one (inputs, [expected]) pair per possible bit pattern, where
    expected is True when the number of True inputs is even.
    """
    tests = []
    for value in range(2 ** n):
        bits = format(value, '0{}b'.format(n))
        input_arr = [bit == "1" for bit in bits]
        parity_is_even = bits.count("1") % 2 == 0
        tests.append((input_arr, [parity_is_even]))
    return tests
def fitness_func(individual: Graph, gen: int, tests):
    """Fraction of expected output bits the individual reproduces over all tests.

    The result is clipped to +/-1e10 as a guard against degenerate values;
    `gen` is unused but kept for the strategy callback signature.
    """
    correct = 0
    for inputs, expected_out in tests:
        graph_out = individual.operate(inputs)
        correct += sum(1 for h, y in zip(graph_out, expected_out) if h == y)
    fitness = correct / len(tests)
    return np.clip(fitness, -1 * (10 ** 10), 10 ** 10)
def main():
    """Configure the population from CLI args and run the chosen evolution strategy."""
    n = 5  # number of boolean inputs for the parity problem
    args = parse_args()
    # Seed the class-level RNG so runs are reproducible.
    Population.rng = np.random.default_rng(args["seed"])

    # Full truth table used as the fitness test set.
    tests = create_tests(n)
    def fit_func(indv, gen): return fitness_func(indv, gen, tests)

    population = Population(
        population_size=args["pop_size"],
        n_in=n,
        n_out=1,
        n_middle=args["n_middle_nodes"]
    )

    # Two alternative strategies; both close over `population` and `fit_func`.
    def t_select(): return tournament_selection(
        population=population,
        generations=args["max_gens"],
        goal_fit=1,
        fitness_func=fit_func,
        minimize_fitness=False,
        fit_share=args["fit_share"],
        stagnation=args["stagnation"],
        stag_preservation=args["stag_preservation"],
        report=args["report"],
        mutate_active_only=args["mut_active_only"],
        mutation_rate=args["mut_rate"],
        elitism=args["elitism"],
        crossover_rate=args["crossover_rate"],
        tournament_size=args["tourney_size"],
        species_threshold=args["species_threshold"],
        n_threads=args["n_threads"],
        csv_file=args["csv"],
        fit_partition_size=args["fit_partition"]
    )

    def p_lambda(): return one_plus_lambda(
        population=population,
        generations=args["max_gens"],
        goal_fit=1,
        fitness_func=fit_func,
        minimize_fitness=False,
        fit_share=args["fit_share"],
        stagnation=args["stagnation"],
        stag_preservation=args["stag_preservation"],
        report=args["report"],
        n_champions=args["elitism"],
        mutate_active_only=args["mut_active_only"],
        mutation_rate=args["mut_rate"],
        species_threshold=args["species_threshold"],
        n_threads=args["n_threads"],
        csv_file=args["csv"],
        fit_partition_size=args["fit_partition"]
    )

    # Tournament selection is the default; "lambda" selects (1+lambda) ES.
    exec_func = t_select
    if args["selection_method"] == "lambda":
        exec_func = p_lambda

    # Profiling scaffolding left in place for reference:
    # profile = cProfile.Profile()
    # profile.runcall(exec_func)
    # ps = pstats.Stats(profile)
    # ps.print_stats()
    # print()

    exec_func()

    if args["save_to"] is not None:
        # dill (unlike pickle) can serialise the lambdas/closures inside.
        dill.dump(population, open(args["save_to"], mode='wb'))


if __name__ == "__main__":
    main()
| fhtanaka/CGPython | tests/diversity_parity_test.py | diversity_parity_test.py | py | 3,954 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_numbe... |
13394323895 | # -*- coding: utf-8 -*-
"""This module loads images from csv files and outputs numpy arrays"""
from __future__ import absolute_import, division, print_function
from copy import deepcopy
import numpy as np
import tensorflow as tf
from six import string_types
import niftynet.utilities.util_csv as util_csv
from niftynet.io.image_type import ImageFactory
from niftynet.layer.base_layer import Layer, DataDependentLayer, RandomisedLayer
from niftynet.utilities.user_parameters_helper import make_input_tuple
from niftynet.utilities.util_common import print_progress_bar
# NP_TF_DTYPES = {'i': tf.int32, 'u': tf.int32, 'b': tf.int32, 'f': tf.float32}
from niftynet.utilities.niftynet_global_config import NiftyNetGlobalConfig
NP_TF_DTYPES = {'i': tf.float32,
'u': tf.float32,
'b': tf.float32,
'f': tf.float32}
def infer_tf_dtypes(image_array):
    """Map an image's numpy dtype kind to a TensorFlow dtype (float32 default).

    The lookup table NP_TF_DTYPES is defined at module level; any kind not
    listed there falls back to tf.float32.
    """
    # assumes image_array.dtype supports indexing (first component's dtype
    # is the representative one) -- TODO confirm against image_type
    numpy_kind = image_array.dtype[0].kind
    return NP_TF_DTYPES.get(numpy_kind, tf.float32)
class ImageReader(Layer):
    """
    Loads image volumes (listed in csv files) and serves them as numpy arrays.

    For a concrete example:
    _input_sources define multiple modality mappings, e.g.,
    _input_sources {'image': ('T1', 'T2'),
                    'label': ('manual_map',)}
    means
    'image' consists of two components, formed by
    concatenating 'T1' and 'T2' input source images.
    'label' consists of one component, loading from 'manual_map'

    self._names: a tuple of the output names of this reader.
    ('image', 'labels')

    self._shapes: the shapes after combining input sources
    {'image': (192, 160, 192, 1, 2), 'label': (192, 160, 192, 1, 1)}

    self._dtypes: store the dictionary of tensorflow shapes
    {'image': tf.float32, 'label': tf.float32}

    self.output_list is a list of dictionaries, with each item:
    {'image': <niftynet.io.image_type.SpatialImage4D object>,
     'label': <niftynet.io.image_type.SpatialImage3D object>}
    """

    def __init__(self, names):
        """:param names: output names of this reader (see class docstring)"""
        # list of file names
        self._file_list = None
        self._input_sources = None
        self._shapes = None
        self._dtypes = None
        self._names = None
        # assigning via the property setter normalises `names` into a tuple
        self.names = names
        self._global_config = NiftyNetGlobalConfig()

        # list of image objects
        self.output_list = None
        # index of the last sample served; used for sequential (non-shuffled) access
        self.current_id = -1

        self.preprocessors = []
        super(ImageReader, self).__init__(name='image_reader')

    def initialise_reader(self, data_param, task_param):
        """
        task_param specifies how to combine user input modalities
        e.g., for multimodal segmentation 'image' corresponds to multiple
        modality sections, 'label' corresponds to one modality section
        """
        if not self.names:
            tf.logging.fatal('Please specify data names, this should '
                             'be a subset of SUPPORTED_INPUT provided '
                             'in application file')
            raise ValueError
        # keep only the names actually configured in task_param
        self._names = [name for name in self.names
                       if vars(task_param).get(name, None)]
        self._input_sources = {name: vars(task_param).get(name)
                               for name in self.names}

        data_to_load = {}
        for name in self._names:
            for source in self._input_sources[name]:
                try:
                    data_to_load[source] = data_param[source]
                except KeyError:
                    tf.logging.fatal(
                        'reader name [%s] requires [%s], however it is not '
                        'specified as a section in the config, '
                        'current input section names: %s',
                        name, source, list(data_param))
                    raise ValueError
        default_data_folder = self._global_config.get_niftynet_home_folder()
        self._file_list = util_csv.load_and_merge_csv_files(data_to_load, default_data_folder)

        self.output_list = _filename_to_image_list(
            self._file_list, self._input_sources, data_param)
        for name in self.names:
            tf.logging.info(
                'image reader: loading [%s] from %s (%d)',
                name, self.input_sources[name], len(self.output_list))

    def prepare_preprocessors(self):
        """Train any data-dependent preprocessing layers on the loaded images."""
        for layer in self.preprocessors:
            if isinstance(layer, DataDependentLayer):
                layer.train(self.output_list)

    def add_preprocessing_layers(self, layers):
        """Append one layer (or a sequence of layers) and (re)train them."""
        assert self.output_list is not None, \
            'Please initialise the reader first, ' \
            'before adding preprocessors.'
        if isinstance(layers, Layer):
            self.preprocessors.append(layers)
        else:
            self.preprocessors.extend(layers)
        self.prepare_preprocessors()

    # pylint: disable=arguments-differ
    def layer_op(self, idx=None, shuffle=True):
        """
        this layer returns a dictionary
          keys: self.output_fields
          values: image volume array
        """
        if idx is None and shuffle:
            # training, with random list output
            idx = np.random.randint(len(self.output_list))

        if idx is None and not shuffle:
            # testing, with sequential output
            # accessing self.current_id, not suitable for multi-thread
            idx = self.current_id + 1
            self.current_id = idx

        try:
            idx = int(idx)
        except ValueError:
            idx = -1
        # out-of-range (or exhausted sequential) access signals end of data
        if idx < 0 or idx >= len(self.output_list):
            return -1, None, None

        image_dict = self.output_list[idx]
        image_data_dict = {field: image.get_data()
                           for (field, image) in image_dict.items()}
        interp_order_dict = {field: image.interp_order
                             for (field, image) in image_dict.items()}

        if self.preprocessors:
            # deep-copied so randomised layers don't mutate the shared list
            preprocessors = [deepcopy(layer) for layer in self.preprocessors]
            # dictionary of masks is cached
            mask = None
            for layer in preprocessors:
                # import time; local_time = time.time()
                if layer is None:
                    continue
                if isinstance(layer, RandomisedLayer):
                    layer.randomise()
                    image_data_dict = layer(image_data_dict, interp_order_dict)
                else:
                    image_data_dict, mask = layer(image_data_dict, mask)
                # print('%s, %.3f sec'%(layer, -local_time + time.time()))
        return idx, image_data_dict, interp_order_dict

    @property
    def shapes(self):
        """
        image shapes before any preprocessing
        :return: tuple of integers as image shape
        """
        # to have fast access, the spatial dimensions are not accurate
        # 1) only read from the first image in list
        # 2) not considering effects of random augmentation layers
        # but time and modality dimensions should be correct
        if not self.output_list:
            tf.logging.fatal("please initialise the reader first")
            raise RuntimeError
        if not self._shapes:
            first_image = self.output_list[0]
            self._shapes = {field: first_image[field].shape
                            for field in self.names}
        return self._shapes

    @property
    def tf_dtypes(self):
        """TensorFlow dtypes of the outputs, inferred from the first image."""
        if not self.output_list:
            tf.logging.fatal("please initialise the reader first")
            raise RuntimeError
        if not self._dtypes:
            first_image = self.output_list[0]
            self._dtypes = {field: infer_tf_dtypes(first_image[field])
                            for field in self.names}
        return self._dtypes

    @property
    def input_sources(self):
        """Mapping of output name -> tuple of input modality section names."""
        if not self._input_sources:
            tf.logging.fatal("please initialise the reader first")
            raise RuntimeError
        return self._input_sources

    @property
    def names(self):
        return self._names

    @names.setter
    def names(self, fields_tuple):
        # output_fields is a sequence of output names
        # each name might correspond to a list of multiple input sources
        # this should be specified in CUSTOM section in the config
        self._names = make_input_tuple(fields_tuple, string_types)

    def get_subject_id(self, image_index):
        """Return the subject id (first csv column) for a given image index."""
        return self._file_list.iloc[image_index, 0]
def _filename_to_image_list(file_list, mod_dict, data_param):
    """
    Convert a table of filenames into a list of image-object dictionaries.

    Each row of ``file_list`` yields one dict mapping output field name to
    an image object; useful properties (e.g. interp_order) are attached to
    each object by ``_create_image``.
    """
    n_rows = len(file_list)
    images = []
    for row_idx in range(n_rows):
        print_progress_bar(row_idx, n_rows,
                           prefix='reading datasets headers',
                           decimals=1, length=10, fill='*')
        # combine fieldnames and volumes as a dictionary
        subject = {}
        for field, modalities in mod_dict.items():
            subject[field] = _create_image(file_list, row_idx,
                                           modalities, data_param)
        images.append(subject)
    return images
def _create_image(file_list, idx, modalities, data_param):
    """
    data_param consists of description of each modality
    This function combines modalities according to the 'modalities'
    parameter and create <niftynet.io.input_type.SpatialImage*D>
    """
    try:
        # one entry per modality, gathered column-wise from the csv table
        file_path = tuple(file_list.loc[idx, mod] for mod in modalities)
        interp_order = tuple(data_param[mod].interp_order for mod in modalities)
        pixdim = tuple(data_param[mod].pixdim for mod in modalities)
        axcodes = tuple(data_param[mod].axcodes for mod in modalities)
    except KeyError:
        # a modality name is missing from the csv table / config sections
        tf.logging.fatal(
            "Specified modality names %s "
            "not found in config: input sections %s",
            modalities, list(data_param))
        raise
    except AttributeError:
        # a config section exists but lacks one of the required attributes
        tf.logging.fatal(
            'data params must contain: interp_order, pixdim, axcodes')
        raise
    image_properties = {'file_path': file_path,
                        'name': modalities,
                        'interp_order': interp_order,
                        'output_pixdim': pixdim,
                        'output_axcodes': axcodes}
    return ImageFactory.create_instance(**image_properties)
| LUYU0004/ISLES2018-1 | lib/niftynet/io/image_reader.py | image_reader.py | py | 10,262 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tensorflow.float32",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.float32",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.float32",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name"... |
14405820391 | # coding=utf-8
from django.contrib.admin import ModelAdmin, site
from models import News
class NewsAdmin(ModelAdmin):
    """Admin list configuration for News (football match records)."""
    list_display = ('id', 'match_type', 'game_start_time', 'end_score', 'middle_score', 'status', 'team1', 'score',
                    'team2', 'yapan', 'yapanSB', 'daxiaopan', 'daxiaopanSB', 'findex', 'create_time')
    # BUG FIX: was misspelled as `ording`, which Django silently ignores;
    # the ModelAdmin option is `ordering`.
    ordering = ('id',)
    list_per_page = 300
    list_filter = ('create_time',)
    search_fields = ['team1', 'team2', 'findex', 'score']
site.register(News, NewsAdmin)
| xiaoqiu206/football | spider/admin.py | admin.py | py | 507 | python | en | code | 36 | github-code | 6 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.News",
"line_number": 14,
"usage_type": "argument"
},
{
... |
74798420027 | import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.cm import get_cmap
# Load per-hop traceroute statistics. FIX: pd.read_csv already returns a
# DataFrame, so the previous extra pd.DataFrame(...) wrapper made a
# redundant full copy and has been removed.
df = pd.read_csv("./output_geo.csv")

fig, ax = plt.subplots()

# get a color map: one distinct colour per bar from the qualitative tab20 map
cmap = get_cmap("tab20", 28)  # type: matplotlib.colors.ListedColormap
colors = cmap.colors  # type: list

ips = df['ip']
mean_rtt = df['mean_rtt']
ax.bar(ips, mean_rtt, color=colors)
ax.set_ylabel('Mean RTT in ms')
ax.set_title('Mean RTT for each hop between ips')
# tilt the ip labels so adjacent ones do not overlap
plt.xticks(rotation=15)
plt.show()
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.p... |
15211930040 | """
Perform Outlier Rejection with MCMC
-----------------------------------
Figure 8.9
Bayesian outlier detection for the same data as shown in figure 8.8. The
top-left panel shows the data, with the fits from each model. The top-right
panel shows the 1-sigma and 2-sigma contours for the slope and intercept with
no outlier correction: the resulting fit (shown by the dotted line) is clearly
highly affected by the presence of outliers. The bottom-left panel shows the
marginalized 1-sigma and 2-sigma contours for a mixture model (eq. 8.67). The
bottom-right panel shows the marginalized 1-sigma and 2-sigma contours for a
model in which points are identified individually as "good" or "bad"
(eq. 8.68). The points which are identified by this method as bad with a
probability greater than 68% are circled in the first panel.
"""
# Author: Jake VanderPlas (adapted to PyMC3 by Brigitta Sipocz)
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
import pymc3 as pm
from matplotlib import pyplot as plt
from theano import shared as tshared
import theano.tensor as tt
from astroML.datasets import fetch_hogg2010test
from astroML.plotting.mcmc import convert_to_stdev
# ----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
# Configure textbook plot styling once per session (requires LaTeX when
# usetex=True).
if "setup_text_plots" not in globals():
    from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)

# fixed seed for reproducible sampling
np.random.seed(0)

# ------------------------------------------------------------
# Get data: this includes outliers. We need to convert them to Theano variables
data = fetch_hogg2010test()
xi = tshared(data['x'])
yi = tshared(data['y'])
dyi = tshared(data['sigma_y'])
size = len(data)
# ----------------------------------------------------------------------
# Define basic linear model
def model(xi, theta, intercept):
    """Straight line parameterised by slope angle: y = tan(theta) * x + b."""
    return intercept + np.tan(theta) * xi
# ----------------------------------------------------------------------
# First model: no outlier correction
# First model: plain linear fit, no outlier correction.
with pm.Model():
    # set priors on model gradient and y-intercept
    inter = pm.Uniform('inter', -1000, 1000)
    theta = pm.Uniform('theta', -np.pi / 2, np.pi / 2)

    # Gaussian likelihood with the reported per-point errors dyi
    y = pm.Normal('y', mu=model(xi, theta, inter), sd=dyi, observed=yi)

    trace0 = pm.sample(draws=5000, tune=1000)
# ----------------------------------------------------------------------
# Second model: nuisance variables correcting for outliers
# This is the mixture model given in equation 17 in Hogg et al
def mixture_likelihood(yi, xi):
    """Equation 17 of Hogg 2010"""
    # NOTE: reads the pymc3 random variables (log_sigmab, theta, inter,
    # Pb, Yb) and the shared errors dyi from the enclosing module scope;
    # must be called inside the model context that defines them.
    sigmab = tt.exp(log_sigmab)
    mu = model(xi, theta, inter)

    Vi = dyi ** 2
    Vb = sigmab ** 2

    root2pi = np.sqrt(2 * np.pi)

    # per-point likelihood under the straight-line ("inlier") model
    L_in = (1. / root2pi / dyi * np.exp(-0.5 * (yi - mu) ** 2 / Vi))

    # per-point likelihood under the background ("outlier") Gaussian at Yb
    L_out = (1. / root2pi / np.sqrt(Vi + Vb)
             * np.exp(-0.5 * (yi - Yb) ** 2 / (Vi + Vb)))

    # mix with outlier fraction Pb and sum the log-probabilities
    return tt.sum(tt.log((1 - Pb) * L_in + Pb * L_out))
# Second model: nuisance variables correcting for outliers
# (the mixture model, equation 17 in Hogg et al).
with pm.Model():
    # uniform prior on Pb, the fraction of bad points
    Pb = pm.Uniform('Pb', 0, 1.0, testval=0.1)

    # uniform prior on Yb, the centroid of the outlier distribution
    Yb = pm.Uniform('Yb', -10000, 10000, testval=0)

    # uniform prior on log(sigmab), the spread of the outlier distribution
    log_sigmab = pm.Uniform('log_sigmab', -10, 10, testval=5)

    inter = pm.Uniform('inter', -200, 400)
    theta = pm.Uniform('theta', -np.pi / 2, np.pi / 2, testval=np.pi / 4)

    # custom likelihood combining inlier and outlier components
    y_mixture = pm.DensityDist('mixturenormal', logp=mixture_likelihood,
                               observed={'yi': yi, 'xi': xi})

    trace1 = pm.sample(draws=5000, tune=1000)
# ----------------------------------------------------------------------
# Third model: marginalizes over the probability that each point is an outlier.
# define priors on beta = (slope, intercept)
def outlier_likelihood(yi, xi):
    """likelihood for full outlier posterior"""
    # NOTE: reads the pymc3 random variables (log_sigmab, theta, inter,
    # Yb, qi) from the enclosing module scope; qi marks each point as
    # inlier (1) or outlier (0).
    sigmab = tt.exp(log_sigmab)
    mu = model(xi, theta, inter)

    Vi = dyi ** 2
    Vb = sigmab ** 2

    # log-likelihood of points flagged as inliers (qi == 1)
    logL_in = -0.5 * tt.sum(qi * (np.log(2 * np.pi * Vi)
                                  + (yi - mu) ** 2 / Vi))

    # log-likelihood of points flagged as outliers (qi == 0)
    logL_out = -0.5 * tt.sum((1 - qi) * (np.log(2 * np.pi * (Vi + Vb))
                                         + (yi - Yb) ** 2 / (Vi + Vb)))

    return logL_out + logL_in
# Third model: marginalizes over the probability that each point is an
# outlier via per-point Bernoulli indicators qi.
with pm.Model():
    # uniform prior on Pb, the fraction of bad points
    Pb = pm.Uniform('Pb', 0, 1.0, testval=0.1)

    # uniform prior on Yb, the centroid of the outlier distribution
    Yb = pm.Uniform('Yb', -10000, 10000, testval=0)

    # uniform prior on log(sigmab), the spread of the outlier distribution
    log_sigmab = pm.Uniform('log_sigmab', -10, 10, testval=5)

    inter = pm.Uniform('inter', -1000, 1000)
    theta = pm.Uniform('theta', -np.pi / 2, np.pi / 2)

    # qi is bernoulli distributed: one inlier/outlier flag per data point
    qi = pm.Bernoulli('qi', p=1 - Pb, shape=size)

    y_outlier = pm.DensityDist('outliernormal', logp=outlier_likelihood,
                               observed={'yi': yi, 'xi': xi})

    trace2 = pm.sample(draws=5000, tune=1000)
# ------------------------------------------------------------
# plot the data
# ------------------------------------------------------------
# plot the data
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.25,
                    bottom=0.1, top=0.95, hspace=0.2)

# first axes: plot the data with its reported y-errors
ax1 = fig.add_subplot(221)
ax1.errorbar(data['x'], data['y'], data['sigma_y'], fmt='.k', ecolor='gray', lw=1)
ax1.set_xlabel('$x$')
ax1.set_ylabel('$y$')

# ------------------------------------------------------------
# Go through models; compute and plot likelihoods
linestyles = [':', '--', '-']
labels = ['no outlier correction\n(dotted fit)',
          'mixture model\n(dashed fit)',
          'outlier rejection\n(solid fit)']

# x grid for drawing the best-fit lines
x = np.linspace(0, 350, 10)

# (intercept, slope) histogram bins for each of the three models
bins = [(np.linspace(140, 300, 51), np.linspace(0.6, 1.6, 51)),
        (np.linspace(-40, 120, 51), np.linspace(1.8, 2.8, 51)),
        (np.linspace(-40, 120, 51), np.linspace(1.8, 2.8, 51))]
for i, trace in enumerate([trace0, trace1, trace2]):
    # 2D histogram of the sampled (slope, intercept) posterior
    H2D, bins1, bins2 = np.histogram2d(np.tan(trace['theta']),
                                       trace['inter'], bins=50)

    w = np.where(H2D == H2D.max())

    # choose the maximum posterior slope and intercept
    slope_best = bins1[w[0][0]]
    intercept_best = bins2[w[1][0]]

    # plot the best-fit line
    ax1.plot(x, intercept_best + slope_best * x, linestyles[i], c='k')

    # For the model which identifies bad points,
    # plot circles around points identified as outliers.
    if i == 2:
        # mean of the Bernoulli flags = posterior inlier probability
        Pi = trace['qi'].mean(0)
        outlier_x = data['x'][Pi < 0.32]
        outlier_y = data['y'][Pi < 0.32]
        ax1.scatter(outlier_x, outlier_y, lw=1, s=400, alpha=0.5,
                    facecolors='none', edgecolors='red')

    # plot the likelihood contours
    ax = plt.subplot(222 + i)

    H, xbins, ybins = np.histogram2d(trace['inter'],
                                     np.tan(trace['theta']), bins=bins[i])
    # avoid log(0) in the empty histogram cells
    H[H == 0] = 1E-16
    Nsigma = convert_to_stdev(np.log(H))

    ax.contour(0.5 * (xbins[1:] + xbins[:-1]),
               0.5 * (ybins[1:] + ybins[:-1]),
               Nsigma.T, levels=[0.683, 0.955], colors='black')

    ax.set_xlabel('intercept')
    ax.set_ylabel('slope')
    ax.grid(color='gray')
    ax.xaxis.set_major_locator(plt.MultipleLocator(40))
    ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
    ax.text(0.96, 0.96, labels[i], ha='right', va='top',
            bbox=dict(fc='w', ec='none', alpha=0.5),
            transform=ax.transAxes)

    ax.set_xlim(bins[i][0][0], bins[i][0][-1])
    ax.set_ylim(bins[i][1][0], bins[i][1][-1])

ax1.set_xlim(0, 350)
ax1.set_ylim(100, 700)

plt.show()
| astroML/astroML_figures | book_figures/chapter8/fig_outlier_rejection.py | fig_outlier_rejection.py | py | 8,174 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "astroML.plotting.setup_text_plots",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": ... |
8266559866 | import copy
import logging
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, NamedTuple, Union
import yaml
import cekit
from cekit.cekit_types import _T, RawDescriptor
from cekit.config import Config
from cekit.descriptor import (
Arg,
Descriptor,
Env,
Label,
Modules,
Osbs,
Packages,
Port,
Run,
Volume,
)
from cekit.descriptor.resource import Resource, create_resource
from cekit.errors import CekitError
from cekit.tools import get_latest_image_version
if TYPE_CHECKING:
from cekit.descriptor.modules import Install
from cekit.descriptor.overrides import Overrides
from cekit.generator.base import ModuleRegistry
logger = logging.getLogger("cekit")
config = Config()
_image_schema = yaml.safe_load(
"""
map:
name: {type: str, required: True}
version: {type: text, required: True}
schema_version: {type: int}
release: {type: text}
from: {type: str}
follow_tag: {type: str}
description: {type: text}
args: {type: any}
labels: {type: any}
envs: {type: any}
ports: {type: any}
run: {type: any}
artifacts: {type: any}
modules: {type: any}
packages: {type: any}
osbs: {type: any}
volumes: {type: any}
help:
map:
add: {type: bool}
template: {type: text}"""
)
class ImageOverrides(NamedTuple):
    """Artifacts and module installs declared on the image itself.

    These image-level declarations take precedence over definitions
    contributed by modules (see Image.apply_image_overrides /
    Image.process_install_list).
    """
    # artifact name -> Resource declared (or overridden) on the image
    artifacts: Dict[str, "Resource"]
    # module name -> Install entry declared (or overridden) on the image
    modules: Dict[str, "Install"]
def get_image_schema():
    """Return a deep copy of the base image schema, safe for callers to mutate."""
    return copy.deepcopy(_image_schema)
class Image(Descriptor):
    """Top-level image descriptor.

    Wraps the raw YAML-derived dictionary, converts nested sections into
    typed sub-descriptors (_prepare), fills defaults (process_defaults),
    and merges in command-line / file overrides (apply_image_overrides)
    and module-contributed configuration (apply_module_overrides).
    """

    def __init__(self, descriptor: RawDescriptor, artifact_dir: str):
        self._artifact_dir: str = artifact_dir
        self.path: str = artifact_dir
        self.schema = _image_schema.copy()

        super(Image, self).__init__(descriptor)
        # top-level keys that must never be merged from other descriptors
        self.skip_merging = ["description", "version", "name", "release"]

        self._prepare()

    def _prepare(self):
        """Convert raw nested sections into typed descriptor objects."""
        # TODO: Separating raw image descriptor from a higher level Image class would change this
        # confusing code into a connector/factory.
        self._descriptor["labels"] = [
            Label(x) for x in self._descriptor.get("labels", [])
        ]
        self._descriptor["envs"] = [Env(x) for x in self._descriptor.get("envs", [])]
        self._descriptor["ports"] = [Port(x) for x in self._descriptor.get("ports", [])]
        if "run" in self._descriptor:
            self._descriptor["run"] = Run(self._descriptor["run"])
        self._descriptor["artifacts"] = [
            create_resource(a, directory=self._artifact_dir)
            for a in self._descriptor.get("artifacts", [])
        ]
        self._descriptor["modules"] = Modules(
            self._descriptor.get("modules", {}), self.path
        )
        self._descriptor["packages"] = Packages(
            self._descriptor.get("packages", {}), self.path
        )
        self._descriptor["osbs"] = Osbs(self._descriptor.get("osbs", {}), self.path)
        self._descriptor["volumes"] = [
            Volume(x) for x in self._descriptor.get("volumes", [])
        ]

        # make sure image declarations override any module definitions
        # TODO: Make into a NamedTuple to make types easier to reason about.
        self._image_overrides = ImageOverrides(
            artifacts=Image._to_dict(self.artifacts),
            modules=Image._to_dict(self.modules.install),
        )
        # every artifact required by the image or its modules, keyed by name
        self._all_artifacts: Dict[str, Resource] = Image._to_dict(self.artifacts)

    def process_defaults(self):
        """Prepares default values before rendering"""
        if not self.run:
            self.run = Run({})

        # do we want to force a user?
        if "user" not in self.run:
            self.run._descriptor["user"] = cekit.DEFAULT_USER

        # Default package manager is yum
        if not self.packages.manager:
            self.packages._descriptor["manager"] = "yum"

        # Default directory for supplementary files that should be copied to dist-git directory
        if not self.osbs.extra_dir:
            self.osbs._descriptor["extra_dir"] = "osbs_extra"

        # Placing this here rather than in init as apply_image_overrides runs after that. This means
        # follow_tag is applied *after* overrides.
        if self.follow:
            if not config.get("common", "redhat"):
                raise CekitError(
                    "follow_tag annotation only supported with redhat flag"
                )
            self.base = get_latest_image_version(self.follow)

    @property
    def name(self) -> str:
        return self.get("name")

    @name.setter
    def name(self, value: str):
        self._descriptor["name"] = value

    @property
    def version(self) -> Any:
        # TODO: Convert to string up front to simplify
        return self.get("version")

    @version.setter
    def version(self, value: Any):
        self._descriptor["version"] = value

    # TODO: release is undocumented.
    @property
    def release(self) -> str:
        return self.get("release")

    @release.setter
    def release(self, value: str):
        self._descriptor["release"] = value

    @property
    def base(self) -> str:
        # 'from' in the descriptor; renamed since `from` is a Python keyword
        return self.get("from")

    @base.setter
    def base(self, value: str):
        self._descriptor["from"] = value

    @property
    def follow(self) -> str:
        return self.get("follow_tag")

    @follow.setter
    def follow(self, value: str):
        self._descriptor["follow_tag"] = value

    @property
    def description(self) -> str:
        return self.get("description")

    @description.setter
    def description(self, value: str) -> None:
        self._descriptor["description"] = value

    @property
    def labels(self) -> List[Label]:
        return self.get("labels", [])

    @property
    def envs(self) -> List[Env]:
        return self.get("envs", [])

    @property
    def args(self) -> List[Arg]:
        return self.get("args", [])

    @property
    def ports(self) -> List[Port]:
        return self.get("ports", [])

    @property
    def run(self) -> Run:
        return self.get("run")

    @run.setter
    def run(self, value: Run):
        self._descriptor["run"] = value

    @property
    def all_artifacts(self) -> Iterable[Resource]:
        # artifacts declared on the image plus those collected from modules
        return self._all_artifacts.values()

    @property
    def artifacts(self) -> List[Resource]:
        return self.get("artifacts", [])

    @property
    def modules(self) -> Modules:
        return self.get("modules", Modules({}, self._artifact_dir))

    @property
    def packages(self) -> Packages:
        return self.get("packages", Packages({}, self.path))

    @property
    def osbs(self) -> Osbs:
        return self.get("osbs")

    @osbs.setter
    def osbs(self, value: Osbs):
        self._descriptor["osbs"] = value

    @property
    def volumes(self) -> List[Volume]:
        return self.get("volumes", [])

    @property
    def help(self) -> dict:
        return self.get("help", {})

    def apply_image_overrides(self, overrides: List["Overrides"]):
        """
        Applies overrides to the image descriptor.

        Overrides are applied in list order; later entries win for scalar
        values, while list-valued sections (labels, envs, ports, ...) are
        merged by name.
        """
        if not overrides:
            return

        for override in overrides:
            if override.name:
                self.name = override.name

            if override.version:
                self.version = override.version

            if override.base:
                self.base = override.base

            if override.description:
                self.description = override.description

            labels = Image._to_dict(self.labels)
            for label in override.labels:
                name = label.name
                if name in labels:
                    labels[name] = label.merge(labels[name])
                else:
                    labels[name] = label
            self._descriptor["labels"] = list(labels.values())

            envs = Image._to_dict(self.envs)
            for env in override.envs:
                name = env.name
                if name in envs:
                    envs[name] = env.merge(envs[name])
                else:
                    envs[name] = env
            self._descriptor["envs"] = list(envs.values())

            ports = Image._to_dict(self.ports)
            for port in override.ports:
                # ports are keyed by their numeric value, not a name
                name = port.value
                if name in ports:
                    ports[name] = port.merge(ports[name])
                else:
                    ports[name] = port
            self._descriptor["ports"] = list(ports.values())

            module_repositories = Image._to_dict(self.modules.repositories)
            for repository in override.modules.repositories:
                name = repository.name
                if name in module_repositories:
                    module_repositories[name] = repository.merge(
                        module_repositories[name]
                    )
                else:
                    module_repositories[name] = repository
            self.modules._descriptor["repositories"] = list(
                module_repositories.values()
            )

            self.packages._descriptor = override.packages.merge(self.packages)

            # In case content sets are provided as null values
            # Remove the key entirely.
            # TODO: This should be handled probably at general level, for every key
            for flag in ["content_sets", "content_sets_file"]:
                if flag in override.packages and override.packages[flag] is None:
                    self.packages._descriptor.pop("content_sets", None)
                    self.packages._descriptor.pop("content_sets_file", None)

            # Merge override osbs items into self.
            self.osbs = self.osbs.merge(override.osbs)

            # Using 'or []' to avoid having to set default value in packages.py for _descriptor["remove"]
            for package in override.packages.remove or []:
                if package not in self.packages.remove:
                    self.packages.remove.append(package)
            for package in override.packages.install or []:
                if package not in self.packages.install:
                    self.packages.install.append(package)
            for package in override.packages.reinstall or []:
                if package not in self.packages.reinstall:
                    self.packages.reinstall.append(package)

            artifact_overrides = self._image_overrides.artifacts
            image_artifacts = Image._to_dict(self.artifacts)
            for i, artifact in enumerate(override.artifacts):
                name = artifact.name

                # override.artifact contains override values WITH defaults.
                # override.original_descriptor contains override value WITHOUT defaults.
                # artifact_overrides contains original dictionary
                #
                # Iterating over dest / target / ...
                # If we have _not_ supplied a target (check original_descriptor),
                # then check artifact_overrides,
                # otherwise use default from override.artifact
                override_without_defaults = override.original_descriptor.get(
                    "artifacts"
                )[i]
                for key in ["dest", "target", "description"]:
                    if override_without_defaults.get(key):
                        logger.debug(
                            "Key ({}) found in override as {}".format(
                                key, override_without_defaults.get(key)
                            )
                        )
                        artifact[key] = override_without_defaults.get(key)
                    elif artifact_overrides.get(name) and artifact_overrides.get(
                        name
                    ).get(key):
                        new_value = artifact_overrides.get(name).get(key)
                        logger.debug(
                            "Key ({}) found in original artifact as {}".format(
                                key, new_value
                            )
                        )
                        artifact[key] = new_value

                # collect override so we can apply it to modules
                artifact_overrides[name] = artifact
                # add it to the list of everything
                self._all_artifacts[name] = artifact
                # Apply override to image descriptor
                image_artifacts[name] = artifact
                # Sort the output as it makes it easier to view and test.
                logger.debug(
                    "Final (with override) artifact is {}".format(
                        sorted(artifact.items())
                    )
                )
            self._descriptor["artifacts"] = list(image_artifacts.values())

            module_overrides = self._image_overrides.modules
            image_modules = Image._to_dict(self.modules.install)
            for module in override.modules.install:
                name = module.name
                # collect override so we can apply it to modules.
                # this allows us to override module versions without affecting ordering.
                module_overrides[name] = module
                # Apply override to image descriptor
                # If the module does not exists in the original descriptor, add it there
                image_modules[name] = module
            self.modules._descriptor["install"] = list(image_modules.values())

            if override.run is not None:
                if self.run:
                    self.run = override.run.merge(self.run)
                else:
                    self.run = override.run

    def apply_module_overrides(self, module_registry: "ModuleRegistry"):
        """
        Applies overrides to included modules. This includes:

            Artifact definitions
            Module dependency version overrides

        Also incorporates module contributed global configuration into the image:

            Run specification
            Package repository definitions
            Required artifacts
        """
        install_list: Dict[str, "Install"] = OrderedDict()
        # index by name for easier access
        self._package_repositories = Image._to_dict(self.packages.repositories)
        # collect final 'run' value from modules
        self._module_run = Run({})

        # process the modules and integrate relevant bits into ourself
        self.process_install_list(
            self, self.modules.install, install_list, module_registry
        )

        # update ourself based on module declarations
        # final order of modules to be installed
        self.modules._descriptor["install"] = list(install_list.values())
        # all package repositories required for installing packages
        self.packages._descriptor["repositories"] = list(
            self._package_repositories.values()
        )
        # final 'run' value
        if self.run:
            self.run = self.run.merge(self._module_run)
        else:
            self.run = self._module_run

    def process_install_list(
        self,
        # NOTE(review): annotated as Union["Image"] with a single member;
        # presumably modules can also be sources here -- confirm intent.
        source: Union["Image"],
        to_install_list: List["Install"],
        install_list: Dict[str, "Install"],
        module_registry: "ModuleRegistry",
    ) -> None:
        """Recursively resolve modules to install, depth-first.

        Mutates `install_list` in place so that dependencies precede the
        modules that require them.
        """
        # TODO: Return value is passed as parameter in `install_list`
        module_overrides = self._image_overrides.modules
        artifact_overrides = self._image_overrides.artifacts
        for to_install in to_install_list:
            logger.debug(
                "Preparing module '{}' required by '{}'.".format(
                    to_install.name, source.name
                )
            )

            override = module_overrides.get(to_install.name, None)
            if override:
                if override.version != to_install.version:
                    logger.debug(
                        "Module '{}:{}' being overridden with '{}:{}'.".format(
                            to_install.name,
                            to_install.version,
                            override.name,
                            override.version,
                        )
                    )
                # apply module override
                to_install = override

            existing = install_list.get(to_install.name, None)
            # see if we've already processed this
            if existing:
                # check for a version conflict
                if existing.version != to_install.version:
                    logger.warning(
                        "Module version inconsistency for {}: {} requested, but {} will be used.".format(
                            to_install.name, to_install.version, existing.version
                        )
                    )
                continue

            module = module_registry.get_module(to_install.name, to_install.version)
            if not module:
                raise CekitError(
                    "Could not locate module %s version %s. Please verify that it is included in one of the "
                    "specified module repositories."
                    % (to_install.name, to_install.version)
                )

            # collect artifacts and apply overrides
            module_artifacts = Image._to_dict(module.artifacts)
            for artifact in module.artifacts:
                name = artifact.name
                if name in artifact_overrides:
                    override = artifact_overrides[name]
                    self._all_artifacts[name] = override
                    module_artifacts[name] = override
                else:
                    self._all_artifacts[name] = artifact
            module._descriptor["artifacts"] = list(module_artifacts.values())

            # collect package repositories
            for repo in module.packages.repositories:
                name = repo.name
                if name not in self._package_repositories:
                    self._package_repositories[name] = repo

            # collect package manager
            if not self.packages.manager and module.packages.manager:
                logger.debug(
                    f"Applying module package manager of {module.packages.manager} to image"
                )
                self.packages._descriptor["manager"] = module.packages.manager

            # incorporate run specification contributed by module
            if module.run:
                # we're looping in order of install, so we want the current module to override whatever we have
                self._module_run = module.run.merge(self._module_run)

            # prevent circular dependencies. we'll move it to the end after processing
            install_list[to_install.name] = to_install
            # process this modules dependencies
            self.process_install_list(
                module, module.modules.install, install_list, module_registry
            )
            # move this module to the end of the list.
            install_list.pop(to_install.name)
            install_list[to_install.name] = to_install

    # helper to simplify merging lists of objects
    @classmethod
    def _to_dict(cls, named_items: Iterable[_T], key="name") -> Dict[str, _T]:
        # NOTE: callers never override `key`; every call site uses the
        # default "name" lookup.
        # TODO: This assumes that `name` is always a string, but in fact it isn't for Port
        dictionary = OrderedDict()
        for item in named_items:
            dictionary[item[key]] = item
        return dictionary
| cekit/cekit | cekit/descriptor/image.py | image.py | py | 19,524 | python | en | code | 70 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cekit.config.Config",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "yaml.safe_... |
73266752189 | import cv2
import numpy as np
from scipy import signal
import math
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # 3x3 Gaussian blur kernel (sums to 1).
    # NOTE(review): built but never applied -- the blur below uses cv2.blur
    # (a box filter) instead; kept for parity with the original script.
    gauss_blur_filter = [[1 / 16, 1 / 8, 1 / 16],
                         [1 / 8, 1 / 4, 1 / 8],
                         [1 / 16, 1 / 8, 1 / 16]]

    image = cv2.imread('point.jpg', 0)

    # Laplacian mask for isolated-point detection.
    # Fix: np.float was deprecated and removed in NumPy >= 1.24; the builtin
    # float is the documented replacement and is what it aliased anyway.
    kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=float)

    replicate = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_REPLICATE)
    resultant_image = cv2.blur(replicate, (5, 5))
    cv2.imwrite('gauss-blue.jpg', resultant_image)

    # Convolve with the point-detection mask and take the response magnitude
    # (vectorized; replaces the original per-pixel abs() loop).
    resultant_image_1 = signal.convolve2d(image, kernel, 'same')
    resultant_image_1 = np.abs(resultant_image_1)
    cv2.imwrite('mask-application.jpg', resultant_image_1)
    print(resultant_image_1.max())

    # Report coordinates of strong responses, zero out everything below the
    # threshold (vectorized; same row-major print order as the nested loops).
    rows, columns = resultant_image_1.shape
    for i, j in np.argwhere(resultant_image_1 >= 2024):
        print(i, j)
    resultant_image_1[resultant_image_1 < 2024] = 0
    cv2.imwrite('point-detection.jpg', resultant_image_1)

    # Segmentation: keep only the intensity band [200, 208], zero the rest.
    # (Dead histogram-exploration code that used to sit here was removed.)
    image_segment = cv2.imread('segment.jpg', 0)
    rows, columns = image_segment.shape
    image_segment[(image_segment > 208) | (image_segment < 200)] = 0
    cv2.imwrite('segemented.jpg', image_segment)
{
"api_name": "cv2.imread",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "cv2.copyMakeBorder",
"lin... |
70078277628 | import numpy as np
import torch
import os
import yaml
import tqdm
from addict import Dict
from collections import defaultdict
from matplotlib import pyplot as plt
import matplotlib.patches as patches
import pickle
import random
import pytorch_ssim
from skimage.measure import compare_ssim as ssim
from train import build_loaders
from scene_generation.data import imagenet_deprocess_batch
from scene_generation.metrics import jaccard
from scene_generation.model import Model
from scene_generation.args import get_args
#perceptual error
from PerceptualSimilarity import models
#from PerceptualSimilarity.util import util
GPU = 0
PRECOMPUTED = False # use precomputed samples (saved in the checkpoint) for evaluation
EVAL_ALL = True # evaluate on all bounding boxes (batch size=1)
USE_GT = True # use ground truth bounding boxes for evaluation
USE_FEATS = False #True
IGNORE_SMALL = False
SPLIT = '../sg2im/datasets/vg/test.h5'
BATCH_SIZE = 1
PRINT_EVERY = 50
SAVE_EVERY = 500
#EXPERIMENT = 'jitter_L_0.05_FixBoxes'
#EXPERIMENT = 'clean_infeats_64'
EXPERIMENT = "aGCN_SPADE"
ckpt = "checkpoint"
#EXPERIMENT = 'baseline_64_noise_250k'
CHECKPOINT = './output/Nov12_14-43-14_atnavab21/{}_with_model.pt'.format(ckpt)
#config_file = 'experiments/default/logs/{}/args.yaml'.format(EXPERIMENT)
results_file = 'test_results_{}.pickle'
def main():
    """Entry point: load the checkpoint, build loaders and run evaluation."""
    if not os.path.isfile(CHECKPOINT):
        print('ERROR: Checkpoint file "%s" not found' % CHECKPOINT)
        return

    # Read config file of the model.
    args = get_args()
    print(args)

    # Fix every RNG for reproducible evaluation runs.
    random.seed(1)
    np.random.seed(1)
    torch.manual_seed(1)

    # Disable jitter augmentations and force the evaluation split/batch size.
    for attr_name in ('add_jitter_bbox', 'add_jitter_layout', 'add_jitter_feats'):
        setattr(args, attr_name, None)
    args.batch_size = BATCH_SIZE
    args.test_h5 = SPLIT

    device = torch.device("cuda:0")
    # Load the checkpoint with a bit of care in case there are no GPUs.
    map_location = 'cpu' if device == torch.device('cpu') else None
    ckpt_data = torch.load(CHECKPOINT, map_location=map_location)

    if PRECOMPUTED:
        # Samples and scores were stored while training (last iteration only).
        samples = ckpt_data['val_samples'][-1]
        original_img = samples['gt_img'].cpu().numpy()
        predicted_img = samples['gt_box_pred_mask'].cpu().numpy()
        return

    # Rebuild the model from the stored kwargs and weights.
    model = Model(**ckpt_data['model_kwargs'])
    model.load_state_dict(ckpt_data['model_state'])
    model.eval()
    model.to(device)

    # Create data loaders.
    _, train_loader, val_loader, test_loader = build_loaders(args, evaluating=True)

    print('Batch size: ', BATCH_SIZE)
    print('Evaluating on {} set'.format(SPLIT))
    eval_model(args, model, test_loader, device, use_gt=USE_GT, use_feats=USE_FEATS, filter_box=IGNORE_SMALL)
    return
def eval_model(args, model, loader, device, use_gt=False, use_feats=False, filter_box=False):
    """Evaluate `model` over `loader`, accumulating reconstruction metrics.

    Collects MAE, SSIM and LPIPS over whole images and over the dropped-box
    RoIs, plus IoU between predicted and ground-truth boxes.  Partial scores
    are printed every PRINT_EVERY batches and pickled every SAVE_EVERY
    batches; a final pickle is written after the loop.

    Args:
        args: parsed experiment arguments (not read directly in this body).
        model: scene-generation model, already on `device` and in eval mode.
        loader: evaluation DataLoader.
        device: torch device batches are moved to.
        use_gt, use_feats, filter_box: evaluation switches; use_feats and
            filter_box are forwarded to process_batch (use_gt is unused here).
    """
    all_losses = defaultdict(list)  # NOTE(review): never filled below
    all_boxes = defaultdict(list)   # per-batch box tensors saved to the pickle
    total_iou = []
    total_boxes = 0
    num_batches = 0
    num_samples = 0
    mae_per_image = []
    mae_roi_per_image = []
    roi_only_iou = []
    ssim_per_image = []
    ssim_rois = []
    rois = 0
    margin = 2  # pixel margin added around each RoI crop
    ## Initializing the perceptual loss model
    lpips_model = models.PerceptualLoss(model='net-lin', net='alex', use_gpu=True)
    perceptual_error_image = []
    # NOTE(review): never appended to (the RoI-LPIPS call below is commented
    # out), so its mean/std print as nan in calculate_scores.
    perceptual_error_roi = []
    # ---------------------------------------
    with torch.no_grad():
        for batch in tqdm.tqdm(loader):
            num_batches += 1
            # if num_batches > 10:
            #     break
            batch = [tensor.to(device) for tensor in batch]
            masks = None
            # Unpack by batch arity; the datasets differ in what they provide.
            if len(batch) == 6:
                imgs, objs, boxes, triples, obj_to_img, triple_to_img = batch
            elif len(batch) == 7:
                imgs, objs, boxes, masks, triples, obj_to_img, triple_to_img = batch
            elif len(batch) == 12:
                imgs, objs, boxes, triples, obj_to_img, triple_to_img, \
                objs_r, boxes_r, triples_r, obj_to_img_r, triple_to_img_r, imgs_in = batch
            elif len(batch) == 13:
                imgs, objs, boxes, triples, obj_to_img, triple_to_img, attributes, \
                objs_r, boxes_r, triples_r, obj_to_img_r, triple_to_img_r, imgs_in = batch
            else:
                assert False
            predicates = triples[:, 1]

            # #EVAL_ALL = True
            if EVAL_ALL:
                # Replicate each image once per box so every box gets dropped
                # exactly once (see process_batch).
                imgs, imgs_in, objs, boxes, triples, obj_to_img, \
                dropbox_indices, dropfeats_indices = process_batch(
                    imgs, imgs_in, objs, boxes, triples, obj_to_img, triple_to_img, device,
                    use_feats=use_feats, filter_box=filter_box)
            else:
                dropbox_indices = None
                dropfeats_indices = None
            #
            # if use_gt: # gt boxes
            #     model_out = model(objs, triples, obj_to_img, boxes_gt=boxes, masks_gt=masks, src_image=imgs_in,
            #                       drop_box_idx=None, drop_feat_idx=dropfeats_indices, mode='eval')
            # else:
            #     model_out = model(objs, triples, obj_to_img, boxes_gt=boxes, src_image=imgs_in,
            #                       drop_box_idx=dropbox_indices, drop_feats_idx=dropfeats_indices, mode='eval')
            masks_gt = None
            gt_train = False
            # NOTE(review): `attributes` is only bound by the len(batch)==13
            # branch above; other batch arities would raise NameError here.
            attributes = torch.zeros_like(attributes)
            all_features = None
            # Run the model with predicted masks
            model_out = model(imgs, objs, triples, obj_to_img, boxes_gt=boxes, masks_gt=masks_gt, attributes=attributes,
                              gt_train=gt_train, test_mode=False, use_gt_box=True, features=all_features
                              , drop_box_idx=dropbox_indices, drop_feat_idx=dropfeats_indices, src_image=imgs_in)
            #imgs_pred, boxes_pred, masks_pred, _, layout, _ = model_out
            # OUTPUT
            imgs_pred, boxes_pred, masks_pred, predicate_scores, layout, _ = model_out
            # --------------------------------------------------------------------------------------------------------------
            #imgs_pred *= 3
            #print(imgs_pred.min(), imgs_pred.max())

            # Save all box predictions
            all_boxes['boxes_gt'].append(boxes)
            all_boxes['objs'].append(objs)
            all_boxes['boxes_pred'].append(boxes_pred)
            all_boxes['drop_targets'].append(dropbox_indices)

            # IoU over all
            total_iou.append(jaccard(boxes_pred, boxes).cpu().numpy())  #.detach()
            total_boxes += boxes_pred.size(0)
            # IoU over targets only (a 0 in dropbox_indices marks the dropped box)
            pred_dropbox = boxes_pred[dropbox_indices.squeeze() == 0, :]
            gt_dropbox = boxes[dropbox_indices.squeeze() == 0, :]
            roi_only_iou.append(jaccard(pred_dropbox, gt_dropbox).detach().cpu().numpy())
            rois += pred_dropbox.size(0)
            # assert(pred_dropbox.size(0) == imgs.size(0))

            num_samples += imgs.shape[0]
            # De-normalize back to [0, 255]-range images for pixel metrics.
            imgs = imagenet_deprocess_batch(imgs).float()
            imgs_pred = imagenet_deprocess_batch(imgs_pred).float()

            # Uncomment to plot images (for debugging purposes)
            #visualize_imgs_boxes(imgs, imgs_pred, boxes, boxes)

            # MAE per image
            mae_per_image.append(torch.mean(
                torch.abs(imgs - imgs_pred).view(imgs.shape[0], -1), 1).cpu().numpy())

            for s in range(imgs.shape[0]):
                # get coordinates of target
                left, right, top, bottom = bbox_coordinates_with_margin(boxes[s, :], margin, imgs)
                # calculate errors only in RoI one by one
                mae_roi_per_image.append(torch.mean(
                    torch.abs(imgs[s, :, top:bottom, left:right] - imgs_pred[s, :, top:bottom, left:right])).cpu().item())
                ssim_per_image.append(
                    pytorch_ssim.ssim(imgs[s:s+1, :, :, :] / 255.0,
                                      imgs_pred[s:s+1, :, :, :] / 255.0, window_size=3).cpu().item())
                ssim_rois.append(
                    pytorch_ssim.ssim(imgs[s:s+1, :, top:bottom, left:right] / 255.0,
                                      imgs_pred[s:s+1, :, top:bottom, left:right] / 255.0, window_size=3).cpu().item())
                # Rescale from [0, 255] to [-1, 1] as the LPIPS model expects.
                imgs_pred_norm = imgs_pred[s:s+1, :, :, :] / 127.5 - 1  # = util.im2tensor(imgs_pred[s:s+1, :, :, :].detach().cpu().numpy())
                imgs_gt_norm = imgs[s:s+1, :, :, :] / 127.5 - 1  # util.im2tensor(imgs[s:s+1, :, :, :].detach().cpu().numpy())
                #perceptual_error_roi.append(lpips_model.forward(imgs_pred_norm[:,:, top:bottom, left:right],
                #                                                imgs_gt_norm[:,:, top:bottom, left:right]))
                #print(imgs_pred_norm.shape)
                perceptual_error_image.append(
                    lpips_model.forward(imgs_pred_norm, imgs_gt_norm).detach().cpu().numpy())

            if num_batches % PRINT_EVERY == 0:
                calculate_scores(mae_per_image, mae_roi_per_image, total_iou, roi_only_iou, ssim_per_image, ssim_rois,
                                 perceptual_error_image, perceptual_error_roi)
            if num_batches % SAVE_EVERY == 0:
                save_results(mae_per_image, mae_roi_per_image, total_iou, roi_only_iou, ssim_per_image, ssim_rois,
                             perceptual_error_image, perceptual_error_roi, all_boxes, num_batches)

    # mean_losses = {k: np.mean(v) for k, v in all_losses.items()}
    save_results(mae_per_image, mae_roi_per_image, total_iou, roi_only_iou, ssim_per_image, ssim_rois,
                 perceptual_error_image, perceptual_error_roi, all_boxes, 'final')

    # masks_to_store = masks
    # if masks_to_store is not None:
    #     masks_to_store = masks_to_store.data.cpu().clone()
    # masks_pred_to_store = masks_pred
    # if masks_pred_to_store is not None:
    #     masks_pred_to_store = masks_pred_to_store.data.cpu().clone()
    # batch_data = {
    #     'objs': objs.detach().cpu().clone(),
    #     'boxes_gt': boxes.detach().cpu().clone(),
    #     'masks_gt': masks_to_store,
    #     'triples': triples.detach().cpu().clone(),
    #     'obj_to_img': obj_to_img.detach().cpu().clone(),
    #     'triple_to_img': triple_to_img.detach().cpu().clone(),
    #     'boxes_pred': boxes_pred.detach().cpu().clone(),
    #     'masks_pred': masks_pred_to_store
    # }
    # out = [mean_losses, samples, batch_data, avg_iou]
    # out = [mean_losses, mean_L1, avg_iou]
    return  # mae_per_image, mae_roi_per_image, total_iou, roi_only_iou
def calculate_scores(mae_per_image, mae_roi_per_image, total_iou, roi_only_iou, ssim_per_image, ssim_rois,
perceptual_image, perceptual_roi):
mae_all = np.mean(np.hstack(mae_per_image), dtype=np.float64)
mae_std = np.std(np.hstack(mae_per_image), dtype=np.float64)
mae_roi = np.mean(mae_roi_per_image, dtype=np.float64)
mae_roi_std = np.std(mae_roi_per_image, dtype=np.float64)
iou_all = np.mean(np.hstack(total_iou), dtype=np.float64)
iou_std = np.std(np.hstack(total_iou), dtype=np.float64)
iou_roi = np.mean(np.hstack(roi_only_iou), dtype=np.float64)
iou_roi_std = np.std(np.hstack(roi_only_iou), dtype=np.float64)
ssim_all = np.mean(ssim_per_image, dtype=np.float64)
ssim_std = np.std(ssim_per_image, dtype=np.float64)
ssim_roi = np.mean(ssim_rois, dtype=np.float64)
ssim_roi_std = np.std(ssim_rois, dtype=np.float64)
# percept error -----------
percept_all = np.mean(perceptual_image, dtype=np.float64)
#print(perceptual_image, percept_all)
percept_all_std = np.std(perceptual_image, dtype=np.float64)
percept_roi = np.mean(perceptual_roi, dtype=np.float64)
percept_roi_std = np.std(perceptual_roi, dtype=np.float64)
# ------------------------
print()
print('MAE: Mean {:.6f}, Std {:.6f}'.format(mae_all, mae_std))
print('MAE-RoI: Mean {:.6f}, Std {:.6f}: '.format(mae_roi, mae_roi_std))
print('IoU: Mean {:.6f}, Std {:.6f}'.format(iou_all, iou_std))
print('IoU-RoI: Mean {:.6f}, Std {:.6f}'.format(iou_roi, iou_roi_std))
print('SSIM: Mean {:.6f}, Std {:.6f}'.format(ssim_all, ssim_std))
print('SSIM-RoI: Mean {:.6f}, Std {:.6f}'.format(ssim_roi, ssim_roi_std))
print('LPIPS: Mean {:.6f}, Std {:.6f}'.format(percept_all, percept_all_std))
print('LPIPS-RoI: Mean {:.6f}, Std {:.6f}'.format(percept_roi, percept_roi_std))
return
def save_results(mae_per_image, mae_roi_per_image, total_iou, roi_only_iou, ssim_per_image, ssim_rois,
                 perceptual_per_image, perceptual_rois, all_boxes, iter):
    """Pickle every collected metric list (plus raw box data) to disk.

    The output path comes from the module-level `results_file` template,
    formatted with `iter` (a batch count or the string 'final').
    """
    results = {
        'mae_per_image': mae_per_image,
        'mae_rois': mae_roi_per_image,
        'iou_per_image': total_iou,
        'iou_rois': roi_only_iou,
        'ssim_per_image': ssim_per_image,
        'ssim_rois': ssim_rois,
        'perceptual_per_image': perceptual_per_image,
        'perceptual_rois': perceptual_rois,
        'data': all_boxes,
    }
    with open(results_file.format(iter), 'wb') as handle:
        pickle.dump(results, handle)
def process_batch(imgs, imgs_in, objs, boxes, triples, obj_to_img, triples_to_img, device,
                  use_feats=True, filter_box=False):
    """Replicate every image once per candidate box so each box is dropped once.

    For image i with B boxes (the last being the whole-'image' box), the
    image and its objects/boxes/triples are repeated B-1 times (or len(keep)
    times when filter_box prunes tiny boxes).  In replica r, box r is marked
    for dropping with a 0 in the returned `candidates` mask.

    Returns the replicated (imgs, imgs_in, objs, boxes, triples), the rebuilt
    obj_to_img mapping, the box-drop mask, and the feature-drop mask (all
    ones when use_feats is True, i.e. features are kept everywhere).

    NOTE(review): candidate masks are created on `device`, but
    triplet_offsets is moved with .cuda() -- this path assumes a GPU.
    """
    num_imgs = imgs.shape[0]
    imgs_stack = []
    imgs_in_stack = []
    boxes_stack = []
    objs_stack = []
    triples_stack = []
    obj_to_img_new = []
    candidates_stack = []
    previous_idx = 0  # running offset of replica indices across images
    for i in range(num_imgs):
        # Slice out the objects/boxes belonging to image i.
        start_idx_for_img = (obj_to_img == i).nonzero()[0]
        last_idx_for_img = (obj_to_img == i).nonzero()[-1]
        boxes_i = boxes[start_idx_for_img: last_idx_for_img + 1, :]  # this includes the 'image' box!
        objs_i = objs[start_idx_for_img: last_idx_for_img + 1]
        # ... and the triples belonging to image i.
        start_idx_for_img = (triples_to_img == i).nonzero()[0]
        last_idx_for_img = (triples_to_img == i).nonzero()[-1]
        triples_i = triples[start_idx_for_img:last_idx_for_img + 1]
        num_boxes = boxes_i.shape[0]  # number of boxes in current image minus the 'image' box
        if filter_box:
            min_dim = 0.05  # about 3 pixels
            keep = [b for b in range(boxes_i.shape[0] - 1) if
                    boxes_i[b, 2] - boxes_i[b, 0] > min_dim and boxes_i[b, 3] - boxes_i[b, 1] > min_dim]
            print('Ignoring {} out of {} boxes'.format(boxes_i.shape[0] - len(keep), boxes_i.shape[0]))
            times_to_rep = len(keep)
            img_indices = torch.LongTensor(keep)
        else:
            times_to_rep = num_boxes - 1
            img_indices = torch.arange(0, times_to_rep)
        # boxes that will be dropped for each sample (always shift the index by one to get the next box)
        drop_indices = img_indices * (num_boxes + 1)
        # replicate things for current image
        imgs_stack.append(imgs[i, :, :, :].repeat(times_to_rep, 1, 1, 1))
        imgs_in_stack.append(imgs_in[i, :, :, :].repeat(times_to_rep, 1, 1, 1))
        objs_stack.append(objs_i.repeat(times_to_rep))  # replicate object ids #boxes times
        boxes_stack.append(boxes_i.repeat(times_to_rep, 1))  # replicate boxes #boxes times
        obj_to_img_new.append(img_indices.unsqueeze(1).repeat(1, num_boxes).view(-1) + previous_idx)
        previous_idx = obj_to_img_new[-1].max() + 1
        # Shift subject/object indices in the replicated triples so they point
        # at the matching replica's objects.
        triplet_offsets = (num_boxes * img_indices.unsqueeze(1).repeat(1, triples_i.size(0)).view(-1)).cuda()
        triples_i = triples_i.repeat(times_to_rep, 1)
        triples_i[:, 0] = triples_i[:, 0] + triplet_offsets  # offset for replicated subjects
        triples_i[:, 2] = triples_i[:, 2] + triplet_offsets  # offset for replicated objects
        triples_stack.append(triples_i)
        # create index to drop for each sample
        candidates = torch.ones(boxes_stack[-1].shape[0], device=device)
        candidates[drop_indices] = 0  # set to zero the boxes that should be dropped
        candidates_stack.append(candidates)
    # Flatten the per-image stacks into single batch tensors.
    imgs = torch.cat(imgs_stack)
    imgs_in = torch.cat(imgs_in_stack)
    boxes = torch.cat(boxes_stack)
    objs = torch.cat(objs_stack)
    triples = torch.cat(triples_stack)
    obj_to_img_new = torch.cat(obj_to_img_new)
    candidates = torch.cat(candidates_stack).unsqueeze(1)
    if use_feats:
        feature_candidates = torch.ones((candidates.shape[0], 1), device=device)
    else:
        feature_candidates = candidates
    return imgs, imgs_in, objs, boxes, triples, obj_to_img_new, candidates, feature_candidates
def bbox_coordinates_with_margin(bbox, margin, img):
    """Convert a normalized bbox to integer pixel coordinates with padding.

    `bbox` holds (x0, y0, x1, y1) in [0, 1]; `img` is an NCHW batch whose
    last two dims give height/width.  Each side is padded by `margin` pixels
    and clipped to the image bounds.  Returns (left, right, top, bottom).
    """
    img_height = img.shape[2]
    img_width = img.shape[3]
    left = max(0, bbox[0] * img_width - margin)
    top = max(0, bbox[1] * img_height - margin)
    right = min(img_width, bbox[2] * img_width + margin)
    bottom = min(img_height, bbox[3] * img_height + margin)
    return int(left), int(right), int(top), int(bottom)
def visualize_imgs_boxes(imgs, imgs_pred, boxes, boxes_pred):
    """Debug helper: plot GT images (top row) and predictions (bottom row),
    each overlaid with its bounding box in red.  Blocks on plt.show().
    """
    nrows = imgs.size(0)
    # Move everything to numpy for matplotlib.
    imgs = imgs.detach().cpu().numpy()
    imgs_pred = imgs_pred.detach().cpu().numpy()
    boxes = boxes.detach().cpu().numpy()
    boxes_pred = boxes_pred.detach().cpu().numpy()

    plt.figure()
    for i in range(0, nrows):
        # i = j//2
        # Top row: ground-truth image with its (margin-0) box.
        ax1 = plt.subplot(2, nrows, i+1)
        img = np.transpose(imgs[i, :, :, :], (1, 2, 0)) / 255.
        plt.imshow(img)
        left, right, top, bottom = bbox_coordinates_with_margin(boxes[i, :], 0, imgs[i:i+1, :, :, :])
        bbox_gt = patches.Rectangle((left, top),
                                    width=right-left,
                                    height=bottom-top,
                                    linewidth=1, edgecolor='r', facecolor='none')
        # Add the patch to the Axes
        ax1.add_patch(bbox_gt)
        plt.axis('off')

        # Bottom row: predicted image with the predicted box.
        ax2 = plt.subplot(2, nrows, i+nrows+1)
        pred = np.transpose(imgs_pred[i, :, :, :], (1, 2, 0)) / 255.
        plt.imshow(pred)
        left, right, top, bottom = bbox_coordinates_with_margin(boxes_pred[i, :], 0, imgs[i:i+1, :, :, :])
        bbox_pr = patches.Rectangle((left, top),
                                    width=right-left,
                                    height=bottom-top,
                                    linewidth=1, edgecolor='r', facecolor='none')
        # ax2.add_patch(bbox_gt)
        ax2.add_patch(bbox_pr)
        plt.axis('off')
    plt.show()
    return
# Script entry point: run the full evaluation pipeline.
if __name__ == '__main__':
    main()
| azadef/interactive_scene_generation | evaluate_vg.py | evaluate_vg.py | py | 18,847 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.isfile",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "scene_generation.args.get_args",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.manu... |
37681820103 | import pygame
from configs import ColorConfig
class Button(object):
    """A clickable text button rendered onto its own pygame surface."""

    def __init__(self, x_coordinate: int, y_coordinate: int, button_width: int,
                 button_height: int, text_font: str, text_size: str,
                 button_name: str, onclick_function=None):
        # Geometry of the button in window coordinates.
        self.x = x_coordinate
        self.y = y_coordinate
        self.width = button_width
        self.height = button_height
        # Callback invoked with one argument when the button is clicked.
        self.function_by_click = onclick_function
        # Off-screen surface the button is drawn on, and its hit-test rect.
        self.buttonSurface = pygame.Surface((self.width, self.height))
        self.buttonRect = pygame.Rect(self.x, self.y, self.width, self.height)
        # Pre-rendered label; text_size arrives as a string and is cast here.
        self.buttonSurf = pygame.font.SysFont(
            text_font, int(text_size)).render(button_name, True, (20, 20, 20))

    def process(self, game_window: pygame.display, parameter: str):
        """Draw the button and handle hover/click for one frame.

        Returns True when a left click fired the callback, otherwise None.
        NOTE(review): `pygame.display` is a module, not a type -- the
        annotation is misleading; the argument looks like a display Surface.
        NOTE(review): on a click the method returns before blitting, so the
        pressed (green) state is only visible on a later frame.
        """
        self.buttonSurface.fill(ColorConfig.WHITE)
        if self.buttonRect.collidepoint(pygame.mouse.get_pos()):
            # Hover feedback.
            self.buttonSurface.fill(ColorConfig.GREY)
            if pygame.mouse.get_pressed(num_buttons=3)[0]:
                self.buttonSurface.fill(ColorConfig.GREEN)
                self.function_by_click(parameter)
                return True
        # Center the label on the button surface, then draw to the window.
        self.buttonSurface.blit(self.buttonSurf, [
            self.buttonRect.width / 2 - self.buttonSurf.get_rect().width / 2,
            self.buttonRect.height / 2 - self.buttonSurf.get_rect().height / 2
        ])
        game_window.blit(self.buttonSurface, self.buttonRect)
| pavst23/project_game | elements/button.py | button.py | py | 1,452 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.Surface",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"lin... |
40197352017 | from django.urls import path
from . import views
# URL configuration for the Employees app.
app_name = "Employees"  # namespace: reverse as "Employees:<name>"

urlpatterns = [
    # Profile pages for the logged-in employee.
    path('profile', views.profile, name="profile"),
    path('edit_profile', views.editprofile, name="edit_profile"),
    # Employee lookup / position views.
    path('check_employee', views.checkemployee, name="check_employee"),
    path('employee_position', views.employeeposition, name="employee_position"),
    # Permission management.
    path('modify_permissions', views.modifypermissions, name="modify_permissions"),
    # Access-request workflow.
    path('access_requests', views.access_request, name="access_request"),
    path('access_requests_list', views.access_request_list, name="access_request_list"),
]
| jakubbm/employees-management | Employees/urls.py | urls.py | py | 615 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
42269789956 | from .models import *
from .forms import *
from app import filtersets
import cx_Oracle
from django.http.response import Http404
from django.shortcuts import render, redirect
from django.contrib.auth import login
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.paginator import Paginator
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.db import connection
from functools import wraps
from django.contrib.admin.views import decorators
def staff_member_required(view_func):
    """Decorator: allow only active staff users, redirect everyone else.

    Non-staff (or inactive) users are sent to /acceso-denegado instead of
    the Django admin login page.
    """
    def _checklogin(request, *args, **kwargs):
        user = request.user
        if user.is_active and user.is_staff:
            # The user is valid. Continue to the admin page.
            return view_func(request, *args, **kwargs)
        return HttpResponseRedirect('/acceso-denegado')
    return wraps(view_func)(_checklogin)
# Override django.contrib.admin.views.decorators.staff_member_required with
# the project's version so admin-protected views redirect to /acceso-denegado
# instead of the default admin login flow.
decorators.staff_member_required = staff_member_required
def accesodenegado(request):
    """Render the access-denied page."""
    template_name = 'acceso-denegado.html'
    return render(request, template_name)
def groups_only(*groups):
    """Decorator factory: restrict a view to users in any of *groups*.

    Users outside all listed groups are redirected to the
    'acceso-denegado' route.
    """
    def inner(view_func):
        @wraps(view_func)
        def wrapper_func(request, *args, **kwargs):
            user_groups = request.user.groups
            if user_groups.filter(name__in=groups).exists():
                return view_func(request, *args, **kwargs)
            return redirect(to='acceso-denegado')
        return wrapper_func
    return inner
@staff_member_required
def crear_grupo(request):
    """Create an auth Group from GrupoForm; staff only."""
    data = {'form': GrupoForm}
    if request.method == 'POST':
        formulario = GrupoForm(data=request.POST)
        if formulario.is_valid():
            gru = formulario.save()
            messages.success(request, "Grupo "+gru.name+" creado correctamente!")
            return redirect(to='mantenedor-usr')
        # Re-render with the bound form so validation errors are shown.
        data["form"] = formulario
    return render(request, 'registration/group.html', data)
@staff_member_required
def user_filter(request):
    """Demo view: render user/profile listings through django-filter FilterSets.

    NOTE(review): the FilterSets are passed to the template unpaginated; the
    triple-quoted block below is an earlier paginated variant kept verbatim.
    """
    # https://www.youtube.com/watch?v=dkJ3uqkdCcY
    #https://django-filter.readthedocs.io/en/stable/index.html
    """
    filtro = filtersets.UsertFilter(
        request.GET,
        queryset= User.objects.all()
    )
    PerfilF = filtersets.PerfilFilter(
        request.GET,
        queryset= Perfil.objects.all()
    )
    page = request.GET.get('page', 1)
    try:
        paginator = Paginator(PerfilF, 5)
        PerfilF = paginator.page(page)
    except:
        raise Http404
    context = {
        'filtro': filtro,
        'entity':PerfilF,
        'paginator': paginator
    }
    """
    # Filter users and profiles by the GET query-string parameters.
    filtro = filtersets.UsertFilter(
        request.GET,
        queryset=User.objects.all()
    )
    PerfilF = filtersets.PerfilFilter(
        request.GET,
        queryset=Perfil.objects.all()
    )
    context = {
        'filtro': filtro,
        'PerfilF': PerfilF,
    }
    return render(request, 'pruebas/ekisde.html', context)
@staff_member_required
def signup_view(request):
    """Create a user plus a Perfil and a role-specific record; staff only.

    The POST is validated against the user and profile forms; depending on
    perfil.tipo_perf ('1' admin, '2' professional, '3' client) the matching
    role form is also saved and linked to the new profile.

    NOTE(review): on an invalid POST the bound forms are discarded and fresh
    empty forms are rendered, so field errors never reach the template.
    """
    context = {'form': CustomUserCreationForm(),
               'form_p': PerfilForm(),
               'adminform': AdminForm(),
               'proform': ProfesionalForm(),
               'cliform': ClienteForm(),
               }
    if request.method == 'POST':
        formulario = CustomUserCreationForm(data=request.POST)
        formPerfil = PerfilForm(data=request.POST)
        formAdm = AdminForm(data=request.POST)
        formProf = ProfesionalForm(data=request.POST)
        formCli = ClienteForm(data=request.POST)
        if formulario.is_valid() and formPerfil.is_valid():
            usuario = formulario.save()
            # Attach the selected auth group to the new user.
            group = request.POST.get('groups')
            usuario.groups.add(group)
            # Link the profile to the freshly created auth user.
            perfil = formPerfil.save(commit=False)
            perfil.id_auth_user = usuario
            perfil.save()
            # Create the role-specific record for the profile type.
            if perfil.tipo_perf == '1':
                admin = formAdm.save(commit=False)
                admin.id_perfil = perfil
                admin.save()
            elif perfil.tipo_perf == '2':
                prof = formProf.save(commit=False)
                prof.id_perfil = perfil
                prof.save()
            elif perfil.tipo_perf == '3':
                cli = formCli.save(commit=False)
                cli.id_perfil = perfil
                cli.save()
            messages.success(request, 'Usuario '+usuario.username+' creado correctamente')
            return redirect(to="mantenedor-usr")
    context = {'form': CustomUserCreationForm(),
               'form_p': PerfilForm(),
               'adminform': AdminForm(),
               'proform': ProfesionalForm(),
               'cliform': ClienteForm(),
               }
    return render(request, 'registration/signup.html', context)
def home(request):
    """Public landing page."""
    template_name = 'home.html'
    return render(request, template_name)
@staff_member_required
def home_admin(request):
    """Admin dashboard listing every user account; staff only."""
    all_users = User.objects.all().order_by('id')
    return render(request, 'administrador/home-adm.html', {'usuario': all_users})
@staff_member_required
def maintainer(request):
    """Top-level maintainer menu; staff only."""
    template_name = 'administrador/mantenedor.html'
    return render(request, template_name)
@staff_member_required
def maintainer_user(request):
    """User maintainer menu; staff only."""
    template_name = 'administrador/mantenedor-usuario.html'
    return render(request, template_name)
@staff_member_required
def maintainer_plan(request):
    """Plan maintainer menu; staff only."""
    template_name = 'administrador/mantenedor-plan.html'
    return render(request, template_name)
@staff_member_required
def maintainer_service(request):
    """Service maintainer menu; staff only."""
    template_name = 'administrador/mantenedor-servicio.html'
    return render(request, template_name)
def login_view(request):
    """Authenticate with Django's AuthenticationForm and start a session.

    GET renders an empty form; a valid POST logs the user in and sends them
    home, while an invalid POST re-renders the bound form with its errors.
    """
    if request.method == 'POST':
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            login(request, form.get_user())
            return redirect(to='home')
    else:
        form = AuthenticationForm()
    return render(request, 'registration/login.html', {'form': form})
def login_filter(request):
    """Route a freshly logged-in user to the home page for their role."""
    user = request.user
    if user.groups.filter(name="Administrador") or user.is_staff:
        return redirect(to='home-adm')
    if user.groups.filter(name="Profesional"):
        return redirect(to='home-prof')
    return redirect(to='home-cliente')
@staff_member_required
# mantenedor
def UserLista(request):
    """Paginated list of all users (5 per page); staff only."""
    users = User.objects.all().order_by('id')
    requested_page = request.GET.get('page', 1)
    try:
        paginator = Paginator(users, 5)
        page_obj = paginator.page(requested_page)
    except:
        raise Http404
    return render(request, 'administrador/lista.html',
                  {'entity': page_obj, 'paginator': paginator})
@staff_member_required
def UserEdit(request, id):
    """Edit an existing user via UserUpdateForm; staff only."""
    usuario = User.objects.get(id=id)
    if request.method == 'GET':
        form = UserUpdateForm(instance=usuario)
    else:
        form = UserUpdateForm(request.POST, instance=usuario)
        if form.is_valid():
            form.save()
            messages.success(request, "Usuario "+usuario.username+" modificado correctamente")
            return redirect(to="listar")
    # GET, or invalid POST: show the (possibly bound) form.
    return render(request, 'administrador/editar.html', {'form': form})
@staff_member_required
def UserDelete(request,id):
    """Soft-delete: mark a user inactive through the UserActive form."""
    usuario = User.objects.get(id=id)
    # Flag the instance as inactive before handing it to the form.
    usuario.is_active = 0
    # NOTE(review): the method check looks inverted relative to UserEdit --
    # on POST the form is built UNBOUND (no data); on other methods it is
    # bound to request.POST. Deactivation only proceeds when the form
    # validates; confirm this flow is intentional.
    if request.method == 'POST':
        form = UserActive(instance=usuario)
    else:
        form = UserActive(request.POST, instance=usuario)
    if form.is_valid():
        form.save()
        messages.success(request, "Usuario desactivado correctamente")
        # NOTE(review): when the form is invalid this view returns None,
        # which Django rejects -- verify the invalid path is unreachable.
        return redirect(to="listar")
@staff_member_required
def UserActivate(request,id):
    """Re-activate a previously deactivated user; staff only."""
    usuario = User.objects.get(id=id)
    # NOTE(review): same inverted method check as UserDelete -- on POST the
    # form is unbound; on other methods it is bound to request.POST.
    if request.method == 'POST':
        form = UserActive(instance=usuario)
    else:
        form = UserActive(request.POST, instance=usuario)
    if form.is_valid():
        user = form.save()
        user.is_active = True
        user.save()
        messages.success(request, "Usuario activado correctamente")
        # NOTE(review): invalid form falls through and returns None.
        return redirect(to="listar")
@staff_member_required
## PLAN maintainer views
def PlanCreate(request):
    """Create a Plan from PlanForm; staff only."""
    data = {'form': PlanForm}
    if request.method == 'POST':
        formulario = PlanForm(data=request.POST)
        if formulario.is_valid():
            formulario.save()
            messages.success(request, "Plan creado correctamente!")
            return redirect(to='mantenedor')
        # Re-render with the bound form so validation errors are shown.
        data["form"] = formulario
    return render(request, 'administrador/planes/agregar-plan.html', data)
@staff_member_required
def plan_lista(request):
    """Paginated list of all plans (5 per page); staff only."""
    plans = Plan.objects.all().order_by('id_plan')
    requested_page = request.GET.get('page', 1)
    try:
        paginator = Paginator(plans, 5)
        page_obj = paginator.page(requested_page)
    except:
        raise Http404
    return render(request, 'administrador/planes/lista-plan.html',
                  {'entity': page_obj, 'paginator': paginator})
@staff_member_required
def PlanEdit(request, id_plan):
    """Edit an existing Plan via PlanUpdateForm; staff only."""
    plan = Plan.objects.get(id_plan=id_plan)
    if request.method == 'GET':
        form = PlanUpdateForm(instance=plan)
    else:
        form = PlanUpdateForm(request.POST, instance=plan)
        if form.is_valid():
            form.save()
            messages.success(request, "Plan modificado correctamente")
            return redirect(to='lista-plan')
    # GET, or invalid POST: show the (possibly bound) form.
    return render(request, 'administrador/planes/editar-plan.html', {'form': form})
@staff_member_required
def PlanDelete(request,id):
    """Soft-delete: mark a plan inactive (estado=0) via PlanActive."""
    plan = Plan.objects.get(id_plan=id)
    plan.estado = 0
    # NOTE(review): inverted method check (unbound form on POST, bound on
    # other methods) -- same pattern as UserDelete; confirm intentional.
    if request.method == 'POST':
        form = PlanActive(instance=plan)
    else:
        form = PlanActive(request.POST, instance=plan)
    if form.is_valid():
        form.save()
        messages.success(request, "Plan desactivado correctamente")
        # NOTE(review): invalid form falls through and returns None.
        return redirect(to="lista-plan")
@staff_member_required
def PlanActivate(request,id):
    """Re-activate a plan (estado=1) via PlanActive."""
    plan = Plan.objects.get(id_plan=id)
    # NOTE(review): inverted method check -- see PlanDelete.
    if request.method == 'POST':
        form = PlanActive(instance=plan)
    else:
        form = PlanActive(request.POST, instance=plan)
    if form.is_valid():
        plan = form.save()
        plan.estado = 1
        plan.save()
        messages.success(request, "Plan activado correctamente")
        # NOTE(review): invalid form falls through and returns None.
        return redirect(to="lista-plan")
## SERVICE maintainer views
@staff_member_required
def ServicioCreate(request):
    """Create a Servicio from ServicioForm; staff only."""
    data = {'form': ServicioForm}
    if request.method == 'POST':
        formulario = ServicioForm(data=request.POST)
        if formulario.is_valid():
            formulario.save()
            messages.success(request, "Servicio creado correctamente!")
            return redirect(to='mantenedor')
        # Re-render with the bound form so validation errors are shown.
        data["form"] = formulario
    return render(request, 'administrador/servicios/agregar-servicio.html', data)
@staff_member_required
def Servicio_lista(request):
    """Paginated list of all services (5 per page); staff only."""
    services = Servicio.objects.all().order_by('id_servicio')
    requested_page = request.GET.get('page', 1)
    try:
        paginator = Paginator(services, 5)
        page_obj = paginator.page(requested_page)
    except:
        raise Http404
    return render(request, 'administrador/servicios/lista-servicio.html',
                  {'entity': page_obj, 'paginator': paginator})
@staff_member_required
def ServicioEdit(request, id_servicio):
    """Edit an existing Servicio via ServicioUpdateForm; staff only."""
    servicio = Servicio.objects.get(id_servicio=id_servicio)
    if request.method == 'GET':
        form = ServicioUpdateForm(instance=servicio)
    else:
        form = ServicioUpdateForm(request.POST, instance=servicio)
        if form.is_valid():
            form.save()
            messages.success(request, "Servicio modificado correctamente")
            return redirect(to='lista-servicios')
    # GET, or invalid POST: show the (possibly bound) form.
    return render(request, 'administrador/servicios/editar-servicio.html', {'form': form})
@staff_member_required
def ServicioDelete(request,id):
    """Soft-delete: mark a service inactive (estado=0) via ServicioActive."""
    serv = Servicio.objects.get(id_servicio=id)
    serv.estado = 0
    # NOTE(review): inverted method check (unbound form on POST, bound on
    # other methods) -- same pattern as UserDelete; confirm intentional.
    if request.method == 'POST':
        form = ServicioActive(instance=serv)
    else:
        form = ServicioActive(request.POST, instance=serv)
    if form.is_valid():
        form.save()
        messages.success(request, "Servicio desactivado correctamente")
        # NOTE(review): invalid form falls through and returns None.
        return redirect(to="lista-servicios")
@staff_member_required
def ServicioActivate(request,id):
    """Re-activate a service (estado=1) via ServicioActive."""
    serv = Servicio.objects.get(id_servicio=id)
    # NOTE(review): inverted method check -- see ServicioDelete.
    if request.method == 'POST':
        form = ServicioActive(instance=serv)
    else:
        form = ServicioActive(request.POST, instance=serv)
    if form.is_valid():
        serv = form.save()
        serv.estado = 1
        serv.save()
        messages.success(request, "Servicio activado correctamente")
        # NOTE(review): invalid form falls through and returns None.
        return redirect(to="lista-servicios")
#informacion de clientes ClienteForm
def cliente_datos():
    """Fetch client rows via the sp_listar_datos_cliente stored procedure.

    Returns the rows produced by the procedure's OUT ref-cursor as a list.
    """
    django_cursor = connection.cursor()
    cursor = django_cursor.connection.cursor()   # calling cursor ("Este llama")
    out_cur = django_cursor.connection.cursor()  # OUT ref-cursor receiving the rows
    try:
        cursor.callproc("sp_listar_datos_cliente", [out_cur])
        return list(out_cur)
    finally:
        # Close the raw driver cursors explicitly instead of leaking them.
        cursor.close()
        out_cur.close()
@staff_member_required
def infoCliente(request):
    """Paginated client-information listing (5 per page) fed by a stored procedure."""
    rows = cliente_datos()
    page = request.GET.get('page', 1)
    try:
        paginator = Paginator(rows, 5)
        rows = paginator.page(page)
    except:
        raise Http404
    return render(
        request,
        'administrador/info_cliente/info-cliente.html',
        {'entity': rows, 'paginator': paginator},
    )
#informacion de clientes ProfesionalForm
@staff_member_required
def infoProfesional(request):
    """Paginated listing of Profesional records, 5 per page."""
    profesionales = Profesional.objects.all().order_by('id_prof')
    page = request.GET.get('page', 1)
    try:
        paginator = Paginator(profesionales, 5)
        profesionales = paginator.page(page)
    except:
        raise Http404
    return render(
        request,
        'administrador/info_profesional/info-profesional.html',
        {'entity': profesionales, 'paginator': paginator},
    )
#informacion de perfiles
@staff_member_required
def infoPerfil(request):
    """Filtered, paginated Perfil listing (5 per page)."""
    PerfilF = filtersets.PerfilFilter(request.GET, queryset=Perfil.objects.all())
    page = request.GET.get('page', 1)
    try:
        # BUG FIX: paginate the filtered queryset (.qs). The FilterSet object
        # itself has no length, so Paginator.page() raised — and the old
        # handler was a bare string literal ("""raise Http404""") that
        # swallowed the error and left `paginator` undefined (NameError below).
        paginator = Paginator(PerfilF.qs, 5)
        entity = paginator.page(page)
    except Exception:
        raise Http404
    context = {
        'entity': entity,
        'paginator': paginator
    }
    return render(request, 'administrador/info_perfil/info-perfil.html', context)
@staff_member_required
def modificar_perfil(request, id_perfil):
    # Edit a Perfil. GET shows the pre-filled form; POST validates, reports the
    # outcome through the messages framework and always bounces back to the
    # profile listing (success or failure).
    perfil = Perfil.objects.get(id_perfil=id_perfil)
    if request.method == 'GET':
        form = PerfilModificar(instance=perfil)
    else:
        form = PerfilModificar(request.POST, instance=perfil)
        if form.is_valid():
            form.save()
            messages.success(request, "Perfil de "+perfil.id_auth_user.first_name+" "+perfil.id_auth_user.last_name+" modificado correctamente!")
        else:
            # NOTE(review): the error message assumes the only possible
            # validation failure is a duplicate RUT — confirm against the form.
            messages.error(request, "Ha ingresado un rut ya registrado, no se han guardado cambios.")
        return redirect(to='infoPerfil')
    context = {
        'form': form
    }
    return render(request, 'administrador/info_perfil/modificar-perfil.html', context)
"""
Utilizando procedures
"""
def lista_actividades():
django_cursor = connection.cursor()
cursor = django_cursor.connection.cursor() #Este llama
out_cur = django_cursor.connection.cursor() # este recive
cursor.callproc("sp_listar_actividades",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
def lista_capacitacion():
django_cursor = connection.cursor()
cursor = django_cursor.connection.cursor() #Este llama
out_cur = django_cursor.connection.cursor() # este recive
cursor.callproc("sp_listar_capacitacion",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
def lista_asesoria():
django_cursor = connection.cursor()
cursor = django_cursor.connection.cursor() #Este llama
out_cur = django_cursor.connection.cursor() # este recive
cursor.callproc("sp_listar_asesoria",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
def lista_visita():
django_cursor = connection.cursor()
cursor = django_cursor.connection.cursor() #Este llama
out_cur = django_cursor.connection.cursor() # este recive
cursor.callproc("sp_listar_visita",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
def lista_cliente():
django_cursor = connection.cursor()
cursor = django_cursor.connection.cursor() #Este llama
out_cur = django_cursor.connection.cursor() # este recive
cursor.callproc("sp_listar_cliente",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
def lista_profesional():
django_cursor = connection.cursor()
cursor = django_cursor.connection.cursor() #Este llama
out_cur = django_cursor.connection.cursor() # este recive
cursor.callproc("sp_listar_profesional",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
def guardar_actividad(nombre,descripcion,tipo_act,fec_estimada,estado,id_cli,id_prof):
    # Insert an Actividad through the sp_agregar_actividad_corta stored
    # procedure. `salida` is an Oracle OUT NUMBER parameter — presumably 1 on
    # success (crear_actividad compares it to 1); TODO confirm against the
    # procedure definition.
    django_cursor = connection.cursor()
    cursor = django_cursor.connection.cursor() #Este llama
    salida = cursor.var(cx_Oracle.NUMBER)
    cursor.callproc('sp_agregar_actividad_corta',[nombre,descripcion,tipo_act,fec_estimada,estado,id_cli,id_prof, salida])
    return salida.getvalue()
# actividades
@staff_member_required
def actividades(request):
    """Paginated activity listing fed by the sp_listar_actividades procedure."""
    filas = lista_actividades()
    page = request.GET.get('page', 1)
    try:
        paginator = Paginator(filas, 5)
        filas = paginator.page(page)
    except:
        raise Http404
    return render(
        request,
        'administrador/actividades/actividades_lista.html',
        {'entity': filas, 'paginator': paginator},
    )
@staff_member_required
def crear_actividad(request):
    """Render the activity-creation page; persist a new activity on POST."""
    data = {
        'capacitacion': lista_capacitacion(),
        'asesoria': lista_asesoria(),
        'visita': lista_visita(),
        'cliente': Cliente.objects.all(),
        'profesional': Profesional.objects.all(),
    }
    if request.method == 'POST':
        # Collect the form fields in the order guardar_actividad expects them.
        campos = ('nombre', 'descripcion', 'tipo_act', 'fec_estimada',
                  'estado', 'id_cli', 'id_prof')
        valores = [request.POST.get(campo) for campo in campos]
        salida = guardar_actividad(*valores)
        if salida == 1:
            data['mensaje'] = 'Agregado Correctamente'
            return redirect(to='actividades')
        data['mensaje'] = 'No se a podido guardar'
    return render(request, 'administrador/actividades/crear.html', data)
@staff_member_required
def actualizar_actividad(request, id_actividad):
    """Update an Actividad; on a valid POST save and return to the listing."""
    act = Actividad.objects.get(id_actividad=id_actividad)
    if request.method != 'GET':
        form = ActualizarActividad(request.POST, instance=act)
        if form.is_valid():
            form.save()
            messages.success(request, "Actividad modificada correctamente")
            return redirect(to='actividades')
    else:
        form = ActualizarActividad(instance=act)
    return render(request, 'administrador/actividades/actualizar.html', {'form': form})
@staff_member_required
def checklist(request):
    """Create a checklist entry from the posted form; re-render with errors otherwise."""
    context = {'form': listaForm}
    if request.method == 'POST':
        bound = listaForm(data=request.POST)
        if bound.is_valid():
            bound.save()
            messages.success(request, "Creado correctamente!")
            return redirect(to='listaCheck')
        # Invalid submission: show the bound form with its errors.
        context['form'] = bound
    return render(request, 'administrador/checklist/checklist.html', context)
@staff_member_required
def listaCheck(request):
    """Paginated listing of CliCheckPro records, 5 per page."""
    registros = CliCheckPro.objects.all().order_by('id_clicheck')
    page = request.GET.get('page', 1)
    try:
        paginator = Paginator(registros, 5)
        registros = paginator.page(page)
    except:
        raise Http404
    return render(
        request,
        'administrador/checklist/listado.html',
        {'entity': registros, 'paginator': paginator},
    )
@staff_member_required
def modificaCheck(request, id_clicheck):
    """Edit a CliCheckPro row; save on a valid POST, otherwise render the form."""
    registro = CliCheckPro.objects.get(id_clicheck=id_clicheck)
    if request.method != 'GET':
        form = listaForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            messages.success(request, "Modificado correctamente")
            return redirect(to='listaCheck')
    else:
        form = listaForm(instance=registro)
    return render(request, 'administrador/checklist/modificar.html', {'form': form})
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.views.decorators.staff_member_required",
"line_number": 35,
"usage_... |
28521286035 | """Added instructions html to make instructions dynamic
Revision ID: 6a1ef6fabfaf
Revises: 1c8b21137307
Create Date: 2017-08-12 01:36:17.185403
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '6a1ef6fabfaf'
down_revision = '1c8b21137307'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable LONGTEXT column `instruction_html` to the `test` table."""
    instruction_column = sa.Column('instruction_html', mysql.LONGTEXT(), nullable=True)
    op.add_column('test', instruction_column)
def downgrade():
    """Revert upgrade(): drop `instruction_html` from the `test` table."""
    op.drop_column('test', 'instruction_html')
| harveyslash/backend-cleaned | beatest/migrations/versions/0005_6a1ef6fabfaf_added_instructions_html_to_make_.py | 0005_6a1ef6fabfaf_added_instructions_html_to_make_.py | py | 748 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialect... |
14095456258 | # Aditya Halder // @AdityaHalder
import os
import aiofiles
import aiohttp
import ffmpeg
import requests
from os import path
from asyncio.queues import QueueEmpty
from typing import Callable
from pyrogram import Client, filters
from pyrogram.types import Message, Voice, InlineKeyboardButton, InlineKeyboardMarkup
from pyrogram.errors import UserAlreadyParticipant
from modules.cache.admins import set
from modules.clientbot import clientbot, queues
from modules.clientbot.clientbot import client as USER
from modules.helpers.admins import get_administrators
from youtube_search import YoutubeSearch
from modules import converter
from modules.downloaders import youtube
from modules.config import que, SUDO_USERS
from modules.cache.admins import admins as a
from modules.helpers.command import commandpro
from modules.helpers.filters import command, other_filters
from modules.helpers.decorators import errors, sudo_users_only
from modules.helpers.errors import DurationLimitError
from modules.helpers.gets import get_url, get_file_name
from pytgcalls import StreamType
from pytgcalls.types.input_stream import InputStream
from pytgcalls.types.input_stream import InputAudioStream
# plus
chat_id = None
useer = "NaN"
def transcode(filename):
    """Convert `filename` to raw 48 kHz stereo s16le PCM ("input.raw"), then delete the source file."""
    source = ffmpeg.input(filename)
    sink = source.output("input.raw", format="s16le", acodec="pcm_s16le", ac=2, ar="48k")
    sink.overwrite_output().run()
    os.remove(filename)
# Convert seconds to mm:ss (whole hours wrap around, matching the original behaviour).
def convert_seconds(seconds):
    """Format a duration in seconds as zero-padded "MM:SS", discarding whole hours."""
    within_hour = (seconds % (24 * 3600)) % 3600
    minutes, secs = divmod(within_hour, 60)
    return "%02d:%02d" % (minutes, secs)
# Convert "[hh:]mm:ss"-style strings into a total number of seconds.
def time_to_seconds(time):
    """Parse a colon-separated time string into seconds (e.g. "1:02:03" -> 3723)."""
    total, weight = 0, 1
    for chunk in reversed(str(time).split(":")):
        total += int(chunk) * weight
        weight *= 60
    return total
@Client.on_message(
    commandpro(["ply"])
    & filters.group
    & ~filters.edited
    & ~filters.forwarded
    & ~filters.via_bot
)
@errors
@sudo_users_only
async def play(_, message: Message):
    # /ply handler: queue or start playback in this chat's group voice call.
    # Source priority: (1) replied-to audio/voice message, (2) explicit URL in
    # the message, (3) YouTube search on the command's free-text argument.
    global que
    global useer
    await message.delete()
    lel = await message.reply("**🔄 Ƥɤøƈɘssɩɳʛ ...**")
    administrators = await get_administrators(message.chat)  # NOTE(review): unused here
    chid = message.chat.id  # NOTE(review): unused; chat_id is re-read below
    audio = (
        (message.reply_to_message.audio or message.reply_to_message.voice)
        if message.reply_to_message
        else None
    )
    url = get_url(message)
    if audio:
        # Case 1: replied-to audio — download it (unless already cached in
        # downloads/) and convert to a playable file.
        file_name = get_file_name(audio)
        title = file_name
        thumb_name = "https://te.legra.ph/file/ed6920a2f0ab5af3fd55d.png"
        thumbnail = thumb_name
        duration = round(audio.duration / 60)
        views = "Locally added"
        requested_by = message.from_user.first_name
        file_path = await converter.convert(
            (await message.reply_to_message.download(file_name))
            if not path.isfile(path.join("downloads", file_name))
            else file_name
        )
    elif url:
        # Case 2: explicit URL — try to look up metadata; fall back to "NaN"
        # placeholders if the lookup fails, then download and convert.
        try:
            results = YoutubeSearch(url, max_results=1).to_dict()
            # print results
            title = results[0]["title"]
            duration = results[0]["duration"]
            url_suffix = results[0]["url_suffix"]
            views = results[0]["views"]
            durl = url
            durl = durl.replace("youtube", "youtubepp")
            # Fold the "h:mm:ss" duration string into a seconds count.
            secmul, dur, dur_arr = 1, 0, duration.split(":")
            for i in range(len(dur_arr) - 1, -1, -1):
                dur += int(dur_arr[i]) * secmul
                secmul *= 60
        except Exception as e:
            title = "NaN"
            thumb_name = "https://te.legra.ph/file/ed6920a2f0ab5af3fd55d.png"
            duration = "NaN"
            views = "NaN"
        requested_by = message.from_user.first_name
        file_path = await converter.convert(youtube.download(url))
    else:
        # Case 3: free-text query — search YouTube and play the first hit.
        if len(message.command) < 2:
            return await lel.edit(
                "**🤖 Wɦɑʈ 🙃 Yøʋ 💿 Wɑŋʈ ðŸ˜�\n💞 Ƭø 🔊 ƤlÉ‘y❓ ...**"
            )
        await lel.edit("**🔎 Sɘɑɤƈɦɩɳʛ ...**")
        query = message.text.split(None, 1)[1]
        # print(query)
        await lel.edit("**🔄 Ƥɤøƈɘssɩɳʛ ...**")
        try:
            results = YoutubeSearch(query, max_results=1).to_dict()
            url = f"https://youtube.com{results[0]['url_suffix']}"
            # print results
            title = results[0]["title"]
            duration = results[0]["duration"]
            url_suffix = results[0]["url_suffix"]
            views = results[0]["views"]
            durl = url
            durl = durl.replace("youtube", "youtubepp")
            secmul, dur, dur_arr = 1, 0, duration.split(":")
            for i in range(len(dur_arr) - 1, -1, -1):
                dur += int(dur_arr[i]) * secmul
                secmul *= 60
        except Exception as e:
            # No result (or metadata parse failure): tell the user and bail out.
            await lel.edit(
                "**🔊 Ɱʋsɩƈ 😕 Ñøʈ 📵 Føʋɳɗ❗\n💞 Ƭɤy ♨ Ʌɳøʈɦɘɤ 🌷...**"
            )
            print(str(e))
            return
        requested_by = message.from_user.first_name
        file_path = await converter.convert(youtube.download(url))
    # Either enqueue (already streaming in this chat) or join the call and play.
    ACTV_CALLS = []
    chat_id = message.chat.id
    for x in clientbot.pytgcalls.active_calls:
        ACTV_CALLS.append(int(x.chat_id))
    if int(chat_id) in ACTV_CALLS:
        position = await queues.put(chat_id, file=file_path)
        await lel.edit("**💥 Ƙɑlɑ 🤞 Ʌɗɗɘɗ 💿 Søɳʛ❗️\n🔊 Ʌʈ 💞 Ƥøsɩʈɩøɳ » `{}` 🌷 ...**".format(position),
        )
    else:
        await clientbot.pytgcalls.join_group_call(
            chat_id,
            InputStream(
                InputAudioStream(
                    file_path,
                ),
            ),
            stream_type=StreamType().local_stream,
        )
        await lel.edit("**💥 Ƙɑlɑ 🤞 Ɱʋsɩƈ 🎸 Nøω 💞\n🔊 Ƥlɑyɩɳʛ 😝 ØƤ 🥀 ...**".format(),
        )
    return await lel.delete()
@Client.on_message(commandpro(["pse"]) & other_filters)
@errors
@sudo_users_only
async def pause(_, message: Message):
    """Pause the active voice-chat stream of this chat."""
    await message.delete()
    await clientbot.pytgcalls.pause_stream(message.chat.id)
    notice = await message.reply_text("**▶ Ƥɑʋsɘɗ 🌷 ...**")
    await notice.delete()
@Client.on_message(commandpro(["rsm"]) & other_filters)
@errors
@sudo_users_only
async def resume(_, message: Message):
    """Resume this chat's paused voice-chat stream."""
    await message.delete()
    await clientbot.pytgcalls.resume_stream(message.chat.id)
    notice = await message.reply_text("** Ʀɘsʋɱɘɗ 🌷 ...**")
    await notice.delete()
@Client.on_message(commandpro(["skp", "nxt"]) & other_filters)
@errors
@sudo_users_only
async def skip(_, message: Message):
    # /skp | /nxt handler: advance to the next queued track. If nothing is
    # playing, say so; if the queue empties, leave the voice call entirely.
    global que
    await message.delete()
    ACTV_CALLS = []
    chat_id = message.chat.id
    for x in clientbot.pytgcalls.active_calls:
        ACTV_CALLS.append(int(x.chat_id))
    if int(chat_id) not in ACTV_CALLS:
        # Not streaming in this chat at all.
        novc = await message.reply_text("**💥 Nøʈɦɩɳʛ 🔇 Ƥlɑyɩɳʛ 🌷 ...**")
        await novc.delete()
    else:
        # Mark the current track done, then either leave (empty queue) or
        # switch the stream to the next queued file.
        queues.task_done(chat_id)
        if queues.is_empty(chat_id):
            empt = await message.reply_text("**🥀 Empty Queue, Leaving VC ✨ ...**")
            await empt.delete()
            await clientbot.pytgcalls.leave_group_call(chat_id)
        else:
            next = await message.reply_text("** Sƙɩƥƥɘɗ 🌷 ...**")
            await next.delete()
            await clientbot.pytgcalls.change_stream(
                chat_id,
                InputStream(
                    InputAudioStream(
                        clientbot.queues.get(chat_id)["file"],
                    ),
                ),
            )
@Client.on_message(commandpro(["end", "stp"]) & other_filters)
@errors
@sudo_users_only
async def stop(_, message: Message):
    """Clear this chat's queue and leave its group voice call."""
    await message.delete()
    target_chat = message.chat.id
    try:
        clientbot.queues.clear(target_chat)
    except QueueEmpty:
        pass  # nothing queued — fine, still leave the call
    await clientbot.pytgcalls.leave_group_call(target_chat)
    farewell = await message.reply_text("** Sʈøƥƥɘɗ 🌷 ...**")
    await farewell.delete()
| ndika22/KaalMusic | plugins/vcbot.py | vcbot.py | py | 8,167 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ffmpeg.input",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.Message",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "modules.helpers.admins.... |
3562737092 | from modules import s3upload, s3transcribe, parse
import argparse
if __name__ == "__main__":
    # Three-stage pipeline: upload interview videos to S3, transcribe them,
    # then proofread the resulting transcripts.
    arg_parser = argparse.ArgumentParser(description='Process video, create transcripts, proofread with OpenAI GPT.')
    arg_parser.add_argument('input_folder', type=str, help='Input folder with .mp4 interview video(s)')
    arg_parser.add_argument('s3_folder', type=str, help='Output folder name to save files to in S3 bucket')
    cli_args = arg_parser.parse_args()

    print("Step 1: Uploading videos to S3...")
    s3upload.upload_to_s3(cli_args.input_folder, cli_args.s3_folder)

    print("Step 2: Transcribing videos from S3 and downloading the transcriptions...")
    transcribe_folder = s3transcribe.transcribe_from_s3(cli_args.s3_folder)
    s3transcribe.download_transcripts(transcribe_folder)

    print("Step 3: Parsing and proofreading transcriptions...")
    parse.proofread_transcripts(transcribe_folder)

    print("Finished processing videos! View the resulting transcript and .docx file in the timestamped folder.")
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "modules.s3upload.upload_to_s3",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "modules.s3upload",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": ... |
31019892466 | import json
import re
import os
from bs4 import BeautifulSoup
import io
import html2text
#import transformationScript
import datetime
#from pprint import pprint
class Word:
    """A single token paired with its BIO-style argument tag."""

    # Class-level defaults kept for backward compatibility; every instance
    # overwrites them in __init__.
    content = ""
    tag = ""

    def __init__(self, content, tag):
        self.content = content
        self.tag = tag
class Component:
    """Base class for argument components; tracks a distance and two positions.

    The class attributes double as defaults for instances that never call
    setDistance (claimsReplacer relies on startPosition == -1 for unmatched
    components).
    """

    distance = 20
    startPosition = -1
    position = -1

    def setDistance(self, distance):
        """Record the distance and reset both position markers."""
        self.distance = distance
        self.startPosition = -1
        self.position = -1
class Premise(Component):
    """An argumentative premise: a sequence of tagged Words."""

    words = []

    def __init__(self, words):
        self.words = words

    def getText(self):
        """Space-joined surface text of the premise."""
        return ' '.join(word.content for word in self.words)

    def getTags(self):
        """Space-joined tag sequence of the premise."""
        return ' '.join(word.tag for word in self.words)
class Claim(Component):
    """An argumentative claim: a sequence of tagged Words."""

    words = []

    def __init__(self, words):
        self.words = words

    def getText(self):
        """Space-joined surface text of the claim."""
        return ' '.join(word.content for word in self.words)

    def getTags(self):
        """Space-joined tag sequence of the claim."""
        return ' '.join(word.tag for word in self.words)
class Translator:
    """Maps each annotation JSON file to the HTML article it was created from."""

    contents = {}

    def __init__(self):
        # json file name -> html file name
        self.contents = {}

    def addPair(self, htmlFile, jsonfile):
        """Register the association jsonfile -> htmlFile."""
        self.contents[jsonfile] = htmlFile

    def createAssociation(self, nodeSet):
        """Read one node-set JSON and link it to the article URL found inside.

        The first node whose text embeds an http(s) URL names the source
        article; the known corpus prefix and the trailing character are
        stripped to leave the bare file name.
        """
        fileName = "corpusInput/json/" + nodeSet
        # BUG FIX: the file handle used to be opened and never closed.
        with open(fileName, "r") as file:
            elements = json.loads(file.read())
        for node in elements['nodes']:
            if 'http' in node['text']:
                link = re.search("(?P<url>https?://[^\s]+)", node['text']).group("url")
                link = re.sub('http://web.fe.up.pt/~ei11124/argmine_news/', '', link)
                link = link[:-1]  # drop the trailing character after the file name
                self.addPair(link, nodeSet)
                break

    def createAssociations(self):
        """Build associations for every JSON file in corpusInput/json."""
        for file in os.listdir("corpusInput/json"):
            self.createAssociation(file)
class TextDumper:
    """Extracts the main article line from an HTML file and tokenises it."""

    _nArgTag = "(O,|)"
    # Punctuation that gets a space inserted before it, and characters stripped
    # entirely — the same sets the original applied one re.sub at a time.
    _SPACED = ".:,;?!…"
    _REMOVED = "“”\"‘’()'`[]«»*"

    words = []
    file = ""

    def __init__(self, htmlFile):
        self.file = "corpusInput/html/" + htmlFile + '.html'
        self.words = []

    def getText(self):
        """Space-joined text of the tokens collected so far."""
        return ' '.join(word.content for word in self.words)

    def stripHtml(self):
        """Return the longest plain-text line of the HTML file, normalised.

        Punctuation in _SPACED is preceded by a space; characters in _REMOVED
        are deleted; digits are protected by the (?![0-9]) lookahead so
        decimal/thousand separators stay intact.
        """
        with io.open(self.file, 'r', encoding='utf8') as f:
            contents = f.read()
        plainText = html2text.html2text(contents)
        sentences = plainText.split('\n')
        # BUG FIX: the original never updated maxSize, so it effectively chose
        # the *last* non-empty line instead of the longest one.
        maxSize = chosen = 0
        for sentenceNumber, sentence in enumerate(sentences):
            if len(sentence) > maxSize:
                maxSize = len(sentence)
                chosen = sentenceNumber
        text = sentences[chosen]
        for mark in self._SPACED:
            text = re.sub('[' + re.escape(mark) + ']+(?![0-9])', ' ' + mark, text)
        for mark in self._REMOVED:
            text = re.sub('[' + re.escape(mark) + ']+(?![0-9])', '', text)
        print(text)
        return text

    def wordifyText(self):
        """Split the extracted line into Words tagged as non-argumentative."""
        for token in self.stripHtml().split(' '):
            if token != '':
                self.words.append(Word(token, self._nArgTag))
class claimsAndPremises:
    """Parses an annotation JSON file into tagged claims and premises."""

    # Punctuation spaced / stripped exactly as in the original chain of re.subs.
    _SPACED = ".:,;?!…"
    _REMOVED = "“”\"‘’()'`[]«»*"

    claims = []
    premises = []
    premisesToClaims = {}
    file = ""

    def __init__(self, jsonFile):
        self.file = "corpusInput/json/" + jsonFile + '.json'
        self.claims = []
        self.premises = []
        self.premisesToClaims = {}

    def removeHttp(self, elements):
        """Drop every node whose text embeds a URL.

        BUG FIX: the original removed items from the list it was iterating,
        which silently skipped the element following each removal.
        """
        elements['nodes'] = [n for n in elements['nodes'] if 'http' not in n['text']]
        return elements

    def removeInferences(self, elements):
        """Drop every 'Default Inference' node (same iterate-while-removing fix)."""
        elements['nodes'] = [n for n in elements['nodes']
                             if 'Default Inference' not in n['text']]
        return elements

    def collapseEdges(self, edges, nodes):
        """Collapse two-hop edges A->X->B into direct {A: B} premise->claim links.

        Self-links (identical source and target text) are skipped.
        """
        collapsedEdges = []
        for originEdge in edges:
            for destinationEdge in edges:
                if (destinationEdge['fromID'] == originEdge['toID']
                        and self.getNodeText(nodes, originEdge['fromID'])
                        != self.getNodeText(nodes, destinationEdge['toID'])):
                    collapsedEdges.append({originEdge['fromID']: destinationEdge['toID']})
        return collapsedEdges

    def getNodeText(self, nodes, nodeId):
        """Return the node's text with punctuation spaced/stripped; '' if absent."""
        nodeText = ''
        for node in nodes:
            if node['nodeID'] == nodeId:
                nodeText = node['text']
        for mark in self._SPACED:
            nodeText = re.sub('[' + re.escape(mark) + ']+(?![0-9])', ' ' + mark, nodeText)
        for mark in self._REMOVED:
            nodeText = re.sub('[' + re.escape(mark) + ']+(?![0-9])', '', nodeText)
        return nodeText

    def tagClaimOrPremise(self, words, type):
        """Tag a token list as claim or premise; sentence punctuation gets (O,|)."""
        taggedSentence = []
        for wordIndex, word in enumerate(words):
            if wordIndex == 0:
                tag = '(I,' + type + ')'
            elif word in ('.', ':', ';', '?', '!'):
                tag = '(O,|)'
            else:
                tag = '(I,' + type + ')'
            taggedSentence.append(Word(word, tag))
        return taggedSentence

    def isIntermediatePremise(self, claim, connections):
        """True when `claim` is itself the source of another collapsed edge."""
        for connection in connections:
            if next(iter(connection)) == claim:
                return True
        return False

    def getPremisesAndClaims(self):
        """Load the JSON file and populate claims, premises and their mapping."""
        # BUG FIX: the file handle used to be opened and never closed.
        with open(self.file, "r") as file:
            elements = self.removeHttp(json.loads(file.read()))
        connections = self.collapseEdges(elements['edges'], elements['nodes'])
        nodes = elements['nodes']
        for connection in connections:
            claim = self.getNodeText(nodes, connection[next(iter(connection))])
            self.claims.append(Claim(self.tagClaimOrPremise(claim.split(), 'claim')))
            premise = self.getNodeText(nodes, next(iter(connection)))
            self.premises.append(Premise(self.tagClaimOrPremise(premise.split(), 'premise')))
            self.premisesToClaims[premise] = claim
class claimsReplacer:
    """Overlays known claim/premise tags onto a tokenised article text.

    Note: processedText deliberately aliases originalText, so replacements
    mutate both views (matching the original implementation).
    """

    processedText = []
    originalText = []
    existingClaimsAndPremises = []

    def __init__(self, originalText, existingClaimsAndPremises):
        self.originalText = originalText
        self.existingClaimsAndPremises = existingClaimsAndPremises
        self.processedText = originalText

    def getOriginalText(self):
        return ' '.join(word.content for word in self.originalText)

    def getProcessedText(self):
        return ' '.join(word.content for word in self.processedText)

    def getTags(self):
        return ' '.join(word.tag for word in self.processedText)

    def matchText(self, wordPosition, component, textSize):
        """True if the component's words appear verbatim (case-insensitive) at wordPosition."""
        for offset, word in enumerate(component.words):
            index = wordPosition + offset
            if index >= textSize:
                return False
            if word.content.lower() != self.originalText[index].content.lower():
                return False
        return True

    def replaceText(self, wordPosition, component):
        """Overwrite the text's tokens with the component's tagged words."""
        for offset, word in enumerate(component.words):
            self.processedText[wordPosition + offset] = word

    def processText(self):
        """Scan the text for every claim, then every premise, tagging each match."""
        for claim in self.existingClaimsAndPremises.claims:
            self._tagMatches(claim)
        for premise in self.existingClaimsAndPremises.premises:
            self._tagMatches(premise)

    def _tagMatches(self, component):
        # Replace every occurrence; startPosition ends up at the LAST match,
        # exactly as in the original scan.
        first = component.words[0].content.lower()
        for wordPosition, word in enumerate(self.originalText):
            if first == word.content.lower():
                if self.matchText(wordPosition, component, len(self.originalText)):
                    self.replaceText(wordPosition, component)
                    component.startPosition = wordPosition
class DistanceCalculator:
    """Computes claim<->premise ordering positions and relative distances."""

    processedText = []
    claimsAndPremises = []

    def __init__(self, processedText, existingClaimsAndPremises):
        self.processedText = processedText
        self.claimsAndPremises = existingClaimsAndPremises

    def getKey(self, component):
        # Sort key: where the component starts in the text (-1 if never matched).
        return component.startPosition

    def returnUniqueComponents(self, components):
        """Return components de-duplicated by surface text, order preserved.

        BUG FIX: the original set `unique = True` once, *outside* the loop, so
        the first duplicate poisoned the flag and every later component — even
        unique ones — was dropped.
        """
        uniqueComponents = []
        for component in components:
            unique = True
            for kept in uniqueComponents:
                if component.getText() == kept.getText():
                    unique = False
                    break
            if unique:
                uniqueComponents.append(component)
        return uniqueComponents

    def arrangeComponents(self):
        """Assign every claim/premise its 1-based rank in textual order."""
        components = self.claimsAndPremises.claims + self.claimsAndPremises.premises
        components = self.returnUniqueComponents(components)
        components = sorted(components, key=self.getKey)
        position = 1
        for component in components:
            # Propagate the rank to every claim/premise starting at that spot.
            for claim in self.claimsAndPremises.claims:
                if claim.startPosition == component.startPosition:
                    claim.position = position
            for premise in self.claimsAndPremises.premises:
                if premise.startPosition == component.startPosition:
                    premise.position = position
            position += 1

    def calculateDistances(self):
        """Distance of each premise to its claim.

        NOTE(review): assumes claims[i] pairs with premises[i] — the lists are
        built pairwise in claimsAndPremises.getPremisesAndClaims.
        """
        for index, premise in enumerate(self.claimsAndPremises.premises):
            premise.distance = (self.claimsAndPremises.claims[index].position
                                - premise.position)

    def updatePremises(self):
        """Write each premise's distance into the distance slot of its word tags.

        NOTE(review): expects tags shaped like "(I,premise,D)" (the commented-out
        distance format elsewhere in this file); with the current "(I,premise)"
        tags this would clobber the final letter — confirm before re-enabling.
        """
        for premise in self.claimsAndPremises.premises:
            for word in premise.words:
                tag = list(word.tag)
                if tag[1] != 'O':
                    tag[len(tag) - 2] = str(premise.distance)
                    word.tag = "".join(tag)
class OutputWriter:
    """Writes the tagged text to the combined, text-only and tag-only outputs."""

    processedText = []
    textFile = ""
    tagFile = ""
    file = ""

    def __init__(self, processedText, file):
        self.processedText = processedText
        self.file = open("corpusOutputPunctuation/txt/" + file + '.txt', "w", encoding='utf-8')
        self.textFile = open("corpusOutputPunctuation/txt/texts/" + file + '.txt', "w", encoding='utf-8')
        self.tagFile = open("corpusOutputPunctuation/txt/tags/" + file + '.txt', "w", encoding='utf-8')

    def writeToTextFile(self):
        """Dump one word (and its tag) per line, then close all three handles.

        BUG FIX: the handles were never closed, leaking three file descriptors
        per processed document (writeToTextFile is called once per writer).
        """
        for word in self.processedText:
            self.textFile.write(u'' + word.content + '\n')
            self.tagFile.write(u'' + word.tag + '\n')
            self.file.write(u'' + word.content + '' + word.tag + '\n')
        for handle in (self.file, self.textFile, self.tagFile):
            handle.close()
class Pipeline:
    # End-to-end corpus builder: pair every annotation JSON with its HTML
    # article, tokenise the article, overlay claim/premise tags, and write the
    # tagged output files, timing the whole batch.
    def translate(self):
        translator = Translator()
        translator.createAssociations()
        files = translator.contents
        startTime = datetime.datetime.now().replace(microsecond=0)
        for (jsonFile, htmlFile) in files.items():
            # NOTE(review): '.html' / '.json' are regex patterns — the dot
            # matches any character; presumably intended as literal suffix strips.
            htmlFile = re.sub('.html', '', htmlFile)
            jsonFile = re.sub('.json', '', jsonFile)
            dumper = TextDumper(htmlFile)
            dumper.wordifyText()
            claims = claimsAndPremises(jsonFile)
            claims.getPremisesAndClaims()
            replacer = claimsReplacer(dumper.words, claims)
            replacer.processText()
            # Distance computation is currently disabled:
            #distanceCalculator = DistanceCalculator(replacer.processedText, replacer.existingClaimsAndPremises)
            #distanceCalculator.arrangeComponents()
            #distanceCalculator.calculateDistances()
            #distanceCalculator.updatePremises()
            #replacer = claimsReplacer(dumper.words, distanceCalculator.claimsAndPremises)
            #replacer.processText()
            output = OutputWriter(replacer.processedText, jsonFile)
            output.writeToTextFile()
        endTime = datetime.datetime.now().replace(microsecond=0)
        timeTaken = endTime - startTime
        print("Isto demorou ")
        print(timeTaken)
if __name__ == "__main__":
    # Guarded entry point so importing this module no longer triggers a full
    # corpus translation as a side effect.
    pipeline = Pipeline()
    pipeline.translate()
| fspring/NeuralArgMining | Translators/Portuguese/TransFormationScriptComplete.py | TransFormationScriptComplete.py | py | 17,944 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.loads",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 97,
"... |
17203277817 | #Pasos a seguir:
#1) Installar el repostorio de proyecto EELabs, con .env y credentials.json en \resources_folder\google, y el entorno conda
#2) Pegar este script en dentro del repositorio \
#3) Ejecutar desde su ubicacion
#IMPORTANTE no permite realizar actualizaciones de fecha, está pensado para una descarga única no para realizar descargas actualizadas
import select
from utils.devices_api.eelabs_devices_api import EELabsDevicesApi
from utils.devices.tess import TESS
from utils.devices.skyglow import SkyGlow
from utils.devices.sqm import SQM
from utils.devices.astmon import ASTMON
import pandas as pd
import numpy as np
from datetime import datetime
from utils.my_utils import Utils
import config as conf #Configuration variables
from utils.filter import Filter
from datetime import date
import argparse
import os
# Command-line interface of the script.
parser = argparse.ArgumentParser()
parser.add_argument('--f', '--from', type=int, help='Year from')
parser.add_argument('--to', type=int, help='Year to')
parser.add_argument('--out', '--output', required=True, type=str, help='Output filename')
parser.add_argument('--filter', type=str,
                    help='Data filtering: sun, moon, clouds, galaxy, zodiacal, sigma. '
                         'Format example: [sun,galaxy,zodiacal]. '
                         'Write all without brackets for all filters. Format example: all')
parser.add_argument('--device', type=str,
                    help='Format example: [LPL1_001,LPL2_033,stars1]. '
                         'Write a bare name for one device. Format example: stars1')
# argparse's type=bool is a trap: bool('False') is True, so any non-empty
# value would enable ephemerides. Parse the text explicitly instead.
parser.add_argument('--ephemeris',
                    type=lambda s: s.lower() in ('true', '1', 'yes'),
                    help='True for ephemeris included')
args = parser.parse_args()

initial_year = args.f
final_year = args.to
output = args.out
filter = args.filter  # NOTE: shadows the builtin; kept because the rest of the file uses this name.
ephemerides = args.ephemeris

# '--filter [a,b,c]' becomes a list of filter names; a bare word (e.g. 'all')
# stays a string; absent means no filtering.
if filter:
    if filter[0] == '[':
        filter = filter[1:-1].split(',')
else:
    filter = []

# '--device [d1,d2]' becomes a set of names; a single name becomes a
# one-element set; absent means all devices.
select_devices = args.device
if select_devices:
    if select_devices[0] == '[':
        select_devices = set(select_devices[1:-1].split(','))
    else:
        select_devices = set([select_devices])
#Create the save folder
# NOTE(review): backslash-joined paths make this Windows-only; os.path.join
# would be portable — left unchanged here.
output=output+'\Photometer_data'
if not os.path.exists(output):
    os.mkdir(output)
#Photometers dataset
# Fetch the catalogue of every photometer known to the EELabs API and flatten
# the nested 'info_location' record into plain columns.
devices=EELabsDevicesApi().get_all_devices_info()
devices=pd.DataFrame(devices)
# Drop metadata columns that are not needed for the download bookkeeping.
devices=devices.drop(['sg_type','lpl','zero_point','filters','mov_sta_position','local_timezone','location','info_img','info_tess','place','tester','info_org','__v','latitude','longitude','country','city'],axis=1)
localizacion=pd.DataFrame(list(devices['info_location'])).drop(['latitude_hide','longitude_hide'],axis=1)
devices['place']=localizacion['place']
devices['town']=localizacion['town']
devices['sub_region']=localizacion['sub_region']
devices['region']=localizacion['region']
devices['country']=localizacion['country']
devices['latitude']=localizacion['latitude']
devices['longitude']=localizacion['longitude']
devices['elevation']=localizacion['elevation']
devices=devices.drop(['info_location'],axis=1)
# The saved catalogue is read back later by Device() and Data_download().
devices.to_csv(output+'\All_devices.csv', index = False)
#Folder to save records
if not os.path.exists(output+'\Records'):
    os.mkdir(output+'\Records')
#Obtain the device class regardless of its type
def Device(device_name):
    """Return the device object for *device_name*, whatever its type.

    Looks the device up in the previously saved ``All_devices.csv`` catalogue
    and instantiates the matching class (TESS, SkyGlow, SQM or ASTMON).

    Raises:
        ValueError: if the catalogue lists a type this script does not know.
            (The original code fell through and crashed with an unbound-local
            NameError in that case.)
    """
    devices = pd.read_csv(output + '\All_devices.csv')
    device_type = devices[devices['name'] == device_name]['TYPE'].values[0]
    if device_type == TESS.TYPE:
        return TESS(name=device_name)
    elif device_type == SkyGlow.TYPE:
        return SkyGlow(name=device_name)
    elif device_type == SQM.TYPE:
        return SQM(name=device_name)
    elif device_type == ASTMON.TYPE:
        return ASTMON(name=device_name)
    raise ValueError('Unknown device type {!r} for device {!r}'.format(device_type, device_name))
#Obtain filtered data for a device and year
def Data(device_name,year,filter): #filter: data vector such as ['sun', 'moon'] for example.
    """Download one calendar year of data for one device and apply the filters.

    For each known filter ('sun', 'moon', 'clouds', 'galaxy', 'zodiacal',
    'sigma'): if it was requested (or filter=='all'), the offending rows are
    removed; otherwise a boolean column named after the filter is added,
    marking the rows that filter *would* have removed.

    Returns a DataFrame with time/mag/name (plus ephemeris columns when the
    module-level 'ephemerides' flag is set) and one boolean column per
    non-applied filter.
    """
    device_obj=Device(device_name)
    # Whole-year window: Jan 1st 00:00 of *year* up to Jan 1st of the next year.
    FIRST_DATE=pd.Timestamp(datetime(year, 1, 1, 0, 0), tz='UTC')
    LAST_DATE=pd.Timestamp(datetime(year+1, 1, 1, 0, 0), tz='UTC')
    df_all=None
    try:
        df_all = device_obj.get_all_data(date_from=FIRST_DATE, date_to=LAST_DATE,force=False)
        No_data=False
    except:
        df_all=None
        No_data=True
    if No_data:
        print('The device '+device_name+' not responded due to an error')
    # NOTE(review): when No_data is True, df_all is None and the next line
    # raises TypeError; the caller's try/except in Data_download absorbs it —
    # confirm this is the intended control flow.
    df_all=df_all[(df_all['mag']>conf.MAG_MIN) & (df_all['mag']<conf.MAG_MAX)] #Filter for extreme magnitudes
    # NOTE(review): this guard means ephemerides are only computed when the
    # script is executed directly, never when imported — confirm intended.
    if __name__ == '__main__':
        df_all = Utils().add_ephems(df_all, device_obj.getObserver(), parallelize=False) # The parallelize option is causing issues
    # V collects the names of the filters that were NOT applied, so their
    # boolean marker columns can be copied into the returned frame at the end.
    V=[]
    if 'sun' in filter or filter=='all':
        df_all = Filter().filter_sun(df_all, max_sun_alt=conf.SUN_ALT_MAX)
    else:
        df_filter=Filter().filter_sun(df_all, max_sun_alt=conf.SUN_ALT_MAX)
        F=np.array([True]*(df_all.index[-1]+1)) #Vector with all True for all indices
        F[df_filter.index]=False #Replace remaining indices to False after filtering
        df_all['sun']=F[df_all.index] #Retrieve data according to the original index
        V=V+['sun']
    # The remaining filters repeat the same keep-or-mark pattern as 'sun'.
    if 'moon' in filter or filter=='all':
        df_all = Filter().filter_moon(df_all, max_moon_alt=conf.MOON_ALT_MAX)
    else:
        df_filter=Filter().filter_moon(df_all, max_moon_alt=conf.MOON_ALT_MAX)
        F=np.array([True]*(df_all.index[-1]+1))
        F[df_filter.index]=False
        df_all['moon']=F[df_all.index]
        V=V+['moon']
    if 'clouds' in filter or filter=='all':
        clouds_threshold=conf.CLOUD_STD_FREQ
        df_all = Filter().filter_column(df_all, device_obj.getMagSTDColname(), max=clouds_threshold)
    else:
        clouds_threshold=conf.CLOUD_STD_FREQ
        df_filter=Filter().filter_column(df_all, device_obj.getMagSTDColname(), max=clouds_threshold)
        F=np.array([True]*(df_all.index[-1]+1))
        F[df_filter.index]=False
        df_all['clouds']=F[df_all.index]
        V=V+['clouds']
    if 'galaxy' in filter or filter=='all':
        df_all = Filter().filter_galactic_abs_lat(df_all, min_lat=conf.GALACTIC_LAT_MIN, max_lat=180)
    else:
        df_filter=Filter().filter_galactic_abs_lat(df_all, min_lat=conf.GALACTIC_LAT_MIN, max_lat=180)
        F=np.array([True]*(df_all.index[-1]+1))
        F[df_filter.index]=False
        df_all['galaxy']=F[df_all.index]
        V=V+['galaxy']
    if 'zodiacal' in filter or filter=='all':
        df_all = Filter().filter_column(df_all, col_name='ecliptic_f', max=conf.ECLIPTIC_F_MAX)
    else:
        df_filter=Filter().filter_column(df_all, col_name='ecliptic_f', max=conf.ECLIPTIC_F_MAX)
        F=np.array([True]*(df_all.index[-1]+1))
        F[df_filter.index]=False
        df_all['zodiacal']=F[df_all.index]
        V=V+['zodiacal']
    if 'sigma' in filter or filter=='all':
        sigma=conf.NSIGMA
        df_all = Filter().filter_nsigma(df_all, col_name='mag', sigma=sigma)
    else:
        sigma=conf.NSIGMA
        df_filter=Filter().filter_nsigma(df_all, col_name='mag', sigma=sigma)
        F=np.array([True]*(df_all.index[-1]+1))
        F[df_filter.index]=False
        df_all['sigma']=F[df_all.index]
        V=V+['sigma']
    # Keep only the reporting columns; optionally include the ephemerides.
    if ephemerides:
        df=pd.DataFrame({'time':df_all['time'],'mag':df_all['mag'],'name':device_name,'moon_phase':df_all['moon_phase'],'moon_alt':df_all['moon_alt'],'galactic_lat':df_all['galactic_lat'],'galactic_lon':df_all['galactic_lon'],'helioecliptic_lon_abs':df_all['helioecliptic_lon_abs'],'ecliptic_lat_abs':df_all['ecliptic_lat_abs']})
    else:
        df=pd.DataFrame({'time':df_all['time'],'mag':df_all['mag'],'name':device_name})
    # Copy the marker columns of the filters that were not applied.
    for ii in V:
        df[ii]=df_all[ii]
    return df
#Obtain all data between two years
def Data_download(V,initial_year=None,final_year=None,iterate=True): #Iterate to prompt for enter key per iteration
    """Download every missing device between initial_year and final_year.

    Progress is tracked in two places so the script can resume after a crash:
    the Records_*.csv files (the actual data, each file capped at ~1 GB) and
    Log.csv (one row per attempted device, with timestamp and whether the
    download came back empty).
    """
    #Downloaded devices
    # Scan the record files to recover the set of devices already downloaded.
    Downloaded_devices=set()
    for j in range(0,1000):
        try:
            df_records=pd.read_csv(output+'\Records\Records_'+str(j)+'.csv')
            Downloaded_devices=Downloaded_devices|set(df_records['name'])
        except:
            Downloaded_devices=Downloaded_devices
    #Log devices
    try:
        df_log=pd.read_csv(output+'\Log.csv')
        Log_devices=set(df_log['Devices'])
        Log_exists=True
    except:
        Log_devices=set()
        Log_exists=False
    # Devices present in the records but missing from the log (e.g. after a
    # crash between saving data and saving the log) get re-added via 'diff'.
    diff=Downloaded_devices-Log_devices
    Downloaded_devices=Downloaded_devices|Log_devices
    print(Downloaded_devices)
    df_all_devices=pd.read_csv(output+'\All_devices.csv')
    All_devices=set(df_all_devices['name'])
    if select_devices:
        Missing_devices=select_devices-Downloaded_devices
    else:
        Missing_devices=All_devices-Downloaded_devices #To know which devices need to be download
    n_missing_devices=len(Missing_devices)
    # Default year range: 2010 .. current year.
    if initial_year:
        i_year=initial_year
    else:
        i_year=2010
    if final_year:
        f_year=final_year
    else:
        f_year=date.today().year
    Downloaded_missing_devices=[]
    v_empty=[]
    v_time=[]
    #Loop where it goes device by device and then year by year
    for i in Missing_devices:
        df=pd.DataFrame()
        empty=True
        for ii in range(i_year,f_year+1):
            # A failed year is silently skipped; 'empty' stays True only if
            # no year returned any rows.
            try:
                dat=Data(i,ii,V)
                df=pd.concat([df,dat])
                if list(dat.values)!=[]:
                    empty=False
            except:
                df=df
            print('Year: '+str(ii))
        #Save
        #Saving with files limited to 1 GB
        try:
            df_records=pd.read_csv(output+'\Records\Records_1.csv')
            Records_exist=True
        except:
            df_final=df
            df_final.to_csv(output+'\Records\Records_1.csv', index = False)
            Records_exist=False
        if Records_exist==True:
            counter=0
            # NOTE(review): this loop has no break, so the device's data can be
            # appended to *every* existing record file smaller than 1 GB, not
            # just the first one — confirm whether that duplication is intended.
            for j in range(1,1000):
                try:
                    df_records=pd.read_csv(output+'\Records\Records_'+str(j)+'.csv')
                    if os.stat(output+'\Records\Records_'+str(j)+'.csv').st_size<1000000000:
                        df_final=pd.concat([df_records,df])
                        df_final.to_csv(output+'\Records\Records_'+str(j)+'.csv', index = False)
                        counter=1
                except:
                    # First missing Records_j: start a fresh file there, once.
                    if counter==0:
                        df_final=df
                        df_final.to_csv(output+'\Records\Records_'+str(j)+'.csv', index = False)
                        counter=1
        time=datetime.now()
        v_empty=v_empty+[empty]
        v_time=v_time+[time]
        Downloaded_missing_devices=Downloaded_missing_devices+[i]
        Log_downloaded_devices=pd.DataFrame({'Devices':Downloaded_missing_devices,'Time':v_time,'Empty':v_empty})
        Log_downloaded_devices_2=pd.DataFrame({'Devices':list(diff),'Time':None,'Empty':False})
        Log=pd.concat([Log_downloaded_devices_2,Log_downloaded_devices])
        #Save log
        # The log is rewritten after every device so progress survives a crash.
        if Log_exists:
            Log_2=pd.concat([df_log,Log])
        else:
            Log_2=Log
        Log_2.to_csv(output+'\Log.csv', index = False)
        n_no_downloaded_missing_devices=n_missing_devices-len(Downloaded_missing_devices)
        print(str(n_no_downloaded_missing_devices)+' are still pending for download')
        # Optionally pause after each device; typing 'exit' stops the run.
        if iterate:
            if input('Downloaded device:'+i+'\n')=='exit':
                break
        else:
            print('Downloaded device:'+i+'\n')
#Run
# Download everything requested on the command line in one pass
# (iterate=False: do not pause between devices).
Data_download(filter,initial_year,final_year,iterate=False)
| mt4sd/EELabs_paper | Download_data/Photometer_data/Download_EELabs_photometers.py | Download_EELabs_photometers.py | py | 11,585 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"l... |
19876190982 | #!/usr/bin/python3
import os
import argparse
from subprocess import call
if __name__ == '__main__':
    # Single positional argument: the root of the tree to walk.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('basedir', type=str, help='The base directory to walk from')
    cli = arg_parser.parse_args()
    print('The base dir is: {}'.format(cli.basedir))

    # Visit every file under the root and decompress .bz2 archives in place.
    for current_dir, _subdirs, names in os.walk(cli.basedir):
        for name in names:
            path = os.path.join(current_dir, name)
            if not name.endswith('.bz2'):
                print("Ignoring {}".format(path))
                continue
            print("Decompressing {}".format(path))
            call(["bzip2", "-d", path])
| ruizhang84/B565-Data-Mining | src/preprocess/scripts/decompress.py | decompress.py | py | 688 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numbe... |
36153558414 | import unittest
import datetime
import json
from app.textsapi.models.submission import Submission
from app.textsapi.models.text import Text
from app.tests.base import BaseTestCase
def register_ok_submission(self, token):
    """POST a well-formed submission (two plain text strings) for *token*'s user."""
    payload = json.dumps({'submitted_texts': ['text1', 'text2']})
    auth_header = {'Authorization': "Token {}".format(token)}
    return self.client.post(
        '/submission/',
        headers=auth_header,
        data=payload,
        content_type='application/json'
    )
def register_illegal_submission(self, token):
    """POST a malformed submission (one element is an int, not a string)."""
    payload = json.dumps({'submitted_texts': [1, 'text2']})
    auth_header = {'Authorization': "Token {}".format(token)}
    return self.client.post(
        '/submission/',
        headers=auth_header,
        data=payload,
        content_type='application/json'
    )
def get_submissions(self, token):
    """GET all submissions visible to the user authenticated by *token*."""
    return self.client.get(
        '/submission/',
        headers={'Authorization': "Token {}".format(token)},
    )
class TestSubmission(BaseTestCase):
    """End-to-end tests for the /submission/ endpoints.

    Relies on BaseTestCase to provide an authenticated test client
    (self.client, self.token, self.new_user).
    """

    def test_create_valid_submission(self):
        """ Test for creating a valid submission """
        with self.client:
            # valid submission registration
            sub_response = register_ok_submission(self, self.token)
            response_data = json.loads(sub_response.data.decode())
            self.assertTrue(response_data['status']=='success')

    def test_create_invalid_submission(self):
        """ Test for creating an invalid submission """
        with self.client:
            # invalid submission registration: one text is an int, so the
            # endpoint must report validation errors.
            sub_response = register_illegal_submission(self, self.token)
            response_data = json.loads(sub_response.data.decode())
            self.assertTrue(response_data['errors']!=None)

    def test_update_submission(self):
        """ Test for updating a submission """
        sub_response_register = register_ok_submission(self, self.token)
        response_data = json.loads(sub_response_register.data.decode())
        self.assertTrue(response_data['status']=='success')

        # Fetch the stored submission directly to learn its public id.
        sub = [sub for sub in Submission.query(hash_key=self.new_user.username, range_key_condition=Submission.sort.startswith('SUBMISSION_'))][0]

        # Replace the two texts with a single one via PUT.
        sub_response_update = self.client.put(
            '/submission/{}'.format(str(sub.public_id)),
            headers=dict(
                Authorization="Token {}".format(self.token)
            ),
            data=json.dumps(dict(
                submitted_texts=['updated_text1']
            )),
            content_type='application/json'
        )
        update_data = json.loads(sub_response_update.data.decode())
        upd_sub = Submission.get(hash_key=sub.username, range_key=sub.sort)
        self.assertTrue(update_data['status']=='success')
        # The update replaced both original texts with one.
        self.assertTrue(upd_sub.text_count == 1)

    def test_get_submission(self):
        """ Test getting the submissions from database """
        # creating a submission
        sub_register = register_ok_submission(self, self.token)
        response_data = json.loads(sub_register.data.decode())
        self.assertTrue(response_data['status']=='success')

        # getting it from the service
        get_response = get_submissions(self, self.token)
        response_data = json.loads(get_response.data.decode())
        self.assertTrue(response_data['data'][0]['text_count']==2)
        self.assertTrue(isinstance(response_data['data'][0]['texts'], list))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
{
"api_name": "json.dumps",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "app.tests.base.BaseTestCase",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "json.loads",
"l... |
36397304694 | """
Table of Contents
1. drop_null_columns: Drop columns that exceed a threshold of null values.
"""
from pyspark.sql import functions as F, DataFrame
from ..parsing.melting import melt
def drop_null_columns(df: DataFrame, threshold: float = 1.0, subset: list = None) -> DataFrame:
    """
    Remove columns whose fraction of null values meets or exceeds a threshold.

    Inputs
        df: DataFrame.
        threshold: Threshold value. If a column has at least this fraction of nulls, it will be removed.
        subset: List of columns to check. All others will be kept by default regardless of the null count.

    Output
        Updated DataFrame.

    Example
        df = drop_null_columns(df, 0.5, ["col1", "col2"])
        Will remove col1 or col2 if either is at least 50% null.
    """
    # Restrict the evaluation to the requested columns, if any.
    checked = df if subset is None else df.select(subset)
    candidate_columns = checked.columns

    # Turn every candidate column into a null indicator: 1 when null, else 0.
    indicators = checked
    for name in candidate_columns:
        indicators = indicators.withColumn(name, F.when(F.col(name).isNull(), 1).otherwise(0))

    # Single-row frame holding, per column, the total number of nulls.
    totals = indicators.agg(*[F.sum(name).alias(name) for name in candidate_columns])
    row_count = indicators.count()

    # Unpivot so the column names land in 'categories' and their null counts
    # in 'values', then keep only the columns at or above the threshold.
    totals = melt(totals, value_vars=candidate_columns)
    offenders = (
        totals
        .withColumn("fraction", F.col("values") / F.lit(row_count))
        .where(F.col("fraction") >= F.lit(threshold))
        # Pivot back so the offending names become column names again.
        .groupBy().pivot("categories").sum("fraction")
    )

    # Whatever survived the filter is exactly the set of columns to drop.
    return df.drop(*offenders.columns)
| phil-trinh/pyspark_utilities_sample | transformations/calculations/null_funcs.py | null_funcs.py | py | 2,241 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyspark.sql.DataFrame",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.when",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 39,
"usage_type": "name"
},
{
"api_name":... |
39188602086 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from blog import apiviews
# DRF router: auto-generates the standard list/detail routes for each ViewSet.
router = DefaultRouter()
router.register('posts', apiviews.PostViewSet)
router.register('comments', apiviews.CommentViewSet)
router.register('replies', apiviews.ReplyViewSet)
router.register('users', apiviews.UserViewSet )

urlpatterns = [
    # Router-generated CRUD routes mounted at the app root.
    path('', include(router.urls)),
    # Authentication: login/logout and account registration (django-rest-auth).
    path('rest-auth/', include('rest_auth.urls')),
    path('rest-auth/registration/', include('rest_auth.registration.urls')),
    # Category listing and filtering posts by category id.
    path('categories', apiviews.CategoryListView.as_view(), name='categories-list'),
    path('category-filter/<int:pk>/', apiviews.PostsInCategoryView.as_view(), name='category-detail')
]
# urlpatterns += router.urls | MahfuzKhandaker/blogapi | blog/urls.py | urls.py | py | 736 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "blog.apiviews.PostViewSet",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "blog.apiviews",
"line_number": 6,
"usage_type": "name"
},
{
"ap... |
1433290010 | import pytest
import stk
from .case_data import CaseData
# Each param is a factory: given a unique test name it builds a CaseData
# pairing a molecule (constructed from a SMILES-like string) with the
# expected sub-group measurements for that molecule.
@pytest.fixture(
    scope="session",
    params=(
        # Single six-membered carbon ring: only c6_planarity is populated.
        lambda name: CaseData(
            molecule=stk.BuildingBlock("C1=CC=CC=C1"),
            sub_group_data={
                "c6_planarity": [2.7518147481201438e-06],
                "c5n1_planarity": [],
                "x5_planarity": [],
                "c#c_angle": [],
            },
            name=name,
        ),
        # Two five-membered rings: two x5_planarity values expected.
        lambda name: CaseData(
            molecule=stk.BuildingBlock("C1N=CC(CCC2CCOC2)N=1"),
            sub_group_data={
                "c6_planarity": [1.3688005804646254e-06, 0.932064037529801],
                "c5n1_planarity": [],
                "x5_planarity": [1.3688005804646254e-06, 0.932064037529801],
                "c#c_angle": [],
            },
            name=name,
        ),
        # Alkyne-linked rings: planarities plus one C#C angle expected.
        lambda name: CaseData(
            molecule=stk.BuildingBlock("C1=CC=C(C=C1)C#CC2=CN=CC=C2"),
            sub_group_data={
                "c6_planarity": [8.41286151020968e-08],
                "c5n1_planarity": [5.678704369238556e-08],
                "x5_planarity": [],
                "c#c_angle": [179.00063441359868],
            },
            name=name,
        ),
    ),
)
def case_data(request) -> CaseData:
    # Build the CaseData with a name unique to this fixture parametrization.
    return request.param(
        f"{request.fixturename}{request.param_index}",
    )
| JelfsMaterialsGroup/stko | tests/molecular/subgroup/conftest.py | conftest.py | py | 1,311 | python | en | code | 18 | github-code | 6 | [
{
"api_name": "pytest.fixture",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "case_data.CaseData",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "stk.BuildingBlock",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "case_data.CaseData... |
655866557 | from __future__ import annotations
import contextlib
import inspect
import os
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from typing import Any, Callable, Dict, Optional, Union
import numpy as np
import torch
import torch.cuda.amp as amp
from tqdm import tqdm
from .tensorboard_logger import TensorboardLogger
from .wandb_logger import WandbLogger
from ..util import auto_compile, get_constructor_arguments, is_compiled
class DefaultTrainer:
"""Trainer class for 2d/3d training on a single GPU."""
    def __init__(
        self,
        name: Optional[str],
        train_loader: torch.utils.data.DataLoader,
        val_loader: torch.utils.data.DataLoader,
        model: torch.nn.Module,
        loss,
        optimizer,
        metric,
        device: Union[str, torch.device],
        lr_scheduler=None,
        log_image_interval=100,
        mixed_precision=True,
        early_stopping=None,
        logger=TensorboardLogger,
        logger_kwargs: Optional[Dict[str, Any]] = None,
        id_: Optional[str] = None,
        save_root: Optional[str] = None,
        compile_model: Optional[Union[bool, str]] = None,
    ):
        """Store the training configuration; heavy setup is deferred to _initialize.

        Args:
            name: Experiment name. May only be None when *logger* is a
                WandbLogger subclass (which can generate a name itself).
            train_loader: Training data loader; must expose a 'shuffle' attribute.
            val_loader: Validation data loader; must expose a 'shuffle' attribute.
            model: The model to train.
            loss: The loss function.
            optimizer: The optimizer.
            metric: The validation metric.
            device: Device to train on.
            lr_scheduler: Optional learning-rate scheduler.
            log_image_interval: Number of iterations between image logs.
            mixed_precision: Whether to train with automatic mixed precision.
            early_stopping: Optional early-stopping criterion.
            logger: Logger *class* (not instance); instantiated in _initialize.
            logger_kwargs: Extra keyword arguments for the logger class.
            id_: Checkpoint id; defaults to *name*.
            save_root: Optional alternative root folder for checkpoints/logs.
            compile_model: Whether / how to compile the model (pytorch 2).

        Raises:
            TypeError: if *name* is None without a WandbLogger.
            ValueError: if a loader lacks the 'shuffle' attribute.
        """
        if name is None and not issubclass(logger, WandbLogger):
            raise TypeError("Name cannot be None if not using the WandbLogger")

        if not all(hasattr(loader, "shuffle") for loader in [train_loader, val_loader]):
            raise ValueError(f"{self.__class__} requires each dataloader to have 'shuffle' attribute.")

        self._generate_name = name is None
        self.name = name
        self.id_ = id_ or name
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.model = model
        self.loss = loss
        self.optimizer = optimizer
        self.metric = metric
        self.device = device
        self.lr_scheduler = lr_scheduler
        self.log_image_interval = log_image_interval
        self.save_root = save_root
        self.compile_model = compile_model

        # Training progress counters, advanced during fitting.
        self._iteration = 0
        self._epoch = 0
        self._best_epoch = 0

        self.mixed_precision = mixed_precision
        self.early_stopping = early_stopping

        self.scaler = amp.GradScaler() if mixed_precision else None

        self.logger_class = logger
        self.logger_kwargs = logger_kwargs
        # NOTE(review): log_image_interval is assigned a second time here
        # (already set above) — harmless duplication.
        self.log_image_interval = log_image_interval
    @property  # because the logger may generate and set trainer.id on logger.__init__
    def checkpoint_folder(self):
        """Folder where this trainer's checkpoints are written (depends on id_)."""
        assert self.id_ is not None
        # save_root enables saving the checkpoints somewhere else than in the local
        # folder. This is handy for filesystems with limited space, where saving the checkpoints
        # and log files can easily lead to running out of space.
        save_root = getattr(self, "save_root", None)
        return os.path.join("./checkpoints", self.id_) if save_root is None else\
            os.path.join(save_root, "./checkpoints", self.id_)
    @property
    def iteration(self):
        """The current training iteration (read-only)."""
        return self._iteration
    @property
    def epoch(self):
        """The current training epoch (read-only)."""
        return self._epoch
    class Deserializer:
        """Determines how to deserialize the trainer kwargs from serialized 'init_data'.

        For each constructor kwarg, ``load()`` dispatches to a ``load_<name>``
        method when one exists, otherwise to ``load_generic()``.

        Examples:
            To extend the initialization process you can inherit from this Deserializer in an inherited Trainer class.
            Note that `DefaultTrainer.Deserializer.load_generic()` covers most cases already.

            This example adds `the_answer` kwarg, which requires 'calculations' upon initialization:
            >>> class MyTrainer(DefaultTrainer):
            >>>     def __init__(self, *args, the_answer: int, **kwargs):
            >>>         super().__init__(*args, **kwargs)
            >>>         self.the_answer = the_answer  # this allows the default Serializer to save the new kwarg,
            >>>         # see DefaultTrainer.Serializer
            >>>
            >>>     class Deserializer(DefaultTrainer.Deserializer):
            >>>         def load_the_answer(self):
            >>>             generic_answer = self.init_data["the_answer"]
            >>>             # (device dependent) special deserialization
            >>>             if self.trainer_kwargs["device"].type == "cpu":  # accessing previously deserialized kwarg
            >>>                 self.trainer_kwargs["the_answer"] = generic_answer + 1
            >>>             else:
            >>>                 self.trainer_kwargs["the_answer"] = generic_answer * 2
        """

        def __init__(self, init_data: dict, save_path: str, device: Union[str, torch.device]):
            self.init_data = init_data
            self.save_path = save_path
            # populate with deserialized trainer kwargs during deserialization; possibly overwrite 'device'
            self.trainer_kwargs: Dict[str, Any] = dict(
                device=torch.device(self.init_data["device"]) if device is None else torch.device(device)
            )

        def load(self, kwarg_name: str, optional):
            """`optional` is True if self.trainer.__class__.__init__ specifies a default value for 'kwarg_name'"""

            if kwarg_name == "device":
                pass  # deserialized in __init__
            elif kwarg_name.endswith("_loader"):
                self.load_data_loader(kwarg_name, optional)
            else:
                # Prefer a dedicated load_<kwarg_name> method, if defined.
                load = getattr(self, f"load_{kwarg_name}", self.load_generic)
                load(kwarg_name, optional=optional)

        def load_data_loader(self, loader_name, optional) -> None:
            """Rebuild a DataLoader from the stored dataset and loader kwargs."""
            ds = self.init_data.get(loader_name.replace("_loader", "_dataset"))
            if ds is None and optional:
                return

            loader_kwargs = self.init_data[f"{loader_name}_kwargs"]
            loader = torch.utils.data.DataLoader(ds, **loader_kwargs)
            # monkey patch shuffle loader_name to the loader
            loader.shuffle = loader_kwargs.get("shuffle", False)
            self.trainer_kwargs[loader_name] = loader

        def load_generic(
            self,
            kwarg_name: str,
            *dynamic_args,
            optional: bool,
            only_class: bool = False,
            dynamic_kwargs: Optional[Dict[str, Any]] = None,
        ) -> None:
            """Restore a kwarg either verbatim, or by importing its class and
            (unless only_class) instantiating it with the stored kwargs."""
            if kwarg_name in self.init_data:
                self.trainer_kwargs[kwarg_name] = self.init_data[kwarg_name]
                return

            this_cls = self.init_data.get(f"{kwarg_name}_class", None)
            if this_cls is None:
                if optional:
                    return
                else:
                    raise RuntimeError(f"Could not find init data for {kwarg_name} in {self.save_path}")

            assert isinstance(this_cls, str), this_cls
            assert "." in this_cls, this_cls
            # Stored as 'module.path.ClassName'; import the module, get the class.
            cls_p, cls_m = this_cls.rsplit(".", 1)
            this_cls = getattr(import_module(cls_p), cls_m)
            if only_class:
                self.trainer_kwargs[kwarg_name] = this_cls
            else:
                self.trainer_kwargs[kwarg_name] = this_cls(
                    *dynamic_args, **self.init_data.get(f"{kwarg_name}_kwargs", {}), **(dynamic_kwargs or {})
                )

        def load_name(self, kwarg_name: str, optional: bool):
            # The trainer name is the checkpoint folder's directory name.
            self.trainer_kwargs[kwarg_name] = os.path.split(os.path.dirname(self.save_path))[1]

        def load_optimizer(self, kwarg_name: str, optional: bool):
            # The optimizer needs the (already deserialized) model's parameters.
            self.load_generic(kwarg_name, self.trainer_kwargs["model"].parameters(), optional=optional)

        def load_lr_scheduler(self, kwarg_name: str, optional: bool):
            # The scheduler wraps the (already deserialized) optimizer.
            self.load_generic(kwarg_name, self.trainer_kwargs["optimizer"], optional=optional)

        # todo: remove and rename kwarg 'logger' to 'logger_class'
        def load_logger(self, kwarg_name: str, optional: bool):
            assert kwarg_name == "logger"
            self.load_generic("logger", optional=optional, only_class=True)
@staticmethod
def _get_save_dict(save_path, device):
if not os.path.exists(save_path):
raise ValueError(f"Cannot find checkpoint {save_path}")
return torch.load(save_path, map_location=device)
    @classmethod
    def from_checkpoint(cls, checkpoint_folder, name="best", device=None):
        """Recreate a fully initialized trainer from a saved checkpoint.

        Deserializes every constructor kwarg from the checkpoint's 'init'
        data (see Deserializer), instantiates the trainer and restores the
        training state.
        """
        save_path = os.path.join(checkpoint_folder, f"{name}.pt")
        # make sure the correct device is set if we don't have access to CUDA
        if not torch.cuda.is_available():
            device = "cpu"
        save_dict = cls._get_save_dict(save_path, device)

        deserializer = cls.Deserializer(save_dict["init"], save_path, device)

        has_kwargs = False
        deserialized = []
        for name, parameter in inspect.signature(cls).parameters.items():
            if name == "kwargs":
                has_kwargs = True
                continue
            deserializer.load(name, optional=parameter.default is not inspect.Parameter.empty)
            deserialized.append(name)

        # to deserialize kwargs we can't rely on inspecting the signature, so we
        # go through the remaining kwarg names in init data instead
        if has_kwargs:
            kwarg_names = list(set(deserializer.init_data.keys()) - set(deserialized))
            for name in kwarg_names:
                if name.endswith("_kwargs"):
                    continue
                elif name.endswith("_dataset"):
                    deserializer.load(name.replace("dataset", "loader"), optional=False)
                elif name.endswith("_class"):
                    deserializer.load(name.replace("_class", ""), optional=False)
                else:
                    deserializer.load(name, optional=False)

        trainer = cls(**deserializer.trainer_kwargs)
        trainer._initialize(0, save_dict)
        trainer._is_initialized = True
        return trainer
    class Serializer:
        """Implements how to serialize trainer kwargs from a trainer instance.

        For each kwarg, ``dump()`` dispatches to a ``dump_<name>`` method when
        one exists, otherwise to the generic builtin/class/instance dumpers.

        Examples:
            To extend the serialization process you can inherit from this Serializer in a derived Trainer class.
            Note that the methods `dump_generic_builtin()`, `dump_generic_class()` and `dump_generic_instance()`
            called by the `dump()` method when appropriate cover most cases already.

            This example adds `the_answer` kwarg, which requires extra steps on dumping only because we don't keep a
            'the_answer' attribute:
            >>> class MyTrainer(DefaultTrainer):
            >>>     def __init__(self, *args, the_answer: int, **kwargs):
            >>>         super().__init__(*args, **kwargs)
            >>>         # self.the_answer = the_answer  # this would allow the default Serializer to save the new kwarg,
            >>>         # but let's make things more interesting...
            >>>         self.the = the_answer // 10
            >>>         self.answer = the_answer % 10
            >>>
            >>>     class Serializer(DefaultTrainer.Serializer):
            >>>         trainer: MyTrainer
            >>>         def dump_the_answer(self, kwarg_name: str) -> None:  # custom dump method for 'the_answer' kwarg
            >>>             assert kwarg_name == "the_answer"
            >>>             # populate self.init_data with the serialized data required by Deserializer
            >>>             # to restore the trainer kwargs
            >>>             self.init_data["the_answer"] = self.trainer.the * 10 + self.trainer.answer

            This example with both Serializer and Deserializer adds `the_answer` kwarg,
            while saving it in two separate entries 'the' and 'answer'
            >>> class MyTrainer(DefaultTrainer):
            >>>     def __init__(self, *args, the_answer: int, **kwargs):
            >>>         super().__init__(*args, **kwargs)
            >>>         self.the_answer = the_answer
            >>>
            >>>     class Serializer(DefaultTrainer.Serializer):
            >>>         trainer: MyTrainer
            >>>         def dump_the_answer(self, kwarg_name: str):
            >>>             assert kwarg_name == "the_answer"
            >>>             self.init_data.update({
            >>>                 "the": self.trainer.the_answer // 10,
            >>>                 "answer": self.trainer.the_answer % 10
            >>>             })
            >>>
            >>>     class Deserializer(DefaultTrainer.Deserializer):
            >>>         def load_the_answer(self, kwarg_name: str, optional: bool):
            >>>             assert kwarg_name == "the_answer"
            >>>             # 'optional' is True if MyTrainer.__init__ specifies a default value for 'kwarg_name'
            >>>             self.trainer_kwargs[kwarg_name] = self.init_data["the"] * 10 + self.init_data["answer"]
        """

        def __init__(self, trainer: DefaultTrainer):
            self.trainer = trainer
            self.init_data = {}  # to be populated during serialization process

        def dump(self, kwarg_name: str) -> None:
            """Serialize one kwarg into self.init_data, choosing the right dumper."""
            dumper = getattr(self, f"dump_{kwarg_name}", None)
            if dumper is not None:
                dumper(kwarg_name)
            elif kwarg_name.endswith("_loader"):
                self.dump_data_loader(kwarg_name)
            elif kwarg_name.endswith("_class"):
                self.dump_generic_class(kwarg_name)
            elif not hasattr(self.trainer, kwarg_name):
                raise AttributeError(
                    f"{self.trainer.__class__} missing attribute '{kwarg_name}' "
                    f"or special dump method {self.trainer.__class__}.Serializer.dump_{kwarg_name}()"
                )
            else:
                assert hasattr(self.trainer, kwarg_name)
                obj = getattr(self.trainer, kwarg_name)
                # Builtin values can be stored verbatim; everything else is
                # stored as class path + constructor kwargs.
                if obj is None or type(obj) in (
                    bool,
                    bytearray,
                    bytes,
                    dict,
                    float,
                    frozenset,
                    int,
                    list,
                    set,
                    str,
                    tuple,
                ):
                    self.dump_generic_builtin(kwarg_name)
                else:
                    self.dump_generic_instance(kwarg_name)

        def dump_generic_builtin(self, kwarg_name: str) -> None:
            """Store a plain builtin value as-is."""
            assert hasattr(self.trainer, kwarg_name)
            self.init_data[kwarg_name] = getattr(self.trainer, kwarg_name)

        def dump_generic_class(self, kwarg_name: str) -> None:
            """Store a class-valued kwarg as its dotted import path."""
            assert hasattr(self.trainer, kwarg_name)
            assert kwarg_name.endswith("_class")
            obj = getattr(self.trainer, kwarg_name)
            self.init_data[kwarg_name] = None if obj is None else f"{obj.__module__}.{obj.__name__}"

        def dump_generic_instance(self, kwarg_name: str) -> None:
            """Store an instance as its class path plus reconstructed constructor kwargs."""
            assert hasattr(self.trainer, kwarg_name)
            instance = getattr(self.trainer, kwarg_name)
            self.init_data.update(
                {
                    f"{kwarg_name}_class": f"{instance.__class__.__module__}.{instance.__class__.__name__}",
                    f"{kwarg_name}_kwargs": get_constructor_arguments(instance),
                }
            )

        def dump_device(self, kwarg_name: str):
            # torch.device is not picklable as-is; store its string form.
            assert hasattr(self.trainer, kwarg_name)
            self.init_data[kwarg_name] = str(getattr(self.trainer, kwarg_name))

        def dump_data_loader(self, kwarg_name: str) -> None:
            """Store a DataLoader as its dataset plus the loader's constructor kwargs."""
            assert hasattr(self.trainer, kwarg_name)
            loader = getattr(self.trainer, kwarg_name)
            if loader is None:
                return
            self.init_data.update(
                {
                    f"{kwarg_name.replace('_loader', '_dataset')}": loader.dataset,
                    f"{kwarg_name}_kwargs": get_constructor_arguments(loader),
                }
            )

        def dump_logger(self, kwarg_name: str):  # todo: remove and rename kwarg 'logger' to 'logger_class'
            self.dump_generic_class(f"{kwarg_name}_class")

        def dump_model(self, kwarg_name: str):
            # Compiled models cannot be introspected; use the class/kwargs the
            # trainer recorded before compilation.
            if is_compiled(self.trainer.model):
                self.init_data.update(
                    {
                        "model_class": self.trainer._model_class,
                        "model_kwargs": self.trainer._model_kwargs,
                    }
                )
            else:
                self.dump_generic_instance("model")
    def _build_init(self) -> Dict[str, Any]:
        """Serialize every constructor argument of this trainer so it can be
        re-created from a checkpoint (see Serializer/Deserializer)."""
        serializer = self.Serializer(self)
        for name in inspect.signature(self.__class__).parameters:
            # special rules to serialize kwargs
            # if a trainer class inherits from DefaultTrainer and has **kwargs
            # they need to be saved in self._kwargs
            if name == "kwargs":
                if not hasattr(self, "_kwargs"):
                    msg = "The trainer class has **kwargs in its signature, but is missing the _kwargs attribute. " +\
                        "Please add self._kwargs to its __init__ function"
                    raise RuntimeError(msg)
                kwargs = getattr(self, "_kwargs")
                for kwarg_name in kwargs:
                    serializer.dump(kwarg_name)
                continue
            serializer.dump(name)
        return serializer.init_data
def _initialize(self, iterations, load_from_checkpoint, epochs=None):
assert self.train_loader is not None
assert self.val_loader is not None
assert self.model is not None
assert self.loss is not None
assert self.optimizer is not None
assert self.metric is not None
assert self.device is not None
if load_from_checkpoint is not None:
self.load_checkpoint(load_from_checkpoint)
if sum((iterations is not None, epochs is not None)) != 1:
raise ValueError(
"Exactly one of 'iterations' or 'epochs' has to be specified to initialize the trainer."
f"You have passed 'iterations'={iterations} and 'epochs'={epochs}"
)
if epochs is None:
epochs = int(np.ceil(float(iterations) / len(self.train_loader)))
else:
iterations = epochs * len(self.train_loader)
self.max_iteration = self._iteration + iterations
self.max_epoch = self._epoch + epochs
if not getattr(self, "_is_initialized", False):
# check if we compile the model (only supported by pytorch 2)
# to enable (de)serialization of compiled models, we keep track of the model class and kwargs
if is_compiled(self.model):
warnings.warn(
"You have passed a compiled model to the trainer."
"It will not be possible to (de)serialize the trainer with it."
"If you want to be able to do this please pass the normal model."
"It can be automatically compiled by setting 'compile_model' to True"
)
self._model_class = f"{self.model.__class__.__module__}.{self.model.__class__.__name__}"
self._model_kwargs = get_constructor_arguments(self.model)
self.model = auto_compile(self.model, self.compile_model)
self.model.to(self.device)
self.loss.to(self.device)
# this saves all the information that is necessary
# to fully load the trainer from the checkpoint
self.init_data = self._build_init()
if self.logger_class is None:
self.logger = None
else:
# may set self.name if self.name is None
save_root = getattr(self, "save_root", None)
self.logger = self.logger_class(self, save_root, **(self.logger_kwargs or {}))
try:
os.makedirs(self.checkpoint_folder, exist_ok=True)
except PermissionError:
warnings.warn(
f"The checkpoint folder at {self.checkpoint_folder} could not be created."
"The most likely reason for this is that you copied the checkpoint somewhere else,"
"so we skip this error to enable loading the model from this checkpoint."
)
pass
best_metric = np.inf
return best_metric
def save_checkpoint(self, name, best_metric, **extra_save_dict):
save_path = os.path.join(self.checkpoint_folder, f"{name}.pt")
extra_init_dict = extra_save_dict.pop("init", {})
save_dict = {
"iteration": self._iteration,
"epoch": self._epoch,
"best_epoch": self._best_epoch,
"best_metric": best_metric,
"model_state": self.model.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"init": self.init_data | extra_init_dict,
}
save_dict.update(**extra_save_dict)
if self.scaler is not None:
save_dict.update({"scaler_state": self.scaler.state_dict()})
if self.lr_scheduler is not None:
save_dict.update({"scheduler_state": self.lr_scheduler.state_dict()})
torch.save(save_dict, save_path)
def load_checkpoint(self, checkpoint="best"):
if isinstance(checkpoint, str):
save_path = os.path.join(self.checkpoint_folder, f"{checkpoint}.pt")
if not os.path.exists(save_path):
warnings.warn(f"Cannot load checkpoint. {save_path} does not exist.")
return
save_dict = torch.load(save_path)
elif isinstance(checkpoint, dict):
save_dict = checkpoint
else:
raise RuntimeError
self._iteration = save_dict["iteration"]
self._epoch = save_dict["epoch"]
self._best_epoch = save_dict["best_epoch"]
self.best_metric = save_dict["best_metric"]
model_state = save_dict["model_state"]
# to enable loading compiled models
compiled_prefix = "_orig_mod."
model_state = OrderedDict(
[(k[len(compiled_prefix):] if k.startswith(compiled_prefix) else k, v) for k, v in model_state.items()]
)
self.model.load_state_dict(model_state)
# we need to send the network to the device before loading the optimizer state!
self.model.to(self.device)
self.optimizer.load_state_dict(save_dict["optimizer_state"])
if self.scaler is not None:
self.scaler.load_state_dict(save_dict["scaler_state"])
if self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(save_dict["scheduler_state"])
return save_dict
def fit(self, iterations=None, load_from_checkpoint=None, epochs=None, save_every_kth_epoch=None):
"""Run neural network training.
Exactly one of 'iterations' or 'epochs' has to be passed.
Parameters:
iterations [int] - how long to train, specified in iterations (default: None)
load_from_checkpoint [str] - path to a checkpoint from where training should be continued (default: None)
epochs [int] - how long to train, specified in epochs (default: None)
save_every_kth_epoch [int] - save checkpoints after every kth epoch separately.
The corresponding checkpoints will be saved with the naming scheme 'epoch-{epoch}.pt'. (default: None)
"""
best_metric = self._initialize(iterations, load_from_checkpoint, epochs)
print(
"Start fitting for",
self.max_iteration - self._iteration,
"iterations / ",
self.max_epoch - self._epoch,
"epochs",
)
print("with", len(self.train_loader), "iterations per epoch")
if self.mixed_precision:
train_epoch = self._train_epoch_mixed
validate = self._validate_mixed
print("Training with mixed precision")
else:
train_epoch = self._train_epoch
validate = self._validate
print("Training with single precision")
progress = tqdm(
total=epochs * len(self.train_loader) if iterations is None else iterations,
desc=f"Epoch {self._epoch}", leave=True
)
msg = "Epoch %i: average [s/it]: %f, current metric: %f, best metric: %f"
train_epochs = self.max_epoch - self._epoch
for _ in range(train_epochs):
# run training and validation for this epoch
t_per_iter = train_epoch(progress)
current_metric = validate()
# perform all the post-epoch steps:
# apply the learning rate scheduler
if self.lr_scheduler is not None:
self.lr_scheduler.step(current_metric)
# save this checkpoint as the new best checkpoint if
# it has the best overall validation metric
if current_metric < best_metric:
best_metric = current_metric
self._best_epoch = self._epoch
self.save_checkpoint("best", best_metric)
# save this checkpoint as the latest checkpoint
self.save_checkpoint("latest", best_metric)
# if we save after every k-th epoch then check if we need to save now
if save_every_kth_epoch is not None and (self._epoch + 1) % save_every_kth_epoch == 0:
self.save_checkpoint(f"epoch-{self._epoch + 1}", best_metric)
# if early stopping has been specified then check if the stopping condition is met
if self.early_stopping is not None:
epochs_since_best = self._epoch - self._best_epoch
if epochs_since_best > self.early_stopping:
print("Stopping training because there has been no improvement for", self.early_stopping, "epochs")
break
self._epoch += 1
progress.set_description(msg % (self._epoch, t_per_iter, current_metric, best_metric), refresh=True)
print(f"Finished training after {self._epoch} epochs / {self._iteration} iterations.")
print(f"The best epoch is number {self._best_epoch}.")
if self._generate_name:
self.name = None
# TODO save the model to wandb if we have the wandb logger
if isinstance(self.logger, WandbLogger):
self.logger.get_wandb().finish()
def _backprop(self, loss):
loss.backward()
self.optimizer.step()
def _backprop_mixed(self, loss):
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
def _train_epoch(self, progress):
return self._train_epoch_impl(progress, contextlib.nullcontext, self._backprop)
def _train_epoch_mixed(self, progress):
return self._train_epoch_impl(progress, amp.autocast, self._backprop_mixed)
def _forward_and_loss(self, x, y):
pred = self.model(x)
if self._iteration % self.log_image_interval == 0:
if pred.requires_grad:
pred.retain_grad()
loss = self.loss(pred, y)
return pred, loss
def _train_epoch_impl(self, progress, forward_context, backprop: Callable[[torch.Tensor], None]):
self.model.train()
n_iter = 0
t_per_iter = time.time()
for x, y in self.train_loader:
x, y = x.to(self.device), y.to(self.device)
self.optimizer.zero_grad()
with forward_context():
pred, loss = self._forward_and_loss(x, y)
backprop(loss)
lr = [pm["lr"] for pm in self.optimizer.param_groups][0]
if self.logger is not None:
self.logger.log_train(self._iteration, loss, lr, x, y, pred, log_gradients=True)
self._iteration += 1
n_iter += 1
if self._iteration >= self.max_iteration:
break
progress.update(1)
t_per_iter = (time.time() - t_per_iter) / n_iter
return t_per_iter
def _validate(self):
return self._validate_impl(contextlib.nullcontext)
def _validate_mixed(self):
return self._validate_impl(amp.autocast)
def _validate_impl(self, forward_context):
self.model.eval()
metric_val = 0.0
loss_val = 0.0
with torch.no_grad():
for x, y in self.val_loader:
x, y = x.to(self.device), y.to(self.device)
with forward_context():
pred, loss = self._forward_and_loss(x, y)
metric = self.metric(pred, y)
loss_val += loss.item()
metric_val += metric.item()
metric_val /= len(self.val_loader)
loss_val /= len(self.val_loader)
if self.logger is not None:
self.logger.log_validation(self._iteration, metric_val, loss_val, x, y, pred)
return metric_val
| constantinpape/torch-em | torch_em/trainer/default_trainer.py | default_trainer.py | py | 29,151 | python | en | code | 42 | github-code | 6 | [
{
"api_name": "typing.Optional",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.utils",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "torch.utils",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"lin... |
12110709697 | import numpy as np
from tqdm import tqdm
flip_inst = {}
flip_inst['e'] = [1, 0]
flip_inst['w'] = [-1, 0]
flip_inst['se'] = [0, -1]
flip_inst['sw'] = [-1, -1]
flip_inst['ne'] = [1, 1]
flip_inst['nw'] = [0, 1]
def flip_tile(instr, tiles):
tile = np.array([0, 0])
while instr:
for fi, dir in flip_inst.items():
if instr.startswith(fi):
tile += dir
instr = instr[len(fi):]
continue
if list(tile) in tiles:
del tiles[tiles.index(list(tile))]
else:
tiles.append(list(tile))
return tiles
def count_neighbors(matrix, x, y):
num = 0
for _, dir in flip_inst.items():
if matrix[x + dir[0], y + dir[1]] == 1:
num += 1
return num
def evolve(matrix):
to_flip = np.zeros(matrix.shape)
for x in range(1, matrix.shape[0]-1):
for y in range(1, matrix.shape[1]-1):
neighbors = count_neighbors(matrix, x, y)
# Any black tile with zero or more than 2 black tiles immediately
# adjacent to it is flipped to white.
if matrix[x, y] == 1:
if neighbors == 0 or neighbors > 2:
to_flip[x, y] = 1
# Any white tile with exactly 2 black tiles immediately adjacent to
# it is flipped to black.
else:
if neighbors == 2:
to_flip[x, y] = 1
matrix[to_flip == 1] = (matrix[to_flip == 1] + 1) % 2
return matrix
if __name__ == "__main__":
file_name = "test_24.txt"
file_name = "input_24.txt"
tiles = []
for line in open(file_name):
tiles = flip_tile(line.strip(), tiles)
print(len(tiles)) # 521
tiles = np.array(tiles)
delx = max(tiles[:, 0]) - min(tiles[:, 0])
dely = max(tiles[:, 1]) - min(tiles[:, 1])
matrix = np.zeros([200 + delx, 200 + dely], np.int)
for t in tiles:
matrix[t[0]+100, t[1]+100] = 1
for _ in tqdm(range(100)):
matrix = evolve(matrix)
print(np.sum(matrix)) # 4242
| scjohnson/aoc_2020 | solution_24.py | solution_24.py | py | 2,048 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": ... |
73154395388 | import os
import pickle
import argparse
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from Model.INPLIM import Doctor
from data_utils import CodeLevelDataset
from utils import train_eval
def args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, default='',
help='Set the root path of the dataset')
parser.add_argument('--devices', type=str, default='cpu',
help='Setting the IDs of GPU devices.')
parser.add_argument('--epochs', type=int, default=15,
help='Setting the number of epochs to run.')
parser.add_argument('--batch_size', type=int, default=512,
help='Setting the mini-batch size.')
parser.add_argument('--weight_decay', type=float, default=1e-3,
help='Setting weight decay')
parser.add_argument('--lr', type=float, default=1e-3,
help='Setting the learning rate.')
parser.add_argument('--dim', type=int, default=128,
help='Setting the inner dim of the model.')
parser.add_argument('--max_len', type=int, default=200,
help='Setting the maximum number of code to use for a patient.')
parser.add_argument('--drop_context', type=float, default=0.3,
help='Setting drop rate of the context-aware branch.')
parser.add_argument('--drop_time', type=float, default=0.3,
help='Setting drop rate of the time-aware branch.')
parser.add_argument('--save_model', action='store_true',
help='Whether to save the parameters of the trained model.',
default=True)
parser.add_argument('--save_dir', type=str, default='./saved_models',
help='Setting the dir of saving trained model.')
return parser
def main(opts):
if opts.devices != 'cpu':
os.environ['CUDA_VISIBLE_DEVICES'] = opts.devices
dataset = pickle.load(open(opts.data_root, 'rb'))
train_set = CodeLevelDataset(dataset=dataset, max_len=opts.max_len, phase='train')
valid_set = CodeLevelDataset(dataset=dataset, max_len=opts.max_len, phase='val')
test_set = CodeLevelDataset(dataset=dataset, max_len=opts.max_len, phase='test')
train_loader = DataLoader(train_set, batch_size=opts.batch_size, num_workers=2, shuffle=True)
val_loader = DataLoader(valid_set, batch_size=1, num_workers=1, shuffle=False)
test_loader = DataLoader(test_set, batch_size=1, num_workers=1, shuffle=False)
net = Doctor(features=dataset['features'], out_dim=2, emb_dim=opts.dim, dropout_context=opts.drop_context,
dropout_time=opts.drop_time)
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=opts.lr, weight_decay=opts.weight_decay, eps=0)
if opts.devices != 'cpu':
net = torch.nn.DataParallel(net).cuda()
criterion = criterion.cuda()
best_auc, best_epoch, best_test_nll, best_test_auc_roc = 0, 0, 0, 0
model_dict = {}
for epoch in range(opts.epochs):
train_eval(opts, net, criterion, optimizer, train_loader, epoch, phase='Train')
_, auc = train_eval(opts, net, criterion, optimizer, val_loader, epoch, phase='Valid')
if auc > best_auc:
best_auc, best_epoch = auc, epoch
best_test_nll, best_test_auc_roc = train_eval(opts, net, criterion, optimizer, test_loader, epoch,
phase='Test')
model_dict['opts'] = opts
model_dict['states'] = net.state_dict()
print('Best Test NLL:{:.4f}\t Best AUROC:{:.4f}'.format(best_test_nll, best_test_auc_roc))
if not os.path.exists(opts.save_dir):
os.makedirs(opts.save_dir)
if opts.save_model:
torch.save(model_dict, os.path.join(opts.save_dir, 'Model-AUC-{:.4f}.pth'.format(best_test_auc_roc)))
if __name__ == '__main__':
opts = args().parse_args()
main(opts)
| xlbryantx/INPLIM | main.py | main.py | py | 4,064 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "data_utils.CodeLe... |
2052216012 | from fastai import vision, metrics
from fastai.callback import hooks
from fastai.utils import mem
import numpy as np
from os import path
import torch
vision.defaults.device = vision.defaults.device if torch.cuda.is_available() else torch.device('cpu')
# Download data and get path
fastai_path = vision.untar_data(vision.URLs.CAMVID)
PATH = str(fastai_path)
print('CAMVID paths:')
print(fastai_path.ls())
BATCH_SIZE = 64
WD = 1e-2
LR = 1e-4
PCT_START_FINETUNE = 0.9 # given the default of 0.3, it means that your LR is going up for 30% of your iterations and then decreasing over the last 70%
PCT_START = 0.8
EPOCHS_FINETUNE = 12
EPOCHS = 12
# Define images and label path
LABEL_PATH = path.sep.join([PATH, 'labels'])
IMAGE_PATH = path.sep.join([PATH, 'images'])
# Define paths of image and label
image_paths = vision.get_image_files(IMAGE_PATH)
label_paths = vision.get_image_files(LABEL_PATH)
# Load some samples to see what's inside
rand_indx = np.random.randint(0, len(image_paths))
sample_image_path = image_paths[rand_indx]
sample_image = vision.open_image(sample_image_path)
sample_image.show(figsize=(6, 6))
# Function to match between image and its label path. E.g. image path: /root/.fastai/data/camvid/images/0006R0_f02910.png; label path: /root/.fastai/data/camvid/labels/0006R0_f02910_P.png
segment_name_fn = lambda image_path: path.sep.join([LABEL_PATH, f'{image_path.stem}_P{image_path.suffix}'])
# Load image segmentation by defaults (segment image given in dataset) and vision.open_mask()
sample_label_path = segment_name_fn(sample_image_path)
sample_label = vision.open_image(sample_label_path)
sample_label.show(figsize=(6, 6))
# Note sample segment after preprocess based on vision.open_mask just has 1 depth instead of 3 depth as origin segment
sample_label_preprocessed = vision.open_mask(sample_label_path)
sample_label_preprocessed.show(figsize=(6, 6))
print(sample_label_preprocessed.data) # sample_label_preprocessed is also fastai tensor
# get image dimension (height and width)
image_size = np.array(sample_label_preprocessed.shape[1:])
data_size = image_size//2
objects_in_image = np.loadtxt(path.sep.join([PATH, 'codes.txt']), dtype=str)
# Determine batch size by gpu free memory to avoid CUDA out pf memory
if torch.cuda.is_available():
free = mem.gpu_mem_get_free_no_cache()
if free > 8200:
BATCH_SIZE = 8
else:
BATCH_SIZE = 4
print(f'Using batch size of {BATCH_SIZE}, have {free}MB of GPU RAM free')
origin_data = vision.SegmentationItemList.from_folder(IMAGE_PATH).split_by_fname_file(path.sep.join([PATH, 'valid.txt'])).label_from_func(segment_name_fn, classes=objects_in_image)
data = origin_data.transform(vision.get_transforms(), size=data_size, tfm_y=True).databunch(bs=BATCH_SIZE).normalize(vision.imagenet_stats) # tfm_y=True means apply transform to label
print(data.show_batch(2, figsize=(10, 7)))
print(data.show_batch(2, figsize=(10, 7), ds_type=vision.DatasetType.Valid))
# Define accuracy
object2id = {value: key for key, value in enumerate(objects_in_image)}
void_index = object2id['Void']
def camvid_accuracy(inputs, target):
target = target.squeeze(1)
mask = target != void_index
return (inputs.argmax(dim=1)[mask] == target[mask]).float().mean()
# Define model
learner = vision.unet_learner(data, vision.models.resnet34, metrics=camvid_accuracy, wd=WD)
# Find good LR
learner.lr_find()
learner.recorder.plot()
learner.fit_one_cycle(EPOCHS_FINETUNE, max_lr=slice(LR), pct_start=PCT_START_FINETUNE)
learner.save('stage-1-34-unet')
# Show results
learner.show_results(rows=3, figsize=(8, 9))
# After warming up, start to train all network
learner.unfreeze()
learner.fit_one_cycle(EPOCHS, max_lr=slice(LR/400, LR/4), pct_start=PCT_START)
learner.save('stage-2-34-unet') | lykhahaha/Mine | Fastai_Tutorial/lesson3-camvid.py | lesson3-camvid.py | py | 3,776 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastai.vision.defaults",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "fastai.vision",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.... |
26349073306 | import functools
from enum import Enum
import json
import re
class SCJDEntry:
def __init__(self):
self.data = {}
def set_title(self, title):
self.data['title'] = title
def set_id(self, idx):
self.data['idx'] = idx
def glue_pronounce(self, prn):
if 'prn' in self.data:
self.data['prn'] += prn
else:
self.data['prn'] = prn
def push_kanji(self, kanji):
if 'kanjis' in self.data:
self.data['kanjis'].append(kanji)
else:
self.data['kanjis'] = [kanji]
def push_void_definition(self):
if 'defs' in self.data:
self.data['defs'].append({})
else:
self.data['defs'] = [{}]
def set_definition_order(self, order):
self.data['defs'][-1]['order'] = order
def glue_definition_indicator(self, ind):
if 'defs' in self.data:
if 'ind' in self.data['defs'][-1]:
self.data['defs'][-1]['ind'] += ind
else:
self.data['defs'][-1]['ind'] = ind
def push_definition_chinese_translation(self, trans):
if 'chi_transs' in self.data['defs'][-1]:
self.data['defs'][-1]['chi_transs'].append(trans)
else:
self.data['defs'][-1]['chi_transs'] = [trans]
def push_definition_english_translation(self, trans):
if 'eng_transs' in self.data['defs'][-1]:
self.data['defs'][-1]['eng_transs'].append(trans)
else:
self.data['defs'][-1]['eng_transs'] = [trans]
def push_void_definition_sentence_example(self):
if 'sent_exs' in self.data['defs'][-1]:
self.data['defs'][-1]['sent_exs'].append({})
else:
self.data['defs'][-1]['sent_exs'] = [{}]
def glue_definition_sentence_example(self, jpn_sent, chi_sent):
if jpn_sent is not None:
if 'jpn_sent' in self.data['defs'][-1]['sent_exs'][-1]:
self.data['defs'][-1]['sent_exs'][-1]['jpn_sent'] += jpn_sent
else:
self.data['defs'][-1]['sent_exs'][-1]['jpn_sent'] = jpn_sent
if chi_sent is not None:
if 'chi_sent' in self.data['defs'][-1]['sent_exs'][-1]:
self.data['defs'][-1]['sent_exs'][-1]['chi_sent'] += chi_sent
else:
self.data['defs'][-1]['sent_exs'][-1]['chi_sent'] = chi_sent
def push_void_phrase(self):
if 'phrs' in self.data:
self.data['phrs'].append({})
else:
self.data['phrs'] = [{}]
def set_phrase_idx(self, idx):
self.data['phrs'][-1]['idx'] = idx
def glue_phrase_first_title(self, title):
if 'titles' in self.data['phrs'][-1]:
self.data['phrs'][-1]['titles'][0] += title
else:
self.data['phrs'][-1]['titles'] = [title]
def push_phrase_title(self, title):
self.data['phrs'][-1]['titles'].append(title)
def push_void_phrase_section(self):
if 'secs' in self.data['phrs'][-1]:
self.data['phrs'][-1]['secs'].append({})
else:
self.data['phrs'][-1]['secs'] = [{}]
def push_phrase_section_translation(self, trans):
if 'transs' in self.data['phrs'][-1]['secs'][-1]:
self.data['phrs'][-1]['secs'][-1]['transs'].append(trans)
else:
self.data['phrs'][-1]['secs'][-1]['transs'] = [trans]
def push_void_phrase_section_sentence_example(self):
if 'sent_exs' in self.data['phrs'][-1]['secs'][-1]:
self.data['phrs'][-1]['secs'][-1]['sent_exs'].append({})
else:
self.data['phrs'][-1]['secs'][-1]['sent_exs'] = [{}]
def glue_phrase_section_sentence_example(self, jpn_sent, chi_sent):
if jpn_sent is not None:
if 'jpn_sent' in self.data['phrs'][-1]['secs'][-1]['sent_exs'][-1]:
self.data['phrs'][-1]['secs'][-1]['sent_exs'][-1]['jpn_sent'] += jpn_sent
else:
self.data['phrs'][-1]['secs'][-1]['sent_exs'][-1]['jpn_sent'] = jpn_sent
if chi_sent is not None:
if 'chi_sent' in self.data['phrs'][-1]['secs'][-1]['sent_exs'][-1]:
self.data['phrs'][-1]['secs'][-1]['sent_exs'][-1]['chi_sent'] += chi_sent
else:
self.data['phrs'][-1]['secs'][-1]['sent_exs'][-1]['chi_sent'] = chi_sent
def get_data(self):
return self.data
class SCJDStateMachine:
class Node(Enum):
IGNORE = 0
ROOT = 1
D_ENTRY = 2
D_PRN = 3
D_DEF = 4
A = 5
HWG_SPAN = 10
HW_SPAN = 11
HV_SPAN = 12
GRAMB_SPAN = 13
SEMB_SPAN = 14
TRG_SPAN = 15
OUP_LABEL_SPAN = 16
TRANS_SPAN = 17
IDMB_SPAN = 18
IDMSEC_SPAN = 19
IDM_SPAN = 20
EXG_SPAN = 21
EX_SPAN = 22
IND_SPAN = 23
CB_SPAN = 24
CSEC_SPAN = 25
CW_SPAN = 26
CV_SPAN = 27
class State(Enum):
NUMBNESS = 0
BEGIN = 1
GRAMB = 10
GRAMB_SEMB = 11
GRAMB_OUP_LABEL = 12
GRAMB_TRG_AFTER_OUP_LABEL = 13
GRAMB_EXG = 14
IDMB = 20
IDMB_FIRST_IDM = 21
IDMB_NOT_FIRST_IDM = 22
IDMB_SEMB = 23
IDMB_EXG = 24
CB = 30
CB_GRAMB = 31
CB_SEMB = 32
CB_EXG = 33
def get_gramb_cluster():
return [SCJDStateMachine.State.GRAMB, SCJDStateMachine.State.GRAMB_SEMB, SCJDStateMachine.State.GRAMB_OUP_LABEL, SCJDStateMachine.State.GRAMB_EXG, SCJDStateMachine.State.GRAMB_TRG_AFTER_OUP_LABEL]
def get_idmb_cluster():
return [SCJDStateMachine.State.IDMB, SCJDStateMachine.State.IDMB_FIRST_IDM, SCJDStateMachine.State.IDMB_NOT_FIRST_IDM, SCJDStateMachine.State.IDMB_SEMB, SCJDStateMachine.State.IDMB_EXG]
def get_cb_cluster():
return [SCJDStateMachine.State.CB, SCJDStateMachine.State.CB_GRAMB, SCJDStateMachine.State.CB_SEMB, SCJDStateMachine.State.CB_EXG]
IGNORE_SPAN = {'hvg', 'gp', 'x_xoh', 'ty_pinyin', 'x_xdh', 'sn', 'gl', 'cwg', 'cvg', 'tail', 'ty_日中比較', 'x_xopt', 'pr', 'ty_参考', 'ty_参考参照', 'ty_項目参照', 'ty_注意', 'gr', 'ty_文化', 'ph', 'xr', 'xrlabelGroup', 'xrlabel', 'underline'}
INHERIT_SPAN = {'rf', 'tg_ind', 't_fld', 'subEnt'}
def __init__(self):
self.reinit()
def reinit(self):
self.stk = [SCJDStateMachine.Node.ROOT]
self.sta = SCJDStateMachine.State.BEGIN
def get_node(self):
return self.stk[-1]
def get_state(self):
return self.sta
def push_node(self, node):
self.stk.append(node)
def pop_node(self):
return self.stk.pop()
def numb(self):
self.sta = SCJDStateMachine.State.NUMBNESS
def is_numb(self):
return self.get_state() == SCJDStateMachine.State.NUMBNESS
def startelement_move(self, tag, attrs):
if tag == 'd:entry':
self.push_node(SCJDStateMachine.Node.D_ENTRY)
elif tag == 'span':
attrs_keys = attrs.getQNames()
if 'class' in attrs_keys:
attrs_class_values = attrs['class'].split(' ')
if not SCJDStateMachine.INHERIT_SPAN.isdisjoint(attrs_class_values):
self.push_node(self.get_node())
elif not SCJDStateMachine.IGNORE_SPAN.isdisjoint(attrs_class_values):
self.push_node(SCJDStateMachine.Node.IGNORE)
elif 'hwg' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.HWG_SPAN)
elif 'hw' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.HW_SPAN)
elif 'hv' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.HV_SPAN)
elif 'gramb' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.GRAMB_SPAN)
if self.get_state() == SCJDStateMachine.State.BEGIN:
self.sta = SCJDStateMachine.State.GRAMB
elif self.get_state() in SCJDStateMachine.State.get_cb_cluster():
self.sta = SCJDStateMachine.State.CB_GRAMB
else:
raise RuntimeError(f'{self.get_state()}')
elif 'semb' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.SEMB_SPAN)
if self.get_state() in SCJDStateMachine.State.get_gramb_cluster():
self.sta = SCJDStateMachine.State.GRAMB_SEMB
elif self.get_state() in SCJDStateMachine.State.get_idmb_cluster():
self.sta = SCJDStateMachine.State.IDMB_SEMB
elif self.get_state() in SCJDStateMachine.State.get_cb_cluster():
self.sta = SCJDStateMachine.State.CB_SEMB
else:
raise RuntimeError(f'{self.get_state()}')
elif 'trg' in attrs_class_values:
"""
Generally, there is only one "oup_label" span in "gramb-semb" span, except in some rare cases.
This happens when there is more than one kinds of translation in one "gramb-semb" span.
An example is where id = j_CRJC000115, and title = 相手役.
That "[芝居など] 配角" and "[ダンス] 舞伴" show up here is weird.
And this is the reason why I put "tg_ind" span into INHERIT_SPAN instead of IGNORE_SPAN.
Otherwise, key of ind in entry will become "芝居などダンス" instead of "[芝居など][ダンス]".
"""
if 'x_xd2' in attrs_class_values:
if self.get_state() == SCJDStateMachine.State.GRAMB_OUP_LABEL:
self.sta = SCJDStateMachine.State.GRAMB_TRG_AFTER_OUP_LABEL
elif self.get_state() == SCJDStateMachine.State.GRAMB_EXG:
pass
elif self.get_state() == SCJDStateMachine.State.GRAMB_SEMB:
pass
elif self.get_state() == SCJDStateMachine.State.GRAMB_TRG_AFTER_OUP_LABEL:
pass
else:
raise RuntimeError(f'{self.get_state()}')
self.push_node(SCJDStateMachine.Node.TRG_SPAN)
elif 'oup_label' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.OUP_LABEL_SPAN)
if self.get_state() == SCJDStateMachine.State.GRAMB_SEMB:
self.sta = SCJDStateMachine.State.GRAMB_OUP_LABEL
elif self.get_state() == SCJDStateMachine.State.GRAMB_TRG_AFTER_OUP_LABEL:
self.sta = SCJDStateMachine.State.GRAMB_OUP_LABEL
"""
Generally, there is no "oup_label" span in "idmb" span, except in some rare cases.
An example is where id = j_CRJC010600, and title = 塞翁が馬.
"""
elif self.get_state() == SCJDStateMachine.State.IDMB_SEMB:
pass
else:
raise RuntimeError(f'{self.get_state()}')
elif 'trans' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.TRANS_SPAN)
elif 'idmb' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.IDMB_SPAN)
self.sta = SCJDStateMachine.State.IDMB
elif 'idmsec' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.IDMSEC_SPAN)
elif 'idm' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.IDM_SPAN)
if self.get_state() == SCJDStateMachine.State.IDMB or self.get_state() == SCJDStateMachine.State.IDMB_EXG or self.get_state() == SCJDStateMachine.State.IDMB_SEMB:
self.sta = SCJDStateMachine.State.IDMB_FIRST_IDM
elif self.get_state() == SCJDStateMachine.State.IDMB_FIRST_IDM:
self.sta = SCJDStateMachine.State.IDMB_NOT_FIRST_IDM
elif 'exg' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.EXG_SPAN)
if self.get_state() in SCJDStateMachine.State.get_gramb_cluster():
self.sta = SCJDStateMachine.State.GRAMB_EXG
elif self.get_state() in SCJDStateMachine.State.get_idmb_cluster():
self.sta = SCJDStateMachine.State.IDMB_EXG
elif self.get_state() in SCJDStateMachine.State.get_cb_cluster():
self.sta = SCJDStateMachine.State.CB_EXG
else:
raise RuntimeError(f'{self.get_state()}')
elif 'ex' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.EX_SPAN)
elif 'ind' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.IND_SPAN)
elif 'fld' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.IND_SPAN)
elif 'cb' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.CB_SPAN)
self.sta = SCJDStateMachine.State.CB
elif 'csec' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.CSEC_SPAN)
elif 'cw' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.CW_SPAN)
elif 'cv' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.CV_SPAN)
else:
raise RuntimeError(f"SPAN with {attrs_class_values} in class key is not defined")
else:
raise RuntimeError(f"SPAN with {attrs_keys} key is not defined")
elif tag == 'd:prn':
self.push_node(SCJDStateMachine.Node.D_PRN)
elif tag == 'd:def':
self.push_node(SCJDStateMachine.Node.D_DEF)
elif tag == 'a':
self.push_node(SCJDStateMachine.Node.A)
else:
raise RuntimeError(f"TAG {tag} is not defined")
def endelement_move(self, tag):
self.pop_node()
def startdocument_move(self):
self.reinit()
def enddocument_move(self):
pass
def log(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if args[0].debug:
if func.__name__ == 'startElement':
print(f'SM: {args[0].sm.stk}, State: {args[0].sm.sta}')
print(f'Tag: {args[1]}, class: {args[2].getValueByQName("class") if "class" in args[2].getQNames() else None}')
print(f'{args[0].entry.get_data()}')
print(f'==========')
elif func.__name__ == 'endElement':
print(f'Tag: {args[1]}')
print(f'==========')
elif func.__name__ == 'characters':
print(f'Chars: {args[1]}')
print(f'==========')
return func(*args, **kwargs)
return wrapper
class SCJDController:
    """SAX-style content handler for one dictionary entry.

    Drives an SCJDStateMachine through startElement/endElement/characters
    events (the xml.sax ContentHandler interface) and accumulates the parsed
    fields into an SCJDEntry. Once the machine is "numb", all further events
    for the current document are ignored.
    """

    def __init__(self, debug=False):
        self.reinit()
        # Stored but not consulted in this class — presumably read elsewhere; TODO confirm.
        self.debug = debug

    def reinit(self):
        # Fresh state machine and empty entry accumulator for the next document.
        self.sm = SCJDStateMachine()
        self.entry = SCJDEntry()

    def get_entry(self):
        # Expose the accumulated entry payload.
        return self.entry.data

    @log
    def startElement(self, tag, attrs):
        if self.sm.is_numb():
            return
        self.sm.startelement_move(tag, attrs)
        if self.sm.get_node() == SCJDStateMachine.Node.D_ENTRY:
            # Only entries whose id begins with "j_CRJC" are of interest;
            # anything else numbs the machine so the rest of the document is skipped.
            if re.search("^j_CRJC.*", attrs['id']) is not None:
                self.entry.set_title(attrs['d:title'])
                self.entry.set_id(attrs['id'])
            else:
                self.sm.numb()
        elif self.sm.get_node() == SCJDStateMachine.Node.SEMB_SPAN:
            if self.sm.get_state() in SCJDStateMachine.State.get_gramb_cluster():
                # New sense definition; 'ord' attribute gives its order, defaulting to '1'.
                self.entry.push_void_definition()
                if 'ord' in attrs.getQNames():
                    self.entry.set_definition_order(attrs['ord'])
                else:
                    self.entry.set_definition_order('1')
            elif self.sm.get_state() in SCJDStateMachine.State.get_idmb_cluster():
                self.entry.push_void_phrase_section()
            elif self.sm.get_state() in SCJDStateMachine.State.get_cb_cluster():
                self.entry.push_void_phrase_section()
            else:
                raise RuntimeError(f"Node {self.sm.get_node()} with State {self.sm.get_state()} startElement function is not defined")
        elif self.sm.get_node() == SCJDStateMachine.Node.IDMSEC_SPAN:
            self.entry.push_void_phrase()
            self.entry.set_phrase_idx(attrs['id'])
        elif self.sm.get_node() == SCJDStateMachine.Node.EXG_SPAN:
            # Example group: attaches to a phrase section or to a definition,
            # depending on which state cluster the machine is currently in.
            if self.sm.get_state() in SCJDStateMachine.State.get_idmb_cluster():
                self.entry.push_void_phrase_section_sentence_example()
            elif self.sm.get_state() in SCJDStateMachine.State.get_gramb_cluster():
                self.entry.push_void_definition_sentence_example()
            elif self.sm.get_state() in SCJDStateMachine.State.get_cb_cluster():
                self.entry.push_void_phrase_section_sentence_example()
            else:
                raise RuntimeError(f"Node {self.sm.get_node()} with State {self.sm.get_state()} startElement function is not defined")
        elif self.sm.get_node() == SCJDStateMachine.Node.CSEC_SPAN:
            self.entry.push_void_phrase()
            self.entry.set_phrase_idx(attrs['id'])

    @log
    def endElement(self, tag):
        if self.sm.is_numb():
            return
        self.sm.endelement_move(tag)

    @log
    def characters(self, chars):
        if self.sm.is_numb():
            return

        def process_chars(chars):
            # Strip surrounding whitespace from SAX character data.
            return chars.strip()

        if self.sm.get_node() == SCJDStateMachine.Node.HW_SPAN:
            self.entry.glue_pronounce(process_chars(chars))
        elif self.sm.get_node() == SCJDStateMachine.Node.HV_SPAN:
            self.entry.push_kanji(process_chars(chars))
        elif self.sm.get_node() == SCJDStateMachine.Node.TRANS_SPAN:
            # Translation text: route to Chinese/English definition translations,
            # phrase-section translations, or example glosses depending on state.
            if self.sm.get_state() == SCJDStateMachine.State.GRAMB_SEMB:
                self.entry.push_definition_chinese_translation(process_chars(chars))
            elif self.sm.get_state() == SCJDStateMachine.State.GRAMB_TRG_AFTER_OUP_LABEL:
                self.entry.push_definition_chinese_translation(process_chars(chars))
            elif self.sm.get_state() == SCJDStateMachine.State.GRAMB_OUP_LABEL:
                self.entry.push_definition_english_translation(process_chars(chars))
            elif self.sm.get_state() == SCJDStateMachine.State.GRAMB_EXG:
                self.entry.glue_definition_sentence_example(None, process_chars(chars))
            elif self.sm.get_state() == SCJDStateMachine.State.IDMB_SEMB:
                self.entry.push_phrase_section_translation(chars.strip())
            elif self.sm.get_state() == SCJDStateMachine.State.IDMB_EXG:
                self.entry.glue_phrase_section_sentence_example(None, chars.strip())
            elif self.sm.get_state() == SCJDStateMachine.State.CB_SEMB:
                self.entry.push_phrase_section_translation(chars.strip())
            elif self.sm.get_state() == SCJDStateMachine.State.CB_EXG:
                self.entry.glue_phrase_section_sentence_example(None, chars.strip())
            else:
                raise RuntimeError(f"Node {self.sm.get_node()} with State {self.sm.get_state()} characters function is not defined")
        elif self.sm.get_node() == SCJDStateMachine.Node.IDM_SPAN:
            if self.sm.get_state() == SCJDStateMachine.State.IDMB_FIRST_IDM:
                self.entry.glue_phrase_first_title(process_chars(chars))
            elif self.sm.get_state() == SCJDStateMachine.State.IDMB_NOT_FIRST_IDM:
                self.entry.push_phrase_title(process_chars(chars))
            else:
                raise RuntimeError(f"Node {self.sm.get_node()} with State {self.sm.get_state()} characters function is not defined")
        elif self.sm.get_node() == SCJDStateMachine.Node.EX_SPAN:
            # Example sentence text (first argument); gloss arrives separately via TRANS_SPAN.
            if self.sm.get_state() == SCJDStateMachine.State.IDMB_EXG:
                self.entry.glue_phrase_section_sentence_example(process_chars(chars), None)
            elif self.sm.get_state() == SCJDStateMachine.State.CB_EXG:
                self.entry.glue_phrase_section_sentence_example(process_chars(chars), None)
            elif self.sm.get_state() == SCJDStateMachine.State.GRAMB_EXG:
                self.entry.glue_definition_sentence_example(process_chars(chars), None)
            else:
                raise RuntimeError(f"Node {self.sm.get_node()} with State {self.sm.get_state()} characters function is not defined")
        elif self.sm.get_node() == SCJDStateMachine.Node.IND_SPAN:
            self.entry.glue_definition_indicator(process_chars(chars))
        elif self.sm.get_node() == SCJDStateMachine.Node.CW_SPAN:
            self.entry.glue_phrase_first_title(process_chars(chars))
        elif self.sm.get_node() == SCJDStateMachine.Node.CV_SPAN:
            self.entry.push_phrase_title(process_chars(chars))

    def startDocument(self):
        self.sm.startdocument_move()
        self.reinit()

    def endDocument(self):
        if self.sm.is_numb():
            return
        self.sm.enddocument_move()
        # print(self.entry.data['title'])
| Leundo/apple-dictionary-extractor | ADParser/scjd_controller.py | scjd_controller.py | py | 17,976 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "enum.Enum",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number":... |
32796024261 | from gevent import monkey
monkey.patch_all()
import gevent
import socket
import re
import dns
import log
LOG = log.get_logger('dns-proxy')
class DNSServer(object):
    """Minimal UDP DNS proxy.

    Queries whose qname matches a rule loaded from ./resolv.txt are answered
    locally with a single A record; everything else is forwarded to
    `nameserver`. Each incoming datagram is handled in its own gevent greenlet.
    """

    def __init__(self, host='0.0.0.0', port=53, nameserver='114.114.114.114'):
        self.sock = None  # bound lazily in serve_forever()
        self.host = host
        self.port = port
        self.nameserver = nameserver
        # The 'current' constant substitutes "<current>" targets in resolv.txt.
        self.engine = MatchEngine('./resolv.txt', const={'current': '192.168.199.180'})

    def on_query(self, sip, sport, req):
        # Handle one raw DNS request datagram received from (sip, sport).
        def lookup_remote_nameserver(que):
            # Forward the query upstream and return the parsed reply.
            # NOTE(review): the socket is never closed and recv() has no
            # timeout — confirm this is acceptable for long-running use.
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            if s.sendto(dns.pack(que), (self.nameserver, 53)) == 0:
                LOG.error('failed to query')
                raise Exception('query failed')
            _resp = s.recv(2048)
            LOG.debug("raw response: {}".format(repr(_resp)))
            resp = dns.unpack(_resp)
            return resp
        # end lookup_remote_nameserver
        LOG.debug("raw query: {}".format(repr(req)))
        que = dns.unpack(req)
        LOG.debug("query: {}".format(que))
        # Only the first question is consulted for local resolution.
        host = self.engine.lookup(que.questions[0].qname)
        if not host:
            # Resolve via the remote nameserver.
            resp = lookup_remote_nameserver(que)
        else:
            # Build a reply with one A record (type=1, class IN, ttl 255).
            qh = que.header
            qq = que.questions[0]
            resp = dns.DNSResponse(
                header=dns.DNSHeader(
                    id=qh.id, qr=1, opcode=qh.opcode,
                    aa=qh.aa, tc=qh.tc, rd=qh.rd, ra=qh.ra,
                    rcode=qh.rcode, qdcount=1, ancount=1, nscount=0, arcount=0),
                questions=que.questions,
                answers=[dns.DNSAnswer(
                    name=qq.qname, type=1, class_=1, ttl=255,
                    rdlength=4, rdata=host)])
        _resp = dns.pack(resp)
        LOG.debug("raw response: {}".format(repr(_resp)))
        LOG.debug("response: {}".format(resp))
        self.sock.sendto(_resp, (sip, sport))

    def serve_forever(self):
        # Bind the UDP socket and dispatch every datagram to a greenlet.
        # Blocks until KeyboardInterrupt; the socket is always closed on exit.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind((self.host, self.port))
        try:
            while True:
                msg, (ip, port) = self.sock.recvfrom(2048)
                gevent.spawn(self.on_query, ip, port, msg)
        except KeyboardInterrupt:
            LOG.info("exit.")
        finally:
            self.sock.close()
class MatchEngine(object):
    """Maps domain names to host addresses using regex rules from a file.

    Each line of the rule file is "<pattern> <host>"; a host written as
    "<name>" is replaced by the corresponding entry from the `const` mapping.
    """

    def __init__(self, resolv_file, const=None):
        self.resolv_file = resolv_file
        self._const = const if isinstance(const, dict) else {}
        self._rules = self._read_rules_from_file(self.resolv_file)

    def _read_rules_from_file(self, f):
        # Parse the rule file into {compiled_pattern: host}.
        compiled = {}
        with open(f) as handle:
            lines = handle.read().split('\n')[:-1]
        for line in lines:
            pattern, target = line.split()
            # "<name>" targets are indirected through the constants table.
            if target.startswith('<') and target.endswith('>'):
                target = self._const[target[1:-1]]
            compiled[re.compile(pattern)] = target
        return compiled

    def lookup(self, domain):
        """Return the host for the first rule matching `domain`, else None."""
        hits = (host for rule, host in self._rules.items() if rule.match(domain))
        return next(hits, None)

    def reload(self):
        """Re-read the rule file, replacing the in-memory rule table."""
        self._rules = self._read_rules_from_file(self.resolv_file)
if __name__ == '__main__':
    # Run the proxy in the foreground; blocks until interrupted.
    ds = DNSServer()
    ds.serve_forever()
| PeerXu/death-star | death_star/dns_proxy.py | dns_proxy.py | py | 3,334 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "gevent.monkey.patch_all",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "gevent.monkey",
"line_number": 2,
"usage_type": "name"
},
{
"api_name": "log.get_logger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "socket.socket",
... |
14504710213 | import os
import numpy as np
import pandas as pd
from PIL import Image
from collections import Counter
import torch
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
import torchvision.transforms as T
import spacy
# spaCy English pipeline, used only for tokenization in Vocabulary.tokenize.
spacy_eng = spacy.load("en_core_web_sm")

# defining the transform to be applied
# Baseline preprocessing: resize, random 224x224 crop, ImageNet normalization.
transforms = T.Compose([
    T.Resize(226),
    T.RandomCrop(224),
    T.ToTensor(),
    T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

# Same preprocessing plus light stochastic augmentation (used when a dataset
# is constructed with do_augmentation=True).
transforms_advanced = T.Compose([
    T.Resize(226),
    T.RandomCrop(224),
    T.ToTensor(),
    T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    T.RandomAffine(10),
    T.RandomGrayscale(0.05),
    T.RandomHorizontalFlip(0.05),
    T.RandomVerticalFlip(0.05),
    T.GaussianBlur(5),
    T.RandomErasing(0.05)
])
class Vocabulary:
    """Word-level vocabulary with reserved <PAD>/<SOS>/<EOS>/<UNK> tokens.

    Words are admitted into the vocabulary once they have been seen
    `freq_threshold` times across the training captions.
    """

    def __init__(self, freq_threshold):
        # Reserved ids 0-3 for padding, sequence delimiters and unknowns.
        self.pad_idx, self.sos_idx, self.eos_idx, self.unk_idx = 0, 1, 2, 3
        self.pad, self.sos, self.eos, self.unk = "<PAD>", "<SOS>", "<EOS>", "<UNK>"
        self.itos = {
            self.pad_idx: self.pad,
            self.sos_idx: self.sos,
            self.eos_idx: self.eos,
            self.unk_idx: self.unk,
        }
        # Reverse mapping: token string -> integer id.
        self.stoi = {token: idx for idx, token in self.itos.items()}
        self.freq_threshold = freq_threshold

    def __len__(self):
        return len(self.itos)

    def size(self):
        return len(self.itos)

    @staticmethod
    def tokenize(text):
        """Lower-cased spaCy tokenization of `text`."""
        return [tok.text.lower() for tok in spacy_eng.tokenizer(text)]

    def build_vocab(self, sentence_list):
        """Count words across `sentence_list`; admit each word the moment its
        count reaches the frequency threshold."""
        counts = Counter()
        next_idx = 4  # ids 0-3 are reserved
        for sentence in sentence_list:
            for word in self.tokenize(sentence):
                counts[word] += 1
                if counts[word] == self.freq_threshold:
                    self.stoi[word] = next_idx
                    self.itos[next_idx] = word
                    next_idx += 1

    def numericalize(self, text):
        """For each word in the text corresponding index token for that word
        form the vocab built as list; unknown words map to <UNK>."""
        unk_id = self.stoi["<UNK>"]
        return [self.stoi.get(token, unk_id) for token in self.tokenize(text)]
class FlickrDataset(Dataset):
    """
    FlickrDataset

    PyTorch Dataset over a Flickr-style captions CSV with columns "image"
    (file name under root_dir) and "caption" (text). __getitem__ returns a
    (transformed image tensor, <SOS> + tokens + <EOS> caption tensor) pair.
    """

    def __init__(self,
                 root_dir,
                 captions_file,
                 transform=None,
                 freq_threshold=5,
                 vocab=None,
                 data_limit=None,
                 do_augmentation=False,
                 augmentation_probability=0.2):
        # root_dir: directory containing the image files
        # captions_file: CSV with "image" and "caption" columns
        # vocab: pass an existing Vocabulary (e.g. for val/test splits);
        #        otherwise one is built from this split's captions
        # data_limit: optional row-count truncation for faster iteration
        self.root_dir = root_dir
        self.df = pd.read_csv(captions_file)
        # NOTE(review): `transform` is stored but __getitem__ applies the
        # module-level `transforms`/`transforms_advanced` instead — confirm intended.
        self.transform = transform
        self.random = np.random.RandomState()
        self.do_augmentation = do_augmentation
        # NOTE(review): not consulted anywhere in this class.
        self.augmentation_probability = augmentation_probability
        # Get image and caption colum from the dataframe
        self.imgs = self.df["image"]
        self.captions = self.df["caption"]
        # If needed truncating the data for faster running
        if data_limit is not None:
            self.imgs = self.imgs[:data_limit]
            self.captions = self.captions[:data_limit]
        # Initialize vocabulary and build vocab
        if vocab is None:
            self.vocab = Vocabulary(freq_threshold)
            self.vocab.build_vocab(self.captions.tolist())
        else:
            self.vocab = vocab

    def __len__(self):
        # return len(self.df)
        return self.imgs.shape[0]

    def __getitem__(self, idx):
        caption = self.captions[idx]
        img_name = self.imgs[idx]
        img_location = os.path.join(self.root_dir, img_name)
        img_pil = Image.open(img_location).convert("RGB")
        # do some random augmentations
        if not self.do_augmentation:
            img = transforms(img_pil)
        else:
            img = transforms_advanced(img_pil)
        # numericalize the caption text
        caption_vec = []
        caption_vec += [self.vocab.stoi["<SOS>"]]
        caption_vec += self.vocab.numericalize(caption)
        caption_vec += [self.vocab.stoi["<EOS>"]]
        return img, torch.tensor(caption_vec)
class CapsCollate:
    """Batch collate function: stacks image tensors and pads caption sequences.

    With max_len > 0, captions are truncated or padded to exactly max_len
    columns, and the final column is forced to the EOS id wherever it is not
    padding.
    """

    def __init__(self, vocab, batch_first=False, max_len=0):
        self.pad_idx = vocab.pad_idx
        self.eos_idx = vocab.eos_idx
        self.batch_first = batch_first
        self._max_len = max_len

    def __call__(self, batch):
        images = torch.cat([item[0].unsqueeze(0) for item in batch], dim=0)
        captions = pad_sequence([item[1] for item in batch],
                                batch_first=self.batch_first,
                                padding_value=self.pad_idx)
        if self._max_len > 0:
            rows, cols = captions.shape[0], captions.shape[1]
            if cols >= self._max_len:
                # Too long: cut down to the fixed width.
                captions = captions[:, :self._max_len]
            else:
                # Too short: extend with padding columns.
                filler = torch.full((rows, self._max_len - cols),
                                    self.pad_idx, dtype=torch.long)
                captions = torch.cat([captions, filler], dim=1)
            # Force EOS in the last column, except where it is already padding.
            last = captions[:, -1]
            captions[:, -1] = last.where(last == self.pad_idx,
                                         torch.tensor(self.eos_idx))
        return images, captions
| danarip/ImageCaptionGenerator | source/data_preprocessing.py | data_preprocessing.py | py | 5,555 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "spacy.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torc... |
33195061509 | import numpy as np
import cv2
import copy
from time import sleep
import datetime
# from progress.bar import Bar
def Rodar(cam):
    """Generator: read frames from `cam` (a cv2.VideoCapture source),
    accumulate a motion heat-map via MOG background subtraction, and yield
    each blended frame as a multipart-JPEG chunk suitable for an MJPEG
    HTTP stream.

    Side effect: writes a daily snapshot under static/reports/ when the
    clock reads exactly 22:29:01.
    """
    capture = cv2.VideoCapture(cam)
    background_subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
    #length = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    # bar = Bar('Processing Frames', max=length)
    first_iteration_indicator = 1
    while True:
        sleep(1/60)  # throttle the read loop to ~60 iterations per second
        ret, frame = capture.read()
        # If first frame
        if first_iteration_indicator == 1:
            # First frame is used only to size the accumulator image.
            first_frame = copy.deepcopy(frame)  # NOTE(review): never used afterwards
            height, width = frame.shape[:2]
            accum_image = np.zeros((height, width), np.uint8)
            first_iteration_indicator = 0
        else:
            filter = background_subtractor.apply(frame)  # remove the background
            threshold = 2
            maxValue = 2
            ret, th1 = cv2.threshold(filter, threshold, maxValue, cv2.THRESH_BINARY)
            # add to the accumulated image
            accum_image = cv2.add(accum_image, th1)
            color_image_video = cv2.applyColorMap(accum_image, cv2.COLORMAP_SUMMER)
            video_frame = cv2.addWeighted(frame, 0.7, color_image_video, 0.7, 0)
            color_image = cv2.applyColorMap(accum_image, cv2.COLORMAP_HOT)
            result_overlay = cv2.addWeighted(frame, 0.7, color_image, 0.7, 0)
            relatorio = datetime.datetime.now();
            data = "{:02d}-{:02d}-{:02d}".format(relatorio.day, relatorio.month, relatorio.replace(year=20).year)
            # Persist one report image per day at exactly 22:29:01.
            if relatorio.hour == 22 and relatorio.minute == 29 and relatorio.second == 1:
                cv2.imwrite("static/reports/report_" + data + ".jpg", result_overlay)
            #cv2.imshow("Video Original" , result_overlay)
            ret, jpeg = cv2.imencode('.jpg', result_overlay)
            send_frame = jpeg.tobytes()
            # multipart/x-mixed-replace chunk for the streaming endpoint
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + send_frame + b'\r\n\r\n')
    # bar.next()
    # bar.finish()
    # save the final heatmap
    # cv2.imwrite('diff-overlay.jpg', result_overlay)
    # cleanup
    #capture.release()
    #cv2.destroyAllWindows()
| rnanc/MOBYDATA | services/motion_heatmap.py | motion_heatmap.py | py | 2,177 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.bgsegm.createBackgroundSubtractorMOG",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.bgsegm",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_nam... |
38586292194 | import cv2
import numpy as np
# Load the source image and work in HSV so brightness (V) and saturation (S)
# can be adjusted independently.
img = cv2.imread('images/saitama.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # convert it to hsv
width = img.shape[1]
height = img.shape[0]
channel = img.shape[2]  # NOTE(review): unused below
# Per-pixel adjustment of the highlighted band: brighten V, desaturate S;
# `step` controls the sweep speed (rows/columns added per animation frame).
increase_v = 40
decrease_s = 10
step = 2
# image transformation: animate a brighten/desaturate sweep across the image
print("chon huong di cua animation: ")
print("1.Left -> Right")
print("2.Right -> Left")
print("3.Down")
print("4.Up")
flag = input()
if flag == '3':
    # downward sweep: grow the highlighted band from the top row
    for y in range(1, height, step):
        h, s, v = cv2.split(hsv[0: y, :])
        # clamp to [0, 255] while brightening / desaturating
        v = np.where(v <= 255 - increase_v, v + increase_v, 255)
        s = np.where(s >= 0 + decrease_s, s - decrease_s, 0)
        hsv[0: y, :] = cv2.merge((h, s, v))
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        cv2.imshow('animation', img)
        cv2.waitKey(1)
elif flag == '1':
    # rightward sweep: grow the band from the left edge
    for x in range(1, width, step):
        h, s, v = cv2.split(hsv[:, 0: x])
        v = np.where(v <= 255 - increase_v, v + increase_v, 255)
        s = np.where(s >= 0 + decrease_s, s - decrease_s, 0)
        hsv[:, 0:x] = cv2.merge((h, s, v))
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        cv2.imshow('animation', img)
        cv2.waitKey(1)
elif flag == '2':
    # leftward sweep: grow the band from the right edge
    for x in range(width - 2, 0, -step):
        h, s, v = cv2.split(hsv[:, x: -1])
        v = np.where(v <= 255 - increase_v, v + increase_v, 255)
        s = np.where(s >= 0 + decrease_s, s - decrease_s, 0)
        hsv[:, x:-1] = cv2.merge((h, s, v))
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        cv2.imshow('animation', img)
        cv2.waitKey(1)
elif flag == '4':
    # upward sweep: grow the band from the bottom row
    for y in range(height - 2, 0, -step):
        h, s, v = cv2.split(hsv[y: -1, :])
        v = np.where(v <= 255 - increase_v, v + increase_v, 255)
        s = np.where(s >= 0 + decrease_s, s - decrease_s, 0)
        hsv[y: -1, :] = cv2.merge((h, s, v))
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        cv2.imshow('animation', img)
        cv2.waitKey(1)
else:
    print('nhap cac gia tri tu 1 den 4')
cv2.destroyAllWindows()
| 19522515/CS231.L21-Computer-Vision-Project | Source code/pptanimation_swipe.py | pptanimation_swipe.py | py | 2,089 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cv2.split",
"line_num... |
26433388736 | import pandas as pd
import numpy as np
import json
import os
from pydub import AudioSegment
from scipy.io.wavfile import read
import stft
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
import random
import signal
import cPickle as pickle
from functools import partial
import multiprocessing as mp
import time
from multiprocessing.pool import Pool
# Root directory holding the per-species audio folders and the json_files index.
audio_path = '/media/jon/external_data/audio/'
# audio_path = "data/target_species/"

# Species whose recordings are processed by default.
species_list = ["Poecile_atricapillus",
                "Poecile_rufescens",
                "Regulus_calendula",
                "Regulus_satrapa"]
def make_dirs(audio_path):
    '''
    Read species names from the json_files directory and create a wav file
    directory ("<name>_wav") for each species under audio_path.

    Directories that already exist are left untouched (safe to re-run).
    '''
    names = []
    for (_, _, filenames) in os.walk(audio_path + "json_files"):
        names.extend(filenames)
    for name in names:
        # exist_ok avoids crashing when the script is run more than once
        os.makedirs(audio_path + name + '_wav', exist_ok=True)
def dir_list(audio_path):
    '''
    INPUT:
        path to data file directories
    OUTPUT:
        list of (mp3_directory, wav_directory) tuples, one per species file
        found under audio_path/json_files; the wav directory is the mp3
        directory name with a "_wav" suffix.
    '''
    pairs = []
    for (_, _, filenames) in os.walk(audio_path + "json_files"):
        for name in filenames:
            mp3_dir = audio_path + name
            pairs.append((mp3_dir, mp3_dir + "_wav"))
    return pairs
def make_file_list(directory_list):
    '''
    INPUT:
        list of tuples containing input, output directories
    OUTPUT:
        list of (mp3_path, wav_path) tuples: every file under each input
        directory paired with its target wav path (same stem, .wav suffix).
    '''
    pairs = []
    for mp3_dir, wav_dir in directory_list:
        for (_, _, filenames) in os.walk(mp3_dir):
            for file_id in filenames:
                # swap the 3-char extension for "wav"
                pairs.append((mp3_dir + "/" + file_id,
                              wav_dir + "/" + file_id[:-3] + "wav"))
    return pairs
def make_wav_list(directory_list):
    '''
    INPUT:
        list of tuples containing wav file directories
    OUTPUT:
        list of (full_path, file_id) tuples for every wav file found under
        the second element of each tuple.
    '''
    wavs = []
    for directory in directory_list:
        wav_dir = directory[1]
        for (_, _, filenames) in os.walk(wav_dir):
            wavs.extend((wav_dir + "/" + file_id, file_id)
                        for file_id in filenames)
    return wavs
def file_list(path, species):
    '''
    Create a list of files for further processing: every file found inside
    each species' "<name>_wav/" directory under `path`.
    '''
    found = []
    for sp in species:
        species_dir = path + sp + "_wav/"
        for (_, _, filenames) in os.walk(species_dir):
            found.extend(species_dir + f for f in filenames)
    return found
def make_mono(file_list):
    '''
    overwrite wav files as mono - other functions will have errors with stereo files
    '''
    # In-place conversion: each file is re-exported over itself with one channel.
    for f in file_list:
        sound = AudioSegment.from_wav(f)
        sound = sound.set_channels(1)
        sound.export(f, format="wav")
def make_spec(file_list):
    '''
    INPUT:
        list of wav file - files will be converted to mono in function
    OUTPUT:
        dictionary with filename as key, spectrogram as value
    '''
    # NOTE(review): writes a scratch file named "temp" in the CWD for every
    # input and never removes it — confirm this is acceptable.
    spectrograms = {}
    for f in file_list:
        sound = AudioSegment.from_wav(f)
        sound = sound.set_channels(1)
        sound.export("temp", format="wav")
        # read() returns (sample_rate, data); only the sample data is used
        a = read("temp")
        # arr = np.array(a[1], dtype=float) already np array - don't need to convert
        spec = stft.spectrogram(a[1])
        spectrograms[f] = spec
    return spectrograms
def norm_spec(spectrograms):
    '''
    INPUT:
        dict of file name: spectrogram
    OUTPUT:
        dict of file name: l2 normalized spectrogram
    '''
    return {name: normalize(spec, norm="l2")
            for name, spec in spectrograms.items()}
def whiten(normalized):
    '''
    INPUT:
        dict of file name: spectrogram
    OUTPUT:
        dict of file name: pca whitened spectrogram (40 components)
    '''
    whitened = {}
    # copy=False lets fit_transform operate in place where possible
    pca = PCA(n_components=40, copy=False, whiten=True)
    for name, spec in normalized.items():
        whitened[name] = pca.fit_transform(spec)
    # BUG FIX: the original built this dict but never returned it,
    # so callers always received None.
    return whitened
def random_sample(species_files, n=10):
    '''
    INPUT:
        a dict of species: file list pairs
    OUTPUT:
        a randomly selected list of up to n files from each species
    '''
    # BUG FIX: the original iterated the dict without .items(), used the
    # Python 2 xrange, and called random.sample() without the required
    # sample-size argument — it raised on every call.
    subset = []
    for files in species_files.values():
        # min() keeps species with fewer than n files from raising ValueError
        indices = random.sample(range(len(files)), min(n, len(files)))
        subset.extend(files[i] for i in indices)
    return subset
| jonathanwoodard/Melospiza | source/transform_audio.py | transform_audio.py | py | 4,665 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.walk",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 66,
"usa... |
5114460856 |
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm as LogNorm
def compare_fields(delta_F, delta_DM, R_sm, pc_meta):
# from Metin 2019
'''
A function for comparing the fields of delta_F and delta_DM with hist2d.
'''
fig = plt.figure(figsize = (10, 7))
bins_f = np.arange(-0.6,0.5, 0.01)
bins_dm = np.arange(0., 8., 0.04)
hist2d, edges_dm, edges_f = np.histogram2d(np.ndarray.flatten(delta_DM.field_data), np.ndarray.flatten(delta_F.field_data),
bins=[bins_dm, bins_f],density=True)
X, Y = np.meshgrid(edges_dm, edges_f, indexing='ij')
plt.pcolormesh(X,Y, hist2d, cmap='Greys',
norm=LogNorm(vmin=2e-3, vmax=100.))
cbar = plt.colorbar()
cbar.set_label('normalized density')
XCon, YCon = np.meshgrid(edges_dm[0:-1]+(edges_dm[1]-edges_dm[0])/2 ,
edges_f[0:-1]+(edges_f[1]-edges_f[1])/2 ,
indexing='ij')
# plt.contour(XCon,YCon, hist2d, levels = 3)
plt.xlabel('$\\delta_{DM}$')
plt.ylabel('$\\delta_{F}$')
plt.title('$\\delta_{DM} - \\delta_{F}$ of '
+ '{}, \nRA: {}, DE: {} '.format(pc_meta['Name'], pc_meta['RA'], pc_meta['DE'])
+ '$R_{sm}$ = ' + str(R_sm))
return fig
def compare_fields_general(field_1, field_2, extent, ncell_1, ncell_2, vmin = 2e-3, vmax = 100, countour = True):
    # from Metin 2019
    '''
    Plot a log-scaled 2D histogram comparing two fields (field_1 on x,
    field_2 on y), optionally overlaid with density contours.

    extent = [x_1, y_1, x_2, y_2] : axis ranges for the histogram bins
    ncell_1, ncell_2              : number of bin edges per axis
    vmin, vmax                    : colour-scale limits for LogNorm
    countour                      : overlay 5-level contours when True

    Returns the matplotlib Figure.
    '''
    fig = plt.figure(figsize = (10, 10))
    x_1, y_1, x_2, y_2 = extent
    bins_1 = np.linspace(x_1, x_2, ncell_1)
    bins_2 = np.linspace(y_1, y_2, ncell_2)
    hist2d, edges_1, edges_2 = np.histogram2d(np.ndarray.flatten(field_1.field_data), np.ndarray.flatten(field_2.field_data),
                                   bins=[bins_1, bins_2], density=True)
    X, Y = np.meshgrid(edges_1, edges_2, indexing='ij')
    plt.pcolormesh(X, Y, hist2d, cmap='Greys',
                   norm=LogNorm(vmin=vmin, vmax=vmax))
    cbar = plt.colorbar()
    cbar.set_label('normalized density')
    # Bin centers for the contour overlay.
    # BUG FIX: the y term previously read (edges_2[1]-edges_2[1])/2 == 0,
    # leaving the plotted contours shifted by half a bin along y.
    XCon, YCon = np.meshgrid(edges_1[0:-1] + (edges_1[1] - edges_1[0]) / 2,
                             edges_2[0:-1] + (edges_2[1] - edges_2[0]) / 2,
                             indexing='ij')
    if countour:
        plt.contour(XCon, YCon, hist2d, levels = 5)
    return fig
def sphere_kernel(radius, normalize = True):
    """Build a cubic kernel of side 2*int(radius)+1 whose True (or weighted)
    cells lie strictly inside a sphere of radius int(radius) centred in the
    cube. With normalize=True the boolean mask is scaled to sum to 1."""
    r = int(radius)
    side = 2 * r + 1
    axes = np.meshgrid(np.arange(side), np.arange(side), np.arange(side))
    # squared distance of every voxel from the cube centre
    dist_sq = sum((axis - r) ** 2 for axis in axes)
    kernel = dist_sq < r ** 2
    if normalize:
        kernel = kernel / kernel.sum()
    return kernel
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.arange"... |
19686502833 | import sys
import time
from datetime import datetime
from textwrap import dedent
import requests
import telegram
from environs import Env
from loguru import logger
from telegram import ParseMode
def send_telegram_message(chat_id: int, bot: telegram.Bot, telegram_message: str) -> None:
    """Send `telegram_message` (HTML-formatted) to `chat_id` via `bot` and log it."""
    bot.send_message(chat_id=chat_id, text=telegram_message, parse_mode=ParseMode.HTML)
    logger.debug(f'Сообщение {telegram_message} отправлено в чат Телеграмма')
def generate_telegram_message(response_json: dict) -> str:
    """Format the newest Devman review attempt as an HTML Telegram message.

    `response_json['new_attempts'][0]` must carry 'is_negative',
    'lesson_title' and 'lesson_url'.
    """
    emojy = {
        'ufo': '👾',
        'true': '✅',
        'false': '❌',
    }
    attempt = response_json['new_attempts'][0]
    # Verdict line depends on whether the work was rejected.
    if attempt['is_negative'] is True:
        verdict = f'{emojy["false"]}Работа не выполнена{emojy["false"]}'
    else:
        verdict = f'{emojy["true"]}Работа сдана{emojy["true"]}'
    telegram_message = dedent(f"""
    {emojy["ufo"]}<b>{attempt['lesson_title']}</b>{emojy["ufo"]}
    {verdict}
    {attempt['lesson_url']}
    """)
    return telegram_message
def get_new_checks(devman_api_token: str, bot: telegram.Bot, chat_id: int, timeout: int = 300) -> None:
    """Long-poll the Devman API forever and push every finished review to Telegram.

    Read timeouts retry immediately; connection/HTTP errors back off
    exponentially (starting at 0.1 s, doubling per failure) and the delay
    resets after the next successful response. Never returns normally.
    """
    timestamp = datetime.now().timestamp()
    headers = {'Authorization': f'Token {devman_api_token}'}
    params = {'timestamp': timestamp}
    reconnect_time = 0.1
    while True:
        url = f'https://dvmn.org/api/long_polling/'
        try:
            response = requests.get(url, headers=headers, params=params, timeout=timeout)
            response.raise_for_status()
            checked_tasks = response.json()
            logger.debug(checked_tasks)
            if checked_tasks.get('status') == 'found':
                telegram_message = generate_telegram_message(checked_tasks)
                send_telegram_message(chat_id, bot, telegram_message)
            # Resume polling from wherever the server says we left off.
            timestamp = checked_tasks.get('timestamp_to_request') or checked_tasks.get('last_attempt_timestamp')
            params = {'timestamp': timestamp}
            reconnect_time = 0.1
        except requests.exceptions.ReadTimeout as error:
            # Server held the long poll past our timeout: re-ask with the same timestamp.
            logger.warning(f'Таймаут запроса отработал раньше чем сервер ответил: {error}. Делаем повторный запрос.')
            params = {'timestamp': timestamp}
            continue
        except requests.exceptions.ConnectionError:
            time.sleep(reconnect_time)
            reconnect_time *= 2  # exponential backoff
            logger.warning(f'Потеря соединения. Повторный запрос через {reconnect_time} секунд')
            continue
        except requests.exceptions.HTTPError as http_error:
            time.sleep(reconnect_time)
            reconnect_time *= 2  # exponential backoff
            logger.warning(f'Запрос вернул ответ {http_error.response}. Повторное подключение через {reconnect_time}')
            continue
def main():
    """Read configuration from the environment, announce startup in the chat,
    and poll forever; any crash inside the poll loop is reported to the same
    Telegram chat and polling restarts."""
    env = Env()
    env.read_env()
    devman_api_token = env.str('DEVMAN_TOKEN_API')
    telegram_api_key = env.str('TELEGRAM_API_KEY')
    telegram_chat_id = env.int('TELEGRAM_CHAT_ID')
    bot = telegram.Bot(token=telegram_api_key)
    # DEBUG_MODE=true switches the logger to DEBUG verbosity.
    logger_level = 'DEBUG' if env.bool('DEBUG_MODE', False) else 'INFO'
    logger.level(logger_level)
    logger.add(sys.stdout, format='{time} {level} {message}')
    send_telegram_message(telegram_chat_id, bot, telegram_message='Бот запущен')
    while True:
        try:
            get_new_checks(devman_api_token, bot, telegram_chat_id)
        except Exception as exception:
            # Report the crash into the chat, then loop to restart polling.
            telegram_message = dedent(
                f"""
                Бот упал с ошибкой:
                {exception}""",
            )
            send_telegram_message(telegram_chat_id, bot, telegram_message)
if __name__ == '__main__':
    # Entry point: blocks forever while polling Devman.
    main()
| wepeqoor1/check_success_request | check_request.py | check_request.py | py | 3,916 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "telegram.Bot",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "telegram.ParseMode.HTML",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "telegram.ParseMode",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "log... |
43535916514 | import argparse
import sys
import logging
from mutagene.profiles.profile import calc_profile
# Module-level logger for this CLI sub-command.
logger = logging.getLogger(__name__)

# Warning shown when the mandatory -g/--genome argument is missing.
genome_error_message = """requires genome name argument -g hg19, hg38, mm10, see http://hgdownload.cse.ucsc.edu/downloads.html for more
Use mutagene fetch to download genome assemblies"""
class ProfileMenu(object):
    """CLI sub-menu wiring argparse options for mutational-profile calculation.

    Registers its arguments on the parser passed to __init__; `callback`
    is invoked with the parsed namespace and delegates to `calculate`.
    """

    def __init__(self, parser):
        parser.add_argument(
            "--infile", "-i", nargs='*',
            help="Input file format",
            type=argparse.FileType('r'))
        parser.add_argument(
            '--outfile', "-o", nargs='?',
            type=argparse.FileType('w'), default=sys.stdout,
            help="Name of output file, will be generated in TSV format")
        parser.add_argument(
            '--genome', "-g",
            help="Location of genome assembly file", type=str)
        parser.add_argument(
            '--input-format', "-f",
            help="Input format: auto, MAF, VCF", type=str, default='auto')
        # for backwards compatibility with 0.8.X add a hidden action that
        # would just take anything as a valid input
        parser.add_argument('action', nargs="?", metavar="")

    def callback(self, args):
        """Entry point invoked by the CLI dispatcher."""
        self.calculate(args)

    def calculate(self, args):
        """Validate required arguments, then run the profile calculation."""
        if not args.infile:
            logger.warning("Provide input file in VCF or MAF format (-i) and a corresponding genome assembly (-g)")
            return
        if not args.genome:
            logger.warning(genome_error_message)
            return
        calc_profile(args.infile, args.outfile, args.genome, args.input_format)
| neksa/mutagene | mutagene/cli/profile_menu.py | profile_menu.py | py | 1,614 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
... |
70270071869 | from sqlalchemy import Column
from sqlalchemy import Integer, String
from sqlalchemy.orm import relationship
from app.models.base import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String, nullable=False)
email = Column(String, nullable=False, unique=True, index=True)
lists = relationship('List', back_populates='owner', cascade='all, delete-orphan')
def get_api_repr(self, include_email=False):
api_repr = {
"id": self.id,
"username": self.username,
}
if include_email:
api_repr['email'] = self.email
return api_repr
| cyber-chuvash/todolist-API | app/models/user.py | user.py | py | 678 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "app.models.base.Base",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "sqlalche... |
21101364933 | import os
import tempfile
from shutil import rmtree
import pytest
import responses
from faker import Faker
from flask import Response, Flask
from flask.testing import FlaskClient
from statuspage2slack.statuspage_constants import ComponentStatus, \
IncidentStatus, IncidentImpact
# Shared Faker instance for generating fixture data.
fake = Faker()

# Timestamp format used by Statuspage webhook payloads.
STATUSPAGE_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

# Folder of this test module (used to point the app at test templates).
test_file_path = os.path.realpath(__file__)
test_file_folder = os.path.dirname(test_file_path)
@pytest.fixture
def component_update_request(old_component_status, new_component_status):
    """Build a Statuspage component-update webhook payload for the
    parametrized old/new component statuses, with faked ids and timestamps."""
    creation_datetime = fake.past_datetime()
    update_datetime = fake.past_datetime(start_date=creation_datetime)
    # 12-character random ids, mimicking Statuspage object ids
    component_id = fake.bothify(text='????????????')
    update_id = fake.bothify(text='????????????')
    return {
        "component_update": {
            "created_at": update_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
            "new_status": new_component_status.value,
            "old_status": old_component_status.value,
            "id": update_id,
            "component_id": component_id
        },
        "component": {
            "created_at": creation_datetime.strftime(
                STATUSPAGE_DATETIME_FORMAT),
            "id": component_id,
            "name": "Some Component",
            "status": new_component_status.value
        }
    }
@pytest.fixture
def incident_update_request(incident_update, incident_impact, incident_status):
    """Build a Statuspage incident webhook payload embedding the
    incident_update fixture, for the parametrized impact/status."""
    creation_datetime = fake.past_datetime()
    monitoring_datetime = fake.past_datetime(start_date=creation_datetime)
    resolved_datetime = fake.past_datetime(start_date=creation_datetime)
    update_datetime = fake.past_datetime(start_date=creation_datetime)
    name = fake.sentence(nb_words=6, variable_nb_words=True,
                         ext_word_list=None)
    return {
        "incident": {
            "backfilled": False,
            "created_at":
                creation_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
            "impact": incident_impact.value,
            "impact_override": None,
            "monitoring_at":
                monitoring_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
            "resolved_at":
                resolved_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
            "shortlink": fake.url(),
            "status": incident_status.value,
            "updated_at": update_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
            "name": name,
            "incident_updates": [incident_update]
        }
    }
@pytest.fixture()
def incident_update(incident_status):
    """Build one incident_updates list entry for the parametrized status."""
    body = fake.paragraph()
    creation_datetime = fake.past_datetime()
    display_datetime = fake.past_datetime(start_date=creation_datetime)
    update_datetime = fake.past_datetime(start_date=creation_datetime)
    return {
        "body": body,
        "created_at": creation_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
        "display_at": display_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
        "status": incident_status.value,
        "updated_at": update_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
    }
@pytest.mark.parametrize("old_component_status", ComponentStatus)
@pytest.mark.parametrize("new_component_status", ComponentStatus)
def test_component_update(flask_client: FlaskClient,
                          component_update_request, used_templates,
                          request_mocker: responses.RequestsMock):
    """POSTing a component update returns 2xx and renders
    component_update.json with the payload's objects, for every status pair."""
    response: Response = flask_client.post('/', json=component_update_request)

    assert 200 <= response.status_code < 300
    assert len(used_templates) == 1
    (template, context) = used_templates.pop()
    assert template.name == 'component_update.json'

    component_update = component_update_request['component_update']
    component = component_update_request['component']
    assert context['component_update'] == component_update
    assert context['component'] == component
@pytest.mark.parametrize("incident_status", IncidentStatus)
@pytest.mark.parametrize("incident_impact", IncidentImpact)
def test_incident_update(flask_client: FlaskClient, incident_update_request,
used_templates):
#request_mocker: responses.RequestsMock):
response: Response = flask_client.post('/', json=incident_update_request)
assert 200 <= response.status_code < 300
assert len(used_templates) == 1
(template, context) = used_templates.pop()
assert template.name == 'incident_update.json'
assert context['incident'] == incident_update_request['incident']
def test_invalid_request(flask_client: FlaskClient):
    """A non-JSON body is rejected with a 4xx client error."""
    response: Response = flask_client.post('/', data='dummy')
    assert response.status_code // 100 == 4
@pytest.mark.parametrize("old_component_status",
[ComponentStatus.DEGRADED_PERFORMANCE])
@pytest.mark.parametrize("new_component_status", [ComponentStatus.OPERATIONAL])
@pytest.mark.parametrize("incident_status", [IncidentStatus.MONITORING])
@pytest.mark.parametrize("incident_impact", [IncidentImpact.CRITICAL])
@pytest.mark.parametrize("flag", ['COMPONENT_MESSAGES_ENABLED',
'INCIDENT_MESSAGES_ENABLED'])
def test_false_enabled_flags(flask_app: Flask, flask_client: FlaskClient,
component_update_request, incident_update_request,
used_templates, flag):
flask_app.config.update({
flag: False
})
if flag == 'INCIDENT_MESSAGES_ENABLED':
response: Response = flask_client.post('/',
json=incident_update_request)
elif flag == 'COMPONENT_MESSAGES_ENABLED':
response: Response = flask_client.post('/',
json=component_update_request)
else:
assert False, "Unexpected flag value"
assert 200 <= response.status_code < 300
assert len(used_templates) == 0
@pytest.mark.parametrize("incident_status", [IncidentStatus.MONITORING])
@pytest.mark.parametrize("incident_impact", [IncidentImpact.CRITICAL])
@pytest.mark.parametrize("env_dict", [
{'TEMPLATE_FOLDER': test_file_folder + '/templates'}
])
def test_change_template_folder(change_env, flask_client: FlaskClient,
incident_update_request, used_templates,
request_mocker: responses.RequestsMock,
env_dict):
template_name = 'incident_update.json'
response: Response = flask_client.post('/', json=incident_update_request)
assert 200 <= response.status_code < 300
assert len(used_templates) == 1
(template, context) = used_templates.pop()
assert template.name == template_name
assert os.path.realpath(template.filename) == os.path.realpath(
env_dict['TEMPLATE_FOLDER'] + '/' + template_name)
def test_copy_templates(flask_app: Flask):
runner = flask_app.test_cli_runner()
folder = tempfile.gettempdir() + '/templates/'
rmtree(folder, ignore_errors=True)
result = runner.invoke(args=['webhook', 'copy-templates', folder])
| Cobliteam/statuspage2slack | tests/test_webhook.py | test_webhook.py | py | 7,087 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "faker.Faker",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line... |
72298298427 | import pickle
from flask import Flask, request, jsonify
import numpy as np
# Load the fitted DictVectorizer and the trained random-forest model
# (both pickled during training; paths are relative to the working dir).
with open("dv.pkl", "rb") as f_in:
    dv = pickle.load(f_in)

with open("rf_model.pkl", "rb") as f_in:
    model = pickle.load(f_in)

# instantiate the Flask application serving the prediction endpoint
app = Flask('atx_housing_prediction')
# set path: /predict
@app.route('/predict', methods=['POST'])  # HTTP Request: Post
def predict():
    """Return the predicted house price for the posted feature dict."""
    payload = request.get_json()
    # Vectorize the single observation.
    features = dv.transform([payload])
    # The model was trained on log1p(price); invert the transform here.
    predicted_price = np.expm1(model.predict(features)[0])
    return jsonify({'price': float(predicted_price)})
if __name__ == '__main__':
    # Development server only; use a WSGI server (e.g. gunicorn) in production.
    app.run(debug=True, host='0.0.0.0', port=9696)
| michaelfronda/ATXHousePrice | predict.py | predict.py | py | 733 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pickle.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line... |
2533690932 | from typing import List, Optional
import filters as f
from iota import Address
from iota.commands import FilterCommand, RequestFilter
from iota.commands.core.find_transactions import FindTransactionsCommand
from iota.commands.core.were_addresses_spent_from import \
WereAddressesSpentFromCommand
from iota.crypto.addresses import AddressGenerator
from iota.crypto.types import Seed
from iota.filters import SecurityLevel, Trytes
import asyncio
__all__ = [
'GetNewAddressesCommand',
]
class GetNewAddressesCommand(FilterCommand):
    """
    Executes ``getNewAddresses`` extended API command.

    See :py:meth:`iota.api.Iota.get_new_addresses` for more info.
    """
    command = 'getNewAddresses'

    def get_request_filter(self):
        return GetNewAddressesRequestFilter()

    def get_response_filter(self):
        # The response dict is returned to the caller unmodified.
        pass

    async def _execute(self, request: dict) -> dict:
        # Request keys are already validated/defaulted by the request filter.
        checksum: bool = request['checksum']
        count: Optional[int] = request['count']
        index: int = request['index']
        security_level: int = request['securityLevel']
        seed: Seed = request['seed']

        return {
            'addresses':
                await self._find_addresses(
                    seed,
                    index,
                    count,
                    security_level,
                    checksum,
                ),
        }

    async def _find_addresses(
            self,
            seed: Seed,
            index: int,
            count: Optional[int],
            security_level: int,
            checksum: bool
    ) -> List[Address]:
        """
        Find addresses matching the command parameters.

        When ``count`` is None, scan the Tangle starting at ``index``
        and return the first address with no recorded activity;
        otherwise generate ``count`` addresses locally.
        """
        generator = AddressGenerator(seed, security_level, checksum)

        if count is None:
            # Connect to Tangle and find the first unused address.
            for addy in generator.create_iterator(start=index):
                # We use addy.address here because the commands do
                # not work on an address with a checksum
                # Execute two checks concurrently
                responses = await asyncio.gather(
                    WereAddressesSpentFromCommand(self.adapter)(
                        addresses=[addy.address],
                    ),
                    FindTransactionsCommand(self.adapter)(
                        addresses=[addy.address],
                    ),
                )

                # responses[0] -> was it spent from?
                # responses[1] -> any transaction found?
                if responses[0]['states'][0] or responses[1].get('hashes'):
                    # Address shows activity; keep scanning.
                    continue
                return [addy]

        return generator.get_addresses(start=index, count=count)
class GetNewAddressesRequestFilter(RequestFilter):
    """Validates and defaults requests for ``getNewAddresses``."""

    def __init__(self) -> None:
        filter_spec = {
            # Everything except ``seed`` is optional.
            'checksum': f.Type(bool) | f.Optional(default=False),
            'count': f.Type(int) | f.Min(1),
            'index': f.Type(int) | f.Min(0) | f.Optional(default=0),
            'securityLevel': SecurityLevel,
            'seed': f.Required | Trytes(Seed),
        }
        optional_keys = {
            'checksum',
            'count',
            'index',
            'securityLevel',
        }
        super().__init__(filter_spec, allow_missing_keys=optional_keys)
| iotaledger/iota.py | iota/commands/extended/get_new_addresses.py | get_new_addresses.py | py | 3,422 | python | en | code | 344 | github-code | 6 | [
{
"api_name": "iota.commands.FilterCommand",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "iota.crypto.types.Seed",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "io... |
39749765607 | """
This file creates all the tables and databases,
used in the in_Voice APP as class,
and also does the CRUD operations of database by using the methods.
"""
# Importing the required modules to working with database
import sqlite3
# Importing os module to work with files and folders
import os
# Importing a function to open and read the data in the url
from urllib.request import urlopen
# Importing json module to convert the data from the url into json format
import json
# Importing the required modules from the Python-Docx module to create and work with '.docx' files
from docx import Document
from docx.shared import Inches, Pt, RGBColor
from docx.enum.text import WD_ALIGN_PARAGRAPH
# Class for all the CRUD operations on the 'userCredentials' table of the app database.
class UserCredential():
    """
    Registers and manages user accounts for the InVoice App.

    Rows live in the ``userCredentials`` table of the shared app
    database (a SQLite file under ``DB_directory``).
    """

    # Directory where the .db files are stored (hidden on Windows).
    DB_directory = "./.DB"

    def __init__(self, db_name="in_Voice.db"):
        # Table holding one record per registered user.
        self.tableName = "userCredentials"
        # Ensure the (hidden) directory for the database files exists.
        self.createHiddenDIR(self.DB_directory)
        # Connect to the app database; SQLite creates the file if missing.
        self.inVoice_DB = sqlite3.connect(f"{self.DB_directory}/{db_name}")
        self.co = self.inVoice_DB.cursor()
        # Create the credentials table on first run.
        self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
            firstName TEXT,
            lastName TEXT,
            email CHAR NOT NULL UNIQUE,
            mobileNumber INTEGER,
            userId CHAR NOT NULL PRIMARY KEY,
            passWord CHAR
        )""")

    def createUser(self, firstName, lastName, email, mobileNumber, userId, password):
        """Insert a new user record.

        Raises sqlite3.IntegrityError when userId or email already exists.
        """
        self.co.execute(
            f"INSERT INTO {self.tableName} VALUES "
            "(:firstName, :lastName, :email, :mobileNumber, :userId, :password)",
            {
                "firstName": firstName,
                "lastName": lastName,
                "email": email,
                "mobileNumber": mobileNumber,
                "userId": userId,
                "password": password,
            })

    def changePassword(self, userId, password):
        """Update only the password of the record matching userId."""
        # SQLite column names are case-insensitive, so 'password' matches
        # the 'passWord' column declared in the DDL above.
        self.co.execute(
            f"UPDATE {self.tableName} SET password = :password WHERE userId = :userId",
            {
                "password": password,
                "userId": userId,
            })

    def getUser(self, userId):
        """Return the record (plus rowid) for userId, or None when absent."""
        self.co = self.inVoice_DB.cursor()
        # Parameterized query: never interpolate user input into SQL text.
        self.co.execute(
            f"SELECT *, oid FROM {self.tableName} WHERE userId = ?",
            (userId,))
        return self.co.fetchone()

    def getallUsers(self):
        """Return every user record (each row including its rowid)."""
        self.co = self.inVoice_DB.cursor()
        self.co.execute(f"SELECT *, oid FROM {self.tableName}")
        return self.co.fetchall()

    def delUser(self, userId):
        """Delete the record matching userId (no-op when absent)."""
        self.co = self.inVoice_DB.cursor()
        # Parameterized to close the SQL-injection hole in the old f-string.
        self.co.execute(
            f"DELETE FROM {self.tableName} WHERE userId = ?",
            (userId,))

    def createHiddenDIR(self, dirName, status=""):
        """Create a hidden directory, or delete it and its files on 'clearCache'."""
        if (os.path.exists(dirName) is False):
            os.mkdir(dirName)
            # 'attrib' only exists on Windows; elsewhere the command fails
            # harmlessly and the directory simply stays visible.
            os.system("attrib +h " + dirName)
        elif (os.path.exists(dirName) and status == "clearCache"):
            # Remove every file, then the (now empty) directory itself.
            for item in os.listdir(dirName):
                os.remove(f"{dirName}/{item}")
            os.rmdir(dirName)

    def closeDB(self):
        """Commit pending changes and close the database connection."""
        self.inVoice_DB.commit()
        self.inVoice_DB.close()
# Class for all the CRUD operations on the 'userDetails' table of a user's database.
class UserDetail():
    """
    Per-user profile storage.

    Each user gets a dedicated SQLite database file so their data stays
    isolated from other users and from the main InVoice App database.
    The profile picture is stored as a BLOB and materialized into a
    cache directory on read.
    """

    # Directory where temporary (binary) image files are written.
    cache_directory = "./.__appcache__"

    def __init__(self, userId):
        # Remember the owner so other methods can query by it.
        self.userId = userId
        self.tableName = "userDetails"
        # One database file per user, named after the userId.
        self.inVoice_DB = sqlite3.connect(
            f"{UserCredential.DB_directory}/{userId}.db")
        self.co = self.inVoice_DB.cursor()
        self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
            userId CHAR NOT NULL PRIMARY KEY,
            email CHAR NOT NULL UNIQUE,
            firstName TEXT NOT NULL,
            lastName TEXT NOT NULL,
            profile BLOB NOT NULL
        )""")

    def convertToBinary(self, imageFile="Images/profile.png"):
        """Read imageFile and return its raw bytes (for BLOB storage)."""
        with open(imageFile, 'rb') as file:
            blobData = file.read()
        return blobData

    def convertToImage(self, blobData, fileName):
        """Write blobData to a fresh PNG in the cache dir; return its path."""
        pathToImageFile = f"{self.cache_directory}/{fileName}_image.png"
        count = 0
        # Never overwrite an existing cache file: probe for a free name.
        while (os.path.exists(pathToImageFile)):
            count += 1
            pathToImageFile = f"{self.cache_directory}/{fileName}_image{count}.png"
        with open(pathToImageFile, 'wb') as file:
            file.write(blobData)
        return pathToImageFile

    def create(self, userId, email, firstName, lastName):
        """Insert a new profile row, using the default profile picture."""
        image = self.convertToBinary()
        self.co.execute(
            f"INSERT INTO {self.tableName} VALUES "
            "(:userId, :email, :firstName, :lastName, :profile)",
            {
                "userId": userId,
                "email": email,
                "firstName": firstName,
                "lastName": lastName,
                "profile": image,
            })

    def get(self):
        """Return this user's profile as a dict.

        The stored picture BLOB is written to a cache file and returned
        as a filesystem path under the 'profile' key.
        """
        # Make sure the cache directory exists before writing the image.
        UserCredential.createHiddenDIR(self, dirName=self.cache_directory)
        self.co = self.inVoice_DB.cursor()
        # Parameterized query: closes the SQL-injection hole in the old f-string.
        self.co.execute(
            f"SELECT * FROM {self.tableName} WHERE userId = ?",
            (self.userId,))
        record = self.co.fetchone()
        pathToProfile = self.convertToImage(record[4], record[0])
        return {
            "userId": str(record[0]),
            "email": str(record[1]),
            "fullName": str(record[2]) + " " + str(record[3]),
            "profile": str(pathToProfile),
        }

    def updateProfilePicture(self, pathToNewPicture):
        """Replace the stored picture with the file at pathToNewPicture,
        then delete that temporary file."""
        image = self.convertToBinary(pathToNewPicture)
        self.co.execute(
            f"UPDATE {self.tableName} SET profile = :profile WHERE userId = :userId",
            {
                "profile": image,
                "userId": self.userId,
            })
        os.remove(pathToNewPicture)

    def closeDB(self):
        """Commit pending changes and close the database connection."""
        self.inVoice_DB.commit()
        self.inVoice_DB.close()
# Class for all the CRUD operations on the 'userPreferences' table of a user's database.
class UserPreference():
    """
    Stores the user's app defaults: currency code, due date and
    tax percentage. The table holds a single row (rowid 1).
    """

    def __init__(self, userId):
        self.tableName = "userPreferences"
        # Preferences live in the same per-user database file.
        self.inVoice_DB = sqlite3.connect(
            f"{UserCredential.DB_directory}/{userId}.db")
        self.co = self.inVoice_DB.cursor()
        self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
            currencyCode TEXT NOT NULL UNIQUE,
            dueDate TEXT NOT NULL,
            taxInPercentage INTEGER NOT NULL
        )""")

    def createDefault(self):
        """Insert the initial preference row: INR, no due date, 0% tax."""
        initial_row = {
            "currencyCode": "INR",
            "dueDate": "Nil",
            "taxInPercentage": 0,
        }
        self.co.execute(
            f"INSERT INTO {self.tableName} VALUES (:currencyCode, :dueDate, :taxInPercentage)",
            initial_row)

    def updateDefault(self, currencyCode, dueDate, taxInPercentage):
        """Overwrite the single preference row (rowid 1) with new values."""
        new_values = {
            "currencyCode": currencyCode,
            "dueDate": dueDate,
            "taxInPercentage": taxInPercentage,
            "uniqueID": "1",
        }
        self.co.execute(
            f"UPDATE {self.tableName} SET currencyCode = :currencyCode, dueDate = :dueDate, taxInPercentage = :taxInPercentage WHERE oid = :uniqueID",
            new_values)

    def all(self):
        """Return the (single) preference row, or None when not yet created."""
        cursor = self.inVoice_DB.cursor()
        self.co = cursor
        cursor.execute(f"SELECT * FROM {self.tableName}")
        return cursor.fetchone()

    def closeDB(self):
        """Commit pending changes and close the database connection."""
        self.inVoice_DB.commit()
        self.inVoice_DB.close()
# Class for all the CRUD operations on the 'currencyRates' table of a user's database.
class CurrencyRate():
    """
    Maintains currency exchange rates relative to INR.

    Live rates are fetched from http://www.floatrates.com; when the
    service is unreachable (or returns a bad response) the previously
    stored rates are reused, falling back to the built-in defaults.
    """

    # URL serving the INR exchange-rate table as JSON.
    site_URL = "http://www.floatrates.com/daily/inr.json"

    # (currency code, display symbol) for the supported countries.
    CODES_and_SYMBOLS = [("aud", "$"),
                         ("cad", "$"),
                         ("chf", "chf"),
                         ("eur", "€"),
                         ("gbp", "£"),
                         ("inr", "₹"),
                         ("jpy", "¥"),
                         ("nzd", "$"),
                         ("usd", "$"),
                         ("zar", "R")]

    # Built-in fallback rates used when no other data is available.
    default_currencyRates = [("AUD", "Australian Dollar", "$", 0.02, 55.68),
                             ("CAD", "Canadian Dollar", "$", 0.02, 61.78),
                             ("CHF", "Swiss Franc", "chf", 0.01, 83.49),
                             ("EUR", "Euro", "€", 0.01, 80.73),
                             ("GBP", "U.K. Pound Sterling", "£", 0.01, 96.06),
                             ("INR", "Indian Rupee", "₹", 1.0, 1.0),
                             ("JPY", "Japanese Yen", "¥", 1.70, 0.59),
                             ("NZD", "New Zealand Dollar", "$", 0.02, 50.41),
                             ("USD", "U.S. Dollar", "$", 0.01, 79.34),
                             ("ZAR", "South African Rand", "R", 0.21, 4.82)]

    def __init__(self, userId):
        self.tableName = "currencyRates"
        # Rates live in the same per-user database file.
        self.inVoice_DB = sqlite3.connect(
            f"{UserCredential.DB_directory}/{userId}.db")
        self.co = self.inVoice_DB.cursor()
        self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
            currencyCode TEXT NOT NULL PRIMARY KEY,
            currencyName TEXT NOT NULL,
            symbol TEXT NOT NULL,
            INRvalue REAL NOT NULL,
            exchangeValue REAL NOT NULL
        )""")

    def update(self, status="update"):
        """Refresh the stored exchange rates.

        status="create" inserts the rows for a brand-new table;
        status="update" (default) overwrites the stored rate values.
        """
        currencyRates = []
        try:
            # Context manager guarantees the HTTP response is closed.
            with urlopen(self.site_URL, timeout=1) as weburl:
                if (weburl.getcode() == 200):
                    jsonData = json.loads(weburl.read())
                    for code, symbol in self.CODES_and_SYMBOLS:
                        entry = jsonData.get(code)
                        if (entry is None):
                            # INR itself is absent from the feed: rate is 1:1.
                            currencyRates.append(
                                ("INR", "Indian Rupee", symbol,
                                 "%.2f" % 1, "%.2f" % 1))
                        else:
                            currencyRates.append(
                                (entry["code"], entry["name"], symbol,
                                 "%.2f" % entry["rate"],
                                 "%.2f" % entry["inverseRate"]))
        except Exception:
            # Narrowed from a bare 'except:'; network/parse errors fall
            # through to the fallback logic below.
            currencyRates = []
        if (currencyRates == []):
            # Covers both errors and non-200 responses (the old code left
            # the table empty on a non-200 'create').
            stored = self.all()
            currencyRates = stored if (stored != []) else self.default_currencyRates
        for item in currencyRates:
            if (status == "update"):
                self.co.execute(
                    f"UPDATE {self.tableName} SET INRvalue = :INRvalue, exchangeValue = :exchangeValue WHERE currencyCode = :uniqueID",
                    {
                        "INRvalue": float(item[3]),
                        "exchangeValue": float(item[4]),
                        "uniqueID": item[0],
                    })
            elif (status == "create"):
                self.co.execute(
                    f"INSERT INTO {self.tableName} VALUES (:currencyCode, :currencyName, :symbol, :INRvalue, :exchangeValue)",
                    {
                        "currencyCode": item[0],
                        "currencyName": item[1],
                        "symbol": item[2],
                        "INRvalue": float(item[3]),
                        "exchangeValue": float(item[4]),
                    })

    def all(self):
        """Return every stored currency row."""
        self.co = self.inVoice_DB.cursor()
        self.co.execute(f"SELECT * FROM {self.tableName}")
        return self.co.fetchall()

    def closeDB(self):
        """Commit pending changes and close the database connection."""
        self.inVoice_DB.commit()
        self.inVoice_DB.close()
# Class for all the CRUD operations on the 'dueDates' table of a user's database.
class DueDate():
    """
    Keeps the selectable due dates, each stored as a day count
    (the string "Nil" means no due date).
    """

    # Day counts offered by the app out of the box.
    default_dueDates = ["Nil", "7", "14", "28", "56", "84"]

    def __init__(self, userId):
        self.tableName = "dueDates"
        # Due dates live in the same per-user database file.
        self.inVoice_DB = sqlite3.connect(
            f"{UserCredential.DB_directory}/{userId}.db")
        self.co = self.inVoice_DB.cursor()
        self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
            dayCount TEXT NOT NULL
        )""")

    def add(self, days=None):
        """Insert one day count, or the whole default list when days is None."""
        entries = self.default_dueDates if (days is None) else [days]
        for day in entries:
            self.co.execute(
                f"INSERT INTO {self.tableName} VALUES (:dayCount)",
                {"dayCount": str(day)})

    def all(self):
        """Return every stored day count (each row including its rowid)."""
        cursor = self.inVoice_DB.cursor()
        self.co = cursor
        cursor.execute(f"SELECT *, oid FROM {self.tableName}")
        return cursor.fetchall()

    def closeDB(self):
        """Commit pending changes and close the database connection."""
        self.inVoice_DB.commit()
        self.inVoice_DB.close()
# Class for all the CRUD operations on the 'inVoiceDetails' table of a user's database.
class InVoiceDetail():
    """
    Stores every invoice the user created for their clients.

    Each row holds the client name, purchase/due dates, the purchased
    products (serialized to JSON), the computed totals and the payment
    status, keyed by a unique invoice number.
    """

    def __init__(self, userId):
        self.userId = userId
        self.tableName = "inVoiceDetails"
        # Invoices live in the same per-user database file.
        self.inVoice_DB = sqlite3.connect(
            f"{UserCredential.DB_directory}/{userId}.db")
        self.co = self.inVoice_DB.cursor()
        self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
            invoiceNumber CHAR NOT NULL PRIMARY KEY,
            clientName TEXT NOT NULL,
            currencyCode TEXT NOT NULL,
            currencySymbol TEXT NOT NULL,
            datePurchased DATE NOT NULL,
            dueDate DATE NOT NULL,
            productsPurchased BLOB NOT NULL,
            customerMessage TEXT,
            taxInPercentage INTEGER,
            subTotal REAL NOT NULL,
            calculatedTAX REAL NOT NULL,
            totalAmount REAL NOT NULL,
            balanceAmount REAL NOT NULL,
            paymentStatus TEXT
        )""")

    def _paymentStatus(self, balanceAmount):
        """'Paid' when nothing is outstanding, otherwise 'Pending'."""
        return "Paid" if (int(balanceAmount) == 0) else "Pending"

    def create(self, inVoiceNo, clientName, currencyCode, currencySymbol,
               purchaseDate, dueDate, productsPurchased, customerMessage,
               taxInPercentage, subTotal, calculatedTAX, totalAmount,
               balanceAmount):
        """Insert a new invoice; productsPurchased is serialized to JSON."""
        purchasedProducts = json.dumps(productsPurchased)
        paymentStatus = self._paymentStatus(balanceAmount)
        self.co.execute(
            f"INSERT INTO {self.tableName} VALUES (:invoiceNumber, :clientName, :currencyCode, :currencySymbol, :datePurchased, :dueDate, :productsPurchased, :customerMessage, :taxInPercentage, :subTotal, :calculatedTAX, :totalAmount, :balanceAmount, :paymentStatus)",
            {
                "invoiceNumber": inVoiceNo,
                "clientName": clientName,
                "currencyCode": currencyCode,
                "currencySymbol": currencySymbol,
                "datePurchased": purchaseDate,
                "dueDate": dueDate,
                "productsPurchased": purchasedProducts,
                "customerMessage": customerMessage,
                "taxInPercentage": taxInPercentage,
                "subTotal": subTotal,
                "calculatedTAX": calculatedTAX,
                "totalAmount": totalAmount,
                "balanceAmount": balanceAmount,
                "paymentStatus": paymentStatus,
            })

    def get(self, inVoiceId):
        """Return the matching invoice as a list, with the JSON products
        field (column 6) decoded back into a Python list."""
        self.co = self.inVoice_DB.cursor()
        # Parameterized query: closes the SQL-injection hole in the old f-string.
        self.co.execute(
            f"SELECT * FROM {self.tableName} WHERE invoiceNumber = ?",
            (inVoiceId,))
        records = self.co.fetchone()
        new_records = []
        # enumerate() yields the column position directly; the old
        # records.index(item) lookup returned the FIRST occurrence of a
        # value, so two columns holding equal values picked the wrong one.
        for position, item in enumerate(records):
            if (position != 6):
                new_records.append(item)
            else:
                new_records.append(json.loads(item))
        return new_records

    def update(self, inVoiceId, clientName, currencyCode, currencySymbol,
               purchaseDate, dueDate, productsPurchased, customerMessage,
               taxInPercentage, subTotal, calculatedTAX, totalAmount,
               balanceAmount):
        """Overwrite the invoice matching inVoiceId with the new values."""
        purchasedProducts = json.dumps(productsPurchased)
        paymentStatus = self._paymentStatus(balanceAmount)
        self.co.execute(
            f"UPDATE {self.tableName} SET clientName = :clientName, currencyCode = :currencyCode, currencySymbol = :currencySymbol, datePurchased = :datePurchased, dueDate = :dueDate, productsPurchased = :productsPurchased, customerMessage = :customerMessage, taxInPercentage = :taxInPercentage, subTotal = :subTotal, calculatedTAX = :calculatedTAX, totalAmount = :totalAmount, balanceAmount = :balanceAmount, paymentStatus = :paymentStatus WHERE invoiceNumber = :inVoiceId",
            {
                "clientName": clientName,
                "currencyCode": currencyCode,
                "currencySymbol": currencySymbol,
                "datePurchased": purchaseDate,
                "dueDate": dueDate,
                "productsPurchased": purchasedProducts,
                "customerMessage": customerMessage,
                "taxInPercentage": taxInPercentage,
                "subTotal": subTotal,
                "calculatedTAX": calculatedTAX,
                "totalAmount": totalAmount,
                "balanceAmount": balanceAmount,
                "paymentStatus": paymentStatus,
                "inVoiceId": inVoiceId,
            })

    def all(self):
        """Return every invoice (each row including its rowid)."""
        self.co.execute(f"SELECT *, oid FROM {self.tableName}")
        return self.co.fetchall()

    def delete(self, userId):
        """Delete the invoice whose invoiceNumber matches.

        NOTE: the parameter name is historical — it receives an invoice
        number, not a user id; kept for interface compatibility.
        """
        # Parameterized to close the SQL-injection hole in the old f-string.
        self.co.execute(
            f"DELETE FROM {self.tableName} WHERE invoiceNumber = ?",
            (userId,))

    def closeDB(self):
        """Commit pending changes and close the database connection."""
        self.inVoice_DB.commit()
        self.inVoice_DB.close()
# Creating a class for doing all the CRUD operations in the 'clientDetails' table on the inVoice User's Database
class ClientDetail():
    """
    Class for Creating a New Table in the User's Database,
    which contains all the Clients data created by the User for using it later on creating InVoice.
    Each record has the individual client details.
    """

    def __init__(self, userId):
        # Owner of this database; kept so other methods can use it.
        self.userId = userId
        # Table holding one row per saved client.
        self.tableName = "clientDetails"
        # Clients are stored in the same per-user database file.
        self.inVoice_DB = sqlite3.connect(
            f"{UserCredential.DB_directory}/{userId}.db")
        # Cursor shared by the CRUD helpers below.
        self.co = self.inVoice_DB.cursor()
        # Create the 'clientDetails' table on first use.
        self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
            clientName TEXT NOT NULL,
            emailId CHAR NOT NULL,
            contactNumber INTEGER NOT NULL,
            addressLine1 TEXT NOT NULL,
            addressLine2 TEXT,
            addressLine3 TEXT,
            cityName TEXT NOT NULL,
            pinCode TEXT NOT NULL,
            customerNote TEXT
        )""")
'''ADD CLIENT'''
# Insert a brand-new client record into the 'clientDetails' table.
def add(self, clientName, emailId, contactNumber, addressLine1,
        addressLine2, addressLine3, cityName, pinCode, customerNote):
    """Store one client; every argument maps to a column of the table."""
    new_client = {
        "clientName": clientName,
        "emailId": emailId,
        "contactNumber": contactNumber,
        "addressLine1": addressLine1,
        "addressLine2": addressLine2,
        "addressLine3": addressLine3,
        "cityName": cityName,
        "pinCode": pinCode,
        "customerNote": customerNote,
    }
    self.co.execute(
        f"INSERT INTO {self.tableName} VALUES (:clientName, :emailId, :contactNumber, :addressLine1, :addressLine2, :addressLine3, :cityName, :pinCode, :customerNote)",
        new_client)
'''GET CLIENT'''
# Function for getting specific record(client) from the 'clientDetails' table
def get(self, clientId):
# Querying the 'clientDetails' table, and picking the matched record(client)
self.co.execute(
f"SELECT * FROM {self.tableName} WHERE oid = '{clientId}'")
# Assigning the queryed record(client) into a variable to return as response
records = self.co.fetchone()
return records
'''UPDATE CLIENT'''
# Function for updating specific record(client) in the 'clientDetails' table
def update(self, clientId, clientName, emailId, contactNumber, addressLine1, addressLine2, addressLine3, cityName, pinCode, customerNote):
# Updating a specific record(client) in the 'clientDetails' table using its oid
self.co.execute(f"UPDATE {self.tableName} SET clientName = :clientName, emailId = :emailId, contactNumber = :contactNumber, addressLine1 = :addressLine1, addressLine2 = :addressLine2, addressLine3 = :addressLine3, cityName = :cityName, pinCode = :pinCode, customerNote = :customerNote WHERE oid = :uniqueId",
{
"clientName": clientName,
"emailId": emailId,
"contactNumber": contactNumber,
"addressLine1": addressLine1,
"addressLine2": addressLine2,
"addressLine3": addressLine3,
"cityName": cityName,
"pinCode": pinCode,
"customerNote": customerNote,
"uniqueId": clientId
})
'''GET ALL CLIENTS'''
# Function for getting all the records(clients) from the 'clientDetails' table
def all(self):
# Querying the 'clientDetails' table, and picking all the available records(clients)
self.co.execute(f"SELECT *, oid FROM {self.tableName}")
# Assigning the queryed records(clients) into a variable to return as response
records = self.co.fetchall()
return records
'''DELETE CLIENT DETAILS'''
# Function for deleting a specific record(client) from the 'clientDetails' table
def delete(self, userId):
# Deleting a specific record(client) from the 'clientDetails' table which matches the record(client)
self.co.execute(
f"DELETE FROM {self.tableName} WHERE oid = '{userId}'")
'''CLOSE DB'''
# Function for Closing the connection to Database
def closeDB(self):
# Committing the changes to the database
self.inVoice_DB.commit()
# Closing the connection with the database
self.inVoice_DB.close()
# Creating a class for doing all the CRUD operations in the 'productDetails' table on the inVoice User's Database
class ProductDetail():
    """
    Class for Creating a New Table in the User's Database,
    which contains all the Products created by the User for using it later on creating InVoice.
    """

    def __init__(self, userId):
        """Open (or create) the user's database and ensure the table exists."""
        # Keep the user id around so other methods of this class can use it
        self.userId = userId
        self.tableName = "productDetails"
        # One database file per user, created on first use
        self.inVoice_DB = sqlite3.connect(
            f"{UserCredential.DB_directory}/{userId}.db")
        self.co = self.inVoice_DB.cursor()
        # Creating a 'productDetails' table if not exists
        self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
            productName TEXT NOT NULL UNIQUE,
            productMRP REAL,
            quantity INTEGER NOT NULL,
            purchaseRate REAL,
            salesRate REAL NOT NULL,
            reOrderQuantity INTEGER NOT NULL
            )""")

    @staticmethod
    def _checkColumn(name):
        """Reject column names that are not plain identifiers.

        Column names cannot be bound as SQL parameters, so the callers below
        interpolate them; this guard keeps that interpolation injection-safe.
        """
        if not str(name).isidentifier():
            raise ValueError(f"invalid column name: {name!r}")

    '''ADD PRODUCT'''
    # Function for creating new record (product) in the 'productDetails' table
    def add(self, productName, productMRP, quantity, purchaseRate, salesRate, reOrderQuantity):
        """Insert one product record (values bound as named parameters)."""
        self.co.execute(f"INSERT INTO {self.tableName} VALUES (:productName, :productMRP, :quantity, :purchaseRate, :salesRate, :reOrderQuantity)",
                        {
                            "productName": productName,
                            "productMRP": productMRP,
                            "quantity": quantity,
                            "purchaseRate": purchaseRate,
                            "salesRate": salesRate,
                            "reOrderQuantity": reOrderQuantity
                        })

    '''UPDATE PRODUCT'''
    # Function for updating specific record(product) in the 'productDetails' table
    def update(self, productId, productName, productMRP, quantity, purchaseRate, salesRate, reOrderQuantity, getBy="oid"):
        """Overwrite the product row where column *getBy* equals *productId*."""
        self._checkColumn(getBy)  # getBy is interpolated below; validate it
        self.co.execute(f"UPDATE {self.tableName} SET productName = :productName, productMRP = :productMRP, quantity = :quantity, purchaseRate = :purchaseRate, salesRate = :salesRate, reOrderQuantity = :reOrderQuantity WHERE {getBy} = :uniqueId",
                        {
                            "productName": productName,
                            "productMRP": productMRP,
                            "quantity": quantity,
                            "purchaseRate": purchaseRate,
                            "salesRate": salesRate,
                            "reOrderQuantity": reOrderQuantity,
                            "uniqueId": productId
                        })

    '''GET A PRODUCT BY NAME'''
    # Function for getting a specified record(product) from the 'productDetails' table
    def get(self, filterby, productName):
        """Return the first product row where column *filterby* equals *productName*.

        Bug fix: the value is bound as a query parameter; the column name is
        validated before interpolation (SQL-injection hardening).
        """
        self._checkColumn(filterby)
        self.co.execute(
            f"SELECT * FROM {self.tableName} WHERE {filterby} = ?", (productName,))
        records = self.co.fetchone()
        return records

    '''GET ALL PRODUCTS'''
    # Function for getting all the records(products) from the 'productDetails' table
    def all(self):
        """Return every product row, with the rowid appended as a last column."""
        self.co.execute(f"SELECT *, oid FROM {self.tableName}")
        records = self.co.fetchall()
        return records

    '''DELETE PRODUCT DETAILS'''
    # Function for deleting a specific record(product) from the 'productDetails' table
    def delete(self, userId):
        """Delete the product row identified by its oid.

        Bug fix: parameter binding instead of f-string interpolation.
        """
        self.co.execute(
            f"DELETE FROM {self.tableName} WHERE oid = ?", (userId,))

    '''CLOSE DB'''
    # Function for Closing the connection to Database
    def closeDB(self):
        """Commit pending changes and close the database connection."""
        self.inVoice_DB.commit()
        self.inVoice_DB.close()
# Function for creating the InVoice in '.docx' file format
def wordDocGenerator(inVoiceId, clientData, purchaseDate, dueDate, currencySymbol, productDetails, subTotal, calculatedTAX, totalAmount, balanceAmount, customerMessage):
    """Render one invoice as a Word (.docx) document and save it.

    The file is named "<inVoiceId>.docx" in the current working directory;
    a numeric suffix ("_1", "_2", ...) is appended while that name exists.

    Relies on python-docx names imported elsewhere in this module
    (Document, Pt, Inches, RGBColor, WD_ALIGN_PARAGRAPH) plus os.

    Assumptions to confirm against the table layouts above:
      * clientData is indexable with name at [0], contact at [2], address
        lines at [3]..[5] and city at [6] (note [6] is printed before [5]
        below -- presumably intentional; verify against clientDetails rows).
      * each productDetails entry has at least 5 fields:
        (no, description, quantity, rate, amount).
      * balanceAmount is convertible with int() -- TODO confirm callers
        never pass a formatted string like "1,000".
    """
    # Function for adding a empty line
    def emptyLines(count):
        for _ in range(count):
            # NOTE(review): this mutates the shared "Body Text" style each
            # call rather than a copy -- confirm that is acceptable.
            linespace_style = document.styles["Body Text"]
            linespace_style.font.size = Pt(10)
            document.add_paragraph(style=linespace_style).add_run("")
    # >>
    # Opening a new Word document for storing the InVoice details as human readable data
    document = Document()
    # -------------------------------
    # Headings
    # -------------------------------
    # Creating the Main heading for the 'InVoice' document and Aligning it to the center
    mainheading = document.add_heading()
    mainheading.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # Settings custom margins for the 'InVoice' document
    sections = document.sections
    section = sections[0]
    section.top_margin = Inches(0.04)
    section.bottom_margin = Inches(0.2)
    section.left_margin = Inches(0.9)
    section.right_margin = Inches(0.8)
    # Adding the first content and styling of the Main heading ("in")
    run = mainheading.add_run("in")
    run.font.size = Pt(55)
    run.font.name = "Magneto"
    run.font.color.rgb = RGBColor(0x00, 0x00, 0x40)
    # Adding the second content and styling of the Main heading ("Voice")
    run = mainheading.add_run("Voice")
    run.font.size = Pt(12)
    run.font.name = "Matura MT Script Capitals"
    run.font.color.rgb = RGBColor(0x46, 0x46, 0x46)
    # Adding an empty line
    emptyLines(1)
    # -------------------------------
    # InVoice-Id
    # -------------------------------
    # Creating the template for inVoice number
    inVoiceId_container = document.add_paragraph("id: ")
    inVoiceId_container.alignment = WD_ALIGN_PARAGRAPH.RIGHT
    # Filling the inVoice number template with the inVoiceId's value passed through
    inVoiceNumber = inVoiceId_container.add_run(f"{inVoiceId}")
    inVoiceNumber.font.name = "Consolas"
    inVoiceNumber.font.size = Pt(13)
    inVoiceNumber.font.bold = True
    # -------------------------------
    # Client Details
    # -------------------------------
    # Creating the template for Client Details
    clientdetails_container = document.add_paragraph("")
    # Filling the Client Details template with the client's name, address and phone
    client_name = clientdetails_container.add_run(
        f"{clientData[0]}\n{clientData[3]}\n{clientData[4]}\n{clientData[6]}\n{clientData[5]}\n\n{clientData[2]}")
    client_name.font.name = "Times New Roman"
    # -------------------------------
    # Due Date
    # -------------------------------
    # Creating the template for Due Date
    duedate_container = document.add_paragraph("Due Date : ")
    duedate_container.alignment = WD_ALIGN_PARAGRAPH.RIGHT
    # Filling the Due Date template with the due date
    due_date = duedate_container.add_run(f"{dueDate}")
    due_date.font.size = Pt(13)
    due_date.font.name = "Times New Roman"
    # Adding an empty line
    emptyLines(1)
    # -------------------------------
    # Product Details
    # -------------------------------
    # Creating a table (1 header row, 5 columns) for the purchased products
    product_table = document.add_table(1, 5)
    # Creating the Table Header
    heading_cells = product_table.rows[0].cells
    # Populating the Table Header
    heading_cells[0].text = "No"
    heading_cells[1].text = "Description"
    heading_cells[2].text = "Quantity"
    heading_cells[3].text = f"Rate {currencySymbol}"
    heading_cells[4].text = f"Amount {currencySymbol}"
    # Aligning and Styling the names in Table Header
    heading_cells[1].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
    heading_cells[2].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.RIGHT
    heading_cells[3].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.RIGHT
    heading_cells[4].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.RIGHT
    heading_cells[1].paragraphs[0].runs[0].font.bold = True
    heading_cells[2].paragraphs[0].runs[0].font.bold = True
    heading_cells[3].paragraphs[0].runs[0].font.bold = True
    heading_cells[4].paragraphs[0].runs[0].font.bold = True
    heading_cells[1].paragraphs[0].runs[0].font.size = Pt(13)
    heading_cells[2].paragraphs[0].runs[0].font.size = Pt(13)
    heading_cells[3].paragraphs[0].runs[0].font.size = Pt(13)
    heading_cells[4].paragraphs[0].runs[0].font.size = Pt(13)
    product_table.rows[0].height = Inches(0.6)
    # Populating the product details inside the table
    for detail in productDetails:
        # Creating a new row for each product
        cells = product_table.add_row().cells
        # Filling the content for each field of the row
        cells[0].text = str(detail[0])
        cells[1].text = detail[1]
        cells[2].text = str(detail[2])
        cells[3].text = str(detail[3])
        cells[4].text = str(detail[4])
        # Aligning and Styling the each row
        cells[1].width = Inches(2)
        cells[2].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.RIGHT
        cells[3].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.RIGHT
        cells[4].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.RIGHT
    # Adding an empty line
    emptyLines(1)
    # -------------------------------
    # Sub Total
    # -------------------------------
    # Creating the template for Sub Total
    subtotal_container = document.add_paragraph("Sub Total : ")
    subtotal_container.alignment = WD_ALIGN_PARAGRAPH.RIGHT
    # Filling the Sub Total template with the value of sub total
    sub_total = subtotal_container.add_run(f"{subTotal}")
    sub_total.font.size = Pt(14)
    sub_total.font.name = "Times New Roman"
    sub_total.font.bold = True
    # -------------------------------
    # Sales Tax
    # -------------------------------
    # Creating the template for Sales Tax
    salestax_container = document.add_paragraph("Sales Tax : ")
    salestax_container.alignment = WD_ALIGN_PARAGRAPH.RIGHT
    # Filling the Sales Tax template with the value of sales tax
    sales_tax = salestax_container.add_run(f"{calculatedTAX}")
    sales_tax.font.size = Pt(13)
    sales_tax.font.name = "Times New Roman"
    sales_tax.font.bold = True
    # Adding an empty line
    emptyLines(1)
    # -------------------------------
    # Total Amount
    # -------------------------------
    # Creating the template for Total Amount
    totalamount_container = document.add_paragraph("Total Amount : ")
    totalamount_container.alignment = WD_ALIGN_PARAGRAPH.RIGHT
    # Filling the Total Amount template with the value of total amount
    total_amount = totalamount_container.add_run(
        f"{totalAmount} {currencySymbol}")
    total_amount.font.size = Pt(15)
    total_amount.font.name = "Times New Roman"
    total_amount.font.bold = True
    # Adding an empty line
    emptyLines(2)
    # -------------------------------
    # Customer Message
    # -------------------------------
    # Creating the template for Customer Message
    customermsg_container = document.add_paragraph("")
    customermsg_container.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # Filling the Customer Message template with the value of Customer Message
    customer_msg = customermsg_container.add_run(f"~ {customerMessage} ~")
    customer_msg.font.size = Pt(13)
    customer_msg.font.name = "Times New Roman"
    customer_msg.font.bold = True
    # -------------------------------
    # Balance Amount
    # -------------------------------
    # Adding an empty line
    emptyLines(1)
    # Creating the template for Balance Amount
    balanceamount_container = document.add_paragraph("Balance Amount : ")
    balanceamount_container.alignment = WD_ALIGN_PARAGRAPH.LEFT
    # Setting the value of Balance Amount by checking the Balance Amount is zero
    to_pay = "Nil" if (int(balanceAmount) ==
                       0) else f"{balanceAmount} {currencySymbol}"
    # Filling the Balance Amount template with the value of balance amount
    balance_amount = balanceamount_container.add_run(f"{to_pay}")
    balance_amount.font.size = Pt(13)
    balance_amount.font.name = "Times New Roman"
    balance_amount.font.bold = True
    # Setting the value of payment status by checking the Balance Amount
    paymentStatus = "Paid" if (int(balanceAmount) == 0) else "Pending"
    # Settings the Date purchase and Payment status as content for footer
    footerDate = section.footer.paragraphs[0]
    footerDate.text = f"Dated On : {purchaseDate}\t\tPayment Status : {paymentStatus}"
    footerDate.style = document.styles["Footer"]
    # Generating the name of the document from the inVoice id
    pathToDOCXFile = f"{inVoiceId}.docx"
    count = 0
    while (os.path.exists(pathToDOCXFile)):
        count += 1
        # Generating the new name of the document from the inVoice id
        pathToDOCXFile = f"{inVoiceId}_{count}.docx"
    # Saving the document as in the generated name from inVoice id
    document.save(pathToDOCXFile)
| Kumara2mahe/in_Voice | inVoiceDB.py | inVoiceDB.py | py | 50,993 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_nu... |
21091581358 | import sys
import time
import yaml
from watchdog.observers import Observer
from watchdog.events import *
import ftplib
config_file = "client.yml"


def get_config(index):
    """Return the value stored under key *index* in the YAML config file.

    The file is re-read on every call, so edits to client.yml take effect
    without restarting the client.
    """
    with open(config_file) as f:
        # safe_load only builds plain Python objects; yaml.load with
        # FullLoader can instantiate arbitrary-ish classes and is not
        # recommended for config files.
        return yaml.safe_load(f)[index]
class MyHandler(FileSystemEventHandler):
    """Watchdog handler that uploads a monitored file via FTP when it changes."""

    def on_modified(self, event):
        """If event.src_path is one of the configured files, upload it.

        Bug fix: the original leaked both the local file handle and the FTP
        session on every upload; both are now context-managed (ftplib.FTP's
        context manager sends QUIT and closes the socket on exit).
        """
        files = get_config("files")
        if event.src_path.replace("./", "") in files:
            print("log file %s changed!" % event.src_path)
            with ftplib.FTP(get_config("server_ip"), get_config("user_name"), get_config("password")) as session:
                with open(event.src_path, "rb") as file:
                    session.storbinary(f"STOR {event.src_path}", file)
if __name__ == "__main__":
    # Announce which files the watcher will react to.
    print("Monitored Files: " + str(get_config("files")))
    watcher = Observer()
    watcher.schedule(MyHandler(), path='.', recursive=False)
    watcher.start()
    try:
        # Idle loop: watchdog delivers events on its own background thread.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        watcher.stop()
    watcher.join()
{
"api_name": "yaml.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "ftplib.FTP",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "watchdog.observers.Observer... |
32726100359 | from collections import deque
from pathlib import Path
import random
from PIL import Image, ImageTk
from tkinter import Tk, Label
from typing import Callable, Optional, Sequence
from abc import ABC, abstractmethod
import numpy as np
import torch
import gym
from collections import namedtuple
from ..game.play import (Problem)
EpisodeData = namedtuple('EpisodeData', "obs reward done info".split()) # type: ignore
class GymProblem(Problem):
    """Adapt a gym.Env to this package's Problem interface.

    Caches the latest transition as an EpisodeData so reward() /
    observation() / done() can report it, and forwards any unknown
    attribute lookup to the wrapped environment via __getattr__.
    """

    def __init__(self, gym: gym.Env, seed: int = 0) -> None:
        # NOTE: the parameter named `gym` shadows the imported module inside
        # this method; every `gym.` access below is the env instance.
        self._gym = gym
        # Mirror the spaces/range as plain attributes so callers can
        # introspect them without going through __getattr__.
        self.action_space = gym.action_space
        self.observation_space = gym.observation_space
        self.reward_range = gym.reward_range
        self._episode_n = 0
        self._episode_data = None
        self._gym.seed(seed)  # seed once at construction for reproducibility
        self.reset()

    def reward(self):
        # Reward of the most recent step (0 immediately after reset()).
        return self._episode_data.reward

    def observation(self):
        # Observation from the most recent step()/reset().
        return self._episode_data.obs

    def done(self):
        # Whether the current episode has terminated.
        return self._episode_data.done

    def reset(self):
        """Reset the env, cache a fresh zero-reward transition, return obs."""
        obs = self._gym.reset()
        self._episode_data = EpisodeData(obs, 0, False, dict())
        return obs

    def step(self, a):
        """Apply action *a*; cache and return the (obs, reward, done, info) tuple."""
        x = self._gym.step(a)
        self._episode_data = EpisodeData(*x)
        return x

    def render(self, *a, **kw):
        # Pass-through to the wrapped environment's renderer.
        self._gym.render(*a, **kw)

    def episode_reset(self, episode_n):
        """Record the episode counter and reset the environment."""
        self._episode_n = episode_n
        return self.reset()

    def __getattr__(self, a):
        # Fallback delegation: anything not defined here comes from the env.
        return getattr(self._gym, a)
# copy of the un-exported method from collections.abc._check_methods
def _check_methods(C, *methods):
    """Return True if every name in *methods* resolves (non-None) in C's MRO.

    Returns NotImplemented otherwise, following the __subclasshook__
    convention. A method explicitly set to None anywhere in the MRO marks
    it as unsupported and also yields NotImplemented, exactly as in
    collections.abc. Kept byte-for-byte close to the stdlib original on
    purpose -- do not "improve" it.
    """
    mro = C.__mro__
    for method in methods:
        for B in mro:
            if method in B.__dict__:
                if B.__dict__[method] is None:
                    return NotImplemented
                break
        else:
            # method was not found anywhere in the MRO
            return NotImplemented
    return True
class RenderIO(ABC):
    """Abstract sink for rendered PIL images (on-screen display, disk, ...)."""

    @abstractmethod
    def write(self, pil: Image.Image) -> None:
        """Consume one rendered frame."""

    @abstractmethod
    def close(self) -> None:
        """Release any resources held by the sink."""

    @classmethod
    def __subclasshook__(cls, subclass):
        # Bug fix: the hook previously probed for an "out" method, but the
        # abstract interface defines "write"; duck-typed implementations
        # were therefore never recognised by issubclass()/isinstance().
        return _check_methods(subclass, "write", "close")
class RenderShow(RenderIO):
    """RenderIO sink that displays frames in a Tk window."""

    def __init__(self, tk=Tk):
        # The window factory is injectable so tests can substitute a stub.
        self.tk = tk()

    def write(self, pil: Image.Image):
        """Show one frame in the window and pump the Tk event loop once."""
        img = ImageTk.PhotoImage(pil)
        panel = Label(self.tk, image=img)
        # Bug fix: Tk does not hold a strong reference to the PhotoImage;
        # without this attribute the image is garbage-collected as soon as
        # this method returns and the label renders blank.
        panel.image = img
        panel.pack(side="bottom", fill="both", expand="yes")
        self.tk.update_idletasks()
        self.tk.update()

    def close(self):
        """Destroy the window."""
        self.tk.destroy()
class RenderSave(RenderIO):
    """RenderIO sink that saves frames as numbered PNG files."""

    def __init__(self, img_save_dir: Path = Path("rewards")) -> None:
        self.img_save_dir = img_save_dir
        self.count = 0  # index of the next frame to save

    def _img_path(self) -> Path:
        # Bug fix: "{:%04d}" is an invalid format spec (it mixes printf and
        # str.format syntax) and raised ValueError; use a zero-padded
        # integer field instead.
        return self.img_save_dir / "render_{:04d}.png".format(self.count)

    def write(self, pil: Image.Image, count: Optional[int] = None):
        """Save one frame; *count* optionally overrides the frame index.

        Bug fixes vs. the original: the explicit *count* is actually used
        now (it was computed and discarded, and the old `count or
        self.count` idiom also mishandled an explicit 0), and _img_path is
        *called* -- previously the bound method itself was passed to
        pil.save via str().
        """
        if count is not None:
            self.count = count
        pil.save(str(self._img_path()))
        self.count += 1

    def close(self):
        """Reset the frame counter."""
        self.count = 0
class GymImgEnv(Problem):
    """DeepMind-style Atari image environment (stacked 84x84 grayscale frames).

    Keeps a rolling window of the last `args.history_length` preprocessed
    frames, repeats each action for 4 emulator frames (max-pooling the last
    two), and -- in training mode -- treats the loss of a life as episode end.

    NOTE(review): self.ale is built with gym.make(), yet the calls below
    (getMinimalActionSet, act, getScreenGrayscale, game_over, reset_game,
    lives, getScreenRGB) belong to the raw atari_py ALE interface -- confirm
    that the gym env actually forwards them (the original direct-ALE setup
    is kept commented out in __init__).
    """

    def __init__(self, args, renderio: Callable[[], RenderIO] = RenderSave) -> None:
        self.device = args.device
        # self.ale = atari_py.ALEInterface()
        # self.ale.setInt('random_seed', args.seed)
        # self.ale.setInt('max_num_frames', args.max_episode_length)
        # self.ale.setFloat('repeat_action_probability', 0) # Disable sticky actions
        # self.ale.setInt('frame_skip', 0)
        # self.ale.setBool('color_averaging', False)
        # self.ale.loadROM(atari_py.get_game_path(args.game)) # ROM loading must be done after setting options
        self.ale = gym.make(args.game + "-v0")
        actions = self.ale.getMinimalActionSet()
        # Map contiguous indices 0..n-1 onto the minimal raw action codes.
        self.actions = dict((i, e) for i, e in zip(range(len(actions)), actions))
        self.lives = 0  # Life counter (used in DeepMind training)
        self.life_termination = False  # Used to check if resetting only from loss of life
        self.window = args.history_length  # Number of frames to concatenate
        self.state_buffer = deque(
            [], maxlen=args.history_length
        )  # type: Sequence
        self.training = True  # Consistent with model training mode
        self.renderio = renderio()

    def _get_state(self):
        # Current screen -> 84x84 grayscale, pixel values scaled into [0, 1]
        # as a float32 tensor on the configured device.
        state = Image.fromarray(
            self.ale.getScreenGrayscale().squeeze()
        ).resize((84, 84), resample=Image.BILINEAR)
        return torch.tensor(np.asarray(state),
                            dtype=torch.float32, device=self.device).div_(255)

    def _reset_buffer(self):
        # Fill the history window with all-black frames.
        for _ in range(self.window):
            self.state_buffer.append(torch.zeros(84, 84, device=self.device))

    def reset(self):
        """Begin a new episode (or resume after a lost life); return stacked state."""
        if self.life_termination:
            self.life_termination = False  # Reset flag
            self.ale.act(0)  # Use a no-op after loss of life
        else:
            # Reset internals
            self._reset_buffer()
            self.ale.reset_game()
            # Perform up to 30 random no-ops before starting
            for _ in range(random.randrange(30)):
                self.ale.act(0)  # Assumes raw action 0 is always no-op
                if self.ale.game_over():
                    self.ale.reset_game()
        # Process and return "initial" state
        observation = self._get_state()
        self.state_buffer.append(observation)
        self.lives = self.ale.lives()
        return torch.stack(list(self.state_buffer), 0)

    def step(self, action):
        """Apply *action* with a 4-frame repeat; return (state, reward, done)."""
        # Repeat action 4 times, max pool over last 2 frames
        frame_buffer = torch.zeros(2, 84, 84, device=self.device)
        reward, done = 0, False
        for t in range(4):
            reward += self.ale.act(self.actions.get(action))
            if t == 2:
                frame_buffer[0] = self._get_state()
            elif t == 3:
                frame_buffer[1] = self._get_state()
            done = self.ale.game_over()
            if done:
                break
        # Max over the last two frames suppresses Atari sprite flicker.
        observation = frame_buffer.max(0)[0]
        self.state_buffer.append(observation)
        # Detect loss of life as terminal in training mode
        if self.training:
            lives = self.ale.lives()
            # Lives > 0 for Q*bert
            if lives < self.lives and lives > 0:
                # Only set flag when not truly done
                self.life_termination = not done
                done = True
            self.lives = lives
        # Return state, reward, done
        return torch.stack(list(self.state_buffer), 0), reward, done

    # Uses loss of life as terminal signal
    def train(self):
        self.training = True

    # Uses standard terminal signal
    def eval(self):
        self.training = False

    def action_space(self):
        # Size of the discrete (minimal) action set.
        return len(self.actions)

    def render(self):
        # Channel order is reversed before display -- presumably the source
        # buffer is BGR; confirm against the env's getScreenRGB contract.
        pil = Image.fromarray(self.ale.getScreenRGB()[:, :, ::-1])
        self.renderio.write(pil)

    def close(self):
        self.renderio.close()
| wecacuee/floyd-warshal-rl | fwrl/prob/gym.py | gym.py | py | 6,947 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.namedtuple",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "game.play.Problem",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "gym.Env",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "gym.action_spac... |
19154810876 | from numpy import *
from time import sleep
import json
import urllib2
# Data loading helper
def loadDataSet(fileName):
    """Load a tab-delimited data file.

    Every column except the last is treated as a feature; the last column
    is the target value.

    Bug fix: the original opened the file twice (once just to count the
    columns) and never closed either handle; the file is now read once
    inside a context manager. An empty file yields ([], []).

    Returns:
        (dataMat, labelMat): list of feature rows, list of float targets.
    """
    with open(fileName) as fr:
        lines = fr.readlines()
    if not lines:
        return [], []
    # Number of feature columns: total columns minus the target column.
    numFeat = len(lines[0].split('\t')) - 1
    dataMat = []
    labelMat = []
    for line in lines:
        curLine = line.strip().split('\t')
        dataMat.append([float(v) for v in curLine[:numFeat]])
        labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
# Standard regression: solve the normal equation for the best-fit line
def standRegres(xArr, yArr):
    """Ordinary least squares via the normal equation.

    Returns the weights as a column matrix, or None (after printing a
    warning) when X^T X is singular and cannot be inverted.
    """
    X = mat(xArr)
    y = mat(yArr).T  # column vector
    gram = X.T * X
    # Inverting X^T X requires a non-zero determinant.
    if linalg.det(gram) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    return gram.I * (X.T * y)
# Locally weighted linear regression using a Gaussian kernel of bandwidth k.
# Like kNN, the weighting assumes nearby samples follow the same local
# linear model; note the distinction between the sample weights (the kernel
# matrix) and the regression coefficients computed from them.
def lwlr(testPoint, xArr, yArr, k=1.0):
    """Predict the target at *testPoint* with locally weighted regression."""
    X = mat(xArr)
    y = mat(yArr).T  # column vector
    numSamples = shape(X)[0]
    kernel = mat(eye(numSamples))  # diagonal weight matrix
    for idx in range(numSamples):
        diff = testPoint - X[idx, :]
        # Gaussian kernel: weight decays with squared distance to testPoint.
        kernel[idx, idx] = exp(diff * diff.T / (-2.0 * k ** 2))
    gram = X.T * (kernel * X)
    if linalg.det(gram) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    ws = gram.I * (X.T * (kernel * y))
    return testPoint * ws
def lwlrTest(testArr, xArr, yArr, k=1.0):
    """Run lwlr() on every row of testArr (handy for tuning the bandwidth k)."""
    numPoints = shape(testArr)[0]
    yHat = zeros(numPoints)  # one prediction per query row
    for idx in range(numPoints):
        yHat[idx] = lwlr(testArr[idx], xArr, yArr, k)
    return yHat
def lwlrTestPlot(xArr, yArr, k=1.0):
    """Like lwlrTest, but evaluates on a sorted copy of X for clean plotting.

    Returns (yHat, xCopy), where xCopy is the column-sorted copy of xArr.
    """
    yHat = zeros(shape(yArr))
    xCopy = mat(xArr)
    xCopy.sort(0)  # sort each column ascending, in place on the copy
    for row in range(shape(xArr)[0]):
        yHat[row] = lwlr(xCopy[row], xArr, yArr, k)
    return yHat, xCopy
def rssError(yArr, yHatArr):
    """Residual sum of squares between targets and predictions (both ndarrays)."""
    residual = yArr - yHatArr
    return (residual ** 2).sum()
# Ridge regression
def ridgeRegres(xMat, yMat, lam=0.2):
    """Ridge regression weights: (X^T X + lam*I)^-1 X^T y.

    lam shrinks the coefficients and keeps the system invertible when
    X^T X alone is singular. ('lambda' is a Python keyword, hence 'lam'.)
    Returns None (after printing a warning) if the penalized matrix is
    still singular, which can only happen with lam == 0.
    """
    penalized = xMat.T * xMat + eye(shape(xMat)[1]) * lam
    if linalg.det(penalized) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    return penalized.I * (xMat.T * yMat)
def ridgeTest(xArr, yArr):
    """Compute ridge weights over 30 log-spaced lambdas exp(-10)..exp(19).

    The target is mean-centred and the features are standardized as
    (x - mean) / variance (dividing by the variance, not the standard
    deviation, is this module's convention). Returns a 30 x n matrix with
    one weight vector per lambda.
    """
    X = mat(xArr)
    y = mat(yArr).T  # column vector
    y = y - mean(y, 0)
    X = (X - mean(X, 0)) / var(X, 0)
    numTestPts = 30
    wMat = zeros((numTestPts, shape(X)[1]))
    for i in range(numTestPts):
        ws = ridgeRegres(X, y, exp(i - 10))
        wMat[i, :] = ws.T
    return wMat
def regularize(xMat):
    """Return a standardized copy of xMat: (x - column mean) / column variance.

    Divides by the variance rather than the standard deviation, matching
    ridgeTest's convention in this module.
    """
    standardized = xMat.copy()  # copy first, otherwise we'd mutate the caller's data
    colMeans = mean(standardized, 0)
    colVars = var(standardized, 0)
    return (standardized - colMeans) / colVars
# Forward stagewise linear regression: close to the lasso in effect, but
# computationally much simpler.
def stageWise(xArr, yArr, eps=0.01, numIt=100):
    """Greedy forward-stagewise regression.

    Args:
        xArr: feature rows.
        yArr: target values.
        eps: step size applied to a single coefficient per iteration.
        numIt: number of iterations.

    Returns:
        A numIt x n matrix whose i-th row is the weight vector after
        iteration i (useful for plotting the coefficient paths).
    """
    xMat = mat(xArr)
    yMat = mat(yArr).T  # column vector
    yMean = mean(yMat, 0)
    yMat = yMat - yMean  # centre y (fully standardizing y would shrink the correlations)
    xMat = regularize(xMat)
    m, n = shape(xMat)
    returnMat = zeros((numIt, n))  # records w after every iteration
    ws = zeros((n, 1))
    wsTest = ws.copy()  # .copy() is required: plain assignment would alias ws
    wsMax = ws.copy()
    for i in range(numIt):  # greedy: each pass keeps the single best +/- eps move
        lowestError = inf  # running best (smallest) squared error
        for j in range(n):  # try nudging each feature's weight ...
            for sign in [-1, 1]:  # ... in both directions
                wsTest = ws.copy()
                wsTest[j] += eps * sign
                yTest = xMat * wsTest
                rssE = rssError(yMat.A, yTest.A)  # squared error of this trial move
                if rssE < lowestError:
                    lowestError = rssE
                    wsMax = wsTest  # best candidate so far
        ws = wsMax.copy()
        returnMat[i, :] = ws.T
    return returnMat
# Fetch Lego set listings from the Google Shopping API
def searchForSet(retX, retY, setNum, yr, numPce, origPrc):
    """Append (features, selling price) pairs for one Lego set to retX/retY.

    Feature vector: [year, piece count, new-flag, original price].

    NOTE(review): this targets the long-retired Google Shopping Search API
    and uses the Python-2-only urllib2 module, so it cannot run as-is on
    Python 3; kept for fidelity with the book's chapter 8 code.
    """
    sleep(10)  # pause between calls -- presumably to respect the API rate limit
    myAPIstr = 'AIzaSyD2cR2KFyx12hXu6PFU-wrWot3NXvko8vY'
    searchURL = 'https://www.googleapis.com/shopping/search/v1/public/products?key=%s&country=US&q=lego+%d&alt=json' % (
        myAPIstr, setNum)
    pg = urllib2.urlopen(searchURL)
    retDict = json.loads(pg.read())
    for i in range(len(retDict['items'])):
        try:
            currItem = retDict['items'][i]
            if currItem['product']['condition'] == 'new':
                newFlag = 1
            else:
                newFlag = 0
            listOfInv = currItem['product']['inventories']
            for item in listOfInv:
                sellingPrice = item['price']
                # Skip listings priced below half the original price
                # (presumably incomplete sets).
                if sellingPrice > origPrc * 0.5:
                    print("%d\t%d\t%d\t%f\t%f" % (yr, numPce, newFlag, origPrc, sellingPrice))
                    retX.append([yr, numPce, newFlag, origPrc])
                    retY.append(sellingPrice)
        except:
            # NOTE(review): bare except silently skips malformed items and
            # hides real errors; consider catching KeyError explicitly.
            print('problem with item %d' % i)
def setDataCollect():
    """Collect price data for six Lego sets from saved HTML pages into out.txt.

    Arguments per call: (html path, output file, release year, piece count,
    original price).

    NOTE(review): scrapePage is not defined or imported in this module --
    confirm where it is expected to come from before calling this.
    """
    scrapePage('setHtml/lego8288.html', 'out.txt', 2006, 800, 49.99)
    scrapePage('setHtml/lego10030.html', 'out.txt', 2002, 3096, 269.99)
    scrapePage('setHtml/lego10179.html', 'out.txt', 2007, 5195, 499.99)
    scrapePage('setHtml/lego10181.html', 'out.txt', 2007, 3428, 199.99)
    scrapePage('setHtml/lego10189.html', 'out.txt', 2008, 5922, 299.99)
    scrapePage('setHtml/lego10196.html', 'out.txt', 2009, 3263, 249.99)
# Cross-validation for ridge regression
def crossValidation(xArr, yArr, numVal=10):
    """Pick the best ridge lambda via numVal rounds of 90/10 cross-validation.

    Prints the best weight vector mapped back to the raw (unstandardized)
    feature scale, together with its constant term.
    """
    m = len(yArr)  # number of samples
    # Bug fix: range() objects cannot be shuffled in Python 3; materialize
    # the index list once and reshuffle it every round.
    indexList = list(range(m))
    errorMat = zeros((numVal, 30))  # one row per round, one column per lambda
    for i in range(numVal):
        trainX = []
        trainY = []
        testX = []
        testY = []
        random.shuffle(indexList)
        for j in range(m):  # first 90% of the shuffled indices -> training set
            if j < m * 0.9:
                trainX.append(xArr[indexList[j]])
                trainY.append(yArr[indexList[j]])
            else:  # remaining 10% -> test set
                testX.append(xArr[indexList[j]])
                testY.append(yArr[indexList[j]])
        wMat = ridgeTest(trainX, trainY)  # 30 weight vectors, one per lambda
        for k in range(30):
            matTestX = mat(testX)
            matTrainX = mat(trainX)
            meanTrain = mean(matTrainX, 0)
            varTrain = var(matTrainX, 0)
            # Standardize the test set using the *training* statistics.
            matTestX = (matTestX - meanTrain) / varTrain
            yEst = matTestX * mat(wMat[k, :]).T + mean(trainY)
            errorMat[i, k] = rssError(yEst.T.A, array(testY))
    meanErrors = mean(errorMat, 0)  # average error per lambda across rounds
    minMean = float(min(meanErrors))
    bestWeights = wMat[nonzero(meanErrors == minMean)]  # weights of the best lambda
    # Map the standardized model back to raw inputs:
    # with standardization, xReg = (x - mean(x)) / var(x), so the raw-scale
    # model is x*w/var(x) - mean(x)*w/var(x) + mean(y).
    xMat = mat(xArr)
    yMat = mat(yArr).T
    meanX = mean(xMat, 0)
    varX = var(xMat, 0)
    unReg = bestWeights / varX
    print("the best model from Ridge Regression is:\n", unReg)
    print("with constant term: ", -1 * sum(multiply(meanX, unReg)) + mean(yMat))
| yhshu/Machine-Learning-in-Action | Ch08-LinearRegression/regression.py | regression.py | py | 9,114 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "time.sleep",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 146,
"usage_type": "call"
}
] |
10793618381 | """Code to interface with the SMA inverters and return the results."""
# Robust initialization and shutdown code courtesy of
# https://github.com/wbenny/python-graceful-shutdown.git
import logging
import sys
import os
import asyncio
import aiohttp
from delayedints import DelayedKeyboardInterrupt
from pvsite import Site
import version
import logfiles
from readconfig import read_config
from exceptions import FailedInitialization
_LOGGER = logging.getLogger("sbhistory")
class SBHistory:
    """Drives the Site poller with interrupt-shielded startup and shutdown.

    Pattern from wbenny/python-graceful-shutdown: _start() and _stop() run
    under DelayedKeyboardInterrupt so Ctrl-C cannot leave resources half
    initialized or half released; the normal exit path is funnelled through
    the same cleanup code by raising NormalCompletion.
    """

    class NormalCompletion(Exception):
        # Internal control-flow signal: the run finished normally, fall
        # through to the shared cleanup in run().
        pass

    class FailedInitialization(Exception):
        # Internal control-flow signal: Site.start() reported failure.
        pass

    def __init__(self, config):
        self._config = config
        self._loop = asyncio.new_event_loop()
        self._session = None  # aiohttp session, created in _astart()
        self._site = None  # Site instance, created in _astart()

    def run(self):
        """Start, wait for completion, then stop -- all interrupt-safe."""
        try:
            try:
                with DelayedKeyboardInterrupt():
                    self._start()
            except KeyboardInterrupt:
                _LOGGER.critical("Received KeyboardInterrupt during startup")
                raise
            self._wait()
            raise SBHistory.NormalCompletion
        except (KeyboardInterrupt, SBHistory.NormalCompletion, SBHistory.FailedInitialization):
            # The _stop() is also shielded from termination.
            try:
                with DelayedKeyboardInterrupt():
                    self._stop()
            except KeyboardInterrupt:
                _LOGGER.critical("Received KeyboardInterrupt during shutdown")

    async def _astart(self):
        # ssl=False disables certificate verification -- presumably because
        # the inverters serve self-signed certificates; confirm before
        # tightening.
        self._session = aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False))
        self._site = Site(self._session, self._config)
        result = await self._site.start()
        if not result:
            raise SBHistory.FailedInitialization

    async def _astop(self):
        """Stop the site and close the HTTP session (both optional-safe)."""
        _LOGGER.info("Closing sbhistory application")
        if self._site:
            await self._site.stop()
        if self._session:
            await self._session.close()

    async def _await(self):
        # Main body of the application: run the site until it completes.
        await self._site.run()

    # Synchronous wrappers that drive the async counterparts on our loop.
    def _start(self):
        self._loop.run_until_complete(self._astart())

    def _wait(self):
        self._loop.run_until_complete(self._await())

    def _stop(self):
        self._loop.run_until_complete(self._astop())
def main():
    """Set up and start sbhistory.

    Reads the configuration twice: once unchecked (enough to start
    logging), then checked for the actual run. Initialization failures are
    reported and swallowed; anything unexpected is logged with a traceback.
    """
    try:
        config = read_config(checking=False)
    except FailedInitialization as e:
        # Logging is not configured yet, so report to stdout.
        print(f"{e}")
        return

    logfiles.start(config)
    _LOGGER.info(f"sbhistory inverter utility {version.get_version()}, PID is {os.getpid()}")
    try:
        sbhistory = SBHistory(read_config(checking=True))
        sbhistory.run()
    except FailedInitialization as e:
        _LOGGER.error(f"{e}")
    except Exception as e:
        # Bug fix: logger.exception records the traceback; the previous
        # plain error() call discarded it, making failures undiagnosable.
        _LOGGER.exception(f"Unexpected exception: {e}")
if __name__ == '__main__':
    # Bug fix: comparing version components separately rejects any future
    # major version with a small minor (e.g. 4.0, since 0 < 8); tuple
    # comparison handles that correctly.
    if sys.version_info >= (3, 8):
        main()
    else:
        print("python 3.8 or better required")
| sillygoose/sbhistory | sbhistory/sbhistory.py | sbhistory.py | py | 2,975 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "asyncio.new_event_loop",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "delayedints.DelayedKeyboardInterrupt",
"line_number": 40,
"usage_type": "call"
},
{
"api... |
19626071979 | # coding: utf-8
from sqlalchemy import Column, DateTime, Integer, String, text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from base import db_url
# Module-level ORM plumbing, created once at import time.
Base = declarative_base()  # shared declarative base for every model below
metadata = Base.metadata
db = create_engine(db_url)  # engine bound to the project-wide database URL
session_maker = sessionmaker(bind=db)  # call session_maker() to open a session per unit of work
class Case(Base):
    """ORM model for a test case: request schema/keywords and expected response."""

    __tablename__ = 'case'

    ID = Column(Integer, primary_key=True)
    NAME = Column(String(20))
    SCHEMA = Column(String(100))
    KEYWORDS = Column(String(100))
    RESPONSE = Column(String(500))
    TOTAL_COUNT = Column(Integer)
    COMMENTS = Column(String(50))

    def __init__(self, **kwargs):
        """Populate the row from keyword arguments; absent keys become None."""
        self.NAME = kwargs.get('name')
        self.SCHEMA = kwargs.get('schema')
        self.KEYWORDS = kwargs.get('keywords')
        self.RESPONSE = kwargs.get('response')
        self.TOTAL_COUNT = kwargs.get('total_count')
        self.COMMENTS = kwargs.get('comments')

    def to_dict(self):
        """Serialize the row to a plain dict.

        Includes 'response', which the original serialization omitted even
        though __init__ accepts it and every sibling model serializes all of
        its columns.
        """
        return {'id': self.ID,
                'name': self.NAME,
                'schema': self.SCHEMA,
                'keywords': self.KEYWORDS,
                'response': self.RESPONSE,
                'total_count': self.TOTAL_COUNT,
                'comments': self.COMMENTS}

    # NOTE(review): __str__ is required by Python to return a str; returning a
    # dict makes str(case) raise TypeError. The dict return is preserved here
    # because existing callers appear to invoke case.__str__() directly for
    # the payload — confirm before changing to a real string.
    def __str__(self):
        return self.to_dict()
class Task(Base):
    """ORM model for a monkey-test task: team/platform target plus a case list."""

    __tablename__ = 'task'

    ID = Column(Integer, primary_key=True)
    NAME = Column(String(20))
    TEAM = Column(String(10))
    PLATFORM = Column(String(20))
    CASES = Column(String(100), nullable=False)
    COMMENTS = Column(String(50))

    def __init__(self, obj):
        """Populate the row from a plain dict; absent keys become None."""
        self.NAME = obj.get('name')
        self.TEAM = obj.get('team')
        self.PLATFORM = obj.get('platform')
        self.CASES = obj.get('cases')
        self.COMMENTS = obj.get('comments')

    def __str__(self):
        """Serialize the row to a plain dict (note: returns a dict, not a str)."""
        return {
            'id': self.ID,
            'name': self.NAME,
            'team': self.TEAM,
            'platform': self.PLATFORM,
            'cases': self.CASES,
            'comments': self.COMMENTS,
        }
class TroubledLog(Base):
    """ORM model for one monkey-test run of a task (state and log bookkeeping)."""

    __tablename__ = 'troubled_log'

    ID = Column(Integer, primary_key=True)
    TASK_ID = Column(Integer)
    TASK_NAME = Column(String(20))
    STATE = Column(String(10))
    # The database fills the creation timestamp on insert.
    CREATE_TIME = Column(DateTime, server_default=text("CURRENT_TIMESTAMP"))
    LOG_SIZE = Column(Integer)
    OFFSET = Column(Integer)

    def __init__(self, **kwargs):
        """Populate the row from keyword arguments; absent keys become None."""
        self.TASK_ID = kwargs.get('task_id')
        self.TASK_NAME = kwargs.get('task_name')
        self.STATE = kwargs.get('state')
        self.LOG_SIZE = kwargs.get('log_size')
        self.OFFSET = kwargs.get('offset')

    def __str__(self):
        """Serialize the row to a camelCase dict (note: returns a dict, not a str)."""
        return {
            'id': self.ID,
            'taskId': self.TASK_ID,
            'taskName': self.TASK_NAME,
            'state': self.STATE,
            'createTime': self.CREATE_TIME,
            'logSize': self.LOG_SIZE,
            'offset': self.OFFSET,
        }
class TroubledLogDetail(Base):
    """ORM model for one troubled-strategy attempt within a run (TroubledLog)."""

    __tablename__ = 'troubled_log_detail'

    ID = Column(Integer, primary_key=True)
    LOG_ID = Column(Integer)
    CASE_ID = Column(Integer)
    CASE_NAME = Column(String(20))
    TROUBLED_STRATEGY = Column(String(20))
    TROUBLED_RESPONSE = Column(String)
    STATE = Column(String(20))
    IS_CRASH = Column(String(5))
    CRASH_LOG = Column(String(500))
    SCREEN_SHOT = Column(String)
    # The database fills the creation timestamp on insert.
    CREATE_TIME = Column(DateTime, server_default=text("CURRENT_TIMESTAMP"))

    def __init__(self, **kwargs):
        """Populate the row from keyword arguments; absent keys become None."""
        self.LOG_ID = kwargs.get('log_id')
        self.CASE_ID = kwargs.get('case_id')
        self.CASE_NAME = kwargs.get('case_name')
        self.TROUBLED_STRATEGY = kwargs.get('troubled_strategy')
        self.TROUBLED_RESPONSE = kwargs.get('troubled_response')
        self.STATE = kwargs.get('state')
        self.IS_CRASH = kwargs.get('is_crash')
        self.CRASH_LOG = kwargs.get('crash_log')
        self.SCREEN_SHOT = kwargs.get('screen_shot')

    def __str__(self):
        """Serialize the row to a camelCase dict (note: returns a dict, not a str).

        Now includes 'state': the original dict omitted it even though
        __init__ sets STATE and the sibling TroubledLog serializes its state.
        """
        return {'id': self.ID,
                'logId': self.LOG_ID,
                'caseId': self.CASE_ID,
                'caseName': self.CASE_NAME,
                'troubledStrategy': self.TROUBLED_STRATEGY,
                'troubledResponse': self.TROUBLED_RESPONSE,
                'state': self.STATE,
                'isCrash': self.IS_CRASH,
                'crashLog': self.CRASH_LOG,
                'screenShot': self.SCREEN_SHOT,
                'createTime': self.CREATE_TIME}
| natsuforyou/troubledmonkey | models.py | models.py | py | 4,401 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "base.db_url",
"line_number": 12,
"usage_type": "argument"
},
{
... |
72532105789 | import json
from http import HTTPStatus
from typing import Any, Literal
import httpx
from pydantic import BaseModel, Field
from simcore_service_api_server.utils.http_calls_capture_processing import (
PathDescription,
enhance_from_openapi_spec,
)
class HttpApiCallCaptureModel(BaseModel):
    """
    Captures relevant information of a call to the http api
    """

    name: str
    description: str
    method: Literal["GET", "PUT", "POST", "PATCH", "DELETE"]
    host: str
    path: PathDescription | str
    query: str | None = None
    request_payload: dict[str, Any] | None = None
    response_body: dict[str, Any] | list | None = None
    status_code: HTTPStatus = Field(default=HTTPStatus.OK)

    @classmethod
    def create_from_response(
        cls,
        response: httpx.Response,
        name: str,
        description: str = "",
        enhance_from_openapi_specs: bool = True,
    ) -> "HttpApiCallCaptureModel":
        """Build a capture from an httpx response and its originating request.

        When enhance_from_openapi_specs is True, the path is enriched with
        information from the OpenAPI spec; otherwise the raw URL path is kept.
        """
        request = response.request
        path: PathDescription | str
        if enhance_from_openapi_specs:
            path = enhance_from_openapi_spec(response)
        else:
            path = response.request.url.path
        return cls(
            name=name,
            description=description or f"{request}",
            method=request.method,
            host=request.url.host,
            path=path,
            query=request.url.query.decode() or None,
            request_payload=json.loads(request.content.decode())
            if request.content
            else None,
            response_body=response.json() if response.content else None,
            status_code=HTTPStatus(response.status_code),
        )

    def __str__(self) -> str:
        # FIX: the original f-string was f"{self.description: self.request_desc}",
        # where the ':' turns ' self.request_desc' into an (invalid) format
        # spec, so str() raised ValueError. Render both fields explicitly.
        return f"{self.description}: {self.request_desc}"

    @property
    def request_desc(self) -> str:
        """Short '<METHOD> <path>' summary of the captured request."""
        return f"{self.method} {self.path}"

    def as_response(self) -> httpx.Response:
        """Reconstruct an httpx.Response equivalent to the captured one."""
        return httpx.Response(status_code=self.status_code, json=self.response_body)
def get_captured_as_json(name: str, response: httpx.Response) -> str:
    """Capture *response* under *name* and return it serialized as indented JSON."""
    capture = HttpApiCallCaptureModel.create_from_response(response, name=name)
    serialized: str = capture.json(indent=1)
    return serialized
| ITISFoundation/osparc-simcore | services/api-server/src/simcore_service_api_server/utils/http_calls_capture.py | http_calls_capture.py | py | 2,202 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Literal",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "simcore_service_api_server.utils.http_calls_capture_processing.PathDescription",
"line_number": 22,
"us... |
26530831301 | import json
from oneview_redfish_toolkit.api.composition_service import CompositionService
from oneview_redfish_toolkit.tests.base_test import BaseTest
class TestCompositionService(BaseTest):
    """Tests for CompositionService class"""

    def setUp(self):
        """Tests preparation"""
        # Loading CompositionService mockup result
        with open(
            'oneview_redfish_toolkit/mockups/redfish/CompositionService.json'
        ) as f:
            self.composition_service_mockup = json.load(f)

    def test_class_instantiation(self):
        # Tests if class is correctly instantiated
        try:
            composition_service = CompositionService()
        except Exception as e:
            self.fail("Failed to instantiate CompositionService class."
                      " Error: {}".format(e))
        self.assertIsInstance(composition_service, CompositionService)

    def test_serialize(self):
        # Tests the serialize function result against known result
        try:
            composition_service = CompositionService()
        except Exception as e:
            self.fail("Failed to instantiate CompositionService class."
                      " Error: {}".format(e))
        try:
            expected_result = json.loads(composition_service.serialize())
        except Exception as e:
            # FIX: the original message had no '{}' placeholder, so
            # .format(e) silently dropped the exception detail.
            self.fail("Failed to serialize. Error: {}".format(e))
        self.assertEqualMockup(self.composition_service_mockup,
                               expected_result)
| HewlettPackard/oneview-redfish-toolkit | oneview_redfish_toolkit/tests/api/test_composition_service.py | test_composition_service.py | py | 1,507 | python | en | code | 16 | github-code | 6 | [
{
"api_name": "oneview_redfish_toolkit.tests.base_test.BaseTest",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.api.composition_service.CompositionService",
"line_number":... |
10865376818 | import logging
import json
import datetime
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.gaussian_process import GaussianProcessRegressor
from aggregating.utils import flatten_X, generate_train_set, memory_efficient_predict
from stochastic_models import MaxCallStochasticModel
### general MPI helpers
def generate_logger_MPI(logfile, level, rank):
    """Configure file logging and return a logger named after the MPI rank.

    :param logfile: relative path to the log file
    :type logfile: str
    :param level: logging level (e.g. logging.INFO, logging.DEBUG)
    :type level: int
    :param rank: rank of the MPI process that owns the logger
    :type rank: int
    :return: logger named 'rank<rank>'
    :rtype: logging.Logger
    """
    fmt = ('%(asctime)s.%(msecs)03d %(levelname)s %(name)s'
           ' - %(funcName)s : %(message)s')
    logging.basicConfig(filename=logfile, level=level, format=fmt,
                        datefmt='%Y-%m-%d %H:%M:%S')
    return logging.getLogger("rank%i" % rank)
def write_results(basename, results, Config):
    """Dump a grid-search summary to logs/<basename><timestamp>.json.

    :param basename: prefix for the output file name
    :param results: error values to record
    :param Config: configuration object providing N_train, N_test, M_grid, alpha_grid
    """
    summary = {
        'N_train': Config.N_train,
        'N_test': Config.N_test,
        'mgrid': Config.M_grid,
        'alpha_grid': Config.alpha_grid,
        'errors': results,
    }
    stamp = datetime.datetime.now().strftime("%Y-%m-%d.%H-%M-%S")
    with open("logs/" + basename + f"{stamp}.json", 'w') as fp:
        json.dump(summary, fp)
def write_boosting_results(basename, results, Config):
    """Dump boosting test errors to logs/<basename><timestamp>.json.

    :param basename: prefix for the output file name
    :param results: test error values to record
    :param Config: configuration object providing N_train, N_test, Ratios
    """
    summary = {
        'N_train': Config.N_train,
        'N_test': Config.N_test,
        'Ratios': Config.Ratios,
        'test_errors': results,
    }
    stamp = datetime.datetime.now().strftime("%Y-%m-%d.%H-%M-%S")
    with open("logs/" + basename + f"{stamp}.json", 'w') as fp:
        json.dump(summary, fp)
#### Bagging MPI helpers
def generate_bagging_train_indices(N_train, alpha, M):
    """Draw index sets for M bootstrap training subsets (sampling with
    replacement both within and across the subsets).

    :param N_train: size of the full training set
    :type N_train: int
    :param alpha: fractional size of each subset
    :type alpha: float
    :param M: number of subsets to draw
    :type M: int
    :return: list of M index arrays, each of length round(N_train * alpha)
    :rtype: List[np.ndarray]
    """
    subset_size = round(N_train * alpha)
    return [np.random.choice(N_train, size=subset_size, replace=True)
            for _ in range(M)]
def generate_test_sets(trials, N_test, Delta, d, generator=MaxCallStochasticModel):
    """Generate `trials` independent test sets and stack them.

    Uses the generate_train_set helper from the aggregating package for each
    trial, then stacks the per-trial arrays along a new leading axis.

    :return: (X, y) stacked arrays with leading dimension `trials`
    """
    xs = []
    ys = []
    for _ in range(trials):
        X_trial, y_trial = generate_train_set(N_test, Delta, d, generator)
        xs.append(X_trial)
        ys.append(y_trial)
    return np.stack(xs, axis=0), np.stack(ys, axis=0)
def train_and_evaluate(model, X_train, y_train, X_test_list):
    """Fit a GPR on the training data, then predict on every test set.

    :param model: the sklearn GaussianProcessRegressor to fit
    :param X_train: training inputs
    :param y_train: training targets
    :param X_test_list: iterable of test input arrays
    :return: list of (mu, sigma) prediction tuples, one per test set
    :rtype: List[Tuple[np.ndarray, np.ndarray]]
    """
    assert isinstance(model, GaussianProcessRegressor)
    model.fit(X_train, y_train)
    # Chunked prediction keeps memory bounded for large test sets.
    return [memory_efficient_predict(model, x_test, max_size=20000)
            for x_test in X_test_list]
def soft_prediction(predictor_lists, epsilon=1e-10):
    """Precision-weighted (inverse-sigma) average of the ensemble predictions.

    Each member's prediction is weighted by 1/(sigma + epsilon), so members
    that report lower uncertainty contribute more.

    :param predictor_lists: [(mu_i, sigma_i)] for each ensemble member
    :param epsilon: small constant guarding against division by zero
    :return: aggregated predictions
    :rtype: np.ndarray
    """
    n_points = predictor_lists[0][0].shape[0]
    weighted_sum = np.zeros(n_points)
    weight_total = np.zeros(n_points)
    for mu, sigma in predictor_lists:
        weighted_sum = weighted_sum + mu.flatten() / (sigma + epsilon)
        weight_total = weight_total + 1 / (sigma + epsilon)
    return weighted_sum / weight_total
def hard_prediction(predictor_lists):
    """Unweighted mean of the ensemble members' predictions (sigmas ignored).

    :param predictor_lists: [(mu_i, sigma_i)] for each ensemble member
    :return: averaged predictions
    :rtype: np.ndarray
    """
    total = np.zeros(predictor_lists[0][0].shape[0])
    for mu, _sigma in predictor_lists:
        total = total + mu.flatten()
    return total / len(predictor_lists)
def trials_soft_prediction(predictors_results, trials):
    """Soft-aggregate (precision-weighted) the ensemble output per trial.

    :param predictors_results: [[(mu, sigma) per trial] per predictor]
    :param trials: number of trials
    :return: one aggregated prediction array per trial
    :rtype: list
    """
    return [
        soft_prediction([member[t] for member in predictors_results])
        for t in range(trials)
    ]
def trials_hard_prediction(predictors_results, trials):
    """Hard-aggregate (plain average) the ensemble output per trial.

    :param predictors_results: [[(mu, sigma) per trial] per predictor]
    :param trials: number of trials
    :return: one aggregated prediction array per trial
    :rtype: list
    """
    return [
        hard_prediction([member[t] for member in predictors_results])
        for t in range(trials)
    ]
| tlpss/ML-Project2 | mpi/utils.py | utils.py | py | 6,471 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.... |
32506800793 | #!/usr/bin/env python3
"""
Read API data directly via internet and output to pipe
"""
import json
import logging
import time
import requests
from .. import defs
from . import net
from .order import ApiOrder, API_CHANNEL_SSE_NAME
from .pkt import BlocksatPkt, BlocksatPktHandler
logger = logging.getLogger(__name__)  # module-level logger
MAX_SEQ_NUM = 2**31  # Maximum transmission sequence number
DEFAULT_REGIONS = list(range(0, len(defs.satellites)))  # every satellite region index
class DemoRx():
    """Demo receiver

    Fetches API message orders from the Satellite API server (via SSE or
    polling) and transmits them as Blocksat packets over UDP sockets at a
    throttled bit rate, optionally confirming the transmission back to the
    server.
    """

    def __init__(self,
                 server,
                 socks,
                 kbps,
                 tx_event,
                 channel,
                 regions=None,
                 tls_cert=None,
                 tls_key=None,
                 poll=False,
                 sock_by_region=False):
        """ DemoRx Constructor

        Args:
            server : API server address where the order lives.
            socks : Instances of UdpSock over which to send the packets
            kbps : Target bit rate in kbps.
            tx_event : SSE event to use as trigger for transmissions.
            channel : API channel number.
            regions : Regions to process and potentially confirm Tx.
            tls_key : API client key (for Tx confirmation).
            tls_cert : API client certificate (for Tx confirmation).
            poll : Poll messages directly from the Satellite API queue
                instead of listening to server-sent events.
            sock_by_region : Map each UdpSock to a region so that each socket
                serves messages on a single region only. Requires the socks
                parameter to have the same length as the regions parameter.

        """
        # Validate args
        assert (isinstance(socks, list))
        assert (all([isinstance(x, net.UdpSock) for x in socks]))

        # Configs
        self.server = server
        self.socks = socks
        self.kbps = kbps
        self.tx_event = tx_event
        self.channel = channel
        self.regions_list = DEFAULT_REGIONS if not regions else regions
        self.regions_set = set(self.regions_list)
        self.tls_cert = tls_cert
        self.tls_key = tls_key
        self.poll = poll
        # Admin mode (client-certificate access) is inferred from the
        # presence of both TLS credentials.
        self.admin = tls_cert is not None and tls_key is not None
        if sock_by_region and len(self.regions_list) != len(socks):
            raise ValueError(
                "Number of sockets must be equal to the number of regions")
        self.sock_by_region = sock_by_region

    def _send_pkts(self, pkts, socks):
        """Transmit Blocksat packets of the API message over all sockets

        Transmit and sleep (i.e., block) to guarantee the target bit rate.

        Args:
            pkts : List of BlocksatPkt objects to be send over sockets
            socks : List of sockets over which to send packets.

        """
        assert (isinstance(pkts, list))
        assert (all([isinstance(x, BlocksatPkt) for x in pkts]))

        byte_rate = self.kbps * 1e3 / 8  # bytes / sec
        next_tx = time.time()
        for i, pkt in enumerate(pkts):
            # Send the same packet on all sockets
            for sock in socks:
                sock.send(pkt.pack())
            logger.debug("Send packet %d - %d bytes" % (i, len(pkt)))

            # Throttle: schedule against the accumulated absolute deadline
            # (next_tx) rather than sleeping a fixed delay per packet, so
            # per-iteration overhead does not reduce the effective rate.
            if (byte_rate > 0):
                tx_delay = len(pkt) / byte_rate
                next_tx += tx_delay
                sleep = next_tx - time.time()
                if (sleep > 0):
                    time.sleep(sleep)

    def _handle_event(self, event_data):
        """Handle event broadcast by the SSE server

        Args:
            event_data (dict): Event data (JSON-encoded order info).

        """
        order = json.loads(event_data)
        logger.debug("Order: " + json.dumps(order, indent=4, sort_keys=True))

        # Proceed when the event matches the target Tx trigger event
        if (order["status"] != self.tx_event):
            return
        self._handle_order(order)

    def _handle_order(self, order_info):
        """Fetch the order data and send it over UDP

        Args:
            order_info (dict): Dictionary with the order's Tx sequence number
                and message size.

        """
        # The 'regions' field of the order info has different contents in
        # polling and SSE mode. In SSE mode, it contains the missing regions
        # for transmission, whereas, in polling mode (reading from
        # /order/:uuid), it contains all the original regions, regardless of
        # whether or not the transmission is pending. Nevertheless, when
        # operating in polling mode as admin (fetching from
        # /admin/order/:uuid), the order info includes the "tx_confirmations"
        # field, which can be used to adjust the regions field such that it
        # contains the missing regions only.
        order_regions = set(order_info['regions'])
        if 'tx_confirmations' in order_info:
            confirmed_tx_regions = set(order_info['tx_confirmations'])
            order_regions = order_regions - confirmed_tx_regions

        # Ensure the order includes a region covered by this instance
        served_regions = order_regions & self.regions_set
        if (served_regions == set()):
            logger.debug("Demo-Rx region(s) not covered by this order")
            return

        seq_num = order_info["tx_seq_num"]
        logger.info("Message %-5d\tSize: %d bytes\t" %
                    (seq_num, order_info["message_size"]))

        # Get the API message data
        order = ApiOrder(self.server,
                         seq_num=seq_num,
                         tls_cert=self.tls_cert,
                         tls_key=self.tls_key)
        data = order.get_data()
        if (data is None):
            logger.debug("Empty message. Skipping...")
            return

        # Define the sockets over which the order should be transmitted
        tx_socks = []
        if self.sock_by_region:
            for region, sock in zip(self.regions_list, self.socks):
                if region in order_regions:
                    tx_socks.append(sock)
        else:
            tx_socks = self.socks

        # Split API message data into Blocksat packet(s)
        tx_handler = BlocksatPktHandler()
        tx_handler.split(data, seq_num, self.channel)
        pkts = tx_handler.get_frags(seq_num)

        if (self.kbps > 0):
            logger.debug("Transmission is going to take: "
                         "{:g} sec".format(len(data) * 8 / (self.kbps * 1e3)))

        # Send the packet(s)
        self._send_pkts(pkts, tx_socks)

        # Send transmission confirmation to the server
        order.confirm_tx(list(served_regions))

    def run_sse_client(self):
        """Server-sent Events (SSE) Client

        Connects to the server's subscribe endpoint and reacts to each
        broadcast event, reconnecting on recoverable network errors.
        """
        logger.info("Connecting with Satellite API server...")

        sleep = False
        while (True):
            try:
                # Back off briefly after a failed attempt before reconnecting.
                if sleep:
                    time.sleep(2)
                    sleep = False

                sse_channel = API_CHANNEL_SSE_NAME[self.channel]
                endpoint = '/admin/subscribe/' if self.admin else '/subscribe/'
                r = requests.get(self.server + f"{endpoint}{sse_channel}",
                                 stream=True,
                                 cert=(self.tls_cert, self.tls_key))
                r.raise_for_status()
                logger.info("Connected. Waiting for events...\n")

                # Continuously wait for events
                event_line = 'event:' + sse_channel
                event_next = False
                for line in r.iter_lines():
                    if not line:
                        continue
                    dec_line = line.decode()
                    if dec_line.startswith(':'):  # comment to be ignored
                        continue
                    logger.debug(line)
                    # SSE frames arrive as an 'event:' line followed by a
                    # 'data:' line; only handle data for the target channel.
                    if dec_line.startswith(event_line):
                        event_next = True
                        continue
                    if event_next and dec_line.startswith('data:'):
                        self._handle_event(dec_line.replace('data:', ''))
                        event_next = False
            except requests.exceptions.HTTPError as e:
                # Non-2xx response: give up instead of retrying.
                logger.error(e)
                break
            except requests.exceptions.ChunkedEncodingError as e:
                logger.debug(e)
                pass
            except requests.exceptions.ConnectionError as e:
                logger.debug(e)
                sleep = True
                pass
            except requests.exceptions.RequestException as e:
                logger.debug(e)
                sleep = True
                pass
            except KeyboardInterrupt:
                exit()

            logger.info("Reconnecting...")

    def run_poll_client(self):
        """Polling-based client

        Repeatedly polls the server's 'transmitting' queue and handles any
        order (or retransmission attempt) not processed before.
        """
        order_mgr = ApiOrder(self.server,
                             tls_cert=self.tls_cert,
                             tls_key=self.tls_key)
        # Set of "<seq_num>-<attempt>" ids already handled.
        # NOTE(review): this set grows unbounded over a long-running session;
        # confirm the memory growth is acceptable.
        tx_set = set()
        while (True):
            try:
                tx_orders = order_mgr.get_orders(['transmitting'],
                                                 self.channel,
                                                 queue='transmitting')

                # There can only be one order in transmitting state at a time
                if len(tx_orders) > 1:
                    logger.warning("More than one order in transmitting "
                                   "state on channel {}".format(self.channel))

                # Filter out any repeated orders (already transmitted), except
                # for those the server is explicitly retransmitting.
                new_orders = list()
                for order_info in tx_orders:
                    is_retransmission = 'retransmission' in order_info and \
                        order_info['retransmission'] is not None and \
                        'retry_count' in order_info['retransmission']
                    tx_attempt = 0 if not is_retransmission else \
                        order_info['retransmission']['retry_count']
                    order_id = "{}-{}".format(order_info['tx_seq_num'],
                                              tx_attempt)
                    if order_id not in tx_set:
                        tx_set.add(order_id)
                        new_orders.append(order_info)

                if new_orders:
                    for order_info in new_orders:
                        logger.debug(
                            "Order: " +
                            json.dumps(order_info, indent=4, sort_keys=True))
                        self._handle_order(order_info)
                else:
                    # Nothing new: wait before polling again.
                    time.sleep(1)
            except requests.exceptions.ConnectionError as e:
                logger.debug(e)
                time.sleep(1)
                pass
            except KeyboardInterrupt:
                exit()

    def run(self):
        """Run the demo-rx transmission loop"""
        if self.poll:
            self.run_poll_client()
        else:
            self.run_sse_client()
| Blockstream/satellite | blocksatcli/api/demorx.py | demorx.py | py | 11,171 | python | en | code | 949 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pkt.BlocksatPkt",
"line_number": 89,
"usage_type": "argument"
},
{
"api_name": "time.time",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pkt.pack",
"line_... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.