seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14295656112 | import pinocchio as pin
import numpy as np
class NLinkCartpole(object):
    """N-link cartpole modeled as a Pinocchio kinematic chain.

    The chain is a prismatic cart joint sliding along the global x axis,
    followed by N revolute pole joints. The control input u actuates the
    cart and every revolute joint except the last (top) one.
    """

    def __init__(self, N, link_length, link_mass):
        """Build the Pinocchio model and cache its dimensions.

        N: number of pole links.
        link_length: length of each pole link (also the cart box side).
        link_mass: mass of each link (cart and poles).
        """
        self.N = N
        self.pin_model = pin.Model()
        # Add the cart joint (prismatic joint along x axis)
        jidx = 0
        joint_model = pin.JointModelPrismaticUnaligned()
        joint_model.axis[0] = 1
        joint_model.axis[1] = 0
        joint_model.axis[2] = 0
        jidx = self.pin_model.addJoint(jidx, joint_model, pin.SE3.Identity(), 'cart_joint')
        # Add cart link
        inertia = pin.Inertia.FromBox(link_mass, link_length, link_length, link_length)
        link_se3 = pin.SE3.Identity()
        self.pin_model.appendBodyToJoint(jidx, inertia, link_se3)
        self.pin_model.addBodyFrame('cart_link', jidx, link_se3, -1)
        # Add the additional revolute joints
        for i in range(N):
            joint_se3 = pin.SE3.Identity()
            if i != 0:
                # Each subsequent joint sits at the tip of the previous link.
                joint_se3.translation[2] = link_length
            if i == 0:
                # The joint rotates about its x axis. Align the joint's x axis with the global y axis
                joint_se3.rotation = pin.exp3(np.array([0, 0, -np.pi/2]))
            joint_model = pin.JointModelRevoluteUnaligned()
            joint_model.axis[0] = 1
            joint_model.axis[1] = 0
            joint_model.axis[2] = 0
            # BUGFIX: joints need unique names; previously every revolute
            # joint was (re)named 'cart_joint'.
            jidx = self.pin_model.addJoint(jidx, joint_model, joint_se3, 'pole_joint' + str(i))
            # Add link (cylinder; its body frame sits at the link midpoint)
            inertia = pin.Inertia.FromCylinder(link_mass, 0.05, link_length)
            link_se3 = pin.SE3.Identity()
            link_se3.translation[2] = link_length/2
            self.pin_model.appendBodyToJoint(jidx, inertia, link_se3)
            self.pin_model.addBodyFrame('pole_link' + str(i), jidx, link_se3, -1)
        self.pin_data = pin.Data(self.pin_model)
        self.nq = self.pin_model.nq  # configuration dimension
        self.nv = self.pin_model.nv  # velocity dimension (== nq: all 1-dof joints)
        self.nx = self.nq + self.nv  # state dimension
        self.nu = self.pin_model.nv - 1  # top joint is unactuated

    def get_dim(self):
        """Return (state dimension nx, control dimension nu)."""
        return self.nx, self.nu

    def unpack(self, x, u):
        """Split state x into (q, v) and embed u into a full torque vector."""
        q = x[:self.nq]
        v = x[self.nq:]
        tau = np.zeros(self.nv)
        tau[:-1] = u  # last (top) joint receives zero torque
        return q, v, tau

    # u actuates the cart, and all revolute joints except the last (top) one
    def dynamics(self, x, u):
        """Continuous-time dynamics xdot = f(x, u) via Pinocchio's ABA."""
        q, v, tau = self.unpack(x, u)
        xdot = np.zeros(self.nq + self.nv)
        xdot[:self.nq] = np.copy(v)
        # BUGFIX: index from nq (not nv) for consistency with the slice
        # above; identical here since nq == nv, but correct in general.
        xdot[self.nq:] = pin.aba(self.pin_model, self.pin_data, q, v, tau)
        return xdot

    def lin_dynamics(self, x, u):
        """Return the Jacobians (A, B) of xdot = f(x, u) about (x, u)."""
        q, v, tau = self.unpack(x, u)
        A = np.zeros((2*self.nv, 2*self.nv))
        B = np.zeros((2*self.nv, self.nu))
        dtau_du = np.zeros((self.nv, self.nu))
        dtau_du[:-1] = np.eye(self.nu)
        A[:self.nv, self.nv:] = np.eye(self.nv)
        pin.computeABADerivatives(self.pin_model, self.pin_data, q, v, tau)
        A[self.nv:, :self.nv] = self.pin_data.ddq_dq
        A[self.nv:, self.nv:] = self.pin_data.ddq_dv
        B[self.nv:] = self.pin_data.Minv@dtau_du
        return A, B
| EpicDuckPotato/final_project_16715 | src/n_link_cartpole.py | n_link_cartpole.py | py | 2,776 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pinocchio.Model",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pinocchio.JointModelPrismaticUnaligned",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pinocchio.SE3.Identity",
"line_number": 14,
"usage_type": "call"
},
{
"api_... |
2119785373 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
@FILE : Month6_general_analysis.py
@TIME : 2023/07/20 22:34:20
@AUTHOR : wangyu / NMC
@VERSION : 1.0
@DESC : 本文件负责进行 2023年 6 月 华北地区高温事件的基础统计分析
华北地区 6 月份高温事件集中在 6月14-17日, 6月21-30日
'''
### to import parent dir files ###
# import os, sys
### this is for jupyter notebook ###
#current_folder = globals()['_dh'][0]
#parentdir = os.path.dirname(current_folder
### this is for normal python file ###
#parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#sys.path.insert(0,parentdir)
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import cv2
import geopandas as gpd
from utils import common_func as cf
from utils import DataIO as DIO
from utils import draw_pictures as DP
def draw_daily_max_temperature_stat(fn_t2m_stat, fn_daily_tmax, fn_png, idate_lst, nor, sou, wst, est, dlon, dlat):
    """Stub: station-based daily max temperature plot. Not implemented yet."""
def draw_daily_max_temperature(fn_t2m_grd, fn_tmx24_stat, fn_daily_tmax, fn_png, idate_lst, nor, sou, wst, est, dlon, dlat):
    """Plot the spatial distribution of the daily 2m maximum temperature.

    The daily maximum is the max of the hourly maxima between 12H and 20H
    local standard time of the given day. Example North-China bounds:
    nor, sou, wst, est = 45, 30, 110, 125.
    Temperature data is the ERA-5 ``mx2t`` field:
    # Maximum 2m temperature since previous post-processing
    # defined as:
    # This parameter is the highest temperature of air at 2m above the
    # surface of land, sea or inland water since the parameter was last
    # archived in a particular forecast. 2m temperature is calculated by
    # interpolating between the lowest model level and the Earth's
    # surface, taking account of the atmospheric conditions. This
    # parameter has units of kelvin (K). Temperature measured in kelvin
    # can be converted to degrees Celsius (°C) by subtracting 273.15.
    """
    ### create daily tmx24 file
    # Daily max = max over the 12H~20H (LST) hourly maxima; the source
    # stores times in UTC, hence the -8 hour offsets below.
    ifn_tmx_1h = fn_t2m_grd.format(t=idate_lst)
    # BUGFIX: the longitude variable name was misspelled 'longitue'
    # (writeNC below already used the correct 'longitude').
    bvld, tmx_1h, lons, lats = DIO.readNC(ifn_tmx_1h, 'mx2t', 'longitude', 'latitude',
                                          nor=nor, sou=sou, wst=wst, est=est, dlon=dlon, dlat=dlat)
    if not bvld:
        return None
    tmx_daily = np.max(tmx_1h[12-8:21-8, :, :], axis=0)-273.15
    cnts_lonlat = daily_heatwave_detect(tmx_daily)
    ifn_tmx_daily = fn_daily_tmax.format(t=idate_lst)
    DIO.writeNC(ifn_tmx_daily, tmx_daily, lons, lats, 'mx2t', 'longitude', 'latitude')
    ### load station data
    ifn_tmx24_stat = fn_tmx24_stat.format(t=idate_lst)
    # Grid geometry passed to the station-data loader. (A duplicate,
    # unused nrows/ncols computation before readNC was removed.)
    nrows = int((nor-sou)/dlat + 1.5)
    ncols = int((est-wst)/dlon + 1.5)
    dst_lonlat = {
        "nor": nor,
        "sou": sou,
        "wst": wst,
        "est": est,
        "dlon": dlon,
        "dlat": dlat,
        "nrows": nrows,
        "ncols": ncols,
        "ngrids": nrows*ncols
    }
    ifn_png = fn_png.format(t=idate_lst.replace(hour=20))
    _, stat_data = DIO.load_m3(ifn_tmx24_stat, dst_latlon_dict=dst_lonlat, encoding='gbk')
    # Keep only stations at or above the 35 degC heat-wave threshold.
    stat_data = stat_data[stat_data["val"]>=35]
    #####################
    ### draw pictures ###
    tmax = np.max(tmx_daily)
    tmin = np.min(tmx_daily)
    clvls = np.arange(np.floor(tmin), np.ceil(tmax+1), 1)
    titles_left = {
        'left': 'Date: {t:%Y/%m/%d}'.format(t=idate_lst),
        'right': 'ERA5 Daily TMX24: %.2f ℃' % (tmax)
    }
    titles_right = {
        'right': 'Station Daily TMX24: %.2f ℃' % (np.max(stat_data["val"].values))
    }
    #DP.show_2D_mat(tmx_daily, lons, lats, ifn_png, False, clvls=clvls, cmap=mpl.colormaps['seismic'], titles=titles,
    #               contours_lonlat=cnts_lonlat)
    DP.show_2D_mat_2pic_with_obs(dmat1=tmx_daily, lons1=lons, lats1=lats, clvls1=clvls, cmap1=mpl.colormaps['seismic'], title1=titles_left, contours_lonlat1=cnts_lonlat,
                                 lons2=lons, lats2=lats, obs2=stat_data, obs2_cmap=mpl.colormaps['Reds'], title2=titles_right, contours_lonlat2=cnts_lonlat,
                                 archive_fn=ifn_png, )
def daily_heatwave_detect(dmat, thresh=35.0):
    """Detect heat-wave regions in one day's daily-max temperature field.

    Thresholds the field at ``thresh`` (degrees C), removes isolated pixels
    with a morphological opening, and extracts the outer contours of the
    remaining regions.

    Returns a list of contours, each a list of [lon, lat] points.

    NOTE(review): this function reads the module-level names wst, sou,
    dlon, dlat, which are only bound inside the __main__ block — calling
    it from another module raises NameError. Consider passing them in.
    """
    # Build a binary mask of grid points above the threshold
    _, masked_mat = cv2.threshold(dmat, thresh, 255, cv2.THRESH_BINARY)
    kernel = np.ones((3, 3), np.uint8)
    # Morphological opening drops speckle noise before contour extraction
    masked_mat = cv2.morphologyEx(masked_mat.astype(np.uint8), cv2.MORPH_OPEN, kernel, iterations=1)
    contours, hierarchy = cv2.findContours(masked_mat, mode=cv2.RETR_EXTERNAL,
                                           method=cv2.CHAIN_APPROX_SIMPLE)
    # Convert pixel-space contours to longitude/latitude coordinates
    cnts_lonlat = cf.trans_cvContours_to_lonlat(contours, wst, sou, dlon, dlat)
    return cnts_lonlat
def heatwave_co_pair():
    """Pair the >35 degC heat-wave regions detected on different days into
    a single continuous heat-wave event.

    TODO: not implemented yet (stub).
    """
if __name__ == "__main__":
fn_tmx01_grd = 'y:/ERA5_T2MX/{t:%Y/%Y%m%d}.nc'
fn_daily_tmax = 'y:/TMP/Huabei_Daily_TMax/{t:%Y%m%d}.nc'
fn_tmx24_stat = 'z:/YLRC_STATION/TEMP/rtmx24/{t:%Y/%Y%m%d%H}.000'
fn_png = 'f:/华北地区热浪分析/T2Max/{t:%Y%m%d}.png'
#nor, sou, wst, est = 47, 28, 108, 127
nor, sou, wst, est = 60, 10, 70, 140
dlon, dlat = 0.25, 0.25
idate = datetime(2023, 6, 1)
date_end = datetime(2023, 6, 1)
# step 1.
while idate <= date_end:
draw_daily_max_temperature(fn_tmx01_grd, fn_tmx24_stat, fn_daily_tmax, fn_png, idate, nor, sou, wst, est, dlon, dlat)
idate += timedelta(days=1)
| wangy1986/HB-HeatWave-Analysis | heatwave_general_analysis.py | heatwave_general_analysis.py | py | 5,896 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.DataIO.readNC",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "utils.DataIO",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "numpy.max",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "utils.DataIO.writeNC",
... |
35920582247 | from flask import Flask
from pymongo import MongoClient
import json
# Module-level MongoDB handle: default localhost connection,
# database "devdb", collection "jobs".
client = MongoClient()
db = client.devdb
collection = db.jobs
# Load the job-listing fixtures shipped with the app.
def load_data():
    """Read ./app/data.json and return its parsed contents."""
    with open("./app/data.json", "r") as handle:
        return json.load(handle)
# insert data if not already inserted
# (seeds the collection exactly once, on the first run against an empty DB)
if collection.count_documents({}) == 0:
    data = load_data()
    result = collection.insert_many(data)
    print ("total inserted:", len(result.inserted_ids))
# make job application Flask object
jobapp = Flask(__name__, static_url_path='/static')
import app.routes as routes | shailstorm/joblistings | app/__init__.py | __init__.py | py | 627 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 23,
"usage_type": "call"
}
] |
36809853370 | # -*- coding:utf-8 -*-
import multiprocessing
import os

# Gunicorn configuration for the karelapan Django project.
# Debug mode is inferred from the settings module name ending in 'dev'.
_debug = os.environ.get('DJANGO_SETTINGS_MODULE', 'karelapan.settings').endswith('dev')

# All runtime files (pid, logs) live next to this config file.
_here = os.path.dirname(__file__)

bind = "unix:/tmp/gunicorn.sock"
#bind = "127.0.0.1:8000"
workers = multiprocessing.cpu_count() * 2 + 1
preload_app = True
daemon = not _debug  # stay in the foreground while debugging
pidfile = os.path.normpath(os.path.join(_here, 'gunicorn.pid'))
accesslog = os.path.normpath(os.path.join(_here, 'access.log'))
errorlog = os.path.normpath(os.path.join(_here, 'error.log'))
loglevel = 'info'
| oca159/karelapan | gconfig.py | gconfig.py | py | 557 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.normpat... |
72644993064 | import os
import unittest
from pathlib import Path
import numpy as np
from pkg_resources import resource_filename
from compliance_checker.acdd import ACDDBaseCheck
from compliance_checker.base import BaseCheck, GenericFile, Result
from compliance_checker.suite import CheckSuite
# Absolute paths to the test fixture files bundled with the package,
# resolved at import time via pkg_resources.
static_files = {
    "2dim": resource_filename("compliance_checker", "tests/data/2dim-grid.nc"),
    "bad_region": resource_filename("compliance_checker", "tests/data/bad_region.nc"),
    "bad_data_type": resource_filename(
        "compliance_checker",
        "tests/data/bad_data_type.nc",
    ),
    "test_cdl": resource_filename("compliance_checker", "tests/data/test_cdl.cdl"),
    "test_cdl_nc": resource_filename(
        "compliance_checker",
        "tests/data/test_cdl_nc_file.nc",
    ),
    "empty": resource_filename("compliance_checker", "tests/data/non-comp/empty.file"),
    "ru07": resource_filename(
        "compliance_checker",
        "tests/data/ru07-20130824T170228_rt0.nc",
    ),
    "netCDF4": resource_filename(
        "compliance_checker",
        "tests/data/test_cdl_nc4_file.cdl",
    ),
}
class TestSuite(unittest.TestCase):
    """End-to-end tests for compliance_checker.suite.CheckSuite: loading
    datasets (netCDF, CDL, pathlib paths), running checker suites, skipping
    and including checks, and formatting the standard output."""

    # @see
    # http://www.saltycrane.com/blog/2012/07/how-prevent-nose-unittest-using-docstring-when-verbosity-2/
    def setUp(self):
        # A fresh CheckSuite with every registered checker, per test.
        self.cs = CheckSuite()
        self.cs.load_all_available_checkers()
    def shortDescription(self):
        return None
    # override __str__ and __repr__ behavior to show a copy-pastable nosetest name for ion tests
    # ion.module:TestClassName.test_function_name
    def __repr__(self):
        name = self.id()
        name = name.split(".")
        if name[0] not in ["ion", "pyon"]:
            return "{} ({})".format(name[-1], ".".join(name[:-1]))
        else:
            return "{} ( {} )".format(
                name[-1],
                ".".join(name[:-2]) + ":" + ".".join(name[-2:]),
            )
    __str__ = __repr__
    def test_suite(self):
        # BWA: what's the purpose of this test? Just to see if the suite
        # runs without errors?
        ds = self.cs.load_dataset(static_files["2dim"])
        self.cs.run(ds, [], "acdd")
    def test_suite_pathlib(self):
        # load_dataset must accept pathlib.Path as well as str paths.
        path_obj = Path(static_files["2dim"])
        ds = self.cs.load_dataset(path_obj)
        self.cs.run(ds, [], "acdd")
    def test_unicode_formatting(self):
        # Output formatting must not choke on non-ASCII check messages.
        ds = self.cs.load_dataset(static_files["bad_region"])
        score_groups = self.cs.run(ds, [], "cf")
        limit = 2
        for checker, rpair in score_groups.items():
            groups, errors = rpair
            score_list, points, out_of = self.cs.standard_output(
                ds.filepath(),
                limit,
                checker,
                groups,
            )
            # This asserts that print is able to generate all of the unicode
            # output
            self.cs.standard_output_generation(groups, limit, points, out_of, checker)
    def test_generate_dataset_netCDF4(self):
        """
        Tests that suite.generate_dataset works with cdl file with netCDF4
        features.
        """
        # create netCDF4 file
        ds_name = self.cs.generate_dataset(static_files["netCDF4"])
        # check if correct name is return
        assert ds_name == static_files["netCDF4"].replace(".cdl", ".nc")
        # check if netCDF4 file was created
        assert os.path.isfile(static_files["netCDF4"].replace(".cdl", ".nc"))
    def test_include_checks(self):
        # Restricting run_all to one check name runs exactly that check.
        ds = self.cs.load_dataset(static_files["bad_data_type"])
        score_groups = self.cs.run_all(ds, ["cf:1.7"], ["check_standard_name"])
        checks_run = score_groups["cf:1.7"][0]
        assert len(checks_run) == 1
        first_check = checks_run[0]
        assert first_check.name == "§3.3 Standard Name"
        assert first_check.value[0] < first_check.value[1]
    def test_skip_checks(self):
        """Tests that checks are properly skipped when specified"""
        ds = self.cs.load_dataset(static_files["2dim"])
        # exclude title from the check attributes
        score_groups = self.cs.run_all(ds, ["acdd"], skip_checks=["check_high"])
        msg_set = {
            msg
            for sg in score_groups["acdd"][0]
            for msg in sg.msgs
            if sg.weight == BaseCheck.HIGH
        }
        skipped_messages = {
            att + " not present" for att in ACDDBaseCheck().high_rec_atts
        }
        # none of the skipped messages should be in the result set
        self.assertTrue(len(msg_set & skipped_messages) == 0)
    def test_skip_check_level(self):
        """Checks level limited skip checks"""
        # ":A"/":M"/":L" suffixes limit the skip to all/medium/low levels.
        ds = self.cs.load_dataset(static_files["ru07"])
        score_groups = self.cs.run_all(
            ds,
            ["cf"],
            skip_checks=[
                "check_flags:A",
                "check_convention_possibly_var_attrs:M",
                "check_standard_name:L",
            ],
        )
        name_set = {sg.name for sg in score_groups["cf"][0]}
        # flattened set of messages
        msg_set = {msg for sg in score_groups["cf"][0] for msg in sg.msgs}
        expected_excluded_names = {
            "§3.5 flag_meanings for lat",
            "§3.5 flag_meanings for lon",
            "§3.5 lat is a valid flags variable",
            "§3.5 lon is a valid flags variable",
        }
        self.assertTrue(len(expected_excluded_names & name_set) == 0)
        # should skip references
        ref_msg = "references global attribute should be a non-empty string"
        self.assertTrue(ref_msg not in msg_set)
        # check_standard_name is high priority, but we requested only low,
        # so the standard_name check should still exist
        standard_name_hdr = "§3.3 Standard Name"
        self.assertTrue(standard_name_hdr in name_set)
    def test_group_func(self):
        # This is checking for issue #183, where group_func results in
        # IndexError: list index out of range
        ds = self.cs.load_dataset(static_files["bad_data_type"])
        score_groups = self.cs.run(ds, [], "cf")
        limit = 2
        for checker, rpair in score_groups.items():
            groups, errors = rpair
            score_list, points, out_of = self.cs.standard_output(
                ds.filepath(),
                limit,
                checker,
                groups,
            )
            # This asserts that print is able to generate all of the unicode output
            self.cs.standard_output_generation(groups, limit, points, out_of, checker)
    def test_score_grouping(self):
        # Testing the grouping of results for output, which can fail
        # if some assumptions are not met, e.g. if a Result object has
        # a value attribute of unexpected type
        res = [
            Result(BaseCheck.MEDIUM, True, "one"),
            Result(BaseCheck.MEDIUM, (1, 3), "one"),
            Result(BaseCheck.MEDIUM, None, "one"),
            Result(BaseCheck.MEDIUM, True, "two"),
            Result(BaseCheck.MEDIUM, np.isnan(1), "two"),  # value is type numpy.bool_
        ]
        score = self.cs.scores(res)
        self.assertEqual(score[0].name, "one")
        self.assertEqual(score[0].value, (2, 4))
        self.assertEqual(score[1].name, "two")
        self.assertEqual(score[1].value, (1, 2))
    def test_cdl_file(self):
        # Testing whether you can run compliance checker on a .cdl file
        # Load the cdl file
        ds = self.cs.load_dataset(static_files["test_cdl"])
        vals = self.cs.run(ds, [], "cf")
        limit = 2
        for checker, rpair in vals.items():
            groups, errors = rpair
            score_list, cdl_points, cdl_out_of = self.cs.standard_output(
                ds.filepath(),
                limit,
                checker,
                groups,
            )
            # This asserts that print is able to generate all of the unicode output
            self.cs.standard_output_generation(
                groups,
                limit,
                cdl_points,
                cdl_out_of,
                checker,
            )
        ds.close()
        # Ok now load the nc file that it came from
        ds = self.cs.load_dataset(static_files["test_cdl_nc"])
        vals = self.cs.run(ds, [], "cf")
        limit = 2
        for checker, rpair in vals.items():
            groups, errors = rpair
            score_list, nc_points, nc_out_of = self.cs.standard_output(
                ds.filepath(),
                limit,
                checker,
                groups,
            )
            # This asserts that print is able to generate all of the unicode output
            self.cs.standard_output_generation(
                groups,
                limit,
                nc_points,
                nc_out_of,
                checker,
            )
        ds.close()
        nc_file_path = static_files["test_cdl"].replace(".cdl", ".nc")
        self.addCleanup(os.remove, nc_file_path)
        # Ok the scores should be equal!
        self.assertEqual(nc_points, cdl_points)
        self.assertEqual(nc_out_of, cdl_out_of)
    def test_load_local_dataset_GenericFile(self):
        # Non-netCDF local files fall back to the GenericFile wrapper.
        resp = self.cs.load_local_dataset(static_files["empty"])
        assert isinstance(resp, GenericFile)
    def test_standard_output_score_header(self):
        """
        Check that the output score header only checks the number of
        of potential issues, rather than the weighted score
        """
        ds = self.cs.load_dataset(static_files["bad_region"])
        score_groups = self.cs.run(ds, [], "cf")
        limit = 2
        groups, errors = score_groups["cf"]
        score_list, all_passed, out_of = self.cs.standard_output(
            ds.filepath(),
            limit,
            "cf",
            groups,
        )
        assert all_passed < out_of
    def test_netCDF4_features(self):
        """
        Check if a proper netCDF4 file with netCDF4-datatypes is created.
        """
        # create and open dataset
        ds = self.cs.load_dataset(static_files["netCDF4"])
        # check if netCDF type of global attributes is correct
        assert isinstance(ds.global_att_of_type_int, np.int32)
        # check if netCDF4 type of global attributes is correct
        assert isinstance(ds.global_att_of_type_int64, np.int64)
        # check if netCDF type of variable is correct
        assert ds["tas"].dtype is np.dtype("float32")
        # check if netCDF4 type of variable is correct
        assert ds["mask"].dtype is np.dtype("int64")
| ioos/compliance-checker | compliance_checker/tests/test_suite.py | test_suite.py | py | 10,559 | python | en | code | 92 | github-code | 36 | [
{
"api_name": "pkg_resources.resource_filename",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pkg_resources.resource_filename",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pkg_resources.resource_filename",
"line_number": 15,
"usage_type": "call"... |
15969019025 | import torch.nn as nn
class MultiOutputCNN(nn.Module):
    """Shared convolutional trunk with one independent classifier head per digit.

    Designed for 3x32x112 inputs: the trunk output flattens to 8000 features
    (64 channels x 5 x 25), and each head maps them to nvocab logits.
    """

    def __init__(self, ndigits, nvocab):
        super(MultiOutputCNN, self).__init__()
        trunk = nn.Sequential()
        # Conv stages as (in_channels, out_channels); every conv is 3x3 and a
        # 2x2 max-pool follows stages 1 and 3:
        # 32x112 -> 30x110 -> 28x108 -> pool 14x54 -> 12x52 -> 10x50 -> pool 5x25
        stages = ((3, 32), (32, 32), (32, 64), (64, 64))
        for depth, (n_in, n_out) in enumerate(stages):
            trunk.add_module('layer_' + str(depth), nn.Conv2d(n_in, n_out, kernel_size=3))
            trunk.add_module('bn_' + str(depth), nn.BatchNorm2d(n_out))
            trunk.add_module('act' + str(depth), nn.ReLU())
            if depth % 2 == 1:
                trunk.add_module('max_pool' + str(depth // 2 + 1), nn.MaxPool2d(2))
        heads = nn.ModuleList(
            nn.Sequential(nn.Linear(8000, 128), nn.ReLU(), nn.Linear(128, nvocab))
            for _ in range(ndigits)
        )
        self.feature_net = trunk
        self.classifier = heads

    def forward(self, x):
        """Return a list of ndigits logit tensors, each (batch, nvocab)."""
        shared = self.feature_net(x).view(x.size(0), -1)
        outputs = []
        for head in self.classifier:
            outputs.append(head(shared))
        return outputs
| waitwaitforget/VerificationCodeRecognition | multiCNN.py | multiCNN.py | py | 1,279 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
3717849813 | from PySide2.QtWidgets import QWidget, QTableWidgetItem, QAbstractItemView, QHeaderView, QMessageBox
from Ventanas.agendar_vuelo import AgendarVuelo
from Database.aeropuerto import *
from Database.hangares_db import traer_todas_aerolineas
from PySide2.QtCore import Qt
import datetime
from datetime import datetime
from datetime import date
import time
class Agenda (QWidget, AgendarVuelo):
    """Window for scheduling flights: shows pending flights per airline and
    lets the operator accept (schedule) or deny (return) them."""

    def __init__ (self, parent = None):
        super().__init__(parent)
        self.padre_ventana = parent
        self.setupUi(self)
        self.setWindowFlag (Qt.Window)
        # Populate the airline combo box and wire up the table/buttons
        self.tabla_general_vuelos()
        self.cargar_combo_aerolineas()
        self.event_tabla_vuelo()
        self.bt_guardarAereolinea.clicked.connect(self.aceptar_vuelo)
        self.bt_regresarAereolinea.clicked.connect(self.denegar_vuelo)
    # -----------------------------------------------------------------------------
    def cargar_combo_aerolineas(self):
        """Fill the airline combo box with every airline from the database."""
        aerolineas = traer_todas_aerolineas()
        # Convert each DB row (a tuple) to a str, stripping tuple punctuation
        characters = "(,')"
        i=0
        while i < len(aerolineas):
            string = str(aerolineas [i])
            for x in range(len(characters)):
                string = string.replace(characters[x],"")
            # Add the cleaned-up string to the combo box
            self.cb_Aerolinea.addItem(str(string))
            i += 1
    # -------------------------------------------------------------------------------
    def tabla_general_vuelos (self):
        """Configure the flights table: row selection, auto-sized columns."""
        self.tb_agendAereolinea.setSelectionBehavior(QAbstractItemView.SelectRows)
        header = self.tb_agendAereolinea.horizontalHeader()
        header.setSectionResizeMode(QHeaderView.ResizeToContents)
        self.tb_agendAereolinea.verticalHeader().setDefaultAlignment(Qt.AlignHCenter)
        self.tb_agendAereolinea.resizeColumnsToContents()
    # -------------------------------------------------------------------------------
    def cargar_tabla_vuelos (self):
        """Load the pending flights of the selected airline into the table."""
        aerolinea = self.cb_Aerolinea.currentText()
        print(aerolinea)
        # Look up the airline's NIT (tax id) and strip tuple punctuation
        nit = consultar_aerolinea(aerolinea)
        print(nit)
        characters = "(,')"
        i=0
        while i < len(nit):
            string = str(nit [i])
            for x in range(len(characters)):
                string = string.replace(characters[x],"")
            i += 1
        print(string)
        datos = traer_vuelos_espera(string)
        print (datos)
        self.tb_agendAereolinea.setRowCount(len(datos))
        for(index_fila, fila) in enumerate(datos):
            # row index, row data
            for (index_celda, celda) in enumerate(fila):
                self.tb_agendAereolinea.setItem(index_fila, index_celda,
                QTableWidgetItem(str(celda)))
    # ---------------------------------------------------------------------------------
    def event_tabla_vuelo (self):
        """Reload the table whenever a different airline is picked."""
        self.cb_Aerolinea.activated.connect(self.cargar_tabla_vuelos)
    # ---------------------------------------------------------------------------------
    def aceptar_vuelo (self):
        """Schedule the selected flight (after user confirmation)."""
        vuelo_seleccionado = self.tb_agendAereolinea.selectedItems()
        if vuelo_seleccionado:
            id_vuelo= vuelo_seleccionado[2].text()
            print (id_vuelo)
            vuelo = vuelo_seleccionado[0].row()
            # Confirmation dialog before scheduling the flight
            dlg = QMessageBox.question(self, "Guardar Vuelo",
            "¿Esta seguro que quiere agendar este vuelo?",
            QMessageBox.Ok, QMessageBox.Cancel)
            # If the user pressed Ok
            if dlg == QMessageBox.Ok:
                if aceptar_vuelo(id_vuelo):
                    self.tb_agendAereolinea.removeRow(vuelo)
                    QMessageBox.information(self, "Agendado", "Vuelo agendado con éxito", QMessageBox.Ok)
        else:
            dlg = QMessageBox(self)
            dlg.setWindowTitle("Error")
            dlg.setText("Para agendar un vuelo primero debe seleccionarlo")
            dlg.setStandardButtons(QMessageBox.Ok)
            dlg.setIcon(QMessageBox.Critical)
            dlg.show()
    # ---------------------------------------------------------------------------------
    def denegar_vuelo (self):
        """Return the selected flight to the airline (after confirmation)."""
        vuelo_seleccionado = self.tb_agendAereolinea.selectedItems()
        if vuelo_seleccionado:
            id_vuelo= vuelo_seleccionado[2].text()
            print (id_vuelo)
            vuelo = vuelo_seleccionado[0].row()
            # Confirmation dialog before returning the flight
            dlg = QMessageBox.question(self, "Regresar Vuelo",
            "¿Esta seguro que quiere regresar este vuelo?",
            QMessageBox.Ok, QMessageBox.Cancel)
            # If the user pressed Ok
            if dlg == QMessageBox.Ok:
                if rechazar_vuelo(id_vuelo):
                    self.tb_agendAereolinea.removeRow(vuelo)
                    QMessageBox.information(self, "Denegado", "Vuelo regresado a la aerolinea", QMessageBox.Ok)
        else:
            dlg = QMessageBox(self)
            dlg.setWindowTitle("Error")
            dlg.setText("Para regresar un vuelo primero debe seleccionarlo")
            dlg.setStandardButtons(QMessageBox.Ok)
            dlg.setIcon(QMessageBox.Critical)
            dlg.show()
| SofhiAM/Aeropuerto_Campanero | Controles/agenda.py | agenda.py | py | 5,405 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "PySide2.QtWidgets.QWidget",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "Ventanas.agendar_vuelo.AgendarVuelo",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.Qt.Window",
"line_number": 18,
"usage_type": "attribute"
}... |
26301777253 | from functools import reduce
from hashlib import md5
from jsonpath import jsonpath
from jsonpath_ng.parser import JsonPathParser
from tools.funclib import get_func_lib
import json
import re
import time
from tools.utils.utils import extract_by_jsonpath, quotation_marks
class Template:
def __init__(self, test, context, functions, params, variable_start_string='{{', variable_end_string='}}', function_prefix='@', param_prefix='$'):
self.param_prefix = param_prefix
self.data = None
self.context = context # 关联参数
self.params = params # 公共参数
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.function_prefix = function_prefix
self.param_prefix = param_prefix
self.stack = list()
# 动态存储接口的请求信息 以便渲染
self.request_url = None
self.request_path = None
self.request_headers = None
self.request_query = None
self.request_body = None
self.func_lib = get_func_lib(test, functions, self.context, self.params)
self.bytes_map = dict()
self.parser = JsonPathParser()
def init(self, data):
self.data = json.dumps(data, ensure_ascii=False)
self.stack.clear()
self.bytes_map.clear()
def set_help_data(self, url, path: str, headers: dict, query: dict, body: dict):
self.request_url = url
self.request_path = path
self.request_headers = headers
self.request_query = query
self.request_body = body
def render(self):
start_stack = list()
start_length = len(self.variable_start_string)
end_length = len(self.variable_end_string)
top = 0
flag = False
for cur in range(len(self.data)):
self.stack.append(self.data[cur])
top += 1
if flag:
self.stack.pop()
top -= 1
flag = False
continue
if reduce(lambda x, y: x + y, self.stack[-start_length:]) == self.variable_start_string:
start_stack.append(top - start_length)
if reduce(lambda x, y: x + y, self.stack[-end_length:]) == self.variable_end_string:
if len(start_stack) == 0:
continue
recent = start_stack.pop()
tmp = ''
for _ in range(top - recent):
tmp += self.stack.pop()
top -= 1
if self.stack[-1] == '"' and self.data[cur + 1] == '"':
self.stack.pop()
top -= 1
flag = True
else:
flag = False
tmp = tmp[::-1]
key = tmp[start_length:-end_length].strip()
key, json_path = self.split_key(key)
if key.startswith(self.function_prefix):
name_args = self.split_func(key, self.function_prefix)
value = self.func_lib(name_args[0], *name_args[1:])
elif key in self.context: # 优先从关联参数中取
if json_path is None:
value = self.context.get(key)
else:
value = extract_by_jsonpath(self.context.get(key), json_path)
elif key in self.params:
if json_path is None:
value = self.params.get(key)
else:
value = extract_by_jsonpath(self.params.get(key), json_path)
elif key.startswith(self.param_prefix) and key[1:] in self.params: # 兼容老版本
if json_path is None:
value = self.params.get(key[1:])
else:
value = extract_by_jsonpath(self.params.get(key[1:]), json_path)
else:
raise KeyError('不存在的公共参数、关联变量或内置函数: {}'.format(key))
if not flag and isinstance(value, str):
if '"' in value:
value = json.dumps(value)[1:-1]
final_value = value
elif isinstance(value, bytes):
final_value = self._bytes_save(value, flag)
elif isinstance(value, list):
final_value = list()
for list_item in value:
if isinstance(list_item, bytes):
final_value.append(self._bytes_save(list_item, False))
else:
final_value.append(list_item)
final_value = json.dumps(final_value)
else:
final_value = json.dumps(value)
for s in final_value:
self.stack.append(s)
top += 1
res = json.loads(reduce(lambda x, y: x + y, self.stack))
if len(self.bytes_map) > 0:
pattern = r'#\{(bytes_\w+_\d+?)\}'
if isinstance(res, str):
bytes_value = self._bytes_slove(res, pattern)
if bytes_value is not None:
res = bytes_value
elif isinstance(res, dict) or isinstance(res, list):
for i, j in zip(jsonpath(res, '$..'), jsonpath(res, '$..', result_type='PATH')):
if isinstance(i, str):
bytes_value = self._bytes_slove(i, pattern)
if bytes_value is not None:
expression = self.parser.parse(j)
expression.update(res, bytes_value)
return res
def _bytes_save(self, value, flag):
bytes_map_key = 'bytes_{}_{}'.format(md5(value).hexdigest(), int(time.time() * 1000000000))
self.bytes_map[bytes_map_key] = value
change_value = '#{%s}' % bytes_map_key
if flag:
final_value = json.dumps(change_value)
else:
final_value = change_value
return final_value
def _bytes_slove(self, s, pattern):
search_result = re.search(pattern, s)
if search_result is not None:
expr = search_result.group(1)
return self.bytes_map[expr]
def replace_param(self, param):
param = param.strip()
search_result = re.search(r'#\{(.*?)\}', param)
if search_result is not None:
expr = search_result.group(1).strip()
if expr.lower() == '_request_url':
return self.request_url
elif expr.lower() == '_request_path':
return self.request_path
elif expr.lower() == '_request_header':
return self.request_headers
elif expr.lower() == '_request_body':
return self.request_body
elif expr.lower() == '_request_query':
return self.request_query
elif expr.startswith('bytes_'):
return self.bytes_map[expr]
else:
# 支持从请求头和查询参数中取单个数据
if expr.lower().startswith("_request_header."):
data = self.request_headers
expr = '$.' + expr[16:]
elif expr.lower().startswith("_request_query."):
data = self.request_query
expr = '$.' + expr[15:]
else:
data = self.request_body
if expr.lower().startswith("_request_body."):
expr = '$.' + expr[14:]
elif not expr.startswith('$'):
expr = '$.' + expr
try:
return extract_by_jsonpath(data, expr)
except:
return param
else:
return param
def split_key(self, key: str):
if key.startswith(self.function_prefix):
return key, None
key_list = key.split(".")
key = key_list[0]
json_path = None
if len(key_list) > 1:
json_path = reduce(lambda x, y: x + '.' + y, key_list[1:])
if key.endswith(']') and '[' in key:
keys = key.split("[")
key = keys[0]
if json_path is None:
json_path = keys[-1][:-1]
else:
json_path = keys[-1][:-1] + "." + json_path
if json_path is not None:
json_path = "$." + json_path
return key, json_path
def split_func(self, statement: str, flag: 'str' = '@'):
pattern = flag + r'([_a-zA-Z][_a-zA-Z0-9]*)(\(.*?\))?'
m = re.match(pattern, statement)
result = list()
if m is not None:
name, _ = m.groups()
args = statement.replace(flag+name, "")
result.append(name)
if args is not None and args != '()':
argList = [str(_) for _ in map(self.replace_param, args[1:-1].split(','))]
argList_length = len(argList)
if not (argList_length == 1 and len(argList[0]) == 0):
if name not in self.func_lib.func_param:
for i in range(argList_length):
result.append(argList[i])
else:
type_list = self.func_lib.func_param[name]
j = 0
for i in range(len(type_list)):
if j >= argList_length:
break
if type_list[i] is str:
result.append(quotation_marks(argList[j]))
j += 1
elif type_list[i] is int:
result.append(int(argList[j]))
j += 1
elif type_list[i] is float:
result.append(float(argList[j]))
j += 1
elif type_list[i] is bool:
result.append(False if argList[j].lower() == 'false' else True)
j += 1
elif type_list[i] is dict:
j, r = self.concat(j, argList, '}')
result.append(r)
elif type_list[i] is list:
j, r = self.concat(j, argList, ']')
result.append(r)
elif type_list[i] is bytes:
result.append(argList[j])
j += 1
elif type_list[i] is None:
result.append(argList[j])
j += 1
else:
raise SplitFunctionError('函数{}第{}个参数类型错误: {}'.format(name, i + 1, type_list[i]))
return result
else:
raise SplitFunctionError('函数错误: {}'.format(statement))
    @staticmethod
    def concat(start: int, arg_list: list, terminal_char: str):
        """Re-join argument pieces that were split on commas.

        A composite literal (dict/list) contains commas, so one logical
        argument may span several entries of `arg_list`.  Starting at
        `start`, candidate joins ending at each piece containing
        `terminal_char` are evaluated until one parses; returns a tuple of
        (index of the next unconsumed piece, parsed or raw value).

        NOTE(review): uses eval() on template-supplied text — only safe for
        trusted input; ast.literal_eval would be the hardened alternative.
        """
        end = start
        length = len(arg_list)
        for i in range(start, length):
            if terminal_char in arg_list[i]:
                end = i
                s = reduce(lambda x, y: x + ',' + y, arg_list[start:end + 1])
                try:
                    return end + 1, eval(quotation_marks(s))
                except:
                    try:
                        s = '"'+s+'"'
                        return end + 1, eval(json.loads(s))
                    except:
                        # This candidate terminator did not parse; keep scanning.
                        continue
        else:
            # for-else: no candidate parsed (or no terminator found) —
            # fall back to returning the raw joined string.
            s = reduce(lambda x, y: x + ',' + y, arg_list[start:end + 1])
            return end + 1, s
class SplitFunctionError(Exception):
    """Raised when a function expression cannot be parsed or its
    arguments cannot be coerced (original: 函数处理错误, "function
    processing error")."""
| Chras-fu/Liuma-engine | core/template.py | template.py | py | 12,092 | python | en | code | 116 | github-code | 36 | [
{
"api_name": "tools.funclib.get_func_lib",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "jsonpath_ng.parser.JsonPathParser",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 36,
"usage_type": "call"
},
{
"api_name"... |
23467111744 | import os
import io
import lmdb
import six
import cv2
from PIL import Image
IMAGE_SAMPLE_HEIGHT = 64
def image_bin_to_pil(image_bin):
    """Wrap raw encoded-image bytes in an in-memory buffer and open them
    as a PIL image (the image is decoded lazily by PIL)."""
    buffer = six.BytesIO(image_bin)  # constructed pre-filled, positioned at 0
    return Image.open(buffer)
def is_valid_label(label, classes):
    """Return True if every character of `label` is a key of `classes`.

    The first unknown character is reported on stdout and False is
    returned.  An empty label is vacuously valid.
    """
    for ch in label:
        # Membership test instead of `.get(ch) is None`, which would
        # wrongly reject keys whose stored value happens to be None.
        if ch not in classes:
            print(f'{ch} is not valid')
            return False
    return True
def load_class_dictionary(path, add_space=False):
    """Load a character-class mapping from a whitespace-separated file.

    Each line is expected to hold at least two whitespace-separated
    fields; the first becomes the key and the second the value.  Blank or
    malformed lines are skipped instead of raising IndexError.

    NOTE(review): `add_space` is accepted but currently unused — confirm
    whether a space entry was meant to be appended to the dictionary.
    """
    class_dict = {}
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            items = line.strip().split()
            if len(items) >= 2:
                class_dict[items[0]] = items[1]
    return class_dict
def load_and_resize(path, label, resize=False):
    """Read an image file, normalise its orientation, return JPEG bytes.

    Clearly portrait images (height > 1.5 * width) holding multi-character
    labels are rotated to landscape.  When `resize` is set the image is
    scaled to IMAGE_SAMPLE_HEIGHT pixels high, preserving aspect ratio
    (computed from the pre-rotation dimensions, as before).
    """
    image = Image.open(path)
    width, height = image.size
    if height > width * 1.5 and len(label) > 1:
        image = image.rotate(90.0, expand=True)
    if resize:
        scaled_width = int(IMAGE_SAMPLE_HEIGHT / height * width)
        image = image.resize((scaled_width, IMAGE_SAMPLE_HEIGHT), Image.LANCZOS)
    with io.BytesIO() as buffer:
        image.save(buffer, format="JPEG")
        return buffer.getvalue()
class MyLMDB:
    """Convenience wrapper around an LMDB store of OCR samples.

    Records are addressed with keys of the form ``'<field>-%09d' % index``
    (fields: image / label / path / row_index_key) plus a global
    ``num-samples`` counter.  Writes are buffered in ``self.cache`` and
    flushed every ``sync_period`` writes and again in ``close()``.
    """

    def __init__(self, path, sync_period=100, mode='w', map_size=10e10):
        """Open (creating the directory if needed) the LMDB env at `path`.

        :param path: directory holding the LMDB files.
        :param sync_period: buffered writes between flushes.
        :param mode: 'w' enables the final flush in close(); anything else
            makes close() a no-op.
        :param map_size: maximum database size in bytes.  Bug fix: this
            parameter used to be ignored (10e10 was hard-coded); the
            default keeps the historical value.
        """
        if not os.path.exists(path):
            os.makedirs(path)
        self.path = path
        self.session = lmdb.open(path, map_size=map_size)
        self.cache = {}
        self.sync_period = sync_period
        self.num_of_write = 0
        self.num_of_samples = self.get_number_of_samples()
        self.mode = mode

    def get_number_of_samples(self):
        """Return the next free 1-based sample index (1 for an empty DB)."""
        with self.session.begin(write=False) as txn:
            num_samples = txn.get('num-samples'.encode())
        return 1 if num_samples is None else int(num_samples)

    def _flush(self):
        """Write the cache plus the updated sample counter to the database."""
        self.cache['num-samples'.encode()] = str(self.num_of_samples - 1).encode()
        with self.session.begin(write=True) as txn:
            for k, v in self.cache.items():
                txn.put(k, v)
        self.num_of_write = 0
        self.cache = {}

    def write_im_label_path(self, im, label, path, row_index, resize=True):
        """Buffer an OpenCV image plus label/path/row-index metadata.

        NOTE(review): `resize` is accepted but unused here — confirm intent.
        """
        image_key = 'image-%09d'.encode() % self.num_of_samples
        label_key = 'label-%09d'.encode() % self.num_of_samples
        path_key = 'path-%09d'.encode() % self.num_of_samples
        row_index_key = 'row_index_key-%09d'.encode() % self.num_of_samples
        _, image_bin = cv2.imencode('.jpeg', im)
        self.cache[image_key] = image_bin
        self.cache[label_key] = label.encode()
        self.cache[path_key] = path.encode()
        self.cache[row_index_key] = row_index.encode()
        self.num_of_samples += 1
        self.num_of_write += 1
        if self.num_of_write > self.sync_period:
            print(f'{self.path} cache write {self.num_of_samples}')
            self._flush()

    def write_image_label(self, image_path, label, resize=True):
        """Load an image file (optionally resized) and buffer it with its label."""
        image_bin = load_and_resize(image_path, label, resize)
        image_key = 'image-%09d'.encode() % self.num_of_samples
        label_key = 'label-%09d'.encode() % self.num_of_samples
        self.cache[image_key] = image_bin
        self.cache[label_key] = label.encode()
        self.num_of_samples += 1
        self.num_of_write += 1
        if self.num_of_write > self.sync_period:
            print(f'{self.path} cache write {self.num_of_samples}')
            self._flush()

    def read_image_label(self, index):
        """Return (image bytes, label str) stored at `index`."""
        label_key = 'label-%09d'.encode() % index
        image_key = 'image-%09d'.encode() % index
        with self.session.begin(write=False) as txn:
            im = txn.get(image_key)
            label = txn.get(label_key).decode('utf-8')
        return im, label

    def read_image_label_path(self, index):
        """Return (image bytes, label, source path) stored at `index`."""
        label_key = 'label-%09d'.encode() % index
        image_key = 'image-%09d'.encode() % index
        path_key = 'path-%09d'.encode() % index
        with self.session.begin(write=False) as txn:
            im = txn.get(image_key)
            label = txn.get(label_key).decode('utf-8')
            path = txn.get(path_key).decode('utf-8')
        return im, label, path

    def read_image_label_path_key(self, index):
        """Return (image bytes, label, path, row index) stored at `index`.

        Bug fix: the row-index key prefix now matches the one used by
        write_im_label_path ('row_index_key-'); the previous mismatched
        'row_index-' prefix made txn.get() return None and crash on
        .decode().
        """
        label_key = 'label-%09d'.encode() % index
        image_key = 'image-%09d'.encode() % index
        path_key = 'path-%09d'.encode() % index
        row_index_key = 'row_index_key-%09d'.encode() % index
        with self.session.begin(write=False) as txn:
            im = txn.get(image_key)
            label = txn.get(label_key).decode('utf-8')
            path = txn.get(path_key).decode('utf-8')
            row_index = txn.get(row_index_key).decode('utf-8')
        return im, label, path, row_index

    def get_row_num(self, index):
        """Count how many consecutive earlier samples share this sample's path."""
        label_key = 'label-%09d'.encode() % index
        image_key = 'image-%09d'.encode() % index
        path_key = 'path-%09d'.encode() % index
        with self.session.begin(write=False) as txn:
            im = txn.get(image_key)
            label = txn.get(label_key).decode('utf-8')
            path = txn.get(path_key).decode('utf-8')
            row_num = 0
            index -= 1
            # Walk backwards while the previous samples came from the same file.
            while index >= 1:
                prev_path_key = 'path-%09d'.encode() % index
                prev_path = txn.get(prev_path_key).decode('utf-8')
                if prev_path != path:
                    break
                index -= 1
                row_num += 1
        return row_num

    def close(self):
        """Flush pending writes (write mode only).

        NOTE(review): the LMDB environment itself is intentionally left
        open, matching the original behaviour.
        """
        if self.mode != 'w':
            return
        self._flush()
{
"api_name": "six.BytesIO",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_numbe... |
8356155101 |
import re  # NOTE(review): unused in this script; kept to avoid breaking anything
import cv2
import numpy as np
import matplotlib.pyplot as plt

# --- Load test images --------------------------------------------------
# OpenCV reads BGR; convert to RGB so matplotlib shows true colours.
flat_chess = cv2.imread('DATA/flat_chessboard.png')
flat_chess = cv2.cvtColor(flat_chess, cv2.COLOR_BGR2RGB)
plt.subplot(321)
plt.imshow(flat_chess)

# The image is RGB at this point, so the grayscale conversion must use
# COLOR_RGB2GRAY (the original COLOR_BGR2GRAY swapped the R/B weights).
gray_flat_chess = cv2.cvtColor(flat_chess, cv2.COLOR_RGB2GRAY)
plt.subplot(322)
plt.imshow(gray_flat_chess, cmap='gray')

real_chess = cv2.imread('DATA/real_chessboard.jpg')
real_chess = cv2.cvtColor(real_chess, cv2.COLOR_BGR2RGB)
plt.subplot(323)
plt.imshow(real_chess)

gray_real_chess = cv2.cvtColor(real_chess, cv2.COLOR_RGB2GRAY)
plt.subplot(324)
plt.imshow(gray_real_chess, cmap="gray")

plt.show(block=False)
plt.pause(1.5)

# --- Harris corner detection -------------------------------------------
gray = np.float32(gray_flat_chess)  # cornerHarris needs float32 input
dst = cv2.cornerHarris(src=gray, blockSize=2, ksize=3, k=0.04)
dst = cv2.dilate(dst, None)  # dilate so corner marks are visible
# Paint red wherever the Harris response exceeds 1% of its maximum.
flat_chess[dst > 0.01 * dst.max()] = [255, 0, 0]
plt.subplot(325)
plt.imshow(flat_chess)

gray = np.float32(gray_real_chess)
dst = cv2.cornerHarris(src=gray, blockSize=2, ksize=3, k=0.04)
dst = cv2.dilate(dst, None)
real_chess[dst > 0.01 * dst.max()] = [255, 0, 0]
plt.subplot(326)
plt.imshow(real_chess)

# Make it full screen (Tk backend specific).
wm = plt.get_current_fig_manager()
wm.window.state('zoomed')
plt.show(block=False)
plt.pause(4)
print("Konec")
| janezv/computerVision | plt.subplot.py | plt.subplot.py | py | 1,350 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplot... |
29412108576 | import mediapipe as mp
import json
mp_pose = mp.solutions.pose
def calculate_pose(image, pose):
    """Run MediaPipe pose estimation on `image` and dump landmarks to JSON.

    :param image: RGB image array accepted by MediaPipe's ``pose.process``.
    :param pose: an initialised ``mp_pose.Pose`` estimator.
    :return: dict keyed by landmark name with x/y/z/visibility values, or
        ``{"error": "NO LANDMARKS"}`` when nothing was detected.

    Side effects: writes ``json_output/landmarks_list.json`` (positional
    list) and ``json_output/landmarks_with_labels.json`` (named dict).
    """
    results = pose.process(image)
    # Identity check: pose_landmarks is None when no person was detected.
    if results.pose_landmarks is None:
        return {
            "error": "NO LANDMARKS"
        }

    landmarks = results.pose_landmarks.landmark
    landmarks_list = [
        {"x": lm.x, "y": lm.y, "z": lm.z, "visibility": lm.visibility}
        for lm in landmarks
    ]
    with open("json_output/landmarks_list.json", "w") as outfile:
        json.dump(landmarks_list, outfile, indent=2)

    landmarks_x_y_z_visibility = {}
    for landmark_id in mp_pose.PoseLandmark:
        lm = landmarks[landmark_id.value]
        landmarks_x_y_z_visibility[landmark_id.name.lower()] = {
            "x": lm.x,
            "y": lm.y,
            "z": lm.z,
            "visibility": lm.visibility,
        }
    # would be nice to extract the filename from args.video or args.output
    # & add to the json filename
    with open("json_output/landmarks_with_labels.json", "w") as outfile:
        json.dump(landmarks_x_y_z_visibility, outfile, indent=2)
    return landmarks_x_y_z_visibility
{
"api_name": "mediapipe.solutions",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 34,
"usage_type": "call"
}
] |
4298322287 |
# 发送纯文本
# SMTP: 邮件传输协议
# 发邮件
import smtplib
# 邮件标题
from email.header import Header
# 邮件正文
from email.mime.text import MIMEText
"""
user, pwd, sender, receiver, content, title
用户名,授权码,发送方邮箱,接收方邮箱,内容,标题
"""
def sendEmail(user, pwd, sender, receiver, content, title):
    """Send `content` through the 163.com SMTP server over SSL.

    :param user: account name used to log in to the SMTP server.
    :param pwd: SMTP authorisation code (not the account password).
    :param sender: From address.
    :param receiver: iterable of To addresses (joined with commas).
    :param content: message payload.
    :param title: subject line.

    NOTE(review): `file` below is a module-level global, not a parameter;
    and MIMEText expects a str while the demo caller passes bytes read in
    'rb' mode — if attachments are the goal this needs
    MIMEApplication/MIMEMultipart instead.  Confirm intended usage.
    """
    # SMTP host for 163.com
    mail_host = "smtp.163.com"
    # Part 1: build the message
    # 1. Wrap the payload into a message object: content, subtype, charset
    message = MIMEText(content, "plain", "utf-8")
    message["Content-Type"] = 'application/octet-stream'
    message["Content-Disposition"] = 'attachment;filename='+ file
    # 2. Sender address
    message["From"] = sender
    # 3. Receiver addresses, comma-joined
    """
    seq.join(sep)
    seq: 分隔符
    sep: 要连接的元素序列,字符串,元组,字典
    返回值:字符串
    """
    message["To"] = ",".join(receiver)
    # 4. Subject line
    message["Subject"] = title
    try:
        # Part 2: send the message
        # 1. Open an SSL connection; 465 is the usual SMTPS port
        smtpObj = smtplib.SMTP_SSL(mail_host, 465)
        # 2. Log in: account name + authorisation code
        smtpObj.login(user, pwd)
        # 3. Send: sender, receiver(s), serialised message body
        smtpObj.sendmail(sender, receiver, message.as_string())
        print("mail has been send successful!")
    except BaseException as e:
        # NOTE(review): BaseException also swallows KeyboardInterrupt /
        # SystemExit; smtplib.SMTPException would be a narrower catch.
        print(e)
if __name__ == '__main__':
    # Demo invocation.
    # NOTE(review): credentials are hard-coded placeholders, `receiver`
    # is a single string (so ",".join iterates its characters), and the
    # payload is raw PNG bytes which MIMEText cannot serialise — this
    # demo likely fails at runtime; confirm before use.
    mail_user = "zst0717123@163.com"
    mail_pwd = "*****"
    sender = "zst0717123@163.com"
    receiver = "@qq.com"
    file = "beautiful_girl.png"
    content = open(file=file,mode='rb').read()
    title = "666"
    sendEmail(mail_user, mail_pwd, sender, receiver, content, title)
| H-Gang/exercise | learn.py | learn.py | py | 1,978 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "email.mime.text.MIMEText",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "smtplib.SMTP_SSL",
"line_number": 43,
"usage_type": "call"
}
] |
23411420670 | from django.conf.urls import url
from .import views
# from views import ClienteAutocomplete
# URL routes for the clientes app.
# NOTE(review): django.conf.urls.url was deprecated in Django 2.0 and
# removed in 4.0 — migrate to django.urls.re_path when upgrading.
urlpatterns = [
    # url(r'^cliente-autocomplete/$', ClienteAutocomplete.as_view(), name='cliente-autocomplete'),
    # App index page.
    url(regex=r'^$', view=views.index, name='index'),
    # Edit an existing client, addressed by numeric id.
    url(r'^cliente/(?P<cliente_id>\d+)/edit/$', views.cliente_edit, name='cliente_edit'),
    # Client detail page.
    url(r'^cliente/(?P<cliente_id>\d+)/$', views.cliente_detalle, name='cliente_detalle'),
    # Create a new client.
    url(r'^cliente/new/$', views.cliente_nuevo, name='cliente_nuevo'),
    # Geraldo reports
    url(r'reserva-report/(?P<pk>\d+)/$', views.reserva_report, name='reserva_report'),
]
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.co... |
"""Plot target temperature along z at the beam spot for several COMSOL
laminar mesh-refinement runs (one curve per fluid mesh factor)."""
import numpy as np
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.ticker import AutoMinorLocator

# datafile from COMSOL
path_to_data = "E://COMSOL//laminar_mesh_refinement/"

# % Model: run40_lam_mesh_fluid_0.5_out.mph
# % Version: COMSOL 5.2.1.262
# % Date: Dec 13 2017, 08:40
# % Dimension: 1
# % Nodes: 16
# % Expressions: 1
# % Description: Line graph
# % cln1x Height
# Mesh factors to compare; each maps to one exported text file.
lst_files = [0.5, 0.75, 1.0, 1.25, 1.5]
lst_files = ['fluid_' + str(file) + '_T_along_z_beamspot.txt' for file in lst_files]

# -------------------------------------------------------------------
# plot
# -------------------------------------------------------------------
plt.rc('text', usetex=True)
plt.rc('font', weight='bold')
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['mathtext.rm'] = 'Arial'
matplotlib.rcParams['mathtext.it'] = 'Arial:italic'
matplotlib.rcParams['mathtext.bf'] = 'Arial:bold'
matplotlib.rcParams['mathtext.tt'] = 'Arial'
matplotlib.rcParams['mathtext.cal'] = 'Arial'
# NOTE(review): newer matplotlib expects 'text.latex.preamble' to be a
# single string, not a list — confirm the installed version accepts this.
matplotlib.rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath']

fig = plt.figure(figsize=(8*0.60,5*0.60))

####################
# axis 1
####################
ax1 = fig.add_subplot(1, 1, 1)

# plot
for file in lst_files:
    # import data
    # NOTE(review): r" \s+" is a regex delimiter (space + whitespace run),
    # which forces pandas' python engine — confirm it matches COMSOL's
    # column spacing; skiprows=7 skips the '%' header block above.
    df = pd.read_csv(path_to_data + file, delimiter=r" \s+", skiprows=7)
    print(df)
    ax1.plot(df.iloc[:,0], df.iloc[:,1], 's-', linewidth=1)

# axes label
ax1.set_ylabel(r'\textbf{Maximum target temperature [$^{\circ}$C]}', fontsize=12, labelpad=10)
ax1.set_xlabel(r'\textbf{Mesh refinement}', fontsize=12, labelpad=2)
# plt.ylim(150,155)
# ticks
# ax1.xaxis.set_ticks(df['ref_fac'].values)
# ax1.yaxis.set_ticks([170, 175, 180, 185])
# minor ticks x
minor_locator = AutoMinorLocator(2)
ax1.xaxis.set_minor_locator(minor_locator)
# minor ticks y
minor_locator = AutoMinorLocator(2)
ax1.yaxis.set_minor_locator(minor_locator)
# tick font size
ax1.tick_params('x', colors='black', labelsize=12)
ax1.tick_params('y', colors='black', labelsize=12)
# grid
# NOTE(review): Axes.grid's `b` argument was renamed `visible` in
# matplotlib 3.5 — update when upgrading.
ax1.grid(b=True, which='major', linestyle='-')#, color='gray')
ax1.grid(b=True, which='minor', linestyle='--')#, color='gray')
# ####################
# # other axis
# ####################
# ax2 = ax1.twinx()
# # plot
# ax2.plot(df['vol_flow_rate_lpmin'], df['Re_number'], '--', marker='D', color='darkred', linewidth=2)
# ax2.yaxis.set_ticks([1000,2000,4000,6000])
# #ax2.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1e'))
# # Use the pyplot interface to change just one subplot...
# # cur_axes = plt.gca()
# # plt.yticks([0, 1.4e7], [r"\textbf{0}", r"\textbf{1.4e7}"])
# # ax2.spines['top'].set_visible(False)
fig.subplots_adjust(left=0.18, right=0.95, top=0.88, bottom=0.18)
#y label coordinates
# ax1.yaxis.set_label_coords(-0.11,0.5)
# plt.savefig('maximum_target_temperature_vs_coolant_flow_rate.eps', dpi=1200)
# plt.savefig('maximum_target_temperature_vs_coolant_flow_rate.svg', dpi=1200)
# plt.savefig('maximum_target_temperature_vs_coolant_flow_rate.png', dpi=1200)
plt.show()
| kromerh/phd_python | 03_COMSOL/02.rotatingTarget/old_py/COMSOL_new_target/mesh_refinement/mesh_refinement_fluid_T_along_z_at_beamspot.py | mesh_refinement_fluid_T_along_z_at_beamspot.py | py | 3,229 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotli... |
22509727291 | import re
import subprocess
import pygit2
from git_deps.utils import abort, standard_logger
from git_deps.gitutils import GitUtils
from git_deps.listener.base import DependencyListener
from git_deps.errors import InvalidCommitish
from git_deps.blame import blame_via_subprocess
class DependencyDetector(object):
    """Class for automatically detecting dependencies between git
    commits. A dependency is inferred by diffing the commit with each
    of its parents, and for each resulting hunk, performing a blame to
    see which commit was responsible for introducing the lines to
    which the hunk was applied.
    Dependencies can be traversed recursively, building a dependency
    tree represented (conceptually) by a list of edges.
    """
    def __init__(self, options, repo=None, logger=None):
        """Set up detector state; `options` supplies recursion, context
        and exclusion settings, `repo` defaults to the current git repo."""
        self.options = options
        if logger is None:
            self.logger = standard_logger(self.__class__.__name__,
                                          options.debug)
        if repo is None:
            self.repo = GitUtils.get_repo()
        else:
            self.repo = repo
        # Nested dict mapping dependents -> dependencies -> files
        # causing that dependency -> numbers of lines within that file
        # causing that dependency. The first two levels form edges in
        # the dependency graph, and the latter two tell us what caused
        # those edges.
        self.dependencies = {}
        # A TODO list (queue) and dict of dependencies which haven't
        # yet been recursively followed. Only useful when recursing.
        self.todo = []
        self.todo_d = {}
        # An ordered list and dict of commits whose dependencies we
        # have already detected.
        self.done = []
        self.done_d = {}
        # A cache mapping SHA1s to commit objects
        self.commits = {}
        # Memoization for branch_contains()
        self.branch_contains_cache = {}
        # Callbacks to be invoked when a new dependency has been
        # discovered.
        self.listeners = []
    def add_listener(self, listener):
        """Register a DependencyListener to receive detection events."""
        if not isinstance(listener, DependencyListener):
            raise RuntimeError("Listener must be a DependencyListener")
        self.listeners.append(listener)
        listener.set_detector(self)
    def notify_listeners(self, event, *args):
        """Invoke the method named `event` on every registered listener."""
        for listener in self.listeners:
            fn = getattr(listener, event)
            fn(*args)
    def seen_commit(self, rev):
        """Return True if `rev` is already in the commit cache."""
        return rev in self.commits
    def get_commit(self, rev):
        """Resolve `rev` to a commit object, memoizing the result."""
        if rev in self.commits:
            return self.commits[rev]
        self.commits[rev] = GitUtils.ref_commit(self.repo, rev)
        return self.commits[rev]
    def find_dependencies(self, dependent_rev, recurse=None):
        """Find all dependencies of the given revision, recursively traversing
        the dependency tree if requested.
        """
        if recurse is None:
            recurse = self.options.recurse
        try:
            dependent = self.get_commit(dependent_rev)
        except InvalidCommitish as e:
            abort(e.message())
        self.todo.append(dependent)
        self.todo_d[dependent.hex] = True
        first_time = True
        # Breadth-first worklist: each iteration fully processes one commit.
        while self.todo:
            sha1s = [commit.hex[:8] for commit in self.todo]
            if first_time:
                self.logger.info("Initial TODO list: %s" % " ".join(sha1s))
                first_time = False
            else:
                self.logger.info("  TODO list now: %s" % " ".join(sha1s))
            dependent = self.todo.pop(0)
            dependent_sha1 = dependent.hex
            del self.todo_d[dependent_sha1]
            self.logger.info("  Processing %s from TODO list" %
                             dependent_sha1[:8])
            if dependent_sha1 in self.done_d:
                self.logger.info("  %s already done previously" %
                                 dependent_sha1)
                continue
            self.notify_listeners('new_commit', dependent)
            if dependent.parents:  # the root commit does not have parents
                parent = dependent.parents[0]
                self.find_dependencies_with_parent(dependent, parent)
            self.done.append(dependent_sha1)
            self.done_d[dependent_sha1] = True
            self.logger.info("  Found all dependencies for %s" %
                             dependent_sha1[:8])
            # A commit won't have any dependencies if it only added new files
            dependencies = self.dependencies.get(dependent_sha1, {})
            self.notify_listeners('dependent_done', dependent, dependencies)
        self.logger.info("Finished processing TODO list")
        self.notify_listeners('all_done')
    def find_dependencies_with_parent(self, dependent, parent):
        """Find all dependencies of the given revision caused by the
        given parent commit. This will be called multiple times for
        merge commits which have multiple parents.
        """
        self.logger.info("    Finding dependencies of %s via parent %s" %
                         (dependent.hex[:8], parent.hex[:8]))
        diff = self.repo.diff(parent, dependent,
                              context_lines=self.options.context_lines)
        for patch in diff:
            path = patch.delta.old_file.path
            self.logger.info("      Examining hunks in %s" % path)
            for hunk in patch.hunks:
                self.blame_diff_hunk(dependent, parent, path, hunk)
    def blame_diff_hunk(self, dependent, parent, path, hunk):
        """Run git blame on the parts of the hunk which exist in the
        older commit in the diff. The commits generated by git blame
        are the commits which the newer commit in the diff depends on,
        because without the lines from those commits, the hunk would
        not apply correctly.
        """
        line_range_before = "-%d,%d" % (hunk.old_start, hunk.old_lines)
        line_range_after = "+%d,%d" % (hunk.new_start, hunk.new_lines)
        self.logger.info("        Blaming hunk %s @ %s (listed below)" %
                         (line_range_before, parent.hex[:8]))
        if not self.tree_lookup(path, parent):
            # This is probably because dependent added a new directory
            # which was not previously in the parent.
            return
        blame = self.run_blame(hunk, parent, path)
        dependent_sha1 = dependent.hex
        self.register_new_dependent(dependent, dependent_sha1)
        line_to_culprit = {}
        for blame_hunk in blame:
            self.process_blame_hunk(dependent, dependent_sha1, parent,
                                    path, blame_hunk, line_to_culprit)
        self.debug_hunk(line_range_before, line_range_after, hunk,
                        line_to_culprit)
    def process_blame_hunk(self, dependent, dependent_sha1, parent,
                           path, blame_hunk, line_to_culprit):
        """Turn one blame hunk into dependency edges, skipping excluded
        commits, and record which blamed commit 'owns' each line."""
        orig_line_num = blame_hunk.orig_start_line_number
        line_num = blame_hunk.final_start_line_number
        dependency_sha1 = blame_hunk.orig_commit_id.hex
        line_representation = f"{dependency_sha1} {orig_line_num} {line_num}"
        self.logger.debug(f"          ! {line_representation}")
        dependency = self.get_commit(dependency_sha1)
        for i in range(blame_hunk.lines_in_hunk):
            line_to_culprit[line_num + i] = dependency.hex
        if self.is_excluded(dependency):
            self.logger.debug(
                "          Excluding dependency %s from line %s (%s)" %
                (dependency_sha1[:8], line_num,
                 GitUtils.oneline(dependency)))
            return
        if dependency_sha1 not in self.dependencies[dependent_sha1]:
            self.process_new_dependency(dependent, dependent_sha1,
                                        dependency, dependency_sha1,
                                        path, line_num)
        self.record_dependency_source(parent,
                                      dependent, dependent_sha1,
                                      dependency, dependency_sha1,
                                      path, line_num, line_representation)
    def debug_hunk(self, line_range_before, line_range_after, hunk,
                   line_to_culprit):
        """Log the hunk diff annotated with the blamed commit per line."""
        diff_format = '          | %8.8s %5s %s%s'
        hunk_header = '@@ %s %s @@' % (line_range_before, line_range_after)
        self.logger.debug(diff_format % ('--------', '-----', '', hunk_header))
        line_num = hunk.old_start
        for line in hunk.lines:
            if "\n\\ No newline at end of file" == line.content.rstrip():
                break
            if line.origin == '+':
                rev = ln = ''
            else:
                rev = line_to_culprit[line_num]
                ln = line_num
                line_num += 1
            self.logger.debug(diff_format %
                              (rev, ln, line.origin, line.content.rstrip()))
    def register_new_dependent(self, dependent, dependent_sha1):
        """Ensure `dependent` has an entry in the dependency map, firing
        the 'new_dependent' event the first time it is seen."""
        if dependent_sha1 not in self.dependencies:
            self.logger.debug("          New dependent: %s" %
                              GitUtils.commit_summary(dependent))
            self.dependencies[dependent_sha1] = {}
            self.notify_listeners("new_dependent", dependent)
    def run_blame(self, hunk, parent, path):
        """Blame the hunk's old line range at `parent`, via pygit2 or a
        git subprocess depending on options."""
        if self.options.pygit2_blame:
            return self.repo.blame(path,
                                   newest_commit=parent.hex,
                                   min_line=hunk.old_start,
                                   max_line=hunk.old_start + hunk.old_lines - 1)
        else:
            return blame_via_subprocess(path,
                                        parent.hex,
                                        hunk.old_start,
                                        hunk.old_lines)
    def is_excluded(self, commit):
        """Return True if `commit` is reachable from any excluded rev."""
        if self.options.exclude_commits is not None:
            for exclude in self.options.exclude_commits:
                if self.branch_contains(commit, exclude):
                    return True
        return False
    def process_new_dependency(self, dependent, dependent_sha1,
                               dependency, dependency_sha1,
                               path, line_num):
        """Record a first-time dependency edge and queue the dependency
        for recursive processing when recursion is enabled."""
        if not self.seen_commit(dependency):
            self.notify_listeners("new_commit", dependency)
        self.dependencies[dependent_sha1][dependency_sha1] = {}
        self.notify_listeners("new_dependency",
                              dependent, dependency, path, line_num)
        self.logger.debug(
            "          New dependency %s -> %s via line %s (%s)" %
            (dependent_sha1[:8], dependency_sha1[:8], line_num,
             GitUtils.oneline(dependency)))
        if dependency_sha1 in self.todo_d:
            self.logger.debug(
                "        Dependency on %s via line %s already in TODO"
                % (dependency_sha1[:8], line_num,))
            return
        if dependency_sha1 in self.done_d:
            self.logger.debug(
                "        Dependency on %s via line %s already done" %
                (dependency_sha1[:8], line_num,))
            return
        if dependency_sha1 not in self.dependencies:
            if self.options.recurse:
                self.todo.append(dependency)
                self.todo_d[dependency.hex] = True
                self.logger.debug("  + Added %s to TODO" %
                                  dependency.hex[:8])
    def record_dependency_source(self, parent,
                                 dependent, dependent_sha1,
                                 dependency, dependency_sha1,
                                 path, line_num, line):
        """Attach the blamed file/line to the dependency edge; a duplicate
        line for the same path indicates an internal error and aborts."""
        dep_sources = self.dependencies[dependent_sha1][dependency_sha1]
        if path not in dep_sources:
            dep_sources[path] = {}
            self.notify_listeners('new_path',
                                  dependent, dependency, path, line_num)
        if line_num in dep_sources[path]:
            abort("line %d already found when blaming %s:%s\n"
                  "old:\n  %s\n"
                  "new:\n  %s" %
                  (line_num, parent.hex[:8], path,
                   dep_sources[path][line_num], line))
        dep_sources[path][line_num] = line
        self.logger.debug("          New line for %s -> %s: %s" %
                          (dependent_sha1[:8], dependency_sha1[:8], line))
        self.notify_listeners('new_line',
                              dependent, dependency, path, line_num)
    def branch_contains(self, commit, branch):
        """Return True if `branch` contains `commit` (memoized; decided by
        whether `git merge-base commit branch` is the commit itself)."""
        sha1 = commit.hex
        branch_commit = self.get_commit(branch)
        branch_sha1 = branch_commit.hex
        self.logger.debug("    Does %s (%s) contain %s?" %
                          (branch, branch_sha1[:8], sha1[:8]))
        if sha1 not in self.branch_contains_cache:
            self.branch_contains_cache[sha1] = {}
        if branch_sha1 in self.branch_contains_cache[sha1]:
            memoized = self.branch_contains_cache[sha1][branch_sha1]
            self.logger.debug("      %s (memoized)" % memoized)
            return memoized
        cmd = ['git', 'merge-base', sha1, branch_sha1]
        # self.logger.debug("    " + " ".join(cmd))
        out = subprocess.check_output(cmd, universal_newlines=True).strip()
        self.logger.debug("      merge-base returned: %s" % out[:8])
        result = out == sha1
        self.logger.debug("      %s" % result)
        self.branch_contains_cache[sha1][branch_sha1] = result
        return result
    def tree_lookup(self, target_path, commit):
        """Navigate to the tree or blob object pointed to by the given target
        path for the given commit. This is necessary because each git
        tree only contains entries for the directory it refers to, not
        recursively for all subdirectories.
        """
        segments = target_path.split("/")
        tree_or_blob = commit.tree
        path = ''
        while segments:
            dirent = segments.pop(0)
            if isinstance(tree_or_blob, pygit2.Tree):
                if dirent in tree_or_blob:
                    tree_or_blob = self.repo[tree_or_blob[dirent].oid]
                    # self.logger.debug('%s in %s' % (dirent, path))
                    if path:
                        path += '/'
                    path += dirent
                else:
                    # This is probably because we were called on a
                    # commit whose parent added a new directory.
                    self.logger.debug('      %s not in %s in %s' %
                                      (dirent, path, commit.hex[:8]))
                    return None
            else:
                self.logger.debug('      %s not a tree in %s' %
                                  (tree_or_blob, commit.hex[:8]))
                return None
        return tree_or_blob
    def edges(self):
        """Return the dependency graph as a list (one entry per dependent)
        of lists of (dependent, dependency) SHA1 pairs."""
        return [
            [(dependent, dependency)
             for dependency in self.dependencies[dependent]]
            for dependent in self.dependencies.keys()
        ]
| aspiers/git-deps | git_deps/detector.py | detector.py | py | 15,150 | python | en | code | 291 | github-code | 36 | [
{
"api_name": "git_deps.utils.standard_logger",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "git_deps.gitutils.GitUtils.get_repo",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "git_deps.gitutils.GitUtils",
"line_number": 32,
"usage_type": "name"
... |
6714176411 | """
Core config implementations.
"""
import importlib
import logging
import os
import sys
from collections import KeysView, ItemsView, ValuesView, Mapping
from .. import abc
from ..compat import text_type, string_types
from ..decoders import Decoder
from ..exceptions import ConfigError
from ..interpolation import BashInterpolator, ConfigLookup, ChainLookup, EnvironmentLookup
from ..schedulers import FixedIntervalScheduler
from ..structures import IgnoreCaseDict
from ..utils import EventHandler, make_ignore_case, merge_dict
logger = logging.getLogger(__name__)
NESTED_DELIMITER = '.'
class BaseConfig(abc.Config):
    """
    Base config class for implementing an `abc.Config`.
    """
    def __init__(self):
        # Interpolation lookup defaults to this config itself; `updated`
        # is the event fired when the configuration is (re)loaded.
        self._lookup = ConfigLookup(self)
        self._updated = EventHandler()
    def get(self, key, default=None):
        """
        Get the value for given key if key is in the configuration, otherwise default.
        :param str key: The key to be found.
        :param default: The default value if the key is not found.
        :return: The value found, otherwise default.
        """
        return self.get_value(key, object, default)
    def get_bool(self, key, default=None):
        """
        Get the value for given key as a bool if key is in the configuration, otherwise None.
        :param str key: The key to be found.
        :param default: The default value if the key is not found.
        :return bool: The value found, otherwise default.
        """
        return self.get_value(key, bool, default=default)
    def get_dict(self, key, default=None):
        """
        Get the value for given key as a dict if key is in the configuration, otherwise None.
        :param str key: The key to be found.
        :param default: The default value if the key is not found.
        :return dict: The value found, otherwise default.
        """
        return self.get_value(key, dict, default=default)
    def get_int(self, key, default=None):
        """
        Get the value for given key as an int if key is in the configuration, otherwise None.
        :param str key: The key to be found.
        :param default: The default value if the key is not found.
        :return int: The value found, otherwise default.
        """
        return self.get_value(key, int, default=default)
    def get_float(self, key, default=None):
        """
        Get the value for given key as a float if key is in the configuration, otherwise None.
        :param str key: The key to be found.
        :param default: The default value if the key is not found.
        :return float: The value found, otherwise default.
        """
        return self.get_value(key, float, default=default)
    def get_list(self, key, default=None):
        """
        Get the value for given key as a list if key is in the configuration, otherwise None.
        :param str key: The key to be found.
        :param default: The default value if the key is not found.
        :return list: The value found, otherwise default.
        """
        return self.get_value(key, list, default=default)
    def get_str(self, key, default=None):
        """
        Get the value for given key as a str if key is in the configuration, otherwise None.
        :param str key: The key to be found.
        :param default: The default value if the key is not found.
        :return str: The value found, otherwise default.
        """
        return self.get_value(key, text_type, default=default)
    def keys(self):
        """
        Get all the keys of the configuration.
        :return tuple: The keys of the configuration.
        """
        return KeysView(self)
    def items(self):
        """
        Get all the items of the configuration (key/value pairs).
        :return tuple: The items of the configuration.
        """
        return ItemsView(self)
    def values(self):
        """
        Get all the values of the configuration.
        :return tuple: The values of the configuration.
        """
        return ValuesView(self)
    @property
    def lookup(self):
        """
        Get the lookup object used for interpolation.
        :return StrLookup: The lookup object.
        """
        return self._lookup
    @lookup.setter
    def lookup(self, value):
        """
        Set the lookup object used for interpolation.
        :param StrLookup value: The lookup object.
        :raises TypeError: If value is neither None nor an abc.StrLookup.
        """
        if value is None:
            # Setting None restores the default self-referential lookup.
            self._lookup = ConfigLookup(self)
        elif isinstance(value, abc.StrLookup):
            self._lookup = value
        else:
            raise TypeError('lookup must be an abc.StrLookup')
        # Notify subclasses through the _lookup_changed hook below.
        self._lookup_changed(self._lookup)
    @property
    def updated(self):
        """
        Get the updated event handler.
        :return EventHandler: The event handler.
        """
        return self._updated
    def on_updated(self, func):
        """
        Add a new callback for updated event.
        It can also be used as decorator.
        Example usage:
        .. code-block:: python
            from central.config import MemoryConfig
            config = MemoryConfig()
            @config.on_updated
            def config_updated():
                pass
        :param func: The callback.
        """
        self.updated.add(func)
    def prefixed(self, prefix):
        """
        Get a subset of the configuration prefixed by a key.
        Example usage:
        .. code-block:: python
            from central.config import MemoryConfig
            config = MemoryConfig().prefixed('database')
            host = config.get('host')
        :param str prefix: The prefix to prepend to the keys.
        :return abc.Config: The subset of the configuration prefixed by a key.
        """
        return PrefixedConfig(prefix, self)
    def reload_every(self, interval):
        """
        Get a reload configuration to reload the
        current configuration every interval given.
        :param Number interval: The interval in seconds between loads.
        :return ReloadConfig: The reload config object.
        """
        return ReloadConfig(self, FixedIntervalScheduler(interval))
    def _lookup_changed(self, lookup):
        """
        Called when the lookup property is changed.
        Subclasses may override; the default does nothing.
        :param lookup: The new lookup object.
        """
        pass
    def __contains__(self, key):
        """
        Get true if key is in the configuration, otherwise false.
        :param str key: The key to be checked.
        :return bool: true if key is in the configuration, false otherwise.
        """
        return self.get_raw(key) is not None
    def __getitem__(self, key):
        """
        Get the value if key is in the configuration, otherwise KeyError is raised.
        :param str key: The key to be found.
        :return: The value found.
        """
        value = self.get_value(key, object)
        if value is None:
            raise KeyError(key)
        return value
class BaseDataConfig(BaseConfig):
"""
Base config class that holds keys.
"""
def __init__(self):
super(BaseDataConfig, self).__init__()
self._data = IgnoreCaseDict()
self._decoder = Decoder.instance()
self._interpolator = BashInterpolator()
@property
def decoder(self):
"""
Get the decoder.
:return abc.Decoder: The decoder.
"""
return self._decoder
@decoder.setter
def decoder(self, value):
"""
Set the decoder.
:param abc.Decoder value: The decoder.
"""
if not isinstance(value, abc.Decoder):
raise TypeError('decoder must be an abc.Decoder')
self._decoder = value
@property
def interpolator(self):
"""
Get the interpolator.
:return abc.StrInterpolator: The interpolator.
"""
return self._interpolator
@interpolator.setter
def interpolator(self, value):
"""
Set the interpolator.
:param abc.StrInterpolator value: The interpolator.
"""
if not isinstance(value, abc.StrInterpolator):
raise TypeError('interpolator must be an abc.StrInterpolator')
self._interpolator = value
def get_raw(self, key):
"""
Get the raw value for given key if key is in the configuration, otherwise None.
Find the given key considering the nested delimiter as nested key.
:param str key: The key to be found.
:return: The value found, otherwise None.
"""
if key is None:
raise TypeError('key cannot be None')
value = self._data.get(key)
if value is not None:
return value
paths = key.split(NESTED_DELIMITER)
if key == paths[0]:
return None
value = self._data.get(paths[0])
for i in range(1, len(paths)):
if value is None:
break
if not isinstance(value, Mapping):
value = None
break
value = value.get(paths[i])
return value
def get_value(self, key, type, default=None):
"""
Get the value for given key as the specified type if key is in the configuration, otherwise default.
It can access a nested field by passing a . delimited path of keys and
the interpolator is used to resolve variables.
:param str key: The key to be found.
:param type: The data type to convert the value to.
:param default: The default value if the key is not found.
:return: The value found, otherwise default.
"""
if key is None:
raise TypeError('key cannot be None')
if type is None:
raise TypeError('type cannot be None')
value = self.get_raw(key)
if value is None:
if callable(default):
return default()
return default
if isinstance(value, string_types):
value = self._interpolator.resolve(value, self._lookup)
if type is object:
return value
return self._decoder.decode(value, type)
def __iter__(self):
"""
Get a new iterator object that can iterate over the keys of the configuration.
:return: The iterator.
"""
return iter(self._data)
def __len__(self):
"""
Get the number of keys.
:return int: The number of keys.
"""
return len(self._data)
class ChainConfig(BaseConfig):
"""
Combine multiple `abc.Config` in a fallback chain.
The chain does not merge the configurations but instead
treats them as overrides so that a key existing in a configuration supersedes
the same key in the previous configuration.
The chain works in reverse order, that means the last configuration
in the chain overrides the previous one.
Example usage:
.. code-block:: python
from central.config import CommandLineConfig, EnvironmentConfig, FallbackConfig
config = ChainConfig(EnvironmentConfig(), CommandLineConfig())
config.load()
value = config.get('key1')
:param configs: The list of `abc.Config`.
"""
def __init__(self, *configs):
super(ChainConfig, self).__init__()
for config in configs:
if not isinstance(config, abc.Config):
raise TypeError('config must be an abc.Config')
config.lookup = self._lookup
config.updated.add(self._config_updated)
self._configs = configs
self._keys_cached = None
@property
def configs(self):
"""
Get the sub configurations.
:return tuple: The list of `abc.Config`.
"""
return self._configs
def get_raw(self, key):
"""
Get the raw value for given key if key is in the configuration, otherwise None.
It goes through every child to find the given key.
:param str key: The key to be found.
:return: The value found, otherwise None.
"""
for config in reversed(self._configs):
value = config.get_raw(key)
if value is not None:
return value
return None
def get_value(self, key, type, default=None):
"""
Get the value for given key as the specified type if key is in the configuration, otherwise default.
It goes through every child to find the given key.
:param str key: The key to be found.
:param type: The data type to convert the value to.
:param default: The default value if the key is not found.
:return: The value found, otherwise default.
"""
for config in reversed(self._configs):
value = config.get_value(key, type)
if value is not None:
return value
if callable(default):
return default()
return default
def load(self):
"""
Load the sub configurations.
This method does not trigger the updated event.
"""
self._keys_cached = None
for config in self._configs:
config.load()
def _config_updated(self):
"""
Called by updated event from the children.
It is not intended to be called directly.
"""
# reset the cache because the children's
# configuration has been changed.
self._keys_cached = None
self.updated()
def _lookup_changed(self, lookup):
"""
Set the new lookup to the children.
:param lookup: The new lookup object.
"""
for config in self._configs:
config.lookup = lookup
def _build_cached_keys(self):
"""
Build the cache for the children's keys.
:return IgnoreCaseDict: The dict containing the keys.
"""
keys = IgnoreCaseDict()
for config in self._configs:
for key in config.keys():
keys[key] = True
return keys
def _get_cached_keys(self):
"""
Get the cache for the children's keys.
:return IgnoreCaseDict: The dict containing the keys.
"""
if self._keys_cached is None:
self._keys_cached = self._build_cached_keys()
return self._keys_cached
def __iter__(self):
"""
Get a new iterator object that can iterate over the keys of the configuration.
:return: The iterator.
"""
return iter(self._get_cached_keys())
def __len__(self):
"""
Get the number of keys.
:return int: The number of keys.
"""
return len(self._get_cached_keys())
class CommandLineConfig(BaseDataConfig):
"""
A command line based on `BaseDataConfig`.
Example usage:
.. code-block:: python
from central.config import CommandLineConfig
config = CommandLineConfig()
config.load()
value = config.get('key1')
"""
def load(self):
"""
Load the configuration from the command line args.
This method does not trigger the updated event.
"""
data = IgnoreCaseDict()
# the first item is the file name.
args = sys.argv[1:]
iterator = iter(args)
while True:
try:
current_arg = next(iterator)
except StopIteration:
break
key_start_index = 0
if current_arg.startswith('--'):
key_start_index = 2
elif current_arg.startswith('-'):
key_start_index = 1
separator = current_arg.find('=')
if separator == -1:
if key_start_index == 0:
raise ConfigError('Unrecognized argument %s format' % current_arg)
key = current_arg[key_start_index:]
try:
value = next(iterator)
except StopIteration:
raise ConfigError('Value for argument %s is missing' % key)
else:
key = current_arg[key_start_index:separator].strip()
if not key:
raise ConfigError('Unrecognized argument %s format' % current_arg)
value = current_arg[separator + 1:].strip()
data[key] = value
self._data = data
class EnvironmentConfig(BaseDataConfig):
"""
An environment variable configuration based on `BaseDataConfig`.
Example usage:
.. code-block:: python
from central.config import EnvironmentConfig
config = EnvironmentConfig()
config.load()
value = config.get('key1')
"""
def load(self):
"""
Load the configuration from environment variables.
This method does not trigger the updated event.
"""
self._data = IgnoreCaseDict(os.environ)
class MemoryConfig(BaseDataConfig):
"""
In-memory implementation of `BaseDataConfig`.
Example usage:
.. code-block:: python
from central.config import MemoryConfig
config = MemoryConfig(data={'key': 'value'})
value = config.get('key')
config.set('other key', 'other value')
value = config.get('other key')
:param dict data: The initial data.
"""
def __init__(self, data=None):
super(MemoryConfig, self).__init__()
if data is not None:
if not isinstance(data, Mapping):
raise TypeError('data must be a dict')
self._data = make_ignore_case(data)
def set(self, key, value):
"""
Set a value for the given key.
The updated event is triggered.
:param str key: The key.
:param value: The value.
"""
if key is None:
raise TypeError('key cannot be None')
if isinstance(value, Mapping):
value = make_ignore_case(value)
self._data[key] = value
self.updated()
def load(self):
"""
Do nothing
"""
class MergeConfig(BaseDataConfig):
"""
Merge multiple `abc.Config`, in case of key collision last-match wins.
Example usage:
.. code-block:: python
from central.config import FileConfig, MergeConfig
config = MergeConfig(FileConfig('base.json'), FileConfig('dev.json'))
config.load()
value = config.get('key1')
:param configs: The list of `abc.Config`.
"""
def __init__(self, *configs):
super(MergeConfig, self).__init__()
if not isinstance(configs, (tuple, list)):
raise TypeError('configs must be a list or tuple')
for config in configs:
if not isinstance(config, abc.Config):
raise TypeError('config must be an abc.Config')
config.lookup = self._lookup
config.updated.add(self._config_updated)
self._configs = configs
self._raw_configs = [self._RawConfig(config) for config in self._configs]
@property
def configs(self):
"""
Get the sub configurations.
:return tuple: The list of `abc.Config`.
"""
return self._configs
def load(self):
"""
Load the sub configurations and merge them
into a single configuration.
This method does not trigger the updated event.
"""
for config in self._configs:
config.load()
data = IgnoreCaseDict()
if len(self._configs) == 0:
return data
merge_dict(data, *self._raw_configs)
self._data = data
def _config_updated(self):
"""
Called by updated event from the children.
It is not intended to be called directly.
"""
self.updated()
class _RawConfig(Mapping):
"""
Internal class used to merge a `abc.Config`.
When we merge configs we want to merge the raw value
rather than decoded and interpolated value.
"""
def __init__(self, config):
self._config = config
def get(self, key, default=None):
value = self._config.get_raw(key)
if value is None:
return default
return value
def __contains__(self, key):
return key in self._config
def __getitem__(self, key):
value = self._config.get_raw(key)
if value is None:
raise KeyError(key)
return value
def __iter__(self):
return iter(self._config)
def __len__(self):
return len(self._config)
class ModuleConfig(BaseDataConfig):
"""
A config implementation that loads the configuration
from a Python module.
Example usage:
.. code-block:: python
from central.config import ModuleConfig
config = ModuleConfig('module_name')
config.load()
value = config.get('key')
:param str name: The module name to be loaded.
"""
def __init__(self, name):
super(ModuleConfig, self).__init__()
if not isinstance(name, string_types):
raise TypeError('name must be a str')
self._name = name
@property
def name(self):
"""
Get the module name.
:return str: The module name.
"""
return self._name
def load(self):
"""
Load the configuration from a file.
Recursively load any filename referenced by an @next property in the configuration.
This method does not trigger the updated event.
"""
to_merge = []
name = self._name
# create a chain lookup to resolve any variable left
# using environment variable.
lookup = ChainLookup(EnvironmentLookup(), self._lookup)
while name:
o = self._import_module(self._interpolator.resolve(name, lookup))
data = {}
for key in dir(o):
if not key.startswith('_'):
data[key] = getattr(o, key)
name = getattr(o, '_next', None)
if name is not None and not isinstance(name, string_types):
raise ConfigError('_next must be a str')
to_merge.append(data)
data = make_ignore_case(to_merge[0])
if len(to_merge) > 1:
merge_dict(data, *to_merge[1:])
self._data = data
def _import_module(self, name):
"""
Import module by name.
:param str name: The name of the module.
:return: The module loaded.
"""
return importlib.import_module(name)
class PrefixedConfig(BaseConfig):
"""
A config implementation to view into another Config
for keys starting with a specified prefix.
Example usage:
.. code-block:: python
from central.config import PrefixedConfig, MemoryConfig
config = MemoryConfig(data={'production.timeout': 10})
prefixed = PrefixedConfig('production', config)
value = prefixed.get('timeout')
:param str prefix: The prefix to prepend to the keys.
:param abc.Config config: The config to load the keys from.
"""
def __init__(self, prefix, config):
super(PrefixedConfig, self).__init__()
if not isinstance(prefix, string_types):
raise TypeError('prefix must be a str')
if not isinstance(config, abc.Config):
raise TypeError('config must be an abc.Config')
self._prefix = prefix.rstrip(NESTED_DELIMITER)
self._prefix_delimited = prefix if prefix.endswith(NESTED_DELIMITER) else prefix + NESTED_DELIMITER
self._config = config
self._config.lookup = self.lookup
@property
def config(self):
"""
Get the config.
:return abc.Config: The config.
"""
return self._config
@property
def prefix(self):
"""
Get the prefix.
:return str: The prefix.
"""
return self._prefix
def get_raw(self, key):
"""
Get the raw value for given key if key is in the configuration, otherwise None.
:param str key: The key to be found.
:return: The value found, otherwise default.
"""
if key is None:
raise TypeError('key cannot be None')
try:
key = self._prefix_delimited + key
except TypeError:
raise TypeError('key must be a str')
return self._config.get_raw(key)
def get_value(self, key, type, default=None):
"""
Get the value for given key as the specified type if key is in the configuration, otherwise default.
:param str key: The key to be found.
:param type: The data type to convert the value to.
:param default: The default value if the key is not found.
:return: The value found, otherwise default.
"""
if key is None:
raise TypeError('key cannot be None')
try:
key = self._prefix_delimited + key
except TypeError:
raise TypeError('key must be a str')
return self._config.get_value(key, type, default=default)
def load(self):
"""
Load the child configuration.
This method does not trigger the updated event.
"""
self._config.load()
def _lookup_changed(self, lookup):
"""
Set the new lookup to the child.
:param lookup: The new lookup object.
"""
self._config.lookup = lookup
def __iter__(self):
"""
Get a new iterator object that can iterate over the keys of the configuration.
:return: The iterator.
"""
keys = set()
for key in self._config:
if key == self._prefix:
value = self._config.get(key)
if isinstance(value, Mapping):
keys.update(value.keys())
elif key.startswith(self._prefix_delimited):
keys.update((key[len(self._prefix_delimited):],))
return iter(keys)
def __len__(self):
"""
Get the number of keys.
:return int: The number of keys.
"""
length = 0
for key in self._config:
if key == self._prefix:
value = self._config.get(key)
if isinstance(value, Mapping):
length += len(value)
elif key.startswith(self._prefix_delimited):
length += 1
return length
class ReloadConfig(BaseConfig):
"""
A reload config that loads the configuration from its child
from time to time, it is scheduled by a scheduler.
Example usage:
.. code-block:: python
from central.config import ReloadConfig, FileConfig
from central.schedulers import FixedIntervalScheduler
config = ReloadConfig(FileConfig('config.json'), FixedIntervalScheduler())
config.load()
value = config.get('key')
:param abc.Config config: The config to be reloaded from time to time.
:param abc.Scheduler scheduler: The scheduler used to reload the configuration from the child.
"""
def __init__(self, config, scheduler):
super(ReloadConfig, self).__init__()
if not isinstance(config, abc.Config):
raise TypeError('config must be an abc.Config')
if not isinstance(scheduler, abc.Scheduler):
raise TypeError('scheduler must be an abc.Scheduler')
self._config = config
self._config.lookup = self.lookup
self._scheduler = scheduler
self._loaded = False
@property
def config(self):
"""
Get the config.
:return abc.Config: The config.
"""
return self._config
@property
def scheduler(self):
"""
Get the scheduler.
:return abc.Scheduler: The scheduler.
"""
return self._scheduler
def get_raw(self, key):
"""
Get the raw value for given key if key is in the configuration, otherwise None.
:param str key: The key to be found.
:return: The value found, otherwise default.
"""
return self._config.get_raw(key)
def get_value(self, key, type, default=None):
"""
Get the value for given key as the specified type if key is in the configuration, otherwise default.
:param str key: The key to be found.
:param type: The data type to convert the value to.
:param default: The default value if the key is not found.
:return: The value found, otherwise default.
"""
return self._config.get_value(key, type, default=default)
def load(self):
"""
Load the child configuration and start the scheduler
to reload the child configuration from time to time.
This method does not trigger the updated event.
"""
self._config.load()
if not self._loaded:
self._scheduler.schedule(self._reload)
self._loaded = True
def _reload(self):
"""
Reload the child configuration and trigger the updated event.
It is only intended to be called by the scheduler.
"""
try:
self._config.load()
except:
logger.warning('Unable to load config ' + text_type(self._config), exc_info=True)
try:
self.updated()
except:
logger.warning('Error calling updated event from ' + str(self), exc_info=True)
def _lookup_changed(self, lookup):
"""
Set the new lookup to the child.
:param lookup: The new lookup object.
"""
self._config.lookup = lookup
def __iter__(self):
"""
Get a new iterator object that can iterate over the keys of the configuration.
:return: The iterator.
"""
return iter(self._config)
def __len__(self):
"""
Get the number of keys.
:return int: The number of keys.
"""
return len(self._config)
| viniciuschiele/central | central/config/core.py | core.py | py | 30,318 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "interpolation.ConfigLookup",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "utils.EventHandler",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "compa... |
6379736623 | from functools import partial
import jax
import numpy as np
from tessellate_ipu import tile_map, tile_put_sharded
data = np.array([1, -2, 3], np.float32)
tiles = (0, 2, 5)
@partial(jax.jit, backend="ipu")
def compute_fn(input):
input = tile_put_sharded(input, tiles)
# input = tile_put_replicated(input, tiles)
return tile_map(jax.lax.neg_p, input)
output = compute_fn(data)
print(output, output.array.device())
print("SHAPE:", output.shape)
print("RESULT:", output.array)
| graphcore-research/tessellate-ipu | examples/demo/demo1.py | demo1.py | py | 492 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "tessellate_ipu.tile_put_sharded",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tessella... |
38269425685 | import random
import matplotlib.pyplot as plt
import math
import numpy as np
import time
plt.style.use('ggplot')
mutation = True
elitism = False
mutationPorcentage = .90
tournamentPercentage = 0.02
generations = 100
nPopulation = 400
fuzzyNetworks = 7
chromosomeSize = fuzzyNetworks*4
weight = 5
pMain = [8,25,4,45,10,17,35]
population = []
fa = []
yMain = []
xMain = []
plotDistanceX = []
plotDistanceY = []
plotGraphX = []
plotGraphY = []
line1 = []
line2 = []
line3 = []
def createPopulation():
for i in range(0, nPopulation):
chromosome=random.sample(range(0,256), chromosomeSize)
population.append(chromosome)
# population[0] = [193, 34, 248, 32, 99, 155, 0, 255, 58, 6 , 73, 71, 77, 8, 46, 237, 148, 32, 49, 157, 169, 159, 0, 255, 66, 16, 6, 242]
def populateFA(ei):
fa.clear()
global line1
global line2
global plotGraphX
global plotGraphX
plotGraphX = []
plotGraphY = []
sDistance = 999999999999999999999999
for chromosome in population:
fa_v = 0
plotGraphXTemp = []
plotGraphYTemp = []
for i in range(0,1000):
x = i/10
y = getYFuzzy(chromosome, x)
# y = getYpoint(chromosome[0]/weight,chromosome[1]/weight,chromosome[2]/weight,chromosome[3]/weight,chromosome[4]/weight,chromosome[5]/weight,chromosome[6]/weight, x)
plotGraphXTemp.append(x)
plotGraphYTemp.append(y)
fa_v += abs(yMain[i] - y)
fa.append(fa_v)
if sDistance > fa_v:
sDistance = fa_v
plotGraphX = plotGraphXTemp
plotGraphY = plotGraphYTemp
plotDistanceX.append(ei)
plotDistanceY.append(sDistance)
line1, line2 = live_plotter(plotDistanceX,plotDistanceY, plotGraphX,plotGraphY,line1, line2, sDistance, chromosome)
def getFA(po):
poFA = []
for chromosome in po:
fa_v = 0
for i in range(0,1000):
x = i/10
y = getYFuzzy(chromosome, x)
# y = getYpoint(chromosome[0]/weight,chromosome[1]/weight,chromosome[2]/weight,chromosome[3]/weight,chromosome[4]/weight,chromosome[5]/weight,chromosome[6]/weight, x)
fa_v += abs(yMain[i] - y)
poFA.append(fa_v)
return poFA
def getYpoint(a,b,c,d,e,f,g,xi):
if c == 0 or e == 0:
return 0
else:
return a * (b * math.sin(xi/c) + d * math.cos(xi/e)) + f * xi - g
def creatMainGraph():
for i in range(0,1000):
xMain.append(i/10)
yMain.append( getYpoint(pMain[0],pMain[1],pMain[2],pMain[3],pMain[4],pMain[5],pMain[6], xMain[i]) )
def betterOptions(participants):
betterOption = participants[0]
for i in participants:
if(fa[betterOption] > fa[i]):
betterOption = i
return betterOption
def reproduction(f,m):
cutPoint = random.randint(0, chromosomeSize * 8)
cutIndex = int(cutPoint / 8)
n = cutPoint - (cutIndex * 8)
if(n == 0):
fc = f[0:cutIndex] + m[cutIndex:chromosomeSize]
mc = m[0:cutIndex] + f[cutIndex:chromosomeSize]
else:
lowMask = (2**n)-1
highMask = ((2**chromosomeSize)-1)-((2**n)-1)
lowPar1 = f[cutIndex] & lowMask
lowPar2 = m[cutIndex] & lowMask
highPar1 = f[cutIndex] & highMask
highPar2 = m[cutIndex] & highMask
child1 = lowPar1 | highPar2
child2 = lowPar2 | highPar1
fc = f[0:cutIndex] + [child2] + m[cutIndex+1:chromosomeSize]
mc = m[0:cutIndex] + [child1] + f[cutIndex+1:chromosomeSize]
return fc,mc
def mutation(childList):
participants = random.sample(range(0,nPopulation), int(nPopulation * mutationPorcentage))
for i in participants:
cutPoint = random.randint(0, (chromosomeSize * 8) - 1)
numberIndex = int(cutPoint / 8)
n = cutPoint - (numberIndex * 8)
numberToMutate = childList[i][numberIndex]
bindata = '{0:08b}'.format(numberToMutate)
bitNot = '1' if bindata[n] == '0' else '0'
new = bindata[0:n] + bitNot + bindata[n+1:8]
newInt = int(new, 2)
childList[i][numberIndex] = newInt
def tournament():
global population
childList = []
for i in range(0, int(nPopulation/2)):
participants = random.sample(range(0,nPopulation), int(nPopulation * tournamentPercentage))
f = betterOptions(participants)
participants = random.sample(range(0,nPopulation), int(nPopulation * tournamentPercentage))
m = betterOptions(participants)
a,b = reproduction(population[f],population[m])
childList.append(a)
childList.append(b)
mutation(childList)
mutation(childList)
# mutation(childList)
# mutation(childList)
# mutation(childList)
# mutation(childList)
# mutation(childList)
# mutation(childList)
if(elitism):
completeList = population + childList
faList = fa + getFA(childList)
listIndex = {v: k for v, k in enumerate(faList)}
sortedListIndex = list(dict(sorted(listIndex.items(), key=lambda item: item[1])).items())
populationIndex = sortedListIndex[0:nPopulation]
betterList = [v for v, k in populationIndex]
population = [completeList[v] for v in betterList]
pass
else:
population = childList
def live_plotter(x_vec,y1_data, x_vec2, y1_data2,line1, line2,sDistance, identifier='',pause_time=0.0001):
global ax1
global ax2
global line3
global plt
if line1==[]:
# this is the call to matplotlib that allows dynamic plotting
plt.ion()
fig = plt.figure(figsize=(15,5))
ax1 = fig.add_subplot(111)
# create a variable for the line so we can later update it
line1, = ax1.plot(x_vec,y1_data,'-o',alpha=0.8)
fig2 = plt.figure(figsize=(10,5))
ax2 = fig2.add_subplot(111)
# create a variable for the line so we can later update it
line2, = ax2.plot(x_vec2,y1_data2,'-o',alpha=0.8)
line3, = ax2.plot(xMain,yMain,'-o',alpha=0.8)
#update plot label/title
plt.ylabel('Y Label')
plt.title( str(sDistance) + ' - ' + str(identifier))
plt.show()
# after the figure, axis, and line are created, we only need to update the y-data
line1.set_ydata(y1_data)
line1.set_xdata(x_vec)
line2.set_ydata(y1_data2)
line2.set_xdata(x_vec2)
# adjust limits if new data goes beyond bounds
if np.min(y1_data)<=line1.axes.get_ylim()[0] or np.max(y1_data)>=line1.axes.get_ylim()[1]:
# plt.ylim([np.min(y1_data)-np.std(y1_data),np.max(y1_data)+np.std(y1_data)])
ax1.set_ylim([np.min(y1_data)-np.std(y1_data),np.max(y1_data)+np.std(y1_data)])
if np.min(y1_data2)<=line2.axes.get_ylim()[0] or np.max(y1_data2)>=line2.axes.get_ylim()[1]:
ax2.set_ylim([np.min(yMain)-np.std(yMain),np.max(yMain)+np.std(yMain)])
# adjust limits if new data goes beyond bounds
if np.min(x_vec)<=line1.axes.get_xlim()[0] or np.max(x_vec)>=line1.axes.get_xlim()[1]:
# plt.xlim([np.min(x_vec)-np.std(x_vec),np.max(x_vec)+np.std(x_vec)])
ax1.set_xlim([np.min(x_vec)-np.std(x_vec),np.max(x_vec)+np.std(x_vec)])
if np.min(x_vec2)<=line2.axes.get_xlim()[0] or np.max(x_vec2)>=line2.axes.get_xlim()[1]:
ax2.set_xlim([np.min(x_vec2)-np.std(x_vec2),np.max(x_vec2)+np.std(x_vec2)])
# this pauses the data so the figure/axis can catch up - the amount of pause can be altered above
plt.pause(pause_time)
# return line so we can update it again in the next iteration
return line1, line2
weights = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
def getYFuzzy(data_fuzzy, x):
fuzzyNetworks
bf = 0
af = 0
for i in range(fuzzyNetworks):
m = data_fuzzy[i*4]
de = data_fuzzy[(i*4)+1]
p = data_fuzzy[(i*4)+2]
q = data_fuzzy[(i*4)+3]
if de == 0:
mf = 0
else:
mf = math.exp((-math.pow((x-m), 2))/(2*math.pow(de, 2)))
a = mf*(p*x+q)
bf += mf
af += a
if bf == 0:
y = 0
else:
y = af/bf
return y
def getYFuzzyBackup(data_fuzzy, x):
m1 = data_fuzzy[0]
de1 = data_fuzzy[1]
p1 = data_fuzzy[2]
q1 = data_fuzzy[3]
m2 = data_fuzzy[4]
de2 = data_fuzzy[5]
p2 = data_fuzzy[6]
q2 = data_fuzzy[7]
m3 = data_fuzzy[8]
de3 = data_fuzzy[9]
p3 = data_fuzzy[10]
q3 = data_fuzzy[11]
m4 = data_fuzzy[12]
de4 = data_fuzzy[13]
p4 = data_fuzzy[14]
q4 = data_fuzzy[15]
if de1 == 0 or de2 == 0 or de3 == 0 or de4 == 0:
return 0
mf1 = math.exp((-math.pow((x-m1), 2))/(2*math.pow(de1, 2)))
mf2 = math.exp((-math.pow((x-m2), 2))/(2*math.pow(de2, 2)))
mf3 = math.exp((-math.pow((x-m3), 2))/(2*math.pow(de3, 2)))
mf4 = math.exp((-math.pow((x-m4), 2))/(2*math.pow(de4, 2)))
bf = mf1+mf2+mf3+mf4
a1 = mf1*(p1*x+q1)
a2 = mf2*(p2*x+q2)
a3 = mf3*(p3*x+q3)
a4 = mf4*(p4*x+q4)
af = a1+a2+a3+a4
y = af/bf
return y
if __name__ == "__main__":
creatMainGraph()
line1, line2 = live_plotter([0],[0], [0],[0],line1, line2, 0)
# time.sleep(3)
createPopulation()
for i in range(0, generations):
populateFA(i)
tournament()
input("Press Enter to continue...") | monzter50/fuzzy_ga | copy.py | copy.py | py | 9,520 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name"... |
40794410326 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
class Solution:
def zigzagLevelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:
if root==None:
return []
queue=deque()
level=0
queue.append(root)
result=[]
while (len(queue)):
current_level_len=len(queue)
current_level_ele=deque()
for _ in range(current_level_len):
current_element=queue.popleft()
if(level%2==0):
current_level_ele.append(current_element.val)
else:
current_level_ele.appendleft(current_element.val)
if current_element.left:
queue.append(current_element.left)
if current_element.right:
queue.append(current_element.right)
result.append(list(current_level_ele))
level+=1
return result
| Devjyoti29/LeetHub | 0103-binary-tree-zigzag-level-order-traversal/0103-binary-tree-zigzag-level-order-traversal.py | 0103-binary-tree-zigzag-level-order-traversal.py | py | 1,286 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 20,
"usage_type": "call"
}
] |
3637163420 | import cv2
from openvino.inference_engine.ie_api import IECore, IENetwork
import pprint
# default threshold
THRESHOLD = 0.5
class Face_Detection:
def __init__(self, model_name, device='CPU', extensions=None, perf_counts="False"):
self.model_weights = model_name + '.bin'
self.model_structure = model_name + '.xml'
self.device = device
self.extensions = extensions
try:
self.model = IENetwork(self.model_structure, self.model_weights)
except Exception as e:
raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
self.input_name = next(iter(self.model.inputs))
self.input_shape = self.model.inputs[self.input_name].shape
self.output_name = next(iter(self.model.outputs))
self.output_shape = self.model.outputs[self.output_name].shape
self.net = None
self.pp = None
if perf_counts == "True":
self.pp = pprint.PrettyPrinter(indent=4)
def load_model(self):
core = IECore()
if self.extensions != None:
core.add_extension(self.extensions, self.device)
self.net = core.load_network(network=self.model, device_name=self.device, num_requests=1)
def predict(self, image):
preprocessed_image = self.preprocess_input(image)
output = self.net.infer({self.input_name: preprocessed_image})
if self.pp is not None:
self.pp.pprint(self.net.requests[0].get_perf_counts())
coords = self.preprocess_output(output[self.output_name])
if not coords:
return None
else:
width = int(image.shape[1])
height = int(image.shape[0])
# here we consider only the first face found
x = int(coords[0][0] * width)
if x < 0:
x = 0
y = int(coords[0][1] * height)
if y < 0:
y = 0
w = int(coords[0][2] * width) - x
h = int(coords[0][3] * height) - y
# syntax reminder: getRectSubPix(InputArray image, Size patchSize, Point2f center)
return cv2.getRectSubPix(image, (w, h), (x + w / 2, y + h / 2))
def check_model(self):
raise NotImplementedError
def preprocess_input(self, image):
image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
image = image.transpose((2, 0, 1))
image = image.reshape(1, *image.shape)
return image
def preprocess_output(self, outputs):
coords = []
for bounding_box in outputs[0][0]:
conf = bounding_box[2]
if conf >= THRESHOLD:
coords.append([bounding_box[3], bounding_box[4], bounding_box[5], bounding_box[6]])
return coords
| alihussainia/Computer_Pointer_Controller | face_detection.py | face_detection.py | py | 2,813 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "openvino.inference_engine.ie_api.IENetwork",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pprint.PrettyPrinter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "openvino.inference_engine.ie_api.IECore",
"line_number": 28,
"usage_type"... |
21250532752 | # -*- coding:utf-8 -*-
# coding=utf-8
import os
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
# from skimage.transform import resize
import numpy as np
import cv2
# Cross-shaped kernel used for morphological dilation (see WatermarkRemover.dilate).
_DILATE_KERNEL = np.array([[0, 0, 1, 0, 0],
                           [0, 0, 1, 0, 0],
                           [1, 1, 1, 1, 1],
                           [0, 0, 1, 0, 0],
                           [0, 0, 1, 0, 0]], dtype=np.uint8)
class WatermarkRemover(object):
    """Remove a known watermark from images.

    Workflow: build a grayscale search template and a dilated binary mask
    from a watermark sample, locate the watermark in each target image via
    template matching, then repair the matched region with inpainting.
    """
    def __init__(self, verbose=True):
        self.verbose = verbose
        # Grayscale template used to *locate* the watermark.
        self.watermark_template_gray_img = None
        # Dilated binary mask used to *repair* (inpaint) the watermark area.
        self.watermark_template_mask_img = None
        self.watermark_template_h = 0
        self.watermark_template_w = 0
        self.watermark_start_x = 0
        self.watermark_start_y = 0
    # Load the watermark template once so a batch of images can be cleaned later.
    def load_watermark_template(self, watermark_template_filename):
        self.generate_template_gray_and_mask(watermark_template_filename)
    # Morphological dilation with the cross-shaped module-level kernel.
    def dilate(self, img):
        dilated = cv2.dilate(img, _DILATE_KERNEL)
        return dilated
    # Build the search bitmap (gray) and repair bitmap (mask) from the template file.
    def generate_template_gray_and_mask(self, watermark_template_filename):
        """Return (gray, mask) for the template and cache both on the instance."""
        img = cv2.imread(watermark_template_filename)  # original watermark template
        # Grayscale image and binary mask (Otsu threshold, then hard binarize).
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_TOZERO + cv2.THRESH_OTSU)
        _, mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)
        mask = self.dilate(mask)  # dilate one ring so edges get fully repaired
        # mask = self.dilate(mask)  # (optional second ring, disabled)
        # Strip the non-text parts from the template image.
        img = cv2.bitwise_and(img, img, mask=mask)
        # Three channels are needed later when pasting the mask into the image.
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        self.watermark_template_gray_img = gray
        self.watermark_template_mask_img = mask
        self.watermark_template_h = img.shape[0]
        self.watermark_template_w = img.shape[1]
        # Debug artifacts written to the working directory.
        cv2.imwrite('ww-gray.jpg', gray)
        cv2.imwrite('ww-mask.jpg', mask)
        return gray, mask
    # Find the watermark position in an image file.
    def find_watermark(self, filename):
        # Load the images in gray scale
        gray_img = cv2.imread(filename, 0)
        return self.find_watermark_from_gray(gray_img, self.watermark_template_gray_img)
    # Find the watermark position in an already-grayscale image.
    def find_watermark_from_gray(self, gray_img, watermark_template_gray_img):
        """Return (x1, y1, x2, y2) of the best template match."""
        method = cv2.TM_CCOEFF
        # Apply template Matching
        res = cv2.matchTemplate(gray_img, watermark_template_gray_img, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            x, y = min_loc
        else:
            x, y = max_loc
        return x, y, x + self.watermark_template_w, y + self.watermark_template_h
    # Remove the watermark from an image array.
    def remove_watermark_raw(self, img, gray_mask):
        """
        :param img: image bitmap to clean (BGR array, not a filename)
        :param gray_mask: (gray, mask) pair as produced by
            generate_template_gray_and_mask -- gray locates the watermark,
            mask drives the repair
        :return: the repaired image bitmap
        """
        self.watermark_template_gray_img, self.watermark_template_mask_img = gray_mask
        # Locate the watermark.
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        x1, y1, x2, y2 = self.find_watermark_from_gray(img_gray, self.watermark_template_gray_img)
        self.watermark_start_x = x1
        self.watermark_start_y = y1
        # Build a full-size mask with the template mask pasted at the match.
        mask = np.zeros(img.shape, np.uint8)
        # watermark_template_mask_img = cv2.cvtColor(watermark_template_gray_img, cv2.COLOR_GRAY2BGR)
        # mask[y1:y1 + self.watermark_template_h, x1:x1 + self.watermark_template_w] = watermark_template_mask_img
        mask[y1:y2, x1:x2] = self.watermark_template_mask_img
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        # Repair the masked region with the TELEA inpainting algorithm.
        dst = cv2.inpaint(img, mask, 4, cv2.INPAINT_TELEA)
        # cv2.imwrite('dst.jpg', dst)
        return dst
    # Remove the watermark from an image file (requires a loaded template).
    def remove_watermark(self, filename, output_filename=None):
        """
        :param filename: image file to clean
        :param output_filename: optional path to write the cleaned image to
        :return: the repaired image bitmap
        """
        # Read the original image.
        img = cv2.imread(filename)
        dst = self.remove_watermark_raw(
            img, (self.watermark_template_gray_img, self.watermark_template_mask_img)
        )
        if output_filename is not None:
            cv2.imwrite(output_filename, dst)
        return dst
def get_loc(img_size, wm_size, mode='rb'):
    """Top-left offset at which a watermark of wm_size is placed in img_size.

    mode 'rb' anchors it to the bottom-right corner, 'md' centres it, and any
    other mode yields (0, 0).  Also echoes both sizes to stdout.
    """
    print(img_size, wm_size)
    full_w, full_h = img_size
    mark_w, mark_h = wm_size
    if mode == 'rb':
        return (full_w - mark_w, full_h - mark_h)
    if mode == 'md':
        return ((full_w - mark_w) // 2, (full_h - mark_h) // 2)
    return (0, 0)
def wm1(img_src, wm_src, dest="out", loc=(50, 50), alpha=0.25): # scale=5,
    """Overlay a semi-transparent watermark via matplotlib.

    Saves the composited figure to ``dest``/wm1_rst.jpg and returns it.
    """
    fig = plt.figure()
    # watermark = plt.imread(wm_src) # read the watermark
    watermark = np.array(plt.imread(wm_src)) # read the watermark
    # Optional watermark rescale (disabled):
    # new_size = [int(watermark.shape[0]/scale), int(watermark.shape[1]/scale), watermark.shape[2]]
    # watermark = resize(watermark, new_size, mode='constant')
    # NOTE(review): assumes the last channel is a float alpha channel (as
    # plt.imread gives for PNGs) -- confirm for other input formats.
    watermark[:, :, -1] *= alpha # adjust the watermark transparency
    plt.imshow(plt.imread(img_src)) # read the base image
    plt.figimage(watermark, loc[0], loc[1], zorder=10) # place the watermark
    plt.axis('off') # hide the axes
    plt.savefig(dest + "/wm1_rst.jpg", dpi=fig.dpi, bbox_inches='tight') # save
    fig.show()
    return fig
def wm2(src, logo, out="out"):
    """Blend a resized logo into the centre of *src*, preview, and save it."""
    img = cv2.imread(src)
    logo = cv2.imread(logo)
    logo = cv2.resize(logo, (350, 50)) # (100, 717)
    # Black canvas the size of the image; the logo gets pasted into it.
    inew = img - img
    # NOTE(review): img.shape[:2] is (rows, cols) while get_loc names its
    # inputs (x, y) -- axes may be swapped for non-square images; confirm.
    locp = get_loc(img.shape[:2], logo.shape[:2], mode='md') # position, size
    inew[locp[0]: locp[0] + logo.shape[0], locp[1]:locp[1] + logo.shape[1]] = logo
    inew = cv2.addWeighted(img, 1, inew, 0.4, 0) # m1 x alph + m2 x beta + 1
    cv2.imshow("hi", inew)
    cv2.waitKey()
    savep = out + '/wm-out.jpg'
    cv2.imwrite(savep, inew)
def i_water_maker(img):
    """Return *img* with 'logo.jpg' alpha-blended into its centre."""
    logo = cv2.imread("logo.jpg")
    # logo = cv2.resize(logo, (350, 50)) # (100, 717)
    inew = img - img # black canvas with the same shape as img
    locp = get_loc(img.shape[:2], logo.shape[:2], mode='md') # position, size
    inew[locp[0]: locp[0] + logo.shape[0], locp[1]:locp[1] + logo.shape[1]] = logo # paste the logo
    inew = cv2.addWeighted(img, 1, inew, 0.4, 0) # weighted blend
    return inew
def wm3(src, logo):
    """Paste *logo* onto the centre of *src* via PIL compositing and show it."""
    im = Image.open(src)
    mark = Image.open(logo)
    # Fully transparent layer the size of the base image.
    layer = Image.new('RGBA', im.size, (0, 0, 0, 0))
    layer.paste(mark, get_loc(im.size, mark.size, mode="md"))
    # The layer doubles as the compositing mask.
    out = Image.composite(layer, im, layer)
    out.show()
if __name__ == '__main__':
    src = 'test_dewm/raw.png'
    logo = 'logos/jd_logo_.png'
    # wm1(src, logo)
    # wm2(src, logo)
    # wm3(src, logo)
    # """
    wr = WatermarkRemover()
    # BUG FIX: remove_watermark_raw expects an image *array* (it calls
    # cv2.cvtColor on it), not a filename string -- read the file first.
    wr.remove_watermark_raw(
        img=cv2.imread("wm-out.jpg"),
        gray_mask=wr.generate_template_gray_and_mask("logos/jd-logo.jpg"),
    )
    # """
| Tkwitty/watermark | Put_water_mark.py | Put_water_mark.py | py | 7,777 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.dilate",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number... |
6667722703 | """empty message
Revision ID: 764dadce9dd1
Revises:
Create Date: 2017-09-28 01:16:36.519727
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '764dadce9dd1'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial ``bets`` and ``user`` tables (Alembic-generated)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('bets',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('max_users', sa.String(length=60), nullable=True),
    sa.Column('title', sa.String(length=60), nullable=False),
    sa.Column('text', sa.String(length=255), nullable=True),
    sa.Column('amount', sa.Integer(), nullable=False),
    sa.Column('completed', sa.Boolean(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=32), nullable=False),
    sa.Column('email', sa.String(length=60), nullable=False),
    sa.Column('birthday', sa.DateTime(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables created in ``upgrade`` (Alembic-generated)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user')
    op.drop_table('bets')
    # ### end Alembic commands ###
| ThreeOhSeven/Backend | migrations/versions/764dadce9dd1_.py | 764dadce9dd1_.py | py | 1,289 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
41908721379 | from django.http import JsonResponse
from django.shortcuts import render
from .models import Userdata,Entry
from django.shortcuts import render,HttpResponse,HttpResponseRedirect
# Create your views here.
# def check(request):
# return render(request,'check.html')
# def save(request):
# if request.method == 'POST':
# name = request.POST.get('name')
# email = request.POST.get('email')
# password = request.POST.get('password')
# print(name)
# print(email)
# print(password)
# # print('post request')
# # return JsonResponse('success')
# return HttpResponse('saved success')
# def save(request):
# if request.method == 'GET':
# age = request.GET['age']
# return HttpResponse('saved success aa gyaa')
# def call(request):
# if request.method == 'POST':
# name = request.POST.get('name')
# email = request.POST.get('email')
# password = request.POST.get('password')
# print(name,email,password)
# print('successfully sent')
# return HttpResponse('testing ho rha')
# def ajaxtest(request):
# return render(request,'ajaxtest.html')
# return JsonResponse()
# def check(request):
# return render(request,'check.html')
# def home1(request):
# return render(request,'home1.html')
# def call(request):
# if request.method == 'POST':
# age = request.POST.get('age')
# print(age)
# print('successfully sent it')
# return HttpResponse('testing check')
# Create your views here.
def home1(request):
    """Render the data-entry landing page."""
    template_name = "home1.html"
    return render(request, template_name)
def show(request):
    """List every Entry row on the show page."""
    entries = Entry.objects.all()
    return render(request, "show.html", {'data': entries})
def send(request):
    """Persist a new Entry from a POSTed form; reject any other method."""
    if request.method != 'POST':
        return HttpResponse("<h1>404 - Not Found</h1>")
    record = Entry(
        ID=request.POST['id'],
        data1=request.POST['data1'],
        data2=request.POST['data2'],
    )
    record.save()
    msg = "Data Stored Successfully"
    return render(request, "home1.html", {'msg': msg})
def delete(request):
    """Delete the Entry named by the ``id`` GET parameter, then return to the list."""
    Entry.objects.filter(ID=request.GET['id']).delete()
    return HttpResponseRedirect("show")
def edit(request):
    """Render the edit form pre-filled with the selected Entry's data.

    Falls back to "Not Available" placeholders when the record is missing.
    """
    ID = request.GET['id']
    data1 = data2 = "Not Available"
    for data in Entry.objects.filter(ID=ID):
        data1 = data.data1
        data2 = data.data2
    # BUG FIX: the original passed the *builtin* ``id`` function as the
    # ``id`` context variable; the template needs the record's ID instead.
    return render(request, "edit.html", {'ID': ID, 'id': ID, 'data1': data1, 'data2': data2})
def RecordEdited(request):
    """Apply a POSTed edit to an existing Entry, then return to the list."""
    if request.method != 'POST':
        return HttpResponse("<h1>404 - Not Found</h1>")
    Entry.objects.filter(ID=request.POST['id']).update(
        data1=request.POST['data1'],
        data2=request.POST['data2'],
    )
    return HttpResponseRedirect("show")
{
"api_name": "django.shortcuts.render",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "models.Entry.objects.all",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "models.Entry.objects",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_na... |
34912332593 | import serial
import time
"Arduino Controller Object"
class Arduino_Controller(object):
    """Polls an Arduino over a serial port for an angle value and button state.

    ``get_Data`` is meant to run on its own thread and continuously updates
    ``self.angle`` and ``self.is_Button_Pressed``.
    """
    def __init__(self, path):
        # path: serial device path (e.g. 'COM3' or '/dev/ttyACM0')
        self.path = path
        self.arduino = serial.Serial(self.path, 9600, timeout = 5)
        self.nums = "0123456789"
        self.angle = 0
        self.is_Button_Pressed = False

    "Arduino Thread Function"
    def get_Data(self):
        """Read serial lines forever, parsing button flag + 1-3 angle digits.

        str() of the raw bytes line yields "b'...'", so index 2 is the first
        payload character (button flag, 'T' = pressed) and indexes 3-5 hold
        up to three digits of the angle.  Malformed lines are skipped.
        """
        time.sleep(1)  # let the board settle after the port was opened
        self.arduino.flush()
        while(True):
            try:
                dataRecieved = str(self.arduino.readline())
                angle_val = dataRecieved[3:6]
                button_state = dataRecieved[2]
                # Accept 3-, 2- or 1-digit angles, probing widest first.
                if(angle_val[2] in self.nums):
                    angle = int(angle_val)
                elif(angle_val[1] in self.nums):
                    angle = int(angle_val[:2])
                else:
                    angle = int(angle_val[0])
                self.angle = angle
                self.is_Button_Pressed = (button_state == "T")
                self.arduino.flush()
            except Exception:
                # BUG FIX: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit, making the loop unstoppable.
                # Parse errors on short/garbled lines are still skipped.
                pass
{
"api_name": "serial.Serial",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
}
] |
73137746664 | from django import forms
from .models import Submission, Dataset, Method
class SubmitForm(forms.Form):
    """Challenge submission form: per-user dataset choice, SNP list, flag, method."""
    def __init__(self,data=None,*args,**kwargs):
        def conv(private):
            # Suffix shown next to the dataset name in the dropdown.
            if private:
                return " (private Leaderboard)"
            else:
                return " (public Leaderboard)"
        # ``l_simu`` (the simulations this user may submit to) must be popped
        # before calling the parent constructor, which does not accept it.
        l_simu = kwargs.pop("l_simu")
        super(SubmitForm,self).__init__(data,*args,**kwargs)
        # The dataset choices depend on the caller, so this field is built
        # dynamically here instead of declaratively below.
        self.fields['dataset'] = forms.ChoiceField(widget=forms.Select,
                choices=[(simu.name,simu.name +
                    conv(simu.private)) for simu in l_simu])
    answer = forms.CharField(label = 'List of candidate SNPs', max_length=4000)
    # NOTE(review): the labels look inverted (False -> 'Yes', True -> 'No'),
    # and ``coerce`` compares against the POSTed *string* 'True' -- confirm
    # the intended mapping before relying on this flag.
    with_environment_variable = forms.TypedChoiceField(
        coerce=lambda x: x == 'True',
        choices=((False, 'Yes'), (True, 'No')),
        widget=forms.RadioSelect
    )
    method = forms.ModelChoiceField(Method.objects.all(), required=True)
    method_desc = forms.CharField(label="", widget=forms.Textarea,
            max_length=1000, required=False)
| bcm-uga/ChallengeWebSite | challenge/forms.py | forms.py | py | 1,147 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.Form",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.forms.ChoiceField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.for... |
1165217345 | import requests
import bs4
print('Loading page...')
# Listing URL for the Engineering / Technical job category (the f-string has
# no placeholders; kept as written).
url=f'https://www.myjobmag.co.ke/search/jobs?q=&field=Engineering+%2F+Technical'
def getJobtitle(soup):
    """Text of the first <h1> under <section> (the job title)."""
    return str(soup.select('section h1')[0].getText())
def getDescription(soup):
    """Text of the first paragraph inside the job-details block."""
    return str(soup.select('.job-details p')[0].getText())
def getAcademics(soup):
    """Text of span index 3 in the job-key-info block."""
    return str(soup.select('.job-key-info span')[3].getText())
def getExperience(soup):
    """Text of span index 5 in the job-key-info block."""
    return str(soup.select('.job-key-info span')[5].getText())
def save_job_info(JobTitle,JobDescription,JobAcademics,JobExp,job_Link):
    """Append one job's details plus its link to the scrape output file.

    Uses a context manager so the file handle is closed even if a write
    fails (the original relied on an explicit close that a raised error
    would skip, leaking the handle).
    """
    print('Saving job specifications...')
    with open('C:\\Users\\peter\\Desktop\\spicebucks\\Jobmag scrapped.txt', 'a') as jobfile:
        jobfile.write('Tittle :'+JobTitle+'\n'+'Descrition :'+JobDescription+'\n'+'Academics :'+JobAcademics+'\n'+'Experience'+str(JobExp)+'\n')
        jobfile.write(job_Link+'\n'+'*'*70)
def mainfunc():
    """Scrape the listing page and persist each linked job posting.

    Fetches the category listing, then follows the odd-indexed <section>
    anchors (presumably the job links) to each detail page and extracts
    title/description/academics/experience from it.
    """
    res=requests.get(url)
    res.raise_for_status()
    soup=bs4.BeautifulSoup(res.text,'html.parser')
    elems=soup.select('section a')
    # Step over the odd anchor indexes across the first 35 anchors.
    for i in range (1,35,2):
        jobUrl=requests.get('https://www.myjobmag.co.ke'+elems[i].get('href'))
        job_Link='https://www.myjobmag.co.ke'+elems[i].get('href')
        jobUrl.raise_for_status()
        # Re-bind ``soup`` to the detail page for the getters below.
        soup=bs4.BeautifulSoup(jobUrl.text,'html.parser')
        print('Getting job specifications...')
        JobTitle=getJobtitle(soup)
        JobDescription=getDescription(soup)
        JobAcademics=getAcademics(soup)
        JobExp=getExperience(soup)
        save_job_info(JobTitle,JobDescription,JobAcademics,JobExp,job_Link)
mainfunc()
print('Done')
| petreleven/Webscraping-101 | webscraping101/jobmagRefactored.py | jobmagRefactored.py | py | 1,791 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"... |
1163783579 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import csv
# Collect the date (x) and price (y) columns from the local CSV file.
x = []
y = []
with open('input.csv', 'r', encoding='utf8') as source:
    for row in csv.reader(source, delimiter=','):
        x.append(row[0])
        y.append(float(row[1]))

# Plot the price series with a fixed y-range and display it.
plt.plot(x, y, label='banana')
plt.xlabel('date')
plt.ylabel('NT$')
plt.ylim([15, 25])
plt.title('Market Average Price')
plt.legend()
plt.show()
{
"api_name": "csv.reader",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.... |
44542833866 | # This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
import requests
import scrapy
api_endpoint = 'http://localhost:5000/api/domains'
class QuotesSpider(scrapy.Spider):
    """Spider seeded from a paginated local API of domains; logs each page title."""
    name = "quotes"

    def start_requests(self):
        page = 1
        while True:
            payload = requests.get(api_endpoint, params={'page': page}).json()
            if not payload['objects']:
                break
            for entry in payload['objects']:
                yield scrapy.Request(url=entry['domain'])
            page += 1

    def parse(self, response):
        page_title = response.xpath("//title/text()").get()
        self.logger.info("[%s] - %s" % (response.url, page_title))
| imfht/sec-flask-cookiecutter | spider/spider/spiders/__init__.py | __init__.py | py | 790 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 22,
"usage_type": "call"
}
] |
22192896243 | from telegram import Bot
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
# SECURITY NOTE(review): the bot token is hardcoded (and now public) -- it
# should be rotated and loaded from the environment instead.
bot = Bot(token='5593423447:AAEe0rCZnZdYXNpxxyatR37l6afSppj-yZI')
updater = Updater(token='5593423447:AAEe0rCZnZdYXNpxxyatR37l6afSppj-yZI')
dispatcher = updater.dispatcher
def start(update, context):
    """Greet the user in response to /start."""
    chat = update.effective_chat.id
    context.bot.send_message(chat, 'Добро пожаловать')
def command(update, context):
    """Reply to any unrecognised command."""
    chat = update.effective_chat.id
    context.bot.send_message(chat, 'Я таких команд не знаю')
def delete_words(update, context):
    """Reply with the /pars arguments minus any word containing 'а', 'б' or 'в'.

    The original filtered the argument list three times, once per letter; a
    single pass excluding words that contain any of the three letters is
    equivalent and clearer.
    """
    chat = update.effective_chat.id
    context.bot.send_message(chat, 'Обрабатываю...')
    kept = [word for word in context.args
            if not any(letter in word for letter in "абв")]
    answer = " ".join(kept)
    context.bot.send_message(chat, f'Готово.\n"{answer}"')
# Register /start and /pars, plus a catch-all for unknown commands.
start_handler = CommandHandler('start', start)
pars_handler = CommandHandler('pars', delete_words)
command_handler = MessageHandler(Filters.command, command)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(pars_handler)
dispatcher.add_handler(command_handler)
# Long-poll Telegram until interrupted.
updater.start_polling()
updater.idle()
| Minions-Wave/GB-Minions-Wave | The Big Brain Solutions/Personal Zone/NighTramp Solutions/Blok 2/Python/HomeWork/Seminar009/main.py | main.py | py | 1,271 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "telegram.Bot",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "telegram.ext.Updater",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "telegram.ext.CommandHandler",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "telegram.... |
from boto3.session import Session
# SECURITY NOTE(review): replace the placeholder credentials via environment
# variables or an AWS profile; never commit real keys.
session = Session(aws_access_key_id='[your_key_id]', aws_secret_access_key='[your_secret_key]')
def shutdown_all(resource_name, region):
    """Stop every running instance of *resource_name* (e.g. 'ec2') in *region*.

    Fixes two bugs in the original: the boto3 filter dict lacked the
    required 'Name' key, and ``stop_instances`` is a *client* method that
    does not exist on a ServiceResource -- each matched instance object is
    stopped directly instead.
    """
    resource = session.resource(resource_name, region_name=region)
    running = resource.instances.filter(
        Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]
    )
    for instance in running:
        instance.stop()
shutdown_all('ec2','us-west') | tiago-clementino/desafioDevOps | ops/scripts/q1.py | q1.py | py | 411 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.session.Session",
"line_number": 3,
"usage_type": "call"
}
] |
73161250024 | from fastapi import APIRouter, Depends, HTTPException, status
from fastapi.exceptions import ResponseValidationError
from app.crud import users_dao
from app.schemas import UserBase
from sqlalchemy.orm import Session
from ..database import get_db
router = APIRouter()
@router.get("/{cbu}", response_model=UserBase)
def get_user_info(cbu: str, db: Session = Depends(get_db)):
    """Return the account record identified by *cbu*, or raise 404."""
    try:
        user = users_dao.get_info(cbu, db)
        if user:
            return user
        else:
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Account not found")
    except HTTPException:
        raise
    except ResponseValidationError:
        # NOTE(review): ResponseValidationError is normally raised by FastAPI
        # *after* the handler returns, so this branch may never fire -- confirm.
        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Account information wrongly formatted")
@router.get("/{cbu}/balance", response_model=float)
def get_user_balance(cbu: str, db: Session = Depends(get_db)):
    """Return the balance of the account identified by *cbu*, or raise 404.

    BUG FIX: the original used a truthiness test (``if balance:``), which
    reported a 404 for an existing account whose balance is exactly 0.0;
    only ``None`` should mean "not found".
    """
    try:
        balance = users_dao.get_balance(cbu, db)
        if balance is not None:
            return balance
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Account not found")
    except HTTPException:
        raise
    except ResponseValidationError:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Account information wrongly formatted")
| FelipeCupito/DB2-TP | backendBanks/app/routers/users.py | users.py | py | 1,315 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "database.get_... |
23408765544 | # -*- coding: utf-8 -*-
from sqlalchemy import Table, Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_mate import EngineCreator, ExtendedBase
from sfm import rnd
import random
# SECURITY NOTE(review): database credentials are hardcoded (and published);
# rotate them and read them from the environment instead.
engine = EngineCreator(
    host="rajje.db.elephantsql.com",
    database="dplfpjna",
    username="dplfpjna",
    password="lIP2DKh4WxW92bTUo6LwojnLgdmPby6D"
).create_postgresql_psycopg2()
# Declarative base shared by the ORM models below.
Base = declarative_base()
class Department(Base, ExtendedBase):
    """ORM model for the ``departments`` table."""
    __tablename__ = "departments"
    department_id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
class Employee(Base, ExtendedBase):
    """ORM model for the ``employees`` table, bound to the module engine."""
    __tablename__ = "employees"
    employee_id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    department_id = Column(Integer, ForeignKey("departments.department_id"))
    # Presumably consumed by the sqlalchemy_mate ExtendedBase helpers used
    # below (get_ses / smart_insert) -- verify against sqlalchemy_mate docs.
    _settings_engine = engine
# Create the tables, then seed 3 departments and 50 employees with random
# hex names and random department assignments.
Base.metadata.create_all(engine)
ses = Employee.get_ses()
n_department = 3
department_list = [
    Department(department_id=1, name="HR"),
    Department(department_id=2, name="Finance"),
    Department(department_id=3, name="IT"),
]
n_employee = 50
employee_list = [
    Employee(employee_id=i+1, name=rnd.rand_hexstr(8), department_id=random.randint(1, n_department))
    for i in range(n_employee)
]
# Bulk-insert helpers provided by sqlalchemy_mate's ExtendedBase.
Department.smart_insert(ses, department_list)
Employee.smart_insert(ses, employee_list)
# NOTE(review): stray string literal below appears to hold credentials --
# remove it and rotate whatever it grants access to.
"""
machugwu
S7rXQuwhXMo^
"""
{
"api_name": "sqlalchemy_mate.EngineCreator",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sqlalchemy_mate.ExtendedBase",
"line_number": 20,
"usage_type":... |
13397438206 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 1 11:47:13 2022
@author: noise
neural network plot utilitiy functions
"""
import matplotlib.pyplot as plt
import numpy as np
import os
from mpl_toolkits.axes_grid1 import make_axes_locatable
def _add_panel(fig, ax, img, title, vmax):
    """Draw one heat map on *ax* with its own right-hand colorbar; hide the axes."""
    ax.set_title(title)
    im = ax.imshow(img, cmap='hot', vmin=0, vmax=vmax)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes('right', size='5%', pad=0.05)
    fig.colorbar(im, cax=cax, orientation='vertical')


def plotFullFields(Enn, Etest, epoch, i, dir):
    """Save a 3-panel |E|^2 comparison (prediction, target, squared error).

    Enn is a torch tensor (detached to numpy here); Etest is a numpy array.
    The figure is written to ``dir``/``i``/``epoch``.png.  The three
    previously duplicated panel stanzas are factored into ``_add_panel``.
    """
    # exist_ok avoids the check-then-create race of isdir + makedirs.
    os.makedirs(dir + str(i) + '/', exist_ok=True)
    Enn = Enn.detach().cpu().numpy()
    # Shared color scale so the three panels are directly comparable.
    vmax = np.max(np.abs(Etest) ** 2)
    fig, axes = plt.subplots(nrows=1, ncols=3)
    fig.suptitle('epoch = ' + str(epoch), x=0.5, y=0.75, fontsize=16)
    _add_panel(fig, axes[0], np.abs(Enn) ** 2, '$||E_{nn}||^2$', vmax)
    _add_panel(fig, axes[1], np.abs(Etest) ** 2, '||$E_{test}||^2$', vmax)
    _add_panel(fig, axes[2], np.abs(Enn - Etest) ** 2, '$||E_{nn}-E_{test}||^2$', vmax)
    plt.tight_layout()
    plt.savefig(dir + str(i) + '/' + str(epoch) + ".png")
    plt.close()
| demroz/pinn-ms | optimizeNeuralNetwork/plotUtil.py | plotUtil.py | py | 1,787 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "os.path.isdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number":... |
19557411940 | import sys
from collections import deque
input = sys.stdin.readline

# Knight's shortest path: BFS from start to target on an l x l board.
KNIGHT_MOVES = ((2, 1), (2, -1), (-2, 1), (-2, -1), (1, 2), (-1, 2), (-1, -2), (1, -2))

t = int(input())
for _ in range(t):
    board = int(input())
    sx, sy = map(int, input().split())
    tx, ty = map(int, input().split())
    # dist[r][c] = 1 + number of moves to reach (r, c); 0 means unvisited.
    dist = [[0] * board for _ in range(board)]
    dist[sx][sy] = 1
    frontier = deque([(sx, sy)])
    while frontier:
        cx, cy = frontier.popleft()
        if (cx, cy) == (tx, ty):
            break
        for mx, my in KNIGHT_MOVES:
            nx, ny = cx + mx, cy + my
            if 0 <= nx < board and 0 <= ny < board and dist[nx][ny] == 0:
                dist[nx][ny] = dist[cx][cy] + 1
                frontier.append((nx, ny))
    print(dist[tx][ty] - 1)
| hyotaime/PS.py | Silver/Silver1/7562.py | 7562.py | py | 722 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 13,
"usage_type": "call"
}
] |
23073952009 | from webapp import app
import os
from flask import render_template, url_for, flash, redirect, request, abort
from webapp.models import Video
from os import path
from sqlalchemy.sql import text
@app.route("/")
@app.route("/home")
def home():
page = request.args.get('page', 1, type=int)
videos = Video.query.order_by(Video.channel.desc())
return render_template('home.html', videos=videos)
@app.route("/video/<title>/", methods=['GET', 'POST'])
def video_post(title):
videos = Video.query.filter_by(title=title).all()
all_videos = Video.query.order_by(Video.channel.desc())
for v in videos[:1]:
title_name = v.title
return render_template("video.html", videos=videos, title_name=title_name, all_videos=all_videos)
@app.route("/video-post/<fil>", methods=['GET', 'POST'])
def filter(fil):
if fil == 'all':
page = request.args.get('page', 1, type=int)
videos = Video.query.order_by(Video.channel.desc()).paginate(page=page, per_page=20)
return render_template("category.html", videos=videos, fil=fil, title_name='All Courses')
elif fil == 'Geeks Lesson':
videos = Video.query.filter_by(channel="Geek's Lesson").all()
return render_template("category.html", videos=videos, title_name=fil+' Courses')
elif fil == 'My CS':
videos = Video.query.filter_by(channel=fil).all()
return render_template("category.html", videos=videos, title_name=fil+' Courses')
elif fil == 'Academic Lesson':
videos = Video.query.filter_by(channel=fil).all()
return render_template("category.html", videos=videos, title_name=fil+' Courses')
else:
videos = Video.query.filter_by(category=fil).all()
return render_template("category.html", videos=videos, title_name=fil+' Courses')
@app.route("/partners")
def partners():
videos = Video.query.order_by(Video.channel.desc())
return render_template('partners.html', videos=videos, title_name = 'Our Partners')
@app.route("/contact")
def contact():
videos = Video.query.order_by(Video.channel.desc())
return render_template('contact.html', videos=videos, title_name = 'Contact Us')
| sheikhhanif/cslesson | webapp/routes.py | routes.py | py | 2,179 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.request.args.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "webapp.m... |
11893023589 | # -*- coding: utf-8 -*-
import pymysql
# by huxiajie
class MySqlPipeline(object):
    """Scrapy item pipeline persisting house items into a local MySQL db.

    Each item is written to three tables -- ``house`` (main record),
    ``housepic`` (one row per image URL) and ``payment`` (one row per
    payment plan); every table insert is best-effort and independent.
    """
    def __init__(self):
        self.conn = pymysql.connect(
            host="localhost",
            db="jroom",
            user="root",
            passwd="123456",
            charset='utf8'
        )
        self.cursor = self.conn.cursor()
        # NOTE(review): pymysql.connect raises on failure rather than
        # returning None, so this branch likely never fires -- confirm.
        if self.conn is None:
            print("***********数据库连接失败***********")
        else:
            print("***********数据库连接成功***********")
        # Number of items processed so far (reported when the spider closes).
        self.count = 0
    def process_item(self, item, spider):
        """Insert one item into house/housepic/payment; returns the item."""
        try:
            # table:house
            self.cursor.execute(
                "insert into house (houseID, housetitle, city, district, area, street, community, room, lng, lat, housearea, housedirect, housefloor, househall, balcony, bathroom, housepic, housearound, housestatus, housetype,ownertel,mantel) values(%s, %s, %s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
                (item['houseID'],
                 item['housetitle'],
                 item['city'],
                 item['district'],
                 item['area'],
                 item['street'],
                 item['community'],
                 item['room'],
                 item['lng'],
                 item['lat'],
                 item['housearea'],
                 item['housedirect'],
                 item['housefloor'],
                 item['househall'],
                 item['balcony'],
                 item['bathroom'],
                 item['housepic'],
                 item['housearound'],
                 item['housestatus'],
                 item['housetype'],
                 item['ownertel'],
                 item['mantel']))
            self.conn.commit()
        except Exception as error:
            print(error)
        try:
            # table:housepic -- one row per scraped image URL.
            for imageurl in item['image_urls']:
                self.cursor.execute("insert into housepic(houseID, pic) values(%s,%s)",
                                    (item['houseID'], imageurl))
                self.conn.commit()
        except Exception as error:
            print(error)
        try:
            # table:payment -- parallel lists, one row per payment plan.
            for i in range(len(item['payname'])):
                self.cursor.execute(
                    "insert into payment (houseID,payname,payrent,paydeposit,payservice) values(%s,%s,%s,%s,%s)",
                    (item['houseID'],
                     item['payname'][i],
                     item['payrent'][i],
                     item['paydeposit'][i],
                     item['payservice'][i]
                     ))
                self.conn.commit()
        except Exception as error:
            print(error)
        self.count += 1
        return item
    def close_spider(self, spider):
        """Report the processed-item count and release the DB resources."""
        print(str(self.count))
        self.cursor.close()
        self.conn.close()
# class ImgPipeline(ImagesPipeline):
# def get_media_requests(self, item, info):
# for image_url in item['image_urls']:
# yield Request(image_url)
#
# # 当一个单独项目中的所有图片请求完成时
# # item_completed()方法将被调用
# def item_completed(self, results, item, info):
# image_paths = [x['path'] for ok, x in results if ok]
# if not image_paths:
# raise DropItem("Item contains no images")
# item['image_paths'] = image_paths
# return item
| mohxjmo/jroomCrawl | JRoom/pipelines.py | pipelines.py | py | 3,424 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymysql.connect",
"line_number": 9,
"usage_type": "call"
}
] |
39510454656 | from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
def ngram_array(a_text, s_text, n):
    """Build an n-gram count matrix for an answer text and a source text.

    Arguments:
        a_text: answer text
        s_text: source text
        n: n-gram size (1 == unigram, 2 == bigram, ...)
    Returns:
        A 2 x vocab array -- row 0 holds the answer's n-gram counts, row 1
        the source's, over the vocabulary learned from both texts.
    """
    vectorizer = CountVectorizer(analyzer='word', ngram_range=(n, n))
    # fit_transform learns the shared vocabulary and returns a sparse
    # document-term matrix, densified for the containment arithmetic.
    document_term = vectorizer.fit_transform([a_text, s_text])
    return document_term.toarray()
def containment(ngram_array):
    """Normalized n-gram intersection: |A ∩ S| / |A|.

    Arguments:
        ngram_array: 2-row array of n-gram counts (row 0 = answer,
            row 1 = source), as produced by ``ngram_array``.
    Returns:
        The intersection of per-n-gram counts divided by the answer's
        total n-gram count.
    """
    # Column-wise minimum = how many of each n-gram both texts share.
    shared_counts = np.amin(ngram_array, axis=0)
    overlap = np.sum(shared_counts)
    answer_total = np.sum(ngram_array[0])
    return overlap / answer_total
# Calculate the ngram containment for one answer file/source file pair in a df
def calculate_containment(a_text, s_text, n):
    """Containment between an answer text and its associated source text.

    Builds the n-gram count array for the pair, then computes the
    normalized intersection of those counts.

    Arguments:
        a_text: answer text
        s_text: source text
        n     : n-gram size
    Returns:
        A single containment value measuring answer/source similarity.

    Note: written for the plagiarism-detection project of Udacity's
    machine learning engineer nanodegree.
    """
    return containment(ngram_array(a_text, s_text, n))
def containment_wrapper(article_pair, n_gram_choice=20):
    '''
    A wrapper around the calculate_containment function that accepts a list
    as input and performs error handling.
    Argument:
        article_pair : (list) list containing two articles to compare
                       [articleA, articleB] (index 2 is used as a row id
                       in the error message)
        n_gram_choice : an integer that defines the ngram size
    Returns:
        A single containment value for the two articles, or None on failure.
    '''
    try:
        # Local renamed from `containment` — the original shadowed the
        # module-level containment() function.
        score = calculate_containment(article_pair[0], article_pair[1], n_gram_choice)
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate instead of being silently swallowed.
    except Exception:
        print(f"We couldn't calculate the containment for row {article_pair[2]}")
        score = None
    return score
# Compute the normalized LCS given an answer text and a source text
def lcs_norm_word(answer_text, source_text):
    """Normalized longest common subsequence of WORDS between two texts.

    Arguments:
        answer_text: pre-processed answer text
        source_text: pre-processed source text associated with the answer
    Returns:
        LCS word count divided by the number of words in the answer.

    Note: written for the plagiarism-detection project of Udacity's
    machine learning engineer nanodegree.
    """
    answer_words = answer_text.split()
    source_words = source_text.split()
    n_a = len(answer_words)
    n_s = len(source_words)
    # DP table: table[i, j] = LCS length of the first i source words and
    # the first j answer words (extra row/column 0 for the empty prefix).
    table = np.zeros(shape=(n_s + 1, n_a + 1))
    for i, s_word in enumerate(source_words, start=1):
        for j, a_word in enumerate(answer_words, start=1):
            if s_word == a_word:
                # Match: extend the diagonal (top-left) subsequence.
                table[i, j] = table[i - 1, j - 1] + 1
            else:
                # No match: best of dropping one word from either text.
                table[i, j] = max(table[i, j - 1], table[i - 1, j])
    # Normalize the final LCS length by the answer's word count.
    return table[n_s, n_a] / n_a
def lcs_wrapper(article_pair):
    '''
    A wrapper around the lcs_norm_word function that accepts a list as input
    and performs error handling.
    Argument:
        article_pair : (list) list containing two articles to compare
                       [articleA, articleB] (index 2 is used as a row id
                       in the error message)
    Returns:
        A normalized LCS value, or None when the computation fails
    '''
    try:
        LCS_normalized = lcs_norm_word(article_pair[0], article_pair[1])
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate instead of being silently swallowed.
    except Exception:
        print(f"We couldn't calculate the longest common subsequence for row {article_pair[2]}")
        LCS_normalized = None
return LCS_normalized | tonyjward/plagiarism_detection | src/utils/create_features.py | create_features.py | py | 6,006 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "... |
505960290 | """
Querying by location and extract data location
"""
def places_by_query(bfShp, epsgIn, keyword=None, epsgOut=4326,
                    _limit='100', onlySearchAreaContained=True):
    """
    Get absolute location of facebook data using the Facebook API and
    Pandas to validate data.

    Works only for the 'places' search type.

    buffer_shp could be a shapefile with a single buffer feature
    or a dict like:
    buffer_shp = {
        x: x_value,
        y: y_value,
        r: dist
    }
    or a list or a tuple:
    buffer_shp = [x, y, r]

    Returns a GeoDataFrame of places, or 0 when the query yields no data.
    """

    import pandas
    from geopandas import GeoDataFrame
    from shapely.geometry import Polygon, Point
    from glass.prop.feat.bf import getBufferParam
    from glass.acq.dsn.fb.search import by_query

    search_type = 'place'

    # Centre and radius of the search circle, in WGS84 for the API call.
    x_center, y_center, dist = getBufferParam(bfShp, epsgIn, outSRS=4326)
    data = by_query(
        search_type, keyword=keyword,
        x_center=x_center, y_center=y_center, dist=dist,
        limit=_limit, face_fields=[
            "location", "name", "category_list", "about",
            "checkins", "description", "fan_count"
        ]
    )
    # by_query returns a falsy sentinel on no data; truth-testing a
    # DataFrame raises, which is treated here as "data present".
    try:
        if not data:
            # Return NoData
            return 0
    except:
        pass
    # Sanitize category_list field: expand the list column into one
    # column per category entry.
    data = pandas.concat([
        data.drop(["category_list"], axis=1),
        data["category_list"].apply(pandas.Series)
    ], axis=1)
    # Columns created by the expansion have integer names.
    _int_cols = [
        c for c in data.columns.values if type(c) == int
    ]
    __int_cols = {
        x : "col_{}".format(str(x)) for x in _int_cols
    }
    data.rename(columns=__int_cols, inplace=True)
    data.rename(columns={"id" : "id_1", "name" : "name_1"}, inplace=True)
    for k in __int_cols:
        # Expand each category dict column into id/name columns with a
        # per-level suffix (id_2/name_2, id_3/name_3, ...).
        data = pandas.concat([
            data.drop([__int_cols[k]], axis=1),
            data[__int_cols[k]].apply(pandas.Series)
        ], axis=1)
        data.rename(columns={
            'id' : 'id_' + str(k+2),
            'name' : 'name_' + str(k+2)
        }, inplace=True)
        # Drop the spurious `0` column produced for rows without data.
        # NOTE(review): indentation lost in extraction — this may have
        # been outside the loop in the original; confirm upstream.
        if 0 in list(data.columns.values):
            data.drop([0], axis=1, inplace=True)
    # Pandas dataframe to Geopandas Dataframe
    geoms = [Point(xy) for xy in zip(data.longitude, data.latitude)]
    data.drop(["latitude", "longitude"], axis=1, inplace=True)
    gdata = GeoDataFrame(data, crs='EPSG:4326', geometry=geoms)
    if onlySearchAreaContained:
        from shapely.wkt import loads
        from glass.prj.obj import prj_ogrgeom
        from glass.gp.prox.bfing.obj import xy_to_buffer
        # Check if all retrieved points are within the search area:
        # build the buffer in metric EPSG:3857, reproject to 4326.
        _x_center, _y_center, _dist = getBufferParam(
            bfShp, epsgIn, outSRS=3857
        )
        search_area = xy_to_buffer(
            float(_x_center), float(_y_center), float(_dist)
        )
        search_area = prj_ogrgeom(search_area, 3857, 4326)
        search_area = loads(search_area.ExportToWkt())
        gdata["tst_geom"] = gdata["geometry"].intersects(search_area)
        gdata = gdata[gdata["tst_geom"] == True]
        gdata.reset_index(drop=True, inplace=True)
    # Sanitize id
    gdata["fid"] = gdata["id_1"]
    gdata["fb_type"] = search_type
    # Drop location detail columns that may or may not be present.
    __DROP_COLS = ["id_1", "city", "country", "street", "zip", "located_in"]
    DROP_COLS = [c for c in __DROP_COLS if c in gdata.columns.values]
    if onlySearchAreaContained:
        DROP_COLS.append("tst_geom")
    gdata.drop(DROP_COLS, axis=1, inplace=True)
    if epsgOut != 4326:
        gdata = gdata.to_crs('EPSG:{}'.format(str(epsgOut)))
    return gdata
def places_to_shp(searchArea, epsgIn, epsgOut, outShp,
                  keyword_=None, onlySearchArea=True):
    """
    Fetch Facebook places for a search area and write them to a
    vectorial file.

    Returns the output path, or 0 when the query yields no data.
    """
    from glass.wt.shp import df_to_shp

    places_df = places_by_query(
        searchArea, epsgIn, keyword=keyword_, epsgOut=epsgOut,
        onlySearchAreaContained=onlySearchArea
    )
    # places_by_query returns 0 on no data; truth-testing a GeoDataFrame
    # raises, which is treated as "data present".
    try:
        if not places_df:
            return 0
    except:
        pass
    df_to_shp(places_df, outShp)
    return outShp
| jasp382/glass | glass/acq/dsn/fb/places.py | places.py | py | 4,253 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "glass.prop.feat.bf.getBufferParam",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "glass.acq.dsn.fb.search.by_query",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 53,
"usage_type": "call"
},
{
"... |
15566377203 | # complete k-means++
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
# load data: sklearn "digits"-style samples, one 8x8 image per row
data = np.loadtxt('Downloads/mnist_small.txt')
# normalize the data — raw pixel intensities are 0..16, scale into [0, 1]
data = np.divide(data, 16)
#distance function
def dist(a, b, ax=1):
    """Euclidean distance between `a` and `b` along axis `ax`
    (ax=None collapses everything into a single scalar norm)."""
    difference = b - a
    return np.linalg.norm(difference, axis=ax)
# 20 iterations — independent k-means++ restarts over the same data
for k in range(20):
    # initialize the centroids with the k-means++ seeding scheme
    center = np.zeros((10, 64))
    temp = np.zeros(1797)
    index = np.random.choice(1797)
    center[0] = data[index]
    for i in range(1, 10):
        for j in range(1797):
            # squared distance from sample j to its nearest chosen centroid
            temp[j] = (np.amin(dist(data[j], center[0:i])))**2
        # sample the next seed proportionally to squared distance (D^2 weighting)
        temp = np.divide(temp, np.sum(temp))
        index = np.random.choice(1797, p=temp)
        center[i] = data[index]
    center_old = np.zeros(center.shape)
    clusters = np.zeros(data.shape[0])
    error = dist(center, center_old, None)
    # NOTE(review): `sum` shadows the builtin; it accumulates the
    # per-iteration distortion of this restart.
    sum = []
    # Lloyd iterations until the centroids stop moving
    while error != 0:
        s = 0
        # assign each datapoint to cluster
        for i in range(1797):
            distances = dist(data[i], center)
            cluster = np.argmin(distances)
            s = s + distances[cluster]
            clusters[i] = cluster
        sum.append(s)
        center_old = deepcopy(center)
        # re-compute the centroids
        for i in range(10):
            points = [data[j] for j in range(1797) if clusters[j] == i]
            if len(points) != 0:
                center[i] = np.mean(points, axis=0)
        error = dist(center, center_old, None)
    # plot this restart's distortion curve
    # NOTE(review): indentation lost in extraction — plot assumed inside
    # the restart loop, savefig/show after it; confirm upstream.
    x = range(len(sum))
    plt.plot(x, sum)
plt.savefig('Downloads/kmeans++ distortion')
plt.show()
#center=center*16
#plt.gray()
#for i in range(10):
#    plt.imshow(center[i].reshape(8,8))
#    plt.savefig('Downloads/centroid_++(%d).png' % i)
#    plt.show()
| lizihao1999/k-means | k-means++.py | k-means++.py | py | 1,688 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.loadtxt",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_n... |
17581795142 | import urllib
from typing import Any, Optional
from functools import lru_cache
import requests
from starwhale.utils.retry import http_retry
from starwhale.base.uri.instance import Instance
from starwhale.base.uri.exceptions import UriTooShortException
class Project:
    # Resolved project identifier: numeric id (as string) on cloud
    # instances, the plain name on standalone instances.
    id: str
    name: str
    instance: Instance
    # Remainder of the URI path after "project/<name>" (resource part).
    path: str = ""

    def __init__(
        self,
        name: str = "",
        uri: Optional[str] = None,
        instance: Optional[Instance] = None,
    ) -> None:
        """
        :param name: When uri belongs to standalone, it is the name. When uri belongs to cloud, name is the id.
        :param uri: project uri, like "local/project/starwhale" "http://127.0.0.1:8000/project/sw:p-1"
        :param instance: instance object, default is the current selected.
        """
        if name and uri:
            raise Exception("name and uri can not both set")
        # init instance
        # A name containing "/" is actually a uri in disguise.
        if "/" in name:
            uri = name
            name = ""
        self.instance = instance or Instance(uri=uri or "")
        if not name:
            # use project name in path, the path must at least contain project/{name}
            p = self.instance.path.split("/")
            if len(p) >= 2 and p[0] == "project":
                name = p[1]
                self.path = "/".join(p[2:])
            elif len(p) == 1:
                # Compatible with features of URI
                name = p[0]
        if not name:
            # use default project
            name = self.instance.info.get("current_project", "")
        if not name:
            raise Exception("can not init project with empty name")
        self.name = name
        # TODO check if project exists for local and remote
        if self.instance.is_cloud:
            # TODO check whether contains namespace in name(like 'sw:project')?
            # Names that are not all digits must be resolved to the
            # server-side numeric project id.
            self.id = (
                self.name
                if self.name.isdigit()
                else str(
                    get_remote_project_id(
                        self.instance.url, self.instance.token, self.name
                    )
                )
            )
        else:
            self.id = self.name

    @classmethod
    def parse_from_full_uri(cls, uri: str, ignore_rc_type: bool) -> "Project":
        """
        Parse project from full uri.
        we do not parse instance and project info from uri less than 5 parts.
        we prefer that users use `dataset copy mnist -dlp project` rather than `dataset copy project/dataset/mnist`.
        the second is difficult to write correctly at once and the semantics are not very clear.
        and the long uri usually copied from the website.
        """
        # Strip an optional scheme ("http://", "cloud://", ...).
        if "://" in uri:
            no_schema_uri = uri.split("://", 1)[-1]
        else:
            no_schema_uri = uri
        if "//" in no_schema_uri:
            raise Exception(f"wrong format uri({uri}) with '//'")
        parts = no_schema_uri.split("/")
        # Expected part count for a full uri: instance/project/<name>/<type>/<resource>.
        exp = len("local/project/self/dataset/mnist".split("/"))
        if ignore_rc_type:
            # ignore type in uri like dataset
            exp = exp - 1
        if len(parts) < exp:
            raise UriTooShortException(
                exp, len(parts), f"can not parse project info from {uri}"
            )
        return cls(uri=uri)

    @property
    def full_uri(self) -> str:
        """Canonical "<instance-url>/project/<id>" form of this project."""
        return "/".join([self.instance.url, "project", self.id])

    def __str__(self) -> str:
        return self.full_uri

    def __repr__(self) -> str:
        return f"<Project {self.full_uri}>"

    def __eq__(self, other: object) -> bool:
        # Two projects are equal when they resolve to the same full uri.
        if not isinstance(other, Project):
            return False
        return self.full_uri == other.full_uri
@lru_cache(maxsize=None)
@http_retry
def get_remote_project_id(instance_uri: str, token: str, project: str) -> Any:
    """Resolve a project name to its id on a remote Starwhale instance.

    Results are memoized per (instance_uri, token, project) by lru_cache;
    transient HTTP failures are retried by the http_retry decorator.

    :raises requests.HTTPError: when the server answers with an error status.
    """
    # Fix: `import urllib` at module level does not guarantee the
    # `urllib.parse` submodule attribute is bound; import it explicitly
    # so the urljoin call below is always valid.
    import urllib.parse

    resp = requests.get(
        urllib.parse.urljoin(instance_uri, f"/api/v1/project/{project}"),
        headers={
            "Content-Type": "application/json; charset=utf-8",
            "Authorization": token,
        },
        timeout=60,
    )
    resp.raise_for_status()
    return resp.json().get("data", {})["id"]
| star-whale/starwhale | client/starwhale/base/uri/project.py | project.py | py | 4,147 | python | en | code | 171 | github-code | 36 | [
{
"api_name": "starwhale.base.uri.instance.Instance",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "... |
570167896 | import logging
import math
from .geomsmesh import geompy
from .geomsmesh import smesh
from .putName import putName
def calculePointsAxiauxPipe_a(facesDefaut, centreFondFiss, wireFondFiss,
                              lenSegPipe,
                              nro_cas=None):
    """Mesh the crack-front wire according to its radius of curvature.

    An adaptive 1D segment algorithm is applied so edge lengths stay
    between 0.25 and 1.5 times `lenSegPipe`, with a deflection derived
    from the smallest distance between the front centre and the filling
    faces. Returns the computed SMESH mesh; raises on compute failure.
    """
    logging.info('start')

    # Maximal radius of curvature: smallest centre-to-filling distance
    disfond = list()
    for filling in facesDefaut:
        disfond.append(geompy.MinDistance(centreFondFiss, filling))
    disfond.sort()
    texte = "rcourb: {}, lenSegPipe: {}".format(disfond[0], lenSegPipe)
    logging.info(texte)

    # 1D meshing
    lgmin = lenSegPipe*0.25
    lgmax = lenSegPipe*1.5
    # the deflection is the maximal distance between a mesh edge and the supporting curve
    nbSegQuart = 5  # we want at least 5 segments on a quarter circle
    alpha = math.pi/(4*nbSegQuart)
    deflexion = disfond[0]*(1.0 - math.cos(alpha))
    # Fix: format arguments reordered to match the labels — the original
    # logged deflexion under "lgmin", lgmin under "lgmax", etc.
    texte = "==> lgmin: {}, lgmax: {}, deflexion: {}".format(lgmin, lgmax, deflexion)
    logging.info(texte)

    meshFondFiss = smesh.Mesh(wireFondFiss)
    putName(meshFondFiss, "wireFondFiss", i_pref=nro_cas)
    algo1d = meshFondFiss.Segment()
    putName(algo1d.GetSubMesh(), "wireFondFiss", i_pref=nro_cas)
    hypo1d = algo1d.Adaptive(lgmin, lgmax, deflexion)  # to adjust according to the crack depth
    putName(hypo1d, "Adaptive_wireFondFiss", i_pref=nro_cas)

    is_done = meshFondFiss.Compute()
    text = "calculePointsAxiauxPipe meshFondFiss.Compute"
    if is_done:
        logging.info(text)
    else:
        text = "Erreur au calcul du maillage.\n" + text
        logging.info(text)
        raise Exception(text)

    return meshFondFiss
| luzpaz/occ-smesh | src/Tools/blocFissure/gmu/calculePointsAxiauxPipe_a.py | calculePointsAxiauxPipe_a.py | py | 1,651 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.info",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "geomsmesh.geompy.MinDistance",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "geomsmesh.geompy",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "logging.in... |
36480992641 | from PyQt5 import QtWidgets, QtCore
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QFileDialog, QGraphicsScene, QGraphicsView, QGraphicsPixmapItem, QDialog
from PyQt5.QtGui import QPixmap, QPainter, QColor, QImage
from gui import Ui_MainWindow
import sys
import math as m
import numpy as np
import matplotlib.pyplot as plt
import cv2
import qimage2ndarray
from PIL import Image, ImageEnhance
import cv2
import numpy as np
# plt.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import pandas as pd
import matplotlib.patches as patches
class ApplicationWindow(QtWidgets.QMainWindow):
    """Main window of a toy MRI simulator.

    Loads a 64x64 phantom image, exposes T1/T2/SD property maps derived
    from pixel intensity classes, simulates an RF pulse plus phase and
    frequency gradients with rotation matrices, and draws the resulting
    k-space.
    """

    def __init__(self):
        super(ApplicationWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # canvas for the pulse-sequence plot
        self.figure_sequence = plt.figure()
        self.canvas_sequence = FigureCanvas(self.figure_sequence)
        self.ui.horizontalLayout_Sequence.addWidget(self.canvas_sequence)
        # canvas for the k-space plot
        self.figure_kspace = plt.figure()
        self.canvas_kspace = FigureCanvas(self.figure_kspace)
        self.ui.horizontalLayout_Kspace.addWidget(self.canvas_kspace)
        # for plot phantom generated
        # self.figure_phantom_Generated = plt.figure()
        # self.canvas_phantom_Generated = FigureCanvas(self.figure_phantom_Generated)
        # self.ui.horizontalLayout_phantom_generated.addWidget(self.canvas_phantom_Generated)
        # adding items to combobox
        self.ui.comboBox_property.addItems(
            ["Original", "T1", "T2", "SD"])
        # connect ui elements to functions
        self.ui.pushButton_Browse.clicked.connect(self.browse)
        self.ui.pushButton_Plot.clicked.connect(self.plot)
        # self.ui.pushButton_Run.clicked.connect(self.freqGradient)
        self.ui.comboBox_property.currentIndexChanged.connect(self.combobox)
        self.ui.label_view_img.mousePressEvent = self.getPixel

    def getPixel(self, event):
        """Show the T1/T2/SD values of the clicked phantom pixel."""
        try:
            # get image from combobox function and convert it to numpy array to get pixel value
            self.orgImg = qimage2ndarray.rgb_view(self.image_orignal)
            self.imgT1 = qimage2ndarray.rgb_view(self.t1(self.image))
            self.imgT2 = qimage2ndarray.rgb_view(self.t2(self.image))
            self.imgSD = qimage2ndarray.rgb_view(self.SD(self.image))
            currentWidth = self.ui.label_view_img.width()
            currentHeight = self.ui.label_view_img.height()
            # map the click position from label coordinates to the 64x64 image
            x = int(((event.pos().x())*64) / currentWidth)
            y = int(((event.pos().y())*64) / currentHeight)
            self.ui.lineEdit_t1.setText(str(self.imgT1[x, y][1]))
            self.ui.lineEdit_t2.setText(str(self.imgT2[x, y][1]))
            self.ui.lineEdit_sd.setText(str(self.imgSD[x, y][1]))
        except Exception as e:
            print(e)

    def browse(self):
        """Pick a phantom image, resize to 64x64 and start the simulation."""
        try:
            loadImg = QFileDialog.getOpenFileName(self, 'Open file')
            self.image = cv2.imread(loadImg[0], 0)
            self.image = cv2.resize(self.image, (64, 64))
            # print(self.image.shape)
            self.rgbImage = cv2.cvtColor(self.image, cv2.COLOR_GRAY2RGB)
            self.M = self.rgbImage
            self.k_space = self.rgbImage
            self.image_orignal = qimage2ndarray.array2qimage(self.image)
            self.ui.label_view_img.setPixmap(QPixmap(self.image_orignal))
            # print(self.rgbImage)
            print(self.rgbImage[0][0])
            plt.imshow(self.image, cmap='gray')
            self.RF()
        except Exception as e:
            print(e)

    def Rz(self, theta):
        """Rotation matrix about the z axis by `theta` radians."""
        return np.matrix([[m.cos(theta), -m.sin(theta), 0],
                          [m.sin(theta), m.cos(theta), 0],
                          [0, 0, 1]])

    def RF(self):
        """Apply a 90-degree RF pulse (x-rotation), then phase encoding."""
        # NOTE(review): np.dot of a 3x3 matrix with the (64, 64, 3) image
        # relies on specific array shapes — confirm intended broadcasting.
        self.rgbImage = np.dot(self.Rx(m.radians(90)), self.rgbImage)
        self.phaseGradient()

    def phaseGradient(self):
        """Apply the phase-encoding gradient, then frequency encoding."""
        for i, phaseAngle in enumerate(self.decimal_range(0, 360, 5.625)):
            Rz_phase = self.Rz(m.radians(phaseAngle))
            for j in range(63):
                self.M[:, j] = np.dot(Rz_phase, self.rgbImage[:, j])
        self.freqGradient()

    def freqGradient(self):
        """Apply the frequency gradient and fill/draw the k-space rows."""
        self.figure_kspace.clear()
        axs = self.figure_kspace.add_subplot(111)
        axs.set_title("K-Space")
        axs.set_xlabel("Phase")
        axs.set_ylabel("Frequency")
        axs.imshow(self.k_space, cmap='gray')
        for i, freqAngle in enumerate(self.decimal_range(0, 360, 5.625)):
            Rz_freq = self.Rz(m.radians(freqAngle))
            PixelSummation = np.zeros(3)
            for j in range(63):
                self.M[:, j] = np.dot(Rz_freq, self.M[:, j])
                PixelSummation += self.M[:, j]
            self.k_space[i, :] = PixelSummation
        self.canvas_kspace.draw()
        self.canvas_kspace.flush_events()

    def Rx(self, theta):
        """Rotation matrix about the x axis by `theta` radians."""
        return np.matrix([[1, 0, 0],
                          [0, m.cos(theta), -m.sin(theta)],
                          [0, m.sin(theta), m.cos(theta)]])

    def decimal_range(self, start, stop, increment):
        """Yield start, start+increment, ... while strictly below stop.

        Bug fix: the original signature omitted `self`, so every
        `self.decimal_range(...)` call raised TypeError (four positional
        arguments passed to a three-parameter function).
        """
        while start < stop:
            yield start
            start += increment

    def plot(self):
        """Draw a mock RF/GX/GY/GZ/readout pulse-sequence diagram."""
        try:
            self.figure_sequence.clear()
            axs = self.figure_sequence.subplots(5, sharex=True)
            self.figure_sequence.suptitle('Sequence')
            x = np.linspace(-10, 10, 500)
            y1 = np.sinc(x)
            y2 = np.cos(x)
            y3 = np.tan(x)
            axs[0].plot(x, y1 ** 2)
            axs[0].set_ylabel('RF')
            axs[0].set_frame_on(False)
            axs[0].xaxis.set_visible(False)
            axs[1].plot(x, 0.3 * y2)
            axs[1].set_ylabel('GX')
            axs[1].set_frame_on(False)
            axs[1].xaxis.set_visible(False)
            axs[2].plot(x, 0.3 * y2)
            axs[2].set_ylabel('GY')
            axs[2].xaxis.set_visible(False)
            axs[2].set_frame_on(False)
            axs[3].plot(x, 0.3 * y2)
            axs[3].set_ylabel('GZ')
            axs[3].xaxis.set_visible(False)
            axs[3].set_frame_on(False)
            axs[4].plot(x, y3)
            axs[4].set_ylabel('Read Out')
            axs[4].set_frame_on(False)
            self.canvas_sequence.draw()
        except Exception as e:
            print(e)

    # combobox function for selecting image property
    def combobox(self, index):
        """Switch the displayed map: 0=Original, 1=T1, 2=T2, 3=SD."""
        try:
            if index == 0:
                self.ui.label_view_img.setPixmap(QPixmap(self.image_orignal))
            elif index == 1:
                self.ui.label_view_img.setPixmap(QPixmap(self.t1(self.image)))
            elif index == 2:
                self.ui.label_view_img.setPixmap(QPixmap(self.t2(self.image)))
            elif index == 3:
                self.ui.label_view_img.setPixmap(QPixmap(self.SD(self.image)))
            else:
                pass
        except Exception as e:
            print(e)

    # map range function from brain tissue properties to image pixel values
    def map_range(self, input_value):
        """Scale a tissue-property value into the 0-255 pixel range."""
        try:
            # scale factor: 255 (max pixel value) / 300 (property scale
            # used below) — the original comment claimed 2500, which did
            # not match the code.
            output_value = input_value * (255 / 300)
            return output_value
        except Exception as e:
            print(e)

    def t1(self, in_image):
        """Build the T1 map: per-intensity tissue classes -> T1 values."""
        try:
            # NOTE(review): only t1 resizes to 256x256; t2/SD keep the
            # 64x64 input — confirm whether this asymmetry is intended.
            in_image = cv2.resize(in_image, (256, 256))
            # Define the conditionals and corresponding values
            conditions = [
                in_image == 255,  # white matter
                in_image == 150,  # gray matter
                in_image == 90,   # fat
                in_image == 0     # water
            ]
            values = [self.map_range(500), self.map_range(
                800), self.map_range(250), self.map_range(3000)]
            # Apply the conditionals and assign values in a single step
            shepp_t1 = np.select(conditions, values,
                                 default=255).astype(np.uint8)
            # Convert image to qimage
            shepp_t1 = qimage2ndarray.array2qimage(shepp_t1)
            return shepp_t1
        except Exception as e:
            print(e)

    def t2(self, in_image):
        """Build the T2 map: per-intensity tissue classes -> T2 values."""
        try:
            # Define the conditionals and corresponding values
            conditions = [
                in_image == 255,  # white matter
                in_image == 150,  # gray matter
                in_image == 90,   # fat
                in_image == 0     # water
            ]
            values = [self.map_range(80), self.map_range(
                100), self.map_range(55), self.map_range(2000)]
            # Apply the conditionals and assign values in a single step
            shepp_t2 = np.select(conditions, values,
                                 default=200).astype(np.uint8)
            # Convert image to qimage
            shepp_t2 = qimage2ndarray.array2qimage(shepp_t2)
            return shepp_t2
        except Exception as e:
            print(e)

    def SD(self, in_image):
        """Build the spin-density map: tissue classes -> SD values."""
        try:
            # Define the conditionals and corresponding values
            conditions = [
                in_image == 255,  # white matter
                in_image == 150,  # gray matter
                in_image == 90,   # fat
                in_image == 0     # water
            ]
            values = [self.map_range(0.1), self.map_range(
                0.2), self.map_range(0.5), self.map_range(0.7)]
            # Apply the conditionals and assign values in a single step
            shepp_SD = np.select(conditions, values,
                                 default=0.7).astype(np.uint8)
            # Convert image to qimage
            shepp_SD = qimage2ndarray.array2qimage(shepp_SD)
            return shepp_SD
        except Exception as e:
            print(e)
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the main
    # window, and hand control to the Qt event loop.
    app = QtWidgets.QApplication(sys.argv)
    application = ApplicationWindow()
    application.show()
    sys.exit(app.exec_())
| HamzaJamal782/MRI-Image-reconstruction- | test.secret.py | test.secret.py | py | 10,448 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "gui.Ui_MainWindow",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "ma... |
35575503668 | """Miscellaneous functions used for plotting gradient data"""
import gzip
import math
import os.path as op
import pickle
import nibabel as nib
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from neuromaps.datasets import fetch_fslr
from nilearn import image, masking, plotting
from nilearn.plotting import plot_stat_map
from surfplot import Plot
from surfplot.utils import threshold
def plot_gradient(
    data_dir,
    grad_seg_fnames,
    grad_seg_labels=None,
    cmap="viridis",
    threshold_=None,
    color_range=None,
    views=None,
    title=False,
    layout="grid",
    cbar=False,
    out_dir=None,
    prefix="",
):
    """Render left/right gradient GIFTI maps on fsLR-32k inflated surfaces.

    grad_seg_fnames is an iterable of (lh_fname, rh_fname) pairs; when
    grad_seg_labels is None the title is derived from BIDS-style
    ``key-value`` entities of the left-hemisphere file name. Figures are
    saved to out_dir as <prefix><title>.tiff when out_dir is given.
    """
    # Ensure a non-empty prefix ends with a separating underscore.
    prefix_sep = "" if prefix == "" else "_"
    if not prefix.endswith(prefix_sep):
        prefix = prefix + prefix_sep

    neuromaps_dir = op.join(data_dir, "neuromaps-data")
    surfaces = fetch_fslr(density="32k", data_dir=neuromaps_dir)
    lh, rh = surfaces["inflated"]
    sulc_lh, sulc_rh = surfaces["sulc"]
    for img_i, (grad_segment_lh, grad_segment_rh) in enumerate(grad_seg_fnames):
        if grad_seg_labels is None:
            # e.g. "source-xxx_desc-yyy_id-zzz_..." -> title "xxx-yyy"
            base_name = op.basename(grad_segment_lh)
            firts_name = base_name.split("_")[0].split("-")[1]
            last_name = base_name.split("_")[1].split("-")[1]
            id_name = base_name.split("_")[2].split("-")[1]
            title_ = f"{firts_name}-{last_name}"
        else:
            title_ = grad_seg_labels[img_i]
        if out_dir is not None:
            out_file = op.join(out_dir, f"{prefix}{title_}.tiff")
        # NOTE(review): the skip-if-exists guard presumes out_dir was
        # provided; with out_dir=None `out_file` is unbound here —
        # indentation was lost in extraction, confirm upstream.
        if not op.isfile(out_file):
            lh_grad = nib.load(grad_segment_lh).agg_data()
            rh_grad = nib.load(grad_segment_rh).agg_data()
            if threshold_ is not None:
                lh_grad = threshold(lh_grad, threshold_)
                rh_grad = threshold(rh_grad, threshold_)
            if views:
                # Single-hemisphere plot restricted to the requested views.
                p = Plot(surf_lh=lh, views=views, layout=layout)
                p.add_layer({"left": sulc_lh}, cmap="binary_r", cbar=False)
                p.add_layer({"left": lh_grad}, cmap=cmap, cbar=cbar, color_range=color_range)
            else:
                # Both hemispheres with the sulcal map as an underlay.
                p = Plot(surf_lh=lh, surf_rh=rh, layout=layout)
                p.add_layer({"left": sulc_lh, "right": sulc_rh}, cmap="binary_r", cbar=False)
                p.add_layer(
                    {"left": lh_grad, "right": rh_grad},
                    cmap=cmap,
                    cbar=cbar,
                    color_range=color_range,
                )
            fig = p.build()
            if title:
                fig.axes[0].set_title(title_, pad=-3)
            if out_dir is not None:
                plt.savefig(out_file, bbox_inches="tight", dpi=500)
            plt.close()
            plt.clf()
def plot_subcortical_gradient(subcort_grad_fnames, cmap="viridis", threshold_=None):
    """Plot subcortical gradient volumes as nilearn stat maps.

    Each title is built from the first two BIDS-style ``key-value``
    entities of the file name, shown as "<value1>: <value2>".
    """
    for fname in subcort_grad_fnames:
        entities = op.basename(fname).split("_")
        first = entities[0].split("-")[1]
        last = entities[1].split("-")[1]
        plot_stat_map(
            fname,
            draw_cross=False,
            cmap=cmap,
            threshold=threshold_,
            title=f"{first}: {last}",
        )
        plt.show()
def plot_meta_maps(
    decoder_fn, map_idxs, threshold=2, model="decoder", colorbar=True, out_dir=None
):
    """Plot selected meta-analytic maps from a pickled decoder or GCLDA model.

    decoder_fn : gzipped pickle with the fitted model
    map_idxs   : indices of the maps/topics to plot
    model      : "decoder" (NiMARE-style decoder) or "gclda" (topic model)
    out_dir    : when given, each figure is also saved as <feature>.tiff
    """
    # Fix: use a context manager so the gzip handle is closed even when
    # unpickling fails (the original leaked the file object).
    with gzip.open(decoder_fn, "rb") as decoder_file:
        decoder = pickle.load(decoder_file)

    if model == "decoder":
        meta_maps = decoder.images_
        features = [f.split("__")[-1] for f in decoder.features_]
        meta_maps_imgs = decoder.masker.inverse_transform(meta_maps[map_idxs, :])
    elif model == "gclda":
        topic_word_weights = decoder.p_word_g_topic_
        n_topics = topic_word_weights.shape[1]
        vocabulary = np.array(decoder.vocabulary)
        sorted_weights_idxs = np.argsort(-topic_word_weights, axis=0)
        # Label each topic with its three highest-weighted words.
        top_tokens = [
            "_".join(vocabulary[sorted_weights_idxs[:, topic_i]][:3])
            for topic_i in range(n_topics)
        ]
        features = [f"{i + 1}_{top_tokens[i]}" for i in range(n_topics)]
        meta_maps_imgs = masking.unmask(decoder.p_voxel_g_topic_.T[map_idxs, :], decoder.mask)

    features_to_plot = np.array(features)[map_idxs]
    n_maps = len(map_idxs)
    for i_feature in range(n_maps):
        feature_img_3d = image.index_img(meta_maps_imgs, i_feature)
        plotting.plot_stat_map(
            feature_img_3d,
            draw_cross=False,
            colorbar=colorbar,
            annotate=False,
            threshold=threshold,
            title=features_to_plot[i_feature],
        )
        if out_dir is not None:
            out_file = op.join(out_dir, f"{features_to_plot[i_feature]}.tiff")
            plt.savefig(out_file, bbox_inches="tight", dpi=1000)
        plt.show()
def plot_top_words(model, feature_names, n_top_words, title):
    """Draw a grid of horizontal bar charts, one per topic, showing the
    `n_top_words` highest-weighted words of each topic in `model`
    (an sklearn-style model exposing `components_`)."""
    components = model.components_
    n_cols = 5
    n_rows = math.ceil(len(components) / n_cols)
    width = 30
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(width, (width / 2) * n_rows), sharex=True)
    axes = axes.flatten()
    for idx, topic in enumerate(components):
        # Indices of the top-weighted words, highest first.
        order = topic.argsort()[: -n_top_words - 1 : -1]
        labels = [feature_names[i] for i in order]
        ax = axes[idx]
        ax.barh(labels, topic[order], height=0.7)
        ax.set_title(f"Topic {idx + 1}", fontdict={"fontsize": 30})
        ax.invert_yaxis()
        ax.tick_params(axis="both", which="major", labelsize=20)
        for side in ("top", "right", "left"):
            ax.spines[side].set_visible(False)
    fig.suptitle(title, fontsize=40)
    plt.subplots_adjust(top=0.977, bottom=0.05, wspace=0.90, hspace=0.1)
    plt.show()
def plot_profile(data_df, metric, hue_order, cmap="tab20"):
    """Per-segment-solution profile plots: for each of 30 segment
    solutions (3..32 segments), a stacked bar chart of `metric` per
    method (left column) and a per-segment line plot (right column).
    """
    # sns.set(style="whitegrid")
    my_cmap = plt.get_cmap(cmap)
    n_segments = 30
    fig, axes = plt.subplots(n_segments, 2)
    fig.set_size_inches(15, 90)
    for seg_sol in range(n_segments):
        # Rows belonging to this segment solution (solutions start at 3).
        test_df = data_df[data_df["segment_solution"] == seg_sol + 3]
        sub_data_df = test_df.drop(columns=["segment_solution", "pvalue"])
        # segments x methods table of the metric for the stacked bars.
        sub_data_df = sub_data_df.pivot_table(
            values=metric, index=sub_data_df["segment"], columns="method"
        )
        sub_data_df = sub_data_df.reindex(hue_order, axis=1)
        sub_data_df.plot.bar(
            rot=0,
            width=1,
            stacked=True,
            color=my_cmap.colors[: len(hue_order)],
            ax=axes[seg_sol, 0],
        )
        axes[seg_sol, 0].get_legend().remove()
        if seg_sol == 0:
            # Keep one set of handles/labels for the shared figure legend.
            handles, labels = axes[0, 0].get_legend_handles_labels()
        test_df = test_df.reset_index()
        test_df["segment"] = test_df["segment"].astype(str)
        """
        sns.barplot(
            data=test_df,
            x="segment",
            y="max_corr",
            palette=cmap,
            hue="method",
            hue_order=hue_order,
            dodge=True,
            ax=axes[seg_sol , 1],
        )
        axes[seg_sol , 1].get_legend().remove()
        """
        # x = test_df["segment"]
        # y = test_df["max_corr"]
        # axes[seg_sol , 2].plot(x, y, 'o-')
        # axes[seg_sol , 2].get_legend().remove()
        sns.lineplot(
            data=test_df,
            x="segment",
            y=metric,
            palette=cmap,
            hue="method",
            hue_order=hue_order,
            marker="o",
            ax=axes[seg_sol, 1],
        )
        axes[seg_sol, 1].get_legend().remove()
        # Per-method "mean ± std" strings (kept for the disabled legend below).
        text_lst = []
        mean_lst = []
        for approach in hue_order:
            approach_df = test_df[test_df["method"] == approach]
            # print(approach_df)
            mean_corr = approach_df[metric]
            text_lst.append(f"{mean_corr.mean():.3f} ± {mean_corr.std():.3f}")
            mean_lst.append(mean_corr.mean())
        ax_handles, ax_labels = axes[seg_sol, 1].get_legend_handles_labels()
        sort_idx = np.argsort(-np.array(mean_lst))
        """
        axes[seg_sol, 1].legend(
            np.array(ax_handles)[sort_idx],
            np.array(text_lst)[sort_idx],
            loc="upper left",
            bbox_to_anchor=(1.04, 1.15),
            ncol=1,
        )
        """
    fig.legend(
        handles,
        labels,
        loc="lower center",
        ncol=6,
        bbox_to_anchor=(0.5, -0.01),
    )
    fig.tight_layout()
    # plt.savefig(op.join(result_dir, "gradient_segmentation", "Figures", "correlation_profile.png"), dpi=300, bbox_inches="tight")
    plt.show()
def plot_mean_profile(data_df, metric, hue_order, cmap="tab20"):
    """Single tall line plot of the mean `metric` per method across all
    segment solutions (one line per method in `hue_order`)."""
    # sns.set(style="whitegrid")
    figure, axis = plt.subplots(1, 1)
    figure.set_size_inches(3, 15)
    line_kwargs = dict(
        data=data_df,
        x=metric,
        y="segment_solution",
        palette=cmap,
        hue="method",
        hue_order=hue_order,
        sort=False,
        marker="o",
        ax=axis,
    )
    sns.lineplot(**line_kwargs)
    axis.get_legend().remove()
    # plt.savefig(op.join("./Fig", "mean_correlation_profile.eps"), bbox_inches="tight")
    plt.show()
def _get_twfrequencies(dset_nm, model_nm, n_top_terms, dec_data_dir):
    """Per-topic relative frequencies of the top words of a pickled model.

    Loads "<model_nm>_<dset_nm>_model.pkl.gz" from dec_data_dir, takes the
    `n_top_terms` largest topic-word weights of each topic, normalizes
    them by the topic's maximum, and rounds to 3 decimals.

    Returns a list (one entry per topic) of lists of frequencies.
    """
    model_fn = op.join(dec_data_dir, f"{model_nm}_{dset_nm}_model.pkl.gz")
    # Fix: context manager closes the gzip handle (the original leaked it).
    with gzip.open(model_fn, "rb") as model_file:
        model_obj = pickle.load(model_file)

    # GCLDA stores topics as columns; other models expose the
    # (topics x words) distribution directly.
    topic_word_weights = (
        model_obj.p_word_g_topic_.T
        if model_nm == "gclda"
        else model_obj.distributions_["p_topic_g_word"]
    )
    n_topics = topic_word_weights.shape[0]
    sorted_weights_idxs = np.argsort(-topic_word_weights, axis=1)
    frequencies_lst = []
    for topic_i in range(n_topics):
        frequencies = topic_word_weights[topic_i, sorted_weights_idxs[topic_i, :]][
            :n_top_terms
        ].tolist()
        # Hoist the loop-invariant max out of the comprehension — the
        # original recomputed np.max(frequencies) for every element.
        max_freq = np.max(frequencies)
        frequencies = [freq / max_freq for freq in frequencies]
        frequencies = np.round(frequencies, 3).tolist()
        frequencies_lst.append(frequencies)
    return frequencies_lst
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "neuromaps.datasets.fetch_fslr",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.basename",... |
978437268 | import torch
from torch import nn
import numpy as np
import torch.nn.functional as F
def inverse_sigmoid(epoch, k=20):
    """Inverse-sigmoid decay schedule: k / (k + exp(epoch / k)).

    Starts near 1 at epoch 0 (k / (k + 1)) and decays toward 0 as the
    epoch grows; larger `k` slows the decay.
    """
    growth = np.exp(epoch / k)
    return k / (k + growth)
class Encoder(nn.Module):
    """LSTM VAE encoder.

    Runs the input sequence through a (by default bidirectional) LSTM,
    flattens the final hidden states of all layers/directions, and maps
    them to a Gaussian latent (mu, std) from which a sample z is drawn
    via the reparameterization trick.
    """

    def __init__(self, input_size, hidden_size, latent_dim, num_layers=1, bidirectional=True):
        super(Encoder, self).__init__()
        num_directions = 2 if bidirectional else 1
        self.hidden_size = hidden_size
        # Total count of final hidden-state vectors: layers x directions.
        self.num_hidden = num_directions * num_layers
        self.lstm = nn.LSTM(batch_first=True,
                            input_size=input_size,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            bidirectional=bidirectional)
        flat_dim = self.num_hidden * self.hidden_size
        self.mu = nn.Linear(flat_dim, latent_dim)
        self.std = nn.Linear(flat_dim, latent_dim)
        # Parameter-free LayerNorm keeps mu well-scaled.
        self.norm = nn.LayerNorm(latent_dim, elementwise_affine=False)

    def forward(self, x):
        _, (final_h, _) = self.lstm(x)
        # (num_hidden, batch, hidden) -> (batch, num_hidden * hidden)
        flat_h = final_h.transpose(0, 1).reshape(-1, self.num_hidden * self.hidden_size)
        mu = self.norm(self.mu(flat_h))
        std = nn.Softplus()(self.std(flat_h))
        # reparameterization trick
        z = self.reparameterize(mu, std)
        return z, mu, std

    def reparameterize(self, mu, std):
        """Sample z = mu + eps * std with eps ~ N(0, I)."""
        noise = torch.randn_like(std)
        return mu + (noise * std)
class Conductor(nn.Module):
    """Unidirectional LSTM that expands one latent vector into per-bar embeddings.

    The latent code seeds both the initial hidden state and each step's input;
    the LSTM is stepped once per bar and every step's output becomes that bar's
    embedding, which is then projected by ``self.linear``.

    NOTE(review): reusing ``z`` as the LSTM hidden state requires
    ``input_size == hidden_size`` -- confirm callers always construct it so.
    ``self.norm`` is defined but never used in ``forward``.
    """
    def __init__(self, input_size, hidden_size, device, num_layers=2, bar=4):
        super(Conductor, self).__init__()
        num_directions = 1  # unidirectional by construction
        self.bar = bar
        self.device = device
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_hidden = num_directions * num_layers
        self.norm = nn.BatchNorm1d(input_size)
        self.linear = nn.Linear(hidden_size, hidden_size)
        self.conductor = nn.LSTM(batch_first=True,
                                 input_size=input_size,
                                 hidden_size=hidden_size,
                                 num_layers=num_layers,
                                 bidirectional=False)
    def init_hidden(self, batch_size, z):
        """Tile the latent vector into (num_hidden, batch, dim) initial states."""
        h0 = z.repeat(self.num_hidden, 1, 1)
        c0 = z.repeat(self.num_hidden, 1, 1)
        return h0, c0
    def forward(self, z):
        batch_size = z.shape[0]
        h, c = self.init_hidden(batch_size, z)
        z = z.unsqueeze(1)  # (batch, 1, input_size): a one-step LSTM input
        # Pre-allocate one embedding slot per bar.
        feat = torch.zeros(batch_size, self.bar, self.hidden_size, device=self.device)
        z_input = z
        for i in range(self.bar):
            z_input, (h, c) = self.conductor(z_input, (h, c))
            # BUG FIX: squeeze only the time dimension. A bare .squeeze() also
            # dropped the batch (or hidden) axis when its size was 1, breaking
            # this assignment for batch_size == 1 or hidden_size == 1.
            feat[:, i, :] = z_input.squeeze(1)
            # Feed the original latent back in at every step.
            z_input = z
        feat = self.linear(feat)
        return feat
class Decoder(nn.Module):
    """LSTM decoder emitting a categorical distribution per time step.

    Each step consumes the previous output vector concatenated with the
    current conductor embedding ``z``.
    """
    def __init__(self, input_size, hidden_size, output_size, num_layers=2, bidirectional=False):
        super(Decoder, self).__init__()
        directions = 2 if bidirectional else 1
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_hidden = directions * num_layers
        # Projects the LSTM output onto the output-token vocabulary.
        self.logits = nn.Linear(hidden_size, output_size)
        self.decoder = nn.LSTM(batch_first=True,
                               input_size=input_size + output_size,
                               hidden_size=hidden_size,
                               num_layers=num_layers,
                               bidirectional=bidirectional)
    def forward(self, x, h, c, z, temp=1):
        """One decoding step; ``temp`` divides the logits (softmax temperature)."""
        step_input = torch.cat((x, z.unsqueeze(1)), 2)
        step_out, (h, c) = self.decoder(step_input, (h, c))
        scaled_logits = self.logits(step_out) / temp
        prob = F.softmax(scaled_logits, dim=2)
        out = torch.argmax(prob, 2)
        return out, prob, h, c
class MusicVAE(nn.Module):
    """Hierarchical VAE: Encoder -> latent z -> Conductor (per-bar) -> Decoder."""
    def __init__(self, args):
        super().__init__()
        self.encoder = Encoder(args.enc_input_size, args.enc_hidden_size, args.enc_latent_dim)
        self.conductor = Conductor(args.enc_latent_dim, args.con_hidden_size, args.device)
        self.decoder = Decoder(args.con_hidden_size, args.dec_hidden_size, args.dec_output_size)
        self.bar_units = args.bar_units
        self.num_hidden = self.decoder.num_hidden
        self.hidden_size = self.decoder.hidden_size
        self.output_size = self.decoder.output_size
        self.device = args.device
    def forward(self, x):
        # NOTE(review): x is consumed batch-first below (x[:, j, :]), yet the
        # step count is taken from x.shape[0] -- confirm this is intentional.
        seq_len = x.shape[0]
        latent_z, mu, std = self.encoder(x)
        feat = self.conductor(latent_z)
        # Teacher forcing: the first decoder input is all zeros.
        x_train_inputs = torch.zeros((x.shape[0], 1, x.shape[2]), device=self.device)
        x_train_label = torch.zeros(x.shape[:-1], device=self.device)
        x_train_prob = torch.zeros(x.shape, device=self.device)
        for j in range(seq_len):
            bar_idx = j // self.bar_units
            bar_change_idx = j % self.bar_units
            z = feat[:, bar_idx, :]
            if bar_change_idx == 0:
                # Re-seed the decoder state at the start of every bar.
                # NOTE(review): requires hidden_size to be a multiple of the
                # conductor embedding size -- confirm configs guarantee this.
                h = z.repeat(self.num_hidden, 1, int(self.hidden_size/z.shape[1]))
                c = z.repeat(self.num_hidden, 1, int(self.hidden_size/z.shape[1]))
            label, prob, h, c = self.decoder(x_train_inputs.to(self.device), h, c, z)
            x_train_label[:, j] = label.squeeze()
            x_train_prob[:, j, :] = prob.squeeze()
            # Next step is fed the ground-truth frame (teacher forcing).
            x_train_inputs = x[:, j, :].unsqueeze(1)
        return x_train_prob, mu, std, x_train_label
    def generate(self, bar_units=16, seq_len=64, n=1):
        # NOTE(review): the latent width 512 and the .cuda() call are
        # hard-coded here; generation requires a GPU and a 512-dim latent.
        z = torch.empty((n, 512)).normal_(mean=0,std=1)
        feat = self.conductor(z.cuda())
        batch_size = n
        hidden_size = self.decoder.hidden_size
        output_size = self.decoder.output_size
        num_hidden = self.decoder.num_hidden
        inputs = torch.zeros((batch_size, 1, output_size), device=self.device)
        outputs = torch.zeros((batch_size, seq_len, output_size), device=self.device)
        for j in range(seq_len):
            bar_idx = j // bar_units
            bar_change_idx = j % bar_units
            z = feat[:, bar_idx, :]
            if bar_change_idx == 0:
                h = z.repeat(num_hidden, 1, int(hidden_size/z.shape[1]))
                c = z.repeat(num_hidden, 1, int(hidden_size/z.shape[1]))
            # NOTE(review): temp=n passes the sample count as the softmax
            # temperature -- looks unintended; confirm against training code.
            label, prob, h, c = self.decoder(inputs, h, c, z, temp=n)
            outputs[:, j, :] = prob.squeeze()
            # Autoregressive: feed back the one-hot of the sampled token.
            inputs = F.one_hot(label, num_classes=output_size)
return outputs.cpu().detach().numpy() | Jinhoss/MusicVAE | model.py | model.py | py | 6,869 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.exp",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.LSTM",
"line_number... |
5832039173 | import requests
from bs4 import BeautifulSoup
from datetime import datetime
from pymongo import MongoClient
client = MongoClient(port=27017)  # local MongoDB on the default port
db = client.historical_data4  # one collection per coin is created in this db
def get_coins(url):
    """Fetch the ticker endpoint and return the ids of all listed coins."""
    payload = requests.get(url).json()
    return [entry['id'] for entry in payload]
def get_csv(coin_id, end_date, start_date=20130428):
    """Scrape the historical-data table for ``coin_id`` and write the rows
    into the MongoDB collection named after the coin.

    Args:
        coin_id: Coin slug used in the URL and as the collection name.
        end_date: End of the date range, formatted YYYYMMDD.
        start_date: Start of the date range, formatted YYYYMMDD.
    """
    collection = db[coin_id]
    keys = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Market Cap']
    # BUG FIX: the original used a backslash-continued string literal, which
    # embedded the continuation line's leading spaces into the URL
    # (".../currencies/    bitcoin/..."). Implicit concatenation keeps the
    # URL whitespace-free.
    url = ('https://coinmarketcap.com/currencies/'
           '{0}/historical-data/?start={1}&end={2}').format(coin_id, start_date, end_date)
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    table = soup.find('table', attrs={'class': 'table'})
    list_of_rows = []
    for row in table.findAll('tr')[1:]:  # skip the header row
        cells = [cell.text for cell in row.findAll('td')]
        list_of_rows.append(dict(zip(keys, cells)))
    # NOTE(review): Collection.insert() is deprecated (removed in pymongo 4);
    # insert_many() is the modern equivalent -- confirm the pymongo version.
    collection.insert(list_of_rows)
# Use today's date (YYYYMMDD) as the end of every coin's history range.
current_date = datetime.today().strftime('%Y%m%d')
coins = get_coins('https://api.coinmarketcap.com/v1/ticker/?limit=1')
# BUG FIX: the original passed the count as a second print() argument, so the
# "%d" placeholder was never substituted; format the message explicitly.
print("there are %d coins.." % len(coins))
for coin in coins:
    get_csv(coin, current_date)
print("Success all the coins data has been saved....")
| ShivaGuntuku/cryptos | coin_historical_data_with_mongodb.py | coin_historical_data_with_mongodb.py | py | 1,407 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
... |
41979414512 | import questionary
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from BrokerManager import server as broker_manager
from BrokerManagerReadOnly import server as broker_manager_readonly
import sys
def create_app():
    """Build the Flask app, registering the blueprint the operator selects."""
    app = Flask(__name__)
    choice = questionary.select(
        "Which Server do you want to start?",
        choices=["Broker Manager Primary", "Broker Manager Secondary"]
    ).ask()
    # Map each menu entry to its blueprint; an unanswered prompt adds nothing.
    blueprints = {
        "Broker Manager Primary": broker_manager,
        "Broker Manager Secondary": broker_manager_readonly,
    }
    if choice in blueprints:
        app.register_blueprint(blueprints[choice])
    return app
# Serve on port 5000 unless the first CLI argument overrides it.
port = 5000
if len(sys.argv)>1:
    port = int(sys.argv[1])
if __name__ == '__main__':
    # use_reloader=False prevents Flask's auto-reload child process from
    # re-running the interactive server-selection prompt a second time.
    create_app().run(host='0.0.0.0', port=port, debug=True, use_reloader=False)
| DistributedSystemsGroup-IITKGP/Assignment-3 | main.py | main.py | py | 754 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "questionary.select",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "BrokerManager.server",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "BrokerManagerR... |
12894335645 | import pygame
import random
class boss(pygame.sprite.Sprite):
    """Boss enemy sprite: walks left toward the player and respawns on death."""
    def __init__(self, game):
        super().__init__()
        self.game = game
        self.health = 350
        self.max_health = 350
        self.attack = 6
        self.image = pygame.image.load('assets/BOSS T.png')
        self.image = pygame.transform.scale(self.image, (500, 500))
        self.rect = self.image.get_rect()
        self.rect.x = 800
        self.rect.y = 200
        # NOTE(review): pygame Rect coordinates are integers, so subtracting a
        # velocity of 0.01 likely moves the sprite not at all -- confirm.
        self.velocity = 0.01
    def update_health_bar(self, surface):
        # Draw the health bar: red background sized to max_health, green
        # foreground sized to the current health.
        pygame.draw.rect(surface, (255, 0, 0), [self.rect.x + 15, self.rect.y - 20, self.max_health, 5])
        pygame.draw.rect(surface, (111, 210, 46), [self.rect.x + 15, self.rect.y - 20, self.health, 5])
    def forward (self):
        if not self.game.check_collision(self, self.game.all_players):
            self.rect.x -= self.velocity
        # when the monster collides with the player, hit them instead of moving
        else :
            self.game.player.damage(self.attack)
    def damage(self, amount):
        # Apply the damage.
        self.health -= amount
        # Check whether its new HP total dropped to 0 or below.
        if self.health <= 0 :
            # Respawn as a fresh monster off-screen to the right.
            self.rect.x = 1000 + random.randint(0, 300)
            self.health = self.max_health
            # NOTE(review): randint(0, 2) can yield 0, producing a monster
            # that never moves -- confirm whether 0 is intended.
            self.velocity = random.randint(0, 2)
            # Award score for the kill.
            self.game.score += 50
| MathieuTherias/Projet-Transverse | BOSS.py | BOSS.py | py | 1,440 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.sprite",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pygame.transfor... |
39274786289 | import re
# from urllib.parse import urlparse
# from urllib.parse import urljoin
# from urllib.parse import urldefrag
import urllib
import time
from datetime import datetime
from urllib.robotparser import RobotFileParser
import queue
import random
import socket
import csv
import lxml.html
DEFAULT_AGENT = 'wswp'  # default User-Agent string sent with requests
DEFAULT_DELAY = 5  # seconds to wait between requests to the same domain
DEFAULT_RETRIES = 1  # how many times to retry 5xx responses
DEFAULT_TIMEOUT = 60  # socket default timeout, in seconds
# Link crawler
def link_crawler(seed_url, link_regex=None, delay=5, max_depth=-1, max_urls=-1, user_agent='wswp', proxies=None, num_retries=1, scrape_callback=None, cache=None):
    """Crawl from the given seed URL following links matched by link_regex
    """
    # the queue of URL's that still need to be crawled
    # NOTE(review): list.pop() takes from the end, so despite the name this
    # traverses depth-first, not breadth-first -- confirm intended order.
    crawl_queue = [seed_url]
    # the URL's that have been seen and at what depth
    seen = {seed_url: 0}
    # track how many URL's have been downloaded
    num_urls = 0
    rp = get_robots(seed_url)
    D = Downloader(delay=delay, user_agent=user_agent, proxies=proxies, num_retries=num_retries, cache=cache)
    while crawl_queue:
        url = crawl_queue.pop()
        depth = seen[url]
        # check url passes robots.txt restrictions
        if rp.can_fetch(user_agent, url):
            html = D(url)
            links = []
            if scrape_callback:
                links.extend(scrape_callback(url, html) or [])
            # avoid crawler traps by bounding the crawl depth
            if depth != max_depth:
                # can still crawl further
                if link_regex:
                    # filter for links matching our regular expression
                    links.extend(link for link in get_links(html) if re.match(link_regex, link))
                for link in links:
                    link = normalize(seed_url, link)
                    # check whether already crawled this link
                    if link not in seen:
                        seen[link] = depth + 1
                        # check link is within same domain
                        if same_domain(seed_url, link):
                            # success! add this new link to queue
                            crawl_queue.append(link)
                # check whether have reached downloaded maximum
                num_urls += 1
                if num_urls == max_urls:
                    break
        else:
            print('Blocked by robots.txt:'+ url)
# 下载限速
class Throttle:
"""Throttle downloading by sleeping between requests to same domain
"""
def __init__(self, delay):
# amount of delay between downloads for each domain
self.delay = delay
# timestamp of when a domain was last accessed
self.domains = {}
def wait(self, url):
domain = urllib.parse.urlparse(url).netloc
last_accessed = self.domains.get(domain)
if self.delay > 0 and last_accessed is not None:
sleep_secs = self.delay - (datetime.now() - last_accessed).seconds
if sleep_secs > 0:
time.sleep(sleep_secs)
self.domains[domain] = datetime.now()
# Web page downloader
class Downloader:
    """Download pages with throttling, optional proxies, caching and retries."""
    def __init__(self, delay=DEFAULT_DELAY, user_agent=DEFAULT_AGENT, proxies=None, num_retries=DEFAULT_RETRIES, timeout=DEFAULT_TIMEOUT, opener=None, cache=None):
        socket.setdefaulttimeout(timeout)
        self.throttle = Throttle(delay)
        self.user_agent = user_agent
        self.proxies = proxies
        self.num_retries = num_retries
        # NOTE(review): self.opener is stored but download() always builds its
        # own opener -- confirm whether injected openers should be honoured.
        self.opener = opener
        self.cache = cache
    def __call__(self, url):
        """Return the page body for *url*, served from cache when possible."""
        result = None
        # Cached results skip throttling entirely.
        if self.cache:
            try:
                result = self.cache[url]
            except KeyError:
                # url is not yet in the cache
                pass
            else:
                if self.num_retries > 0 and 500 <= result['code'] < 600:
                    # Cached server error: re-download instead of reusing it.
                    result = None
        if result is None:
            # result was not loaded from cache so still need to download
            self.throttle.wait(url)
            proxy = random.choice(self.proxies) if self.proxies else None
            headers = {'User-agent': self.user_agent}
            result = self.download(url, headers, proxy=proxy, num_retries=self.num_retries)
            if self.cache:
                # save result to cache
                self.cache[url] = result
        return result['html']
    def download(self, url, headers, proxy, num_retries, data=None):
        """Fetch one URL; returns {'html': body, 'code': status or None}."""
        print('Downloading:', url)
        request = urllib.request.Request(url, data, headers)
        opener = urllib.request.build_opener()
        # Route through a proxy when one was supplied.
        if proxy:
            proxy_params = {urllib.parse.urlparse(url).scheme: proxy}
            opener.add_handler(urllib.request.ProxyHandler(proxy_params))
        try:
            response = opener.open(request)
            html = response.read()
            code = response.code
        except Exception as e:
            # BUG FIX: not every exception carries a .reason attribute; fall
            # back to the exception itself so error logging never raises.
            print('Download error:', getattr(e, 'reason', e))
            html = ''
            if hasattr(e, 'code'):
                code = e.code
                if num_retries > 0 and 500 <= code < 600:
                    # BUG FIX: retry via self.download -- the original called a
                    # nonexistent self._get(), raising AttributeError on 5xx.
                    return self.download(url, headers, proxy, num_retries - 1, data)
            else:
                code = None
        return {'html': html, 'code': code}
def normalize(seed_url, link):
    """Return *link* as an absolute URL with any fragment stripped.

    Dropping the fragment avoids treating "page#a" and "page#b" as distinct.
    """
    defragged = urllib.parse.urldefrag(link)[0]
    return urllib.parse.urljoin(seed_url, defragged)
def same_domain(url1, url2):
    """Check whether two URLs share the same network location (host[:port])."""
    netlocs = [urllib.parse.urlparse(u).netloc for u in (url1, url2)]
    return netlocs[0] == netlocs[1]
# Parse the site's robots.txt
def get_robots(url):
    """Build a RobotFileParser primed with this site's robots.txt."""
    parser = RobotFileParser()
    parser.set_url(urllib.parse.urljoin(url, '/robots.txt'))
    parser.read()  # fetches robots.txt over the network
    return parser
def get_links(html):
    """Extract every anchor href from an HTML byte string."""
    text = html.decode('utf-8')
    # Case-insensitive match on <a ... href="..."> (single or double quotes).
    anchor_re = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    return anchor_re.findall(text)
# Scrape callback: extracts each country's fields and appends them to a CSV.
class ScrapeCallback:
    def __init__(self):
        # NOTE(review): the file handle is never closed, and csv files should
        # be opened with newline='' -- consider managing the file explicitly.
        self.writer = csv.writer(open('countries.csv', 'w'))
        self.fields = ('area', 'population', 'iso', 'country', 'capital', 'continent', 'tld', 'currency_code', 'currency_name', 'phone', 'postal_code_format', 'postal_code_regex', 'languages', 'neighbours')
        # Write the header row once up front.
        self.writer.writerow(self.fields)
    def __call__(self, url, html):
        # Only country detail pages (".../view/...") contain the data table.
        if re.search('/view/', url):
            tree = lxml.html.fromstring(html)
            row = []
            for field in self.fields:
                row.append(tree.cssselect('table > tr#places_{}__row > td.w2p_fw'.format(field))[0].text_content())
            self.writer.writerow(row)
if __name__ == '__main__':
    # Crawl the scraping sandbox, scraping every index/view page into the CSV.
    link_crawler('http://example.webscraping.com/', '/(index|view)', delay=0, num_retries=1,max_depth=1,scrape_callback=ScrapeCallback())
    # NOTE(review): ScrapeCallback's CSS selectors are specific to the site
    # above; reusing it for lagou.com will likely fail to extract rows.
    link_crawler('https://www.lagou.com/jobs/list_%E5%89%8D%E7%AB%AF%E5%BC%80%E5%8F%91?px=new&city=%E6%9D%AD%E5%B7%9E#order', delay=0, num_retries=1,max_depth=1,scrape_callback=ScrapeCallback())
| Code-In-Action/python-in-action | webscrap/c1.py | c1.py | py | 7,304 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.match",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.no... |
36458063236 | import abc
import json
from typing import Any, Dict, List, NoReturn, Optional, Tuple, Union, final
from erniebot_agent.agents.base import BaseAgent
from erniebot_agent.agents.callback.callback_manager import CallbackManager
from erniebot_agent.agents.callback.default import get_default_callbacks
from erniebot_agent.agents.callback.handlers.base import CallbackHandler
from erniebot_agent.agents.mixins import GradioMixin
from erniebot_agent.agents.schema import AgentResponse, LLMResponse, ToolResponse
from erniebot_agent.chat_models.erniebot import BaseERNIEBot
from erniebot_agent.file import GlobalFileManagerHandler
from erniebot_agent.file.base import File
from erniebot_agent.file.file_manager import FileManager
from erniebot_agent.memory import Memory, WholeMemory
from erniebot_agent.memory.messages import Message, SystemMessage
from erniebot_agent.tools.base import BaseTool
from erniebot_agent.tools.tool_manager import ToolManager
from erniebot_agent.utils.exceptions import FileError
_PLUGINS_WO_FILE_IO: Tuple[str] = ("eChart",)
class Agent(GradioMixin, BaseAgent[BaseERNIEBot]):
    """The base class for agents.

    Typically, this is the class that a custom agent class should inherit from.
    A class inheriting from this class must implement how the agent orchestrates
    the components to complete tasks.

    Attributes:
        llm: The LLM that the agent uses.
        memory: The message storage that keeps the chat history.
    """
    llm: BaseERNIEBot
    memory: Memory
    def __init__(
        self,
        llm: BaseERNIEBot,
        tools: Union[ToolManager, List[BaseTool]],
        *,
        memory: Optional[Memory] = None,
        system_message: Optional[SystemMessage] = None,
        callbacks: Optional[Union[CallbackManager, List[CallbackHandler]]] = None,
        file_manager: Optional[FileManager] = None,
        plugins: Optional[List[str]] = None,
    ) -> None:
        """Initialize an agent.

        Args:
            llm: An LLM for the agent to use.
            tools: A list of tools for the agent to use.
            memory: A memory object that equips the agent to remember chat
                history. If not specified, a new WholeMemory object will be instantiated.
            system_message: A message that tells the LLM how to interpret the
                conversations. If `None`, the system message contained in
                `memory` will be used.
            callbacks: A list of callback handlers for the agent to use. If
                `None`, a default list of callbacks will be used.
            file_manager: A file manager for the agent to interact with files.
                If `None`, a global file manager that can be shared among
                different components will be implicitly created and used.
            plugins: A list of names of the plugins for the agent to use. If
                `None`, the agent will use a default list of plugins. Set
                `plugins` to `[]` to disable the use of plugins.
        """
        super().__init__()
        self.llm = llm
        # Accept either a prebuilt ToolManager or a plain list of tools.
        if isinstance(tools, ToolManager):
            self._tool_manager = tools
        else:
            self._tool_manager = ToolManager(tools)
        if memory is None:
            self.memory = WholeMemory()
        else:
            self.memory = memory
        # An explicit system message wins over the one stored in memory.
        if system_message:
            self.system_message = system_message
        else:
            self.system_message = self.memory.get_system_message()
        if callbacks is None:
            callbacks = get_default_callbacks()
        if isinstance(callbacks, CallbackManager):
            self._callback_manager = callbacks
        else:
            self._callback_manager = CallbackManager(callbacks)
        self._file_manager = file_manager
        self._plugins = plugins
        # Derive self.file_needs_url from the configured plugins.
        self._init_file_needs_url()
    @final
    async def run(self, prompt: str, files: Optional[List[File]] = None) -> AgentResponse:
        """Run the agent asynchronously.

        Args:
            prompt: A natural language text describing the task that the agent
                should perform.
            files: A list of files that the agent can use to perform the task.

        Returns:
            Response from the agent.
        """
        if files:
            # Reject files that are not owned by this agent's file manager.
            await self._ensure_managed_files(files)
        await self._callback_manager.on_run_start(agent=self, prompt=prompt)
        # NOTE(review): unlike run_tool/run_llm, errors from _run are not
        # reported through the callback manager -- confirm this is intended.
        agent_resp = await self._run(prompt, files)
        await self._callback_manager.on_run_end(agent=self, response=agent_resp)
        return agent_resp
    @final
    async def run_tool(self, tool_name: str, tool_args: str) -> ToolResponse:
        """Run the specified tool asynchronously.

        Args:
            tool_name: The name of the tool to run.
            tool_args: The tool arguments in JSON format.

        Returns:
            Response from the tool.
        """
        tool = self._tool_manager.get_tool(tool_name)
        await self._callback_manager.on_tool_start(agent=self, tool=tool, input_args=tool_args)
        try:
            tool_resp = await self._run_tool(tool, tool_args)
        except (Exception, KeyboardInterrupt) as e:
            # Surface the failure to the callbacks, then propagate it.
            await self._callback_manager.on_tool_error(agent=self, tool=tool, error=e)
            raise
        await self._callback_manager.on_tool_end(agent=self, tool=tool, response=tool_resp)
        return tool_resp
    @final
    async def run_llm(self, messages: List[Message], **opts: Any) -> LLMResponse:
        """Run the LLM asynchronously.

        Args:
            messages: The input messages.
            **opts: Options to pass to the LLM.

        Returns:
            Response from the LLM.
        """
        await self._callback_manager.on_llm_start(agent=self, llm=self.llm, messages=messages)
        try:
            llm_resp = await self._run_llm(messages, **opts)
        except (Exception, KeyboardInterrupt) as e:
            # Surface the failure to the callbacks, then propagate it.
            await self._callback_manager.on_llm_error(agent=self, llm=self.llm, error=e)
            raise
        await self._callback_manager.on_llm_end(agent=self, llm=self.llm, response=llm_resp)
        return llm_resp
    def load_tool(self, tool: BaseTool) -> None:
        """Load a tool into the agent.

        Args:
            tool: The tool to load.
        """
        self._tool_manager.add_tool(tool)
    def unload_tool(self, tool: BaseTool) -> None:
        """Unload a tool from the agent.

        Args:
            tool: The tool to unload.
        """
        self._tool_manager.remove_tool(tool)
    def get_tools(self) -> List[BaseTool]:
        """Get the tools that the agent can choose from."""
        return self._tool_manager.get_tools()
    def reset_memory(self) -> None:
        """Clear the chat history."""
        self.memory.clear_chat_history()
    async def get_file_manager(self) -> FileManager:
        """Return the agent's file manager, falling back to the global one."""
        if self._file_manager is None:
            file_manager = await GlobalFileManagerHandler().get()
        else:
            file_manager = self._file_manager
        return file_manager
    @abc.abstractmethod
    async def _run(self, prompt: str, files: Optional[List[File]] = None) -> AgentResponse:
        """Run the agent asynchronously without invoking callbacks.

        This method is called in `run`.
        """
        raise NotImplementedError
    async def _run_tool(self, tool: BaseTool, tool_args: str) -> ToolResponse:
        """Run the given tool asynchronously without invoking callbacks.

        This method is called in `run_tool`.
        """
        parsed_tool_args = self._parse_tool_args(tool_args)
        file_manager = await self.get_file_manager()
        # XXX: Sniffing is less efficient and probably unnecessary.
        # Can we make a protocol to statically recognize file inputs and outputs
        # or can we have the tools introspect about this?
        input_files = file_manager.sniff_and_extract_files_from_list(list(parsed_tool_args.values()))
        tool_ret = await tool(**parsed_tool_args)
        if isinstance(tool_ret, dict):
            output_files = file_manager.sniff_and_extract_files_from_list(list(tool_ret.values()))
        else:
            output_files = []
        tool_ret_json = json.dumps(tool_ret, ensure_ascii=False)
        return ToolResponse(json=tool_ret_json, input_files=input_files, output_files=output_files)
    async def _run_llm(self, messages: List[Message], functions=None, **opts: Any) -> LLMResponse:
        """Run the LLM asynchronously without invoking callbacks.

        This method is called in `run_llm`.
        """
        llm_ret = await self.llm.chat(messages, functions=functions, stream=False, **opts)
        return LLMResponse(message=llm_ret)
    def _init_file_needs_url(self):
        """Set self.file_needs_url: True if any plugin performs file I/O."""
        self.file_needs_url = False
        if self._plugins:
            for plugin in self._plugins:
                if plugin not in _PLUGINS_WO_FILE_IO:
                    self.file_needs_url = True
    def _parse_tool_args(self, tool_args: str) -> Dict[str, Any]:
        """Parse the JSON tool arguments, insisting on a top-level object."""
        try:
            args_dict = json.loads(tool_args)
        except json.JSONDecodeError:
            raise ValueError(f"`tool_args` cannot be parsed as JSON. `tool_args`: {tool_args}")
        if not isinstance(args_dict, dict):
            raise ValueError(f"`tool_args` cannot be interpreted as a dict. `tool_args`: {tool_args}")
        return args_dict
    async def _ensure_managed_files(self, files: List[File]) -> None:
        """Raise FileError unless every file is owned by this agent's manager."""
        def _raise_exception(file: File) -> NoReturn:
            raise FileError(f"{repr(file)} is not managed by the file manager of the agent.")
        file_manager = await self.get_file_manager()
        for file in files:
            try:
                managed_file = file_manager.look_up_file_by_id(file.id)
            except FileError:
                _raise_exception(file)
            # The manager must hold the very same object, not just the same id.
            if file is not managed_file:
                _raise_exception(file)
| Southpika/ERNIE-Bot-SDK | erniebot-agent/src/erniebot_agent/agents/agent.py | agent.py | py | 9,943 | python | en | code | null | github-code | 36 | [
{
"api_name": "typing.Tuple",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "erniebot_agent.agents.mixins.GradioMixin",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "erniebot_agent.agents.base.BaseAgent",
"line_number": 24,
"usage_type": "name"
},... |
8231955014 | """Client library for sending events to the data processing system.
This is for use with the event collector system. Events generally track
something that happens in production that we want to instrument for planning
and analytical purposes.
Events are serialized and put onto a message queue on the same server. These
serialized events are then consumed and published to the remote event collector
by a separate daemon.
See also: https://github.com/reddit/event-collector
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import calendar
import json
import time
import uuid
from . import MAX_EVENT_SIZE, MAX_QUEUE_SIZE
from baseplate.message_queue import MessageQueue, TimedOutError
class Event(object):
    """A single analytics event destined for the collector."""

    def __init__(self, topic, event_type, timestamp=None, id=None):
        self.topic = topic
        self.event_type = event_type
        if not timestamp:
            # No timestamp supplied: stamp with the current time.
            self.timestamp = time.time() * 1000
        else:
            # Only UTC timestamps are acceptable; reject non-zero offsets.
            offset = timestamp.utcoffset() if timestamp.tzinfo else None
            if offset is not None and offset.total_seconds() != 0:
                raise ValueError("Timestamps must be in UTC")
            self.timestamp = calendar.timegm(timestamp.timetuple()) * 1000
        self.id = id or uuid.uuid4()
        self.payload = {}

    def get_field(self, key, obfuscated=False):
        """Get the value of a field in the event.

        If the field is not present, :py:data:`None` is returned.

        :param str key: The name of the field.
        :param bool obfuscated: Whether to look for the field in the obfuscated
            payload.
        """
        source = self.payload.get("obfuscated_data", {}) if obfuscated else self.payload
        return source.get(key, None)

    def set_field(self, key, value, obfuscate=False):
        """Set the value for a field in the event.

        :param str key: The name of the field.
        :param value: The value to set the field to. Should be JSON
            serializable.
        :param bool obfuscate: Whether or not to put the field in the obfuscated
            section. This is used for sensitive info like IP addresses that must
            be treated with care.
        """
        # Nulls and empty strings carry no information for the collector, but
        # zeros do, so a plain truthiness test would be wrong here.
        if value is None or value == "":
            return
        target = self.payload
        if obfuscate:
            target = self.payload.setdefault("obfuscated_data", {})
        target[key] = value

    def serialize(self):
        """Return the event as a JSON document for the message queue."""
        event_dict = {
            "event_topic": self.topic,
            "event_type": self.event_type,
            "event_ts": int(self.timestamp),
            "uuid": str(self.id),
            "payload": self.payload,
        }
        return json.dumps(event_dict)
class EventError(Exception):
    """Base class for event related exceptions."""
class EventTooLargeError(EventError):
    """Raised when a serialized event is too large to send."""

    def __init__(self, size):
        message = "Event is too large to send (%d bytes)" % size
        super(EventTooLargeError, self).__init__(message)
class EventQueueFullError(EventError):
    """Raised when the queue of events is full.

    This usually indicates that the event publisher is having trouble talking
    to the event collector.
    """

    def __init__(self):
        message = "The event queue is full."
        super(EventQueueFullError, self).__init__(message)
class EventQueue(object):
    """A queue to transfer events to the publisher."""

    def __init__(self, name):
        # The POSIX message queue is namespaced per event-queue name.
        queue_name = "/events-" + name
        self.queue = MessageQueue(
            queue_name,
            max_messages=MAX_QUEUE_SIZE,
            max_message_size=MAX_EVENT_SIZE,
        )

    def put(self, event):
        """Add an event to the queue.

        The queue is local to the server this code is run on. The event
        publisher on the server will take these events and send them to the
        collector.

        :param baseplate.events.Event event: The event to send.
        :raises: :py:exc:`EventTooLargeError` The serialized event is too large.
        :raises: :py:exc:`EventQueueFullError` The queue is full. Events are
            not being published fast enough.
        """
        serialized = event.serialize()
        size = len(serialized)
        if size > MAX_EVENT_SIZE:
            raise EventTooLargeError(size)
        try:
            # timeout=0 makes a full queue fail fast instead of blocking.
            self.queue.put(serialized, timeout=0)
        except TimedOutError:
            raise EventQueueFullError
| Omosofe/baseplate | baseplate/events/queue.py | queue.py | py | 4,735 | python | en | code | null | github-code | 36 | [
{
"api_name": "calendar.timegm",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": ... |
14783345102 |
# Built-In Python
import time
from pathlib import Path
import pickle
import random
import logging
# Third-Party
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from fire import Fire
# Custom
from .population import Population
from .flips import Flips, Flip
class History:
    """Rolling record of population snapshots taken after each batch of flips."""
    def __init__(self):
        self.moneyStamps = []  # raw money stamps, one per recorded snapshot
        self.numFlips = []  # flip count at the time of each snapshot
        self.populationDfs = []  # per-snapshot population DataFrames
        # Cached per-snapshot summary stats, indexed by flip count.
        self.stats = pd.DataFrame(columns=['numFlips', 'money', 'total', 'max', 'min', 'mean', 'median'])\
            .set_index('numFlips', drop=False)
    def __repr__(self):
        return f"<{self.__class__.__name__} | Entries: {len(self)}>"
    def __len__(self):
        return len(self.moneyStamps)
    def add(self, population: Population, numFlips: int):
        """Record a snapshot of *population* taken after *numFlips* flips."""
        self.moneyStamps.append(population.getMoneyStamp())
        self.numFlips.append(numFlips)
        self.populationDfs.append(population.toDf())
    def getStatsOverTime(self, includeTopX=True, logProgress=False):
        """Convert unprocessed snapshots into summary rows, extending the cache.

        Only snapshots added since the last call are converted (the loop
        resumes from len(self.stats)).
        """
        df = self.stats
        new_data = []
        if len(self):
            start_flip = len(df)
            for i in tqdm(range(start_flip, len(self)), desc=f'Converting History to df', disable=not logProgress):
                # A DataFrame representing the population after a given number of flips (self.numFlips[i])
                row = self.populationDfs[i]
                # Converting the df to a single row, so it can be a part of the full history df
                row_data = {
                    'numFlips': self.numFlips[i],
                    'money': row.money.values,  # This will be a numpy array, saved in a single cell of the df
                    'total': row.money.sum(),
                    'max': row.money.max(),
                    'min': row.money.min(),
                    'mean': row.money.mean(),
                    'median': row.money.median()
                }
                if includeTopX:
                    # Split the people into wealth bins (richest first).
                    money: np.ndarray = row.money.values
                    # NOTE(review): this sorts the array descending IN PLACE,
                    # mutating the stored snapshot's column -- confirm intended.
                    money[::-1].sort()
                    num_per_percent = round(len(money) * 0.1)
                    _split = np.array_split(money, num_per_percent)
                    # includeTopX may be a list of bin indices to keep.
                    use_indices = includeTopX if isinstance(includeTopX, list) else list(range(0, 100))
                    for idx, top_x_range in enumerate(_split):
                        if idx in use_indices:
                            row_data[f"top_{idx}_to_{idx+1}_percent_wealth"] = top_x_range.sum() / row.money.sum() * 100
                new_data.append(row_data)
            new_df = pd.DataFrame(new_data)
            df = pd.concat([self.stats, new_df])
            self.stats = df
        return self.stats
    def save(self, filepath, includeTopX=True):
        """Pickle the (up-to-date) stats DataFrame to *filepath*."""
        df = self.getStatsOverTime(includeTopX=includeTopX)
        df.to_pickle(str(filepath))
class CoinFlipper:
    """Runs repeated coin-flip wealth-transfer experiments over a Population."""
    def __init__(self, population: Population, dollarsPerFlip: int = 1, allowDebt: bool = False, brokeIsOut=True,
                 selectionStyle: str = 'random', cacheDir='flipperCache/cli'):
        self.population = population
        self.dollarsPerFlip = int(dollarsPerFlip)  # bet size per flip
        self.allowDebt = allowDebt  # whether losers may go below zero
        self.brokeIsOut = brokeIsOut  # broke players are excluded from flips
        self.selectionStyle = selectionStyle  # 'random' or 'sequential'
        self.cacheDir = Path(cacheDir)
        self.flips: Flips = Flips()
        self.history: History = History()
        # Record the starting state as flip #0.
        self.history.add(self.population, numFlips=len(self.flips))
    def __repr__(self):
        return f"<{self.__class__.__name__}>"
    def descriptiveFilepath(self, directory: Path = '.'):
        """Build a pickle filename that encodes the run's key parameters."""
        # NOTE(review): the default is a str despite the Path annotation; it is
        # converted on the next line, so this is harmless but inconsistent.
        directory = Path(directory)
        return directory.joinpath(f'people_{len(self.population)}_start_{self.population[0].startMoney}_'
                                  f'bet_{self.dollarsPerFlip}_debt_{self.allowDebt}_'
                                  f'flips_{len(self.flips)}.pickle')
    @property
    def numFlips(self):
        # Total number of flips performed so far.
        return len(self.flips)
    def flip(self, num: int = 1, saveEvery=0, plotEvery=0, plotKind='topXPercentRanges', logProgress=False,
             saveHistory=True, closePlt=True):
        """Flip a coin *num* times, optionally snapshotting, saving and plotting.

        saveEvery/plotEvery of 0 disable the periodic save/plot entirely.
        """
        for i in tqdm(range(num), total=num, unit='flips', desc='Flipping Coins', disable=not logProgress):
            self.flipOnce()
            if saveHistory:
                self.history.add(self.population, numFlips=len(self.flips))
            if saveEvery and len(self.flips) > 0 and len(self.flips) % saveEvery == 0:
                # Periodically checkpoint the whole flipper to the cache dir.
                filepath = self.descriptiveFilepath(self.cacheDir)
                self.save(filepath, history=True)
            if plotEvery and len(self.flips) % plotEvery == 0:
                self.population.plot(t=0.1, keepAx=True, kind=plotKind,
                                     title=f'Population after {i + 1:,} flips (Total: ${self.population.totalMoney:,})')
                if closePlt:
                    plt.close()
def flipOnce(self):
# Pick 2 random people from the group to "flip" against each other
p1, p2 = self.getPeople(2)
winner = random.choice([p1, p2])
loser = p1 if winner == p2 else p2
# Since both are random selections, we just assume the "first" one was the winner
flip = Flip(winner=winner, loser=loser, bet=self.dollarsPerFlip)
# Log the flip
self.flips.append(flip)
if loser.has(self.dollarsPerFlip) or self.allowDebt:
flip.settleBet()
def getPeople(self, n):
pop = self.population.getPeopleWithMoreThan(0) if self.brokeIsOut else self.population
if self.selectionStyle == 'random':
return pop.pickRandoms(n)
elif self.selectionStyle == 'sequential':
if self.brokeIsOut:
raise Exception(f'Can not use sequential selection when brokeIsOut is set to True.')
nex = [pop.next(loop=True) for _ in range(n)]
return nex
else:
raise Exception(f"Unknown selectionStyle: {self.selectionStyle}")
def save(self, filepath, history=True, includeTopX=True):
filepath = Path(filepath)
if filepath.is_dir() or not filepath.suffix:
filepath = self.descriptiveFilepath(filepath)
filepath.parent.mkdir(parents=True, exist_ok=True)
with open(str(filepath), 'wb') as pf:
pickle.dump(self, pf)
if history:
self.history.save(filepath.with_stem(f"{filepath.stem}_history"), includeTopX=includeTopX)
@classmethod
def load(cls, filepath):
filepath = Path(filepath)
if filepath.exists():
flipper: cls
with open(str(filepath), 'rb') as pf:
try:
flipper = pickle.load(pf)
except (EOFError, pickle.UnpicklingError):
filepath.unlink(missing_ok=True)
raise EOFError(f"There was a problem loading {filepath}. It has been deleted.")
if len(flipper.history) > 0 and flipper.history.stats.empty:
# TODO: History cached stats do not seem to be loaded here...
# This is a tmp solution until I can dig into the pickling / unpickling procedures
history_path = filepath.with_stem(f'{filepath.stem}_history')
if history_path.exists():
logging.debug(
'Warning: FlipperManager history stats were not loaded correctly. Manually loading now')
flipper.history.stats = pd.read_pickle(str(history_path))
return flipper
else:
raise Exception(f"Filepath does not exist")
def flipCoins(numFlips=10_000, numPeople=1000, startMoney=100, dollarsPerFlip=1, allowDebt=False,
plot=False, plotEvery=100, saveHistory=False, showResults=True, plotKind='topXPercentRanges',
useCache=False):
people = Population([])
people.add(numPeople, startMoney)
flipper = CoinFlipper(people, dollarsPerFlip, allowDebt)
flipper.flip(numFlips, saveEvery=0, plotEvery=plotEvery if plot else 0, logProgress=True, saveHistory=saveHistory,
plotKind=plotKind, closePlt=False)
if useCache:
cache_dir = Path('flipperCache/cli')
flipper_path = cache_dir.joinpath(
f'people_{numPeople}_start_{startMoney}_bet_{dollarsPerFlip}_debt_{allowDebt}_'
f'flips_{numFlips}.pickle')
flipper.save(flipper_path)
if showResults:
print()
print('Results:')
print(flipper.population.getStatsByTopXRanges())
print('Done flipping!')
if plot:
flipper.population.plot(kind=plotKind, keepAx=True,
title=f'Population after {len(flipper.flips):,} flips '
f'(Total: ${flipper.population.totalMoney:,})')
if len(flipper.history):
flipper.history.getStatsOverTime()
if __name__ == '__main__':
Fire(flipCoins)
| ryanlague/thePerfectlyJustSociety | thePerfectlyJustSociety/coinFlip/coinFlip.py | coinFlip.py | py | 9,029 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "population.Population",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "population.getMoneyStamp",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "popul... |
35376021408 | import pandas as pd
import numpy as np
import streamlit as st
import plotly.express as px
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_squared_error
def main(verbosity=False):
cal_housing = fetch_california_housing()
df = pd.DataFrame(cal_housing.data, columns=cal_housing.feature_names)
y = cal_housing.target
X = df['MedInc']
st.dataframe(X)
st.dataframe(y)
beta, y_pred = my_reg(X, y)
st.dataframe(y_pred)
res_df = pd.DataFrame(dict(X=X, y=y, y_pred=y_pred))
fig = px.scatter(res_df, x="X", y=["y", 'y_pred'])
st.plotly_chart(fig, use_container_width=True)
st.latex(fr"Price = {beta[1]:.4f} \times MedInc + {beta[0]:.4f}")
st.write(f'You do loss : {mean_squared_error(y, y_pred)}')
def my_reg(X, y, verbose=False):
print("-------- start --------")
# My formula : y = Θ(1-e^-(y-b0-b1*x)^2)
beta = np.random.random(2)
alpha = 0.002
n_max_iter = 10000
theta = 0.001
for it in range(n_max_iter):
y_pred: np.ndarray = beta[0] + beta[1] * X
g_b0 = (-2 * theta * (y - y_pred) * np.exp(-1 * (y - y_pred) ** 2)).sum()
g_b1 = (-2 * theta * X * (y - y_pred) * np.exp(-1 * (y - y_pred) ** 2)).sum()
print(f"({it}) beta: {beta}, gradient: {g_b0} {g_b1}")
beta_prev = np.copy(beta)
beta[0] = beta[0] - alpha * g_b0
beta[1] = beta[1] - alpha * g_b1
if np.linalg.norm(beta - beta_prev) < alpha/100:
print(f"I do early stoping at iteration {it}")
break
print(f'You do loss : {mean_squared_error(y, y_pred)}')
return beta, y_pred
if __name__ == '__main__':
main(st.sidebar.checkbox("verbosity"))
| Wratch/TYBootcamp | youdo1.py | youdo1.py | py | 1,784 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.datasets.fetch_california_housing",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.dataframe",
"line_number": 15,
"usage_type": "call"
},
{
"api... |
70847780583 | from setuptools import setup
import os
import sys
import codecs
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open
return codecs.open(os.path.join(here, *parts), 'r').read()
install_requires = [
"virtualenv==15.1.0",
"requests>=2.9.1,<3.0.0",
]
if sys.version_info[:2] == (2, 6):
install_requires += [
'argparse',
]
setup(
name='cloudify-agent-packager',
version='5.0.11',
url='https://github.com/cloudify-cosmo/cloudify-agent-packager',
author='Cloudify',
author_email='cosmo-admin@cloudify.co',
license='LICENSE',
platforms='All',
description='Creates Cloudify Agent Packages',
long_description=read('README.md'),
packages=['agent_packager'],
entry_points={
'console_scripts': [
'cfy-ap = agent_packager.cli:main',
]
},
install_requires=install_requires,
include_package_data=True
)
| cloudify-cosmo/cloudify-agent-packager | setup.py | setup.py | py | 981 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_num... |
38118890596 | import matplotlib.pyplot as plt
from matplotlib import animation
import numpy as np
x = np.linspace(0,2,201)
y = np.linspace(0,1,101)
xy, yx = np.meshgrid(x,y)
ims = []
tmin, tmax = 0, 0.02
h = 1e-3
fig = plt.figure()
for i in range(int(np.ceil((tmax-tmin)/h))):
plt.pcolormesh(xy,yx,np.sin(xy+np.pi*i*h)+np.cos(yx+np.pi*i*h),
cmap='RdBu_r')
plt.show()
plt.pause(0.01)
plt.clf()
| arnemagnus/_oldversion_physicsproject | calculation_scripts/old_assorted_test_scripts/testmatplotliblive.py | testmatplotliblive.py | py | 420 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linspace",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
... |
10849889917 | import os
import datetime
from jira import JIRA
class JiraIssueReporterHandler(object):
def __init__(self, jiraURL, username, api_token, projectKey):
self.options = {'server': jiraURL}
# self.server = jiraURL
self.auth = (username, api_token)
self.projectKey = projectKey
# self.customField_Test_Method = customField_TestMethod
def get_jira_client(self):
return JIRA(self.options, basic_auth=self.auth)
def create_new_bug(self, summary, description):
jira = self.get_jira_client()
new_bug = None
try:
# Create new issue with issue type is Bug
new_bug = jira.create_issue(project=self.projectKey, summary=summary,
description=description, issuetype={'name': 'Bug'})
except Exception as ex:
print(ex)
finally:
if new_bug:
return new_bug.key
return None
def add_bug_comment(self, bug_id, comment):
jira = self.get_jira_client()
try:
bug = jira.issue(bug_id)
# comment = "This issue is still {color:red}*FAILING*{color}"
# Add comment
jira.add_comment(bug, comment)
except Exception as ex:
print(ex)
def get_issue_status(self, issue_id):
jira = self.get_jira_client()
try:
issue = jira.issue(issue_id)
status = issue.fields.status.name
return status
except Exception as ex:
print(ex)
def mark_issue_status(self, issue_id, status):
jira = self.get_jira_client()
try:
issue = jira.issue(issue_id)
transitions = jira.transitions(issue)
for transition in transitions:
if transition['name'] == status:
jira.transition_issue(issue, transition['id'])
break
except Exception as ex:
print(ex)
def get_today(self):
return datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
| gravity239/PySeleFramework | selenpy/common/jira_issue_reporter_handler.py | jira_issue_reporter_handler.py | py | 2,100 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "jira.JIRA",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "jira.create_issue",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "jira.issue",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "jira.add_comment",
"line_n... |
75088426664 |
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
# path('', views.TaskListView.as_view(), name='home'),
# path('add/', views.TaskCreateView.as_view(), name='add'),
path('detail/<int:pk>/', views.TaskDetailView.as_view(), name='detail'),
path('delete/<int:pk>/', views.TaskDeleteView.as_view(), name='delete'),
path('update/<int:pk>', views.TaskUpdateView.as_view(), name='update')
]
| alen0577/todo | todo_app/urls.py | urls.py | py | 461 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
74290457704 | import json
import yaml
def refs_to_dict(filename):
data = yaml.load(open(filename), Loader=yaml.CLoader)
return {elem['id']: elem['title'] for elem in data.get('references', [])}
refs = {elem['id']: elem['title'] for elem in json.load(open('wg21/data/csl.json'))}
refs.update(refs_to_dict('md/wg21_fmt.yaml'))
refs.update(refs_to_dict('md/early.yaml'))
print(json.dumps(refs, indent=4))
| brevzin/sd6 | reduce_refs.py | reduce_refs.py | py | 399 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "yaml.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "yaml.CLoader",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 12... |
29648505683 | #! /usr/bin/env python3
# pylint: disable=missing-module-docstring,missing-function-docstring
from . import std_headers
from .utils import Object
def customHeaderIdentificationHandler(header):
"""
A couple of headers don't fit into the target identification heuristics
we have implemented in CppSourceDepsParser. We need custom handler to
identify the corresponding targets for those headers.
"""
return None
def getDefaultConfigs():
configs = Object()
# These top level directories are not source code directory. Build system
# should ignore them. DepG should not visit these directories to generate
# targets. Hence top level directories like `build/`, `third_party`,
# `tools` etc. should be forbidden.
configs.FORBIDDEN_PATHS = set([])
# Ignored paths are different from forbidden paths. These paths will be ignored
# by default. However if these paths are explicitly chosen, they will work as
# usual. For example, we don't want to build the code in experimental
# directories if someone choose to build root directory. However the
# code in experimental should be compliable if the 'experimental' directory is
# explicitly chosen.
configs.IGNORED_PATHS = set(["experimental"])
configs.CPP_HEADER_EXTENSIONS = ("-inl.hpp", ".hpp", ".h")
configs.CPP_SOURCE_EXTENSIONS = (".cpp", ".cc", ".c")
configs.PROTO_HEADER_EXTENSION = ".pb.h"
configs.GRPC_HEADER_EXTENSION = ".grpc.pb.h"
configs.PROTO_EXTENSION = ".proto"
configs.TEST_FILE_EXTENSION = "_test.cpp"
configs.IGNORED_HEADERS = set(["options.pb.h"])
configs.INCLUDE_PATHS = [ "." ]
configs.IGNORE_EXISTANCE = set([])
configs.CACHE_DIRECTORY = "build/.depg/cache"
configs.BUILD_FILE_NAME = "BUILD"
configs.THIRD_PARTY_TARGET_BUILD_FILES = []
configs.HEADER_PREFIXES_MAP = None
configs.GTEST_MAIN_TARGET = None # "testing/gtest/gtest_with_glog_main"
configs.SYS_STD_HEADERS = set(std_headers.STD_HEADERS) | set(std_headers.SYSTEM_HEADERS)
configs.CUSTOM_HEADER_IDENTIFICATION_HANDLER = customHeaderIdentificationHandler
# DepG doesn't recompute the deps of a C++ source file if the content of file have not changed.
# However if it is supposed to depend on external factors (for example change in header-prefix
# declaration of third-party targets), we would like to re-generate it.
# In that case we must compute the checksum of those dependency content and
# set to `configs.DEPG_DEPS_CACHE_CHECKSUM`.
configs.DEPG_DEPS_CACHE_CHECKSUM = None
return configs
| mohitmv/depg | default_configs.py | default_configs.py | py | 2,600 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 17,
"usage_type": "call"
}
] |
36819201120 | # type: ignore
from functools import cached_property
from dataclasses import dataclass, field
from pathlib import Path
@dataclass
class File:
path: Path
size: int
@dataclass
class Dir:
path: Path
files: list[File] = field(repr=False, default_factory=list)
dirs: list["Dir"] = field(repr=False, default_factory=list)
@cached_property
def size(self):
return sum(item.size for item in (*self.files, *self.dirs))
with open("input") as f:
lines = [line.strip() for line in f]
root = Path('/')
fs = {root: Dir(root)}
cwd = root
for line in lines[1:]:
if line.startswith('$'):
parts = line.split()
cmd = parts[1]
if cmd != 'cd':
continue
arg = parts[2]
if arg == '..':
cwd = cwd.parent
else:
cwd /= arg
else:
size, fname = line.split()
path = cwd / fname
if size == 'dir':
file = Dir(path)
fs[cwd].dirs.append(file)
else:
file = File(path, int(size))
fs[cwd].files.append(file)
fs[path] = file
print("Part 1:", sum(f.size for f in fs.values() if isinstance(f, Dir) and f.size <= 100000))
total = 7e7
need = 3e7
have = total - fs[root].size
for thing in sorted((f for f in fs.values() if isinstance(f, Dir)), key=lambda x: x.size):
if have + thing.size >= need:
print("Part 2:", thing.size)
break
| ocaballeror/adventofcode2022 | 07/day7.py | day7.py | py | 1,442 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
... |
17887416810 | from django.shortcuts import render, redirect
from .models import Reply
from review.models import Review
from .forms import ReplyForm
from django.contrib import messages
from django.core.exceptions import PermissionDenied
def Reply(request, pk):
""" Model for staff to reply to user reviews on menu """
review = Review.objects.get(id=pk)
form = ReplyForm()
# nun-staff access lead to 403 page
if not request.user.is_staff:
raise PermissionDenied()
if request.method == 'POST':
form = ReplyForm(request.POST, request.FILES)
if form.is_valid():
reply = form.save(commit=False)
reply.user = request.user
reply.review = review
reply.save()
# flash message
messages.success(
request, 'Your successfully replied to the review!')
return redirect('item-reviews', menu_id=review.menu_item.id)
context = {
'form': form,
'review_id': review.id,
'review': review
}
return render(request, 'reply/reply.html', context)
| Code-Institute-Submissions/yuyizhong-O.A.T-Vietnamese-Cuisine | reply/views.py | views.py | py | 1,090 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "review.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "review.models.Review.objects.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "review.models.Review.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"... |
11755542906 | from os import environ, listdir
from os.path import join, isfile, getmtime
from dataclasses import dataclass, field
import json
import time
import traceback
from datetime import datetime,timezone
savedGamePath = environ['USERPROFILE'] + "\Saved Games\Frontier Developments\Elite Dangerous"
@dataclass
class mission:
id: int
localisedName: str = ''
reward: int = 0
wing: bool = False
def __hash__(self) -> int:
return hash(self.id)
def __eq__(self, __o: object) -> bool:
return self.id == __o.id
@dataclass
class Journal:
@dataclass
class log:
path: str = ''
version: str = ''
latestUpdate: float = 0.0
updateInterval: float = 0.0
raw: str = ''
@dataclass
class ship:
model: str = ''
name: str = ''
ident: str = ''
fuel: float = 0.0
fuelCap: float = 0.0
fuelLevel: int = 100 # round((fuel/fuelCap)*100) %
isScooping: bool = False
hull: float = 0.0
cargoCap: int = 0
jumpRange: float = 0.0
modules: list = field(default_factory=list)
@dataclass
class nav:
navRoutes: list = field(default_factory=list)
location: str = ''
lastTarget: str = ''
target: str = ''
targetStarClass: str = ''
remainingJumps: int = 0
dockedStation: str = ''
status: str = ''
signs: set = field(default_factory=set) # unique signs
missions: set[mission] = field(default_factory=set)
journal = Journal()
# STATUS: normal,undocking,docking,startUndock,startDock,docked,startJump,finishJump,supercruise
def getNavRoute(routePath=None):
if not routePath:
routePath = savedGamePath+r"\NavRoute.json"
pass # WIP
def getLatestLogPath(logPath=None):
if not logPath:
logPath = savedGamePath
logsList = [join(logPath, f) for f in listdir(logPath) if isfile(join(logPath, f)) and f.startswith('Journal.')]
if not logsList:
return None
latestLog = max(logsList, key=getmtime)
return latestLog
UTC_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
latestLogLine = 0
def parseLogs(logPath=None,logger=None) -> Journal:
global latestLogLine, journal
if not logPath:
logPath = getLatestLogPath()
try:
linesRead = 0
with open(logPath, 'r', encoding='utf-8') as f:
for line in f:
journal.log.path = logPath
logJson = json.loads(line)
logEvent = logJson['event']
# Event Filters start
if logEvent == "FSSSignalDiscovered" or \
logEvent == "Friends" or \
logEvent == "Powerplay" or \
logEvent == "NpcCrewPaidWage" or \
logEvent == "MissionRedirected" or \
logEvent == "Statistics" or \
logEvent == "Materials":
continue
# Event Filters end
linesRead += 1
if linesRead>latestLogLine:
logTime = datetime.strptime(logJson['timestamp'],UTC_FORMAT).replace(tzinfo=timezone.utc) # change to UTC time
logTime = logTime.timestamp()
if logTime>=journal.log.latestUpdate: # should update
latestLogLine += 1
journal.log.latestUpdate=logTime
journal.log.updateInterval = time.time()-logTime
journal.log.raw = logJson
# print(logEvent+' ')
# print(logTime)
if logEvent == 'Fileheader':
journal.log.version = 'Odyssey' if logJson['Odyssey'] else 'Horizons'
elif logEvent == 'Loadout':
journal.ship.model = logJson['Ship']
journal.ship.name = logJson['ShipName']
journal.ship.ident = logJson['ShipIdent']
journal.ship.hull = logJson['HullHealth']
journal.ship.fuelCap = logJson['FuelCapacity']['Main']
journal.ship.cargoCap = logJson['CargoCapacity']
journal.ship.jumpRange = logJson['MaxJumpRange']
journal.ship.modules = logJson['Modules']
elif logEvent == 'RefuelAll' or logEvent == 'RefuelPartial':
journal.ship.fuel += logJson['Amount']
journal.ship.fuelLevel = round((journal.ship.fuel/journal.ship.fuelCap)*100)
elif logEvent == 'FuelScoop':
journal.ship.fuel += logJson['Scooped']
journal.ship.fuelLevel = round((journal.ship.fuel/journal.ship.fuelCap)*100)
if journal.log.updateInterval <= 10 and journal.ship.fuelLevel < 99: journal.ship.isScooping = True
else: journal.ship.isScooping = False
elif ((logEvent == 'ReceiveText' and 'AttackDutyStart' in logJson['Message']) or logEvent == 'Interdicted' or logEvent == 'UnderAttack' or (logEvent == 'Music' and (logJson['MusicTrack'] == 'Interdiction' or logJson['MusicTrack'] == 'Combat_Dogfight'))) and journal.log.updateInterval <= 30: # May be interdicted!
journal.signs.add('UnderAttack')
elif logEvent == 'Scanned' and journal.log.updateInterval <= 30 : # logged within 30 seconds
journal.signs.add('Scanned')
elif logEvent == 'Resurrect' or logEvent == 'LoadGame' or logEvent == 'Shutdown': # Ship destroyed / Reload the game
if 'UnderAttack' in journal.signs: journal.signs.remove('UnderAttack')
if 'Scanned' in journal.signs: journal.signs.remove('Scanned')
elif logEvent == 'Music': # music playing
if logJson['MusicTrack'] == 'DestinationFromHyperspace' and journal.nav.target is not None: # Finish multi-hop route
journal.nav.target = journal.nav.lastTarget = None
elif logJson['MusicTrack'] == 'MainMenu': journal.status = 'MainMenu'
elif logJson['MusicTrack'] == 'DockingComputer':
if journal.status == 'startUndock': journal.status = 'undocking'
elif journal.status == 'startDock': journal.status = 'docking'
elif logEvent == 'DockingRequested': journal.status = 'startDock'
elif logEvent == 'Docked':
journal.status = 'Docked'
journal.nav.dockedStation = logJson['StationName']
elif logEvent == 'StartJump':
if logJson['JumpType'] == 'Hyperspace' and 'StarSystem' in logJson:
journal.status = 'startJump'
journal.nav.location = logJson['StarSystem']
journal.nav.lastTarget = logJson['StarSystem']
journal.nav.targetStarClass = logJson['StarClass']
elif logJson['JumpType'] == 'Supercruise':
journal.status = 'supercruise'
elif logEvent == 'SupercruiseEntry': journal.status = 'supercruise'
elif logEvent == 'SupercruiseExit' or logEvent == 'DockingCancelled': journal.status = 'normal'
elif logEvent == 'Undocked': journal.status = 'normal'
elif logEvent == 'FSDTarget':
if logJson['Name'] == journal.nav.location : journal.nav.target = None
else:
journal.nav.target = logJson['Name']
if 'RemainingJumpsInRoute' in logJson: # Multi-Hop
journal.nav.remainingJumps = logJson['RemainingJumpsInRoute']
else : journal.nav.remainingJumps = 1 # single hop
elif logEvent == 'FSDJump':
journal.ship.fuel = logJson['FuelLevel']
journal.ship.fuelLevel = round((journal.ship.fuel/journal.ship.fuelCap)*100)
if journal.nav.lastTarget == logJson['StarSystem']:
journal.nav.lastTarget = None
journal.status = 'finishJump'
if journal.nav.target == logJson['StarSystem'] and journal.nav.remainingJumps == 0 :
journal.nav.targetStarClass = None
journal.nav.target = None # Finish route
elif (logEvent == 'Location' or logEvent == 'FSDJump') and 'StarSystem' in logJson:
journal.nav.location = logJson['StarSystem']
elif logEvent == 'MissionAccepted': # Add to Mission List
journal.missions.add(mission(id=logJson['MissionID'],localisedName=logJson['LocalisedName'],reward=logJson['Reward'],wing=logJson['Wing']))
elif (logEvent == 'MissionAbandoned' or logEvent == 'MissionCompleted' or logEvent == 'MissionFailed' )and mission(id=logJson['MissionID']) in journal.missions:
journal.missions.remove(mission(id=logJson['MissionID']))
if journal.status != 'Docked' and journal.nav.dockedStation is not None: journal.nav.dockedStation = None
except IOError as e:
if logger: logger.warn("Error in reading journal logs "+e)
else:
print("Error in reading journal logs "+e)
traceback.print_exc()
return journal
if __name__ == '__main__': # Test
# parseLogs()
# print(getLatestLogPath())
# print(journal['status'])
# print(journal['location'])
# print(journal['dockedStation'])
# print(journal['target'])
# print(journal['missions'])
# print(journal['isUnderAttack'])
# print(journal['isBeingScanned'])
jn = parseLogs()
print(jn.nav.targetStarClass) | Matrixchung/EDAutopilot-v2 | utils/journal.py | journal.py | py | 10,593 | python | en | code | 42 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "dataclasses.fiel... |
29344342096 | import matplotlib
from matplotlib import pyplot as plt
import numpy as np
import cv2 as cv
import IO
import api
# >>> Przykładowe zadanie i proces wykonania na podstawie zestawu Artroom2 <<<
# Korzystając z metody StereoSGBM udostępnionej przez bibliotekę OpenCV wyznacz mapę rozbieżności oraz mapę głębi.
# Dobierz odpowiednie parametry metody StereoSGBM.
# Porównaj otrzymane wyniki z referencyjnymi wartościami rozbieżności zapisanymi w formacie PFM
IMG_LEFT = r"./Artroom2/im0.png"
IMG_RIGHT = r"./Artroom2/im1.png"
img_left = cv.imread(IMG_LEFT)
img_right = cv.imread(IMG_RIGHT)
doffs = 0
baseline = 529.50
f = 1734.04 # Focal z macierzy
width = 1920
height = 1080
ndisp = 190
x = 830
y = 530
# Convert to grey
img_left = cv.cvtColor(img_left, cv.COLOR_BGR2GRAY)
img_right = cv.cvtColor(img_right, cv.COLOR_BGR2GRAY)
# Filtering for smoother
filter_size = 5
kernel = np.ones((filter_size, filter_size), np.float32) / (filter_size * filter_size)
img_left = cv.filter2D(img_left, -1, kernel)
img_right = cv.filter2D(img_right, -1, kernel)
# Obliczanie dysparycji z danych przy pomocy SGBM
disp = api.calculate_disparity_with_SGBM(img_left, img_right, max_disparity=ndisp, window_size=2)
for i in range(len(disp)):
for j in range(len(disp[i])):
disp[i][j] = api.clamp(0, 255, disp[i][j])
# Odczytanie dysparycji do porównania
ref_disp = cv.imread(r"./Artroom2/disp0.pfm", cv.IMREAD_UNCHANGED)
ref_disp = np.asarray(ref_disp)
ref_disp = ref_disp / 256
matplotlib.pyplot.imshow(disp)
plt.show()
cv.imwrite("Artroom2/Results/Zad 1/disparity.png", disp)
IO.save_disp_to_json(disp, "Artroom2/Results/Zad 1/disparity_data.json")
print(f"Zad 1 - Wartość dysparycji wyznaczonej przez StereoSGBM dla [{x},{y}]: {disp[y][x]}")
matplotlib.pyplot.imshow(ref_disp)
plt.show()
cv.imwrite("Artroom2/Results/Zad 1/ref_disparity.png", ref_disp)
IO.save_disp_to_json(ref_disp, "Artroom2/Results/Zad 1/ref_disparity_data.json")
print(f"Zad 1 - Wartość dysparycji referencyjnej dla [{x},{y}]: {ref_disp[y][x]}")
# Obliczanie głębi na podstawie uzyskanych dysparycji
depth = api.calculate_depth_with_disparity(disp, f, baseline, doffs)
ref_depth = api.calculate_depth_with_disparity(ref_disp, f, baseline, doffs)
matplotlib.pyplot.imshow(depth)
plt.show()
cv.imwrite("Artroom2/Results/Zad 1/depth.png", depth / 255)
print(f"Zad 2 - Wartość depth wyznaczonej przez StereoSGBM dla [{x},{y}]: {depth[y][x]}")
matplotlib.pyplot.imshow(ref_depth)
plt.show()
cv.imwrite("Artroom2/Results/Zad 1/ref_depth.png", ref_depth)
print(f"Zad 2 - Wartość depth referencyjnej dla [{x},{y}]: {ref_depth[y][x]}") | SmartMaatt/How-to-Zaowr | Zad2.py | Zad2.py | py | 2,635 | python | pl | code | 6 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_num... |
74865037543 | #-*- coding:utf-8 -*-
import random
import pymongo
client = pymongo.MongoClient('112.74.106.159', 27017)
db = client.develop
#获取useragent池
get_info = db.userAgents.find()
USER_AGENTS = [i['userAgent'] for i in get_info]
class MyUserAgent(object):
def process_request(self, request, spider):
request.headers.setdefault('User-Agent', random.choice(USER_AGENTS))
| dreamyteam/py_crawler | movie_spider/movie_spider/User_Agents.py | User_Agents.py | py | 378 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 14,
"usage_type": "call"
}
] |
18690414084 | import math
import numpy as np
import csv
from itertools import chain
import random
Feature_number=4
all_Feature=False
CUS_NUMBER=50
Training_number=50
count_setosa = 0
count_versicolor = 0
count_virginica = 0
k=0
l=40
fold = []
Output = []
variance = []
h =np.array([0.01,0.5,10])
confussion_matrix = [[0] * 3] * 3
confussion_matrix1 = [[0] * 3] * 3
confussion_matrix2 = [[0] * 3] * 3
confussion_matrix3 = [[0] * 3] * 3
confussion_matrix4 = [[0] * 3] * 3
Iris_setosa= []
setosa_train1=[]
setosa_train2=[]
setosa_train3=[]
setosa_train4=[]
setosa_train5=[]
Iris_versicolor=[]
versicolor_train1=[]
versicolor_train2=[]
versicolor_train3=[]
versicolor_train4=[]
versicolor_train5=[]
Iris_virginica=[]
virginica_train1=[]
virginica_train2=[]
virginica_train3=[]
virginica_train4=[]
virginica_train5=[]
maximum_density = np.array([])
Iris=[]
label=["sepal_length", "sepal_width", "petal_length", "petal_width", "class"]
def get_mean_vector(A):
mean_vector=[]
for i in range(Feature_number):
sum=0
for value in A[i]:
sum=sum+ float(value)#accumulate all element in row i
mean_vector.append(float(sum/len(A[i])))#add average value to MEAN_VECTOR
return mean_vector
def norm_pdf_multivariate(x,mu,sigma):
size = len(x)
if size == len(mu) and (size, size) == sigma.shape:
det = np.linalg.det(sigma)
if det == 0:
raise NameError("The covariance matrix can't be singular")
norm_const = 1.0/ ( math.pow((2*3.1416),float(size)/2) * math.pow(det,1.0/2) )
zip_object = zip(x, mu)
x_mu = []
for list1_i, list2_i in zip_object:
x_mu.append(list1_i-list2_i)
numpy_array_x_mu = np.array(x_mu)
inv = np.linalg.inv(sigma)
result = math.pow(math.e, -0.5 * (numpy_array_x_mu .dot( inv ).dot( numpy_array_x_mu.T)))
return norm_const * result
else:
raise NameError("The dimensions of the input don't match")
def data_processing():
X=-1
fn=open("iris.data","r")
for row in csv.DictReader(fn,label):
X=X+1
for i in range(Feature_number):
Iris.append(row[label[i]])
if str(row["class"]) == "Iris-setosa":
if all_Feature== True:
Iris_setosa.append(row[label[i]])
else:
if X%(Training_number/CUS_NUMBER)==0 and len(Iris_setosa)<CUS_NUMBER*4:
Iris_setosa.append(row[label[i]])
elif str(row["class"]) == "Iris-versicolor":
if all_Feature== True:
Iris_versicolor.append(row[label[i]])
else:
if X%(Training_number/CUS_NUMBER)==0 and len(Iris_versicolor)<CUS_NUMBER*4:
Iris_versicolor.append(row[label[i]])
else:
if all_Feature== True:
Iris_virginica.append(row[label[i]])
else:
if X%(Training_number/CUS_NUMBER)==0 and len(Iris_virginica)<CUS_NUMBER*4:
Iris_virginica.append(row[label[i]])
fn.close()
def to_matrix(l, n):
return [l[i:i+n] for i in range(0, len(l), n)]
def data_availablity(test_I):
setosa = Iris_setosa
versicolor = Iris_versicolor
virginica = Iris_virginica
setosa = to_matrix(setosa,4)
#print(setosa)
versicolor = to_matrix(versicolor,4)
virginica = to_matrix(virginica,4)
setosa = [[float(y) for y in x] for x in setosa]
versicolor = [[float(y) for y in x] for x in versicolor]
virginica = [[float(y) for y in x] for x in virginica]
for i in range(50):
if(test_I == setosa[i]):
return 'setosa';
elif(test_I == versicolor[i]):
return 'versicolor'
elif(test_I == virginica[i]):
return 'virginica';
def get_variance(data):
n = len(data)
mean = sum(data) / n
deviations = [(x - mean) ** 2 for x in data]
variance = sum(deviations) / n
return variance
def generate_parzebWindow_confussion_matrix(test_data,train_data,bandWidth,mean_setosa,mean_versicolor,
                             mean_virginica,covMatrix_setosa,covMatrix_versicolor,covMatrix_virginica):
    """Classify each test sample with a Gaussian Parzen-window density
    estimate per class and return the 3x3 confusion matrix.

    Rows are the true class (setosa, versicolor, virginica), columns the
    predicted class.  ``train_data`` supplies the kernel centres,
    ``bandWidth`` the Parzen window width h, and the mean/covariance
    arguments parameterise the Gaussian kernel evaluated by the sibling
    function ``norm_pdf_multivariate``.  True labels come from
    ``data_availablity``; samples it cannot label are skipped, exactly as
    the original elif chain did.
    """
    C_matrix = np.array([[0] * 3] * 3)
    # normalisation 1/(n * h^d): 40 training samples per class, 4 features
    norm_factor = 1 / (40 * pow(bandWidth, 4))
    class_rows = {'setosa': 0, 'versicolor': 1, 'virginica': 2}
    for i in range(len(test_data)):
        # BUGFIX: the per-class kernel sums must restart for every test
        # sample; previously they accumulated across the whole test set,
        # so every density after the first test point was inflated.
        total_setosa = 0.0
        total_versicolor = 0.0
        total_virginica = 0.0
        for j in range(len(train_data)):
            # scaled displacement (x - x_j) / h fed to the Gaussian kernel
            u = [(a - b) / bandWidth for a, b in zip(test_data[i], train_data[j])]
            total_setosa += norm_pdf_multivariate(u, mean_setosa, covMatrix_setosa)
            total_versicolor += norm_pdf_multivariate(u, mean_versicolor, covMatrix_versicolor)
            total_virginica += norm_pdf_multivariate(u, mean_virginica, covMatrix_virginica)
        densities = (norm_factor * total_setosa,
                     norm_factor * total_versicolor,
                     norm_factor * total_virginica)
        # ties resolve to the first (lowest-index) class, matching the
        # original max()/elif ordering
        predicted_col = densities.index(max(densities))
        class_name = data_availablity(test_data[i])
        if class_name in class_rows:
            C_matrix[class_rows[class_name]][predicted_col] += 1
    return C_matrix
if __name__ == "__main__":
data_processing()
for i in range(5):
fold.append(Iris_setosa[k:l])
fold.append(Iris_versicolor[k:l])
fold.append(Iris_virginica[k:l])
k = l
l= l+40
fold = list(chain.from_iterable(fold))
a = (to_matrix(fold,4))
test_x = a[0:30]
train_y = a[30:150]
test_x1 = a[30:60]
train_y1 = a[0:30]
for i in range(60,150):
train_y1.append(a[i])
test_x2 = a[60:90]
train_y2 = a[0:60]
for i in range(90,150):
train_y2.append(a[i])
test_x3 = a[90:120]
train_y3 = a[0:90]
for i in range(120,150):
train_y3.append(a[i])
test_x4 = a[120:150]
train_y4 = a[0:120]
train_y = [[float(y) for y in x] for x in train_y]
test_x = [[float(y) for y in x] for x in test_x]
bandWidth = h[0]
random.shuffle(train_y)
random.shuffle(test_x)
for i in range(len(train_y)):
if(data_availablity(train_y[i])== 'setosa'):
setosa_train1.append(train_y[i])
elif(data_availablity(train_y[i]) == 'versicolor'):
versicolor_train1.append(train_y[i])
else: virginica_train1.append(train_y[i])
setosa_train1 = np.array(setosa_train1)
#mean_setosa_result1 = get_mean_vector(setosa_train1)
mean_setosa_result1 = np.mean(setosa_train1, axis= 0)
variance_setosa1 = np.var(setosa_train1, axis = 0)
covMatrix_setosa_result1 = [[0] * 4] * 4
covMatrix_setosa_result1 = [[0] * len(variance_setosa1) for _ in range(len(variance_setosa1))]
for i, e in enumerate(variance_setosa1):
covMatrix_setosa_result1[i][i] = e
covMatrix_setosa_result1= np.array(covMatrix_setosa_result1)
versicolor_train1 = np.array(versicolor_train1)
#mean_versicolor_result1 = get_mean_vector(versicolor_train1)
mean_versicolor_result1 = np.mean(versicolor_train1,axis =0)
covMatrix_versicolor_result1 = [[0] * 4] * 4
variance_versicolor1= np.var(versicolor_train1, axis = 0)
covMatrix_versicolor_result1 = [[0] * len(variance_versicolor1) for _ in range(len(variance_versicolor1))]
for i, e in enumerate(variance_versicolor1):
covMatrix_versicolor_result1[i][i] = e
covMatrix_versicolor_result1 = np.array(covMatrix_versicolor_result1)
virginica_train1 = np.array(virginica_train1)
#mean_virginica_result1 = get_mean_vector(virginica_train1)
mean_virginica_result1 = np.mean(virginica_train1, axis=0)
covMatrix_virginica_result1= [[0] * 4] * 4
variance_virginica1= np.var(virginica_train1, axis = 0)
covMatrix_virginica_result1 = [[0] * len(variance_virginica1) for _ in range(len(variance_virginica1))]
for i, e in enumerate(variance_virginica1):
covMatrix_virginica_result1[i][i] = e
covMatrix_virginica_result1 = np.array(covMatrix_virginica_result1)
confussion_matrix = np.array(generate_parzebWindow_confussion_matrix(test_x,train_y,bandWidth,mean_setosa_result1,mean_versicolor_result1,
mean_virginica_result1,covMatrix_setosa_result1,
covMatrix_versicolor_result1,covMatrix_virginica_result1))
Sum1 = np.trace(confussion_matrix)
Accuracy1 = (Sum1/30)*100
variance.append(Accuracy1)
print("confussion matrix of first fold:\n",confussion_matrix,"\n")
print("Accuracy of First fold:", Accuracy1,"%")
train_y1 = [[float(y) for y in x] for x in train_y1]
test_x1 = [[float(y) for y in x] for x in test_x1]
random.shuffle(train_y1)
random.shuffle(test_x1)
for i in range(len(train_y1)):
if(data_availablity(train_y1[i])== 'setosa'):
setosa_train2.append(train_y1[i])
elif(data_availablity(train_y1[i]) == 'versicolor'):
versicolor_train2.append(train_y1[i])
else: virginica_train2.append(train_y1[i])
setosa_train2 = np.array(setosa_train2)
#mean_setosa_result2 = get_mean_vector(setosa_train2)
mean_setosa_result2 = np.mean(setosa_train2, axis =0)
variance_setosa2 = np.var(setosa_train2, axis = 0)
covMatrix_setosa_result2 = [[0] * 4] * 4
covMatrix_setosa_result2 = [[0] * len(variance_setosa2) for _ in range(len(variance_setosa2))]
for i, e in enumerate(variance_setosa2):
covMatrix_setosa_result2[i][i] = e
covMatrix_setosa_result2= np.array(covMatrix_setosa_result2)
versicolor_train2 = np.array(versicolor_train2)
#mean_versicolor_result2 = get_mean_vector(versicolor_train2)
mean_versicolor_result2 = np.mean(versicolor_train2, axis=0)
covMatrix_versicolor_result2 = [[0] * 4] * 4
variance_versicolor2= np.var(versicolor_train2, axis = 0)
covMatrix_versicolor_result2 = [[0] * len(variance_versicolor2) for _ in range(len(variance_versicolor2))]
for i, e in enumerate(variance_versicolor2):
covMatrix_versicolor_result2[i][i] = e
covMatrix_versicolor_result2 = np.array(covMatrix_versicolor_result2)
virginica_train2 = np.array(virginica_train2)
#mean_virginica_result2 = get_mean_vector(virginica_train2)
mean_virginica_result2 = np.mean(virginica_train2, axis=0)
covMatrix_virginica_result2= [[0] * 4] * 4
variance_virginica2= np.var(virginica_train2, axis = 0)
covMatrix_virginica_result2 = [[0] * len(variance_virginica2) for _ in range(len(variance_virginica2))]
for i, e in enumerate(variance_virginica2):
covMatrix_virginica_result2[i][i] = e
covMatrix_virginica_result2 = np.array(covMatrix_virginica_result2)
confussion_matrix1 = np.array(generate_parzebWindow_confussion_matrix(test_x1,train_y1,bandWidth,mean_setosa_result2,mean_versicolor_result2,
mean_virginica_result2,covMatrix_setosa_result2,
covMatrix_versicolor_result2,covMatrix_virginica_result2))
Sum2 = np.trace(confussion_matrix1)
Accuracy2 = (Sum2/30)*100
variance.append(Accuracy2)
print("confussion matrix of second fold:\n",confussion_matrix1,"\n")
print("Accuracy of 2nd fold:", Accuracy2,"%")
train_y2 = [[float(y) for y in x] for x in train_y2]
test_x2 = [[float(y) for y in x] for x in test_x2]
random.shuffle(train_y2)
random.shuffle(test_x2)
for i in range(len(train_y2)):
if(data_availablity(train_y2[i])== 'setosa'):
setosa_train3.append(train_y2[i])
elif(data_availablity(train_y2[i]) == 'versicolor'):
versicolor_train3.append(train_y2[i])
else: virginica_train3.append(train_y2[i])
setosa_train3 = np.array(setosa_train3)
#mean_setosa_result3 = get_mean_vector(setosa_train3)
mean_setosa_result3 = np.mean(setosa_train3, axis=0)
variance_setosa3 = np.var(setosa_train3, axis = 0)
covMatrix_setosa_result3 = [[0] * 4] * 4
covMatrix_setosa_result3 = [[0] * len(variance_setosa3) for _ in range(len(variance_setosa3))]
for i, e in enumerate(variance_setosa3):
covMatrix_setosa_result3[i][i] = e
covMatrix_setosa_result3 = np.array(covMatrix_setosa_result3)
versicolor_train3 = np.array(versicolor_train3)
#mean_versicolor_result3 = get_mean_vector(versicolor_train3)
mean_versicolor_result3 = np.mean(versicolor_train3, axis =0)
covMatrix_versicolor_result3 = [[0] * 4] * 4
variance_versicolor3= np.var(versicolor_train3, axis = 0)
covMatrix_versicolor_result3 = [[0] * len(variance_versicolor3) for _ in range(len(variance_versicolor3))]
for i, e in enumerate(variance_versicolor3):
covMatrix_versicolor_result3[i][i] = e
covMatrix_versicolor_result3 = np.array(covMatrix_versicolor_result3)
virginica_train3 = np.array(virginica_train3)
#mean_virginica_result3 = get_mean_vector(virginica_train3)
mean_virginica_result3 = np.mean(virginica_train3, axis=0)
covMatrix_virginica_result3= [[0] * 4] * 4
variance_virginica3= np.var(virginica_train3, axis = 0)
covMatrix_virginica_result3 = [[0] * len(variance_virginica3) for _ in range(len(variance_virginica3))]
for i, e in enumerate(variance_virginica3):
covMatrix_virginica_result3[i][i] = e
covMatrix_virginica_result3 = np.array(covMatrix_virginica_result3)
confussion_matrix2 = np.array(generate_parzebWindow_confussion_matrix(test_x2,train_y2,bandWidth,mean_setosa_result3,mean_versicolor_result3,
mean_virginica_result3,covMatrix_setosa_result3,
covMatrix_versicolor_result3,covMatrix_virginica_result3))
Sum3 = np.trace(confussion_matrix2)
Accuracy3 = (Sum3/30)*100
variance.append(Accuracy3)
print("confussion matrix of 3rd fold:\n",confussion_matrix2,"\n")
print("Accuracy of 3rd fold:", Accuracy3,"%")
train_y3 = [[float(y) for y in x] for x in train_y3]
test_x3 = [[float(y) for y in x] for x in test_x3]
random.shuffle(train_y3)
random.shuffle(test_x3)
for i in range(len(train_y3)):
if(data_availablity(train_y3[i])== 'setosa'):
setosa_train4.append(train_y3[i])
elif(data_availablity(train_y3[i]) == 'versicolor'):
versicolor_train4.append(train_y3[i])
else: virginica_train4.append(train_y3[i])
setosa_train4 = np.array(setosa_train4)
#mean_setosa_result4 = get_mean_vector(setosa_train4)
mean_setosa_result4 = np.mean(setosa_train4, axis =0)
variance_setosa4 = np.var(setosa_train4, axis = 0)
covMatrix_setosa_result4 = [[0] * 4] * 4
covMatrix_setosa_result4 = [[0] * len(variance_setosa4) for _ in range(len(variance_setosa4))]
for i, e in enumerate(variance_setosa4):
covMatrix_setosa_result4[i][i] = e
covMatrix_setosa_result4 = np.array(covMatrix_setosa_result4)
versicolor_train4 = np.array(versicolor_train4)
#mean_versicolor_result4 = get_mean_vector(versicolor_train4)
mean_versicolor_result4 = np.mean(versicolor_train4, axis =0)
covMatrix_versicolor_result4 = [[0] * 4] * 4
variance_versicolor4= np.var(versicolor_train4, axis = 0)
covMatrix_versicolor_result4 = [[0] * len(variance_versicolor4) for _ in range(len(variance_versicolor4))]
for i, e in enumerate(variance_versicolor4):
covMatrix_versicolor_result4[i][i] = e
covMatrix_versicolor_result4 = np.array(covMatrix_versicolor_result4)
virginica_train4 = np.array(virginica_train4)
#mean_virginica_result4 = get_mean_vector(virginica_train4)
mean_virginica_result4 = np.mean(virginica_train4, axis=0)
covMatrix_virginica_result4= [[0] * 4] * 4
variance_virginica4= np.var(virginica_train4, axis = 0)
covMatrix_virginica_result4 = [[0] * len(variance_virginica4) for _ in range(len(variance_virginica4))]
for i, e in enumerate(variance_virginica4):
covMatrix_virginica_result4[i][i] = e
covMatrix_virginica_result4 = np.array(covMatrix_virginica_result4)
confussion_matrix3 = np.array(generate_parzebWindow_confussion_matrix(test_x3,train_y3,bandWidth,mean_setosa_result4,mean_versicolor_result4,
mean_virginica_result4,covMatrix_setosa_result4,
covMatrix_versicolor_result4,covMatrix_virginica_result4))
Sum4 = np.trace(confussion_matrix3)
Accuracy4 = (Sum4/30)*100
variance.append(Accuracy4)
print("confussion matrix of 4th fold:\n",confussion_matrix3,"\n")
print("Accuracy of 4th fold:", Accuracy4,"%")
train_y4 = [[float(y) for y in x] for x in train_y4]
test_x4 = [[float(y) for y in x] for x in test_x4]
random.shuffle(train_y4)
random.shuffle(test_x4)
for i in range(len(train_y4)):
if(data_availablity(train_y4[i])== 'setosa'):
setosa_train5.append(train_y4[i])
elif(data_availablity(train_y4[i]) == 'versicolor'):
versicolor_train5.append(train_y4[i])
else: virginica_train5.append(train_y4[i])
setosa_train5 = np.array(setosa_train5)
#mean_setosa_result5 = get_mean_vector(setosa_train5)
mean_setosa_result5 = np.mean(setosa_train5, axis=0)
variance_setosa5 = np.var(setosa_train5, axis = 0)
covMatrix_setosa_result5 = [[0] * 4] * 4
covMatrix_setosa_result5 = [[0] * len(variance_setosa5) for _ in range(len(variance_setosa5))]
for i, e in enumerate(variance_setosa5):
covMatrix_setosa_result5[i][i] = e
covMatrix_setosa_result5 = np.array(covMatrix_setosa_result5)
versicolor_train5 = np.array(versicolor_train5)
#mean_versicolor_result5 = get_mean_vector(versicolor_train5)
mean_versicolor_result5 = np.mean(versicolor_train5, axis =0)
covMatrix_versicolor_result5 = [[0] * 4] * 4
variance_versicolor5= np.var(versicolor_train5, axis = 0)
covMatrix_versicolor_result5 = [[0] * len(variance_versicolor5) for _ in range(len(variance_versicolor4))]
for i, e in enumerate(variance_versicolor4):
covMatrix_versicolor_result5[i][i] = e
covMatrix_versicolor_result5 = np.array(covMatrix_versicolor_result5)
virginica_train5 = np.array(virginica_train5)
#mean_virginica_result5 = get_mean_vector(virginica_train5)
mean_virginica_result5 = np.mean(virginica_train5, axis=0)
covMatrix_virginica_result5= [[0] * 4] * 4
variance_virginica5= np.var(virginica_train5, axis = 0)
covMatrix_virginica_result5 = [[0] * len(variance_virginica5) for _ in range(len(variance_virginica5))]
for i, e in enumerate(variance_virginica5):
covMatrix_virginica_result5[i][i] = e
covMatrix_virginica_result5 = np.array(covMatrix_virginica_result5)
confussion_matrix4 = np.array(generate_parzebWindow_confussion_matrix(test_x4,train_y4,bandWidth,mean_setosa_result5,mean_versicolor_result5,
mean_virginica_result5,covMatrix_setosa_result5,
covMatrix_versicolor_result5,covMatrix_virginica_result5))
Sum5 = np.trace(confussion_matrix4)
Accuracy5 = (Sum5/30)*100
variance.append(Accuracy5)
print("confussion matrix of 5th fold:\n",confussion_matrix4,"\n")
print("Accuracy of 5th fold:", Accuracy5,"% \n")
Avg_accuracy = (Accuracy1+Accuracy2+Accuracy3+Accuracy4+Accuracy5)/5
Variance = get_variance(variance)
print("Avarage accuracy:",Avg_accuracy,"%" "with variance:",Variance)
| mmSohan/IrisDataSet_Classification_GaussianMultivariant-ParzenWindow_MachineLearning | parzen_window.py | parzen_window.py | py | 20,987 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.det",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_num... |
import collections

# Report script over movie.txt; each line holds one record:
#   title director cost_in_crores year
print("Count number of movies in the file")
with open("movie.txt", "r") as f:
    list_1 = [line.split() for line in f]
print(len(list_1))

print("Add a new movie detail (War Amit 180 2019) at the end of file.")
with open("movie.txt", "a") as f:
    f.write("\nWar Amit 180 2019")

print("Display details of all movies where production cost is more than 80 Crores")
# re-read so the freshly appended record is included
with open("movie.txt", "r") as f:
    list_1 = [line.split() for line in f]
print(list_1)
for movie in list_1:
    # field 2 is the production cost in crores
    if int(movie[2]) > 80:
        print(movie)

print("Display first five movie details.")
for idx in range(5):
    print(list_1[idx])

print("Display director name who has worked in more than two movies.")
# field 1 is the director's name
directors = [movie[1] for movie in list_1]
counts = collections.Counter(directors)
print(counts)
for name in counts:
    # NOTE(review): the prompt text says "more than two movies" but the
    # original condition is > 1 (more than one); behaviour kept as-is —
    # confirm which was intended.
    if counts[name] > 1:
        print(name)
| Hardik121020/Pyhton-Code | File Handling/File3.py | File3.py | py | 882 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 38,
"usage_type": "call"
}
] |
193648592 |
from asyncio.windows_events import NULL
import pygame
class Selector:
    """Triangle marker that highlights the currently selected liquid tube.

    While nothing is selected, ``selectedColor`` is -1 and ``selectedTube``
    is 0.  BUGFIX: the original used NULL imported from the Windows-only
    module ``asyncio.windows_events`` — that constant is just the integer 0,
    so 0 is used directly here (same values, portable; the top-of-file NULL
    import can then be dropped).
    """

    def __init__(self, w, h, x, y):
        # triangle dimensions and position of its top-left corner
        self.width = w
        self.height = h
        self.x = x
        self.y = y
        self.isSelected = False
        self.selectedColor = -1
        self.selectedTube = 0

    def draw(self, surface):
        """Draw the selector as a white downward-pointing triangle."""
        coord = [(self.x, self.y),
                 (self.x + self.width, self.y),
                 (self.x + self.width / 2, self.y + self.height)]
        pygame.draw.polygon(surface, (255, 255, 255), coord)

    def setPos(self, x, y):
        """Centre the selector horizontally on x and float it above y."""
        self.x = x + self.width / 2
        self.y = y - self.height * 2

    def select(self, selection_pair_C_T):
        """Record a (color_index, tube) pair as the current selection.

        Only accepts colour indices 0..13 paired with a Tube.Tube instance
        (checked by class-name string so Tube need not be imported here).
        """
        selected_tube_class_str = str(type(selection_pair_C_T[1]))
        if (selection_pair_C_T[0] >= 0 and selection_pair_C_T[0] < 14
                and selected_tube_class_str == "<class 'Tube.Tube'>"):
            self.isSelected = True
            self.selectedColor, self.selectedTube = selection_pair_C_T

    def unselect(self):
        """Clear the current selection back to the sentinel values."""
        self.isSelected = False
        self.selectedColor, self.selectedTube = (-1, 0)
{
"api_name": "asyncio.windows_events.NULL",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pygame.draw.polygon",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "asyn... |
3320859772 | import scrapy
import json
import csv
from ..items import FoodyItem
OUTPUT_DIRECTORY = "/Users/user/Desktop/Crawl/foody/OUTPUT/foody_output.json"
class CrawlfoodySpider(scrapy.Spider):
    """Crawl foody.vn restaurant listings and scrape reviews that carry
    pictures, chaining four endpoints per review.

    Flow: listing page -> restaurant page -> review list -> review scores
    -> reviewer info; each callback passes its accumulated dict along via
    meta['item'].  Crawling winds down once roughly 4000 picture-bearing
    reviews have been seen or 100 listing pages have been paged through.
    """
    name = 'crawlFoody'
    allowed_domains = ['www.foody.vn']
    # running count of picture-bearing reviews collected so far
    total_comment = 0
    # current listing page; incremented at the end of parse_comment
    num_of_page = 1
    # listing endpoint; {} is the page number
    url = 'https://www.foody.vn/__get/Place/HomeListPlace?t=1631867059243&page={}&lat=10.823099&lon=106.629664&count=12&districtId=21&cateId=&cuisineId=&isReputation=&type=1'
    def start_requests(self):
        # Kick off with the first listing page.
        yield scrapy.Request(self.url.format(self.num_of_page),callback=self.parse)
    def parse(self, response):
        # One listing page: queue every restaurant that has a detail URL.
        if response.status == 200 and self.total_comment <= 4000:
            res = response.json()
            for i in res['Items']:
                item = {'ResId': i['Id'], 'ResName': i['Name']}
                if i['Url'] is not None:
                    yield scrapy.Request(url='https://www.foody.vn'+i['Url'], callback=self.parse_res, meta={'item': item})
    def parse_res(self,response):
        # Restaurant page: scrape address and aggregate scores, then queue
        # the first batch of reviews for this restaurant.
        item = response.meta.get('item')
        resid = item['ResId']
        url = 'https://www.foody.vn/__get/Review/ResLoadMore?t=1632416315325&Type=1&fromOwner=&isLatest=true&ExcludeIds=&ResId=' + str(
            resid) + '&Count=20'
        score = response.css('div.microsite-top-points span ::text').getall()
        # NOTE(review): assumes the points widget always exposes exactly 5
        # spans ordered position/price/food/atmosphere/service — confirm.
        resinfo = {
            'ResId': item['ResId'],
            'ResName': item['ResName'],
            'streetAddress': response.css('span[itemprop="streetAddress"] ::text').get(),
            'district': response.css('span[itemprop="addressLocality"] ::text').get(),
            'region': response.css('span[itemprop="addressRegion"] ::text').get(),
            'Res_rating': response.css('div[itemprop="ratingValue"] ::text').get().strip("\r\n "),
            'Res_pos_score': score[0],
            'Res_price_score': score[1],
            'Res_food_score': score[2],
            'Res_atmosphere_score': score[3],
            'Res_services_score': score[4],
        }
        # print(response.css('div.res-common-info div.disableSection span[itemprop="streetAddress"] ::text').get())
        yield scrapy.Request(url, callback=self.parse_comment,meta={'item':resinfo})
    def parse_comment(self, response):
        # Review list for one restaurant: keep only reviews with pictures,
        # then advance to the next listing page.
        reviews = response.json()
        # print(len(reviews))
        meta1 = response.meta.get('item')
        print('_____LEN______',len(reviews['Items']))
        for review in reviews['Items']:
            if len(review['Pictures']) > 0:
                self.total_comment += 1
                item = {
                    # 'ResId': response.meta.get('ResId'),
                    'RevId': review['Id'],
                    'UserId': review['Owner']['Id'],
                    'UserName': review['Owner']['DisplayName'],
                    'Rating': review['AvgRating'],
                    'Comment': review['Description'],
                    'image_urls': [picture['Url'] for picture in review['Pictures']]
                }
                item.update(meta1)
                url = 'https://www.foody.vn/__get/Review/GetReviewInfo?reviewId={}'
                # print(review.css('div.review-user a::attr(data-user)').get())
                yield scrapy.Request(url.format(item['RevId']) ,callback=self.parse_comment_score, meta={'item': item})
                # print(item)
        print('------Total----', self.total_comment)
        self.num_of_page += 1
        # if self.num_of_page <= 10:
        if self.num_of_page < 100:
            yield scrapy.Request(self.url.format(self.num_of_page),callback=self.parse)
    def parse_comment_score(self,response):
        # Per-review detail: attach the five per-aspect scores.
        item = response.meta.get('item')
        comment_attr = response.json()
        item2 = {
            'Food_score_cmt': comment_attr['Food'],
            'Services_score_cmt': comment_attr['Services'],
            'Atmosphere_score_cmt': comment_attr['Atmosphere'],
            'Position_score_cmt': comment_attr['Position'],
            'Price_score_cmt': comment_attr['Price']
        }
        item.update(item2)
        url = 'https://www.foody.vn/__get/Review/GetUserInfoReview?userId={}'
        yield scrapy.Request(url.format(item['UserId']),callback=self.parse_user_info, meta={'item': item})
    def parse_user_info(self,response):
        # Reviewer profile: attach follower/review counts and emit the item.
        item = response.meta.get('item')
        user_info = response.json()
        item3 = {
            'Total_reviews': user_info['TotalReviews'],
            'Followers': user_info['TotalFollowers']
        }
        item.update(item3)
        # food_item = FoodyItem()
        # for key in item:
        #     food_item[key] = item[key]
        yield item
    def close(self, reason):
        # Log the crawl's total wall-clock time when the spider shuts down.
        start_time = self.crawler.stats.get_value('start_time')
        finish_time = self.crawler.stats.get_value('finish_time')
        print("Total run time: ", finish_time - start_time)
| LENGHIA-CN8/FoodyCrawl | foody/spiders/crawlFoody.py | crawlFoody.py | py | 4,850 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
... |
74845408103 | import logging
from datetime import date
from flask.app import Flask
def get_logger(name=__name__, level=logging.INFO):
    """Return a configured logger, attaching a console handler only once.

    Repeated calls with the same *name* reuse the already-configured
    logger instead of stacking duplicate handlers.
    """
    log = logging.getLogger(name)
    if log.handlers:
        return log
    log.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(
        "%(name)s %(levelname)s [%(filename)s:%(lineno)s - %(funcName)s] %(message)s"
    ))
    log.addHandler(handler)
    return log
def get_app_rules(app: Flask):
    """Describe every URL rule registered on *app*.

    Returns a list of {"endpoint", "methods", "name"} dicts sorted by the
    rule string; "methods" is a comma-joined, alphabetically sorted string
    (empty when the rule declares no methods).
    """
    described = [
        {
            "endpoint": rule.endpoint,
            "methods": ",".join(sorted(rule.methods)) if rule.methods else "",
            "name": str(rule),
        }
        for rule in app.url_map.iter_rules()
    ]
    described.sort(key=lambda entry: entry["name"])
    return described
def date_today_str() -> str:
    """Return the current date as an ISO-8601 string (YYYY-MM-DD)."""
    return date.today().isoformat()
| carneirofc/utility-api | application/common/utils.py | utils.py | py | 1,104 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.INFO",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.DEBU... |
32476914350 | import sys
import csv
import sqlite3
# creates db if one does not exist
# conn = sqlite3.connect('some.db')
class CSVTool(object):
    """Minimal helper that dumps the rows of a CSV file into a SQLite table."""

    def __init__(self, path=None):
        # BUGFIX: the constructor used to discard its *path* argument
        # (it assigned None unconditionally).
        self.path = path
        self.csv_name = None
        self.db = None

    def dump_to_db(self, csv_path, db_path='csi.db'):
        """Create table ``csi(ID)`` in *db_path* and insert one row per CSV
        data row, using the reader's line number as the ID.

        Args:
            csv_path: CSV file to read (first line is treated as a header
                by csv.DictReader).
            db_path: SQLite database file; defaults to the previously
                hard-coded 'csi.db' for backward compatibility.
        """
        self.db = sqlite3.connect(db_path)
        cur = self.db.cursor()
        # IF NOT EXISTS so a second dump does not raise on table creation
        cur.execute('CREATE TABLE IF NOT EXISTS csi(ID int PRIMARY KEY NOT NULL)')
        with open(csv_path) as csv_file:
            reader = csv.DictReader(csv_file)
            for _row in reader:
                # BUGFIX: the original left a pdb.set_trace() breakpoint in
                # place and executed an empty SQL string, so nothing was
                # ever inserted.  Parameterized insert of the line number.
                cur.execute('INSERT INTO csi (ID) VALUES (?)',
                            (int(reader.line_num),))
        self.db.commit()
        self.db.close()
if __name__ == '__main__':
    # Demo entry point: expects a 'csi.csv' file in the working directory
    # and writes one row per CSV data line into the local 'csi.db' SQLite
    # database.
    ct = CSVTool()
    ct.dump_to_db('csi.csv')
| cmhedrick/GalenDBTool | csvToDB.py | csvToDB.py | py | 1,047 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pdb.set_trace",
"line_number": 25,
"usage_type": "call"
}
] |
# -*- coding: utf-8 -*-
'''
Plot f(x) = e^x - 2 on [0, 2] and overlay the midpoint-rule rectangles
that approximate the area under the curve.
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import math

num = 4  # number of approximating rectangles
x = np.linspace(0, 2, 1000)
y = np.power(np.e, x) - 2

fig = plt.figure(figsize=(8, 8))
x1 = np.linspace(0, 2, num + 1)  # left edges of the rectangles
width = 2.0 / num
ax1 = fig.add_subplot(111, aspect='equal')
for xn in range(num):
    # Generalisation: evaluate the curve at the rectangle midpoint
    # (width/2) instead of the hard-coded 0.25, so changing `num` still
    # yields midpoint rectangles (identical for num = 4).
    mid_height = np.power(np.e, x1[xn] + width / 2) - 2
    ax1.add_patch(patches.Rectangle((x1[xn], 0), width, mid_height))
# BUGFIX: legend label previously read 'e^2-2'; the curve is e^x - 2
plt.plot(x, y, label='e^x-2', color='red', linewidth=2)
plt.show()
| coolban/somemath | plotf3.py | plotf3.py | py | 584 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linspace",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.e",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
... |
13988017107 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from skfmm import travel_time, distance
from scipy.interpolate import interp1d
import os
from utils import plot_2d_image
from math import log10
plt.style.use('ggplot')
def transform_normal_scores(scores, nscore):
    """Map normal-score values back to the data scale.

    *nscore* is a table (e.g. a DataFrame) with columns 'nscore' (standard
    normal quantiles) and 'x' (the corresponding data values); *scores*
    are interpolated linearly through that table.  Scores outside the
    tabulated normal-score range are not handled here and raise inside
    interp1d, just as before.
    """
    back_transform = interp1d(nscore['nscore'], nscore['x'])
    return back_transform(scores)
def linear_taper(n, inds=(0,-1), vals=(0.0,1.0) ):
    """Build a length-*n* array of taper coefficients.

    Coefficients ramp linearly from vals[0] at index inds[0] to vals[1]
    at index inds[1]; every other entry stays 1.0.  With the default
    inds=(0, -1) the ramp slope is computed over the full length *n*.

    Args:
        n (int): length of the taper array.
        inds (tuple): (start, stop) indexes of the ramp.
        vals (tuple): coefficient values at the ramp endpoints.

    Returns:
        taper (ndarray): coefficients of the linear taper.
    """
    import numpy as np

    positions = np.arange(n)
    taper = np.ones(n)
    rise = vals[1] - vals[0]
    run = n if inds == (0, -1) else inds[1] - inds[0]
    gradient = rise / run
    offset = vals[0] - gradient * inds[0]
    taper[inds[0]:inds[1]] = gradient * positions[inds[0]:inds[-1]] + offset
    return taper
def boundary_taper( field, taper_width=10, free_surface=True, values=0 ):
    """Linearly taper *field* toward *values* along its boundaries.

    A mask of ones is padded back out to field.shape with numpy's
    'linear_ramp' pad mode, so coefficients fall from 1.0 to *values*
    over *taper_width* samples at each tapered edge.

    Args:
        field (2d ndarray): rupture field to taper.
        taper_width (int): width of the tapered boundary strip.
        free_surface (bool): when True the top edge (free surface) is
            tapered as well; when False it is left untouched.
        values: end value(s) for the ramp, in any form accepted by
            np.pad's end_values argument.

    Returns:
        ndarray: tapered field with the same shape as *field*.
    """
    rows, cols = field.shape
    top_pad = taper_width if free_surface else 0
    interior = np.ones((rows - top_pad - taper_width, cols - 2 * taper_width))
    mask = np.pad(interior,
                  ((top_pad, taper_width), (taper_width, taper_width)),
                  'linear_ramp',
                  end_values=values)
    assert field.shape == mask.shape
    return field * mask
def main():
plot_on = True
tapering = True
writing = True
layered = False
src_dir = './source_models/'
output_name = 'sokrg-bbp_source1'
out_dir = './source_models/source1'
if not os.path.isdir( out_dir ):
os.makedirs( out_dir )
params = {
'nx' : 273,
'nz' : 136,
'dx' : 100,
'ihypo' : (120, 136),
'fault_top' : 0,
'avg_slip' : 0.74,
'std_slip' : 0.42,
'avg_psv' : 1.42,
'std_psv' : 0.7,
'avg_vrup' : 0.79,
'std_vrup' : 0.04,
}
# read normal score transforms
slip_sc = pd.read_csv('slip_nscore_transform_table.csv')
psv_sc = pd.read_csv('psv_nscore_transform_table.csv')
vrup_sc = pd.read_csv('vrup_nscore_transform_table.csv')
# extract data
slip_sim1 = np.fromfile(src_dir + 'slip_sim1.bin').reshape(params['nz'], params['nx'])
psv_sim1 = np.fromfile(src_dir + 'psv_sim1.bin').reshape(params['nz'], params['nx'])
vrup_sim1 = np.fromfile(src_dir + 'vrup_sim1.bin').reshape(params['nz'], params['nx'])
if layered:
material = np.loadtxt("bbp1d_1250_dx_25.asc")[params['fault_top']:params['fault_top'] + params['nz'], :]
vp = material[:,1]*1e3
vs = material[:,2]*1e3
rho = material[:,3]*1e3
vs = np.repeat(vs, params['nx']).reshape(params['nz'], params['nx'])
rho = np.repeat(rho, params['nx']).reshape(params['nz'], params['nx'])
else:
vs = 3464*np.ones((params['nz'], params['nx']))
rho = 2700*np.ones((params['nz'], params['nx']))
# cut size of model down for computational ease
slip = slip_sim1[:-1, :-1]
psv = psv_sim1[:-1, :-1]
# psv=(psv-psv.mean())/psv.std()
vrup = vrup_sim1[:-1, :-1]
vs = vs[:-1, :-1]
rho = rho[:-1, :-1]
# update parameters
params['nx'] = params['nx'] - 1
params['nz'] = params['nz'] - 1
if tapering:
# transform from normal-scores
slip = transform_normal_scores(slip, slip_sc)
psv = transform_normal_scores(psv, psv_sc)
vrup = transform_normal_scores(vrup, vrup_sc)
# according to xu et. al, 2016 for landers
# taper = linear_taper( slip_sim1.shape[0], inds=(0, int(4000/params['dx'])), vals = (0.8, 1.0) )
# slip = np.repeat(taper, params['nx']).reshape(params['nz'], params['nx']) * (slip_sim1 * params['std_slip'] + params['avg_slip'])
# taper = linear_taper( psv_sim1.shape[0], inds=(0, int(4000/params['dx'])), vals = (0.5, 1.0) )
# psv = np.repeat(taper, params['nx']).reshape(params['nz'], params['nx']) * (psv_sim1 * params['std_psv'] + params['avg_psv'])
#vrup_norm = vrup_sim1 * params['std_vrup'] + params['avg_vrup']
#taper = linear_taper( vrup_norm.shape[0], inds=(0, int(4000/params['dx'])), vals = (0.2, 1.0) )
#vrup = np.repeat(taper, params['nx']).reshape(params['nz'], params['nx']) * vrup_norm * vs
# from simulations, slip tapers larger
taper_width = 37
slip = boundary_taper(slip,
taper_width=taper_width,
free_surface=True,
values=np.array(((0.60, 0.05), (0.05,0.05))) )
# taper to 30% of mean along-strike psv at z = taper_width * dx
taper_width = 12
ny,nx=psv.shape
baseline = np.ones( (ny-4*taper_width, nx-2*taper_width) )
padded = np.pad( baseline,
((3*taper_width,taper_width), (taper_width,taper_width)),
'linear_ramp',
end_values=np.array(((0.30, 0.05), (0.05,0.05))) )
psv = padded * psv
vrup = vrup * vs
# else:
# slip = slip_sim1 * params['std_slip'] + params['avg_slip']
# psv = psv_sim1 * params['std_psv'] + params['avg_psv']
# vrup_norm = vrup_sim1 * params['std_vrup'] + params['avg_vrup']
# vrup = vrup_norm * vs
trup = compute_trup(vrup, params)
if plot_on:
# print(f'slip: min, max ({slip.min():.2f}, {slip.max():.2f})')
# print(f'psv: min, max ({psv.min():.2f}, {psv.max():.2f})')
# print(f'vrup: min, max ({vrup.min():.2f}, {vrup.max():.2f})')
# print(f'trup: min, max ({trup.min():.2f}, {trup.max():.2f})')
x = np.arange(0, params['nx'])
z = np.arange(0, params['nz'])
plotting_data = {'data':slip, 'contour':trup}
plot_2d_image( plotting_data, "slip-" + output_name + ".pdf" , nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = "Slip (m)", xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = True, contour = True, clim=(0, slip.max()), cmap='jet' )
plot_2d_image( psv, "psv-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = r'$V^{peak}$ (m/s)', xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour = False, clim=(0, psv.max()), cmap='jet' )
plot_2d_image( trup, "trup-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = "Trup (s)", xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour = True, clim=(0,20) )
plot_2d_image( vrup/vs, "vrup-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = r'$V_{rup}/c_s$', xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour = False, cmap='viridis', clim=(0, 1.0) )
plot_2d_image( vs, "vs-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = r'$V_{rup}/c_s$', xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour = False, cmap='jet' )
# generate strike, dip, and rake
nhat1 = np.fromfile("nhat1", "f").reshape(801,2601)
nhat2 = np.absolute(np.fromfile("nhat2", "f").reshape(801,2601))
nhat3 = np.fromfile("nhat3", "f").reshape(801,2601) # make vector point "up"
# NOTE: starting at x=1000 to reduce model size for small model
nhat1 = nhat1[::4, ::4]
nhat2 = nhat2[::4, ::4]
nhat3 = nhat3[::4, ::4]
print(nhat1.shape)
print(nhat2.shape)
print(nhat3.shape)
#fienen "the three-point problem"
# project onto horizontal plane, calculate angle between
dip = get_dip(nhat1, nhat2, nhat3)
strike = get_strike(nhat1, nhat3)
#rake = np.ones(strike.shape)*180.0 # constant rake
rake = strike - 90 # strike is 270 and rake is 180
# compute moment
moment = get_moment(slip, vs, rho, params)
print('moment')
print(moment.sum())
print(2./3 * (log10(moment.sum()) - 9.1))
# write to file for input
if writing:
dtype = '<f4'
print('writing files...')
# start at 1000 to reduce the size of the source simulation.
vs.astype(dtype).tofile(os.path.join(out_dir, output_name + '_vs.bin'))
rho.astype(dtype).tofile(os.path.join(out_dir, output_name + '_rho.bin'))
slip.astype(dtype).tofile(os.path.join(out_dir, output_name + '_slip.bin'))
psv.astype(dtype).tofile(os.path.join(out_dir, output_name + '_psv.bin'))
vrup.astype(dtype).tofile(os.path.join(out_dir, output_name + '_vrup.bin'))
trup.astype(dtype).tofile(os.path.join(out_dir, output_name + '_trup.bin'))
strike.astype(dtype).tofile(os.path.join(out_dir, output_name + '_strike.bin'))
dip.astype(dtype).tofile(os.path.join(out_dir, output_name + '_dip.bin'))
rake.astype(dtype).tofile(os.path.join(out_dir, output_name + '_rake.bin'))
moment.astype(dtype).tofile(os.path.join(out_dir, output_name + '_moment.bin'))
"""
Helping functions.
"""
def get_dip(nhat1, nhat2, nhat3):
    """Compute fault dip (degrees) from fault-normal components.

    Dip is 90 degrees minus the angle between the normal vector
    (nhat1, nhat2, nhat3) and its projection onto the horizontal x-z plane.
    Vectorized replacement for the original per-cell double loop; returns
    an array with the same shape as the inputs.
    """
    nhat1 = np.asarray(nhat1, dtype=float)
    nhat2 = np.asarray(nhat2, dtype=float)
    nhat3 = np.asarray(nhat3, dtype=float)
    proj_norm = np.sqrt(nhat1 ** 2 + nhat3 ** 2)
    full_norm = np.sqrt(nhat1 ** 2 + nhat2 ** 2 + nhat3 ** 2)
    # cos(angle) = <n, nproj> / (|n| |nproj|) = (n1^2 + n3^2) / (|nproj| |n|)
    arg = (nhat1 ** 2 + nhat3 ** 2) / (proj_norm * full_norm)
    # Snap values that are numerically ~1 so arccos does not produce NaN,
    # matching the original's np.isclose special case.
    arg = np.where(np.isclose(arg, 1.0), 1.0, arg)
    theta = np.rad2deg(np.arccos(arg))
    return 90.0 - theta
def get_moment(slip, vs, rho, params):
    """Seismic moment per cell: rigidity (vs^2 * rho) times cell area times slip."""
    rigidity = vs * vs * rho
    dx = params['dx']
    cell_area = dx * dx
    return rigidity * cell_area * slip
def get_strike(nhat1, nhat3):
    # Estimate fault strike (degrees) cell-by-cell from the horizontal
    # components of the fault normal, resolving the quadrant explicitly.
    nz,nx = nhat1.shape
    strike = np.ones([nz,nx])  # cells matching no quadrant branch keep the initial value 1
    for i in range(nz):
        for j in range(nx):
            nproj = (nhat1[i,j], 0, nhat3[i,j])  # normal projected onto the horizontal plane
            x3 = (1,0,0)  # reference axis for the strike angle
            norm = lambda v: np.sqrt(v[0]**2+v[1]**2+v[2]**2)
            scaling = 1.0 / ( norm(x3) * norm( nproj) )
            # NOTE(review): this multiplies the *angle* arccos(nhat3) by the
            # normalization factor; the usual formula would be
            # arccos(scaling * <x3, nproj>). Looks suspicious -- confirm
            # against the intended strike convention before relying on it.
            theta = np.rad2deg(scaling * np.arccos(nproj[2]))
            if nhat1[i,j] > 0 and nhat3[i,j] > 0:
                strike[i,j] = 270 + theta
            elif nhat1[i,j] < 0 and nhat3[i,j] > 0:
                strike[i,j] = 270 - theta
            elif nhat1[i,j] < 0 and nhat3[i,j] < 0:
                # in 3rd quad
                strike[i,j] = 270 - theta
            elif nhat1[i,j] > 0 and nhat3[i,j] < 0:
                # in 4th quad
                strike[i,j] = theta - 90
    return strike
def source_time_function():
    # Placeholder -- not implemented yet.
    pass
def compute_trup(vrup, params):
    # Rupture-time field: propagate from the hypocenter at speed vrup.
    # `travel_time` is imported outside this view -- presumably
    # skfmm.travel_time (fast-marching eikonal solver); TODO confirm.
    phi = np.ones( (params['nz'], params['nx']) ) #* params['dx']
    print(phi.shape)
    ihypo = params['ihypo']
    # Negative phi marks the source node for the solver's zero level set.
    phi[ ihypo[0], ihypo[1] ] = -1
    trup = travel_time( phi, speed=vrup, dx=params['dx'] )
    return np.array(trup)
# Entry point: run the full source-generation pipeline when executed directly.
if __name__ == "__main__":
    main()
| wsavran/sokrg | generate_scale_truncated.py | generate_scale_truncated.py | py | 12,819 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_na... |
32057438346 | from aiogram.types import Message, ReplyKeyboardRemove
from aiogram.dispatcher.filters.builtin import Text
from aiogram.dispatcher import FSMContext
from states.StateStart import StateStart
from loader import dp
@dp.message_handler(Text(equals="Mukammal Telegram Bot"),state=StateStart.Kurs)
async def MTBKurs(msg: Message, state: FSMContext):
    """Reply with the 'Mukammal Telegram Bot' course card, then leave the Kurs state."""
    card = "\n\n".join((
        "Kurs nomi: Mukammal Telegram Bot",
        "Kurs muallifi: Anvar Narzullayev",
        "Kurs narxi: 247.000 so'm",
        "Kurs manzili:https://praktikum.mohirdev.uz/dashboard/practicums/645264c869d972027cd033c3",
    ))
    await msg.answer(card, reply_markup=ReplyKeyboardRemove())
    await state.finish()
@dp.message_handler(Text(equals="C++ Asoslari"),state=StateStart.Kurs)
async def CppKurs(msg: Message, state: FSMContext):
    """Reply with the 'C++ Asoslari' course card, then leave the Kurs state."""
    card = "\n\n".join((
        "Kurs nomi: C++ Asoslari",
        "Kurs muallifi: Qudrat Abduraximov",
        "Kurs narxi: 1.000.000 so'm",
        "Kurs manzili:https://praktikum.mohirdev.uz/dashboard/practicums/645264a369d972027cd02963/purchase",
    ))
    await msg.answer(card, reply_markup=ReplyKeyboardRemove())
    await state.finish()
@dp.message_handler(Text(equals="Python Asoslari"),state=StateStart.Kurs)
async def PythonKurs(msg: Message, state: FSMContext):
    """Reply with the 'Python Asoslari' course card, then leave the Kurs state."""
    card = "\n\n".join((
        "Kurs nomi: Python Asoslari",
        "Kurs muallifi: Anvar Narzullayev",
        "Kurs narxi: Bepul",
        "Kurs manzili:https://praktikum.mohirdev.uz/dashboard/practicums/645264af69d972027cd02d52/free",
    ))
    await msg.answer(card, reply_markup=ReplyKeyboardRemove())
    await state.finish()
@dp.message_handler(Text(equals="Professional Node.JS"),state=StateStart.Kurs)
async def NodeJsKurs(msg: Message, state: FSMContext):
    """Reply with the 'Professional Node.JS' course card, then leave the Kurs state."""
    card = "\n\n".join((
        "Kurs nomi: Professional Node.JS",
        "Kurs muallifi: Behruz Xolmominov",
        "Kurs narxi: 247.000 so'm",
        "Kurs manzili:https://praktikum.mohirdev.uz/dashboard/practicums/645264bd69d972027cd030f4/purchase",
    ))
    await msg.answer(card, reply_markup=ReplyKeyboardRemove())
    await state.finish()
@dp.message_handler(Text(equals="Django3: Python Full stack"),state=StateStart.Kurs)
async def DjangoFsKurs(msg: Message, state: FSMContext):
    """Reply with the 'Django3: Python Full stack' course card, then leave the Kurs state."""
    card = "\n\n".join((
        "Kurs nomi: Django3: Python Full stack",
        "Kurs muallifi: Anvar Narzullayev",
        "Kurs narxi: Bepul",
        "Kurs manzili:https://praktikum.mohirdev.uz/dashboard/practicums/645264bf69d972027cd03180/free",
    ))
    await msg.answer(card, reply_markup=ReplyKeyboardRemove())
    await state.finish()
@dp.message_handler(Text(equals="Python: Ma'lumotlar tuzilmasi"),state=StateStart.Kurs)
async def PythonMtKurs(msg: Message, state: FSMContext):
    """Reply with the "Python: Ma'lumotlar tuzilmasi" course card, then leave the Kurs state."""
    card = "\n\n".join((
        "Kurs nomi: Python: Ma'lumotlar tuzilmasi",
        "Kurs muallifi: Anvar Narzullayev",
        "Kurs narxi: Bepul",
        "Kurs manzili:https://praktikum.mohirdev.uz/dashboard/practicums/645264b769d972027cd02ea8/free",
    ))
    await msg.answer(card, reply_markup=ReplyKeyboardRemove())
    await state.finish()
@dp.message_handler(Text(equals="PostgreSQL"),state=StateStart.Kurs)
async def PSQLKurs(msg: Message, state: FSMContext):
    """Reply with the 'PostgreSQL' course card, then leave the Kurs state."""
    card = "\n\n".join((
        "Kurs nomi: PostgreSQL",
        "Kurs muallifi: Behruz Xolmominov",
        "Kurs narxi: Bepul",
        "Kurs manzili:https://praktikum.mohirdev.uz/dashboard/practicums/645264ad69d972027cd02d02/free",
    ))
    await msg.answer(card, reply_markup=ReplyKeyboardRemove())
    await state.finish()
@dp.message_handler(Text(equals="Yii2 Framework"),state=StateStart.Kurs)
async def Yii2Kurs(msg: Message, state: FSMContext):
    """Reply with the 'Yii2 Framework' course card, then leave the Kurs state."""
    card = "\n\n".join((
        "Kurs nomi: Yii2 Framework",
        "Kurs muallifi: Sardor Dushamov",
        "Kurs narxi: Bepul",
        "Kurs manzili:https://praktikum.mohirdev.uz/dashboard/practicums/645264ac69d972027cd02cc0/free",
    ))
    await msg.answer(card, reply_markup=ReplyKeyboardRemove())
    await state.finish()
@dp.message_handler(Text(equals="Django Rest Framework"),state=StateStart.Kurs)
async def DjangoKurs(msg: Message, state: FSMContext):
    """Reply with the 'Django Rest Framework' course card, then leave the Kurs state.

    Consistency fix: every other course handler separates the card fields
    with a blank line; this one ran them together on single newlines.
    """
    message = ("Kurs nomi: Django Rest Framework\n"
               "\n"
               "Kurs muallifi: Muhammad Ertmatov\n"
               "\n"
               "Kurs narxi: Bepul\n"
               "\n"
               "Kurs manzili:https://praktikum.mohirdev.uz/dashboard/practicums/64526477ecc3657ffcec10e5/free")
    await msg.answer(message, reply_markup=ReplyKeyboardRemove())
    await state.finish()
| ozodbekernazarov6642/mohirdev.uz_bot | handlers/users/KursBackendHendler.py | KursBackendHendler.py | py | 5,008 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiogram.types.Message",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "aiogram.dispatcher.FSMContext",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "aiogram.types.ReplyKeyboardRemove",
"line_number": 19,
"usage_type": "call"
},
{
... |
20003378974 | import os
import re
import requests
import logging
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
# Deployment settings for the webhook (Heroku-style environment).
# NOTE: the bracket lookups raise KeyError at import time if the
# TELEGRAM_BOT_API_KEY or HEROKU_APP variables are missing.
PORT = int(os.environ.get('PORT', 5000))
SECRET_KEY = os.environ['TELEGRAM_BOT_API_KEY']
HEROKU_APP = os.environ['HEROKU_APP']
# Definimos algunos comandos básicos que funcionaran como handlers.
def saludo(update, context):
    """Handle /start and /saludo: introduce the bot to the user."""
    responder = update.message.reply_text
    responder('Hola! Soy el BOT de Análisis Matemático! Sirvo como un ayuda-memoria para la cursada')
    responder('Estoy desarrollado por estudiantes de la universidad, no soy una herramienta oficial de la misma ni de la cátedra de Análisis Matemático.')
    responder('Para más información, revisá [mi repositorio en github](https://github.com/matiasnoriega/bot-telegram-analisis)', parse_mode='MarkdownV2')
def factoreo(update, context):
    """Send the table of the most common factoring cases as a photo."""
    # Use a context manager so the file handle is closed; the original
    # leaked the descriptor returned by open().
    with open('assets/factoreo.png', 'rb') as img:
        update.message.reply_photo(photo=img)
def resolvente(update, context):
    """Send the quadratic-formula (resolvente) reference image.

    The original docstring was copy-pasted from factoreo; also closes the
    file handle that open() previously leaked.
    """
    with open('assets/resolvente.png', 'rb') as img:
        update.message.reply_photo(photo=img)
def ayuda(update, context):
    """Handle /ayuda: list the commands the bot understands."""
    respuesta = 'Probá el comando /factoreo para recibir los casos más comunes de factoreo o /derivar funcion para ver la derivada de f(x)!'
    update.message.reply_text(respuesta)
def derivadas(update, context):
    """Send the derivatives table, or the image for the requested rule.

    Accepts an optional argument: suma, resta, division or multiplicacion.
    Uses a dict dispatch instead of the original elif chain, and closes the
    image file handles that open() previously leaked.
    """
    caso = context.args[0] if context.args else None
    imagenes = {
        None: 'assets/tabla_derivadas.png',
        'suma': 'assets/derivadas_suma.png',
        'resta': 'assets/derivadas_resta.png',
        'division': 'assets/derivadas_division.png',
        'multiplicacion': 'assets/derivadas_multiplicacion.png',
    }
    ruta = imagenes.get(caso)
    if ruta is None:
        # Unknown option: same hint message as before.
        update.message.reply_text('Intenta de nuevo con alguna de las opciones: suma, resta, division o multiplicacion o vacio para la tabla de derivadas!')
        return
    with open(ruta, 'rb') as img:
        update.message.reply_photo(photo=img)
def derivar(update, context):
    """Match f(x) against the known base functions and reply with its derivative.

    The derivative is rendered to an image through the latex2png web API.
    Robustness fix: /derivar with no argument used to crash with IndexError
    on context.args[0]; now it replies with a usage hint instead.
    """
    if not context.args:
        update.message.reply_text('Usá el comando como /derivar funcion, por ejemplo /derivar 3x')
        return
    # Regex patterns for the supported base functions -> their derivatives.
    patron_funciones = {
        '^[0-9]+$': '0',    # constant -> 0
        'x$': '1',          # identity -> 1
        '^[0-9]+x$': 'kx',  # k*x -> k (coefficient resolved below)
    }
    funcion = context.args[0]
    resultado = None
    for patron in patron_funciones:
        if re.compile(patron).match(funcion):
            resultado = patron_funciones[patron]
            break
    if not resultado:
        update.message.reply_text('No encontré una función base que coincida con ' + context.args[0])
        return
    if resultado == 'kx':
        # The derivative of k*x is k: extract the numeric coefficient.
        m = re.compile("([0-9]+)(x)").match(funcion)
        resultado = m.group(1)
    # Request body for the latex2png rendering API.
    payload = "{\n\"auth\": {\n\"user\": \"guest\",\n\"password\": \"guest\"\n},\n\"latex\": \"%(latex)s\" ,\n\"resolution\": 600,\n\"color\": \"111111\"\n}"%{'latex': resultado}
    headers = {
        'Content-Type': 'application/json'
    }
    r = requests.request("POST", 'http://latex2png.com/api/convert', headers=headers, data=payload)
    json_response = r.json()
    img_url = json_response["url"]
    update.message.reply_photo('http://latex2png.com' + img_url)
def error(update, context):
    """Log errors raised while the Updater processes an update."""
    logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
    """Start the bot: register handlers and serve updates over a webhook."""
    # Build the updater with the bot token and grab its dispatcher.
    updater = Updater(SECRET_KEY, use_context=True)
    disp = updater.dispatcher
    # Table-driven registration: simple commands first...
    for nombre, handler in (("start", saludo),
                            ("saludo", saludo),
                            ("ayuda", ayuda),
                            ("resolvente", resolvente),
                            ("factoreo", factoreo)):
        disp.add_handler(CommandHandler(nombre, handler))
    # ...then the commands that receive arguments.
    for nombre, handler in (("derivadas", derivadas), ("derivar", derivar)):
        disp.add_handler(CommandHandler(nombre, handler, pass_args=True))
    # Log every error raised by the Updater.
    disp.add_error_handler(error)
    # Serve updates via webhook and keep running until SIGINT/SIGTERM/SIGABRT.
    updater.start_webhook(listen="0.0.0.0",
                          port=int(PORT),
                          url_path=SECRET_KEY)
    updater.bot.setWebhook(
        HEROKU_APP + SECRET_KEY)
    updater.idle()
# Run the bot when executed directly.
if __name__ == '__main__':
    main()
| matiasnoriega/bot-telegram-analisis | bot.py | bot.py | py | 5,873 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ.get... |
11226132142 | from datetime import timedelta
from django.utils import timezone
from celery import shared_task
from django.contrib.auth.models import User
from .models import ReadingStatistics, ReadingSession
@shared_task
def update_reading_statistics():
    """Celery task: recompute each user's total reading hours over the last 7 and 30 days."""
    print("Завантаження даних...")

    def _total_hours(session_qs):
        # Sum of (end - start) over the sessions, expressed in hours.
        return sum(
            (s.end_time - s.start_time).total_seconds() / 3600 for s in session_qs
        )

    for user in User.objects.all():
        now = timezone.now()
        seven_days_ago = now - timedelta(days=7)
        thirty_days_ago = now - timedelta(days=30)

        # Only sessions that have actually finished count toward the totals.
        finished = ReadingSession.objects.filter(user=user, end_time__isnull=False)
        last_7 = finished.filter(end_time__gte=seven_days_ago, end_time__lte=now)
        last_30 = finished.filter(end_time__gte=thirty_days_ago, end_time__lte=now)

        # Upsert the per-user statistics row.
        stats, _created = ReadingStatistics.objects.get_or_create(user=user)
        stats.total_reading_7_days = _total_hours(last_7)
        stats.total_reading_30_days = _total_hours(last_30)
        stats.save()
| RomanovDanii1/Books | api/tasks.py | tasks.py | py | 2,075 | python | uk | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.models.User.objects.all",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 16,... |
# Streamlit page: upload a sputum-smear image and run TB bacteria detection.
import streamlit as st
from PIL import Image
# NOTE(review): `process` is imported twice; this first binding is immediately
# shadowed by the `torchdetect` import below, so detection.process is unused.
from detection import process
import numpy as np
from torchdetect import process
import os
st.set_page_config(
    page_title="Image Detection",
    layout="wide",
    initial_sidebar_state="expanded"
)
st.title('Tuberculosis Detection from sputum sample')
st.markdown("## inference using torch.hub")
uploaded_file = st.file_uploader(label="Choose a file", type=['jpg', 'jpeg','png'])
idebar = st.sidebar  # NOTE(review): likely a typo for `sidebar`; the name is never used
conf_level = st.sidebar.slider("conf_level", min_value=0.0, max_value=1.0, value=0.55)  # detection confidence threshold
IoU = st.sidebar.slider("IoU", min_value=0.0, max_value=1.0, value=0.45)  # IoU threshold passed to the detector
if uploaded_file is not None:
    image = Image.open(uploaded_file).convert('RGB')
    col1, col2 = st.columns([0.5, 0.5])
    #Col 1
    with col1:
        st.markdown('<p style="text-align: center;">Input Image</p>', unsafe_allow_html=True)
        st.image(image,caption="Sputum sample")
    # Persist the upload to disk so the detector can read it by path.
    imgpath = os.path.join('data/uploads', uploaded_file.name)
    with open(imgpath, mode="wb") as f:
        f.write(uploaded_file.getbuffer())
    with col2:
        st.markdown('<p style="text-align: center;">Detected Image</p>', unsafe_allow_html=True)
        img,total,runtime = process(imgpath,conf_level,IoU)
        st.image(img,caption="Tuberculosis detected")
        st.markdown(f'<p style="text-align: center;">Bacteria detected : {total}</p>', unsafe_allow_html=True)
        st.markdown(f'<p style="text-align: center;">{runtime}</p>', unsafe_allow_html=True)
    # with col2:
    #     st.markdown('<p style="text-align: center;">Detected Image</p>', unsafe_allow_html=True)
    #     image_arr,total = process(np.array(image))
    #     st.image(Image.fromarray(image_arr),caption="Tuberculosis detected")
    #     st.markdown(f'<p style="text-align: center;">Bacteria detected : {total}</p>', unsafe_allow_html=True)
| irfanheru66/Tuberculosis-Detector | pages/page_2.py | page_2.py | py | 1,882 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "streamlit... |
5535184380 | import sys
import os
import json
import threading
# Repository root: parent directory of the one this script is run from.
BASE_DIR = os.getcwd()+"/.."
# Suffix appended after the serialized JSON when writing configs (currently empty).
EOF = ""
def setPlatformCfg(env):
    """Rewrite stays-platform/config.json with its 'env' field set to `env`."""
    path = BASE_DIR + "/stays-platform/config.json"
    with open(path) as fh:
        config = json.load(fh)
    config['env'] = env
    with open(path, "w") as fh:
        fh.write(json.dumps(config, indent=2) + EOF)
def setUiCfg(env):
    """Rewrite stays-ui/config.json, setting both the top-level and platform 'env' fields."""
    path = BASE_DIR + "/stays-ui/config.json"
    with open(path, 'r') as fh:
        config = json.load(fh)
    config['env'] = env
    config['platform']['env'] = env
    with open(path, "w") as fh:
        fh.write(json.dumps(config, indent=2) + EOF)
def build():
    # Build all docker-compose service images from the repository root.
    os.chdir(BASE_DIR)
    os.system("docker-compose build")
def tag(env, service):
    """Tag the locally built image for the given environment's GCR registry."""
    os.chdir(BASE_DIR)
    os.system(f"docker tag stays_{service} gcr.io/stays-{env}/stays-{service}")
def push(env, service):
    """Push the tagged image to the environment's GCR registry."""
    os.chdir(BASE_DIR)
    os.system(f"docker push gcr.io/stays-{env}/stays-{service}")
def deploy(env, service):
    """Deploy the pushed image to Cloud Run in us-central1."""
    os.chdir(BASE_DIR)
    os.system(f"gcloud run deploy stays-{service} --image gcr.io/stays-{env}/stays-{service} --project stays-{env} --region us-central1")
def fullDeploy(env, service):
    """Tag, push, and deploy one service end to end."""
    for step in (tag, push, deploy):
        step(env, service)
if __name__ == "__main__":
    # argv[0] is always the script name, so a real argument means len >= 2.
    # (The original checked `< 1`, which can never be true, so a missing
    # argument crashed with IndexError on sys.argv[1] below.)
    if len(sys.argv) < 2:
        print("Please specify environment")
        exit(-1)
    env = sys.argv[1]
    if env != "dev" and env != "prod":
        print("Invalid environment!")
        exit(-1)
    # Update both config files, build the images once, then tag/push/deploy
    # the two services in parallel threads.
    setPlatformCfg(env)
    setUiCfg(env)
    build()
    t1 = threading.Thread(target=fullDeploy, args=(env, "platform"))
    t2 = threading.Thread(target=fullDeploy, args=(env, "ui"))
    t1.start()
    t2.start()
| nickrunner/stays | scripts/deploy.py | deploy.py | py | 1,806 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 25,
... |
41239382060 | # coding:utf-8
import sys
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
sys.path.append('../synthesize_blur')
import viz_flow as viz
# Default input pair and Lucas-Kanade tracker configuration.
data_dir = "./"
old_path = data_dir + "1.jpeg"
new_path = data_dir + "2.jpeg"
# Keep matches whose |dx| and |dy| both fall inside [gap[0], gap[1]] pixels.
gap = [0, 100]
# Shi-Tomasi corner detection parameters (cv2.goodFeaturesToTrack).
feature_params = dict(maxCorners=100000,
                      qualityLevel=0.001,
                      minDistance=10,
                      blockSize=7)
# Pyramidal Lucas-Kanade parameters (cv2.calcOpticalFlowPyrLK).
lk_params = dict(winSize=(15, 15),
                 maxLevel=3,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
def find_last(s, t):
    """Return the index of the last occurrence of t in s, or -1 if absent.

    The original hand-rolled loop reimplemented str.rfind, including the
    -1-on-miss convention; delegate to the built-in instead.
    """
    return s.rfind(t)
def viz_lk(flow, path):
    # Visualize the flow magnitude: normalize |u|+|v| to [0,255], smooth,
    # coarsen with 7x7 max-pooling, smooth again, save the figure to `path`,
    # and return the coarse map.
    H, W, _ = flow.shape
    f = np.abs(flow[:, :, 0]) + np.abs(flow[:, :, 1])  # L1 flow magnitude
    f = (f - f.min()) / (f.max() - f.min()+1e-8)  # min-max normalize; eps guards /0
    f *= 255
    f = cv2.GaussianBlur(f, (11, 11), 0)
    f = torch.FloatTensor(f).view(1, 1, H, W)  # NCHW layout for max_pool2d
    f = F.max_pool2d(f, kernel_size=7, stride=7)  # downsample by 7 in each axis
    f = f.squeeze().numpy()
    f = cv2.GaussianBlur(f, (5,5), 0)
    plt.imshow(f)
    plt.savefig(path)
    #plt.show()
    plt.clf()
    return f
def cal_lk(old_path, new_path):
    # Track sparse Lucas-Kanade optical flow between two frames, draw the
    # matched tracks, save a flow visualization and displacement histograms
    # next to `new_path`.
    old_img = cv2.imread(old_path)
    H, W, _ = old_img.shape
    old_gray = cv2.cvtColor(old_img, cv2.COLOR_BGR2GRAY)
    new_img = cv2.imread(new_path)
    new_gray = cv2.cvtColor(new_img, cv2.COLOR_BGR2GRAY)
    print(old_img.shape)
    color = np.random.randint(0, 255, (100000, 3))  # one random color per track
    mask = np.zeros_like(old_img)  # canvas for the track lines
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    p1, st, err = cv2.calcOpticalFlowPyrLK(
        old_gray, new_gray, p0, None, **lk_params)
    # select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    good = good_new - good_old  # per-point displacement vectors
    flow = np.zeros((H, W, 2))
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
        gapx = abs(a-c)
        gapy = abs(b-d)
        # Keep only displacements inside the configured [gap[0], gap[1]] band.
        if gapx >= gap[0] and gapy >= gap[0] and gapx <= gap[1] and gapy <= gap[1]:
            mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
            frame = cv2.circle(new_img, (a, b), 5, color[i].tolist(), -1)
            flow[int(d)-1, int(c)-1] = np.array((d-b, c-a))
    # NOTE(review): if no point passes the gap filter, `frame` is never
    # assigned and the next line raises UnboundLocalError.
    img = cv2.add(frame, mask)
    flag = find_last(new_path, '.')
    path = new_path[:flag] + '_flow.png'
    viz_lk(flow, path)
    # plot
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    #'''
    plt.subplot(221)
    plt.imshow(img)
    plt.subplot(222)
    plt.imshow(mask, cmap=plt.cm.gray)
    plt.subplot(223)
    #'''
    good_x = good[:, 0]
    good_y = good[:, 1]
    # Clip displacement outliers to mean +/- 3 sigma before histogramming.
    good_x = good_x.clip(good_x.mean()-3*good_x.std(), good_x.mean()+3*good_x.std())
    good_y = good_y.clip(good_y.mean()-3*good_y.std(), good_y.mean()+3*good_y.std())
    n1, bins1, patches1 = plt.hist(
        good_x, bins=100, range=(good_x.min(), good_x.max()), density=False, facecolor='green', alpha=0.75)
    plt.subplot(224)
    n2, bins2, patches2 = plt.hist(
        good_y, bins=100, range=(good_y.min(), good_y.max()), density=False, facecolor='blue', alpha=0.75)
    plt.savefig(new_path[:flag] + '_total.png')
    #'''
    # Normalized side-by-side bar chart of x/y displacement distributions.
    width = 0.02
    fig, ax = plt.subplots()
    n1 /= n1.sum()
    n2 /= n2.sum()
    rects1 = ax.bar(bins1[:-1], n1, width,
                    label="x displacement", lw=1, alpha=0.4, facecolor='orange')
    rects2 = ax.bar(bins2[:-1], n2, width,
                    label="y displacement", lw=1, alpha=0.4, facecolor='green')
    bin_min = min(bins1.min(), bins2.min())
    rects3 = ax.bar(bin_min, 0.3, width, lw=1, facecolor='blue', alpha=0.4, label='reference')
    plt.legend(loc="upper left")
    #'''
    #plt.show()
    plt.savefig(new_path[:flag] + '_hist.png')
    plt.clf()
def main(data_list):
    """Run cal_lk on every 'old_path new_path' pair listed in data_list.

    Robustness fix: the original stripped the last character with pair[:-1],
    which corrupts the final path when the file lacks a trailing newline,
    and crashed on blank lines; strip whitespace and skip empties instead.
    """
    with open(data_list, 'r') as f:
        paths = f.readlines()
    count = 0
    for pair in paths:
        pair = pair.strip()
        if not pair:
            continue
        old_path, new_path = pair.split(' ')
        cal_lk(old_path, new_path)
        print(old_path)
        count += 1
# Entry point: process the default pair list when executed directly.
if __name__ == '__main__':
    #data_list = './motion_trans.txt'
    data_list = './motion.txt'
    main(data_list)
| MingmChen/burst-deghost-deblur | code/utils/cal_lk.py | cal_lk.py | py | 4,325 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cv2.TERM_CRITERIA_EPS",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "cv2.TERM_CRITE... |
16961930308 | import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
# input parameter
den = 8880.0
cp = 386.0
cond = 398.0
temp_bc = 100.0
temp_init = 0.0
lx = 1.0
nx = 101
tend = 20000.0
dt = 0.1
tout = 100.0
alpha = cond / (den * cp)
dx = lx / (nx - 1)
nt = int(tend / dt)
nout = int(tout / dt)
#initial condition
temp = np.full(nx, temp_init)
time = 0.0
temp_new = np.zeros(nx)
# Boundary condition
temp[0] = temp_bc # Dirichlet @ x=0
temp[nx-1] = temp[nx-2] # Neumann @ x=Lx
# graph data array
ims = []
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
gx = np.zeros(nx)
for i in range(nx):
gx[i] = i * dx
# time loop
for n in range(1, nt+1):
# FTCS
for i in range(1, nx-1):
temp_new[i] = temp[i] + dt * alpha * (temp[i+1] - 2.0 * temp[i] + temp[i-1]) / (dx * dx)
# update
for i in range(1, nx-1):
temp[i] = temp_new[i]
# Boundary condition
temp[0] = temp_bc # Dirichlet @ x=0
temp[nx-1] = temp[nx-2] # Neumann @ x=Lx
time += dt
if n % nout == 0:
print('n: {0:7d}, time: {1:8.1f}, temp: {2:10.6f}'.format(n, time, temp[nx-1]))
im_line = ax.plot(gx, temp, 'b')
im_time = ax.text(0, 110, 'Time = {0:8.1f} [s]'.format(time))
ims.append(im_line + [im_time])
# graph plot
ax.set_xlabel('x [m]')
ax.set_ylabel('Temperature [C]')
ax.grid()
anm = animation.ArtistAnimation(fig, ims, interval=50)
anm.save('animation.gif', writer='pillow')
plt.show()
| cattech-lab/lecture3_fdm_thermal | thermal_1d_ftcs.py | thermal_1d_ftcs.py | py | 1,474 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.full",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
72809322024 | from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.submodel_kind import SubmodelKind
from ..types import UNSET, Unset
if TYPE_CHECKING:
from ..models.administrative_information import AdministrativeInformation
from ..models.embedded_data_specification import EmbeddedDataSpecification
from ..models.extension import Extension
from ..models.lang_string_name_type import LangStringNameType
from ..models.lang_string_text_type import LangStringTextType
from ..models.qualifier import Qualifier
from ..models.reference import Reference
from ..models.submodel_element import SubmodelElement
T = TypeVar("T", bound="Submodel")
@attr.s(auto_attribs=True)
class Submodel:
"""
Attributes:
submodel_elements (Union[Unset, List['SubmodelElement']]):
kind (Union[Unset, SubmodelKind]):
supplemental_semantic_ids (Union[Unset, List['Reference']]):
id (Union[Unset, str]):
administration (Union[Unset, AdministrativeInformation]):
category (Union[Unset, str]):
extensions (Union[Unset, List['Extension']]):
qualifiers (Union[Unset, List['Qualifier']]):
display_name (Union[Unset, List['LangStringNameType']]):
description (Union[Unset, List['LangStringTextType']]):
id_short (Union[Unset, str]):
embedded_data_specifications (Union[Unset, List['EmbeddedDataSpecification']]):
semantic_id (Union[Unset, Reference]):
"""
submodel_elements: Union[Unset, List["SubmodelElement"]] = UNSET
kind: Union[Unset, SubmodelKind] = UNSET
supplemental_semantic_ids: Union[Unset, List["Reference"]] = UNSET
id: Union[Unset, str] = UNSET
administration: Union[Unset, "AdministrativeInformation"] = UNSET
category: Union[Unset, str] = UNSET
extensions: Union[Unset, List["Extension"]] = UNSET
qualifiers: Union[Unset, List["Qualifier"]] = UNSET
display_name: Union[Unset, List["LangStringNameType"]] = UNSET
description: Union[Unset, List["LangStringTextType"]] = UNSET
id_short: Union[Unset, str] = UNSET
embedded_data_specifications: Union[Unset, List["EmbeddedDataSpecification"]] = UNSET
semantic_id: Union[Unset, "Reference"] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
submodel_elements: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.submodel_elements, Unset):
submodel_elements = []
for submodel_elements_item_data in self.submodel_elements:
submodel_elements_item = submodel_elements_item_data.to_dict()
submodel_elements.append(submodel_elements_item)
kind: Union[Unset, str] = UNSET
if not isinstance(self.kind, Unset):
kind = self.kind.value
supplemental_semantic_ids: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.supplemental_semantic_ids, Unset):
supplemental_semantic_ids = []
for supplemental_semantic_ids_item_data in self.supplemental_semantic_ids:
supplemental_semantic_ids_item = supplemental_semantic_ids_item_data.to_dict()
supplemental_semantic_ids.append(supplemental_semantic_ids_item)
id = self.id
administration: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.administration, Unset):
administration = self.administration.to_dict()
category = self.category
extensions: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.extensions, Unset):
extensions = []
for extensions_item_data in self.extensions:
extensions_item = extensions_item_data.to_dict()
extensions.append(extensions_item)
qualifiers: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.qualifiers, Unset):
qualifiers = []
for qualifiers_item_data in self.qualifiers:
qualifiers_item = qualifiers_item_data.to_dict()
qualifiers.append(qualifiers_item)
display_name: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.display_name, Unset):
display_name = []
for display_name_item_data in self.display_name:
display_name_item = display_name_item_data.to_dict()
display_name.append(display_name_item)
description: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.description, Unset):
description = []
for description_item_data in self.description:
description_item = description_item_data.to_dict()
description.append(description_item)
id_short = self.id_short
embedded_data_specifications: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.embedded_data_specifications, Unset):
embedded_data_specifications = []
for embedded_data_specifications_item_data in self.embedded_data_specifications:
embedded_data_specifications_item = embedded_data_specifications_item_data.to_dict()
embedded_data_specifications.append(embedded_data_specifications_item)
semantic_id: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.semantic_id, Unset):
semantic_id = self.semantic_id.to_dict()
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if submodel_elements is not UNSET:
field_dict["submodelElements"] = submodel_elements
if kind is not UNSET:
field_dict["kind"] = kind
if supplemental_semantic_ids is not UNSET:
field_dict["supplementalSemanticIds"] = supplemental_semantic_ids
if id is not UNSET:
field_dict["id"] = id
if administration is not UNSET:
field_dict["administration"] = administration
if category is not UNSET:
field_dict["category"] = category
if extensions is not UNSET:
field_dict["extensions"] = extensions
if qualifiers is not UNSET:
field_dict["qualifiers"] = qualifiers
if display_name is not UNSET:
field_dict["displayName"] = display_name
if description is not UNSET:
field_dict["description"] = description
if id_short is not UNSET:
field_dict["idShort"] = id_short
if embedded_data_specifications is not UNSET:
field_dict["embeddedDataSpecifications"] = embedded_data_specifications
if semantic_id is not UNSET:
field_dict["semanticId"] = semantic_id
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
    """Build an instance from a plain JSON-style dict (inverse of ``to_dict``)."""
    # Local imports avoid circular imports between the generated model modules.
    from ..models.administrative_information import AdministrativeInformation
    from ..models.embedded_data_specification import EmbeddedDataSpecification
    from ..models.extension import Extension
    from ..models.lang_string_name_type import LangStringNameType
    from ..models.lang_string_text_type import LangStringTextType
    from ..models.qualifier import Qualifier
    from ..models.reference import Reference
    from ..models.submodel_element import SubmodelElement
    # Work on a copy so the caller's dict is not mutated; every recognised key
    # is pop()ed off, and whatever remains becomes ``additional_properties``.
    d = src_dict.copy()
    submodel_elements = []
    _submodel_elements = d.pop("submodelElements", UNSET)
    # ``or []`` also skips UNSET / None / empty payloads.
    for submodel_elements_item_data in _submodel_elements or []:
        submodel_elements_item = SubmodelElement.from_dict(submodel_elements_item_data)
        submodel_elements.append(submodel_elements_item)
    _kind = d.pop("kind", UNSET)
    kind: Union[Unset, SubmodelKind]
    if isinstance(_kind, Unset):
        kind = UNSET
    else:
        kind = SubmodelKind(_kind)
    supplemental_semantic_ids = []
    _supplemental_semantic_ids = d.pop("supplementalSemanticIds", UNSET)
    for supplemental_semantic_ids_item_data in _supplemental_semantic_ids or []:
        supplemental_semantic_ids_item = Reference.from_dict(supplemental_semantic_ids_item_data)
        supplemental_semantic_ids.append(supplemental_semantic_ids_item)
    id = d.pop("id", UNSET)
    _administration = d.pop("administration", UNSET)
    administration: Union[Unset, AdministrativeInformation]
    if isinstance(_administration, Unset):
        administration = UNSET
    else:
        administration = AdministrativeInformation.from_dict(_administration)
    category = d.pop("category", UNSET)
    extensions = []
    _extensions = d.pop("extensions", UNSET)
    for extensions_item_data in _extensions or []:
        extensions_item = Extension.from_dict(extensions_item_data)
        extensions.append(extensions_item)
    qualifiers = []
    _qualifiers = d.pop("qualifiers", UNSET)
    for qualifiers_item_data in _qualifiers or []:
        qualifiers_item = Qualifier.from_dict(qualifiers_item_data)
        qualifiers.append(qualifiers_item)
    display_name = []
    _display_name = d.pop("displayName", UNSET)
    for display_name_item_data in _display_name or []:
        display_name_item = LangStringNameType.from_dict(display_name_item_data)
        display_name.append(display_name_item)
    description = []
    _description = d.pop("description", UNSET)
    for description_item_data in _description or []:
        description_item = LangStringTextType.from_dict(description_item_data)
        description.append(description_item)
    id_short = d.pop("idShort", UNSET)
    embedded_data_specifications = []
    _embedded_data_specifications = d.pop("embeddedDataSpecifications", UNSET)
    for embedded_data_specifications_item_data in _embedded_data_specifications or []:
        embedded_data_specifications_item = EmbeddedDataSpecification.from_dict(
            embedded_data_specifications_item_data
        )
        embedded_data_specifications.append(embedded_data_specifications_item)
    _semantic_id = d.pop("semanticId", UNSET)
    semantic_id: Union[Unset, Reference]
    if isinstance(_semantic_id, Unset):
        semantic_id = UNSET
    else:
        semantic_id = Reference.from_dict(_semantic_id)
    submodel = cls(
        submodel_elements=submodel_elements,
        kind=kind,
        supplemental_semantic_ids=supplemental_semantic_ids,
        id=id,
        administration=administration,
        category=category,
        extensions=extensions,
        qualifiers=qualifiers,
        display_name=display_name,
        description=description,
        id_short=id_short,
        embedded_data_specifications=embedded_data_specifications,
        semantic_id=semantic_id,
    )
    # All keys not consumed above are preserved as free-form extras.
    submodel.additional_properties = d
    return submodel
@property
def additional_keys(self) -> List[str]:
    """Names of the properties that were not part of the known schema."""
    return [*self.additional_properties]
def __getitem__(self, key: str) -> Any:
    """Dict-style read access to the extra (non-schema) properties."""
    extras = self.additional_properties
    return extras[key]
def __setitem__(self, key: str, value: Any) -> None:
    """Dict-style write access to the extra (non-schema) properties."""
    self.additional_properties.update({key: value})
def __delitem__(self, key: str) -> None:
    """Remove an extra property; raises KeyError when the key is absent."""
    self.additional_properties.pop(key)
def __contains__(self, key: str) -> bool:
    """True when *key* is stored among the extra properties."""
    extras = self.additional_properties
    return key in extras
| sdm4fzi/aas2openapi | ba-syx-submodel-repository-client/ba_syx_submodel_repository_client/models/submodel.py | submodel.py | py | 11,684 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.TypeVar",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"li... |
20645490044 | from my_socket import *
# from myMath import *
import inspect
import myMath
from myMath import *
from my_inspect import *
import logging
def send_functions():
    """Send the registered functions to the client as 'name;info;index' lines."""
    global func_list
    global server
    lines = [
        ';'.join([entry['name'], entry['info'], str(idx)])
        for idx, entry in enumerate(func_list)
    ]
    server.send('\n'.join(lines))
def send_objects():
    """Send the tracked variables to the client as 'repr;type;index' lines."""
    global vars_list
    global server
    lines = [
        ';'.join([str(var), str(type(var)), str(idx)])
        for idx, var in enumerate(vars_list)
    ]
    server.send('\n'.join(lines))
def send_checked_objects():
    """Dispatch a 'co:' inspection request from the client.

    Message formats:
      co:index                  -> describe an existing variable
      co:index.attribute        -> promote an object attribute to a variable
      co:index.list_name.index  -> promote a list element to a variable
    """
    global server
    request = server.data.decode().split(':')[1].split('.')
    handlers = {
        1: check_preexisting_object,
        2: create_var_from_attribute,
        3: create_var_from_list_attribute,
    }
    handler = handlers.get(len(request))
    if handler is not None:
        handler(request)
def check_preexisting_object(check_var_info_list):
    """Describe an existing variable to the client.

    Reply format: ``name;type;attr_name:attr_type,value(s);...`` where
    attr_type is 'list', 'func' or 'other'; functions are included so the
    client can show them in its function view.

    Fixes: ``attr == None`` replaced with ``attr is None`` (identity check,
    robust against custom ``__eq__``); the stray debug ``print`` now goes
    through ``logging.debug`` like the rest of the module.
    """
    global vars_list
    global server
    var_index = check_var_info_list[0]
    var = vars_list[int(var_index)]
    logging.debug("Checking %s" % (str(var)))
    var_attrs = []
    for attr_name, attr in var.__dict__.items():
        attr_type = 'list' if type(attr) == list else 'other'
        if attr is None:
            attr_values = "None"
        elif type(attr) == list:
            attr_values = ','.join([str(i) for i in attr])
        else:
            attr_values = str(attr)
        var_attrs.append("%s:%s,%s" % (attr_name, attr_type, attr_values))
    for func_name in get_functions(var):
        # functions advertise their own name as the value
        var_attrs.append("%s:%s,%s" % (func_name, 'func', func_name))
    var_info = "%s;%s;%s" % (str(var), str(type(var)), ';'.join(var_attrs))
    logging.debug(var_info)  # was a bare print(); keep the trace in the log
    server.send(var_info)
def create_var_from_attribute(check_var_info_list):
    """Promote ``parent.attr`` to a tracked variable; reply with its new index."""
    global vars_list
    global server
    parent = vars_list[int(check_var_info_list[0])]
    attr_name = check_var_info_list[1]
    vars_list.append(getattr(parent, attr_name))
    server.send(str(len(vars_list) - 1))
def create_var_from_list_attribute(check_var_info_list):
    """Promote ``parent.list_name[index]`` to a tracked variable; reply with its index."""
    global vars_list
    global server
    parent_index, list_name, element_index = check_var_info_list
    parent = vars_list[int(parent_index)]
    element = getattr(parent, list_name)[int(element_index)]
    vars_list.append(element)
    server.send(str(len(vars_list) - 1))
def get_func_list():
    """Collect the public classes/functions of myMath into func_list (and vars_list)."""
    global func_list
    func_list = []
    for name, member in inspect.getmembers(myMath):
        if name.startswith("__"):
            continue  # skip dunders / module internals
        if inspect.isclass(member) or inspect.isfunction(member):
            func_list.append({
                'name': name,
                'func': member,
                # collapse the docstring to a single line for the wire format
                'info': ' '.join(str(member.__doc__).strip().split('\n')),
            })
            vars_list.append(member)
def create_string():
    """Placeholder for client-driven string creation (not implemented yet)."""
    pass
def call_function():
    """Execute a function requested by the client.

    Message formats (after the ``cf:`` prefix):
      ``<func_index>:<param_indices>``              top-level function
      ``<obj_index>.<method_name>:<param_indices>`` method on a tracked object
    Parameter tokens that are not valid vars_list indices are passed through
    as raw strings.  A non-None result is appended to vars_list.

    Fix: ``new_var != None`` replaced with ``new_var is not None`` so results
    whose classes override ``__ne__`` are still stored.
    """
    global server
    function_message = server.data.decode()[3:]
    function_info, parameters_index = function_message.split(":")
    function_info_list = function_info.split(".")
    logging.debug("Function Info: %s" % (function_info))
    logging.debug("Parameter Info: %s" % (parameters_index))
    if len(function_info_list) == 1:
        function = func_list[int(function_info_list[0])]['func']
    elif len(function_info_list) == 2:
        obj_index, function_name = function_info_list
        function = getattr(vars_list[int(obj_index)], function_name)
    logging.debug("function: %s" % (str(function)))
    parameter_list = []
    if parameters_index != '':
        for p in parameters_index.split(","):
            try:
                parameter_list.append(vars_list[int(p)])
            except ValueError:
                parameter_list.append(p)  # not an index -> pass the raw token
    logging.debug("parameters: %s" % (', '.join([str(i) for i in parameter_list])))
    new_var = function(*parameter_list)
    if new_var is not None:
        vars_list.append(new_var)
    server.send("success")
# func_list = [
# {
# 'name': 'find inner conclusion',
# 'func': find_inner_conclusion,
# 'info': 'Takes two parameters: statement1 and statement2 and return a deduced statement if there is one.'
# },
# {
# 'name': 'find conclusion',
# 'func': find_conclusion,
# 'info': 'Takes two parameters: statement1 and statement2 and return a deduced statement if there is one.'
# },
# {
# 'name': 'Addition',
# 'func': Addition,
# 'info': 'Takes two parameters: a and b, returns a+b.'
# },
# {
# 'name': 'Multiplication',
# 'func': Multiplication,
# 'info': 'Takes two parameters: a and b, returns a*b.'
# },
# ]
# Registered callables (filled by get_func_list() at startup).
func_list = []
# vars_list = []
# Seed the variable pool with a small symbolic-math demo scene.
vars_list = [
    Variable('a'),
    Variable('b'),
    # Multiplication(vars_list[0], vars_list[1]),
]
t = Variable('t')
x = Substraction(t, Power(t, 2))
y = Substraction(t, Power(t, 3))
d = Derivative(y, x)
d.find_parametric_form(t)
d = d.equivalences[-1]
d.var1.general_prop_deduction()
d.var2.general_prop_deduction()
vars_list.extend([t, d, x, y])
logging.basicConfig(filename='example.log',level=logging.DEBUG)
# Blocking single-client server; the Unity front-end connects here.
server = socket_server('localhost', 50000)
server.accept_client()
logging.info('client accepted')
get_func_list()
# Main protocol loop: block on the socket and dispatch on the message prefix.
while 1:
    try:
        server.receive_data()
        if server.data == b'get functions':
            logging.debug("Starting to send functions.")
            send_functions()
        elif server.data == b'get objects':
            logging.debug("Starting to send objects.")
            send_objects()
        elif server.data.decode().startswith("co:"):
            logging.debug("Starting to send checked objects.")
            send_checked_objects()
        elif server.data.decode().startswith("cf:"):  # Call the function
            logging.debug("Starting to call a function.")
            call_function()
    except KeyboardInterrupt:
        server.close_client()
{
"api_name": "logging.debug",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "inspect.getmembers",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "inspect.isclass",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "inspect.isfunction... |
31058313098 | from operator import itemgetter
import networkx as nx
import prefect
from sklearn.metrics import roc_auc_score
logger = prefect.context.get("logger")
def compute_centrality_metrics(G):
    """Return (closeness, degree, betweenness, eigenvector) centrality dicts for G."""
    centrality = nx.centrality
    return (
        centrality.closeness_centrality(G),
        centrality.degree_centrality(G),
        centrality.betweenness_centrality(G),
        centrality.eigenvector_centrality(G),
    )
def evaluate_roc_auc(clf, link_features, link_labels):
    """ROC-AUC of *clf* on *link_features* against binary *link_labels*."""
    scores = clf.predict_proba(link_features)
    # locate the column of the positive (label == 1) class
    positive_column = list(clf.classes_).index(1)
    return roc_auc_score(link_labels, scores[:, positive_column])
def node_with_largest_degree(G):
    """Return the (node, degree) pair with the highest degree.

    Ties resolve to the pair yielded last by ``G.degree()`` (stable sort).
    """
    ranked = sorted(G.degree(), key=lambda pair: pair[1])
    return ranked[-1]
def connected_components(G):
    """Return (all connected components, largest component) for graph G.

    The components are materialised into a list: the original returned the
    nx generator after ``max()`` had already exhausted it, so callers
    received an empty iterator.  The stray debug ``print`` of the generator
    repr is dropped.
    """
    components = list(nx.connected_components(G))
    largest_component = max(components, key=len)
    return components, largest_component
| ryankarlos/networks_algos | networks/models/metrics.py | metrics.py | py | 1,184 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "prefect.context.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "prefect.context",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "networkx.centrality.closeness_centrality",
"line_number": 11,
"usage_type": "call"
},
{
"a... |
16523777795 | from django.core import urlresolvers
from django.core.mail import send_mail
from django.conf import settings
from django.contrib.comments.moderation import CommentModerator, moderator
from django.contrib.sites.models import Site
from akismet import Akismet
AKISMET_KEY = getattr(settings, "AKISMET_KEY", None)
class BlogdorModerator(CommentModerator):
    """Moderate blog comments: every comment starts non-public and is
    spam-checked against Akismet.  The (currently disabled) email hook would
    notify the post author about comments pending approval."""

    email_notification = False         # Django's built-in notification mail is off
    enable_field = 'comments_enabled'  # per-post flag that enables commenting

    def email(self, comment, content_object, request):
        """Build the moderation mail; the actual send_mail call is disabled."""
        from_email = "bounce@%s" % Site.objects.get_current().domain
        subject = "Comment on %s pending your approval" % content_object.title
        appr_link = 'http://%s%s' % (Site.objects.get_current().domain, urlresolvers.reverse('comments-approve', args=(comment.id,)) )
        message = '\n\n'.join((comment.get_as_text(), appr_link))
        recipient_email = content_object.author.email
        #send_mail(subject, message, from_email, (recipient_email,), fail_silently=False)

    def moderate(self, comment, content_object, request):
        """Run the Akismet spam check and hold the comment for approval."""
        a = Akismet(AKISMET_KEY, blog_url='http://%s/' % Site.objects.get_current().domain)
        # Non-ASCII characters are stripped — presumably because the Akismet
        # client cannot handle unicode input; confirm before changing.
        akismet_data = {
            'user_ip': comment.ip_address,
            'user_agent': request.META['HTTP_USER_AGENT'],
            'comment_author': comment.user_name.encode('ascii','ignore'),
            'comment_author_email': comment.user_email.encode('ascii','ignore'),
            'comment_author_url': comment.user_url.encode('ascii','ignore'),
            'comment_type': 'comment',
        }
        is_spam = a.comment_check(comment.comment.encode('ascii','ignore'), akismet_data)
        # Every comment is held (is_public=False) until manually approved,
        # spam or not; returning True tells Django moderation was applied.
        comment.is_public = False
        comment.save()
        if not is_spam:
            pass
            #self.email(comment, content_object, request)
        return True
| sunlightlabs/reportingsite | reporting/comments.py | comments.py | py | 1,809 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.settings",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "django.contrib.comments.moderation.CommentModerator",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.contrib.sites.models.Site.objects.get_current",
"line_num... |
40095793121 | from django.urls import path
from . import views
# URL routes for the employees app.
urlpatterns = [
    path('', views.TmListView.as_view(), name='home'),                                 # landing page
    path('table/', views.TableListView.as_view(), name='table'),                       # employee table
    path('profile/', views.EmpCreateView.as_view(), name='profile'),                   # create employee
    path('table/employe/<int:pk>/update', views.EmpUpdView.as_view(), name='update'),  # edit one record
    path('table/employe/<int:pk>/delete', views.EmpDelView.as_view(), name='delete'),  # remove one record
]
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
6209532245 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 31 12:39:09 2023
@author: Mouhamad Ali Elamine
"""
import argparse
import json
import re
# Command-line interface for the Spark review-analysis job.
parser = argparse.ArgumentParser(description='A1T1')
parser.add_argument('--input_file', type=str, default='./review.json', help='the input file ')
parser.add_argument('--output_file', type=str, default='./a1t1.json', help='the output file contains your answer')
parser.add_argument('--stopwords', type=str, default='./stopwords', help='the file contains the stopwords')
parser.add_argument('--y', type=int, default=2018, help='year')
parser.add_argument('--m', type=int, default=10, help='top m users')
parser.add_argument('--n', type=int, default=10, help='top n frequent words')
args = parser.parse_args()

stopwords = args.stopwords
# The list is seeded with '' so empty tokens from trailing spaces are
# filtered out along with the real stopwords.
stopwords_list = ['']
# Context manager instead of open()/close(): the file is closed even if
# reading raises.
with open(stopwords) as f:
    for line in f:
        stopwords_list.append(line.rstrip())
#create context
from pyspark import SparkConf, SparkContext
if __name__ == '__main__':
    # Local Spark context sized for a single machine.
    sc_conf = SparkConf() \
        .setAppName('task1') \
        .setMaster('local[*]') \
        .set('spark.driver.memory','8g') \
        .set('spark.executor.memory','4g')
    sc = SparkContext.getOrCreate(conf=sc_conf)
    sc.setLogLevel('OFF')
    # NOTE(review): this hard-codes 'review.json' and ignores args.input_file —
    # looks like a bug; confirm against the assignment spec.
    input_file = 'review.json'
    # persist: the file is read / JSON-decoded only once across the jobs below
    lines = sc.textFile(input_file).persist()
    review_rdd = lines.map(lambda x: json.loads(x)).persist()
    # A: total number of reviews
    rdd_count = review_rdd.count()
    # B: reviews written in year args.y
    year_rdd = review_rdd.filter(lambda x: x["date"][:4] == str(args.y)).persist()
    year_count = year_rdd.count()
    # C: number of distinct users
    user_rdd = review_rdd.map(lambda x: x["user_id"]).distinct()
    user_count = user_rdd.count()
    # D: top m users by review count (ties broken alphabetically by user id)
    topm_rdd = review_rdd.map(lambda x: (x["user_id"],1) )
    topm_rdd1 = topm_rdd.groupByKey().mapValues(lambda x: sum(x))
    topm_rdd2 = topm_rdd1.sortBy(lambda x: (-x[1], x[0])).take(args.m)
    # E: top n frequent words — punctuation stripped, lower-cased, stopwords removed
    text_rdd = review_rdd.map(lambda x: re.sub('[\(\[,.!?:;\]\)]','', x["text"]))
    text_rdd1 = text_rdd.flatMap(lambda x: x.lower().split())
    text_rdd2 = text_rdd1.filter(lambda x: x not in stopwords_list)
    text_rdd3 = text_rdd2.map(lambda x: (x,1))
    text_rdd4 = text_rdd3.groupByKey().mapValues(lambda x: sum(x))
    text_rdd5 = text_rdd4.sortBy(lambda x: (-x[1],x[0]))
    text_rdd6 = text_rdd5.map(lambda x: x[0]).take(args.n)
    # Collect all answers into a single JSON document.
    task1 = {
        "A": rdd_count,
        "B":year_count,
        "C":user_count,
        "D":topm_rdd2,
        "E":text_rdd6
    }
    json_task1 = json.dumps(task1, indent=4)
    with open(args.output_file, "w") as outfile:
        outfile.write(json_task1)
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkConf",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext.getOrCreate",
"line_number": 38,
"usage_type": "call"
},
{
"api_na... |
35899003132 | import discord
import asyncio
from dotenv import load_dotenv
from collections import Counter
from os import getenv
intents = discord.Intents.default()
intents.members = True
intents.presences = True
load_dotenv()
TOKEN = getenv('DISCORD_TOKEN')
class MyClient(discord.Client):
    """Discord client that periodically renames voice channels after the
    activity of the members currently inside them."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # create the background task and run it in the background
        self.bg_task = self.loop.create_task(self.check_game_update())

    async def on_ready(self):
        print('--------------------------------------------------')
        print(f'Logged in as: {self.user.name} {self.user.id}')
        print('--------------------------------------------------')
        await self.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name="Game Activity"))

    async def check_game_update(self):
        """Every 5 minutes rename each occupied voice channel:
        - majority chatting and majority of those on camera -> 'Garage Talk'
        - majority chatting                                 -> 'Just Chatting'
        - otherwise -> the most common game/stream name among the members
        NOTE(review): frequent channel renames can hit Discord's rate limit —
        confirm the 5-minute cadence is safe.
        """
        await self.wait_until_ready()
        while not self.is_closed():
            for guild in self.guilds:
                for channel in guild.voice_channels:
                    if(len(channel.members) > 0):
                        numChatting = 0    # members with no playing/streaming activity
                        vidChatting = 0    # of those, members with camera on
                        activityList = []  # names of games/streams in progress
                        for chatter in channel.members:
                            if(chatter.activity == None):
                                numChatting += 1
                                if(chatter.voice.self_video):
                                    vidChatting += 1
                            elif(chatter.activity.type.name == 'playing' or chatter.activity.type.name == 'streaming'):
                                activityList.append(chatter.activity.name)
                            else:
                                # non-game activity (music, custom status, ...) counts as chatting
                                numChatting += 1
                                if(chatter.voice.self_video):
                                    vidChatting += 1
                        if(numChatting > (len(channel.members)/2)):
                            if(vidChatting > (numChatting/2)):
                                new_name = 'Garage Talk'
                            else:
                                new_name = 'Just Chatting'
                        else:
                            # most common activity name wins
                            new_name = str(Counter(activityList).most_common(1)[0][0])
                        await channel.edit(name=new_name)
                        channel2 = self.get_channel(791066555085094923)
                        await channel2.send(new_name)
            await asyncio.sleep(300)  # task runs every 5 Minutes, temp set to 5 seconds when sending messages instead of updating channel name
# Entry point: build the client with member/presence intents and start it.
client = MyClient(intents = intents)
client.run(TOKEN)
{
"api_name": "discord.Intents.default",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.gete... |
49873569 | # demonstration of the YCbCr encoder/decoder functionality
import numpy as np
import cv2
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
K_VALS = [.299, .587, .114] # ITU-R BT.601
#K_VALS = [.2627, .678, .0593] # ITU-R BT.2020
R = 0
G = 1
B = 2
def color_matrix(values):
    """
    Generate the RGB->YCbCr conversion matrix from the K constants.

    :param values: list [Kr, Kg, Kb]
    :return: 3x3 matrix (list of lists); row 0 aliases *values*
    """
    kr, kg, kb = values
    return [
        values,
        [-.5 * kr / (1 - kb), -.5 * kg / (1 - kb), .5],
        [.5, -.5 * kg / (1 - kr), -.5 * kb / (1 - kr)],
    ]
def rgb_to_ycbcr(image, constants=K_VALS):
    """
    Convert an RGB image (nested uint8 sequences) to YCbCr.

    :param image: input image, indexed [row][col][channel]
    :param constants: conversion constants [Kr, Kg, Kb]
    :return: uint8 ndarray with (Y, Cr, Cb) channel order
    """
    kr, kg, kb = constants
    m = color_matrix(constants)
    rows = []
    for row in image:
        converted = []
        for px in row:
            r, g, b = px[R], px[G], px[B]
            luma = 0 + (kr * r) + (kg * g) + (kb * b)
            cb = 128 + (m[1][0] * r) + (m[1][1] * g) + (m[1][2] * b)
            cr = 128 + (m[2][0] * r) + (m[2][1] * g) + (m[2][2] * b)
            # per-pixel uint8 cast matches the original JPEG-style conversion
            converted.append(np.array([luma, cr, cb], dtype='uint8'))
        rows.append(np.array(converted, dtype='uint8'))
    return np.array(rows, dtype='uint8')
def ycbcr_to_rgb(image, constants=K_VALS):
    """
    Convert a YCbCr image (as produced by rgb_to_ycbcr) back to RGB.

    :param image: input image with (Y, Cr, Cb) channel order
    :param constants: conversion constants [Kr, Kg, Kb]
    :return: uint8 ndarray in RGB encoding
    """
    # invert the forward conversion matrix
    m = np.linalg.inv(np.array(color_matrix(constants)))
    rows = []
    for row in image:
        converted = []
        for px in row:
            luma, cr, cb = px[R], px[G], px[B]
            r = luma + (m[0][2] * (cr - 128))
            g = luma + (m[1][1] * (cb - 128)) + (m[1][2] * (cr - 128))
            b = luma + (m[2][1] * (cb - 128))
            # clamp every channel into the representable 0-255 range
            pixel = [min(255, max(0, channel)) for channel in (r, g, b)]
            converted.append(np.array(pixel, dtype='uint8'))
        rows.append(np.array(converted, dtype='uint8'))
    return np.array(rows, dtype='uint8')
if __name__ == "__main__":
    # Round-trip demo: encode to YCbCr, decode back, and show all three images.
    img = cv2.imread('software tests/full_ycbcr_demo/surf.png')
    img2 = rgb_to_ycbcr(img)
    #img3 = cv2.cvtColor(img2, cv2.COLOR_YCrCb2RGB)
    #img4 = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    img5 = ycbcr_to_rgb(img2)
    cv2.imshow('original', img)
    cv2.imshow('converted', img2)
    cv2.imshow('reconstructed', img5)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
{
"api_name": "numpy.array",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.inv",
"line_numb... |
30112331556 | import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
# constants
TRAIN_DATA_FILE_PATH = "Lyrics-Genre-Train.csv"
TEST_DATA_FILE_PATH = "Lyrics-Genre-Test-GroundTruth.csv"
LYRICS_COLUMN = "Lyrics"
GENRE_COLUMN = "Genre"
# useful variables
output_mappings = {}
# Pentru ca vrem ca input doar versurile si ca output genul vom citi tot setul de date si vom arunca toate datele
# care nu ne sunt de folos.
def read_data(file_path):
    """Read the dataset, keeping only the lyrics (input) and genre (label) columns.

    Fix: the original called ``pd.read_csv`` twice and discarded the first
    DataFrame; a single read is sufficient.
    """
    return pd.read_csv(file_path)[[LYRICS_COLUMN, GENRE_COLUMN]]
def create_new_dataset(dataset):
    """Map the genre column of *dataset* through the global label encoding."""
    global output_mappings
    encoded = dataset[GENRE_COLUMN].map(output_mappings)
    dataset[GENRE_COLUMN] = encoded
    return dataset
# Encoding labels with [0, number of labels] so we will have proper outputs
def label_encoding(train_data, test_data):
    """Replace string genre labels with ints in [0, number of labels)."""
    global output_mappings
    possible_labels = train_data[GENRE_COLUMN].unique()
    output_mappings = {
        genre: index for index, genre in enumerate(possible_labels)
    }
    return create_new_dataset(train_data), create_new_dataset(test_data)
def bag_of_words(train_data, test_data):
    """Build a bag-of-words matrix over the combined train+test lyrics.

    Fixes: ``Series.as_matrix()`` was deprecated and removed from pandas
    (gone since 1.0) — replaced with ``to_numpy()``; the unused ``bow_model``
    local is dropped.
    """
    vectorizer = CountVectorizer()
    data = np.concatenate(
        (train_data[LYRICS_COLUMN].to_numpy(), test_data[LYRICS_COLUMN].to_numpy()),
        axis=0,
    )
    vectorizer.fit(data)
    bow = vectorizer.transform(data)
    print(bow)
    return bow
if __name__ == "__main__":
    # Load the train and test splits (lyrics + genre only).
    train_data = read_data(TRAIN_DATA_FILE_PATH)
    test_data = read_data(TEST_DATA_FILE_PATH)
    # Change labeling from string to int.
    train_data, test_data = label_encoding(train_data, test_data)
    # TODO: feature extraction (bag_of_words) is defined but never called here.
    # TODO: the machine-learning step is not implemented yet.
| daneel95/Master_Homework | FirstYear/Regasirea Informatiei/Homework2/main.py | main.py | py | 1,806 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 40,
"usage_type": "call"
},
{
"a... |
9052754713 | """Support for the Mastertherm Sensors."""
from decimal import Decimal
from datetime import date, datetime
import logging
from homeassistant.core import HomeAssistant
from homeassistant.components.sensor import SensorEntity, SensorDeviceClass
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ENTITIES, UnitOfTemperature, Platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from .const import DOMAIN, MasterthermSensorEntityDescription
from .coordinator import MasterthermDataUpdateCoordinator
from .entity import MasterthermEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
):
    """Create one MasterthermSensor per (sensor type, module) combination."""
    coordinator: MasterthermDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
    entities: list[SensorEntity] = [
        MasterthermSensor(coordinator, module_key, entity_key, entity_description)
        for entity_key, entity_description in coordinator.entity_types[Platform.SENSOR].items()
        for module_key, module in coordinator.data["modules"].items()
        if entity_key in module[CONF_ENTITIES]
    ]
    async_add_entities(entities, True)
    # drop entities that no longer exist in the coordinator data
    coordinator.remove_old_entities(Platform.SENSOR)
class MasterthermSensor(MasterthermEntity, SensorEntity):
    """Representation of a MasterTherm sensor (e.g. a temperature reading)."""

    def __init__(
        self,
        coordinator: MasterthermDataUpdateCoordinator,
        module_key: str,
        entity_key: str,
        entity_description: MasterthermSensorEntityDescription,
    ):
        """Initialize the sensor."""
        # Optional value -> icon mapping used for dynamic icons.
        self._icon_state_map = entity_description.icon_state_map
        # NOTE(review): this mutates the entity description object in place —
        # confirm each entity receives its own description instance, otherwise
        # the units leak to sibling entities sharing the description.
        if entity_description.device_class == SensorDeviceClass.TEMPERATURE:
            entity_description.native_unit_of_measurement = UnitOfTemperature.CELSIUS
            entity_description.suggested_unit_of_measurement = UnitOfTemperature.CELSIUS
        self._attr_state_class = entity_description.state_class
        super().__init__(
            coordinator=coordinator,
            module_key=module_key,
            entity_key=entity_key,
            entity_type=Platform.SENSOR,
            entity_description=entity_description,
        )

    @property
    def icon(self) -> str | None:
        """Return a value-dependent icon when a mapping exists, else the static one."""
        if self._icon_state_map:
            return self._icon_state_map[self.native_value]
        else:
            return self.entity_description.icon

    @property
    def native_value(self) -> StateType | date | datetime | Decimal:
        """Return the sensor value as last fetched by the coordinator."""
        return self.coordinator.data["modules"][self._module_key]["entities"][
            self._entity_key
        ]
| sHedC/homeassistant-mastertherm | custom_components/mastertherm/sensor.py | sensor.py | py | 2,980 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "homeassistant.core.HomeAssistant",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "homeassistant.config_entries.ConfigEntry",
"line_number": 22,
"usage_type": "name"
}... |
35095640017 | from django.shortcuts import render
from .models import cities, city
from django.http import Http404
from datetime import datetime
import pytz
import folium
now = datetime.now(pytz.timezone('Europe/Warsaw')).strftime("%H")
def weather_view(request, *args, **kwargs):
    """Render the weather overview page for all cities.

    Fix: the module-level ``now`` is evaluated once at import time, so the
    day/night icon check went stale; the local hour is now recomputed on
    every request.
    """
    cities_ = cities.objects.all()
    city_ = city.objects.all()
    ci_date = city.objects.get(city_id=1)
    hour = int(datetime.now(pytz.timezone('Europe/Warsaw')).strftime("%H"))
    if 6 <= hour < 20:
        # Daytime: swap OpenWeather night icons ("...n") for day icons ("...d").
        for c in city_:
            c.pic = c.pic.replace("n", "d")
    context = {
        'cities': cities_,
        'city': city_,
        'c_date': ci_date,
    }
    return render(request, "weather.html", context)
def weather_view_detail(request, pk):
    """Render the detail page (map, timezone, sunrise/sunset) for one city.

    Fix: the module-level ``now`` is evaluated once at import time, so the
    day/night icon check went stale; the local hour is now recomputed on
    every request.
    """
    city_ = city.objects.get(city_id=pk)
    city_name = cities.objects.get(id=pk)
    city_name_n = city_name.name.replace(" ", "_")
    # The timezone field stores a UTC offset in seconds.
    timezone = int(city_.timezone) // 3600
    timezone = f"UTC {('+' if timezone >= 0 else '')}{timezone}"
    sunrise = datetime.utcfromtimestamp(city_.sunrise).strftime("%H:%M")
    sunset = datetime.utcfromtimestamp(city_.sunset).strftime("%H:%M")
    hour = int(datetime.now(pytz.timezone('Europe/Warsaw')).strftime("%H"))
    if 6 <= hour < 20:
        # Daytime: swap the OpenWeather night icon ("...n") for the day icon.
        city_.pic = city_.pic.replace("n", "d")
    # Embeddable folium map centred on the city.
    m = folium.Map(location=[city_.lat, city_.lon], zoom_start=12)
    folium.Marker([city_.lat, city_.lon], popup=city_name.name).add_to(m)
    map_html = m._repr_html_()
    context = {
        'map_html': map_html,
        'city': city_,
        'name_space': city_name.name,
        'name': city_name_n,
        'timezone': timezone,
        'sunrise': sunrise,
        'sunset': sunset,
    }
    if city_ is not None:
        return render(request, 'detail.html', context)
    else:
        raise Http404('Something gone wrong')
# def index(request):
# cities_ = cities.objects.all()
# return render(request, 'weather/index.html', {'cities': cities_})
#
# def city(request, city_id):
# city_ = city.objects.get(id=city_id)
# return render(request, 'weather/city.html', {'city': city_}) | Kapiura/weather.is | src/cities/views.py | views.py | py | 2,002 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pytz.timezone",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.cities.obje... |
19169680561 | from setuptools import setup
# Runtime dependencies are taken from requirements.txt so pip installs and
# the package metadata stay in sync (setuptools splits the string on newlines).
with open('requirements.txt') as fp:
    install_requires = fp.read()

setup(
    name='mapped_config',
    packages=['mapped_config'],
    version='2.36',
    description='Mapped config loader for python for secure, easy and modular configuration management',
    author='Alvaro Garcia Gomez',
    author_email='maxpowel@gmail.com',
    url='https://github.com/maxpowel/mapped_config',
    download_url='https://github.com/maxpowel/mapped_config/archive/master.zip',
    keywords=['config', 'configuration', 'yml', 'json'],
    classifiers=['Topic :: Adaptive Technologies', 'Topic :: Software Development', 'Topic :: System',
                 'Topic :: Utilities'],
    install_requires=install_requires
)
| maxpowel/mapped_config | setup.py | setup.py | py | 738 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
}
] |
71899575783 | import csv
import random
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
from keras.models import Sequential, Model
from keras.layers import Dense, LSTM, TimeDistributed, Conv2D, Flatten
from keras.optimizers import Adam
import utm
max_latitude = 330414.05273900216
max_longitude = 3463503.3311055717
min_latitude = 328787.97245886514
min_longitude = 3462203.7096695383
la_size = 16*2
lo_size = 13*2
la = (max_latitude - min_latitude)/la_size
lo = (max_longitude - min_longitude)/lo_size
print(lo)
print(la)
def coordinate_to_label(coordinate):
    """One-hot encode a UTM (easting, northing) pair into a la_size*lo_size grid cell."""
    cell_x = int((coordinate[0] - min_latitude - 1e-12) / la)
    cell_y = int((coordinate[1] - min_longitude - 1e-12) / lo)
    one_hot = np.zeros(la_size * lo_size, dtype=int)
    one_hot[cell_x + cell_y * la_size] = 1
    return one_hot
def shuffle_train_test(X, Y, LEN, ratio):
    """Deterministically split paired samples into train and test sets.

    The RNG is seeded with a fixed value so the split is reproducible
    across runs.

    :param X: indexable collection of samples
    :param Y: indexable collection of labels, aligned with X
    :param LEN: number of samples to consider (indices 0..LEN-1)
    :param ratio: fraction of samples assigned to the test set
    :return: (X_Train, Y_Train, X_Test, Y_Test) as numpy arrays
    """
    random.seed(1)
    # Use a set for O(1) membership tests; the original scanned a list for
    # every index, which is O(LEN * k).
    test_indexes = set(random.sample(range(LEN), k=int(ratio*LEN)))
    X_Train = []
    Y_Train = []
    X_Test = []
    Y_Test = []
    for i in range(LEN):
        if i in test_indexes:
            X_Test.append(X[i])
            Y_Test.append(Y[i])
        else:
            X_Train.append(X[i])
            Y_Train.append(Y[i])
    return np.array(X_Train), np.array(Y_Train), np.array(X_Test), np.array(Y_Test)
# lat, lon, 51 'R'
def check_validation(value_dict):
    """Validate one MR record and normalise it to six base-station slots.

    A record is kept only if its GPS accuracy is within 30 m and at least
    three of the six neighbouring-cell slots carry real measurements.
    Empty slots are back-filled with a (seeded, reproducible) random copy
    of one of the valid slots.

    :return: (is_valid, normalised_record)
    """
    field_names = ('RNCID', 'CellID', 'Dbm', 'AsuLevel', 'SignalLevel')
    validate_list = []
    validate_data = {
        'Latitude': value_dict['Latitude'],
        'Longitude': value_dict['Longitude'],
        'MRTime': value_dict['MRTime'],
        'TrajID': value_dict['TrajID']
    }
    if float(value_dict['Accuracy']) > 30:
        return False, validate_data
    for i in range(6):
        suffix = '_' + str(i+1)
        slot_is_valid = (
            value_dict['RNCID' + suffix] != '-999'
            and value_dict['CellID' + suffix] != '-999'
            and value_dict['Dbm' + suffix] != '-999'
            and value_dict['AsuLevel' + suffix] != '-1'
            and value_dict['SignalLevel' + suffix] != '-1'
        )
        if slot_is_valid:
            validate_list.append(i)
            for name in field_names:
                validate_data[name + suffix] = value_dict[name + suffix]
    if len(validate_list) < 3:
        # Fewer than 3 towers cannot, even in theory, localise the phone.
        return False, validate_data
    for i in range(6):
        if i not in validate_list:
            # Re-seed every time so the back-fill choice is reproducible.
            np.random.seed(1)
            k = np.random.choice(validate_list)
            for name in field_names:
                validate_data[name + '_' + str(i+1)] = value_dict[name + '_' + str(k+1)]
    return True, validate_data
# Cut the samples into fixed-length slices, grouped by trajectory ID.
def cut_to_slices(X, Y, slice_length, LEN):
    """Regroup samples into consecutive runs of `slice_length` per trajectory.

    X rows carry the trajectory id in column 1. Whenever the trajectory
    changes before a slice is complete, the unfinished slice is padded by
    re-using earlier rows so every emitted slice has exactly `slice_length`
    rows from a single trajectory.

    NOTE(review): the X[i-slice_length] back-fill assumes at least
    `slice_length` earlier rows exist — confirm for very short inputs.

    :param X: feature rows (column 1 = trajectory id)
    :param Y: target rows aligned with X
    :param slice_length: number of samples per slice
    :param LEN: number of rows to process
    :return: (X, Y) as flat numpy arrays whose length is a multiple of
        slice_length
    """
    tmp_count = 1               # rows of the current slice filled so far
    tmp_X = []                  # accepted feature rows, in slice order
    tmp_Y = []                  # accepted target rows, in slice order
    x_slice_list = []           # slice currently being built
    y_slice_list = []
    tmp_traj_label = int(X[0][1])   # trajectory id of the slice being built
    for i in range(LEN):
        traj_id = int(X[i][1])
        if tmp_count == slice_length:
            # The slice needs exactly one more row to be complete.
            if traj_id == tmp_traj_label:
                x_slice_list.append(X[i])
                y_slice_list.append(Y[i])
                tmp_X += x_slice_list
                tmp_Y += y_slice_list
                x_slice_list = []
                y_slice_list = []
                tmp_count = 1
                if (i+1) < LEN:
                    tmp_traj_label = int(X[i+1][1])
            else:
                # Trajectory changed on the last row: pad the unfinished
                # slice with an earlier row, flush it, start a new slice.
                x_slice_list.insert(0, X[i-slice_length])
                y_slice_list.insert(0, Y[i-slice_length])
                tmp_X += x_slice_list
                tmp_Y += y_slice_list
                x_slice_list = [X[i]]
                y_slice_list = [Y[i]]
                tmp_count = 2
                tmp_traj_label = traj_id
        else:
            if traj_id == tmp_traj_label:
                tmp_count += 1
                x_slice_list.append(X[i])
                y_slice_list.append(Y[i])
            else:
                # Trajectory changed mid-slice: rebuild a full slice from
                # the previous slice_length rows, flush it, start fresh.
                x_slice_list = []
                y_slice_list = []
                for k in range(slice_length):
                    x_slice_list.append(X[i+k-slice_length])
                    y_slice_list.append(Y[i+k-slice_length])
                tmp_X += x_slice_list
                tmp_Y += y_slice_list
                x_slice_list = [X[i]]
                y_slice_list = [Y[i]]
                tmp_count = 2
                tmp_traj_label = traj_id
    return np.array(tmp_X), np.array(tmp_Y)
# Map "RNCID|CellID" -> UTM coordinate (easting, northing) of the base station.
signal_tower_dict = {}
with open('../data/gongcan.csv', 'r') as file:
    reader = csv.DictReader(file)
    for row in reader:
        coordinate = utm.from_latlon(float(row['Latitude']), float(row['Longitude']))[:2]
        signal_tower_dict[row['RNCID'] + '|' + row['CellID']] = coordinate
X = []
Y = []
# 2 meta columns (timestamp, trajectory id) + 6 towers x 5 fields = 32.
features = 32
with open('../data/train_2g.csv', 'r') as file:
    reader = csv.DictReader(file)
    for row in reader:
        # Drop inaccurate rows and back-fill missing tower slots.
        validation, parsed_row = check_validation(row)
        if(not validation):
            continue
        # Target: the phone's position, converted to UTM.
        coordinate = utm.from_latlon(float(parsed_row['Latitude']), float(parsed_row['Longitude']))[:2]
        Y.append([coordinate[0], coordinate[1]])
        mr_sample = np.zeros(features)
        mr_sample[0] = parsed_row['MRTime']
        mr_sample[1] = parsed_row['TrajID']
        for i in range(6):
            coordinate = signal_tower_dict[parsed_row['RNCID_' + str(i+1)] + '|' + parsed_row['CellID_' + str(i+1)]]
            mr_sample[i*5 + 2] = coordinate[0]  # replace tower ids with the tower's UTM coordinates
            mr_sample[i*5 + 3] = coordinate[1]
            mr_sample[i*5 + 4] = parsed_row['Dbm_' + str(i+1)]
            mr_sample[i*5 + 5] = parsed_row['AsuLevel_' + str(i+1)]
            mr_sample[i*5 + 6] = parsed_row['SignalLevel_' + str(i+1)]
        X.append(np.array(mr_sample))
LEN = len(X)
slice_length = 6
X = np.array(X).astype('float64')
Y = np.array(Y).astype('float64')
# Regroup rows into per-trajectory slices of slice_length samples.
X, Y = cut_to_slices(X, Y, slice_length, LEN)
LEN = X.shape[0]
scalerX = preprocessing.StandardScaler()
X = scalerX.fit_transform(X)
X = X.reshape(round(LEN/slice_length), slice_length, features)
Y = Y.reshape(round(LEN/slice_length), slice_length, 2)
# Half of the slices train the CNN; the other half feeds the LSTM stage.
X_CNN, Y_CNN, X_LSTM, Y_LSTM = shuffle_train_test(X, Y, round(LEN/slice_length), 0.5)
X_CNN = X_CNN.reshape(X_CNN.shape[0]*X_CNN.shape[1], features)
X_LSTM = X_LSTM.reshape(X_LSTM.shape[0]*X_LSTM.shape[1], features)
Y_CNN = Y_CNN.reshape(Y_CNN.shape[0]*Y_CNN.shape[1], 2)
Y_LSTM = Y_LSTM.reshape(Y_LSTM.shape[0]*Y_LSTM.shape[1], 2)
X_CNN = np.delete(X_CNN, [1], axis=1)  # drop the trajectory-id column
X_LSTM = np.delete(X_LSTM, [1], axis=1)
X_CNN = np.delete(X_CNN, [0], axis=1)  # the timestamp is not needed for CNN training
X_LSTM = np.delete(X_LSTM, [0], axis=1)  # the timestamp is not needed for CNN training
# Shape each sample as a 6x5x1 "image" (6 towers x 5 fields, 1 channel).
X_CNN = X_CNN.reshape(X_CNN.shape[0], 6, 5)
X_LSTM = X_LSTM.reshape(X_LSTM.shape[0], 6, 5)
X_LSTM = X_LSTM[:, :, :, np.newaxis]
X_CNN = X_CNN[:, :, :, np.newaxis]
# One-hot encode the CNN targets as grid-cell labels.
tmp_Y = []
for i in range(Y_CNN.shape[0]):
    tmp_Y.append(coordinate_to_label(Y_CNN[i]))
Y_CNN = np.array(tmp_Y)
# Stage 1: a CNN classifies each 6x5 tower-feature "image" into a grid cell.
adam_cnn = Adam(lr=2e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=2e-9, amsgrad=False)
model_cnn = Sequential()
model_cnn.add(Conv2D(128, kernel_size=3, activation='relu', input_shape=(6, 5, 1)))
model_cnn.add(Conv2D(256, kernel_size=3, activation='relu'))
model_cnn.add(Flatten())
model_cnn.add(Dense(512, activation='relu'))
model_cnn.add(Dense(256, activation='relu'))
model_cnn.add(Dense(la_size*lo_size, activation='softmax'))
model_cnn.compile(optimizer=adam_cnn, loss='categorical_crossentropy', metrics=['accuracy'])
model_cnn.fit(X_CNN, Y_CNN, epochs=300, batch_size=64)
# Run the trained CNN over the held-out half to produce the LSTM inputs.
cnn_result = model_cnn.predict(X_LSTM)
print(cnn_result.shape) # 4710, 832
# def label_to_coordinate(label_list): # error!
# label = np.argmax(label_list)
# la_label = label % la_size
# lo_label = (label - la_label) / la_size
# return ((0.5+la_label)*la + min_latitude, (0.5+lo_label)*lo + min_longitude)
# def calcu_distance(true_latitude, true_longitude, pred_latitude, pred_longitude):
# vector1 = np.array([true_latitude, true_longitude])
# vector2 = np.array([pred_latitude, pred_longitude])
# return np.sqrt(np.sum(np.square(vector1 - vector2)))
# error_list = []
# coordinate_result = []
# for i in range(cnn_result.shape[0]):
# tmp = label_to_coordinate(cnn_result[i])
# coordinate_result.append(tmp)
# error = calcu_distance(Y_LSTM[i][0], Y_LSTM[i][1], tmp[0], tmp[1])
# error_list.append(error)
# coordinate_result = np.array(coordinate_result)
# print(np.median(error_list)) # 这就是中位误差?
# print(np.mean(error_list)) # 这就是中位误差?
# plt.figure()
# plt.scatter(Y_LSTM[:,0], Y_LSTM[:,1], c='blue', s=5)
# plt.scatter(coordinate_result[:,0], coordinate_result[:,1], c='red', s=3)
# plt.show()
# Stage 2: an LSTM smooths the CNN's per-sample cell distributions across
# each slice of consecutive samples.
lstm_input = cnn_result
lstm_input = lstm_input.reshape(round(lstm_input.shape[0]/slice_length), slice_length, la_size*lo_size)
print(lstm_input.shape)
Y_LSTM = Y_LSTM.reshape(round(Y_LSTM.shape[0]/slice_length), slice_length, 2)
# 90% of the slices train the LSTM; 10% are held out for evaluation.
X_Train, Y_Train, X_Test, Y_Test = shuffle_train_test(lstm_input, Y_LSTM, lstm_input.shape[0], 0.1)
Y_Train = Y_Train.reshape(Y_Train.shape[0]*Y_Train.shape[1], 2)
# One-hot encode the LSTM targets as grid-cell labels, then restore slices.
tmp_Y = []
for i in range(Y_Train.shape[0]):
    tmp_Y.append(coordinate_to_label(Y_Train[i]))
Y_Train = np.array(tmp_Y)
Y_Train = Y_Train.reshape(round(Y_Train.shape[0]/slice_length), slice_length, la_size*lo_size)
adam_lstm = Adam(lr=5e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-9, amsgrad=False)
model_lstm = Sequential()
model_lstm.add(LSTM(slice_length*20, input_shape=(X_Train.shape[1], X_Train.shape[2]), return_sequences=True))
model_lstm.add(TimeDistributed(Dense(2*la_size*lo_size, activation='relu')))
model_lstm.add(TimeDistributed(Dense(la_size*lo_size, activation='softmax')))
model_lstm.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=adam_lstm)
model_lstm.fit(X_Train, Y_Train, epochs=100, batch_size=6)
lstm_result = model_lstm.predict(X_Test)
print(lstm_result.shape)
print(Y_Test.shape)
# Flatten slices back to individual samples for evaluation.
lstm_result = lstm_result.reshape(lstm_result.shape[0]*slice_length, lo_size*la_size)
Y_Test = Y_Test.reshape(Y_Test.shape[0]*slice_length, 2)
def label_to_coordinate(label_list):
    """Map a one-hot / probability vector back to its grid-cell centre.

    Inverse of coordinate_to_label: picks the most likely cell and returns
    the UTM coordinate of that cell's midpoint.
    """
    cell = np.argmax(label_list)
    col = cell % la_size
    row = (cell - col) / la_size
    centre_x = (0.5 + col) * la + min_latitude
    centre_y = (0.5 + row) * lo + min_longitude
    return (centre_x, centre_y)
def calcu_distance(true_latitude, true_longitude, pred_latitude, pred_longitude):
    """Euclidean distance between the true and predicted (UTM) points."""
    delta = np.array([true_latitude, true_longitude]) - np.array([pred_latitude, pred_longitude])
    return np.sqrt(np.dot(delta, delta))
# Evaluate: distance between each predicted cell centre and the true
# position, bucketed into 20 m bands for a cumulative error curve.
error_list = []
coordinate_result = []
error_dict = {
    '0': 0,
    '1': 0,
    '2': 0,
    '3': 0,
    '4': 0
}
for i in range(Y_Test.shape[0]):
    tmp = label_to_coordinate(lstm_result[i])
    coordinate_result.append(tmp)
    error = calcu_distance(Y_Test[i][0], Y_Test[i][1], tmp[0], tmp[1])
    if error <= 20:
        error_dict['0'] += 1
    elif error <= 40:
        error_dict['1'] += 1
    elif error <= 60:
        error_dict['2'] += 1
    elif error <= 80:
        error_dict['3'] += 1
    elif error <= 100:
        error_dict['4'] += 1
    error_list.append(error)
coordinate_result = np.array(coordinate_result)
error_list.sort()
print(np.median(error_list))  # median error
print(np.mean(error_list))  # mean error
print(error_list[int(len(error_list)*0.9)])  # 90th-percentile error
error_ratio = []
# Accumulate the buckets into a cumulative distribution over 20..100 m.
for i in range(5):
    if i > 0:
        error_dict[str(i)] += error_dict[str(i-1)]
    error_ratio.append(error_dict[str(i)]/len(error_list))
# Figure 1: true (blue) vs predicted (red) positions; figure 2: error CDF.
plt.figure('figure 1')
plt.scatter(Y_Test[:,0], Y_Test[:,1], c='blue', s=5)
plt.scatter(coordinate_result[:,0], coordinate_result[:,1], c='red', s=3)
plt.figure('figure 2')
plt.plot([20, 40, 60, 80, 100], error_ratio, marker='o')
plt.show()
| haoranpb/datamining | hw3/code/e.py | e.py | py | 12,319 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number"... |
26195476426 | from typing import List
from tensorflow import keras
from sklearn.model_selection import train_test_split
import librosa
import numpy as np
from tqdm.notebook import tqdm
import os
import data_augmentation
import random
RATE = 8000
def load_recordings(paths=["recordings"], label_type="number", sr=RATE):
    """
    Load the .wav recordings found in the given directories.

    :param paths: list of directories containing audio files
    :param label_type: "number" or "speaker"; in speaker mode the
        pitch-augmented tracks are skipped
    :param sr: sample rate used when loading the tracks (default 8000)
    :return: numpy array of 1-D audio signals
    """
    res = []
    for path in paths:
        print(f"Loading from {path}")
        for f in tqdm(sorted(os.listdir(path))):
            if not f.endswith('.wav'):
                continue
            # BUG FIX: the original used a bare `next` expression here,
            # which is a no-op; the file was only skipped because of the
            # `else` branch. Make the skip explicit.
            if "pitch" in f and label_type == "speaker":
                continue
            # Load the file and extract the signal.
            audio, sample_rate = librosa.load(path + "/" + f, sr=sr)
            res.append(audio)
    return np.array(res)
def mfcc(track, rate=8000, sampling=1, n_mfcc=20, flatten=True):
    """
    Compute the MFCC representation of the given track.

    :param track: input audio signal (1-D array)
    :param rate: sample rate of the input signal
    :param sampling: keep every `sampling`-th sample before analysis; the
        effective rate passed to librosa is rate / sampling
    :param n_mfcc: number of MFCC coefficients to extract
    :param flatten: whether to flatten the 2-D coefficient matrix
        (useful for SVM inputs)
    :return: 2-D MFCC matrix, or a 1-D vector when flatten is True
    """
    # Downsample the signal.
    signal = track[::sampling]
    # Compute the MFCC coefficients (* 1.0 ensures a float signal).
    mfcc_coefs = librosa.feature.mfcc(signal * 1.0, sr=int(rate / sampling), n_mfcc=n_mfcc)
    if flatten:
        # Flatten the representation for use with an SVM.
        mfcc_coefs = mfcc_coefs.flatten()
    return mfcc_coefs
def load_labels(paths=["recordings"], label_type="number"):
    """
    Load the label (digit or speaker) of every recording in the given dirs.

    File names follow the pattern <digit>_<speaker>_<n>.wav. In speaker
    mode, pitch-augmented tracks are skipped, mirroring load_recordings so
    the two lists stay aligned.

    :param paths: list of directories containing audio files
    :param label_type: anything starting with "n" selects the digit label,
        otherwise the speaker label is used
    :return: list of label strings
    """
    labels = []
    for path in paths:
        for f in sorted(os.listdir(path)):
            if not f.endswith('.wav'):
                continue
            # BUG FIX: the original used a bare `next` expression here,
            # which is a no-op; the file was only skipped because of the
            # `else` branch. Make the skip explicit.
            if "pitch" in f and label_type == "speaker":
                continue
            if label_type.startswith("n"):
                label = f.split('_')[0]
            else:
                label = f.split('_')[1]
            labels.append(label)
    return labels
def pad_zeros(recordings, compute_max_rec_length=True, max_rec_length=0):
    """
    Zero-pad every recording (at both ends) to a common length.

    :param recordings: list of 1-D numpy arrays
    :param compute_max_rec_length: when True, the target length is that of
        the longest recording; otherwise `max_rec_length` is used
    :param max_rec_length: explicit target length (only used when
        compute_max_rec_length is False)
    :return: 2-D numpy array of padded recordings
    """
    if compute_max_rec_length:
        max_rec_length = max(map(np.shape, recordings))[0]
    return np.array([padding(max_rec_length, rec) for rec in recordings])


def padding(max_rec_length, rec):
    """
    Zero-pad `rec` symmetrically up to `max_rec_length` samples.

    Recordings already at (or beyond) the target length are returned
    unchanged.

    :param max_rec_length: length every recording must reach
    :param rec: 1-D numpy array to pad
    :return: padded (or original) array
    """
    missing = max_rec_length - rec.shape[0]
    if missing <= 0:
        return rec
    left = int(missing / 2)
    right = missing - left
    return np.pad(rec, (left, right), 'constant', constant_values=0)
def compute_spectrogram(audio, rate=8000, normalize=False, paper_data=False):
    """
    Compute the mel spectrogram of the given recording.

    :param audio: input audio track (1-D array)
    :param rate: sample rate of the input audio track
    :param normalize: whether to apply dynamic range compression
        (log10(1000 * S + 1)) to the spectrogram
    :param paper_data: whether to use the STFT parameters from the
        reference paper (n_fft=1024, hop_length=160) instead of the
        librosa defaults
    :return: 2-D mel spectrogram
    """
    if paper_data:
        spectrogram = librosa.feature.melspectrogram(y=np.array(audio),
                                                     sr=rate,
                                                     n_fft=1024,
                                                     hop_length=160)
    else:
        spectrogram = librosa.feature.melspectrogram(y=np.array(audio),
                                                     sr=rate)
    if normalize:
        # Dynamic range compression.
        spectrogram = np.log10(1000 * spectrogram + 1)
    return spectrogram
def prepare_data_nn(X_train, X_val, X_test, y_train, y_val, y_test, number_mode):
    """
    Transform recordings (MFCCs or spectrograms) and labels for NN training.

    :param X_train: training inputs, shape (n, h, w)
    :param X_val: validation inputs
    :param X_test: test inputs
    :param y_train: training labels
    :param y_val: validation labels
    :param y_test: test labels
    :param number_mode: True for digit recognition (labels 0-9 become
        categorical vectors); False for speaker recognition (labels are
        one-hot encoded via transform_categorical_y)
    :return: (X_data, y_data, input_shape, target_names) where X_data and
        y_data are [train, val, test] lists of numpy arrays
    """
    # Change shape of X for model training purpose: add a channel axis.
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2], 1)
    X_val = X_val.reshape(X_val.shape[0], X_val.shape[1], X_val.shape[2], 1)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2], 1)
    input_shape = (X_train.shape[1], X_train.shape[2], 1)
    target_names = []
    if number_mode:
        y_train = keras.utils.to_categorical(y_train, 10)
        y_val = keras.utils.to_categorical(y_val, 10)
        y_test = keras.utils.to_categorical(y_test, 10)
    else:
        # The encoder fitted on y_train is re-used for val/test so all
        # three share the same column order.
        enc, y_train, target_names = transform_categorical_y(y_train)
        y_val = enc.fit_transform(np.array(y_val).reshape(-1, 1)).toarray()
        y_test = enc.fit_transform(np.array(y_test).reshape(-1, 1)).toarray()
    X_data = [np.array(X_train), np.array(X_val), np.array(X_test)]
    y_data = [np.array(y_train), np.array(y_val), np.array(y_test)]
    return X_data, y_data, input_shape, target_names
def transform_categorical_y(labels):
    """
    One-hot encode labels and recover the class name for each column.

    :param labels: original label strings
    :return: (fitted encoder, one-hot matrix, class name per one-hot column)
    """
    from sklearn.preprocessing import OneHotEncoder
    enc = OneHotEncoder()
    y = enc.fit_transform(np.array(labels).reshape(-1, 1)).toarray()
    # Decode each unit vector back to its label in a loop instead of
    # hand-writing one inverse_transform call per class; this also works
    # for any number of classes, not just the 8 the original hard-coded.
    n_classes = y.shape[1]
    target_names = [
        enc.inverse_transform(np.eye(n_classes)[i].reshape(1, -1))[0][0]
        for i in range(n_classes)
    ]
    return enc, y, target_names
def get_pattern_indexes(lst: List, pattern: str, split_index: int):
    """
    Indexes of the elements whose `split_index`-th '_'-separated part,
    re-wrapped in underscores, contains the given pattern.

    :param lst: list of strings shaped like a_b_c
    :param pattern: substring to look for (typically '_name_')
    :param split_index: which '_'-separated field to inspect
    :return: list of matching indexes
    """
    matches = []
    for idx, item in enumerate(lst):
        wrapped = "_" + item.split('_')[split_index] + "_"
        if pattern in wrapped:
            matches.append(idx)
    return matches
def split_and_augment_dataset(audio_dir: str,
                              y_type: str,
                              n_category_audio_to_pick_test: int,
                              include_pitch: bool,
                              max_length: int,
                              recordings_made_by_us: bool):
    """
    Augment and split the given recordings into train, validation and test.

    Only original (un-augmented) tracks go to the test set; the noise and
    pitch variants of a track chosen for testing are dropped entirely, so
    augmented copies never leak across the split.

    :param audio_dir: path where the recordings of interest are stored
    :param y_type: label family ('speakers_us', 'speakers_default', or
        digits for anything else)
    :param n_category_audio_to_pick_test: samples per unique Y value to put
        in the test set
    :param include_pitch: whether to include audio with modified pitch
    :param max_length: maximum length a recording may have to be included
    :param recordings_made_by_us: whether the recordings are made by us
        (tunes the data-augmentation parameters)
    :return: (train_recordings, train_labels, val_recordings, val_labels,
        test_recordings, test_labels)
    """
    print("split_and_augment_dataset >>>")
    n_noise = 5
    n_pitch = 0
    if y_type == "speakers_us":
        categories = ['_gian_', '_alinda_', '_khaled_', '_ale_']
        # Used later on for getting y label
        split_index = 1
    elif y_type == "speakers_default":
        categories = ['jackson', 'nicolas', 'theo', 'yweweler']
        split_index = 1
    else:
        # Digit mode: pitch augmentation is safe here (it would fake a new
        # speaker identity in speaker mode).
        categories = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
        split_index = 0
        n_pitch = 5
    augmented_tracks = data_augmentation.enrich_dataset(audio_dir,
                                                        mode="normal",
                                                        n_noise=n_noise,
                                                        n_pitch=n_pitch,
                                                        recordings_made_by_us=recordings_made_by_us,
                                                        max_length=max_length)
    all_keys = [k for k in augmented_tracks.keys()]
    test_labels = []
    # Get the list of recordings name that will compose our test set
    for c in categories:
        indexes_of_c = get_pattern_indexes(all_keys, c, split_index)
        records_to_pick = random.sample(indexes_of_c, n_category_audio_to_pick_test)
        current_test_categories = [all_keys[i] for i in records_to_pick]
        test_labels = test_labels + current_test_categories
    # Get the recordings for the test set
    test_recordings = []
    for k in test_labels:
        # store original recording
        current_key = augmented_tracks[k]
        test_recordings.append(current_key['original'][0])
        # eliminate the original + augmented tracks from the dataset
        del augmented_tracks[k]
    train_recordings = []
    train_labels = []
    for k in augmented_tracks.keys():
        # Original track
        train_labels.append(k)
        train_recordings.append(augmented_tracks[k]['original'][0])
        # Noise track
        noise_recordings = augmented_tracks[k]['noise']
        noise_labels = len(noise_recordings) * [k]
        train_labels = train_labels + noise_labels
        train_recordings = train_recordings + noise_recordings
        # Pitch tracks
        if include_pitch:
            pitch_recordings = augmented_tracks[k]['pitch']
            pitch_labels = len(pitch_recordings) * [k]
            train_labels = train_labels + pitch_labels
            train_recordings = train_recordings + pitch_recordings
    # Get final label. The file format is number_speaker_n.wav
    train_labels = [label.split('_')[split_index] for label in train_labels]
    test_labels = [label.split('_')[split_index] for label in test_labels]
    # Carve the validation set out of the (augmented) training pool.
    train_recordings, val_recordings, train_labels, val_labels = train_test_split(train_recordings,
                                                                                 train_labels,
                                                                                 test_size=0.2,
                                                                                 random_state=1)
    print("split_and_augment_dataset <<<")
    return train_recordings, train_labels, val_recordings, val_labels, test_recordings, test_labels
def prepare_augmented_recordings(audio_dirs: List[str],
                                 y_type: List[str],
                                 n_category_test: int,
                                 include_pitch: bool,
                                 max_length: int,
                                 recordings_source: List[bool],
                                 transform_function="spectrogram",
                                 paper_data=False):
    """
    Augment, split and transform recordings from several directories.

    Each directory is augmented and split independently (so its test set
    stays uncontaminated), then the per-directory splits are pooled and
    transformed into spectrograms or MFCCs.

    :param audio_dirs: list of paths where the recordings are stored
    :param y_type: label family of each directory (digits, baseline
        speakers, or "us" speakers), aligned with audio_dirs
    :param n_category_test: samples per unique Y value for the test set
    :param include_pitch: whether to include audio with modified pitch
    :param max_length: maximum length a recording may have to be included
    :param recordings_source: whether each directory's recordings are made
        by us (tunes augmentation), aligned with audio_dirs
    :param transform_function: "spectrogram" or MFCC otherwise
    :param paper_data: whether to transform the data as in the paper
    :return: ([X_train, X_val, X_test], [y_train, y_val, y_test])
    """
    X_train = []
    y_train = []
    X_val = []
    y_val = []
    X_test = []
    y_test = []
    # Augment/split each directory separately, then pool the splits.
    for i, dir_path in enumerate(audio_dirs):
        train_recordings, train_labels, val_recordings, val_labels, test_recordings, test_labels = split_and_augment_dataset(
            dir_path,
            y_type[i],
            n_category_test,
            include_pitch,
            max_length,
            recordings_source[i])
        X_train = X_train + train_recordings
        y_train = y_train + train_labels
        X_val = X_val + val_recordings
        y_val = y_val + val_labels
        X_test = X_test + test_recordings
        y_test = y_test + test_labels
    X_train = [np.array(x) for x in X_train]
    y_train = [np.array(x) for x in y_train]
    X_val = [np.array(x) for x in X_val]
    y_val = [np.array(x) for x in y_val]
    X_test = [np.array(x) for x in X_test]
    y_test = [np.array(x) for x in y_test]
    X_train, X_val, X_test = transform_recordings(X_train, X_val, X_test, transform_function, paper_data)
    X_data = [np.array(X_train), np.array(X_val), np.array(X_test)]
    y_data = [np.array(y_train), np.array(y_val), np.array(y_test)]
    return X_data, y_data
def transform_recordings(X_train, X_val, X_test, transform_function, paper_data):
    """
    Normalize (zero-pad) and transform train/val/test recordings.

    All three splits are padded to the same global maximum length so the
    resulting feature matrices share one shape.

    :param X_train: train recordings
    :param X_val: validation recordings
    :param X_test: test recordings
    :param transform_function: "spectrogram" for mel spectrograms, anything
        else for MFCCs
    :param paper_data: whether to use the paper's STFT parameters (only
        applies to spectrograms)
    :return: transformed (X_train, X_val, X_test)
    """
    print("transform_recordings >>>")
    # In order to normalise the length of recordings we have to define the maximum length of the various recordings
    max_length_rec = max(map(np.shape, X_train + X_val + X_test))[0]
    X_train = pad_zeros(X_train, compute_max_rec_length=False, max_rec_length=max_length_rec)
    X_val = pad_zeros(X_val, compute_max_rec_length=False, max_rec_length=max_length_rec)
    X_test = pad_zeros(X_test, compute_max_rec_length=False, max_rec_length=max_length_rec)
    # Now let's transform our recordings the spectrograms
    if transform_function == "spectrogram":
        X_train = [compute_spectrogram(x, normalize=True, paper_data=paper_data) for x in X_train]
        X_val = [compute_spectrogram(x, normalize=True, paper_data=paper_data) for x in X_val]
        X_test = [compute_spectrogram(x, normalize=True, paper_data=paper_data) for x in X_test]
    else:
        X_train = [mfcc(x, flatten=False) for x in X_train]
        X_val = [mfcc(x, flatten=False) for x in X_val]
        X_test = [mfcc(x, flatten=False) for x in X_test]
    print("transform_recordings <<<")
    return X_train, X_val, X_test
def balanced_train_val_test_split(X, y, train_size=0.6):
    """
    Class-balanced train/validation/test split.

    Every class contributes the same number of samples, capped by the
    rarest class: `train_size` of that per-class budget goes to train and
    the remainder is split evenly between validation and test.

    :param X: samples (indexable)
    :param y: labels (numpy array)
    :param train_size: fraction of the per-class budget used for training
    :return: ([X_train, X_val, X_test], [y_train, y_val, y_test])
    """
    X_train, X_val, X_test = [], [], []
    y_train, y_val, y_test = [], [], []
    # Per-class budget is set by the least frequent class.
    classes, class_counts = np.unique(y, return_counts=True)
    per_class = np.min(class_counts)
    n_train = int(per_class * train_size)
    n_val = (per_class - n_train) // 2
    n_test = per_class - n_train - n_val
    for cls in classes:
        idx = np.where(y == cls)[0]
        np.random.shuffle(idx)
        split_a = n_train
        split_b = n_train + n_val
        split_c = split_b + n_test
        X_train.extend(X[i] for i in idx[:split_a])
        y_train.extend(y[i] for i in idx[:split_a])
        X_val.extend(X[i] for i in idx[split_a:split_b])
        y_val.extend(y[i] for i in idx[split_a:split_b])
        X_test.extend(X[i] for i in idx[split_b:split_c])
        y_test.extend(y[i] for i in idx[split_b:split_c])
    X_data = [np.array(X_train), np.array(X_val), np.array(X_test)]
    y_data = [np.array(y_train), np.array(y_val), np.array(y_test)]
    return X_data, y_data
def balanced_train_val_split(X, y, train_size=0.75):
    """
    Class-balanced train/validation split.

    Each class contributes the same number of samples, capped by the
    rarest class; `train_size` of that budget goes to train, the rest to
    validation.

    :param X: samples (indexable)
    :param y: labels (numpy array)
    :param train_size: fraction of the per-class budget used for training
    :return: ([X_train, X_val], [y_train, y_val])
    """
    X_train, X_val = [], []
    y_train, y_val = [], []
    # Per-class budget is set by the least frequent class.
    classes, class_counts = np.unique(y, return_counts=True)
    per_class = np.min(class_counts)
    n_train = int(per_class * train_size)
    n_val = per_class - n_train
    print(n_train, n_val)
    for cls in classes:
        print(cls)
        idx = np.where(y == cls)[0]
        np.random.shuffle(idx)
        X_train.extend(X[i] for i in idx[:n_train])
        y_train.extend(y[i] for i in idx[:n_train])
        X_val.extend(X[i] for i in idx[n_train:n_train + n_val])
        y_val.extend(y[i] for i in idx[n_train:n_train + n_val])
    X_data = [np.array(X_train), np.array(X_val)]
    y_data = [np.array(y_train), np.array(y_val)]
    return X_data, y_data
| GianCarloMilanese/dsim_project | Audio/data_preparation.py | data_preparation.py | py | 18,819 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tqdm.notebook.tqdm",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "librosa.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_nu... |
11936612348 | from typing import Optional, TYPE_CHECKING
from django.db import models
from django.contrib.auth.models import Group, User
from rest_framework import serializers
from rest_framework.exceptions import NotFound
from processes.exception import UnprocessableEntity
from .uuid_model import UuidModel
if TYPE_CHECKING:
from .run_environment import RunEnvironment
class NamedWithUuidModel(UuidModel):
    """
    Abstract base for entities addressed either by UUID or by a name that
    is looked up within the owning group.
    """
    class Meta:
        abstract = True

    name = models.CharField(max_length=200)
    description = models.CharField(max_length=5000, blank=True)
    # Audit columns; the group owns the row (cascade delete), the user
    # reference is nulled if the user is removed.
    created_by_user = models.ForeignKey(User, on_delete=models.SET_NULL,
                                        null=True, editable=False)
    created_by_group = models.ForeignKey(Group, on_delete=models.CASCADE,
                                         editable=False)

    @classmethod
    def find_by_uuid_or_name(cls, obj_dict,
                             required_group: Optional[Group] = None,
                             required_run_environment: 'Optional[RunEnvironment]' = None,
                             check_conflict: bool = True,
                             allowed_run_environment: 'Optional[RunEnvironment]' = None,
                             allow_any_run_environment: Optional[bool] = None):
        """
        Resolve an entity from a request dict carrying 'uuid' and/or 'name'.

        Lookup is by uuid when present (optionally verifying that a
        supplied name matches the stored one), otherwise by name within
        required_group. Raises NotFound when the entity falls outside the
        required group or run-environment scope.
        """
        uuid = obj_dict.get('uuid')
        name = obj_dict.get('name')
        if uuid is not None:
            entity = cls.objects.get(uuid=uuid)
            # Reject requests whose name disagrees with the stored row.
            if check_conflict and (name is not None) and (entity.name != name):
                raise UnprocessableEntity(
                        f"{cls.__name__} {uuid} is named '{entity.name}', not '{name}'")
        else:
            if name is None:
                raise serializers.ValidationError('Neither uuid or name found in request')
            entity = cls.objects.get(name=name, created_by_group=required_group)
        if required_group and (entity.created_by_group != required_group):
            raise NotFound()
        if required_run_environment and (
                entity.run_environment != required_run_environment):
            raise NotFound()
        # So that allowed_run_environment can be omitted if
        # required_run_environment is set
        allowed_run_environment = allowed_run_environment or \
                required_run_environment
        if allow_any_run_environment is None:
            allow_any_run_environment = (allowed_run_environment is None)
        if (not allow_any_run_environment) and \
                hasattr(entity, 'run_environment') and \
                entity.run_environment:
            if allowed_run_environment:
                if entity.run_environment != allowed_run_environment:
                    raise NotFound()
            else:
                raise NotFound()
        return entity

    def __str__(self) -> str:
        return (self.name or 'Unnamed') + ' / ' + str(self.uuid)
| CloudReactor/task_manager | server/processes/models/named_with_uuid_model.py | named_with_uuid_model.py | py | 2,749 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "uuid_model.UuidModel",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "... |
7775088104 | from django.shortcuts import render
from operator import attrgetter
# Para a paginação
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage, InvalidPage
# Importamos a função criada anteriormente
from post.views import get_post_queryset
POSTS_PER_PAGE = 2  # Any value above 0; kept small because few posts are registered
LIMIT_OF_PAGES = 10
def home_view(request):
    """Home page: list published posts filtered by the search query, paginated."""
    context = {}
    # Start with an empty search query.
    query = ''
    # request.GET is falsy when empty (<QueryDict: {}>), so this only runs
    # when the client actually sent parameters.
    if request.GET:
        # Collect the query: try to get the value of 'q', defaulting to ''.
        query = request.GET.get('q', '')
        # Echo the query back to the template so the search bar keeps its value.
        context['query'] = query
    # Fetch the posts, filtered by the query.
    posts = get_post_queryset(query)
    # Order the posts from newest to oldest.
    posts = sorted(posts, key=attrgetter('date_published'), reverse=True)
    # Pagination
    # Read the requested page number; default to 1 (the first page).
    page = request.GET.get('page', 1)
    # Build a paginator from the object list and the page size.
    posts_paginator = Paginator(posts, POSTS_PER_PAGE)
    # Navigate to the requested page, falling back to page 1 on bad input.
    try:
        posts = posts_paginator.page(page)
    except InvalidPage:
        posts = posts_paginator.page(1)
    # Hand the page of posts to the template.
    context['posts'] = posts
    context['limit_of_pages'] = LIMIT_OF_PAGES
    return render(request, 'personal/home_view.html', context)
| imklesley/SimpleBlog_Site_API | personal/views.py | views.py | py | 1,994 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "post.views.get_post_queryset",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "operator.attrgetter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 37,
"usage_type": "call"
},
{
"... |
33943744293 | import asyncio
import atexit
import dataclasses
import multiprocessing
import time
from typing import Any, Dict
from aiogram import Bot, Dispatcher
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import Text
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.types import BotCommand, InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery, Message
from aiogram.utils import executor
from aiogram.utils.exceptions import MessageToForwardNotFound, ChatNotFound
from aiogram.contrib.middlewares.i18n import I18nMiddleware
from tg_filtering_bot.async_queue import AsyncQueue
from tg_filtering_bot.config import settings
from tg_filtering_bot.crud.crud import (
create_or_update_user_chat,
add_filter,
get_user_filters,
disable_filter
)
from tg_filtering_bot.crud.dto import UserFilterDTO, UserChatDTO, ForwardMessageDTO
from tg_filtering_bot.logger import get_logger
# Gettext middleware: localizes user-facing strings per user's Telegram locale.
I18N = I18nMiddleware(settings.I18N_DOMAIN, settings.LOCALES_DIR)
# Shorthand used throughout this module to mark translatable strings.
_ = I18N.gettext
class UserState(StatesGroup):
    """FSM states for the bot's multi-step dialogs."""
    # Waiting for the user to type a street name after /add.
    adding = State()
    # Waiting for the user to click a delete button after /delete.
    deleting = State()
@dataclasses.dataclass
class DeleteFilterData:
    """Per-button payload kept in FSM storage while a /delete dialog is open."""
    # 1-based position used to keep the inline keyboard ordered.
    order: int
    # Inline button whose callback_data identifies the filter to delete.
    button: InlineKeyboardButton
    # The filter record this button would disable.
    filter_: UserFilterDTO
def build_delete_filters_markup(
    storage_data: Dict[str, DeleteFilterData]
) -> InlineKeyboardMarkup:
    """Build an inline keyboard with one delete button per stored filter.

    Entries of *storage_data* that are not DeleteFilterData instances are
    ignored; buttons appear in ascending ``order``.
    """
    markup = InlineKeyboardMarkup()
    entries = sorted(
        (entry for entry in storage_data.values() if isinstance(entry, DeleteFilterData)),
        key=lambda entry: entry.order,
    )
    for entry in entries:
        markup.add(entry.button)
    return markup
class FilteringBot:
    """Telegram bot that manages per-user address filters and forwards
    matching channel messages.

    Messages to forward arrive via ``message_queue``; the aiogram polling
    loop and the queue-consumer task share one event loop (see start_bot).
    """

    LOGGER = get_logger("FilteringBot")
    # callback_data prefix identifying the per-filter delete buttons.
    _BTN_DELETE_PREFIX = "delete_"

    def __init__(self, message_queue: AsyncQueue) -> None:
        self._message_queue = message_queue
        self.storage = MemoryStorage()
        self.bot = Bot(settings.BOT_TOKEN)
        self.dispatcher = Dispatcher(self.bot, storage=self.storage)
        self.dispatcher.middleware.setup(I18N)
        self.LOGGER.info("Bot created")
        # Command handlers react in any FSM state ("*"), so a command always
        # interrupts whatever dialog is in progress.
        self.dispatcher.register_message_handler(
            self.start_command,
            commands=['start'],
            state="*"
        )
        self.dispatcher.register_message_handler(
            self.add_command,
            commands=['add'],
            state="*"
        )
        self.dispatcher.register_message_handler(
            self.delete_command,
            commands=['delete'],
            state="*"
        )
        self.dispatcher.register_message_handler(
            self.list_command,
            commands=['list'],
            state="*"
        )
        self.dispatcher.register_message_handler(
            self.cancel_command,
            commands=['cancel'],
            state="*"
        )
        # Plain-text messages are treated as addresses only while adding.
        self.dispatcher.register_message_handler(
            self.add_address,
            state=UserState.adding
        )
        self.dispatcher.register_callback_query_handler(
            self.delete_address,
            Text(startswith=self._BTN_DELETE_PREFIX),
            state=UserState.deleting
        )

    async def _set_base_commands(self) -> None:
        """(Re)publish the bot's command menu shown by Telegram clients."""
        await self.bot.delete_my_commands()
        await self.bot.set_my_commands(commands=[
            BotCommand(
                command="add",
                description=_("Add new address")
            ),
            BotCommand(
                command="list",
                description=_("List addresses")
            ),
            BotCommand(
                command="delete",
                description=_("Delete address")
            ),
            BotCommand(
                command="cancel",
                description=_("Cancel previous command")
            ),
        ])

    async def _register_user(self, message: Any) -> None:
        """Create or refresh the DB record for the message's user/chat pair."""
        if message:
            self.LOGGER.info("Adding or updating user %s, chat %s", message.from_user.id, message.chat.id)
            user_chat_dto = UserChatDTO(
                user_id=message.from_user.id,
                chat_id=message.chat.id,
                name=message.from_user.full_name,
                username=message.from_user.username,
                language_code=message.from_user.language_code
            )
            await create_or_update_user_chat(user_chat_dto)

    async def _process_message_queue(self) -> None:
        """Take one item from the queue and dispatch it by type.

        :raises NotImplementedError: for queue items of unknown type.
        """
        message = await self._message_queue.get()
        if isinstance(message, ForwardMessageDTO):
            return await self.forward_message(message)
        raise NotImplementedError(f"Processing for {message} is not implemented")

    async def _periodic(self) -> None:
        """Consume the queue forever; log failures so one bad item cannot
        kill the consumer task."""
        while True:
            try:
                await self._process_message_queue()
            except Exception as e:
                self.LOGGER.exception(e)

    def start_bot(self) -> None:
        """Blocking entry point: create an event loop, start the command
        setup and queue consumer tasks, then run aiogram long polling."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.create_task(self._set_base_commands())
        loop.create_task(self._periodic())
        executor.start_polling(self.dispatcher, loop=loop)

    def stop_bot(self) -> None:
        """Ask the dispatcher to stop long polling.

        NOTE(review): when the bot runs in a child process (see
        start_filtering_bot), calling this from the parent only touches the
        parent's dispatcher copy — confirm shutdown reaches the child.
        """
        self.dispatcher.stop_polling()

    async def _clear_state(self, state: FSMContext) -> None:
        """Reset FSM state/data and restore the default command menu."""
        await state.reset_state()
        await state.reset_data()
        await self._set_base_commands()

    async def start_command(self, message: Message, state: FSMContext) -> None:
        """Handle /start: register the user and show the welcome text."""
        await self._register_user(message)
        await self._clear_state(state)
        await message.reply(_("""Hi there,
I am a filtering bot for '{channel}' channel.
Please use command {command} to add a new address.""").format(
            channel=settings.LISTENER_CHANNEL_NAME, command="/add"
        ))

    async def add_command(self, message: Message, state: FSMContext) -> None:
        """Handle /add: switch to the 'adding' state and prompt for input."""
        await self._register_user(message)
        await state.set_state(UserState.adding)
        await self.bot.send_message(message.chat.id, _("Please type street name to create a filter"))

    async def delete_command(self, message: Message, state: FSMContext) -> None:
        """Handle /delete: show an inline keyboard of the user's filters."""
        await self._register_user(message)
        filters = await get_user_filters(user_id=message.from_user.id)
        if not filters:
            return await message.reply(_("You have no addresses to delete"))
        await state.set_state(UserState.deleting)
        # Keep button payloads in FSM storage so delete_address can map
        # callback_data back to the filter it belongs to.
        storage_data = {}
        for i, f in enumerate(filters, start=1):
            key = f"{self._BTN_DELETE_PREFIX}{f.filter_id}"
            storage_data[key] = DeleteFilterData(
                order=i,
                filter_=f,
                button=InlineKeyboardButton(
                    _("{i}. Delete {address}").format(i=i, address=f.filter_),
                    callback_data=key
                )
            )
        await state.set_data(storage_data)
        inline_kb = build_delete_filters_markup(storage_data)
        await message.reply(_("Click addresses to delete"), reply_markup=inline_kb)

    async def list_command(self, message: Message, state: FSMContext) -> Message:
        """Handle /list: reply with the user's filters, numbered from 1."""
        await self._register_user(message)
        filters = await get_user_filters(user_id=message.from_user.id)
        if not filters:
            return await message.reply(_("You have no addresses"))
        # Typo fix: "You addresses" -> "Your addresses" (translation catalogs
        # must be updated to the new msgid).
        response = _("Your addresses are:") + "\r\n" + "\r\n".join(
            f"{i}. {f.filter_}" for i, f in enumerate(filters, start=1)
        )
        return await message.reply(response)

    async def cancel_command(self, message: Message, state: FSMContext) -> None:
        """Handle /cancel: drop any in-progress dialog state."""
        await self._register_user(message)
        await self._clear_state(state)

    async def add_address(self, message: Message, state: FSMContext) -> None:
        """Receive the street name typed after /add and store the filter."""
        await self._register_user(message)
        address = message.text
        self.LOGGER.info("Adding new address: %s", message)
        # filter_id=-1 is a placeholder; the real id is assigned by the DB.
        user_filter = UserFilterDTO(user_id=message.from_user.id, filter_=address, filter_id=-1)
        await add_filter(user_filter)
        await self.bot.send_message(
            message.chat.id, _("Filter '{address}' was added for monitoring").format(address=address)
        )
        await self._clear_state(state)

    async def delete_address(self, event: CallbackQuery, state: FSMContext) -> Message:
        """Handle a delete-button click: disable the filter, redraw keyboard."""
        self.LOGGER.warning(event)
        storage_data = await state.get_data()
        if not storage_data or event.data not in storage_data:
            return await event.message.answer(_("No address to delete"))
        delete_filter_data = storage_data.pop(event.data)
        await disable_filter(delete_filter_data.filter_)
        await state.set_data(storage_data)
        await event.message.edit_reply_markup(build_delete_filters_markup(storage_data))
        return await event.message.answer(
            _("Filter '{address}' was deleted").format(address=delete_filter_data.filter_.filter_)
        )

    async def send_message_to_user(self, forward_message: ForwardMessageDTO) -> None:
        """Fallback delivery: send the stored message text directly."""
        self.LOGGER.debug(
            "Sending message %s to user %s", forward_message.message.message_id, forward_message.user_chat.user_id
        )
        await self.bot.send_message(
            forward_message.user_chat.chat_id,
            forward_message.message.message
        )

    async def forward_message(self, forward_message: ForwardMessageDTO) -> None:
        """Forward the original channel message; fall back to plain text when
        the source message or chat is no longer available."""
        self.LOGGER.debug(
            "Forwarding message %s to user %s", forward_message.message.message_id, forward_message.user_chat.user_id
        )
        try:
            await self.bot.forward_message(
                forward_message.user_chat.chat_id,
                from_chat_id=forward_message.message.channel_id,
                message_id=forward_message.message.message_id
            )
        except (MessageToForwardNotFound, ChatNotFound):
            self.LOGGER.warning(
                "Cannot forward message %s", forward_message.message.message_id
            )
            return await self.send_message_to_user(forward_message)
def start_filtering_bot() -> AsyncQueue[ForwardMessageDTO]:
    """Launch the bot in a child process and return the queue used to feed it.

    The returned AsyncQueue wraps a multiprocessing.Queue, so the parent
    process can enqueue ForwardMessageDTO items that the child bot consumes.

    :raises RuntimeError: if the child process exits within ~1 second of start.
    """
    queue: multiprocessing.Queue[ForwardMessageDTO] = multiprocessing.Queue(maxsize=settings.QUEUE_SIZE)
    message_queue = AsyncQueue(queue)
    bot = FilteringBot(
        message_queue=message_queue
    )
    process = multiprocessing.Process(target=bot.start_bot)
    process.start()
    # NOTE(review): these atexit hooks run in the parent; stop_bot acts on the
    # parent's dispatcher copy, not the child's — confirm shutdown actually
    # stops the child's polling loop.
    atexit.register(bot.stop_bot)
    atexit.register(process.join)
    # Give the child a moment to crash on startup errors (e.g. a bad token).
    time.sleep(1)
    if process.exitcode is not None:
        raise RuntimeError("FilteringBot closed after launch")
    return message_queue
| i026e/tg_filtering_bot | tg_filtering_bot/bot/filtering_bot.py | filtering_bot.py | py | 10,642 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiogram.contrib.middlewares.i18n.I18nMiddleware",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tg_filtering_bot.config.settings.I18N_DOMAIN",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "tg_filtering_bot.config.settings",
"line_nu... |
33408291882 | from selenium import webdriver
import time
from selenium.webdriver.common.by import By
class clickSendkey():
    """Demo: open Google, type a query, and click the search button."""

    def test(self):
        """Run the search flow end to end, always shutting the browser down."""
        # NOTE(review): executable_path is deprecated in Selenium 4 and the
        # hard-coded path assumes one specific machine — prefer a Service object.
        driver = webdriver.Firefox(executable_path='/Users/Jatin Singh/Downloads/geckodriver')
        try:
            driver.get('https://google.com/')
            driver.maximize_window()
            # find_element_by_xpath was removed in Selenium 4; use the
            # find_element(By.XPATH, ...) form (By was imported but unused).
            texttype = driver.find_element(By.XPATH, "//input[@type='text']")
            texttype.send_keys('letskodeitselenium')
            time.sleep(3)
            search = driver.find_element(By.XPATH, "//div[@class='FPdoLc tfB0Bf']//input[@class='gNO89b']")
            search.click()
            time.sleep(3)
        finally:
            # quit() (unlike close()) also terminates the geckodriver process,
            # so it is not leaked when an element lookup fails.
            driver.quit()
if __name__ == '__main__':
    # Guard so importing this module does not launch a browser.
    o = clickSendkey()
    o.test()
| JatinSsingh/SeleniumPython | pythonProject/webelement.py | webelement.py | py | 648 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
22565700808 | from abc import ABC, abstractmethod
from dataclasses import dataclass
from functools import partial
from typing import Any, Dict, List, Optional, Tuple
import torch
from shap_e.models.nn.utils import sample_pmf
from shap_e.models.volume import Volume, VolumeRange
from shap_e.util.collections import AttrDict
from .model import NeRFModel, Query
def render_rays(
    rays: torch.Tensor,
    parts: List["RayVolumeIntegral"],
    void_model: NeRFModel,
    shared: bool = False,
    prev_raw_outputs: Optional[List[AttrDict]] = None,
    render_with_direction: bool = True,
    importance_sampling_options: Optional[Dict[str, Any]] = None,
) -> Tuple["RayVolumeIntegralResults", List["RaySampler"], List[AttrDict]]:
    """
    Perform volumetric rendering over a partition of possible t's in the union
    of rendering volumes (written below with some abuse of notations)

        C(r) := sum(
            transmittance(t[i]) *
            integrate(
                lambda t: density(t) * channels(t) * transmittance(t),
                [t[i], t[i + 1]],
            )
            for i in range(len(parts))
        ) + transmittance(t[-1]) * void_model(t[-1]).channels

    where

    1) transmittance(s) := exp(-integrate(density, [t[0], s])) calculates the
    probability of light passing through the volume specified by [t[0], s].
    (transmittance of 1 means light can pass freely)

    2) density and channels are obtained by evaluating the appropriate
    part.model at time t.

    3) [t[i], t[i + 1]] is defined as the range of t where the ray intersects
    (parts[i].volume \\ union(part.volume for part in parts[:i])) at the surface
    of the shell (if bounded). If the ray does not intersect, the integral over
    this segment is evaluated as 0 and transmittance(t[i + 1]) :=
    transmittance(t[i]).

    4) The last term is integration to infinity (e.g. [t[-1], math.inf]) that
    is evaluated by the void_model (i.e. we consider this space to be empty).

    :param rays: [batch_size x ... x 2 x 3] origin and direction.
    :param parts: disjoint volume integrals.
    :param void_model: use this model to integrate over the empty space
    :param shared: All RayVolumeIntegrals are calculated with the same model.
    :param prev_raw_outputs: Raw outputs from the previous rendering step
    :return: A tuple of
        - AttrDict containing the rendered `channels`, `distances`, and the `aux_losses`
        - A list of importance samplers for additional fine-grained rendering
        - A list of raw output for each interval
    """
    if importance_sampling_options is None:
        importance_sampling_options = {}

    # rays[..., 0, :] is the ray origin and rays[..., 1, :] its direction.
    origin, direc = rays[..., 0, :], rays[..., 1, :]

    if prev_raw_outputs is None:
        prev_raw_outputs = [None] * len(parts)

    samplers = []
    raw_outputs = []
    t0 = None
    results = None

    # Walk the parts in order, chaining t-intervals and combining results.
    for part_i, prev_raw_i in zip(parts, prev_raw_outputs):
        # Integrate over [t[i], t[i + 1]]
        results_i = part_i.render_rays(
            origin,
            direc,
            t0=t0,
            prev_raw=prev_raw_i,
            shared=shared,
            render_with_direction=render_with_direction,
        )
        # Create an importance sampler for (optional) fine rendering
        samplers.append(
            ImportanceRaySampler(
                results_i.volume_range, results_i.raw, **importance_sampling_options
            )
        )
        raw_outputs.append(results_i.raw)
        # Pass t[i + 1] as the start of integration for the next interval.
        t0 = results_i.volume_range.next_t0()
        # Combine the results from [t[0], t[i]] and [t[i], t[i+1]]
        results = results_i if results is None else results.combine(results_i)

    # While integrating out [t[-1], math.inf] is the correct thing to do, this
    # erases a lot of useful information. Also, void_model is meant to predict
    # the channels at t=math.inf.

    # # Add the void background over [t[-1], math.inf] to complete integration.
    # results = results.combine(
    #     RayVolumeIntegralResults(
    #         output=AttrDict(
    #             channels=void_model(origin, direc),
    #             distances=torch.zeros_like(t0),
    #             aux_losses=AttrDict(),
    #         ),
    #         volume_range=VolumeRange(
    #             t0=t0,
    #             t1=torch.full_like(t0, math.inf),
    #             intersected=torch.full_like(results.volume_range.intersected, True),
    #         ),
    #         # Void space extends to infinity. It is assumed that no light
    #         # passes beyond the void.
    #         transmittance=torch.zeros_like(results_i.transmittance),
    #     )
    # )

    # Composite the void model's background, weighted by leftover transmittance.
    results.output.channels = results.output.channels + results.transmittance * void_model(
        Query(origin, direc)
    )

    return results, samplers, raw_outputs
@dataclass
class RayVolumeIntegralResults:
    """
    Stores the relevant state and results of

        integrate(
            lambda t: density(t) * channels(t) * transmittance(t),
            [t0, t1],
        )
    """

    # Rendered output and auxiliary losses
    # output.channels has shape [batch_size, *inner_shape, n_channels]
    output: AttrDict
    """
    Optional values
    """
    # Raw values contain the sampled `ts`, `density`, `channels`, etc.
    raw: Optional[AttrDict] = None
    # Integration
    volume_range: Optional[VolumeRange] = None
    # If a ray intersects, the transmittance from t0 to t1 (e.g. the
    # probability that the ray passes through this volume).
    # has shape [batch_size, *inner_shape, 1]
    transmittance: Optional[torch.Tensor] = None

    def combine(self, cur: "RayVolumeIntegralResults") -> "RayVolumeIntegralResults":
        """
        Combines the integration results of `self` over [t0, t1] and
        `cur` over [t1, t2] to produce a new set of results over [t0, t2] by
        using a similar equation to (4) in NeRF++:

            integrate(
                lambda t: density(t) * channels(t) * transmittance(t),
                [t0, t2]
            )
            = integrate(
                lambda t: density(t) * channels(t) * transmittance(t),
                [t0, t1]
            ) + transmittance(t1) * integrate(
                lambda t: density(t) * channels(t) * transmittance(t),
                [t1, t2]
            )
        """
        # Intervals must be contiguous: cur must start where self ends.
        assert torch.allclose(self.volume_range.next_t0(), cur.volume_range.t0)

        def _combine_fn(
            prev_val: Optional[torch.Tensor],
            cur_val: Optional[torch.Tensor],
            *,
            prev_transmittance: torch.Tensor,
        ):
            assert prev_val is not None
            if cur_val is None:
                # cur_output.aux_losses are empty for the void_model.
                return prev_val
            # Second interval's contribution is attenuated by how much light
            # survived the first interval.
            return prev_val + prev_transmittance * cur_val

        output = self.output.combine(
            cur.output, combine_fn=partial(_combine_fn, prev_transmittance=self.transmittance)
        )
        combined = RayVolumeIntegralResults(
            output=output,
            volume_range=self.volume_range.extend(cur.volume_range),
            transmittance=self.transmittance * cur.transmittance,
        )
        return combined
@dataclass
class RayVolumeIntegral:
    """One piece of the overall ray integral, over a single volume."""

    # NeRF model queried for density/channels at the sampled points.
    model: NeRFModel
    # The region of space this part integrates over.
    volume: Volume
    # Strategy that picks integration times ts within [t0, t1].
    sampler: "RaySampler"
    # Number of ts sampled per ray.
    n_samples: int

    def render_rays(
        self,
        origin: torch.Tensor,
        direction: torch.Tensor,
        t0: Optional[torch.Tensor] = None,
        prev_raw: Optional[AttrDict] = None,
        shared: bool = False,
        render_with_direction: bool = True,
    ) -> "RayVolumeIntegralResults":
        """
        Perform volumetric rendering over the given volume.

        :param position: [batch_size, *shape, 3]
        :param direction: [batch_size, *shape, 3]
        :param t0: Optional [batch_size, *shape, 1]
        :param prev_raw: the raw outputs when using multiple levels with this model.
        :param shared: means the same model is used for all RayVolumeIntegral's
        :param render_with_direction: use the incoming ray direction when querying the model.

        :return: RayVolumeIntegralResults
        """
        # 1. Intersect the rays with the current volume and sample ts to
        # integrate along.
        vrange = self.volume.intersect(origin, direction, t0_lower=t0)
        ts = self.sampler.sample(vrange.t0, vrange.t1, self.n_samples)

        if prev_raw is not None and not shared:
            # Append the previous ts now before fprop because previous
            # rendering used a different model and we can't reuse the output.
            ts = torch.sort(torch.cat([ts, prev_raw.ts], dim=-2), dim=-2).values

        # Shape sanity checks
        batch_size, *_shape, _t0_dim = vrange.t0.shape
        _, *ts_shape, _ts_dim = ts.shape

        # 2. Get the points along the ray and query the model
        directions = torch.broadcast_to(direction.unsqueeze(-2), [batch_size, *ts_shape, 3])
        positions = origin.unsqueeze(-2) + ts * directions

        optional_directions = directions if render_with_direction else None
        # Midpoints between consecutive ts bound each sample's sub-interval.
        mids = (ts[..., 1:, :] + ts[..., :-1, :]) / 2
        raw = self.model(
            Query(
                position=positions,
                direction=optional_directions,
                t_min=torch.cat([vrange.t0[..., None, :], mids], dim=-2),
                t_max=torch.cat([mids, vrange.t1[..., None, :]], dim=-2),
            )
        )
        raw.ts = ts

        if prev_raw is not None and shared:
            # We can append the additional queries to previous raw outputs
            # before integration
            copy = prev_raw.copy()
            result = torch.sort(torch.cat([raw.pop("ts"), copy.pop("ts")], dim=-2), dim=-2)
            merge_results = partial(self._merge_results, dim=-2, indices=result.indices)
            raw = raw.combine(copy, merge_results)
            raw.ts = result.values

        # 3. Integrate the raw results
        output, transmittance = self.integrate_samples(vrange, raw)

        # 4. Clean up results that do not intersect with the volume.
        transmittance = torch.where(
            vrange.intersected, transmittance, torch.ones_like(transmittance)
        )

        def _mask_fn(_key: str, tensor: torch.Tensor):
            # Non-intersecting rays contribute nothing from this volume.
            return torch.where(vrange.intersected, tensor, torch.zeros_like(tensor))

        def _is_tensor(_key: str, value: Any):
            return isinstance(value, torch.Tensor)

        output = output.map(map_fn=_mask_fn, should_map=_is_tensor)

        return RayVolumeIntegralResults(
            output=output,
            raw=raw,
            volume_range=vrange,
            transmittance=transmittance,
        )

    def integrate_samples(
        self,
        volume_range: VolumeRange,
        raw: AttrDict,
    ) -> Tuple[AttrDict, torch.Tensor]:
        """
        Integrate the raw.channels along with other aux_losses and values to
        produce the final output dictionary containing rendered `channels`,
        estimated `distances` and `aux_losses`.

        :param volume_range: Specifies the integral range [t0, t1]
        :param raw: Contains a dict of function evaluations at ts. Should have

            density: torch.Tensor [batch_size, *shape, n_samples, 1]
            channels: torch.Tensor [batch_size, *shape, n_samples, n_channels]
            aux_losses: {key: torch.Tensor [batch_size, *shape, n_samples, 1] for each key}
            no_weight_grad_aux_losses: an optional set of losses for which the weights
                should be detached before integration.

            after the call, integrate_samples populates some intermediate calculations
            for later use like

            weights: torch.Tensor [batch_size, *shape, n_samples, 1] (density *
                transmittance)[i] weight for each rgb output at [..., i, :].
        :returns: a tuple of (
            a dictionary of rendered outputs and aux_losses,
            transmittance of this volume,
        )
        """
        # 1. Calculate the weights
        _, _, dt = volume_range.partition(raw.ts)
        ddensity = raw.density * dt

        # Cumulative optical depth; its last entry gives the whole-volume
        # transmittance exp(-integral of density).
        mass = torch.cumsum(ddensity, dim=-2)
        transmittance = torch.exp(-mass[..., -1, :])

        alphas = 1.0 - torch.exp(-ddensity)
        Ts = torch.exp(torch.cat([torch.zeros_like(mass[..., :1, :]), -mass[..., :-1, :]], dim=-2))
        # This is the probability of light hitting and reflecting off of
        # something at depth [..., i, :].
        weights = alphas * Ts

        # 2. Integrate all results
        def _integrate(key: str, samples: torch.Tensor, weights: torch.Tensor):
            if key == "density":
                # Omit integrating the density, because we don't need it
                return None
            return torch.sum(samples * weights, dim=-2)

        def _is_tensor(_key: str, value: Any):
            return isinstance(value, torch.Tensor)

        if raw.no_weight_grad_aux_losses:
            extra_aux_losses = raw.no_weight_grad_aux_losses.map(
                partial(_integrate, weights=weights.detach()), should_map=_is_tensor
            )
        else:
            extra_aux_losses = {}
        output = raw.map(partial(_integrate, weights=weights), should_map=_is_tensor)
        if "no_weight_grad_aux_losses" in output:
            del output["no_weight_grad_aux_losses"]
        output.aux_losses.update(extra_aux_losses)

        # Integrating the ts yields the distance away from the origin; rename the variable.
        output.distances = output.ts
        del output["ts"]
        del output["density"]

        assert output.distances.shape == (*output.channels.shape[:-1], 1)
        assert output.channels.shape[:-1] == raw.channels.shape[:-2]
        assert output.channels.shape[-1] == raw.channels.shape[-1]

        # 3. Reduce loss
        def _reduce_loss(_key: str, loss: torch.Tensor):
            return loss.view(loss.shape[0], -1).sum(dim=-1)

        # 4. Store other useful calculations
        raw.weights = weights

        output.aux_losses = output.aux_losses.map(_reduce_loss)

        return output, transmittance

    def _merge_results(
        self, a: Optional[torch.Tensor], b: torch.Tensor, dim: int, indices: torch.Tensor
    ):
        """
        :param a: [..., n_a, ...]. The other dictionary containing the b's may
            contain extra tensors from earlier calculations, so a can be None.
        :param b: [..., n_b, ...]
        :param dim: dimension to merge
        :param indices: how the merged results should be sorted at the end
        :return: a concatted and sorted tensor of size [..., n_a + n_b, ...]
        """
        if a is None:
            return None

        merged = torch.cat([a, b], dim=dim)
        return torch.gather(merged, dim=dim, index=torch.broadcast_to(indices, merged.shape))
class RaySampler(ABC):
    """Abstract strategy for choosing integration times ts along each ray."""

    @abstractmethod
    def sample(self, t0: torch.Tensor, t1: torch.Tensor, n_samples: int) -> torch.Tensor:
        """
        :param t0: start time has shape [batch_size, *shape, 1]
        :param t1: finish time has shape [batch_size, *shape, 1]
        :param n_samples: number of ts to sample
        :return: sampled ts of shape [batch_size, *shape, n_samples, 1]
        """
class StratifiedRaySampler(RaySampler):
    """
    Instead of fixed intervals, a sample is drawn uniformly at random from each
    interval (stratified sampling).
    """

    def __init__(self, depth_mode: str = "linear"):
        """
        :param depth_mode: linear samples ts linearly in depth. harmonic ensures
            closer points are sampled more densely.
        """
        assert depth_mode in ("linear", "geometric", "harmonic")
        self.depth_mode = depth_mode

    def sample(
        self,
        t0: torch.Tensor,
        t1: torch.Tensor,
        n_samples: int,
        epsilon: float = 1e-3,
    ) -> torch.Tensor:
        """
        :param t0: start time has shape [batch_size, *shape, 1]
        :param t1: finish time has shape [batch_size, *shape, 1]
        :param n_samples: number of ts to sample
        :return: sampled ts of shape [batch_size, *shape, n_samples, 1]
        """
        # Evenly spaced interpolation fractions in [0, 1], broadcastable
        # against t0/t1 (whose trailing dim is 1).
        lead = [1] * (t0.ndim - 1)
        fracs = (
            torch.linspace(0, 1, n_samples)
            .view(*lead, n_samples)
            .to(t0.dtype)
            .to(t0.device)
        )

        # Interpolate between t0 and t1 in the chosen depth parametrization.
        if self.depth_mode == "linear":
            ts = t0 * (1.0 - fracs) + t1 * fracs
        elif self.depth_mode == "geometric":
            ts = (t0.clamp(epsilon).log() * (1.0 - fracs) + t1.clamp(epsilon).log() * fracs).exp()
        elif self.depth_mode == "harmonic":
            # The original NeRF recommends this interpolation scheme for
            # spherical scenes, but there could be some weird edge cases when
            # the observer crosses from the inner to outer volume.
            ts = 1.0 / (1.0 / t0.clamp(epsilon) * (1.0 - fracs) + 1.0 / t1.clamp(epsilon) * fracs)

        # Jitter each sample uniformly inside its own bin, whose edges are the
        # midpoints between neighboring ts (clamped to [t0, t1]).
        midpoints = 0.5 * (ts[..., 1:] + ts[..., :-1])
        upper = torch.cat([midpoints, t1], dim=-1)
        lower = torch.cat([t0, midpoints], dim=-1)
        noise = torch.rand_like(ts)
        return (lower + (upper - lower) * noise).unsqueeze(-1)
class ImportanceRaySampler(RaySampler):
    """
    Given the initial estimate of densities, this samples more from
    regions/bins expected to have objects.
    """

    def __init__(
        self, volume_range: VolumeRange, raw: AttrDict, blur_pool: bool = False, alpha: float = 1e-5
    ):
        """
        :param volume_range: the range in which a ray intersects the given volume.
        :param raw: dictionary of raw outputs from the NeRF models of shape
            [batch_size, *shape, n_coarse_samples, 1]. Should at least contain
        :param ts: earlier samples from the coarse rendering step
        :param weights: discretized version of density * transmittance
        :param blur_pool: if true, use 2-tap max + 2-tap blur filter from mip-NeRF.
        :param alpha: small value to add to weights.
        """
        self.volume_range = volume_range
        # Detached copies: importance sampling must not backprop into the
        # coarse pass.
        self.ts = raw.ts.clone().detach()
        self.weights = raw.weights.clone().detach()
        self.blur_pool = blur_pool
        self.alpha = alpha

    @torch.no_grad()
    def sample(self, t0: torch.Tensor, t1: torch.Tensor, n_samples: int) -> torch.Tensor:
        """
        :param t0: start time has shape [batch_size, *shape, 1]
        :param t1: finish time has shape [batch_size, *shape, 1]
        :param n_samples: number of ts to sample
        :return: sampled ts of shape [batch_size, *shape, n_samples, 1]
        """
        lower, upper, _ = self.volume_range.partition(self.ts)

        batch_size, *shape, n_coarse_samples, _ = self.ts.shape

        weights = self.weights
        if self.blur_pool:
            # mip-NeRF style: max with both neighbors, then 2-tap blur.
            padded = torch.cat([weights[..., :1, :], weights, weights[..., -1:, :]], dim=-2)
            maxes = torch.maximum(padded[..., :-1, :], padded[..., 1:, :])
            weights = 0.5 * (maxes[..., :-1, :] + maxes[..., 1:, :])
        # alpha keeps every bin reachable even when its weight is ~0.
        weights = weights + self.alpha
        pmf = weights / weights.sum(dim=-2, keepdim=True)
        inds = sample_pmf(pmf, n_samples)
        assert inds.shape == (batch_size, *shape, n_samples, 1)
        assert (inds >= 0).all() and (inds < n_coarse_samples).all()

        # Sample uniformly within each selected coarse bin.
        t_rand = torch.rand(inds.shape, device=inds.device)
        lower_ = torch.gather(lower, -2, inds)
        upper_ = torch.gather(upper, -2, inds)

        ts = lower_ + (upper_ - lower_) * t_rand
        ts = torch.sort(ts, dim=-2).values
        return ts
| openai/shap-e | shap_e/models/nerf/ray.py | ray.py | py | 19,663 | python | en | code | 10,619 | github-code | 36 | [
{
"api_name": "torch.Tensor",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "model.NeRFModel",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"... |
3163314445 | #!/usr/bin/python
import sys, glob
from sys import platform
if platform == "linux" or platform == "linux2":
sys.path.insert(0, glob.glob('/home/yaoliu/src_code/local/lib/lib/python2.7/site-packages/')[0])
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.Thrift import TProcessor
from thrift.protocol import TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
import re
import hashlib
class NodeID:
    """
    Thrift-generated struct identifying a node in the ring.

    Attributes:
     - id: hex sha256 key placing the node on the hash ring
     - ip
     - port
    """

    # (field id, wire type, name, type args, default) per thrift field.
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'id', None, None,),  # 1
        (2, TType.STRING, 'ip', None, None,),  # 2
        (3, TType.I32, 'port', None, None,),  # 3
    )

    def __init__(self, id=None, ip=None, port=None, ):
        self.id = id
        self.ip = ip
        self.port = port

    def read(self, iprot):
        # Fast path: decode via the C extension when the accelerated binary
        # protocol and a compatible transport are in use.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans,
                TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: walk fields until STOP, skipping unknown ids/types.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.id = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.ip = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I32:
                    self.port = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path mirrors read(): encode via the C extension if available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('NodeID')
        if self.id is not None:
            oprot.writeFieldBegin('id', TType.STRING, 1)
            oprot.writeString(self.id)
            oprot.writeFieldEnd()
        if self.ip is not None:
            oprot.writeFieldBegin('ip', TType.STRING, 2)
            oprot.writeString(self.ip)
            oprot.writeFieldEnd()
        if self.port is not None:
            oprot.writeFieldBegin('port', TType.I32, 3)
            oprot.writeI32(self.port)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        # Structural equality over all attributes.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Iface:
    """Thrift service interface for the replication RPC."""

    def setReplicationtable(self, node_list):
        """
        Parameters:
         - node_list: list of NodeID forming this node's replica group
        """
        pass
class Client(Iface):
    """Synchronous Thrift client for the replication service."""

    def __init__(self, iprot, oprot=None):
        # With a single protocol, it is used for both input and output.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        self._seqid = 0

    def setReplicationtable(self, node_list):
        """
        Parameters:
         - node_list: list of NodeID to install as the replica table
        """
        self.send_setReplicationtable(node_list)
        self.recv_setReplicationtable()

    def send_setReplicationtable(self, node_list):
        # Serialize the call frame: message header followed by the args struct.
        self._oprot.writeMessageBegin('setReplicationtable', TMessageType.CALL, self._seqid)
        args = setReplicationtable_args()
        args.node_list = node_list
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_setReplicationtable(self):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            # Server-side failure: decode and re-raise locally.
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = setReplicationtable_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        return
class setReplicationtable_args:
    """
    Thrift-generated argument struct for setReplicationtable.

    Attributes:
     - node_list: list of NodeID
    """

    # Field 1 is a LIST of NodeID structs.
    thrift_spec = (
        None,  # 0
        (1, TType.LIST, 'node_list', (TType.STRUCT, (NodeID, NodeID.thrift_spec)), None,),  # 1
    )

    def __init__(self, node_list=None, ):
        self.node_list = node_list

    def read(self, iprot):
        # Fast path via the C extension when available (see NodeID.read).
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans,
                TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    # Decode each NodeID element of the list.
                    self.node_list = []
                    (_etype3, _size0) = iprot.readListBegin()
                    for _i4 in range(_size0):
                        _elem5 = NodeID()
                        _elem5.read(iprot)
                        self.node_list.append(_elem5)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('setReplicationtable_args')
        if self.node_list is not None:
            oprot.writeFieldBegin('node_list', TType.LIST, 1)
            oprot.writeListBegin(TType.STRUCT, len(self.node_list))
            for iter6 in self.node_list:
                iter6.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class setReplicationtable_result:
    """Thrift-generated result struct (the RPC returns void, so no fields)."""

    thrift_spec = (
    )

    def read(self, iprot):
        # Fast path via the C extension when available (see NodeID.read).
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans,
                TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # No declared fields: just consume the struct, skipping anything sent.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('setReplicationtable_result')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
def create_node(s):
    """Build a NodeID from an 'ip:port' string.

    The node id is the SHA-256 hex digest of the whole string, giving
    each node a stable key to sort/position by.
    """
    digest = hashlib.sha256(s.encode()).hexdigest()
    # NOTE(review): assumes s is well-formed 'ip:port'; re.match returns
    # None (and m.group raises AttributeError) on malformed lines — confirm inputs.
    m = re.match('([^:]+):([^:]+)', s)
    return NodeID(digest, m.group(1), int(m.group(2)))
def createReplicas(nid, lst):
    """Return nid's replica group: the block of up to 4 consecutive
    nodes in *lst* that contains *nid*.

    Raises ValueError if nid is not present in lst.
    """
    print('Creation replica for', nid)
    group = lst.index(nid) // 4            # which 4-node block nid falls in
    start = 4 * group
    end = min(4 * (group + 1), len(lst))   # last block may be shorter than 4
    return lst[start:end]
def init_replicatiom(nid, lst, cdct=None):
    """Compute nid's replica table and push it to that node over Thrift.

    nid  -- NodeID of the target node
    lst  -- full sorted node list
    cdct -- optional dict of already-connected clients keyed by 'ip:port';
            when None, a one-shot connection is opened and closed here.

    NOTE(review): name carries a typo ('replicatiom'); kept unchanged for
    backward compatibility with existing callers.
    """
    replicaTable = createReplicas(nid, lst)
    if cdct is None:
        transport = TTransport.TBufferedTransport(TSocket.TSocket(nid.ip, nid.port))
        protocol = TBinaryProtocol.TBinaryProtocol(transport)
        client = Client(protocol)
        transport.open()
        try:
            client.setReplicationtable(replicaTable)
        finally:
            # Always release the socket, even if the RPC fails.
            transport.close()
    else:
        # Reuse a pre-built, buffered client.
        client = cdct['%s:%d' % (nid.ip, nid.port)]
        client.setReplicationtable(replicaTable)
if __name__ == '__main__':
    # Usage: python initReplica.py <node-list-file>
    fn = sys.argv[1]
    # One node per line as 'ip:port'; l[:-1] drops the trailing newline.
    # NOTE(review): assumes every line, including the last, ends in '\n' — confirm.
    lst = sorted([create_node(l[:-1]) for l in open(fn)], key=lambda x: x.id)
    print('starting replicas')
    for node in lst:
        print('replica for', node.port)
        init_replicatiom(node, lst)
| sonaliw-pointers/distributed-systems | DistributedStore/initReplica.py | initReplica.py | py | 10,236 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.platform",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "sys.path.insert",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number"... |
14570130326 | from skimage.io import imread, imsave
from numpy import clip
# Robust autocontrast: linearly stretch intensities so that the darkest
# and brightest ~5% of pixels saturate at 0 / 255.
img = imread('img.png')

# Flatten and sort all pixel values with ndarray ops (the original
# Python per-pixel loop was O(n) interpreter work; flatten() copies,
# so sorting does not disturb img).
pixels = img.flatten()
pixels.sort()

# Index of the 5th / 95th percentile pixel.
k = round(len(pixels) * 0.05)
if k > 0:
    mn, mx = pixels[k], pixels[-k]
else:
    # Image too small to discard 5%: fall back to the true min/max.
    # (The original pixels[-0] aliased pixels[0], making mx == mn and
    # crashing with a division by zero.)
    mn, mx = pixels[0], pixels[-1]

img = img.astype('float')
# Guard a flat image: mx == mn would divide by zero.
scale = (mx - mn) if mx != mn else 1
img = (img - mn) / scale * 255
img = clip(img, 0, 255)
img = img.astype('uint8')
imsave('out_img.png', img)
| vfolunin/stepic-image-processing-course | week2/3. Устойчивый автоконтраст черно-белого изображения.py | 3. Устойчивый автоконтраст черно-белого изображения.py | py | 382 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "skimage.io.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "skimage.io.imsave",
"line_number": 20,
"usage_type": "call"
}
] |
74160035623 | '''
Descripttion: 两个数组的交集 II
version: 1
Author: Jason
Date: 2020-11-22 16:58:35
LastEditors: Jason
LastEditTime: 2020-11-22 16:59:35
'''
import random
from typing import List
def GenerateRandomList(number, size):
    """Return a random-length list of random ints.

    Length is drawn uniformly from [0, size]; each element uniformly
    from [1, number]. Draw order matches the original implementation,
    so seeded runs reproduce the same lists.
    """
    length = random.randint(0, size)
    return [random.randint(1, number) for _ in range(length)]
class Solution:
    """LeetCode 350: intersection of two arrays, with multiplicity."""

    def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Two-pointer walk over sorted copies; O(n log n + m log m).

        Uses sorted() copies so the caller's lists are not mutated
        (the original version sorted the arguments in place).
        """
        a, b = sorted(nums1), sorted(nums2)
        res = []
        i = j = 0
        while i < len(a) and j < len(b):
            if a[i] == b[j]:
                res.append(a[i])
                i += 1
                j += 1
            elif a[i] < b[j]:
                i += 1
            else:
                j += 1
        return res

    def intersect2(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Hash-count approach; O(n + m).

        Counter & Counter keeps, per key, the minimum of the two counts;
        elements() expands each key to that multiplicity, preserving the
        first-occurrence order of nums1 like the original dict walk.
        """
        from collections import Counter
        common = Counter(nums1) & Counter(nums2)
        return list(common.elements())
# Smoke test: run both implementations on random inputs and print the
# results side by side for visual comparison.
s = Solution()
for _ in range(10):
    nums1 = GenerateRandomList(4, 12)
    nums2 = GenerateRandomList(4, 12)
    print(s.intersect2(nums1, nums2))
    print(s.intersect(nums1, nums2))
| CodingProgrammer/Algorithm | 双指针/350.py | 350.py | py | 1,700 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "random.randint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_num... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.