seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
10441407969 | import xlwt
import csv
import requests
import pprint
class GitClient(object):
# Minimal GitHub REST API v3 client for one user's public data.
# NOTE(review): indentation appears to have been stripped when this source
# was embedded in the dataset dump; the logic below reads as method bodies.
def __init__(self):
# root endpoint for all API calls
self.base_url = 'https://api.github.com/'
# credentials are collected lazily by initial()
self.userName = ''
self.password = ''
self.autorization = False
self.notAut = ''
# accumulator of result lists consumed by exports()
self.exporter = []
self.toWrite = {}
def initial(self,name='def',pas='def'):
# Prompt for any credential not supplied; the string 'def' is the
# "not given" sentinel used throughout this class.
# NOTE(review): input() echoes the password to the terminal; getpass
# would be safer — confirm intended usage.
self.userName = name if name != 'def' else input('User Name: ')
self.password = pas if pas != 'def' else input('User password: ')
self.autorization = True
def aboutUser(self):
    """Return a list of display strings describing the configured user.

    Fetches the public GitHub profile of ``self.userName``; on a non-200
    response returns a single-element list with an error string instead.
    """
    r = requests.get(self.base_url + 'users/' + self.userName)
    payload = r.json()  # parse once; the original re-parsed the body per field
    if r.status_code == 200:
        data = [
            "Username : " + self.userName,
            "Name : " + str(payload.get('name')),
            "Email : " + str(payload.get('email')),
            "Followers : " + str(payload.get('followers')),
        ]
    else:
        data = ['Error ' + str(r.status_code) + ' ' + str(payload['message'])]
    return data
def getRepos(self):
    """Return [{'1': repo_name}, {'2': repo_name}, ...] for the user's repos.

    On a non-200 response returns [{'0': 'Error <code> <message>'}].
    """
    data = []
    response = requests.get(self.base_url + 'users/' + self.userName + '/repos')
    if response.status_code == 200:
        # enumerate replaces the manual range(len(...)) index loop
        for i, repo in enumerate(response.json(), start=1):
            data.append({str(i): repo.get('name')})
    else:
        data.append({
            '0': 'Error ' + str(response.status_code) + ' ' + str(response.json()['message'])
        })
    return data
def createRep(self, names='def'):
    """Create a repository named ``names`` for the authenticated user.

    Prompts for the name when omitted. Returns a success or error string.
    """
    import json  # stdlib; used to serialize the request body safely
    name = names if names != 'def' else input('Enter repository name: ')
    # BUG FIX: the original built '{"name":"' + name + '"}' by hand, which
    # produced invalid JSON whenever the name contained a quote or backslash.
    data = json.dumps({"name": name})
    response = requests.post('https://api.github.com/user/repos', data=data,
                             auth=(self.userName, self.password))
    if response.status_code == 201:
        return "Repository " + name + " created"
    return ("Sorry we can't create " + name + " Repository! Error "
            + str(response.status_code) + " " + str(response.json()['message']))
def repoInfo(self,names='def'):
# Return a one-element list with summary stats for the repository `names`
# (prompted interactively when omitted), or an error dict on failure.
data = []
elem = {}
response = requests.get(self.base_url + 'users/' + self.userName + '/repos')
name = names if names != 'def' else input('Enter repository name: ')
# commits and branches are fetched eagerly, even though they are only
# used if the repo is found in the listing below
resCommit = requests.get(self.base_url + 'repos/' + self.userName + '/'+ name +'/commits')
resBranch = requests.get(self.base_url + 'repos/' + self.userName + '/' + name + '/branches')
if response.status_code == 200:
json = response.json()
for i in range(len(json)):
# linear scan of the listing for the requested repo name
if json[i].get('name') == name:
jsonr = json[i]
commit = resCommit.json()
branch = resBranch.json()
# NOTE(review): these counts are len() of a single response page;
# the GitHub API paginates, so they may be capped — confirm.
elem = {
"Name " : jsonr.get('name'),
"Full name " : jsonr.get('full_name'),
"Language " : str(jsonr.get('language')),
"Count commits " : str(len(commit)),
"Count branches " : str(len(branch)),
"Forks count " : str(jsonr.get('forks_count')),
"Open issues count " : str(jsonr.get('open_issues_count')),
"Size": str(jsonr.get('size')) + " bytes"
}
data.append(elem)
else:
data.append({'Error' : str(response.status_code)+" "+str(response.json()['message'])})
return data
def followers(self):
    """Return [{'follower': login}, ...] for the configured user.

    Returns a single error dict on HTTP failure, and
    [{'follower': 'none'}] when the user has no followers.
    """
    response = requests.get(self.base_url + 'users/' + self.userName + '/followers')
    if response.status_code == 200:
        result = [{'follower': entry.get('login')} for entry in response.json()]
    else:
        result = [{'Error': str(response.status_code) + " " + str(response.json()['message'])}]
    if not result:
        result.append({'follower':'none'})
    return result
def sizeGit(self):
    """Return the total size of the user's repositories as '<n> bytes'.

    Returns an 'Error <code> <message>' string when the repo listing fails.
    NOTE(review): GitHub's `size` field is presumably reported in KB, not
    bytes — the label may mislabel the unit; confirm against the API docs.
    """
    total = 0.0
    res = ''
    response = requests.get(self.base_url + 'users/' + self.userName + '/repos')
    if response.status_code == 200:
        for repo in response.json():
            sized = requests.get(self.base_url + 'repos/' + self.userName + '/' + repo.get('name'))
            if sized.status_code == 200:
                total += float(sized.json().get('size'))
            else:
                # BUG FIX: the original reported the *outer* listing
                # response's status/message here, even though it is this
                # per-repo request that failed. (As in the original, this
                # message is overwritten by the size string below; failed
                # repos are effectively skipped.)
                res = 'Error ' + str(sized.status_code) + " " + str(sized.json()['message'])
        res = str(total) + ' bytes'
    else:
        res = 'Error ' + str(response.status_code) + " " + str(response.json()['message'])
    return res
def prints(self, obj):
    """Merge a list of dicts and print each key/value pair, one per line."""
    merged = {}
    for entry in obj:
        merged.update(entry)
    for key, value in merged.items():
        print(key + ": " + value)
def exports(self):
    """Interactively gather account data and save it as CSV or XLS.

    Prompts for credentials, output format, and file name; prints the
    collected data before writing it.
    """
    self.initial()
    saveAs = input('enter format saved (csv or xls)')
    user = [{
        'User': self.userName,
        'Size Git Repositories': self.sizeGit()
    }]
    self.exporter.append(self.getRepos())
    self.exporter.append(self.repoInfo())
    self.exporter.append(self.followers())
    self.exporter.append(user)
    toWrite = {}
    for elem in self.exporter:
        # BUG FIX: the original tested `type(elem) == 'dict'` — comparing a
        # type object against the *string* 'dict' is always False, so the
        # first branch was dead code. Use isinstance instead.
        if isinstance(elem, dict):
            toWrite.update(elem)
        else:
            for sub in elem:
                toWrite.update(sub)
    for k in toWrite:
        print(k + ": " + toWrite[k])
    name = input('Enter file name: ')
    if saveAs.lower() == 'csv':
        # context manager guarantees the file is closed; newline='' is the
        # documented requirement for csv writers
        with open(name + '.csv', 'w', newline='') as file:
            writer = csv.writer(file, delimiter=";", quotechar='"')
            # BUG FIX: writerows(toWrite) iterated the dict's *keys* and
            # wrote each key as a row of single characters; write the
            # key/value pairs instead.
            writer.writerows(toWrite.items())
        print('File saved as ' + name + '.csv')
    elif saveAs.lower() == 'xls':
        book = xlwt.Workbook()
        sh = book.add_sheet("About")
        for n, k in enumerate(toWrite):
            print(n, k, toWrite[k])
            sh.write(n, 0, k)
            sh.write(n, 1, toWrite[k])
        book.save(name + ".xls")
        print('File saved as ' + name + '.xls')
    else:
        print('Incorrect file sys!!!')
| Delight116/003-004-be-addo-Cmd_and_Html_GitClient | GitClient.py | GitClient.py | py | 6,608 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_numb... |
27275697128 | """Very simple example using a pair of Lennard-Jones particles.
This script has several pieces to pay attention to:
- Importing the pieces from wepy to run a WExplore simulation.
- Definition of a distance metric for this system and process.
- Definition of the components used in the simulation: resampler,
boundary conditions, runner.
- Definition of the reporters which will write the data out.
- Create the work mapper for a non-parallel run.
- Construct the simulation manager with all the parts.
- Actually run the simulation.
"""
import sys
from copy import copy
import os
import os.path as osp
import numpy as np
import openmm.app as omma
import openmm as omm
import simtk.unit as unit
from openmm_systems.test_systems import LennardJonesPair
import mdtraj as mdj
from wepy.util.mdtraj import mdtraj_to_json_topology
from wepy.sim_manager import Manager
from wepy.resampling.distances.distance import Distance
from wepy.resampling.resamplers.wexplore import WExploreResampler
from wepy.walker import Walker
from wepy.runners.openmm import OpenMMRunner, OpenMMState
from wepy.runners.openmm import UNIT_NAMES, GET_STATE_KWARG_DEFAULTS
from wepy.work_mapper.mapper import Mapper
from wepy.boundary_conditions.receptor import UnbindingBC
from wepy.reporter.hdf5 import WepyHDF5Reporter
from wepy.reporter.dashboard import DashboardReporter
from wepy.reporter.receptor.dashboard import ReceptorBCDashboardSection
from wepy.reporter.wexplore.dashboard import WExploreDashboardSection
from wepy.reporter.openmm import OpenMMRunnerDashboardSection
from scipy.spatial.distance import euclidean
## PARAMETERS
# Platform used for OpenMM which uses different hardware computation
# kernels. Options are: Reference, CPU, OpenCL, CUDA.
# we use the Reference platform because this is just a test
PLATFORM = 'Reference'
# Langevin Integrator
TEMPERATURE= 300.0*unit.kelvin
FRICTION_COEFFICIENT = 1/unit.picosecond
# step size of time integrations
STEP_SIZE = 0.002*unit.picoseconds
# Resampler parameters
# the maximum weight allowed for a walker
PMAX = 0.5
# the minimum weight allowed for a walker
PMIN = 1e-12
# the maximum number of regions allowed under each parent region
MAX_N_REGIONS = (10, 10, 10, 10)
# the maximum size of regions, new regions will be created if a walker
# is beyond this distance from each voronoi image unless there is an
# already maximal number of regions
MAX_REGION_SIZES = (1, 0.5, .35, .25) # nanometers
# boundary condition parameters
# maximum distance between between any atom of the ligand and any
# other atom of the protein, if the shortest such atom-atom distance
# is larger than this the ligand will be considered unbound and
# restarted in the initial state
CUTOFF_DISTANCE = 1.0 # nm
# reporting parameters
# these are the properties of the states (i.e. from OpenMM) which will
# be saved into the HDF5
SAVE_FIELDS = ('positions', 'box_vectors', 'velocities')
## INPUTS/OUTPUTS
# the inputs directory
inputs_dir = osp.realpath('input')
# the outputs path
outputs_dir = osp.realpath('_output/we')
# make the outputs dir if it doesn't exist
os.makedirs(outputs_dir, exist_ok=True)
# inputs filenames
json_top_filename = "pair.top.json"
# outputs
hdf5_filename = 'results.wepy.h5'
dashboard_filename = 'wepy.dash.org'
# normalize the output paths
hdf5_path = osp.join(outputs_dir, hdf5_filename)
dashboard_path = osp.join(outputs_dir, dashboard_filename)
## System and OpenMMRunner
# make the test system
test_sys = LennardJonesPair()
# make the integrator
integrator = omm.LangevinIntegrator(TEMPERATURE, FRICTION_COEFFICIENT, STEP_SIZE)
# make a context and set the positions
context = omm.Context(test_sys.system, copy(integrator))
context.setPositions(test_sys.positions)
# get the data from this context so we have a state to start the
# simulation with
get_state_kwargs = dict(GET_STATE_KWARG_DEFAULTS)
init_sim_state = context.getState(**get_state_kwargs)
init_state = OpenMMState(init_sim_state)
# initialize the runner
runner = OpenMMRunner(test_sys.system, test_sys.topology, integrator, platform=PLATFORM)
## Distance Metric
# we define a simple distance metric for this system, assuming the
# positions are in a 'positions' field
class PairDistance(Distance):
    """Distance metric comparing the inter-particle separation of two states.

    The image of a state is its positions array; the distance between two
    images is the absolute difference of their particle-pair separations.
    """

    def __init__(self, metric=euclidean):
        self.metric = metric

    def image(self, state):
        # the image is simply the positions field of the walker state
        return state['positions']

    def image_distance(self, image_a, image_b):
        sep_a = self.metric(image_a[0], image_a[1])
        sep_b = self.metric(image_b[0], image_b[1])
        return np.abs(sep_a - sep_b)
# make a distance object which can be used to compute the distance
# between two walkers, for our scorer class
distance = PairDistance()
## Resampler
resampler = WExploreResampler(distance=distance,
init_state=init_state,
max_region_sizes=MAX_REGION_SIZES,
max_n_regions=MAX_N_REGIONS,
pmin=PMIN, pmax=PMAX)
## Boundary Conditions
# the mdtraj here is needed for the distance function
mdtraj_topology = mdj.Topology.from_openmm(test_sys.topology)
json_str_top = mdtraj_to_json_topology(mdtraj_topology)
# initialize the unbinding boundary conditions
ubc = UnbindingBC(cutoff_distance=CUTOFF_DISTANCE,
initial_state=init_state,
topology=json_str_top,
ligand_idxs=np.array(test_sys.ligand_indices),
receptor_idxs=np.array(test_sys.receptor_indices))
## Reporters
# make a dictionary of units for adding to the HDF5
units = dict(UNIT_NAMES)
# open it in truncate mode first, then switch after first run
hdf5_reporter = WepyHDF5Reporter(file_path=hdf5_path, mode='w',
save_fields=SAVE_FIELDS,
resampler=resampler,
boundary_conditions=ubc,
topology=json_str_top,
units=units)
wexplore_dash = WExploreDashboardSection(resampler=resampler)
openmm_dash = OpenMMRunnerDashboardSection(runner=runner,
step_time=STEP_SIZE)
ubc_dash = ReceptorBCDashboardSection(bc=ubc)
dashboard_reporter = DashboardReporter(
file_path=dashboard_path,
mode='w',
resampler_dash=wexplore_dash,
runner_dash=openmm_dash,
bc_dash=ubc_dash,
)
reporters = [hdf5_reporter, dashboard_reporter]
## Work Mapper
# a simple work mapper
mapper = Mapper()
## Run the simulation
if __name__ == "__main__":
    # BUG FIX: the original indexed sys.argv[1] unconditionally and crashed
    # with IndexError when run with no arguments; show usage instead.
    if len(sys.argv) < 2 or sys.argv[1] in ("-h", "--help"):
        print("arguments: n_cycles, n_steps, n_walkers")
    else:
        n_cycles = int(sys.argv[1])
        n_steps = int(sys.argv[2])
        n_walkers = int(sys.argv[3])
        print("Number of steps: {}".format(n_steps))
        print("Number of cycles: {}".format(n_cycles))
        # create the initial walkers, all starting from the same state with
        # uniform weights summing to 1
        init_weight = 1.0 / n_walkers
        init_walkers = [Walker(OpenMMState(init_sim_state), init_weight)
                        for i in range(n_walkers)]
        # initialize the simulation manager with the components built above
        sim_manager = Manager(init_walkers,
                              runner=runner,
                              resampler=resampler,
                              boundary_conditions=ubc,
                              work_mapper=mapper,
                              reporters=reporters)
        # number of MD steps for each cycle; could differ per cycle in principle
        steps = [n_steps for i in range(n_cycles)]
        print("Starting run")
        sim_manager.run_simulation(n_cycles, steps)
        print("Finished run")
| ADicksonLab/wepy | info/examples/Lennard_Jones_Pair/source/we.py | we.py | py | 7,753 | python | en | code | 44 | github-code | 36 | [
{
"api_name": "simtk.unit.kelvin",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "simtk.unit",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "simtk.unit.picosecond",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "simtk.un... |
40461953299 | import os
import gspread
from oauth2client.service_account import ServiceAccountCredentials as SAC
from linebot import LineBotApi, WebhookParser
from linebot.models import MessageEvent, TextMessage, ImageSendMessage, URITemplateAction, TextSendMessage, TemplateSendMessage, ButtonsTemplate, MessageTemplateAction, CarouselColumn, CarouselContainer, CarouselTemplate, ImageCarouselColumn, ConfirmTemplate, MessageAction, ImageCarouselTemplate, LocationSendMessage
import requests
import bs4
import time
channel_access_token = "d7LmOXC9iutUkR//iJHfWy55z5oWmOkz2/Nx1FM34Y5LVfbTdwe+n/cCjcARAax+CD1oRkPGMKAWTKdi2P84uJSq5RBLYs4P4nwdMc9SkTtXK2oiKMyp6ch5XFsRFSOGNlOyH3iId6EPkKJnugb3+QdB04t89/1O/w1cDnyilFU=/bfDiSkEsrHaYtAS/fKH6vi9aMnwsM08hZJmg/xwPJVD="
styles = {'不限': "", '現代風' : "Modern", '簡約風':"Simplicity",
'飯店風':"Hotel", '奢華風':"Luxury", '休閒風':"Leisure",
'鄉村風':"Rustic", '混搭風':"Mashup", '日式':"Japanese",
'LOFT':"Industrial", '前衛風':"AvantGarde"}
styles_ind = ['不限', '現代風', '簡約風', '飯店風', '奢華風', '休閒風',
'鄉村風', '混搭風', '日式', 'LOFT', '前衛風']
def send_text_message(reply_token, text):
# Reply to a Line webhook event with a plain text message; returns "OK".
line_bot_api = LineBotApi(channel_access_token)
line_bot_api.reply_message(reply_token, TextSendMessage(text=text))
return "OK"
def show_search_style_or_category(reply_token):
line_bot_api = LineBotApi(channel_access_token)
Carousel_template = TemplateSendMessage(
alt_text='Carousel template',
template=CarouselTemplate(
columns=[
CarouselColumn(
thumbnail_image_url='https://i.imgur.com/GVWVqOO.png',
title='請選擇要查詢的風格或類別',
text=' ',
actions=[
MessageTemplateAction(
label='風格',
text='search'
),
MessageTemplateAction(
label="類別",
text="category"
)
]
)
]
)
)
line_bot_api.reply_message(reply_token,Carousel_template)
def show_category(reply_token):
line_bot_api = LineBotApi(channel_access_token)
Carousel_template = TemplateSendMessage(
alt_text='Image Carousel template',
template=ImageCarouselTemplate(
columns=[
ImageCarouselColumn(
image_url='https://i.imgur.com/EsGdyZi.png',
action=URITemplateAction(
label='大會議室',
uri='https://officesnapshots.com/photos/?filter_meeting-spaces=large-meeting-room'
)
),
ImageCarouselColumn(
image_url='https://i.imgur.com/lpdriMH.png',
action=URITemplateAction(
label='小會議室',
uri='https://officesnapshots.com/photos/?filter_meeting-spaces=small-meeting-room'
)
),
ImageCarouselColumn(
image_url='https://i.imgur.com/o8sd5bc.png',
action=URITemplateAction(
label='開放式工作站',
uri='https://officesnapshots.com/photos/?filter_work-spaces=open-office'
)
)
]
)
)
line_bot_api.reply_message(reply_token,Carousel_template)
def show_search(reply_token):
    """Reply with a numbered prompt listing all available styles."""
    line_bot_api = LineBotApi(channel_access_token)
    lines = ["請輸入想要的風格或編號:"]
    # enumerate replaces the manual `ind` counter, and join removes the
    # special-casing of the last entry's trailing newline
    for index, style in enumerate(styles, start=1):
        lines.append(f"{index:2d}: {style}")
    line_bot_api.reply_message(reply_token, TextSendMessage(text="\n".join(lines)))
def show_start_search(reply_token, text):
    """Scrape the decotv gallery for the chosen style and reply with images.

    `text` is either a 1-based style number or a style name from `styles`.
    Replies with up to five result images plus navigation entries; on
    invalid input, replies with an error message and stops.
    """
    line_bot_api = LineBotApi(channel_access_token)
    url_end = '&pattern=Office&page=1'
    if text.isdigit() and (int(text) > 0 and int(text) <= len(styles_ind)):
        url = 'https://decotv.com.tw/gallery?works=' + styles[styles_ind[int(text) - 1]] + url_end
    elif not text.isdigit() and text in styles.keys():
        url = 'https://decotv.com.tw/gallery?works=' + styles[text] + url_end
    else:
        line_bot_api.reply_message(reply_token, TextSendMessage(text="輸入錯誤,請重新輸入"))
        # BUG FIX: the original fell through here and crashed with a
        # NameError because `url` was never assigned; stop after replying.
        return
    response = requests.get(url)
    html = bs4.BeautifulSoup(response.text, 'html.parser')
    links = html.find_all("img", {"class": "bgimg"})
    links2 = html.find_all("div", {"class": "frameitemin caseclicknew btn"})
    time.sleep(3)
    popup_url_link = []
    img_urls = []
    popup_url = 'https://decotv.com.tw/works_thisid,'
    popup_url_mid = '_thispic,'
    # Line image carousels allow at most ~10 columns; cap results at 5 so
    # the navigation entries below still fit
    links_length = 5 if len(links) > 5 else len(links)
    for i in range(links_length):
        img_urls.append('https://decotv.com.tw/' + links[i]['src'])
        popup_url_link.append(popup_url + links2[i].get("data-id") + popup_url_mid + links2[i].get("data-pic"))
    imagecarouselcolumns = []
    for i in range(links_length):
        imagecarouselcolumns.append(
            ImageCarouselColumn(
                image_url=img_urls[i],
                action=URITemplateAction(label=str(i + 1), uri=popup_url_link[i])))
    if len(links) > 5:
        # "see more" entry linking to the full gallery page
        imagecarouselcolumns.append(
            ImageCarouselColumn(
                image_url="https://i.imgur.com/4CIrAa9.png",
                action=URITemplateAction(label="查看更多", uri=url)))
    imagecarouselcolumns.append(
        ImageCarouselColumn(
            image_url="https://i.imgur.com/PoXcTmZ.png",
            action=MessageTemplateAction(label="重新查詢", text="重新查詢")))
    imagecarouselcolumns.append(
        ImageCarouselColumn(
            image_url='https://i.imgur.com/iuDuTbt.png?1',
            action=MessageTemplateAction(label='返回主選單', text='main menu')))
    Carousel_template = TemplateSendMessage(
        alt_text='Image Carousel template',
        template=ImageCarouselTemplate(
            columns=imagecarouselcolumns))
    line_bot_api.reply_message(reply_token, Carousel_template)
def show_enter_menu(reply_token):
line_bot_api = LineBotApi(channel_access_token)
line_bot_api.reply_message(reply_token, TextSendMessage(text="請從選單中選擇您要的服務項目"))
def show_main_menu(reply_token):
line_bot_api = LineBotApi(channel_access_token)
Carousel_template = TemplateSendMessage(
alt_text='Carousel template',
template=CarouselTemplate(
columns=[
CarouselColumn(
thumbnail_image_url='https://i.imgur.com/dXfoAvK.jpg',
title='主選單',
text='請選擇想要的服務項目',
actions=[
MessageTemplateAction(
label='查詢家具目錄&圖片',
text='contents and images'
),
MessageTemplateAction(
label='保養方法',
text="maintenance method"
),
MessageTemplateAction(
label='聯絡我們',
text="contact us"
)
]
)
]
)
)
line_bot_api.reply_message(reply_token,Carousel_template)
def show_maintenance_method(reply_token):
line_bot_api = LineBotApi(channel_access_token)
line_bot_api.reply_message(reply_token,ImageSendMessage(
original_content_url='https://i.imgur.com/ITshKAM.png',
preview_image_url='https://i.imgur.com/ITshKAM.png'))
def show_FSM(reply_token):
line_bot_api = LineBotApi(channel_access_token)
line_bot_api.reply_message(reply_token,ImageSendMessage(
original_content_url="https://i.imgur.com/rR8CR8W.png",
preview_image_url="https://i.imgur.com/rR8CR8W.png"))
def show_contact_us(reply_token):
line_bot_api = LineBotApi(channel_access_token)
Carousel_template = TemplateSendMessage(
alt_text='Carousel template',
template=CarouselTemplate(
columns=[
CarouselColumn(
thumbnail_image_url='https://i.imgur.com/XjDtpGl.png',
title='聯絡我們',
text=' ',
actions=[
MessageTemplateAction(
label='地址',
text='address'
),
MessageTemplateAction(
label="聯絡電話",
text="contact number"
)
]
)
]
)
)
line_bot_api.reply_message(reply_token,Carousel_template)
def show_address(reply_token):
line_bot_api = LineBotApi(channel_access_token)
line_bot_api.reply_message(reply_token, LocationSendMessage(title="地址", address="台北市內湖區成功路四段188號14樓之11", latitude=25.08414356007363, longitude=121.59439182744914))
def show_contact_number(reply_token):
line_bot_api = LineBotApi(channel_access_token)
line_bot_api.reply_message(reply_token, TextSendMessage(text="聯絡電話:(02)2794-2268"))
def show_search_contents_and_images(reply_token):
line_bot_api = LineBotApi(channel_access_token)
Carousel_template = TemplateSendMessage(
alt_text='Carousel template',
template=CarouselTemplate(
columns=[
CarouselColumn(
thumbnail_image_url='https://i.imgur.com/WSh5g9U.jpg',
title='家具目錄&圖片',
text='請選擇想要的家具目錄或圖片',
actions=[
MessageTemplateAction(
label='目錄',
text='contents'
),
MessageTemplateAction(
label='辦公桌',
text='office tables'
),
MessageTemplateAction(
label='辦公椅&沙發',
text='office chairs and sofas'
)
]
)
]
)
)
line_bot_api.reply_message(reply_token,Carousel_template)
def show_office_chairs(reply_token):
line_bot_api = LineBotApi(channel_access_token)
Carousel_template = TemplateSendMessage(
alt_text='Image Carousel template',
template=ImageCarouselTemplate(
columns=[
ImageCarouselColumn(
image_url='https://i.imgur.com/vA4AV0k.jpg',
action=URITemplateAction(
label='辦公椅(SD)',
uri="https://drive.google.com/drive/folders/1zb0oE92j4H7nwSjnREH1qO9gctXaAO45"
)
),
ImageCarouselColumn(
image_url='https://i.imgur.com/hKw6Hfw.jpg',
action=URITemplateAction(
label='造型椅(LG)',
uri='https://drive.google.com/drive/folders/1VvAbvKri-wz1mswbvyQ4eB-uv6jhdSP1'
)
),
ImageCarouselColumn(
image_url='https://i.imgur.com/Calzg5r.png',
action= URITemplateAction(
label='沙發',
uri='https://drive.google.com/drive/folders/12kw46rchbYTRjybj2BH0pGglCM8t5KnE'
)
)
]
)
)
line_bot_api.reply_message(reply_token,Carousel_template)
def show_office_tables(reply_token):
line_bot_api = LineBotApi(channel_access_token)
Carousel_template = TemplateSendMessage(
alt_text='Image Carousel template',
template=ImageCarouselTemplate(
columns=[
ImageCarouselColumn(
image_url='https://i.imgur.com/C1qXu2a.jpg',
action=URITemplateAction(
label='獨立桌(LG)',
uri='https://drive.google.com/drive/folders/1PohqcyoW1TPUdDfVoGP8Tu0bKorexUtp'
)
),
ImageCarouselColumn(
image_url='https://i.imgur.com/emrkJns.jpg',
action=URITemplateAction(
label='獨立桌(KT)',
uri="https://drive.google.com/drive/folders/1_ds45brlPQq5WK5cyIwZsgAe_GSFVcQQ"
)
),
ImageCarouselColumn(
image_url='https://i.imgur.com/pQFaPYe.jpg',
action=URITemplateAction(
label='獨立桌(OS)',
uri='https://drive.google.com/drive/folders/1KcZU87EBVlUiShEQhuvQS0Fa-LNnyxQ4'
)
),
ImageCarouselColumn(
image_url='https://i.imgur.com/Kp9l3J9.jpg',
action=URITemplateAction(
label='升降桌(LG)',
uri="https://drive.google.com/drive/folders/1iTJy6aX9tVHDeJrVZ6mjQZJW7WgoEdZe"
)
),
ImageCarouselColumn(
image_url='https://i.imgur.com/5ok0DxE.jpg',
action=URITemplateAction(
label='主管桌(DS)',
uri="https://drive.google.com/drive/folders/1Zp0vS6zQdBHcKK2ReqP6lHOZQ0hJ3jxl"
)
)
]
)
)
line_bot_api.reply_message(reply_token,Carousel_template)
def show_contents(reply_token):
line_bot_api = LineBotApi(channel_access_token)
Carousel_template = TemplateSendMessage(
alt_text='Image Carousel template',
template=ImageCarouselTemplate(
columns=[
ImageCarouselColumn(
image_url='https://i.imgur.com/cAuLdQJ.png',
action=URITemplateAction(
label='主管桌目錄',
uri='https://drive.google.com/drive/folders/1JKvSRxe4ynifQpUz0yLxu9bJSGAzC5xt'
)
),
ImageCarouselColumn(
image_url='https://i.imgur.com/O0Fe27J.png',
action=URITemplateAction(
label='家具綜合目錄',
uri="https://drive.google.com/drive/folders/1-0X1bsMc8DVgJxMlvSrrsX0P8NIqVrLl"
)
),
ImageCarouselColumn(
image_url='https://i.imgur.com/V3FdBxP.png',
action=URITemplateAction(
label='獨立桌目錄',
uri='https://drive.google.com/drive/folders/1DFXEHQA9nILGK9TCCW2pW4bTHAAk3Me5'
)
),
ImageCarouselColumn(
image_url='https://i.imgur.com/W1UMAsj.png',
action=URITemplateAction(
label='辦公椅目錄',
uri="https://drive.google.com/drive/folders/11tUf4GMAW2jtIEdQmnPT25gaiTuJtRSo"
)
)
]
)
)
line_bot_api.reply_message(reply_token,Carousel_template) | JasperLin0118/linebot | utils.py | utils.py | py | 15,459 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "linebot.LineBotApi",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "linebot.models.TextSendMessage",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "linebot.LineBotApi",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "... |
9194727466 | import unittest
import torch
import lightly
class TestNestedImports(unittest.TestCase):
def test_nested_imports(self):
# active learning
lightly.active_learning.agents.agent.ActiveLearningAgent
lightly.active_learning.agents.ActiveLearningAgent
lightly.active_learning.config.sampler_config.SamplerConfig
lightly.active_learning.config.SamplerConfig
lightly.active_learning.scorers.classification.ScorerClassification
lightly.active_learning.scorers.ScorerClassification
lightly.active_learning.scorers.detection.ScorerObjectDetection
lightly.active_learning.scorers.ScorerObjectDetection
lightly.active_learning.utils.bounding_box.BoundingBox
lightly.active_learning.utils.BoundingBox
lightly.active_learning.utils.object_detection_output.ObjectDetectionOutput
lightly.active_learning.utils.ObjectDetectionOutput
# api imports
lightly.api.api_workflow_client.ApiWorkflowClient
lightly.api.ApiWorkflowClient
lightly.api.bitmask.BitMask
# data imports
lightly.data.LightlyDataset
lightly.data.dataset.LightlyDataset
lightly.data.BaseCollateFunction
lightly.data.collate.BaseCollateFunction
lightly.data.ImageCollateFunction
lightly.data.collate.ImageCollateFunction
lightly.data.MoCoCollateFunction
lightly.data.collate.MoCoCollateFunction
lightly.data.SimCLRCollateFunction
lightly.data.collate.SimCLRCollateFunction
lightly.data.imagenet_normalize
lightly.data.collate.imagenet_normalize
# embedding imports
lightly.embedding.BaseEmbedding
lightly.embedding._base.BaseEmbedding
lightly.embedding.SelfSupervisedEmbedding
lightly.embedding.embedding.SelfSupervisedEmbedding
# loss imports
lightly.loss.NTXentLoss
lightly.loss.ntx_ent_loss.NTXentLoss
lightly.loss.SymNegCosineSimilarityLoss
lightly.loss.sym_neg_cos_sim_loss.SymNegCosineSimilarityLoss
lightly.loss.memory_bank.MemoryBankModule
lightly.loss.regularizer.CO2Regularizer
lightly.loss.regularizer.co2.CO2Regularizer
# models imports
lightly.models.ResNetGenerator
lightly.models.resnet.ResNetGenerator
lightly.models.SimCLR
lightly.models.simclr.SimCLR
lightly.models.MoCo
lightly.models.moco.MoCo
lightly.models.SimSiam
lightly.models.simsiam.SimSiam
lightly.models.ZOO
lightly.models.zoo.ZOO
lightly.models.checkpoints
lightly.models.zoo.checkpoints
lightly.models.batchnorm.get_norm_layer
# transforms imports
lightly.transforms.GaussianBlur
lightly.transforms.gaussian_blur.GaussianBlur
lightly.transforms.RandomRotate
lightly.transforms.rotation.RandomRotate
# utils imports
lightly.utils.save_embeddings
lightly.utils.io.save_embeddings
lightly.utils.load_embeddings
lightly.utils.io.load_embeddings
lightly.utils.load_embeddings_as_dict
lightly.utils.io.load_embeddings_as_dict
lightly.utils.fit_pca
lightly.utils.embeddings_2d.fit_pca
# core imports
lightly.train_model_and_embed_images
lightly.core.train_model_and_embed_images
lightly.train_embedding_model
lightly.core.train_embedding_model
lightly.embed_images
lightly.core.embed_images | tibe97/thesis-self-supervised-learning | tests/imports/test_nested_imports.py | test_nested_imports.py | py | 3,548 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "lightly.active_learning",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "lightly.active_learning",
"line_number": 12,
"usage_type": "attribute"
},
{
"a... |
74328503143 | import logging
import pandas as pd
from openfisca_ceq.tools.data import config_parser, year_by_country
from openfisca_ceq.tools.data_ceq_correspondence import (
ceq_input_by_harmonized_variable,
ceq_intermediate_by_harmonized_variable,
data_by_model_weight_variable,
model_by_data_id_variable,
model_by_data_role_index_variable,
non_ceq_input_by_harmonized_variable,
variables_by_entity,
)
log = logging.getLogger(__name__)
missing_revenus_by_country = {
'cote_d_ivoire': [
# 'rev_i_independants',
],
'mali': [
'cov_i_type_ecole',
# 'rev_i_independants_taxe',
# 'rev_i_independants_Ntaxe',
'rev_i_locatifs',
'rev_i_autres_revenus_capital',
'rev_i_pensions',
'rev_i_transferts_publics',
],
'senegal': [
# 'rev_i_independants',
],
}
def build_income_dataframes(country):
year = year_by_country[country]
income_data_path = config_parser.get(country, 'revenus_harmonises_{}'.format(year))
model_variable_by_person_variable = dict()
variables = [
ceq_input_by_harmonized_variable,
ceq_intermediate_by_harmonized_variable,
model_by_data_id_variable,
non_ceq_input_by_harmonized_variable,
]
for item in variables:
model_variable_by_person_variable.update(item)
income = pd.read_stata(income_data_path)
for variable in income.columns:
if variable.startswith("rev"):
assert income[variable].notnull().any(), "{} income variable for {} is all null".format(
variable, country)
assert (
set(model_variable_by_person_variable.keys()).difference(
set(missing_revenus_by_country.get(country, []))
)
<= set(income.columns)
), \
"Missing {} in {} income data source".format(
set(model_variable_by_person_variable.keys()).difference(
set(missing_revenus_by_country.get(country, []))
).difference(set(income.columns)),
country,
)
data_by_model_id_variable = {v: k for k, v in model_by_data_id_variable.items()}
data_by_model_role_index_variable = {v: k for k, v in model_by_data_role_index_variable.items()}
dataframe_by_entity = dict()
for entity, variables in variables_by_entity.items():
data_entity_id = data_by_model_id_variable["{}_id".format(entity)]
data_entity_weight = data_by_model_weight_variable["person_weight"]
filtered_variables = list(
set(variables).difference(
set(missing_revenus_by_country.get(country, [])))
)
data_group_entity_ids = list()
data_group_entity_role_index = list()
if entity == 'person':
for group_entity in variables_by_entity.keys():
if group_entity == 'person':
continue
data_group_entity_ids += [data_by_model_id_variable["{}_id".format(group_entity)]]
data_group_entity_role_index += [data_by_model_role_index_variable["{}_role_index".format(group_entity)]]
dataframe = income[
filtered_variables
+ [
data_entity_id,
data_entity_weight,
]
+ data_group_entity_ids
+ data_group_entity_role_index
].copy()
if entity != 'person':
person_weight_variable = data_by_model_weight_variable["person_weight"]
group_id_variable = data_by_model_id_variable["{}_id".format(group_entity)]
household_weight = dataframe.groupby(group_id_variable)[person_weight_variable].mean()
weight_by_group_ok = dataframe.groupby(group_id_variable)[person_weight_variable].nunique() == 1
problematic_group_id = weight_by_group_ok.reset_index().query(
"~{}".format(person_weight_variable)
)[group_id_variable].tolist()
assert weight_by_group_ok.all(), "Problematic weights:\n{}".format(
dataframe.loc[dataframe[group_id_variable].isin(problematic_group_id)]
)
dataframe = dataframe.groupby(data_by_model_id_variable["{}_id".format(group_entity)]).sum()
del dataframe[data_by_model_weight_variable["person_weight"]]
dataframe['household_weight'] = household_weight.values
dataframe = dataframe.reset_index()
dataframe_by_entity[entity] = dataframe
log.info("For country {}: {} persons and {} households".format(
country, len(dataframe_by_entity["person"]), len(dataframe_by_entity["household"])
))
assert len(dataframe_by_entity["person"]) == dataframe_by_entity["person"].pers_id.nunique()
assert len(dataframe_by_entity["household"]) == dataframe_by_entity["person"].hh_id.nunique()
return dataframe_by_entity["person"], dataframe_by_entity["household"]
if __name__ == "__main__":
# for country in year_by_country.keys():
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
country = "senegal"
person_dataframe, household_dataframe = build_income_dataframes(country)
| openfisca/openfisca-ceq | openfisca_ceq/tools/data/income_loader.py | income_loader.py | py | 5,243 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "openfisca_ceq.tools.data.year_by_country",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "openfisca_ceq.tools.data.config_parser.get",
"line_number": 43,
"usage_type": ... |
18252810201 | from functools import lru_cache
from typing import List
class Solution:
    def maxCoins(self, nums: List[int]) -> int:
        """Return the maximum coins obtainable by bursting all balloons.

        Bursting balloon i yields padded[left] * padded[i] * padded[right]
        coins, where left/right are the nearest not-yet-burst neighbours and
        virtual balloons worth 1 sit at both ends.  Bottom-up interval DP:
        O(n^3) time, O(n^2) space.
        """
        padded = [1] + nums + [1]
        size = len(padded)
        # best[lo][hi] = max coins from bursting every balloon in padded[lo..hi]
        best = [[0] * size for _ in range(size)]
        for span in range(1, size - 1):
            for lo in range(1, size - span):
                hi = lo + span - 1
                # k = the balloon in [lo, hi] burst *last*
                best[lo][hi] = max(
                    padded[lo - 1] * padded[k] * padded[hi + 1]
                    + best[lo][k - 1]
                    + best[k + 1][hi]
                    for k in range(lo, hi + 1)
                )
        return best[1][size - 2]
solution = Solution()
# Smoke test: LeetCode 312 sample input ([3,1,5,8] -> 167).
assert solution.maxCoins([3,1,5,8]) == 167, "Should be 167"
| hujienan/Jet-Algorithm | leetcode/312. Burst Balloons/index.py | index.py | py | 797 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "functools.lru_cache",
"line_number": 7,
"usage_type": "call"
}
] |
#temperature range is 4-95 degrees C
#we use a GEN2 temperature module
#tolerance on read is about 5 degrees for heating (at least with an IR thermometer)
# can hold steady at a temperature really well, though this would need more testing at differing temperatures
#idling temperature 55 C; can hold within .5 C of the target since the readout is in integers
#if using capsules/containers on the temp module, set the module labware to opentrons_24_aluminumblock_nest_1.5ml_snapcap or something similar
#lab has both aluminumblock 24 well and 96 well plates
# Target temperature in degrees C (module range is 4-95 C per the notes above).
temp=4
from opentrons import protocol_api
# Required Opentrons protocol metadata; 'apiLevel' selects the API behaviour.
metadata = {
    'protocolName': 'Temp_module_test_cooling',
    'author': 'parke',
    'description':'protocol to run temp_module',
    'apiLevel':'2.10'
    }
def run(protocol: protocol_api.ProtocolContext):
    """Cool a GEN2 temperature module in deck slot 4 down to `temp` C, then
    deactivate it.

    NOTE(review): set_temperature presumably blocks until the target is
    reached — confirm against the Opentrons API docs.
    """
    temp_mod = protocol.load_module('temperature module gen2', '4')
    # Labware mounted on top of the module (loaded but not otherwise used here).
    plate = temp_mod.load_labware('corning_96_wellplate_360ul_flat')
    temp_mod.set_temperature(celsius=temp)
    temp_mod.status # 'holding at target'
    temp_mod.deactivate()
    temp_mod.status # 'idle'
| MyersResearchGroup/OpenTrons_OT2_Protocols | temperature_module/tempurature_module.py | tempurature_module.py | py | 1,122 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "opentrons.protocol_api.ProtocolContext",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "opentrons.protocol_api",
"line_number": 22,
"usage_type": "name"
}
] |
14551270733 | from io import StringIO
from itertools import chain
from typing import Any, Dict, List, Union
import simplejson as json
from jubeatools import song as jbt
from jubeatools.formats.filetypes import SongFile
from jubeatools.utils import lcm
from ..tools import make_memon_dumper
from . import schema
def _long_note_tail_value_v0(note: jbt.LongNote) -> int:
    """Encode the offset from a long note to its tail tip as the memon v0
    `p` value; raise ValueError for offsets memon cannot encode."""
    offset = (
        note.tail_tip.x - note.position.x,
        note.tail_tip.y - note.position.y,
    )
    if offset not in schema.X_Y_OFFSET_TO_P_VALUE:
        dx, dy = offset
        raise ValueError(
            f"memon cannot represent a long note with its tail starting ({dx}, {dy}) away from the note"
        ) from None
    return schema.X_Y_OFFSET_TO_P_VALUE[offset]
def _get_timing(song: jbt.Song) -> jbt.Timing:
    """Return the song-wide timing if set, otherwise the first chart's timing."""
    if song.common_timing is not None:
        return song.common_timing
    per_chart_timings = (
        chart.timing for chart in song.charts.values() if chart.timing is not None
    )
    return next(per_chart_timings)
def _raise_if_unfit_for_v0(song: jbt.Song, version: str) -> None:
    """Raises an exception if the Song object is ill-formed or contains information
    that cannot be represented in a memon v0.x.y file (includes legacy)"""
    per_chart_timings = [
        chart.timing for chart in song.charts.values() if chart.timing is not None
    ]
    # No timing at all, neither song-wide nor per-chart
    if song.common_timing is None and not per_chart_timings:
        raise ValueError("The song has no timing information")

    # Per-chart timings are only acceptable if they all agree
    if per_chart_timings:
        reference = per_chart_timings[0]
        if any(timing != reference for timing in per_chart_timings):
            raise ValueError(
                f"memon:{version} cannot represent a song with per-chart timing"
            )

    timing = _get_timing(song)
    events = timing.events
    if len(events) == 0:
        raise ValueError("The song has no BPM")
    if len(events) != 1:
        raise ValueError(f"memon:{version} does not handle BPM changes")

    bpm_event = events[0]
    if bpm_event.BPM <= 0:
        raise ValueError(f"memon:{version} only accepts strictly positive BPMs")
    if bpm_event.time != 0:
        raise ValueError(f"memon:{version} only accepts a BPM on the first beat")

    for difficulty, chart in song.charts.items():
        if len(set(chart.notes)) != len(chart.notes):
            raise ValueError(
                f"{difficulty} chart has duplicate notes, these cannot be represented"
            )
def _dump_to_json(memon: dict) -> bytes:
    """Serialise *memon* as pretty-printed, UTF-8-encoded JSON bytes
    (Decimal values kept exact via simplejson's use_decimal)."""
    as_text = json.dumps(memon, use_decimal=True, indent=4)
    return as_text.encode("utf-8")
def _compute_resolution(notes: List[Union[jbt.TapNote, jbt.LongNote]]) -> int:
    """Return the ticks-per-beat resolution making every note time (and every
    long-note duration) an integral number of ticks."""
    denominators = [note.time.denominator for note in notes]
    denominators += [
        note.duration.denominator
        for note in notes
        if isinstance(note, jbt.LongNote)
    ]
    return lcm(*denominators)
def _dump_memon_note_v0(
    note: Union[jbt.TapNote, jbt.LongNote], resolution: int
) -> Dict[str, int]:
    """converts a note into the {n, t, l, p} form"""
    ticks = note.time.numerator * (resolution // note.time.denominator)
    # Tap notes have no length nor tail; long notes fill both in below
    length = 0
    tail = 0
    if isinstance(note, jbt.LongNote):
        length = note.duration.numerator * (resolution // note.duration.denominator)
        tail = _long_note_tail_value_v0(note)
    return {
        "n": note.position.index,
        "t": ticks,
        "l": length,
        "p": tail,
    }
def _dump_memon_legacy(song: jbt.Song, **kwargs: Any) -> SongFile:
    """Serialise *song* in the legacy (pre-versioned) memon format.

    Legacy memon stores charts as a list under "data", embedding the
    difficulty name in each entry as "dif_name".  Raises ValueError (via
    _raise_if_unfit_for_v0) if the song cannot be represented.

    NOTE(review): a missing audio/cover serialises as the string "None"
    because of the unconditional str() calls — confirm this is intended.
    """
    _raise_if_unfit_for_v0(song, "legacy")
    timing = _get_timing(song)
    memon: Dict[str, Any] = {
        "metadata": {
            "song title": song.metadata.title,
            "artist": song.metadata.artist,
            "music path": str(song.metadata.audio),
            "jacket path": str(song.metadata.cover),
            "BPM": timing.events[0].BPM,
            # memon stores the negated beat-zero offset
            "offset": -timing.beat_zero_offset,
        },
        "data": [],
    }
    for difficulty, chart in song.charts.items():
        # ticks per beat, chosen so every note time/duration is integral
        resolution = _compute_resolution(chart.notes)
        memon["data"].append(
            {
                "dif_name": difficulty,
                "level": chart.level,
                "resolution": resolution,
                # set() is safe: _raise_if_unfit_for_v0 guaranteed no duplicates
                "notes": [
                    _dump_memon_note_v0(note, resolution)
                    for note in sorted(
                        set(chart.notes), key=lambda n: (n.time, n.position)
                    )
                ],
            }
        )
    return SongFile(contents=_dump_to_json(memon), song=song)


dump_memon_legacy = make_memon_dumper(_dump_memon_legacy)
def _dump_memon_0_1_0(song: jbt.Song, **kwargs: Any) -> SongFile:
    """Serialise *song* in memon v0.1.0 format.

    v0.1.0 adds an explicit "version" field and keys the "data" mapping by
    difficulty name instead of using a list.

    NOTE(review): a missing audio/cover serialises as the string "None"
    because of the unconditional str() calls — confirm this is intended.
    """
    _raise_if_unfit_for_v0(song, "v0.1.0")
    timing = _get_timing(song)
    memon: Dict[str, Any] = {
        "version": "0.1.0",
        "metadata": {
            "song title": song.metadata.title,
            "artist": song.metadata.artist,
            "music path": str(song.metadata.audio),
            "album cover path": str(song.metadata.cover),
            "BPM": timing.events[0].BPM,
            # memon stores the negated beat-zero offset
            "offset": -timing.beat_zero_offset,
        },
        "data": dict(),
    }
    for difficulty, chart in song.charts.items():
        # ticks per beat, chosen so every note time/duration is integral
        resolution = _compute_resolution(chart.notes)
        memon["data"][difficulty] = {
            "level": chart.level,
            "resolution": resolution,
            "notes": [
                _dump_memon_note_v0(note, resolution)
                for note in sorted(set(chart.notes), key=lambda n: (n.time, n.position))
            ],
        }
    return SongFile(contents=_dump_to_json(memon), song=song)


dump_memon_0_1_0 = make_memon_dumper(_dump_memon_0_1_0)
def _dump_memon_0_2_0(song: jbt.Song, **kwargs: Any) -> SongFile:
    """Serialise *song* in memon v0.2.0 format.

    Same layout as v0.1.0, plus an optional "preview" metadata block
    (position/length) written only when the song defines one.
    """
    _raise_if_unfit_for_v0(song, "v0.2.0")
    timing = _get_timing(song)
    memon: Dict[str, Any] = {
        "version": "0.2.0",
        "metadata": {
            "song title": song.metadata.title,
            "artist": song.metadata.artist,
            "music path": str(song.metadata.audio),
            "album cover path": str(song.metadata.cover),
            "BPM": timing.events[0].BPM,
            # memon stores the negated beat-zero offset
            "offset": -timing.beat_zero_offset,
        },
        "data": {},
    }
    if song.metadata.preview is not None:
        memon["metadata"]["preview"] = {
            "position": song.metadata.preview.start,
            "length": song.metadata.preview.length,
        }
    for difficulty, chart in song.charts.items():
        # ticks per beat, chosen so every note time/duration is integral
        resolution = _compute_resolution(chart.notes)
        memon["data"][difficulty] = {
            "level": chart.level,
            "resolution": resolution,
            "notes": [
                _dump_memon_note_v0(note, resolution)
                for note in sorted(set(chart.notes), key=lambda n: (n.time, n.position))
            ],
        }
    return SongFile(contents=_dump_to_json(memon), song=song)


dump_memon_0_2_0 = make_memon_dumper(_dump_memon_0_2_0)
def _dump_memon_0_3_0(song: jbt.Song, **kwargs: Any) -> SongFile:
    """Serialise *song* in memon v0.3.0 format.

    Unlike earlier v0 versions, music path / album cover path / preview /
    preview path are all optional here and only written when present.
    """
    _raise_if_unfit_for_v0(song, "v0.3.0")
    timing = _get_timing(song)
    memon: Dict[str, Any] = {
        "version": "0.3.0",
        "metadata": {
            "song title": song.metadata.title,
            "artist": song.metadata.artist,
            "BPM": timing.events[0].BPM,
            # memon stores the negated beat-zero offset
            "offset": -timing.beat_zero_offset,
        },
        "data": {},
    }
    if song.metadata.audio is not None:
        memon["metadata"]["music path"] = str(song.metadata.audio)
    if song.metadata.cover is not None:
        memon["metadata"]["album cover path"] = str(song.metadata.cover)
    if song.metadata.preview is not None:
        memon["metadata"]["preview"] = {
            "position": song.metadata.preview.start,
            "length": song.metadata.preview.length,
        }
    if song.metadata.preview_file is not None:
        memon["metadata"]["preview path"] = str(song.metadata.preview_file)
    for difficulty, chart in song.charts.items():
        # ticks per beat, chosen so every note time/duration is integral
        resolution = _compute_resolution(chart.notes)
        memon["data"][difficulty] = {
            "level": chart.level,
            "resolution": resolution,
            "notes": [
                _dump_memon_note_v0(note, resolution)
                for note in sorted(set(chart.notes), key=lambda n: (n.time, n.position))
            ],
        }
    return SongFile(contents=_dump_to_json(memon), song=song)


dump_memon_0_3_0 = make_memon_dumper(_dump_memon_0_3_0)
| Stepland/jubeatools | jubeatools/formats/memon/v0/dump.py | dump.py | py | 8,590 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "jubeatools.song.LongNote",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "jubeatools.song",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "jubeatools.song.Song",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name"... |
35609459703 | import streamlit as st
import cv2
import time
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from streamlit_lottie import st_lottie
# Initialize the parameters
confThreshold = 0.2 #Confidence threshold
nmsThreshold = 0.4 #Non-maximum suppression threshold
inpWidth = 416 #Width of network's input image
inpHeight = 416 #Height of network's input image
# NOTE(review): obj_detection() below hard-codes 0.2 / 0.4 / (416, 416) instead
# of reading these constants — confirm they are meant to stay in sync.
def obj_detection(my_img):
    """Run YOLOv4 vehicle detection on a PIL image and render the results in
    the Streamlit UI: the input image on the left, the annotated output on
    the right, plus a sidebar with thresholds/inference time and a summary.

    Side effects: reads 'yolov4-custom_best.weights', 'yolov4-custom.cfg'
    and 'classes.txt' from the working directory.
    """
    st.set_option('deprecation.showPyplotGlobalUse', False)
    column1, column2 = st.beta_columns(2)
    column1.subheader("Input image")
    st.text("")
    # plt.figure(figsize=(16, 16))
    # plt.imshow(my_img)
    # original = Image.open(image)
    #col1.header("Original")
    if my_img.mode != 'RGB':
        my_img = my_img.convert('RGB')
    column1.image(my_img, use_column_width=True)
    # column1.pyplot(use_column_width=True)
    # YOLO model : # load the YOLO network
    # net = cv2.dnn.readNet("yolov3_training_last.weights","yolov3_testing.cfg")
    # net = cv2.dnn.readNetFromDarknet("yolov4-custom.cfg","yolov4-custom_best.weights" )
    net = cv2.dnn.readNet('yolov4-custom_best.weights', 'yolov4-custom.cfg')
    # labels = []
    # with open("classes.txt", "r") as f:
    # labels = [line.strip() for line in f.readlines()]
    # loading all the class labels (objects)
    classes = []
    with open("classes.txt", "r") as f:
        classes = f.read().splitlines()
    # names_of_layer = net.getLayerNames()
    # output_layers = [names_of_layer[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    # generating colors for each object for later plotting
    font = cv2.FONT_HERSHEY_PLAIN
    # NOTE(review): colors is indexed by *box* index below, so more than 100
    # surviving boxes would raise IndexError.
    colors = np.random.uniform(0, 255, size=(100, 3))
    # colors = np.random.uniform(0, 255, size=(len(classes), 3))
    print("Colors:", colors)
    # Image loading
    newImage = np.array(my_img.convert('RGB'))
    img = cv2.cvtColor(newImage, 1)
    height, width, channels = img.shape
    # Objects detection (Converting into blobs)
    # (image, scalefactor, size, mean(mean subtraction from each layer), swapRB(Blue to red), crop)
    # blob = cv2.dnn.blobFromImage(img, 0.00392, (inpWidth, inpHeight), (0, 0, 0), True,
    #                              crop=False)
    blob = cv2.dnn.blobFromImage(img, 1/255, (416, 416), (0,0,0), swapRB=True, crop=False)
    # sets the blob as the input of the network
    net.setInput(blob)
    # outputs = net.forward(output_layers)
    output_layers_names = net.getUnconnectedOutLayersNames()
    # layerOutputs = net.forward(output_layers_names)
    # get all the layer names
    # ln = net.getLayerNames()
    # ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    # names_of_layer = net.getLayerNames()
    # output_layers = [names_of_layer[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    # feed forward (inference) and get the network output
    # measure how much it took in seconds
    # start = time.perf_counter()
    # outputs = net.forward(output_layers)
    outputs = net.forward(output_layers_names)
    # time_took = time.perf_counter() - start
    # print(f"Time took: {time_took:.2f}s")
    # The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
    t, _ = net.getPerfProfile()
    infLabel = 'Inference time: %.2f ms' % (t * 1000.0 / cv2.getTickFrequency())
    # cv2.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
    classID = []
    confidences = []
    boxes = []
    # SHOWING INFORMATION CONTAINED IN 'outputs' VARIABLE ON THE SCREEN
    # loop over each of the layer outputs
    for op in outputs:
        for detection in op:
            # detection = [cx, cy, w, h, objectness, per-class scores...]
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.2:
                # OBJECT DETECTED
                # Get the coordinates of object: center,width,height
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)  # width is the original width of image
                h = int(detection[3] * height)  # height is the original height of the image
                # use the center (x, y)-coordinates to derive the top and
                # and left corner of the bounding box
                # RECTANGLE COORDINATES
                x = int(center_x - w / 2)  # Top-Left x
                y = int(center_y - h / 2)  # Top-left y
                # To organize the objects in array so that we can extract them later
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                classID.append(class_id)
    # score_threshold = st.sidebar.slider("Confidence_threshold", 0.00, 1.00, 0.5, 0.01)
    # nms_threshold = st.sidebar.slider("NMS_threshold", 0.00, 1.00, 0.4, 0.01)
    score_threshold = 0.2
    st.sidebar.info(f"Confidence_threshold:{ score_threshold }")
    nms_threshold = 0.4
    st.sidebar.info(f"NMS_threshold :{nms_threshold} ")
    st.sidebar.success(infLabel)
    # NOTE(review): if no box survives NMS, some OpenCV versions return an
    # empty tuple here and indexes.flatten() below would raise — confirm.
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold, nms_threshold)
    print("DNN Index:", indexes)
    font = cv2.FONT_HERSHEY_SIMPLEX
    items = []
    for i in range(len(boxes)):
        if i in indexes.flatten():
            x, y, w, h = boxes[i]
            # To get the name of object
            label = str.upper((classes[classID[i]]))
            # label = str(classes[class_ids[i]])
            confidence = str(round(confidences[i], 2))
            print("value of i:", i)
            color = colors[i]
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 3)
            cv2.putText(img, label + " " + confidence, (x, y + 10), font, 0.25, (0, 0, 255), 1)
            items.append(label)
    st.text("")
    # NOTE(review): st.spinner is a context manager; calling it bare like this
    # presumably shows nothing — confirm intent.
    st.spinner('Model working....')
    column2.subheader("Output image")
    st.text("")
    # plt.figure(figsize=(15, 15))
    # plt.imshow(img)
    # column2.pyplot(use_column_width=True)
    column2.image(img, use_column_width=True)
    if len(indexes) > 1:
        st.success("Found {} Objects - {}".format(len(indexes), [item for item in set(items)]))
        st.balloons()
    elif len(indexes) == 1:
        st.success("Found {} Object - {}".format(len(indexes), [item for item in set(items)]))
        st.balloons()
    else:
        st.warning("Found {} Object - {}".format(len(indexes), [item for item in set(items)]))
# with open("custom.css") as f:
# st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
st.title('Welcome to Vehicle Classification App')
st.markdown("Welcome to this Computer Vision application that classifies Vehicles on Indian Highways. The Vehicles are classified into Seven different classes namely: Bus, Truck, Car, Jeep,Van,miniTruck and Lorry.It will find Person too if present.")
def main():
    """Render the mode selector, then run detection on either the bundled
    demo image or a user-uploaded one."""
    st.write(
        "You can view Vehicle Classification using YOLO model here. Select one of the following options to proceed:")
    choice = st.radio("", ("Default", "Choose an image of your choice"))
    if choice == "Default":
        # Bundled demo image shipped next to the script.
        obj_detection(Image.open("v999.jpg"))
        return
    uploaded_file = st.file_uploader("Upload", type=['jpg', 'png', 'jpeg'])
    if uploaded_file is not None:
        obj_detection(Image.open(uploaded_file))
# Script entry point.
if __name__ == '__main__':
    main()
| nlkkumar/vehicle-class-yolov4 | nlk-vehi-class-classification.py | nlk-vehi-class-classification.py | py | 7,582 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "streamlit.set_option",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "streamlit.beta_columns",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "streamlit.text",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.re... |
30452253568 | import pandas as pd
import requests
from bs4 import BeautifulSoup
# Base review-page URL; the page number is appended at request time by amazon().
u = 'https://www.amazon.in/OnePlus-Nord-Gray-128GB-Storage/product-reviews/B08695ZSP6/ref=cm_cr_arp_d_paging_btm_next_2?ie=UTF8&reviewerType=all_reviews&pageNumber='
def amazon(link, Number_of_pages):
    """Scrape Amazon product reviews and save them to a CSV file.

    Args:
        link: Review-listing URL ending in 'pageNumber=' so the page index
            can be appended directly.
        Number_of_pages: Number of pages to fetch, starting from page 1.

    Side effects: writes the collected reviews to the hard-coded CSV path
    at the bottom of this function.
    """
    allreview1 = pd.DataFrame({})
    name = []
    body = []
    star_rating = []
    review = []
    # Bug fix: the original built URLs from the module-level global `u`,
    # silently ignoring the `link` parameter.
    urls = [link + str(page_number) for page_number in range(1, Number_of_pages + 1)]
    for url in urls:
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'html.parser')
        # Reviewer names. The first two profile entries are skipped —
        # presumably the "top positive"/"top critical" boxes; TODO confirm.
        profiles = soup.findAll('div', class_=['a-profile-content'])
        for position, profile in enumerate(profiles):
            if position >= 2:
                name.append(profile.find('span', class_=['a-profile-name']).text)
        # Review titles.
        titles = soup.findAll(
            'a',
            attrs={'data-hook': 'review-title'},
            class_=['a-size-base a-link-normal review-title a-color-base review-title-content a-text-bold'])
        for title in titles:
            review.append(title.find('span').text)
        # Star ratings.
        ratings = soup.findAll('i', attrs={'data-hook': 'review-star-rating'})
        for rating in ratings:
            star_rating.append(rating.find('span', class_=['a-icon-alt']).text)
        # Review bodies.
        bodies = soup.findAll(
            'span',
            attrs={'data-hook': 'review-body'},
            class_=['a-size-base review-text review-text-content'])
        for review_body in bodies:
            body.append(review_body.find('span').text)
    # NOTE(review): if the four lists end up with different lengths (e.g. a
    # review missing a rating) these assignments raise ValueError.
    allreview1['name'] = name
    allreview1['review'] = review
    allreview1['star_rating'] = star_rating
    allreview1['body'] = body
    allreview1.to_csv(r'C:\...\allreview1.csv')  # TODO: make the output path configurable
# Scrape the first 3 pages of reviews for the product URL above.
amazon(u,3)
| SHRIKAR5/Amazon-review-webscraping | amazon_review-webscraping.py | amazon_review-webscraping.py | py | 2,060 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 25,
"usage_type": "call"
}
] |
33521363177 | import os, re, importlib.util
import LogManager
from Module import Module
from ModuleThreadHandler import ModuleThreadHandler, ThreadTask
class ModuleRunner:
    """Discovers, loads and schedules user modules found in ./modules/<name>/main.py.

    Each module directory must contain a main.py that defines an `export`
    class accepted by Module.is_valid; anything else is recorded in
    self.bad_modules and logged as a warning.
    """

    def __init__(self, parent):
        self.parent = parent
        self.logger = LogManager.create_logger('MODULES')
        self.closing = False
        self.thread_handler = ModuleThreadHandler(self.logger)
        self.logger.info('Initialising modules...')
        self.get_modules()
        self.logger.info(f"Currently running {len(self.modules)} module{'s' if len(self.modules) != 1 else ''}...")

    def close(self):
        # Shut down the worker threads and flag shutdown for any callers polling self.closing.
        self.thread_handler.close()
        self.closing = True

    def get_modules(self):
        """Scan ./modules and rebuild self.modules / self.bad_modules from scratch."""
        self.modules = []
        self.bad_modules = []
        # Get possible files that could contain a module
        module_filenames = []
        # Escape the path separator so it can be embedded in the regex on Windows.
        x = '/' if os.path.sep == '/' else r'\\'
        # Matches only direct children of ./modules (no deeper nesting).
        pattern = re.compile(f'^.{x}modules{x}[^{x}]+$')
        for subdir, _, files in os.walk(os.path.join(os.curdir, 'modules')):
            if pattern.match(subdir):
                if 'main.py' in files:
                    module_filenames.append(os.path.join(subdir, 'main.py'))
                else:
                    self.bad_modules.append((subdir.split(os.path.sep)[-1], 'No main.py file'))
        # Go through files and try to import a class
        for filename in module_filenames:
            # Module name like "modules.foo.main", derived from the relative path
            # ("./" stripped at the front, ".py" at the end).
            spec = importlib.util.spec_from_file_location(filename[2:-3].replace(os.path.sep, '.'), filename)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            if not hasattr(module, 'export'):
                self.bad_modules.append((filename.split(os.path.sep)[-2], 'No exported class in main.py'))
            elif not Module.is_valid(module.export):
                self.bad_modules.append((filename.split(os.path.sep)[-2], 'Exported module is invalid'))
            else:
                self.init_module(module.export)
        for module_name, reason in self.bad_modules:
            self.logger.warning(f'Installed module `{module_name}` cannot be loaded: {reason}')

    def run_modules(self, minutes_past):
        # Queue a RUN task for every module whose schedule matches this tick.
        for module in [m for m in self.modules if m.should_occur(minutes_past)]:
            self.run_module(module)

    def run_module(self, module):
        self.thread_handler.add_task(module, ThreadTask.RUN)

    def init_module(self, base_class):
        # Wrap the exported class and queue its INIT task with priority -1
        # (presumably ahead of normal tasks — confirm in ModuleThreadHandler).
        module = Module(self, base_class)
        self.thread_handler.add_task(module, ThreadTask.INIT, -1)
        self.modules.append(module)
| gregormaclaine/AutoHome | ModuleRunner.py | ModuleRunner.py | py | 2,346 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "LogManager.create_logger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "ModuleThreadHandler.ModuleThreadHandler",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api... |
43114992788 | """
The codes are heavily borrowed from NeuS
"""
import os
import cv2 as cv
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import logging
import mcubes
from icecream import ic
from models.render_utils import sample_pdf
from models.projector import Projector
from tsparse.torchsparse_utils import sparse_to_dense_channel
from models.fast_renderer import FastRenderer
from models.patch_projector import PatchProjector
class SparseNeuSRenderer(nn.Module):
"""
conditional neus render;
optimize on normalized world space;
warped by nn.Module to support DataParallel traning
"""
    def __init__(self,
                 rendering_network_outside,
                 sdf_network,
                 variance_network,
                 rendering_network,
                 n_samples,
                 n_importance,
                 n_outside,
                 perturb,
                 alpha_type='div',
                 conf=None
                 ):
        """Store the sub-networks, sampling hyper-parameters and helper
        objects used by the renderer.

        :param n_samples: number of coarse samples per ray
        :param n_importance: number of importance samples per ray
        :param n_outside: number of background samples (unused in render_core)
        :param alpha_type: 'div' or 'uniform' — selects the alpha estimator
        :param conf: config object; must contain 'general.base_exp_dir'
        """
        super(SparseNeuSRenderer, self).__init__()
        self.conf = conf
        self.base_exp_dir = conf['general.base_exp_dir']
        # network setups
        self.rendering_network_outside = rendering_network_outside
        self.sdf_network = sdf_network
        self.variance_network = variance_network
        self.rendering_network = rendering_network
        self.n_samples = n_samples
        self.n_importance = n_importance
        self.n_outside = n_outside
        self.perturb = perturb
        self.alpha_type = alpha_type
        self.rendering_projector = Projector()  # used to obtain features for generalized rendering
        self.h_patch_size = self.conf.get_int('model.h_patch_size', default=3)
        self.patch_projector = PatchProjector(self.h_patch_size)
        self.ray_tracer = FastRenderer()  # ray_tracer to extract depth maps from sdf_volume
        # - fitted rendering or general rendering
        # NOTE(review): bare except silently falls back to False for *any*
        # error, not just a missing attribute — consider except AttributeError.
        try:
            self.if_fitted_rendering = self.sdf_network.if_fitted_rendering
        except:
            self.if_fitted_rendering = False
    def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_variance,
                  conditional_valid_mask_volume=None):
        """NeuS-style hierarchical up-sampling: estimate per-section alphas
        from the current SDF samples and draw n_importance extra depths
        concentrated near the likely surface.

        :param rays_o: [batch_size, 3] ray origins
        :param rays_d: [batch_size, 3] ray directions
        :param z_vals: [batch_size, n_samples] current sample depths
        :param sdf: SDF values at the current samples
        :param n_importance: number of new samples to draw per ray
        :param inv_variance: inverse variance of the logistic CDF used by NeuS
        :param conditional_valid_mask_volume: optional [1, 1, X, Y, Z] validity mask
        :return: [batch_size, n_importance] new sample depths (detached)
        """
        device = rays_o.device
        batch_size, n_samples = z_vals.shape
        pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]  # n_rays, n_samples, 3
        if conditional_valid_mask_volume is not None:
            pts_mask = self.get_pts_mask_for_conditional_volume(pts.view(-1, 3), conditional_valid_mask_volume)
            pts_mask = pts_mask.reshape(batch_size, n_samples)
            # a section is valid only when both of its endpoints are valid
            pts_mask = pts_mask[:, :-1] * pts_mask[:, 1:]  # [batch_size, n_samples-1]
        else:
            pts_mask = torch.ones([batch_size, n_samples]).to(pts.device)
        sdf = sdf.reshape(batch_size, n_samples)
        prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]
        prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:]
        mid_sdf = (prev_sdf + next_sdf) * 0.5
        dot_val = None
        if self.alpha_type == 'uniform':
            dot_val = torch.ones([batch_size, n_samples - 1]) * -1.0
        else:
            # finite-difference estimate of the SDF's directional derivative,
            # made conservative by taking the min with the previous section
            # and clamping to non-positive values
            dot_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)
            prev_dot_val = torch.cat([torch.zeros([batch_size, 1]).to(device), dot_val[:, :-1]], dim=-1)
            dot_val = torch.stack([prev_dot_val, dot_val], dim=-1)
            dot_val, _ = torch.min(dot_val, dim=-1, keepdim=False)
            dot_val = dot_val.clip(-10.0, 0.0) * pts_mask
        dist = (next_z_vals - prev_z_vals)
        prev_esti_sdf = mid_sdf - dot_val * dist * 0.5
        next_esti_sdf = mid_sdf + dot_val * dist * 0.5
        prev_cdf = torch.sigmoid(prev_esti_sdf * inv_variance)
        next_cdf = torch.sigmoid(next_esti_sdf * inv_variance)
        alpha_sdf = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)
        alpha = alpha_sdf
        # - apply pts_mask
        alpha = pts_mask * alpha
        # standard volume-rendering weights: alpha * transmittance
        weights = alpha * torch.cumprod(
            torch.cat([torch.ones([batch_size, 1]).to(device), 1. - alpha + 1e-7], -1), -1)[:, :-1]
        z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach()
        return z_samples
    def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, lod,
                   sdf_network, gru_fusion,
                   # * related to conditional feature
                   conditional_volume=None,
                   conditional_valid_mask_volume=None
                   ):
        """Evaluate the SDF at the newly drawn depths, then merge them with
        the existing samples, keeping depths (and their SDF values) sorted
        per ray.

        Points outside the validity mask keep a large placeholder SDF (100).
        `gru_fusion` is accepted but not used here.

        NOTE(review): when conditional_valid_mask_volume is None,
        pts_mask_bool is never assigned but the torch.sum(pts_mask) > 1
        branch is then always taken (for >1 point), which would raise
        NameError — confirm this code path is unreachable in practice.

        :return: (z_vals, sdf), both [batch_size, n_samples + n_importance]
        """
        device = rays_o.device
        batch_size, n_samples = z_vals.shape
        _, n_importance = new_z_vals.shape
        pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None]
        if conditional_valid_mask_volume is not None:
            pts_mask = self.get_pts_mask_for_conditional_volume(pts.view(-1, 3), conditional_valid_mask_volume)
            pts_mask = pts_mask.reshape(batch_size, n_importance)
            pts_mask_bool = (pts_mask > 0).view(-1)
        else:
            pts_mask = torch.ones([batch_size, n_importance]).to(pts.device)
        # placeholder SDF for invalid points: large positive value (empty space)
        new_sdf = torch.ones([batch_size * n_importance, 1]).to(pts.dtype).to(device) * 100
        if torch.sum(pts_mask) > 1:
            new_outputs = sdf_network.sdf(pts.reshape(-1, 3)[pts_mask_bool], conditional_volume, lod=lod)
            new_sdf[pts_mask_bool] = new_outputs['sdf_pts_scale%d' % lod]  # .reshape(batch_size, n_importance)
        new_sdf = new_sdf.view(batch_size, n_importance)
        z_vals = torch.cat([z_vals, new_z_vals], dim=-1)
        sdf = torch.cat([sdf, new_sdf], dim=-1)
        # sort depths per ray and reorder the SDF values with the same permutation
        z_vals, index = torch.sort(z_vals, dim=-1)
        xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1)
        index = index.reshape(-1)
        sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance)
        return z_vals, sdf
@torch.no_grad()
def get_pts_mask_for_conditional_volume(self, pts, mask_volume):
"""
:param pts: [N, 3]
:param mask_volume: [1, 1, X, Y, Z]
:return:
"""
num_pts = pts.shape[0]
pts = pts.view(1, 1, 1, num_pts, 3) # - should be in range (-1, 1)
pts = torch.flip(pts, dims=[-1])
pts_mask = F.grid_sample(mask_volume, pts, mode='nearest') # [1, c, 1, 1, num_pts]
pts_mask = pts_mask.view(-1, num_pts).permute(1, 0).contiguous() # [num_pts, 1]
return pts_mask
def render_core(self,
rays_o,
rays_d,
z_vals,
sample_dist,
lod,
sdf_network,
rendering_network,
background_alpha=None, # - no use here
background_sampled_color=None, # - no use here
background_rgb=None, # - no use here
alpha_inter_ratio=0.0,
# * related to conditional feature
conditional_volume=None,
conditional_valid_mask_volume=None,
# * 2d feature maps
feature_maps=None,
color_maps=None,
w2cs=None,
intrinsics=None,
img_wh=None,
query_c2w=None, # - used for testing
if_general_rendering=True,
if_render_with_grad=True,
# * used for blending mlp rendering network
img_index=None,
rays_uv=None,
# * used for clear bg and fg
bg_num=0
):
device = rays_o.device
N_rays = rays_o.shape[0]
_, n_samples = z_vals.shape
dists = z_vals[..., 1:] - z_vals[..., :-1]
dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape).to(device)], -1)
mid_z_vals = z_vals + dists * 0.5
mid_dists = mid_z_vals[..., 1:] - mid_z_vals[..., :-1]
pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3
dirs = rays_d[:, None, :].expand(pts.shape)
pts = pts.reshape(-1, 3)
dirs = dirs.reshape(-1, 3)
# * if conditional_volume is restored from sparse volume, need mask for pts
if conditional_valid_mask_volume is not None:
pts_mask = self.get_pts_mask_for_conditional_volume(pts, conditional_valid_mask_volume)
pts_mask = pts_mask.reshape(N_rays, n_samples).float().detach()
pts_mask_bool = (pts_mask > 0).view(-1)
if torch.sum(pts_mask_bool.float()) < 1: # ! when render out image, may meet this problem
pts_mask_bool[:100] = True
else:
pts_mask = torch.ones([N_rays, n_samples]).to(pts.device)
# import ipdb; ipdb.set_trace()
# pts_valid = pts[pts_mask_bool]
sdf_nn_output = sdf_network.sdf(pts[pts_mask_bool], conditional_volume, lod=lod)
sdf = torch.ones([N_rays * n_samples, 1]).to(pts.dtype).to(device) * 100
sdf[pts_mask_bool] = sdf_nn_output['sdf_pts_scale%d' % lod] # [N_rays*n_samples, 1]
feature_vector_valid = sdf_nn_output['sdf_features_pts_scale%d' % lod]
feature_vector = torch.zeros([N_rays * n_samples, feature_vector_valid.shape[1]]).to(pts.dtype).to(device)
feature_vector[pts_mask_bool] = feature_vector_valid
# * estimate alpha from sdf
gradients = torch.zeros([N_rays * n_samples, 3]).to(pts.dtype).to(device)
# import ipdb; ipdb.set_trace()
gradients[pts_mask_bool] = sdf_network.gradient(
pts[pts_mask_bool], conditional_volume, lod=lod).squeeze()
sampled_color_mlp = None
rendering_valid_mask_mlp = None
sampled_color_patch = None
rendering_patch_mask = None
if self.if_fitted_rendering: # used for fine-tuning
position_latent = sdf_nn_output['sampled_latent_scale%d' % lod]
sampled_color_mlp = torch.zeros([N_rays * n_samples, 3]).to(pts.dtype).to(device)
sampled_color_mlp_mask = torch.zeros([N_rays * n_samples, 1]).to(pts.dtype).to(device)
# - extract pixel
pts_pixel_color, pts_pixel_mask = self.patch_projector.pixel_warp(
pts[pts_mask_bool][:, None, :], color_maps, intrinsics,
w2cs, img_wh=None) # [N_rays * n_samples,1, N_views, 3] , [N_rays*n_samples, 1, N_views]
pts_pixel_color = pts_pixel_color[:, 0, :, :] # [N_rays * n_samples, N_views, 3]
pts_pixel_mask = pts_pixel_mask[:, 0, :] # [N_rays*n_samples, N_views]
# - extract patch
if_patch_blending = False if rays_uv is None else True
pts_patch_color, pts_patch_mask = None, None
if if_patch_blending:
pts_patch_color, pts_patch_mask = self.patch_projector.patch_warp(
pts.reshape([N_rays, n_samples, 3]),
rays_uv, gradients.reshape([N_rays, n_samples, 3]),
color_maps,
intrinsics[0], intrinsics,
query_c2w[0], torch.inverse(w2cs), img_wh=None
) # (N_rays, n_samples, N_src, Npx, 3), (N_rays, n_samples, N_src, Npx)
N_src, Npx = pts_patch_mask.shape[2:]
pts_patch_color = pts_patch_color.view(N_rays * n_samples, N_src, Npx, 3)[pts_mask_bool]
pts_patch_mask = pts_patch_mask.view(N_rays * n_samples, N_src, Npx)[pts_mask_bool]
sampled_color_patch = torch.zeros([N_rays * n_samples, Npx, 3]).to(device)
sampled_color_patch_mask = torch.zeros([N_rays * n_samples, 1]).to(device)
sampled_color_mlp_, sampled_color_mlp_mask_, \
sampled_color_patch_, sampled_color_patch_mask_ = sdf_network.color_blend(
pts[pts_mask_bool],
position_latent,
gradients[pts_mask_bool],
dirs[pts_mask_bool],
feature_vector[pts_mask_bool],
img_index=img_index,
pts_pixel_color=pts_pixel_color,
pts_pixel_mask=pts_pixel_mask,
pts_patch_color=pts_patch_color,
pts_patch_mask=pts_patch_mask
) # [n, 3], [n, 1]
sampled_color_mlp[pts_mask_bool] = sampled_color_mlp_
sampled_color_mlp_mask[pts_mask_bool] = sampled_color_mlp_mask_.float()
sampled_color_mlp = sampled_color_mlp.view(N_rays, n_samples, 3)
sampled_color_mlp_mask = sampled_color_mlp_mask.view(N_rays, n_samples)
rendering_valid_mask_mlp = torch.mean(pts_mask * sampled_color_mlp_mask, dim=-1, keepdim=True) > 0.5
# patch blending
if if_patch_blending:
sampled_color_patch[pts_mask_bool] = sampled_color_patch_
sampled_color_patch_mask[pts_mask_bool] = sampled_color_patch_mask_.float()
sampled_color_patch = sampled_color_patch.view(N_rays, n_samples, Npx, 3)
sampled_color_patch_mask = sampled_color_patch_mask.view(N_rays, n_samples)
rendering_patch_mask = torch.mean(pts_mask * sampled_color_patch_mask, dim=-1,
keepdim=True) > 0.5 # [N_rays, 1]
else:
sampled_color_patch, rendering_patch_mask = None, None
if if_general_rendering: # used for general training
# [512, 128, 16]; [4, 512, 128, 59]; [4, 512, 128, 4]
ren_geo_feats, ren_rgb_feats, ren_ray_diff, ren_mask, _, _ = self.rendering_projector.compute(
pts.view(N_rays, n_samples, 3),
# * 3d geometry feature volumes
geometryVolume=conditional_volume[0],
geometryVolumeMask=conditional_valid_mask_volume[0],
# * 2d rendering feature maps
rendering_feature_maps=feature_maps, # [n_views, 56, 256, 256]
color_maps=color_maps,
w2cs=w2cs,
intrinsics=intrinsics,
img_wh=img_wh,
query_img_idx=0, # the index of the N_views dim for rendering
query_c2w=query_c2w,
)
# (N_rays, n_samples, 3)
if if_render_with_grad:
# import ipdb; ipdb.set_trace()
# [nrays, 3] [nrays, 1]
sampled_color, rendering_valid_mask = rendering_network(
ren_geo_feats, ren_rgb_feats, ren_ray_diff, ren_mask)
# import ipdb; ipdb.set_trace()
else:
with torch.no_grad():
sampled_color, rendering_valid_mask = rendering_network(
ren_geo_feats, ren_rgb_feats, ren_ray_diff, ren_mask)
else:
sampled_color, rendering_valid_mask = None, None
inv_variance = self.variance_network(feature_vector)[:, :1].clip(1e-6, 1e6)
true_dot_val = (dirs * gradients).sum(-1, keepdim=True) # * calculate
iter_cos = -(F.relu(-true_dot_val * 0.5 + 0.5) * (1.0 - alpha_inter_ratio) + F.relu(
-true_dot_val) * alpha_inter_ratio) # always non-positive
iter_cos = iter_cos * pts_mask.view(-1, 1)
true_estimate_sdf_half_next = sdf + iter_cos.clip(-10.0, 10.0) * dists.reshape(-1, 1) * 0.5
true_estimate_sdf_half_prev = sdf - iter_cos.clip(-10.0, 10.0) * dists.reshape(-1, 1) * 0.5
prev_cdf = torch.sigmoid(true_estimate_sdf_half_prev * inv_variance)
next_cdf = torch.sigmoid(true_estimate_sdf_half_next * inv_variance)
p = prev_cdf - next_cdf
c = prev_cdf
if self.alpha_type == 'div':
alpha_sdf = ((p + 1e-5) / (c + 1e-5)).reshape(N_rays, n_samples).clip(0.0, 1.0)
elif self.alpha_type == 'uniform':
uniform_estimate_sdf_half_next = sdf - dists.reshape(-1, 1) * 0.5
uniform_estimate_sdf_half_prev = sdf + dists.reshape(-1, 1) * 0.5
uniform_prev_cdf = torch.sigmoid(uniform_estimate_sdf_half_prev * inv_variance)
uniform_next_cdf = torch.sigmoid(uniform_estimate_sdf_half_next * inv_variance)
uniform_alpha = F.relu(
(uniform_prev_cdf - uniform_next_cdf + 1e-5) / (uniform_prev_cdf + 1e-5)).reshape(
N_rays, n_samples).clip(0.0, 1.0)
alpha_sdf = uniform_alpha
else:
assert False
alpha = alpha_sdf
# - apply pts_mask
alpha = alpha * pts_mask
# pts_radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).reshape(N_rays, n_samples)
# inside_sphere = (pts_radius < 1.0).float().detach()
# relax_inside_sphere = (pts_radius < 1.2).float().detach()
inside_sphere = pts_mask
relax_inside_sphere = pts_mask
weights = alpha * torch.cumprod(torch.cat([torch.ones([N_rays, 1]).to(device), 1. - alpha + 1e-7], -1), -1)[:,
:-1] # n_rays, n_samples
weights_sum = weights.sum(dim=-1, keepdim=True)
alpha_sum = alpha.sum(dim=-1, keepdim=True)
if bg_num > 0:
weights_sum_fg = weights[:, :-bg_num].sum(dim=-1, keepdim=True)
else:
weights_sum_fg = weights_sum
if sampled_color is not None:
color = (sampled_color * weights[:, :, None]).sum(dim=1)
else:
color = None
# import ipdb; ipdb.set_trace()
if background_rgb is not None and color is not None:
color = color + background_rgb * (1.0 - weights_sum)
# print("color device:" + str(color.device))
# if color is not None:
# # import ipdb; ipdb.set_trace()
# color = color + (1.0 - weights_sum)
###################* mlp color rendering #####################
color_mlp = None
# import ipdb; ipdb.set_trace()
if sampled_color_mlp is not None:
color_mlp = (sampled_color_mlp * weights[:, :, None]).sum(dim=1)
if background_rgb is not None and color_mlp is not None:
color_mlp = color_mlp + background_rgb * (1.0 - weights_sum)
############################ * patch blending ################
blended_color_patch = None
if sampled_color_patch is not None:
blended_color_patch = (sampled_color_patch * weights[:, :, None, None]).sum(dim=1) # [N_rays, Npx, 3]
######################################################
gradient_error = (torch.linalg.norm(gradients.reshape(N_rays, n_samples, 3), ord=2,
dim=-1) - 1.0) ** 2
# ! the gradient normal should be masked out, the pts out of the bounding box should also be penalized
gradient_error = (pts_mask * gradient_error).sum() / (
(pts_mask).sum() + 1e-5)
depth = (mid_z_vals * weights[:, :n_samples]).sum(dim=1, keepdim=True)
# print("[TEST]: weights_sum in render_core", weights_sum.mean())
# print("[TEST]: weights_sum in render_core NAN number", weights_sum.isnan().sum())
# if weights_sum.isnan().sum() > 0:
# import ipdb; ipdb.set_trace()
return {
'color': color,
'color_mask': rendering_valid_mask, # (N_rays, 1)
'color_mlp': color_mlp,
'color_mlp_mask': rendering_valid_mask_mlp,
'sdf': sdf, # (N_rays, n_samples)
'depth': depth, # (N_rays, 1)
'dists': dists,
'gradients': gradients.reshape(N_rays, n_samples, 3),
'variance': 1.0 / inv_variance,
'mid_z_vals': mid_z_vals,
'weights': weights,
'weights_sum': weights_sum,
'alpha_sum': alpha_sum,
'alpha_mean': alpha.mean(),
'cdf': c.reshape(N_rays, n_samples),
'gradient_error': gradient_error,
'inside_sphere': inside_sphere,
'blended_color_patch': blended_color_patch,
'blended_color_patch_mask': rendering_patch_mask,
'weights_sum_fg': weights_sum_fg
}
def render(self, rays_o, rays_d, near, far, sdf_network, rendering_network,
perturb_overwrite=-1,
background_rgb=None,
alpha_inter_ratio=0.0,
# * related to conditional feature
lod=None,
conditional_volume=None,
conditional_valid_mask_volume=None,
# * 2d feature maps
feature_maps=None,
color_maps=None,
w2cs=None,
intrinsics=None,
img_wh=None,
query_c2w=None, # -used for testing
if_general_rendering=True,
if_render_with_grad=True,
# * used for blending mlp rendering network
img_index=None,
rays_uv=None,
# * importance sample for second lod network
pre_sample=False, # no use here
# * for clear foreground
bg_ratio=0.0
):
device = rays_o.device
N_rays = len(rays_o)
# sample_dist = 2.0 / self.n_samples
sample_dist = ((far - near) / self.n_samples).mean().item()
z_vals = torch.linspace(0.0, 1.0, self.n_samples).to(device)
z_vals = near + (far - near) * z_vals[None, :]
bg_num = int(self.n_samples * bg_ratio)
if z_vals.shape[0] == 1:
z_vals = z_vals.repeat(N_rays, 1)
if bg_num > 0:
z_vals_bg = z_vals[:, self.n_samples - bg_num:]
z_vals = z_vals[:, :self.n_samples - bg_num]
n_samples = self.n_samples - bg_num
perturb = self.perturb
# - significantly speed up training, for the second lod network
if pre_sample:
z_vals = self.sample_z_vals_from_maskVolume(rays_o, rays_d, near, far,
conditional_valid_mask_volume)
if perturb_overwrite >= 0:
perturb = perturb_overwrite
if perturb > 0:
# get intervals between samples
mids = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
upper = torch.cat([mids, z_vals[..., -1:]], -1)
lower = torch.cat([z_vals[..., :1], mids], -1)
# stratified samples in those intervals
t_rand = torch.rand(z_vals.shape).to(device)
z_vals = lower + (upper - lower) * t_rand
background_alpha = None
background_sampled_color = None
z_val_before = z_vals.clone()
# Up sample
if self.n_importance > 0:
with torch.no_grad():
pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]
sdf_outputs = sdf_network.sdf(
pts.reshape(-1, 3), conditional_volume, lod=lod)
# pdb.set_trace()
sdf = sdf_outputs['sdf_pts_scale%d' % lod].reshape(N_rays, self.n_samples - bg_num)
n_steps = 4
for i in range(n_steps):
new_z_vals = self.up_sample(rays_o, rays_d, z_vals, sdf, self.n_importance // n_steps,
64 * 2 ** i,
conditional_valid_mask_volume=conditional_valid_mask_volume,
)
# if new_z_vals.isnan().sum() > 0:
# import ipdb; ipdb.set_trace()
z_vals, sdf = self.cat_z_vals(
rays_o, rays_d, z_vals, new_z_vals, sdf, lod,
sdf_network, gru_fusion=False,
conditional_volume=conditional_volume,
conditional_valid_mask_volume=conditional_valid_mask_volume,
)
del sdf
n_samples = self.n_samples + self.n_importance
# Background
ret_outside = None
# Render
if bg_num > 0:
z_vals = torch.cat([z_vals, z_vals_bg], dim=1)
# if z_vals.isnan().sum() > 0:
# import ipdb; ipdb.set_trace()
ret_fine = self.render_core(rays_o,
rays_d,
z_vals,
sample_dist,
lod,
sdf_network,
rendering_network,
background_rgb=background_rgb,
background_alpha=background_alpha,
background_sampled_color=background_sampled_color,
alpha_inter_ratio=alpha_inter_ratio,
# * related to conditional feature
conditional_volume=conditional_volume,
conditional_valid_mask_volume=conditional_valid_mask_volume,
# * 2d feature maps
feature_maps=feature_maps,
color_maps=color_maps,
w2cs=w2cs,
intrinsics=intrinsics,
img_wh=img_wh,
query_c2w=query_c2w,
if_general_rendering=if_general_rendering,
if_render_with_grad=if_render_with_grad,
# * used for blending mlp rendering network
img_index=img_index,
rays_uv=rays_uv
)
color_fine = ret_fine['color']
if self.n_outside > 0:
color_fine_mask = torch.logical_or(ret_fine['color_mask'], ret_outside['color_mask'])
else:
color_fine_mask = ret_fine['color_mask']
weights = ret_fine['weights']
weights_sum = ret_fine['weights_sum']
gradients = ret_fine['gradients']
mid_z_vals = ret_fine['mid_z_vals']
# depth = (mid_z_vals * weights[:, :n_samples]).sum(dim=1, keepdim=True)
depth = ret_fine['depth']
depth_varaince = ((mid_z_vals - depth) ** 2 * weights[:, :n_samples]).sum(dim=-1, keepdim=True)
variance = ret_fine['variance'].reshape(N_rays, n_samples).mean(dim=-1, keepdim=True)
# - randomly sample points from the volume, and maximize the sdf
pts_random = torch.rand([1024, 3]).float().to(device) * 2 - 1 # normalized to (-1, 1)
sdf_random = sdf_network.sdf(pts_random, conditional_volume, lod=lod)['sdf_pts_scale%d' % lod]
result = {
'depth': depth,
'color_fine': color_fine,
'color_fine_mask': color_fine_mask,
'color_outside': ret_outside['color'] if ret_outside is not None else None,
'color_outside_mask': ret_outside['color_mask'] if ret_outside is not None else None,
'color_mlp': ret_fine['color_mlp'],
'color_mlp_mask': ret_fine['color_mlp_mask'],
'variance': variance.mean(),
'cdf_fine': ret_fine['cdf'],
'depth_variance': depth_varaince,
'weights_sum': weights_sum,
'weights_max': torch.max(weights, dim=-1, keepdim=True)[0],
'alpha_sum': ret_fine['alpha_sum'].mean(),
'alpha_mean': ret_fine['alpha_mean'],
'gradients': gradients,
'weights': weights,
'gradient_error_fine': ret_fine['gradient_error'],
'inside_sphere': ret_fine['inside_sphere'],
'sdf': ret_fine['sdf'],
'sdf_random': sdf_random,
'blended_color_patch': ret_fine['blended_color_patch'],
'blended_color_patch_mask': ret_fine['blended_color_patch_mask'],
'weights_sum_fg': ret_fine['weights_sum_fg']
}
return result
@torch.no_grad()
def sample_z_vals_from_sdfVolume(self, rays_o, rays_d, near, far, sdf_volume, mask_volume):
# ? based on sdf to do importance sampling, seems that too biased on pre-estimation
device = rays_o.device
N_rays = len(rays_o)
n_samples = self.n_samples * 2
z_vals = torch.linspace(0.0, 1.0, n_samples).to(device)
z_vals = near + (far - near) * z_vals[None, :]
if z_vals.shape[0] == 1:
z_vals = z_vals.repeat(N_rays, 1)
pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]
sdf = self.get_pts_mask_for_conditional_volume(pts.view(-1, 3), sdf_volume).reshape([N_rays, n_samples])
new_z_vals = self.up_sample(rays_o, rays_d, z_vals, sdf, self.n_samples,
200,
conditional_valid_mask_volume=mask_volume,
)
return new_z_vals
@torch.no_grad()
def sample_z_vals_from_maskVolume(self, rays_o, rays_d, near, far, mask_volume): # don't use
device = rays_o.device
N_rays = len(rays_o)
n_samples = self.n_samples * 2
z_vals = torch.linspace(0.0, 1.0, n_samples).to(device)
z_vals = near + (far - near) * z_vals[None, :]
if z_vals.shape[0] == 1:
z_vals = z_vals.repeat(N_rays, 1)
mid_z_vals = (z_vals[:, 1:] + z_vals[:, :-1]) * 0.5
pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None]
pts_mask = self.get_pts_mask_for_conditional_volume(pts.view(-1, 3), mask_volume).reshape(
[N_rays, n_samples - 1])
# empty voxel set to 0.1, non-empty voxel set to 1
weights = torch.where(pts_mask > 0, torch.ones_like(pts_mask).to(device),
0.1 * torch.ones_like(pts_mask).to(device))
# sample more pts in non-empty voxels
z_samples = sample_pdf(z_vals, weights, self.n_samples, det=True).detach()
return z_samples
    @torch.no_grad()
    def filter_pts_by_depthmaps(self, coords, pred_depth_maps, proj_matrices,
                                partial_vol_origin, voxel_size,
                                near, far, depth_interval, d_plane_nums):
        """
        Use the pred_depthmaps to remove redundant pts (pruned by sdf, sdf always have two sides, the back side is useless)

        A voxel is kept if, in at least one view, it projects inside the image
        and its view-space depth lies within +/- d_plane_nums * depth_interval
        of the predicted depth at that pixel (band clamped to [near, far]).

        :param coords: [n, 3] int voxel coords
        :param pred_depth_maps: [N_views, 1, h, w]
        :param proj_matrices: [N_views, 4, 4] world-to-image projections
        :param partial_vol_origin: [3] world-space origin of the partial volume
        :param voxel_size: scalar voxel edge length
        :param near: scalar tensor, near depth bound
        :param far: scalar tensor, far depth bound
        :param depth_interval: scalar spacing between depth planes
        :param d_plane_nums: tolerance expressed in number of depth planes
        :return: [n] bool mask — True for voxels to keep
        """
        device = pred_depth_maps.device
        n_views, _, sizeH, sizeW = pred_depth_maps.shape

        if len(partial_vol_origin.shape) == 1:
            partial_vol_origin = partial_vol_origin[None, :]
        # voxel indices -> world coordinates
        pts = coords * voxel_size + partial_vol_origin

        rs_grid = pts.unsqueeze(0).expand(n_views, -1, -1)
        rs_grid = rs_grid.permute(0, 2, 1).contiguous()  # [n_views, 3, n_pts]
        nV = rs_grid.shape[-1]
        # append a homogeneous coordinate for the 4x4 projection
        rs_grid = torch.cat([rs_grid, torch.ones([n_views, 1, nV]).to(device)], dim=1)  # [n_views, 4, n_pts]

        # Project grid
        im_p = proj_matrices @ rs_grid  # - transform world pts to image UV space # [n_views, 4, n_pts]
        im_x, im_y, im_z = im_p[:, 0], im_p[:, 1], im_p[:, 2]
        im_x = im_x / im_z
        im_y = im_y / im_z

        # normalize pixel coordinates to [-1, 1] as required by grid_sample
        im_grid = torch.stack([2 * im_x / (sizeW - 1) - 1, 2 * im_y / (sizeH - 1) - 1], dim=-1)

        im_grid = im_grid.view(n_views, 1, -1, 2)
        sampled_depths = torch.nn.functional.grid_sample(pred_depth_maps, im_grid, mode='bilinear',
                                                         padding_mode='zeros',
                                                         align_corners=True)[:, 0, 0, :]  # [n_views, n_pts]
        # pixels with an implausibly small sampled depth count as invalid
        # (projections outside the image sample 0 via padding_mode='zeros')
        sampled_depths_valid = (sampled_depths > 0.5 * near).float()
        # tolerance band around the predicted depth; zeroed where invalid
        valid_d_min = (sampled_depths - d_plane_nums * depth_interval).clamp(near.item(),
                                                                             far.item()) * sampled_depths_valid
        valid_d_max = (sampled_depths + d_plane_nums * depth_interval).clamp(near.item(),
                                                                             far.item()) * sampled_depths_valid

        # keep points that project inside the image AND fall inside the band
        mask = im_grid.abs() <= 1
        mask = mask[:, 0]  # [n_views, n_pts, 2]
        mask = (mask.sum(dim=-1) == 2) & (im_z > valid_d_min) & (im_z < valid_d_max)

        mask = mask.view(n_views, -1)
        mask = mask.permute(1, 0).contiguous()  # [num_pts, nviews]

        # a voxel survives if it is consistent with at least one view
        mask_final = torch.sum(mask.float(), dim=1, keepdim=False) > 0

        return mask_final
    @torch.no_grad()
    def get_valid_sparse_coords_by_sdf_depthfilter(self, sdf_volume, coords_volume, mask_volume, feature_volume,
                                                   pred_depth_maps, proj_matrices,
                                                   partial_vol_origin, voxel_size,
                                                   near, far, depth_interval, d_plane_nums,
                                                   threshold=0.02, maximum_pts=110000):
        """
        assume batch size == 1, from the first lod to get sparse voxels

        Keeps voxels with |sdf| < threshold, removes back-side surface voxels
        via the predicted depth maps, dilates the result, and intersects it
        with the validity mask. `threshold` is tightened in 0.002 steps (down
        to a floor of 0.003) while the count exceeds `maximum_pts`; any
        remaining excess is removed by random subsampling.

        :param sdf_volume: [1, X, Y, Z]
        :param coords_volume: [3, X, Y, Z]
        :param mask_volume: [1, X, Y, Z]
        :param feature_volume: [C, X, Y, Z]
        :param pred_depth_maps: [N_views, 1, h, w]
        :param proj_matrices: [N_views, 4, 4]
        :param threshold: initial |sdf| cutoff selecting surface voxels
        :param maximum_pts: hard cap on the number of returned voxels
        :return: (valid_coords [N, 4] with a leading batch index,
                  valid_feature [N, C])
        """
        device = coords_volume.device
        _, dX, dY, dZ = coords_volume.shape

        def prune(sdf_pts, coords_pts, mask_volume, threshold):
            # voxels near the SDF zero level set
            occupancy_mask = (torch.abs(sdf_pts) < threshold).squeeze(1)  # [num_pts]
            valid_coords = coords_pts[occupancy_mask]

            # - filter backside surface by depth maps
            mask_filtered = self.filter_pts_by_depthmaps(valid_coords, pred_depth_maps, proj_matrices,
                                                         partial_vol_origin, voxel_size,
                                                         near, far, depth_interval, d_plane_nums)
            valid_coords = valid_coords[mask_filtered]

            # scatter survivors back to a dense occupancy grid
            occupancy_mask = sparse_to_dense_channel(valid_coords, 1, [dX, dY, dZ], 1, 0, device)  # [dX, dY, dZ, 1]

            # - dilate with a 7^3 average pooling so thin surfaces survive
            occupancy_mask = occupancy_mask.float()
            occupancy_mask = occupancy_mask.view(1, 1, dX, dY, dZ)
            occupancy_mask = F.avg_pool3d(occupancy_mask, kernel_size=7, stride=1, padding=3)
            occupancy_mask = occupancy_mask.view(-1, 1) > 0

            final_mask = torch.logical_and(mask_volume, occupancy_mask)[:, 0]  # [num_pts]
            return final_mask, torch.sum(final_mask.float())

        C, dX, dY, dZ = feature_volume.shape
        # flatten all volumes to [num_pts, ...] with matching point order
        sdf_volume = sdf_volume.permute(1, 2, 3, 0).contiguous().view(-1, 1)
        coords_volume = coords_volume.permute(1, 2, 3, 0).contiguous().view(-1, 3)
        mask_volume = mask_volume.permute(1, 2, 3, 0).contiguous().view(-1, 1)
        feature_volume = feature_volume.permute(1, 2, 3, 0).contiguous().view(-1, C)

        # - for check
        # sdf_volume = torch.rand_like(sdf_volume).float().to(sdf_volume.device) * 0.02

        final_mask, valid_num = prune(sdf_volume, coords_volume, mask_volume, threshold)

        # tighten the threshold until the voxel budget is met (or floor reached)
        while (valid_num > maximum_pts) and (threshold > 0.003):
            threshold = threshold - 0.002
            final_mask, valid_num = prune(sdf_volume, coords_volume, mask_volume, threshold)

        valid_coords = coords_volume[final_mask]  # [N, 3]
        valid_feature = feature_volume[final_mask]  # [N, C]

        valid_coords = torch.cat([torch.ones([valid_coords.shape[0], 1]).to(valid_coords.device) * 0,
                                  valid_coords], dim=1)  # [N, 4], append batch idx

        # ! if the valid_num is still larger than maximum_pts, sample part of pts
        if valid_num > maximum_pts:
            valid_num = valid_num.long()
            occupancy = torch.ones([valid_num]).to(device) > 0
            # randomly mark (valid_num - maximum_pts) voxels for removal
            choice = np.random.choice(valid_num.cpu().numpy(), valid_num.cpu().numpy() - maximum_pts,
                                      replace=False)
            ind = torch.nonzero(occupancy).to(device)
            occupancy[ind[choice]] = False
            valid_coords = valid_coords[occupancy]
            valid_feature = valid_feature[occupancy]

            print(threshold, "randomly sample to save memory")

        return valid_coords, valid_feature
    @torch.no_grad()
    def get_valid_sparse_coords_by_sdf(self, sdf_volume, coords_volume, mask_volume, feature_volume, threshold=0.02,
                                       maximum_pts=110000):
        """
        assume batch size == 1, from the first lod to get sparse voxels

        Same pipeline as get_valid_sparse_coords_by_sdf_depthfilter but WITHOUT
        the depth-map back-side filtering: keep voxels with |sdf| < threshold,
        dilate, intersect with the validity mask, tighten the threshold to meet
        `maximum_pts`, and random-subsample any remaining excess.

        :param sdf_volume: [num_pts, 1] (already flattened)
        :param coords_volume: [3, X, Y, Z]
        :param mask_volume: [1, X, Y, Z]
        :param feature_volume: [C, X, Y, Z]
        :param threshold: initial |sdf| cutoff selecting surface voxels
        :param maximum_pts: hard cap on the number of returned voxels
        :return: (valid_coords [N, 4] with a leading batch index,
                  valid_feature [N, C])
        """

        def prune(sdf_volume, mask_volume, threshold):
            # voxels near the SDF zero level set
            occupancy_mask = torch.abs(sdf_volume) < threshold  # [num_pts, 1]

            # - dilate with a 7^3 average pooling so thin surfaces survive
            occupancy_mask = occupancy_mask.float()
            occupancy_mask = occupancy_mask.view(1, 1, dX, dY, dZ)
            occupancy_mask = F.avg_pool3d(occupancy_mask, kernel_size=7, stride=1, padding=3)
            occupancy_mask = occupancy_mask.view(-1, 1) > 0

            final_mask = torch.logical_and(mask_volume, occupancy_mask)[:, 0]  # [num_pts]
            return final_mask, torch.sum(final_mask.float())

        C, dX, dY, dZ = feature_volume.shape
        # flatten volumes to [num_pts, ...] with matching point order
        coords_volume = coords_volume.permute(1, 2, 3, 0).contiguous().view(-1, 3)
        mask_volume = mask_volume.permute(1, 2, 3, 0).contiguous().view(-1, 1)
        feature_volume = feature_volume.permute(1, 2, 3, 0).contiguous().view(-1, C)

        final_mask, valid_num = prune(sdf_volume, mask_volume, threshold)

        # tighten the threshold until the voxel budget is met (or floor reached)
        while (valid_num > maximum_pts) and (threshold > 0.003):
            threshold = threshold - 0.002
            final_mask, valid_num = prune(sdf_volume, mask_volume, threshold)

        valid_coords = coords_volume[final_mask]  # [N, 3]
        valid_feature = feature_volume[final_mask]  # [N, C]

        valid_coords = torch.cat([torch.ones([valid_coords.shape[0], 1]).to(valid_coords.device) * 0,
                                  valid_coords], dim=1)  # [N, 4], append batch idx

        # ! if the valid_num is still larger than maximum_pts, sample part of pts
        if valid_num > maximum_pts:
            device = sdf_volume.device
            valid_num = valid_num.long()
            occupancy = torch.ones([valid_num]).to(device) > 0
            # randomly mark (valid_num - maximum_pts) voxels for removal
            choice = np.random.choice(valid_num.cpu().numpy(), valid_num.cpu().numpy() - maximum_pts,
                                      replace=False)
            ind = torch.nonzero(occupancy).to(device)
            occupancy[ind[choice]] = False
            valid_coords = valid_coords[occupancy]
            valid_feature = valid_feature[occupancy]

            print(threshold, "randomly sample to save memory")

        return valid_coords, valid_feature
@torch.no_grad()
def extract_fields(self, bound_min, bound_max, resolution, query_func, device,
# * related to conditional feature
**kwargs
):
N = 64
X = torch.linspace(bound_min[0], bound_max[0], resolution).to(device).split(N)
Y = torch.linspace(bound_min[1], bound_max[1], resolution).to(device).split(N)
Z = torch.linspace(bound_min[2], bound_max[2], resolution).to(device).split(N)
u = np.zeros([resolution, resolution, resolution], dtype=np.float32)
with torch.no_grad():
for xi, xs in enumerate(X):
for yi, ys in enumerate(Y):
for zi, zs in enumerate(Z):
xx, yy, zz = torch.meshgrid(xs, ys, zs, indexing="ij")
pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1)
# ! attention, the query function is different for extract geometry and fields
output = query_func(pts, **kwargs)
sdf = output['sdf_pts_scale%d' % kwargs['lod']].reshape(len(xs), len(ys),
len(zs)).detach().cpu().numpy()
u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = -1 * sdf
return u
@torch.no_grad()
def extract_geometry(self, sdf_network, bound_min, bound_max, resolution, threshold, device, occupancy_mask=None,
# * 3d feature volume
**kwargs
):
# logging.info('threshold: {}'.format(threshold))
u = self.extract_fields(bound_min, bound_max, resolution,
lambda pts, **kwargs: sdf_network.sdf(pts, **kwargs),
# - sdf need to be multiplied by -1
device,
# * 3d feature volume
**kwargs
)
if occupancy_mask is not None:
dX, dY, dZ = occupancy_mask.shape
empty_mask = 1 - occupancy_mask
empty_mask = empty_mask.view(1, 1, dX, dY, dZ)
# - dilation
# empty_mask = F.avg_pool3d(empty_mask, kernel_size=7, stride=1, padding=3)
empty_mask = F.interpolate(empty_mask, [resolution, resolution, resolution], mode='nearest')
empty_mask = empty_mask.view(resolution, resolution, resolution).cpu().numpy() > 0
u[empty_mask] = -100
del empty_mask
vertices, triangles = mcubes.marching_cubes(u, threshold)
b_max_np = bound_max.detach().cpu().numpy()
b_min_np = bound_min.detach().cpu().numpy()
vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :]
return vertices, triangles, u
    @torch.no_grad()
    def extract_depth_maps(self, sdf_network, con_volume, intrinsics, c2ws, H, W, near, far):
        """
        extract depth maps from the density volume

        One ray is cast per pixel for every camera and sphere-traced against
        the SDF; pixels with no surface hit are filled with 0.

        :param sdf_network: conditional SDF network queried by the ray tracer
        :param con_volume: [1, 1+C, dX, dY, dZ] can by con_volume or sdf_volume
        :param intrinsics: [B, 3, 3]-compatible camera intrinsics (only the
            top-left 3x3 of the inverse is used)
        :param c2ws: [B, 4, 4] camera-to-world poses
        :param H: image height in pixels
        :param W: image width in pixels
        :param near: [1] scalar tensor, near bound shared by all rays
        :param far: [1] scalar tensor, far bound shared by all rays
        :return: (depth_maps [B, 1, H, W], depth_masks [B, 1, H, W] hit mask)
        """
        device = con_volume.device
        batch_size = intrinsics.shape[0]

        with torch.no_grad():
            # build the pixel grid in homogeneous pixel coordinates
            ys, xs = torch.meshgrid(torch.linspace(0, H - 1, H),
                                    torch.linspace(0, W - 1, W), indexing="ij")  # pytorch's meshgrid has indexing='ij'
            p = torch.stack([xs, ys, torch.ones_like(ys)], dim=-1)  # H, W, 3

            # back-project pixels into camera space, then rotate into world space
            intrinsics_inv = torch.inverse(intrinsics)

            p = p.view(-1, 3).float().to(device)  # N_rays, 3
            # NOTE(review): .squeeze() below drops ALL size-1 dims, so a batch
            # of exactly one camera would also lose its batch axis and break
            # the 4-D indexing that follows — presumably B > 1 here; confirm.
            p = torch.matmul(intrinsics_inv[:, None, :3, :3], p[:, :, None]).squeeze()  # Batch, N_rays, 3
            rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True)  # Batch, N_rays, 3
            rays_v = torch.matmul(c2ws[:, None, :3, :3], rays_v[:, :, :, None]).squeeze()  # Batch, N_rays, 3
            rays_o = c2ws[:, None, :3, 3].expand(rays_v.shape)  # Batch, N_rays, 3
            rays_d = rays_v

            rays_o = rays_o.contiguous().view(-1, 3)
            rays_d = rays_d.contiguous().view(-1, 3)

            ################## - sphere tracer to extract depth maps ######################
            depth_masks_sphere, depth_maps_sphere = self.ray_tracer.extract_depth_maps(
                rays_o, rays_d,
                near[None, :].repeat(rays_o.shape[0], 1),
                far[None, :].repeat(rays_o.shape[0], 1),
                sdf_network, con_volume
            )

        depth_maps = depth_maps_sphere.view(batch_size, 1, H, W)
        depth_masks = depth_masks_sphere.view(batch_size, 1, H, W)

        depth_maps = torch.where(depth_masks, depth_maps,
                                 torch.zeros_like(depth_masks.float()).to(device))  # fill invalid pixels by 0

        return depth_maps, depth_masks
| One-2-3-45/One-2-3-45 | reconstruction/models/sparse_neus_renderer.py | sparse_neus_renderer.py | py | 44,709 | python | en | code | 1,164 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "models.projector.Projector",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "models.patch... |
20189362717 | import argparse
from pathlib import Path
from os.path import basename, isfile, isdir, splitext
import glob
def _list_modules(folder_name):
modules = glob.glob(str(Path(__file__).resolve().parent / folder_name / '*'))
return list(
splitext(basename(f))[0]
for f in modules
if (isfile(f) and splitext(f)[1] == '.py' and basename(f) != '__init__.py')
or (isdir(f) and isfile(Path(f) / '__init__.py'))
)
def get_args():
    """Build and parse the command-line options for the pedestrian monitor.

    The pipeline-stage choices (preprocessor / detector / tracker / clusterer)
    are discovered dynamically from the sibling package folders via
    ``_list_modules``, so dropping a new module in one of those folders makes
    it selectable without code changes.

    :return: argparse.Namespace with all parsed options
    """
    parser = argparse.ArgumentParser(
        description='COMP9517 20T1 Project - Pedestrian Detecting, Tracking and Clustering',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # --- input sources ----------------------------------------------------
    parser.add_argument(
        '-si',
        '--source-images',
        help='(REQUIRED) Source images, can be either a directory or wildcard files, e.g. sequence/**/*.jpg',
        dest='path',
        default=str(Path(__file__).resolve().parent.parent / 'sequence' / '*.jpg'),
    )
    parser.add_argument(
        '-sv',
        '--source-video',
        help='(REQUIRED) Source video, can be a camera index, a file or a url.',  # TODO: Unfinished
        dest='path_video',
        default='',
    )
    # --- output geometry & playback ---------------------------------------
    parser.add_argument(
        '-iw',
        '--image-width',
        help='Image width in pixels for resizing. '
             '0: take width from the first image; '
             '-1: same with 0 but keeps aspect ratio and creates black edges.',
        dest='width',
        type=int,
        default=-1,
    )
    parser.add_argument(
        '-ih',
        '--image-height',
        # NOTE(review): this help text says "take width" — likely copy-pasted
        # from --image-width; the option itself controls the height.
        help='Image height in pixels for resizing.'
             '0: take width from the first image; '
             '-1: same with 0 but keeps aspect ratio and creates black edges.',
        dest='height',
        type=int,
        default=-1,
    )
    parser.add_argument(
        '-fps',
        '--frames-per-second',
        help='Playback frames per second.',
        dest='fps',
        type=float,
        default=10,
    )
    parser.add_argument(
        '--frame-skipping',
        help='Enable frame skipping when the processing speed cannot keep up.',
        dest='frame_skipping',
        action='store_true',
    )
    parser.add_argument(
        '--listening',
        help='Enable pulling listener.',
        dest='listening',
        action='store_true',
    )
    # --- pipeline stages: each has a module choice and a disable switch ----
    parser.add_argument(
        '-pp',
        '--preprocessor',
        help='Use a specific preprocessor',
        dest='preprocessor',
        choices=_list_modules('preprocessors'),
        default='sample_preprocessor',
    )
    parser.add_argument(
        '--no-preprocessor',
        help='Disable image preprocessing',
        dest='no_preprocessor',
        action='store_true',
    )
    parser.add_argument(
        '-dt',
        '--detector',
        help='Use a specific detector',
        dest='detector',
        choices=_list_modules('detectors'),
        default='sample_detector',
    )
    parser.add_argument(
        '--no-detector',
        help='Disable pedestrian detecting',
        dest='no_detector',
        action='store_true',
    )
    parser.add_argument(
        '-tk',
        '--tracker',
        help='Use a specific tracker',
        dest='tracker',
        choices=_list_modules('trackers'),
        default='sample_tracker',
    )
    parser.add_argument(
        '--no-tracker',
        help='Disable pedestrian re-id and path tracking',
        dest='no_tracker',
        action='store_true',
    )
    parser.add_argument(
        '-cl',
        '--clusterer',
        help='Use a specific clusterer',
        dest='clusterer',
        choices=_list_modules('clusterers'),
        default='sample_clusterer',
    )
    parser.add_argument(
        '--no-clusterer',
        help='Disable pedestrian clustering (group detection)',
        dest='no_clusterer',
        action='store_true',
    )
    # --- diagnostics -------------------------------------------------------
    parser.add_argument(
        '-time',
        '--measure-time',
        help='Measure and print the time consumption of each step',
        dest='measure_time',
        action='store_true',
    )
    # if len(sys.argv) == 1:
    #     parser.print_help(sys.stderr)
    #     sys.exit(1)
    return parser.parse_args()
| patli96/COMP9517_20T1 | pedestrian_monitor/console_arguments.py | console_arguments.py | py | 4,198 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_nu... |
38642289342 | import nltk
import pickle
from utils import tokenize_document
# Load the raw resume text; the context manager guarantees the handle is
# closed even if read() raises (the original closed it manually).
with open('../assets/resume.txt', 'r') as resume_file:
    resume = resume_file.read()

# Tokenize on word characters (\w+), discarding punctuation and whitespace.
tokenizer = nltk.RegexpTokenizer(r'\w+')
resume_tokenized = tokenize_document(resume, tokenizer)
print(resume_tokenized)

# Persist the token list. FIX: the original passed an anonymous open(...) to
# pickle.dump and never closed it, leaking the output file handle.
with open('../assets/resume_tokens.p', 'wb') as token_file:
    pickle.dump(resume_tokenized, token_file)
| anishLearnsToCode/stop-words-removal | src/driver.py | driver.py | py | 352 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.RegexpTokenizer",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "utils.tokenize_document",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 12,
"usage_type": "call"
}
] |
496383977 | from dagster_datadog import datadog_resource
from dagster import ModeDefinition, execute_solid, solid
from dagster.seven import mock
# mock.patch decorators are applied bottom-up, so the injected mock arguments
# arrive in the REVERSE order of the decorator stack: the innermost decorator
# ('event') becomes the first parameter.
@mock.patch('datadog.statsd.timing')
@mock.patch('datadog.statsd.timed')
@mock.patch('datadog.statsd.service_check')
@mock.patch('datadog.statsd.set')
@mock.patch('datadog.statsd.distribution')
@mock.patch('datadog.statsd.histogram')
@mock.patch('datadog.statsd.decrement')
@mock.patch('datadog.statsd.increment')
@mock.patch('datadog.statsd.gauge')
@mock.patch('datadog.statsd.event')
def test_datadog_resource(
    event,
    gauge,
    increment,
    decrement,
    histogram,
    distribution,
    statsd_set,
    service_check,
    timed,
    timing,
):
    """Exercise every statsd-style method exposed by the datadog resource and
    verify each one delegates to the matching datadog.statsd call."""

    @solid(required_resource_keys={'datadog'})
    def datadog_solid(context):
        assert context.resources.datadog

        # event
        context.resources.datadog.event('Man down!', 'This server needs assistance.')
        event.assert_called_with('Man down!', 'This server needs assistance.')

        # gauge
        context.resources.datadog.gauge('users.online', 1001, tags=["protocol:http"])
        gauge.assert_called_with('users.online', 1001, tags=["protocol:http"])

        # increment
        context.resources.datadog.increment('page.views')
        increment.assert_called_with('page.views')

        # decrement
        context.resources.datadog.decrement('page.views')
        decrement.assert_called_with('page.views')

        # histogram
        context.resources.datadog.histogram('album.photo.count', 26, tags=["gender:female"])
        histogram.assert_called_with('album.photo.count', 26, tags=["gender:female"])

        # distribution
        context.resources.datadog.distribution('album.photo.count', 26, tags=["color:blue"])
        distribution.assert_called_with('album.photo.count', 26, tags=["color:blue"])

        # set — the mock is named statsd_set locally because `set` is a builtin
        context.resources.datadog.set('visitors.uniques', 999, tags=["browser:ie"])
        statsd_set.assert_called_with('visitors.uniques', 999, tags=["browser:ie"])

        # service_check
        context.resources.datadog.service_check('svc.check_name', context.resources.datadog.WARNING)
        service_check.assert_called_with('svc.check_name', context.resources.datadog.WARNING)

        # timing
        context.resources.datadog.timing("query.response.time", 1234)
        timing.assert_called_with("query.response.time", 1234)

        # timed decorator
        @context.resources.datadog.timed('run_fn')
        def run_fn():
            pass

        run_fn()
        timed.assert_called_with('run_fn')

    # run the solid with a datadog resource configured with dummy keys;
    # all network-facing statsd calls are mocked out above
    result = execute_solid(
        datadog_solid,
        environment_dict={
            'resources': {'datadog': {'config': {'api_key': 'NOT_USED', 'app_key': 'NOT_USED'}}}
        },
        mode_def=ModeDefinition(resource_defs={'datadog': datadog_resource}),
    )
    assert result.success
| helloworld/continuous-dagster | deploy/dagster_modules/libraries/dagster-datadog/dagster_datadog_tests/test_resources.py | test_resources.py | py | 2,747 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "dagster.solid",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "dagster.execute_solid",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "dagster.ModeDefinition",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "dagster_da... |
32480271471 | from django.urls import include, path
from rest_framework.routers import DefaultRouter
from .views import (IngredientViewSet, LikedRecipeDetailView, RecipeViewSet,
SubscribtionListView, SubscriptionDetailView, TagViewSet)
# DRF router: generates the standard list/detail routes for the three viewsets.
v1_router = DefaultRouter()
v1_router.register(
    'tags',
    TagViewSet,
    basename='tags-list'
)
v1_router.register(
    'recipes',
    RecipeViewSet,
    basename='recipes-list'
)
v1_router.register(
    'ingredients',
    IngredientViewSet,
    basename='ingredients-list'
)

urlpatterns = [
    # all router-generated endpoints (tags/, recipes/, ingredients/)
    path('',
         include(v1_router.urls)),
    # list view of the current user's subscriptions
    path('users/subscriptions/',
         SubscribtionListView.as_view(),
         name='subscription'),
    # subscription detail endpoint for the author with the given id
    path('users/<int:id>/subscribe/',
         SubscriptionDetailView.as_view(),
         name='subscribe'),
    # favorite-recipe detail endpoint for the recipe with the given id
    path('recipes/<int:id>/favorite/',
         LikedRecipeDetailView.as_view(),
         name='favorite'),
]
| JCoffeeYP/foodgram-project-react | backend/cookbook/urls.py | urls.py | py | 917 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.TagViewSet",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "views.RecipeViewSet",
"line_number": 15,
"usage_type": "argument"
},
{
"... |
74873018984 | from flask_wtf import Form
from wtforms import StringField,BooleanField,PasswordField,IntegerField,SelectField
from wtforms.validators import DataRequired, ValidationError
from webapp.Models.db_basic import Session
from webapp.Models.prod_cat import Prod_cat
from webapp.Models.prod_sub_cat import Prod_sub_cat
def levelone_not_exists_check(self, field):
    """WTForms validator: reject the name if a level-one category already uses it."""
    db_session = Session()
    existing = db_session.query(Prod_cat).filter_by(prod_cat_name=field.data).first()
    if existing is not None:
        raise ValidationError('Level One Category %s is already exists !' % field.data)
def leveltwo_not_exists_check(self, field):
    """WTForms validator: reject the name if a level-two (sub) category already uses it.

    Raises ValidationError when a Prod_sub_cat row with this name exists.
    """
    session = Session()
    if not session.query(Prod_sub_cat).filter_by(prod_cat_sub_name=field.data).first() is None:
        # Bug fix: the message previously said "Level One ... is already exists !"
        # (copy-pasted from the level-one validator) even though this validator
        # checks level-two sub-categories.
        raise ValidationError('Level Two Category %s already exists !' % field.data)
class DeleteLevelOneForm(Form):
    """Delete a level-one category, identified by its integer id."""
    prod_cat_id = IntegerField('Level One Category ID', validators = [DataRequired()])
class CreateNewLevelOneForm(Form):
    """Create a level-one category; the name must not already exist."""
    prod_cat_name = StringField('Level One Category Name', validators = [DataRequired(),levelone_not_exists_check])
    prod_cat_desc = StringField('Level One Category Description')
    prod_cat_order = IntegerField('Level One Order Number')
class UpdateLevelOneForm(Form):
    """Edit an existing level-one category (no uniqueness check on rename)."""
    prod_cat_id = IntegerField('Product Categroy ID',validators = [DataRequired()])
    prod_cat_name = StringField('Level One Category Name', validators = [DataRequired()])
    prod_cat_desc = StringField('Level One Category Description')
    prod_cat_order = IntegerField('Level One Order Number')
    valid_flg = BooleanField('Valid Flag')
class DeleteLevelTwoForm(Form):
    """Delete a level-two (sub) category, identified by its integer id."""
    # Bug fix: the label previously read "Level One Category ID" — a copy-paste
    # from DeleteLevelOneForm — although this field is the sub-category id.
    prod_cat_sub_id = IntegerField('Level Two Category ID', validators=[DataRequired()])
class CreateNewLevelTwoForm(Form):
    """Create a level-two (sub) category under a chosen level-one category.

    The `prod_cat_id` choices are empty here and are expected to be filled
    with (id, name) pairs by the view before rendering/validation.
    """
    # Bug fix: the name field previously used levelone_not_exists_check, which
    # queries the Prod_cat (level-one) table — duplicate sub-category names were
    # never rejected, while names clashing with any level-one category were.
    # It now uses the level-two validator. Labels also said "Level One".
    prod_cat_sub_name = StringField('Level Two Category Name', validators=[DataRequired(), leveltwo_not_exists_check])
    prod_cat_id = SelectField('Product Category ID', choices=[], coerce=int)
    prod_cat_sub_desc = StringField('Level Two Category Description')
class UpdateLevelTwoForm(Form):
    """Edit an existing level-two (sub) category.

    The `prod_cat_id` choices are empty here and are expected to be filled
    by the view before rendering/validation.
    """
    # Bug fix: labels said "Level One ..." (copy-paste) and "Subcategroy" (typo).
    prod_cat_sub_id = IntegerField('Product Subcategory ID', validators=[DataRequired()])
    prod_cat_id = SelectField('Product Category ID', choices=[], coerce=int)
    prod_cat_sub_name = StringField('Level Two Category Name', validators=[DataRequired()])
    prod_cat_sub_desc = StringField('Level Two Category Description')
    valid_flg = BooleanField('Valid Flag')
{
"api_name": "webapp.Models.db_basic.Session",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "webapp.Models.prod_cat.Prod_cat",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "wtforms.validators.ValidationError",
"line_number": 12,
"usage_type": ... |
5054088480 | from abei.implements.service_basic import ServiceBasic
from abei.implements.util import (
FileLikeWrapper,
LazyProperty,
)
from abei.interfaces import (
IProcedure,
IProcedureLink,
IProcedureFactory,
IProcedureJointFactory,
IProcedureBuilder,
service_entry as _,
)
from .procedure_joint_basic import (
joint_validate_dependents,
)
keyword_procedure_signature = 'fn'
keyword_procedure_input_signatures = 'args'
keyword_procedure_output_signatures = 'return'
keyword_procedure_outputs = 'out'
keyword_procedure_document = 'doc'
keyword_joints = 'statements'
keyword_joint_name = 'name'
keyword_joint_procedure = 'call'
keyword_joint_inputs = 'in'
def parse_signature(signature):
    """Split a 'name@site' signature into (name, site).

    :param signature: string, optionally containing one '@' separator
    :return: (name, site) tuple; site is None when no '@' is present
    :raises ValueError: if the signature contains more than one '@'
    """
    parts = signature.split('@')
    if len(parts) == 2:
        return parts[0], parts[1]
    if len(parts) == 1:
        return parts[0], None
    raise ValueError('invalid signature {}'.format(signature))
class ProcedureJointBuilder(object):
    """Wraps one joint description (a dict) of a composite procedure.

    Exposes the raw fields lazily; `instance` creates the actual joint object
    on first access and caches it via LazyProperty, so joints can reference
    each other by name before all of them are instantiated.
    """
    def __init__(
            self,
            procedure_builder,
            procedure_site,
            procedure,
            data,
    ):
        # procedure_builder: owning ProcedureBuilder (supplies the joint factory)
        # procedure_site: site used to resolve the called procedure's signature
        # procedure: the composite procedure this joint belongs to
        # data: raw dict from the configuration file
        self.procedure_builder = procedure_builder
        self.procedure_site = procedure_site
        self.procedure = procedure
        assert isinstance(data, dict)
        self.data = data
    @property
    def name(self):
        # Joint's local name ('name' key); used as the lookup key among joints.
        return self.data.get(keyword_joint_name)
    @property
    def inputs(self):
        # Raw list of input references ('in' key), e.g. '$0' or 'joint[1]'.
        return self.data.get(keyword_joint_inputs)
    @LazyProperty
    def instance(self):
        # Resolve the called procedure ('call' key, possibly 'name@site') and
        # build the joint once; LazyProperty caches the result on the instance.
        joint_procedure_signature = self.data.get(keyword_joint_procedure)
        signature, site = parse_signature(joint_procedure_signature)
        joint_procedure = self.procedure_site.get_procedure(
            signature,
            site=site,
        )
        return self.procedure_builder.procedure_joint_factory.create(
            joint_procedure,
            self.procedure,
            signature=self.name,
        )
class ProcedureBuilder(ServiceBasic, IProcedureBuilder):
    """Service that builds composite procedures from JSON/YAML config objects
    and registers them on a procedure site."""
    def __init__(self, service_site, **kwargs):
        self.service_site = service_site
    @LazyProperty
    def procedure_factory(self):
        # Resolved lazily so the factory service only needs to exist at first use.
        return self.service_site.get_service(_(IProcedureFactory))
    @LazyProperty
    def procedure_joint_factory(self):
        return self.service_site.get_service(_(IProcedureJointFactory))
    @classmethod
    def get_dependencies(cls):
        # Third-party packages required by this service (yaml loading).
        return ['PyYAML']
    def load_procedure_data(self, procedure_site, procedure_data_object):
        raise NotImplementedError()
    def load_procedure(self, procedure_site, procedure_object):
        """Build one composite procedure from a config dict and register it.

        :param procedure_site: site used to resolve data classes / procedures
        :param procedure_object: dict with 'fn', 'args', 'return', 'out',
            'doc' and 'statements' keys
        :raises ValueError: on malformed configuration
        """
        if not isinstance(procedure_object, dict):
            raise ValueError(
                'invalid procedure in configuration file')
        def get_full_signature(sig):
            # Expand 'name@site' into the data class's canonical signature.
            sig, site = parse_signature(sig)
            data = procedure_site.get_data_class(sig, site=site)
            return data.get_signature()
        input_signatures = procedure_object.get(
            keyword_procedure_input_signatures, [])
        if not isinstance(input_signatures, list):
            raise ValueError(
                'invalid procedure input signatures')
        input_signatures = [get_full_signature(
            sig) for sig in input_signatures]
        output_signatures = procedure_object.get(
            keyword_procedure_output_signatures, [])
        if not isinstance(output_signatures, list):
            raise ValueError(
                'invalid procedure output signatures')
        output_signatures = [get_full_signature(
            sig) for sig in output_signatures]
        procedure = self.procedure_factory.create(
            'composite',
            signature=str(procedure_object.get(
                keyword_procedure_signature, '')),
            docstring=str(procedure_object.get(
                keyword_procedure_document, '')),
            input_signatures=input_signatures,
            output_signatures=output_signatures,
        )
        assert (
            isinstance(procedure, IProcedure) and
            isinstance(procedure, IProcedureLink)
        )
        # Wrap each 'statements' entry; joints are created lazily so they can
        # reference each other by name in any order.
        procedure_joints = procedure_object.get(keyword_joints, [])
        procedure_joints = [
            ProcedureJointBuilder(
                self,
                procedure_site,
                procedure,
                jt
            ) for jt in procedure_joints
        ]
        procedure_joints = {jt.name: jt for jt in procedure_joints}
        self.load_joints(
            procedure_site,
            procedure,
            procedure_joints,
        )
        # 'out' lists which joint outputs become the procedure's outputs.
        procedure_output_joints = procedure_object.get(
            keyword_procedure_outputs, [])
        if not isinstance(procedure_output_joints, list):
            raise ValueError('invalid procedure joints')
        output_joints, output_indices = self.load_joint_inputs(
            procedure_output_joints, procedure_joints)
        for j in output_joints:
            if j is not None:
                joint_validate_dependents(j)
        procedure.set_joints(output_joints, output_indices)
        procedure_site.register_procedure(procedure)
    def load_joints(self, procedure_site, procedure, joint_objects):
        """Wire every joint's inputs to other joints or procedure inputs."""
        if not isinstance(joint_objects, dict):
            raise ValueError('invalid procedure joints')
        # connect joints
        for joint_signature, joint_object in joint_objects.items():
            joint_inputs = joint_object.inputs
            if not isinstance(joint_inputs, list):
                raise ValueError('invalid procedure joint config')
            joint_object.instance.set_joints(
                *self.load_joint_inputs(joint_inputs, joint_objects))
    @staticmethod
    def load_joint_inputs(joint_inputs, joint_objects):
        """Parse input references into parallel (joint, index) lists.

        '$N' means input N of the enclosing procedure (joint is None);
        'name[N]' means output N of the joint called 'name'.
        """
        input_joints = []
        input_indices = []
        for joint_input in joint_inputs:
            if not isinstance(joint_input, str):
                raise ValueError('invalid procedure joint input')
            if joint_input.startswith('$'):
                joint_input = joint_input.strip('$')
                if not joint_input.isdigit():
                    raise ValueError('invalid joint input')
                input_joints.append(None)
                input_indices.append(int(joint_input))
            else:
                joint_input_tokens = joint_input.split('[')
                if len(joint_input_tokens) != 2:
                    raise ValueError('invalid joint input')
                joint_input_joint, joint_input_index = joint_input_tokens
                joint_input_joint = joint_input_joint.strip()
                joint_input_index = joint_input_index.strip(']').strip()
                if joint_input_joint not in joint_objects:
                    raise ValueError('invalid joint')
                if not joint_input_index.isdigit():
                    raise ValueError('invalid joint input')
                input_joints.append(
                    joint_objects[joint_input_joint].instance)
                input_indices.append(int(joint_input_index))
        return input_joints, input_indices
    def load_object(self, procedure_site, config_object):
        """Load a sequence of procedure dicts onto the site, in order."""
        if not isinstance(config_object, (tuple, list)):
            raise ValueError('invalid procedure configuration file')
        for config_item in config_object:
            self.load_procedure(procedure_site, config_item)
    def load_json(self, procedure_site, file_or_filename):
        import json
        with FileLikeWrapper(file_or_filename) as file:
            self.load_object(procedure_site, json.loads(file.read()))
    def save_json(self, procedure_site, file_or_filename):
        raise NotImplementedError
    def load_yaml(self, procedure_site, file_or_filename):
        import yaml
        with FileLikeWrapper(file_or_filename) as file:
            self.load_object(procedure_site, yaml.safe_load(file))
    def save_yaml(self, procedure_site, file_or_filename):
        raise NotImplementedError
| mind-bricks/abei | abei/implements/procedure_builder.py | procedure_builder.py | py | 8,029 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "abei.implements.util.LazyProperty",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "abei.implements.service_basic.ServiceBasic",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "abei.interfaces.IProcedureBuilder",
"line_number": 79,
"usag... |
22840734386 |
from urllib.request import urlopen
import json
def E(X):
    """Expected value of a discrete random variable.

    :param X: iterable of (value, probability) pairs
    :return: sum of value * probability over all pairs
    """
    return sum(value * prob for value, prob in X)
# Fetch one year of daily BTC market prices and build X = [(return, weight)]
# where each pair is the day-over-day relative price change and a per-day
# probability weight taken from the quarterly table P.
url = 'https://api.blockchain.info/charts/market-price?timespan=1year&format=json'
http_file = urlopen(url)
lines = http_file.readlines()
s = [str(line, encoding = 'utf-8') for line in lines]
t =''
for line in s:
    t += line
s = json.loads(t)
values = s.get('values')
X = []
# P: assumed per-quarter probabilities (sum to ~1) — TODO confirm their source.
P = [0.1550018235964546, 0.20615242538328463, 0.27418272575976854, 0.36466302526049216]
for i in range(1, len(values)):
    e2, e1 = values[i], values[i-1]
    # dzeta: price ratio between consecutive days; dzeta-1 is the daily return.
    dzeta = e2.get('y')/e1.get('y')
    # 91.25 = 365/4 days per quarter; quarter indexes into P.
    quarter = int(i//91.25)
    X.append((dzeta-1, P[quarter]/91))
if __name__ == '__main__':
    print(E(X)) # revenue per day
    k = (E(X)) * 30 # revenue per month
# NOTE(review): k is only bound inside the __main__ guard above — importing
# this module and calling kbn() raises NameError. Consider defining k at
# module level.
def kbn(month_sum):
    return month_sum/k
if __name__ == '__main__':
    print(1/k)
{
"api_name": "urllib.request.urlopen",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 21,
"usage_type": "call"
}
] |
31453786297 | import os, sys, pygame
class Animation:
    """Timed full-screen slideshow of images with optional background music.

    Each image is scaled to the given screen size; `cd_images[i]` gives the
    display time (seconds) for image i, falling back to `common_cd`.
    """
    def __init__(self, names_files, cd_images, screen, width_screen, height_screen, sounds, colorkey=None, music=None):
        # Pre-load and pre-scale every frame once, up front.
        self.images = []
        for elem in names_files:
            self.images.append(pygame.transform.scale(self.load_image(elem, colorkey=colorkey), (width_screen, height_screen)))
        self.images_cd = cd_images
        self.common_cd = 5  # default seconds per frame when cd_images is short
        self.screen = screen
        self.music = music
        self.sounds = sounds

    def load_image(self, name, colorkey=None):
        """Load images/<name>; colorkey=-1 uses the top-left pixel as transparency."""
        fullname = os.path.join('images', name)
        if not os.path.isfile(fullname):
            # NOTE: only warns; pygame.image.load below will still fail loudly.
            print(f"Файл с изображением '{fullname}' не найден")
        image = pygame.image.load(fullname)
        if colorkey is not None:
            image = image.convert()
            if colorkey == -1:
                colorkey = image.get_at((0, 0))
            image.set_colorkey(colorkey)
        else:
            image = image.convert_alpha()
        return image

    def play(self):
        """Run the slideshow; return False if the window was closed, True when done."""
        if self.images != []:
            number = 0
            fps = 10
            clock = pygame.time.Clock()
            running = True
            cd = self.set_cd(number) * fps
            # Bug fix: mixer.music.load(None) raises when music was left at its
            # default — only load when a track was actually provided.
            if self.music is not None:
                pygame.mixer.music.load(self.music)
            while running:
                # Bug fix: previously drew on the module-level global `screen`
                # instead of the surface this Animation was constructed with,
                # which broke any use outside the demo at the bottom of the file.
                self.screen.fill((0, 0, 0))
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        return False
                if cd == 0:
                    print('here')
                    number += 1
                    if number < len(self.images):
                        cd = self.set_cd(number) * fps
                    else:
                        return True
                cd -= 1
                self.screen.blit(self.images[number], (0, 0))
                clock.tick(fps)
                pygame.display.update()
        else:
            print('в этой анимвции нету изображений')

    def set_cd(self, number):
        """Seconds to show frame `number`; common_cd when no per-frame value exists."""
        if 0 <= number < len(self.images_cd):
            return self.images_cd[number]
        return self.common_cd
if __name__ == '__main__':
    # Demo: open a window and play the slideshow on left mouse click.
    pygame.init()
    pygame.display.set_caption('свой курсор мыши')
    size = width, height = 300, 450
    screen = pygame.display.set_mode(size)
    # Bug fix: Animation requires a positional `sounds` argument; the original
    # call omitted it and raised TypeError before the demo could start.
    animation = Animation(['third.png', 'first.jpg', 'second.jpg'], [], screen, width, height, [], colorkey=-1)
    running = True
    while running:
        screen.fill((0, 0, 0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    animation.play()
        pygame.display.update()
{
"api_name": "pygame.transform.scale",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
31638903937 | import json
import numpy as np
import matplotlib.pyplot as plt
def read_file():
    """Load hero skin records from ./file/heroskin.json.

    :return: the list stored under the "hero_skin_data" key
    """
    # Bug fix: the file handle was opened inline and never closed; a context
    # manager releases it deterministically.
    with open("./file/heroskin.json", 'r', encoding='utf-8') as fp:
        hero_list = json.load(fp)
    return hero_list["hero_skin_data"]
def get_hero_skin_count():
    """Return parallel lists of hero names and their skin counts."""
    records = read_file()
    names = [record["hero_name"] for record in records]
    counts = [record["hero_skin_count"] for record in records]
    return names, counts
def drawLine():
    """Render a combined bar + line chart of skin counts per hero."""
    # Show minus signs correctly.
    plt.rcParams['axes.unicode_minus'] = False
    # Use a Chinese-capable font (SimHei) for the CJK labels below.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    # Show minus signs correctly (repeated in the original).
    plt.rcParams['axes.unicode_minus'] = False
    # Hero data: parallel name/count lists.
    hero_name_list, hero_skin_count = get_hero_skin_count()
    # Axis data arrays.
    x_text = hero_name_list
    y_text = hero_skin_count
    # Figure size (inches) and resolution.
    plt.figure(figsize=(20, 8), dpi=80)
    # Background grid.
    plt.grid(True, linestyle="--", color='gray', linewidth='0.5', axis='both')
    # Title.
    plt.title('英雄皮肤数据折线图')
    # Axis labels.
    plt.xlabel('英雄名称')
    plt.ylabel('皮肤数量')
    # Rotate x tick labels so long hero names do not overlap.
    plt.xticks(rotation=60, fontsize=9)
    # Visible x range (first ~100 heroes).
    plt.xlim(-0.5, 100)
    # Bar chart.
    bar = plt.bar(x=x_text, height=y_text, color='steelblue', alpha=0.8)
    # Line chart over the same data.
    line = plt.plot(x_text, y_text, color='red', linewidth=1, marker='o', markerfacecolor='salmon', markersize=3)
    # Numeric label above each point.
    for x, y in zip(x_text, y_text):
        plt.text(x, y, y, ha='center', va='bottom', fontsize=10)
    # Legend style.
    plt.legend(line, ("英雄皮肤数量",), loc='upper left')
    # Horizontal mean line.
    avg = np.mean(y_text)
    plt.axhline(y=avg, color="green")
    # Display the figure.
    plt.show()
def main():
    """Entry point: draw the hero-skin chart."""
    drawLine()
if __name__ == '__main__':
    main()
{
"api_name": "json.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib... |
15293966281 | # -*- coding:utf-8 -*-
import os
import sys
import time
import math
import threading
import json
import random
import logging
from apscheduler.schedulers.background import BackgroundScheduler
sys.path.append('./lib')
import mylog
# Root/project log via the mylog helper at WARNING level.
mylog.setLog('KD48Monitor', logging.WARNING)
# Dedicated INFO logger ('mylogger') that also writes to ./log/info.log.
loggerInfo = logging.getLogger('mylogger')
loggerInfo.setLevel(logging.INFO)
fh = logging.FileHandler('./log/info.log')
formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
fh.setFormatter(formatter)
loggerInfo.addHandler(fh)
from utility import *
from KD48API import KD48API
from longQQMsg import LongQQMsg
from MsgCounter import MsgCounter
from cqsdk import CQBot, CQAt, CQRecord, RcvdPrivateMessage, RcvdGroupMessage, \
GroupMemberIncrease, GroupMemberDecrease
import utils
# Own PID, kept so killmyself() can terminate this process on command.
pid = os.getpid()
# CoolQ bot client on port 11235; all QQ traffic goes through this object.
qqbot = CQBot(11235)
longQQMsg = LongQQMsg()
def killmyself():
    """Terminate this process via its recorded PID (killProcess from utility)."""
    killProcess(pid)
def SendDebugMsgs(debugQQ, t):
    """Log `t` and, when a debug QQ id is configured, also DM it to that QQ."""
    PrintLog(t)
    if debugQQ:
        utils.SendPrivateMsg(qqbot, str(debugQQ), t)
def PrintLog(text):
    """Print `text` with a timestamp and record it on both INFO loggers."""
    currTimeStr = Time2ISOString(time.time())
    # gbkIgnore strips characters the GBK console cannot render.
    print(currTimeStr, gbkIgnore(text))
    logging.info('PrintLog ' + text)
    loggerInfo.info('PrintLog ' + text)
class KD48Monitor(object):
    """Polls a KD48 ("pocket48") member room via KD48API and relays activity
    (room messages, live broadcasts, room-info changes) to QQ through CQBot.

    Scheduling is done with APScheduler background jobs started in run().
    """
    def __init__(self, accountInfo, monitorInfo):
        # Login credentials for the KD48 API.
        self.account = accountInfo['userId']
        self.password = accountInfo['password']
        self.token = '0'
        # debugQQ: QQ id that receives error/diagnostic messages.
        self.debugQQ = accountInfo['debugQQ']
        # Receiver configuration:
        # QQGroups_pro: groups that get every message forwarded
        # QQGroups_lite: groups that only get "member appeared" alerts
        # QQIds: private chats that get every message forwarded
        self.QQIds = monitorInfo['QQIds']
        self.QQGroups_pro = monitorInfo['QQGroups_pro']
        if 'QQGroups_lite' in list(monitorInfo.keys()):
            self.QQGroups_lite = monitorInfo['QQGroups_lite']
        else:
            self.QQGroups_lite = []
        self.QQGroups_all = list(set(self.QQGroups_pro).union(set(self.QQGroups_lite)))
        self.sendToLite = monitorInfo['sendToLite']
        # Monitored member's info (name/room/group filled in by initKD()).
        self.memberId = monitorInfo['memberId']
        self.memberName = ''
        self.roomId = None
        self.groupId = 0
        self.roomInfo = {}
        self.roomInfoOld = {}
        self.beginHot = 0
        # CoolQ
        self.CoolQRoot = monitorInfo['CoolQRoot']
        # self.CoolQImageDir = os.path.join(self.CoolQRoot, 'data', 'image')
        # room msg alert
        self.isappeared = False
        self.timegap = 1800  # seconds of silence before the "left the room" summary
        self.lastPrintTime = time.time()
        self.msgCounter = MsgCounter(self.memberId)
        self.lastOtherMemberTime = 0  # last time another member appeared in the room
        if 'refreshInterval' in monitorInfo:
            self.refreshInterval = monitorInfo['refreshInterval']
        else:
            self.refreshInterval = 15
    # Refresh self.roomInfo from the API; returns 1 on success, -1 on failure.
    def updateRoomInfo(self):
        response = self.api.getRoomInfo(self.token, self.memberId)
        if response['status'] != -1:
            self.roomInfo = response['data']
            return 1
        else:
            return -1
    # Build a human-readable (Chinese) summary of the current room state.
    def printRoomInfo(self):
        self.updateRoomInfo()
        currHot = self.getRoomHot()
        info = ''
        if self.roomInfo:
            info += self.roomInfo['memberName'] + '的房间:' + '\n'
            info += '房间名称:' + self.roomInfo['roomName'] + '\n'
            info += '房间话题:' + self.roomInfo['topic'] + '\n'
            info += '房间心情:' + self.roomInfo['moodName'] + '\n'
            info += '房间热度:' + str(currHot) + '\n'
            # info += '最后发言时间:' + self.roomInfo['lastCommentTime'] + '\n'
            info += '房间头像:' + self.roomInfo['roomAvatar'] + '\n'
            info += '房间背景:' + self.roomInfo['bgPath']
        else:
            info += '当前房间为空'
        return info
    def checkRoom(self):
        """Diff current room info against the persisted snapshot and report
        changed fields (name/topic/mood/avatar/background) to the debug QQ."""
        self.updateRoomInfo()
        monitorDicts = {'roomName' : '房间名称',
                        'topic' : '房间话题',
                        'moodName' : '房间心情',
                        'roomAvatar': '房间头像',
                        'bgPath' : '房间背景'
                        }
        modifiedKeys = []
        response = ''
        for key in list(monitorDicts.keys()):
            if self.roomInfo[key] != self.roomInfoOld[key]:
                modifiedKeys.append(key)
        if modifiedKeys:
            response = self.memberName + '修改了房间信息'
            for key in modifiedKeys:
                response += '\n修改了' + monitorDicts[key] + ':' + self.roomInfo[key]
            self.roomInfoOld = self.roomInfo
            saveJson(self.roomInfoOld, 'config/roomInfo.json')
        if response:
            SendDebugMsgs(self.debugQQ, response)
    def getRoomHot(self):
        '''Page through the hot-rooms list until this member's room is found;
        return its 'hot' value, or -1 when a page comes back empty.
        '''
        page = 1
        while True:
            result = self.api.getHotRooms(self.token, page=page,
                                          groupId=self.groupId)
            hotRooms = result['data']
            if self.memberId in hotRooms:
                return hotRooms[self.memberId]['hot']
            else:
                page += 1
            if not hotRooms:
                return -1
    def initKD(self):
        """Log in, resolve the member's room, and seed the message/live state.

        Each step retries every 10 s, aborting the process after 10 failures.
        """
        self.api = KD48API()
        # Log in to obtain a token.
        interval = 10
        loginSucc = False
        loginCnt = 0
        while not loginSucc:
            response = self.api.login(self.account, self.password)
            if response['status'] != -1:
                self.token = response['token']
                loginSucc = True
                log = response['msg']
                PrintLog(log)
            else:
                loginCnt += 1
                log = response['msg']
                PrintLog(log)
                PrintLog('%d秒钟后重试...'%interval)
                time.sleep(interval)
                if loginCnt >= 10:
                    PrintLog('登录失败!请重新启动。')
                    os.system('pause')
                    sys.exit()
        # Fetch the member's room info (room id, display name, group id).
        interval = 10
        rinfoSucc = False
        rinfoCnt = 0
        while not rinfoSucc:
            response = self.api.getRoomInfo(self.token, self.memberId)
            if response['status'] != -1:
                self.roomInfo = response['data']
                self.roomId = response['data']['roomId']
                self.memberName = response['data']['memberName']
                self.groupId = response['data']['groupId']
                rinfoSucc = True
                log = response['msg']
                PrintLog(log)
            else:
                rinfoCnt += 1
                log = response['msg']
                PrintLog(log)
                PrintLog('%d秒钟后重试...'%interval)
                time.sleep(interval)
                if rinfoCnt >= 10:
                    PrintLog('获取房间信息失败!请重新启动。')
                    os.system('pause')
                    sys.exit()
        # Seed the "last seen" cursor with the newest existing room message.
        self.msgLastTime = 0
        self.oldLiveIds = []
        self.oldReviewIds = []
        response = self.api.getRoomMsgs(self.token, self.roomId)
        if response['status'] != -1:
            messages = response['data']
            self.msgLastTime = messages[0]['msgTime']
            PrintLog('初始化房间成功')
        else:
            log = response['msg']
            PrintLog(log)
            os.system('pause')
            sys.exit()
        # Seed the known live/review id lists so only new ones are announced.
        response = self.api.getLiveList(self.token, memberId=self.memberId, limit=30)
        if response['status'] != -1:
            liveList = response['liveList']
            reviewList = response['reviewList']
            if liveList:
                for live in reversed(liveList):
                    self.oldLiveIds.append(live['liveId'])
            if reviewList:
                for review in reversed(reviewList):
                    self.oldReviewIds.append(review['liveId'])
            PrintLog('初始化直播成功')
        else:
            log = response['msg']
            PrintLog(log)
            os.system('pause')
            sys.exit()
        # Persisted room-info snapshot used by checkRoom().
        self.roomInfoPath = 'config/roomInfo.json'
        if not os.path.exists(self.roomInfoPath):
            saveJson(self.roomInfo, self.roomInfoPath)
            self.roomInfoOld = self.roomInfo
        else:
            self.roomInfoOld = loadJson(self.roomInfoPath)
    def initial(self):
        """Wrapper around initKD() that aborts the process on any failure."""
        try:
            self.initKD()
        except Exception as e:
            logging.exception(e)
            PrintLog('口袋监控初始化失败!请重新启动程序。')
            os.system('pause')
            sys.exit()
    def roomMonitor(self):
        """Scheduled job: fetch new room messages and forward them.

        Tracks appearance state; when the member has been silent for
        self.timegap seconds, posts a summary and stops the comment job.
        """
        try:
            messages = []
            response = self.api.getRoomMsgs(self.token, self.roomId)
            if response['status'] != -1:
                messages = response['data']
                for msg in reversed(messages):
                    if msg['msgTime'] <= self.msgLastTime:
                        continue
                    msgInfo = self.api.analyzeMsg(msg, self.CoolQRoot)
                    if msgInfo['ignore']:
                        continue
                    # A different member posted in this room.
                    if msgInfo['senderId'] != self.memberId and msgInfo['senderId'] > 0:
                        # Announce at most once per half hour. TODO: track per member.
                        if time.time()-self.lastOtherMemberTime > 1800:
                            self.lastOtherMemberTime = time.time()
                            # log = '%s在%s口袋房间出现了!'%(
                            #     msgInfo['senderName'], self.memberName)
                            log_lite = '其他成员在%s口袋房间出现了!快去看看是谁!'%(self.memberName)
                            utils.SendGroupsMsg(qqbot, self.QQGroups_lite, log_lite)
                            log_pro = '其他成员在%s口袋房间出现了!'%(self.memberName)
                            utils.SendPrivatesMsg(qqbot, self.QQIds, log_pro)
                            utils.SendGroupsMsg(qqbot, self.QQGroups_pro, log_pro)
                        log = msgInfo['printText'].strip() + '\n来自%s口袋房间'%(self.memberName)
                        # Other members' messages are forwarded to "pro" receivers only.
                        if msgInfo['msgType'] == 2:
                            # Voice messages need special handling.
                            utils.SendRecordMsg(qqbot, log, QQGroups=self.QQGroups_pro, QQIds=self.QQIds)
                        else:
                            utils.SendPrivatesMsg(qqbot, self.QQIds, log)
                            utils.SendGroupsMsg(qqbot, self.QQGroups_pro, log)
                    elif msgInfo['senderId'] == self.memberId:  # the room owner posted
                        # Appearance alert; half an hour of silence resets it.
                        if self.isappeared == False:
                            self.isappeared = True
                            log_lite = (self.memberName + '在口袋房间出现了!大家快去调戏互动啦!'
                                        '(具体消息暂停搬运,请大家移步口袋房间)')
                            utils.SendGroupsMsg(qqbot, self.QQGroups_lite, log_lite)
                            log_pro = (self.memberName + '在口袋房间出现了!大家快去调戏互动啦!')
                            utils.SendPrivatesMsg(qqbot, self.QQIds, log_pro)
                            utils.SendGroupsMsg(qqbot, self.QQGroups_pro, log_pro)
                            self.beginHot = self.getRoomHot()
                            # Start fan-comment statistics while the member is present.
                            self.cmtStat = {}
                            self.cmtLastTime = int(time.time()*1000)
                            self.scheduler.add_job(self.roomCommentMonitor, 'interval', seconds=8,
                                id='roomCommentMonitor', coalesce=True, max_instances=1)
                            time.sleep(1)
                        ##### forward the message #####
                        log = msgInfo['printText'].strip()
                        ##### pro receivers: forward everything #####
                        if msgInfo['msgType'] == 2:
                            # Voice messages need special handling.
                            utils.SendRecordMsg(qqbot, log, QQGroups=self.QQGroups_pro, QQIds=self.QQIds)
                        else:
                            utils.SendPrivatesMsg(qqbot, self.QQIds, log)
                            utils.SendGroupsMsg(qqbot, self.QQGroups_pro, log)
                        ##### lite receivers: forward per sendToLite flags #####
                        if msgInfo['msgType'] == 0 and self.sendToLite['text']:
                            # Text message.
                            utils.SendGroupsMsg(qqbot, self.QQGroups_lite, log)
                        if msgInfo['messageObject'] == 'faipaiText' and self.sendToLite['fanpai'] \
                            and not self.sendToLite['text']:
                            # "Fanpai" (reply-to-fan) message.
                            utils.SendGroupsMsg(qqbot, self.QQGroups_lite, log)
                        if msgInfo['msgType'] == 1 and self.sendToLite['image']:
                            # Image message.
                            utils.SendGroupsMsg(qqbot, self.QQGroups_lite, log)
                        if msgInfo['msgType'] == 2 and self.sendToLite['audio']:
                            # Voice message.
                            utils.SendRecordMsg(qqbot, log, QQGroups=self.QQGroups_lite)
                        if msgInfo['msgType'] == 3 and self.sendToLite['video']:
                            # Video message.
                            utils.SendGroupsMsg(qqbot, self.QQGroups_lite, log)
                        self.msgCounter.counter(msgInfo)
                        self.lastPrintTime = time.time()
                        # Download non-text payloads in the background.
                        try:
                            download_thread = threading.Thread(
                                target=self.api.downloadMsg, args=(msgInfo,), daemon=True)
                            download_thread.start()
                        except Exception as e:
                            SendDebugMsgs(self.debugQQ, '多线程下载错误!')
                            logging.exception(e)
                        time.sleep(1)
                    else:
                        pass
                    self.msgLastTime = msgInfo['msgTime']
                # if messages:
                #     self.msgLastTime = messages[0]['msgTime'] # msgInfo['msgTime']
            # Silence summary: member has been gone for timegap seconds.
            if time.time() - self.lastPrintTime > self.timegap and self.isappeared == True:
                self.isappeared = False
                log = self.memberName + '从房间消失了半个小时了......\n'
                log += self.msgCounter.info()
                self.msgCounter.reset()
                deltaHot = self.getRoomHot() - self.beginHot
                log += "\n房间热度增加了:%d"%deltaHot
                utils.SendPrivatesMsg(qqbot, self.QQIds, log.strip())
                utils.SendGroupsMsg(qqbot, self.QQGroups_all, log.strip())
                # Comment statistics, top commenters first.
                sortedCmt = [self.cmtStat[y] for y in sorted(self.cmtStat,
                    key=lambda x:self.cmtStat[x]['count'], reverse=True)]
                log = '留言统计前10名:\n'
                # log += str(sortedCmt) + '\n' # save to file
                log += str(sortedCmt[0:10]) + '\n'
                log += '留言人数:%d人'%len(self.cmtStat)
                utils.SendPrivatesMsg(qqbot, self.QQIds, log.strip())
                self.scheduler.remove_job('roomCommentMonitor')
        except Exception as e:
            SendDebugMsgs(self.debugQQ, '房间消息解析错误!可能跳过了消息!')
            logging.exception(e)
            # On error, skip past this batch of messages.
            self.msgLastTime = messages[0]['msgTime']
    def roomCommentMonitor(self):
        """Scheduled job (only while the member is present): tally fan comments
        per sender into self.cmtStat."""
        try:
            comments = []
            response = self.api.getRoomComments(self.token, self.roomId, limit=50)
            if response['status'] != -1:
                comments = response['data']
                for cmt in reversed(comments):
                    if cmt['msgTime'] <= self.cmtLastTime:
                        continue
                    # msgInfo = self.api.analyzeMsg(cmt, self.CoolQRoot)
                    extInfo = json.loads(cmt['extInfo'])
                    senderId = extInfo['senderId']
                    if senderId not in self.cmtStat:
                        self.cmtStat[senderId] = {'count':1, 'name':extInfo['senderName']}
                    else:
                        self.cmtStat[senderId]['count'] += 1
                    self.cmtLastTime = cmt['msgTime']
        except Exception as e:
            SendDebugMsgs(self.debugQQ, '房间留言监控错误')
            logging.exception(e)
    def liveMonitor(self):
        """Scheduled job: announce new live broadcasts and new replay (review)
        entries for the monitored member."""
        try:
            liveList = []
            reviewList = []
            response = self.api.getLiveList(self.token, memberId=self.memberId)
            if response['status'] != -1:
                liveList = response['liveList']
                reviewList = response['reviewList']
            for live in reversed(liveList):
                if live['liveId'] not in self.oldLiveIds:
                    self.oldLiveIds.append(live['liveId'])
                    if live['memberId'] == self.memberId:
                        liveInfo = self.api.getLiveInfo(live, isLive=True)
                        log = live['title'] + "开始直播了!\n"
                        log += liveInfo['printText']
                        utils.SendPrivatesMsg(qqbot, self.QQIds, log.strip())
                        utils.SendGroupsMsg(qqbot, self.QQGroups_all, log.strip())
                        secret = "直播封面图:" + liveInfo['picPath'] + "\n"
                        secret += "弹幕文件:" + liveInfo['lrcPath'] + "\n"
                        secret += "直播源:" + liveInfo['streamPath']
                        SendDebugMsgs(self.debugQQ, secret.strip())
            if not liveList and response['status'] != -1:
                del self.oldLiveIds[:]
            for review in reversed(reviewList):
                if review['liveId'] not in self.oldReviewIds:
                    if review['liveId'] in self.oldLiveIds:
                        self.oldLiveIds.remove(review['liveId'])
                    self.oldReviewIds.append(review['liveId'])
                    # self.oldReviewIds.pop(0)
                    if review['memberId'] == self.memberId:
                        liveInfo = self.api.getLiveInfo(review, isLive=False)
                        log = review['title'] + "的最新直播回放已出!\n"
                        log += liveInfo['printText']
                        utils.SendPrivatesMsg(qqbot, self.QQIds, log.strip())
                        utils.SendGroupsMsg(qqbot, self.QQGroups_all, log.strip())
        except Exception as e:
            SendDebugMsgs(self.debugQQ, '直播消息解析错误!')
            logging.exception(e)
    def run(self):
        """Start the background scheduler with the three polling jobs."""
        PrintLog('正在启动口袋监控...')
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_job(self.roomMonitor, 'interval', seconds=self.refreshInterval,
                               id='roomMonitor', coalesce=True, max_instances=1)
        time.sleep(5)
        self.scheduler.add_job(self.liveMonitor, 'interval', seconds=self.refreshInterval,
                               id='liveMonitor', coalesce=True, max_instances=1)
        time.sleep(3)
        self.scheduler.add_job(self.checkRoom, 'interval', seconds=30, id='checkroom',
                               coalesce=True, max_instances=1)
        self.scheduler.start()
        PrintLog('所有监控器启动完毕')
##### Group administration settings #####
from config import KD_admins
group_admins = KD_admins.admins['Group']
private_admins = KD_admins.admins['Private']
QQGroups = KD_admins.QQGroups
QQGroups_lite = KD_admins.QQGroups_lite
adminQQ = KD_admins.adminQQ
welcomeGroups = KD_admins.welcomeGroups
# Each command keeps a per-group cooldown timestamp:
# 'lastTime': {'group1':0, 'group2':0}
# level: 0 = nobody, 1 = admins only, 2 = any member
# (Dict keys are the literal Chinese command strings users type.)
groupCmdAuthority = {"房间信息": {'level': 1, 'lastTime': {}},
                     "直播回放": {'level': 1, 'lastTime': {}},
                     "集资链接": {'level': 2, 'lastTime': {}},
                     "更新集资链接": {'level': 1, 'lastTime': {}},
                     "补档列表": {'level': 1, 'lastTime': {}},
                     "房间消息回放": {'level': 1, 'lastTime': {}},
                     }
############ Auto-reply command handling ############
def ReplyHandler(msg):
    """Dispatch a chat command string to its handler and return the reply text.

    Commands (Chinese literals): command list, room info, latest live replay,
    room message replay, funding link (get/update), backup-video list.
    Returns '' when the command is unknown or fails.
    """
    global groupCmdAuthority
    result = ''
    try:
        if msg == "命令列表":
            result += '口袋命令列表:\n'
            for comm in sorted(groupCmdAuthority):
                result += comm + '\n'
            result = result.strip()
        if msg == "房间信息":
            result = monitor.printRoomInfo()
        if msg == "直播回放":
            response = monitor.api.getLiveList(monitor.token, memberId=monitor.memberId)
            if response['status'] != -1:
                reviewList = response['reviewList']
                review = reviewList[0]
                reviewInfo = monitor.api.getLiveInfo(review, isLive=False)
                result = monitor.memberName + "最近的一次直播是:\n"
                result += reviewInfo['printText']
            else:
                result = '发生错误,请重试!'
        if msg.split()[0] == "房间消息回放":
            # Optional numeric argument: how many messages to replay (default 1).
            limit = 1
            if len(msg.split()) == 1:
                limit = 1
            elif len(msg.split()) == 2:
                limit = int(msg.split()[1])
            else:
                return '参数错误'
            response = monitor.api.getRoomMsgs(monitor.token, monitor.roomId, limit=limit)
            if response['status'] != -1:
                messages = response['data']
                result = monitor.memberName + "房间消息回放:\n"
                for msg in reversed(messages):
                    msgInfo = monitor.api.analyzeMsg(msg)
                    result += msgInfo['printText'] + '\n\n'
            else:
                result = '发生错误,请重试!'
        if "淘宝链接" in msg or "集资链接" in msg:
            data = loadJson('config/KD_data.json')
            result = data['moneylink']
        if msg.split()[0] == '更新集资链接':
            txt = msg.lstrip('更新集资链接').strip()
            data = loadJson('config/KD_data.json')
            data['moneylink'] = txt
            saveJson(data, 'config/KD_data.json')
            result = '成功更新集资链接,回复【集资链接】查看内容'
        if msg == "补档列表":
            result = longQQMsg.videoList
    except Exception as e:
        logging.exception(e)
    finally:
        return result.strip()
# Handle group messages.
@qqbot.listener((RcvdGroupMessage,))
def ReplyGroupMsg(message):
    """Group-message listener: run permitted commands and @-reply the sender.

    Permission: level 1 for configured group admins (or everyone when 'all'
    is listed), level 2 for ordinary members of a configured group.
    """
    if message.text.strip() == "":
        return
    global groupCmdAuthority
    global group_admins
    currQQLevel = 100
    result = ''
    if message.group in group_admins:
        if message.qq in group_admins[message.group]:
            currQQLevel = 1
        else:
            currQQLevel = 2
        if 'all' in group_admins[message.group]:
            currQQLevel = 1
    currCommand = message.text.split()[0]
    if currCommand in groupCmdAuthority:
        level = groupCmdAuthority[currCommand]['level']
        lastTimeDict = groupCmdAuthority[currCommand]['lastTime']
        if message.group not in lastTimeDict:
            lastTimeDict[message.group] = 0
            lastTime = 0
        else:
            lastTime = lastTimeDict[message.group]
        # Commands have a 300-second per-group cooldown.
        if currQQLevel <= level and time.time() - lastTime >= 300:
            result = ReplyHandler(message.text)
            lastTimeDict[message.group] = time.time()
    if result:
        msg = "{text}\n{qq}".format(text=result, qq=CQAt(message.qq))
        utils.reply(qqbot, message, msg)
# Handle private messages.
# NOTE(review): "Rrivate" is a typo but the name is only bound via the
# decorator, so it is left as-is here.
@qqbot.listener((RcvdPrivateMessage,))
def ReplyRrivateMsg(message):
    """Private-message listener: admins may run any command, including a
    two-step confirmed forced shutdown of this monitor process."""
    if message.text.strip() == "":
        return
    global private_admins
    result = ''
    if message.qq in private_admins:
        result = ReplyHandler(message.text)
        # Forced shutdown (requires explicit confirmation message).
        if message.text == '强制关闭口袋监控':
            utils.reply(qqbot, message, '你确定要强制关闭吗?\n'
                        '若确定,请回复“我确定强制关闭口袋监控”')
        if message.text == '我确定强制关闭口袋监控':
            try:
                killmyself()
                utils.reply(qqbot, message, '关闭成功')
            except Exception as e:
                utils.reply(qqbot, message, '关闭失败')
    if result:
        utils.reply(qqbot, message, result)
##### New member joins a group #####
@qqbot.listener((GroupMemberIncrease))
def Welcome(message):
    """Member-increase listener: greet newcomers in configured groups and DM
    them an intro message; always log the join."""
    # Auto-welcome in the group, then send promo info by private message.
    if message.group in welcomeGroups:
        try:
            text = longQQMsg.welcome
            wel = "欢迎新成员 {qq} \n{text}".format(qq=CQAt(message.operatedQQ), text=text)
            time.sleep(0.5)
            utils.SendGroupMsg(qqbot, message.group, wel)
            time.sleep(3)
            textPrivate = "{qq} {msg}".format(qq=CQAt(message.operatedQQ),
                                              msg=longQQMsg.newMemberPrivateMsg)
            utils.SendPrivateMsg(qqbot, message.operatedQQ, textPrivate)
        except Exception as e:
            logging.exception(e)
            PrintLog(e)
    else:  # other groups: no welcome, just the log line below
        pass
    PrintLog('有新人加群 Group: %s Join QQ: %s Admin QQ: %s'%(message.group,
             message.operatedQQ, message.qq))
##### Member-leave monitoring #####
@qqbot.listener((GroupMemberDecrease))
def GroupMemberQuit(message):
    """Member-decrease listener: log who left which group."""
    log = '有人已退群 Group: %s Quit QQ: %s Admin QQ: %s'%(message.group,
        message.operatedQQ, message.qq)
    PrintLog(log)
##### 口袋房间监控 #####
try:
qqbot.start()
[accountInfo, monitorInfo] = loadJson('./config/monitor.json')
monitor = KD48Monitor(accountInfo, monitorInfo)
monitor.initial()
monitor.run()
except Exception as e:
logging.exception(e)
utils.error('启动失败\n')
PrintLog(e)
os.system('pause')
sys.exit()
# 主程序循环,防止退出程序
while True:
time.sleep(100)
| momo-xii/KD48 | KDMonitor.py | KDMonitor.py | py | 26,545 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "mylog.setLog",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"lin... |
23406422554 | # -*- coding: utf-8 -*-
import cottonformation as ctf
from cottonformation.res import iam, awslambda
# create a ``Template`` object to represent your cloudformation template
tpl = ctf.Template(
Description="Aws Lambda Versioning Example",
)
iam_role_for_lambda = iam.Role(
"IamRoleForLambdaExecution",
rp_AssumeRolePolicyDocument=ctf.helpers.iam.AssumeRolePolicyBuilder(
ctf.helpers.iam.ServicePrincipal.awslambda()
).build(),
p_RoleName="lbd-versioning-poc",
p_ManagedPolicyArns=[
ctf.helpers.iam.AwsManagedPolicy.AmazonDynamoDBFullAccess,
]
)
tpl.add(iam_role_for_lambda)
lbd_func = awslambda.Function(
"LbdFuncVersioningPOC",
rp_Code=awslambda.PropFunctionCode(
p_S3Bucket="sanhe-admin-for-everything",
p_S3Key="lambda/MacHu-GWU/lbd-versioning/066212d310fb9d829154d197be860d0f.zip",
),
rp_Role=iam_role_for_lambda.rv_Arn,
p_FunctionName="lbd-versioning-poc",
p_MemorySize=256,
p_Timeout=3,
p_Runtime=ctf.helpers.awslambda.LambdaRuntime.python36,
p_Handler="lbd_handler.main",
ra_DependsOn=iam_role_for_lambda,
p_Tags=ctf.Tag.make_many(Stage="Dev", Description="Changed"),
)
tpl.add(lbd_func)
if __name__ == "__main__":
import boto3
boto_ses = boto3.session.Session(profile_name="sanhe")
env = ctf.Env(boto_ses=boto_ses)
env.deploy(
template=tpl,
stack_name="lbd-versioning-poc",
bucket_name="sanhe-admin-for-everything",
include_iam=True,
) | MacHu-GWU/Dev-Exp-Share | docs/source/01-AWS/01-All-AWS-Services-Root/01-Compute/02-AWS-Lambda-Root/05-Versioning/deploy.py | deploy.py | py | 1,506 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "cottonformation.Template",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cottonformation.res.iam.Role",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cottonformation.res.iam",
"line_number": 11,
"usage_type": "name"
},
{
"api_... |
17966322508 | """Config files
"""
import logging
import os
import sys
from pathlib import Path
import torch
MAIN_PATH = Path(__file__).resolve().parents[1]
DATA_PATH = MAIN_PATH / "data"
DEPLOY_PATH = MAIN_PATH / "src" / "deploy"
ARTIFACT_PATH = MAIN_PATH / "artifacts"
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
NUM_WORKERS = os.cpu_count()
EPOCHS = 20
LEARNING_RATE = 1e-4
EPS = 1e-8
BATCH_SIZE = 32
CLIP = 1.5
ALPHA = 1000.0
HIDDEN_SIZE = 2
### Class Translation
fmnist_classes = [
"T-shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot",
]
cifar10_classes = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
CLASSES = {
"mnist": {digit: digit for digit in range(10)},
"fmnist": {label: idx for idx, label in enumerate(fmnist_classes)},
"cifar10": {label: idx for idx, label in enumerate(cifar10_classes)},
}
### Model Params
MODEL_PARAMS = {
"BaseVAE": {},
"DeepVAE": {},
"ConvVAE": {"kernel_size": 3},
"BaseCVAE": {},
"DeepCVAE": {},
"ConvCVAE": {},
"GAN": {},
"CGAN": {},
"ConvGAN": {},
"ConvCGAN": {},
}
### Logging configurations
LOGGER = logging.getLogger(__name__)
stream_handler = logging.StreamHandler(sys.stdout)
if not (ARTIFACT_PATH / "model_ckpt").exists():
(ARTIFACT_PATH / "model_ckpt").mkdir(parents=True)
file_handler = logging.FileHandler(
filename=str(ARTIFACT_PATH / "model_ckpt" / "logfile.log")
)
formatter = logging.Formatter("%(asctime)s:%(levelname)s: %(message)s")
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
LOGGER.setLevel(logging.INFO)
LOGGER.addHandler(stream_handler)
LOGGER.addHandler(file_handler)
| benjaminlq/Image-Generation | src/config.py | config.py | py | 1,838 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
... |
27053438479 | import pytest
from subarraySum import Solution
@pytest.mark.parametrize("nums, k, expected", [
([], 2, 0),
([1, 1, 1], 2, 2),
([1, 1, 1, 1], 2, 3)
])
def test_subarraySum(nums, k, expected):
actual = Solution().subarraySum(nums, k)
assert actual == expected
| ikedaosushi/leetcode | problems/python/tests/test_subarraySum.py | test_subarraySum.py | py | 280 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "subarraySum.Solution",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 5,
"usage_type": "attribute"
}
] |
15991381065 | import torch.nn as nn
try:
from .resnet import resnet50_v1b
except:
from resnet import resnet50_v1b
import torch.nn.functional as F
import torch
class SegBaseModel(nn.Module):
r"""Base Model for Semantic Segmentation
Parameters
----------
backbone : string
Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',
'resnet101' or 'resnet152').
"""
def __init__(self, nclass, aux, backbone='resnet50', dilated=True, pretrained_base=False, **kwargs):
super(SegBaseModel, self).__init__()
self.aux = aux
self.nclass = nclass
if backbone == 'resnet50':
self.pretrained = resnet50_v1b(pretrained=pretrained_base, dilated=dilated, **kwargs)
def base_forward(self, x):
"""forwarding pre-trained network"""
x = self.pretrained.conv1(x)
x = self.pretrained.bn1(x)
x = self.pretrained.relu(x)
x = self.pretrained.maxpool(x)
c1 = self.pretrained.layer1(x)
c2 = self.pretrained.layer2(c1)
c3 = self.pretrained.layer3(c2)
c4 = self.pretrained.layer4(c3)
return c1, c2, c3, c4
class _FCNHead(nn.Module):
def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm2d, **kwargs):
super(_FCNHead, self).__init__()
inter_channels = in_channels // 4
self.block = nn.Sequential(
nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU(inplace=True),
nn.Dropout(0.1),
nn.Conv2d(inter_channels, channels, 1)
)
def forward(self, x):
return self.block(x)
class _PositionAttentionModule(nn.Module):
""" Position attention module"""
def __init__(self, in_channels, **kwargs):
super(_PositionAttentionModule, self).__init__()
self.conv_b = nn.Conv2d(in_channels, in_channels // 8, 1)
self.conv_c = nn.Conv2d(in_channels, in_channels // 8, 1)
self.conv_d = nn.Conv2d(in_channels, in_channels, 1)
self.alpha = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
batch_size, _, height, width = x.size()
feat_b = self.conv_b(x).view(batch_size, -1, height * width).permute(0, 2, 1)
feat_c = self.conv_c(x).view(batch_size, -1, height * width)
attention_s = self.softmax(torch.bmm(feat_b, feat_c))
feat_d = self.conv_d(x).view(batch_size, -1, height * width)
feat_e = torch.bmm(feat_d, attention_s.permute(0, 2, 1)).view(batch_size, -1, height, width)
out = self.alpha * feat_e + x
return out
class _ChannelAttentionModule(nn.Module):
"""Channel attention module"""
def __init__(self, **kwargs):
super(_ChannelAttentionModule, self).__init__()
self.beta = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
batch_size, _, height, width = x.size()
feat_a = x.view(batch_size, -1, height * width)
feat_a_transpose = x.view(batch_size, -1, height * width).permute(0, 2, 1)
attention = torch.bmm(feat_a, feat_a_transpose)
attention_new = torch.max(attention, dim=-1, keepdim=True)[0].expand_as(attention) - attention
attention = self.softmax(attention_new)
feat_e = torch.bmm(attention, feat_a).view(batch_size, -1, height, width)
out = self.beta * feat_e + x
return out
class _DAHead(nn.Module):
def __init__(self, in_channels, nclass, aux=True, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
super(_DAHead, self).__init__()
self.aux = aux
inter_channels = in_channels // 4
self.conv_p1 = nn.Sequential(
nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True)
)
self.conv_c1 = nn.Sequential(
nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True)
)
self.pam = _PositionAttentionModule(inter_channels, **kwargs)
self.cam = _ChannelAttentionModule(**kwargs)
self.conv_p2 = nn.Sequential(
nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True)
)
self.conv_c2 = nn.Sequential(
nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True)
)
self.out = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(inter_channels, nclass, 1)
)
if aux:
self.conv_p3 = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(inter_channels, nclass, 1)
)
self.conv_c3 = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(inter_channels, nclass, 1)
)
def forward(self, x):
feat_p = self.conv_p1(x)
feat_p = self.pam(feat_p)
feat_p = self.conv_p2(feat_p)
feat_c = self.conv_c1(x)
feat_c = self.cam(feat_c)
feat_c = self.conv_c2(feat_c)
feat_fusion = feat_p + feat_c
outputs = []
fusion_out = self.out(feat_fusion)
outputs.append(fusion_out)
if self.aux:
p_out = self.conv_p3(feat_p)
c_out = self.conv_c3(feat_c)
outputs.append(p_out)
outputs.append(c_out)
return tuple(outputs)
class DANet(SegBaseModel):
r"""Pyramid Scene Parsing Network
Parameters
----------
nclass : int
Number of categories for the training dataset.
backbone : string
Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',
'resnet101' or 'resnet152').
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`;
for Synchronized Cross-GPU BachNormalization).
aux : bool
Auxiliary loss.
Reference:
Jun Fu, Jing Liu, Haijie Tian, Yong Li, Yongjun Bao, Zhiwei Fang,and Hanqing Lu.
"Dual Attention Network for Scene Segmentation." *CVPR*, 2019
"""
def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=False, **kwargs):
super(DANet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs)
self.head = _DAHead(2048, nclass, aux, **kwargs)
def forward(self, x):
size = x.size()[2:]
_, _, c3, c4 = self.base_forward(x)
outputs = []
x = self.head(c4)
x0 = F.interpolate(x[0], size, mode='bilinear', align_corners=True)
if self.aux:
x1 = F.interpolate(x[1], size, mode='bilinear', align_corners=True)
x2 = F.interpolate(x[2], size, mode='bilinear', align_corners=True)
outputs.append(x0)
outputs.append(x1)
outputs.append(x2)
return outputs
return x0
if __name__ == '__main__':
from tools.flops_params_fps_count import flops_params_fps
model = DANet(nclass=6)
flops_params_fps(model)
| zyxu1996/Efficient-Transformer | models/danet.py | danet.py | py | 7,495 | python | en | code | 67 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "resnet.resnet50_v1b",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
... |
26967199764 | from flask import Flask, jsonify, request, make_response
from functions import calculate_penalty,possession
app = Flask(__name__)
@app.route('/')
def index():
return 'This is index page'
@app.route('/penalties', methods={'POST'})
def getpenalities():
drug_class=request.form.get('drug_class')
culpability = request.form.get('culpability')
harm = request.form.get('harm')
print((drug_class,culpability,harm))
result = calculate_penalty(drug_class,culpability,int(harm))
return jsonify( response = result, status=200,message="success" )
@app.route('/possession_of_a_drug', methods={'POST'})
def possession_of_a_drugapi():
drug_class = request.form.get('drug_class')
result = possession(drug_class)
return jsonify(response=result, status=200, message="success")
if __name__ == "__main__":
app.run(debug=True)
| Mubashar2014/penalties | main.py | main.py | py | 899 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.reque... |
3202338276 | import numpy as np
import sys
import time
sys.path.append("Interface/python/")
from init import NeuronLayerBox
import cv2
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
if __name__ == '__main__':
NLB=NeuronLayerBox(step_ms=1,model=1,spike=0,restore=0)
input_src=[]
img=cv2.imread("load_data/input.bmp")
img=rgb2gray(img).astype(int)
input_src.append(img)
NLB.step(20)
NLB.input(input_src)
for i in range(50):
NLB.step(5)
a=(NLB.output()['11']/max(np.max(NLB.output()['11']),0.0000001))*255
cv2.imshow("1.jpg",a)
cv2.waitKey(1)
time.sleep(10)
NLB.save()
NLB.exit()
| Megatron2032/NeuronLayerBox | NeuronLayerBox1.1/main.py | main.py | py | 641 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "init.NeuronLayerBox",
"line_... |
19848816787 | import json
no = 0
groups = 0
reserve = []
data = {
"no": no,
"groups": groups,
"reserve": reserve
}
with open("data.json", "w") as f:
json.dump(data, f)
opend = open("./data.json","r")
loaded = json.load(opend)
print(loaded)
print("no: ", loaded["no"], "groups: ", loaded["groups"])
| hmjn023/dev-hmjn | js.py | js.py | py | 318 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.dump",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
}
] |
34181502633 | from typing import Dict, Optional, Type, Union, Callable, Any
from types import TracebackType
from functools import wraps
from threading import Lock
from qiskit_ibm_provider.utils.converters import hms_to_seconds
from qiskit_ibm_runtime import QiskitRuntimeService
from .runtime_job import RuntimeJob
from .utils.result_decoder import ResultDecoder
from .ibm_backend import IBMBackend
from .utils.default_session import set_cm_session
from .utils.deprecation import deprecate_arguments
def _active_session(func): # type: ignore
"""Decorator used to ensure the session is active."""
@wraps(func)
def _wrapper(self, *args, **kwargs): # type: ignore
if not self._active:
raise RuntimeError("The session is closed.")
return func(self, *args, **kwargs)
return _wrapper
class Session:
"""Class for creating a flexible Qiskit Runtime session.
A Qiskit Runtime ``session`` allows you to group a collection of iterative calls to
the quantum computer. A session is started when the first job within the session
is started. Subsequent jobs within the session are prioritized by the scheduler.
Data used within a session, such as transpiled circuits, is also cached to avoid
unnecessary overhead.
You can open a Qiskit Runtime session using this ``Session`` class and submit jobs
to one or more primitives.
For example::
from qiskit.test.reference_circuits import ReferenceCircuits
from qiskit_ibm_runtime import Sampler, Session, Options
options = Options(optimization_level=3)
with Session(backend="ibmq_qasm_simulator") as session:
sampler = Sampler(session=session, options=options)
job = sampler.run(ReferenceCircuits.bell())
print(f"Sampler job ID: {job.job_id()}")
print(f"Sampler job result: {job.result()}")
"""
def __init__(
self,
service: Optional[QiskitRuntimeService] = None,
backend: Optional[Union[str, IBMBackend]] = None,
max_time: Optional[Union[int, str]] = None,
): # pylint: disable=line-too-long
"""Session constructor.
Args:
service: Optional instance of the ``QiskitRuntimeService`` class.
If ``None``, the service associated with the backend, if known, is used.
Otherwise ``QiskitRuntimeService()`` is used to initialize
your default saved account.
backend: Optional instance of :class:`qiskit_ibm_runtime.IBMBackend` class or
string name of backend. An instance of :class:`qiskit_ibm_provider.IBMBackend` will not work.
If not specified, a backend will be selected automatically (IBM Cloud channel only).
max_time: (EXPERIMENTAL setting, can break between releases without warning)
Maximum amount of time, a runtime session can be open before being
forcibly closed. Can be specified as seconds (int) or a string like "2h 30m 40s".
This value must be less than the
`system imposed maximum
<https://docs.quantum.ibm.com/run/max-execution-time>`_.
Raises:
ValueError: If an input value is invalid.
"""
if service is None:
if isinstance(backend, IBMBackend):
self._service = backend.service
else:
self._service = (
QiskitRuntimeService()
if QiskitRuntimeService.global_service is None
else QiskitRuntimeService.global_service
)
else:
self._service = service
if self._service.channel == "ibm_quantum" and not backend:
raise ValueError('"backend" is required for ``ibm_quantum`` channel.')
self._instance = None
if isinstance(backend, IBMBackend):
self._instance = backend._instance
backend = backend.name
self._backend = backend
self._setup_lock = Lock()
self._session_id: Optional[str] = None
self._active = True
self._max_time = (
max_time
if max_time is None or isinstance(max_time, int)
else hms_to_seconds(max_time, "Invalid max_time value: ")
)
@_active_session
def run(
self,
program_id: str,
inputs: Dict,
options: Optional[Dict] = None,
callback: Optional[Callable] = None,
result_decoder: Optional[Type[ResultDecoder]] = None,
) -> RuntimeJob:
"""Run a program in the session.
Args:
program_id: Program ID.
inputs: Program input parameters. These input values are passed
to the runtime program.
options: Runtime options that control the execution environment.
See :class:`qiskit_ibm_runtime.RuntimeOptions` for all available options.
callback: Callback function to be invoked for any interim results and final result.
Returns:
Submitted job.
"""
options = options or {}
if "instance" not in options:
options["instance"] = self._instance
options["backend"] = self._backend
if not self._session_id:
# Make sure only one thread can send the session starter job.
self._setup_lock.acquire()
# TODO: What happens if session max time != first job max time?
# Use session max time if this is first job.
options["session_time"] = self._max_time
try:
job = self._service.run(
program_id=program_id,
options=options,
inputs=inputs,
session_id=self._session_id,
start_session=self._session_id is None,
callback=callback,
result_decoder=result_decoder,
)
if self._session_id is None:
self._session_id = job.job_id()
finally:
if self._setup_lock.locked():
self._setup_lock.release()
if self._backend is None:
self._backend = job.backend().name
return job
def cancel(self) -> None:
"""Cancel all pending jobs in a session."""
self._active = False
if self._session_id:
self._service._api_client.cancel_session(self._session_id)
def close(self) -> None:
"""Close the session so new jobs will no longer be accepted, but existing
queued or running jobs will run to completion. The session will be terminated once there
are no more pending jobs."""
self._active = False
if self._session_id:
self._service._api_client.close_session(self._session_id)
def backend(self) -> Optional[str]:
"""Return backend for this session.
Returns:
Backend for this session. None if unknown.
"""
return self._backend
def status(self) -> Optional[str]:
"""Return current session status.
Returns:
The current status of the session, including:
Pending: Session is created but not active.
It will become active when the next job of this session is dequeued.
In progress, accepting new jobs: session is active and accepting new jobs.
In progress, not accepting new jobs: session is active and not accepting new jobs.
Closed: max_time expired or session was explicitly closed.
None: status details are not available.
"""
details = self.details()
if details:
state = details["state"]
accepting_jobs = details["accepting_jobs"]
if state in ["open", "inactive"]:
return "Pending"
if state == "active" and accepting_jobs:
return "In progress, accepting new jobs"
if state == "active" and not accepting_jobs:
return "In progress, not accepting new jobs"
return state.capitalize()
return None
def details(self) -> Optional[Dict[str, Any]]:
"""Return session details.
Returns:
A dictionary with the sessions details, including:
id: id of the session.
backend_name: backend used for the session.
interactive_timeout: The maximum idle time (in seconds) between jobs that
is allowed to occur before the session is deactivated.
max_time: Maximum allowed time (in seconds) for the session, subject to plan limits.
active_timeout: The maximum time (in seconds) a session can stay active.
state: State of the session - open, active, inactive, or closed.
accepting_jobs: Whether or not the session is accepting jobs.
last_job_started: Timestamp of when the last job in the session started.
last_job_completed: Timestamp of when the last job in the session completed.
started_at: Timestamp of when the session was started.
closed_at: Timestamp of when the session was closed.
"""
if self._session_id:
response = self._service._api_client.session_details(self._session_id)
if response:
return {
"id": response.get("id"),
"backend_name": response.get("backend_name"),
"interactive_timeout": response.get("interactive_ttl"),
"max_time": response.get("max_ttl"),
"active_timeout": response.get("active_ttl"),
"state": response.get("state"),
"accepting_jobs": response.get("accepting_jobs"),
"last_job_started": response.get("last_job_started"),
"last_job_completed": response.get("last_job_completed"),
"started_at": response.get("started_at"),
"closed_at": response.get("closed_at"),
}
return None
@property
def session_id(self) -> str:
"""Return the session ID.
Returns:
Session ID. None until a job runs in the session.
"""
return self._session_id
@property
def service(self) -> QiskitRuntimeService:
"""Return service associated with this session.
Returns:
:class:`qiskit_ibm_runtime.QiskitRuntimeService` associated with this session.
"""
return self._service
@classmethod
def from_id(
cls,
session_id: str,
service: Optional[QiskitRuntimeService] = None,
backend: Optional[Union[str, IBMBackend]] = None,
) -> "Session":
"""Construct a Session object with a given session_id
Args:
session_id: the id of the session to be created. This must be an already
existing session id.
service: instance of the ``QiskitRuntimeService`` class.
backend: instance of :class:`qiskit_ibm_runtime.IBMBackend` class or
string name of backend.
Returns:
A new Session with the given ``session_id``
"""
if backend:
deprecate_arguments("backend", "0.15.0", "Sessions do not support multiple backends.")
session = cls(service, backend)
session._session_id = session_id
return session
def __enter__(self) -> "Session":
set_cm_session(self)
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
set_cm_session(None)
self.close()
| Qiskit/qiskit-ibm-runtime | qiskit_ibm_runtime/session.py | session.py | py | 11,847 | python | en | code | 106 | github-code | 36 | [
{
"api_name": "functools.wraps",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "qiskit_ibm_runtime.QiskitRuntimeService",
"line_number": 57,
"usage_type": "name"
},
{
"api_name"... |
19033935152 | """Module contains functionality that parses main page for RedHat vulnerabilities."""
import time
import lxml
import lxml.etree
from selenium import webdriver
from selenium.common.exceptions import WebDriverException, NoSuchElementException
from selenium.webdriver.chrome.options import Options
from cve_connector.vendor_cve.implementation.parsers.general_and_format_parsers\
.html_parser import HtmlParser
from cve_connector.vendor_cve.implementation.utilities.utility_functions import string_to_date
class RedHatMainPageParser(HtmlParser):
"""
Class providing functionality for parsing RedHat main page.
"""
def __init__(self, url, logger, from_date=None, to_date=None):
super().__init__(url, from_date, to_date)
self.date_format = '%d %b %Y' # 20 Apr 2018
self.driver = None
self.entry_added = False
self.last_added = False
self.first_cve_on_page = ''
self.logger = logger
try:
self.load_content()
except ValueError:
self.logger.error('Unable to load content from {0}'.format(self.url))
def get_content_from_ulr(self):
"""
Gets and returns content from URL.
:return: content
"""
if not self.url:
raise ValueError('Url must not be empty.')
options = Options()
options.add_argument('--headless')
options.add_argument('--no-sandbox')
driver = webdriver.Chrome(chrome_options=options)
driver.get(self.url)
driver.implicitly_wait(10)
content = driver.page_source
self.driver = driver
return content
def parse(self):
"""
Provides parsing functionality.
:return: None
"""
try:
loading = True
while loading:
loading = self.parse_current_page_content()
if loading:
loaded = False
while not loaded:
loaded = self.load_next_page()
except ValueError as val_err:
self.logger.error('Error while parsing RH.')
self.logger.error(val_err)
finally:
self.driver.close()
def do_click(self):
"""
Accomplishes clicking on a web page.
:return: True if successful
"""
try:
elm = self.driver.find_element_by_link_text("›")
elm.click()
return True
except WebDriverException:
return False
def load_next_page(self):
"""
Load web page.
:return: True if successful
"""
driver = self.driver
try:
click = False
start = time.time()
end = time.time()
while not click:
if (end - start) > 120:
raise ValueError('RedHat page could not be loaded.')
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
click = self.do_click()
self.logger.debug("CLICK in RedHat")
end = time.time()
time.sleep(3)
content = driver.page_source
self.data = lxml.etree.HTML(content.replace('<br>', ''))
self.driver = driver
return True
except (NoSuchElementException, WebDriverException):
return False
def parse_current_page_content(self):
"""
Parses current page in property data.
:return: True if next page is needed to load, False otherwise.
"""
table_rows = self.data.xpath(
'.//table[contains(@summary, "Common Vulnerabilities and Exposures")]//tbody/tr')
for row in table_rows:
url_list = row.xpath('.//th//a/@href')
date_str_list = row.xpath('.//td//time/text()')
if len(url_list) != 1 or len(date_str_list) != 1:
raise ValueError('Format of data provided in RH table has changed.')
date = string_to_date(date_str_list[0], self.date_format)
if date < self.from_date:
return False
if self.from_date <= date <= self.to_date:
self.entities.append(url_list[0])
self.entry_added = True
self.last_added = True
else:
self.last_added = False
if not self.last_added and self.entry_added:
return False
return self.last_added or not self.entry_added
| CSIRT-MU/CRUSOE | crusoe_observe/cve-connector/cve_connector/vendor_cve/implementation/parsers/vendor_parsers/redhat_parsers/red_hat_main_page_parser.py | red_hat_main_page_parser.py | py | 4,549 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "cve_connector.vendor_cve.implementation.parsers.general_and_format_parsers.html_parser.HtmlParser",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "selen... |
17440403303 | from tensorflow.keras import layers, models
import glob
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from datetime import datetime
width = 75
height = 100
channel = 1
def load_data():
images = np.array([]).reshape(0, height, width)
labels = np.array([])
dictionary = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'A': 10,
'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15, 'G': 16, 'H': 17, 'I': 18, 'J': 19, 'K': 20,
'L': 21, 'M': 22, 'N': 23, 'P': 24, 'Q': 25, 'R': 26, 'S': 27, 'T': 28, 'U': 29, 'V': 30,
'W': 31, 'X': 32, 'Y': 33, 'Z': 34}
directories = [directory for directory in glob.glob('datasets/BelgianLicencePlates/TrainLetters/*')]
for directory in directories:
file_list = glob.glob(directory + '/*.jpg')
sub_images = np.array([np.array(Image.open(file_name)) for file_name in file_list])
sub_labels = [dictionary[directory[-1]]] * len(sub_images)
images = np.append(images, sub_images, axis=0)
labels = np.append(labels, sub_labels, axis=0)
x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.2, random_state=42, shuffle=True)
return (x_train, y_train), (x_test, y_test)
(train_images, train_labels), (test_images, test_labels) = load_data()
train_images = train_images.reshape((train_images.shape[0], height, width, channel))
test_images = test_images.reshape((test_images.shape[0], height, width, channel))
train_images, test_images = train_images / 255.0, test_images / 255.0
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(height, width, channel)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(35, activation='softmax'))
start = datetime.now().replace(microsecond=0)
model.summary()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=8)
end = datetime.now().replace(microsecond=0)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy: ', test_acc)
print('Test loss: ', test_loss)
print('Training duration: ', (end - start))
model.save('models/character_recognition_cnn.h5')
print('> Saved model to disk <')
| NikolaBrodic/VehicleLicencePlateAndLogoRecognition | character_recognition_cnn.py | character_recognition_cnn.py | py | 2,599 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 24,
... |
6795368681 | from django.conf import settings
from django.db.models import Q
from django_filters import rest_framework as filters
from ...models import Event
class EventFilterSet(filters.FilterSet):
    """Filter events by category, follow type, languages and location.

    Multi-valued parameters are OR-combined, so an event matches when it
    satisfies any of the supplied values for a given filter.
    """

    category = filters.MultipleChoiceFilter(
        method='filter_category',
        label='Category',
        choices=settings.EVENT_TYPE_CHOICES,
    )
    follow_type = filters.MultipleChoiceFilter(
        method='filter_follow_type',
        label='Follow Type',
        choices=settings.EVENT_FOLLOW_MODE_CHOICES,
    )
    languages = filters.CharFilter(
        method='filter_languages',
    )
    location = filters.CharFilter(
        method='filter_location',
    )

    class Meta:
        model = Event
        fields = [
            'category',
            'follow_type',
            'languages',
            'location'
        ]

    def filter_category(self, queryset, name, value):
        # OR together one exact category-code match per selected choice.
        combined = Q()
        for choice in value:
            combined = combined | Q(category__code=choice)
        return queryset.filter(combined)

    def filter_follow_type(self, queryset, name, value):
        # OR together containment checks for each selected follow mode.
        combined = Q()
        for choice in value:
            combined = combined | Q(follow_type__contains=choice)
        return queryset.filter(combined)

    def filter_languages(self, queryset, name, value):
        # Value is a comma-separated list; match events containing any one.
        combined = Q()
        for language in value.split(','):
            combined = combined | Q(languages__contains=[language])
        return queryset.filter(combined)

    def filter_location(self, queryset, name, value):
        # Comma-separated locations, matched case-insensitively as substrings.
        combined = Q()
        for place in value.split(','):
            combined = combined | Q(location__icontains=place)
        return queryset.filter(combined)
| tomasgarzon/exo-services | service-exo-events/event/api/filters/event.py | event.py | py | 1,666 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django_filters.rest_framework.FilterSet",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django_filters.rest_framework",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django_filters.rest_framework.MultipleChoiceFilter",
"line_number": ... |
41551040241 | import cv2
import os
import numpy as np
import json
import mmcv
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import matplotlib.pyplot as plt
from glob import glob
import ast
from mmrotate.core import poly2obb_np
def poly2obb_np_oc(poly):
    """Convert a quadrilateral polygon to an oriented bounding box (OpenCV
    'oc' angle definition).

    Args:
        poly (array-like): [x0,y0,x1,y1,x2,y2,x3,y3] corner coordinates.

    Returns:
        tuple | None: (x_ctr, y_ctr, w, h, angle) with angle in radians in
        (0, pi/2], or ``None`` when the min-area rectangle is degenerate
        (either side shorter than 2 pixels).
    """
    bboxps = np.array(poly).reshape((4, 2))
    # Smallest rotated rectangle enclosing the four corners.
    rbbox = cv2.minAreaRect(bboxps)
    x, y, w, h, a = rbbox[0][0], rbbox[0][1], rbbox[1][0], rbbox[1][1], rbbox[
        2]
    # Degenerate boxes are discarded (implicit None return).
    if w < 2 or h < 2:
        return
    # Normalise the angle into (0, 90] degrees. NOTE(review): cv2's
    # minAreaRect angle convention changed across OpenCV versions — this
    # loop assumes the older (-90, 0] style output; verify against the
    # installed OpenCV. Rotating by 90 swaps the roles of w and h.
    while not 0 < a <= 90:
        if a == -90:
            a += 180
        else:
            a += 90
            w, h = h, w
    # Degrees -> radians.
    a = a / 180 * np.pi
    assert 0 < a <= np.pi / 2
    return x, y, w, h, a
def _get_adaptive_scales(areas, min_area=800, max_area=30000):
    """Map areas to text/display scales clipped to the range [0.5, 1.0].

    The raw value is ``0.5 + (area - min_area) / (max_area - min_area)``,
    so areas at or below ``min_area`` map to 0.5 and the result saturates
    at 1.0 once the area reaches the midpoint of the configured range.

    Args:
        areas (ndarray): Areas of bboxes or masks, shape (n,).
        min_area (int): Area mapped to the minimum scale. Default: 800.
        max_area (int): Upper bound of the scaling range. Default: 30000.

    Returns:
        ndarray: Adaptive scales in [0.5, 1.0], shape (n,).
    """
    span = max_area - min_area
    raw = 0.5 + (areas - min_area) / span
    return np.clip(raw, 0.5, 1.0)
def draw_rbboxes(ax, bboxes, color='g', alpha=0.3, thickness=2):
    """Draw oriented bounding boxes on the axes.

    Args:
        ax (matplotlib.Axes): The input axes.
        bboxes (ndarray): Input boxes with shape (n, 5); each row is
            ``[x_ctr, y_ctr, w, h, angle]`` with the angle in radians.
        color (list[tuple] | matplotlib.color): Edge color(s) for the boxes.
        alpha (float): Transparency of bounding boxes. Default: 0.3.
        thickness (int): Thickness of lines. Default: 2.

    Returns:
        matplotlib.Axes: The result axes.
    """
    polygons = []
    for bbox in bboxes:
        xc, yc, w, h, ag = bbox[:5]
        # Half-extent vectors along the box's width and height directions.
        wx, wy = w / 2 * np.cos(ag), w / 2 * np.sin(ag)
        hx, hy = -h / 2 * np.sin(ag), h / 2 * np.cos(ag)
        # The four corners,走 counter-clockwise from (center - w/2 - h/2).
        p1 = (xc - wx - hx, yc - wy - hy)
        p2 = (xc + wx - hx, yc + wy - hy)
        p3 = (xc + wx + hx, yc + wy + hy)
        p4 = (xc - wx + hx, yc - wy + hy)
        # np.int0 was removed in NumPy 2.0; cast to the equivalent intp.
        poly = np.array([p1, p2, p3, p4]).astype(np.intp)
        polygons.append(Polygon(poly))
    p = PatchCollection(
        polygons,
        facecolor='none',
        edgecolors=color,
        linewidths=thickness,
        alpha=alpha)
    ax.add_collection(p)
    return ax
# arirang json to txt
# Scan every annotation JSON, collect the distinct class labels seen, and
# abort early (printing the offending image id) if a "military-aircraft"
# box is encountered.
ann_path = '/data/2_data_server/cv_data/arirang/validate_objects_labeling_json/'
# out_path = '/data/2_data_server/cv_data/arirang/val/annfiles/'
# ann_path = '/data/2_data_server/cv_data/arirang/validate_objects_labeling_json/'
label_test = []
for i in glob(ann_path+'*.json'):
    # print(i)
    with open(i) as f:
        json_data = json.load(f)
    img_id = json_data['features'][0]['properties']['image_id']
    for j in range(len(json_data['features'])):
        # Corner coordinates are stored as a Python-literal string, hence
        # the ast.literal_eval round-trip before converting to a list.
        bbox_info = json_data['features'][j]['properties']['object_imcoords']
        bbox_info = ast.literal_eval(bbox_info)
        bbox_info = list(bbox_info)
        # Downstream formats use hyphens in class names instead of spaces.
        bbox_label = json_data['features'][j]['properties']['type_name'].replace(" ","-")
        bbox_id = json_data['features'][j]['properties']['type_id']
        if bbox_label == "military-aircraft":
            print(img_id)
            exit()
        # Accumulate the set of unique labels seen so far.
        if label_test == []:
            # label_test.append(bbox_id)
            label_test.append(bbox_label)
        if bbox_label not in label_test:
            # label_test.append(bbox_id)
            label_test.append(bbox_label)
# first [:4] 지운 후
# if j == 0:
# with open(out_path+img_id[:-4]+'.txt',"w") as (fw):
# for k in range(len(bbox_info)):
# fw.write(str(int(bbox_info[k])))
# fw.write(" ")
# # fw.write(bbox_info)
# # fw.write(" ")
# fw.write(bbox_label)
# fw.write(" ")
# fw.write("0\n")
# else:
# with open(out_path+img_id[:-4]+'.txt',"a") as (fw):
# for k in range(len(bbox_info)):
# fw.write(str(int(bbox_info[k])))
# fw.write(" ")
# # fw.write(bbox_info)
# # fw.write(" ")
# fw.write(bbox_label)
# fw.write(" ")
# fw.write("0\n")
# aitod json to txt
# ann_path = '/data/2_data_server/cv_data/ai_todv2/aitodv2_train.json'
# out_path = '/data/2_data_server/cv_data/ai_todv2/train/annfiles/'
# # ann_path = '/data/2_data_server/cv_data/arirang/validate_objects_labeling_json/'
# label_test = []
# for i in glob(ann_path):
# # print(i)
# with open(i) as f:
# json_data = json.load(f)
# img_id = json_data['features'][0]['properties']['image_id']
# for j in range(len(json_data['features'])):
# bbox_info = json_data['features'][j]['properties']['object_imcoords']
# bbox_info = ast.literal_eval(bbox_info)
# bbox_info = list(bbox_info)
# bbox_label = json_data['features'][j]['properties']['type_name'].replace(" ","-")
# # bbox_id = json_data['features'][j]['properties']['type_id']
# # if label_test == []:
# # # label_test.append(bbox_id)
# # label_test.append(bbox_label)
# # if bbox_label not in label_test:
# # # label_test.append(bbox_id)
# # label_test.append(bbox_label)
# # first [:4] 지운 후
# if j == 0:
# with open(out_path+img_id[:-4]+'.txt',"w") as (fw):
# for k in range(len(bbox_info)):
# fw.write(str(int(bbox_info[k])))
# fw.write(" ")
# # fw.write(bbox_info)
# # fw.write(" ")
# fw.write(bbox_label)
# fw.write(" ")
# fw.write("0\n")
# else:
# with open(out_path+img_id[:-4]+'.txt',"a") as (fw):
# for k in range(len(bbox_info)):
# fw.write(str(int(bbox_info[k])))
# fw.write(" ")
# # fw.write(bbox_info)
# # fw.write(" ")
# fw.write(bbox_label)
# fw.write(" ")
# fw.write("0\n")
# min,max 출력
# ann_path = '/data/2_data_server/cv_data/arirang_split/train_ms/annfiles/'
# num_min = 100000
# num_max = 0
# num_total = 0
# for i in glob(ann_path+'*.txt'):
# # print(i)
# num_lines = sum(1 for line in open(i))
# num_min = min(num_lines, num_min)
# num_max = max(num_lines, num_max)
# if num_max == 1891:
# print(i)
# exit()
# print(num_min,num_max)
# # gt 개수
# ann_path = '/data/2_data_server/cv_data/arirang_split/val_ms/annfiles/'
# # CLASSES = ('small-ship', 'large-ship', 'civilian-aircraft', 'military-aircraft', 'small-car', 'bus', 'truck', 'train', 'crane', 'bridge',
# # 'oil-tank', 'dam', 'outdoor-playground', 'helipad', 'roundabout', 'indoor-playground','helicopter','individual-container','grouped-container','swimming-pool','etc')
# CLASSES = ('small-ship', 'large-ship', 'civilian-aircraft', 'military-aircraft', 'small-car', 'bus', 'truck', 'train')
# label_cnt = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
# cls_map = {c: i
# for i, c in enumerate(CLASSES)
# }
# for i in glob(ann_path+'*.txt'):
# # print(i)
# f = open(i,"r")
# lines = f.readlines()
# for line in lines:
# label = line.split()
# cls_name = label[8]
# if cls_name == 'military-aircraft':
# print(i)
# exit()
# # label = cls_map[cls_name]
# # label_cnt[label] = label_cnt[label] + 1
# # print(label_cnt)
##### size check
# ann_path = '/data/2_data_server/cv_data/arirang/validate_objects_labeling_json/'
# # out_path = '/data/2_data_server/cv_data/arirang/val/annfiles/'
# # ann_path = '/data/2_data_server/cv_data/arirang/validate_objects_labeling_json/'
# label_test = []
# c1 = 0
# c2 = 0
# c3 = 0
# c4 = 0
# c5 = 0
# c6 = 0
# c7 = 0
# c8 = 0
# c9 = 0
# c10 = 0
# c11 = 0
# c12 = 0
# c13 = 0
# c14 = 0
# c15 = 0
# c16 = 0
# c17 = 0
# c18 = 0
# c19 = 0
# c20 = 0
# c21 = 0
# c1_num = 0
# c2_num = 0
# c3_num = 0
# c4_num = 0
# c5_num = 0
# c6_num = 0
# c7_num = 0
# c8_num = 0
# c9_num = 0
# c10_num = 0
# c11_num = 0
# c12_num = 0
# c13_num = 0
# c14_num = 0
# c15_num = 0
# c16_num = 0
# c17_num = 0
# c18_num = 0
# c19_num = 0
# c20_num = 0
# c21_num = 0
# for i in glob(ann_path+'*.json'):
# # print(i)
# with open(i) as f:
# json_data = json.load(f)
# for j in range(len(json_data['features'])):
# bbox_info = json_data['features'][j]['properties']['object_imcoords']
# bbox_info = ast.literal_eval(bbox_info)
# poly = np.array(bbox_info,dtype=np.float32)
# poly = poly2obb_np(poly)
# if poly is not None:
# w = poly[2]
# h = poly[3]
# area = w*h
# # area =
# bbox_label = json_data['features'][j]['properties']['type_name'].replace(" ","-")
# if bbox_label =="small-ship":
# c1 += 1
# c1_num += area
# if bbox_label =="large-ship":
# c2 += 1
# c2_num += area
# if bbox_label =="civilian-aircraft":
# c3 += 1
# c3_num += area
# if bbox_label =="military-aircraft":
# c4 += 1
# c4_num += area
# if bbox_label =="small-car":
# c5 += 1
# c5_num += area
# if bbox_label =="bus":
# c6 += 1
# c6_num += area
# if bbox_label =="truck":
# c7 += 1
# c7_num += area
# if bbox_label =="train":
# c8 += 1
# c8_num += area
# if bbox_label =="crane":
# c9 += 1
# c9_num += area
# if bbox_label =="bridge":
# c10 += 1
# c10_num += area
# if bbox_label =="oil-tank":
# c11 += 1
# c11_num += area
# if bbox_label =="dam":
# c12 += 1
# c12_num += area
# if bbox_label =="outdoor-playground":
# c13 += 1
# c13_num += area
# if bbox_label =="helipad":
# c14 += 1
# c14_num += area
# if bbox_label =="roundabout":
# c15 += 1
# c15_num += area
# if bbox_label =="indoor-playground":
# c16 += 1
# c16_num += area
# if bbox_label =="helicopter":
# c17 += 1
# c17_num += area
# if bbox_label =="individual-container":
# c18 += 1
# c18_num += area
# if bbox_label =="grouped-container":
# c19 += 1
# c19_num += area
# if bbox_label =="swimming-pool":
# c20 += 1
# c20_num += area
# print("c1------")
# print(c1,c1_num)
# print("------")
# print("c2------")
# print(c2,c2_num)
# print("------")
# print("c3------")
# print(c3,c3_num)
# print("------")
# print("c4------")
# print(c4,c4_num)
# print("------")
# print("c5------")
# print(c5,c5_num)
# print("------")
# print("c6------")
# print(c6,c6_num)
# print("------")
# print("c7------")
# print(c7,c7_num)
# print("------")
# print("c8------")
# print(c8,c8_num)
# print("------")
# print("c9------")
# print(c9,c9_num)
# print("------")
# print("c10------")
# print(c10,c10_num)
# print("------")
# print("c11------")
# print(c11,c11_num)
# print("------")
# print("c12------")
# print(c12,c12_num)
# print("------")
# print("c13------")
# print(c13,c13_num)
# print("------")
# print("c14------")
# print(c14,c14_num)
# print("------")
# print("c15------")
# print(c15,c15_num)
# print("------")
# print("c16------")
# print(c16,c16_num)
# print("------")
# print("c17------")
# print(c17,c17_num)
# print("------")
# print("c18------")
# print(c18,c18_num)
# print("------")
# print("c19------")
# print(c19,c19_num)
# print("------")
# print("c20------")
# print(c20,c20_num)
# print("------")
| parkyongjun1/rotated_deformabledetr | AO2-DETR/tools/arirang_json_to_txt.py | arirang_json_to_txt.py | py | 12,936 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.minAreaRect",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number... |
70970697063 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 07:57:10 2020
@author: KANNAN
"""
from flask import Flask, render_template, request
import pandas as pd
#import sklearn
import pickle
model = pickle.load(open("flight_rf.pkl", "rb"))
app = Flask(__name__)
@app.route('/')
def home():
    """Serve the flight-fare input form."""
    return render_template('Airlines.html')
# Category orders below must match the one-hot column order the model was
# trained with — do not reorder without retraining.
_AIRLINES = ('Air India', 'GoAir', 'IndiGo', 'Jet Airways',
             'Jet Airways Business', 'Multiple carriers',
             'Multiple carriers Premium economy', 'SpiceJet',
             'Trujet', 'Vistara', 'Vistara Premium economy')
_SOURCES = ('Chennai', 'Kolkata', 'Mumbai', 'New Delhi')
_DESTINATIONS = ('Cochin', 'Hyderabad', 'Kolkata', 'New Delhi')


def _one_hot(value, categories):
    """Return a 0/1 indicator list marking *value* within *categories*.

    Unknown values (e.g. a source city outside the trained set) yield an
    all-zero vector, mirroring the original if/elif fall-through.
    """
    return [1 if value == category else 0 for category in categories]


@app.route('/Predict', methods = ["GET", "POST"])
def Predict():
    """Render the fare form; on POST, predict the fare from the form inputs.

    Reads departure/arrival datetimes, airline, source, destination and
    stop count from the form, builds the model's feature vector and renders
    the page with the predicted fare.
    """
    if request.method == "POST":
        fmt = "%Y-%m-%dT%H:%M"
        dep = pd.to_datetime(request.form["Departure_Date"], format=fmt)
        arrival = pd.to_datetime(request.form["Arrival_Date"], format=fmt)

        # Trip duration as whole hours plus leftover minutes. Computing it
        # from total_seconds (instead of the old pd.to_datetime(timedelta)
        # trick) keeps durations of 24h or more from wrapping around.
        total_minutes = int((arrival - dep).total_seconds() // 60)
        duration_hour, duration_minute = divmod(total_minutes, 60)

        stops = int(request.form["Total_Stops"])
        airline_flags = _one_hot(request.form["Airline"], _AIRLINES)
        source_flags = _one_hot(request.form["Source"], _SOURCES)
        dest_flags = _one_hot(request.form["Destination"], _DESTINATIONS)

        # Feature order: trip numbers, then airline, source and destination
        # one-hots — identical to the training column order.
        features = [stops,
                    dep.day, dep.month, dep.hour, dep.minute,
                    arrival.hour, arrival.minute,
                    duration_hour, duration_minute]
        features += airline_flags + source_flags + dest_flags

        prediction = model.predict([features])
        output = round(prediction[0], 2)
        return render_template('Airlines.html', prediction_text = "Your Flight Fare is {} INR".format(output))
    return render_template('Airlines.html')
if __name__ == '__main__':
    # Launch the Flask development server.
    app.run()
| GuruYohesh/ML | Flight Fare Prediction/app.py | app.py | py | 9,556 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
... |
36538905715 | import matplotlib.pyplot as plt
from pymol import cmd
from multiprocessing import Pool, cpu_count
from tqdm import tqdm # Import tqdm for progress bar
# Define a function to calculate RMSD for a single frame
def calculate_rmsd(frame_number, object_1, reference_object):
    """Return the RMSD of *object_1* at *frame_number* against the reference.

    Switches PyMOL to the requested frame, then measures with ``rms_cur``
    on the current coordinates (the superposing ``align`` call is left
    commented out).
    """
    cmd.frame(frame_number)
    #cmd.align(object_1, reference_object, frame_number, 1)
    return cmd.rms_cur(object_1, reference_object, frame_number, 0)
# Define the process_frame function outside of RMSD_vs_frame
def process_frame(args):
    """Unpack a (frame_number, object, reference) tuple and compute its RMSD."""
    frame_number, mobile, reference = args
    return calculate_rmsd(frame_number, mobile, reference)
# Define a function to plot RMSD vs frame number
def RMSD_vs_frame(object_1, stride=1, num_process=1):
    """Plot per-frame RMSD of *object_1* against its first state.

    Args:
        object_1: Name of the loaded PyMOL object/trajectory.
        stride: Sample every ``stride``-th frame. Default: 1.
        num_process: Number of worker processes; 1 means auto
            (CPU count, capped by the number of frames).
    """
    RMSDs = []
    reference_object = 'ref'
    # State 1 of the trajectory serves as the RMSD reference.
    cmd.create(reference_object, object_1, 1, 0)
    total_frames = cmd.count_states(object_1)
    if num_process == 1:
        num_process = min(cpu_count(), total_frames)
    frames_to_process = range(1, total_frames + 1, stride)
    args_list = [(frame_number, object_1, reference_object)
                 for frame_number in frames_to_process]
    with Pool(processes=num_process) as pool, tqdm(total=len(args_list), desc="Calculating RMSD") as pbar:
        # Use ordered imap so each RMSD stays aligned with its frame number;
        # imap_unordered yields results in completion order, which scrambled
        # the y-values relative to the x-axis in the plot below.
        for rmsd in pool.imap(process_frame, args_list):
            RMSDs.append(rmsd)
            pbar.update(1)
    # x: sampled frame numbers; y: their RMSDs, in matching order.
    x1 = frames_to_process
    y1 = RMSDs
    plt.figure()
    plt.plot(x1, y1, label=object_1, color='blue', linewidth=2)
    plt.xlabel('Frame', fontsize=12)
    plt.ylabel('Distance', fontsize=12)
    plt.title('Distance vs Frame', fontsize=16)
    plt.legend(fontsize=10)
    plt.grid(True, linestyle='--', linewidth=0.5, alpha=0.7)
    plt.tick_params(axis='both', which='major', labelsize=10)
    plt.tight_layout()  # Adjust the spacing for better layout
    plt.show()  # Display the plot
# Usage: RMSD_vs_frame('object_name', stride=1, num_process=1)
print("USAGE: RMSD_vs_frame('protein selection', stride=5, num_process=4)")
| raafik980/charmm-md-analysis-in-pymol | 02_rmsd_vs_frame_parallel.py | 02_rmsd_vs_frame_parallel.py | py | 2,523 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymol.cmd.frame",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pymol.cmd",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pymol.cmd.rms_cur",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pymol.cmd",
"line_numbe... |
29632863333 | import discord
from discord.ext import commands
from datetime import datetime
from fun_config import *
class Work(commands.Cog):
    """Cog grouping work/rest economy commands."""

    def __init__(self, client: commands.Bot):
        self.client = client

    @commands.command()
    @commands.cooldown(1, 86400, commands.BucketType.user)
    async def sleep(self, ctx):
        """Refill the invoking user's energy to its maximum (once per day)."""
        # Load the shared inventory data (helper from fun_config).
        inventory = await get_inventory_data()
        stats = inventory[str(ctx.author.id)]['Stats']
        # Top the user up to their personal energy cap.
        stats['Energy'] = int(stats['MaxEnergy'])
        with open(inventory_json_file, "w") as json_file:
            json.dump(inventory, json_file, indent=1)
        await ctx.reply("You slept and refilled your energy!")
await ctx.reply("You slept and refilled your energy!")
async def setup(client: commands.Bot) -> None:
    """Extension entry point used by discord.py to register this cog."""
    cog = Work(client)
    await client.add_cog(cog)
| Maghish/HoeX | cogs/work.py | work.py | py | 835 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 8,
"usage_type": "attribute"
},
{
"api... |
9507730487 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 13 18:52:23 2020
@author: Hasnain Khan
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Convolv function for convolving the kernel with real image matrix
def convolve_np(image, kernel):
    """2-D cross-correlation of ``image`` with ``kernel`` (no flipping).

    Args:
        image (ndarray): 2-D input image.
        kernel (ndarray): 2-D kernel; odd height and width keep the
            anchor centred.

    Returns:
        ndarray: Float array with the same shape as ``image``. Border
        pixels where the kernel would overhang are left at 0.
    """
    X_height = image.shape[0]
    X_width = image.shape[1]
    F_height = kernel.shape[0]
    F_width = kernel.shape[1]
    # Half-sizes of the kernel around its centre.
    H = int((F_height - 1) / 2)
    W = int((F_width - 1) / 2)
    out = np.zeros((X_height, X_width))
    # Slide the kernel over every position where it fits entirely; the
    # inner accumulation is a vectorized window product (equivalent to the
    # former element-by-element loop, but without shadowing builtin `sum`).
    for i in range(H, X_height - H):
        for j in range(W, X_width - W):
            window = image[i - H:i + H + 1, j - W:j + W + 1]
            out[i, j] = np.sum(window * kernel)
    return out  # Returning the convolved image
def box_blur(image_path='Lenna.png'):
    """Apply 3x3 Sobel edge operators to a grayscale image.

    (The name is historical — this computes Sobel gradients, not a box
    blur.)

    Args:
        image_path (str): Path of the image to load as grayscale.
            Default: ``'Lenna.png'``.

    Returns:
        tuple: ``(img_x, img_y)`` — horizontal and vertical edge
        responses, each normalised by the kernel weight sum of 8.

    Raises:
        FileNotFoundError: If the image cannot be read.
    """
    img = cv2.imread(image_path, 0)
    if img is None:
        # cv2.imread signals failure by returning None instead of raising.
        raise FileNotFoundError("Could not read image: " + image_path)
    # Sobel Operator for Horizontal Edge Detection
    Hx = np.array([[-1, 0, 1],
                   [-2, 0, 2],
                   [-1, 0, 1]])
    # Sobel Operator for Vertical Edge Detection
    Hy = np.array([[-1, -2, -1],
                   [0, 0, 0],
                   [1, 2, 1]])
    img_x = convolve_np(img, Hx) / 8.0  # Output of Sobel Horizontal
    img_y = convolve_np(img, Hy) / 8.0  # Output of Sobel Vertical
    return img_x, img_y
if __name__ == '__main__':
    # Run the Sobel demo on the default image when executed as a script.
    box_blur()
| HasnainKhanNiazi/Convolutional-Kernels | Sobel_Operator.py | Sobel_Operator.py | py | 1,479 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number... |
70721444264 | import numpy as np
import plotly.offline as ply
import plotly.graph_objs as go
from scipy import stats as st
from scipy import signal as sg
import source_localization as src
# --- Test fixture parameters -------------------------------------------
file_name = "/home/dima/Projects/Touchdown/Data/test.bin"
ch_total = 6
length = 2**15 + 100000
fs = 100e6
t_pulse_width = 100e-6
t_period = 200e-6
snr = 1000000 # dB
start_period = 1
f0 = 50e6
post_id = 0
serial = 0
# Complex Gaussian noise for every channel; with this SNR the scale
# 10**(-snr/20) underflows to 0, so the signal is effectively all zeros.
y = st.norm.rvs(size=(ch_total, length), scale=10**(-snr/20)) +\
    1j * st.norm.rvs(size=(ch_total, length), scale=10**(-snr/20))
# Build a frame and persist it with the project's saver.
frame = src.SpecFrame(post_id=post_id, serial=serial, f0=f0, fs=fs, f_res=fs/length, sig=y)
writer = src.SpecFrameSaver(post_id)
writer.open(file_name)
writer.save_title(start_period)
writer.save(frame)
writer.close()
print("Successfully saved frame")
# Read the frame back with the matching loader so the fields can be
# compared against what was written above.
loader = src.SpecFrameLoader(post_id)
success = loader.open(file_name)
if not success:
    # Fixed typo in the error message ("Enable" -> "Unable").
    print("Unable to open file")
    exit(1)
i_start_period = loader.read_title()
i_frame = src.SpecFrame()
loader.load(i_frame)
loader.close()
# Compare every saved field against what was read back; floats are
# compared with a small absolute tolerance.
error = 1e-7
failed = False
if abs(start_period - i_start_period) > error:
    print("\n\n\nTest failed. \nSaved start period was: ", start_period, "\nLoaded start period is: ", i_start_period)
    failed = True
# Integer fields use value equality (!=); the previous `is not` identity
# checks only passed by accident via CPython's small-int cache.
if post_id != i_frame.get_post_id():
    print("\n\n\nTest failed. \nSaved post id was: ", post_id, "\nLoaded post id is: ", i_frame.get_post_id())
    failed = True
if serial != i_frame.get_serial():
    print("\n\n\nTest failed. \nSaved serial was: ", serial, "\nLoaded serial is: ", i_frame.get_serial())
    failed = True
if abs(f0 - i_frame.get_central_frequency()) > error:
    print("\n\n\nTest failed. \nSaved central frequency was: ", f0, "\nLoaded central frequency is: ",
          i_frame.get_central_frequency())
    failed = True
if abs(fs - i_frame.get_sampling_frequency()) > error:
    print("\n\n\nTest failed. \nSaved sampling frequency was: ", fs, "\nLoaded sampling frequency is: ",
          i_frame.get_sampling_frequency())
    failed = True
if abs(fs/length - i_frame.get_frequency_resolution()) > error:
    print("\n\n\nTest failed. \nSaved frequency resolution was: ", fs/length, "\nLoaded frequency resolution is: ",
          i_frame.get_frequency_resolution())
    failed = True
data = i_frame.get_data()
if ch_total != data.shape[0]:
    print("\n\n\nTest failed. \nSaved channels total was: ", ch_total, "\nLoaded channels total is: ",
          data.shape[0])
    failed = True
if abs(length - data.shape[1]) > error:
    print("\n\n\nTest failed. \nSaved samples per channel was: ", length, "\nLoaded samples per channel is: ",
          data.shape[1])
    failed = True
# Element-wise comparison of the complex samples.
for ch in range(ch_total):
    for s in range(length):
        if abs((data[ch, s]) - y[ch, s]) > error:
            print("\n\n\nTest failed. \nData mismatch. \nChannel: ", ch, "\nSample: ", s)
            failed = True
if not failed:
    print("Test passed successfully")
| DimaZhu/libsource_localization | tests/test_specframewriter.py | test_specframewriter.py | py | 2,935 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.stats.norm.rvs",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "scipy.stats.no... |
1369587477 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from models import Company
def index(request):
companies = Company.objects.all().order_by('name')
paginator = Paginator(companies, 10)
if request.method == 'POST' and request.POST.get('search'):
companies = companies.filter(name__icontains=request.POST.get('search'))
return render(request, 'index.html', {'data': companies})
else:
page = request.GET.get('page')
try:
companies = paginator.page(page)
except PageNotAnInteger:
companies = paginator.page(1)
except EmptyPage:
companies = paginator.page(paginator.num_pages)
return render(request, 'index.html', {'data': companies})
| avallete/ft_companysee | company/views.py | views.py | py | 863 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Company.objects.all",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Company.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "models.Company",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "dj... |
41646833738 | import datetime, requests, csv, argparse
class MarketwatchScraper():
    """Download historical daily OHLCV data for a stock from MarketWatch.

    Data is fetched in roughly one-year windows (MarketWatch caps the size
    of a single CSV download) and appended to ``<stock>.csv`` in the
    current working directory.
    """

    def __init__(self, stock: str = "AAPL", timeout: int = 1) -> None:
        self.stock = stock
        self.timeout = timeout

    def scrape(self) -> None:
        """Fetch all yearly chunks and write them to the local CSV file."""
        self.saveToFile(self.getURLS())

    def saveToFile(self, urls: list) -> None:
        """Download each CSV chunk in *urls* and append its rows locally.

        Failed requests (non-200 or network errors) are logged and skipped.
        """
        localFile = f"{self.stock.lower()}.csv"
        # Start the file fresh with a single header row; `with` closes it.
        with open(localFile, "w") as f:
            f.write("Date,Open,High,Low,Close,Volume\n")
        for url in urls:
            print(f"Getting data from url {url}...")
            try:
                resp = requests.get(url, timeout=self.timeout, headers={
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 OPR/96.0.0.0 (Edition std-1)"
                })
                if resp.status_code != 200:
                    print(f"Error! Status code: {resp.status_code}")
                    continue
            except Exception as e:
                print(f"Error! Exception: {e}")
                continue
            data = resp.content.decode("utf-8")
            csvData = csv.reader(data.splitlines(), delimiter=",")
            next(csvData)  # skip the per-chunk header row
            with open(localFile, "a") as f:
                print(f"Writing data to {localFile}...")
                writer = csv.writer(f)
                for row in csvData:
                    writer.writerow(row)

    def getURLS(self) -> list:
        """Build download URLs covering 1970-01-01 through today.

        Windows are 366 days long but advance by 365 days, so consecutive
        windows overlap by one day. NOTE(review): that overlap can produce
        a duplicate row per year in the output CSV — confirm whether
        downstream consumers deduplicate.
        """
        urls = []
        startDate = datetime.datetime(1970, 1, 1)
        endDate = datetime.datetime.today()
        # (The old `if endDate > datetime.datetime.today()` re-clamp was
        # always false and has been removed.)
        while startDate < endDate:
            date1 = startDate.strftime("%m/%d/%Y%%2000:00:00")
            date2 = (startDate + datetime.timedelta(days=366)).strftime("%m/%d/%Y%%2000:00:00")
            url = f"https://www.marketwatch.com/investing/stock/{self.stock}/downloaddatapartial?startdate={date1}&enddate={date2}&daterange=d30&frequency=p1d&csvdownload=true&downloadpartial=false&newdates=false"
            print(f"Added URL for {startDate.strftime('%m/%d/%Y')} to {(startDate + datetime.timedelta(days=366)).strftime('%m/%d/%Y')}")
            urls.append(url)
            startDate = startDate + datetime.timedelta(days=365)
        return urls
if __name__ == "__main__":
    # CLI entry point: --stock is required; --timeout falls back to 1s.
    parser = argparse.ArgumentParser()
    parser.add_argument("--stock", type=str, required=True)
    parser.add_argument("--timeout", type=int, required=False)
    args = parser.parse_args()
    scraper = MarketwatchScraper(stock=args.stock, timeout=args.timeout if args.timeout else 1)
    scraper.scrape()
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_numb... |
21683749340 | #!/usr/bin/env python3
import os
import re
import click
import json
import logging
import zipfile
import portalocker
import contextlib
import traceback
import imghdr
import multiprocessing
import functools
import threading
import time
import sys
import ctypes
import psutil
from pathlib import Path
from functools import partial
from tqdm import tqdm
from atomicwrites import atomic_write
from contextlib import contextmanager, nullcontext
from functools import partial
from origami.core.time import elapsed_timer
from origami.batch.core.io import *
from origami.batch.core.utils import Spinner
from origami.batch.core.mutex import DatabaseMutex, FileMutex, DummyMutex
def qt_app():
try:
from PySide2 import QtGui
except ImportError:
from PySide6 import QtGui
os.environ["QT_QPA_PLATFORM"] = "offscreen"
return QtGui.QGuiApplication()
class WatchdogState(enum.Enum):
RUNNING = 0
DONE = 1
CANCEL = 2
class StopWatch:
def __init__(self):
self._last_reset = time.time()
def reset(self):
self._last_reset = time.time()
@property
def age(self):
return time.time() - self._last_reset
class SharedMemoryStopWatch:
def __init__(self):
self._shared = multiprocessing.Value('L', int(time.time()))
def reset(self):
with self._shared.get_lock():
self._shared.value = int(time.time())
@property
def age(self):
with self._shared.get_lock():
return time.time() - self._shared.value
WorkSetEntry = collections.namedtuple(
'WorkSetEntry', ['path', 'pid', 'age'])
class SharedMemoryWorkSet:
    """Cross-process registry of the work items currently in flight.

    Backed by a multiprocessing.Array so that the watchdog (running in
    the parent) can see which queue indices the pool workers are busy
    with. Each slot consists of 4 int64 entries:
      0: value (queue index; -1 means the slot is free)
      1: pid of the worker that claimed the slot
      2: unix timestamp of the claim
      3: not used
    *access* maps a stored value back to a page path for reporting and
    *n* is the maximum number of concurrent entries.
    """

    def __init__(self, access, n):
        assert n >= 1
        self._array = multiprocessing.Array(
            ctypes.c_int64, n * 4)
        # each slot has 4 integer entries:
        # 0: value
        # 1: pid
        # 2: timestamp
        # 3: not used
        self._n = n
        # mark every slot as free
        for i in range(self._n * 4):
            self._array[i] = -1
        self._access = access

    def _cleanup(self):
        """Free slots whose owning worker process no longer exists."""
        with self._array.get_lock():
            for i in range(self._n):
                pid = self._array[4 * i + 1]
                if pid >= 0 and not psutil.pid_exists(pid):
                    logging.warning(f"removing killed pid {pid} from work set.")
                    self._array[4 * i] = -1
                    self._array[4 * i + 1] = -1
                    self._array[4 * i + 2] = -1

    def add(self, value):
        """Claim a free slot for *value*; no-op when already present."""
        assert value >= 0
        with self._array.get_lock():
            self._cleanup()
            free = None
            for i in range(self._n):
                if self._array[4 * i] == value:
                    return
                elif free is None and self._array[4 * i] < 0:
                    free = i
            if free is None:
                raise RuntimeError(
                    f"no free slots for adding {value}, pid {os.getpid()}: {self.active}")
            self._array[4 * free] = value
            self._array[4 * free + 1] = int(os.getpid())
            self._array[4 * free + 2] = int(time.time())

    def remove(self, value):
        """Release the slot holding *value*; the value must be present."""
        assert value >= 0
        with self._array.get_lock():
            found = None
            for i in range(self._n):
                if self._array[4 * i] == value:
                    found = i
                    break
            assert found is not None
            self._array[4 * found] = -1
            self._array[4 * found + 1] = -1
            self._array[4 * found + 2] = -1

    @property
    def active(self):
        """Return WorkSetEntry snapshots for all live slots."""
        result = []
        with self._array.get_lock():
            self._cleanup()
            for i in range(self._n):
                if self._array[4 * i] >= 0:
                    result.append(WorkSetEntry(
                        path=self._access(self._array[4 * i]),
                        pid=self._array[4 * i + 1],
                        age=int(time.time() - self._array[4 * i + 2])))
        return result

    def print(self):
        """Log the current entries at error level (used on timeout)."""
        active = self.active
        if active:
            logging.error(f"{len(active)} entries in work set:")
            for i, entry in enumerate(active):
                logging.error(f"  ({i + 1}) {entry}]")
        else:
            logging.error("no entries in work set.")
# NOTE(review): "global" at module scope is a no-op; kept for emphasis.
global global_stop_watch
global_stop_watch = SharedMemoryStopWatch()
# global_stop_watch needs to be global indeed, as pickling
# over the fork in imap_unordered will not work otherwise.

# global_work_set is assigned in Processor._process_queue before the
# worker pool forks, so the workers inherit the shared array.
global global_work_set
class Watchdog(threading.Thread):
    """Terminates the worker pool when no results arrive for too long.

    Workers reset *stop_watch* whenever they finish a page. If its age
    ever exceeds *timeout* seconds, the pool is terminated; a second
    timeout after that (i.e. the termination itself hangs) hard-kills
    the whole process.
    """

    def __init__(self, pool, stop_watch, work_set, timeout):
        threading.Thread.__init__(self)
        self._pool = pool
        self._timeout = timeout
        self._stop_watch = stop_watch
        self._work_set = work_set
        self._state = WatchdogState.RUNNING
        self._cond = threading.Condition()
        stop_watch.reset()

    def _print_work_set(self):
        # report which pages/pids were in flight when we gave up
        self._work_set.print()

    def _cancel(self):
        if self._state != WatchdogState.CANCEL:
            # first timeout: terminate the pool, join it in the
            # background, and grant termination one more timeout period.
            logging.error("no new results after %d s. stopping." % self._stop_watch.age)
            self._print_work_set()
            self._state = WatchdogState.CANCEL
            self._pool.terminate()
            t = threading.Thread(target=lambda: self._pool.join(), args=())
            t.start()
            self._stop_watch.reset()
        elif self._state == WatchdogState.CANCEL:
            # second timeout: pool termination is stuck; abandon ship.
            logging.error("stopping failed. killing process.")
            self._print_work_set()
            os._exit(1)

    def run(self):
        with self._cond:
            while True:
                # sleep until roughly when the timeout could next
                # expire, or until set_is_done() notifies us
                self._cond.wait(
                    max(0, self._timeout - self._stop_watch.age))
                if self._state == WatchdogState.DONE:
                    break
                if self._stop_watch.age > self._timeout:
                    self._cancel()

    def set_is_done(self):
        """Signal normal completion so the watchdog thread exits."""
        with self._cond:
            if self._state == WatchdogState.RUNNING:
                self._state = WatchdogState.DONE
                self._cond.notify()

    def is_cancelled(self):
        """True when the pool was terminated because of a timeout."""
        return self._state == WatchdogState.CANCEL
def chunks(items, n):
    """Yield successive slices of *items* holding at most *n* elements."""
    start = 0
    while start < len(items):
        yield items[start:start + n]
        start += n
class Processor:
def __init__(self, options, needs_qt=False):
self._overwrite = options.get("overwrite", False)
self._processes = options.get("processes", 1)
self._timeout = options.get("alive", 600)
self._name = options.get("name", "")
self._verbose = False
self._lock_strategy = options.get("lock_strategy", "DB")
self._lock_level = options.get("lock_level", "PAGE")
self._lock_timeout = options.get("lock_timeout", "60")
self._max_lock_age = options.get("max_lock_age")
self._lock_chunk_size = 25
self._mutex = None
if self._lock_strategy == "DB":
self._lock_database = options.get("lock_database")
elif self._lock_strategy in ("FILE", "NONE"):
pass
else:
raise ValueError(self._lock_strategy)
if needs_qt:
self._qt_app = qt_app()
if self._processes > 1:
logging.warning(
"this batch does not support multiple processes.")
self._processes = 1 # cannot safely fork here.
else:
self._qt_app = None
if options.get("profile"):
from profiling.sampling import SamplingProfiler
self._profiler = SamplingProfiler()
self._overwrite = True # profile implies overwrite
else:
self._profiler = None
self._print_paths = False
self._plain = options.get("plain")
if self._plain:
self._print_paths = True
self._debug_write = options.get("debug_write", False)
self._track_changes = options.get("track_changes", False)
    @staticmethod
    def options(f):
        """Decorate click command *f* with the shared batch options."""
        options = [
            click.option(
                '--processes',
                type=int,
                default=1,
                help="Number of parallel processes to employ."),
            click.option(
                '--alive',
                type=int,
                default=600,
                help="Seconds to wait after inactive process is killed."),
            click.option(
                '--name',
                type=str,
                default="",
                help="Only process paths that conform to the given pattern."),
            click.option(
                '--lock-strategy',
                type=click.Choice(['FILE', 'DB', 'NONE'], case_sensitive=False),
                default="DB",
                help="How to implement locking for concurrency."),
            click.option(
                '--lock-level',
                type=click.Choice(['PAGE', 'TASK'], case_sensitive=False),
                default="PAGE",
                help="Lock granularity."),
            click.option(
                '--lock-database',
                type=click.Path(),
                required=False,
                help="Mutex database path used for concurrent processing"),
            click.option(
                '--lock-timeout',
                type=int,
                default=60,
                required=False,
                help="Seconds to wait to acquire locking. NFS volumes might need high values."),
            click.option(
                '--max-lock-age',
                type=int,
                default=600,
                required=False,
                help="Maximum age of a lock in seconds until it is considered invalid."),
            click.option(
                '--overwrite',
                is_flag=True,
                default=False,
                help="Recompute and overwrite existing result files."),
            click.option(
                '--profile',
                is_flag=True,
                default=False,
                help="Enable profiling and show results."),
            click.option(
                '--plain',
                is_flag=True,
                default=False,
                help="Print plain output that is friendly to piping."),
            click.option(
                '--debug-write',
                is_flag=True,
                default=False,
                help="Debug which files are written."),
            click.option(
                '--track-changes',
                type=str,
                default="",
                help="Recompute files and track changes with given tag.")
        ]
        # apply the option decorators right-to-left onto f
        return functools.reduce(lambda x, opt: opt(x), options, f)
@property
def processor_name(self):
return self.__class__.__name__
def is_image(self, path):
# imghdr might be the perfect tool for this, but
# it fails to detect some valid images. so we go
# with extenstions for the most part.
# see https://stackoverflow.com/questions/36870661/
# imghdr-python-cant-detec-type-of-some-images-image-extension
if path.suffix.lower() in (".jpg", ".png", ".tif", ".tiff"):
return True
return imghdr.what(path) is not None
    def should_process(self, page_path):
        """Hook for subclasses: return False to skip *page_path*."""
        return True
    def prepare_process(self, page_path):
        """Instantiate the input/output artifacts for *page_path*.

        Returns a kwargs dict mapping argument names to artifact
        objects for process(), or False when a required input is not
        ready yet (i.e. an earlier batch stage has not produced it).
        NOTE(review): self.artifacts() is presumably supplied by the
        subclass -- it is not defined in this base class.
        """
        artifacts = self.artifacts()

        if self._track_changes:
            file_writer = TrackChangeWriter(self._track_changes)
        else:
            file_writer = AtomicFileWriter(overwrite=self._overwrite)
        if self._debug_write:
            # wrap the writer so every written file gets logged
            file_writer = DebuggingFileWriter(file_writer)

        kwargs = dict()
        for arg, spec in artifacts:
            f = spec.instantiate(
                page_path=page_path,
                processor=self,
                file_writer=file_writer)
            f.fix_inconsistent()
            if not f.is_ready():
                if self._verbose:
                    print("skipping %s: missing %s" % (page_path, f.missing))
                return False
            kwargs[arg] = f

        return kwargs
def _trigger_process1(self, p, kwargs, locked):
work = locked
if not locked:
logging.warning(f"failed to obtain lock for {p}. ignoring.")
try:
if work:
# a concurrent worker might already have done this.
for f in kwargs.values():
if not f.is_ready():
work = False
break
if work:
with elapsed_timer() as elapsed:
data_path = find_data_path(p)
data_path.mkdir(exist_ok=True)
runtime_info = self.process(p, **kwargs)
if runtime_info is None:
runtime_info = dict()
runtime_info["status"] = "COMPLETED"
runtime_info["elapsed"] = round(elapsed(), 2)
self._update_runtime_info(
p, {self.processor_name: runtime_info})
except KeyboardInterrupt:
logging.exception("Interrupted at %s." % p)
raise
except:
logging.exception("Failed to process %s." % p)
runtime_info = dict(
status="FAILED",
traceback=traceback.format_exc())
self._update_runtime_info(p, {
self.processor_name: runtime_info})
finally:
# free memory allocated in cached io.Reader
# attributes. this can get substantial for
# long runs.
kwargs.clear()
    def _trigger_process(self, chunk):
        """Process one chunk of (index, path, kwargs) under one lock.

        Yields (index, path) for every finished page. Depending on
        --lock-level, the lock actor is either the page itself or this
        processor's task name.
        """
        if self._lock_level == "PAGE":
            lock_actor_name = "page"
        elif self._lock_level == "TASK":
            lock_actor_name = self.processor_name
        else:
            raise ValueError(self._lock_level)

        with self._mutex.lock(
                lock_actor_name,
                [str(p) for _, p, _ in chunk]) as locked:
            for i, p, kwargs in chunk:
                # register the page in the shared work set so the
                # watchdog can report stuck pages across processes
                global_work_set.add(i)
                try:
                    self._trigger_process1(p, kwargs, locked)
                finally:
                    global_work_set.remove(i)
                yield i, p
    def _trigger_process_async(self, chunk):
        """Pool entry point: process *chunk* eagerly, return its results.

        The shared stop watch is reset after every page so the watchdog
        sees progress even in the middle of a long chunk.
        """
        results = []
        for i, p in self._trigger_process(chunk):
            results.append((i, p))
            global_stop_watch.reset()
        return results
def _process_queue(self, queued):
global global_work_set
global_work_set = SharedMemoryWorkSet(
lambda i: queued[i][1], max(1, self._processes))
with self._profiler or nullcontext():
chunked_queue_gen = chunks(queued, self._lock_chunk_size)
def iprogress(i):
nd = len(str(len(queued)))
return f"[{str(i + 1).rjust(nd)} / {len(queued)}]"
if self._processes > 1:
with multiprocessing.Pool(self._processes, maxtasksperchild=4) as pool:
watchdog = Watchdog(
pool=pool,
stop_watch=global_stop_watch,
work_set=global_work_set,
timeout=self._timeout)
watchdog.start()
with tqdm(total=len(queued), disable=self._print_paths) as progress:
for chunk in pool.imap_unordered(
self._trigger_process_async, chunked_queue_gen):
if self._print_paths:
for i, p in chunk:
print(f"{iprogress(i)} {p}", flush=True)
else:
progress.update(len(chunk))
global_stop_watch.reset()
if watchdog.is_cancelled():
watchdog.kill()
sys.exit(1)
else:
watchdog.set_is_done()
else:
with tqdm(total=len(queued), disable=self._print_paths) as progress:
for chunk in chunked_queue_gen:
for i, p in self._trigger_process(chunk):
if self._print_paths:
print(f"{iprogress(i)} {p}", flush=True)
else:
progress.update(1)
    def _build_queue(self, path):
        """Collect all processable pages under *path*.

        *path* may be a directory (walked recursively, skipping "*.out"
        folders) or a ".txt" file listing one path per line. Returns a
        list of (index, path, kwargs) tuples ready for _process_queue.
        """
        path = Path(path)
        if not path.exists():
            raise FileNotFoundError("%s does not exist." % path)

        queued = []
        counts = dict(images=0)

        def add_path(p):
            # filter and queue one candidate file
            if not p.exists():
                print("skipping %s: path does not exist." % p)
                return
            if self._name and not re.search(self._name, str(p)):
                return
            if not self.is_image(p):
                if self._verbose:
                    print("skipping %s: not an image." % p)
                return
            counts['images'] += 1
            if not self.should_process(p):
                if self._verbose:
                    print("skipping %s: should_process is False" % p)
                return
            kwargs = self.prepare_process(p)
            # prepare_process returns False when inputs are not ready
            if kwargs is not False:
                queued.append((len(queued), p, kwargs))

        if not path.is_dir():
            if path.suffix == ".txt":
                with open(path, "r") as f:
                    for line in f:
                        line = line.strip()
                        if line:
                            add_path(Path(line))
            else:
                raise FileNotFoundError(
                    "%s is not a valid path or text file of paths." % path)
        else:
            print(f"scanning {path}... ", flush=True, end="")
            with Spinner(disable=self._plain):
                for folder, dirs, filenames in os.walk(path):
                    folder = Path(folder)
                    if folder.name.endswith(".out"):
                        # derived output folders are never scanned
                        dirs.clear()
                        continue
                    else:
                        dirs.sort()
                    for filename in sorted(filenames):
                        add_path(folder / filename)
            print("done.", flush=True)

        print(f"{counts['images']} documents found, {len(queued)} ready to process.")
        return queued
    def traverse(self, path: Path):
        """Run this processor over every page found under *path*.

        Sets up the configured mutex (database, file based, or none),
        builds the work queue, and processes it; the mutex reference is
        always dropped again, even on error.
        """
        print(f"running {self.processor_name}.", flush=True)
        queued = self._build_queue(path)

        if self._lock_strategy == "DB":
            if self._lock_database:
                db_path = Path(self._lock_database)
            elif Path(path).is_dir():
                db_path = Path(path) / "origami.lock.db"
            else:
                # a single file was given: keep the lock db next to it
                db_path = Path(path).parent / "origami.lock.db"
            self._mutex = DatabaseMutex(
                db_path, timeout=self._lock_timeout)
            # drop stale locks left over from crashed runs
            self._mutex.clear_locks(self._max_lock_age)
        elif self._lock_strategy == "FILE":
            self._mutex = FileMutex()
        elif self._lock_strategy == "NONE":
            self._mutex = DummyMutex()
        else:
            raise ValueError(self._lock_strategy)

        try:
            self._process_queue(queued)
        finally:
            self._mutex = None

        if self._profiler:
            self._profiler.run_viewer()
    def process(self, p: Path):
        """Process one page image at *p*; meant to be overridden."""
        pass
    def lock_or_open(self, path, mode):
        """Open *path*, holding an exclusive lock under FILE locking.

        With the FILE strategy, the open fails fast when another worker
        already holds the lock; otherwise this is a plain open().
        """
        if self._lock_strategy == "FILE":
            return portalocker.Lock(
                path,
                mode,
                flags=portalocker.LOCK_EX,
                timeout=1,
                fail_when_locked=True)
        else:
            return open(path, mode)
def _update_json(self, page_path, artifact, updates):
try:
data_path = find_data_path(page_path)
json_path = data_path / artifact.filename()
new_json_path = json_path.parent / (
json_path.stem + ".updated" + json_path.suffix)
if new_json_path.exists():
os.remove(new_json_path)
if json_path.exists():
with open(json_path, "r") as f:
file_data = f.read()
data = json.loads(file_data)
else:
data = dict()
for k, v in updates.items():
if v is None:
del data[k]
else:
data[k] = v
with open(new_json_path, "w") as f:
json.dump(data, f)
if json_path.exists():
os.remove(json_path)
os.rename(new_json_path, json_path)
except:
logging.error(traceback.format_exc())
    def _update_runtime_info(self, page_path, updates):
        """Record per-processor status info in the page's runtime artifact."""
        self._update_json(page_path, Artifact.RUNTIME, updates)
| poke1024/origami | origami/batch/core/processor.py | processor.py | py | 16,273 | python | en | code | 69 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "PySide6.QtGui.QGuiApplication",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PySide6.QtGui",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "time.time... |
29184316598 | import sys
import numpy as np
import pyautogui
import win32api, win32con, win32gui
import cv2
import time
import torch
import time
# class indices produced by the YOLOv5 model
class_names = [ 'counter-terrorist', 'terrorist' ]
# team to target (overridden from sys.argv[1] in __main__)
opponent = 'terrorist'
# box colors for enemies and allies
opponent_color = (255, 0, 0)
ally_color = (0, 128, 255)
# model weights and inference resolution (overridden in __main__)
model_path = 'best-640.pt'
image_size = 640
# mouse-movement scale per inference resolution (empirical values)
scale_map = { 1280:1.7, 640:4.5, 320:8 }
def load_frame():
    """Grab the current CS:GO frame as a numpy array.

    Finds the game window by its exact title, screenshots its client
    area (the 27 px offset presumably skips the title bar -- confirm
    for other window themes/DPI settings), and resizes the result to
    the model's 16:9 input width.
    """
    hwnd = win32gui.FindWindow(None, 'Counter-Strike: Global Offensive - Direct3D 9')
    rect = win32gui.GetWindowRect(hwnd)
    region = rect[0], rect[1] + 27, rect[2] - rect[0], rect[3] - rect[1] - 27
    frame = np.array(pyautogui.screenshot(region=region))
    frame = cv2.resize(frame, (image_size, int(image_size / 16 * 9)))
    return frame
def process_frame(frame):
    """Letterbox *frame* onto a white image_size x image_size canvas.

    The frame is pasted vertically centered and left aligned; the
    remaining canvas stays white (255).
    """
    h, w = frame.shape[:2]
    canvas = np.full((image_size, image_size, 3), 255, dtype=np.uint8)
    top = int((image_size - h) / 2)
    canvas[top:top + h, :w] = frame
    return canvas
def is_opponent(label):
    """True when class index *label* maps to the configured opponent team."""
    return opponent == class_names[label]
def find_closest(detected_boxes):
    """Return the index of the widest (i.e. visually closest) box.

    Each box is an (x1, y1, x2, y2) tuple with x1 <= x2; the widest
    bounding box is assumed to belong to the closest enemy.

    Bug fix: the width used to be computed as x1 - x2, which is never
    positive, so the comparison against the running maximum never
    fired and index 0 was always returned.
    """
    widest = 0
    closest_at = 0
    for i, box in enumerate(detected_boxes):
        x1, _, x2, _ = box
        w = int(x2 - x1)
        if w > widest:
            closest_at = i
            widest = w
    return closest_at
if __name__ == "__main__":
    # CLI: python aimbot.py <opponent-class-name> <inference-size>
    opponent = sys.argv[1]
    image_size = int(sys.argv[2])
    model_path = "model/best-%d.pt" % image_size
    # load custom YOLOv5 weights via torch hub
    model = torch.hub.load('ultralytics/yolov5', 'custom', path=model_path)

    while True:
        frame = load_frame()
        frame = process_frame(frame)
        height, width = frame.shape[:2]
        display_frame = cv2.resize(frame, (500, 500))

        # Detection (timed; prints inference seconds per frame)
        start_time = time.time()
        results = model(frame)
        print(time.time() - start_time)
        rl = results.xyxy[0].tolist()

        # Check every detected object
        detected_boxes = []
        color = (0, 0, 0)
        for item in rl:
            x1, y1, x2, y2, confidence, label = item
            if confidence > 0.5:
                if is_opponent(int(label)):
                    detected_boxes.append((x1, y1, x2, y2))
                    color = opponent_color
                else:
                    color = ally_color
                # draw the box scaled from model coords to the 500px preview
                cv2.rectangle(display_frame, (int(x1/image_size*500), int(y1/image_size*500)), (int(x2/image_size*500), int(y2/image_size*500)), color, 1)
        print("Detected:", len(detected_boxes), "enemies.")

        # Check Closest
        if len(detected_boxes) >= 1:
            closest_at = find_closest(detected_boxes)
            x1, y1, x2, y2 = detected_boxes[closest_at]
            # offset from screen center to the box center
            x = int((x1 + x2) / 2 - width / 2)
            y = int((y1 + y2) / 2 - height / 2) - (y2 - y1) * 0.43  # For head shot
            # scale pixel offsets to mouse deltas (empirical per resolution)
            scale = scale_map[image_size]
            x = int(x * scale)
            y = int(y * scale)

            # Move mouse and shoot
            win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, x, y, 0, 0)
            time.sleep(0.05)
            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
            time.sleep(0.1)
            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)

        display_frame = cv2.cvtColor(display_frame, cv2.COLOR_BGR2RGB)
        cv2.imshow("frame", display_frame)
        cv2.waitKey(1)
{
"api_name": "win32gui.FindWindow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "win32gui.GetWindowRect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyautogui.scre... |
19130481877 | from datetime import datetime
import json
class Observation():
    """A single sensor reading, formatted for the REC edge payload."""

    def __init__(self, observationTime, numericValue, stringValue, booleanValue, sensorId):
        """Store the reading; *observationTime* must be a datetime."""
        # US-style timestamp string required by the message schema
        self.observationTime = observationTime.strftime("%m/%d/%Y, %H:%M:%S")
        self.Value = numericValue
        self.valueString = stringValue
        self.valueBoolean = booleanValue
        self.sensorId = sensorId
class RecEdgeMessage():
    """Envelope for a batch of observations in RealEstateCore 3.2 format."""

    def __init__(self, deviceId, observations):
        self.format = "rec3.2"
        self.deviceId = deviceId
        self.observations = observations

    def toJSON(self):
        """Serialize this message (recursively, via __dict__) as pretty JSON."""
        # vars(o) is o.__dict__, so nested Observation objects serialize too
        return json.dumps(self, default=vars, sort_keys=True, indent=4)
| midcoreboot/RaspberryPiSensors | REC.py | REC.py | py | 675 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 18,
"usage_type": "call"
}
] |
16919406854 | from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.decorators import api_view
import io
from rest_framework import status
from todos.models import Task
from .serializers import TaskSerializer
@api_view(['GET'])
def get_task_list(request):
    """Return every task, serialized, with HTTP 200."""
    serializer = TaskSerializer(Task.objects.all(), many=True)
    return Response(serializer.data, status=status.HTTP_200_OK)
@api_view(['GET'])
def get_task_detail(request, pk):
    """Return a single task by primary key.

    Responds 404 for unknown pks instead of letting Task.DoesNotExist
    propagate as a server error.
    """
    try:
        task = Task.objects.get(pk=pk)
    except Task.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    serializer = TaskSerializer(task)
    return Response(serializer.data, status=status.HTTP_200_OK)
@api_view(['POST'])
def create_task(request):
    """Create a task from the request payload; 201 on success, 400 otherwise."""
    serializer = TaskSerializer(data=request.data, context={'request': request})
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    serializer.save()
    return Response(serializer.data, status=status.HTTP_201_CREATED)
@api_view(['DELETE'])
def delete_task(request, pk):
    """Delete a task; 204 on success, 404 when the task does not exist."""
    try:
        task = Task.objects.get(pk=pk)
    except Task.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    task.delete()
    return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['PUT', 'PATCH'])
def update_task(request, pk):
    """Update a task: PUT replaces all fields, PATCH updates a subset.

    Responds 404 for unknown pks instead of raising Task.DoesNotExist;
    the two previously duplicated method branches are merged, with only
    the serializer's ``partial`` flag differing.
    """
    try:
        task = Task.objects.get(pk=pk)
    except Task.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    # PATCH allows partial payloads; PUT requires the full representation
    partial = request.method == 'PATCH'
    serializer = TaskSerializer(task, data=request.data, partial=partial)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def completed(request, pk):
    """Partially update a task (e.g. toggle completion); 404 for unknown pks."""
    try:
        task = Task.objects.get(pk=pk)
    except Task.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    serializer = TaskSerializer(task, data=request.data, partial=True)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
{
"api_name": "todos.models.Task.objects.all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "todos.models.Task.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "todos.models.Task",
"line_number": 13,
"usage_type": "name"
},
{
"ap... |
30504364246 | import numpy as np
import cv2
from sklearn.cluster import KMeans
import pdb
from skimage.util import montage
import matplotlib.pyplot as plt
''' K means clustering with 30 classes: 26 letters, 1 blank time, 1 double letter, 1 triple letter, 1 empty'''
# gather images that have been labelled
dir = '/Users/Alex/Desktop/Summer 2019/scrabble/data/'
# since scrabble is 15 by 15, i should be divisible by 15
i = 825
# i / 15 is the width and height (pixels) of one board square
s = int(i / 15)
# flattened grayscale tiles to be clustered
data = []
counter = 0
# number of boards to cluster
num_boards = 1

# labels.txt holds "<image name> <4 corner points>" per line. Using a
# context manager so the handle is always closed (it was leaked before).
with open('labels.txt') as f:
    for line in f:
        strr = ''
        # split the line in the text file
        x = line.split()
        # read the board photo as grayscale and normalize its size
        img = cv2.imread(dir + x[0], 0)
        img = cv2.resize(img, (640, 480))
        # the remaining fields are the four corner points
        pts1 = strr.join(x[1:])
        # SECURITY: eval() executes arbitrary code from labels.txt; OK
        # for a local hand-written file, but ast.literal_eval would be
        # the safe equivalent for this point-list syntax.
        pts1 = np.float32(eval(pts1))
        # pts1 are the corners and pts2 is the width and height
        pts2 = np.float32([[0, 0], [i, 0], [0, i], [i, i]])
        # M is the perspective matrix; dst is the flattened board image
        M = cv2.getPerspectiveTransform(pts1, pts2)
        dst = cv2.warpPerspective(img, M, (i, i))
        # extract the 15 x 15 tiles as flattened float vectors
        for j in range(15):
            for k in range(15):
                square = np.float32(dst[s * j: s + s * j, s * k: s + s * k])
                data.append(square.reshape((-1)))
        counter += 1
        if counter == num_boards:
            break

features = np.asarray(data)
kmeans = KMeans(n_clusters=30, random_state=0, max_iter=500).fit(features)
# indices of tiles assigned to one sample cluster (kept for inspection)
inds = np.where(kmeans.labels_ == 13)

# show all 225 tiles of the first board as a 15 x 15 montage
fs = np.uint8(features)
fs = fs.reshape((225, 55, -1))
y = montage(fs, grid_shape=(15, 15))
plt.imshow(y)
plt.show()
{
"api_name": "cv2.imread",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number"... |
33149847347 | import argparse
import collections
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy
import sys
import time
import json
import zenoh
# --- Command line argument parsing --- --- --- --- --- ---
parser = argparse.ArgumentParser(
    prog='z_plot',
    description='zenoh plotting example')
parser.add_argument('--mode', '-m', type=str, choices=['peer', 'client'],
                    help='The zenoh session mode.')
parser.add_argument('--connect', '-e', type=str, metavar='ENDPOINT', action='append',
                    help='Endpoints to connect to.')
parser.add_argument('--listen', '-l', type=str, metavar='ENDPOINT', action='append',
                    help='Endpoints to listen on.')
parser.add_argument('-k', '--key', type=str, default='demo/random',
                    help='The key expression to subscribe to.')
parser.add_argument('-i', '--history', type=float, default=10.0,
                    help='The history depth in seconds.')
parser.add_argument('-c', '--config', type=str, metavar='FILE',
                    help='A zenoh configuration file.')
args = parser.parse_args()

# build the zenoh session configuration; CLI flags override the file
conf = zenoh.config_from_file(args.config) if args.config is not None else zenoh.Config()
if args.mode is not None:
    conf.insert_json5(zenoh.config.MODE_KEY, json.dumps(args.mode))
if args.connect is not None:
    conf.insert_json5(zenoh.config.CONNECT_KEY, json.dumps(args.connect))
if args.listen is not None:
    conf.insert_json5(zenoh.config.LISTEN_KEY, json.dumps(args.listen))

# one matplotlib Line2D per key expression, keyed by its string form
lines = {}
fig, ax = plt.subplots()
# x axis carries timestamps
ax.xaxis.axis_date()
def listener(sample):
    """Zenoh subscriber callback: append *sample* to its line's data.

    A new plot line is created per key expression on first sight. The
    sample's own timestamp is used when present, arrival time
    otherwise; points older than ``--history`` seconds are dropped.
    """
    if not str(sample.key_expr) in lines:
        lines[str(sample.key_expr)] = ax.plot([], [], '-o', label=str(sample.key_expr))[0]
    now = time.time()
    xdata, ydata = lines[str(sample.key_expr)].get_data()
    xdata = numpy.append(xdata, datetime.fromtimestamp(now if sample.timestamp is None else sample.timestamp.time))
    # payload is assumed to be a UTF-8 encoded float -- confirm publisher
    ydata = numpy.append(ydata, float(sample.payload.decode("utf-8")))
    lines[str(sample.key_expr)].set_data(zip(*filter(lambda t: t[0].timestamp() > now - args.history, zip(xdata, ydata))))
def update(_):
    """FuncAnimation tick: rescale the axes and refresh the legend."""
    if len(lines):
        ax.axes.relim()
        ax.axes.autoscale_view(True,True,True)
        # legend in the upper-left corner
        ax.legend(loc=2)
zenoh.init_logger()
# fix: user-facing message had a typo ("Openning")
print("Opening session...")
z = zenoh.open(conf)

print("Declaring Subscriber on '{}'...".format(args.key))
sub = z.declare_subscriber(args.key, listener)

# FuncAnimation must stay referenced, or it is garbage collected and
# the live plot stops updating.
ani = FuncAnimation(fig, update)
plt.show()
| eclipse-zenoh/zenoh-demos | plotting/zplot/z_plot.py | z_plot.py | py | 2,515 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "zenoh.config_from_file",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "zenoh.Config",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "zenoh.con... |
74477036264 | from flask import Flask
from flask import render_template, request, jsonify
from engine.inv_ind import get_inv_ind
from engine.retrieval import get_retrieved_docs
app = Flask(__name__)
@app.route('/')
def hello():
    """Render the landing page."""
    return render_template("default.html")
@app.route('/index', methods=["GET", "POST"])
def index_files():
    """Build the inverted index for the chosen corpus and show it.

    POST expects form fields ``corpus``, ``stem`` and ``sw``. GET now
    renders the landing page; previously the view returned None on GET,
    which made Flask raise a server error.
    """
    if request.method == 'POST':
        form_r = request.form.to_dict(flat=False)
        print("form_r", form_r)
        inv_ind, docs, stats = get_inv_ind(
            corpus=form_r["corpus"][0],
            do_stem=form_r["stem"][0],
            do_stop=form_r["sw"][0])
        print(stats)
        return render_template("index.html", data=inv_ind, docs=docs, stats=stats, opts=form_r)
    return render_template("default.html")
@app.route('/result', methods=["GET", "POST"])
def send_result():
    """Run a retrieval query and render the matching documents.

    POST expects form fields ``query``, ``corpus``, ``stem`` and
    ``sw``. GET renders the landing page (previously the view returned
    None on GET). An empty result list no longer raises IndexError on
    ``docs[0]``; it is reported as zero hits like the "null" sentinel.
    """
    if request.method == 'POST':
        form_r = request.form.to_dict(flat=False)
        print(form_r)
        docs = get_retrieved_docs(
            form_r["query"][0], form_r["corpus"][0],
            form_r["stem"][0], form_r["sw"][0])
        num_docs = len(docs)
        # the engine signals "no results" with a single {"null": ...} entry
        if not docs or list(docs[0].keys())[0] == "null":
            num_docs = 0
        return render_template("display.html", docs=docs, query=form_r["query"][0], num_docs=num_docs)
    return render_template("default.html")
if __name__ == '__main__':
    # uncomment the host argument to accept external connections
    # app.run(host='0.0.0.0')
    app.run()
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.requ... |
16172950617 | """This script creates a regression test over metarl-TRPO and baselines-TRPO.
Unlike metarl, baselines doesn't set max_path_length. It keeps steps the action
until it's done. So we introduced tests.wrappers.AutoStopEnv wrapper to set
done=True when it reaches max_path_length. We also need to change the
metarl.tf.samplers.BatchSampler to smooth the reward curve.
"""
import datetime
import multiprocessing
import os.path as osp
import random
from baselines import logger as baselines_logger
from baselines.bench import benchmarks
from baselines.common import set_global_seeds
from baselines.common.tf_util import _PLACEHOLDER_CACHE
from baselines.logger import configure
from baselines.ppo1.mlp_policy import MlpPolicy
from baselines.trpo_mpi import trpo_mpi
import dowel
from dowel import logger as dowel_logger
import gym
import pytest
import tensorflow as tf
import torch
from metarl.envs import normalize
from metarl.experiment import deterministic, LocalRunner
from metarl.np.baselines import LinearFeatureBaseline
from metarl.tf.algos import TRPO
from metarl.tf.baselines import GaussianMLPBaseline
from metarl.tf.envs import TfEnv
from metarl.tf.experiment import LocalTFRunner
from metarl.tf.optimizers import FirstOrderOptimizer
from metarl.tf.policies import GaussianMLPPolicy
from metarl.torch.algos import TRPO as PyTorch_TRPO
from metarl.torch.policies import GaussianMLPPolicy as PyTorch_GMP
from tests import benchmark_helper
from tests.fixtures import snapshot_config
import tests.helpers as Rh
from tests.wrappers import AutoStopEnv
# Hyper-parameters shared by every TRPO implementation under test.
hyper_parameters = {
    'hidden_sizes': [64, 32],  # following openai/spinning
    'max_kl': 0.01,            # TRPO trust-region size
    'gae_lambda': 0.97,        # GAE advantage smoothing
    'discount': 0.99,
    'max_path_length': 100,
    'cg_iters': 10,            # conjugate-gradient iterations
    'batch_size': 2048,        # env steps per epoch
    'n_epochs': 500,
    'n_trials': 10,            # random seeds per environment
    'training_epochs': 3,
    'learning_rate': 1e-3
}
class TestBenchmarkPPO:  # pylint: disable=too-few-public-methods
    """Compare benchmarks between metarl and baselines."""

    @pytest.mark.huge
    def test_benchmark_trpo(self):  # pylint: disable=no-self-use
        """Compare benchmarks between metarl and baselines."""
        mujoco1m = benchmarks.get_benchmark('Mujoco1M')
        timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')
        benchmark_dir = './data/local/benchmarks/trpo/%s/' % timestamp
        result_json = {}
        for task in mujoco1m['tasks']:
            env_id = task['env_id']
            env = gym.make(env_id)
            # baselines has no max_path_length; AutoStopEnv emulates it
            baseline_env = AutoStopEnv(env_name=env_id, max_path_length=100)
            seeds = random.sample(range(100), hyper_parameters['n_trials'])
            task_dir = osp.join(benchmark_dir, env_id)
            plt_file = osp.join(benchmark_dir,
                                '{}_benchmark.png'.format(env_id))
            baselines_csvs = []
            metarl_tf_csvs = []
            metarl_pytorch_csvs = []
            for trial in range(hyper_parameters['n_trials']):
                # clear baselines' TF placeholder cache between trials
                _PLACEHOLDER_CACHE.clear()
                seed = seeds[trial]
                trial_dir = task_dir + '/trial_%d_seed_%d' % (trial + 1, seed)
                metarl_tf_dir = trial_dir + '/metarl'
                metarl_pytorch_dir = trial_dir + '/metarl_pytorch'
                baselines_dir = trial_dir + '/baselines'
                # Run metarl algorithms
                # env.reset()
                # metarl_pytorch_csv = run_metarl_pytorch(
                #     env, seed, metarl_pytorch_dir)
                # pylint: disable=not-context-manager
                with tf.Graph().as_default():
                    env.reset()
                    metarl_tf_csv = run_metarl(env, seed, metarl_tf_dir)
                    # Run baseline algorithms
                    baseline_env.reset()
                    baselines_csv = run_baselines(baseline_env, seed,
                                                  baselines_dir)
                metarl_tf_csvs.append(metarl_tf_csv)
                # metarl_pytorch_csvs.append(metarl_pytorch_csv)
                baselines_csvs.append(baselines_csv)
            env.close()
            # benchmark_helper.plot_average_over_trials(
            #     [baselines_csvs, metarl_tf_csvs, metarl_pytorch_csvs],
            #     [
            #         'eprewmean', 'Evaluation/AverageReturn',
            #         'Evaluation/AverageReturn'
            #     ],
            #     plt_file=plt_file,
            #     env_id=env_id,
            #     x_label='Iteration',
            #     y_label='Evaluation/AverageReturn',
            #     names=['baseline', 'metarl-TensorFlow', 'metarl-PyTorch'],
            # )
            benchmark_helper.plot_average_over_trials_with_x(
                [baselines_csvs, metarl_tf_csvs],
                ['EpRewMean', 'Evaluation/AverageReturn'],
                ['TimestepsSoFar', 'TotalEnvSteps'],
                plt_file=plt_file,
                env_id=env_id,
                x_label='EnvTimeStep',
                y_label='Performance',
                names=['baseline', 'metarl-TensorFlow'],
            )
            # Rh.relplot(g_csvs=metarl_tf_csvs,
            #            b_csvs=baselines_csvs,
            #            g_x='TotalEnvSteps',
            #            g_y='Evaluation/AverageReturn',
            #            g_z='MetaRL',
            #            b_x='TimestepsSoFar',
            #            b_y='EpRewMean',
            #            b_z='Openai/Baseline',
            #            trials=hyper_parameters['n_trials'],
            #            seeds=seeds,
            #            plt_file=plt_file,
            #            env_id=env_id,
            #            x_label='EnvTimeStep',
            #            y_label='Performance')
            # result_json[env_id] = benchmark_helper.create_json(
            #     [baselines_csvs, metarl_tf_csvs, metarl_pytorch_csvs],
            #     seeds=seeds,
            #     trials=hyper_parameters['n_trials'],
            #     xs=['nupdates', 'Iteration', 'Iteration'],
            #     ys=[
            #         'eprewmean', 'Evaluation/AverageReturn',
            #         'Evaluation/AverageReturn'
            #     ],
            #     factors=[hyper_parameters['batch_size']] * 3,
            #     names=['baseline', 'metarl-TF', 'metarl-PT'])
        Rh.write_file(result_json, 'TRPO')
def run_metarl_pytorch(env, seed, log_dir):
    """Create MetaRL PyTorch TRPO model and run training.

    (Note: earlier docstring said "PPO"; the algorithm constructed below is
    PyTorch_TRPO.)

    Args:
        env (dict): Environment of the task.
        seed (int): Random positive integer for the trial.
        log_dir (str): Log dir path.

    Returns:
        str: Path to output csv file
    """
    # Wrap/normalize the environment the same way as the TF run for parity.
    env = TfEnv(normalize(env))
    deterministic.set_seed(seed)
    runner = LocalRunner(snapshot_config)
    policy = PyTorch_GMP(env.spec,
                         hidden_sizes=hyper_parameters['hidden_sizes'],
                         hidden_nonlinearity=torch.tanh,
                         output_nonlinearity=None)
    baseline = LinearFeatureBaseline(env_spec=env.spec)
    algo = PyTorch_TRPO(env_spec=env.spec,
                        policy=policy,
                        baseline=baseline,
                        max_kl_step=hyper_parameters['max_kl'],
                        max_path_length=hyper_parameters['max_path_length'],
                        discount=hyper_parameters['discount'],
                        gae_lambda=hyper_parameters['gae_lambda'])
    # Set up logger manually since we are not using run_experiment.
    tabular_log_file = osp.join(log_dir, 'progress.csv')
    dowel_logger.add_output(dowel.StdOutput())
    dowel_logger.add_output(dowel.CsvOutput(tabular_log_file))
    dowel_logger.add_output(dowel.TensorBoardOutput(log_dir))
    runner.setup(algo, env)
    runner.train(n_epochs=hyper_parameters['n_epochs'],
                 batch_size=hyper_parameters['batch_size'])
    # Detach outputs so the next trial starts with a clean logger.
    dowel_logger.remove_all()
    return tabular_log_file
def run_metarl(env, seed, log_dir):
    """Create MetaRL TensorFlow TRPO model and run training.

    Args:
        env (dict): Environment of the task.
        seed (int): Random positive integer for the trial.
        log_dir (str): Log dir path.

    Returns:
        str: Path to output csv file
    """
    deterministic.set_seed(seed)
    with LocalTFRunner(snapshot_config) as tf_runner:
        wrapped_env = TfEnv(normalize(env))
        gaussian_policy = GaussianMLPPolicy(
            env_spec=wrapped_env.spec,
            hidden_sizes=hyper_parameters['hidden_sizes'],
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
        )
        value_baseline = GaussianMLPBaseline(
            env_spec=wrapped_env.spec,
            regressor_args=dict(
                hidden_sizes=hyper_parameters['hidden_sizes'],
                use_trust_region=False,
            ),
        )
        trpo_algo = TRPO(env_spec=wrapped_env.spec,
                         policy=gaussian_policy,
                         baseline=value_baseline,
                         max_path_length=hyper_parameters['max_path_length'],
                         discount=hyper_parameters['discount'],
                         gae_lambda=hyper_parameters['gae_lambda'],
                         max_kl_step=hyper_parameters['max_kl'])
        # Wire up the logger by hand since run_experiment is not used here.
        csv_path = osp.join(log_dir, 'progress.csv')
        for sink in (dowel.CsvOutput(csv_path),
                     dowel.StdOutput(),
                     dowel.TensorBoardOutput(log_dir)):
            dowel_logger.add_output(sink)
        tf_runner.setup(trpo_algo, wrapped_env)
        tf_runner.train(n_epochs=hyper_parameters['n_epochs'],
                        batch_size=hyper_parameters['batch_size'])
        # Detach all sinks so the next trial starts with a clean logger.
        dowel_logger.remove_all()
        return csv_path
def run_baselines(env, seed, log_dir):
    """Create OpenAI Baselines TRPO model and run training.

    Args:
        env (dict): Environment of the task.
        seed (int): Random positive integer for the trial.
        log_dir (str): Log dir path.

    Returns:
        str: Path to output csv file
    """
    # Limit TF thread pools to half the cores (at least 1).
    ncpu = max(multiprocessing.cpu_count() // 2, 1)
    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=ncpu,
                            inter_op_parallelism_threads=ncpu)
    # NOTE(review): the session is entered via __enter__() and never exited /
    # closed; it stays the default session for the rest of the process.
    tf.compat.v1.Session(config=config).__enter__()
    # Set up logger for baselines
    configure(dir=log_dir, format_strs=['stdout', 'log', 'csv', 'tensorboard'])
    baselines_logger.info('rank {}: seed={}, logdir={}'.format(
        0, seed, baselines_logger.get_dir()))
    set_global_seeds(seed)
    def policy_fn(name, ob_space, ac_space):
        """Create policy for baselines.

        Args:
            name (str): Policy name.
            ob_space (gym.spaces.Box) : Observation space.
            ac_space (gym.spaces.Box) : Action space.

        Returns:
            baselines.ppo1.mlp_policy: MLP policy for baselines.
        """
        return MlpPolicy(name=name,
                         ob_space=ob_space,
                         ac_space=ac_space,
                         hid_size=hyper_parameters['hidden_sizes'][0],
                         num_hid_layers=len(hyper_parameters['hidden_sizes']))
    # Mirror the MetaRL hyper-parameters so the benchmark is apples-to-apples.
    trpo_mpi.learn(env,
                   policy_fn,
                   timesteps_per_batch=hyper_parameters['batch_size'],
                   max_kl=hyper_parameters['max_kl'],
                   cg_iters=hyper_parameters['cg_iters'],
                   # cg_damping=0.1,
                   max_timesteps=(hyper_parameters['batch_size'] *
                                  hyper_parameters['n_epochs']),
                   gamma=hyper_parameters['discount'],
                   lam=hyper_parameters['gae_lambda'],
                   vf_iters=hyper_parameters['training_epochs'],
                   vf_stepsize=hyper_parameters['learning_rate'])
    # Baselines' `configure` above writes progress.csv into log_dir.
    return osp.join(log_dir, 'progress.csv')
| icml2020submission6857/metarl | tests/benchmarks/metarl/tf/algos/test_benchmark_trpo.py | test_benchmark_trpo.py | py | 12,332 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "baselines.bench.benchmarks.get_benchmark",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "baselines.bench.benchmarks",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 65,
"usage_type": "call"
},
... |
27550085200 | import datetime
from imap_tools import EmailAddress
DATA = dict(
subject='double_fields',
from_='kaukinvk@yandex.ru',
to=('aa@aa.ru', 'bb@aa.ru'),
cc=('cc@aa.ru', 'dd@aa.ru'),
bcc=('zz1@aa.ru', 'zz2@aa.ru'),
reply_to=('foma1@company.ru', 'petr1@company.ru', 'foma2@company.ru', 'petr2@company.ru'),
date=datetime.datetime(2019, 5, 1, 12, 20),
date_str='Wed, 01 May 2019 12:20',
text='',
html='<div>double_fields</div>',
headers={'to': ('aa@aa.ru', 'bb@aa.ru', ''), 'cc': ('cc@aa.ru', 'dd@aa.ru'), 'bcc': ('zz1@aa.ru', 'zz2@aa.ru'), 'reply-to': ('=?UTF-8?B?0L/RgNC40LLQtdGC?= <foma1@company.ru>,\r\n =?UTF-8?B?0L/QvtC60LA=?= <petr1@company.ru>', '=?UTF-8?B?0L/RgNC40LLQtdGC?= <foma2@company.ru>,\r\n =?UTF-8?B?0L/QvtC60LA=?= <petr2@company.ru>'), 'from': ('=?utf-8?B?0JrQsNGD0LrQuNC9INCS0LvQsNC00LjQvNC40YA=?= <kaukinvk@yandex.ru>',), 'envelope-from': ('kaukinvk@yandex.ru',), 'subject': ('double_fields',), 'mime-version': ('1.0',), 'date': ('Wed, 01 May 2019 12:20',), 'message-id': ('<8872861556695229@myt5-262fb1897c00.qloud-c.yandex.net>',), 'content-type': ('multipart/mixed;\r\n\tboundary="----==--bound.887287.myt5-262fb1897c00.qloud-c.yandex.net"',), 'return-path': ('kaukinvk@yandex.ru',)},
attachments=[],
from_values=EmailAddress(name='Каукин Владимир', email='kaukinvk@yandex.ru'),
to_values=(EmailAddress(name='', email='aa@aa.ru'), EmailAddress(name='', email='bb@aa.ru')),
cc_values=(EmailAddress(name='', email='cc@aa.ru'), EmailAddress(name='', email='dd@aa.ru')),
bcc_values=(EmailAddress(name='', email='zz1@aa.ru'), EmailAddress(name='', email='zz2@aa.ru')),
reply_to_values=(EmailAddress(name='привет', email='foma1@company.ru'), EmailAddress(name='пока', email='petr1@company.ru'), EmailAddress(name='привет', email='foma2@company.ru'), EmailAddress(name='пока', email='petr2@company.ru')),
) | ikvk/imap_tools | tests/messages_data/double_fields.py | double_fields.py | py | 1,917 | python | en | code | 608 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "imap_tools.EmailAddress",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "imap_tools.EmailAddress",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "ima... |
27648052145 | from sqlalchemy import create_engine, text
# SECURITY NOTE(review): the full database DSN -- including username and
# password -- is hard-coded in source and therefore exposed to anyone with
# repo access. Move it to an environment variable and rotate the password.
db_connection_string = "mysql+pymysql://zi6id5p25yfq60ih6t1y:pscale_pw_OG991jS2It86MJJqrCYvCmcJ5psfFaYkxyOLA9GoTwy@ap-south.connect.psdb.cloud/enantiomer?charset=utf8mb4"
# The host requires TLS; point the driver at the system CA bundle.
engine = create_engine(db_connection_string,
                       connect_args={"ssl": {
                           "ssl_ca": "/etc/ssl/cert.pem"
                       }})
def load_jobs_from_db():
    """Fetch every row from the `jobs` table.

    Returns:
        list: all rows of `jobs` (SQLAlchemy row objects), materialized so
        they remain usable after the connection is closed.
    """
    with engine.connect() as conn:
        result = conn.execute(text("select * from jobs"))
        jobs = []
        # Copy rows out of the result proxy before the `with` block closes it.
        for row in result.all():
            jobs.append(row)
        return jobs | ismaehl-2002/enantiomer-website-v2 | database.py | database.py | py | 579 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.text",
"line_number": 13,
"usage_type": "call"
}
] |
24688793907 | from typing import Sequence
class NetStats:
    """Facade over a backend-specific statistics implementation.

    Dispatches to a torch / spikingjelly implementation, or to a
    caller-supplied implementation class, and forwards the stats queries.
    """

    def __init__(
        self, net, input_shape: Sequence[int], backend: str = "torch",
        self_defined_imp_class = None
    ):
        # A user-supplied implementation class takes precedence over backends.
        if self_defined_imp_class is not None:
            self.imp = self_defined_imp_class(net, input_shape)
            return
        if backend == "torch":
            from reunn.implementation import torch_imp
            self.imp = torch_imp.TorchStatsImp(net, input_shape)
        elif backend == "spikingjelly":
            from reunn.implementation import spikingjelly_imp
            self.imp = spikingjelly_imp.SpikingjellyStatsImp(net, input_shape)
        else:
            raise ValueError(f"{backend} backend not supported!")

    def count_parameter(self):
        """Number of parameters, as reported by the implementation."""
        return self.imp.count_parameter()

    def count_mac(self):
        """Multiply-accumulate count, as reported by the implementation."""
        return self.imp.count_mac()

    def print_summary(self):
        """Print the implementation's network summary."""
        self.imp.print_summary()
| AllenYolk/reusable-nn-code | reunn/stats.py | stats.py | py | 954 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.Sequence",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "reunn.implementation.torch_imp.TorchStatsImp",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "reunn.implementation.torch_imp",
"line_number": 13,
"usage_type": "name"
},... |
29947905371 | import requests
class Test_new_joke():
    """Fetch a random Chuck Norris joke from a user-chosen category."""
    def __init__(self):
        pass
    def get_categories(self):
        """Fetch all joke categories; if the module-level ``user_categories``
        matches one, fetch and print a random joke from it. Sets the global
        ``categories_ok`` flag so the caller can re-prompt on a bad category."""
        url_categories = "https://api.chucknorris.io/jokes/categories"
        print("Получение категорий шуток по ссылке - " + url_categories)
        all_categories_get = requests.get(url_categories)
        # NOTE(review): asserts are stripped under `python -O`; prefer
        # response.raise_for_status() for runtime checks.
        assert all_categories_get.status_code == 200
        if all_categories_get.status_code == 200:
            print("Статус код: 200\nКатегории получены\n")
        all_categories = all_categories_get.json()
        global categories_ok
        categories_ok = False
        for f in all_categories:  # validate the entered category and fetch the joke
            if f == user_categories:
                url = "https://api.chucknorris.io/jokes/random?category=" + f
                print("Получение шутки по ссылке -" + url)
                result = requests.get(url)
                assert result.status_code == 200
                print("Статус код: 200\nШутка в данной категории получена:")
                joke_get = result.json()
                joke = joke_get.get('value')
                print(joke + "\n")
                categories_ok = True
    def description(self):
        """Print the list of available joke categories, one per line."""
        url_categories = "https://api.chucknorris.io/jokes/categories"
        all_categories_get = requests.get(url_categories)
        assert all_categories_get.status_code == 200
        all_categories = all_categories_get.json()
        for f in all_categories:
            print(f)
user_categories = input("Введите название категории из которой хотите получить шутку: ")
cat = Test_new_joke()
cat.get_categories()
while not categories_ok:
print("Такой категории не существует.\nСписок категорий:")
cat.description()
user_categories = input("Введите название категории из этого списка: ")
cat.get_categories() | Grassh-str/Test_api_ChuckNorris | api_chuck.py | api_chuck.py | py | 2,399 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 43,
"usage_type": "call"
}
] |
13866614560 | import datetime
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
from sqlalchemy.orm import relationship
from app.db.base_model import Base
class ArticleModel(Base):
    """SQLAlchemy model for a user-authored article."""
    __tablename__ = "articles"
    id = Column(Integer, primary_key=True, autoincrement=True)
    title = Column(String(255))
    description = Column(String(255))
    # URL of the article's cover image (name suggests "front"; confirm).
    url_font = Column(String(255))
    user_id = Column(Integer, ForeignKey("users.id"))
    # lazy="joined" eagerly loads the author together with the article.
    creator = relationship("UserModel",
                           back_populates="articles",
                           lazy="joined")
    created_at = Column(DateTime, default=datetime.datetime.utcnow)
    # onupdate refreshes the timestamp on every UPDATE of the row.
    updated_at = Column(DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow)
| matheus-feu/FastAPI-JWT-Security | app/models/article.py | article.py | py | 753 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "app.db.base_model.Base",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "sqlalc... |
26377040664 | import ast
import os
from django.http import JsonResponse
from django.shortcuts import render, HttpResponse
from django.conf import settings
import json
import commons.helper
# Create your views here.
"""
Google Map
"""
def map_hello_world(request):
    """
    Renders a page with embedded Google map. Passes variables to the associated html template via dictionary
    'context'. The URL associated with this view function is defined in urls.py.
    """
    context = {
        # Key comes from settings so it is not hard-coded in the template.
        "google_api_key": settings.GOOGLE_MAP_API_KEY,
        # Default map center (presumably San Antonio, TX -- confirm).
        "lat_coord": 29.4190,
        "lng_coord": -98.4836,
        # Serialized list of active routes for the route drop-down.
        "all_routes": json.dumps(commons.helper.getAllActiveRoutesDropDown())
    }
    return render(request, 'map/map_index.html', context)
def getRouteDetailsAJAX(request):
    """AJAX endpoint: return the stops for the requested route(s) as JSON.

    Reads the ``data`` query parameter (a Python literal), resolves it via
    ``commons.helper.getRoutesDetails`` and returns ``{"all_stops": [...]}``.
    """
    # literal_eval only parses Python literals, so the untrusted query string
    # cannot execute code; malformed input still raises ValueError.
    user_data = ast.literal_eval(request.GET.get('data'))
    stops = commons.helper.getRoutesDetails(user_data)
    # Bug fix: previously the function fell off the end (returning None, an
    # invalid Django response) when no stops were found. Always respond with
    # a JSON payload; an empty list signals "no stops".
    allStops = {
        'all_stops': list(stops) if stops else []
    }
    return HttpResponse(json.dumps(allStops))
def getBusColorDescriptionAJAX(request):
    """AJAX endpoint: return the map legend (bus icon URL + meaning) as JSON."""
    static_base_url = settings.STATIC_URL
    if settings.DEBUG:
        # In DEBUG, build an absolute /static/ URL from the current request.
        static_base_url = request.build_absolute_uri('/')[:-1].strip("/") + '/static/'
    legend = (
        ('red_bus.png', "No Seats Available"),
        ('yellow_bus.png', "Less than 3 seats Available"),
        ('green_bus.png', "More than 3 seats Available"),
    )
    result = [
        {
            'icon': f'{static_base_url}map/icons/{icon_file}',
            'description': description
        }
        for icon_file, description in legend
    ]
    return HttpResponse(json.dumps(result))
| TAMUSA-nsf-project/django_smartmap | map/views.py | views.py | py | 1,701 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "django.conf.settings.GOOGLE_MAP_API_KEY",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 26,
"usage_type": "call"
},
{
"api_... |
41772303500 | import glob
import json
import subprocess
from utils import PathUtils
from plot_sim_results import plot_multiple_results
# NOTE(review): EXP_CONFIGS appears unused; the configs actually run are
# discovered by the glob below. Confirm before removing.
EXP_CONFIGS = ['ring_local_config',
               'ring_consensus_config']
if __name__ == '__main__':
    # Discover every ring experiment config and run each as a subprocess.
    ring_configs = glob.glob(str(PathUtils.exp_configs_folder) + '/ring' + '/*.py')
    for config in ring_configs:
        # Strip directory and extension to get the bare config name.
        # NOTE(review): splitting on '/' is not portable to Windows paths.
        config = config.rsplit('/', 1)[1].rsplit('.',1)[0]
        command = ['python',
                   PathUtils.run_ring_file,
                   '--exp_config', config,
                   #'--no_render',
                   '--no_plot_outcome'
                   ]
        process = subprocess.Popen(command,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        output, error = process.communicate()
        if output:
            print(output.decode())
        if error:
            print(error.decode())
    # Aggregate the results written by the subprocesses and plot them.
    with open(str(PathUtils.ring_json_file), 'r') as f:
        params_dict = json.load(f)
        f.close()
    plot_multiple_results(params_dict) | matteobettini/Autonomous-Vehicles-Consensus-2021 | run_experiments_ring.py | run_experiments_ring.py | py | 1,084 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utils.PathUtils.exp_configs_folder",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "utils.PathUtils",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "uti... |
13966927417 | from itertools import combinations
def make_combinations_set(order, menu_num):
    """Return every `menu_num`-sized combination of the characters in `order`,
    each encoded as an alphabetically sorted string."""
    sorted_chars = sorted(order)
    return {''.join(combo) for combo in combinations(sorted_chars, menu_num)}
def solution(orders, course):
    """For each course size, collect candidate menus (character combinations)
    ordered by at least two customers, keeping the ones with the maximum count.

    NOTE(review): the statements after the first for-loop appear dedented out
    of the `for menu_num in course:` loop, so as written only the last course
    size is scored -- this looks like whitespace mangling from extraction;
    confirm against the original file.
    """
    answer = []
    comb_menu = set()
    for menu_num in course:
        for order in orders:
            comb_menu |= make_combinations_set(order, menu_num)
    comb_menu = sorted(list(comb_menu))
    # print(comb_menu)
    count = 0
    each_count = 0
    menus_count = []
    for each_comb in comb_menu:
        for order in orders:
            # for/else: each_count is bumped only when every menu item of the
            # combination appears in this order (no break fired).
            for each_menu in each_comb:
                if each_menu not in order:
                    break
            else:
                each_count += 1
            if each_count == menu_num:
                count += 1
                each_count = 0
        if count >= 2:
            menus_count.append([each_comb, count])
        count = 0
    # Sort candidates by customer count, descending.
    menus_count = sorted(menus_count, key = lambda x : x[1], reverse = True)
    #print(menus_count)
    # NOTE(review): the bare except silently swallows the empty-candidate case
    # (IndexError on menus_count[0]); catching IndexError would be clearer.
    try:
        max_count = menus_count[0][1]
        for each_menu in menus_count:
            if each_menu[1] == max_count:
                answer.append(each_menu[0])
            else:
                break
    except:
        pass
        comb_menu = set()
    answer = sorted(answer)
    print(answer)
    return answer | Devlee247/NaverBoostCamp_AlgorithmStudy | week5/P01_myeongu.py | P01_myeongu.py | py | 1,634 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "itertools.combinations",
"line_number": 7,
"usage_type": "call"
}
] |
32889657187 | import math
import os
from pickle import FALSE, TRUE
import nltk
import string
from nltk.stem import PorterStemmer
import json
import tkinter as tk
from nltk import WordNetLemmatizer
lemmatizer=WordNetLemmatizer()
remove_punctuation_translator = str.maketrans(string.punctuation, ' '*len(string.punctuation))
def stopWord():
    """Read Stopword-List.txt and return its non-empty lines as a list of words."""
    stop_words=[]
    # NOTE(review): the file handle is never closed; a `with` block is safer.
    f=open("Stopword-List.txt")
    #make an array of all stop words
    for word in f.read().split("\n")[0:]:
        if word:
            stop_words.append(word.strip())
    return stop_words
def readFromFileAndMakeIndexes():
    """Read Abstracts/1.txt .. Abstracts/448.txt and build the index.

    Returns:
        tuple: (vectorSpace, docVector, df) where vectorSpace lists terms in
        first-seen-per-document order (terms repeat across documents),
        docVector is one term-frequency dict per document, and df maps each
        term to its document frequency.
    """
    #getcwd brings the existing path of the file
    # path=os.getcwd()
    # path=path+'/Abstracts/'
    #getting all files in path
    # files=os.listdir(path)
    i=0
    vectorSpace=[]
    docVector=[]
    df={}
    stop_words=stopWord()
    # print(stop_words)
    # for file in files:
    for i in range(448):
        doc_id=i+1
        # NOTE(review): backslash path separator is Windows-only.
        f=open("Abstracts\\"+str(doc_id)+".txt")
        words=[]
        new_words=[]
        #split is a built-in function used to break the document into lines
        for line in f.read().split("\n")[0:]:
            if line:
                #remove any punctuation in the line
                line=line.translate(remove_punctuation_translator)
                #nltk library function used to split sentences into word tokens
                words=nltk.word_tokenize(line)
                for items in words:
                    if len(items)>1:
                        items=items.translate(remove_punctuation_translator)
                        items=lemmatizer.lemmatize(items)
                        new_words.append(items.lower())
                        # print(line)
                    # new_words.append(word_stemmer.stem(items))
        #partition function is used to break a string at the first occurrence of '.'
        # doc_id=(file.partition(".")[0])
        #convert from string to int
        doc_id=int(doc_id)
        #Creating TermFrequency VECTOR (TF)
        tf={}
        temp=[]
        # flag=False
        for word in new_words:
            if tf.__contains__(word):
                tf[word]+=1
                # print(word)
            else:
                # Note: a word is appended to vectorSpace once per document
                # it first appears in, so vectorSpace contains duplicates.
                vectorSpace.append(word)
                tf[word]=1
            if word not in temp:
                if df.__contains__(word):
                    df[word]+=1
                else:
                    df[word]=1
                temp.append(word)
        docVector.append(tf)
        # NOTE(review): tf-idf is applied mid-loop at the 101st document only;
        # this looks like leftover debugging code -- confirm intent.
        if(i==100):
            docVector,df=tfIdfScore(vectorSpace,docVector,df)
            print(docVector)
    return vectorSpace,docVector,df
def tfIdfScore(vectorSpace,docVector,df):
    """Convert raw term frequencies into tf-idf weights, in place.

    First pass turns each tf into 1+log10(tf) and each df into log10(N/df);
    second pass multiplies the two.

    NOTE(review): vectorSpace contains a word once per document it first
    appeared in, so words occurring in several documents get the
    1+log10(...) transform applied repeatedly -- likely unintended; the
    outer loop should probably iterate over set(vectorSpace).
    """
    N=len(docVector)
    for word in vectorSpace:
        for d in docVector:
            if word in d:
                # print(d[word])
                d[word]=1+math.log10(d[word] if d[word]>0 else 1)
            # else:
            #     d[word]=0
        df[word]=math.log10(N/df[word] if df[word]>0 else 1)
    # print(docVector)
    for word in df:
        for d in docVector:
            if word in d:
                d[word]=d[word]*df[word]
    return docVector,df
def queryProcess(q,vectorSpace,df,docVector):
    """Build a tf-idf weighted query vector for query string `q`, then rank
    documents via similarity(). Returns None; results are printed.

    NOTE(review): `N` is unused, and a vocabulary word typed only as a stop
    word keeps count 0, making log10(0) a math domain error risk below.
    """
    queryVector={}
    N=len(docVector)
    # Start every vocabulary term at zero weight.
    for word in vectorSpace:
        queryVector[word]=0
    stop_words=stopWord()
    q=q.lower().split(" ")
    # Raw term counts for non-stop-word query terms in the vocabulary.
    for q_word in q:
        # lemmatizer.lemmatize(q_word)
        if q_word not in stop_words:
            if q_word in vectorSpace:
                queryVector[q_word]+=1
        else:
            continue
    # Log-scale the counts (1 + log10 tf).
    for q_word in q:
        if q_word in vectorSpace:
            queryVector[q_word]=1+math.log10(queryVector[q_word])
    # Multiply by the precomputed idf weights.
    for q_word in q:
        if q_word in vectorSpace:
            queryVector[q_word]=queryVector[q_word]*df[q_word]
    similarity(q,docVector,queryVector)
    # print(queryVector)
    # print(q)
def vectorDotProduct(v1, v2):
    """Return the dot product of two numeric vectors.

    Vectors are assumed to have equal length (as all callers in this file
    guarantee); `zip` pairs elements without index bookkeeping.
    """
    return sum(a * b for a, b in zip(v1, v2))
def vectorMagnitude(v):
    """Return the Euclidean (L2) norm of numeric vector `v`."""
    return math.sqrt(sum(x * x for x in v))
def cosineScore(v1, v2):
    """Cosine similarity between v1 and v2; 0 when either vector is zero."""
    denominator = vectorMagnitude(v1) * vectorMagnitude(v2)
    if denominator == 0:
        return 0
    return vectorDotProduct(v1, v2) / denominator
def similarity(q,docVector,queryVector):
    """Print the cosine similarity between the query vector and each document.

    Only dimensions for query terms that actually occur in the document are
    compared; docScore is keyed by 1-based document index.
    """
    docScore={}
    print(queryVector)
    docCount=0
    for i in range(0,len(docVector)):
        v1=[]
        v2=[]
        for word in q:
            if word in docVector[docCount]:
                v1.append(docVector[docCount][word])
                v2.append(queryVector[word])
                # if word=="ensemble":
                #     print(docVector[docCount][word],queryVector[word])
        # Increment before storing, so keys run 1..len(docVector).
        docCount+=1
        docScore[docCount]=(cosineScore(v1,v2))
    print(docScore)
vectorSpace,docVector,df=readFromFileAndMakeIndexes()
queryProcess("ensemble",vectorSpace,df,docVector)
| mustafabawani/Vector-Space-Model | ajeeb.py | ajeeb.py | py | 4,976 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.WordNetLemmatizer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "nltk.word_tokenize",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "mat... |
38767218323 | import streamlit as st
import pandas as pd
import openai
import time
import re
openai.api_key = st.secrets["openai"]
def txt(file):
    """Parse a ProQuest-style .txt export into a DataFrame.

    Articles in the export are separated by a long underscore rule; only
    chunks containing "Full text:" are treated as documents. Missing fields
    default to None.

    Args:
        file: An uploaded-file object exposing getvalue() -> bytes (UTF-8).

    Returns:
        pd.DataFrame with columns "Document URL", "Publication Date",
        "Publication Title", "Full Text".
    """
    content = file.getvalue().decode('utf-8')
    raw_chunks = content.split("____________________________________________________________")
    # Keep only chunks that look like documents.
    documents = [chunk.strip() for chunk in raw_chunks if "Full text:" in chunk]

    # (Removed dead code: document names and the document count were
    # extracted but never used or returned.)
    document_urls = []
    publication_dates = []
    publication_titles = []
    full_texts = []
    for doc in documents:
        url_match = re.search(r"http[^\n]+", doc)
        document_urls.append(url_match.group(0) if url_match else None)

        date_match = re.search(r"Publication date: ([^\n]+)", doc)
        publication_dates.append(date_match.group(1) if date_match else None)

        title_match = re.search(r"Publication title: ([^\n]+)", doc)
        publication_titles.append(title_match.group(1) if title_match else None)

        full_text_match = re.search(r"Full text:([\s\S]+)", doc)
        full_texts.append(full_text_match.group(1).strip() if full_text_match else None)

    return pd.DataFrame({
        "Document URL": document_urls,
        "Publication Date": publication_dates,
        "Publication Title": publication_titles,
        "Full Text": full_texts
    })
def gpt(prompt, text, model="gpt-3.5-turbo-16k", temperature=0.2):
    """Send `text` to the OpenAI chat-completion API with `prompt` as the system message.

    Args:
        prompt (str): System message steering the model.
        text (str): User content to annotate.
        model (str): OpenAI chat model name.
        temperature (float): Sampling temperature (low = more deterministic).

    Returns:
        str: The assistant message content.
    """
    response = openai.ChatCompletion.create(
        model=model,
        messages=[
            {"role": "system", "content": prompt},
            {"role": "user", "content": text}
        ],
        temperature=temperature
    )
    response = response.choices[0].message['content']
    return response
def process(df, target, prompts):
    """Run every prompt over every row of df[target], writing answers to new columns.

    Mutates `df` in place (one string column per prompt) and live-updates a
    Streamlit placeholder with the growing result table.

    Args:
        df (pd.DataFrame): Data to annotate.
        target (str): Name of the source-text column.
        prompts (dict): Maps output column name -> GPT prompt.

    Returns:
        bool: True once all rows have been processed.
    """
    placeholder = st.empty()
    # Ensure that all the output columns are present
    for name in prompts.keys():
        if name not in df:
            df[name] = ''
        df[name] = df[name].astype('string')
    # Loop through the dataframe rows
    for i in range(0, len(df)):
        for name, prompt in prompts.items():
            try:
                text = df.loc[i, target][0:6000]  # truncate to stay under GPT's token limits
                output = gpt(prompt, text)
                df.loc[i, name] = output
                subset = df[[target, *prompts.keys()]]
                placeholder.dataframe(subset)
            except Exception as e:
                st.write(f"Error encountered at index {i}. Reason: {str(e)}")
        time.sleep(20)  # crude rate limiting: pause 20s between rows
    return True
# Placeholder suggestions shown in the prompt inputs below.
example = ["Summarize the article", "List specific individuals mentioned",
           "Classify article type (op-ed, report, etc.", "Prompt 4", "Prompt 5"]
file = st.file_uploader("Upload a file", type=("csv", "txt"))
if file:
    # Try CSV first; fall back to the ProQuest-style .txt parser.
    # NOTE(review): the bare except hides real CSV errors; catch a narrower type.
    try:
        df = pd.read_csv(file)
    except:
        df = txt(file)
    column = st.selectbox("Column of interest:", tuple(df.columns))
    prompts = {}
    n = st.number_input('Number of prompts:', min_value = 0, max_value=5)
    for i in range(0,n):
        prompts[f"Column {i+1}"] = st.text_input(f"Prompt {i+1}",
                                                 placeholder=example[i]
                                                 )
    # Disable the button until every prompt box is filled in.
    is_any_empty = (any(not val for val in prompts.values()))
    if st.button("Process", disabled=is_any_empty):
        if process(df, column, prompts):
            st.download_button(
                label="Download data as CSV",
                data=df.to_csv().encode('utf-8'),
                file_name='cleaned.csv',
                mime='text/csv',
            )
| skacholia/AnnotateDemo | main.py | main.py | py | 4,539 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "openai.api_key",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "streamlit.secrets",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.search",
"l... |
27016642063 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 1 17:38:34 2019
@author: Martín Márquez Cervantes
"""
print("UNion de las tres compuertas, aun por trabajar")
import matplotlib.pyplot as plt
import numpy as np
class MyNeuron:
    """Single perceptron: trains a weight vector and classifies 2-feature inputs."""
    # Global (class-level) weight vector, set by training()
    WG=0
    xTestPredic=0
    # Perceptron training
    def training(self,X,Y):
        """Train the perceptron on matrix X (rows = samples) with labels Y,
        store the learned weights in self.WG and plot initial vs final w."""
        # Initialize w with pseudo-random values
        w = np.random.rand(3)
        # Note: w has (number of columns of X) + 1 entries (bias included)
        #-----------------------
        # Step 2: prepend a column of ones (bias term) to matrix X
        X=np.append(np.ones((X.shape[0],1)),X,axis=1)
        # Keep a copy of w before modification, for printing/plotting
        wInicial = w
        XN = X.shape[0] #40
        # Perceptron algorithm: 20 epochs of the classic update rule
        for i in range(1,21):
            for j in range(XN):
                if np.dot(w,X[j]) >= 0:
                    y=1
                else:
                    y=0
                w=w+(Y[j]-y)*X[j]
        # Save the trained w in WG for later use by the prediction method
        self.WG=w
        # Print results
        print("\n\n")
        print("w Inicial: "+str(wInicial))
        print("w Final: "+str(w))
        # Plot both weight vectors
        plt.plot(wInicial,'.-')
        plt.plot(w,'.-')
        print("\n\n")
        print('Linea azul W Inicial')
        print('Linea naranja W Final aplicando algoritmo')
    # Predict the class (1/0) of row i of xTest
    def predic(self,i,xTest):
        """Return 1 if row i of xTest is on/above the decision boundary, else 0.
        xTest must already include the bias column of ones."""
        # Compute y, the raw prediction output
        y=np.dot(self.WG,xTest[i]) # inner product between w and x
        # Threshold classification
        if y>=0:
            return 1
        else:
            return 0
        #return 1/(1+np.exp(-y))
    def comparar(self,XT,Predicciones):
        """Compare true labels XT against Predicciones and print the confusion
        matrix, accuracy, precision, recall and F-score."""
        TP = 0
        FP = 0
        FN = 0
        TN = 0
        # Count true positives, true negatives, false negatives, false positives
        for i in range(XT.shape[0]):
            if XT[i] ==1 and Predicciones[i]==1:
                TP +=1
            if XT[i] ==0 and Predicciones[i]==0:
                TN +=1
            if XT[i] ==1 and Predicciones[i]==0:
                FN +=1
            if XT[i] ==0 and Predicciones[i]==1:
                FP +=1
        print("\n\nTP = "+str(TP)+"\nTN = "+str(TN)+"\nFP = "+str(FP)+"\nFN = "+str(FN))
        print("\nMatrix Confussion")
        MatrixConfussion = np.array([TP,TN,FP,FN])
        print(MatrixConfussion.reshape(2,2))
        # Classification accuracy
        ClassificationAccuary = ( (TP+TN)/(TP+TN+FN+FP) )*100
        print("\nPrecisión de clasificación: "+str(ClassificationAccuary)+" %")
        # Precision (NOTE(review): division by zero if TP+FP == 0)
        Presicion = TP/(TP+FP)*100
        print("Precisión : "+str(Presicion)+" %")
        # Recall (NOTE(review): division by zero if TP+FN == 0)
        Recall = TP/(TP+FN)*100
        print("Recall : "+str(Recall)+" %")
        # F-Score
        FScore = 2*( (Presicion*Recall)/(Presicion+Recall) )
        print("F-Score : "+str(FScore))
clf = MyNeuron()
# Training inputs for the AND / OR gates: 10 samples per logic state.
TotalElementos = 10
ceros = np.random.uniform(0,0.4,TotalElementos)
unos = np.random.uniform(0.75,0.9,TotalElementos)
numRenglones = ceros.shape[0]*4
# Training data set: 40 rows of (x1, x2) covering the 00, 01, 10, 11 patterns.
X = np.append(ceros,ceros)
X = np.append(X,unos)
X = np.append(X,unos)
X = np.append(X,ceros)
X = np.append(X,unos)
X = np.append(X,ceros)
X = np.append(X,unos)
X = X.reshape(numRenglones,2,order=True)
# Inputs for the NOT gate (single feature).
unosNot = np.random.uniform(0.75,0.9,TotalElementos*2)
Xnot = np.append(ceros,ceros)
Xnot = np.append(Xnot,unosNot)
Xnot = Xnot.reshape(numRenglones,1)
# Labels for AND.
# Bug fix: the appends below previously referenced an undefined name `Y`,
# which raised NameError at import time; each now extends its own array.
YAND = np.zeros([TotalElementos*3,1])
YAND = np.append(YAND,np.ones([TotalElementos,1]))
YAND.reshape(numRenglones,1)  # NOTE(review): result discarded; YAND stays 1-D
# Labels for OR.
YOR = np.zeros([TotalElementos,1])
YOR = np.append(YOR,np.ones([TotalElementos*3,1]))
YOR.reshape(numRenglones,1)  # NOTE(review): result discarded
# Labels for NOT.
YNOT = np.zeros([TotalElementos*2,1])
YNOT = np.append(YNOT,np.ones([TotalElementos*2,1]))
YNOT.reshape(numRenglones,1)  # NOTE(review): result discarded
clf.training(X,YAND)
# Test data set: 20 clean samples (exact 0s and 1s).
cerosTest = np.zeros(5)
unosTest = np.ones(5)
XT = np.append(cerosTest,cerosTest)
XT = np.append(XT,unosTest)
XT = np.append(XT,unosTest)
XT = np.append(XT,cerosTest)
XT = np.append(XT,unosTest)
XT = np.append(XT,cerosTest)
XT = np.append(XT,unosTest)
XT = XT.reshape(20,2,order=True)
YT = np.zeros(15)
YT = np.append(YT,np.ones(5))
YT.reshape(YT.size,1)  # NOTE(review): no-op, result discarded
# Prepend the bias column of ones, matching what training() does internally.
XT=np.append(np.ones((XT.shape[0],1)),XT,axis=1)
Predicciones = []
for i in range(XT.shape[0]):
    Predicciones.append(clf.predic(i,XT))
Predicciones = np.array(Predicciones)
# Print one prediction per test row.
print("\n\n")
for i in range(XT.shape[0]):
    print("Indice " +str(i) +" prediccion " +str(Predicciones[i]))
# Print confusion matrix, accuracy, precision, recall and F-score.
clf.comparar(YT,Predicciones)
| MarqCervMartin/RedesNeuronales | Laboratorio5/CompuertaAndOrNot.py | CompuertaAndOrNot.py | py | 5,014 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.rand",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "numpy.append",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"li... |
14129279608 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import sqlite3
from datetime import datetime
# One SQLite file per day, e.g. dbmovie20190101.db.
db_name = 'dbmovie{0}.db'.format(str(datetime.now())[:10].replace('-', ''))
class DbmoviePipeline(object):
    """Scrapy item pipeline that persists each movie item into SQLite."""

    def process_item(self, item, spider):
        """Insert `item` into the `movies` table and pass it downstream.

        Args:
            item: dict-like scraped movie with movieName/url/directors/... keys.
            spider: the spider that produced the item (unused).

        Returns:
            The same item, for the next pipeline stage.
        """
        if item:
            conn = sqlite3.connect(db_name)
            cursor = conn.cursor()
            try:
                cursor.execute(
                    'CREATE TABLE IF NOT EXISTS movies(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, movieName VARCHAR(50),url VARCHAR (50), directors VARCHAR(50), actors VARCHAR(200), countries VARCHAR (50), genres VARCHAR (50), languages VARCHAR (50), runtime INTEGER , udate VARCHAR (15), rate VARCHAR (5), votes INTEGER )')
                # Bug/security fix: the INSERT was previously built with
                # str.format, which broke on quotes in scraped values and was
                # open to SQL injection. Use qmark placeholders instead.
                cursor.execute(
                    'insert into movies(id, movieName, url, directors, actors,'
                    ' countries, genres, languages, runtime, udate, rate, votes)'
                    ' VALUES (NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
                    (item['movieName'], item['url'], item['directors'],
                     item['actors'], item['countries'], item['genres'],
                     item['languages'], item['runtime'], item['date'],
                     item['rate'], item['votes']))
            except sqlite3.Error as e:
                print(e.args[0])
            else:
                conn.commit()
            finally:
                # Close exactly once on every path (the original closed the
                # handles in both branches).
                cursor.close()
                conn.close()
        return item
| zenmeder/dbmovie | dbmovie/pipelines.py | pipelines.py | py | 1,434 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sqlite3.Error... |
23212333774 | from django.test import TestCase
from .models import User, Routine, RoutineContent, Task, Advice, Appointment, DayWeek
from .get import *
from datetime import datetime, timedelta
# Create your tests here.
class getsFunctionTestCase(TestCase):
    """Unit tests for the getter helpers in ``get.py``.

    Fix applied: the deprecated ``assertEquals`` alias (removed in recent
    Python versions) is replaced with ``assertEqual`` throughout.
    """
    def setUp(self):
        # Baseline fixtures: one user and one routine holding a task,
        # an advice and an appointment that all share the same content.
        user_a = User.objects.create_user(username='user_a', email='user_a@exp.com', password='user_a')
        content_a = RoutineContent.objects.create(content='content_a')
        task_a = Task.objects.create(routine_content=content_a)
        advice_a = Advice.objects.create(routine_content=content_a)
        appointment_a = Appointment.objects.create(routine_content=content_a)
        routine_a = Routine.objects.create()
        routine_a.routine_tasks.add(task_a)
        routine_a.routine_advices.add(advice_a)
        routine_a.routine_appointments.add(appointment_a)
    def test_getUserRoutine(self):
        user_a = User.objects.get(username='user_a')
        routine_a = Routine.objects.create()
        user_a.user_routine = routine_a
        user_a.save()
        routine = getUserRoutine(user_a)
        self.assertEqual(routine, routine_a)
    def test_getUserByUsername(self):
        user_a = User.objects.get(pk=1)
        user_a_get = getUserByUsername('user_a')
        self.assertEqual(user_a, user_a_get)
    def test_getUserByPK(self):
        user_a = User.objects.get(username='user_a')
        user_a_get = getUserByPK(1)
        self.assertEqual(user_a, user_a_get)
    def test_getUserTasks(self):
        user_a = getUserByUsername('user_a')
        content = RoutineContent.objects.create(content='content')
        task_a = Task.objects.create(routine_content=content)
        task_b = Task.objects.create(routine_content=content)
        routine_a = Routine.objects.create()
        routine_a.routine_tasks.add(task_a, task_b)
        tasks = routine_a.routine_tasks.all()
        user_a.user_routine = routine_a
        user_a.save()
        task_a_get = getUserTasks(user_a)
        tasks = list(tasks)
        task_a_get = list(task_a_get)
        self.assertEqual(tasks, task_a_get)
    def test_getUserAdvices(self):
        user_a = getUserByUsername('user_a')
        content = RoutineContent.objects.create(content='content')
        advice_a = Advice.objects.create(routine_content=content)
        routine_a = Routine.objects.create()
        routine_a.routine_advices.add(advice_a)
        advices = routine_a.routine_advices.all()
        user_a.user_routine = routine_a
        user_a.save()
        advices_get = getUserAdvices(user_a)
        advices = list(advices)
        advices_get = list(advices_get)
        self.assertEqual(advices, advices_get)
    def test_getRoutineContent(self):
        # Task, Appointment and Advice created in setUp all point at the
        # same RoutineContent row (pk=1).
        content = RoutineContent.objects.get(pk=1)
        task_a = Task.objects.get(pk=1)
        appointment_a = Appointment.objects.get(pk=1)
        advice_a = Advice.objects.get(pk=1)
        content_task = getRoutineContent(task_a)
        content_appointment = getRoutineContent(appointment_a)
        content_advice = getRoutineContent(advice_a)
        self.assertEqual(content, content_task)
        self.assertEqual(content, content_advice)
        self.assertEqual(content, content_appointment)
    def test_timeOfContent(self):
        # Finish date is two days after creation, so the day-of-month
        # difference should be exactly 2.
        now = datetime.now()
        finish = now + timedelta(days=2)
        content = RoutineContent.objects.create(date_finish=finish)
        task = Task.objects.create(routine_content=content)
        day_finish = getFinishDay(task)
        day_created = getCreatedDay(task)
        self.assertEqual(day_created+2, day_finish)
    def test_getDayWeek(self):
        monday = DayWeek.objects.create(day=0)
        wednesday = DayWeek.objects.create(day=2)
        content = RoutineContent.objects.create(content='content')
        content.day_week.add(monday, wednesday)
        content.save()
        array_day = [monday, wednesday]
        task = Task.objects.create(routine_content=content)
        days = getDayWeek(task)
        self.assertEqual(days, array_day)
    def test_getDateCreated(self):
        content = RoutineContent.objects.create(content='content')
        date = content.date_created
        task = Task.objects.create(routine_content=content)
        date_get = getCreatedDate(task)
        self.assertEqual(date, date_get)
    def test_getFinishDate(self):
        content = RoutineContent.objects.create(content='content')
        date = content.date_finish
        task = Task.objects.create(routine_content=content)
        date_get = getFinishDate(task)
        self.assertEqual(date, date_get)
    def test_getContent(self):
        content = RoutineContent.objects.create(content='content')
        content_content = content.content
        task = Task.objects.create(routine_content=content)
        content_get = getContent(task)
        self.assertEqual(content_content, content_get)
    def test_getPriority(self):
        content = RoutineContent.objects.create(content='content', priority=1)
        priority = content.priority
        task = Task.objects.create(routine_content=content)
        priority_get = getPriority(task)
        self.assertEqual(priority, priority_get)
    def test_getIsRoutine(self):
        content = RoutineContent.objects.create(content='content', is_routine=True)
        is_routine = content.is_routine
        task = Task.objects.create(routine_content=content)
        is_routine_get = getIsRoutine(task)
        self.assertEqual(is_routine, is_routine_get)
    def test_getPlace(self):
        content = RoutineContent.objects.create(content='content')
        appointment = Appointment.objects.create(routine_content=content, place='Rue')
        place = appointment.place
        place_get = getPlace(appointment)
        self.assertEqual(place, place_get)
    def test_getActive(self):
        content = RoutineContent.objects.create(content='content')
        advice = Advice.objects.create(routine_content=content, active=True)
        active_get = getActive(advice)
        self.assertTrue(active_get)
    def test_getFinished(self):
        content = RoutineContent.objects.create(content='content')
        task = Task.objects.create(routine_content=content)
        finished_get = getFinished(task)
        self.assertFalse(finished_get)
| eduardofcabrera/CS50-CalendarDay | day_day/tests.py | tests.py | py | 6,307 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "models.User.objects.create_user",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_... |
import sys
from pathlib import Path

# sys.path entries are expected to be strings; appending a Path object is not
# guaranteed to work with all import machinery, so convert explicitly.
sys.path.append(str(Path(__file__).resolve().parents[2]))

# Set the package name so relative imports work when run as a script.
if __name__ == '__main__' and __package__ is None:
    __package__ = 'kuosc'

print(Path(__file__).resolve())
print(__package__)
# from kurosc.lib.plotformat import setup
| chriswilly/kuramoto-osc | Python/kurosc/kurosc/tests/pathtest.py | pathtest.py | py | 293 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_numb... |
73679903143 | import telebot
from telebot import types
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
bot = telebot.TeleBot("") # Замените на свой токен!
DB_URL = ''
user_dict = {}
class User:
    """Minimal profile container for a Telegram user filling in the form."""

    def __init__(self, tgid):
        # Telegram numeric id; name and faculty are filled in later steps.
        self.tgid = tgid
        self.fio_name = ''
        self.fac = ''
@bot.message_handler(commands=['start'])
def send_welcome(message):
    """Handle /start: show the two-button main menu and arm the next step."""
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
    markup.add('✏️ | Заполнить заявку', '📗 | О нас')
    msg = bot.send_message(message.chat.id, 'Привет!👋🏾 Выбери нужный тебе пункт меню! ⬇️⬇️⬇️', reply_markup=markup)
    # The user's next message is routed to the menu dispatcher.
    bot.register_next_step_handler(msg, markup_handler)
def markup_handler(message):
    """Dispatch the main-menu choice: start the application form or show info."""
    if message.text == '✏️ | Заполнить заявку':
        msg = bot.send_message(message.chat.id, 'Как тебя зовут?©️')
        bot.register_next_step_handler(msg, fio_handler)
    elif message.text == '📗 | О нас':
        markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
        markup.add('⬅️ | Вернуться')
        # NOTE(review): the next-step handler is registered *before* the
        # message is sent here (unlike the other branches) — confirm this
        # ordering is intentional.
        bot.register_next_step_handler(message, send_welcome)
        msg = bot.send_message(message.chat.id,
                           'Привет! 🤟🏿 Я первый киберспортивный 🤖бот-помощник,\n'
                           'который проведет тебя в мир игр! 👾\n'
                           'С моей помощью ты сможешь найти новых друзей,🤝\n'
                           'научить или научиться чему-то новому!\n'
                           'Преодолеть все границы и стать настоящим победителем! 🏆\n\n'
                           'С уважением, команда ODIN⚡️', reply_markup=markup)
def handle_return(message):
    """Re-show the main menu and re-arm its dispatcher."""
    send_welcome(message)
    bot.register_next_step_handler(message, markup_handler)
def fio_handler(message):
    """Step 2: store the user's name, then ask for their faculty."""
    user_info = {}
    user_info['tg_id'] = message.from_user.id
    user_info['username'] = message.chat.username
    user_info['fio'] = message.text
    prompt = bot.send_message(message.chat.id, 'На каком факультете ты обучаешься?💎')
    bot.register_next_step_handler(prompt, faculty_handler, user_info)
def faculty_handler(message, user_info):
    """Step 3: store the faculty, then ask which discipline to compete in."""
    user_info['faculty'] = message.text
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
    keyboard.add('CS2', 'Dota 2', 'LoL', 'Valorant')
    prompt = bot.send_message(message.chat.id, 'В какой дисциплине ты хочешь принимать участие?⚖️', reply_markup=keyboard)
    bot.register_next_step_handler(prompt, disciplines_handler, user_info)
def disciplines_handler(message, user_info):
    """Step 4: store the chosen discipline, then ask about achievements."""
    user_info['disc'] = message.text
    prompt = bot.send_message(message.chat.id, 'Кратко расскажи о своих достижениях 📝')
    bot.register_next_step_handler(prompt, achievements_handler, user_info)
def achievements_handler(message, user_info):
    """Final step: store achievements, thank the user and persist the form."""
    user_info['achi'] = message.text
    print(user_info)
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
    markup.add('⬅️ | Вернуться')
    bot.send_message(message.chat.id, 'Спасибо! Твой запрос обработан и скоро будет рассмотрен!🔔',reply_markup=markup)
    bot.register_next_step_handler(message,send_welcome)
    # Persist only after the confirmation flow is set up.
    save_to_database(user_info)
def save_to_database(user_info):
    """Lazily initialize the Firebase app (once per process) and store the form."""
    if not firebase_admin._apps:
        cred = credentials.Certificate('admin.json')
        firebase_admin.initialize_app(cred, {'databaseURL': DB_URL})
    # Write the user's data to the Realtime Database
    write_user_data(user_info)
# Function that writes the user's data to the Realtime Database
def write_user_data(user_info):
    """Write the application form to ``Telegram/<tg_id>`` and notify the admin."""
    ref = db.reference('Telegram/' + str(user_info['tg_id']))
    ref.set({
        '6 - достижения': user_info['achi'],
        '5 - дисциплины': user_info['disc'],
        '4 - факультет': user_info['faculty'],
        '3 - ФИО': user_info['fio'],
        '2 - Nickname': "@" + user_info['username'],
        '1 - TelegramID': user_info['tg_id']
    })
    # Previously used admin chat ids:
    #716578611
    #428571723
    # NOTE(review): 999999999 looks like a placeholder admin chat id — confirm.
    send_notific(999999999, user_info)
def send_notific(ADMIN_ID, user_info):
    """Send a MarkdownV2 notification about the new application to the admin.

    NOTE(review): only the parentheses are escaped manually; other MarkdownV2
    special characters in user-supplied text would break parsing — verify.
    """
    text = 'Пользователь ['+user_info['fio']+'](https://t.me/'+user_info['username']+') оставил\(а\) заявку:\nфакультет: '+user_info['faculty']+'\nдисциплины: '+user_info['disc']+'\nдостижения: '+user_info['achi']
    bot.send_message(ADMIN_ID, text, parse_mode='MarkdownV2')
bot.polling() | psshamshin/CuberClub_BOT | tgbotlastfinal.py | tgbotlastfinal.py | py | 4,885 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "telebot.TeleBot",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "telebot.types.ReplyKeyboardMarkup",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "telebot.types",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "telebo... |
6689643445 | import os
import uuid
import json
import minio
import logging
class storage:
    """Singleton wrapper around a Minio client configured from environment vars."""

    instance = None
    client = None

    def __init__(self):
        try:
            # Minio does not allow another way of configuring the connection
            # timeout; the rest of the configuration mirrors Minio's source.
            import urllib3
            from datetime import timedelta

            timeout = timedelta(seconds=1).seconds
            mgr = urllib3.PoolManager(
                timeout=urllib3.util.Timeout(connect=timeout, read=timeout),
                maxsize=10,
                retries=urllib3.Retry(
                    total=5, backoff_factor=0.2, status_forcelist=[500, 502, 503, 504]
                )
            )
            self.client = minio.Minio(
                os.getenv("MINIO_STORAGE_CONNECTION_URL"),
                access_key=os.getenv("MINIO_STORAGE_ACCESS_KEY"),
                secret_key=os.getenv("MINIO_STORAGE_SECRET_KEY"),
                secure=False,
                http_client=mgr
            )
        except Exception as e:
            logging.info(e)
            # Bare raise preserves the original traceback (``raise e`` reset it).
            raise

    @staticmethod
    def unique_name(name):
        """Return *name* with a short random infix inserted before its extension.

        Uses ``rsplit(".", 1)`` so multi-dot names ("archive.tar.gz") keep
        their full stem; the original ``split(".")`` raised ValueError on them.
        """
        name, extension = name.rsplit(".", 1)
        return "{name}.{random}.{extension}".format(
            name=name, extension=extension, random=str(uuid.uuid4()).split("-")[0]
        )

    def upload(self, bucket, file, filepath):
        """Upload *filepath* under a randomized key; return the key."""
        key_name = storage.unique_name(file)
        self.client.fput_object(bucket, key_name, filepath)
        return key_name

    def download(self, bucket, file, filepath):
        """Download object *file* from *bucket* into local *filepath*."""
        self.client.fget_object(bucket, file, filepath)

    def download_directory(self, bucket, prefix, path):
        """Download every object under *prefix* into *path*, keeping key names."""
        objects = self.client.list_objects(bucket, prefix, recursive=True)
        for obj in objects:
            file_name = obj.object_name
            self.download(bucket, file_name, os.path.join(path, file_name))

    def upload_stream(self, bucket, file, bytes_data):
        """Upload an in-memory BytesIO buffer; return the randomized key."""
        key_name = storage.unique_name(file)
        self.client.put_object(
            bucket, key_name, bytes_data, bytes_data.getbuffer().nbytes
        )
        return key_name

    def download_stream(self, bucket, file):
        """Return the full contents of an object as bytes."""
        data = self.client.get_object(bucket, file)
        return data.read()

    @staticmethod
    def get_instance():
        """Lazily create and return the process-wide singleton."""
        if storage.instance is None:
            storage.instance = storage()
        return storage.instance
| spcl/serverless-benchmarks | benchmarks/wrappers/openwhisk/python/storage.py | storage.py | py | 2,464 | python | en | code | 97 | github-code | 36 | [
{
"api_name": "datetime.timedelta",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "urllib3.PoolManager",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "urllib3.util.Timeout",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "urllib3.u... |
44647926786 | import boto3
import json
from decimal import Decimal
from boto3.dynamodb.conditions import Key
dynamodb = boto3.resource('dynamodb')
attendance_table = dynamodb.Table('attendance_table_user')
#queryで特定のuser_idの出社予定取ってくる
def query_attendance(id):
    """Fetch all attendance items for the given user id from DynamoDB."""
    response = attendance_table.query(
        KeyConditionExpression=Key('user_id').eq(id)
    )
    return response['Items']
def decimal_default_proc(obj):
    """``json.dumps`` default hook: convert DynamoDB Decimals to float."""
    if not isinstance(obj, Decimal):
        raise TypeError
    return float(obj)
def lambda_handler(event, context):
    """API Gateway handler: return the user's attendance records as JSON."""
    user_id = event['pathParameters']['user_id']
    attendance = query_attendance(user_id)
    body = json.dumps(attendance, default=decimal_default_proc)
    return {
        'statusCode': 200,
        'body': body,
        'isBase64Encoded': False,
        'headers' : {"content-type": "application/json",
        "Access-Control-Allow-Origin": "*"}
    }
| SOICHI0826/kinikare_server | lambdafunction/get_attendance.py | get_attendance.py | py | 957 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.resource",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "boto3.dynamodb.conditions.Key",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "json.... |
11210169685 | from django.conf import settings
from travels import models
from django.utils.html import escapejs
def project_settings(request):
    """Context processor exposing the singleton Settings row to templates."""
    current_settings = models.Settings.objects.all()[0]
    return {'project_settings': current_settings}
def settings_variables(request):
    '''Expose selected Django settings (JS-escaped) to templates.'''
    project_settings = models.Settings.objects.all()[0]
    d = {
        'APP_NAME': escapejs(settings.APP_NAME),
        'PROJECT_DESCRIPTION': escapejs(project_settings.project_description),
    }
    # Allows settings to define which variables
    # it wants to expose to templates
    # NOTE(review): assumes CONTEXT_VARIABLES is always defined in settings;
    # an absent attribute would raise AttributeError here — confirm.
    if settings.CONTEXT_VARIABLES:
        for var in settings.CONTEXT_VARIABLES:
            if hasattr(settings, var):
                d[var] = getattr(settings, var)
    return d
{
"api_name": "travels.models.Settings.objects.all",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "travels.models.Settings",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "travels.models",
"line_number": 6,
"usage_type": "name"
},
{
"api_... |
#!/usr/bin/env python3
# Scrape quotes.toscrape.com: collect authors/quotes, then print top-10 tags.
import requests
import bs4
base_url = "https://quotes.toscrape.com/page/{}/"
authors = set()
quotations = []
# NOTE(review): range(1,2) fetches only page 1 despite the paged URL template.
for page_num in range(1,2):
    page = requests.get(base_url.format(page_num))
    soup = bs4.BeautifulSoup(page.text,'lxml')
    boxes = soup.select(".quote") #selected all the quotes boxes
    for box in boxes:
        author = box.select('span')[1].select('small')[0].getText()
        quotation = box.select('span')[0].getText()
        authors.add(author)
        quotations.append(quotation)
print("Authors are: ")
print(authors)
print('\n')
print("Quotations are: ")
print(quotations)
print('\n')
# ``soup`` leaks out of the loop: the tag list comes from the last page fetched.
top10 = soup.select(".tag-item")
for i in range(10 ):
    print(top10[i].select('a')[0].getText())
| SKT27182/web_scaping | get_quotes_author.py | get_quotes_author.py | py | 751 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
}
] |
29790427487 | import json
from flask import Flask, request
from flask_cors import CORS
from queue import Queue
from helpers.utils import utils
from helpers.db import queries
from helpers.algo import boundary
from helpers.algo import neighbours
from helpers.algo import degrees_count
from helpers.algo.new_hex_loc import*
app = Flask(__name__)
CORS(app)
def logger(val):
    """Crude stdout logger: print *val* surrounded by blank lines."""
    print(f"\n{val}\n")
@app.route('/get-hex-by-name', methods=['GET', 'POST'])
# @cross_origin()
def search_hex_byName():
    """Look up a hexagon's details by the ``name`` query parameter."""
    name = request.args['name']
    logger(name)
    if not name:
        # Fix: the original returned a *set* literal here, which Flask
        # cannot serialize to JSON; return a dict error payload instead.
        return {"err": "Please enter the name correctly to get all the details"}
    try:
        resp = queries.get_hex_details_by_name(name)
        logger(resp)
        return resp
    except Exception:
        # Preserve the original best-effort contract on lookup failure.
        return {"err": "error"}
@app.route('/get-hex-by-id', methods=['GET', 'POST'])
# @cross_origin()
def search_hex_byId():
    """Look up a hexagon's details by the ``id`` query parameter."""
    id = request.args['id']
    logger(id)
    if not id:
        # Fix: the original returned a *set* literal here (not JSON-serializable).
        return {"err": "Please enter the id correctly to get all the details"}
    try:
        resp = queries.get_hex_details_by_id(id)
        logger(resp)
        return resp
    except Exception:
        # Fix: the original referenced ``resp`` in this branch, which is
        # unbound when the query itself raised (NameError). Return a stable
        # error payload instead.
        return {"err": "error"}
@app.route('/get-all-coordinates', methods=['GET', 'POST'])
# @cross_origin()
def get_all_coords():
    """Return every stored hexagon location, or an error payload on failure."""
    try:
        all_locations = queries.get_all_locations()
        logger(all_locations)
        return {'body': all_locations}
    except:
        return {"err": 'Network Error'}
@app.route('/add-hex', methods=['GET', 'POST'])
# @cross_origin()
def add_hex():
    """Attach a new hexagon to ``src`` at boundary index ``loc``.

    Query params: ``src`` (existing hex name), ``new`` (new hex name),
    ``loc`` (integer boundary index into utils.user_boundary_choice).
    """
    origin_hex = request.args['src']
    new_hex = request.args['new']
    boundary_of_origin_hex = request.args['loc']
    boundary_of_origin_hex = int(boundary_of_origin_hex)
    if(origin_hex and new_hex and (boundary_of_origin_hex >= 0)):
        origin_coordinates_hex = queries.get_hex_location_by_name(origin_hex)
        origin_hex_is_active_or_not = origin_coordinates_hex.get("hexagons")[0].get(
            'is_active', '')
        # checking if the src hex is_active or not
        if origin_hex_is_active_or_not == "FALSE":
            return {"err": "This origin hex is not active"}
        logger('-----here-----get_hex_location_by_name-origin---')
        logger(origin_coordinates_hex)
        origin_existing_neighbours = queries.get_hex_details_by_name(
            origin_hex).get("hexagons", "")[0].get("hex", "")
        # Refuse to overwrite an occupied boundary slot.
        if origin_existing_neighbours[utils.user_boundary_choice[boundary_of_origin_hex]] != 'NO':
            return {'err': 'already a hex exists at this boundary'}
        origin_id = origin_coordinates_hex.get("hexagons")[0].get(
            'location', '').get('hexagon_id', '')
        # Find location of the new hex
        # find neighbours around it , if present query their cluster table rows
        new_hex_loc = boundary.find_new_hex_loc(
            boundary_of_origin_hex, origin_hex, origin_coordinates_hex)  # New Hex location
        logger('-----here-----new-hex-loc-using-origin-loc-and-border---')
        logger(new_hex_loc)
        new_hex_neighbours = neighbours.find_new_hex_neighbours(
            new_hex_loc, boundary_of_origin_hex)  # Neighbours around new hex
        # insertions new hex // fetch id
        logger('-----here-----inserting-new-node---')
        insert_new_hex_resp = queries.insert_new_hex(new_hex)
        new_hexagon_id = list(map(lambda data: data.get(
            'hexagon_id'), insert_new_hex_resp))[0]
        logger(new_hexagon_id)
        # insert neighbours of new node
        logger('-----here-----inserting-new-node-neighbours---')
        new_hex_neighbours["hexagon_id"] = new_hexagon_id
        logger(new_hex_neighbours)
        column_updates = ['n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'updated_at']
        insert_new_hex_neighbours = queries.insert_hex_neighbours(
            {"data": new_hex_neighbours, "colm": column_updates})  # Inserting New hex Neighs. in cluster
        # insert location of new node
        insert_new_hex_loc = queries.insert_new_hex_loc(
            new_hexagon_id, new_hex_loc[0], new_hex_loc[1], new_hex_loc[2])
        # insert neighbours of origin node
        origin_req = {}
        origin_req[utils.user_boundary_choice[boundary_of_origin_hex]
                   ] = new_hexagon_id
        origin_req["hexagon_id"] = origin_id
        column_updates = [
            utils.user_boundary_choice[boundary_of_origin_hex], 'updated_at']
        logger({"data": origin_req, "colm": column_updates})
        update_origin_hex_neighbour = queries.insert_hex_neighbours(
            {"data": origin_req, "colm": column_updates})
        logger("----moving to update----")
        # Propagate neighbour links to surrounding active hexes.
        update_neighbours(new_hex_neighbours)
        return {"statusCode": 200, 'response': update_origin_hex_neighbour}
    else:
        return {'response': 'err'}
def update_neighbours(updating_neighbours):
    """Recompute and persist neighbour links for hexes adjacent to a new hex.

    *updating_neighbours* maps border keys (n1..n6) to neighbour hex ids,
    with 'NO' meaning an empty slot.
    """
    # logger(updating_neighbours)
    for border in updating_neighbours:
        if (updating_neighbours[border] != 'NO'):
            hex_id = updating_neighbours[border]
            # logger(hex_id)
            neighbour_location_obj = queries.get_hex_location_by_id(hex_id)
            neighbour_is_active = neighbour_location_obj.get(
                'hexagons', [{'location': {}}])[0].get('is_active', '')
            if neighbour_is_active == 'TRUE':
                neighbour_location_dict = neighbour_location_obj.get(
                    'hexagons', [{'location': {}}])[0].get('location', '')
                # logger(neighbour_location_dict)
                if(neighbour_location_dict):
                    # Cube coordinates (q, r, s) of the active neighbour.
                    loc = [
                        neighbour_location_dict['q'],
                        neighbour_location_dict['r'],
                        neighbour_location_dict['s']
                    ]
                    updated_neighbours = neighbours.find_new_hex_neighbours(
                        loc, 1)
                    logger(updated_neighbours)
                    updated_neighbours["hexagon_id"] = hex_id
                    # logger(updated_neighbours)
                    column_updates = ['n1', 'n2', 'n3',
                                      'n4', 'n5', 'n6', 'updated_at']
                    insert_updated_neighbours = queries.insert_hex_neighbours(
                        {"data": updated_neighbours, "colm": column_updates})
                    # NOTE(review): returns after updating the FIRST active
                    # neighbour — remaining borders are skipped; confirm this
                    # early return is intentional.
                    return {"body": insert_updated_neighbours}
    return {"err": "error"}
@app.route('/remove-hex', methods=['GET', 'POST'])
# @cross_origin()
def delete_hex_bfs():
    """Remove hexagon ``src`` only if deletion keeps the cluster connected.

    Runs a BFS from the hex; removal is allowed when all but one of its
    neighbours can reach it again via an alternative path (or when its
    degree is < 2).
    """
    # NOTE(review): 'n4' appears twice in this list — likely a typo.
    borders = ['n1', 'n2', 'n3', 'n4', 'n4', 'n5', 'n6']
    # Opposite-border lookup: a hex's n1 neighbour sees it as its n4, etc.
    border_map = {'n1': 'n4', 'n2': 'n5', 'n3': 'n6',
                  'n4': 'n1', 'n5': 'n2', 'n6': 'n3'}
    origin_hex = request.args['src']
    if origin_hex:
        try:
            neighbours_of_origin = queries.find_neighbours_by_name(origin_hex)
        except:
            return {"err": "error"}
    # The hex is alerady deleted or doesn't exist
    if len(neighbours_of_origin) > 0:
        neighbours_of_origin = neighbours_of_origin[0]
    else:
        return {"err": "error"}
    origin_hex_id = neighbours_of_origin.get("hex", "").get("hexagon_id", "")
    degree = degrees_count.calc_degree(neighbours_of_origin)
    # Leaf (or isolated) hexes can always be removed safely.
    if degree < 2:
        delete_resp = delete_hexagon_final(
            neighbours_of_origin, origin_hex, origin_hex_id, borders, border_map)
        if delete_resp:
            return {"body": "Done!"}
        else:
            return {"err": "error while removing"}
    # starting bfs
    frontier = Queue()
    frontier.put(origin_hex_id)
    # this map for id's
    reached = set()
    reached.add(origin_hex_id)
    # this map for (id, border(n1, n2...n6)) to uniquely identify the path we are using
    # to find it
    reached_border = []
    level = 0
    count_of_origin_hits_using_diff_path = 0
    while not frontier.empty():
        level = level + 1
        current = frontier.get()
        # fetching all the neighbour id's in a level
        details_neighbour_hex = queries.get_hex_details_by_id(
            current).get("hexagons", "")
        if len(details_neighbour_hex) > 0:
            details_neighbour_hex = details_neighbour_hex[0]
        count_of_entry_to_hex = 0
        # iterating in all the neighbours of the recent id
        for border in borders:
            if details_neighbour_hex.get("hex", "").get(border, "") != "NO":
                neighbour_id = details_neighbour_hex.get(
                    "hex", "").get(border, "")
                if level == 1 and count_of_entry_to_hex == 0:
                    # reached_border.append((current_id, border))
                    reached_border.append((current, border_map[border]))
                    list(set(reached_border))
                count_of_entry_to_hex = count_of_entry_to_hex + 1
                # already visited node also traversed through the same path
                if (neighbour_id in reached) and (neighbour_id, border) in reached_border:
                    continue
                if (level > 1):
                    if ((neighbour_id not in reached) or
                            ((neighbour_id in reached) and (neighbour_id, border) not in reached_border)):
                        # the origin hex is found but not from the same path ,
                        # from a different path
                        if(neighbour_id == origin_hex_id):
                            count_of_origin_hits_using_diff_path = count_of_origin_hits_using_diff_path + 1
                # if there are 3 neighs , out of which 2 of them belongs to the same connected comp.
                # and the other 1 belongs other connected comp. , we need to verify its connected or not
                if count_of_origin_hits_using_diff_path == (degree - 1):
                    # if the hex is found update its neighs. and itself
                    delete_resp = delete_hexagon_final(
                        neighbours_of_origin, origin_hex, origin_hex_id, borders, border_map)
                    if delete_resp:
                        return {"body": "Done!"}
                    else:
                        return {"err": "error while removing"}
                # mapping the new neighbour and its correspoding border
                # so that we dom't end up seeing that id from the previous path
                frontier.put(neighbour_id)
                reached.add(neighbour_id)
                logger(neighbour_id)
                logger(border)
                # reached_border.append((neighbour_id, border))
                if level > 1:
                    reached_border.append((current, border_map[border]))
                    list(set(reached_border))
    return {"err": "Not possible to remove"}
def delete_hexagon_final(neighbours_of_origin, origin_hex, origin_hex_id, borders, border_map):
    """Unlink the hex from every neighbour, then delete it.

    Returns True when the delete query succeeded, False otherwise.
    """
    for border in borders:
        if neighbours_of_origin.get("hex", "").get(border, "") != "NO":
            neighbour_id = neighbours_of_origin.get(
                "hex", "").get(border, "")
            # Clear the opposite-border slot on the neighbour's row.
            origin_req = {}
            origin_req["hexagon_id"] = neighbour_id
            origin_req[border_map[border]] = "NO"
            column_updates = [
                border_map[border], "updated_at"]
            insert_updated_neighbours = queries.insert_hex_neighbours(
                {"data": origin_req, "colm": column_updates})
    try:
        deletion_resp = queries.delete_hex(
            origin_hex, origin_hex_id)
        return True
    except:
        return False
| ricksr/cluster-anywhr | cluster/app.py | app.py | py | 11,777 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
... |
70390276585 | from flask import Flask,render_template,request,jsonify
import utils
app = Flask(__name__)
@app.route('/') #Base API
def home():
    """Render the landing page with the prediction form."""
    print('Testing Home API')
    return render_template('home.html')
@app.route('/predict', methods = ['POST'])
def prediction():
    """Read the four iris measurements from the form and render the prediction."""
    print('Testing prediction API')
    data = request.form
    # NOTE(review): the route only accepts POST, so the else branch below
    # is unreachable in practice.
    if request.method == 'POST':
        print('Input data is :',data)
        x1 = float(data['SepalLengthCm'])
        x2 = float(data['SepalWidthCm'])
        x3 = float(data['PetalLengthCm'])
        x4 = float(data['PetalWidthCm'])
        prediction = utils.predict_class(x1,x2,x3,x4)
        return render_template('after.html', data=prediction)
    else:
        return jsonify({'Message':'Unsuccessful'})
# Bind on all interfaces so the app is reachable from outside localhost.
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5000, debug=False)
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.request... |
18190036430 | import pandas as pd
import Levenshtein
import numpy as np
from anytree.search import find
from utils.category_tree import get_category_tree
from utils.io_custom import read_pickle_object
from scipy.spatial.distance import cosine
import re
def find_node(id, tree):
    """Return the node in *tree* whose ``name`` equals *id*."""
    def matches(node):
        return node.name == id
    return find(tree, matches)
def get_relative_depth(id, tree):
    """Depth of node *id* relative to the reference category node 872901."""
    base_depth = find_node(872901, tree).depth
    return find_node(id, tree).depth - base_depth
def count_children(id, tree):
    """Number of direct children of node *id*."""
    node = find_node(id, tree)
    return len(node.children)
def count_descendants(id, tree):
    """Total number of descendants (children, grandchildren, ...) of node *id*."""
    node = find_node(id, tree)
    return len(node.descendants)
def preprocessing_text(s):
    """Normalize text: keep Latin/Cyrillic letters and digits, lowercase, single-spaced.

    :param s: input string.
    :return: normalized string.
    """
    tokens = re.findall("[a-zA-Zа-яА-Я0-9]+", s)
    return " ".join(tokens).lower()
def get_levenshtein_distance_between(first_line, second_line):
    """Levenshtein edit distance between two strings.

    :param first_line: first string.
    :param second_line: second string.
    :return: int edit distance.
    """
    distance = Levenshtein.distance(first_line, second_line)
    return distance
def get_lev_dist_between_query_category(query, category):
    """Element-wise Levenshtein distances between two aligned Series.

    :param query: pd.Series of query strings.
    :param category: pd.Series of category strings.
    :return: np.array of int distances.
    """
    levenshtein_distances = [
        get_levenshtein_distance_between(q, c)
        for q, c in zip(query.values, category.values)
    ]
    return np.array(levenshtein_distances)
def get_brands_and_products_lists(path_to_data):
    """Load the unique brand and product name lists from CSV files.

    :param path_to_data: relative path to the data directory.
    :return: (brands, products) as lists of strings.
    """
    brands_frame = pd.read_csv(path_to_data + "/unique_brands.csv")
    products_frame = pd.read_csv(path_to_data + "/unique_products.csv")
    brands = [str(value) for value in brands_frame.iloc[:, 0]]
    products = [str(value) for value in products_frame.iloc[:, 0]]
    return brands, products
def create_data_with_features(path_to_data):
    """Load the training CSV and attach all engineered features.

    Kept for backward compatibility with the analysis notebooks.

    :param path_to_data: relative path to the data directory.
    :return: pd.DataFrame with feature columns.
    """
    training_data = pd.read_csv(path_to_data + "/data_for_model.csv")
    return get_data_with_feature(training_data, path_to_data)
def get_cosine_dist_between_query_category(query, category, vectorizer):
    """Element-wise cosine distances between vectorized queries and categories.

    :param query: pd.Series of query strings.
    :param category: pd.Series of category strings.
    :param vectorizer: TF-IDF vectorizer pre-fitted on train queries/categories.
    :return: np.array of cosine distances.
    """
    query_matrix = vectorizer.transform(query.values)
    category_matrix = vectorizer.transform(category.values)
    distances = [
        cosine(query_vec.toarray(), category_vec.toarray())
        for query_vec, category_vec in zip(query_matrix, category_matrix)
    ]
    return np.array(distances)
def get_data_with_feature(data, path_to_data):
    """
    Generate features for train/validation data.

    :param data: pd.DataFrame with columns [query, category_id, category_name, is_redirect].
    :param path_to_data: str - relative path to brand/product/tree data files.
    :return data: pd.DataFrame with the engineered feature columns (drops
        category_id, query and category_name at the end).
    """
    brands, products = get_brands_and_products_lists(path_to_data)
    root = get_category_tree(path_to_data)
    data['query'] = data['query'].apply(preprocessing_text)
    data['category_name'] = data['category_name'].apply(preprocessing_text)
    data['len_of_query'] = data['query'].apply(lambda query: len(query))
    data['num_of_word_in_query'] = data['query'].apply(
        lambda query:
        len(query.split(' '))
    )
    # NOTE(review): category_name is preprocessed a second time here —
    # redundant (the normalization is idempotent) but harmless.
    data['category_name'] = data['category_name'].apply(preprocessing_text)
    data['len_of_category'] = data['category_name'].apply(
        lambda category:
        len(category)
    )
    data['num_of_word_in_category'] = data['category_name'].apply(
        lambda category:
        len(category.split(' '))
    )
    # Count how many known brand/product names occur as substrings of the query.
    data['how_match_brands_name_in_query'] = data['query'].apply(
        lambda query:
        sum([True for brand in brands if query.find(brand) != -1])
    )
    data['how_match_products_name_in_query'] = data['query'].apply(
        lambda query:
        sum([True for product in products if query.find(product) != -1])
    )
    data['mean_word_len_in_category'] = data['category_name'].apply(
        lambda category_name:
        np.mean([len(word) for word in category_name.split(' ')])
    )
    data['mean_word_len_in_query'] = data['query'].apply(
        lambda query:
        np.mean([len(word) for word in query.split(' ')])
    )
    data['max_word_len_in_category'] = data['category_name'].apply(
        lambda category_name:
        np.max([len(word) for word in category_name.split(' ')])
    )
    data['max_word_len_in_query'] = data['query'].apply(
        lambda query:
        np.max([len(word) for word in query.split(' ')])
    )
    data['min_word_len_in_category'] = data['category_name'].apply(
        lambda category_name:
        np.min([len(word) for word in category_name.split(' ')])
    )
    data['min_word_len_in_query'] = data['query'].apply(
        lambda query:
        np.min([len(word) for word in query.split(' ')])
    )
    data['is_query_long'] = data['len_of_query'].apply(lambda l: int(l > 50))
    # TODO: verify the three category-tree features below:
    data['relative_depth'] = data['category_id'].apply(
        lambda category_id:
        get_relative_depth(category_id, root)
    )
    data['children_count'] = data['category_id'].apply(
        lambda category_id:
        count_children(category_id, root)
    )
    data['descendants_count'] = data['category_id'].apply(
        lambda category_id:
        count_descendants(category_id, root)
    )
    data['lev_dist'] = get_lev_dist_between_query_category(data['query'],
                                                          data['category_name'])
    vectorizer = read_pickle_object(path_to_data + '/vectorizer.obj')
    data['cosine_dist'] = get_cosine_dist_between_query_category(data['query'],
                                                                 data['category_name'],
                                                                 vectorizer)
    # data['number_of_children_category'] = get_relative_depth(data['category_id'])
    # data['number_of_descendants_category'] = count_descendants(data['category_id'])
    # data['category_depth'] = get_relative_depth(data['category_id'])
    data = data.drop(columns=['category_id', 'query', 'category_name'])
    return data
| comptech-winter-school/online-store-redirects | utils/feature_generation.py | feature_generation.py | py | 8,101 | python | ru | code | 3 | github-code | 36 | [
{
"api_name": "anytree.search.find",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "Levenshtein.distance",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.array",
... |
71551898663 | from __future__ import annotations
import falcon
from app.models import Rating
from app.schemas.ratings import rating_item_schema
class RateResource:
    # Schema used by the deserialization middleware to validate POST bodies.
    deserializers = {"post": rating_item_schema}
    def on_post(self, req: falcon.Request, resp: falcon.Response, id: int):
        """
        ---
        summary: Add rating for movie as logged in user
        tags:
            - Rating
        parameters:
            - in: body
              schema: RatingSchema
        consumes:
            - application/json
        produces:
            - application/json
        responses:
            201:
                description: Vote successful
            401:
                description: Unauthorized
            422:
                description: Input body formatting issue
        """
        # Middleware has already populated the request context with the
        # database session and the authenticated user.
        session_holder = req.context["db"]
        current_user = req.context["user"]
        score = req._deserialized["rating"]
        # Persist the vote for the movie identified by the URL parameter.
        new_vote = Rating(rating=score, user=current_user, movie_id=id)
        session_holder.session.add(new_vote)
        session_holder.session.commit()
        resp.status = falcon.HTTP_CREATED
        resp.media = {"message": "rating saved"}
| alysivji/falcon-batteries-included | app/resources/ratings.py | ratings.py | py | 1,133 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "app.schemas.ratings.rating_item_schema",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "falcon.Request",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "falcon.Response",
"line_number": 12,
"usage_type": "attribute"
},
{
"a... |
73435015143 | import numpy
import statsmodels.regression
import statsmodels.tools
import scipy.optimize as opti
import scipy.interpolate as interp
import scipy.signal as signal
import matplotlib.pyplot as plt
class PVP:
    """Positional Variance Profiles (PVP) computed from sets of trajectories.

    Trajectories are accumulated with ``add_trajectory`` into an internal
    buffer of shape (time, dimension, n_trajectories); profiles, outlier
    removal and the spline fit are then run via ``compute_pvp``.
    """
    def __init__(self, sampling_period=0.01):
        # Resampling timestep (seconds) used throughout the class.
        self.sampling_period = sampling_period
        # Buffer of shape (time, dimension, n_trajectories); filled by _extend().
        self._kinematic_profile = None
        self.fit_params = [None, None, None]
        self._pvp_params = None
        self._removed_outliers = 0
    def __repr__(self):
        # Best-effort summary: each section is skipped when the corresponding
        # data has not been computed yet (AttributeError is expected then).
        _str = f"{self.__class__.__name__}\n"
        try:
            _str += f"Duration: \t {self.timestamps[-1]:.2f} seconds \n"
        except AttributeError:
            pass
        try:
            _str += f"Number of trajectories: \t {self.kinematic_profile.shape[0]}\n"
        except AttributeError:
            pass
        try:
            _str += f"Outliers Removed: \t {self._removed_outliers}\n"
        except AttributeError:
            pass
        try:
            _str += "PVP stats: \t tau={:.3f}, sigma0={:.2e}, Dtau={:.3f}\n".format(
                *tuple(self._pvp_params.values())[1:]
            )
        except AttributeError:
            pass
        try:
            _str += "PVP fit: \t C={:.2f}, Omega = {:.3f}".format(
                -self.fit_params[1], self.fit_params[2]
            )
        except (AttributeError, TypeError):
            pass
        return _str
    @property
    def timestamps(self):
        """Time vector (seconds) matching the time axis of the buffer."""
        return [
            self.sampling_period * i for i in range(self._kinematic_profile.shape[0])
        ]
    @property
    def pvp_params(self):
        """PVP statistics: tau (time of max std), sigma_0 (its value) and
        Dtau (mean profile at tau). Requires compute_profiles() first."""
        self._pvp_params = {
            "tau_index": numpy.argmax(self.std_prof),
            "tau": numpy.argmax(self.std_prof) * self.sampling_period,
            "sigma_0": numpy.max(self.std_prof),
            "Dtau": self.mean_prof[numpy.argmax(self.std_prof)],
        }
        return self._pvp_params
    @property
    def kinematic_profile(self):
        # Transposed view of the internal buffer (axes reversed by .T).
        return self._kinematic_profile.T
    @kinematic_profile.setter
    def kinematic_profile(self, item):
        if len(item.shape) != 3:
            raise ValueError(
                f"The shape of kinematic profiles should be of length 3 (time, dimension, number of movements) but it has length {len(item.shape)}"
            )
        self._kinematic_profile = item
    @property
    def _reference_signal(self):
        # Signal from which profiles are computed; subclasses override this.
        return self.kinematic_profile
    def _remove_outliers(self, remove_outliers_k_sigma_away=3.5):
        """_remove_outliers
        Remove trajectory outliers, by removing all trajectories that are outside of the range (m +- k sigma), where m is the mean trajectory and sigma is the standard deviation of the set of trajectories.
        .. note:
            The formula above is based on the confidence interval for a Gaussian, and we apply it component per component. A true multivariate approach would use the confidence interval for a multivariate Gaussian see e.g. https://stats.stackexchange.com/questions/29860/confidence-interval-of-multivariate-gaussian-distribution
        :param remove_outliers_k_sigma_away: k, defaults to 3.5
        :type remove_outliers_k_sigma_away: float, optional
        :return: (index of removed trajectories in old array, new array)
        :rtype: tuple(list, array)
        """
        _indx = []
        k = remove_outliers_k_sigma_away
        for ncomp in range(self.kinematic_profile.shape[1]):
            mean = numpy.mean(self.kinematic_profile[:, ncomp, :], axis=0)
            std = numpy.std(self.kinematic_profile[:, ncomp, :], axis=0)
            for n, _traj in enumerate(self.kinematic_profile[:, ncomp, :]):
                if (_traj > mean + k * std).any() or (_traj < mean - k * std).any():
                    _indx.append(n)
        # De-duplicate: a trajectory may be flagged by several components.
        _indx = list(set(_indx))
        self._kinematic_profile = numpy.delete(self._kinematic_profile, _indx, axis=2)
        # Profiles are stale after deletion; recompute them.
        self.compute_profiles()
        self._removed_outliers += len(_indx)
        return _indx, self._kinematic_profile
    def plot_std_profiles(self, ax=None, fit=True, prof_kwargs=None, fit_kwargs=None):
        """plot_std_profiles
        Plots the standard deviation profiles on the provided axis. If not provided, will create a new figure from scratch.
        If fit is True, will also compute the spline fit to the second and third phase. Keyword arguments to the std plotter (prof_kwargs) and to the fit plotter (fit_kwargs) can also be given.
        .. note::
            You should have fitted the profiles prior to plotting them.
        :param ax: axis on which to draw, defaults to None. If None, creates a new figure and axis to draw on.
        :type ax: plt.axis, optional
        :param fit: whether to plot the spline fit, defaults to True
        :type fit: bool, optional
        :param prof_kwargs: keyword arguments are passed to plotter for the standard deviation profile, defaults to None
        :type prof_kwargs: dict, optional
        :param fit_kwargs: keyword arguments are passed to plotter for the spline fit, defaults to None
        :type fit_kwargs: dict, optional
        """
        if ax is None:
            _, ax = plt.subplots(1, 1)
        prof_kwargs = {} if prof_kwargs is None else prof_kwargs
        fit_kwargs = {} if fit_kwargs is None else fit_kwargs
        y = self.std_prof
        x = self.timestamps
        ax.semilogy(x, y, "k-", lw=3, label="PVP", **prof_kwargs)
        if fit:
            ax.semilogy(
                self.pvp_fit_x,
                self.pvp_fit_y,
                "r-",
                lw=3,
                label="Spline fit",
                **fit_kwargs,
            )
            ax.set_title(
                r"$\tau = {:.3f}, C = {{{:.1f}}}, \Omega = {:.1f}$".format(
                    self.pvp_params["tau"], -self.fit_params[1], self.fit_params[2]
                )
            )
        ax.grid(visible=True, which="minor", linestyle="--")
        ax.set_xlabel("Time (s)")
        ax.set_ylabel(r"$\sigma\mathrm{(t) (m)}$")
    def _compute_mean_profile(self):
        """Mean profile: Euclidean norm across components of the per-component
        mean trajectories (sqrt of the sum of squared component means)."""
        for k in range(self._reference_signal.shape[1]):
            self._mean_prof_incr = numpy.mean(self._reference_signal[:, k, :], axis=0)
            if k == 0:
                self.mean_prof = self._mean_prof_incr ** 2
            else:
                self.mean_prof += self._mean_prof_incr ** 2
        self.mean_prof = numpy.sqrt(self.mean_prof)
        return self.mean_prof
    def _compute_std_profile(self):
        """Std profile: Euclidean norm across components of the per-component
        standard deviation profiles."""
        for k in range(self._reference_signal.shape[1]):
            self._std_prof_incr = numpy.std(self._reference_signal[:, k, :], axis=0)
            if k == 0:
                self.std_prof = self._std_prof_incr ** 2
            else:
                self.std_prof += self._std_prof_incr ** 2
        self.std_prof = numpy.sqrt(self.std_prof)
        return self.std_prof
    def compute_profiles(self):
        """compute_profiles
        Computes the mean and standard deviation profiles for a set of trajectories.
        :return: (mean profile, standard deviation profile)
        :rtype: tuple(array, array)
        """
        self._compute_mean_profile()
        self._compute_std_profile()
        return self.mean_prof, self.std_prof
    def compute_pvp(self, remove_outliers_k_sigma_away=3.5):
        """compute_pvp
        Run the full PVP routine:
            + compute profiles
            + remove outliers k sigma away
            + fit profiles
        :param remove_outliers_k_sigma_away: remove outliers k sigma away, defaults to 3.5
        :type remove_outliers_k_sigma_away: float, optional
        :return: standard deviation profile, x and y values of the fit, kinematic profiles
        :rtype: tuple(array, array, array, array)
        """
        _, std_prof = self.compute_profiles()
        _, kinematic_profiles = self._remove_outliers(
            remove_outliers_k_sigma_away=remove_outliers_k_sigma_away
        )
        _, fit_x, fit_y = self._fit_profiles()
        return std_prof, fit_x, fit_y, kinematic_profiles
    def _fit_profiles(self, **optim_kwargs):
        """Fit a monotone piecewise-linear spline (in log2 space) to the std
        profile after tau, then refine the linear phase with an OLS fit."""
        ### Define cost function for optimization procedure
        def monospline(THETA, *args):
            # Squared error of a line that becomes flat after breakpoint mt.
            x = args[0]
            y = args[1]
            a, b, mt = THETA
            out = 0
            for i, v in enumerate(x):
                if v < mt:
                    out += (a + b * v - y[i]) ** 2
                else:
                    out += (a + b * mt - y[i]) ** 2
            return out
        ## Once Omega has been determined, run a classical LR on the second phase to get LR diagnostics
        def get_fit_second_phase(y, indx_omega):
            x = [self.sampling_period * i for i in range(indx_omega)]
            yy = y[:indx_omega]
            xx = statsmodels.tools.add_constant(x)
            model = statsmodels.regression.linear_model.OLS(yy, xx)
            self.second_phase_fit = model.fit()
            return self.second_phase_fit
        indx_tau, tau, sigma0, Dtau = tuple(self.pvp_params.values())
        ### Initialize optimization algorithm - Data and start parameters
        theta0 = optim_kwargs.pop(
            "spline_param_guess", [sigma0, -5, 1]
        ) # should work for most cases
        if indx_tau:
            x = self.timestamps[0:-indx_tau]
            y = numpy.log2(self.std_prof[indx_tau:])
        else:
            x = self.timestamps
            y = numpy.log2(self.std_prof)
        ## Global Optimization
        n_iter = optim_kwargs.pop("basinhopping_n_iter", 50)
        res = opti.basinhopping(
            func=monospline,
            x0=theta0,
            niter=n_iter,
            minimizer_kwargs={
                "method": "Nelder-Mead",
                "args": (x, y),
                "options": {"maxiter": 1000, "disp": 0},
            },
        )
        a, b, c = res.x
        # Breakpoint expressed in samples.
        c0 = int(numpy.ceil(c / self.sampling_period))
        self.omega = c
        # Refine slope/intercept of the linear phase via OLS.
        a, b = get_fit_second_phase(y, c0).params
        _yy = [a + b * i * self.sampling_period for i in range(0, c0)] + [
            a + c * b for i in range(c0, len(y[indx_tau:]))
        ]
        # Back from log2 space.
        _yy = [2 ** v for v in _yy]
        t_fit = [
            self.pvp_params["tau"] + i * self.sampling_period
            for i in range(0, len(_yy))
        ]
        self.pvp_fit_x = t_fit
        self.pvp_fit_y = _yy
        self.fit_params = [a, b, c + self.pvp_params["tau"]]
        return self.fit_params, self.pvp_fit_x, self.pvp_fit_y
    def _extend(self, trajectory, extend_to=3):
        """_extend extend trajectories
        Extends the self._kinematic_profile buffer with a new trajectory while ensuring the series in the buffer always have the same size as the trajectory. For example, if the buffer has shape (X, Y) and the trajectory series has length (Z):
            + if Z > Y, then the buffer is filled with the last values to reach shape (X, Z)
            + if Z < Y, then the trajectory is filled with the last value to reach shape (1, Y)
        The minimum duration of the series can be set with extend_to.
        :param trajectory: trajectory to add to the self._kinematic_profile buffer
        :type trajectory: array_like
        :param extend_to: minimum duration of the series in seconds, defaults to 3
        :type extend_to: int, optional
        """
        if len(trajectory.shape) == 1:
            trajectory = trajectory.reshape(-1, 1)
        if self._kinematic_profile is None:  # First traj
            # Pad the first trajectory up to the minimum duration.
            Nmin = 1 + int(numpy.ceil(extend_to / self.sampling_period))
            if trajectory.shape[0] < Nmin:
                fill = numpy.full(
                    shape=(Nmin - trajectory.shape[0], trajectory.shape[1]),
                    fill_value=trajectory[-1, :],
                )
                trajectory = numpy.concatenate((trajectory, fill), axis=0)
            self._kinematic_profile = numpy.expand_dims(trajectory, axis=2)
        else:
            if self._kinematic_profile.shape[0] < trajectory.shape[0]:
                # Pad the whole buffer to the new trajectory's length.
                fill = numpy.full(
                    shape=(
                        -self._kinematic_profile.shape[0] + trajectory.shape[0],
                        self._kinematic_profile.shape[1],
                        self._kinematic_profile.shape[2],
                    ),
                    fill_value=self._kinematic_profile[-1, :, :],
                )
                self._kinematic_profile = numpy.concatenate(
                    (self._kinematic_profile, fill), axis=0
                )
            elif self._kinematic_profile.shape[0] > trajectory.shape[0]:
                # Pad the new trajectory to the buffer's length.
                fill = numpy.full(
                    shape=(
                        self._kinematic_profile.shape[0] - trajectory.shape[0],
                        self._kinematic_profile.shape[1],
                    ),
                    fill_value=trajectory[-1, :],
                )
                trajectory = numpy.concatenate((trajectory, fill), axis=0)
            self._kinematic_profile = numpy.concatenate(
                (self._kinematic_profile, numpy.expand_dims(trajectory, axis=2)), axis=2
            )
        return self._kinematic_profile
    # NOTE(review): mutable default arguments below ([..] lists) are shared
    # across calls; safe here because they are only read, never mutated.
    def _correct_edges(self, container, method="speed_threshold", edges = ['start', 'stop'], thresholds = [1,5], **kwargs):
        """_find_start correct start
        Trajectories may not always be consistently segmented. This function performs a correction for the start point, as indicated by the method.
            + method = 'speed_threshold' :
                Computes a threshold for speed as x_percent * max speed. All points the target and the first time when the threshold is crossed are removed.
                \*\*kwargs = {'percent' : x_percent}
        :param container: output from add_traj
        :type container: numpy.ndarray
        :param method: method to correct start, defaults to "speed_threshold"
        :type method: str, optional
        :return: trajectory with correction for speed
        :rtype: numpy.ndarray
        """
        time, traj, speed = container
        indx = 1
        stp_index = len(traj)-1
        if method == "speed_threshold":
            ### Removes points until having reached a speed that is 1% of the max speed.
            max_speed = numpy.max(numpy.abs(speed[1:]))
            if 'start' in edges:
                percent = thresholds[0]
                while abs(speed[indx]) < max_speed * percent / 100:
                    indx += 1
            if 'stop' in edges:
                try:
                    percent = thresholds[1]
                except IndexError:
                    # Fall back to the start threshold when only one is given.
                    percent = thresholds[0]
                while abs(speed[stp_index]) < max_speed * percent / 100: # find first bump
                    stp_index -= 1
                while abs(speed[stp_index]) > max_speed * percent / 100: # find start of decrease
                    stp_index -= 1
        else:
            raise NotImplementedError(
                "Only method speed_threshold is implemented for now."
            )
        container = numpy.concatenate(
            (time[indx:stp_index].reshape(1, -1), traj[indx:stp_index].reshape(1, -1)), axis=0
        )
        container = numpy.concatenate((container, speed[indx:stp_index].reshape(1, -1)), axis=0)
        return container, indx, stp_index
    def plot_kinematic_profiles(self, ax=None, **kwargs):
        """plot_kinematic_profiles
        Plots the kinematic profiles on the provided axis. If not provided, will create a new figure from scratch.
        :param ax: axis on which to draw, defaults to None. If None, creates a new figure and axis to draw on.
        :type ax: plt.axis, optional
        :param **kwargs: keyword arguments are passed to plt.plot()
        :type **kwargs: key-values
        """
        if ax is None:
            fig, ax = plt.subplots(1, 2)
        x = self.timestamps
        for k in range(self.kinematic_profile.shape[1]):
            for y in self.kinematic_profile[:, k, :]:
                ax[k].plot(x, y, "-", **kwargs)
            ax[k].set_xlabel("Time (s)")
            ax[k].set_ylabel("Position")
    def add_trajectory(self, t, *args, extend_to=3, target=None, correct_edges=False, correct_edges_kwargs = None):
        """Add trajectory to the set from which PVPs are computed
        Pass the time series, and any number of positional series. For example in dim3 with x, y, z, you would call (with defaults kwargs)
        .. code-block:: python
            pvp.add_trajectory(t, x, y, z, extend_to = 3, target = None, correct_edges = False)
        You control the duration of the PVP (e.g. how far in time trajectories are extended). You also need to specify the target location for each trajectory. You can optionally synchronize the trajectories by pre-processing them (correct_edges). Currently, a simple thresholding rule takes care of this synchronization.
        :param t: time series
        :type t: numpy.array like
        :param args: positional series
        :type args: numpy.array like
        :param extend_to: minimal PVP duration, defaults to 3
        :type extend_to: int, optional
        :param target: target location, defaults to None. If None, will use the null vector as target.
        :type target: iterable, optional
        :param correct_edges: whether to correct the start/stop of the movement for synchronization, defaults to False
        :type correct_edges: bool, optional
        :param correct_edges_kwargs: overrides for the edge-correction settings, defaults to None
        :type correct_edges_kwargs: dict, optional
        """
        default_correct_edges_kwargs = dict(method="speed_threshold", edges = ['start'], percent=[2, 5])
        if correct_edges_kwargs is not None:
            default_correct_edges_kwargs.update(correct_edges_kwargs)
        target = [0 for i in args] if target is None else target
        # Project positions onto an orthonormal basis aligned with the target.
        projections = self._project(target, *args)
        container = self._interp_filt(
            numpy.array(t),
            *projections,
            deriv=0,
            resampling_period=self.sampling_period,
        )
        indx = 0
        if correct_edges:
            # Edge detection is run on the speed of the position norm.
            _norm = numpy.sqrt(numpy.sum(container[1, :, :] ** 2, axis=1))
            tmp_container = self._interp_filt(
                container[0, :, 0],
                _norm,
                deriv=1,
                resampling_period=self.sampling_period,
            )
            # NOTE(review): indx/stp_indx are computed but the un-trimmed
            # container is extended below — confirm whether trimming was
            # intended to be applied here.
            _, indx, stp_indx = self._correct_edges(
                tmp_container, **default_correct_edges_kwargs
            )
        self._extend(container[1, :, :], extend_to)
    def _get_orthonormal_basis(self, target, x0):
        """Return an orthonormal basis whose first vector points from x0 to
        target; explicit constructions up to 3D (see note below)."""
        target = numpy.asarray(target).squeeze()
        x0 = numpy.atleast_1d(numpy.asarray(x0).squeeze())
        if x0.shape[0] == 1:
            return self._bon1(target, x0)
        elif x0.shape[0] == 2:
            return self._bon2(target, x0)
        elif x0.shape[0] == 3:
            return self._bon3(target, x0)
        else:
            raise NotImplementedError("Dimensions above 3 are not supported yet. ")
    # below does not reliably produce an orthonormal basis
    # switching to a manual cse disjunction up to 3D for now
    # def _get_orthonormal_basis(self, target, x0):
    #     target = numpy.asarray(target).squeeze()
    #     x0 = numpy.asarray(x0).squeeze()
    #     random_basis = numpy.array(
    #         [
    #             (target - x0),
    #             *[
    #                 -1 + 2 * numpy.random.random(x0.shape[0])
    #                 for v in range(x0.shape[0] - 1)
    #             ],
    #         ]
    #     ).T
    #     self.Q, _ = numpy.linalg.qr(random_basis)
    #     return self.Q
    def _bon1(self, target, x0):
        # 1D: single unit vector toward the target.
        return normalize(target - x0).reshape(-1, 1)
    def _bon2(self, target, x0):
        # 2D: unit vector toward target plus its 90-degree rotation.
        v1 = normalize(target - x0)
        v2 = numpy.array([-v1[1], v1[0]])
        return numpy.array([[v1], [v2]]).T
    def _bon3(self, target, x0):
        # 3D: extend the 2D basis with the cross product of its two vectors.
        array = self._bon2(target, x0).T
        vec3 = numpy.cross(array[0], array[1])
        return numpy.array([[array[0]], [array[1]], [vec3]]).T
    def _project_x(self, Q, target, x):
        # Coordinates of (x - target) in the basis Q.
        u = (numpy.asarray(x) - numpy.asarray(target)).reshape(-1, 1)
        return (Q.T @ u).squeeze()
    def _project(self, target, *args):
        """Project each sample of the positional series onto the basis aligned
        with the target; returns an array of shape (dim, n_samples)."""
        dim = len(args)
        output = numpy.zeros(shape=(dim, len(args[0])))
        args = numpy.array(args).T
        Q = self._get_orthonormal_basis(target, args[0, :])
        for n, arg in enumerate(args):
            output[:, n] = self._project_x(Q, target, arg)
        return output
    # NOTE(review): filter_kwargs is a mutable default argument; safe here
    # because it is only read, never mutated.
    def _interp_filt(
        self,
        t,
        *args,
        resampling_period=0.01,
        filter_kwargs={"filtername": "kaiser", "fc": 10, "rdb": 10, "width": 5},
        deriv=2,
    ):
        """_interp_filt interpolates and filters a 1D trajectory
        Takes a trajectory, resamples it with the chosen resampling_period and filters it with the given filter. Also provides the unfiltered derivatives up to order "deriv".
        :param t: trajectory time
        :type t: array like
        :param x: trajectory position
        :type x: array like
        :param resampling_period: timestep at which the trajectory will be down/over sampled, defaults to 0.01
        :type resampling_period: float, optional
        :param filter_kwargs: scipy.signal filter description, defaults to {"filtername": "kaiser", "fc": 10, "rdb": 10, "width": 5}
        :type filter_kwargs: dict, optional
        :param deriv: order for the trajectory derivatives, defaults to 2
        :type deriv: int, optional
        :return: an array, where the first line is the time vector, and all other lines are the nth derivatives of the trajectory (0 <= n <= deriv).
        :rtype: numpy.ndarray
        """
        t = numpy.asarray(t)
        t = t - t[0]  # set null time target
        Ts = resampling_period
        output_container = None
        for n, x in enumerate(args):
            x = numpy.asarray(x)
            interpolator = interp.interp1d(t, x, fill_value="extrapolate")
            # Regular grid covering the original duration, rounded up to Ts.
            resampling_instants = numpy.linspace(
                0,
                t[-1] + (Ts - t[-1] % Ts),
                num=1 + int((t[-1] + (Ts - t[-1] % Ts)) / Ts),
            )
            x_interp = interpolator(resampling_instants)
            if filter_kwargs["filtername"] == "kaiser":
                # Design a Kaiser-window FIR low-pass and apply it zero-phase.
                N, beta = signal.kaiserord(
                    filter_kwargs["rdb"], filter_kwargs["width"] * 2 * Ts
                )
                taps = signal.firwin(
                    N, filter_kwargs["fc"] * 2 * Ts, window=("kaiser", beta)
                )
                filtered_x = signal.filtfilt(taps, 1, x_interp)
            else:
                # Otherwise expect explicit filter coefficients b, a.
                b, a = filter_kwargs["b"], filter_kwargs["a"]
                filtered_x = signal.filtfilt(b, a, x_interp)
            container = numpy.concatenate(
                (resampling_instants.reshape(1, -1), filtered_x.reshape(1, -1)), axis=0
            )
            ## Compute derivatives
            resampling_instants = numpy.append(
                resampling_instants, resampling_instants[-1] + Ts
            )
            for i in range(deriv):
                # Duplicate the first sample so the finite difference keeps
                # the same length as the signal.
                filtered_x = numpy.concatenate(
                    (
                        filtered_x.reshape(-1)[0].reshape(1, -1),
                        filtered_x.reshape(1, -1),
                    ),
                    axis=1,
                )
                filtered_x = numpy.divide(
                    numpy.diff(filtered_x), numpy.diff(resampling_instants)
                )
                container = numpy.concatenate((container, filtered_x), axis=0)
            if n == 0:
                output_container = numpy.expand_dims(container, axis=2)
            else:
                container = numpy.expand_dims(container, axis=(2))
                output_container = numpy.concatenate(
                    (output_container, container), axis=2
                )
        return output_container
class PVP_alpha(PVP):
    """PVP variant computed on the first kinematic component only."""
    @property
    def _reference_signal(self):
        # Keep component 0 only, restoring the dropped axis so the profile
        # computations still receive a 3-axis array.
        first_component = self.kinematic_profile[:, 0, :]
        return numpy.expand_dims(first_component, 1)
class PVP_total(PVP):
    """PVP variant computed over all kinematic components."""
    @property
    def _reference_signal(self):
        # Same reference as the base class: the full kinematic buffer.
        full_profile = self.kinematic_profile
        return full_profile
class PVP_generalized(PVP):
    """PVP variant using a generalized-variance std profile (covariance
    determinant) instead of the component-wise Euclidean combination."""
    @property
    def _reference_signal(self):
        return self.kinematic_profile
    def _compute_std_profile(self):
        # For each time step, take the determinant of the covariance across
        # trajectories, raised to 1/(2*dim) to keep units comparable.
        std_prof = numpy.empty(shape=(self._kinematic_profile.shape[0]))
        for nt, t in enumerate(self._kinematic_profile):
            cov = numpy.cov(t)
            if len(cov.shape) >= 2:
                std_prof[nt] = (numpy.linalg.det(cov)) ** (
                    1 / 2 / self._kinematic_profile.shape[1]
                )
            else:
                # 1D case: numpy.cov returns a scalar variance.
                std_prof[nt] = (cov.squeeze()) ** (
                    1 / 2 / self._kinematic_profile.shape[1]
                )
        self.std_prof = std_prof
        return self.std_prof
def normalize(a):
    """Return *a* scaled to unit Euclidean (L2) norm."""
    length = numpy.linalg.norm(a, 2)
    return a / length
| jgori-ouistiti/PVPlib | pvplib/core.py | core.py | py | 24,949 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.argmax",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number":... |
12220208294 | from json import loads
from logging_tools import Logger
from population import create_sample
from requests import get
from requests import post
from requests.auth import HTTPBasicAuth
from string import Template
from time import sleep
from uuid import uuid1
""" Template for the whisk rest api. """
whisk_rest_api = Template(
'$APIHOST/api/v1/namespaces/$NAMESPACE/$ENDPOINT/$VALUE'
)
def evolution_parameters(function=3, instance=1, dim=3, population_size=20):
    """Build the default GA evolution-parameters dictionary.

    :param function: BBOB benchmark function number.
    :param instance: BBOB instance number.
    :param dim: problem dimensionality.
    :param population_size: number of individuals per population.
    :return: parameter dict with a fresh unique ``id`` and empty population.
    """
    problem = {
        'name': 'BBOB',
        'function': function,
        'instance': instance,
        'search_space': [-5, 5],
        'dim': dim,
        'error': 1e-8
    }
    experiment = {
        'experiment_id': 'dc74efeb-9d64-11e7-a2bd-54e43af0c111',
        'owner': 'mariosky',
        'type': 'benchmark'
    }
    algorithm = {
        'name': 'GA',
        'iterations': 5,
        'selection': {'type': 'tools.selTournament', 'tournsize': 12},
        'crossover': {'type': 'cxTwoPoint', 'CXPB': [0, 0.2]},
        'mutation': {
            'type': 'mutGaussian',
            'mu': 0,
            'sigma': 0.5,
            'indpb': 0.05,
            'MUTPB': 0.5
        }
    }
    return {
        'id': str(uuid1()),
        'problem': problem,
        'population': [],
        'population_size': population_size,
        'experiment': experiment,
        'algorithm': algorithm
    }
def create_parameters(settings, population=None):
    """Build evolution parameters from *settings*, seeding the population.

    When *population* is falsy (None or empty), a fresh sample is generated
    with create_sample().
    """
    params = evolution_parameters(
        settings.function,
        settings.instance,
        settings.dim,
        settings.population_size
    )
    params['population'] = population or create_sample(params)
    return params
def get_request_data(settings, endpoint, value, json=None):
    """Build the keyword arguments for a requests call against the whisk API.

    :param settings: object with apihost, namespace, auth ("user:password"),
        blocking and insecure attributes.
    :param endpoint: whisk endpoint ('actions' or 'activations').
    :param value: action name or activation id appended to the URL.
    :param json: optional JSON payload; defaults to an empty dict.
    :return: dict of kwargs suitable for requests.get/post.
    """
    # Fix: avoid a mutable default argument ({}), which would be shared
    # between calls; create a fresh empty payload per call instead.
    # maxsplit=1 keeps passwords that contain ':' intact.
    user, password = settings.auth.split(':', 1)
    return {
        'url': whisk_rest_api.safe_substitute(
            APIHOST=settings.apihost,
            NAMESPACE=settings.namespace,
            ENDPOINT=endpoint,
            VALUE=value,
        ),
        'json': {} if json is None else json,
        'params': {
            'blocking': str(settings.blocking),
            'result': 'True'
        },
        'auth': HTTPBasicAuth(user, password),
        'verify': not settings.insecure
    }
def crossover_migration(pop1, pop2, key = lambda p: p['fitness']['score']):
    """Migrate the fittest individuals of *pop2* into *pop1* (in place).

    Both populations are sorted ascending by *key*; the upper half of *pop1*
    is then replaced with the best individuals of *pop2*.
    """
    for population in (pop1, pop2):
        population.sort(key=key)
    smallest = min(len(pop1), len(pop2))
    cut = smallest // 2
    pop1[cut:] = pop2[:cut + smallest % 2]
    return pop1
def request_evolution(settings, population):
    """ Gets the population using blocking. """
    logger = Logger(settings.verbose)
    request_kwargs = get_request_data(settings, 'actions', 'gaService', population)
    logger.log('POST request to ' + request_kwargs['url'])
    result = post(**request_kwargs).json()
    logger.log('POST complete!')
    return result
def request_evolution_id(settings, population):
    """ Evolves a population and returns it's OpenWhisk activationid. """
    return request_evolution(settings, population)['activationId']
def request_evolved(settings, id):
    """ Gets the population data with it's OpenWhisk activation id. """
    logger = Logger(settings.verbose)
    request_kwargs = get_request_data(settings, 'activations', id)
    logger.log('Polling activationId ' + str(id))
    attempts = 0
    # Poll once per second until the activation result is available or the
    # configured number of attempts is exhausted.
    while attempts < settings.timeout:
        logger.log('GET request to ' + request_kwargs['url'])
        response = get(**request_kwargs).json()
        logger.log('GET complete!')
        if 'error' not in response:
            return loads(response['response']['result']['value'])
        sleep(1)
        attempts += 1
    raise ValueError('Timeout exception.')
def evolve(settings, population):
    """ Evolves the population with the given settings. """
    response = request_evolution(settings, population)
    print(response)
    # Non-blocking invocations return an activation id to poll; blocking
    # ones return the result directly.
    if 'activationId' not in response:
        return loads(response['value'])
    return request_evolved(settings, response['activationId'])
| mariosky/ga_action | py_client/evolution.py | evolution.py | py | 4,228 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "string.Template",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "uuid.uuid1",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "population.create_sample",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "requests.auth.HTT... |
74436411945 |
import os
from pathlib import Path
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from shutil import copyfile
# Script-wide configuration; the commented "sec level" section documents the
# alternative settings used for second-resolution data.
config = {
    # General
    'symbol': 'spy',
    'symbol_name': 'S&P500',
    'category': {'unespecified': ['spy']},  # 'gld', 'spy','xle', 'emb','dia', 'qqq', 'ewp'
    'extension': '.csv',
    'separator': ';',
    'desired_length': 550,  # for mahab states
    'pattern': '_no_missing_data',  # besides the name, gaps have been handled (the name it's a typo)
    'cols': ['open', 'high', 'low', 'close', 'volume', 'datetime', 'gap', 'timestamp'],
    'names_per_set': {
        'dev': 'devset',
        'train': 'train',
        'mah': 'mahalanobis_state'
    },
    # ############ For sec level
    # 'years_to_explore': ['2016', '2017', '2018', '2019', '2020'],
    # 'output_path': 'C:\\Users\\suare\\data\\tmp\\spy_seeds_seconds',
    # 'name': 'spy-seconds',
    # 'lvl_str': 's',
    # 'path': 'C:\\Users\\suare\\data\\analysis\\quantquote\\',
    # 'resample': False,  # resampling done in a previous script
    # 'ms_field': 'timestamp',  # time
    # 'dt_field': 'datetime',  # date
    # 'desired_abs_mean_tresh': 0.01,
    # 'desired_abs_min_tresh': 0.00000000000001,
    # 'allowed_outliers_pct': 0.01,
    # 'pattern': 'csv',  # no pattern needed here by 05/04/2020
    # 'prefix': 'table_'
    #### get largets and get smallest applied to 1 (min and maxs)
    # ############ For minute level
    'years_to_explore': ['2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010',
                         '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020'],
    # IMP. don't do 2020 alone or period id == 1
    'output_path': 'C:\\Users\\suare\\data\\tmp\\spy_seeds_minutes',
    'name': 'spy-minutes',
    # 'lvl_str': 'm',
    # 'desired_abs_mean_tresh': 0.1,
    'lvl_str': 'm',
    'desired_abs_mean_tresh': 1,
    'path': 'C:\\Users\\suare\\data\\analysis\\quantquote\\',
    'resample': True,  # resampling done in a previous script
    'ms_field': 'timestamp',  # time
    'dt_field': 'datetime',  # date
    'desired_abs_min_tresh': 0.00000000000001,
    'allowed_outliers_pct': 0.01,
    'prefix': ''
}
def read_file(filename: str) -> pd.DataFrame:
    """Read the symbol CSV located under ``config['path']/<filename>``.

    :param filename: sub-directory (e.g. a year) holding the symbol file.
    :return: DataFrame with columns ``config['cols']``, or ``None`` when the
        file does not exist (made explicit; previously an implicit fall-through).
    """
    path = os.sep.join([config['path'], filename])
    filepath = os.sep.join([path, config['prefix'] + config['symbol'] + config['extension']])
    if not os.path.isfile(filepath):
        return None
    # Fix: the previous f-string had no placeholder (f'Reading (unknown)');
    # report the file actually being read.
    print(f'Reading {filepath}')
    return pd.read_csv(filepath, names=config['cols'])
def get_pretraining_states(mahabset_df: pd.DataFrame, config: dict) -> dict:
    """
    Build the three Mahalanobis "state" DataFrames used for pretraining.

    Computes close-price returns restricted to trading hours, a rolling mean
    and std over a ``config['desired_length']`` window, then selects three
    characteristic periods (lateral, bearish, bullish) via identify_state().

    :param mahabset_df: OHLCV data including the configured datetime/ms fields.
    :param config: script configuration dict (see module-level ``config``).
    :return: dict mapping state id (1, 2, 3) to its DataFrame.
    """
    mahabset_df = remove_non_trading_hours_minimised(df=mahabset_df)  # remove non trading hours
    # Generate close price returns and moving average to select the period with a mean close to 0
    log_ret = False  # otherwise percentual
    if log_ret:
        mahabset_df['close_returns'] = np.log(mahabset_df['close'] / mahabset_df['close'].shift(1))
        # mahabset_df['close_returns'] = np.log(1 + mahabset_df['close'].pct_change())
    else:
        mahabset_df['close_returns'] = mahabset_df['close'] / mahabset_df['close'].shift(1) - 1
        # mahabset_df['close_returns'] = mahabset_df['close'].pct_change(1) # same result
    mahabset_df.dropna(inplace=True)
    sma_col = f"SMA_{config['desired_length']}"
    # Rolling statistics over the state window, plus the window start markers.
    mahabset_df[sma_col] = mahabset_df['close_returns'].rolling(window=config['desired_length']).mean()
    mahabset_df[sma_col+'_abs'] = mahabset_df[sma_col].abs()
    mahabset_df['roll_std'] = mahabset_df['close_returns'].rolling(window=config['desired_length']).std()
    mahabset_df['SMA_start_date'] = mahabset_df[config['dt_field']].shift(config['desired_length'])
    mahabset_df['SMA_start_ms'] = mahabset_df[config['ms_field']].shift(config['desired_length'])
    # Drop first rows as the moving average is NaN
    len_with_nans = len(mahabset_df)
    # Prepare extra cols
    # mahabset_df['datetime'] = mahabset_df.index.astype(str)  # maybe for min level is relevant
    mahabset_df.set_index(config['dt_field'], drop=False, inplace=True)
    mahabset_df.dropna(inplace=True)
    assert (len_with_nans - len(mahabset_df)) == config['desired_length'], 'There are non expected NaNs'
    states_dict = dict()
    for i in range(1, 4):  # States 1, 2 and 3 (hardcoded by now)
        selected_df = identify_state(config, mahabset_df, sma_col, state_id=i)
        # assert len(selected_df) == 1, "The maximum value is not a unique value."
        print(len(selected_df))
        print(selected_df)
        for idx, rw in selected_df.iterrows():
            print(f"Current selection from {rw['SMA_start_date']} to {rw[config['dt_field']]} with mean {sma_col}")
            # Extract the full window ending at this candidate row.
            mah_state = \
                mahabset_df[(mahabset_df['SMA_start_date'].between(rw['SMA_start_date'], rw[config['dt_field']]))]
            # avoid overwriting an state with another which len is <=35 (it has to be greater because of the indicators)
            if len(mah_state) >= 35:
                states_dict[i] = mah_state
                states_dict[i].sort_index(ascending=True, inplace=True)
    assert len(states_dict) == 3, f"State missing or diff than 3 states? states ok:{states_dict.keys()}"
    return states_dict
def identify_state(config: dict, mahabset_df: pd.DataFrame, sma_col: str, state_id: int) -> pd.DataFrame:
    """
    This function identifies the last row of a mahab state depending on a logic defined manually for that state id.
    Values 35, 20 and 20 were given manually to be able to have enough records hourly for all mahab states.
    These have been left as values by default after.

    :param config: script configuration dict (thresholds are read from it).
    :param mahabset_df: DataFrame with the rolling columns added upstream.
    :param sma_col: name of the rolling-mean column (e.g. 'SMA_550').
    :param state_id: 1 = lateral, 2 = bearish, 3 = bullish.
    :return: candidate end rows for the requested state.
    """
    if state_id == 1:  # Select one close to 0 (the closest)
        # Select one close to 0
        # (not the closest cos there are period = 0 due to lack of liquidity at certain frequencies)
        selected_df = mahabset_df[mahabset_df[sma_col + '_abs'] <= config['desired_abs_mean_tresh']]
        # selected_df = selected_df[selected_df[sma_col + '_abs'].max() == selected_df[sma_col + '_abs']]
        # Filter by desired mean fpr the lateral movement (the one with greatest STDEV)
        # selected_df = selected_df[selected_df['roll_std'].max() == selected_df['roll_std']]
        # inst.of max, t.g.m.
        selected_df = selected_df[selected_df['roll_std'].isin(list(selected_df['roll_std'].nlargest(35)))]
    elif state_id == 2:  # Select one negative (min)
        # get the min from a boundary. filter periods with 0 return at all. it may be due to lack of liquidity at s lvl
        selected_df = mahabset_df[mahabset_df[sma_col + '_abs'] >= (config['desired_abs_min_tresh'])]
        # selected_df = selected_df[selected_df[sma_col].min() == selected_df[sma_col]]
        # inst.of min, to get many
        selected_df = selected_df[selected_df[sma_col].isin(list(selected_df[sma_col].nsmallest(20)))]
    elif state_id == 3:  # Select one positive (max)
        # selected_df = mahabset_df[mahabset_df[sma_col].nlargest(3) == mahabset_df[sma_col]]
        # inst.of max, to get many
        selected_df = mahabset_df[mahabset_df[sma_col].isin(list(mahabset_df[sma_col].nlargest(20)))]
    else:
        assert False, "The trend/pattern for this state has not been specified"
    return selected_df
def remove_non_trading_hours_minimised(df) -> pd.DataFrame:
    """Keep only regular-session rows (09:31-15:59) on known trading days.

    Reads the trading calendar from 'trading_dates_Q11998_to_Q32021.csv'
    (dd/MM/yyyy) and filters *df* by time of day and by date.
    """
    calendar = pd.read_csv('trading_dates_Q11998_to_Q32021.csv')  # this list has format: dd/MM/yyyy
    # Rearrange day/month/year slices into ISO yyyy-mm-dd strings.
    iso_dates = calendar.trading_dates.astype(str).apply(lambda x: x[6:10]+'-'+x[3:5]+'-'+x[:2])
    df.index = pd.to_datetime(df.datetime)
    session_df = df.between_time('09:31', '15:59')
    session_df = session_df[session_df.index.astype(str).str[:10].isin(iso_dates)]
    return session_df
def remove_non_trading_hours(df, config: dict, level: str = None) -> pd.DataFrame:
    """Restrict *df* to regular US market hours (09:31-15:59) on known trading days.

    When config['resample'] is true, OHLCV bars are first resampled to the
    frequency prefix of *level* (e.g. '5min-foo' -> '5min'), and price gaps
    are forward-filled / volume zero-filled afterwards.

    :param df: frame with a 'datetime' column (becomes the DatetimeIndex).
    :param config: needs 'resample' (bool); the calendar CSV path is hardcoded.
    :param level: frequency label, only consulted when resampling.
    :return: the filtered (and possibly resampled) frame.
    """
    # Parse cols, dates and sort
    # this may be useful at minute level
    # df['date'] = pd.to_datetime(df['date'], format='%Y%m%d').dt.strftime('%Y-%m-%d')
    # df['time'] = (pd.to_datetime(df['time'].astype(str).str.strip(), format='%H%M').dt.strftime('%H:%M'))
    # df['datetime'] = df.date.astype(str) + ' ' + df.time.astype(str)
    # trading_dates = df.datetime.str[:10].unique()  # list of market days
    trading_dates = pd.read_csv('trading_dates_Q11998_to_Q32021.csv')  # this list has format: dd/MM/yyyy
    # inverting date format (hardcoded slice positions: dd/MM/yyyy -> yyyy-mm-dd)
    trading_dates = trading_dates.trading_dates.astype(str).apply(lambda x: x[6:10]+'-'+x[3:5]+'-'+x[:2])
    df.index = pd.to_datetime(df.datetime)
    # Drop bookkeeping columns if present; errors='ignore' tolerates their absence.
    df.drop(columns=['date', 'time', 'datetime', 'splits', 'earnings', 'dividends'],
            errors='ignore', inplace=True)
    df.sort_index(inplace=True, ascending=True)
    # Resample (but not fill gaps. This should have been done already)
    print(f' - Original size: {len(df)}')
    if config['resample']:
        ohlc_dict = {'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'volume': 'sum'}
        df = df.resample(level.split('-')[0]).agg(ohlc_dict)
        print(f' - Size after resampling at {level}: {len(df)}')
    # Remove non-trading hours and non-trading days
    df = df.between_time('09:31', '15:59')  # This is potentially the only important line from the function
    print(f' - Size after filtering out non-market hours: {len(df)}')
    df = df[df.index.astype(str).str[:10].isin(trading_dates)]
    # (low impact..) df = df.between_time('09:31','13:00') for half days (shortened sessions / early closing)
    print(f' - Size after filtering out non-trading days: {len(df)}')
    if config['resample']:
        # Fill gaps introduced by resampling: carry the last close forward,
        # derive missing OHL from it, and treat missing volume as zero.
        df['volume'] = df['volume'].fillna(0)
        df['close'] = df['close'].ffill()
        df['open'] = df['open'].fillna(df['close'])
        df['low'] = df['low'].fillna(df['close'])
        df['high'] = df['high'].fillna(df['close'])
    return df
def parse_and_save(file_dict: dict, level: str, period_id: int,
                   setid: str, setname: str, config: dict, all_files: list) -> list:
    """Clean one dataset ('mah'/'dev'/'train') and persist it under the period folder.

    For setid 'mah' the raw CSV is split into per-state frames by
    get_pretraining_states (defined elsewhere in this module); each frame is
    then trading-hours-filtered and written as its own CSV.

    Returns *all_files* with the newly written file paths appended.
    """
    file_path = file_dict[setid]
    if setid == 'mah':
        # For the mahalanobis set, it creates a moving average of x examples over the previous period to the devset.
        states_dict = get_pretraining_states(mahabset_df=pd.read_csv(file_path, sep=config['separator']),
                                             config=config)
        for k in states_dict.keys():
            # non trading hours have been removed in get_pretraining_states
            state_filepath = \
                os.sep.join([config['output_path'], level, str(period_id), f'{config["symbol"]}_{setname}_{k}.csv'])
            states_dict[k] = \
                remove_non_trading_hours(df=states_dict[k], config=config, level=level)  # for the sake of standarisation
            # NOTE(review): the message says ">40" but the check is >= 35 — confirm the intended threshold.
            assert len(states_dict[k]) >= 35, "Mahalanobis set too small (must be >40 for technical indicators)"
            states_dict[k].to_csv(state_filepath, sep=';')
            all_files.append(state_filepath)
    else:
        set_filepath = os.sep.join([config['output_path'], level, str(period_id), f'{config["symbol"]}_{setname}.csv'])
        print(file_path)
        df = remove_non_trading_hours(df=pd.read_csv(file_path, sep=config['separator']), config=config, level=level)
        assert len(df) >=35 , "Dataset set too small (must be >=35 for technical indicators)"
        df.to_csv(set_filepath, sep=';')
        all_files.append(set_filepath)
    return all_files
def compute(files: dict, periods: tuple, period_id: int, files_for_indicators: list, config: dict) -> tuple:
    """Orchestrate one (train, mahab, dev) period triple across all levels.

    Scans config['path'] for per-level folders matching config['lvl_str'],
    records the CSV that matches each of the mahab/dev/train period labels,
    then cleans and exports every set via parse_and_save.

    Returns the updated (files, files_for_indicators) pair.
    """
    period, mahab_period, dev_period = periods
    files[period] = dict()
    # 1. it picks a period and changes the name.
    for level in os.listdir(config['path']):
        files[period][level] = dict()
        # Process second and minute level data. For min level, filter only files with pattern (as there are many others)
        if config['lvl_str'] in level and '.' not in level:  # and '30' in level: # IMP!! comment last condition out
            print('=========='+level+'\n'+'==========')
            lvl_path = config['path'] + level + os.sep + config['symbol_name']
            for file in os.listdir(lvl_path):  # all of these loops are not efficient at all
                if '.csv' in file and config['pattern'] in file:
                    # A file can satisfy several period labels; each match records its path.
                    if mahab_period in file:
                        files[period][level]['mah'] = lvl_path + os.sep + file
                    if dev_period in file:
                        files[period][level]['dev'] = lvl_path + os.sep + file
                    if period in file:
                        files[period][level]['train'] = lvl_path + os.sep + file
            # 2. Export all sets in a folder with a period number (like a seed)
            Path(os.sep.join([config['output_path'], level, str(period_id)])).mkdir(parents=True, exist_ok=True)
            for setid, setname in config['names_per_set'].items():
                print(f'set id: {setid}')
                files_for_indicators = \
                    parse_and_save(file_dict=files[period][level],
                                   level=level, period_id=period_id, setid=setid, setname=setname,
                                   config=config, all_files=files_for_indicators)
            # Debug
            print(files[period][level]['dev'])
            print(os.sep.join([config['output_path'], level, str(period_id), f'{config["symbol"]}_devset.csv']))
    return files, files_for_indicators
##########################################################
# Specific handling for second or minute level frequencies
def compute_seconds() -> tuple:
    """Iterate quarterly periods at second-level granularity.

    Walks config['years_to_explore'] quarter by quarter, using the previous
    quarter as devset and the one before as mahab set (the very first
    iteration seeds both from the prior year's Q3/Q4).

    Returns (files, files_for_indicators); note the original annotation
    claimed pd.DataFrame but `files` is a dict. Relies on a module-level
    `config` defined elsewhere in this file.
    """
    mahab_m = '07'
    dev_m = '10'
    first = True
    period_id = 1
    files = dict()
    files_for_indicators = list()
    for yr in config['years_to_explore']:
        # this will need to be refactored/changed (maybe to an iterator) for min level
        for q_month in ['01', '04', '07', '10']:  # just for s level
            # it picks the prior period as a devset
            if not first:
                mahab_period = dev_period
                dev_period = period
            else:
                mahab_period = f'{int(yr) - 1}-{mahab_m}'
                dev_period = f'{int(yr) - 1}-{dev_m}'
                first = False
            period = yr + '-' + q_month
            # Compute periods in order
            files, files_for_indicators = \
                compute(files=files, periods=(period, mahab_period, dev_period), period_id=period_id,
                        files_for_indicators=files_for_indicators, config=config)
            period_id = period_id + 1
    return files, files_for_indicators
def compute_minutes() -> tuple:
    """Iterate yearly periods at minute/hour-level granularity.

    Each year trains on itself, uses the previous year as devset and the
    year before that as mahab set. Returns (files, files_for_indicators);
    relies on the module-level `config` defined elsewhere in this file.
    """
    files = dict()
    files_for_indicators = list()
    period_id = 1
    for yr in config['years_to_explore']:
        period = str(yr)
        dev_period = str(int(yr) - 1)
        mahab_period = str(int(yr) - 2)
        files[period] = dict()
        # if period_id == 6:
        # Compute periods in order
        files, files_for_indicators = \
            compute(files=files, periods=(period, mahab_period, dev_period), period_id=period_id,
                    files_for_indicators=files_for_indicators, config=config)
        period_id = period_id + 1
    return files, files_for_indicators
# Dispatch by frequency prefix: seconds get the quarterly iterator,
# minutes and hours share the yearly one.
compute_func = {
    's': compute_seconds,
    'm': compute_minutes,
    'h': compute_minutes,
}
if __name__ == "__main__":
    # Difference handling periods at the second and minutes level due to data granularity and volume
    all_files_dict, file_list = compute_func[config['lvl_str']]()
    # Let's generate /txt files too in a TMP location
    # (assumes a ./tmp directory already exists — otherwise to_csv raises)
    pd.DataFrame({'files': file_list}).to_csv(f'tmp/files_for_indicators_lvl-{config["lvl_str"]}.csv')
    # Then trigger from here the whole convertion (technical indicators).
| cetrulin/Quant-Quote-Data-Preprocessing | src/select_mahab_series.py | select_mahab_series.py | py | 16,131 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.sep.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.sep.join",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 63,
... |
38625939524 | from tkinter import *
from tkinter import messagebox
from tkinter import ttk #css for tkinter
from configparser import ConfigParser
# import io
# import urllib.request
# import base64
import time
import ssl
# WARNING(review): this disables TLS certificate verification for ALL https
# requests in the process — a security risk. Prefer fixing the local CA
# bundle (e.g. `pip install certifi`) instead of keeping this workaround.
ssl._create_default_https_context = ssl._create_unverified_context
import requests

# API endpoint templates: OpenWeatherMap (city, country, key) and
# Unsplash photo search (query, client id).
weather_url = 'http://api.openweathermap.org/data/2.5/weather?q={},{}&appid={}'
bg_url = 'https://api.unsplash.com/search/photos?query={}&client_id={}'

# API keys are read from config.ini (not checked in).
config_file = 'config.ini'
config = ConfigParser()
config.read(config_file)
weather_api_key = config['weather_api_key']['key']
unsplash_access_key = config['unsplash_api_key']['access_key']
def get_image(city):
    """Return the raw URL of the first Unsplash photo matching *city*.

    Returns None when the search request fails (requests' Response is falsy
    for non-2xx statuses). The previous version carried unreachable code
    after the return that referenced an unimported `urllib` — removed.
    """
    image_index = 0  # always take the first search result
    result = requests.get(bg_url.format(city, unsplash_access_key))
    if not result:
        return None
    json = result.json()
    return json['results'][image_index]['urls']['raw']
def get_weather(city, country):
    """Query OpenWeatherMap for *city*, *country*.

    Returns a tuple ``(city, country, temp_celsius, temp_fahrenheit, icon,
    weather)`` on success, or None when the request fails.
    """
    response = requests.get(weather_url.format(city, country, weather_api_key))
    if not response:
        return None
    payload = response.json()
    kelvin = payload['main']['temp']
    celsius = kelvin - 273.15
    fahrenheit = celsius * 9 / 5 + 32
    return (
        payload['name'],
        payload['sys']['country'],
        celsius,
        fahrenheit,
        payload['weather'][0]['icon'],
        payload['weather'][0]['main'],
    )
def search():
    """Handle the Search button: validate input, fetch weather, update the UI.

    Fix over the original: empty fields were only reported AFTER calling both
    APIs with blank arguments, and a failed lookup for a valid-looking input
    showed the generic "Error Occured" instead of the cannot-find message.
    """
    city = city_text.get()
    country = country_text.get()
    # Validate before hitting any API.
    if not city or not country:
        messagebox.showerror('Error', 'Please enter both a city and a country')
        return
    weather = get_weather(city, country)
    if not weather:
        # Lookup failed: unknown place, bad key, network error, ...
        messagebox.showerror('Error', 'Cannot find city: {} in country: {}'.format(city, country))
        return
    photo = get_image(city)
    location_lbl['text'] = '{}, {}'.format(weather[0], weather[1])
    # NOTE(review): Tk 'bitmap' expects XBM data, not a PNG path — confirm this ever renders.
    image['bitmap'] = 'weather_icons/{}.png'.format(weather[4])
    weather_lbl['text'] = weather[5]
    temp_lbl['text'] = '{:.2f}°C \n {:.2f}°F'.format(weather[2], weather[3])
    url_lbl['text'] = photo
# Build the Tk window: a search frame on top, a details frame below.
app = Tk()
app.title("Weather App")
app.geometry('900x700')
# city_image = Tk()
#Top Frame
top_frame = LabelFrame(app, text='Search', padx=50, pady=5)
top_frame.pack(side='top',padx=10, pady=10)
##Search Field (city name, bound to a StringVar read by search())
city_text = StringVar()
city_entry = ttk.Entry(top_frame, textvariable=city_text)
city_entry.pack(pady=2)
##Country Field
country_text = StringVar()
country_entry = ttk.Entry(top_frame, textvariable=country_text)
country_entry.pack(pady=2)
##Search Button — triggers search() defined above
search_btn = ttk.Button(top_frame, text="Search by City, Country", width=20, command=search)
search_btn.pack(pady=10)
#Bottom Frame
bottom_frame = LabelFrame(app, text='Details', height=500, padx=100, pady=5)
bottom_frame.pack(side='top', padx=10, pady=10)
##Location
location_lbl = ttk.Label(bottom_frame, text='--', font=('bold', 20))
location_lbl.pack()
##Image placeholder — NOTE(review): 'bitmap' expects XBM data; search() later
##assigns a PNG path here, which Tk may reject. Verify on a live run.
image = Label(bottom_frame, bitmap='--', relief='sunken')
image.pack(pady=10)
##Weather description
weather_lbl = ttk.Label(bottom_frame, text='--')
weather_lbl.pack()
##Temperature in both units
temp_lbl = ttk.Label(bottom_frame, text='--', font=('bold', 30))
temp_lbl.pack(padx=10, pady=10)
##Photo URL returned by get_image()
url_lbl = ttk.Label(bottom_frame, text='--')
url_lbl.pack(padx=10, pady=10)
#Bottom Frame: status bar recording when the app was opened
def bottom():
    # One-off status bar pinned to the bottom edge.
    statusbar = ttk.Label(app, text='Application Opened: {}'.format(time.asctime(time.localtime())), relief='sunken', anchor='w', font=('Italic', 15))
    statusbar.pack(side='bottom', fill='x')
bottom()
app.mainloop()
{
"api_name": "ssl._create_default_https_context",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "ssl._create_unverified_context",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 24,
"usage_type": "... |
16028301314 | import pymongo
import os
import pandas as pd
import json
def main():
    """One-shot loader: import FIFA player CSVs into a local MongoDB.

    Skips entirely if a 'fifa' database already exists. For each CSV under
    data/players/ it derives `year` from the filename suffix and `gender`
    from a 'female' marker, normalises position-rating columns of the form
    "NN+M"/"NN-M" into plain ints, splits comma-separated list columns,
    bulk-inserts the records, and finally builds the query indexes.
    """
    client = pymongo.MongoClient("mongodb://localhost:27017/")
    databases = client.list_database_names()
    if "fifa" not in databases:
        db = client["fifa"]
        players_collection = db["players"]
        ultimate_team_collection = db["ultimate_teams"]
        for file in os.listdir("data/players"):
            data = pd.read_csv("data/players/" + file)
            # Filename is assumed to end in a two-digit year, e.g. players_22.csv -> 2022.
            data["year"] = "20" + file.split(".")[0][-2:]
            if "female" in file:
                data["gender"] = "F"
            else:
                data["gender"] = "M"
            data_json = json.loads(data.to_json(orient='records'))
            for player in data_json:
                # Per-position ratings come as "base+boost" or "base-penalty"; collapse to one int.
                columns_to_format = ["ls", "st", "rs", "lw", "lf", "cf", "rf", "rw", "lam", "cam", "ram", "lm", "lcm", "cm", "rcm", "rm", "lwb", "ldm", "cdm", "rdm", "rwb", "lb", "lcb", "cb", "rcb", "rb", "gk"]
                for column in columns_to_format:
                    if isinstance(player[column], str):
                        if "+" in player[column]:
                            split = player[column].split("+")
                            player[column] = int(split[0]) + int(split[1])
                        elif "-" in player[column]:
                            split = player[column].split("-")
                            player[column] = int(split[0]) - int(split[1])
                # Comma-separated strings become trimmed lists.
                list_columns = ["player_positions", "player_tags", "player_traits"]
                for column in list_columns:
                    if player[column] is not None:
                        player[column] = [x.strip() for x in player[column].split(',')]
            players_collection.insert_many(data_json)
            print("Successfully loaded data for", file)
        print("Creating Indices for Faster Searching")
        players_collection.create_index([('year', pymongo.ASCENDING), ('gender', pymongo.ASCENDING)])
        players_collection.create_index([('year', pymongo.ASCENDING), ('gender', pymongo.ASCENDING), ('short_name', pymongo.ASCENDING)])
        players_collection.create_index([('year', pymongo.ASCENDING), ('gender', pymongo.ASCENDING), ('overall', pymongo.DESCENDING)])
        ultimate_team_collection.create_index([('year', pymongo.ASCENDING), ('username', pymongo.ASCENDING), ('team_name', pymongo.ASCENDING)])
    else:
        print("Data has been previously loaded.")
if __name__ == "__main__":
main() | wconti27/DS4300_FIFA_Tool | import_data.py | import_data.py | py | 2,486 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_... |
19982458840 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import unittest
from tutorons.regex.extract import ApacheConfigRegexExtractor, JavascriptRegexExtractor,\
GrepRegexExtractor, SedRegexExtractor
from tutorons.common.htmltools import HtmlDocument
logging.basicConfig(level=logging.INFO, format="%(message)s")
'''
TODO consider implementing regular expression checking for these languages:
1. tcl shell
2. Python regular expression methods
3. Java methods
'''
class ExtractRegexFromModRewriteTest(unittest.TestCase):
    """Regex-region extraction from Apache mod_rewrite directives.

    Offsets asserted below are character positions of the regex within the
    <code> element's text content.
    """
    def setUp(self):
        self.extractor = ApacheConfigRegexExtractor()
    def test_extract_regex_for_rewrite_rule(self):
        node = HtmlDocument('\n'.join([
            "<code>",
            "RewriteRule ^.*$ index.php",
            "</code>",
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 1)
        r = regions[0]
        self.assertEqual(r.node, node)
        self.assertEqual(r.start_offset, 13)
        self.assertEqual(r.end_offset, 16)
    def test_extract_regex_for_rewrite_condition(self):
        node = HtmlDocument('\n'.join([
            "<code>",
            "RewriteCond %{HTTP_USER_AGENT} ^Mozilla",
            "</code>",
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 1)
        r = regions[0]
        self.assertEqual(r.start_offset, 34)
        self.assertEqual(r.end_offset, 41)
    def test_allow_whitespace_before_directive(self):
        # Leading indentation shifts the expected offsets by 4.
        node = HtmlDocument('\n'.join([
            "<code>",
            "    RewriteCond %{HTTP_USER_AGENT} ^Mozilla",
            "</code>",
        ]))
        regions = self.extractor.extract(node)
        r = regions[0]
        self.assertEqual(r.start_offset, 38)
        self.assertEqual(r.end_offset, 45)
    def test_case_insensitive_directive_detected(self):
        node = HtmlDocument('\n'.join([
            "<code>",
            "REWRITEcOnD %{HTTP_USER_AGENT} ^Mozilla",
            "</code>",
        ]))
        regions = self.extractor.extract(node)
        r = regions[0]
        self.assertEqual(r.start_offset, 34)
        self.assertEqual(r.end_offset, 41)
class ExtractRegexFromJavascriptTest(unittest.TestCase):
    """Regex-literal extraction from JavaScript snippets, including rejection
    of code that fails parsing or carries invalid/duplicate regex flags."""
    def setUp(self):
        self.extractor = JavascriptRegexExtractor()
    def test_extract_regex_from_variable_declaration(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            "var pattern = /regular-expression/g;",
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 1)
        r = regions[0]
        self.assertEqual(r.node, node)
        self.assertEqual(r.start_offset, 16)
        self.assertEqual(r.end_offset, 33)
    def test_skip_code_that_doesnt_pass_javascript_parser(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            "<>/regex/;",
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 0)
    def test_skip_regex_with_repeated_flags(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            "var pattern = /regular-expression/gg;",
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 0)
    def test_skip_regex_with_invalid_flags(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            "var pattern = /regular-expression/x;",
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 0)
class ExtractRegexFromGrepTest(unittest.TestCase):
    """Pattern extraction from grep command lines: positional patterns,
    quoted patterns with spaces, and one or more -e options."""
    def setUp(self):
        self.extractor = GrepRegexExtractor()
    def test_extract_regex_from_variable_declaration(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            "grep pattern *",
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 1)
        r = regions[0]
        self.assertEqual(r.node, node)
        self.assertEqual(r.start_offset, 6)
        self.assertEqual(r.end_offset, 12)
    def test_extract_same_pattern_from_multiple_greps_in_one_element(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            "grep pattern *",
            "grep pattern *",
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 2)
        r1 = regions[0]
        self.assertEqual(r1.start_offset, 6)
        self.assertEqual(r1.end_offset, 12)
        r2 = regions[1]
        self.assertEqual(r2.start_offset, 21)
        self.assertEqual(r2.end_offset, 27)
    def test_extract_pattern_containing_spaces(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            "grep 'Pattern with spaces' *",
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        r = regions[0]
        self.assertEqual(r.start_offset, 7)
        self.assertEqual(r.end_offset, 25)
    def test_extract_pattern_from_option(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            "grep -e pattern *",
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        r = regions[0]
        self.assertEqual(r.start_offset, 9)
        self.assertEqual(r.end_offset, 15)
    def test_extract_patterns_from_multiple_options(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            "grep -e pattern1 -e pattern2 *",
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 2)
        # Region order is unspecified, so assert membership rather than index.
        self.assertTrue(any([r.start_offset == 9 and r.end_offset == 16 for r in regions]))
        self.assertTrue(any([r.start_offset == 21 and r.end_offset == 28 for r in regions]))
class ExtractRegexFromSedTest(unittest.TestCase):
    """Pattern extraction from sed command lines: address ranges,
    substitute expressions, -e options, and escaped delimiters."""
    def setUp(self):
        self.extractor = SedRegexExtractor()
    def test_extract_regexes_from_address_range(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            'sed "/addr1/,/addr2/p" file',
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 2)
        r1 = regions[0]
        self.assertEqual(r1.node, node)
        self.assertEqual(r1.start_offset, 7)
        self.assertEqual(r1.end_offset, 11)
        r2 = regions[1]
        self.assertEqual(r2.start_offset, 15)
        self.assertEqual(r2.end_offset, 19)
    def test_ignore_addresses_that_arent_regex(self):
        # Numeric line addresses are not regex regions.
        node = HtmlDocument('\n'.join([
            '<code>',
            'sed "0,1p" file',
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 0)
    def test_extract_regex_from_substitute_pattern(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            'sed "s/patt/replace/" file',
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 1)
        r = regions[0]
        self.assertEqual(r.start_offset, 8)
        self.assertEqual(r.end_offset, 11)
    def test_extract_regex_from_multiple_substitute_patterns(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            'sed -e "s/patt1/replace/" -e "s/patt2/replace/" file',
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 2)
        self.assertTrue(any([r.start_offset == 11 and r.end_offset == 15 for r in regions]))
        self.assertTrue(any([r.start_offset == 33 and r.end_offset == 37 for r in regions]))
    def test_handle_escaped_characters(self):
        node = HtmlDocument('\n'.join([
            '<code>',
            'sed \'s/pa\/tt/replace/\' file',
            '</code>',
        ]))
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 1)
        r = regions[0]
        self.assertEqual(r.start_offset, 8)
        self.assertEqual(r.end_offset, 13)
        self.assertEqual(r.pattern, r'pa/tt')
    def test_handle_find_pattern_with_character_class(self):
        '''
        This test case failed earlier as we performed a regex search with the pattern found
        against the original command, and it was being interpreted as a regex, and not a raw string.
        '''
        node = HtmlDocument('<code>sed "s/[A-Z]bc//g" file.txt</code>')
        regions = self.extractor.extract(node)
        self.assertEqual(len(regions), 1)
if __name__ == '__main__':
unittest.main()
| andrewhead/tutorons-server | tutorons/tests/regex/test_extractor.py | test_extractor.py | py | 8,711 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "tutorons... |
13042186116 | #!/usr/bin/env python
"""
Datapath for QEMU qdisk
"""
import urlparse
import os
import sys
import xapi
import xapi.storage.api.v5.datapath
import xapi.storage.api.v5.volume
import importlib
from xapi.storage.libs.libcow.datapath import QdiskDatapath
from xapi.storage import log
def get_sr_callbacks(dbg, uri):
    """Load the SR-specific volume plugin named by the URI's netloc.

    The SR type (e.g. 'gfs2') is taken from the URI host part; its plugin
    directory is pushed onto sys.path so importlib can load the module,
    and a fresh Callbacks instance from that module is returned.
    Note: repeated calls keep prepending to sys.path (process-wide side effect).
    """
    u = urlparse.urlparse(uri)
    sr = u.netloc
    sys.path.insert(
        0,
        '/usr/libexec/xapi-storage-script/volume/org.xen.xapi.storage.' + sr)
    mod = importlib.import_module(sr)
    return mod.Callbacks()
class Implementation(xapi.storage.api.v5.datapath.Datapath_skeleton):
    """
    Datapath implementation

    Each operation resolves the SR-specific callbacks from the URI and
    delegates to the corresponding QdiskDatapath class method.
    """
    def activate(self, dbg, uri, domain):
        # Make the datapath usable by the given domain.
        callbacks = get_sr_callbacks(dbg, uri)
        return QdiskDatapath.activate(dbg, uri, domain, callbacks)
    def attach(self, dbg, uri, domain):
        callbacks = get_sr_callbacks(dbg, uri)
        return QdiskDatapath.attach(dbg, uri, domain, callbacks)
    def deactivate(self, dbg, uri, domain):
        callbacks = get_sr_callbacks(dbg, uri)
        return QdiskDatapath.deactivate(dbg, uri, domain, callbacks)
    def detach(self, dbg, uri, domain):
        callbacks = get_sr_callbacks(dbg, uri)
        return QdiskDatapath.detach(dbg, uri, domain, callbacks)
    def open(self, dbg, uri, domain):
        # 'epc' = exclusive page cache open/close in QdiskDatapath naming.
        callbacks = get_sr_callbacks(dbg, uri)
        return QdiskDatapath.epc_open(dbg, uri, domain, callbacks)
    def close(self, dbg, uri):
        callbacks = get_sr_callbacks(dbg, uri)
        return QdiskDatapath.epc_close(dbg, uri, callbacks)
if __name__ == "__main__":
    # Dispatch on the invoked script name (the xapi storage plugin convention:
    # one symlink per operation, all pointing at this file).
    log.log_call_argv()
    CMD = xapi.storage.api.v5.datapath.Datapath_commandline(Implementation())
    CMD_BASE = os.path.basename(sys.argv[0])
    if CMD_BASE == "Datapath.activate":
        CMD.activate()
    elif CMD_BASE == "Datapath.attach":
        CMD.attach()
    elif CMD_BASE == "Datapath.close":
        CMD.close()
    elif CMD_BASE == "Datapath.deactivate":
        CMD.deactivate()
    elif CMD_BASE == "Datapath.detach":
        CMD.detach()
    elif CMD_BASE == "Datapath.open":
        CMD.open()
    else:
        # Unknown symlink name: report as unimplemented operation.
        raise xapi.storage.api.v5.datapath.Unimplemented(CMD_BASE)
| xcp-ng/xcp-ng-xapi-storage | plugins/datapath/qdisk/datapath.py | datapath.py | py | 2,192 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "urlparse.urlparse",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "importlib.import_modu... |
30844385629 | """
USC Spring 2020
INF 553 Foundations of Data Mining
Assignment 3
Student Name: Jiabin Wang
Student ID: 4778-4151-95
"""
from pyspark import SparkConf, SparkContext, StorageLevel
from trainAuxiliary import *
'''
import os
import re
import json
import time
import sys
import math
import random
import itertools
'''
if __name__ == "__main__":
    # CLI: task3train.py <train_review.json> <model_output> <item_based|user_based>
    time_start = time.time()
    # Get the input parameters
    input_file_path = sys.argv[1]  #"./Dataset/train_review.json"
    model_file_path = sys.argv[2]
    cf_type = sys.argv[3]
    # Configure the Spark
    conf = (
        SparkConf()
        .setAppName("task3")
        .set("spark.driver.memory", "4g")
        .set("spark.executor.memory", "4g")
    )
    sc = SparkContext(conf=conf)
    sc.setLogLevel("ERROR")
    # Train the requested collaborative-filtering model and persist it.
    # (train/output helpers come from trainAuxiliary, imported with *.)
    if cf_type == "item_based":
        pairCollection = itemBasedTrain(sc, input_file_path, model_file_path)
        itemBasedOutputModel(model_file_path, pairCollection)
    if cf_type == "user_based":
        result = userBasedTrain(sc, input_file_path, model_file_path)
        userBasedOutputModel(model_file_path, result)
    time_end = time.time()
    print("Duration: ", time_end - time_start, "s")
| jiabinwa/DSCI-INF553-DataMining | Assignment-3/task3train.py | task3train.py | py | 1,215 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyspark.SparkConf",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext",
"line_number": 37,
"usage_type": "call"
}
] |
20851705442 | # Take the code from the How To Decode A Website exercise
# (if you didn’t do it or just want to play with some different code, use the code from the solution),
# and instead of printing the results to a screen, write the results to a txt file.
# In your code, just make up a name for the file you are saving to.
# Extras:
# Ask the user to specify the name of the output file that will be saved.
import requests
from bs4 import BeautifulSoup

base_url = 'http://www.nytimes.com'
r = requests.get(base_url)
# Name the parser explicitly: without it BeautifulSoup warns and picks
# whichever parser happens to be installed.
soup = BeautifulSoup(r.text, 'html.parser')

# Collect each story headline; headings without a link are just printed.
titles = []
for story_heading in soup.find_all(class_="story-heading"):
    if story_heading.a:
        titles.append(story_heading.a.text.replace("\n", " ").strip())
    else:
        print(story_heading.contents[0].strip())

# Ask the user to name the output file (the exercise's "extra").
name_file = input('Rename the file please ')
with open('{}.txt'.format(name_file), 'w') as open_file:
    # One headline per line, instead of dumping the raw Python list repr.
    open_file.write("\n".join(titles))
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 15,
"usage_type": "call"
}
] |
69839957223 | from django.conf.urls.defaults import *
from django.contrib import admin
import os.path
# NOTE(review): this module uses the pre-Django-1.4 URLconf style
# (django.conf.urls.defaults, string view references, patterns('')).
# It only works on those legacy Django versions — do not "modernise"
# without upgrading the whole project.
admin.autodiscover()
# Absolute path to the media directory next to this file.
MEDIA_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), "media")
urlpatterns = patterns('',
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    (r'^admin/', include(admin.site.urls)),
    # Static media served by Django itself (development only).
    (r'^media/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': MEDIA_ROOT}),
    url(r'^p/(\w+)$', 'air.views.object', name='object'),
    url(r'^compare/(\w+)$', 'air.views.compareTo', name='compareTo'),
    url(r'^compare/(\w+)/(\w+)$', 'air.views.compare', name='compare'),
    url(r'^categories/$', 'air.views.categories', name='categories'),
    url(r'^category/$', 'air.views.category', name='category'),
    url(r'^projection/(\w+)/(\w+)$', 'air.views.projection', name='projection'),
    url(r'^add/$', 'air.views.add', name='add'),
    url(r'^reset/$', 'air.views.reset', name='reset'),
    url(r'^$', 'air.views.explore', name='explore'),
)
| friendofrobots/ice-divisi | explore/urls.py | urls.py | py | 995 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": ... |
19970778695 | import eventlet
import msgpack
import random
from copy import copy
from datetime import datetime
from . import cmds
from . import msgs
import os
log_file = open(os.path.join(os.getcwd(), 'client.log'), 'w')
def write_log(msg):
    """Append a timestamped line recording *msg* to the shared client log."""
    # `log_file` is opened once at module import; writing does not rebind it,
    # so no `global` declaration is needed.
    line = "{0} - {1}\n".format(datetime.now(), str(msg))
    log_file.write(line)
class RaftClient(object):
    """Client for a Raft cluster: discovers the leader and retries commands.

    Keeps a cached leader address/socket; on failure or redirection
    ('leader_hint' in a response) it reconnects and retries.
    """
    def __init__(self, server_address_list):
        # Candidate (host, port) pairs for cluster members.
        self.server_address_list = server_address_list
        self._leader_address = None
        self._leader_sock = None
        self.status = 'init'
        # Monotonic per-client command sequence number.
        self.cmd_seq = 0
        self.client_id = None
    @classmethod
    def select_server(cls, server_address_list):
        # Random first guess; corrected later via the server's leader_hint.
        return random.choice(server_address_list)
    def register(self):
        """Register this client with the cluster and store the assigned id."""
        cmd = cmds.get_client_register_cmd()
        cmd_msg = msgs.get_client_register_req_msg(cmd)
        self.cmd_seq = 0
        ret = self.execute_command(cmd_msg)
        # resp layout assumed: resp[1] is the server-assigned client id.
        self.client_id = ret['resp'][1]
        return ret
    def get_next_seq(self):
        """Advance and return the per-client command sequence number."""
        self.cmd_seq += 1
        return self.cmd_seq
    def send_command_req(self, command_msg):
        # Raw msgpack bytes over the cached leader socket.
        self._leader_sock.sendall(command_msg)
    def set_value(self, key, value):
        """Replicate an update of *key* to *value* through the leader."""
        cmd = cmds.get_client_update_cmd(
            key,
            value,
        )
        cmd_msg = msgs.get_client_update_req_msg(
            self.client_id,
            self.get_next_seq(),
            cmd,
        )
        return self.execute_command(cmd_msg)
    def get_value(self, key):
        """Query the current value of *key* through the leader."""
        cmd = cmds.get_client_query_cmd(key)
        cmd_msg = msgs.get_client_query_req_msg(
            self.client_id,
            self.get_next_seq(),
            cmd,
        )
        return self.execute_command(cmd_msg)
    def wait_command_ret(self):
        """Stream-read the socket until one full msgpack message arrives.

        Returns the decoded message, or None if the peer closed first.
        NOTE(review): `unpacker.next()` is the Python-2 iterator protocol;
        on Python 3 this would need `next(unpacker)`.
        """
        unpacker = msgpack.Unpacker()
        while True:
            chunk = self._leader_sock.recv(1024)
            if len(chunk) == 0:
                break
            unpacker.feed(chunk)
            try:
                return unpacker.next()
            except StopIteration:
                pass
        return None
    def execute_command(self, command_msg):
        """Send *command_msg* to the leader, following hints and retrying.

        Cycles through the server list (refilling it when exhausted, i.e.
        it retries indefinitely), with a 2-second eventlet timeout per
        attempt. Responses with a stale sequence number are ignored and
        the command is re-sent; 'leader_hint' redirects the client.
        """
        s_addr_list = copy(self.server_address_list)
        while len(s_addr_list) > 0:
            try:
                if self._leader_address is None:
                    self._leader_address = RaftClient.select_server(s_addr_list)
                    write_log(
                        "selected server {0}".format(self._leader_address))
                if self._leader_sock is None:
                    self._leader_sock = eventlet.connect(self._leader_address)
                timeout = eventlet.Timeout(2)
                try:
                    self.send_command_req(command_msg)
                    write_log(
                        "sent {0} - cmd: {1}".format(
                            self._leader_address,
                            msgpack.unpackb(command_msg),
                        )
                    )
                    ret = self.wait_command_ret()
                finally:
                    timeout.cancel()
                if ret is not None:
                    if ret['success']:
                        # resp[2] assumed to carry the echoed sequence number;
                        # anything older than cmd_seq is a stale reply.
                        if ret['resp'][2] < self.cmd_seq:
                            continue
                        return ret
                    else:
                        if 'leader_hint' in ret:
                            # Follow the hint; +1000 maps peer port to the
                            # client-facing port (convention — confirm server side).
                            self._leader_sock.close()
                            self._leader_sock = None
                            self._leader_address = (
                                ret['leader_hint'][0],
                                ret['leader_hint'][1] + 1000,
                            )
                            continue
            # NOTE(review): Exception already covers Timeout; the tuple is redundant.
            except (eventlet.timeout.Timeout, Exception) as e:
                write_log("hit exception:\n {0}".format(str(e)))
                pass
            # This attempt failed: drop the tried server and force re-selection.
            if self._leader_address in s_addr_list:
                s_addr_list.remove(self._leader_address)
            if len(s_addr_list) == 0:
                s_addr_list = copy(self.server_address_list)
            self._leader_address = None
            self._leader_sock = None
| jason-ni/eventlet-raft | eventlet_raft/client.py | client.py | py | 4,133 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line... |
38777184708 | #!/usr/bin/python
import csv
import json
import pprint
import re
import sys
def replace_if_not_empty(dict, key, value):
    """Set dict[key] = value unless the key already holds a truthy value."""
    current = dict.get(key)
    if not current:
        dict[key] = value
def to_float_or_none(value):
    """Convert *value* to float, returning None when conversion fails.

    Catches both malformed strings (ValueError) and non-numeric types such
    as None (TypeError), so raw spreadsheet cells can be fed in directly.
    """
    try:
        return float(value)
    except (ValueError, TypeError):
        return None
def replace_slashes(str):
    """Turn forward slashes into backslashes; map falsy input to None."""
    if not str:
        return None
    return str.replace("/", "\\")
def strip_or_none(str):
    """Return *str* stripped of surrounding whitespace, or None.

    None is returned both for values without a .strip() method (e.g. None
    or float NaN cells from csv parsing) and for strings that are empty
    after stripping. Replaces the previous bare ``except:`` (which also
    swallowed KeyboardInterrupt/SystemExit) and avoids stripping twice.
    """
    try:
        stripped = str.strip()
    except AttributeError:
        # Non-string cell value.
        return None
    return stripped if stripped else None
def write_json(dic, file_name):
    """Serialise *dic* to *file_name* as pretty-printed JSON plus a newline."""
    print(f"saving {file_name}")
    with open(file_name, "w") as out:
        out.write(json.dumps(dic, indent=2))
        out.write("\n")
def main(tsv_name: str):
    """Parse the gear-list TSV and emit one JSON file per equipment category.

    The first two lines of the sheet export are headers/notes and are
    skipped before handing the rest to csv.DictReader.
    """
    print(f"reading {tsv_name}")
    with open(file=tsv_name) as tsv_file:
        # skip first 2 lines
        tsv_file.readline()
        tsv_file.readline()
        # convert file to array of dicts
        # not efficient but good for reuse
        rows = list(csv.DictReader(tsv_file, delimiter="\t"))
    extract_keyboards(rows)
    extract_mousepads(rows)
    extract_mice(rows)
    extract_users(rows)
def extract_keyboards(rows):
    """Collect keyboard models (keyed by sanitised name) into keyboards.json.

    The switch field is only filled in from the first row that provides a
    non-empty value for a given model.
    """
    kbs = {}
    for row in rows:
        kb_model = replace_slashes(strip_or_none(row["Keyboard Model"]))
        if not kb_model:
            continue
        # Reuse the existing record for this model if one was started.
        if kb_model not in kbs or not kbs[kb_model]:
            kb = {}
        else:
            kb = kbs[kb_model]
        # set keyboard switch
        replace_if_not_empty(kb, "switch", strip_or_none(row["Key Switch"]))
        kbs[kb_model] = kb
    # pprint.PrettyPrinter().pprint(kbs)
    write_json(kbs, "keyboards.json")
def extract_mousepads(rows):
    """Collect distinct mousepad names (sanitised) into mousepads.json.

    Values are empty dicts — no per-pad attributes exist in the sheet.
    """
    mps = {}
    for row in rows:
        mp = replace_slashes(strip_or_none(row["Mousepad"]))
        if not mp:
            continue
        if mp not in mps or not mps[mp]:
            mps[mp] = {}
    # pprint.PrettyPrinter().pprint(mps)
    write_json(mps, "mousepads.json")
def extract_mice(rows):
    """Collect mouse models and their physical specs into mice.json."""
    mice = {}
    for row in rows:
        model = replace_slashes(strip_or_none(row["Mouse Model"]))
        if not model:
            # No model name: nothing to key the record on.
            continue
        # Reuse the existing entry only when it holds data already.
        entry = mice[model] if mice.get(model) else {}
        replace_if_not_empty(
            entry, "sensor", row["Sensor"] if row["Sensor"] else None)
        # Some cells contain ranges; splitting on the unit keeps the first
        # value only. Insertion order (weight, length, width, height) is kept
        # so the JSON output matches the original.
        for key, column, unit in (
            ("weight", "Weight", "g"),
            ("length", "Length", "m"),
            ("width", "Width", "m"),
            ("height", "Height", "m"),
        ):
            replace_if_not_empty(entry, key, to_float_or_none(row[column].split(unit)[0]))
        replace_if_not_empty(
            entry, "switch", row["Mouse Switch"] if row["Mouse Switch"] else None)
        mice[model] = entry
    write_json(mice, "mice.json")
def extract_users(rows):
    """Extract per-user settings from the spreadsheet rows into users.json.

    Rows after a "Notable" marker are skipped; parsing stops entirely at the
    "Banned" marker. Usernames are only accepted when the Name cell is a
    well-formed osu! HYPERLINK formula.
    """
    status = "Mouse"
    users = {}
    # iterate through rows
    for row in rows:
        if row["Rank"].startswith("Notable"):
            status = "Notable"
        elif row["Rank"].startswith("Traitors"):
            status = "Traitors"
        elif row["Rank"].startswith("Banned"):
            break
        # skip notable mentions
        if status == "Notable":
            continue
        # skip row if username is not well-formatted
        pattern = "=HYPERLINK\\(\"https:\\/\\/osu\\.ppy\\.sh\\/u\\/(\\d+)\",\"(\\w+)\"\\)"
        result = re.search(pattern, row["Name"])
        if not result or not result.group(1) or not result.group(2):
            continue
        # extract user info
        userID = result.group(1)
        userName = result.group(2)
        # BUG FIX: the original compared against the misspelling "Trators",
        # so is_traitor was always False.
        is_traitor = status == "Traitors"
        # win settings, very safe way
        # BUG FIX: initialize both values so they can never be unbound on the
        # first row, or silently carried over from the previous user's row.
        win_sensitivity = None
        win_acceleration = None
        win_settings = row['=HYPERLINK("http://puu.sh/nJtmY/e2a5589f67.png","OS")'].split()
        if win_settings:
            win_sensitivity = to_float_or_none(win_settings[0].split("/")[0])
            if len(win_settings) >= 2:
                accl_setting = win_settings[1].strip().lower()
                if accl_setting.startswith("off"):
                    win_acceleration = False
                elif accl_setting.startswith("on"):
                    win_acceleration = True
        # osu settings, again it is focused on safe
        osu_multiplyer = to_float_or_none(
            row["Multiplier"].strip().split("~")[0].rstrip("xX"))
        if row["Raw"].strip().lower().startswith("on"):
            osu_raw = True
        elif row["Raw"].strip().lower().startswith("off"):
            osu_raw = False
        else:
            osu_raw = None
        # hardware setup info
        screen_resolution = row["Resolution"].strip().split("~")[0].split("x")
        if len(screen_resolution) >= 2:
            screen_width = to_float_or_none(screen_resolution[0])
            screen_height = to_float_or_none(screen_resolution[1])
        else:
            screen_width = None
            screen_height = None
        mousepad = strip_or_none(row["Mousepad"])
        keyboard = strip_or_none(row["Keyboard Model"])
        # mouse playstyle info
        playstyle = strip_or_none(row["Playstyle"])
        mouse = strip_or_none(row["Mouse Model"])
        dpi = to_float_or_none(row["DPI"].strip().rstrip("dpi"))
        polling = to_float_or_none(row["Polling"].lower().rstrip("hz"))
        # possibly calculate mouse area
        if win_sensitivity and osu_multiplyer and dpi and screen_width and screen_height:
            # Windows pointer-speed multipliers indexed by sensitivity notch.
            m = [0.00625, 0.0125, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5]
            win_multi = m[int(win_sensitivity - 1)]
            # get play area (osu! play field is 4:3 inside the screen)
            if (screen_width / screen_height) >= (4 / 3):
                play_height = screen_height
                play_width = screen_height * (4 / 3)
            else:
                play_width = screen_width
                play_height = screen_width / (4 / 3)
            # get area in millimetres (25.4 mm per inch)
            effective_ppi = dpi * win_multi * osu_multiplyer
            area_width = round(25.4 * play_width / effective_ppi)
            area_height = round(25.4 * play_height / effective_ppi)
        else:
            area_width = None
            area_height = None
        # create new user
        users[userID] = {
            "name": userName,  # dont really need username tho
            "rank": None,
            "pp": None,
            "is_banned": False,
            "is_traitor": is_traitor,
            "windows_sensitivity": win_sensitivity,
            "windows_acceleration": win_acceleration,
            "osu_multiplyer": osu_multiplyer,
            "osu_raw": osu_raw,
            "screen_width": screen_width,
            "screen_height": screen_height,
            "playstyle": playstyle,
            "dpi": dpi,
            "polling": polling,
            "area_width": area_width,
            "area_height": area_height,
            "mouse": mouse,
            "mousepad": mousepad,
            "keyboard": keyboard,
        }
    # pprint.PrettyPrinter().pprint(users)
    write_json(users, "users.json")
if __name__ == "__main__":
    # Exactly one argument is expected: the path to the TSV file.
    if len(sys.argv) != 2:
        print("Usage: python ./parse_tsv 'tsv/file/path'")
    else:
        main(sys.argv[1])
| penguinuwu/Mousebase | backend/csv_parser/parse_csv.py | parse_csv.py | py | 7,636 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.dump",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 249... |
13909907482 | """Module with lagrangian decomposition methods."""
# Python packages
# Package modules
import logging as log
from firedecomp.AL import ARPP
from firedecomp.AL import ADPP
from firedecomp.fix_work import utils as _utils
from firedecomp.original import model as _model
from firedecomp.classes import problem as _problem
import time
import math
import gurobipy
import copy
###############################################################################
# CLASS LagrangianRelaxation()
###############################################################################
class AugmentedLagrangian(object):
    """Augmented Lagrangian decomposition solver for the firedecomp problem."""

    def __init__(
            self,
            problem_data,
            min_res_penalty=1000000,
            valid_constraints=None,
            gap=0.01,
            max_iters=100000,
            max_time=10,
            log_level="AL",
            solver_options=None,
    ):
        """Initialize the Lagrangian Relaxation object.

        Args:
            problem_data (:obj:`Problem`): problem data.
            min_res_penalty (:obj:`int`):
            gap (:obj:`float`): GAP tolerance for stop criteria.
                Defaults to 0.01.
            max_iters (:obj:`int`): maximum number of iterations. Defaults to
                10.
            max_time (:obj:`float`): maximum cpu time (in seconds). Defaults to
                3600.
            log_level (:obj:`str`): logging level. Defaults to ``'benders'``.
        """
        # PROBLEM DATA
        if problem_data.period_unit is False:
            raise ValueError("Time unit of the problem is not a period.")
        self.problem_data = problem_data
        self.solution_best = None  # index of DPP best solution
        self.solution_best_original = None
        # GLOBAL VARIABLES
        self.max_iters = max_iters
        self.max_time = max_time
        self.init_time = time.time()
        self.v = 1  # iterations
        # NL: number of relaxed constraints (1 + one per wildfire period).
        self.NL = (1 + len(problem_data.get_names("wildfire")))  # +
        # len(problem_data.get_names("wildfire"))*len(problem_data.get_names("groups"))*2);
        # GUROBI OPTIONS
        if solver_options is None:
            solver_options = {
                'OutputFlag': 0,
                'LogToConsole': 0,
            }
        self.solver_options = solver_options
        # PARAMETERS INNER METHODS
        self.change = []
        self.beta_matrix = []
        self.lambda_matrix = []
        self.lambda_matrix_prev = []
        self.upperbound_matrix = []
        self.lobj_global = float("-inf")
        self.fobj_global = float("inf")
        self.infeas_global = float("inf")
        self.subgradient_global = []
        self.penalties_global = []
        self.index_best = -1
        # Multiplier bounds and starting value (note: "lamdba" typo kept for
        # compatibility with the rest of the class).
        self.lambda_min = 1e1
        self.lambda_max = 1e5
        self.lamdba_init = 1e3
        # Iterations a subproblem may stagnate before being retired.
        self.th_sol = 10
        # INITIALIZE DDecomposedPrimalProblemPP PROBLEM
        # Initialize Decomposite Primal Problem Variables
        self.problem_DPP = []
        self.N = len(self.problem_data.get_names("resources"))
        # One master scenario per possible containment period (plus base).
        self.y_master_size = len(
            self.problem_data.get_names("wildfire") + [int(min(self.problem_data.get_names("wildfire"))) - 1])
        self.counterh_matrix = []
        self.lobj_local = []
        self.lobj_local_prev = []
        self.fobj_local = []
        self.infeas_local = []
        self.subgradient_local = []
        self.subgradient_local_prev = []
        self.penalties_local = []
        self.termination_counter = []
        self.best_list_y = []
        for i in range(0, self.y_master_size):
            self.termination_counter.append(0)
            self.lobj_local.append(float("inf"))
            self.lobj_local_prev.append(float("inf"))
            self.fobj_local.append(float("inf"))
            self.infeas_local.append(float("inf"))
            self.subgradient_local.append([])
            self.penalties_local.append([])
            self.upperbound_matrix.append(float("inf"))
        # The last scenario is retired from the start.
        self.termination_counter[self.y_master_size - 1] = self.th_sol + 1
        # INITIALIZE LAMBDA AND BETA
        for i in range(0, self.y_master_size):
            lambda_row = []
            lambda_row_prev = []
            lambda_row_inf = []
            beta_row = []
            subgradient_prev_row = []
            for j in range(0, self.NL):
                lambda_row.append(self.lamdba_init)
                lambda_row_inf.append(float("inf"))
                beta_row.append(0.3)
                lambda_row_prev.append(self.lamdba_init)
                subgradient_prev_row.append(0)
            self.subgradient_local_prev.append(subgradient_prev_row)
            self.lambda_matrix.append(lambda_row)
            self.lambda_matrix_prev.append(lambda_row_prev)
            self.beta_matrix.append(beta_row)
            self.change.append(1.0)
        # CREATE ORIGINAL Problems list
        _utils.get_initial_sol(self.problem_data)
        dict_update = self.problem_data.get_variables_solution()
        print("UPDATE original problem")
        for i in range(0, self.y_master_size - 1):
            # Scenario i: the wildfire is contained after period
            # y_master_size - (1 + i); later y variables are fixed to 0.
            self.y_master = dict([(p, 1) for p in range(0, self.y_master_size)])
            for p in range(self.y_master_size - (1 + i), self.y_master_size):
                self.y_master[p] = 0
            print("Create index: " + str(i) + " y: " + str(self.y_master))
            model_DPP = ADPP.DecomposedPrimalProblem(self.problem_data,
                                                     self.lambda_matrix[i], self.beta_matrix[i],
                                                     self.y_master, self.N,
                                                     min_res_penalty=min_res_penalty,
                                                     valid_constraints=valid_constraints)
            self.problem_DPP.append(model_DPP)
###############################################################################
# PUBLIC METHOD subgradient()
###############################################################################
    def subgradient(self, subgradient, subgradient_prev, lambda_vector, beta_vector, lambda_matrix_prev, ii):
        """Update the multiplier and step-size vectors of one subproblem in place.

        Args:
            subgradient: current subgradient (penalty) vector.
            subgradient_prev: previous subgradient vector; overwritten with the
                current one before returning.
            lambda_vector: multipliers, updated in place and clamped to
                [self.lambda_min, self.lambda_max].
            beta_vector: step sizes, each scaled by 1.2 every call.
            lambda_matrix_prev: previous multipliers, used for roll-back when
                the iterate appears stuck, then overwritten.
            ii: subproblem index.  # NOTE(review): currently unused.
        """
        # Snapshot the current state; the updates below must read old values.
        lambda_old = lambda_vector.copy()
        beta_old = beta_vector.copy()
        stuck = 0
        # "Stuck" heuristic: the worst component flipped from feasible (<0)
        # to infeasible (>0) between iterations.
        if max(subgradient_prev) < 0 and max(subgradient) > 0:
            stuck = 1
        for i in range(0, self.NL):
            LRpen = subgradient[i]
            if stuck == 1 and LRpen < 0:
                # Roll back this component to its previous multiplier.
                new_lambda = lambda_matrix_prev[i]
            else:
                # Standard subgradient step.
                new_lambda = (lambda_old[i] + LRpen * beta_old[i])
            lambda_vector[i] = min(max(self.lambda_min, new_lambda), self.lambda_max)
            beta_vector[i] = beta_vector[i] * 1.2
            # print(str(LRpen) + " -> lambda " + str(lambda_old[i]) + " + " + str(beta_old[i] * LRpen) + " = " + str(
            #    lambda_vector[i]) + " update " + str(beta_old[i]) + " diff " + str(
            #    abs(abs(lambda_vector[i]) - abs(lambda_old[i]))) + " beta " + str(
            #    beta_vector[i]))  # + " change_per "+str(change_per) )
        # print("")
        # print("")
        # Shift the current state into the "previous" buffers for next call.
        for i in range(0, self.NL):
            subgradient_prev[i] = subgradient[i]
            lambda_matrix_prev[i] = lambda_old[i]
        del lambda_old
        del beta_old
###############################################################################
# PUBLIC METHOD convergence_checking()
###############################################################################
    def convergence_checking(self):
        """Return True when the algorithm should stop.

        Stopping criteria: max iterations, max wall time, or every
        subproblem's termination counter reached the threshold. Also updates
        the per-subproblem stagnation counters as a side effect.
        """
        stop = bool(False)
        result = 0  # NOTE(review): never used.
        optimal_solution_found = 0  # NOTE(review): never set to 1 below.
        # print("TERMINATION COUNTER"+str(self.termination_counter))
        # CHECK PREVIOUS LAMBDAS CHANGES
        for i in range(0, len(self.lambda_matrix) - 1):
            # print(str(self.termination_counter[i])+" "+str(self.infeas_local[i]))
            # Infeasible subproblems accumulate termination credit.
            if self.infeas_local[i] > 0:
                self.termination_counter[i] = self.termination_counter[i] + 1
            if self.termination_counter[i] < self.th_sol and self.infeas_local[i] <= 0:
                # Relative objective change in percent; counts as stagnation
                # when below 0.1%.
                lobj_diff = abs(
                    (abs(self.lobj_local[i]) - abs(self.lobj_local_prev[i])) / abs(self.lobj_local[i])) * 100
                # # print(str(i) + "self.lobj_local[i] - self.lobj_local_prev[i] " + str(lobj_diff) + "% ")
                if (lobj_diff < 0.1):
                    self.termination_counter[i] = self.termination_counter[i] + 1
                else:
                    self.termination_counter[i] = 0
            self.lobj_local_prev[i] = self.lobj_local[i]
        # CHECK TERMINATION COUNTER MATRIX
        counter = 0
        all_termination_counter_finished = 0
        for i in range(0, self.y_master_size):
            if self.termination_counter[i] >= (self.th_sol):
                counter = counter + 1
            if counter == self.y_master_size:
                all_termination_counter_finished = 1
        # print("counter" + str(counter) + " termination_counter" + str(self.y_master_size))
        # STOPPING CRITERIA CASES
        current_time = time.time() - self.init_time
        # check convergence
        if (self.v >= self.max_iters):
            print("[STOP] Max iters achieved!")
            stop = bool(True)
        if (current_time >= self.max_time):
            print("[STOP] Max execution time achieved!")
            stop = bool(True)
        elif (all_termination_counter_finished == 1):
            print("[STOP] Convergence achieved, optimal local point searched!")
            stop = bool(True)
        elif (optimal_solution_found == 1):
            print("[STOP] Convergence achieved, optimal solution found!")
            stop = bool(True)
        return stop
###############################################################################
# PUBLIC METHOD solve()
###############################################################################
    def solve(self):
        """Run the augmented Lagrangian loop and return the best solution.

        Iterates over all master scenarios, solving one decomposed primal
        problem per resource, updating multipliers via self.subgradient, and
        tracking the best feasible solution found. If no feasible solution
        appears, falls back to the least-infeasible subproblem solution.
        """
        print("SOLVE ALGORITHM")
        termination_criteria = bool(False)
        while termination_criteria == False:
            # (1) Solve DPP problems
            for i in range(0, self.y_master_size - 1):
                # Show iteration results
                if i == 0:
                    log.info("Iteration # mi lambda f(x) L(x,mi,lambda) penL")
                    print("\n\nIter: " + str(self.v) + " " +
                          "LR(x): " + str(self.lobj_global) + " " +
                          "f(x):" + str(self.fobj_global) + " " +
                          "penL:" + str(self.infeas_global) + " time:" +
                          str(time.time() - self.init_time) + "\n")
                # Only scenarios that have not been retired are re-solved.
                if self.termination_counter[i] < self.th_sol:
                    # print("### Y -> " + str(self.problem_DPP[i].list_y))
                    DPP_sol_row = []
                    DPP_sol_unfeasible = False
                    total_obj_function = []
                    total_unfeasibility = []
                    total_subgradient = []
                    total_obj_function_pen = []
                    total_problem = []
                    self.lobj_local[i] = 0
                    self.fobj_local[i] = 0
                    self.subgradient_local[i] = []
                    for z in range(0, self.NL):
                        self.subgradient_local[i].append(float("-inf"))
                    self.penalties_local[i] = []
                    self.infeas_local[i] = 0
                    # Solve one subproblem per resource.
                    for j in range(0, self.N):
                        try:
                            self.problem_DPP[i].change_resource(j, self.lambda_matrix[i], self.beta_matrix[i], self.v)
                            DPP_sol_row.append(self.problem_DPP[i].solve(self.solver_options))
                            # Gurobi status 3 = INFEASIBLE, 4 = INF_OR_UNBD.
                            if (DPP_sol_row[j].model.Status == 3) or (DPP_sol_row[j].model.Status == 4):
                                DPP_sol_unfeasible = True
                                break
                        except:
                            print("Error Solver: Lambda/beta error")
                            DPP_sol_unfeasible = True
                            break
                        total_problem.append(self.problem_DPP[i].problem_data.copy_problem())
                        subgradient = self.problem_DPP[i].return_LR_obj2()
                        total_obj_function_pen.append(self.problem_DPP[i].return_function_obj_total_pen())
                        total_obj_function.append(self.problem_DPP[i].return_function_obj_total())
                        total_unfeasibility.append(max(subgradient))
                        total_subgradient.append(subgradient)
                        # print(str(j) + " fobj " + str(self.problem_DPP[i].return_function_obj()) + " total " +
                        #      str(self.problem_DPP[i].return_function_obj_total()) + "unfeas " + str(max(subgradient)))
                    if DPP_sol_unfeasible:
                        # Retire this scenario permanently.
                        self.termination_counter[i] = self.th_sol + 1
                    else:
                        bestid = self.problem_DPP[i].return_best_candidate(total_obj_function, total_unfeasibility)
                        self.lobj_local[i] = total_obj_function_pen[bestid]
                        self.fobj_local[i] = total_obj_function[bestid]
                        self.infeas_local[i] = total_unfeasibility[bestid]
                        self.subgradient_local[i] = total_subgradient[bestid]
                        #print("TOTAL" + str(i) +
                        #      " LR " + str(self.lobj_local[i]) +
                        #      " fobj " + str(self.fobj_local[i]) +
                        #      " Infeas " + str(self.infeas_local[i]))
                        # Multiplier update for this scenario (in place).
                        self.subgradient(self.subgradient_local[i], self.subgradient_local_prev[i],
                                         self.lambda_matrix[i], self.beta_matrix[i],
                                         self.lambda_matrix_prev[i], i)
                        self.change[i] = 0
                        # Accept a new incumbent only if feasible and better.
                        if self.fobj_global > self.fobj_local[i] and (self.infeas_local[i] <= 0):
                            self.problem_DPP[i].problem_data = total_problem[bestid]
                            self.lobj_global = self.lobj_local[i]
                            self.fobj_global = self.fobj_local[i]
                            self.subgradient_global = self.subgradient_local[i]
                            self.infeas_global = self.infeas_local[i]
                            self.solution_best_original = total_problem[
                                bestid].copy_problem()  # self.update_problem_data_sol(self.problem_DPP[i])
                            self.solution_best_original.constrvio = self.infeas_global
                            self.solution_best_original.solve_status = 2
                            # print("New Solution:")
                            # print(self.solution_best_original.get_solution_info())
                            self.change[i] = 1
                        self.problem_DPP[i].update_original_values(DPP_sol_row[bestid], self.change[i])
                    DPP_sol_row.clear()
            # (3) Check termination criteria
            termination_criteria = self.convergence_checking()
            self.v = self.v + 1
        # DESTROY DPP
        # print(self.solution_best_original.get_solution_info())
        if self.solution_best_original is None:
            # Fallback: no feasible incumbent found; pick the subproblem with
            # the least infeasibility (ties broken by objective).
            # NOTE(review): the loop variable `i` below leaks from the loops
            # above on first use — confirm this is intended.
            self.problem_DPP[0].change_resource(0, self.lambda_matrix[i], self.beta_matrix[i], self.v)
            self.problem_DPP[0].solve(self.solver_options)
            min_fobj = self.problem_DPP[0].return_function_obj_total()
            min_feasibility = max(self.problem_DPP[0].return_LR_obj2())
            best_index = 0
            for i in range(1, self.N):
                self.problem_DPP[i].change_resource(0, self.lambda_matrix[i], self.beta_matrix[i], self.v)
                self.problem_DPP[i].solve(self.solver_options)
                fobj = self.problem_DPP[i].return_function_obj_total()
                feas = max(self.problem_DPP[i].return_LR_obj2())
                if min_feasibility >= feas:
                    if min_feasibility == feas:
                        if min_fobj > fobj:
                            best_index = i
                    else:
                        best_index = i
            self.solution_best_original = self.problem_DPP[best_index].problem_data.copy_problem()
            self.solution_best_original.constrvio = max(self.problem_DPP[i].return_LR_obj2())
            self.solution_best_original.solve_status = 2
        self.problem_data = self.solution_best_original
        return self.solution_best_original
###############################################################################
# PRIVATE extract_infeasibility()
###############################################################################
def extract_infeasibility(self, subgradient):
infeas = 0
for i in range(0, len(subgradient)):
if (subgradient[i] > 0):
infeas = infeas + subgradient[i]
return infeas
###############################################################################
# PRIVATE destroy_DPP_set()
###############################################################################
def destroy_DPP_set(self):
for i in range(0, len(self.problem_DPP)):
len_p = len(self.problem_DPP[i])
if (len_p > 0):
for j in range(0, len_p):
del self.problem_DPP[i][0]
self.problem_DPP[i] = []
self.problem_DPP = []
# print("DESTROY")
###############################################################################
# PRIVATE METHOD __log__()
###############################################################################
# def __log__(self, level="AL"):
# log.addLevelName(80, "AL")
# log.Logger.LR = logging.LR
# if level != 'AL':
# log_level = getattr(log, level)
# logger = log.getLogger('AL_logging')
# logger.setLevel(log_level)
# logger.addFilter(logging.LRFilter())
# if len(logger.handlers) == 0:
# ch = log.StreamHandler()
# ch.setLevel(log_level)
# # create formatter and add it to the handlers
# formatter = log.Formatter("%(levelname)8s: %(message)s")
# ch.setFormatter(formatter)
# logger.addHandler(ch)
# else:
# log_level = 80
# logger = log.getLogger('AL')
# logger.setLevel(log_level)
# if len(logger.handlers) == 0:
# ch = log.StreamHandler()
# ch.setLevel(log_level)
# # create formatter and add it to the handlers
# formatter = log.Formatter("%(message)s")
# ch.setFormatter(formatter)
# logger.addHandler(ch)
#
# self.log = logger
# return 1
def update_problem_data_sol(self, solution):
problem = self.problem_data.copy_problem()
problem = self.solution_to_problem(problem, solution)
return problem
    def solution_to_problem(self, problem, solution):
        """Copy solver variable values from *solution* into *problem*.

        Rounds the binary resource/wildfire variables, rebuilds the
        per-period containment flags, and returns the updated problem.
        Note: some variables are read via ``.getValue()`` (expressions) and
        others via ``.x`` (plain Gurobi vars).
        """
        problem.mipgap = None
        problem.mipgapabs = None
        problem.constrvio = self.infeas_global
        problem.solve_status = 2
        variables = solution
        data = problem.data
        problem.resources.update(
            {i: {'select': round(variables.z[i].getValue()) == 1}
             for i in data.I})
        # Binary schedule variables, rounded to booleans per (resource, period).
        s = {(i, t): round(variables.s[i, t].x) == 1
             for i in data.I for t in data.T}
        u = {(i, t): round(variables.u[i, t].getValue()) == 1
             for i in data.I for t in data.T}
        e = {(i, t): round(variables.e[i, t].x) == 1
             for i in data.I for t in data.T}
        w = {(i, t): round(variables.w[i, t].getValue()) == 1
             for i in data.I for t in data.T}
        r = {(i, t): round(variables.r[i, t].x) == 1
             for i in data.I for t in data.T}
        er = {(i, t): round(variables.er[i, t].x) == 1
              for i in data.I for t in data.T}
        tr = {(i, t): round(variables.tr[i, t].x) == 1
              for i in data.I for t in data.T}
        problem.resources_wildfire.update(
            {(i, t): {
                'start': s[i, t],
                'use': u[i, t],
                'end': e[i, t],
                'work': w[i, t],
                'travel': tr[i, t],
                'rest': r[i, t],
                'end_rest': er[i, t]
            }
                for i in data.I for t in data.T})
        problem.groups_wildfire.update(
            {(g, t): {'num_left_resources': variables.mu[g, t].x}
             for g in data.G for t in data.T})
        # y[t] == 0 marks the fire as contained in period t.
        contained = {t: variables.y[t].x == 0
                     for t in data.T}
        contained_period = [t for t, v in contained.items()
                            if v is True]
        if len(contained_period) > 0:
            first_contained = min(contained_period) + 1
        else:
            first_contained = data.max_t + 1
        problem.wildfire.update(
            {t: {'contained': False if t < first_contained else True}
             for t in data.T})
        return problem
    def create_init_solution(self, problem_data):
        """Solve the original model with the last y fixed to 0 as a warm start.

        Returns the solved :obj:`InputModel` (time-limited to 10 seconds).
        """
        T = problem_data.get_names("wildfire")
        min_t = int(min(T))
        # All periods uncontained (y=1) except the final one (y=0).
        list_y = dict([(p, 1) for p in range(0, len(T + [min_t - 1]))])
        list_y[len(list_y) - 1] = 0
        print(list_y)
        problem_data_copy = problem_data.copy_problem()
        init_problem = _model.InputModel(problem_data_copy)
        solver_options = {
            'OutputFlag': 1,
            'LogToConsole': 1,
            'TimeLimit': 10,
        }
        # Fix every y variable by collapsing its bounds to the chosen value.
        for i in range(0, len(list_y)):
            init_problem.y[i].UB = list_y[i]
            init_problem.y[i].LB = list_y[i]
        init_problem.m.update()
        print("COMPUTE INIT SOLUTION")
        init_problem.solve(solver_options)
        print("END COMPUTE INIT SOLUTION")
        return init_problem
| jorgerodriguezveiga/firedecomp | firedecomp/AL/AL.py | AL.py | py | 22,213 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "firedecomp.fix_work.utils.get_initial_sol",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "firedecomp.fix_work.utils",
"line_number": 135,
"usage_type": "name"
},
{
"a... |
7952291852 | from __future__ import annotations
import logbook
from discord import Interaction
from discord import Message
from discord.ext.commands import Context
from json import dumps
from logging import Logger
from time import time
from utils import send_response
from yt_dlp.YoutubeDL import YoutubeDL
from yt_dlp import DownloadError
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from audio import Audio
log: Logger = logbook.getLogger("track")
class Track():
    """Container describing one playable audio item (local file, stream or live)."""

    def __init__(
        self,
        title: str,
        url: str,
        track_type: str,
        *,
        duration: int = 0,
        format: tuple[str, str] = ("", ""),
        guild_id: int = 0,
        url_original: str = "",
        video_id: str = ""
    ) -> None:
        """Store track metadata.

        Args:
            title: display name of the track.
            url: playable location (local path or resolved stream url).
            track_type: type tag used elsewhere in this module, e.g.
                "music", "live" or "stream".
            duration: length in seconds; 0 when unknown.
            format: (format_id, stream_url) pair chosen from yt-dlp formats.
            guild_id: Discord guild the track was requested in.
            url_original: the url the user originally submitted.
            video_id: 11-character YouTube video id, when applicable.
        """
        self.title: str = title
        self.url: str = url
        self.track_type: str = track_type
        self.duration: int = duration
        self.format: tuple[str, str] = format
        self.guild_id: int = guild_id
        self.url_original: str = url_original
        self.video_id: str = video_id
        # Creation time of this object (seconds since epoch).
        self.time_stamp: float = time()

    def __repr__(self) -> str:
        # Pretty-print all instance attributes as JSON; default=vars
        # serializes the instance __dict__.
        return dumps(self, default=vars, indent=4, ensure_ascii=False)
async def create_local_track(audio: Audio, cim: Context | Interaction | Message, url: str, track_type: str = "music") -> None | Track:
    """Build a Track for a file on disk, deriving the title from the path."""
    if not cim.guild:
        return
    log.info(f"{cim.guild}: Creating local track from {url}")
    last_slash = url.rfind("/")
    if "/.Cached Tracks/" in url:
        # Cached files carry a fixed-width suffix; strip it along with the path.
        title: str = url[last_slash + 1 : -16]
    else:
        title: str = url[last_slash + 1:] if "/" in url else url
    # Drop everything from the last dot onward (the file extension).
    title = title[:title.rfind(".")]
    return Track(title, url, track_type, guild_id=cim.guild.id)
async def create_stream_track(audio: Audio, cim: Context | Interaction | Message, url: str) -> None | Track:
    """Build a Track for a remote url (YouTube only for now).

    Returns a cached local track when the video id is already downloaded,
    a "live" track for live streams, or a "stream" track queued for
    background caching. Returns None for unsupported or unresolvable urls.
    """
    if not cim.guild: return
    log.info(f"{cim.guild}: Creating stream track from {url}")
    if url.startswith(("https://www.youtube.com/", "https://youtu.be/", "https://m.youtube.com/", "https://youtube.com/")):
        id: str | None = await _get_yt_video_id(url)
        if not id: return None
        # Serve from the local cache when this video was downloaded before.
        url_local: str | None = audio.cached_tracks.get(id)
        if url_local:
            log.info(f"Found {id} in my cached tracks.")
            return await create_local_track(audio, cim, url_local, "music")
        log.info(f"{id} not found in cached tracks, downloading meta data...")
        url = f"https://www.youtube.com/watch?v={id}"  # Sanitize url
        video_info: dict | None = await _get_yt_video_info(audio, cim, url)
        if not video_info: return None
        protocol: str | None = video_info.get("protocol")
        if not protocol: return None
        log.info(f"Stream track protocol:\n{protocol}")
        if protocol == "m3u8_native":
            # Live streams have no duration and are never cached.
            log.info(f"Stream track is a live stream.")
            title: str = video_info.get("title")[:-17]  # type: ignore
            url_original: str = url
            url_stream: str = video_info.get("url")  # type: ignore
            return Track(title, url_stream, "live", guild_id=cim.guild.id, url_original=url_original)
        else:
            log.info(f"Stream track is a normal stream.")
            duration: int = video_info["duration"] if video_info.get("duration") else await audio.get_audio_track_caching_duration_max()
            format: tuple[str, str] | None = await _get_yt_video_best_audio_format(video_info)
            if not format:
                await send_response(cim, "I could not find a suitable audio format to stream.")
                return None
            title: str = video_info["title"]
            url_original: str = url
            url_stream: str = format[1]
            track: Track = Track(title, url_stream, "stream", duration=duration, format=format, guild_id=cim.guild.id, url_original=url_original, video_id=id)
            # Queue the track so the downloader can cache it in the background.
            await audio.download_q.put(track)
            return track
    elif url.startswith("https://open.spotify.com/"):
        await send_response(cim, "Spotify not implemented yet.")
        return None
    return None
async def create_meme_track(audio: Audio, cim: Context | Interaction | Message, url: str) -> None | Track:
    """Map known meme keywords to their stream tracks; None for anything else."""
    if url.lower() != "despacito":
        return None
    log.info(f"Creating meme track for \"{url}\"")
    return await create_stream_track(audio, cim, "https://www.youtube.com/watch?v=kJQP7kiw5Fk")
async def _get_yt_video_id(url: str) -> str | None:
"""Get the 11 chars long video id from a Youtube link."""
if url.find("?v=") > 0:
return url[url.find("?v=") + 3 : url.find("?v=") + 14]
elif url.find("&v=") > 0:
return url[url.find("&v=") + 3 : url.find("&v=") + 14]
elif url.find(".be/") > 0:
return url[url.find(".be/") + 4 : url.find(".be/") + 15]
elif url.find("/shorts/") > 0:
return url[url.find("/shorts/") + 8 : url.find("/shorts/") + 19]
else:
return None
async def _get_yt_video_info(audio: Audio, cim: Context | Interaction | Message, url) -> dict | None:
    """Fetch video metadata with yt-dlp without downloading the media.

    Runs the blocking extract_info call in an executor so the event loop is
    not stalled. On a DownloadError, replies to the user with a message
    matched from the error text and returns None.
    """
    options: dict[str, str | bool | int | Logger] = {
        "no_warnings": False,
        "default_search": "auto",
        "source_address": "0.0.0.0",
        "logger": logbook.getLogger("yt-dlp"),
        "age_limit": 21
    }
    try:
        video_info: dict | None = await audio.maon.loop.run_in_executor(
            None, lambda: YoutubeDL(options).extract_info(url, download=False)
        )
    except DownloadError as e:
        # The slice drops yt-dlp's fixed "ERROR: ..." prefix from the log line.
        log.error(f"{cim.guild.name}: {e.__str__()[28:]}")  # type: ignore
        if "looks truncated" in e.__str__():
            await send_response(cim, "The link looks incomplete, paste it again, please.")
        elif "to confirm your age" in e.__str__():
            await send_response(cim, "The video is age gated and I couldn't proxy my way around it.")
        elif "HTTP Error 403" in e.__str__():
            await send_response(cim, "I received a `forbidden` error, I was locked out from downloading the meta data...\nYou could try again in a few seconds, though!")
        elif "Private video." in e.__str__():
            await send_response(cim, "The video has been privated and I can't view it.")
        else:
            await send_response(cim, "I could not download the video's meta data... maybe try again in a few seconds.")
        return None
    return video_info
async def _get_yt_video_best_audio_format(video_info: dict) -> tuple[str, str] | None:
    """Pick the preferred audio-only format id and url from yt-dlp metadata."""
    available: dict[str, str] = {}
    for entry in video_info.get("formats", [video_info]):
        available[entry.get("format_id")] = entry.get("url")  # type: ignore
    log.info(f"Found formats: {available.keys()}")
    # Preference order: opus 160k, m4a 128k, opus 70k, opus 50k.
    for format_id in ("251", "140", "250", "249"):
        if format_id in available:
            return (format_id, available[format_id])
    return None
| ruubytes/Maon.py | src/track.py | track.py | py | 6,962 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "logging.Logger",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "logbook.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.time",
... |
24643182429 | import pyautogui
import time
# Locating the game window
# The user calibrates the bot by pointing at the two corners of the window.
pyautogui.alert('Put the mouse pointer over the top left corner of the game then press "Enter"')
TOP_LEFT = pyautogui.position()
pyautogui.alert('Put the mouse pointer over the bottom right corner of the game then press "Enter"')
BOT_RIGHT = pyautogui.position()
# Window size in pixels; all positions below are fractions of this size.
DIM = (BOT_RIGHT[0] - TOP_LEFT[0], BOT_RIGHT[1] - TOP_LEFT[1])
print(TOP_LEFT,BOT_RIGHT,DIM)
# Generating the positions
# Skill buttons sit along the bottom edge of the window.
QSKILL = (0.3*DIM[0]+TOP_LEFT[0], 0.9*DIM[1]+TOP_LEFT[1])
WSKILL = (0.4*DIM[0]+TOP_LEFT[0], 0.9*DIM[1]+TOP_LEFT[1])
ESKILL = (0.5*DIM[0]+TOP_LEFT[0], 0.9*DIM[1]+TOP_LEFT[1])
RSKILL = (0.6*DIM[0]+TOP_LEFT[0], 0.9*DIM[1]+TOP_LEFT[1])
SKILLS = [QSKILL,WSKILL,ESKILL,RSKILL]
# 6 rows x 5 columns grid of inventory slot centers.
SLOTZ = [[( (0.1+(i*0.07))*DIM[0]+TOP_LEFT[0], (0.26+(j*0.125))*DIM[1]+TOP_LEFT[1] )for i in range(5)]for j in range(6)]
# Flattened, 1-indexed slot list (index 0 is a None placeholder).
SLOTS = [None,]
for line in SLOTZ:
    SLOTS += line
# Tool functions
def use(slot, chest = False):
    """Click an inventory slot, then its "use" option; optionally confirm a chest.

    Args:
        slot: 1-based index into SLOTS.
        chest: when True, also click the confirmation that chest-opening pops up.
    """
    pyautogui.moveTo(SLOTS[slot][0], SLOTS[slot][1])
    time.sleep(.1)
    pyautogui.click()
    # The "use" entry of the context menu sits at a fixed offset from the slot.
    pyautogui.moveRel(0.194*DIM[0], 0.185*DIM[1])
    time.sleep(.1)
    pyautogui.click()
    if chest :
        # Confirmation dialog appears near the window center.
        pyautogui.moveTo(0.7*DIM[0]+TOP_LEFT[0], 0.5*DIM[1]+TOP_LEFT[1])
        time.sleep(.4)
        pyautogui.click()
def sell(slot):
    """Click an inventory slot and then the on-screen "SELL" button.

    FIX: locateCenterOnScreen returns None when the button image is not
    found; the original then crashed on pos[0]. Now the slot is skipped.
    """
    pyautogui.click(SLOTS[slot][0], SLOTS[slot][1])
    time.sleep(.4)
    pos= pyautogui.locateCenterOnScreen('sell_button.png', region=(SLOTS[slot][0], SLOTS[slot][1], DIM[0]/4, DIM[1]/2), grayscale=True)
    print(pos)
    if pos is None:
        # Sell button not visible for this slot; nothing to click.
        return
    pyautogui.click(pos[0],pos[1])
def equip(slot):
    """Click an inventory slot, then the "equip" option in its context menu."""
    x, y = SLOTS[slot]
    pyautogui.moveTo(x, y)
    time.sleep(.1)
    pyautogui.click()
    # The "equip" entry sits at a fixed offset from the slot.
    pyautogui.moveRel(0.192 * DIM[0], 0.198 * DIM[1])
    time.sleep(.1)
    pyautogui.click()
def open_chest(slot, n):
    """Open the chest in *slot* exactly *n* times."""
    for _ in range(n):
        use(slot, chest=True)
# Main menu loop: repeat until the user picks anything other than 1 or 2.
running = True
while running :
    # NOTE(review): prompt returns None when cancelled; the branches below
    # would then raise on .split() — confirm this is acceptable.
    userinput = pyautogui.prompt(text = '1 - Open Chests \n2 - Sell items\n 9 - Quit', title = 'What do you want to do ?')
    if userinput == '1':
        i = pyautogui.prompt(text='Print the slot number (1 to 30) of the chest followed by the number of chests to open. Example : 5 20').split()
        open_chest(int(i[0]), int(i[1]))
    elif userinput == '2':
        # Capture a reference screenshot of the SELL button for later matching.
        pyautogui.alert('Put the mouse pointer over the "SELL" button then press "Enter"')
        pos = pyautogui.position()
        pyautogui.screenshot('sell_button.png', region=(pos[0],pos[1], 100,50))
        i = pyautogui.prompt(text='Write the starting and end slot (inclusive) of the items you want to sell, EVERYTHING in between will be sold !').split()
        start, end = int(i[0]), int(i[1])
        # Inclusive range: both endpoints are sold.
        for slot in range(start, end+1):
            sell(slot)
            time.sleep(.1)
    else :
running = False | Furrane/voxelbot | main.py | main.py | py | 2,768 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyautogui.alert",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pyautogui.position",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyautogui.alert",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyautogui.position",... |
12080387469 | from tinydb import TinyDB, Query, where
# Demo of TinyDB update/upsert/remove operations on data.json.
db = TinyDB("data.json", indent=4)
# Set score to 10 on documents whose name is Patrick.
db.update({"score": 10}, where ("name") == "Patrick")
# No condition given: this updates EVERY document in the table.
db.update({"roles": ["Junior"]})
db.update({"roles": ["Expert"]}, where("name") == "Patrick")
# upsert: update Pierre if present, otherwise insert him.
db.upsert({"name": "Pierre", "score": 120, "roles": ["Senior"]}, where("name") == "Pierre")
# Delete all documents with a score of 0.
db.remove(where("score") == 0)
# db.truncate() # removes all documents from the db
{
"api_name": "tinydb.TinyDB",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "tinydb.where",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "tinydb.where",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tinydb.where",
"line_number"... |
28890711101 | """Tool for processing pytd files.
pytd is a type declaration language for Python. Each .py file can have an
accompanying .pytd file that specifies classes, argument types, return types
and exceptions.
This binary processes pytd files, typically to optimize them.
Usage:
pytd_tool [flags] <inputfile> <outputfile>
"""
import argparse
import sys
from pytype import utils
from pytype.imports import builtin_stubs
from pytype.pyi import parser
from pytype.pytd import optimize
from pytype.pytd import pytd_utils
def make_parser():
    """Build the command-line option parser for the pytd tool.

    Returns:
        An argparse.ArgumentParser configured with all supported options.
    """
    arg_parser = argparse.ArgumentParser(
        usage="%(prog)s [options] infile.pytd [outfile.pytd]")
    # Positional input filename and (optional) output filename.
    arg_parser.add_argument("input", help="File to process")
    arg_parser.add_argument(
        "output", nargs="?",
        help=("Output file (or - for stdout). If output is omitted, "
              "the input file will be checked for errors."))
    # Optimization switches.
    arg_parser.add_argument(
        "-O", "--optimize", action="store_true", dest="optimize", default=False,
        help="Optimize pytd file.")
    arg_parser.add_argument(
        "--lossy", action="store_true", dest="lossy", default=False,
        help="Allow lossy optimizations, such as merging classes.")
    arg_parser.add_argument(
        "--max-union", type=int, action="store", dest="max_union", default=4,
        help="Maximum number of objects in an 'or' clause.\nUse with --lossy.")
    arg_parser.add_argument(
        "--use-abcs", action="store_true", dest="use_abcs", default=False,
        help="Inject abstract bases classes for type merging.\nUse with --lossy.")
    arg_parser.add_argument(
        "--remove-mutable", action="store_true", dest="remove_mutable", default=False,
        help="Remove mutable parameters.")
    # Target interpreter version and output formatting.
    arg_parser.add_argument(
        "-V", "--python_version", type=str, action="store",
        dest="python_version", default=None,
        help=("Python version to target (\"major.minor\", e.g. \"3.10\")"))
    arg_parser.add_argument(
        "--multiline-args", action="store_true", dest="multiline_args", default=False,
        help="Print function arguments one to a line.")
    return arg_parser
def main():
  """Entry point: parse options, load a pytd file, optionally optimize it.

  Exits with status 1 on an invalid Python version or a pytd parse error.
  When no output file is given the input is only checked for errors.
  """
  argument_parser = make_parser()
  opts = argument_parser.parse_args()
  if opts.python_version:
    python_version = utils.version_from_string(opts.python_version)
  else:
    # Default to the running interpreter's (major, minor) version.
    python_version = sys.version_info[:2]
  try:
    utils.validate_version(python_version)
  except utils.UsageError as e:
    sys.stderr.write(f"Usage error: {e}\n")
    sys.exit(1)
  options = parser.PyiOptions(python_version=python_version)
  with open(opts.input) as fi:
    sourcecode = fi.read()
    try:
      parsed = parser.parse_string(
          sourcecode, filename=opts.input, options=options)
    except parser.ParseError as e:
      sys.stderr.write(str(e))
      sys.exit(1)
  if opts.optimize:
    # Optimization needs the builtins/typing stubs for lookups during merging.
    parsed = optimize.Optimize(
        parsed,
        pytd_utils.Concat(*builtin_stubs.GetBuiltinsAndTyping(options)),
        lossy=opts.lossy,
        use_abcs=opts.use_abcs,
        max_union=opts.max_union,
        remove_mutable=opts.remove_mutable,
        can_do_lookup=False)
  if opts.output is not None:
    out_text = pytd_utils.Print(parsed, opts.multiline_args)
    if opts.output == "-":
      # "-" is the conventional marker for stdout.
      sys.stdout.write(out_text)
    else:
      with open(opts.output, "w") as out:
        out.write(out_text)


if __name__ == "__main__":
  main()
| google/pytype | pytype/pytd/main.py | main.py | py | 3,389 | python | en | code | 4,405 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pytype.utils.version_from_string",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pytype.utils",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": ... |
34887554995 | import django_filters
from .models import *
from django_filters import DateFilter, CharFilter, NumberFilter
from django.forms.widgets import TextInput, NumberInput, DateInput, SelectDateWidget
# class TitleFilter(django_filters.FilterSet):
# title = CharFilter(field_name='title', lookup_expr='icontains',
# widget=TextInput(attrs={
# 'p laceholder': "Znajdź swoją roślinę!",
# 'class': "form-control me-2 button-search-right"}))
#
# class Meta:
# model = Offer
# fields = ['title']
class OfferFilter(django_filters.FilterSet):
    """Filter set for Offer listings: posting-date range and price range.

    Labels and select-date placeholders are user-facing Polish strings.
    """

    # Offers posted on/after the chosen date (date_posted >= start_date).
    start_date = DateFilter(field_name='date_posted', lookup_expr='gte', label='Data od:',
                            widget=SelectDateWidget(empty_label=('rok', 'miesiąc', 'dzień'), attrs={
                                'class': "form-control me-2 button-search-right",
                                'style': 'width: auto; display: inline-block;'}))
    # Offers posted on/before the chosen date (date_posted <= end_date).
    end_date = DateFilter(field_name='date_posted', lookup_expr='lte', label='Data do:',
                          widget=SelectDateWidget(empty_label=('rok', 'miesiąc', 'dzień'), attrs={
                              'class': "form-control me-2 button-search-right",
                              'style': 'width: auto; display: inline-block;'}))
    # Lower price bound (price >= cheapest).
    cheapest = NumberFilter(field_name='price', lookup_expr='gte', label='Cena od',
                            widget=NumberInput(attrs={
                                'class': "form-control me-2 button-search-right",
                                'style': 'width: auto; display: inline-block; margin: 4px'}))
    # Upper price bound (price <= expensive).
    expensive = NumberFilter(field_name='price', lookup_expr='lte', label='Cena do',
                             widget=NumberInput(attrs={
                                 'class': "form-control me-2 button-search-right",
                                 'style': 'width: auto; display: inline-block; margin: 4px'}))

    class Meta:
        # The explicitly declared filters above drive the UI; the excluded
        # model fields get no auto-generated filters.
        model = Offer
        fields = '__all__'
        exclude = ['seller', 'description', 'date_posted', 'title', 'price', ]
| viginti23/project-home-gardens | home/filters.py | filters.py | py | 2,126 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django_filters.FilterSet",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "django_filters.DateFilter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.forms.widgets.SelectDateWidget",
"line_number": 20,
"usage_type": "call"
... |
38242299725 | import os, re, math, sys
from collections import defaultdict
VCF_CONTIG_PATT = re.compile('ID=(\w+),length=(\d+)')
PROG_NAME = 'Hycco'
DESCRIPTION = 'Hycco is an HMM based method to estimate hybrid chromosomal crossover points using distinguising SNPs from two parental genotypes'
FILE_TAG = 'crossover_regions'
DEFAULT_BIN_SIZE = 10000
DEFAULT_QUALITY = 100.0
DEFAULT_MIN_CHROMO_SIZE = 1.0
DEFAULT_NUM_ITER = 400
def info(msg, prefix='INFO'):
    """Print *msg* to stdout, tagged with a right-aligned 8-character prefix."""
    print(f'{prefix:>8} : {msg}')
def warn(msg, prefix='WARNING'):
    """Print a warning message to stdout with a right-aligned 8-char prefix."""
    print(f'{prefix:>8} : {msg}')
def fatal(msg, prefix='%s FAILURE' % PROG_NAME):
  """Print a fatal error message and terminate the process.

  BUG FIX: previously exited with status 0, which signals *success* to the
  calling shell/pipeline even though a fatal error occurred; exit with a
  non-zero status so failures are detectable by callers.
  """
  print('%8s : %s' % (prefix, msg))
  sys.exit(1)
def check_file_problems(file_path):
  """Return a human-readable problem description for *file_path*, or None.

  Checks, in order: existence, regular-file status, non-zero size and read
  permission. The first failing check determines the returned message.
  """
  if not os.path.exists(file_path):
    return 'File "%s" does not exist' % file_path
  if not os.path.isfile(file_path):
    return 'Location "%s" is not a regular file' % file_path
  if os.stat(file_path).st_size == 0:
    return 'File "%s" is of zero size ' % file_path
  if not os.access(file_path, os.R_OK):
    return 'File "%s" is not readable' % file_path
  return None
def test_imports():
  """Verify that the required third-party modules are importable.

  Warns about each missing module; if any critical one (numpy, sklearn,
  hmmlearn) is unavailable the program is aborted via fatal(). matplotlib
  is optional - without it only the graphing option is disabled.
  """
  critical = False
  try:
    from numpy import array
  except ImportError:
    critical = True
    warn('Critical module "numpy" is not installed or accessible')
  try:
    from sklearn import cluster
  except ImportError:
    critical = True
    warn('Critical module "sklearn" is not installed or accessible')
  try:
    from hmmlearn import hmm
  except ImportError:
    critical = True
    warn('Critical module "hmmlearn" is not installed or accessible')
  try:
    from matplotlib import pyplot
  except ImportError:
    # Non-critical: plotting is simply unavailable later on.
    warn('Module "matplotlib" is not installed or accessible. Graphing option is not available.')
  if critical:
    fatal('Exiting because critial Python modules are not available')
test_imports()
import numpy as np
def read_vcf(file_path, min_qual=100):
  """
  Read a VCF file to get SNPs with a given minimum quality.

  Only single-nucleotide substitutions (1 bp ref and alt) with quality at or
  above *min_qual* are kept. Gzipped input is detected by a .gz extension.

  Returns a dict of chromosome sizes (from the ##contig header lines) and a
  dict of position-sorted (position, quality) SNP lists, keyed by chromosome.
  """
  if file_path.lower().endswith('.gz'):
    import gzip
    file_obj = gzip.open(file_path, 'rt')
  else:
    file_obj = open(file_path)
  chromo_dict = {}
  var_dict = defaultdict(list)
  for line in file_obj:
    if line[:9] == '##contig=':
      # Header line: extract contig name and length.
      match = VCF_CONTIG_PATT.search(line)
      if match:
        chromo = match.group(1)
        size = int(match.group(2))
        chromo_dict[chromo] = size
    elif line[0] == '#':
      continue
    else:
      # NOTE(review): this unpack assumes exactly one sample column (10
      # fields total) - verify against the expected input VCFs. Also note
      # the local `info` shadows the module-level info() helper here.
      chromo, pos, _id, ref, alt, qual, filt, info, fmt, xgb3 = line.split()
      qual = float(qual)
      if len(ref) == 1 and len(alt) == 1:
        if qual >= min_qual:
          var_dict[chromo].append((int(pos), qual))
  file_obj.close()
  # VCF may not be sorted
  sort_var_dict = {}
  for chromo in var_dict:
    sort_var_dict[chromo] = sorted(var_dict[chromo])
  return chromo_dict, sort_var_dict
def get_contig_data(chromo_dict, var_dict, bin_size=1000):
  """Bin log10-scaled SNP quality scores along each chromosome.

  For every chromosome (in sorted name order) a contiguous float array is
  built with one slot per *bin_size* window plus one overflow slot. Each
  SNP contributes log10(1 + qual), split linearly between its two
  neighbouring bin edges according to its position within the bin.

  Returns a list of numpy arrays, one per chromosome in sorted order.
  """
  bin_f = float(bin_size)
  tracks = []
  for chromo in sorted(chromo_dict):
    chromo_len = chromo_dict[chromo]
    num_bins = int(math.ceil(chromo_len/bin_f)) + 1
    track = np.zeros(num_bins, float)
    for pos, qual in var_dict.get(chromo, []):
      bin_idx = int(pos/bin_size)
      frac = (pos - (bin_idx*bin_size))/bin_f
      score = np.log10(1.0 + qual)
      # Linear interpolation: share the score between the two bin edges.
      track[bin_idx] += (1.0-frac) * score
      track[bin_idx+1] += frac * score
    tracks.append(track)
  return tracks
def get_training_data(contig_data1, contig_data2):
  """Stack two parallel sets of per-chromosome score tracks for HMM training.

  Pairs the i-th track of each input element-wise, concatenates all
  chromosomes into one (total_bins, 2) array and also returns the per
  chromosome lengths, as required by hmmlearn's fit(X, lengths).
  """
  paired = [np.array(list(zip(track_a, contig_data2[idx])))
            for idx, track_a in enumerate(contig_data1)]
  sizes = [len(block) for block in paired]
  return np.concatenate(paired), sizes
def train_hybrid_crossover_hmm(vcf_paths_pairs, text_labels=('A','B'), out_dir='', bin_size=10000, min_qual=100,
                               min_chromo_size=1e6, num_hmm_iter=400, plot_graphs=False, covariance_type='diag'):
  """
  Main function to train the HMM and plot the results.

  For every (VCF-A, VCF-B) pair a two-state Gaussian HMM is trained on the
  binned, log-scaled SNP quality tracks of both parental genotypes. Per-bin
  state probabilities are decoded, contiguous same-state regions are refined
  against exact SNP positions and written to a TSV file in *out_dir*.

  NOTE(review): when plot_graphs is True this relies on a module-level
  `plt` being bound (done in the __main__ block) - confirm before reuse.
  """
  from hmmlearn import hmm
  # This is simply to remove worrisome messages which ought to be addressed
  # in newer hmmlearn versions.
  # NOTE(review): `from sklearn import warnings` resolves to the stdlib
  # warnings module re-exported by the sklearn package; monkey-patching
  # warn() silences warnings process-wide, not just for sklearn.
  from sklearn import warnings
  def nullfunc(*args, **kw):
    pass
  warnings.warn = nullfunc
  chromos = set()
  var_pairs = []
  chromo_dicts = []
  n_states=2
  # Read the VCF data and chromosome sizes
  for vcf_path_a, vcf_path_b in vcf_paths_pairs:
    chromo_dict_a, var_dict_a = read_vcf(vcf_path_a, min_qual)
    chromo_dict_b, var_dict_b = read_vcf(vcf_path_b, min_qual)
    chromos.update(chromo_dict_a.keys())
    chromos.update(chromo_dict_b.keys())
    var_pairs.append((var_dict_a, var_dict_b))
    chromo_dicts += [chromo_dict_a, chromo_dict_b]
  # Collate chromosome sizes, taking the largest,
  # just in case there are any discrepancies, and
  # ignoring any that are too small to bother with
  chromo_dict = {}
  for chromo in chromos:
    size = max([cd.get(chromo, 0) for cd in chromo_dicts])
    if size >= min_chromo_size:
      chromo_dict[chromo] = size
  chromos = sorted(chromo_dict)
  # Look through variant call pairs for each strain
  if plot_graphs:
    # One subplot per (chromosome, VCF pair) cell.
    fig, axarr = plt.subplots(len(chromos), len(var_pairs))
    title_text = 'Hybrid genome HMM states. Bin size = {:,} Min qual = {}'
    fig.suptitle(title_text.format(bin_size, min_qual))
  n_cols = len(var_pairs)
  head_1 = '#HMM params - bin_size:%d min_qual:%d min_chromo_size:%d num_hmm_iter:%d\n'
  head_2 = '#cols - chr\thaplotype\tregion_size\tregion_start\tregion_end\tfirst_SNP\tlast_SNP\tbin_start\tbin_end\n'
  for col, (var_dict_a, var_dict_b) in enumerate(var_pairs):
    file_name = '%s_%s.tsv' % (text_labels[col], FILE_TAG)
    file_path = os.path.join(out_dir, file_name)
    file_obj = open(file_path, 'w')
    write = file_obj.write
    write(head_1 % (bin_size, min_qual, min_chromo_size, num_hmm_iter))
    write(head_2)
    # Binned log-quality tracks for each parental genotype.
    contig_data_a = get_contig_data(chromo_dict, var_dict_a, bin_size)
    contig_data_b = get_contig_data(chromo_dict, var_dict_b, bin_size)
    in_data, sizes = get_training_data(contig_data_a, contig_data_b)
    # Setup an HMM object
    model = hmm.GaussianHMM(n_components=n_states, covariance_type=covariance_type,
                            n_iter=num_hmm_iter)
    # Run Baum-Welch to learn the HMM probabilities
    model.fit(in_data, sizes)
    mv = in_data.max()
    i = 0
    for row, chromo in enumerate(chromos):
      m = sizes[row]
      chrom_data = in_data[i:i+m]
      i += m
      # Run Forward-Backward to get state probabilities at each point
      probs = model.predict_proba(chrom_data, [m])
      # The order of state labels is arbitrary, so use a dot product to
      # deduce which state best matches the first genome
      dp1 = np.dot(probs[:,0], chrom_data[:,0])
      dp2 = np.dot(probs[:,0], chrom_data[:,1])
      if dp2 > dp1:
        probs_a = probs[:,1]
        probs_b = probs[:,0]
      else:
        probs_a = probs[:,0]
        probs_b = probs[:,1]
      # Create chromosome regions of contiguous state according to which of
      # the probabilities for the binned regions was highest
      prev_state = 0
      region_start = 0
      chromo_regions = []
      for j in range(m):
        pos = j * bin_size
        if probs_a[j] > probs_b[j]:
          state = 'A'
        elif probs_a[j] < probs_b[j]:
          state = 'B'
        else:
          state = ''
        if state != prev_state:
          if prev_state:
            chromo_regions.append((region_start, pos, prev_state))
          region_start = pos
          prev_state = state
      # Last region goes to the chromosome end
      if state:
        chromo_regions.append((region_start, min(pos, chromo_dict[chromo]), prev_state))
      # Refine region edges according to precise SNP positions
      # which could be before or after end of binned region
      # Remove SNPs common to both genotypes
      pos_counts = defaultdict(int)
      for pos, qual in var_dict_a[chromo]:
        pos_counts[pos] += 1
      for pos, qual in var_dict_b[chromo]:
        pos_counts[pos] += 1
      # Get sorted positions (and corresponding states) of distinguishing SNPs
      vars_state_pos = [(pos, 'A') for pos, qual in var_dict_a[chromo] if pos_counts[pos] < 2]
      vars_state_pos += [(pos, 'B') for pos, qual in var_dict_b[chromo] if pos_counts[pos] < 2]
      vars_state_pos.sort()
      var_pos, var_states = zip(*vars_state_pos)
      var_pos = np.array(var_pos)
      n_var = len(var_pos)
      for start, end, state in chromo_regions:
        # Find transitions from A/B genotypes SNPs working away from region edge
        # Report terminal snips and mid-way between transitions, where possible
        # Refine beginning
        idx_left = idx_right = np.abs(var_pos-start).argmin() # Closest SNP to bin boundary
        while (idx_right < n_var-1) and (var_states[idx_right] != state): # Find next matching SNP
          idx_right += 1
        while (idx_left >= 0) and (var_states[idx_left] == state): # Find prev mismatching SNP
          idx_left -= 1
        vp1 = var_pos[idx_right]
        if vp1 > end:
          msg = 'No SNPs for HMM state "%s" found in chromosome region %s:%d-%d. '
          msg += 'Probably the HMM was not able to separate states in the data as expected'
          # NOTE(review): the %-placeholders in msg are never filled, so the
          # raw template string is printed verbatim by warn().
          warn(msg)
          vp1 = start
        if idx_left < 0: # Off the chromosome start
          rp1 = 0
        else:
          rp1 = int((var_pos[idx_left] + vp1)/2)
        # Refine end
        idx_left = idx_right = np.abs(var_pos-end).argmin() # Closest SNP to bin boundary
        while (idx_left >= 0) and (var_states[idx_left] != state): # Find prev matching SNP
          idx_left -= 1
        while (idx_right < n_var) and (var_states[idx_right] == state): # Find next mismatching SNP
          idx_right += 1
        vp2 = var_pos[idx_left]
        if vp2 < start:
          vp2 = end
        if idx_right < n_var:
          rp2 = int((vp2 + var_pos[idx_right])/2)
        else: # Off the chromosome end
          rp2 = end
        # Chromosome, state code, region start, region end, region length,
        # first matching var position, last matching var pos
        line_data = (chromo, state, rp2-rp1, rp1, rp2, vp1, vp2, start, end)
        line = '%s\t%s\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n' % line_data
        write(line)
      if plot_graphs:
        # Plot first probabilities at a reasonable scale
        probs = probs_a * 0.75 * mv
        # X values for plot in Megabases
        x_vals = np.array(range(len(chrom_data))) * bin_size / 1e6
        nx = x_vals[-1]
        # Plot the lines
        if n_cols > 1:
          ax = axarr[row, col]
        else:
          ax = axarr[row]
        ax.plot(x_vals, chrom_data[:,0], color='#FF4000', alpha=0.4, linewidth=1.5, label='Genome A SNPs')
        ax.plot(x_vals, chrom_data[:,1], color='#0080FF', alpha=0.4, linewidth=1.5, label='Genome B SNPs')
        ax.plot(x_vals, probs[:], color='#808080', alpha=0.75, linewidth=1.0, label='State A probability', linestyle='-')
        # Titles axes and labels at the appropriate spots
        dx = bin_size / 1e6
        if row == 0:
          ax.set_title(text_labels[col])
        ax.set_xlim((-dx, nx+dx))
        ax.set_ylim((0, 1.1*mv))
        ax.set_xlabel('Chr %s Position (Mb)' % chromo, fontsize=11)
        ax.axhline(0, -dx, nx+dx, color='#808080', alpha=0.5)
        if col == 0:
          ax.set_ylabel('$\Sigma[log_{10}(1+qual)]$')
        if row == 0:
          ax.legend(fontsize=11, frameon=False, ncol=3)
    file_obj.close()
    info('Output data written to %s' % file_path)
  if plot_graphs:
    plt.show()
if __name__ == '__main__':
  # Command-line entry point: parse options, validate inputs, then run the
  # HMM crossover analysis on each pair of VCF files.
  from argparse import ArgumentParser

  epilog = 'For further help email tstevens@mrc-lmb.cam.ac.uk or garethb@mrc-lmb.cam.ac.uk'

  arg_parse = ArgumentParser(prog=PROG_NAME, description=DESCRIPTION,
                             epilog=epilog, prefix_chars='-', add_help=True)

  arg_parse.add_argument(metavar='VCF_FILE', nargs='+', dest='i',
                         help='Input VCF format files containing variant calls for each parental genotype/strain. Files should be listed sequentially in pair order. Inputs may be gzipped (this is assumed by a .gz file extension).')

  arg_parse.add_argument('-b', default=DEFAULT_BIN_SIZE, type=int, metavar='BIN_SIZE',
                         help='Binned analysis region size (in bp) for defining HMM chromosome segments (). Default: %d bp.' % DEFAULT_BIN_SIZE)

  arg_parse.add_argument('-g', default=False, action='store_true',
                         help='Specifies that graphical output will be displayed for each VCF pair using matplotlib')

  arg_parse.add_argument('-m', default=DEFAULT_MIN_CHROMO_SIZE, type=float, metavar='MIN_CHROMO_SIZE',
                         help='Minimum length (in Mb) required for a contig/chromosome (as described in VCF header) to be analysed. Default: %.2f Mb.' % DEFAULT_MIN_CHROMO_SIZE)

  arg_parse.add_argument('-n', default=DEFAULT_NUM_ITER, type=int, metavar='NUM_HMM_ITER',
                         help='Number of iterations to perform when estimating Gaussian HMM probabilities using the Baum-Welch method. Default: %d' % DEFAULT_NUM_ITER)

  arg_parse.add_argument('-o', metavar='OUT_DIR',
                         help='Optional output directory for writing results. Defaults to the current working directory.')

  arg_parse.add_argument('-q', default=DEFAULT_QUALITY, type=float, metavar='',
                         help='Minimum quality (phred-scale) for accepting a SNP call, as described in the VCF data lines. Default: %.2f' % DEFAULT_QUALITY)

  arg_parse.add_argument('-s', default=0, type=int, metavar='RANDOM_SEED',
                         help='Optional random seed value, i.e. to make repeat calculations deterministic.')

  arg_parse.add_argument('-t', nargs='+', metavar='TEXT_LABEL',
                         help='Optional text labels to describe each input pair, which are used to name output files as {NAME}_%s.tsv, in the same order as the input VCF file pairs.' % FILE_TAG)

  args = vars(arg_parse.parse_args(sys.argv[1:]))

  vcf_paths = args['i']
  plot_graphs = args['g']
  ran_seed = args['s']
  out_dir = args['o'] or './'
  text_labels = args['t']
  bin_size = args['b']
  min_qual = args['q']
  min_chromo_size = int(args['m'] * 1e6)  # Mb on the command line -> bp internally
  num_hmm_iter = args['n']

  try:
    from matplotlib import pyplot as plt
  except ImportError:
    # No matplotlib: silently fall back to text-only output.
    plot_graphs = False

  if ran_seed:
    np.random.seed(ran_seed)

  n_paths = len(vcf_paths)

  if n_paths < 2:
    fatal('At least two VCF file paths must be specified')

  if n_paths % 2 == 1:
    fatal('An even number of VCF paths (i.e. pairs of files) must be input. %d paths were specified' % n_paths)

  # BUG FIX: use integer division; the former float result broke the
  # text_labels[:n_pairs] slice below with a TypeError under Python 3.
  n_pairs = n_paths // 2

  if text_labels:
    text_labels = list(text_labels)
  else:
    text_labels = []

  # Pad missing labels with generated names 'pair_1', 'pair_2', ...
  while len(text_labels) < n_pairs:
    label = 'pair_%d' % (1+len(text_labels))
    text_labels.append(label)

  if len(text_labels) > n_pairs:
    warn('Number of input text labels (%d) greater than the number of input pairs (%d)' % (len(text_labels), n_pairs))
    text_labels = text_labels[:n_pairs]

  abs_path = os.path.abspath(out_dir)
  if not os.path.exists(abs_path):
    # BUG FIX: the messages below previously contained an unformatted "%d"
    # placeholder and were never interpolated with the actual path.
    fatal('Output directory "%s" does not exist' % abs_path)

  if not os.path.isdir(abs_path):
    fatal('Output path "%s" is not a directory' % abs_path)

  for vcf_path in vcf_paths:
    problem = check_file_problems(vcf_path)
    if problem:
      fatal(problem)

  # Group the flat path list into consecutive (A, B) pairs.
  vcf_paths_pairs = [(vcf_paths[i], vcf_paths[i+1]) for i in range(0, n_paths, 2)]

  train_hybrid_crossover_hmm(vcf_paths_pairs, text_labels, out_dir, bin_size,
                             min_qual, min_chromo_size, num_hmm_iter, plot_graphs)

# Example ./hycco xgb3_vs12_clean.vcf.gz xgb3_vs13_clean.vcf.gz
| tjs23/hycco | hycco.py | hycco.py | py | 16,851 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
... |
932080591 | import argparse
import sys
from FitsToPNG import main_run
from FitsMath import calibration_compute_process
from JsonConvert import JsonConvert
def argument_handling():
    """
    Parse the command-line arguments of the tool.

    :return: path to the JSON configuration file (value of -j/--json)
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '-j', '--json', required=True, type=str,
        help='Insert json file path')
    parsed = arg_parser.parse_args()
    return parsed.json
def fits_to_png_proc(path_arr: list):
    """Convert a FITS file to a PNG image.

    :param path_arr: two-element list ``[fits_file_path, output_png_path]``;
        any other length raises a ValueError on unpacking.
    """
    fits_path, png_path = path_arr
    main_run(fits_path, png_path)
def validate_files(bias_file, dark_file, flats_file, light_file) -> dict:
    """
    Collect the non-empty calibration file lists under their category names.

    :param bias_file: list of bias frame paths (possibly empty)
    :param dark_file: list of dark frame paths (possibly empty)
    :param flats_file: list of flat frame paths (possibly empty)
    :param light_file: list of light frame paths (possibly empty)
    :return: dict mapping 'bias'/'dark'/'flat'/'light' to the lists that
             actually contain entries; empty lists are omitted
    """
    categories = (('bias', bias_file), ('dark', dark_file),
                  ('flat', flats_file), ('light', light_file))
    return {name: files for name, files in categories if len(files) > 0}
if __name__ == '__main__':
    # sys.argv = ['main.py', '-j', './stam.json']
    # Read the JSON config path from the command line, then load the job
    # description from it.
    json_file_path = argument_handling()
    data = JsonConvert(json_file_path)
    fits_to_png, bias, dark, flats, light, output_master_bias, output_master_dark, output_master_flat, output_calibration_file, output_calibration_folder, solve_stars_plate = data.load_data()
    if fits_to_png:
        # Simple conversion job: FITS -> PNG, nothing else.
        fits_to_png_proc(fits_to_png)
    else:
        # Calibration job: only the non-empty frame categories are passed on.
        files = validate_files(bias, dark, flats, light)
        calibration_compute_process(files, output_master_bias, output_master_dark, output_master_flat,
                                    output_calibration_file, output_calibration_folder, solve_stars_plate)
| AstroPhotometry/AstroPhotometry | python/main.py | main.py | py | 1,913 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "FitsToPNG.main_run",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "JsonConvert.JsonConvert",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "Fi... |
15991432175 | """Point-wise Spatial Attention Network"""
import torch
import torch.nn as nn
up_kwargs = {'mode': 'bilinear', 'align_corners': True}
norm_layer = nn.BatchNorm2d
class _ConvBNReLU(nn.Module):
    """Conv2d -> normalisation -> ReLU (or ReLU6) block, conv without bias."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, relu6=False, norm_layer=norm_layer):
        super(_ConvBNReLU, self).__init__()
        # Bias is redundant directly before a normalisation layer.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
        self.bn = norm_layer(out_channels)
        # ReLU6 clamps activations at 6; both variants run in-place.
        if relu6:
            self.relu = nn.ReLU6(True)
        else:
            self.relu = nn.ReLU(True)

    def forward(self, x):
        # Conv, normalise, then activate.
        return self.relu(self.bn(self.conv(x)))
class PSAHead(nn.Module):
    """Point-wise Spatial Attention segmentation head.

    Applies PSA to the selected backbone feature map, projects the attention
    output back to the input channel width, concatenates it with the input
    and predicts per-pixel class logits.
    """

    def __init__(self, in_channels=768, num_classes=6, norm_layer=norm_layer, in_index=3):
        super(PSAHead, self).__init__()
        # Index of the backbone stage to take as input (see _transform_inputs).
        self.in_index = in_index
        # psa_out_channels = crop_size // stride_rate ** 2
        # NOTE(review): hard-wired to a 512 crop with stride 32, i.e.
        # (512 // 32) ** 2 = 256 attention channels; other input resolutions
        # are not accounted for here - confirm before changing crop size.
        psa_out_channels = (512 // 32) ** 2
        self.psa = _PointwiseSpatialAttention(in_channels, psa_out_channels, norm_layer)
        self.conv_post = _ConvBNReLU(psa_out_channels, in_channels, 1, norm_layer=norm_layer)
        self.project = nn.Sequential(
            _ConvBNReLU(in_channels * 2, in_channels // 2, 3, padding=1, norm_layer=norm_layer),
            nn.Dropout2d(0.1, False),
            nn.Conv2d(in_channels // 2, num_classes, 1))

    def _transform_inputs(self, inputs):
        # Select one stage (int index) or several stages (list/tuple of
        # indices) from the backbone output; pass through otherwise.
        if isinstance(self.in_index, (list, tuple)):
            inputs = [inputs[i] for i in self.in_index]
        elif isinstance(self.in_index, int):
            inputs = inputs[self.in_index]
        return inputs

    def forward(self, inputs):
        x = self._transform_inputs(inputs)
        # Global attention features, projected back to the input channel width.
        global_feature = self.psa(x)
        out = self.conv_post(global_feature)
        # Fuse original and attention features, then classify.
        out = torch.cat([x, out], dim=1)
        out = self.project(out)
        return out
class _PointwiseSpatialAttention(nn.Module):
    """Runs 'collect' and 'distribute' attention branches and concatenates them."""

    def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d):
        super(_PointwiseSpatialAttention, self).__init__()
        reduced_channels = out_channels // 2
        # Two structurally identical, independently parameterised branches.
        self.collect_attention = _AttentionGeneration(in_channels, reduced_channels, out_channels, norm_layer)
        self.distribute_attention = _AttentionGeneration(in_channels, reduced_channels, out_channels, norm_layer)

    def forward(self, x):
        # Channel-concatenate the collect and distribute branch outputs.
        return torch.cat([self.collect_attention(x), self.distribute_attention(x)], dim=1)
class _AttentionGeneration(nn.Module):
    """One PSA attention branch: channel reduction, attention map, aggregation.

    NOTE(review): the bmm in forward() multiplies (n, reduced, h*w) by
    (n, out_channels, h*w), which is only shape-valid when
    out_channels == h*w - i.e. the fixed 16x16 feature map implied by
    PSAHead's hard-wired psa_out_channels. Verify for other input sizes.
    """

    def __init__(self, in_channels, reduced_channels, out_channels, norm_layer):
        super(_AttentionGeneration, self).__init__()
        # 1x1 channel reduction of the input features.
        self.conv_reduce = _ConvBNReLU(in_channels, reduced_channels, 1, norm_layer=norm_layer)
        # Attention map generator: 1x1 conv stack, final conv without bias.
        self.attention = nn.Sequential(
            _ConvBNReLU(reduced_channels, reduced_channels, 1, norm_layer=norm_layer),
            nn.Conv2d(reduced_channels, out_channels, 1, bias=False))

        self.reduced_channels = reduced_channels

    def forward(self, x):
        reduce_x = self.conv_reduce(x)
        attention = self.attention(reduce_x)
        n, c, h, w = attention.size()
        # Flatten spatial dims for the batched matrix multiply.
        attention = attention.view(n, c, -1)
        reduce_x = reduce_x.view(n, self.reduced_channels, -1)
        # Aggregate reduced features weighted by the softmax-normalised
        # attention (softmax over the attention-channel dimension).
        fm = torch.bmm(reduce_x, torch.softmax(attention, dim=1))
        fm = fm.view(n, self.reduced_channels, h, w)
        return fm
| zyxu1996/Efficient-Transformer | models/head/psa.py | psa.py | py | 3,539 | python | en | code | 67 | github-code | 36 | [
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
... |
31167909236 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as pl
# Accumulators: system size n and the two cohesive-energy columns.
n = []
e = []
ep = []

# Parse space-separated columns: n, E(non-periodic), E(periodic).
with open('cohesive.txt') as file:
    next(file)  # skip the header line
    for line in file:
        value = line.strip().split(' ')
        n.append(int(value[0]))
        e.append(float(value[1]))
        ep.append(float(value[2]))

# (The former redundant re-cast `n = [int(i) for i in n]` was dropped:
# the values are already parsed as int above.)

# Lattice parameter (m) and cell edge length for n^3 unit cells.
a = 5.256*10**-10
x = [a*i**3 for i in n]

pl.plot(n, e, '.')
pl.plot(n, ep, '.')
pl.xticks(n)
pl.xlabel('Number of Unit Cells [^(1/3)]')
pl.ylabel('Eth [eV]')
# BUG FIX: the `b=` keyword of Axes.grid() was removed in matplotlib 3.6
# (renamed to `visible`); passing the flag positionally works on both old
# and new matplotlib versions.
pl.grid(True, which='both')
pl.tight_layout()
pl.legend(['Non-periodic', 'Periodic'])
pl.show()
pl.clf()

# Report the largest-system values.
print(x[-1])
print(e[-1])
print(ep[-1])
| leschultz/MSE760 | hw1/cohesiveplot.py | cohesiveplot.py | py | 640 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matpl... |
38230641429 | from __future__ import division, print_function
import numpy as np
import scipy.linalg
from MatrixIO import load, store
import click
def lqr(A,B,Q,R):
    """Solve the continuous time lqr controller.

    dx/dt = A x + B u
    cost = integral x.T*Q*x + u.T*R*u

    Returns (K, P): the state-feedback gain and the Riccati solution.
    """
    # ref Bertsekas, p.151
    # Solve the continuous algebraic Riccati equation for P.
    riccati_solution = np.matrix(scipy.linalg.solve_continuous_are(A, B, Q, R))
    # LQR gain: K = R^-1 B^T P
    gain = np.matrix(scipy.linalg.inv(R) * (B.T * riccati_solution))
    return gain, riccati_solution
def dlqr(A,B,Q,R):
    """Solve the discrete time lqr controller.

    x[k+1] = A x[k] + B u[k]
    cost = sum x[k].T*Q*x[k] + u[k].T*R*u[k]

    Returns (K, P): the state-feedback gain and the Riccati solution.
    """
    # ref Bertsekas, p.151
    # Solve the discrete algebraic Riccati equation for P.
    P = np.matrix(scipy.linalg.solve_discrete_are(A, B, Q, R))
    # BUG FIX: the gain formula previously referenced an undefined name `X`
    # (a guaranteed NameError at runtime); the Riccati solution is `P`.
    # K = (B^T P B + R)^-1 (B^T P A)
    K = np.matrix(scipy.linalg.inv(B.T*P*B+R)*(B.T*P*A))
    return K, P
@click.command()
@click.option('-A', type=click.Path(exists=True))
@click.option('-B', type=click.Path(exists=True))
@click.option('-Q', type=click.Path(exists=True))
@click.option('-R', type=click.Path(exists=True))
@click.option('-Kout', type=click.Path())
@click.option('-Pout', type=click.Path())
def run_lqr(a, b, q, r, kout, pout):
    """Load the A, B, Q, R matrices from files, solve the continuous-time
    LQR problem and store the resulting gain K and Riccati solution P.

    Note: click normalises the option names, so the single-letter options
    arrive as the lower-case parameters a/b/q/r/kout/pout.
    """
    A = load(a)
    B = load(b)
    Q = load(q)
    R = load(r)
    K, P = lqr(A,B,Q,R)
    store(kout, K)
    store(pout, P)

if __name__ == '__main__':
    run_lqr()
| Zomega/thesis | Wurm/Stabilize/LQR/python/LQR.py | LQR.py | py | 1,450 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.matrix",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scipy.linalg.linalg.solve_continuous_are",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scipy.linalg.linalg",
"line_number": 20,
"usage_type": "attribute"
},
{
"ap... |
8639664963 | # -*- coding: utf-8 -*-
#
# Virtual Satellite 4 - FreeCAD module
#
# Copyright (C) 2019 by
#
# DLR (German Aerospace Center),
# Software for Space Systems and interactive Visualization
# Braunschweig, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
from json_io.products.json_product import AJsonProduct
from json_io.json_definitions import JSON_ELEMNT_CHILDREN, PRODUCT_IDENTIFIER, PART_IDENTIFIER, \
_get_combined_name_uuid, JSON_ELEMENT_NAME, JSON_ELEMENT_UUID
from json_io.products.json_product_child import JsonProductChild
from json_io.json_spread_sheet import FREECAD_PART_SHEET_NAME
from freecad.active_document import ActiveDocument
from itertools import compress
import FreeCAD
import os
from A2plus.a2p_importpart import updateImportedParts
Log = FreeCAD.Console.PrintLog
class JsonProductAssembly(AJsonProduct):
'''
This class represents an assembly, which consists of several children
which basically reference the parts to be imported to this assembly.
The parts/children contain information about their position and orientation.
This information is processed to correctly place the parts in the assembly.
The assembly itself can also have a referenced part. But his one does
not contain information about its position and rotation. In the current assembly,
this part is supposed to be imported in the current origin of the assembly.
In case this assembly is a sub assembly it may have a position and rotation.
Nevertheless in this particular case, the whole assembly is supposed to be positioned
and rotated in the super assembly. Actually this assembly is than a child product of
the super assembly.
'''
    def _parse_position_and_rotation_from_json(self, json_object):
        '''
        Intentionally a no-op override: an assembly itself has no position or
        orientation. If it does carry these properties, it is a sub assembly
        and is positioned/rotated while being processed as a child of the
        containing super assembly.
        '''
        pass
    def parse_from_json(self, json_object):
        '''
        Parses this assembly from a JSON object.

        The super class parse is invoked first; by convention it delegates to
        the protected position/rotation hook, which this class overrides with
        a no-op (assemblies have no own placement). Afterwards every entry in
        the JSON children list is parsed into a JsonProductChild.

        Returns self when at least one child was parsed, otherwise None so
        callers can discard childless (non-)assemblies.
        '''
        super().parse_from_json(json_object)

        # Get all children from the json and try to parse them
        # into JsonProductChild objects
        json_object_children = list(json_object[JSON_ELEMNT_CHILDREN])
        self.children = []
        for json_object_child in json_object_children:
            json_product_child = JsonProductChild().parse_from_json(json_object_child)
            self.children.append(json_product_child)

        # Don't hand back an assembly if there are no children
        if len(self.children) > 0:
            return self
        else:
            return None
def parse_to_json(self, isRoot=False):
if(isRoot):
json_dict = {
JSON_ELEMENT_NAME: self.name,
JSON_ELEMENT_UUID: self.uuid
}
else:
json_dict = super().parse_to_json()
children_dicts = []
for child in self.children:
if(isRoot):
children_dicts.append(child.parse_to_json())
else:
# ignore part of product assembly
if(not child.get_unique_name() == self.get_unique_name()):
children_dicts.append(child.parse_to_json())
json_dict[JSON_ELEMNT_CHILDREN] = children_dicts
return json_dict
def write_to_freecad(self, active_document):
    """
    Write this assembly into the given FreeCAD document.

    Products already present in the document are updated in place when
    their name matches an incoming one, new products are created, and
    products that no longer appear in the imported data are removed
    together with their sheets.
    """
    # This assembly may refer to a part as well:
    # hence if there is a partUuid and if there is a part name, then
    # it should be written to the FreeCAD document as well.
    old_products = self.get_products_of_active_document(active_document)
    old_product_names = [o[0].Label for o in old_products]
    # store if a product has to be deleted
    # (because it doesn't exist in the new imported JSON file)
    delete_products = [True] * len(old_product_names)
    update_count = 0
    if self.is_part_reference():
        name = _get_combined_name_uuid(self.part_name, self.part_uuid)
        if(name in old_product_names):
            # update the existing product in place
            update_count += 1
            super().write_to_freecad(active_document, create=False)
            delete_products[old_product_names.index(name)] = False
        else:
            # create a new product
            super().write_to_freecad(active_document)
    # And now write the children, they decide on their own if they reference
    # a part or a product
    for child in self.children:
        name = child.get_unique_name()
        if(name in old_product_names):
            # update
            update_count += 1
            child.write_to_freecad(active_document, create=False)
            delete_products[old_product_names.index(name)] = False
        else:
            # create
            child.write_to_freecad(active_document)
    # delete remaining old products; each entry is a (product, sheet)
    # tuple, so both objects are removed from the document
    old_products = list(compress(old_products, delete_products))
    for old_product in old_products:
        active_document.app_active_document.removeObject(old_product[0].Name)
        active_document.app_active_document.removeObject(old_product[1].Name)
    # only if there were updates instead of creates
    if(update_count > 0):
        # update already read in parts
        updateImportedParts(active_document.app_active_document)
def read_from_freecad(self, active_document, working_output_directory, part_list, freecad_object=None, freecad_sheet=None):
    """
    Reads a ProductAssembly from FreeCAD.

    First reads the assembly itself via the super implementation, then
    recursively reads each contained product from its own child
    document — either another JsonProductAssembly (name contains the
    PRODUCT_IDENTIFIER) or a plain AJsonProduct.
    """
    products_with_sheets = self.get_products_of_active_document(active_document)
    # read the assembly itself
    super().read_from_freecad(active_document, working_output_directory, part_list, freecad_object, freecad_sheet)
    self.children = []
    # read the children
    for product, sheet in products_with_sheets:
        name, label = product.Name, product.Label
        # use the source file of the a2plus part,
        # then get the file name (.split(os.path.sep)[-1]) and ignore the FreeCAD file ending ([:-6])
        child_file_name = product.sourceFile.split(os.path.sep)[-1][:-6]
        # open the document for this child
        child_document = ActiveDocument(working_output_directory).open_set_and_get_document(child_file_name)
        if(PRODUCT_IDENTIFIER in name):
            Log(f"Read ProductAssembly '{label}'\n")
            child = JsonProductAssembly()
        else:
            Log(f"Read Product '{label}'\n")
            child = AJsonProduct()
        child.read_from_freecad(child_document, working_output_directory, part_list, freecad_object=product, freecad_sheet=sheet)
        # close the child document again before moving on to the next child
        child_document.close_active_document(child_file_name)
        self.children.append(child)
def get_products_of_active_document(self, active_document):
    """
    Accesses, sorts and filters objects of the current document.

    NOTE: A document always contains productAssemblies or productChild
    as long as it is an assembly itself. Only a document that references
    one part, thus contains the PART_IDENTIFIER in its name, references
    a part.

    Returns a list of (product, sheet) tuples for every found product
    that has a corresponding sheet.
    """
    found_products = []
    found_sheets = []
    for obj in active_document.app_active_document.Objects:
        name, label = obj.Name, obj.Label
        Log("Object: {}, {}\n".format(name, label))
        if FREECAD_PART_SHEET_NAME in name:
            found_sheets.append(obj)
            Log("Object is sheet\n")
        elif PRODUCT_IDENTIFIER in name or PART_IDENTIFIER in name:
            found_products.append(obj)
            Log("Object is product\n")
    # pair every product with each sheet whose label contains the
    # product's label
    products_with_sheets = [
        (product, sheet)
        for product in found_products
        for sheet in found_sheets
        if product.Label in sheet.Label
    ]
    Log(f"Found products with sheets: '{[(p.Label, s.Label) for p, s in products_with_sheets]}'\n")
    return products_with_sheets
def get_product_unique_name(self):
    """Return the unique product name: PRODUCT_IDENTIFIER prefix plus the combined name/uuid."""
    combined_name_uuid = _get_combined_name_uuid(self.name, self.uuid)
    return PRODUCT_IDENTIFIER + combined_name_uuid
| virtualsatellite/VirtualSatellite4-FreeCAD-mod | VirtualSatelliteCAD/json_io/products/json_product_assembly.py | json_product_assembly.py | py | 9,732 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "FreeCAD.Console",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "json_io.products.json_product.AJsonProduct",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "json_io.json_definitions.JSON_ELEMNT_CHILDREN",
"line_number": 77,
"usage... |
25450914107 | from rest_framework import serializers
from taggit.serializers import TagListSerializerField, TaggitSerializer
from accounts.models import Profile
from ...models import Post, Category
class CategorySerializer(serializers.ModelSerializer):
    """Serializes a Category's name together with its read-only id."""

    class Meta:
        model = Category
        fields = ("name", "id")
        read_only_fields = ("id",)
class AuthorSerializer(serializers.ModelSerializer):
    """Read-only serializer for a Profile used as a post author (name and image)."""

    class Meta:
        model = Profile
        fields = ("first_name", "last_name", "image")
        read_only_fields = ("first_name", "last_name", "image")
class PostSerializer(TaggitSerializer, serializers.ModelSerializer):
    """
    Serializer for Post objects with tags, category expansion and
    list/detail dependent output.

    List views get a snippet plus relative/absolute URLs; detail views
    (a pk is present in the URL kwargs) get the full content instead.
    """

    tags = TagListSerializerField()
    # computed read-only fields backed by model helpers
    snippet = serializers.ReadOnlyField(source="get_snippet")
    relative_url = serializers.URLField(source="get_absolute_api_url", read_only=True)
    absolute_url = serializers.SerializerMethodField()
    # category = serializers.SlugRelatedField(slug_field='name', many=True, queryset=Category.objects.all())
    # category = CategorySerializer(many=True)

    class Meta:
        model = Post
        fields = (
            "id",
            "author",
            "image",
            "title",
            "content",
            "snippet",
            "relative_url",
            "absolute_url",
            "category",
            "tags",
            "counted_view",
            "published_date",
        )
        read_only_fields = ("id", "author", "counted_view")

    def get_absolute_url(self, post):
        """Build an absolute URI for the post from the current request."""
        request = self.context.get("request")
        # NOTE(review): passes the integer pk directly to
        # build_absolute_uri — confirm this yields the intended URL
        # (and that a request is always present in the context)
        return request.build_absolute_uri(post.pk)

    def to_representation(self, instance):
        """Expand category/author and trim fields depending on list vs detail view."""
        request = self.context.get("request")
        rep = super().to_representation(instance)
        rep["category"] = CategorySerializer(instance.category, many=True).data
        rep["author"] = AuthorSerializer(instance.author).data
        # a pk in the URL kwargs means this is a detail view
        # NOTE(review): request.parser_context is accessed without a
        # None-guard on request — verify the serializer is always used
        # with a request in its context
        if request.parser_context.get("kwargs").get("pk"):
            rep.pop("snippet", None)
            rep.pop("relative_url", None)
            rep.pop("absolute_url", None)
        else:
            rep.pop("content", None)
        return rep

    def create(self, validated_data):
        """Set the author to the requesting user's Profile before creating."""
        request = self.context.get("request")
        validated_data["author"] = Profile.objects.get(user=request.user.id)
        return super().create(validated_data)
| AmirhosseinRafiee/Blog | mysite/blog/api/v1/serializers.py | serializers.py | py | 2,319 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "models.Category",
"line_number": 9,
"usage_type": "name"
},
... |
361601527 | import pandas as pd
import glob
import functools
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn import decomposition
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
#from plotnine import *
#from matplotlib.mlab import PCA
# LOAD THE DATA: concatenate every gzipped, tab-separated table under ../data/aa/
#data = pd.read_csv('all.aa', sep='\t') #, header=None)
data = pd.concat(map(functools.partial(pd.read_csv, sep='\t', compression='gzip'), glob.glob("../data/aa/*")))
# STRIP OUT ONLY THE COUNTS (one column per amino-acid letter)
dat = data[list("ARNDCEQGHILKMFPSTWYV")]
# DIVIDE BY THE ROW SUMS TO GET THE FREQUENCY
dat_norm = dat.div(dat.sum(axis=1), axis=0)
# SCALE THE VALUES (zero mean, unit variance per column)
dat_scaled = StandardScaler().fit_transform(dat_norm)
# CLASSIFY EACH ROW USING KMEANS
#clust = KMeans(n_clusters=2).fit(dat_scaled).labels_
# CALCULATE THE PRINCIPLE COMPONENTS (keep the first two)
pca = decomposition.PCA(n_components = 2, svd_solver='full').fit(dat_scaled)
dat_pca = pca.transform(dat_scaled)
# loading vectors for the biplot arrows
x_vector = pca.components_[0]
y_vector = pca.components_[1]
# PLOT: scatter of samples colored by coding/non-coding label
colors = {'noncoding':'#F2766E', 'coding':'#3CC9CF', True:'#3CC9CF', False:'#F2766E'}
df = pd.DataFrame({'X':dat_pca[:,0],'Y':dat_pca[:,1],'TYPE':data.TYPE})
fig, ax = plt.subplots()
ax.scatter(df['X'], df['Y'], c=df['TYPE'].apply(lambda x: colors[x]), marker='.', linewidths=0.0, alpha=0.1, zorder=5)
# draw one labelled arrow per amino acid (biplot loadings)
for i in range(len(x_vector)):
    x = (1.2*x_vector[i]*max(dat_pca[:,0]))
    # NOTE(review): the y component is also scaled by max(dat_pca[:,0])
    # (the PC1 range) — possibly max(dat_pca[:,1]) was intended; confirm
    y = (1.2*y_vector[i]*max(dat_pca[:,0]))
    plt.arrow(0, 0, x, y, color='black', width=0.00005, zorder=10)
    plt.text(x*1.1, y*1.1, dat.columns[i], color='black', zorder=10)
print("done")
# LEGEND handles: colors match the scatter mapping above
blue_patch = mpatches.Patch(color='#3CC9CF', label='coding')
pink_patch = mpatches.Patch(color='#F2766E', label='non-coding')
# LEGEND
plt.legend(handles=[blue_patch,pink_patch])
ax.set_title('amino-acid frequency of potential ORFs from Lambda phage')
ax.set(xlabel='PC1', ylabel='PC2')
#plt.show()
fig.set_size_inches(20, 10)
fig.savefig('test.png', dpi=100)
{
"api_name": "pandas.concat",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
... |
20681022248 | __author__ = 'elmira'
import numpy as np
from lxml import etree
from collections import Counter
from matplotlib import pyplot as plt
from matplotlib import mlab
def open_corpus(fname):
parser = etree.HTMLParser() # создаем парсер хтмл-страниц
# скармливаем парсеру майстемовский xml, берем тэг body и все что внутри него
tree = etree.parse(fname, parser).getroot()[0]
sents = [Counter([w[0].attrib['gr'].split('=')[0].split(',')[0] for w in se if len(w) != 0]) for se in tree]
# этот дико жуткий list comprehension делает из каждого предложения массив
# в этом массиве находятся подряд части речи входящих в предложение слов
# # если у слова есть омонимия, то берется самый первый разбор
# # если майстем слово не разобрал, то слово игнорируется
# а потом каждый массив превращается в словарь, в котором написано, сколько раз какая часть речи встретилась
return sents
def make_features(data):
return np.array([(d['A'],
d['S'],
d['V'],
d['ADV'],
d['SPRO'] + d['APRO'] + d['ADVPRO']) # за местоимения считаются и мест-сущ, и мест-прил, и мест-наречие
for d in data])
def main():
sonets = open_corpus('corpus1.txt')
anna = open_corpus('corpus2.txt')
sonets_data = make_features(sonets)
anna_data = make_features(anna)
data = np.vstack((sonets_data, anna_data))
p = mlab.PCA(data, True)
N = len(sonets_data)
print(p.Wt)
plt.plot(p.Y[N:,0], p.Y[N:,1], 'og', p.Y[:N,0], p.Y[:N,1], 'sb')
# зелененькое - анна каренина, а синенькое - сонеты
# Правда ли, что существует линейная комбинация признаков (т.е. значение по первой оси в преобразованных методом главных компонент данных), и пороговое значение, при которых больше 70% текстов каждого жанра находятся с одной стороны от порогового значения? Напишите программу genre-by-pos.py, которая демонстрирует ответ на этот вопрос.
# Мне кажется, что ответ да, судя по картинке
print('Линейная комбинация и пороговое значение, при которых больше 70% текстов каждого жанра находятся с одной стороны от порогового значения, существуют.')
# plt.savefig('result.png')
plt.show()
# Подберем, например, на глаз по картинке пороговое значение,
# при котором больше 70% предложений анны карениной справа от него, и больше 70% предложений сонетов -- слева
# Например:
print('Пороговое значение: -4.2')
print(sum(p.Y[N:,0]>-4.2)/len(p.Y[N:,0])*100, '- процент предложений "Анны Карениной", которые лежат справа от порога')
print(sum(p.Y[:N,0]<-4.2)/len(p.Y[:N,0])*100, '- процент предложений сонетов, которые лежат слева от порога')
if __name__ == "__main__":
main() | elmiram/homework | seminar9/task2 (4 points)/genre-by-pos.py | genre-by-pos.py | py | 3,848 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "lxml.etree.HTMLParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "lxml.etree.parse",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"l... |
10877583563 | import sys
import os
from tqdm.rich import tqdm
import pandas as pd
import datetime
import tables
from pathlib import Path
from typing import TypedDict
# Per-(subject, task, step) record of how many trials were run,
# declared with the functional TypedDict syntax.
TrialSummary = TypedDict(
    "TrialSummary",
    {
        "subject": str,   # subject identifier (h5 file stem)
        "task": str,      # task group name
        "step": str,      # step group name within the task
        "n_trials": int,  # number of rows in the trial table
    },
)
# root directory holding one HDF5 file per subject
data_path = Path.home() / "Dropbox" / "lab" / "autopilot" / "data"
subjects = list(data_path.glob('*.h5'))
# counting manually because the table structure of
# the subject file changed in v0.5.0 and automatically
# recreates a new file which can take awhile and this
# is almost as easy
summaries = []
for subject in tqdm(subjects):
    subject_id = subject.stem
    # open read-only; closed explicitly after walking the tables
    h5f = tables.open_file(str(subject), 'r')
    for table in h5f.walk_nodes('/data', classname="Table"):
        # group layout is /data/<task>/<step>/<trial table>,
        # so the parent is the step and the grandparent is the task
        summary = TrialSummary(
            subject = subject_id,
            step = table._v_parent._v_name,
            task = table._v_parent._v_parent._v_name,
            n_trials = table.nrows
        )
        summaries.append(summary)
    h5f.close()
# write one row per (subject, task, step) and report the totals
df = pd.DataFrame(summaries)
df.to_csv('./trial_counts.csv', index=False)
print(f"Total subjects: {len(subjects)}\nTotal trials: {df['n_trials'].sum()}")
{
"api_name": "typing.TypedDict",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.home",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "tqdm.rich.tqdm",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.