seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
17813388652 | from typing import Callable, Any, Type
from lyrid import Address
from lyrid.base import ActorSystemBase
from lyrid.core.node import NodeSpawnProcessMessage
from lyrid.core.process import Process
from lyrid.core.system import Placement
from tests.factory.system import create_actor_system
from tests.mock.messenger import MessengerMock
from tests.mock.placement_policy import PlacementPolicyMatcherMock, PlacementPolicyMock
from tests.mock.randomizer import RandomizerMock
def assert_pass_process_type_to_policy_matcher(spawn_process: Callable[[ActorSystemBase], Any], type_: Type[Process]):
    """Assert that spawning via *spawn_process* passes the process type to the placement matcher.

    Builds an actor system wired with a mock matcher, runs the spawn callback,
    then checks the matcher recorded ``type_`` as the type it was asked to match.
    """
    matcher = PlacementPolicyMatcherMock()
    system = create_actor_system(placements=[Placement(match=matcher, policy=PlacementPolicyMock())],
                                 node_addresses=[Address("#node0"), Address("#node1"), Address("#node2")])
    spawn_process(system)
    assert matcher.match__type == type_
def assert_send_node_spawn_process_message_to_the_address_from_policy(spawn_process: Callable[[ActorSystemBase], Any]):
    """Assert that the spawn request is routed to the node address chosen by the placement policy.

    The mock policy always answers ``#node1``, so the messenger must have sent a
    NodeSpawnProcessMessage to that address.
    """
    messenger = MessengerMock()
    policy = PlacementPolicyMock(get_placement_node__return=Address("#node1"))
    system = create_actor_system(messenger=messenger,
                                 placements=[Placement(PlacementPolicyMatcherMock(match__return=True), policy)],
                                 node_addresses=[Address("#node0"), Address("#node1"), Address("#node2")])
    spawn_process(system)
    assert messenger.send__receiver == Address("#node1") and \
           isinstance(messenger.send__message, NodeSpawnProcessMessage)
def assert_use_node_address_from_first_matched_policy(spawn_process: Callable[[ActorSystemBase], Any]):
    """Assert that, when several placements match, the first matching one in list order wins.

    Placement #0 does not match; #1 and #2 both match — the system must use #1's
    node (``#node1``), not a later match.
    """
    messenger = MessengerMock()
    placements = [
        Placement(
            match=PlacementPolicyMatcherMock(match__return=False),
            policy=PlacementPolicyMock(get_placement_node__return=Address("#node0")),
        ),
        Placement(
            match=PlacementPolicyMatcherMock(match__return=True),
            policy=PlacementPolicyMock(get_placement_node__return=Address("#node1")),
        ),
        Placement(
            match=PlacementPolicyMatcherMock(match__return=True),
            policy=PlacementPolicyMock(get_placement_node__return=Address("#node2")),
        ),
    ]
    # noinspection DuplicatedCode
    system = create_actor_system(messenger=messenger, placements=placements,
                                 node_addresses=[Address("#node0"), Address("#node1"), Address("#node2")])
    spawn_process(system)
    assert messenger.send__receiver == Address("#node1") and \
           isinstance(messenger.send__message, NodeSpawnProcessMessage)
def assert_use_random_node_when_no_matched_policy(spawn_process: Callable[[ActorSystemBase], Any]):
    """Assert that a random node is picked when no placement matches.

    The stub randomizer always returns index 2, so the spawn message must be
    sent to ``#node2``.
    """
    messenger = MessengerMock()
    placements = [
        Placement(
            match=PlacementPolicyMatcherMock(match__return=False),
            policy=PlacementPolicyMock(get_placement_node__return=Address("#node0")),
        ),
        Placement(
            match=PlacementPolicyMatcherMock(match__return=False),
            policy=PlacementPolicyMock(get_placement_node__return=Address("#node1")),
        ),
    ]
    randomizer = RandomizerMock(randrange__return=2)
    system = create_actor_system(messenger=messenger, placements=placements, randomizer=randomizer,
                                 node_addresses=[Address("#node0"), Address("#node1"), Address("#node2")])
    spawn_process(system)
    assert messenger.send__receiver == Address("#node2") and \
           isinstance(messenger.send__message, NodeSpawnProcessMessage)
| SSripilaipong/lyrid | tests/system/actor_placement/_assertion.py | _assertion.py | py | 3,625 | python | en | code | 12 | github-code | 6 | [
{
"api_name": "typing.Callable",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "lyrid.base.ActorSystemBase",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Type",
... |
15484480802 | #!/usr/bin/env python
from __future__ import print_function
import sys
import os
if sys.version_info >= (3, 0):
import tkinter
else:
import Tkinter as tkinter
import interaction
import canvas
import FigureManager
# The size of the button (width, height) for buttons in root gui.
SIZE_BUTTON = (18, 4)
def find_show_image():
"""Search, open and show an pmg image.
"""
filename = interaction.find_pmg()
if filename:
FigureManager.g_figure_manager.add_pmg(filename)
canvas.show_figure_from_manager(
FigureManager.g_figure_manager,
title=os.path.basename(filename))
def main():
"""The main entry of the program.
"""
root = tkinter.Tk()
root.title('Pytena')
tkinter.Button(
root,
text='Script',
height=SIZE_BUTTON[1],
width=SIZE_BUTTON[0],
command=interaction.load_python_script).pack(side=tkinter.TOP)
tkinter.Button(
root,
text='Image',
height=SIZE_BUTTON[1],
width=SIZE_BUTTON[0],
command=find_show_image).pack(side=tkinter.TOP)
tkinter.Button(
root,
text='Command',
height=SIZE_BUTTON[1],
width=SIZE_BUTTON[0],
command=interaction.start_text_box).pack(side=tkinter.TOP)
tkinter.Button(
root,
text='Help',
height=SIZE_BUTTON[1],
width=SIZE_BUTTON[0],
command=interaction.show_help_box).pack(side=tkinter.TOP)
root.mainloop()
if __name__ == '__main__':
main()
| t-lou/pytena | main.py | main.py | py | 1,537 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.version_info",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "interaction.find_pmg",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "FigureManager.g_figure_manager.add_pmg",
"line_number": 26,
"usage_type": "call"
},
{
"... |
21138659052 | from neo4j import GraphDatabase
# neo4j connection
driver = GraphDatabase.driver("bolt://127.0.0.1:7687", auth=("neo4j", "neo4j"))
# random walk
k = 10  # Number of neighbors
pre_weight = 2  # Weight of return
n = -1  # number of users to use, -1 means using all the users.
batch_size = 1000  # batchsize to save
cores = 12  # Multi threads
# Neo4j SQL (Cypher) to sample the motif. Placeholders: {id} = anchor user id,
# {n1}/{n2} = per-pattern result limits; doubled braces survive str.format().
motif_sql = '''
match (a:User {{user_id: {id} }})<-[:msg|click]-(m)-[:msg|click]->(f) return "RESPOND" as r1, m.user_id as middle, f.user_id as final, 2 as weight limit {n1}
union
match (a:User {{user_id: {id} }})-[:msg|click]->(m)<-[:msg|click]-(f) return "DOUBLE" as r1, m.user_id as middle, f.user_id as final, 2 as weight limit {n2}
'''
# The no-op string below keeps a larger, alternative set of motif queries for reference.
"""
match (a:User {{user_id: {id} }})<-[:msg|click]-(m)-[:msg|click]->(f) return "RESPOND" as r1, m.user_id as middle, f.user_id as final, 2 as weight limit {n1}
union
match (a:User {{user_id: {id} }})<-->(m)-[:msg|click]->(f) return "SEND" as r1, m.user_id as middle, f.user_id as final, 3 as weight limit {n1}
union
match (a:User {{user_id: {id} }})<-[:msg|click]-(m)<-->(f) return "DOUBLE" as r1, m.user_id as middle, f.user_id as final, 3 as weight limit {n2}
union
match (a:User {{user_id: {id} }})-[:msg|click]->(m)<-[:msg|click]-(f) return "DOUBLE" as r1, m.user_id as middle, f.user_id as final, 2 as weight limit {n2}
union
match (a:User {{user_id: {id} }})<-->(m)<-[:msg|click]-(f) return "DOUBLE" as r1, m.user_id as middle, f.user_id as final, 2 as weight limit {n2}
union
match (a:User {{user_id: {id} }})-[:msg|click]->(m)<-->(f) return "DOUBLE" as r1, m.user_id as middle, f.user_id as final, 3 as weight limit {n2}
union
match (a:User {{user_id: {id} }})<-->(m)<-->(f) return "DOUBLE" as r1, m.user_id as middle, f.user_id as final, 4 as weight limit {n2}
"""
raw_walk_path = "../data/sjjy_data/motif_random_walk_path_M1+M4_b_{}.txt".format(pre_weight)  # Path of the raw random walk sequences
raw_emb_path = "../model/sjjy_motif_walk_M1+M4_b_{}.emb".format(pre_weight)  # Path of the raw embedding path
emb_save_path = "../model/sjjy_motif_walk_M1+M4_b_{}.emb".format(pre_weight)  # No need for data Sjjy
# motif random walk
raw_train_data_path = "../data/sjjy_data/train_data_v4.csv"  # train user pairs file path (raw user-pair ids)
# NOTE(review): the stray "" before the quoted path below is a no-op adjacent-string
# concatenation; the value is just the test CSV path.
raw_test_data_path = ""'../data/sjjy_data/test_data_v4.csv'  # test file path
train_data_path = "../data/sjjy_data/rec_data_train_M1+M4_b_{}.csv".format(pre_weight)  # train user pairs with neighbors
test_data_path = "../data/sjjy_data/rec_data_train_test_M1+M4_b_{}.csv".format(pre_weight)
# train
uid2idx_path = "../data/uid_2_idx.pkl"  # user_id to id
model_save_path = "../model/recommend_M1+M4_b_{}.pb".format(pre_weight)  # final model save path
check_point_path = "../checkpoint/recommend_M1+M4_b_{}.pth".format(pre_weight)  # checkpoint path
feature_dict_path = "../data/sjjy_data/enc_feature_dict.pkl"
| RManLuo/MotifGNN | src_sjjy/pipline_config.py | pipline_config.py | py | 3,021 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "neo4j.GraphDatabase.driver",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "neo4j.GraphDatabase",
"line_number": 4,
"usage_type": "name"
}
] |
1004121962 | from flask import Flask, render_template, request
import pypandoc
app = Flask(__name__)
@app.route('/')
def home():
    """Render the converter form with empty input/output fields."""
    return render_template('index.html')
@app.route('/convert', methods=['POST'])
def convert():
    """Convert posted MediaWiki markup to GitHub-flavored Markdown and re-render the form."""
    input_markup = request.form['input_markup']
    # NOTE(review): pypandoc.convert() was deprecated/removed in newer pypandoc
    # releases in favor of convert_text(); confirm the pinned pypandoc version.
    output_markup = pypandoc.convert(input_markup, format='mediawiki', to='markdown_github')
    return render_template('index.html',
                           input_markup=input_markup,
                           output_markup=output_markup)
if __name__ == '__main__':
app.run(debug=True)
| myw/wiki-converter | converter.py | converter.py | py | 552 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.reques... |
18015924174 | import cv2
import sys
import PyQt5.QtCore as QtCore
from PyQt5.QtCore import QTimer # Import QTimer from PyQt5
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QLabel, QFileDialog, QInputDialog
from PyQt5.QtGui import QImage, QPixmap
class TrackingApp(QWidget):
    """Minimal PyQt5 GUI for single-object tracking on a video file with OpenCV trackers."""

    def __init__(self):
        super().__init__()
        self.tracker_type = ""      # label of the tracker chosen in the dialog
        self.capture = None         # cv2.VideoCapture for the selected file
        self.tracker = None         # active OpenCV tracker instance
        self.timer = QTimer(self)   # drives frame-by-frame tracking updates
        self.initUI()

    def initUI(self):
        """Build the window: a video display label plus file-select and start buttons."""
        self.setWindowTitle('Object Tracking App')
        self.setGeometry(100, 100, 800, 600)
        self.video_label = QLabel(self)
        self.video_label.setAlignment(QtCore.Qt.AlignCenter)
        self.select_button = QPushButton('Select Video', self)
        self.select_button.clicked.connect(self.openVideo)
        self.start_button = QPushButton('Start Tracking', self)
        self.start_button.clicked.connect(self.startTracking)
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.video_label)
        self.layout.addWidget(self.select_button)
        self.layout.addWidget(self.start_button)
        self.setLayout(self.layout)

    def openVideo(self):
        """Ask the user for a video file and open it with OpenCV."""
        options = QFileDialog.Options()
        options |= QFileDialog.ReadOnly
        video_path, _ = QFileDialog.getOpenFileName(self, 'Open Video File', '', 'Video Files (*.mp4 *.avi);;All Files (*)', options=options)
        if video_path:
            self.capture = cv2.VideoCapture(video_path)

    def startTracking(self):
        """Let the user pick a tracker type and an ROI on the first frame, then start the timer."""
        if self.capture is None:
            return
        self.tracker_type, ok = QInputDialog.getItem(self, 'Select Tracker Type', 'Choose Tracker Type:', ['1. MIL', '2. KCF', '3. CSRT'])
        if ok:
            if self.tracker_type == '1. MIL':
                self.tracker = cv2.TrackerMIL_create()
            elif self.tracker_type == '2. KCF':
                self.tracker = cv2.TrackerKCF_create()
            elif self.tracker_type == '3. CSRT':
                self.tracker = cv2.TrackerCSRT_create()
            else:
                print("Invalid choice")
                return
            ret, frame = self.capture.read()
            bbox = cv2.selectROI("Select Object to Track", frame)
            self.tracker.init(frame, bbox)
            # NOTE(review): connect() runs on every start, so starting twice would
            # invoke trackObject multiple times per timeout — consider connecting
            # once in __init__ or disconnecting first.
            self.timer.timeout.connect(self.trackObject)
            self.timer.start(30)  # Update every 30 milliseconds

    def trackObject(self):
        """Advance one frame, update the tracker, and show the annotated frame in the GUI."""
        ret, frame = self.capture.read()
        if not ret:
            # End of video: stop polling for frames.
            self.timer.stop()
            return
        success, bbox = self.tracker.update(frame)
        if success:
            (x, y, w, h) = tuple(map(int, bbox))
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Convert the OpenCV image to a QImage for displaying in the GUI
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        height, width, channel = frame_rgb.shape
        bytes_per_line = 3 * width
        q_img = QImage(frame_rgb.data, width, height, bytes_per_line, QImage.Format_RGB888)
        pixmap = QPixmap.fromImage(q_img)
        self.video_label.setPixmap(pixmap)
if __name__ == '__main__':
app = QApplication(sys.argv)
trackingApp = TrackingApp()
trackingApp.show()
sys.exit(app.exec_())
| kio7/smart_tech | Submission 2/Task_6/trackingGUI.py | trackingGUI.py | py | 3,282 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QTimer",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PyQ... |
21998531046 | from collections import Counter
class Solution:
    def minWindow(self, s: str, t: str) -> str:
        """Return the smallest substring of s containing every character of t
        (with multiplicity), or "" when no such window exists.

        Classic two-pointer sliding window: grow the right edge until the
        window covers t, then shrink from the left while recording the
        earliest shortest window seen.
        """
        need = Counter(t)          # outstanding required count per character
        missing = len(t)           # chars (with multiplicity) still missing
        best_start, best_len = 0, len(s) + 1
        lo = 0
        for hi, ch in enumerate(s):
            # A char only reduces `missing` while the window still lacks it.
            if need.get(ch, 0) > 0:
                missing -= 1
            if ch in need:
                need[ch] -= 1
            # Window complete: shrink from the left as far as possible.
            while missing == 0:
                if hi - lo + 1 < best_len:
                    best_start, best_len = lo, hi - lo + 1
                out = s[lo]
                if out in need:
                    need[out] += 1
                    if need[out] > 0:
                        missing += 1
                lo += 1
        return "" if best_len == len(s) + 1 else s[best_start:best_start + best_len]
so = Solution()
print(so.minWindow(s="ADOBECODEBANC", t="ABC"))
| hangwudy/leetcode | 1-99/76. 最小覆盖子串.py | 76. 最小覆盖子串.py | py | 1,359 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.Counter",
"line_number": 10,
"usage_type": "call"
}
] |
8068261091 | from torchvision.models.detection import maskrcnn_resnet50_fpn
from rigl_torch.models import ModelFactory
@ModelFactory.register_model_loader(model="maskrcnn", dataset="coco")
def get_maskrcnn(*args, **kwargs):
    """Return an untrained Mask R-CNN (ResNet-50 FPN) with all 5 backbone stages trainable.

    Registered with the model factory for the (maskrcnn, coco) pair; any extra
    args/kwargs the factory passes are accepted but ignored.
    """
    return maskrcnn_resnet50_fpn(
        weights=None, weights_backbone=None, trainable_backbone_layers=5
    )
if __name__ == "__main__":
model = get_maskrcnn()
print(model)
| calgaryml/condensed-sparsity | src/rigl_torch/models/maskrcnn.py | maskrcnn.py | py | 400 | python | en | code | 10 | github-code | 6 | [
{
"api_name": "torchvision.models.detection.maskrcnn_resnet50_fpn",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "rigl_torch.models.ModelFactory.register_model_loader",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "rigl_torch.models.ModelFactory",
"line_... |
21437122618 | import kivy
from kivy.app import App
from kivy.uix.label import Label
# 2
from kivymd.app import MDApp
from kivymd.uix.label import MDLabel
from kivymd.uix.screen import Screen
kivy.require('2.1.0')
class MyFirstApp(App):
    """Plain-kivy demo app that shows a single Label using kivy text markup."""

    def build(self):
        # Earlier, simpler variants kept for reference:
        # lbl = Label(text='Hello World')
        # lbl = Label(text='Hello World and Good Morning', font_size='20sp', color=[0.41, 0.42, 0.74, 1])
        lbl = Label(text="[color=ff3333][b]'Hello World'[/b][/color]\n[color=3333ff]Good Morning[/color]",
                    font_size='20sp', markup=True)
        # The no-op string below is an inline (Japanese) cheat sheet of kivy markup
        # tags: [b] bold, [i] italic, [u] underline, [s] strikethrough,
        # [font=]/[size=]/[color=#] styling, [ref=] interactive zones,
        # [anchor=] anchors, [sub]/[sup] sub-/superscript.
        """
        [b][/b] → 太字を有効にする
        [i][/i] → イタリック体のテキストをアクティブにする
        [u][/u] → 下線テキスト
        [s][/s] →取り消し線付きテキスト
        [font=][/font] → フォントを変更する
        [サイズ=][/size]]です。→ フォントサイズを変更する
        [色=#][/color] → 文字色の変更
        [ref=][/ref] -> インタラクティブゾーンを追加します。参照+参照内部のバウンディングボックスがLabel.refsで利用可能になります。
        [anchor=] -> テキストにアンカーを入れる。テキスト内のアンカーの位置はLabel.anchorsで取得できます。
        [sub][/sub] -> 前のテキストからの相対的な添え字の位置でテキストを表示します。
        [sup][/sup] -> 前のテキストと相対的な上付き文字の位置でテキストを表示します。
        """
        return lbl
class Demo(MDApp):
    """KivyMD demo: three 'Welcome' labels in different Material font styles on one screen."""

    def build(self):
        screen = Screen()
        # Same text at three vertical positions, varying only the font style.
        l = MDLabel(text="Welcome", pos_hint={'center_x': 0.8, 'center_y': 0.8},
                    theme_text_color='Custom',
                    text_color=(0.5, 0, 0.5, 1),
                    font_style='Caption'
                    )
        l1 = MDLabel(text="Welcome", pos_hint={'center_x': 0.8, 'center_y': 0.5},
                     theme_text_color='Custom',
                     text_color=(0.5, 0, 0.5, 1),
                     font_style='H2'
                     )
        l2 = MDLabel(text="Welcome", pos_hint={'center_x': 0.8, 'center_y': 0.2},
                     theme_text_color='Custom',
                     text_color=(0.5, 0, 0.5, 1),
                     font_style='H1'
                     )
        screen.add_widget(l)
        screen.add_widget(l1)
        screen.add_widget(l2)
        return screen
if __name__ == '__main__':
# MyFirstApp().run()
Demo().run()
| gonzales54/python_script | kivy/kivy1(text)/main1.py | main1.py | py | 2,528 | python | ja | code | 0 | github-code | 6 | [
{
"api_name": "kivy.require",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "kivy.app.App",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "kivy.uix.label.Label",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "kivymd.app.MDApp",
... |
36636572184 |
import random
import pyxel
import utils
import stage
TYPE_AGGRESSIVE = 0
TYPE_MILD = 1
TYPE_RANDOM_SLOW = 2
TYPE_RANDOM_FAST = 3
TYPES = [
TYPE_AGGRESSIVE,
TYPE_MILD,
TYPE_RANDOM_SLOW,
TYPE_RANDOM_FAST
]
TICKS_PER_FRAME = 10
MAX_FRAME = 4
MAX_SPEED = 0.4
MAX_RESPAWN_TICKS = 300 # 5 secs
class Spinner:
    """Bouncing enemy that ricochets off the stage's solid rectangles.

    Plays a 4-frame spin animation, can be killed, and respawns after
    MAX_RESPAWN_TICKS in a random spawn sector away from the player.
    """

    def __init__(self, x, y, type):
        self.x = x
        self.y = y
        # Default to type 2 (TYPE_RANDOM_SLOW) unless a known type was given.
        self.type = 2
        if type in TYPES:
            self.type = type
        # Start moving diagonally at full speed in a random direction.
        self.vx = random.choice([-MAX_SPEED, MAX_SPEED])
        self.vy = random.choice([-MAX_SPEED, MAX_SPEED])
        self.radius = 4
        self.frame = 0        # current animation frame (0..MAX_FRAME-1)
        self.frame_ticks = 0  # ticks accumulated toward the next frame
        self.is_dead = False
        self.respawn_ticks = MAX_RESPAWN_TICKS

    def _set_new_position(self, stageObj):
        """Move to a random spawn location in a sector the player is not in."""
        px = stageObj.player.x
        py = stageObj.player.y
        loc = None
        loclist = [
            stage.SPAWN_SECTOR_TOPLEFT,
            stage.SPAWN_SECTOR_BOTTOMLEFT,
            stage.SPAWN_SECTOR_TOPRIGHT,
            stage.SPAWN_SECTOR_BOTTOMRIGHT
        ]
        # Drop the sector the player currently occupies (screen split at x=80, y=75).
        if px < 80:
            if py < 75:
                loclist.remove(stage.SPAWN_SECTOR_TOPLEFT)
            else:
                loclist.remove(stage.SPAWN_SECTOR_BOTTOMLEFT)
        else:
            if py < 75:
                loclist.remove(stage.SPAWN_SECTOR_TOPRIGHT)
            else:
                loclist.remove(stage.SPAWN_SECTOR_BOTTOMRIGHT)
        loc = stageObj.get_random_spawn_loc(random.choice(loclist))
        self.x = loc[0]
        self.y = loc[1]

    def kill(self):
        """Mark the spinner dead and arm the respawn countdown."""
        self.is_dead = True
        self.respawn_ticks = MAX_RESPAWN_TICKS

    def _do_collisions(self, stage):
        """Advance one step, reflecting velocity off any solid rect, one axis at a time."""
        # Horizontal step: test the x move alone so the axes bounce independently.
        new_x = self.x + self.vx
        for b in stage.solid_rects:
            if utils.circle_rect_overlap(new_x, self.y, self.radius,
                                         b[0], b[1], b[2], b[3]):
                if self.x > b[0] + b[2]: # was prev to right of border.
                    new_x = b[0] + b[2] + self.radius
                elif self.x < b[0]: # was prev to left of border.
                    new_x = b[0] - self.radius
                self.vx *= -1
                break
        # Vertical step.
        new_y = self.y + self.vy
        for b in stage.solid_rects:
            if utils.circle_rect_overlap(self.x, new_y, self.radius,
                                         b[0], b[1], b[2], b[3]):
                if self.y > b[1] + b[3]: # was prev below border.
                    new_y = b[1] + b[3] + self.radius
                elif self.y < b[1]: # was prev above border.
                    new_y = b[1] - self.radius
                self.vy *= -1
                break
        self.x = new_x
        self.y = new_y

    def respawn(self):
        """Bring the spinner back to life (its position was reset mid-countdown)."""
        self.is_dead = False

    def update(self, stage):
        """Per-tick update: run the respawn countdown while dead, else move; always animate."""
        if self.is_dead:
            self.respawn_ticks -= 1
            if self.respawn_ticks == 0:
                self.respawn()
            elif self.respawn_ticks == 30:
                # Relocate shortly before reviving so the spawn effect plays in place.
                self._set_new_position(stage)
        else:
            self._do_collisions(stage)
        self.frame_ticks += 1
        if self.frame_ticks == TICKS_PER_FRAME:
            self.frame_ticks = 0
            self.frame += 1
            if self.frame == MAX_FRAME:
                self.frame = 0

    def draw(self, shake_x, shake_y):
        """Draw the spin animation, or the three-step spawn-in effect while dead."""
        if self.is_dead:
            # Pick a spawn-effect sprite during the last ~30 ticks of the countdown.
            framex = None
            if self.respawn_ticks < 10:
                framex = 42
            elif self.respawn_ticks < 20:
                framex = 63
            elif self.respawn_ticks < 30:
                framex = 84
            if framex is not None:
                pyxel.blt(
                    self.x + shake_x - 10,
                    self.y + shake_y - 10,
                    0,
                    framex,
                    231,
                    21, 21,
                    8
                )
        else:
            pyxel.blt(
                self.x + shake_x - 4,
                self.y + shake_y - 4,
                0,
                160 + self.frame*9,
                8,
                9, 9,
                8
            )
| helpcomputer/megaball | megaball/spinner.py | spinner.py | py | 4,317 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "random.choice",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "stage.SPAWN_SECTOR_TOPLEFT",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "stage.SPA... |
46046574096 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
# Read the long description and dependency pins, closing each file promptly —
# the previous bare open() calls leaked the file handles.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme_file:
    README = readme_file.read()
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as requirements_file:
    REQUIREMENTS = requirements_file.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Packaging metadata; long description and dependencies come from the files read above.
setup(
    name='django-hooks',
    version='0.2.0-pre',
    description='A plugin system for django.',
    author='Esteban Castro Borsani',
    author_email='ecastroborsani@gmail.com',
    long_description=README,
    url='https://github.com/nitely/django-hooks',
    packages=[
        'hooks',
        'hooks.templatetags',
    ],
    include_package_data=True,
    zip_safe=False,
    install_requires=REQUIREMENTS,  # pinned in requirements.txt
    license='MIT License',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
| nitely/django-hooks | setup.py | setup.py | py | 1,303 | python | en | code | 16 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numbe... |
37304550340 | import pyrealsense2 as rs
import numpy as np
import cv2
WIDTH = 640
HEIGHT = 480
FPS = 30
# file name which you want to open
FILE = './data/stairs.bag'
def main():
    """Replay a recorded .bag file, showing the color stream and a JET-colored depth map side by side."""
    # stream(Depth/Color) setting
    config = rs.config()
    # NOTE(review): the color stream is configured as rgb8 but cv2.imshow expects
    # BGR, so displayed colors may be channel-swapped — confirm.
    config.enable_stream(rs.stream.color, WIDTH, HEIGHT, rs.format.rgb8, FPS)
    config.enable_stream(rs.stream.depth, WIDTH, HEIGHT, rs.format.z16, FPS)
    # Play back from the recorded file instead of a live camera.
    config.enable_device_from_file(FILE)
    # Start streaming
    pipeline = rs.pipeline()
    pipeline.start(config)
    try:
        while True:
            # Wait for frames(Color/Depth)
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            # Convert images to numpy arrays
            depth_image = np.asanyarray(depth_frame.get_data())
            # alpha=0.08 rescales the 16-bit depth into 8 bits before color mapping.
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.08), cv2.COLORMAP_JET)
            color_image = np.asanyarray(color_frame.get_data())
            # Show images
            color_image_s = cv2.resize(color_image, (WIDTH, HEIGHT))
            depth_colormap_s = cv2.resize(depth_colormap, (WIDTH, HEIGHT))
            images = np.hstack((color_image_s, depth_colormap_s))
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', images)
            # Milliseconds to wait for a key press each frame.
            INTERVAL = 10
            if cv2.waitKey(INTERVAL) & 0xff == 27:  # End with ESC
                cv2.destroyAllWindows()
                break
    finally:
        # Stop streaming
        pipeline.stop()
if __name__ == '__main__':
main()
| masachika-kamada/realsense-matome | play_bagfile.py | play_bagfile.py | py | 1,686 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyrealsense2.config",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.stream",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pyrealsense2.format",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": ... |
1282652745 | import datetime
import pandas as pd
from tqdm import tqdm
from emailer import Emailer
from shipping import Shipping
from shipstation import Shipstation
def main():
    """Daily report: find single-item orders from the last week whose shipments the
    carrier confirmed delivered, export them to CSV, and e-mail the file."""
    # Instantiate objects to be used throughout the script
    shipstation = Shipstation()
    shipping = Shipping()
    # Get all shipment information from ShipStation
    print("\nGetting shipments...", end="")
    all_shipments = shipstation.get_shipments()
    print("done!\n")
    # Filter shipments for only those that were delivered, per ShipStation
    print("\nFiltering for delivered shipments...", end="")
    delivered_shipments = [
        shipment for shipment in all_shipments if shipment["confirmation"] == "delivery"
    ]
    print("done!\n")
    # Filter delivered shipments created in the last week for those with orders created in the last week
    print("\nFiltering for orders within the last week...", end="")
    good_shipments = []
    for shipment in tqdm(delivered_shipments, position=0, leave=True):
        order_response = shipstation.get_order(shipment["orderId"])
        # ShipStation order dates look like "2023-01-01T00:00:00.0000000".
        order_date = datetime.datetime.strptime(
            order_response["orderDate"], "%Y-%m-%dT%H:%M:%S.%f0"
        )
        if order_date > datetime.datetime.now() - datetime.timedelta(days=8):
            # Keep single-line-item orders only.
            if len(order_response["items"]) == 1:
                good_shipments.append((shipment, order_response))
    print("done!\n")
    # Get tracking info from USPS and UPS
    print("\nGetting tracking info...", end="")
    usps_info = {}
    usps_tracking_numbers = [
        s[0]["trackingNumber"] for s in good_shipments if "usps" in s[0]["serviceCode"]
    ]
    # NOTE(review): USPS numbers are fetched with get_ups_tracking() — this looks
    # like it should be a USPS lookup; confirm against the Shipping helper.
    for tracking_number in usps_tracking_numbers:
        usps_info[tracking_number] = shipping.get_ups_tracking(tracking_number)
    ups_info = {}
    ups_tracking_numbers = [
        s[0]["trackingNumber"] for s in good_shipments if "ups" in s[0]["serviceCode"]
    ]
    for tracking_number in ups_tracking_numbers:
        ups_info[tracking_number] = shipping.get_ups_tracking(tracking_number)
    # Combine tracking info into one dictionary
    tracking_info = {**ups_info, **usps_info}
    print("done!\n")
    # Filter shipments for those that were confirmed as delivered during the previous business day by USPS or UPS
    print("\nFiltering for deliveries confirmed by the carrier...", end="")
    # Each tracking entry is treated as ("YYYY-MM-DD HH:MM" timestamp, delivered_flag).
    # NOTE(review): days=3 selects "exactly three days ago", not strictly the
    # previous business day as the comment above says — confirm intent.
    actually_delivered = [
        s
        for s in good_shipments
        if tracking_info.get(s[0]["trackingNumber"], [0, False])[1]
        and datetime.datetime.strptime(
            tracking_info.get(s[0]["trackingNumber"], ["2023-01-01 00:00", False])[0],
            "%Y-%m-%d %H:%M",
        ).date()
        == (datetime.datetime.now() - datetime.timedelta(days=3)).date()
    ]
    print("done!\n")
    # Create pandas DataFrame for data to be exported
    print("\nSending to CSV...", end="")
    filename = "shipstation_delivered.csv"
    values = []
    for i in range(len(actually_delivered)):
        shipment_id = actually_delivered[i][0]["shipmentId"]
        order_id = actually_delivered[i][0]["orderId"]
        email = actually_delivered[i][0]["customerEmail"]
        ship_date = actually_delivered[i][0]["shipDate"]
        order_date = actually_delivered[i][1]["createDate"]
        bill_to = actually_delivered[i][1]["billTo"]
        ship_to = actually_delivered[i][1]["shipTo"]
        item = actually_delivered[i][1]["items"][0]["sku"]
        quantity = actually_delivered[i][1]["items"][0]["quantity"]
        tracking_number = actually_delivered[i][0]["trackingNumber"]
        values.append(
            (
                shipment_id,
                order_id,
                email,
                ship_date,
                order_date,
                bill_to,
                ship_to,
                item,
                quantity,
                tracking_number,
            )
        )
    df = pd.DataFrame(
        values,
        columns=[
            "shipmentId",
            "orderId",
            "customerEmail",
            "shipDate",
            "orderDate",
            "billTo",
            "shipTo",
            "sku",
            "quantity",
            "trackingNumber",
        ],
    )
    # NOTE(review): only usps_info feeds deliveryDate, so UPS rows end up NaT —
    # tracking_info would cover both carriers; confirm which is intended.
    df["deliveryDate"] = df["trackingNumber"].map(
        {k: v[0] for k, v in usps_info.items()}
    )
    df["deliveryDate"] = pd.to_datetime(df["deliveryDate"])
    df.to_csv(filename, index=False)
    print("done!\n")
    # Sending email to relevant parties
    emailer = Emailer(to_address="matt@jmac.com")
    print("\nSending email...", end="")
    subject = "ShipStation Daily Report"
    body = f"""
    Attached are the {len(df)} cherry-picked orders/shipments that were delivered during the
    previous business day.
    """
    emailer.send_email(subject, body, filename)
    print("done!\n")
print("done!\n")
if __name__ == "__main__":
main()
| mattgrcia/review-booster | main.py | main.py | py | 4,870 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "shipstation.Shipstation",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "shipping.Shipping",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "shipstation.get_shipments",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "t... |
# Face detection decides whether a face is present using a classifier — an
# algorithm trained on thousands of images with and without faces.
# OpenCV ships pretrained classifiers such as Haar cascades and Local Binary
# Patterns; a Haar cascade is used here.
import cv2 as cv
# Load the group photo to run detection on.
img = cv.imread('Images/group.jpg')
cv.imshow('group', img)
# First convert the image to grayscale: the Haar cascade looks at intensity
# edges, so face detection does not involve any colors.
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('GrayScale', gray)
# Now load the pretrained Haar cascade description file.
haar_cascade = cv.CascadeClassifier('haar_face.xml')
# Detection: returns a list of (x, y, w, h) rectangles, one per detected face.
face_rect = haar_cascade.detectMultiScale(gray, 1.1, minNeighbors=3)
print(f'No. of faces detected : {len(face_rect)}')
# print(face_rect)
# Draw a red box around every detection.
for (x,y,w,h) in face_rect:
    cv.rectangle(img, (x,y), (x+w,y+h), (0,0,255), thickness=2)
# For a group photo the Haar cascade is sensitive to noise, so it may report
# more faces than are actually present; a lower minNeighbors value yields more
# detections and vice versa.
cv.imshow('Detected_Faces', img)
cv.waitKey(0)
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_numbe... |
19399743449 | from typing import List
import collections
class Solution:
    def combine(self, n: int, k: int) -> List[List[int]]:
        """Return all k-element combinations of 1..n in lexicographic order.

        Depth-first backtracking: each partial combination is extended only
        with values larger than its last element, so every combination is
        produced exactly once and in sorted order.
        """
        found: List[List[int]] = []

        def extend(prefix: List[int]) -> None:
            # A prefix that reached length k is a finished combination.
            if len(prefix) == k:
                found.append(prefix)
                return
            for nxt in range(prefix[-1] + 1, n + 1):
                extend(prefix + [nxt])

        # Seed with every possible first element, mirroring the original BFS seeds.
        for first in range(1, n + 1):
            extend([first])
        return found
n = 3
k = 3
r = Solution().combine(n, k)
print(r)
| Yigang0622/LeetCode | combine.py | combine.py | py | 576 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
}
] |
69986222269 | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.contrib.auth import get_user_model
class CustomUser(AbstractUser):
    """Auth user extended with an optional phone number and a bonus-coin balance."""
    phone = models.CharField(max_length=13, blank=True, null=True)  # optional contact number
    bonus_coin = models.IntegerField(default=0)  # bonus balance, starts at zero
class NameIt(models.Model):
    """Abstract base model that adds a ``name`` field and a name-based ``__str__``."""
    name = models.CharField(max_length=255)
    class Meta:
        abstract = True  # no table for this base; subclasses inherit the field
    def __str__(self):
        return self.name
class Category(NameIt):
    """Product category; only the inherited ``name`` field."""
    pass
class Product(NameIt):
    """Sellable item with an integer price, tied to a category."""
    # NOTE(review): price is an integer — presumably whole currency units or
    # minor units; confirm before doing money math.
    price = models.IntegerField(null=False)
    category = models.ForeignKey(Category, on_delete=models.CASCADE, null=False)
    compound = models.TextField(null=True)  # presumably the ingredient list — verify against usage
    description = models.TextField(null=True)
class ProductImage(models.Model):
    """Image attached to a product; one product may have several (related_name='images')."""
    image = models.ImageField(upload_to='images', verbose_name='Изображение_товара')
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='images')
    is_main = models.BooleanField(default=False)  # whether this is the product's primary image
    is_active = models.BooleanField(default=True)
    def __str__(self):
        # Display by primary key, since images have no natural name.
        return "%s" % self.id
class Reviews(models.Model):
    """User-authored review; hidden until published."""
    body = models.TextField()
    publish_date = models.DateTimeField(blank=True, null=True)  # unset while unpublished
    is_published = models.BooleanField(default=False)
    author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
    def __str__(self):
        return self.body
# Create your models here.
| Pdnky/MySite | FoodDelivery/core/models.py | models.py | py | 1,427 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"... |
10420612333 | from __future__ import annotations
from typing import TYPE_CHECKING
from randovania.exporter.hints import guaranteed_item_hint
from randovania.exporter.hints.hint_exporter import HintExporter
from randovania.exporter.hints.joke_hints import JOKE_HINTS
from randovania.game_description.db.hint_node import HintNode
from randovania.games.common.prime_family.exporter.hint_namer import colorize_text
from randovania.games.prime2.patcher import echoes_items
if TYPE_CHECKING:
from random import Random
from randovania.exporter.hints.hint_namer import HintNamer
from randovania.game_description.db.node_identifier import NodeIdentifier
from randovania.game_description.db.region_list import RegionList
from randovania.game_description.game_patches import GamePatches
from randovania.game_description.resources.resource_database import ResourceDatabase
from randovania.games.prime2.exporter.hint_namer import EchoesHintNamer
from randovania.interface_common.players_configuration import PlayersConfiguration
def create_simple_logbook_hint(asset_id: int, hint: str) -> dict:
    """Build one logbook string-patch entry.

    Pairs *asset_id* with a three-entry strings list in which the hint text
    appears twice around an empty middle entry.
    """
    strings = [hint, "", hint]
    return {"asset_id": asset_id, "strings": strings}
def create_patches_hints(
    all_patches: dict[int, GamePatches],
    players_config: PlayersConfiguration,
    region_list: RegionList,
    namer: HintNamer,
    rng: Random,
) -> list:
    """Build the logbook string patches for this player's hints.

    Every HintNode in the region list gets an entry; nodes without an
    associated hint fall back to a placeholder message.
    """
    exporter = HintExporter(namer, rng, JOKE_HINTS)
    own_hints = all_patches[players_config.player_index].hints
    hints_for_asset: dict[NodeIdentifier, str] = {
        identifier: exporter.create_message_for_hint(hint, all_patches, players_config, True)
        for identifier, hint in own_hints.items()
    }
    patches = []
    for node in region_list.iterate_nodes():
        if not isinstance(node, HintNode):
            continue
        message = hints_for_asset.get(
            region_list.identifier_for_node(node), "Someone forgot to leave a message."
        )
        patches.append(create_simple_logbook_hint(node.extra["string_asset_id"], message))
    return patches
def hide_patches_hints(region_list: RegionList) -> list:
    """
    Creates the string patches entries that replace the Lore scans in the game
    with completely useless text.
    :return:
    """
    placeholder = "Some item was placed somewhere."
    patches = []
    for node in region_list.iterate_nodes():
        if isinstance(node, HintNode):
            patches.append(create_simple_logbook_hint(node.extra["string_asset_id"], placeholder))
    return patches
_SKY_TEMPLE_KEY_SCAN_ASSETS = [
0xD97685FE,
0x32413EFD,
0xDD8355C3,
0x3F5F4EBA,
0xD09D2584,
0x3BAA9E87,
0xD468F5B9,
0x2563AE34,
0xCAA1C50A,
]
def create_stk_hints(
    all_patches: dict[int, GamePatches],
    players_config: PlayersConfiguration,
    resource_database: ResourceDatabase,
    namer: HintNamer,
    hide_area: bool,
) -> list:
    """
    Creates the string patches entries that changes the Sky Temple Gateway hint scans with hints for where
    the STK actually are.
    :param all_patches:
    :param players_config:
    :param resource_database:
    :param namer:
    :param hide_area: Should the hint include only the db?
    :return:
    """
    # Guaranteed hints: one per Sky Temple Key item in the resource database.
    # NOTE(review): the trailing positional `True` argument's meaning is not
    # visible from here — check create_guaranteed_hints_for_resources' signature.
    resulting_hints = guaranteed_item_hint.create_guaranteed_hints_for_resources(
        all_patches,
        players_config,
        namer,
        hide_area,
        [resource_database.get_item(index) for index in echoes_items.SKY_TEMPLE_KEY_ITEMS],
        True,
    )
    # Pair each key's hint with its fixed scan asset id (same ordering as
    # _SKY_TEMPLE_KEY_SCAN_ASSETS, which has one entry per key).
    return [
        create_simple_logbook_hint(
            _SKY_TEMPLE_KEY_SCAN_ASSETS[key_number],
            resulting_hints[resource_database.get_item(key_index)],
        )
        for key_number, key_index in enumerate(echoes_items.SKY_TEMPLE_KEY_ITEMS)
    ]
def hide_stk_hints(namer: EchoesHintNamer) -> list:
    """
    Creates the string patches entries that replace the Sky Temple Gateway hint
    scans with completely useless text.
    :return:
    """
    patches = []
    for key_number in range(9):
        key_name = colorize_text(namer.color_item, f"Sky Temple Key {key_number + 1}", True)
        patches.append(
            create_simple_logbook_hint(
                _SKY_TEMPLE_KEY_SCAN_ASSETS[key_number],
                "{} is lost somewhere in Aether.".format(key_name),
            )
        )
    return patches
| randovania/randovania | randovania/games/prime2/exporter/hints.py | hints.py | py | 4,216 | python | en | code | 165 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.game_patches.GamePatches",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "randovania.interface_common.players_configuration.PlayersConfiguration"... |
2544504801 | import cv2
import numpy as np
###Color detection
def empty(a):
    """No-op trackbar callback: OpenCV requires a callable, but nothing needs to happen."""
    return None
def stackImages(scale,imgArray):
    """Tile several images into one canvas for side-by-side display.

    imgArray is either a list of images (one row) or a list of lists
    (a 2-D grid).  Every image is scaled by *scale*, resized to match the
    first image's dimensions, and grayscale images are converted to BGR so
    np.hstack/np.vstack can concatenate them.

    NOTE(review): the elements of imgArray are replaced in place (resized /
    color-converted), so callers' lists are mutated.
    """
    rows = len(imgArray)
    cols = len(imgArray[0])
    # True when imgArray is a list of lists (grid); False for a flat list (single row).
    rowsAvailable = isinstance(imgArray[0], list)
    width = imgArray[0][0].shape[1]
    height = imgArray[0][0].shape[0]
    if rowsAvailable:
        for x in range ( 0, rows):
            for y in range(0, cols):
                if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
                else:
                    # Different size: force to the first image's dimensions, then scale.
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
                # 2-D array == grayscale; convert so channel counts match for stacking.
                if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
        imageBlank = np.zeros((height, width, 3), np.uint8)
        hor = [imageBlank]*rows
        hor_con = [imageBlank]*rows  # NOTE(review): unused
        for x in range(0, rows):
            hor[x] = np.hstack(imgArray[x])
        ver = np.vstack(hor)
    else:
        for x in range(0, rows):
            if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
            else:
                imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
            if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
        hor= np.hstack(imgArray)
        ver = hor
    return ver
cv2.namedWindow("trackBars")
cv2.resizeWindow("trackBars",640,240)
# createTrackbar args: trackbar name, window it is attached to, initial value,
# max value, callback invoked whenever the trackbar value changes.
# Hue is 0-360 in theory, but OpenCV stores it halved, so the maximum is 179.
cv2.createTrackbar("Hue Min","trackBars",0,179,empty)
cv2.createTrackbar("Hue Max","trackBars",13,179,empty)
cv2.createTrackbar("Sat Min","trackBars",24,255,empty)
cv2.createTrackbar("Sat Max","trackBars",250,255,empty)
cv2.createTrackbar("Value Min","trackBars",119,255,empty)
cv2.createTrackbar("Value Max","trackBars",255,255,empty)
# Fix: load and convert the source image once, outside the loop.  It never
# changes between frames, so re-reading it from disk and re-converting to HSV
# on every iteration was wasted work.
img = cv2.imread("lambo.png")
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
while True:
    # Read the current HSV bounds from the trackbars (trackbar name, owning window).
    h_min = cv2.getTrackbarPos("Hue Min","trackBars")
    h_max = cv2.getTrackbarPos("Hue Max","trackBars")
    s_min = cv2.getTrackbarPos("Sat Min","trackBars")
    s_max = cv2.getTrackbarPos("Sat Max","trackBars")
    v_min = cv2.getTrackbarPos("Value Min","trackBars")
    v_max = cv2.getTrackbarPos("Value Max","trackBars")
    print(h_min,h_max,s_min,s_max,v_min,v_max)
    lower = np.array([h_min,s_min,v_min])
    upper = np.array([h_max,s_max,v_max])
    # Keep only pixels whose HSV values fall inside [lower, upper].
    mask = cv2.inRange(imgHSV,lower,upper)
    # bitwise_and keeps the original colors wherever the mask is non-zero.
    imgResult = cv2.bitwise_and(img,img,mask=mask)
    imgStack = stackImages(0.6,([img,imgHSV],[mask,imgResult]))
    cv2.imshow("Stack Images",imgStack)
    # Fix: the original loop discarded waitKey's return value, so there was no
    # way to exit.  Press 'q' to quit.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
{
"api_name": "cv2.resize",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_GRAY2BGR",
"line_num... |
28156175074 | import argparse
import sys
from pathlib import Path
from typing import List
import numpy as np
import torch
from thre3d_atom.modules.volumetric_model.volumetric_model import (
VolumetricModel,
VolumetricModelRenderingParameters,
)
from thre3d_atom.rendering.volumetric.voxels import (
GridLocation,
FeatureGrid,
VoxelSize,
)
from thre3d_atom.utils.constants import (
NUM_RGBA_CHANNELS,
)
from thre3d_atom.utils.imaging_utils import SceneBounds, CameraIntrinsics
def parse_arguments(args: List[str]) -> argparse.Namespace:
    """Parse CLI arguments for the feature-grid -> RGBA-grid conversion tool.

    Args:
        args: raw argument list (typically ``sys.argv[1:]``).

    Returns:
        Namespace with ``model_path`` and ``output_dir`` Path attributes.
    """
    parser = argparse.ArgumentParser(
        "Converts Feature-Grid (+ mlp model) into an RGBA grid",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # fmt: off
    # Required arguments
    parser.add_argument("-i", "-m", "--model_path", action="store", type=Path,
                        required=True, help="path to the trained 3dSGDS model")
    parser.add_argument("-o", "--output_dir", action="store", type=Path,
                        required=True, help="path to the output directory")
    # fmt: on
    return parser.parse_args(args)
## noinspection PyUnresolvedReferences
def main() -> None:
    """Load a numpy feature-grid checkpoint and re-save it as a VolumetricModel .pth."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    args = parse_arguments(sys.argv[1:])
    # load the numpy model:
    np_model = np.load(args.model_path, allow_pickle=True)
    features = torch.from_numpy(np_model["grid"]).to(device=device)
    grid_size = np_model["grid_size"]
    grid_location = GridLocation(*np_model["grid_center"])
    # Spatial dims only; the last axis of `features` holds the feature channels.
    grid_dim = features.shape[:-1]
    # Voxel size = extent / number of gaps (dim - 1): features sit on grid
    # vertices.  NOTE(review): assumes every spatial dim > 1 — confirm.
    x_voxel_size = grid_size[0] / (grid_dim[0] - 1)
    y_voxel_size = grid_size[1] / (grid_dim[1] - 1)
    z_voxel_size = grid_size[2] / (grid_dim[2] - 1)
    feature_grid = FeatureGrid(
        # FeatureGrid expects channels-first layout, hence the permute.
        features=features.permute(3, 0, 1, 2),
        voxel_size=VoxelSize(x_voxel_size, y_voxel_size, z_voxel_size),
        grid_location=grid_location,
        tunable=True,
    )
    render_params = VolumetricModelRenderingParameters(
        num_rays_chunk=1024,
        num_points_chunk=65536,
        num_samples_per_ray=256,
        num_fine_samples_per_ray=0,
        perturb_sampled_points=True,
        density_noise_std=0.0,
    )
    vol_mod = VolumetricModel(
        render_params=render_params,
        grid_dims=grid_dim,
        feature_dims=NUM_RGBA_CHANNELS,
        grid_size=grid_size,
        grid_center=grid_location,
        device=device,
    )
    # Replace the freshly constructed grid with the one loaded from disk.
    vol_mod.feature_grid = feature_grid
    torch.save(
        vol_mod.get_save_info(
            # NOTE(review): scene bounds / intrinsics / radius are hard-coded
            # defaults here, not read from the checkpoint — confirm intended.
            extra_info={
                "scene_bounds": SceneBounds(0.1, 2.5),
                "camera_intrinsics": CameraIntrinsics(256, 256, 256),
                "hemispherical_radius": 1.0,
            }
        ),
        f"{args.output_dir}/model_rgba.pth",
    )
if __name__ == "__main__":
main()
| akanimax/3inGAN | projects/thre3ingan/experimental/create_vol_mod_from_npy.py | create_vol_mod_from_npy.py | py | 2,878 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 27,
"usage_type": "attribute"
},
{
"a... |
1466500793 | from dataclasses import dataclass, field
from src.shared.general_functions import sum_all_initialized_int_attributes
@dataclass
class ShareholdersEquity:
    """Shareholders' equity is the amount that the owners of a company have invested in their business. This includes
    the money they've directly invested and the accumulation of income the company has earned and that has been
    reinvested since inception."""
    # NOTE(review): `preferred_Stock` breaks snake_case, but renaming it would
    # change the generated __init__ signature for existing callers.
    preferred_Stock: int
    common_stock: int
    retained_earnings: int
    accumulated_other_comprehensive_income_loss: int
    other_total_stockholders_equity: int
    minority_interest: int
    # Derived total: excluded from __init__ and computed in __post_init__.
    total_shareholders_equity: int = field(init=False)
    def __post_init__(self):
        # Sums all the int fields initialized above (shared helper).
        self.total_shareholders_equity = sum_all_initialized_int_attributes(self)
| hakunaprojects/stock-investing | src/domain/financial_statements/balance_sheet_statement/shareholders_equity.py | shareholders_equity.py | py | 785 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dataclasses.field",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "src.shared.general_functions.sum_all_initialized_int_attributes",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 6,
"usage_type":... |
11120994067 | import logging
import typing as tp
from collections import deque
from librarius.domain.messages import (
AbstractMessage,
AbstractEvent,
AbstractCommand,
AbstractQuery,
)
from librarius.service.uow import AbstractUnitOfWork
from librarius.domain.exceptions import SkipMessage
logger = logging.getLogger(__name__)
class MessageBus:
    """Dispatches events, commands, and queries to their registered handlers.

    Events fan out to zero or more handlers (failures logged, processing
    continues); commands and queries have exactly one handler each (failures
    re-raised).  Handlers may emit new events, which are collected from the
    unit of work and appended to the queue.
    """
    def __init__(
        self,
        uow: AbstractUnitOfWork,
        event_handlers: dict[tp.Type[AbstractEvent], list[tp.Callable]],
        command_handlers: dict[tp.Type[AbstractCommand], tp.Callable],
        query_handlers: dict[tp.Type[AbstractQuery], tp.Callable],
    ):
        self.queue: deque[AbstractMessage] = deque()
        self.uow = uow
        self.event_handlers = event_handlers
        self.command_handlers = command_handlers
        self.query_handlers = query_handlers
    def handle(self, message: AbstractMessage):
        """Enqueue *message* and drain the queue, dispatching by message type.

        Returns the query result if a query is encountered; otherwise None.
        """
        self.queue.append(message)
        try:
            while self.queue:
                message = self.queue.popleft()
                if isinstance(message, AbstractEvent):
                    self.handle_event(message)
                elif isinstance(message, AbstractCommand):
                    self.handle_command(message)
                elif isinstance(message, AbstractQuery):
                    # A query short-circuits the drain: its result is returned
                    # immediately and any messages still queued are NOT processed.
                    return self.handle_query(message)
                else:
                    raise Exception(f"{message} was not an Event, Command or Query")
        except SkipMessage as error:
            # NOTE(review): SkipMessage aborts the whole drain loop, dropping
            # any remaining queued messages — confirm this is intended.
            logger.warning(f"Skipping message {message.uuid} because {error.reason}")
    def handle_event(self, event: AbstractEvent) -> None:
        """Run every handler registered for this event type; log and continue on failure."""
        for handler in self.event_handlers[type(event)]:
            try:
                logger.debug(f"Handling event {event} with handler {handler}")
                handler(event)
                # Pick up any events the handler caused via the unit of work.
                self.queue.extend(self.uow.collect_new_events())
            except Exception:
                logger.exception(f"Exception handling event {event}")
                continue
    def handle_command(self, command: AbstractCommand) -> None:
        """Run the single handler for this command type; failures are re-raised."""
        logger.debug(f"Handling command {command}")
        try:
            handler = self.command_handlers[type(command)]
            handler(command)
            self.queue.extend(self.uow.collect_new_events())
        except Exception:
            logger.exception(f"Exception handling command {command}")
            raise
    def handle_query(self, query: AbstractQuery):
        """Run the single handler for this query type and return its result; failures re-raised."""
        logger.debug(f"Handling query {query}")
        try:
            handler = self.query_handlers[type(query)]
            results = handler(query)
            self.queue.extend(self.uow.collect_new_events())
            return results
        except Exception:
            logger.exception(f"Exception handling query {query}")
            raise
| adriangabura/vega | librarius/service/message_bus.py | message_bus.py | py | 2,802 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "librarius.service.uow.AbstractUnitOfWork",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_n... |
11324258537 | import pygame
from random import randint
from pygame.locals import *
pygame.init()
display_widht = 600
display_height = 360
spaceship_widht = 84
spaceship_height = 50
shots_x = []
shots_y = []
asteroids_x = []
asteroids_y = []
asteroids_type = []
gameDisplay = pygame.display.set_mode((display_widht, display_height))
pygame.display.set_caption('The battle of death')
clock = pygame.time.Clock()
spaceshipImg = pygame.image.load('spaceship.png')
backgroundImg = pygame.image.load('background.png')
laserImg = pygame.image.load('laser.png')
asteroidImg = pygame.image.load('asteroid.png')
def spaceship(x,y):
    """Draw the player's ship sprite at (x, y) on the main display."""
    gameDisplay.blit(spaceshipImg, (x,y))
def shot(x,y):
    """Fire a double laser from the ship at (x, y).

    Draws the two beams once and records the shot's start position in the
    module-level shots_x/shots_y lists so move_shoots() can animate it.
    """
    # Spawn slightly ahead of the ship's x position.
    x += 4
    gameDisplay.blit(laserImg, (x,y))
    # Second beam offset to the bottom edge of the ship sprite.
    gameDisplay.blit(laserImg, (x, y + spaceship_height - 7))
    shots_x.append(x)
    shots_y.append(y)
def move_shoots():
    """Advance every active laser shot 8 px to the right and redraw it.

    Fix: shots that leave the right edge of the screen are now removed from
    the module-level shots_x/shots_y lists.  The original kept (and kept
    advancing) every shot ever fired, so the lists grew without bound.
    """
    surviving = []
    for shot_x, shot_y in zip(shots_x, shots_y):
        shot_x += 8
        if shot_x < display_widht:
            surviving.append((shot_x, shot_y))
            gameDisplay.blit(laserImg, (shot_x, shot_y))
            gameDisplay.blit(laserImg, (shot_x, shot_y + spaceship_height - 7))
    # Slice-assign so the module-level lists are updated in place.
    shots_x[:] = [pos[0] for pos in surviving]
    shots_y[:] = [pos[1] for pos in surviving]
def create_asteroid():
    """Spawn one asteroid at a random edge of the screen and draw it.

    Appends its position and type to the module-level asteroid lists;
    type 0 asteroids fall from the top, any other type flies in from the right.
    """
    # 0 => top edge; 1 or 2 => right edge, so the right edge is chosen twice
    # as often — NOTE(review): confirm the 2:1 bias is intentional
    # (randint(0, 2) is inclusive at both ends).
    up_side = randint(0,2)
    x = randint(1, display_widht)
    y = randint(1, display_height)
    asteroids_type.append(up_side)
    if up_side == 0:
        # Top spawn: random x along the top border.
        y = 0
        asteroids_x.append(x)
        asteroids_y.append(y)
    else:
        # Right spawn: random y just inside the right border.
        x = display_widht - 40
        asteroids_y.append(y)
        asteroids_x.append(x)
    gameDisplay.blit(asteroidImg, (x,y))
def move_asteroids():
    """Advance every asteroid and redraw it.

    Type 0 asteroids (top spawns) fall down; all other types fly left.

    Fix: the original guard `(x < display_widht or x > 0)` was a tautology —
    every number satisfies at least one side of the `or` — so only the type
    check ever mattered.  The dead condition is removed; behavior is
    unchanged.  The `global` statements were also dropped: the lists are
    mutated in place, never rebound, so they were unnecessary.
    """
    for i in range(len(asteroids_x)):
        if asteroids_type[i] != 0:
            # Right-edge spawn: travel leftwards.
            asteroids_x[i] -= 7
        else:
            # Top-edge spawn: fall downwards.
            asteroids_y[i] += 7
        gameDisplay.blit(asteroidImg, (asteroids_x[i], asteroids_y[i]))
def game_loop():
    """Main game loop: poll input, move the ship, redraw everything at 60 FPS."""
    x = 0
    y = display_height * 0.5
    # Per-frame velocity set by arrow-key press, cleared on release.
    x_change = 0
    y_change = 0
    gameExit = False
    while not gameExit:
        gameDisplay.blit(backgroundImg, (0,0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameExit = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RIGHT:
                    x_change = 4
                if event.key == pygame.K_LEFT:
                    x_change = -4
                if event.key == pygame.K_UP:
                    y_change = -4
                if event.key == pygame.K_DOWN:
                    y_change = 4
                if event.key == pygame.K_SPACE:
                    # Space both fires a shot and spawns a new asteroid.
                    shot(x,y)
                    create_asteroid()
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
                    x_change = 0
                if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
                    y_change = 0
        x += x_change
        y += y_change
        # Clamp the ship inside the window bounds.
        if y > display_height - spaceship_height:
            y = display_height - spaceship_height
        if x > display_widht - spaceship_widht:
            x = display_widht - spaceship_widht
        if x < 0:
            x = 0
        if y < 0:
            y = 0
        spaceship(x,y)
        move_shoots()
        move_asteroids()
        pygame.display.update()
        clock.tick(60)
game_loop()
pygame.quit()
quit()
| macelai/star-wars | game.py | game.py | py | 3,439 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pygame.display... |
22868194593 | import requests
from googletrans import Translator, LANGUAGES
import pickle
import webScraping
with open('Resources/API key/oxford.pck', 'rb') as file:
api_key = pickle.load(file)
app_id = api_key['app id']
app_key = api_key['app key']
url_base = 'https://od-api.oxforddictionaries.com/api/v2/'
language_code = 'en-us'
def lemmatize(word):
    """Return the dictionary headword (lemma) of *word* via the Oxford API.

    Returns '' when the word is unknown (HTTP 404).  Any other non-JSON or
    malformed response will raise, as in the original.
    """
    endpoint = 'lemmas'
    url = url_base + endpoint + '/' + language_code + '/' + word
    res = requests.get(url, headers={'app_id': app_id, 'app_key': app_key})
    # Fix: compare the numeric status code directly; the original compared
    # strings via `format(res.status_code) != '404'`.
    if res.status_code == 404:
        return ''
    return res.json()['results'][0]['lexicalEntries'][0]['inflectionOf'][0]['id']
def Definition(word):
    """Return the first Oxford definition of *word*, or None if unavailable.

    The word is lemmatized first; an unknown word (empty lemma) or any
    failure extracting the definition from the API response yields None.
    """
    word = lemmatize(word)
    # Guard clause: unknown word, nothing to look up.
    if word == '':
        return None
    endpoint = 'entries'
    url = url_base + endpoint + '/' + language_code + '/' + word
    res = requests.get(url, headers={'app_id': app_id, 'app_key': app_key})
    try:
        return res.json()['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]['definitions'][0]
    except Exception:
        # Fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; Exception is the widest net appropriate here.
        return None
def Synonyms(word):
    """Return up to five synonyms of *word* from the Oxford API, or None.

    None is returned for unknown words (empty lemma) or when the response
    does not contain a synonyms section.
    """
    word = lemmatize(word)
    # Guard clause: unknown word, nothing to look up.
    if word == '':
        return None
    endpoint = 'entries'
    url = url_base + endpoint + '/' + language_code + '/' + word
    res = requests.get(url, headers={"app_id": app_id, "app_key": app_key})
    try:
        list_of_synonyms = res.json()['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]['synonyms']
    except Exception:
        # Fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; Exception is the widest net appropriate here.
        return None
    # Slice replaces the original manual min(5, len(...)) index loop.
    return [entry['text'] for entry in list_of_synonyms[:5]]
def Antonyms(word):
    """Return antonyms of *word* via web scraping, or None for multi-word phrases."""
    # Multi-word phrases are not supported.
    if word.find(' ') != -1:
        return None
    word = lemmatize(word)
    # NOTE(review): lemmatize returns '' for unknown words; the scraper is
    # still called with the empty string — confirm it handles that.
    return webScraping.Get_Antonyms(word)
def lang_translate(text,language):
    """Translate *text* from English into *language* (a googletrans language code).

    Returns the googletrans result object, or None when the target language
    code is not recognized.
    """
    if language not in LANGUAGES.values():
        return None
    translator = Translator()
    return translator.translate(text, src='en', dest=language)
{
"api_name": "pickle.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number"... |
36846615388 | from typing import cast
from .kotlin_entities import (
KotlinEntity,
KotlinProperty,
KotlinEntityEnumeration,
PARSING_ERRORS_PROP_NAME,
ENTITY_STATIC_CREATOR
)
from ..base import Generator
from ... import utils
from ...config import GenerationMode, GeneratedLanguage, TEMPLATE_SUFFIX
from ...schema.modeling.entities import (
StringEnumeration,
EntityEnumeration,
Entity,
Object,
ObjectFormat,
)
from ...schema.modeling.text import Text, EMPTY
class KotlinGenerator(Generator):
def __init__(self, config):
super(KotlinGenerator, self).__init__(config)
self.kotlin_annotations = config.generation.kotlin_annotations
self._error_collectors = config.generation.errors_collectors
self._generate_equality = config.generation.generate_equality
self.generate_serialization = config.generation.generate_serialization
    def filename(self, name: str) -> str:
        """Return the generated file's name: CamelCased entity name with the Kotlin `.kt` extension."""
        return f'{utils.capitalize_camel_case(name)}.kt'
def _entity_declaration(self, entity: Entity) -> Text:
entity: KotlinEntity = cast(KotlinEntity, entity)
entity.__class__ = KotlinEntity
entity.eval_errors_collector_enabled(self._error_collectors)
entity.update_bases()
if entity.generate_as_protocol:
return self.__declaration_as_interface(entity)
result: Text = self.__main_declaration_header(entity)
is_template = entity.generation_mode.is_template
if is_template:
result += EMPTY
result += ' constructor ('
result += ' env: ParsingEnvironment,'
result += f' parent: {utils.capitalize_camel_case(entity.name)}? = null,'
result += ' topLevel: Boolean = false,'
result += ' json: JSONObject'
result += ' ) {'
result += ' val logger = env.logger'
constructor = entity.constructor_body(with_commas=False).indented(indent_width=8)
if constructor.lines:
result += constructor
result += ' }'
result += EMPTY
result += entity.value_resolving_declaration.indented(indent_width=4)
if self.generate_serialization:
result += EMPTY
result += entity.serialization_declaration.indented(indent_width=4)
if not is_template and self._generate_equality and not entity.instance_properties:
result += EMPTY
result += self.__manual_equals_hash_code_declaration.indented(indent_width=4)
if not is_template:
patch = entity.copy_with_new_array_declaration
if patch:
result += patch
static_declarations = entity.static_declarations(self.generate_serialization)
if static_declarations.lines:
result += EMPTY
result += ' companion object {'
result += static_declarations.indented(indent_width=8)
result += ' }'
result += EMPTY
if entity.inner_types:
for inner_type in filter(lambda t: not isinstance(t, StringEnumeration) or not is_template,
entity.inner_types):
result += EMPTY
result += self._main_declaration(inner_type).indented(indent_width=4)
result += '}'
return result
    @staticmethod
    def __declaration_as_interface(entity: KotlinEntity) -> Text:
        """Render *entity* as a Kotlin interface: property declarations only, no bodies."""
        result = Text(f'interface {utils.capitalize_camel_case(entity.name)} {{')
        for prop in entity.instance_properties_kotlin:
            # Interface members: no `override`, no trailing comma, no default values.
            result += prop.declaration(overridden=False,
                                       in_interface=True,
                                       with_comma=False,
                                       with_default=False).indented(indent_width=4)
        result += '}'
        return result
def __main_declaration_header(self, entity: KotlinEntity) -> Text:
result = Text()
for annotation in self.kotlin_annotations.classes:
result += annotation
data_prefix = 'data '
if entity.generation_mode.is_template or not self._generate_equality or not entity.instance_properties:
data_prefix = ''
prefix = f'{data_prefix}class {utils.capitalize_camel_case(entity.name)}'
interfaces = ['JSONSerializable'] if self.generate_serialization else []
protocol_plus_super_entities = entity.protocol_plus_super_entities()
if protocol_plus_super_entities is not None:
interfaces.append(protocol_plus_super_entities)
interfaces = ', '.join(interfaces)
suffix = f' : {interfaces}' if interfaces else ''
suffix += ' {'
def add_instance_properties(text: Text, is_template: bool) -> Text:
mixed_properties = entity.instance_properties_kotlin
if entity.errors_collector_enabled:
mixed_properties.append(KotlinProperty(
name=PARSING_ERRORS_PROP_NAME,
description='',
description_translations={},
dict_field='',
property_type=Object(name='List<Exception>', object=None, format=ObjectFormat.DEFAULT),
optional=True,
is_deprecated=False,
mode=GenerationMode.NORMAL_WITHOUT_TEMPLATES,
supports_expressions_flag=False,
default_value=None,
platforms=None
))
for prop in mixed_properties:
overridden = False
if entity.implemented_protocol is not None:
overridden = any(p.name == prop.name for p in entity.implemented_protocol.properties)
text += prop.declaration(
overridden=overridden,
in_interface=False,
with_comma=not is_template,
with_default=not is_template
).indented(indent_width=4)
return text
if entity.generation_mode.is_template:
result += prefix + suffix
if entity.instance_properties:
result = add_instance_properties(text=result, is_template=True)
else:
constructor_prefix = ''
if self.kotlin_annotations.constructors:
constructor_annotations = ', '.join(self.kotlin_annotations.constructors)
constructor_prefix = f' {constructor_annotations} constructor '
if not entity.instance_properties:
result += f'{prefix}{constructor_prefix}(){suffix}'
else:
result += f'{prefix}{constructor_prefix}('
result = add_instance_properties(text=result, is_template=False)
result += f'){suffix}'
return result
    @property
    def __manual_equals_hash_code_declaration(self) -> Text:
        """Kotlin equals/hashCode stub for classes without properties: equality by runtime class only."""
        result = Text('override fun equals(other: Any?) = javaClass == other?.javaClass')
        result += EMPTY
        result += 'override fun hashCode() = javaClass.hashCode()'
        return result
def _entity_enumeration_declaration(self, entity_enumeration: EntityEnumeration) -> Text:
entity_enumeration: KotlinEntityEnumeration = cast(KotlinEntityEnumeration, entity_enumeration)
entity_enumeration.__class__ = KotlinEntityEnumeration
declaration_name = utils.capitalize_camel_case(entity_enumeration.name)
entity_declarations = list(map(utils.capitalize_camel_case, entity_enumeration.entity_names))
default_entity_decl = utils.capitalize_camel_case(str(entity_enumeration.default_entity_declaration))
result = Text()
for annotation in self.kotlin_annotations.classes:
result += annotation
interfaces = ['JSONSerializable'] if self.generate_serialization else []
interfaces.append(entity_enumeration.mode.protocol_name(
lang=GeneratedLanguage.KOTLIN,
name=entity_enumeration.resolved_prefixed_declaration))
interfaces = ', '.join(filter(None, interfaces))
suffix = f' : {interfaces}' if interfaces else ''
suffix += ' {'
result += f'sealed class {declaration_name}{suffix}'
for decl in entity_declarations:
naming = entity_enumeration.format_case_naming(decl)
decl = f'class {naming}(val value: {decl}) : {declaration_name}()'
result += Text(indent_width=4, init_lines=decl)
result += EMPTY
result += f' fun value(): {entity_enumeration.common_interface(GeneratedLanguage.KOTLIN) or "Any"} {{'
result += ' return when (this) {'
for decl in entity_declarations:
naming = entity_enumeration.format_case_naming(decl)
decl = f'is {naming} -> value'
result += Text(indent_width=12, init_lines=decl)
result += ' }'
result += ' }'
result += EMPTY
if self.generate_serialization:
result += ' override fun writeToJSON(): JSONObject {'
result += ' return when (this) {'
for decl in entity_declarations:
naming = entity_enumeration.format_case_naming(decl)
decl = f'is {naming} -> value.writeToJSON()'
result += Text(indent_width=12, init_lines=decl)
result += ' }'
result += ' }'
result += EMPTY
if entity_enumeration.mode.is_template:
self_name = entity_enumeration.resolved_prefixed_declaration
result += f' override fun resolve(env: ParsingEnvironment, data: JSONObject): {self_name} {{'
result += ' return when (this) {'
for decl in entity_declarations:
case_name = entity_enumeration.format_case_naming(decl)
line = f'is {case_name} -> {self_name}.{case_name}(value.resolve(env, data))'
result += Text(indent_width=12, init_lines=line)
result += ' }'
result += ' }'
result += EMPTY
result += ' val type: String'
result += ' get() {'
result += ' return when (this) {'
for decl in entity_declarations:
naming = entity_enumeration.format_case_naming(decl)
line = f'is {naming} -> {decl}.TYPE'
result += Text(indent_width=16, init_lines=line)
result += ' }'
result += ' }'
result += EMPTY
elif self._generate_equality:
result += ' override fun equals(other: Any?): Boolean {'
result += ' if (this === other) { return true }'
result += f' if (other is {declaration_name}) {{'
result += ' return value().equals(other.value())'
result += ' }'
result += ' return false'
result += ' }'
result += EMPTY
if not self.generate_serialization:
result += '}'
return result
result += ' companion object {'
result += ' @Throws(ParsingException::class)'
source_name = 'json'
source_type = 'JSONObject'
read_type_expr = 'json.read("type", logger = logger, env = env)'
read_type_opt_expr = 'json.readOptional("type", logger = logger, env = env)'
throwing_expr = 'throw typeMismatch(json = json, key = "type", value = type)'
if entity_enumeration.mode.is_template:
def deserialization_args(s):
return f'env, parent?.value() as {s}?, topLevel, {source_name}'
result += ' operator fun invoke('
result += ' env: ParsingEnvironment,'
result += ' topLevel: Boolean = false,'
result += f' {source_name}: {source_type}'
result += f' ): {declaration_name} {{'
result += ' val logger = env.logger'
if default_entity_decl:
result += f' val receivedType: String = {read_type_opt_expr} ?: {default_entity_decl}Template.TYPE'
else:
result += f' val receivedType: String = {read_type_expr}'
result += f' val parent = env.templates[receivedType] as? {declaration_name}'
result += ' val type = parent?.type ?: receivedType'
else:
def deserialization_args(s):
return f'env, {source_name}'
result += ' @JvmStatic'
result += ' @JvmName("fromJson")'
args = f'env: ParsingEnvironment, {source_name}: {source_type}'
result += f' operator fun invoke({args}): {declaration_name} {{'
result += ' val logger = env.logger'
if default_entity_decl:
result += f' val type: String = {read_type_opt_expr} ?: {default_entity_decl}.TYPE'
else:
result += f' val type: String = {read_type_expr}'
result += ' when (type) {'
for decl in entity_declarations:
naming = entity_enumeration.format_case_naming(decl)
line = f'{decl}.TYPE -> return {naming}({decl}({deserialization_args(decl)}))'
result += Text(indent_width=16, init_lines=line)
if entity_enumeration.mode is GenerationMode.NORMAL_WITH_TEMPLATES:
result += ' }'
name = utils.capitalize_camel_case(entity_enumeration.name + TEMPLATE_SUFFIX)
template_type = entity_enumeration.template_declaration_prefix + name
result += f' val template = env.templates.getOrThrow(type, json) as? {template_type}'
result += ' if (template != null) {'
result += f' return template.resolve(env, {source_name})'
result += ' } else {'
result += f' {throwing_expr}'
result += ' }'
else:
result += f' else -> {throwing_expr}'
result += ' }'
result += ' }'
static_creator_lambda = f'env: ParsingEnvironment, it: JSONObject -> {declaration_name}(env, json = it)'
result += f' val {ENTITY_STATIC_CREATOR} = {{ {static_creator_lambda} }}'
result += ' }'
result += '}'
return result
def _string_enumeration_declaration(self, string_enumeration: StringEnumeration) -> Text:
declaration_name = utils.capitalize_camel_case(string_enumeration.name)
cases_declarations = list(map(lambda s: Text(indent_width=16, init_lines=f'{s}.value -> {s}'),
map(lambda s: utils.fixing_first_digit(utils.constant_upper_case(s[0])),
string_enumeration.cases)))
result = Text(f'enum class {declaration_name}(private val value: String) {{')
for ind, case in enumerate(string_enumeration.cases):
terminal = ',' if ind != (len(cases_declarations) - 1) else ';'
name = utils.fixing_first_digit(utils.constant_upper_case(case[0]))
value = case[1]
result += Text(indent_width=4, init_lines=f'{name}("{value}"){terminal}')
result += EMPTY
result += ' companion object Converter {'
result += f' fun toString(obj: {declaration_name}): String {{'
result += ' return obj.value'
result += ' }'
result += EMPTY
result += f' fun fromString(string: String): {declaration_name}? {{'
result += ' return when (string) {'
result += cases_declarations
result += ' else -> null'
result += ' }'
result += ' }'
result += EMPTY
result += ' val FROM_STRING = { string: String ->'
result += ' when (string) {'
result += cases_declarations
result += ' else -> null'
result += ' }'
result += ' }'
result += ' }'
result += '}'
return result
| divkit/divkit | api_generator/api_generator/generators/kotlin/generator.py | generator.py | py | 16,470 | python | en | code | 1,940 | github-code | 6 | [
{
"api_name": "base.Generator",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "config.generation",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "config.generation",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "config.g... |
18959073144 | import boto3
import time
import json
import configparser
from botocore.exceptions import ClientError
redshift_client = boto3.client('redshift', region_name='ap-southeast-1')
ec2 = boto3.resource('ec2', region_name='ap-southeast-1')
def create_udacity_cluster(config):
    """Provision the project's Amazon Redshift cluster.

    Args:
        config: parsed configuration with ``CLUSTER`` and ``IAM_ROLE``
            sections (database name, credentials, port, role ARN).

    Returns:
        The ``Cluster`` dictionary from the API response, or ``None`` when
        the create request is rejected.
    """
    # Assemble the request once so the call site stays readable.
    cluster_kwargs = dict(
        ClusterIdentifier='udacity-cluster',
        ClusterType='multi-node',
        NumberOfNodes=2,
        NodeType='dc2.large',
        PubliclyAccessible=True,
        DBName=config.get('CLUSTER', 'DB_NAME'),
        MasterUsername=config.get('CLUSTER', 'DB_USER'),
        MasterUserPassword=config.get('CLUSTER', 'DB_PASSWORD'),
        Port=int(config.get('CLUSTER', 'DB_PORT')),
        IamRoles=[config.get('IAM_ROLE', 'ROLE_ARN')],
        VpcSecurityGroupIds=['sg-077f9a08ba80c09e4'],
    )
    try:
        api_response = redshift_client.create_cluster(**cluster_kwargs)
    except ClientError as e:
        print(f'ERROR: {e}')
        return None
    return api_response['Cluster']
def wait_for_creation(cluster_id):
    """Poll Redshift until the given cluster becomes available.

    Args:
        cluster_id: Cluster identifier to poll.

    Returns:
        The cluster dictionary once its status is ``'available'``.
    """
    while True:
        describe = redshift_client.describe_clusters(ClusterIdentifier=cluster_id)
        cluster = describe['Clusters'][0]
        if cluster['ClusterStatus'] == 'available':
            return cluster
        # Not ready yet — back off before the next poll.
        time.sleep(30)
def opentcp(config, cluster_info):
    """Open an incoming TCP port so the cluster endpoint is reachable.

    Adds an ingress rule for the cluster DB port to the first security
    group of the cluster's VPC.

    Args:
        config: configurations file (``CLUSTER`` section supplies DB_PORT)
        cluster_info: cluster dictionary information (supplies ``VpcId``)

    Returns:
        None

    Raises:
        None (errors are printed and swallowed, matching the original
        best-effort behavior).
    """
    try:
        vpc = ec2.Vpc(id=cluster_info['VpcId'])
        # Takes the first security group of the VPC (assumed default).
        defaultSg = list(vpc.security_groups.all())[0]
        print(defaultSg)
        # getint() already returns an int; the previous int(...) wrapper
        # was redundant. Hoisted so the port is parsed once.
        db_port = config.getint('CLUSTER', 'DB_PORT')
        # SECURITY NOTE: 0.0.0.0/0 opens the port to the whole internet;
        # acceptable for this exercise, not for production.
        defaultSg.authorize_ingress(
            GroupName=defaultSg.group_name,
            CidrIp='0.0.0.0/0',
            IpProtocol='TCP',
            FromPort=db_port,
            ToPort=db_port
        )
    except Exception as e:
        print(e)
def main():
    """Create cluster"""
    # Load DB credentials and IAM role from the project configuration file.
    config = configparser.ConfigParser()
    config.read('../dwh.cfg')
    cluster_info = create_udacity_cluster(config)
    # None means the create call failed (already printed); do nothing more.
    if cluster_info is not None:
        print('Cluster is being created')
        # Block until the cluster reports status "available".
        cluster_info = wait_for_creation(cluster_info['ClusterIdentifier'])
        print(f'Cluster has been created.')
        print(f"Endpoint to copy={cluster_info['Endpoint']['Address']}")
        # Open the DB port so the endpoint is reachable from outside the VPC.
        opentcp(config,cluster_info)
if __name__ == '__main__':
    main()
| hieutdle/bachelor-thesis | airflow/scripts/create_cluster.py | create_cluster.py | py | 2,942 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "boto3.client",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "botocore.exceptions.ClientError",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "time.sleep"... |
23091348874 | '''
Epidemic modelling
YOUR NAME
Functions for running a simple epidemiological simulation
'''
import random
import sys
import click
# This seed should be used for debugging purposes only! Do not refer
# to this variable in your code.
TEST_SEED = 20170217
def has_an_infected_neighbor(city, location):
    '''
    Determine whether the susceptible person at *location* has an
    infected neighbor in a city modelled as a ring.

    Args:
        city (list of tuples): the state of all people in the simulation
          at the start of the day
        location (int): the location of the person to check

    Returns (boolean): True if at least one ring-neighbor is infected,
      False otherwise.
    '''
    # The location needs to be a valid index for the city list, and this
    # question only makes sense for a susceptible person.
    assert 0 <= location < len(city)
    state, _ = city[location]
    assert state == "S"

    # Negative indexing handles the left wrap-around; modulo handles the
    # right wrap-around of the ring.
    left_state = city[location - 1][0]
    right_state = city[(location + 1) % len(city)][0]
    return left_state == "I" or right_state == "I"
def advance_person_at_location(city, location, days_contagious):
    '''
    Compute the next state for the person at the specified location.

    Args:
        city (list): the state of all people in the simulation at the
          start of the day
        location (int): the location of the person to check
        days_contagious (int): the number of a days a person is infected

    Returns (string, int): the disease state and the number of days
      the person has been in that state after simulating one day.
    '''
    # Validate before indexing so a bad location fails the assert rather
    # than raising IndexError (or silently wrapping for negatives).
    assert 0 <= location < len(city)
    # Previously city[location] was unpacked twice into two parallel
    # variable pairs and the infected branch read disease_state[0]
    # (char-indexing a one-letter state string); a single unpack and a
    # direct comparison are equivalent and clearer.
    state, days_in_state = city[location]
    # A day passes for everyone, whatever their state.
    days_in_state += 1
    if state == "S":
        if has_an_infected_neighbor(city, location):
            # A susceptible person with an infected neighbor becomes
            # infected; the clock restarts in the new state.
            state = "I"
            days_in_state = 0
    elif state == "I" and days_in_state >= days_contagious:
        # The infection has run its course: the person recovers.
        state = "R"
        days_in_state = 0
    # "R" and "V" states never change; only their day counter advances.
    return (state, days_in_state)
def simulate_one_day(starting_city, days_contagious):
    '''
    Move the simulation forward a single day.

    Args:
        starting_city (list): the state of all people in the simulation at the
          start of the day
        days_contagious (int): the number of a days a person is infected

    Returns (list of tuples): the state of the city after one day
    '''
    # Advance every resident one day; the new city is built from the
    # unchanged starting city, so all updates see the same snapshot.
    return [
        advance_person_at_location(starting_city, loc, days_contagious)
        for loc in range(len(starting_city))
    ]
def is_transmission_possible(city):
    """
    Is there at least one susceptible person who has an infected neighbor?

    Args:
        city (list): the current state of the city

    Returns (boolean): True if the city has at least one susceptible person
      with an infected neighbor, False otherwise.
    """
    # Short-circuits on the first susceptible resident with a sick neighbor.
    return any(
        state == "S" and has_an_infected_neighbor(city, loc)
        for loc, (state, _) in enumerate(city)
    )
def run_simulation(starting_city, days_contagious):
    '''
    Run the entire simulation.

    Args:
        starting_city (list): the state of all people in the city at the
          start of the simulation
        days_contagious (int): the number of a days a person is infected

    Returns tuple (list of tuples, int): the final state of the city
      and the number of days actually simulated.
    '''
    # (A leftover placeholder `pass` statement was removed here.)
    city = starting_city
    num_days = 0
    # Keep simulating days while at least one susceptible person can
    # still be infected; once no transmission is possible the epidemic
    # can no longer evolve.
    while is_transmission_possible(city):
        city = simulate_one_day(city, days_contagious)
        num_days += 1
    return (city, num_days)
def vaccinate_person(vax_tuple):
    '''
    Attempt to vaccinate a single person based on their current
    disease state and personal eagerness to be vaccinated.

    Args:
        vax_tuple (string, int, float): information about a person,
          including their eagerness to be vaccinated.

    Returns (string, int): a person tuple
    '''
    disease_state, days_in_state, eagerness = vax_tuple
    # Only susceptible people are eligible; vaccination succeeds with
    # probability equal to the person's eagerness.
    if disease_state == "S" and random.random() < eagerness:
        return ("V", 0)
    # Everyone else is unchanged.
    return (disease_state, days_in_state)
def vaccinate_city(city_vax_tuples, random_seed):
    '''
    Vaccinate the people in the city based on their current state and
    eagerness to be vaccinated.

    Args:
        city_vax_tuples (list of (string, int, float) triples):
          state of all people in the simulation at the start
          of the simulation, including their eagerness to be vaccinated.
        random_seed (int): seed for the random number generator

    Returns (list of (string, int) tuples): state of the people in the
      city after vaccination
    '''
    # Seed once so the sequence of per-person draws is reproducible.
    random.seed(random_seed)
    return [vaccinate_person(vax_tuple) for vax_tuple in city_vax_tuples]
def vaccinate_and_simulate(city_vax_tuples, days_contagious, random_seed):
    """
    Vaccinate the city and then simulate the infection spread.

    Args:
        city_vax_tuples (list): a list with the state of the people in the city,
          including their eagerness to be vaccinated.
        days_contagious (int): the number of days a person is infected
        random_seed (int): the seed for the random number generator

    Returns (list of tuples, int): the state of the city at the end of the
      simulation and the number of days simulated.
    """
    # One vaccination pass first; vaccinated ("V") people can never be
    # infected during the subsequent simulation.
    vaccinated_city = vaccinate_city(city_vax_tuples, random_seed)
    return run_simulation(vaccinated_city, days_contagious)
################ Do not change the code below this line #######################
def run_trials(vax_city, days_contagious, random_seed, num_trials):
    """
    Run multiple trials of vaccinate_and_simulate and compute the median
    result for the number of days until infection transmission stops.

    Args:
        vax_city (list of (string, int, float) triples): a list with vax
          tuples for the people in the city
        days_contagious (int): the number of days a person is infected
        random_seed (int): the seed for the random number generator
        num_trials (int): the number of trial simulations to run

    Returns:
        (int) the median number of days until infection transmission stops
    """
    days = []
    for i in range(num_trials):
        # With a truthy seed, offset it per trial so trials differ but the
        # whole run stays reproducible; otherwise pass the seed through
        # unchanged (None re-seeds from system entropy each time).
        if random_seed:
            _, num_days_simulated = vaccinate_and_simulate(vax_city,
                                                           days_contagious,
                                                           random_seed+i)
        else:
            _, num_days_simulated = vaccinate_and_simulate(vax_city,
                                                           days_contagious,
                                                           random_seed)
        days.append(num_days_simulated)
    # quick way to compute the median
    return sorted(days)[num_trials // 2]
def parse_city_file(filename, is_vax_tuple):
    """
    Read a city represented as person tuples or vax tuples from
    a file.

    Args:
        filename (string): the name of the file
        is_vax_tuple (boolean): True if the file is expected to contain
          (string, int, float) triples. False if the file is expected to
          contain (string, int) pairs.

    Returns: list of tuples or None, if the file does not exist or
      cannot be parsed.
    """
    try:
        with open(filename) as f:
            # One whitespace-separated record per line.
            residents = [line.split() for line in f]
    except IOError:
        print("Could not open:", filename, file=sys.stderr)
        return None

    # Valid disease states: Susceptible, Infected, Recovered, Vaccinated.
    ds_types = ('S', 'I', 'R', 'V')
    rv = []
    if is_vax_tuple:
        try:
            for i, res in enumerate(residents):
                ds, nd, ve = res
                num_days = int(nd)
                vax_eagerness = float(ve)
                # Eagerness is a probability, so it must lie in [0, 1].
                if ds not in ds_types or num_days < 0 or \
                   vax_eagerness < 0 or vax_eagerness > 1.0:
                    raise ValueError()
                rv.append((ds, num_days, vax_eagerness))
        except ValueError:
            # Triggered both by bad conversions and by the explicit raise;
            # i still holds the offending line index.
            emsg = ("Error in line {}: vax tuples are represented "
                    "with a disease state {}"
                    "a non-negative integer, and a floating point value "
                    "between 0 and 1.0.")
            print(emsg.format(i, ds_types), file=sys.stderr)
            return None
    else:
        try:
            for i, res in enumerate(residents):
                ds, nd = res
                num_days = int(nd)
                if ds not in ds_types or num_days < 0:
                    raise ValueError()
                rv.append((ds, num_days))
        except ValueError:
            emsg = ("Error in line {}: persons are represented "
                    "with a disease state {} and a non-negative integer.")
            print(emsg.format(i, ds_types), file=sys.stderr)
            return None
    return rv
@click.command()
@click.argument("filename", type=str)
@click.option("--days-contagious", default=2, type=int)
@click.option("--task-type", default="no_vax",
              type=click.Choice(['no_vax', 'vax']))
@click.option("--random-seed", default=None, type=int)
@click.option("--num-trials", default=1, type=int)
def cmd(filename, days_contagious, task_type, random_seed, num_trials):
    '''
    Process the command-line arguments and do the work.
    '''
    # Vax files carry an extra eagerness column, so the parser needs to
    # know which task type was requested.
    city = parse_city_file(filename, task_type == "vax")
    if not city:
        return -1

    if task_type == "no_vax":
        # Plain epidemic simulation, no vaccination clinic.
        print("Running simulation ...")
        final_city, num_days_simulated = run_simulation(
            city, days_contagious)
        print("Final city:", final_city)
        print("Days simulated:", num_days_simulated)
    elif num_trials == 1:
        # Single vaccinate-then-simulate run.
        print("Running one vax clinic and simulation ...")
        final_city, num_days_simulated = vaccinate_and_simulate(
            city, days_contagious, random_seed)
        print("Final city:", final_city)
        print("Days simulated:", num_days_simulated)
    else:
        # Multiple trials; report the median stopping time.
        print("Running multiple trials of the vax clinic and simulation ...")
        median_num_days = run_trials(city, days_contagious,
                                     random_seed, num_trials)
        print("Median number of days until infection transmission stops:",
              median_num_days)
    return 0


if __name__ == "__main__":
    cmd()  # pylint: disable=no-value-for-parameter
| MaxSaint01/pa1 | sir.py | sir.py | py | 13,436 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "random.random",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_n... |
1422010768 | #Dilation and Erosion
# Morphological operations demo: dilation, erosion, opening and closing
# applied to a binary image of the cursive letter "j".
import cv2
import matplotlib.pyplot as plt
import numpy as np

# Read the sample image as single-channel grayscale (flag 0).
img = cv2.imread('j.png', 0)

# 5x5 structuring element shared by every operation below: a pixel is
# affected when any/all pixels under this window match, depending on the op.
kernel = np.ones((5, 5), np.uint8)

# --- Dilation: grows white regions — a pixel becomes white if any pixel
# in its 5x5 neighborhood is white.
dilation = cv2.dilate(img, kernel, iterations=1)
plt.imshow(dilation, cmap='gray')

# --- Erosion: shrinks white regions — a pixel stays white only if the
# whole 5x5 neighborhood is white.
erosion = cv2.erode(img, kernel, iterations=1)
plt.imshow(erosion, cmap='gray')

# --- Opening (erosion followed by dilation): removes small white noise
# from the background while roughly restoring the object's size.
opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
plt.imshow(opening, cmap='gray')

# --- Closing (dilation followed by erosion): fills small holes and dark
# spots inside the foreground object.
closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
plt.imshow(closing, cmap='gray')
| haderalim/Computer-Vision | Types of features and Image segmentation/Dilation- Erosion- Opeining and Closing/test.py | test.py | py | 2,245 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cv2.dilate",
"line_number":... |
4501146166 | import asyncio
import contextlib
import types
import unittest
import pytest
from lsst.ts import salobj, watcher
from lsst.ts.idl.enums.Watcher import AlarmSeverity
# Timeout for normal operations (seconds)
STD_TIMEOUT = 5
class GetRuleClassTestCase(unittest.TestCase):
    """Test `lsst.ts.watcher.get_rule_class`."""

    def test_good_names(self):
        # Each valid rule name must resolve to its implementing class.
        cases = (
            ("Enabled", watcher.rules.Enabled),
            ("test.NoConfig", watcher.rules.test.NoConfig),
            ("test.ConfiguredSeverities", watcher.rules.test.ConfiguredSeverities),
        )
        for classname, expected_class in cases:
            assert watcher.get_rule_class(classname) == expected_class

    def test_bad_names(self):
        bad_names = (
            "NoSuchRule",  # no such rule
            "test.NoSuchRule",  # no such rule
            "test.Enabled",  # wrong module
            "NoConfig",  # wrong module
            "test_NoConfig",  # wrong separator
        )
        for bad_name in bad_names:
            with pytest.raises(ValueError):
                watcher.get_rule_class(bad_name)
class ModelTestCase(unittest.IsolatedAsyncioTestCase):
    """Tests for `lsst.ts.watcher.Model` using Enabled rules on mock CSCs."""

    def setUp(self):
        # Isolate DDS traffic from any other concurrently running tests.
        salobj.set_random_lsst_dds_partition_prefix()

    @contextlib.asynccontextmanager
    async def make_model(self, names, enable, escalation=(), use_bad_callback=False):
        """Make a Model as self.model, with one or more Enabled rules.

        Parameters
        ----------
        names : `list` [`str`]
            Name and index of one or more CSCs.
            Each entry is of the form "name" or name:index".
            The associated alarm names have a prefix of "Enabled.".
        enable : `bool`
            Enable the model?
        escalation : `list` of `dict`, optional
            Escalation information.
            See `CONFIG_SCHEMA` for the format of entries.
        use_bad_callback : `bool`
            If True then specify an invalid callback function:
            one that is synchronous. This should raise TypeError.
        """
        if not names:
            raise ValueError("Must specify one or more CSCs")
        self.name_index_list = [salobj.name_to_name_index(name) for name in names]
        configs = [dict(name=name_index) for name_index in names]
        watcher_config_dict = dict(
            disabled_sal_components=[],
            auto_acknowledge_delay=3600,
            auto_unacknowledge_delay=3600,
            rules=[dict(classname="Enabled", configs=configs)],
            escalation=escalation,
        )
        watcher_config = types.SimpleNamespace(**watcher_config_dict)

        # Per-alarm history of severities seen by alarm_callback.
        self.read_severities = dict()
        self.read_max_severities = dict()

        # One mock controller per named CSC, for publishing summary states.
        self.controllers = []
        for name_index in names:
            name, index = salobj.name_to_name_index(name_index)
            self.controllers.append(salobj.Controller(name=name, index=index))
        if use_bad_callback:
            # Synchronous callback: Model requires a coroutine function,
            # so constructing the Model must raise TypeError.
            def bad_callback():
                pass

            alarm_callback = bad_callback
        else:
            alarm_callback = self.alarm_callback
        self.model = watcher.Model(
            domain=self.controllers[0].domain,
            config=watcher_config,
            alarm_callback=alarm_callback,
        )
        for name, rule in self.model.rules.items():
            rule.alarm.init_severity_queue()
            self.read_severities[name] = []
            self.read_max_severities[name] = []

        controller_start_tasks = [
            controller.start_task for controller in self.controllers
        ]
        await asyncio.gather(self.model.start_task, *controller_start_tasks)
        if enable:
            await self.model.enable()
            # Freshly enabled alarms must all be in the nominal state.
            for rule in self.model.rules.values():
                assert rule.alarm.nominal
                assert not rule.alarm.acknowledged
                assert not rule.alarm.muted
                self.assert_not_muted(rule.alarm)
        try:
            yield
        finally:
            # Close the model first, then all controllers concurrently.
            await self.model.close()
            controller_close_tasks = [
                asyncio.create_task(controller.close())
                for controller in self.controllers
            ]
            await asyncio.gather(*controller_close_tasks)

    async def alarm_callback(self, alarm):
        """Callback function for each alarm.

        Updates self.read_severities and self.read_max_severities,
        dicts of alarm_name: list of severity/max_severity.
        """
        self.read_severities[alarm.name].append(alarm.severity)
        self.read_max_severities[alarm.name].append(alarm.max_severity)
        # Print the state to aid debugging test failures.
        print(
            f"alarm_callback({alarm.name}, severity={alarm.severity!r}): "
            f"read_severities={self.read_severities[alarm.name]}"
        )

    async def write_states(self, index, states):
        """Write a sequence of summary states to a specified controller."""
        controller = self.controllers[index]
        controller_name_index = f"{controller.salinfo.name}:{controller.salinfo.index}"
        rule_name = f"Enabled.{controller_name_index}"
        rule = self.model.rules[rule_name]
        previous_state = None
        for state in states:
            await controller.evt_summaryState.set_write(
                summaryState=state, force_output=True
            )
            if self.model.enabled and previous_state != state:
                # State changed: wait for the rule to report a severity.
                await asyncio.wait_for(
                    rule.alarm.severity_queue.get(), timeout=STD_TIMEOUT
                )
                assert rule.alarm.severity_queue.empty()
            elif self.model.enabled:
                # The state didn't change, so no new event should arrive.
                with pytest.raises(asyncio.TimeoutError):
                    await asyncio.wait_for(
                        rule.alarm.severity_queue.get(), timeout=STD_TIMEOUT
                    )
                assert rule.alarm.severity_queue.empty()
            else:
                # We don't have any event we can wait for, so sleep a bit
                # to give the model time to react to the data.
                await asyncio.sleep(0.1)
            previous_state = state

    def assert_muted(self, alarm, muted_severity, muted_by):
        """Assert that the specified alarm is muted.

        Parameters
        ----------
        alarm : `lsst.ts.watcher.Alarm`
            Alarm to test.
        muted_severity : `lsst.ts.idl.enums.Watcher.AlarmSeverity`
            Expected value for rule.severity.
        muted_by : `str`
            Expected value for rule.muted_by.
        """
        assert alarm.muted
        assert alarm.muted_severity == muted_severity
        assert alarm.muted_by == muted_by

    def assert_not_muted(self, alarm):
        """Assert that the specified alarm is not muted.

        Parameters
        ----------
        alarm : `lsst.ts.watcher.Alarm`
            Alarm to test.
        """
        assert not alarm.muted
        assert alarm.muted_severity == AlarmSeverity.NONE
        assert alarm.muted_by == ""

    async def test_constructor_bad_callback(self):
        remote_names = ["ScriptQueue:5", "Test:7"]
        # A synchronous alarm callback must be rejected at construction.
        with pytest.raises(TypeError):
            async with self.make_model(
                names=remote_names, enable=False, use_bad_callback=True
            ):
                pass

    async def test_acknowledge_full_name(self):
        user = "test_ack_alarm"
        remote_names = ["ScriptQueue:5", "Test:7"]
        nrules = len(remote_names)

        async with self.make_model(names=remote_names, enable=True):
            full_rule_name = f"Enabled.{remote_names[0]}"
            assert full_rule_name in self.model.rules

            # Send STANDBY to all controllers to put all alarms into warning.
            for index in range(nrules):
                await self.write_states(index=index, states=[salobj.State.STANDBY])
            for name, rule in self.model.rules.items():
                assert not rule.alarm.nominal
                assert rule.alarm.severity == AlarmSeverity.WARNING
                assert rule.alarm.max_severity == AlarmSeverity.WARNING

            # Acknowledge one rule by full name but not the other.
            await self.model.acknowledge_alarm(
                name=full_rule_name, severity=AlarmSeverity.WARNING, user=user
            )
            for name, rule in self.model.rules.items():
                if name == full_rule_name:
                    assert rule.alarm.acknowledged
                    assert rule.alarm.acknowledged_by == user
                else:
                    assert not rule.alarm.acknowledged
                    assert rule.alarm.acknowledged_by == ""

    async def test_acknowledge_regex(self):
        user = "test_ack_alarm"
        remote_names = ["ScriptQueue:1", "ScriptQueue:2", "Test:62"]
        nrules = len(remote_names)

        async with self.make_model(names=remote_names, enable=True):
            assert len(self.model.rules) == nrules

            # Send STANDBY to all controllers to put all alarms into warning.
            for index in range(nrules):
                await self.write_states(index=index, states=[salobj.State.STANDBY])
            for rule in self.model.rules.values():
                assert not rule.alarm.nominal
                assert rule.alarm.severity == AlarmSeverity.WARNING
                assert rule.alarm.max_severity == AlarmSeverity.WARNING

            # Acknowledge the ScriptQueue alarms but not Test.
            await self.model.acknowledge_alarm(
                name="Enabled.ScriptQueue:*", severity=AlarmSeverity.WARNING, user=user
            )
            for name, rule in self.model.rules.items():
                if "ScriptQueue" in name:
                    assert rule.alarm.acknowledged
                    assert rule.alarm.acknowledged_by == user
                else:
                    assert not rule.alarm.acknowledged
                    assert rule.alarm.acknowledged_by == ""

    async def test_enable(self):
        remote_names = ["ScriptQueue:5", "Test:7"]
        async with self.make_model(names=remote_names, enable=True):
            assert len(self.model.rules) == 2

            # Enable the model and write ENABLED several times.
            # This triggers the rule callback but that does not
            # change the state of the alarm.
            await self.model.enable()
            for index in range(len(remote_names)):
                await self.write_states(
                    index=index,
                    states=(
                        salobj.State.ENABLED,
                        salobj.State.ENABLED,
                        salobj.State.ENABLED,
                    ),
                )
            for name, rule in self.model.rules.items():
                assert rule.alarm.nominal
                assert self.read_severities[name] == [AlarmSeverity.NONE]
                assert self.read_max_severities[name] == [AlarmSeverity.NONE]

            # Disable the model and issue several events that would
            # trigger an alarm if the model was enabled. Since the
            # model is disabled the alarm does not change states.
            self.model.disable()
            for index in range(len(remote_names)):
                await self.write_states(
                    index=index, states=(salobj.State.FAULT, salobj.State.STANDBY)
                )
            for name, rule in self.model.rules.items():
                assert rule.alarm.nominal
                assert self.read_severities[name] == [AlarmSeverity.NONE]
                assert self.read_max_severities[name] == [AlarmSeverity.NONE]

            # Enable the model. This will trigger a callback with
            # the current state of the event (STANDBY).
            # Note that the earlier FAULT event is is ignored
            # because it arrived while disabled.
            await self.model.enable()
            for name, rule in self.model.rules.items():
                await rule.alarm.assert_next_severity(AlarmSeverity.WARNING)
                assert not rule.alarm.nominal
                assert rule.alarm.severity == AlarmSeverity.WARNING
                assert rule.alarm.max_severity == AlarmSeverity.WARNING
                assert self.read_severities[name] == [
                    AlarmSeverity.NONE,
                    AlarmSeverity.WARNING,
                ]
                assert self.read_max_severities[name] == [
                    AlarmSeverity.NONE,
                    AlarmSeverity.WARNING,
                ]

            # Issue more events; they should be processed normally.
            for index in range(len(remote_names)):
                await self.write_states(
                    index=index, states=(salobj.State.FAULT, salobj.State.STANDBY)
                )
            for name, rule in self.model.rules.items():
                assert not rule.alarm.nominal
                # max_severity latches at CRITICAL even after the current
                # severity drops back to WARNING.
                assert rule.alarm.severity == AlarmSeverity.WARNING
                assert rule.alarm.max_severity == AlarmSeverity.CRITICAL
                assert self.read_severities[name] == [
                    AlarmSeverity.NONE,
                    AlarmSeverity.WARNING,
                    AlarmSeverity.CRITICAL,
                    AlarmSeverity.WARNING,
                ]
                assert self.read_max_severities[name] == [
                    AlarmSeverity.NONE,
                    AlarmSeverity.WARNING,
                    AlarmSeverity.CRITICAL,
                    AlarmSeverity.CRITICAL,
                ]

    async def test_escalation(self):
        remote_names = ["ScriptQueue:1", "ScriptQueue:2", "Test:1", "Test:2", "Test:52"]
        # Escalation info for the first two rules;
        # check that case does not have to match.
        esc_info12 = dict(
            alarms=["enabled.scriptqueue:*"],
            responder="chaos",
            delay=0.11,
        )
        # Escalation info for the next two rules
        esc_info34 = dict(
            alarms=["Enabled.Test:?"],
            responder="stella",
            delay=0.12,
        )
        # Escalation info that does not match any alarm names
        esc_notused = dict(
            alarms=["Enabled.NoMatch"],
            responder="someone",
            delay=0.13,
        )
        async with self.make_model(
            names=remote_names,
            enable=False,
            escalation=[esc_info12, esc_info34, esc_notused],
        ):
            alarms = [rule.alarm for rule in self.model.rules.values()]
            assert len(alarms) == len(remote_names)
            for alarm in alarms[0:2]:
                assert alarm.escalation_responder == esc_info12["responder"]
                assert alarm.escalation_delay == esc_info12["delay"]
            for alarm in alarms[2:4]:
                assert alarm.escalation_responder == esc_info34["responder"]
                assert alarm.escalation_delay == esc_info34["delay"]
            # Remaining alarms matched no escalation entry.
            for alarm in alarms[4:]:
                assert alarm.escalation_responder == ""
                assert alarm.escalation_delay == 0
            for alarm in alarms:
                assert alarm.timestamp_escalate == 0

    async def test_get_rules(self):
        remote_names = ["ScriptQueue:1", "ScriptQueue:2", "Test:1", "Test:2", "Test:52"]
        async with self.make_model(names=remote_names, enable=False):
            rules = self.model.get_rules("NoSuchName")
            assert len(list(rules)) == 0

            # Search starts at beginning, so Enabled.foo works
            # but foo does not.
            rules = self.model.get_rules("ScriptQueue")
            assert len(list(rules)) == 0

            rules = self.model.get_rules(".*")
            assert len(list(rules)) == len(remote_names)

            rules = self.model.get_rules("Enabled")
            assert len(list(rules)) == len(remote_names)

            rules = self.model.get_rules("Enabled.ScriptQueue")
            assert len(list(rules)) == 2

            rules = self.model.get_rules("Enabled.Test")
            assert len(list(rules)) == 3

    async def test_mute_full_name(self):
        """Test mute and unmute by full alarm name."""
        user = "test_mute_alarm"
        remote_names = ["ScriptQueue:5", "Test:7"]

        async with self.make_model(names=remote_names, enable=True):
            full_rule_name = f"Enabled.{remote_names[0]}"
            assert full_rule_name in self.model.rules

            # Mute one rule by full name.
            await self.model.mute_alarm(
                name=full_rule_name,
                duration=5,
                severity=AlarmSeverity.WARNING,
                user=user,
            )
            for name, rule in self.model.rules.items():
                if name == full_rule_name:
                    self.assert_muted(
                        rule.alarm, muted_severity=AlarmSeverity.WARNING, muted_by=user
                    )
                else:
                    self.assert_not_muted(rule.alarm)

            # Unmute one rule by full name.
            await self.model.unmute_alarm(name=full_rule_name)
            for rule in self.model.rules.values():
                self.assert_not_muted(rule.alarm)

    async def test_mute_regex(self):
        """Test mute and unmute by regex."""
        user = "test_mute_alarm"
        remote_names = ["ScriptQueue:1", "ScriptQueue:2", "Test:62"]
        nrules = len(remote_names)

        async with self.make_model(names=remote_names, enable=True):
            assert len(self.model.rules) == nrules

            # Mute the ScriptQueue alarms but not Test.
            await self.model.mute_alarm(
                name="Enabled.ScriptQueue.*",
                duration=5,
                severity=AlarmSeverity.WARNING,
                user=user,
            )
            for name, rule in self.model.rules.items():
                if "ScriptQueue" in name:
                    self.assert_muted(
                        rule.alarm, muted_severity=AlarmSeverity.WARNING, muted_by=user
                    )
                else:
                    self.assert_not_muted(rule.alarm)

            # Unmute the ScriptQueue alarms but not Test.
            await self.model.unmute_alarm(name="Enabled.ScriptQueue.*")
            for rule in self.model.rules.values():
                self.assert_not_muted(rule.alarm)
| lsst-ts/ts_watcher | tests/test_model.py | test_model.py | py | 18,435 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "lsst.ts.watcher.rules",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "lsst.ts.watcher",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "lss... |
3929101533 | from sqlalchemy import Column, INTEGER, Identity, String
from src.data_access.database.models.base_entity import InoversityLibraryBase
__all__ = [
"StaffEntity"
]
class StaffEntity(InoversityLibraryBase):
    """ORM entity for a library staff member (maps via the project base class)."""

    # Surrogate primary key, stored in column "id".
    user_id = Column("id", INTEGER, Identity(), primary_key=True, index=True)
    # Authorization level; indexed for lookups by role.
    role_level = Column("roleLevel", String(256), nullable=False, index=True)
    # Employer-assigned staff number; must be unique.
    staff_number = Column("staffNumber", String(20), nullable=False, unique=True)
    department = Column("department", String(100), nullable=False)
    job_title = Column("jobTitle", String(100), nullable=False)
| mariusvrstr/PythonMicroservice | src/data_access/database/models/staff_entity.py | staff_entity.py | py | 582 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "src.data_access.database.models.base_entity.InoversityLibraryBase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.INTEGER",
"line_number": 11,
"usage_type":... |
4786996440 | #まだわからん。
from collections import defaultdict
# Sliding-window scan: for each left index, grow the window to the right
# while it holds fewer than k distinct values, tracking the longest window.
# NOTE(review): the window only grows while kinds < k, so growth stops as
# soon as the k-th distinct value is added — confirm this matches the
# intended "at most k distinct" semantics (the original author's header
# comment says the logic is not yet understood).
n,k = map(int,input().split())
a = list(map(int,input().split()))
d = defaultdict(int)  # value -> occurrence count inside the current window
right = 0
ans = 0 # keeps the maximum window length seen so far
kinds = 0  # number of distinct values currently in the window
for left in range(n):
    while right < n and kinds < k:
        d[a[right]] += 1
        right += 1
        kinds = len(d)
        print("whileループの中",kinds,d)
    """
    if left == right:
        right += 1
        continue
    """
    print(left,right," ",right-left)
    if ans < right - left:
        ans = right-left
        print("ansを更新しました!",ans)
    # Shrink the window from the left before advancing `left`.
    d[a[left]] -= 1
    if d[a[left]] == 0:
        print("削除します",d)
        kinds -= 1
        del d[a[left]]
print(ans)
{
"api_name": "collections.defaultdict",
"line_number": 7,
"usage_type": "call"
}
] |
7436815802 | from pathlib import Path
from zoneinfo import ZoneInfo
import datetime
import sys
TIME_ZONE = ZoneInfo('US/Eastern')
def main():
    """Organize AudioMoth WAV files into per-night directories.
    Usage: script STATION_NAME RECORDINGS_DIR — renames every *.WAV file
    found directly in RECORDINGS_DIR (filesystem side effects only).
    """
    station_name = sys.argv[1]
    dir_path = Path(sys.argv[2])
    # Sorted so files are processed (and logged) in chronological name order.
    file_paths = sorted(dir_path.glob('*.WAV'))
    for file_path in file_paths:
        move_file(file_path, station_name)
def move_file(file_path, station_name):
    """Rename one WAV file into its night directory.
    The new name is ``<station>_<recorder>_<UTC start time>.wav`` and the
    file moves into a ``YYYY-MM-DD`` sibling directory named for the night
    (noon-to-noon, TIME_ZONE local time) on which the recording started.
    """
    # Recorder name comes from the grandparent directory — assumes the layout
    # <recorder>/<subdir>/<file>.WAV; TODO confirm against the deployment tree.
    recorder_name = file_path.parent.parent.name
    file_name = file_path.name
    start_time = parse_file_name(file_name)
    night = get_night(start_time)
    night_dir_name = night.strftime('%Y-%m-%d')
    start_time_string = start_time.strftime('%Y-%m-%d_%H.%M.%S_Z')
    new_file_name = f'{station_name}_{recorder_name}_{start_time_string}.wav'
    night_dir_path = file_path.parent / night_dir_name
    night_dir_path.mkdir(mode=0o755, parents=True, exist_ok=True)
    new_file_path = night_dir_path / new_file_name
    file_path.rename(new_file_path)
    print(f'{start_time} {night_dir_path} {new_file_path}')
def parse_file_name(file_name):
    """Convert an AudioMoth file name like ``20210102_052233.WAV`` into a
    timezone-aware UTC ``datetime``."""
    utc = ZoneInfo('UTC')
    naive_time = datetime.datetime.strptime(file_name, '%Y%m%d_%H%M%S.WAV')
    return naive_time.replace(tzinfo=utc)
def get_night(dt):
    """Return the monitoring night (a ``date``) containing *dt*.
    Nights run noon-to-noon in TIME_ZONE: local times from noon onward
    belong to that calendar date, earlier times to the previous date.
    """
    local = dt.astimezone(TIME_ZONE)
    if local.hour >= 12:
        return local.date()
    return datetime.date.fromordinal(local.toordinal() - 1)
if __name__ == '__main__':
main()
| HaroldMills/Vesper | scripts/organize_audiomoth_wav_files_by_night.py | organize_audiomoth_wav_files_by_night.py | py | 1,464 | python | en | code | 47 | github-code | 6 | [
{
"api_name": "zoneinfo.ZoneInfo",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_numb... |
8833474558 | ########################################################
# Rodrigo Leite - drigols #
# Last update: 17/12/2021 #
########################################################
import pandas as pd
from matplotlib import pyplot as plt
# Small fixed sample of employees; 'Rosie' is the deliberate outlier
# (salary 189000, grade 95) that the box plot will hide.
df = pd.DataFrame(
    {
        'Name': ['Dan', 'Joann', 'Pedro', 'Rosie', 'Ethan', 'Vicky', 'Frederic'],
        'Salary':[50000, 54000, 50000, 189000, 55000, 40000, 59000],
        'Hours':[41, 40, 36, 17, 35, 39, 40],
        'Grade':[50, 50, 46, 95, 50, 5,57]
    }
)
# Uses the showfliers=False attribute - i.e., outliers (discrepant data) are not drawn.
df['Salary'].plot(kind='box', title='Salary Distribution', figsize=(10,8), showfliers=False)
plt.savefig('../images/first-boxplot-03.png', format='png')
plt.show()
| drigols/studies | modules/math-codes/modules/statistics-and-probability/src/outliers-v2.py | outliers-v2.py | py | 804 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.DataFrame",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotli... |
5609431554 | import gym
class SparseRewardWrapper(gym.Wrapper):
    """Gym wrapper that accumulates per-step rewards and emits them sparsely.
    sparse_level == 0 passes rewards through unchanged, -1 emits the total
    only at episode end, and any positive value emits every that-many steps.
    An optional timestep_limit forces ``done`` after that many steps.
    """
    def __init__(self, env, sparse_level=-1, timestep_limit=-1):
        super(SparseRewardWrapper, self).__init__(env)
        self.sparse_level = sparse_level
        self.timestep_limit = timestep_limit
        self.acc_reward = 0  # reward accumulated since the last emission
        self.acc_t = 0       # steps taken in the current episode

    def step(self, action):
        """Step the wrapped env, returning the (possibly withheld) reward."""
        observation, reward, done, info = self.env.step(action)
        self.acc_t += 1
        if 0 < self.timestep_limit <= self.acc_t:
            done = True
        if self.sparse_level == 0:
            # Dense mode: no accumulation at all.
            return observation, reward, done, info
        self.acc_reward += reward
        if self.sparse_level == -1:
            emit = done
        else:
            emit = done or (self.acc_t > 0 and self.acc_t % self.sparse_level == 0)
        sparse_reward = 0
        if emit:
            sparse_reward = self.acc_reward
            self.acc_reward = 0
        return observation, sparse_reward, done, info

    def reset(self, **kwargs):
        """Clear both accumulators and reset the wrapped environment."""
        self.acc_reward = 0
        self.acc_t = 0
        return self.env.reset(**kwargs)
| pfnet-research/piekd | sparse_wrapper.py | sparse_wrapper.py | py | 1,118 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "gym.Wrapper",
"line_number": 3,
"usage_type": "attribute"
}
] |
72519791227 | import json
from warnings import warn
# def init_from_config(meas_cls, config: dict):
# arg_str = ''
#
# for key, value in config.items():
# arg_str = key+'='+value
def export_measurement_config(obj, attr_keys=None):
    """Collect the JSON-serializable parameters of *obj* into a dict.
    *obj* may be a dict (looked up by key) or any object (looked up by
    attribute).  When *attr_keys* is None, the names are taken from the
    object's ``__init__`` argument list.  Values whose type is not plainly
    JSON serializable are skipped with a warning.
    """
    if attr_keys is None:
        attr_keys = obj.__init__.__code__.co_varnames
    serializable_type_names = ['dict', 'list', 'tuple', 'str', 'int',
                               'float', 'bool', 'NoneType']
    params = {}
    for key in attr_keys:
        if isinstance(obj, dict):
            if key not in obj.keys():
                continue
            param = obj[key]
        else:
            if key == 'self' or not hasattr(obj, key):
                continue
            param = obj.__getattribute__(key)
        if param.__class__.__name__ in serializable_type_names:
            params[key] = param
        else:
            warn('The parameter \'%s\' of type \'%s\' is not JSON serializable and is skipped.' %
                 (key, param.__class__.__name__))
    return params
def save_config(config, filename):
    """Serialize *config* to *filename* as tab-indented JSON."""
    text = json.dumps(config, indent='\t')
    with open(filename, 'w') as fp:
        fp.write(text)
def load_config(filename):
    """Read *filename* and return the parsed JSON object."""
    with open(filename, 'r') as fp:
        return json.load(fp)
| yyzidea/measurement-automation | utilities/measurement_helper.py | measurement_helper.py | py | 1,278 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "warnings.warn",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 45,
"usage_type": "call"
}
] |
16543455789 | from ai_chatbot.scripts import REDataHeader as Header
import dateparser
import datetime
def printData(data):
    """Print the four core booking fields of *data* for debugging."""
    fields = [
        ('Station from', data[Header.STATIONFROM]),
        ('Station to', data[Header.STATIONTO]),
        ('departure date', data[Header.DEPARTDATE]),
        ('departure time', data[Header.DEPARTTIME]),
    ]
    for label, value in fields:
        print('{0} : {1}'.format(label, value))
def datecheck(date):
    """Return 0 when *date* parses to a future moment, 1 otherwise (0 == OK)."""
    parsed = dateparser.parse(date)
    return 0 if parsed > datetime.datetime.now() else 1
def returndatecheck(date, returndate):
    """Return 0 when *returndate* falls after *date*, 1 otherwise (0 == OK)."""
    depart = dateparser.parse(date)
    back = dateparser.parse(returndate)
    return 0 if depart < back else 1
def timecheck(date, time):
    """Return 0 when the combined *date*+*time* is in the future, 1 otherwise."""
    day = dateparser.parse(date)
    clock = dateparser.parse(time)
    moment = datetime.datetime.combine(day.date(), clock.time())
    return 0 if moment > datetime.datetime.now() else 1
def missingDataCheck(data):
    """Return the first missing or invalid booking field, or 0 when complete.
    Returns the Header key of the field that still needs input, or the
    BADDATE/BADTIME markers for values that fail validation.  Side effect:
    invalid date/time values are blanked in *data* so the caller re-prompts.
    The elif order defines the prompt order — do not reorder.
    """
    if data[Header.STATIONFROM] == '':
        return Header.STATIONFROM
    elif data[Header.STATIONTO] == '':
        return Header.STATIONTO
    elif data[Header.DEPARTDATE] == '':
        return Header.DEPARTDATE
    elif datecheck(data[Header.DEPARTDATE]) == 1:
        # Departure date is not in the future — clear it and flag the error.
        data[Header.DEPARTDATE] = ''
        return Header.BADDATE
    elif data[Header.DEPARTTIME] == '':
        return Header.DEPARTTIME
    elif timecheck(data[Header.DEPARTDATE], data[Header.DEPARTTIME]) == 1:
        # Combined departure date+time already passed — clear both fields.
        data[Header.DEPARTDATE] = ''
        data[Header.DEPARTTIME] = ''
        return Header.BADTIME
    elif data[Header.SINGLERETURN].lower() == '':
        return Header.SINGLERETURN
    elif data[Header.SINGLERETURN].lower() == 'return':
        # Return-trip bookings additionally need a valid return date and time.
        if data[Header.RETURNDATE] == '':
            return Header.RETURNDATE
        elif returndatecheck(data[Header.DEPARTDATE], data[Header.RETURNDATE]) == 1:
            data[Header.RETURNDATE] = ''
            return Header.BADDATE
        elif data[Header.RETURNTIME] == '':
            return Header.RETURNTIME
        return 0
    else:
        return 0
def verificationCheck(data):
    """Return 0 when the booking has been confirmed, 1 otherwise (0 == OK)."""
    return 0 if data[Header.CONFIRMED] == 'true' else 1
def getURL(data):
    """Print the booking details the scraper URL will be built from.
    Fix: reads *data* with Header-keyed dict access, consistent with every
    other function in this module; the previous attribute access
    (``data.stationFrom``) would raise AttributeError for the dict payloads
    those functions handle.  NOTE(review): confirm no caller passes an
    object exposing those attributes instead of a dict.
    """
    # call function in scraper/scraper.py
    print('Getting URL for...')
    print('\t {0} --> {1}'.format(data[Header.STATIONFROM], data[Header.STATIONTO]))
    print('\t Departure date : {0}'.format(data[Header.DEPARTDATE]))
    print('\t Departure time : {0}'.format(data[Header.DEPARTTIME]))
| Grimmii/TrainChatBot | src/ai_chatbot/scripts/RE_function_booking.py | RE_function_booking.py | py | 2,542 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ai_chatbot.scripts.REDataHeader.STATIONFROM",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "ai_chatbot.scripts.REDataHeader",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "ai_chatbot.scripts.REDataHeader.STATIONTO",
"line_number": 8,
... |
8380997732 | import os
from flask import Flask, jsonify, request
from math import sqrt
app = Flask(__name__)
@app.route('/')
def nao_entre_em_panico():
    """Render the first 50 Fibonacci terms as an HTML fragment."""
    nmax = 50
    res = "Essa é sequencia dos 50 primeiros números da razão de Fibonacci: <br> Desenvolvido por Jefferson Alves. <br> <br>"
    n1, n2 = 0, 1
    for _ in range(nmax):
        # Advance the pair; n2 becomes the next term to display.
        n1, n2 = n2, n1 + n2
        res = res + str(n2) + "<br>"
    return res
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
| jeffersonpedroza/Docker | fibonacci.py | fibonacci.py | py | 606 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 28,
"usage_type": "attribute"
}
] |
2061469568 | from sklearn.preprocessing import StandardScaler
from sklearn import svm
class OneClassSVM:
    """One-class-SVM anomaly detector with optional feature standardization.
    Improvement: the previously hard-coded 4096-row training cap is now a
    constructor parameter (default unchanged), so callers can trade fit
    time for accuracy without editing this class.
    """
    def __init__(self, scaling=True, max_fit_samples=4096):
        # max_fit_samples bounds the rows used by fit(); default keeps the
        # original behavior of training on at most the first 4096 samples.
        self._scaling = scaling
        self._max_fit_samples = max_fit_samples

    def fit(self, X):
        """Fit the (optional) scaler and the SVM on at most max_fit_samples rows."""
        if self._scaling:
            self._scaler = StandardScaler()
            X = self._scaler.fit_transform(X)
        # NOTE(review): takes the leading rows only — assumes X is shuffled; confirm.
        X = X[:self._max_fit_samples]
        self._svm = svm.OneClassSVM().fit(X)
        return self

    def anomaly_scores(self, batch):
        """Return scores where larger means more anomalous (negated decision_function)."""
        if self._scaling:
            batch = self._scaler.transform(batch)
        return -self._svm.decision_function(batch)
| rom1mouret/cheatmeal | benchmarks/baselines/one_class_svm.py | one_class_svm.py | py | 559 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.OneClassSVM",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 17,
"usage_type": "name"
}
] |
36837090213 | import streamlit as st
from streamlit_option_menu import option_menu
import math
import datetime
from datetime import date
import calendar
from PIL import Image
from title_1 import *
from img import *
# Inject the shared stylesheet so the Streamlit widgets below pick up the custom theme.
with open('final.css') as f:
    st.markdown(f"<style>{f.read()}</style>",unsafe_allow_html=True)
def average():
    """Streamlit page that averages a comma-separated list of integers.
    Layout: header image and title, an input row with Submit/Clear buttons;
    the result (rounded to 4 decimals) appears via st.success, and any
    non-integer input surfaces as a generic st.error.
    """
    image()
    st.markdown(" <h1 style='text-align: center; color: Black;font-size: 25px;'>Application to Find the Average</h1>", unsafe_allow_html=True)
    # Spacer columns (w1/w2, us1/us2) center the content columns on the page.
    w1,col1,col2,w2=st.columns((1,2,2,1))
    us1,bc1,bc2,us2=st.columns((4,1.5,1.8,6))
    with col1:
        st.markdown("")
        st.write("# Enter the Date ")
    # ------------to create the function to clear the input-----------#
    with bc2:
        st.markdown("")
        st.markdown("")
        def clear_text():
            # Emptying the session-state entry clears the text_input widget.
            st.session_state["text"] = ""
        st.button("Clear", on_click=clear_text)
    with col2:
        vAR_input_num=st.text_input("",key="text")
        vAR_list=[]
    #----- Average -------#
    with bc1:
        st.markdown("")
        st.markdown("")
        if st.button("Submit"):
            with col2:
                if vAR_input_num != '':
                    # Parse the comma-separated integers before averaging.
                    vAR_input_data = vAR_input_num.split(",")
                    for i in vAR_input_data:
                        num=int(i)
                        vAR_list.append(num)
                    def Average(vAR_list):
                        vAR_avg= sum(vAR_list) / len(vAR_list)
                        vAR_avg=round(vAR_avg,4)
                        st.success(vAR_avg)
                    Average(vAR_list)
                else:
                    st.error("Error")
    with col1:
        st.write("# Answer is ")
| Deepsphere-AI/AI-lab-Schools | Grade 08/Application/find_avg.py | find_avg.py | py | 1,787 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "streamlit.markdown",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "streamlit.columns",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "streamlit.col... |
33359791664 | import sys
import unittest
import psycopg2
sys.path.insert(0, '../src')
from src.utils import daily_reports_return_json, daily_reports_return_csv, time_series_return_csv, check_query_data_active, check_request
from src.config import connect_database
# import copy
class TestUtils(unittest.TestCase):
    """Integration tests for the src.utils helpers (require a live database)."""
    def __init__(self, *args, **kwargs):
        super(TestUtils, self).__init__(*args, **kwargs)
        # One shared DB connection per test-case instance.
        self.conn = connect_database()
    def test_daily_reports_return_json(self):
        """daily_reports_return_json aggregates per-location rows into the JSON shape."""
        try:
            # conn = connect_database()
            cur = self.conn.cursor()
            # create a test table in the database with the table format of a daily report
            cur.execute("DROP TABLE IF EXISTS test;")
            self.conn.commit()
            cur.execute("CREATE TABLE test (state VARCHAR(5), region VARCHAR(5), last_update VARCHAR(20), "
                        "confirmed INTEGER, deaths INTEGER, recovered INTEGER, active INTEGER, combined_key VARCHAR("
                        "5));")
            self.conn.commit()
            cur.execute("INSERT INTO test VALUES ('a', 'a', '2021-01-02 05:22:33', 10, 5, 0, 5, 'a, a'), "
                        "(null, 'b', '2021-01-02 05:22:33', 1, 0, 0, 1, 'b'), "
                        "('b', 'b', '2021-01-02 05:22:33', 4, 3, 0, 1, 'b, b');"
                        )
            self.conn.commit()
            date = "01/01/21"
            types = ["Confirmed", "Deaths", "Recovered", "Active"]
            locations = [{"Country/Region": "b"},
                         {"Country/Region": "a", "Province/State": "a", "Combined_Key": "a, a"}
                         ]
            result = daily_reports_return_json(cur, date, locations, types, 'test')
            # 'b' has no Province/State filter, so both 'b' rows are summed.
            expected = {
                "Date": date,
                "Reports": [
                    {
                        "Active": 2,
                        "Confirmed": 5,
                        "Country/Region": "b",
                        "Deaths": 3,
                        "Recovered": 0
                    },
                    {
                        "Active": 5,
                        "Confirmed": 10,
                        "Country/Region": "a",
                        "Deaths": 5,
                        "Province/State": "a",
                        "Combined_Key": "a, a",
                        "Recovered": 0
                    }
                ]
            }
            self.assertEqual(result, expected)
        except psycopg2.Error:
            assert False, "Database Error"
    def test_daily_reports_return_csv(self):
        """JSON report payloads flatten to one CSV row per location."""
        json_data = {
            "Date": "01/01/21",
            "Reports": [
                {
                    "Active": 2,
                    "Confirmed": 5,
                    "Country/Region": "b",
                    "Deaths": 3,
                    "Recovered": 0
                },
                {
                    "Active": 5,
                    "Confirmed": 10,
                    "Country/Region": "a",
                    "Deaths": 5,
                    "Province/State": "a",
                    "Combined_Key": "a, a",
                    "Recovered": 0
                }
            ]
        }
        result = daily_reports_return_csv(json_data, ["Confirmed", "Deaths", "Recovered", "Active"])
        expected = "Date,Province/State,Country/Region,Combined_Key,Confirmed,Deaths,Recovered,Active" \
                   "\n01/01/21,,b,,5,3,0,2\n01/01/21,a,a,a, a,10,5,0,5"
        self.assertEqual(result, expected)
    def test_time_series_return_csv(self):
        """Time-series JSON flattens to dated CSV rows for the requested columns."""
        json_data = {"01/26/20": [{"Active": 0, "Confirmed": 0, "Country/Region": "Albania"}]}
        expected = "Date,Province/State,Country/Region,Confirmed\n01/26/20,,Albania,0"
        result = time_series_return_csv(json_data, ["01/26/20"], ["Confirmed"])
        self.assertEqual(result, expected)
    def test_check_query_data_active(self):
        """A dropped table is reported as inactive query data."""
        try:
            # conn = connect_database()
            cur = self.conn.cursor()
            # create a test table in the database with the table format of a daily report
            cur.execute("DROP TABLE IF EXISTS test;")
            self.conn.commit()
            self.assertEqual(check_query_data_active(cur, ["test"]), False)
        except psycopg2.Error:
            assert False, "Database Error"
    def test_check_request(self):
        """check_request echoes the first requested field when params are empty."""
        result = check_request(['test'], {})
        self.assertEqual(result[0], 'test')
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| shin19991207/CSC301-A2 | tests/test_utils.py | test_utils.py | py | 4,518 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.insert",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "src.config.connect... |
25849292828 | # imports
import socket
import json
def extractData(ledger):
    """Flatten a ledger payload into (title, date, people, summary, html_rows).
    *ledger* is the decoded JSON message; *people* is a comma-joined name
    string and *html_rows* is a string of <tr> rows for generateHTML.
    """
    record = ledger['ledger']
    names = ', '.join(person['name'] for person in record['people'])
    rows = []
    for txn in record['transactions']:
        rows.append(
            f"<tr><td>{txn['item']}</td><td>{txn['amount']}</td><td>{txn['date']}</td><td>{txn['paid_by']}</td></tr>"
        )
    return record['title'], record['date'], names, record['summary'], ''.join(rows)
def generateHTML(title, date, people, summary, table):
    """Build the HTML email body for a ledger snapshot.
    *table* must already be a string of <tr>...</tr> rows (see extractData);
    the remaining arguments are interpolated verbatim — no HTML escaping is
    performed, so inputs are assumed trusted.
    """
    html = f'''<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Email</title>
</head>
<body>
    <h1>Subject: {title}</h1>
    <p>You have been sent a record of shared expenses between {people}. The following is a snapshop of that ledger from {date}</p>
    <table style="width:80%">
        <tr>
        <th>Item</th>
        <th>Amount</th>
        <th>Date</th>
        <th>Paid By</th>
        </tr>
        {table}
    </table>
    <p>Note that items with an " * " means they have been edited. Items with "del" have been removed from the summary.</p>
    <h2>Ledger summary: </h2>
    <p>{summary}</p>
</body>
</html>'''
    return html
# setup
HOST = "localhost"
PORT = 65432
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
while True:
server.listen(1)
print(f'Server listening on port: {PORT} \n')
commSocket, addr = server.accept()
print(f'Connected by {addr} \n')
dataLen = int(commSocket.recv(1024).decode())
print(f'length of data to receive: {dataLen} \n')
# LENGTH VERIFICATION
commSocket.send(str(dataLen).encode())
ledgerData = ''
while True:
data = commSocket.recv(1024).decode()
ledgerData += data
if len(ledgerData) == dataLen:
print(f'Server received: {ledgerData}\n')
ledgerData = json.loads(ledgerData)
title, date, people, summary, htmlTable = extractData(ledgerData)
html = generateHTML(title, date, people, summary, htmlTable)
commSocket.send(html.encode())
print('sending html')
commSocket.close()
print('Connection closed.')
break
| alexcw08/email-microservice | server.py | server.py | py | 2,524 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "socket.socket",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "json.loads"... |
38160567413 | #import bpy
from random import seed
from random import uniform
import numpy as np
import cv2
# seed random number generator
seed(1)
"""
def test1():
# make mesh
vertices = [(1, 0, 0),(1,0,5),(0,1,0)]
edges = []
faces = []
faces.append([0,1,2])
faces.append([2,0,3])
#new_mesh = bpy.data.meshes.new('new_mesh')
#new_mesh.from_pydata(vertices, edges, faces)
#knew_mesh.update()
# make object from mesh
#new_object = bpy.data.objects.new('new_object', new_mesh)
# make collection
#new_collection = bpy.data.collections.new('new_collection')
#bpy.context.scene.collection.children.link(new_collection)
# add object to scene collection
#new_collection.objects.link(new_object)
def add_mesh(name, verts, faces, edges=None, col_name="Collection"):
if edges is None:
edges = []
mesh = bpy.data.meshes.new(name)
obj = bpy.data.objects.new(mesh.name, mesh)
col = bpy.data.collections.get(col_name)
col.objects.link(obj)
bpy.context.view_layer.objects.active = obj
mesh.from_pydata(verts, edges, faces)
"""
def get_translation_between_points(pt1, pt2):
    """Return the 2-D translation vector that carries *pt1* onto *pt2*."""
    dx = pt2[0] - pt1[0]
    dy = pt2[1] - pt1[1]
    return (dx, dy)
class Geometry2d:
    """A mutable 2-D polyline (list of (x, y) tuples) with helpers for
    translation, scaling, drawing and end-to-end concatenation.
    Fix: the constructor previously used a mutable default argument
    (``points=[]``), so every ``Geometry2d()`` shared the same list;
    each instance now gets a fresh, independent list.
    """
    def __init__(self, points=None):
        # Avoid the shared-mutable-default pitfall.  When a list is passed
        # it is still aliased (not copied), exactly as before.
        self.points = points if points is not None else []
    def reverse(self):
        """Reverse the point order in place."""
        self.points.reverse()
    def get_last_point(self):
        return self.points[-1]
    def get_first_point(self):
        return self.points[0]
    def remove_first_element(self):
        self.points.pop(0)
    def remove_last_element(self):
        self.points.pop()
    def translate_points(self, transl):
        """Translate every point by *transl*.
        NOTE(review): mutates self.points in place AND returns a new
        Geometry2d aliasing the same list — combine_at_*/draw rely on the
        returned object, so this dual behavior is preserved as-is.
        """
        points = self.points
        for i,p in enumerate(points):
            p = (p[0]+transl[0], p[1]+transl[1])
            points[i] = p
        return Geometry2d(points)
    def scale_points(self, scale):
        """Uniformly scale every point about the origin (in place; see translate_points note)."""
        points = self.points
        for i,p in enumerate(points):
            p = (p[0]*scale, p[1]*scale)
            points[i] = p
        return Geometry2d(points)
    def flip_y(self):
        """Negate every y coordinate (in place; see translate_points note)."""
        points = self.points
        for i,p in enumerate(self.points):
            points[i] = (p[0],-p[1])
        return Geometry2d(points)
    def draw(self):
        """Render the points into a 300x300 OpenCV window (blocks until a key press)."""
        width = 300
        height = 300
        img = np.zeros((300,300))
        # Scale up, flip to screen coordinates, and center in the image.
        scale_obj = self.scale_points(30)
        draw_obj = scale_obj.flip_y()
        draw_obj = draw_obj.translate_points((int(width/2), int(height/2)))
        for point in draw_obj.points:
            print(point)
            point = (int(point[0]), int(point[1]))
            cv2.circle(img, point, 2, (255,0,0), 2)
        cv2.imshow("img", img)
        cv2.waitKey(0)
    def combine_at_first(self, geo2d_obj):
        """Join *geo2d_obj* so its first point lands on our FIRST point; returns the combined polyline."""
        first_pt_obj1 = self.get_first_point()
        print("first_pt")
        print(first_pt_obj1)
        first_pt_obj2 = geo2d_obj.get_first_point()
        print("last pt")
        print(first_pt_obj2)
        transl = get_translation_between_points(first_pt_obj2, first_pt_obj1)
        print("transl")
        print(transl)
        transl_obj = geo2d_obj.translate_points(transl)
        print("translated points")
        print(transl_obj.points)
        # Drop the duplicated junction point before concatenating.
        transl_obj.points.pop(0)
        transl_obj.points = self.points + transl_obj.points
        return transl_obj
    def combine_at_last(self, geo2d_obj):
        """Join *geo2d_obj* so its first point lands on our LAST point; returns the combined polyline."""
        first_pt_obj1 = self.get_last_point()
        print("first_pt")
        print(first_pt_obj1)
        first_pt_obj2 = geo2d_obj.get_first_point()
        print("last pt")
        print(first_pt_obj2)
        transl = get_translation_between_points(first_pt_obj2, first_pt_obj1)
        print("transl")
        print(transl)
        transl_obj = geo2d_obj.translate_points(transl)
        print("translated points")
        print(transl_obj.points)
        # Drop the duplicated junction point before concatenating.
        transl_obj.points.pop(0)
        transl_obj.points = self.points + transl_obj.points
        return transl_obj
"""
def generate_2d_corner(height, width, corner_point):
p1 = (corner_point[0] + width, corner_point[1])
p2 = corner_point
p3 = (corner_point[0], corner_point[1]+height)
verts = []
# create bottom vertices
for i in range(num_points):
i = i/num_points*x_stop
x = i+uniform(-step,step)
y = -width/2
z = uniform(0,z_limit)
point = (x,y,z)
verts.append(point)
y = width/2
point = (x,y,z)
verts.append(point)
faces = []
for point in verts:
print(point)
# create faces
num_points = len(verts)
print(f'num points: {num_points}')
for i in range(0,num_points-2, 2):
faces.append([i+1, i, i+2, i+3])
print("Faces:")
print(faces)
for face in faces:
print(face)
add_mesh("testsets", verts, faces)
#verts = [( 1.0, 1.0, 0.0),
# ( 1.0, -1.0, 0.0),
# (-1.0, -1.0, 0.0),
# (-1.0, 1.0, 0.0),
#]
#faces = [[0, 1, 2, 3]]
#add_mesh("myBeautifulMesh_1", verts, faces)
#verts = [( 3.0, 1.0, 0.0),
# ( 3.0, -1.0, 0.0),
# ( 2.0, -1.0, 0.0),
# ( 2.0, 1.0, 0.0),
# ]
#add_mesh("myBeautifulMesh_2", verts, faces)
"""
if __name__ == '__main__':
    # Demo: build two polylines, offset the second, join them end-to-end,
    # and display the combined result in an OpenCV window.
    geo2d_obj = Geometry2d([(1,0), (0,0),(0,1),(0,2), (0,3)])
    geo2d_obj1 = Geometry2d([(0,0), (1,0), (1,2), (2,1.5),(3,2), (3,0), (4,0)])
    geo2d_obj1 = geo2d_obj1.translate_points((4,4))
    geo_comb = geo2d_obj.combine_at_last(geo2d_obj1)
    geo_comb.draw()
| olaals/masteroppgave-old | src/testing/blender/generate-mesh/generate-alu-parts/generate-test.py | generate-test.py | py | 5,601 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "random.seed",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 96,... |
39263007416 | import datetime as datetime
import json
from django.db.models import Q
from django.test import override_settings
from mock import MagicMock, patch
from rest_framework.status import HTTP_403_FORBIDDEN, HTTP_201_CREATED
from eums.models import MultipleChoiceAnswer, TextAnswer, Flow, Run, \
NumericAnswer, Alert, RunQueue
from eums.test.api.authorization.authenticated_api_test_case import AuthenticatedAPITestCase
from eums.test.config import BACKEND_URL
from eums.test.factories.consignee_factory import ConsigneeFactory
from eums.test.factories.delivery_factory import DeliveryFactory
from eums.test.factories.delivery_node_factory import DeliveryNodeFactory
from eums.test.factories.flow_factory import FlowFactory
from eums.test.factories.option_factory import OptionFactory
from eums.test.factories.purchase_order_factory import PurchaseOrderFactory
from eums.test.factories.purchase_order_item_factory import PurchaseOrderItemFactory
from eums.test.factories.question_factory import TextQuestionFactory, MultipleChoiceQuestionFactory, \
NumericQuestionFactory
ENDPOINT_URL = BACKEND_URL + 'web-answers'
class WebAnswerEndpointTest(AuthenticatedAPITestCase):
mock_get = MagicMock(return_value={})
mock_distribution_alert_raise = MagicMock()
def setUp(self):
super(WebAnswerEndpointTest, self).setUp()
self.setup_flow_with_questions(Flow.Label.IMPLEMENTING_PARTNER)
def setup_flow_with_questions(self, flow_type):
flow = FlowFactory(label=flow_type)
delivery_received_qn = MultipleChoiceQuestionFactory(label='deliveryReceived', flow=flow)
OptionFactory(question=delivery_received_qn, text='Yes')
OptionFactory(question=delivery_received_qn, text='No')
TextQuestionFactory(label='dateOfReceipt', flow=flow)
good_order_qn = MultipleChoiceQuestionFactory(label='isDeliveryInGoodOrder', flow=flow)
OptionFactory(question=good_order_qn, text='Yes')
OptionFactory(question=good_order_qn, text='No')
OptionFactory(question=good_order_qn, text='Incomplete')
satisfied_qn = MultipleChoiceQuestionFactory(label='areYouSatisfied', flow=flow)
OptionFactory(question=satisfied_qn, text='Yes')
OptionFactory(question=satisfied_qn, text='No')
TextQuestionFactory(label='additionalDeliveryComments', flow=flow)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_save_answers(self):
delivery = DeliveryFactory()
date_of_receipt = self.__get_current_date()
good_comment = "All is good"
data = {
'runnable': delivery.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'},
{'question_label': 'dateOfReceipt', 'value': date_of_receipt},
{'question_label': 'isDeliveryInGoodOrder', 'value': 'Yes'},
{'question_label': 'areYouSatisfied', 'value': 'Yes'},
{'question_label': 'additionalDeliveryComments', 'value': good_comment}
]}
response = self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
answer_for_delivery_received = self._get_answer_for(MultipleChoiceAnswer, delivery.id, 'deliveryReceived')
answer_for_date_of_receipt = self._get_answer_for(TextAnswer, delivery.id, 'dateOfReceipt')
answer_for_delivery_order = self._get_answer_for(MultipleChoiceAnswer, delivery.id, 'isDeliveryInGoodOrder')
answer_for_satisfaction = self._get_answer_for(MultipleChoiceAnswer, delivery.id, 'areYouSatisfied')
answer_for_additional_comments = self._get_answer_for(TextAnswer, delivery.id, 'additionalDeliveryComments')
self.assertEqual(response.status_code, 201)
self.assertEqual(answer_for_delivery_received.value.text, 'Yes')
self.assertEqual(answer_for_date_of_receipt.value, date_of_receipt)
self.assertEqual(answer_for_delivery_order.value.text, 'Yes')
self.assertEqual(answer_for_satisfaction.value.text, 'Yes')
self.assertEqual(answer_for_additional_comments.value, good_comment)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
@patch('eums.models.DistributionPlan.confirm')
def test_should_confirm_delivery_when_answers_are_saved(self, mock_confirm):
delivery = DeliveryFactory()
date_of_receipt = self.__get_current_date()
good_comment = "All is good"
data = {
'runnable': delivery.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'},
{'question_label': 'dateOfReceipt', 'value': date_of_receipt},
{'question_label': 'isDeliveryInGoodOrder', 'value': 'Yes'},
{'question_label': 'areYouSatisfied', 'value': 'Yes'},
{'question_label': 'additionalDeliveryComments', 'value': good_comment}
]}
response = self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertTrue(mock_confirm.called)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
@patch('eums.services.response_alert_handler.ResponseAlertHandler')
def test_should_format_answers_to_rapidpro_hook_api_and_handle_corresponding_alerts(self, mock_alert_handler):
delivery = DeliveryFactory()
date_of_receipt = self.__get_current_date()
good_comment = "All is good"
data = {
'runnable': delivery.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'},
{'question_label': 'dateOfReceipt', 'value': date_of_receipt},
{'question_label': 'isDeliveryInGoodOrder', 'value': 'Yes'},
{'question_label': 'areYouSatisfied', 'value': 'Yes'},
{'question_label': 'additionalDeliveryComments', 'value': good_comment}
]}
response = self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 201)
rapidpro_formatted_answers = [
{"category": {'eng': 'Yes', 'base': 'Yes'}, 'label': 'deliveryReceived'},
{"category": {'eng': date_of_receipt, 'base': date_of_receipt}, 'label': 'dateOfReceipt'},
{"category": {'eng': 'Yes', 'base': 'Yes'}, 'label': 'isDeliveryInGoodOrder',},
{"category": {'eng': 'Yes', 'base': 'Yes'}, 'label': 'areYouSatisfied'},
{"category": {'eng': good_comment, 'base': good_comment}, 'label': 'additionalDeliveryComments'}
]
self.assertTrue(mock_alert_handler.called_once_with(delivery, rapidpro_formatted_answers))
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
@patch('eums.services.response_alert_handler.ResponseAlertHandler.process')
def test_should_process_alerts(self, mock_process):
delivery = DeliveryFactory()
date_of_receipt = self.__get_current_date()
good_comment = "All is good"
data = {
'runnable': delivery.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'},
{'question_label': 'dateOfReceipt', 'value': date_of_receipt},
{'question_label': 'isDeliveryInGoodOrder', 'value': 'Yes'},
{'question_label': 'areYouSatisfied', 'value': 'Yes'},
{'question_label': 'additionalDeliveryComments', 'value': good_comment}
]}
response = self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertTrue(mock_process.called)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_create_alerts_integration(self):
purchase_order = PurchaseOrderFactory(order_number=5678)
purchase_order_item = PurchaseOrderItemFactory(purchase_order=purchase_order)
consignee = ConsigneeFactory(name="Liverpool FC")
delivery = DeliveryFactory(consignee=consignee)
DeliveryNodeFactory(item=purchase_order_item, distribution_plan=delivery)
date_of_receipt = self.__get_current_date()
good_comment = "All is good"
data = {
'runnable': delivery.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'No'},
{'question_label': 'dateOfReceipt', 'value': date_of_receipt},
{'question_label': 'isDeliveryInGoodOrder', 'value': 'Yes'},
{'question_label': 'areYouSatisfied', 'value': 'Yes'},
{'question_label': 'additionalDeliveryComments', 'value': good_comment}
]}
response = self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 201)
alert = Alert.objects.get(consignee_name="Liverpool FC", order_number=5678)
self.assertEqual(alert.issue, Alert.ISSUE_TYPES.not_received)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_cancel_existing_runs_when_saving_a_new_set_of_answers(self):
    """Re-posting answers for the same delivery should cancel the first Run:
    two runs exist afterwards, one 'cancelled' and one 'completed'."""
    delivery = DeliveryFactory()
    data = {
        'runnable': delivery.id, 'answers': [
            {'question_label': 'deliveryReceived', 'value': 'Yes'}
        ]}
    self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
    runs = Run.objects.filter(runnable=delivery)
    self.assertEqual(len(runs), 1)
    # second submission for the same runnable
    self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
    runs = Run.objects.filter(runnable=delivery)
    self.assertEqual(len(runs), 2)
    self.assertEqual(len(Run.objects.filter(runnable=delivery, status='cancelled')), 1)
    self.assertEqual(len(Run.objects.filter(runnable=delivery, status='completed')), 1)
    self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_save_delivery_node_answers(self):
    """Answers posted for a delivery node create one Run with one text
    and one multiple-choice answer attached to that node."""
    self.setup_flow_with_questions(Flow.Label.WEB)
    node = DeliveryNodeFactory()
    date_of_receipt = self.__get_current_date()
    data = {
        'runnable': node.id, 'answers': [
            {'question_label': 'deliveryReceived', 'value': 'Yes'},
            {'question_label': 'dateOfReceipt', 'value': date_of_receipt}
        ]}
    self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
    runs = Run.objects.filter(runnable=node)
    self.assertEqual(len(runs), 1)
    self.assertEqual(len(TextAnswer.objects.filter(run__runnable=node)), 1)
    self.assertEqual(len(MultipleChoiceAnswer.objects.filter(run__runnable=node)), 1)
    self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_save_delivery_node_answers_to_web_flow(self):
    """Answers posted through the web endpoint land on questions of the
    WEB flow (not some other flow)."""
    self.setup_flow_with_questions(Flow.Label.WEB)
    node = DeliveryNodeFactory()
    date_of_receipt = self.__get_current_date()
    data = {
        'runnable': node.id, 'answers': [
            {'question_label': 'deliveryReceived', 'value': 'Yes'},
            {'question_label': 'dateOfReceipt', 'value': date_of_receipt}
        ]}
    self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
    web_flow = Flow.objects.get(label=Flow.Label.WEB)
    self.assertEqual(len(TextAnswer.objects.filter(question__flow=web_flow)), 1)
    self.assertEqual(len(MultipleChoiceAnswer.objects.filter(question__flow=web_flow)), 1)
    self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_save_numeric_answers(self):
    """A numeric answer value is stored as a NumericAnswer on the WEB flow."""
    self.setup_flow_with_questions(Flow.Label.WEB)
    web_flow = Flow.objects.filter(label=Flow.Label.WEB).first()
    NumericQuestionFactory(label='quantityDelivered', flow=web_flow)
    node = DeliveryNodeFactory()
    data = {
        'runnable': node.id, 'answers': [
            {'question_label': 'deliveryReceived', 'value': 'Yes'},
            {'question_label': 'quantityDelivered', 'value': '2'}
        ]}
    self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
    self.assertEqual(len(NumericAnswer.objects.filter(question__flow=web_flow)), 1)
    self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_dequeue_next_run_in_the_queue(self):
    """Completing the first delivery's run should start the queued run for
    the second delivery's contact."""
    first_delivery_to_be_answered = DeliveryFactory(track=True)
    contact = {'name': 'Some name', 'phone': '098765433'}
    first_delivery_to_be_answered.build_contact = MagicMock(return_value=contact)
    self._schedule_run_for(first_delivery_to_be_answered)
    second_delivery_to_be_answered = DeliveryFactory(track=True)
    self._schedule_run_for(second_delivery_to_be_answered)
    data = {
        'runnable': first_delivery_to_be_answered.id, 'answers': [
            {'question_label': 'deliveryReceived', 'value': 'Yes'}]
    }
    # grab the queued (not_started) run before answering, so its status
    # change can be re-read afterwards by id
    next_run = RunQueue.objects.filter(
        Q(contact_person_id=second_delivery_to_be_answered.contact_person_id) & Q(
            status='not_started')).order_by(
        '-run_delay').first()
    self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
    first_runs = Run.objects.filter(runnable=first_delivery_to_be_answered)
    next_run = RunQueue.objects.get(id=next_run.id)
    self.assertEqual(len(first_runs), 2)
    self.assertEqual(next_run.status, 'started')
    self.assertTrue(self.mock_distribution_alert_raise.delay.called)
def _get_answer_for(self, answer_type, delivery_id, question_label):
    """Return the first answer of *answer_type* for the given runnable/question."""
    matches = answer_type.objects.filter(run__runnable=delivery_id,
                                         question__label=question_label)
    return matches.first()
def _schedule_run_for(self, runnable):
    """Schedule a flow run for *runnable* unless it already completed one.

    If the contact already has a scheduled run, the runnable is parked in
    RunQueue instead of being scheduled immediately.
    """
    if runnable.completed_run() is None:
        if Run.has_scheduled_run(runnable.contact_person_id):
            # contact is busy - queue with zero delay
            RunQueue.enqueue(runnable, 0)
        else:
            contact = runnable.build_contact()
            task = '231x31231231'  # dummy scheduled-message task id for the test
            Run.objects.create(scheduled_message_task_id=task, runnable=runnable,
                               status=Run.STATUS.scheduled, phone=contact['phone'] if contact else None)
def __get_current_date(self):
    """Return today's date as a 'YYYY-MM-DD' string."""
    return datetime.datetime.now().date().strftime('%Y-%m-%d')
def test_unicef_admin_should_not_have_permission_to_create_web_answer(self):
    """UNICEF admins get 403 when posting web answers."""
    self.log_and_assert_create_web_answer_permission(self.log_unicef_admin_in, HTTP_403_FORBIDDEN)
def test_unicef_editor_should_not_have_permission_to_create_web_answer(self):
    """UNICEF editors get 403 when posting web answers."""
    self.log_and_assert_create_web_answer_permission(self.log_unicef_editor_in, HTTP_403_FORBIDDEN)
def test_unicef_viewer_should_not_have_permission_to_create_web_answer(self):
    """UNICEF viewers get 403 when posting web answers."""
    self.log_and_assert_create_web_answer_permission(self.log_unicef_viewer_in, HTTP_403_FORBIDDEN)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_ip_editor_should_have_permission_to_create_web_answer(self):
    """IP editors may post web answers (201) and the alert task fires."""
    self.log_and_assert_create_web_answer_permission(self.log_ip_editor_in, HTTP_201_CREATED)
    self.assertTrue(self.mock_distribution_alert_raise.delay.called)
def test_ip_viewer_should_not_have_permission_to_create_web_answer(self):
    """IP viewers get 403 when posting web answers."""
    self.log_and_assert_create_web_answer_permission(self.log_ip_viewer_in, HTTP_403_FORBIDDEN)
def log_and_assert_create_web_answer_permission(self, log_func, expected_status_code):
    """Log in with *log_func*, POST a valid web answer payload and assert
    the response status equals *expected_status_code*."""
    log_func()
    self.setup_flow_with_questions(Flow.Label.WEB)
    web_flow = Flow.objects.filter(label=Flow.Label.WEB).first()
    NumericQuestionFactory(label='quantityDelivered', flow=web_flow)
    node = DeliveryNodeFactory()
    request_body = {
        'runnable': node.id, 'answers': [
            {'question_label': 'deliveryReceived', 'value': 'Yes'},
            {'question_label': 'quantityDelivered', 'value': '2'}
        ]}
    response = self.client.post(ENDPOINT_URL, data=json.dumps(request_body), content_type='application/json')
    self.assertEqual(response.status_code, expected_status_code)
| unicefuganda/eums | eums/test/api/test_web_answers_end_point.py | test_web_answers_end_point.py | py | 18,674 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "eums.test.config.BACKEND_URL",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "eums.test.api.authorization.authenticated_api_test_case.AuthenticatedAPITestCase",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number":... |
21334940324 | import logging
import re
import urlparse
# href values are rewritten unless they point at a mailto: link.
# FIX: the original pattern repeated the (?!.*mailto:) lookahead twice; the
# duplicate is redundant and has been removed (same match semantics).
find_href = re.compile(r'\bhref\s*=\s*(?!.*mailto:)("[^"]*"|\'[^\']*\'|[^"\'<>=\s]+)')
# FYI: added a workaround to not to break inline akavita counter script
find_src = re.compile(r'\bsrc\s*=\s*("[^"\']*"|\'[^"\']*\'|[^"\'<>=\s;]{2,})')
# patterns whose first capture group is the URL value to rewrite
PATTERNS = [find_href, find_src]
def fix_urls(document, base_url, pattern):
    """Rewrite site-relative URLs matched by *pattern* so they are prefixed
    with /<base_url>. Matched values are always re-emitted in double quotes."""
    pieces = []
    cursor = 0
    for match in pattern.finditer(document):
        url = match.group(1)
        logging.info("Checking url: %s" % url)
        if url[0] in "\"'":
            url = url.strip(url[0])
        parsed = urlparse.urlparse(url)
        # only touch scheme-less, host-less (site-relative) links
        if parsed.scheme == parsed.netloc == '':
            already_prefixed = url.startswith('/' + base_url) or url.startswith(base_url)
            if not already_prefixed:
                logging.info("Processing url: %s" % url)
                url = '/%s%s' % (base_url, url)
                logging.info("Processed url: %s" % url)
        pieces.append(document[cursor:match.start(1)])
        pieces.append('"%s"' % (url))
        cursor = match.end(1)
    pieces.append(document[cursor:])
    return ''.join(pieces)
def add_subdir_hook():
    """Return the hook list; the hook rewrites rendered-page URLs when the
    'url_subdir' option is set."""
    def replace_hook(options, page):
        if options.get('url_subdir'):
            rendered = page.rendered
            for pattern in PATTERNS:
                rendered = fix_urls(rendered, options['url_subdir'], pattern)
                page.rendered = rendered
    return [replace_hook]
| stachern/bseu_fm | hooks/subdir.py | subdir.py | py | 1,410 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "urlparse.urlparse",
"line_number... |
22088681014 |
from helpers import setup_logger
menu_name = "Hardware test"  # display name of this app
from threading import Event, Thread
from traceback import format_exc
from subprocess import call
from time import sleep
import sys
import os
from ui import Menu, Printer, PrettyPrinter, GraphicsPrinter
from helpers import ExitHelper, local_path_gen
logger = setup_logger(__name__, "warning")

i = None  # input device, set by init_app
o = None  # output device, set by init_app

# Code from downloading a song from http://freemusicarchive.org/
downloaded = Event()  # set once the audio-test MP3 exists on disk
url = "http://wiki.zerophone.org/images/b/b5/Otis_McMusic.mp3"
music_filename = "test.mp3"
local_path = local_path_gen(__name__)
music_path = local_path(music_filename)
def init_app(input, output):
    """Remember the UI objects and ensure the audio-test MP3 is available.

    A missing file is fetched with wget on a daemon thread; the module-level
    `downloaded` event is set once the file is present.
    """
    global i, o
    i = input
    o = output
    if music_filename in os.listdir(local_path('.')):
        downloaded.set()
        return
    def download():
        downloaded.clear()
        logger.debug("Downloading music for hardware test app!")
        call(["wget", url, "-O", music_path])
        downloaded.set()
    worker = Thread(target=download)
    worker.daemon = True
    worker.start()
def callback():
    """Run the hardware self-test sequence: I2C probe, keypad, charger
    detection, RGB LED and audio jack. Any failure is displayed on-screen
    instead of crashing the app.

    NOTE(review): indentation reconstructed from a flattened source; the
    audio-jack section is assumed independent of the IO expander - confirm.
    """
    try:
        # Testing I2C - 0x12 should answer, 0x20 should raise IOError with busy errno
        from smbus import SMBus
        bus = SMBus(1)
        try:
            bus.read_byte(0x12)
        except IOError:
            PrettyPrinter("Keypad does not respond!", i, o)
        else:
            PrettyPrinter("Keypad found!", i, o)
        # Checking IO expander
        expander_ok = False
        try:
            bus.read_byte(0x20)
        except IOError as e:
            if e.errno == 16:
                # EBUSY: a kernel driver owns the address - the expected state
                PrettyPrinter("IO expander OK!", i, o)
                expander_ok = True
            elif e.errno == 121:
                # remote I/O error: nothing responds at 0x20
                PrettyPrinter("IO expander not found!", i, o)
        else:
            # the read succeeded: chip is present but no driver is bound to it
            PrettyPrinter("IO expander driver not loaded!", i, o)
        # Launching splashscreen
        GraphicsPrinter("splash.png", i, o, 2)
        # Launching key_test app from app folder, that's symlinked from example app folder
        PrettyPrinter("Testing keypad", i, o, 1)
        import key_test
        key_test.init_app(i, o)
        key_test.callback()
        # Following things depend on I2C IO expander,
        # which might not be present:
        if expander_ok:
            # Testing charging detection
            PrettyPrinter("Testing charger detection", i, o, 1)
            from zerophone_hw import is_charging
            eh = ExitHelper(i, ["KEY_LEFT", "KEY_ENTER"]).start()
            if is_charging():
                PrettyPrinter("Charging, unplug charger to continue \n Enter to bypass", None, o, 0)
                while is_charging() and eh.do_run():
                    sleep(1)
            else:
                PrettyPrinter("Not charging, plug charger to continue \n Enter to bypass", None, o, 0)
                while not is_charging() and eh.do_run():
                    sleep(1)
            # Testing the RGB LED
            PrettyPrinter("Testing RGB LED", i, o, 1)
            from zerophone_hw import RGB_LED
            led = RGB_LED()
            for color in ["red", "green", "blue"]:
                led.set_color(color)
                Printer(color.center(o.cols), i, o, 3)
            led.set_color("none")
        # Testing audio jack sound
        PrettyPrinter("Testing audio jack", i, o, 1)
        if not downloaded.isSet():
            PrettyPrinter("Audio jack test music not yet downloaded, waiting...", i, o)
            downloaded.wait()
        # NOTE(review): "Track used:" "" is implicit string concatenation -
        # possibly a missing comma between two list items
        disclaimer = ["Track used:" "", "Otis McDonald", "-", "Otis McMusic", "YT AudioLibrary"]
        Printer([s.center(o.cols) for s in disclaimer], i, o, 3)
        PrettyPrinter("Press C1 to restart music, C2 to continue testing", i, o)
        import pygame
        pygame.mixer.init()
        pygame.mixer.music.load(music_path)
        pygame.mixer.music.play()
        continue_event = Event()
        def restart():
            # restart playback from the beginning
            pygame.mixer.music.stop()
            pygame.mixer.init()
            pygame.mixer.music.load(music_path)
            pygame.mixer.music.play()
        def stop():
            # stop playback and let the test continue
            pygame.mixer.music.stop()
            continue_event.set()
        i.clear_keymap()
        i.set_callback("KEY_F1", restart)
        i.set_callback("KEY_F2", stop)
        i.set_callback("KEY_ENTER", stop)
        continue_event.wait()
        # Self-test passed, it seems!
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt
        exc = format_exc()
        PrettyPrinter(exc, i, o, 10)
    else:
        PrettyPrinter("Self-test passed!", i, o, 3, skippable=False)
| LouisPi/piportablerecorder | apps/test_hardware/main.py | main.py | py | 4,539 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "helpers.setup_logger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "threading.Event",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "helpers.local_path_gen",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.listdi... |
15751603227 | from elasticsearch import Elasticsearch, exceptions
import json, time
import itertools
from project import config
class SelectionAnalytics():
    '''
    SelectionAnalytics class
    data analytics backed by Elasticsearch
    '''
    # Elasticsearch connection settings, taken from the project config
    DOMAIN = config.DOMAIN
    LOGIN = config.LOGIN
    PASSWORD = config.PASSWORD
    PORT = config.PORT
    index_name = 'news_analysis'
    client = None

    def __init__(self):
        '''
        Create an Elasticsearch client and verify connectivity.
        On connection failure self.client is reset to None.
        '''
        self.client = Elasticsearch([self.DOMAIN],
                                    http_auth=(self.LOGIN, self.PASSWORD),
                                    scheme="https",
                                    port=self.PORT)
        # Confirming there is a valid connection to Elasticsearch
        try:
            # use the JSON library's dump() method for indentation
            info = json.dumps(self.client.info(), indent=4)
            print("Elasticsearch client info():", info)
        except exceptions.ConnectionError as err:
            print("\nElasticsearch info() ERROR:", err)
            # BUG FIX: the original referenced an undefined name `host` here,
            # raising NameError inside the handler; report the configured
            # domain instead.
            print("\nThe client host:", self.DOMAIN, "is invalid or cluster is not running")
            # mark the client unusable on ConnectionError
            self.client = None

    def get_elements_list(self, element_name):
        '''
        Aggregate document counts grouped by *element_name* since 2020-01-01.

        :param element_name: field to group by (matched on its .keyword)
        :type element_name: str
        :return: up to 18 buckets, sorted by doc_count desc
        :rtype: list of dicts (doc_count & key)
        '''
        res = self.client.search(index=self.index_name, body={
            "size": 0,
            "aggs": {
                "Articles": {
                    "filter": {
                        "range": {
                            "date": {
                                "gte": "2020-01-01T00:00:00.00"
                            }
                        }
                    },
                    "aggs": {
                        "GroupBy": {
                            "terms": {"field": element_name + ".keyword", "size": 10000}
                        }
                    }
                }
            }
        })
        elements_docs = res['aggregations']['Articles']['GroupBy']['buckets']
        # sort by doc_count, descending
        elements_list = [item for item in sorted(elements_docs, key=lambda i: i['doc_count'], reverse=True)]
        # remove unwanted sections (iterating over a slice copy, so the
        # removals are safe); NOTE(review): only the first 18 are inspected
        sections_to_exclude = ['les-decodeurs', 'm-le-mag', 'm-perso', 'm-styles', 'series-d-ete']
        for item in elements_list[:18]:
            if (item['key'] in sections_to_exclude):
                elements_list.remove(item)
        return elements_list[:18]

    def get_custom_corpus(self, section_name, query_size):
        '''
        Build a flat token corpus from documents of one section.

        :param section_name: section to filter on
        :type section_name: str
        :param query_size: maximum number of documents to fetch
        :type query_size: int
        :return: (custom_corpus, total_hits)
        :rtype: tuple (flat token list, total hit count)
        '''
        res = self.client.search(index=self.index_name, body={
            "size": query_size,
            "query": {
                "bool": {
                    "must": {
                        "term": {"section": section_name}
                    },
                },
            },
            "_source": ["doc_token"]
        })
        total_hits = res['hits']['total']['value']
        # collect doc_token lists then flatten them into a single token list
        results_list = [item["_source"]['doc_token'] for item in res['hits']['hits']]
        custom_corpus = list(itertools.chain.from_iterable(results_list))
        return (custom_corpus, total_hits)

    def get_documents(self, string_search, nb_wanted):
        '''
        Full-text match on doc_token.

        :param string_search: tokens to search
        :type string_search: str
        :param nb_wanted: maximum documents wanted
        :type nb_wanted: int
        :return: (hits, nb_wanted, documents_list)
        :rtype: tuple
        '''
        res = self.client.search(index=self.index_name, body={
            "size": nb_wanted,
            "query": {
                "match": {
                    "doc_token": string_search
                },
            },
            "_source": {
                "include": ["author", "date", "link", "section", "title"]
            },
        })
        hits = res['hits']['total']['value']
        documents_list = res['hits']['hits']
        return (hits, nb_wanted, documents_list)

    def get_document_by_id(self, id_doc):
        '''
        Fetch a single document by its Elasticsearch id.

        :param id_doc: document id
        :type id_doc: str
        :return: the document hit
        :rtype: dict
        '''
        res = self.client.search(index=self.index_name, body={
            "size": 1,
            "query": {
                "terms": {
                    "_id": [id_doc]
                },
            },
            "_source": {
                "include": ["author", "content_html", "date", "doc_token", "link", "teaser", "section", "title"]
            },
        })
        doc = res['hits']['hits'][0]
        return doc

    def get_custom_corpus_list(self, section_name, query_size):
        '''
        Like get_custom_corpus but keeps one token list per document.

        :param section_name: section to filter on
        :type section_name: str
        :param query_size: maximum number of documents to fetch
        :type query_size: int
        :return: per-document token lists
        :rtype: list of lists
        '''
        # CONSISTENCY FIX: use self.index_name instead of a hardcoded
        # 'news_analysis' literal (same value)
        res = self.client.search(index=self.index_name, body={
            "size": query_size,
            "query": {
                "bool": {
                    "must": {
                        "term": {"section": section_name}
                    },
                },
            },
            "_source": ["doc_token"]
        })
        results_list = [item["_source"]['doc_token'] for item in res['hits']['hits']]
        return (results_list)

    def count_by_sections(self):
        '''
        Count documents per section (top 9 buckets) with percentages.

        :return: sections_list
        :rtype: list of dicts {'score', 'percent', 'section'}
        '''
        res = self.client.search(index=self.index_name, body={
            "aggs": {
                "sections": {
                    "terms": {"field": "section.keyword"}
                }
            },
            "_source": {
                "include": ["_id", "date", "section"]
            },
        })
        result = res['aggregations']['sections']
        buckets = result['buckets'][:9]
        sections_list = []
        # total docs across the kept buckets
        total_docs = 0
        for item in buckets:
            total_docs += item['doc_count']
        # per-bucket percentage of that total
        for item in buckets:
            doc_percent = round(item['doc_count'] / total_docs * 100)
            sections_list.append({'score': item['doc_count'], 'percent': doc_percent, 'section': item['key']})
        # display names for sections
        # NOTE(review): 'Societe' here lacks the accent used in
        # count_by_dates ('Société') - confirm which spelling the UI expects
        sections_names = {
            'international': 'International',
            'economie': 'Economie',
            'planete': 'Planète',
            'idees': 'Idées',
            'afrique': 'Afrique',
            'politique': 'Politique',
            'societe': 'Societe',
            'culture': 'Culture',
            'sport': 'Sport'
        }
        for item in sections_list:
            if item['section'] in sections_names:
                item['section'] = sections_names[item['section']]
        return (sections_list)

    def count_by_dates(self):
        '''
        Weekly document counts per section (2020 onwards, 2019 excluded).

        :return: one dict per week: {'date': 'YYYYMMDD', <SectionName>: score, ...}
        :rtype: list of dicts
        '''
        res = self.client.search(index=self.index_name, body={
            "aggs": {
                "amount_per_week": {
                    "date_histogram": {
                        "field": "date",
                        "interval": "week",
                        "format": "yyyy-MM-dd"
                    },
                    "aggs": {
                        "sections": {
                            "terms": {"field": "section.keyword"}
                        }
                    },
                }
            },
        })
        res_list = res['aggregations']['amount_per_week']['buckets']
        # sections to keep, with their display names
        sections_names = {
            'international': 'International',
            'economie': 'Economie',
            'planete': 'Planète',
            'idees': 'Idées',
            'afrique': 'Afrique',
            'politique': 'Politique',
            'societe': 'Société',
            'culture': 'Culture',
            'sport': 'Sport'
        }
        data = []
        for item in res_list:
            nb_docs = item['doc_count']
            # skip 2019 buckets
            year = item['key_as_string'][0:4]
            if (year != '2019'):
                date = item['key_as_string'][0:10]
                buckets = item['sections']['buckets']
                sections_scores = []
                # keep known sections only, renamed for display
                for i in buckets:
                    if i['key'] in sections_names:
                        sections_scores.append({'section': sections_names[i['key']], 'score': i['doc_count']})
                # sections absent from this week get an explicit zero
                listed_sections = [element['section'] for element in sections_scores]
                for name in sections_names.values():
                    if name not in listed_sections:
                        sections_scores.append({'section': name, 'score': 0})
                data.append({'date': date, 'nb_docs': nb_docs, 'sections_scores': sections_scores})
        # flatten into one dict per week: {'date': 'YYYYMMDD', section: score, ...}
        data_list = []
        for item in data:
            item_dict = {'date': item['date'].replace('-', '')}
            for element in item['sections_scores']:
                item_dict[element['section']] = element['score']
            data_list.append(item_dict)
        return data_list
class SelectionRelational():
'''
SelectionRelational class
data statistics - Azure SQL
'''
def __init__(self):
'''
Create an Elasticsearch connection object
:param index_name: index name
:type index_name: string
:return: null
'''
return 'hello SelectionRelational' | flabastie/news-analysis | project/queries/selection.py | selection.py | py | 11,437 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "project.config.DOMAIN",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "project.config",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "project.config.LOGIN",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "p... |
70780596348 | from flask import Flask, render_template, request
from modelo import modelagemPredicao
from data import gerarNovosDados
app = Flask(__name__, template_folder='templates', static_folder='static')  # Flask app with explicit template/static dirs
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the prediction page.

    On POST, read the hero picks (and optionally the medal picks) for both
    teams, run the matching model and render the predicted winner with its
    probability. On GET, render the page with default values.
    """
    # defaults shown on a plain GET (and when the POST payload is invalid)
    partidas = 0
    precisaomedalha = 0
    precisaotaxavitoria = 0
    probabilidade = 0
    team = 'RADIANT'
    if request.method == 'POST':
        radiantheroes = request.form.get('radiantheroes')
        direheroes = request.form.get('direheroes')
        radiantmedals = request.form.get('radiantmedals')
        diremedals = request.form.get('diremedals')
        if radiantheroes and direheroes:
            # split comma-separated picks
            radiantheroes = radiantheroes.split(',')
            direheroes = direheroes.split(',')
            # BUG FIX: the medal fields may be missing (None) - the original
            # called .split(',') on them unconditionally, raising
            # AttributeError whenever only heroes were submitted
            radiantmedals = radiantmedals.split(',') if radiantmedals else []
            diremedals = diremedals.split(',') if diremedals else []
            if len(radiantheroes) == 5 and len(direheroes) == 5 and len(radiantmedals) == 5 and len(diremedals) == 5:
                # full input: use the medal-aware model (model 1)
                dados = modelagemPredicao.preprocessamentomedalha(radiantheroes, radiantmedals, direheroes, diremedals)
                print(dados)
                team, probabilidade = modelagemPredicao.predicao(dados, 1)
            elif len(radiantheroes) == 5 and len(direheroes) == 5:
                # heroes only: use the win-rate model (model 2)
                dados = modelagemPredicao.preprocessamentotaxavitoria(radiantheroes, direheroes)
                print(dados)
                team, probabilidade = modelagemPredicao.predicao(dados, 2)
    # model accuracy statistics, shown on every render
    precisaomedalha, precisaotaxavitoria, partidas = modelagemPredicao.precisaomodelos()
    # express accuracies and probability as integer percentages
    precisaomedalha = int(precisaomedalha * 100)
    precisaotaxavitoria = int(precisaotaxavitoria * 100)
    probabilidade = int(probabilidade * 100)
    team = team.upper()
    # team colour used by the template (green for RADIANT, red for DIRE)
    if team == 'RADIANT':
        color = '#64FF56'
    else:
        color = '#d84a4a'
    return render_template("index.html", partidas=partidas, precisaotaxavitoria=precisaotaxavitoria, precisaomedalha=precisaomedalha, chance=probabilidade, team=team, color=color)
@app.route('/atualizarDados')
def atualizarDados():
    """Fetch fresh match data and render the refresh page."""
    # trigger the ingestion of new match data
    gerarNovosDados.gerarNovosDadosPartidas()
    # render the confirmation template
    return render_template("atualizarDados.html")
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "flask.request.form... |
37379251526 | import os
import glob
import numpy as np
import time
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from configs import *
def merge_shp(shp_list, save_dir):
    """Merge the shapefiles in *shp_list* into a single shapefile via ogrmerge.py.

    Args:
        shp_list (list): paths of the shapefiles to merge
        save_dir (str): output directory name, created next to the first input

    Returns:
        str: path of the merged shapefile
    """
    joined_paths = " ".join(shp_list)
    print(joined_paths)
    out_dir = os.path.join(os.path.dirname(shp_list[0]), save_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # the ogrmerge.py path may need to be adjusted per environment
    command = "ogrmerge.py -single -o {}/merged.shp ".format(out_dir) + joined_paths
    print(os.popen(command).read())
    # brief pause to let the merged output settle on disk
    time.sleep(1)
    return out_dir + "/merged.shp"
def trans_shp(fn):
    """Populate an integer 'My_class' field on every feature from its 'DLBM'
    land-use code, using the 7-class grouping; prints the count of features
    that fell into the catch-all class."""
    driver = ogr.GetDriverByName("ESRI Shapefile")
    data_source = driver.Open(fn, 1)
    layer = data_source.GetLayer()
    feature = layer.GetNextFeature()
    unmatched = 0
    class_field = ogr.FieldDefn('My_class', ogr.OFTInteger)
    if layer.GetLayerDefn().GetFieldIndex('My_class') == -1:
        layer.CreateField(class_field)
    # DLBM group -> class code; anything unmatched falls through to 6
    groups = [(田地, 0), (园地, 1), (林地, 2), (建筑用地, 3), (道路, 4), (水体, 5)]
    while feature:
        DLBM = feature.GetField('DLBM')
        for members, code in groups:
            if DLBM in members:
                feature.SetField('My_class', code)
                break
        else:
            feature.SetField('My_class', 6)
            unmatched += 1
        layer.SetFeature(feature)
        feature = layer.GetNextFeature()
    print(unmatched)
    return
def trans_shp_all_class(fn):
    """Populate 'My_class' on every feature from the DLBM lookup tables
    (CORRESPOND / CORRESPOND_LABEL) defined in configs."""
    driver = ogr.GetDriverByName("ESRI Shapefile")
    data_source = driver.Open(fn, 1)
    layer = data_source.GetLayer()
    feature = layer.GetNextFeature()
    class_field = ogr.FieldDefn('My_class', ogr.OFTInteger)
    if layer.GetLayerDefn().GetFieldIndex('My_class') == -1:
        layer.CreateField(class_field)
    while feature:
        DLBM = feature.GetField('DLBM')
        # 56 is the fallback code for DLBM values missing from the lookup
        if DLBM in CORRESPOND:
            code = CORRESPOND_LABEL[CORRESPOND[DLBM]]
        else:
            code = 56
        feature.SetField('My_class', code)
        layer.SetFeature(feature)
        feature = layer.GetNextFeature()
    return
def shp2raster(shapename, output_raster, pixel_size, colormap=None):
    """Rasterize a shapefile, burning its 'My_class' attribute into a byte GeoTIFF.

    Args:
        shapename (str): path of the input shapefile
        output_raster (str): path of the GeoTIFF to create
        pixel_size (float): output pixel size in map units
        colormap: accepted for interface compatibility; currently unused
            (the color-table call is disabled below). The original bound it
            to a dead local `ct`, which has been removed.
    """
    input_shp = ogr.Open(shapename)
    shp_layer = input_shp.GetLayer()
    extent = shp_layer.GetExtent()
    x_min = extent[0]
    x_max = extent[1]
    y_min = extent[2]
    y_max = extent[3]
    x_res = int((x_max - x_min) / pixel_size)
    y_res = int((y_max - y_min) / pixel_size)
    image_type = "GTiff"
    driver = gdal.GetDriverByName(image_type)
    new_raster = driver.Create(output_raster, x_res, y_res, 1, gdal.GDT_Byte)
    new_raster.SetGeoTransform((x_min, pixel_size, 0, y_max, 0, -pixel_size))
    band = new_raster.GetRasterBand(1)
    # band.SetRasterColorTable(colormap)  # re-enable to apply a color table
    band.SetNoDataValue(255)
    band.FlushCache()
    gdal.RasterizeLayer(new_raster, [1], shp_layer, options=["Attribute=My_class"])
    # NOTE(review): output CRS is hard-coded to EPSG:4524 - confirm it matches the inputs
    new_rasterSRS = osr.SpatialReference()
    new_rasterSRS.ImportFromEPSG(4524)
    new_raster.SetProjection(new_rasterSRS.ExportToWkt())
    return
def count_features_by_field(shp_file, field_name):
    """Count shapefile features grouped by the value of *field_name*."""
    driver = ogr.GetDriverByName('ESRI Shapefile')
    data_source = driver.Open(shp_file, 0)
    layer = data_source.GetLayer()
    counts = {}
    for feature in layer:
        value = feature.GetField(field_name)
        counts[value] = counts.get(value, 0) + 1
    return counts
def area_features_by_field(shp_file):
    """Sum SHAPE_Area per DLBM value over all features of a shapefile."""
    driver = ogr.GetDriverByName('ESRI Shapefile')
    data_source = driver.Open(shp_file, 0)
    layer = data_source.GetLayer()
    areas = {}
    for feature in layer:
        dlbm = feature.GetField("DLBM")
        areas[dlbm] = areas.get(dlbm, 0) + feature.GetField("SHAPE_Area")
    return areas
def gdb_to_shp(gdb_file, output_folder):
    """Convert a GDB to UTF-8 shapefiles reprojected EPSG:4490 -> EPSG:4524 via ogr2ogr.

    SECURITY FIX: the original interpolated both paths into the shell string
    unquoted, so a path containing spaces or shell metacharacters would break
    (or inject into) the command. The paths are now shell-quoted; the silent
    best-effort behaviour of os.system is otherwise preserved.
    """
    import shlex  # local import: keeps the module's import block unchanged
    ogr_command = "ogr2ogr -f 'ESRI Shapefile' -lco ENCODING=UTF-8 -s_srs EPSG:4490 -t_srs EPSG:4524 {} {}".format(
        shlex.quote(output_folder), shlex.quote(gdb_file))
    os.system(ogr_command)
def rename_lcpa_copy(shp_dir, target_dir):
    """Copy every file whose name contains 'LCPA' under *shp_dir* into
    *target_dir*, replacing 'LCPA' in the name with the containing
    directory's name.

    FIX: the original ran `os.popen('cp src dst')`, which builds an unquoted
    shell command (breaks on spaces), is non-portable, and does not wait for
    the copy to finish. shutil.copy is synchronous and portable.
    """
    import shutil  # local import: keeps the module's import block unchanged
    for dir, _, file_names in os.walk(shp_dir):
        for file_name in file_names:
            if "LCPA" in file_name:
                source_file = os.path.join(dir, file_name)
                # os.walk yields dirs without a trailing separator, so
                # basename(dir) matches the original dir.split('/')[-1]
                target_name = file_name.replace("LCPA", os.path.basename(dir))
                shutil.copy(source_file, os.path.join(target_dir, target_name))
if __name__ == "__main__":
    # placeholder so the guarded block is not empty; the batch-processing
    # recipes below are kept commented out for reference
    a = 0
    # gdb_dir = "/media/dell/DATA/wy/data/guiyang/地理国情监测/2021/分区/"
    # output_dir = "/media/dell/DATA/wy/data/guiyang/地理国情监测/2021/shape/"
    # if not os.path.exists(output_dir):
    #     os.makedirs(output_dir)
    # gdb_list = os.listdir(gdb_dir)
    # for gdb_name in gdb_list:
    #     print(gdb_name)
    #     gdb_path = os.path.join(gdb_dir, gdb_name)
    #     output_shp_dir = os.path.join(output_dir, gdb_name.split('.')[0])
    #     if not os.path.exists(output_shp_dir):
    #         os.makedirs(output_shp_dir)
    #     gdb_to_shp(gdb_path, output_shp_dir)
    # rename_lcpa_copy("/media/dell/DATA/wy/data/guiyang/地理国情监测/2021/shape/", "/media/dell/DATA/wy/data/guiyang/地理国情监测/2021/LCPA/")
    # merge_shp()
    # data_dir = "J:/GuangdongSHP/splitSHP/merge_shp/"
    # file_list = glob.glob(('{}*.shp'.format(data_dir)))
    # for i, file_name in enumerate(file_list):
    #     print("{}/{}".format(str(i+1), str(len(file_list))))
    #     output_raster = file_name.split(".")[0] + '.tif'
    #     pixel_size = 7.516606439032443e-06
    #     shp2raster(file_name, output_raster, pixel_size)
| faye0078/RS-ImgShp2Dataset | make_dataset/shp_functions.py | shp_functions.py | py | 8,144 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_... |
21041808334 | """Pytorch dataset module"""
import json
from glob import glob
from pathlib import Path
import albumentations as A
import cv2
import numpy as np
import torch
from albumentations.pytorch import ToTensorV2
from torch import Tensor
from torch.utils.data import Dataset
from data.config import DataConfig, keypoint_indices
# Deterministic validation pipeline: letterbox to IMAGE_SIZE (replicating
# border pixels), normalize, convert to tensor.
val_transforms = A.Compose(
    [
        A.LongestMaxSize(max_size=DataConfig.IMAGE_SIZE),
        A.PadIfNeeded(
            min_height=DataConfig.IMAGE_SIZE,
            min_width=DataConfig.IMAGE_SIZE,
            border_mode=cv2.BORDER_REPLICATE,
        ),
        A.Normalize(),
        ToTensorV2(),
    ],
    # keep keypoints even when a transform pushes them off-image
    keypoint_params=A.KeypointParams(format='xy', remove_invisible=False),
    bbox_params=A.BboxParams(format='pascal_voc', label_fields=['classes']),
)
# Training augmentation pipeline: spatial transforms first, then pixel-level
# augmentations, then tensor formatting.
train_transforms = A.Compose(
    [
        # spatial
        A.HorizontalFlip(),
        A.VerticalFlip(),
        A.Affine(mode=cv2.BORDER_REPLICATE),
        A.Perspective(pad_mode=cv2.BORDER_REPLICATE),
        A.Rotate(limit=30, border_mode=cv2.BORDER_REPLICATE),
        A.SmallestMaxSize(max_size=320),
        A.RandomScale(scale_limit=.1),
        A.RandomCrop(
            height=DataConfig.IMAGE_SIZE,
            width=DataConfig.IMAGE_SIZE,
        ),
        # pixel level
        A.RandomBrightnessContrast(p=.15),
        A.AdvancedBlur(p=.15),
        A.ChannelShuffle(p=.15),
        A.MedianBlur(p=.15),
        A.Posterize(p=.15),
        # NOTE(review): p=.015 is an order of magnitude below the other
        # pixel-level probabilities (.15) - confirm it is not a typo
        A.Solarize(p=.015),
        # format data
        A.Normalize(),
        ToTensorV2(),
    ],
    # keep keypoints even when augmentation pushes them off-image
    keypoint_params=A.KeypointParams(format='xy', remove_invisible=False),
    bbox_params=A.BboxParams(format='pascal_voc', label_fields=['classes']),
)
class DeepFashion2Dataset(Dataset):
    """DeepFashion2 detection/keypoint dataset.

    Reads images from ``<base_path>/image/NNNNNN.jpg`` and annotations from
    ``<base_path>/annos/NNNNNN.json`` (1-based file names, so dataset index
    ``i`` maps to file ``i + 1``), applies an albumentations pipeline, and
    returns fixed-size, zero-padded tensors so samples can be batched.
    """

    def __init__(
        self,
        base_path: str,
        transforms: A.Compose,
        max_objects: int,
    ) -> None:
        super().__init__()
        base_path = Path(base_path)
        # NOTE(review): base_path is wrapped in Path twice; harmless but redundant.
        self._base_path = Path(base_path)
        # dataset length = number of jpg files present on disk
        self._length = len(glob(str(self._base_path / 'image/*.jpg')))
        self._transforms = transforms
        # every per-sample tensor is padded/truncated to this many objects
        self._max_objects = max_objects
    def __len__(self) -> int:
        return self._length
    def _pad_classes(self, classes: list[int]) -> Tensor:
        """Pad the class-id list with zeros (class 0 = "no object") to max_objects."""
        classes = torch.LongTensor(classes)
        # NOTE(review): concatenates int64 with int32 — relies on torch.cat
        # type promotion; confirm on the targeted torch version.
        classes = torch.cat(
            [
                classes,
                torch.zeros(
                    self._max_objects - classes.size(0),
                    dtype=torch.int32,
                ),
            ],
        )
        return classes
    def _pad_bboxes(self, bboxes: list[tuple[float]]) -> Tensor:
        """Clip boxes to the image, normalize to [0, 1], pad rows of zeros."""
        bboxes = torch.FloatTensor(bboxes).clip(0, DataConfig.IMAGE_SIZE)
        bboxes /= DataConfig.IMAGE_SIZE
        bboxes = torch.cat(
            [
                bboxes,
                torch.zeros(
                    (self._max_objects - bboxes.size(0), 4),
                    dtype=torch.float32,
                ),
            ],
        )
        return bboxes
    def _pad_keypoints(
        self,
        keypoints: list[list[tuple[float]]],
        classes: Tensor,
    ) -> Tensor:
        """Scatter each object's normalized keypoints into the category's
        slice of the global NUM_KEYPOINTS layout (via keypoint_indices)."""
        keypoints = [
            (
                torch.FloatTensor(keypoint).clip(0, DataConfig.IMAGE_SIZE)
                / DataConfig.IMAGE_SIZE
            )
            for keypoint
            in keypoints
        ]
        result = torch.zeros(
            (self._max_objects, DataConfig.NUM_KEYPOINTS, 2),
            dtype=torch.float32,
        )
        for i, (class_, keypoint) in enumerate(zip(classes, keypoints)):
            class_ = class_.item()
            # class 0 marks padding, so the remaining slots stay zero
            if class_ == 0:
                break
            start, end = keypoint_indices[class_]
            result[i, start:end] = keypoint
        return result
    def _pad_visibilities(
        self,
        visibilities: list[np.ndarray],
        classes: Tensor,
    ) -> Tensor:
        """Same layout as _pad_keypoints; raw visibility flags are scaled
        by /2 (DeepFashion2 uses 0/1/2) into [0, 1]."""
        visibilities = [
            torch.FloatTensor(visibility).reshape(-1, 1) / 2.
            for visibility
            in visibilities
        ]
        result = torch.zeros(
            (self._max_objects, DataConfig.NUM_KEYPOINTS, 1),
            dtype=torch.float32,
        )
        for i, (class_, visibility) in enumerate(zip(classes, visibilities)):
            class_ = class_.item()
            if class_ == 0:
                break
            start, end = keypoint_indices[class_]
            result[i, start:end] = visibility
        return result
    def _getitem(self, index: int) -> tuple[Tensor]:
        """Load, transform and pad a single sample (may raise on bad data)."""
        # create paths
        image_path = self._base_path / f'image/{index + 1:06d}.jpg'
        annotation_path = self._base_path / f'annos/{index + 1:06d}.json'
        # load image and annotation
        image = cv2.imread(str(image_path), cv2.IMREAD_COLOR)
        # channel swap (RGB2BGR and BGR2RGB are the same permutation)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        with open(annotation_path) as f:
            annotation = json.load(f)
        # restructure annotation: landmarks are flat [x, y, v, x, y, v, ...]
        annotation = [
            {
                'bbox': v['bounding_box'],
                'class': v['category_id'],
                'keypoints': np.array(v['landmarks']).reshape(-1, 3)[:, :2],
                'visibilities': np.array(v['landmarks']).reshape(-1, 3)[:, 2],
            }
            for k, v in annotation.items()
            if k.startswith('item')
        ]
        # create keypoint, bbox, and classes lists. (pack keypoints)
        # albumentations needs one flat keypoint list; remember per-object
        # counts so we can unpack afterwards
        bboxes = [item['bbox'] for item in annotation]
        keypoints = np.concatenate([item['keypoints'] for item in annotation])
        keypoints_border = [item['keypoints'].shape[0] for item in annotation]
        classes = [item['class'] for item in annotation]
        visibilities = [item['visibilities'] for item in annotation]
        # apply transform
        transformed = self._transforms(
            image=image,
            bboxes=bboxes,
            keypoints=keypoints,
            classes=classes,
        )
        # separate transformed results
        image = transformed['image']
        bboxes = transformed['bboxes']
        keypoints = transformed['keypoints']
        classes = transformed['classes']
        # unpack keypoints: split the flat list back per object using the
        # cumulative per-object keypoint counts
        keypoints_border = np.cumsum([0] + keypoints_border)
        iterator = zip(keypoints_border[:-1], keypoints_border[1:])
        keypoints = [keypoints[start:end] for start, end in iterator]
        # normalize and fix length of classes, bboxes, keypoints,
        # and visibilities
        classes = self._pad_classes(classes)
        bboxes = self._pad_bboxes(bboxes)
        keypoints = self._pad_keypoints(keypoints, classes)
        visibilities = self._pad_visibilities(visibilities, classes)
        return image, classes, bboxes, keypoints, visibilities
    def __getitem__(self, index: int) -> tuple[Tensor]:
        # Best-effort loading: on any failure fall through to the next index.
        # NOTE(review): if every sample raises this recurses forever —
        # consider logging the exception and bounding the retries.
        try:
            return self._getitem(index)
        except Exception:
            return self[(index + 1) % len(self)]
if __name__ == '__main__':
    # Smoke test: load one sample from a local DeepFashion2 copy and dump
    # the transformed image to /tmp for visual inspection.
    ds = DeepFashion2Dataset(
        base_path='/home/aj/data/DeepFashion2/validation',
        transforms=train_transforms,
        # transforms=val_transforms,
        max_objects=10,
    )
    image, classes, bboxes, keypoints, visibilities = ds[0]
    from torchvision.utils import save_image
    save_image(image, '/tmp/tmp.png')
| mohamad-hasan-sohan-ajini/deep_fashion_2 | data/data_pt.py | data_pt.py | py | 7,291 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "albumentations.Compose",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "albumentations.LongestMaxSize",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "data.config.DataConfig.IMAGE_SIZE",
"line_number": 19,
"usage_type": "attribute"
}... |
21894452141 | from dsa_stack import DSAStack
import sys
from typing import Union
class TowersOfHanoi:
    """Mutable state of a Towers of Hanoi puzzle: stacks of disk sizes."""

    def __init__(self, num_pegs: int, num_disks: int) -> None:
        self.num_pegs = num_pegs
        self.num_disks = num_disks
        # three stacks, each able to hold every disk
        self.pegs = [DSAStack(num_disks) for _ in range(3)]

    def place_disk(self, peg: int, disk: int) -> None:
        """Push `disk` onto peg `peg`, enforcing the size-ordering rule."""
        stack = self.pegs[peg]
        if not stack.is_empty() and disk >= stack.top():
            raise ValueError(
                "Disk of size {} cannot be placed on disk of size {}.".format(disk, stack.top()))
        stack.push(disk)

    def remove_disk(self, peg: int) -> int:
        """Pop and return the top disk of peg `peg`."""
        return self.pegs[peg].pop()

    def move_disk(self, src: int, dst: int) -> None:
        """Move the top disk from peg `src` to peg `dst` (rule-checked)."""
        self.place_disk(dst, self.remove_disk(src))

    def disk_at(self, peg: int, i: int) -> Union[int, None]:
        """Return the disk at index `i` counted from the bottom of `peg`,
        or None if that slot is empty."""
        disks = self.pegs[peg].as_list()
        return disks[-1 - i] if i < len(disks) else None
# Moves n disks from peg src to peg dst (1-indexed).
# Moves n disks from peg src to peg dst (1-indexed).
def solve(n: int, src: int, dst: int) -> None:
    """Build a 3-peg puzzle with n disks on `src`, then solve it
    recursively, printing the configuration after every move."""
    # convert to 0-indexed pegs for internal use
    src -= 1
    dst -= 1
    towers = TowersOfHanoi(3, n)
    # stack disks largest-first so the smallest ends up on top
    for i in range(n, 0, -1):
        towers.place_disk(src, i)
    step = 0
    display_progress(towers, step)
    solve_impl(towers, n, src, dst, step)
# Moves n disks from peg src to peg dst (0-indexed).
# Returns the new step count.
# Moves n disks from peg src to peg dst (0-indexed).
# Returns the new step count.
def solve_impl(towers: TowersOfHanoi, n: int, src: int, dst: int, step: int) -> int:
    """Classic recursive Hanoi: park the n-1 smaller disks on the spare
    peg, move the largest disk, then stack the smaller ones back on top.
    Prints the board after each move and returns the updated step count."""
    if n <= 0:
        raise AssertionError("n must be > 0.")
    spare = 3 - src - dst
    if n > 1:
        # move the n-1 smaller disks out of the way
        step = solve_impl(towers, n - 1, src, spare, step)
    towers.move_disk(src, dst)
    step += 1
    display_progress(towers, step)
    if n > 1:
        # bring the smaller disks back on top of the moved disk
        step = solve_impl(towers, n - 1, spare, dst, step)
    return step
def display_progress(towers: TowersOfHanoi, step: int) -> None:
    """Print an ASCII rendering of the current peg configuration,
    one row per disk level from top to bottom."""
    header = "Step {}:".format(step)
    pad = " " * (len(header) + 2)
    # widest possible disk label, for column alignment
    width = len(str(towers.num_disks))
    print(header)
    for row in range(towers.num_disks - 1, -1, -1):
        cells = []
        for peg in range(towers.num_pegs):
            disk = towers.disk_at(peg, row)
            label = "|" if disk is None else str(disk)
            # right-justify so multi-digit disks stay aligned
            cells.append(label.rjust(width) + " ")
        print(pad + "".join(cells))
    print()
# Command-line entry point: validate the three arguments, then solve.
if len(sys.argv) != 4:
    print("Usage: python {} num_disks src_peg dst_peg".format(sys.argv[0]))
else:
    try:
        num_disks = int(sys.argv[1])
        src = int(sys.argv[2])
        dst = int(sys.argv[3])
    except ValueError:
        print("Parameters must be integers.")
    else:
        if num_disks < 1:
            print("num_disks must be > 0.")
        # Currently needs 8 extra stack frames to run, will require adjustment
        # if implementation changes.
        elif sys.getrecursionlimit() < num_disks + 8:
            print("Solving with num_disks={} would exceed max call stack depth."
                  .format(num_disks))
        elif not 0 < src <= 3:
            print("src_peg must be > 0 and <= 3.")
        elif not 0 < dst <= 3:
            print("dst_peg must be > 0 and <= 3.")
        else:
            print("Solving Towers of Hanoi with {} pegs and {} disks, starting from peg {} and ending at peg {}."
                  .format(3, num_disks, src, dst))
            # a 3-peg solution always takes exactly 2^n - 1 moves
            print("Number of moves required: {}.".format(2 ** num_disks - 1))
            print()
            solve(num_disks, src, dst)
| MC-DeltaT/DSA-Practicals | P2/towers_of_hanoi.py | towers_of_hanoi.py | py | 3,845 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dsa_stack.DSAStack",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "dsa_stack.DSAStack",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "dsa_stack.DSAStack",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "typing.Union... |
13114754891 | import requests
import tkinter.messagebox
# Read one username per line. Using a context manager fixes the original
# resource leak: the file handle returned by open() was never closed.
with open('user.txt', 'r') as _user_file:
    user = _user_file.read().splitlines()
def checking():
    """Probe TikTok's share endpoint for each username in `user` and
    report whether the handle is unregistered, banned, or taken.

    Status is inferred by substring-matching the JSON response body;
    results go to a tkinter popup (new) or stdout (banned/taken).
    """
    for users in user:
        tik = (f'https://m.tiktok.com/node/share/user/@{users}')
        # browser-like headers so the endpoint serves the normal response
        head = {
        'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'accept-encoding':'gzip, deflate, br',
        'accept-language':'en-US,en;q=0.9',
        'cache-control':'max-age=0',
        'cookie':'tt_webid_v2=6930696974879032837; tt_webid=6930696974879032837; tt_csrf_token=d8lRPZdjfD3sgWCKlFHeaq-0',
        'sec-fetch-dest':'document',
        'sec-fetch-mode':'navigate',
        'sec-fetch-site':'none',
        'sec-fetch-user':'?1',
        'upgrade-insecure-requests':'1',
        'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36 OPR/73.0.3856.344',
        }
        Tik = requests.get(tik,headers=head)
        # statusCode 10202 => no such user (name is available)
        if ('"statusCode":10202,"statusMsg":""') in Tik.text:
            tkinter.messagebox.showinfo(title='NewUser',message=users)
        # statusCode 10221 => account banned
        elif ('statusCode":10221') in Tik.text:
            print(f'Status : Banned >> {users}')
        # a pageId field is only present for existing profiles
        elif ('"pageId"') in Tik.text:
            print(f'Taken >> {users}')
checking()
| 8-wrk/TikCheck | Check.py | Check.py | py | 1,463 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox.messagebox.showinfo",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox.messagebox",
"line_number": 23,
"usage_type": "attribute"
},
{... |
31628139132 | # fastapi
from fastapi import APIRouter
from fastapi_sqlalchemy import db
# starlette
from starlette.requests import Request
# models
from server.models import User
# Router for account endpoints; all routes here are mounted under /accounts.
router = APIRouter(
    prefix="/accounts",
    tags=["accounts"],
    dependencies=[],
    responses={
        400: {"description": "Bad request"}
    },
)
@router.get("/profile/")
def profile(request: Request):
    """Return the authenticated user's profile as a dict.

    The user id is read from ``request.state``, which is presumably set
    by an upstream authentication middleware — verify that assumption.
    """
    user_id = request.state.user_id
    user = db.session.query(User).filter(
        User.id == user_id
    ).first()
    # NOTE(review): if no row matches, `user` is None and .dict() raises
    # AttributeError (HTTP 500) — confirm middleware guarantees a valid id.
    return user.dict()
| RajeshJ3/arya.ai | server/accounts/account_controllers.py | account_controllers.py | py | 525 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "starlette.requests.Request",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "fastapi_sqlalchemy.db.session.query",
"line_number": 24,
"usage_type": "call"
},
{
"... |
72510037949 | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, HTML, Div, Row, Column, Fieldset
from crispy_forms.bootstrap import InlineRadios
from django.contrib.auth.forms import PasswordResetForm
class EmailValidationOnForgotPassword(PasswordResetForm):
    """Password-reset form that rejects e-mail addresses that do not
    belong to an active user account."""

    def clean_email(self):
        email = self.cleaned_data['email']
        # case-insensitive match; inactive accounts count as unregistered
        if not User.objects.filter(email__iexact=email, is_active=True).exists():
            msg = ("There is no user registered with the specified E-Mail address.")
            self.add_error('email', msg)
        return email
class UserRegisterForm(UserCreationForm):
    """Sign-up form built on Django's UserCreationForm.

    NOTE(review): `fields` lists only 'password1'; UserCreationForm
    usually renders both password1 and password2 — confirm the second
    (confirmation) field is intentionally omitted.
    """
    class Meta():
        model = User
        fields = ['username', 'email', 'password1']
        # fields = ['username', 'email', 'password1']


# def __init__(self, *args, **kwargs):
#     super(UserRegisterForm, self).__init__(*args, **kwargs)
#     self.fields['username'].widget.attrs.update({'class': 'form-control', 'placeholder': 'username'})
#     self.fields['email'].widget.attrs.update({'class': 'form-control', 'placeholder': 'email'})
#     self.fields['password1'].widget.attrs.update({'class': 'form-control', 'placeholder': 'password'})
#     self.fields['password2'].widget.attrs.update({'class': 'form-control', 'placeholder': 'repeat password'})
""" Update user profile fields """
class UserUpdateForm(forms.ModelForm):
    """Form for editing the basic fields of the built-in User model."""
    class Meta:
        model = User
        fields = ['username', 'email', 'first_name', 'last_name']
""" Update user profile image """
class ProfileUpdateForm(forms.ModelForm):
    """Form for editing the Profile model (avatar image + phone number),
    rendered with a crispy-forms layout."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_class = 'form-group'
        # the template renders its own <form> tag, so crispy must not
        self.helper.form_tag = False
        self.helper.layout = Layout(
            'phone_number'
        )
    class Meta:
        model = Profile
        fields = ['image', 'phone_number']
class ExampleForm(forms.Form):
    """Demo feedback form showing off crispy-forms layout features
    (fieldset legend, raw HTML, row/column grid, inline radios)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.layout = Layout(
            'first arg is the legend of the fieldset',
            'favorite_number',
            'favorite_color',
            'favorite_food',
            HTML("""<p>We use notes to get better, <strong>please help us {{ username }}</strong></p>"""),
            'notes',
            Row(
                Column('name'),
                Column('email'),
                Column(InlineRadios('like_website')),
            ),
            Submit('submit', 'Submit', css_class='button white'),
        )

    # radio choice coerced from "1"/"0" strings to bool
    like_website = forms.TypedChoiceField(
        label = "Do you like this website?",
        choices = ((1, "Yes"), (0, "No")),
        coerce = lambda x: bool(int(x)),
        widget = forms.RadioSelect,
        initial = '1',
        required = True,
    )

    favorite_food = forms.CharField(
        label = "What is your favorite food?",
        max_length = 80,
        required = True,
    )

    favorite_color = forms.CharField(
        label = "What is your favorite color?",
        max_length = 80,
        required = True,
    )

    favorite_number = forms.IntegerField(
        label = "Favorite number",
        required = False,
    )

    notes = forms.CharField(
        label = "Additional notes or feedback",
        required = False,
    )

    name = forms.CharField(
        label = 'What is your name?',
        required=False
    )

    email = forms.EmailField(
        label='What is your email?',
        required=False
    )
| userksv/carsbay | users/forms.py | forms.py | py | 3,773 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.auth.forms.PasswordResetForm",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.filter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_nu... |
15142385428 | import requests
from flask import redirect, url_for, flash
from app.github import bp
from app.github.functions import request_interface
@bp.route('/update-database', methods=['GET', 'POST'])
async def update_database():
    """Fetch the 100 most-starred Python repositories from the GitHub
    search API and pass them to `request_interface` to refresh the DB.

    Redirects home on success or on rate-limit; returns an error string
    otherwise.
    """
    # get all repos sorted by star rating
    # The max number of items per page is 100
    url = 'https://api.github.com/search/repositories?q=language:python&sort=stars&per_page=100'
    # NOTE(review): requests.get is a blocking call inside an async view
    # and will stall the event loop — consider an async HTTP client.
    response = requests.get(url)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # unauthenticated GitHub search is tightly rate-limited; any HTTP
        # error is reported to the user as a rate-limit here
        flash('Rate Limit Exceeded, please wait a little while and try again')
        return redirect(url_for('main.home'))
    response_dict = response.json()
    status = await request_interface(response_dict)
    if status == 200:
        return redirect(url_for('main.home'))
    else:
        return 'Oh no! Something is amiss!'
| Red-Hammer/most-starred-python-repos | app/github/routes.py | routes.py | py | 872 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.flash",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
... |
19325512904 | from statsmodels.tsa.seasonal import seasonal_decompose
from dateutil.parser import parse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the time series; the 'data' column is parsed as datetimes and used
# as the index.
# NOTE(review): the `squeeze` kwarg is deprecated/removed in recent pandas;
# use `.squeeze("columns")` on the result instead when upgrading.
df = pd.read_csv('../timeserie_train.csv',
                 parse_dates=['data'],
                 index_col='data',
                 squeeze=True)
# period=365*24 assumes hourly samples with a yearly cycle — TODO confirm
# Multiplicative Decomposition
result_mul = seasonal_decompose(df, model='multiplicative', period=365*24, extrapolate_trend='freq')
# Additive Decomposition
result_add = seasonal_decompose(df, model='additive',period=365*24, extrapolate_trend='freq')
# Extract the Components ----
# Actual Values = Product of (Seasonal * Trend * Resid)
df_reconstructed_mul = pd.concat([result_mul.seasonal, result_mul.trend, result_mul.resid, result_mul.observed], axis=1)
df_reconstructed_mul.columns = ['seas', 'trend', 'resid', 'actual_values']
df_reconstructed_mul.to_csv('timeserie_decom_mul_train.csv')
#df_reconstructed_mul.head()
# Actual Values = Sum of (Seasonal * Trend * Resid)
df_reconstructed_add = pd.concat([result_add.seasonal, result_add.trend, result_add.resid, result_add.observed], axis=1)
df_reconstructed_add.columns = ['seas', 'trend', 'resid', 'actual_values']
df_reconstructed_add.to_csv('timeserie_decom_add_train.csv')
#df_reconstructed_add.head()
# Plot
result_mul.plot().suptitle('Multiplicative Decompose', fontsize=22)
result_add.plot().suptitle('Additive Decompose', fontsize=22)
plt.show()
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "statsmodels.tsa.seasonal.seasonal_decompose",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "statsmodels.tsa.seasonal.seasonal_decompose",
"line_number": 16,
"usage_type":... |
21099702516 | from sklearn import svm
import sklearn.linear_model.stochastic_gradient as sg
from sklearn.model_selection import GridSearchCV as grid
import numpy
#linear kernel support vector machine using tf-idf vectorizations
class SVM:
    """Linear text classifier on tf-idf features.

    Despite the name, `predict` trains an SGDClassifier (linear SVM via
    stochastic gradient descent) because a kernel SVC was too slow on
    this dataset (see commented-out code below).

    NOTE(review): `sklearn.linear_model.stochastic_gradient` is a private
    module removed in modern scikit-learn, and `n_iter` was renamed to
    `max_iter` — confirm the pinned sklearn version.
    """
    # NOTE(review): class-level attributes are shadowed by the instance
    # attributes set in __init__ and appear to be unused.
    train_X = []
    train_Y = []
    test_X = []
    test_Y = []

    def __init__(self, train_X, train_Y, test_X, test_Y, n_iter, alpha):
        self.n_iter = n_iter
        self.alpha = alpha
        # inputs are presumably pandas Series of token lists — join into
        # whitespace-separated documents for the vectorizer (TODO confirm)
        self.train_X = train_X.apply(lambda x: ' '.join(x)).tolist()
        self.train_Y = train_Y
        self.test_X = test_X.apply(lambda x: ' '.join(x)).tolist()
        self.test_Y = test_Y

    # Convert text to tf-idf vectors and return accuracy obtained from SVM
    def predict(self):
        from sklearn.feature_extraction.text import TfidfVectorizer
        #convert train set to tf-idf vectors
        # fit the vocabulary on train only; test reuses the same vocabulary
        tf_idf = TfidfVectorizer()
        self.train_X = tf_idf.fit_transform(self.train_X)
        self.test_X = tf_idf.transform( raw_documents=self.test_X)
        #SVM very slow, better suited for task but does not scale to large datasets
        # SVM = svm.SVC(kernel='linear', verbose=True)
        # SVM.fit(X=self.train_X, y=self.train_Y)
        # prediction = SVM.predict(self.test_X)
        # accuracy = numpy.mean(prediction == self.test_Y)
        # param_grid = [
        #     {'alpha': [.00001, .0001, .001, .01]}
        # ] # best results for lowest alpha
        SGD = sg.SGDClassifier(verbose=True, n_iter=self.n_iter, alpha=self.alpha)
        # clf = grid(SGD, param_grid, cv=3)
        SGD.fit(X=self.train_X, y=self.train_Y)
        prediction = SGD.predict(self.test_X)
        # fraction of correct test predictions
        accuracy = numpy.mean(prediction == self.test_Y)
        return accuracy
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.stochastic_gradient.SGDClassifier",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.stochastic_gradient",... |
38217506704 |
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.animation as animation
import matplotlib.patches as mpatches
from matplotlib import ticker
from matplotlib import cm
from matplotlib.ticker import FuncFormatter
import numpy as np
from utils.occ_map_utils import load_map, display_occ_map, plot_grid_map_hmm, show_traj, \
black_cm, green_cm, red_cm, blue_cm, greens_cm, greys_cm
from utils.occ_map_utils import show_map
from utils.plot_utils import plot_4d_tensor
# key press to update figure for the next tracking setp
def on_press(event, animation):
    """Toggle play/pause of the animation when the space bar is pressed."""
    if event.key != ' ':
        return
    source = animation.event_source
    # currently paused -> stop the timer; otherwise restart it
    if animation.pause:
        source.stop()
    else:
        source.start()
    animation.pause = not animation.pause
def onclick(event, anim):
    """Handle a mouse click on the tracking figure.

    Converts the click to grid coordinates, highlights that cell on every
    model's plot, and refreshes the accessory figures (motion-pattern
    kernel, network output, velocity distributions) for the model whose
    axes were clicked.
    """
    models = anim.models
    ix, iy = event.xdata, event.ydata
    # world coordinates -> integer grid cell
    coords = np.floor(np.array([ix, iy]) / models[0].map_res).astype(int)
    print("Click at coordinates: {}".format(coords))
    all_axes = [plot.axes for plot in anim.plots]
    # find which subplot (i.e. which model) was clicked; `i` deliberately
    # leaks out of this loop and selects the model below
    for i, ax in enumerate(all_axes):
        # For infomation, print which axes the click was in
        if ax == event.inaxes:
            #print "Click is at filter {}".format(anim.models[i].name)
            break
    # one-hot mask marking the clicked cell, shown on every plot
    clicked = np.zeros_like(models[0].map)
    x, y = coords[0], coords[1]
    clicked[x, y] = 1
    for plot in anim.plots:
        plot.set_axes_data("occupancy_axes", clicked)
    anim.fig.canvas.draw()
    accessories_figures = [anim.nn_output_fig, anim.kernel_fig]
    for fig in accessories_figures:
        if fig is not None:
            fig.clear()
    anim.nn_output_fig.suptitle('Network Output')
    anim.kernel_fig.suptitle('Motion pattern')
    # 6-D kernels are per-cell (x, y, 4-D kernel); otherwise one shared kernel
    if models[i].kernels.ndim == 6:
        kernel = models[i].kernels[x, y]
        condi_prob = models[i].nn_probs[x, y]
    else:
        kernel = models[i].kernels
        condi_prob = None
    plot_4d_tensor(kernel, fig=anim.kernel_fig)
    if condi_prob is not None:
        plot_4d_tensor(condi_prob, fig=anim.nn_output_fig)
    anim.set_axis_ticks(models[i].extent)
    # velocity distributions of the clicked cell at each pipeline stage
    anim.accessories_plots['ma_plot'].set_axes_data("occupancy_axes", np.ones_like(models[i].ma_vel))
    anim.accessories_plots['vel_plot'].set_axes_data("occupancy_axes", models[i].P_Vt_pred[x, y])
    anim.accessories_plots['merge_vel_plot'].set_axes_data("occupancy_axes", models[i].P_Vt_merged[x, y])
    anim.accessories_plots['final_vel_plot'].set_axes_data("occupancy_axes", models[i].P_Vt[x, y])
    accessories_figures += [anim.vel_fig]
    for fig in accessories_figures:
        if fig is not None:
            fig.canvas.draw()
    for k, plot in anim.accessories_plots.items():
        plot.refresh_colorbar()
    # print the occupancy every model assigns to the clicked cell
    model_names = map(lambda model: model.name, models)
    occs = map(lambda model: model.P_Ot[x, y], models)
    for name, occ in zip(model_names, occs):
        print("loc ({}, {}) of model {} has occupancy of {}".format(x, y, name, occ))
class Plot(object):
    """Wrapper around one matplotlib Axes that layers map / occupancy /
    seen images plus optional extra layers and trajectory lines.

    Image layers are created once and refreshed in place through
    `set_axes_data`, keeping per-frame animation updates cheap.
    """

    def __init__(self, axes, map, res, plot_map=True, plot_seen=False, show_text=True, colorbar_on=None, title=None):
        self.axes = axes
        self.map = map  # static occupancy map (2D array)
        self.res = res  # map resolution used for the image extent
        self.plot_seen = plot_seen
        self.plot_map = plot_map
        self.map_axes = None
        self.occupancy_axes = None
        self.ground_truth_axes = None
        self.seen_axes = None
        self.colorbars = []
        self.show_text = show_text
        if title is None:
            title = 'Measurements'
        self.axes.set_title(title)
        if show_text:
            # metric readout (e.g. x-ent / f1 / ap) in the top-right corner
            self.text = self.axes.text(0.92, 0.92, "", bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 5},
                                       transform=self.axes.transAxes, ha="right", color='white', zorder=14)
        self.add_images()
        self.add_colorbar(colorbar_on)

    def add_images(self):
        """Add AxesImages for showing map, occupancy and seen."""
        occupancy = np.zeros(self.map.shape, dtype=float)
        self.occupancy_axes = show_map(occupancy, self.res, cmap=red_cm, ax=self.axes, zorder=11)
        # initialize plots with map
        map_ = self.map if self.plot_map else np.zeros_like(self.map)
        self.map_axes = show_map(map_, self.res, cmap=black_cm, ax=self.axes, zorder=12)
        if self.plot_seen:
            # add seen image
            self.seen_axes = show_map(occupancy, self.res, cmap=black_cm, alpha=0.2, ax=self.axes)

    def set_axes_data(self, axes_name, data, vmin=None, vmax=None):
        """Replace the data of the image layer named `axes_name` and
        rescale its color limits (defaults to the data's min/max)."""
        image_ax = getattr(self, axes_name)
        # rotate so array x/y matches the displayed orientation
        image_ax.set_data(np.rot90(data))
        vmin = vmin if vmin is not None else data.min()
        vmax = vmax if vmax is not None else data.max()
        image_ax.set_clim([vmin, vmax])

    def add_custom_image(self, axes_name, cmap=None, image=None, **kwargs):
        """Add an extra image layer and store it as attribute `axes_name`."""
        if image is None:
            image = np.zeros(self.map.shape, dtype=float)
        image_ax = show_map(image, self.res, cmap=cmap, ax=self.axes, **kwargs)
        setattr(self, axes_name, image_ax)

    def add_colorbar(self, colorbar_on):
        """Attach a colorbar to the layer named `colorbar_on`, if any."""
        if colorbar_on is None:
            return
        image_axes = getattr(self, colorbar_on)
        if image_axes is not None:
            cb = plt.colorbar(image_axes, ax=self.axes, fraction=0.046, pad=0.04)
            tick_locator = ticker.MaxNLocator(nbins=5)
            cb.locator = tick_locator
            self.colorbars.append(cb)

    def set_ylabel(self, text='', **kwargs):
        self.axes.set_ylabel(text, **kwargs)

    def refresh_colorbar(self):
        for cb in self.colorbars:
            cb.update_ticks()

    def add_traj_line(self, num_targets=1):
        """Add 2D Lines for showing trajectories."""
        # BUG FIX: the original assigned a lazy `map` object which, under
        # Python 3, is exhausted after being iterated once — so trajectory
        # lines would only update on the first animation frame. Materialize
        # a real list so `self.lines` can be iterated every frame.
        # (Also dropped an unused `cm.Dark2(...)` color array.)
        self.lines = [self.axes.add_line(Line2D([], [], zorder=14, color='grey'))
                      for _ in range(num_targets)]

    def set_title(self, title):
        self.axes.set_title(title)

    def set_text(self, text):
        self.text.set_text(text)
class TrackingAnimation(animation.TimedAnimation):
    """Animated side-by-side comparison of several BOFUM tracking models.

    Each model gets its own subplot (a `Plot`); every frame feeds the same
    measurement to all models and redraws their occupancy estimates.
    Subclasses hook in via `add_custom_element` / `update_custom_element` /
    `initialize_models`. Space bar pauses; clicking a cell opens
    per-cell diagnostics (see `onclick`).
    """

    def __init__(self, models, num_steps, simulated_data, plot_seen=False, plot_map=True, show_text=True, accessories=None):
        self.num_models = len(models)
        self.models = models
        # all models are assumed to share the first model's map/resolution
        self.map = models[0].map
        self.res = models[0].map_res
        self.num_steps = num_steps
        self.simulated_data = simulated_data
        self.show_map = plot_map
        self.show_seen = plot_seen
        self.show_text = show_text
        self.nn_output_fig = None
        self.kernel_fig = None
        self.vel_fig = None
        self.accessories_plots = None
        # which optional diagnostic figures to create, e.g.
        # "motion_pattern" and/or "velocities"
        self.accessories = accessories
        self.initialize_figure()
        self.initialize_models()
        self.initialize_accessories()
        print(self.accessories_plots)
        self.fig.canvas.mpl_connect('key_press_event', lambda event: on_press(event, self))
        self.fig.canvas.mpl_connect('button_press_event', lambda event: onclick(event, self))
        animation.TimedAnimation.__init__(self, self.fig, interval=500, blit=True, repeat=True, repeat_delay=1000)

    def initialize_figure(self):
        """Create the main figure with one subplot (`Plot`) per model."""
        fig_size = (5 * self.num_models, 5)
        self.fig = plt.figure(figsize=fig_size)
        self.pause = True
        # bind key press event to pause animation
        self.plots = []
        for i in range(self.num_models):
            axes = self.fig.add_subplot(1, self.num_models, i + 1)
            title = self.models[i].name
            # colorbars only make sense for simulated data (known scale)
            colorbar_on = "occupancy_axes" if self.simulated_data else None
            plot = Plot(axes, self.map, self.res, self.show_map, self.show_seen, self.show_text, colorbar_on, title=title)
            self.add_custom_element(plot)
            self.plots.append(plot)
        # centered frame-counter title above the subplots
        self.fig_title_axes = self.fig.add_axes([.4, .9, .2, .05])
        self.fig_title_axes.set_axis_off()
        self.fig_title = self.fig.text(.49, .9, "", transform=self.fig_title_axes.transAxes, fontsize=15, color='r', ha='center')
        if not self.simulated_data:
            self.add_legend()

    def initialize_accessories(self):
        """Create the optional diagnostic figures requested in `self.accessories`."""
        if "motion_pattern" in self.accessories:
            self.nn_output_fig = plt.figure(figsize=(5, 5))
            self.nn_output_fig.suptitle('Network Output')
            self.kernel_fig = plt.figure(figsize=(5, 5))
            self.kernel_fig.suptitle('Motion pattern')
        if "velocities" in self.accessories:
            # four velocity-distribution panels: moving average, predicted,
            # merged, and final
            self.vel_fig = plt.figure(figsize=(12, 3))
            ma_ax = self.vel_fig.add_subplot(141)
            ma_plot = Plot(ma_ax, self.models[0].ma_vel, 1, False, False, False, colorbar_on=None, title=r'$P(V_{ma})$')
            vel_ax = self.vel_fig.add_subplot(142)
            vel_plot = Plot(vel_ax, self.models[0].ma_vel, 1, False, False, False, colorbar_on=None, title=r'$P(V_{pred})$')
            merge_vel_ax = self.vel_fig.add_subplot(143)
            merge_vel_plot = Plot(merge_vel_ax, self.models[0].ma_vel, 1, False, False, False, colorbar_on=None, title=r'$P(V_{merge})$')
            final_vel_ax = self.vel_fig.add_subplot(144)
            final_vel_plot = Plot(final_vel_ax, self.models[0].ma_vel, 1, False, False, False, colorbar_on=None, title='$P(V)$')
            self.accessories_plots = dict(ma_plot=ma_plot, vel_plot=vel_plot,
                                          merge_vel_plot=merge_vel_plot, final_vel_plot=final_vel_plot)

    def set_axis_ticks(self, extent):
        """Label the velocity panels with centered velocity components
        (e.g. -3..3 for extent 7)."""
        if self.vel_fig is not None:
            xlabels = (np.arange(extent) + np.array([-(extent // 2)])).tolist()
            ylabels = xlabels
            def format_fn_x(tick_val, tick_pos):
                if int(tick_val) in range(7):
                    return xlabels[int(tick_val)]
                else:
                    return ''
            def format_fn_y(tick_val, tick_pos):
                if int(tick_val) in range(7):
                    return ylabels[int(tick_val)]
                else:
                    return ''
            # only the first panel gets labeled ticks
            ax = self.vel_fig.get_axes()[0]
            max_extent = float(extent)
            ax.set_xticks(np.arange(.5, max_extent, 1.0))
            ax.set_yticks(np.arange(0.5, max_extent, 1.0))
            ax.xaxis.set_major_formatter(FuncFormatter(format_fn_x))
            ax.yaxis.set_major_formatter(FuncFormatter(format_fn_y))
            ylabel = ax.set_ylabel(r'$V_y$', color='darkred', fontsize=12)
            ylabel.set_rotation(0)
            ax.yaxis.set_label_coords(-0.06, .95)
            ax.set_xlabel(r'$V_x$', color='darkred', fontsize=12)
            ax.xaxis.set_label_coords(1.05, -0.025)
            for ax in self.vel_fig.get_axes()[1:]:
                ax.set_xticks([])
                ax.set_yticks([])

    def add_custom_element(self, plot):
        """Add extra elements to plot. This method has to be overwritten by subclasses. """
        pass

    def update_custom_element(self, idx):
        """ Update custom elements on animation. This method has to be overwritten by subclasses. """

    def initialize_models(self):
        """Initialize BOFUM models. This method has to be overwritten by subclasses."""
        pass

    def _draw_frame(self, framedata):
        """Advance every model one tracking step and redraw all plots."""
        t = self.models[0].t
        t_count = "frame = " + str(t)
        print(t_count)
        self.fig_title.set_text(t_count)
        # all models receive the identical measurement for a fair comparison
        measurement = self.models[0].measurement_at()
        for model in self.models:
            model.tracking_step(measurement=measurement)
        # plot new occupancy
        # shared color limits across models so plots are comparable
        Ot_max = max(map(lambda model: model.P_Ot.max(), self.models))
        Ot_min = min(map(lambda model: model.P_Ot.min(), self.models))
        for i, model in enumerate(self.models):
            self.plots[i].set_axes_data("occupancy_axes", model.P_Ot, Ot_min, Ot_max)
            if self.show_seen:
                seen = model.evaluate_loc_at(t)
                self.plots[i].set_axes_data("seen_axes", seen)
            if self.plots[i].show_text:
                x_ent = model.calc_cross_entropy()
                f1_score = model.calc_f1_score()
                average_precision = model.calc_average_precision()
                self.plots[i].text.set_text("x_ent: {:.3f}, f1: {:.3f}, ap: {:.3f}".format(x_ent, f1_score, average_precision))
            self.update_custom_element(i)
            # if i == self.num_models-1:
            #     self.add_legend()
            self.plots[i].refresh_colorbar()
        # repeat tracking
        # last frame of a cycle: reset all models so `repeat` starts fresh
        if framedata == self.num_steps-1:
            for model in self.models:
                model.reset()

    def new_frame_seq(self):
        return iter(range(self.num_steps))

    def _init_draw(self):
        pass
class TrackingAnimSimulation(TrackingAnimation):
    """Animation over simulated data: targets follow generated trajectories
    that are drawn as grey lines on every model's plot."""

    def __init__(self, models, num_steps, num_targets=1, diagonal=False, plot_map=True, **kwargs):
        self.num_targets = num_targets
        self.diagonal = diagonal
        self.trajs = None
        self.distances = None
        super(TrackingAnimSimulation, self).__init__(models, num_steps, True, plot_map=plot_map, **kwargs)

    def add_custom_element(self, plot):
        # one trajectory line per simulated target
        plot.add_traj_line(self.num_targets)

    def update_custom_element(self, idx):
        """Redraw the last few traversed trajectory points of every target."""
        # add trajectory lines
        truncated_trajs = self.models[0].traversed_traj_at()
        for idx_, line in enumerate(self.plots[idx].lines):
            # show only the 5 most recent points of each trajectory
            xs, ys = truncated_trajs[idx_].T[0][-5:], truncated_trajs[idx_].T[1][-5:]
            line.set_data(xs, ys)

    def initialize_models(self):
        """Initialize the first model (generating trajectories), then seed
        the remaining models with the very same trajectories."""
        self.distances, self.trajs = self.models[0].initialize(self.num_targets, self.num_steps)
        # BUG FIX: the original wrapped this in `map(...)`, which is lazy in
        # Python 3 and was never consumed — models[1:] were never initialized.
        for model in self.models[1:]:
            model.initialize(self.num_targets, self.num_steps,
                             distances=self.distances, trajectories=self.trajs)
class TrackingAnimRealdata(TrackingAnimation):
    """Animation over recorded (or pre-simulated) scenes with ground truth,
    overlaying true-positive / false-negative / false-positive layers."""

    def __init__(self, models, num_steps, scene, plot_map=True, plot_seen=False, simulated_scenes=False, **kwargs):
        self.scene = scene
        self.simulated_scenes = simulated_scenes
        super(TrackingAnimRealdata, self).__init__(models, num_steps, False,
                                                   plot_seen=plot_seen, plot_map=plot_map, **kwargs)

    def update(self, scene, update_num_steps=True):
        """Switch to a new scene: refresh the map layer of every plot and
        re-seed every model with the scene."""
        self.scene = scene
        # BUG FIX: the original used unconsumed `map(...)` calls here; under
        # Python 3 `map` is lazy, so neither the plots nor the models were
        # actually updated. Iterate explicitly instead.
        for plot in self.plots:
            plot.set_axes_data("map_axes", self.scene.static_map)
        for model in self.models:
            model.update(scene)
        if update_num_steps:
            # one animation frame per measurement in the scene
            self.num_steps = len(scene.hits)
            self.frame_seq = self.new_frame_seq()

    def initialize_models(self):
        # BUG FIX: same lazy-`map` problem as in `update`.
        for model in self.models:
            model.initialize(self.scene, not self.simulated_scenes)

    def add_custom_element(self, plot):
        # add false negative axes
        # it shows locations where ground truth is occupied
        # but BOFUM fails to track
        plot.add_custom_image("fn_axes", blue_cm)
        # add true positive axes
        # it shows locations where ground truth is occupied
        # and BOFUM predicts occupancy prob higher than 0
        plot.add_custom_image("tp_axes", greens_cm)
        plot.add_colorbar("tp_axes")
        # add false positive axes
        plot.add_custom_image("fp_axes", red_cm)

    def add_legend(self):
        """Figure-level legend explaining the TP/FN/FP color coding."""
        g_patch = mpatches.Patch(color='g', label='True positive')
        b_patch = mpatches.Patch(color='b', label='False negative')
        o_patch = mpatches.Patch(color='orange', label='False positive')
        plt.legend(handles=[g_patch, b_patch, o_patch], bbox_to_anchor=(1, 1),
                   bbox_transform=self.fig.transFigure)

    def update_custom_element(self, idx):
        """Recompute and redraw the TP/FN/FP layers for model `idx`."""
        t = self.models[0].t - 1
        model = self.models[idx]
        plot = self.plots[idx]
        occupancy_prob = model.P_Ot
        h_max = occupancy_prob.max()/2
        #occupancy_prob = np.where(occupancy_prob>h_max, occupancy_prob, 0)
        ground_truth = model.ground_truth_at(t)
        overlap = np.logical_and(occupancy_prob, ground_truth)
        # if occupany on ground truth location is higher than 0.1,
        # it is not thought as a false negative
        occupancy_temp = np.where(overlap, occupancy_prob, 0)
        #predicted = np.where(occupancy_temp>0.1, 1, 0)
        false_negative = np.where(occupancy_temp>0.1, 0, ground_truth)
        # if model predicts occupancy higher than 0 on ground truth locations,
        # it is thought as a true positive
        true_positive = np.where(overlap, occupancy_prob, 0)
        # if model predicts occupancy higher than 0 on non-ground truth locations,
        # it is thought as a false positive
        false_positive = occupancy_prob.copy()
        false_positive[overlap] = 0
        # only show for occupancies higher than 1/4 highest occupancy
        h_max = false_positive.max() / 4
        false_positive = np.where(false_positive>h_max, false_positive, 0)
        # shared upper color limit across all models
        Ot_max = max(map(lambda model: model.P_Ot.max(), self.models))
        plot.set_axes_data("fn_axes", false_negative, 0, 1)
        plot.set_axes_data("tp_axes", true_positive, 0, Ot_max)
        plot.set_axes_data("fp_axes", false_positive, 0, Ot_max)
        # clear the plain occupancy layer — TP/FP layers replace it here
        plot.set_axes_data("occupancy_axes", np.zeros_like(occupancy_prob))
| stomachacheGE/bofmp | tracking/animation.py | animation.py | py | 17,318 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.animation.pause",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.animation",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.animation.event_source.stop",
"line_number": 20,
"usage_type": "call"
}... |
1396103450 | from django.shortcuts import render, redirect, reverse
from django.http import JsonResponse
from django.forms import ValidationError
from .models import *
import pyshorteners
def index(request):
data = {}
if request.method == "POST":
try:
l = Link()
s = pyshorteners.Shortener()
l.original_url = request.POST["url"]
l.short_url = s.tinyurl.short(l.original_url)
l.full_clean()
l.save()
return redirect(reverse("encode", args=(l.id,)))
except ValidationError as v:
data["error"] = v.message_dict
return render(request, 'pages/index.html', data)
def encode(request, link_id):
l = Link.objects.get(id=link_id)
data = {
"short_url" : l.short_url
}
return JsonResponse(data)
def decode(request, link_id):
l = Link.objects.get(id=link_id)
data = {
"original_url": l.original_url
}
return JsonResponse(data) | jennytoc/url-shortener | url_shortener_app/views.py | views.py | py | 976 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyshorteners.Shortener",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.reverse",
"line_number": 17,
"usage_type": "call"
},
{
"api_nam... |
72994074107 | import torch.nn as nn
import torch
class NetworksFactory:
def __init__(self):
pass
@staticmethod
def get_by_name(network_name, *args, **kwargs):
################ Ours #################
if network_name == 'Ours_Reconstruction':
from networks.Ours_Reconstruction import Net
network = Net(*args, **kwargs)
elif network_name == 'Ours_DeblurOnly':
from networks.Ours_DeblurOnly import Net
network = Net(*args, **kwargs)
else:
raise ValueError("Network %s not recognized." % network_name)
# print(network)
print("Network %s was created: " % network_name)
print('Network parameters: {}'.format(sum([p.data.nelement() for p in network.network.parameters()])))
return network
class NetworkBase(nn.Module):
def __init__(self):
super(NetworkBase, self).__init__()
self._name = 'BaseNetwork'
@property
def name(self):
return self._name
| Lynn0306/LEDVDI | CODES/networks/networks.py | networks.py | py | 1,012 | python | en | code | 20 | github-code | 6 | [
{
"api_name": "networks.Ours_Reconstruction.Net",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "networks.Ours_DeblurOnly.Net",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 28,
"usage_type": "attribute"
},
{
... |
16164892137 | from flask import Flask, request, jsonify, abort, Response, redirect
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from os import environ
import sys
import os
import asyncio
import requests
from invokes import invoke_http
import pika
import amqp_setup
import json
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('dbURL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ENGINE_OPTIONS'] = {'pool_recycle': 299}
db = SQLAlchemy(app)
CORS(app)
verification_URL = environ.get(
'verificationURL') or "http://localhost:6001/verification/"
account_URL = environ.get('accountURL') or "http://localhost:6003/account/"
epayment_URL = environ.get('epaymentURL') or "http://localhost:6203/epayment"
loyalty_URL = environ.get('loyaltyURL') or "http://localhost:6301/loyalty/"
promo_URL = environ.get('promoURL') or "http://localhost:6204/promo/"
queue_URL = environ.get('queueURL') or "http://localhost:6202/queueticket/"
order_URL = environ.get('orderURL') or "http://localhost:6201/order/"
@app.route('/order/get_payment_method/<int:account_id>', methods=['POST'])
async def select_payment_method(account_id):
payment_method1 = request.get_json()
payment_method = payment_method1['payment_method']
check_qid = invoke_http(
queue_URL, method='GET')
if check_qid["code"] == 200:
if len(check_qid["data"]["queues"]) == 0:
queue_id = 1
else:
queue_id = len(check_qid["data"]["queues"]) + 1
else:
queue_id = 1
data = {
"account_id": account_id,
"queue_id": queue_id,
"payment_method": payment_method
}
if (payment_method == "external"):
response = invoke_http(epayment_URL + 'create_checkout_session',
method="POST", json={"account_id": data["account_id"]})
if response:
response["queue_id"] = data["queue_id"]
ini_create_ticket = invoke_http(
order_URL + str(account_id) + "/paying", method='POST', json=data)
if ini_create_ticket["code"] == 201:
return jsonify({
"code": 200,
"data": response,
"queue_id": data["queue_id"]
}), 200
else:
return jsonify({
"code": 405,
"data": response,
"message": "Failed to create ticket"
}), 405
else:
return jsonify({'status': 'error', 'message': 'Failed to create checkout session', 'data': response})
elif (payment_method == "promo"):
promo_json = {
"is_used": 1,
"promo_code": payment_method1["promo_code"]
}
update_promo = invoke_http(
promo_URL + str(account_id), method="PATCH", json=promo_json)
if update_promo["code"] == 200:
ini_create_ticket = invoke_http(
order_URL + str(account_id) + "/paying", method='POST', json=data)
if ini_create_ticket["code"] == 201:
return jsonify({
"code": 200,
"message": "Promo code has been redeemed",
"data": update_promo["data"],
"queue_id": data["queue_id"]
}), 200
else:
return jsonify({
"code": 405,
"message": update_promo["message"]
}), 405
elif (payment_method == "loyalty"):
points = {
"points": 500
}
update_loyalty = invoke_http(
loyalty_URL + str(account_id) + "/redeem", method='PATCH', json=points)
if update_loyalty["code"] == 200:
ini_create_ticket = invoke_http(
order_URL + str(account_id) + "/paying", method='POST', json=data)
if ini_create_ticket["code"] == 201:
return jsonify({
"code": 200,
"message": "Loyalty points have been redeemed",
"data": update_loyalty["data"],
"queue_id": data["queue_id"],
"available_points": update_loyalty["data"]["available_points"]
}), 200
else:
return jsonify({
"code": 405,
"message": update_loyalty["message"],
"available_points": update_loyalty["data"]["available_points"]
}), 405
else:
return "Cannot find payment method"
@app.route("/order/<int:account_id>/paying", methods=['POST'])
def ini_create_ticket(account_id):
# this function initialises the create ticket post
# invoked by one of 3 payment microservice to indicate that it has been paid
if (not request.is_json):
return jsonify({
"code": 404,
"message": "Invalid JSON input: " + str(request.get_data())
}), 404
data = request.get_json()
create_ticket = invoke_http(
queue_URL, method='POST', json=data)
if create_ticket["code"] == 201:
# For User Scenario 3, Update Challenge Status
challenge_message = {
"mission_id": 2,
"code": 201
}
challenge_message.update(create_ticket["data"])
message = json.dumps(challenge_message)
amqp_setup.channel.basic_publish(exchange=amqp_setup.exchangename1, routing_key="challenge.challenge_complete", body=message, properties=pika.BasicProperties(delivery_mode=2))
return jsonify({
"code": 201,
"message": "Queueticket being created",
"data": create_ticket["data"]
}), 201
else:
return jsonify({
"code": 405,
"message": "Queueticket not being created",
"error": create_ticket,
}), 405
@app.patch("/order/<int:account_id>/paid")
def update_order(account_id):
# this function is being invoked by post queue ticket
# indicates that the ticket has been created
if (not request.is_json):
return jsonify({
"code": 404,
"message": "Invalid JSON input: " + str(request.get_data())
}), 404
data = request.get_json()
update_account = invoke_http(
account_URL + str(account_id), method='PATCH', json=data)
if update_account["code"] == 200:
account_result = invoke_http(
verification_URL + "account/" + str(data["account_id"]), method='GET')
notification_message = {
"type": "queueticket",
"account_id": data["account_id"],
"first_name": account_result["data"]["first_name"],
"phone_number": account_result["data"]["phone"],
"payment_method": data["payment_method"],
"queue_id": data["queue_id"],
"message": "You have successfully created a queueticket."
}
message = json.dumps(notification_message)
amqp_setup.channel.basic_publish(exchange=amqp_setup.exchangename, routing_key="notification.sms",
body=message, properties=pika.BasicProperties(delivery_mode=2))
return jsonify({
"code": 200,
"message": "Account updated successfully (is express)",
"queue_id": data["queue_id"]
}), 200
else:
return jsonify({
"code": 405,
"message": "Order not updated"
}), 405
@app.route("/order/<int:queue_id>/used", methods=['PATCH'])
def ticket_used(queue_id):
if (not request.is_json):
return jsonify({
"code": 404,
"message": "Invalid JSON input: " + str(request.get_data())
}), 404
data = request.get_json()
ticket_update = invoke_http(
queue_URL + str(data["queue_id"]), method='PATCH', json=data)
if ticket_update["code"] == 200:
update_is_prio = {
"is_priority": 0
}
account_res = invoke_http(
account_URL + str(account_URL), method='PATCH', json=update_is_prio)
if account_res["code"] == 200:
return jsonify({
"code": 200,
"message": "Ticket used successfully"
}), 200
account_result = invoke_http(
verification_URL + "account/" + str(ticket_update["data"]["account_id"]), method='GET')
notification_message = {
"type": "use_queue",
"account_id": ticket_update["data"]["account_id"],
"first_name": account_result["data"]["first_name"],
"phone_number": account_result["data"]["phone"],
"payment_method": ticket_update["data"]["payment_method"],
"queue_id": ticket_update["data"]["queue_id"],
"message": "You have redeemed your queue ticket."
}
message = json.dumps(notification_message)
amqp_setup.channel.basic_publish(exchange=amqp_setup.exchangename, routing_key="notification.sms",
body=message, properties=pika.BasicProperties(delivery_mode=2))
return jsonify({
"code": 200,
"message": "Ticket used successfully",
"data": ticket_update["data"]
}), 200
else:
return jsonify({
"code": 405,
"message": ticket_update["message"]
}), 405
if __name__ == '__main__':
app.run(host='0.0.0.0', port=6201, debug=True) | ESDeezknee/ESDeezknee | order/order.py | order.py | py | 9,606 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
... |
10117546059 | import pytest
from dao.genre import GenreDAO
from service.genre import GenreService
class TestGenreService:
@pytest.fixture(autouse=True)
def genre_service(self, genre_Dao: GenreDAO):
self.genre_service = GenreService(genre_Dao)
def test_get_one(self):
certain_genre = self.genre_service.get_one(1)
assert certain_genre is not None
assert certain_genre.id == 1
assert certain_genre.name == 'horror'
def test_get_all(self):
all_genres = self.genre_service.get_all(None)
assert all_genres is not None
assert type(all_genres) == list
| AgzigitovOskar/CR_4_Agzigitov | tests/service_tests/genre_service.py | genre_service.py | py | 618 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dao.genre.GenreDAO",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "service.genre.GenreService",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 8,
"usage_type": "call"
}
] |
37974828359 | '''
Program to be called from cron for working with lights - on and off
This is a wrapper for the Client, handling command line parameters
Author: Howard Webb
Date: 2/10/2021
'''
import argparse
from exp import exp
from GrowLight import GrowLight
parser = argparse.ArgumentParser()
# list of acceptable arguments
parser.add_argument("-a", help="Send a light command (on, off, ...", type=str)
args = parser.parse_args()
#print(args)
gl = GrowLight()
if args.a == "on":
gl.on()
elif args.a == "off":
gl.off()
| webbhm/GBE-Digital | python/Light_Switch.py | Light_Switch.py | py | 538 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "GrowLight.GrowLight",
"line_number": 17,
"usage_type": "call"
}
] |
32644084947 | import maya.cmds as cmds
import pymel.core as pm
from mgear.core import attribute
ATTR_SLIDER_TYPES = ["long", "float", "double", "doubleLinear", "doubleAngle"]
DEFAULT_RANGE = 1000
# TODO: filter channel by color. By right click menu in a channel with color
def init_table_config_data():
"""Initialize the dictionary to store the channel master table data
Items are the channels or attributes fullname in a list
items_data is a dictionary with each channel configuration, the keys is the
fullName
Returns:
dict: configuration dictionary
"""
config_data = {}
config_data["channels"] = []
config_data["channels_data"] = {}
return config_data
def init_channel_master_config_data():
"""Initialize the dictionary to store channel master tabs configuration"""
config_data = {}
config_data["tabs"] = []
config_data["tabs_data"] = {}
config_data["current_tab"] = 0
return config_data
def get_keyable_attribute(node):
"""Get keyable attributes from node
Args:
node (str): name of the node that have the attribute
Returns:
list: list of keyable attributes
"""
if cmds.nodeType(node) == "blendShape":
attrs = cmds.listAttr("{}.w".format(node), m=True)
else:
attrs = cmds.listAttr(node, ud=False, k=True)
return attrs
def get_single_attribute_config(node, attr):
"""Summary
Args:
node (str): name of the node that have the attribute
attr (str): attribute name
Returns:
dict: attribute configuration
"""
config = {}
# config["ctl"] = node
# config["ctl"] = pm.NameParser(node).stripNamespace().__str__()
config["ctl"] = node
config["color"] = None # This is a place holder for the channel UI color
try:
config["type"] = cmds.attributeQuery(attr, node=node, attributeType=True)
except:
return
# check it the attr is alias
alias = cmds.aliasAttr(node, q=True)
if alias and attr in alias:
config["niceName"] = attr
config["longName"] = attr
else:
config["niceName"] = cmds.attributeQuery(
attr, node=node, niceName=True
)
config["longName"] = cmds.attributeQuery(
attr, node=node, longName=True
)
config["fullName"] = config["ctl"] + "." + config["longName"]
if config["type"] in ATTR_SLIDER_TYPES:
if cmds.attributeQuery(attr, node=node, maxExists=True):
config["max"] = cmds.attributeQuery(attr, node=node, max=True)[0]
else:
config["max"] = DEFAULT_RANGE
if cmds.attributeQuery(attr, node=node, minExists=True):
config["min"] = cmds.attributeQuery(attr, node=node, min=True)[0]
else:
config["min"] = DEFAULT_RANGE * -1
config["default"] = cmds.attributeQuery(
attr, node=node, listDefault=True
)[0]
elif config["type"] in ["enum"]:
items = cmds.attributeQuery(attr, node=node, listEnum=True)[0]
config["items"] = [x for x in items.split(":")]
# Get value at channel creation time
# this value can be different from the default value
config["creationValue"] = cmds.getAttr("{}.{}".format(node, attr))
return config
def get_attributes_config(node):
"""Get the configuration to all the keyable attributes
Args:
node (str): name of the node that have the attribute
Returns:
dict: All keyable attributes configuration
"""
# attrs_config = {}
keyable_attrs = get_keyable_attribute(node)
config_data = init_table_config_data()
if keyable_attrs:
# attrs_config["_attrs"] = keyable_attrs
for attr in keyable_attrs:
config = get_single_attribute_config(node, attr)
# attrs_config[attr] = config
if config:
config_data["channels"].append(config["fullName"])
config_data["channels_data"][config["fullName"]] = config
return config_data
def get_table_config_from_selection():
oSel = pm.selected()
attrs_config = None
namespace = None
if oSel:
namespace = oSel[-1].namespace()
ctl = oSel[-1].name()
attrs_config = get_attributes_config(ctl)
return attrs_config, namespace
def get_ctl_with_namespace(attr_config, namespace=None):
if namespace:
ctl = (
namespace
+ pm.NameParser(attr_config["ctl"]).stripNamespace().__str__()
)
else:
ctl = attr_config["ctl"]
return ctl
def reset_attribute(attr_config, namespace=None):
"""Reset the value of a given attribute for the attribute configuration
Args:
attr_config (dict): Attribute configuration
"""
ctl = get_ctl_with_namespace(attr_config, namespace=None)
obj = pm.PyNode(ctl)
attr = attr_config["longName"]
attribute.reset_selected_channels_value(objects=[obj], attributes=[attr])
def reset_creation_value_attribute(attr_config, namespace=None):
"""Reset the value of a given attribute for the attribute configuration
Args:
attr_config (dict): Attribute configuration
"""
ctl = get_ctl_with_namespace(attr_config, namespace=None)
attr = attr_config["longName"]
fullname_attr = "{}.{}".format(ctl, attr)
if "creationValue" in attr_config.keys():
val = attr_config["creationValue"]
cmds.setAttr(fullname_attr, val)
else:
pm.displayWarning(
"Initial Creation Value was not originally stored for {}".format(
fullname_attr
)
)
def sync_graph_editor(attr_configs, namespace=None):
"""sync the channels in the graph editor
Args:
attr_configs (list): list of attribute configuration
"""
# select channel host controls
ctls = []
for ac in attr_configs:
ctl = ac["ctl"]
if ctl not in ctls:
if namespace:
ctl = namespace + pm.NameParser(ctl).stripNamespace().__str__()
ctls.append(ctl)
pm.select(ctls, r=True)
# filter curves in graph editor\
cnxs = []
for ac in attr_configs:
attr = ac["fullName"]
if namespace:
attr = namespace + pm.NameParser(attr).stripNamespace().__str__()
cnxs.append(attr)
def ge_update():
pm.selectionConnection("graphEditor1FromOutliner", e=True, clear=True)
for c in cnxs:
cmds.selectionConnection(
"graphEditor1FromOutliner", e=True, select=c
)
# we need to evalDeferred to allow grapheditor update the selection
# highlight in grapheditor outliner
pm.evalDeferred(ge_update)
################
# Keyframe utils
################
def current_frame_has_key(attr):
"""Check if the attribute has keyframe in the current frame
Args:
attr (str): Attribute fullName
Returns:
bool: Return true if the attribute has keyframe in the current frame
"""
k = pm.keyframe(attr, query=True, time=pm.currentTime())
if k:
return True
def channel_has_animation(attr):
"""Check if the current channel has animaton
Args:
attr (str): Attribute fullName
Returns:
bool: Return true if the attribute has animation
"""
k = cmds.keyframe(attr, query=True)
if k:
return True
def get_anim_value_at_current_frame(attr):
"""Get the animation value in the current framwe from a given attribute
Args:
attr (str): Attribute fullName
Returns:
bol, int or float: animation current value
"""
val = cmds.keyframe(attr, query=True, eval=True)
if val:
return val[0]
def set_key(attr):
"""Keyframes the attribute at current frame
Args:
attr (str): Attribute fullName
"""
cmds.setKeyframe(attr)
def remove_key(attr):
"""Remove the keyframe of an attribute at current frame
Args:
attr (str): Attribute fullName
"""
pm.cutKey(attr, clear=True, time=pm.currentTime())
def remove_animation(attr):
"""Remove the animation of an attribute
Args:
attr (str): Attribute fullName
"""
pm.cutKey(attr, clear=True)
def _go_to_keyframe(attr, which):
frame = cmds.findKeyframe(attr, which=which)
cmds.currentTime(frame, e=True)
def next_keyframe(attr):
_go_to_keyframe(attr, which="next")
def previous_keyframe(attr):
_go_to_keyframe(attr, which="previous")
def value_equal_keyvalue(attr, current_time=False):
"""Compare the animation value and the current value of a given attribute
Args:
attr (str): the attribute fullName
Returns:
bool: Return true is current value and animation value are the same
"""
anim_val = get_anim_value_at_current_frame(attr)
if current_time:
val = cmds.getAttr(attr, time=current_time)
else:
val = cmds.getAttr(attr)
if anim_val == val:
return True
| mgear-dev/mgear4 | release/scripts/mgear/animbits/channel_master_utils.py | channel_master_utils.py | py | 9,005 | python | en | code | 209 | github-code | 6 | [
{
"api_name": "maya.cmds.nodeType",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "maya.cmds.listAttr",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line... |
36154798504 | import streamlit as st
st.set_option('deprecation.showPyplotGlobalUse', False)
# for manipulation
import pandas as pd
import numpy as np
# for data visualization
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="ticks")
plt.style.use("dark_background")
#sns.set_style('whitegrid')
# to filter warnings
import warnings
warnings.filterwarnings('ignore')
# for interactivity
from ipywidgets import interact
st.title("Agricultural Production Optimization Engine")
# Reading the dataset
data= pd.read_csv('data.csv')
x= data.drop(['label'], axis=1)
y= data['label']
# let's create training and testing sets for validation of results
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test= train_test_split(x,y, test_size=0.2, random_state=42)
# let's create predictive model
from sklearn.linear_model import LogisticRegression
LogReg = LogisticRegression()
LogReg.fit(x_train,y_train)
from sklearn import linear_model
from sklearn.ensemble import RandomForestClassifier
random_forest= RandomForestClassifier(n_estimators=100)
random_forest.fit(x_train, y_train)
from sklearn.tree import DecisionTreeClassifier
DecTree= DecisionTreeClassifier()
DecTree.fit(x_train,y_train)
from sklearn.neighbors import KNeighborsClassifier
KNN= KNeighborsClassifier()
KNN.fit(x_train, y_train)
from sklearn.naive_bayes import GaussianNB
NB= GaussianNB()
NB.fit(x_train, y_train)
from sklearn.svm import SVC
svm = SVC()
svm.fit(x_train, y_train)
Nv = st.sidebar.radio("Navigator", ["Home","Prediction","Contribute"])
if Nv== "Home":
#st.write("### Home")
st.image("app.png", width= 700)
if st.checkbox("Show Dataset"):
st.table(data)
st.subheader("\nSoil Requirement of Each Crop")
if st.checkbox("Show Soil Requirement Graphs"):
condition = st.selectbox("Conditions",['Nitrogen Requirement','Phosphorous Requirement','Potassium Requirement','Temperature Requirement',
'PH Requirement','Humidity Requirement','Rainfall Requirement'])
if condition == "Nitrogen Requirement":
plt.figure(figsize=(5, 3))
sns.barplot(data['label'], data["N"])
plt.xlabel('\nCrops', fontsize=14)
plt.xticks(rotation=90)
plt.ylabel("Nitrogen Requirement", fontsize=12)
st.pyplot()
if condition == "Phosphorous Requirement":
plt.figure(figsize=(5, 3))
sns.barplot(data['label'], data["P"])
plt.xlabel('\nCrops', fontsize=14)
plt.xticks(rotation=90)
plt.ylabel("Phosphorous Requirement", fontsize=12)
st.pyplot()
if condition == "Potassium Requirement":
plt.figure(figsize=(5, 3))
sns.barplot(data['label'], data["K"])
plt.xlabel('\nCrops', fontsize=14)
plt.xticks(rotation=90)
plt.ylabel("Potassium Requirement", fontsize=12)
st.pyplot()
if condition == "Temperature Requirement":
plt.figure(figsize=(5, 3))
sns.barplot(data['label'], data["temperature"])
plt.xlabel('\nCrops', fontsize=14)
plt.xticks(rotation=90)
plt.ylabel("Temperature Requirement", fontsize=12)
st.pyplot()
if condition == "Humidity Requirement":
plt.figure(figsize=(5, 3))
sns.barplot(data['label'], data["humidity"])
plt.xlabel('\nCrops', fontsize=14)
plt.xticks(rotation=90)
plt.ylabel("Humidity Requirement", fontsize=12)
st.pyplot()
if condition == "PH Requirement":
plt.figure(figsize=(5, 3))
sns.barplot(data['label'], data["ph"])
plt.xlabel('\nCrops', fontsize=14)
plt.xticks(rotation=90)
plt.ylabel("PH Requirement", fontsize=12)
st.pyplot()
if condition == "Rainfall Requirement":
plt.figure(figsize=(5, 3))
sns.barplot(data['label'], data["rainfall"])
plt.xlabel('\nCrops', fontsize=14)
plt.xticks(rotation=90)
plt.ylabel("Rainfall Requirement", fontsize=12)
st.pyplot()
st.subheader("\nDistribution of Agricultural Conditions")
if st.checkbox("Show Distribution Graphs"):
con = st.selectbox("Conditions",['N','P','K','Temperature','PH','Humidity','Rainfall'])
if con == "N":
plt.figure(figsize=(5, 3))
sns.distplot(data["N"])
plt.xlabel("\nNitrogen", fontsize=14)
plt.ylabel('Density',fontsize=14)
plt.axvline(data["N"].min(), color='y', label='Minimum')
plt.axvline(data["N"].mean(), color='orange', label='Mean')
plt.axvline(data["N"].max(), color='grey', label='Maximum')
plt.legend()
st.pyplot()
if con == "P":
plt.figure(figsize=(5, 3))
sns.distplot(data["P"])
plt.xlabel("\nPhosphourous", fontsize=14)
plt.ylabel('Density',fontsize=14)
plt.axvline(data["P"].min(), color='y', label='Minimum')
plt.axvline(data["P"].mean(), color='orange', label='Mean')
plt.axvline(data["P"].max(), color='grey', label='Maximum')
plt.legend()
st.pyplot()
if con == "K":
plt.figure(figsize=(5, 3))
sns.distplot(data["K"])
plt.xlabel("\nPotassium", fontsize=14)
plt.ylabel('Density',fontsize=14)
plt.axvline(data["K"].min(), color='y', label='Minimum')
plt.axvline(data["K"].mean(), color='orange', label='Mean')
plt.axvline(data["K"].max(), color='grey', label='Maximum')
plt.legend()
st.pyplot()
if con == "Temperature":
plt.figure(figsize=(5, 3))
sns.distplot(data["temperature"])
plt.xlabel("\nTemperature", fontsize=14)
plt.ylabel('Density',fontsize=14)
plt.axvline(data["temperature"].min(), color='y', label='Minimum')
plt.axvline(data["temperature"].mean(), color='orange', label='Mean')
plt.axvline(data["temperature"].max(), color='grey', label='Maximum')
plt.legend()
st.pyplot()
if con == "PH":
plt.figure(figsize=(5, 3))
sns.distplot(data["ph"])
plt.xlabel("\nPH", fontsize=14)
plt.ylabel('Density',fontsize=14)
plt.axvline(data["ph"].min(), color='y', label='Minimum')
plt.axvline(data["ph"].mean(), color='orange', label='Mean')
plt.axvline(data["ph"].max(), color='grey', label='Maximum')
plt.legend()
st.pyplot()
if con == "Humidity":
plt.figure(figsize=(5, 3))
sns.distplot(data["humidity"])
plt.xlabel("\nHumidity", fontsize=14)
plt.ylabel('Density',fontsize=14)
plt.axvline(data["humidity"].min(), color='y', label='Minimum')
plt.axvline(data["humidity"].mean(), color='orange', label='Mean')
plt.axvline(data["humidity"].max(), color='grey', label='Maximum')
plt.legend()
st.pyplot()
if con == "Rainfall":
plt.figure(figsize=(5, 3))
sns.distplot(data["rainfall"])
plt.xlabel("\nRainfall", fontsize=14)
plt.ylabel('Density',fontsize=14)
plt.axvline(data["rainfall"].min(), color='y', label='Minimum')
plt.axvline(data["rainfall"].mean(), color='orange', label='Mean')
plt.axvline(data["rainfall"].max(), color='grey', label='Maximum')
plt.legend()
st.pyplot()
if Nv == "Prediction":
st.subheader("\nCrop Predictor\n")
N = st.number_input("\nNitrogen Value: ",50.00, step=0.10)
P = st.number_input("Phosphorous Value: ", 50.00 ,step=0.10)
K = st.number_input("Potassium Value: ", 50.00 ,step=0.10)
T = st.number_input("Tempreture: ", 25.00 ,step=0.10)
H = st.number_input("Humidity: ", 50.00 ,step=0.10)
PH = st.number_input("PH Value: ", 7.00 ,step=0.10)
R = st.number_input("Rainfall: ", 200.00 ,step=0.10)
st.write("\n\n\n")
op=st.selectbox("Choose ML Algorithm",['Random Forest','Logistic Regression', 'Decision Tree','KNN', 'Naive Bayes', 'SVM'])
st.write("\n\n\n")
if st.button("Predict"):
if op=="Logistic Regression":
y_pred_LR= LogReg.predict([[N, P, K, T, H, PH, R]])
st.subheader(f"\nPredicted Crop by using Logistic Regression is:")
st.success(y_pred_LR)
if op=="Random Forest":
y_pred_RF= random_forest.predict([[N, P, K, T, H, PH, R]])
st.subheader(f"\nPredicted Crop by using Random Forest is:")
st.success(y_pred_RF)
if op=="Decision Tree":
y_pred_DT= DecTree.predict([[N, P, K, T, H, PH, R]])
st.subheader(f"\nPredicted Crop by using Decision Tree is:")
st.success(y_pred_DT)
if op=="KNN":
y_pred_KNN= DecTree.predict([[N, P, K, T, H, PH, R]])
st.subheader(f"\nPredicted Crop by using KNN is:")
st.success(y_pred_KNN)
if op=="Naive Bayes":
y_pred_NB= NB.predict([[N, P, K, T, H, PH, R]])
st.subheader(f"\nPredicted Crop by using Naive Bayes is:")
st.success(y_pred_NB)
if op=="SVM":
y_pred_SVM= svm.predict([[N, P, K, T, H, PH, R]])
st.subheader(f"\nPredicted Crop by using SVM is:")
st.success(y_pred_SVM)
if Nv == "Contribute":
st.subheader("Contribute to our Dataset")
N = st.number_input("Nitrogen Value: ", 0.00, 150.00, 50.00, step=0.5)
P = st.number_input("Phosphorous Value: ", 0.00, 150.00, 50.00, step=0.5)
K = st.number_input("Potassium Value: ", 0.00, 120.00, 50.00, step=0.5)
T = st.number_input("Tempreture: ", 0.00, 60.00, 25.00, step=0.5)
H = st.number_input("Humidity: ", 10.00, 100.00, 50.00, step=0.5)
PH = st.number_input("PH Value: ", 0.00, 10.00, 7.00, step=0.5)
R = st.number_input("Rainfall: ", 20.00, 300.00, 200.00, step=0.5)
crop = st.text_input("Crop: ")
if st.button("Contribute"):
to_add= {"N":[N], "P":[P], "K":[K], "temperature":[T], "humidity":[H], "ph":[PH], "rainfall":[R], "label":[crop]}
to_add= pd.DataFrame(to_add)
to_add.to_csv("app.csv", mode='a', header=False, index=False)
st.success("Thanks for Your Contribution")
| Jkauser/Agricultural-Production-Optimization-Engine | app.py | app.py | py | 10,888 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "streamlit.set_option",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "seaborn.set",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotli... |
38928831481 | import os
from dotenv import load_dotenv
import requests
from lxml import etree
import re
from postgres import cursor, connection
from slugify import slugify
load_dotenv()
# --------------------------
# link đến trang hình ảnh của chapter
nettruyen = os.getenv("PUBLIC_NETTRUYEN_URL")
def openWebsite(domain: str):
headersList = {
"Accept": "*/*",
"User-Agent": "Thunder Client (https://www.thunderclient.com)"
}
response = requests.request("GET", domain, data="", headers=headersList)
return response
def crawlChapters(crawl_id: str):
# crawl_id là id của truyện tranh để truy vấn và lấy ra danh sách chhapter
# Gọi api domain lấy danh sách chương
response = openWebsite(nettruyen + "Comic/Services/ComicService.asmx/ProcessChapterList?comicId=" + crawl_id)
return response.json() # Assuming the response is in JSON format
def updateChapter(comic_id: str, crawl_id: str):
    # comic_id: id in the `comics` table; crawl_id: chapter_id in the `crawls` table.
    # Collect the chapter data from the source site.
    data = crawlChapters(crawl_id)
    if len(data['chapters']):
        for chap in data['chapters']:
            # Insert each chapter into the db.
            cursor.execute("INSERT INTO public.chapters(comic_id, title, crawl_id) "
                           "VALUES (%s, %s, %s) ON CONFLICT (crawl_id) DO NOTHING",
                           # ON CONFLICT ensures inserted rows are never duplicated.
                           (comic_id, chap['name'], chap['url']))
            # Commit after each insert so progress is persisted incrementally.
            connection.commit()
            # print("Chapter added to db successfully:")
            print(chap['name'])
    # Clear the is_updated flags so this comic is not re-processed.
    cursor.execute("UPDATE public.comics SET is_updated = false WHERE id = %s", (comic_id,))
    connection.commit()
    cursor.execute("UPDATE public.crawls SET is_updated = false WHERE chapter_id = %s", (crawl_id,))
    connection.commit()
    return
# Automatic updater: refresh chapters for comics flagged as needing an update.
def autoUpdateChapter():
    # Batch of at most 5 comics per run, to bound the work done per invocation.
    cursor.execute("SELECT id, crawl_id FROM public.comics WHERE is_updated = true ORDER BY id ASC limit 5")
    results = cursor.fetchall()
    if results is not None:
        for row in results:
            comic_id = row[0]  # comic's id
            crawl_id = row[1]  # FK into public.crawls for this comic
            # Resolve the source-site chapter id for this crawl record.
            cursor.execute("SELECT chapter_id FROM public.crawls WHERE id = %s", (crawl_id,))
            crawlResult = cursor.fetchone()
            if crawlResult is not None:
                updateChapter(comic_id, crawlResult[0])
    return
autoUpdateChapter()
{
"api_name": "dotenv.load_dotenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "postgres.cursor.execute... |
14992716515 | #!/usr/bin/env python
# coding: utf-8
# In[37]:
# Questions for 10/28 meeting:
# Test set -> Should the test be just one game? Answer: Leave it the way it is for now.
# Train set -> Should we duplicate previous games to add weighting? Answer: Yes.
## November 6th, 2020 Backend Meeting ##
# 4 Factors to include for opponent: efg, tov_pct, orb_pct, ftr ... - Done
# Add win (boolean) column for each game -> predict on that instead of points - Done
# Later on: Using most recent games???
## November 10th, 2020 Backend Meeting ##
# Next Steps:
# Get it on the dashboard
# Other functionality?
# Imports
import numpy as np
import pandas as pd
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.feature_selection import f_regression
from sklearn.feature_selection import SelectKBest
from matplotlib import pyplot
pd.set_option("display.max_rows", None, "display.max_columns", None)
# In[38]:
# Read in box score data provided by Ludis
df = pd.read_csv("team_boxscores_v3.csv")
df = df.fillna(0)
# pd.set_option('display.max_columns', None)
pd.set_option('display.max_columns', 59)
# In[39]:
### Hard-coded teamIDs from dataset for testing purposes ###
# Kentucky
team1 = '2267a1f4-68f6-418b-aaf6-2aa0c4b291f1'
# LSU
team2 = '70e2bedd-3a0a-479c-ac99-e3f58aa6824b'
# Ohio State
team3 = '857462b3-0ab6-4d26-9669-10ca354e382b'
# Florida
team4 = '912f8837-1d81-4ef9-a576-a21f271d4c64'
# Michigan State
team5 = 'a41d5a05-4c11-4171-a57e-e7a1ea325a6d'
# Stat groups used by compTeams to pick a search step: fractional stats are
# stepped by +/-0.1, counting stats by +/-1; the "neg" groups are stats where
# a DECREASE improves the predicted outcome.
floatArr = ["efg","orb_pct","ftr"]
negFloatArr = ["tov_pct"]
intArr = ["assists", "blocks","defensive_rebounds", "fast_break_pts", "points_in_paint","points_off_turnovers","rebounds","steals"]
negIntArr = ["turnovers","opponent_drb"]
# In[40]:
# Returns all game records for a given teamID
def getAllTeamMatchRecords(teamID, df):
    """Return every box-score row in df belonging to the given team."""
    team_mask = df["team_id"] == teamID
    return df[team_mask]
# In[41]:
# Returns win/loss ratio for a given team across entire dataset
# Add functionality for filtering by season?
def statWinLoss(teamID, df):
    """Return a team's win/loss ratio across the entire dataset.

    A game counts as a win when points > points_against (ties count as
    losses, matching the original row-by-row logic). Keeps the original
    convention of returning 1 when the team has no losses — including
    teams with no games at all.

    Replaces the manual iterrows() counting loop with a vectorized
    comparison.
    """
    team_rows = df[df["team_id"] == teamID]
    wins = int((team_rows["points"] > team_rows["points_against"]).sum())
    losses = len(team_rows) - wins
    if losses == 0:
        return 1
    return wins / losses
# In[42]:
# Return all gameIDs for a given team
def getGameIDs(teamID, df):
    """Return the game_id Series for every game the given team played."""
    return df.loc[df["team_id"] == teamID, "game_id"]
# In[43]:
# Returns common game IDs between two teams
def getMatchupGameIDs(team1, team2, df):
    """Return the game_ids shared by both teams (their head-to-head games)."""
    ids_a = getGameIDs(team1, df)
    ids_b = getGameIDs(team2, df)
    return pd.merge(ids_a, ids_b)
# In[44]:
# Returns average of a given statistic for a given teamID
def getAvgStatForTeam(teamID, statistic, df):
    """Return the average value of `statistic` over all of a team's games.

    Fixed: removed a duplicated, unreachable `return` statement and
    replaced the manual running-sum loop with pandas' vectorized mean.
    A team with no rows now yields NaN instead of raising
    ZeroDivisionError.
    """
    team_rows = df[df["team_id"] == teamID]
    return team_rows[statistic].mean()
print(getAvgStatForTeam(team1, "rebounds", df))
# In[45]:
# This function will get the record of a team by a specific year and can also calculate some avg
def getTeamRecordByYear(teamID, year, df):
    """Return all of a team's game records for a given season.

    Fixes relative to the original:
    - the inner filter built its boolean mask from the full frame
      (`team_record[df["season"] == ...]`), a misaligned-index pattern;
      the mask is now built from the already-filtered rows;
    - per-season averages of two_points_made / field_goals_made were
      computed but discarded (`return_value` unused) — removed;
    - a team/season with no games now returns an empty frame instead of
      raising NameError on an unassigned local.
    """
    season_mask = (df["team_id"] == teamID) & (df["season"] == year)
    return df[season_mask]
# In[46]:
# Return dataframe with selected features
def filterRowsFS(df):
    """Project df down to the 14 model features used for training/prediction."""
    feature_columns = [
        "assists", "blocks", "defensive_rebounds", "opponent_drb",
        "fast_break_pts", "points_in_paint", "points_off_turnovers",
        "rebounds", "steals", "turnovers", "efg", "tov_pct", "orb_pct", "ftr",
    ]
    return df[feature_columns]
# In[105]:
# Calculate correct predictions -> wins/losses
def calcPredError(df):
    """Return the percentage of rows where 'Predicted (int)' equals 'Actual'.

    Vectorized replacement for the original iterrows() counting loop.
    Still raises ZeroDivisionError on an empty frame, matching the prior
    behavior.
    """
    total = len(df)
    correct = int((df['Actual'] == df['Predicted (int)']).sum())
    return (correct / total) * 100
# In[48]:
# Calculate win percentage
def winPct(teamPred):
    """Predicted win percentage: mean of the float predictions, as a percent."""
    predictions = teamPred['Predicted (float)']
    total = predictions.sum()
    count = len(predictions)
    return float(total / count * 100)
# In[49]:
# feature selection
def select_features(X_train, y_train, X_test):
    """Score all features with univariate F-regression.

    Returns the (unchanged-width, k='all') transformed train and test
    matrices together with the fitted selector, whose `.scores_` holds the
    per-feature F statistics.
    """
    selector = SelectKBest(score_func=f_regression, k='all')
    selector.fit(X_train, y_train)
    train_selected = selector.transform(X_train)
    test_selected = selector.transform(X_test)
    return train_selected, test_selected, selector
# In[50]:
def overallFeatures(df):
    """Rank the 14 model features by univariate F-score against 'win'
    over the whole dataset, plot the scores, and return a DataFrame of
    (Stat, Score) sorted best-first."""
    datasetForFS = df
    # NOTE(review): fillna(0) here is not assigned back, so it has no effect
    # (df was already filled at load time) — confirm intent.
    datasetForFS.fillna(0)
    # X1 = datasetForFS[["assists","personal_fouls","ftr","orb_pct", "tov_pct", "points_in_paint", "blocks"]]
    # X1 = datasetForFS[["assists","blocks","personal_fouls"]]
    X1 = datasetForFS[["assists","blocks","defensive_rebounds","opponent_drb","fast_break_pts","points_in_paint","points_off_turnovers","rebounds","steals","turnovers","efg","tov_pct","orb_pct","ftr"]]
    y1 = datasetForFS['win']
    X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.2, random_state=0)
    X_train_fs, X_test_fs, fs = select_features(X_train, y_train, X_test)
    colList = X1.columns.values.tolist()
    statScoreDF = pd.DataFrame(data={'Stat': pd.Series(colList), 'Score': pd.Series(fs.scores_.tolist())})
    statScoreDF = statScoreDF.sort_values(by=['Score'], ascending=False)
    # plot the scores
    pyplot.bar([i for i in range(len(fs.scores_))], fs.scores_)
    pyplot.show()
    return statScoreDF
# print(overallFeatures(df))
# In[122]:
def teamFeatures(team1, team2, df):
    """Rank the 14 model features by F-score using only team1's games
    against team2, returning a (Stat, Score) DataFrame sorted best-first."""
    # Restrict team1's records to head-to-head games against team2.
    datasetForFS = getAllTeamMatchRecords(team1, df).merge(getMatchupGameIDs(team1, team2, df))
    # NOTE(review): fillna(0) is not assigned back and therefore has no effect.
    datasetForFS.fillna(0)
    # X1 = datasetForFS[["assists","personal_fouls","ftr","orb_pct", "tov_pct", "points_in_paint", "blocks"]]
    # X1 = datasetForFS[["assists","blocks","personal_fouls"]]
    X1 = datasetForFS[["assists","blocks","defensive_rebounds","opponent_drb","fast_break_pts","points_in_paint","points_off_turnovers","rebounds","steals","turnovers","efg","tov_pct","orb_pct","ftr"]]
    y1 = datasetForFS['win']
    X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.2, random_state=0)
    X_train_fs, X_test_fs, fs = select_features(X_train, y_train, X_test)
    colList = X1.columns.values.tolist()
    statScoreDF = pd.DataFrame(data={'Stat': pd.Series(colList), 'Score': pd.Series(fs.scores_.tolist())})
    statScoreDF = statScoreDF.sort_values(by=['Score'], ascending=False)
    # Plot the scores - PyPlot
    # pyplot.bar([i for i in range(len(fs.scores_))], fs.scores_)
    # pyplot.show()
    return statScoreDF
# teamFeatures(team1, team2, df)
# In[123]:
def learn(dataset):
    """Train a linear win/loss model on the full box-score dataset.

    Fixed: the `dataset` argument was previously shadowed by re-reading
    the CSV from disk, so data passed by callers was silently ignored.
    Also removed the unused `coeff_df` local and the duplicate
    `regressor.predict(X_test)` call.

    Returns (fitted LinearRegression, DataFrame with 'Actual',
    'Predicted (int)' and 'Predicted (float)' columns for the test split).
    """
    dataset = dataset.fillna(0)
    # Shuffle rows so the manual 80/20 split below is not ordered by date/team.
    dataset = dataset.sample(frac = 1)
    X1 = dataset[["assists","blocks","defensive_rebounds","opponent_drb","fast_break_pts","points_in_paint","points_off_turnovers","rebounds","steals","turnovers","efg","tov_pct","orb_pct","ftr"]]
    y1 = dataset['win']
    # Manual split: first fifth (after shuffling) is the test set.
    X_train = X1[int(len(X1)/5):]
    X_test = X1[:int(len(X1)/5)]
    y_train = y1[int(len(y1)/5):]
    y_test = y1[:int(len(y1)/5)]
    regressor = LinearRegression()
    regressor.fit(X_train, y_train)
    y_pred = regressor.predict(X_test)
    y_pred_round = np.around(y_pred)
    return regressor, pd.DataFrame({'Actual': y_test, 'Predicted (int)': y_pred_round, 'Predicted (float)': y_pred})
# reg, pred = learn(pd.read_csv("team_boxscores_v3.csv"))
# print(calcPredError(pred), winPct(pred))
# df1 = filterRowsFS(getAllTeamMatchRecords(team1, df))
# df2 = getAllTeamMatchRecords(team1, df)["win"]
# dfPred = reg.predict(df1)
# dfPredRound = np.around(dfPred)
# temp = pd.DataFrame({'Actual': df2, 'Predicted (int)': dfPredRound, 'Predicted (float)': dfPred})
# print(calcPredError(temp), winPct(temp))
# In[124]:
def learnMatchup(team1, team2):
    """Train a linear win/loss model for team1, weighting head-to-head
    games against team2 by duplicating those rows in the training data.

    Returns (fitted regressor, actual-vs-predicted DataFrame for the
    held-out fifth of the rows)."""
    dataset = pd.read_csv("team_boxscores_v3.csv")
    dataset = dataset.fillna(0)
    dfTeam1 = getAllTeamMatchRecords(team1, dataset)
    # NOTE(review): matchups are looked up in the module-level `df`, not the
    # freshly read `dataset` — presumably equivalent since both come from the
    # same CSV; confirm.
    matchups = getMatchupGameIDs(team1, team2, df)["game_id"].tolist()
    dfTeam1 = dfTeam1.reset_index()
    # Elijah - Save rows for later and append to train set
    # Duplicate each head-to-head row once to up-weight it.
    # NOTE(review): DataFrame.append is deprecated/removed in pandas 2.x.
    for index, row in dfTeam1.iterrows():
        for i in range(0, len(matchups)):
            if str(dfTeam1.loc[index, "game_id"]) == matchups[i]:
                dfTeam1 = dfTeam1.append(dfTeam1.loc[index], ignore_index=True)
    dfTeam1 = dfTeam1.sample(frac = 1)
    X1 = dfTeam1[["assists","blocks","defensive_rebounds","opponent_drb","fast_break_pts","points_in_paint","points_off_turnovers","rebounds","steals","turnovers","efg","tov_pct","orb_pct","ftr"]]
    y1 = dfTeam1['win']
    # rng = np.random.randint(0, 42)
    rng = 0
    # X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.2, random_state=rng)
    # W/ shuffle: manual 80/20 split — first fifth is the test set.
    X_train = X1[int(len(X1)/5):]
    X_test = X1[:int(len(X1)/5)]
    y_train = y1[int(len(y1)/5):]
    y_test = y1[:int(len(y1)/5)]
    regressor = LinearRegression()
    regressor.fit(X_train, y_train)
    coeff_df = pd.DataFrame(regressor.coef_, X1.columns, columns=['Coefficient'])
    y_pred = regressor.predict(X_test)
    y_pred_round = np.around(regressor.predict(X_test))
    print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
    print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
    print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
    return regressor, pd.DataFrame({'Actual': y_test, 'Predicted (int)': y_pred_round, 'Predicted (float)': y_pred})
reg, pred = learnMatchup(team1, team2)
# In[125]:
def avgDataRow(df):
    """Return a one-row DataFrame holding the column-wise mean of df.

    Fixed: the original used DataFrame.iteritems(), which was removed in
    pandas 2.0; this builds the same one-row frame via a comprehension
    over the columns.
    """
    return pd.DataFrame({col: [df[col].mean()] for col in df.columns})
# In[128]:
# Experiment: predict team1's win rate from its average stat line, then
# repeat with assists bumped by 10 to see how the prediction moves.
stats = teamFeatures(team1, team2, df).head()['Stat'].tolist()
df1 = getAllTeamMatchRecords(team1, df)
# One-row frame of team1's average feature values.
df2 = avgDataRow(filterRowsFS(getAllTeamMatchRecords(team1, df)))
df3 = df1["win"]
dfPred = reg.predict(df2)
dfPredRound = np.around(dfPred)
dfFinal = pd.DataFrame({'Actual': df3.mean(), 'Predicted (int)': dfPredRound, 'Predicted (float)': dfPred})
print(dfFinal)
# print(df2)
# Perturb a single feature and re-predict.
df2.at[0,"assists"] = df2.at[0,"assists"] + 10
dfPred = reg.predict(df2)
dfPredRound = np.around(dfPred)
dfFinal = pd.DataFrame({'Actual': df3.mean(), 'Predicted (int)': dfPredRound, 'Predicted (float)': dfPred})
print(dfFinal)
# print(df2)
# In[54]:
# Return win percentage as stat changes
# df - dataframe, e.g. getAllTeamMatchRecords(team1, df)
# reg - regressor from above
# var - the feature to change
# val - the value to add to the feature
def predOnStat(df, reg, var, val):
    """Predicted win percentage after adding `val` to feature `var`.

    df  - a team's full game records (must contain the 14 features + 'win')
    reg - fitted regressor
    var - name of the feature column to perturb
    val - amount added to that feature in every game

    Fixed: the original wrote through a slice of the caller's frame
    (SettingWithCopy — it could mutate `df` in place); the features are
    now copied before perturbation. Also hoisted the duplicate
    reg.predict() call.
    """
    feature_cols = ["assists","blocks","defensive_rebounds","opponent_drb","fast_break_pts","points_in_paint","points_off_turnovers","rebounds","steals","turnovers","efg","tov_pct","orb_pct","ftr"]
    adjusted = df[feature_cols].copy()
    adjusted[var] = adjusted[var] + val
    preds = reg.predict(adjusted)
    preds_round = np.around(preds)
    scored = pd.DataFrame({'Actual': df['win'], 'Predicted (int)': preds_round, 'Predicted (float)': preds})
    return float(winPct(scored))
# In[ ]:
# df -> dataframe
# reg -> regressor
# Return new win pct
def updateWinPct(df, reg):
    # NOTE(review): incomplete stub — reg.predict() is called with no feature
    # matrix and its result is discarded, so this raises TypeError if invoked.
    # Presumably meant to recompute a win percentage from df; confirm intent
    # before use (it has no callers in this file).
    reg.predict()
# In[28]:
# statList = ["assists", "blocks", "orb_pct"]
def compTeams(df, teamID, opponentID, win_percent):
    """For each of the top-five matchup features, search for the per-game
    change that pushes the predicted win percentage above `win_percent`.

    Counting stats are stepped by +/-1, fractional stats by +/-0.1; the
    "neg" stat groups step downward because decreasing them helps.

    Fixed: the original ended with `print(val)` / `return temp` on
    undefined names, so every call raised NameError. It now returns a
    dict mapping stat name -> required adjustment.

    Note: like the original search loops, a loop will not terminate if
    `win_percent` is unreachable for a stat.
    """
    topFive = teamFeatures(teamID, opponentID, df)["Stat"].head().tolist()
    reg, pred = learnMatchup(teamID, opponentID)
    team_records = getAllTeamMatchRecords(teamID, df)
    # Baseline prediction with a zero adjustment.
    originalPct = predOnStat(team_records, reg, 'assists', 0)
    adjustments = {}
    for stat in topFive:
        # Pick the search step from the stat's group.
        if stat in intArr:
            step = 1
        elif stat in negIntArr:
            step = -1
        elif stat in floatArr:
            step = 0.1
        elif stat in negFloatArr:
            step = -0.1
        else:
            continue  # stat not in any known group: nothing to adjust
        currentPct = originalPct
        value = 0
        while currentPct <= win_percent:
            value += step
            currentPct = predOnStat(team_records, reg, stat, value)
        adjustments[stat] = value
    return adjustments
# Demo run: find the stat adjustments needed for an 80.5% predicted win rate.
win_percent = 80.5
compTeams(df, team1, team2, win_percent)
# In[19]:
# testey = getAllTeamMatchRecords(team1, df)
# prediction_acc, win_percent = predOnStat(testey, reg, "assists", 0)
# print("Prediction accuracy:", prediction_acc, "\nWin Percent:", win_percent)
# prediction_acc, win_percent = predOnStat(testey, reg, "assists", 5)
# print("Prediction accuracy:", prediction_acc, "\nWin Percent:", win_percent)
# prediction_acc, win_percent = predOnStat(testey, reg, "assists", 10)
# print("Prediction accuracy:", prediction_acc, "\nWin Percent:", win_percent)
# In[ ]:
| oohshan/SmartGameGoalsGenerator | passenger.py | passenger.py | py | 15,323 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pandas.set_option",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
... |
8042412809 | import tornado.web
import tornado.ioloop
import tornado.httpserver
import tornado.options
# Define command-line/config options, e.g. --port=9000 --list=a,b,c
tornado.options.define("port", default=8000, type=None)
# multiple=True parses a comma-separated value into a list of strings.
tornado.options.define("list", default=[], type=str, multiple=True)
class IndexHandler(tornado.web.RequestHandler):
    """Handler for `/` that responds with a static greeting."""
    def get(self, *args, **kwargs):
        self.write("hello customer server.")
if __name__ == '__main__':
    tornado.options.options.logging = None  # turn off logging
    # Load option values (port, list) from the "config" file.
    tornado.options.parse_config_file("config")
    print(tornado.options.options.list)
    app = tornado.web.Application([
        (r"/", IndexHandler)
    ])
    httpserver = tornado.httpserver.HTTPServer(app)
    # Bind to the configured port and start a single server process.
    httpserver.bind(tornado.options.options.port)
    httpserver.start(1)
    tornado.ioloop.IOLoop.current().start()
| zuohd/python-excise | tornado/server04.py | server04.py | py | 847 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tornado.web.options.define",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tornado.web.options",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "tornado.... |
34859170758 | #####
# Remove "warn" logs from spark
#####
from os.path import abspath
from pyspark.sql import SparkSession
# warehouse_location points to the default location for managed databases and tables
warehouse_location = abspath('spark-warehouse')
spark = SparkSession \
.builder \
.appName("Pyspark integration with Hive") \
.config("spark.sql.warehouse.dir", warehouse_location) \
.enableHiveSupport() \
.getOrCreate()
# enableHiveSupport() option in spark session supports the connection with Hive
# Queries are expressed in HiveQL
spark.sql("SELECT * FROM company.employees").show()
employees_df = spark.sql("SELECT id, first_name, last_name, age, gender \
FROM company.employees \
WHERE age < 30 \
ORDER BY first_name")
employees_df.show(50)
| zaka-ai/data-engineer-track | Big_data_warehousing_in_hadoop/hive_hands_on/2_hive_partitioning_pyspark_integration/2_2_hive_with_pyspark.py | 2_2_hive_with_pyspark.py | py | 828 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 11,
"usage_type": "attribute"
... |
41559253356 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait as wait
from selenium.webdriver.common.action_chains import ActionChains as ac
from selenium.common.exceptions import NoSuchElementException
import pandas as pd
import time
# Log out of seamless.ai.
def logout(driver):
    """Open the account menu, click the logout entry, then close the window."""
    menu_xpath = '/html/body/div/div/div/div/div[2]/div[2]/span/button'
    logout_xpath = '/html/body/div/div/div/div/div[2]/div[2]/ul/li[6]/a'
    driver.find_element_by_xpath(menu_xpath).click()
    driver.find_element_by_xpath(logout_xpath).click()
    driver.close()
# Delete the scraped companies from the "My Research" list.
def delete_companies(driver):
    # Select all rows on the current page via the header checkbox.
    wait(driver, 20).until(EC.element_to_be_clickable((By.XPATH,
                                                       '/html/body/div/div/div/div[2]/div/div[2]/table/thead/tr/th/div/span/div/div/div/label/span'))).click()
    time.sleep(1)
    # Open the bulk-actions menu and pick the delete action.
    wait(driver, 10).until(EC.element_to_be_clickable(
        (By.XPATH, '/html/body/div/div/div/div[2]/div/div[2]/div/div[2]/div[2]/div[2]/span/button'))).click()
    time.sleep(1)
    wait(driver, 10).until(EC.element_to_be_clickable(
        (By.XPATH, '/html/body/div/div/div/div[2]/div/div[2]/div/div[2]/div[2]/div[2]/ul/li/a'))).click()
    time.sleep(1)
    # The site requires typing the word "delete" to confirm.
    wait(driver, 10).until(
        EC.element_to_be_clickable((By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div/input'))).send_keys(
        'delete')
    time.sleep(1)
    wait(driver, 10).until(
        EC.element_to_be_clickable((By.XPATH, '/html/body/div[7]/div[2]/div/div/div[3]/button[2]'))).send_keys(
        Keys.ENTER)
    time.sleep(5)
def scrap(keyword, start_page, end_page, columns, file_name):
    """Log into seamless.ai, scrape company search results for `keyword`
    over pages [start_page, end_page], export the requested `columns` to
    an Excel file, then clean up the scraped list and log out.

    NOTE(review): credentials are hard-coded placeholders ('********') —
    they must be supplied securely, not committed.
    """
    driver = webdriver.Chrome('chromedriver.exe')
    driver.get('https://login.seamless.ai/login')
    driver.set_window_size(1500, 1000)
    time.sleep(1)
    driver.find_element_by_name('username').send_keys('********')
    driver.find_element_by_name('password').send_keys('********')
    driver.find_element_by_css_selector('form > button').click()
    time.sleep(3)
    try:
        driver.get('https://login.seamless.ai/search/companies?page=' + str(
            start_page) + '&locations=1&companiesExactMatch=false&companyKeywords=' + keyword)
        driver.execute_script("location.reload(true);")
        time.sleep(1)
        driver.find_element_by_css_selector('button > svg').click()
        for p1 in range(start_page, end_page + 1):
            driver.find_element_by_css_selector('body').send_keys(Keys.HOME)
            time.sleep(5)
            # Research (save) the whole page of results at once.
            wait(driver, 60).until(EC.element_to_be_clickable((By.XPATH,
                                                               '/html/body/div/div/div/div[2]/div/div[2]/div[2]/table/thead/tr/th/div/span/div/div/div/label/span'))).click()
            time.sleep(1)
            wait(driver, 60).until(EC.element_to_be_clickable(
                (By.XPATH, '/html/body/div/div/div/div[2]/div/div[2]/div[2]/div/div[2]/button'))).click()
            if p1 < end_page:
                # Not the last page yet: advance to the next results page.
                driver.find_element_by_css_selector('body').send_keys(Keys.END)
                time.sleep(20)
                wait(driver, 10).until(EC.element_to_be_clickable(
                    (By.XPATH, '/html/body/div/div/div/div[2]/div/div[2]/div[2]/div[2]/div/button[2]'))).send_keys(
                    Keys.ENTER)
            else:
                time.sleep(10)
                # Collect details for every scraped company.
                all_data = list()  # accumulated per-company rows
                wait(driver, 10).until(
                    EC.element_to_be_clickable(
                        (By.XPATH, '/html/body/div/div/div/div/div/div[3]/a'))).click()  # go to the scraped-companies list
                time.sleep(5)
                item_info = wait(driver, 10).until(
                    EC.element_to_be_clickable((By.XPATH, '/html/body/div/div/div/div[2]/div/div[2]/div/div/div[2]'))).text
                item_len = int(item_info.split()[-1])
                # 15 items per page.
                pages = (item_len - 1) // 15 + 1
                for p2 in range(pages):
                    if p2 < pages - 1:
                        items = 15
                    else:
                        items = item_len % 15 if item_len % 15 else 15
                    for i in range(items):
                        company_data = list()
                        # Open the i-th row's detail dialog.
                        wait(driver, 10).until(EC.element_to_be_clickable((By.XPATH,
                                                                           '/html/body/div/div/div/div[2]/div/div[2]/table/tbody/tr[' + str(
                                                                               i + 1) + ']/td[2]/div/div/button'))).send_keys(
                            Keys.ENTER)
                        time.sleep(5)
                        wait(driver, 60).until(EC.element_to_be_clickable(
                            (By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div/div[2]/button'))).send_keys(Keys.ENTER)
                        time.sleep(5)
                        wait(driver, 30).until(EC.element_to_be_clickable((By.XPATH,
                                                                           '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[2]/div[2]/div/span/span/span[4]/span/span/a'))).send_keys(
                            Keys.ENTER)
                        # Pull each profile field out of the dialog.
                        name = wait(driver, 10).until(EC.element_to_be_clickable(
                            (By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]'))).text
                        desc = wait(driver, 10).until(EC.element_to_be_clickable((By.XPATH,
                                                                                  '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[2]/div[2]/div/span/span'))).text
                        website = wait(driver, 10).until(EC.element_to_be_clickable(
                            (By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[3]/div[2]'))).text
                        industry = wait(driver, 10).until(EC.element_to_be_clickable(
                            (By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[4]/div[2]'))).text
                        size = wait(driver, 10).until(EC.element_to_be_clickable(
                            (By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[5]/div[2]'))).text
                        founded = wait(driver, 10).until(EC.element_to_be_clickable(
                            (By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[6]/div[2]'))).text
                        company_type = wait(driver, 10).until(EC.element_to_be_clickable(
                            (By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[7]/div[2]'))).text
                        revenue = wait(driver, 10).until(EC.element_to_be_clickable(
                            (By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[8]/div[2]'))).text
                        location = wait(driver, 10).until(EC.element_to_be_clickable(
                            (By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[9]/div[2]'))).text
                        company_data.extend([name, desc, website, industry, size, founded, company_type, revenue, location])
                        all_data.append(company_data)
                        time.sleep(20)
                        driver.find_element_by_css_selector('body').send_keys(Keys.ESCAPE)
                        # Every 5 rows, scroll down to keep the next rows clickable.
                        if i % 5 == 4:
                            time.sleep(1)
                            driver.find_element_by_css_selector('body').send_keys(Keys.PAGE_DOWN)
                            time.sleep(1)
                        time.sleep(1)
                        driver.find_element_by_css_selector('body').send_keys(Keys.HOME)
                    time.sleep(2)
                    # Remove this page of scraped companies before moving on.
                    delete_companies(driver)
    except Exception:
        # Recovery path: close any open dialog and purge whatever remains in
        # the scraped list so the account is left clean.
        time.sleep(1)
        driver.find_element_by_css_selector('body').send_keys(Keys.ESCAPE)
        time.sleep(1)
        wait(driver, 10).until(
            EC.element_to_be_clickable(
                (By.XPATH, '/html/body/div/div/div/div/div/div[3]/a'))).click()  # go to the scraped-companies list
        time.sleep(1)
        ac(driver).move_by_offset(0, 500).click().perform()
        time.sleep(5)
        try:
            item_info = wait(driver, 30).until(EC.element_to_be_clickable((
                By.CSS_SELECTOR, 'div.RecordCount__RecordCountContainer-jdtFHI'))).text
            item_len = int(item_info.split()[-1])
            pages = (item_len - 1) // 15 + 1
            for p in range(pages):
                delete_companies(driver)
        except NoSuchElementException:
            pass
    else:
        # Success path: export the collected rows to Excel.
        all_columns = [
            'Company Name', 'Description', 'Website', 'Industry', 'Company Size', 'Founded', 'Company Type',
            'Revenue',
            'Location'
        ]
        all_data.reverse()
        all_data = pd.DataFrame(all_data, columns=all_columns, index=list(range(1, item_len + 1)))[columns]
        all_data.to_excel(file_name, encoding='utf-8-sig')
    finally:
        logout(driver)
| cermen/SecondCompanyScraping | scrap.py | scrap.py | py | 8,973 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.exp... |
13437468850 | # Display a runtext with double-buffering.
import sys
sys.path.append("matrix/bindings/python/samples")
from samplebase import SampleBase
from rgbmatrix import graphics
import time
from PIL import Image
import requests
import json
import threading
from threading import Thread
from queue import Queue
import traceback
import logging
LOG_FILENAME = "Logs/mtatext.log"
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
logging.debug("Startup test in mtatext.py")
#GPIO
PIN = 35
DIRECTIONS = ["N", "S"]
### MATRIX HELPER FUNCTIONS ###
def fillRectangle(gx, canvas, xUL=0, yUL=0, xBR=63, yBR=31, color=graphics.Color(0,0,0)):
    """Fill the rectangle (xUL, yUL)-(xBR, yBR) inclusive by drawing one
    vertical line per column; defaults cover the whole 64x32 panel."""
    if xUL >= xBR or yUL >= yBR:
        # Degenerate/inverted bounds: report and draw nothing (matches the
        # original best-effort behavior).
        print("ERROR, bad rectangle boundaries.")
        return
    for column in range(xUL, xBR + 1):
        gx.DrawLine(canvas, column, yUL, column, yBR, color)
def scrollText(gx, canvas, leftBoundary, rightBoudary, height, color, text):
    # NOTE(review): broken/unfinished stub — the body references names that are
    # not in scope here (offscreen_canvas, font, pos, textColor, my_text) and
    # ignores every parameter, so calling this raises NameError. It has no
    # callers in this file; scrolling is actually done by printTrainLine.
    text_length = graphics.DrawText(offscreen_canvas, font, pos, 20, textColor, my_text)
# Hardcoded asset path pattern; update when supporting other train sets.
def printTrainBulletId(canvas, x, y, route_id):
    """Draw the route's bullet icon (a PPM sprite) at (x, y) on the canvas."""
    bullet = Image.open("pixelMaps/%strain.ppm"%(route_id)).convert('RGB')
    canvas.SetImage(bullet, x, y)
# position is 0 (top row) or 1 (bottom row)
def printTrainLine(gx, canvas, route_id, font, min_font, destination, mins_left, position, text_frame):
    """Render one arrival line: route bullet, scrolling destination text,
    and minutes-to-arrival. text_frame is the current scroll offset in
    pixels; returns remaining scroll distance (negative => wrapped)."""
    # Each row is 17px tall; the baseline sits 8px into the row.
    height = 8 + position*17
    bullet_position = (0, height - 7) #was 6,height
    destination_position = (bullet_position[0]+16, height+int(font.baseline/2)-1)
    mins_left_position = (48, height+int(font.baseline/2)-1)
    text_color = gx.Color(100,100,100)
    left_boundary = destination_position[0]-1
    right_boundary = mins_left_position[0]-2
    # Draw the destination shifted left by the scroll offset ...
    text_width = gx.DrawText(canvas, font, destination_position[0]-text_frame, destination_position[1], text_color, destination)
    # ... then mask the areas left and right of the text window so the
    # scrolled text doesn't bleed under the bullet or the minutes column.
    fillRectangle(gx, canvas, xBR=left_boundary, yUL=position*16, yBR=16+position*16)
    fillRectangle(gx, canvas, xUL=right_boundary, yUL=position*16, yBR=16+position*16)
    printTrainBulletId(canvas, bullet_position[0], bullet_position[1], route_id)
    gx.DrawText(canvas, min_font, mins_left_position[0], mins_left_position[1], text_color, "%sm"%(mins_left))
    return text_width-text_frame
def getTrains(stations):
    """Query the local schedule server for the given station ids.

    Returns the decoded list of train dicts when the response looks valid
    (non-empty and the first entry has a destination), otherwise None.

    Fixed: the bare ``except:`` (which also swallowed KeyboardInterrupt/
    SystemExit) is narrowed to ``except Exception``; the single-station
    special case was redundant since ",".join of one element equals that
    element.
    """
    station_string = ",".join(stations)
    try:
        response = requests.get("http://localhost:5000/train-schedule/%s"%(station_string))
        trains = json.loads(response.text)
        valid = trains and len(trains)>0 and trains[0]["destination"] is not None
        if valid:
            logging.debug("Valid response returning trains:")
            logging.debug(str(trains))
            return trains
        logging.debug("Not valid returning NONE")
        return None
    except Exception:
        logging.exception("Ex in getTrains:")
        return None
# Set once the local schedule server has answered with valid data.
server_live = threading.Event()


class ServerLiveThread(Thread):
    """Background thread that polls the schedule server until it returns
    valid train data, then sets the module-level ``server_live`` event.

    Fixed: the original retried after an exception by recursively calling
    ``self.run()``, which could grow the stack without bound if the server
    stayed down; retries now happen in a loop.
    """
    def __init__(self):
        Thread.__init__(self)

    def run(self):
        while True:
            try:
                trains = getTrains(["F21"])
                valid = trains and len(trains) > 0 and trains[0]["destination"] is not None
                if valid:
                    logging.debug("Server online, starting UI")
                    server_live.set()
                    return
                logging.debug("Startup server still not valid, pinging again")
            except Exception:
                logging.exception("Ex in ServerLiveThread:")
                time.sleep(2)
class GetTrainsThread(Thread):
    """Worker thread: fetch the schedule for `stations` once and push the
    result onto `queue` when the fetch succeeded."""
    def __init__(self, stations, queue):
        Thread.__init__(self)
        self.trains = []            # last fetched result
        self.stations = stations    # station ids to query
        self.queue = queue          # consumer-side Queue for results
    def setTrains(self, trains):
        # Manual setter kept for API compatibility; run() assigns directly.
        self.trains = trains
    def run(self):
        self.trains = getTrains(self.stations)
        if self.trains:
            self.queue.put(self.trains)
class RunText(SampleBase):
    """Main display loop: shows two arrival lines (primary + secondary
    train) on the LED matrix, scrolling long destinations, refreshing
    schedule data in the background, and rotating the secondary line."""
    def __init__(self, *args, **kwargs):
        super(RunText, self).__init__(*args, **kwargs)
        self.parser.add_argument("-s", "--stations", help="List of stations", nargs="*", default=["F21"])
    def run(self):
        offscreen_canvas = self.matrix.CreateFrameCanvas()
        font = graphics.Font()
        font.LoadFont("matrix/fonts/6x12.bdf")
        min_font = graphics.Font()
        min_font.LoadFont("matrix/fonts/5x8.bdf")
        textColor = graphics.Color(200, 200, 200)
        black = graphics.Color(0,0,0)
        pos = offscreen_canvas.width
        stations = self.args.stations
        # All *_time values are seconds; loop ticks every time_step seconds,
        # so counters below are stored in ticks (seconds / time_step).
        time_step = 0.09
        freeze_time = 3              # pause before a line starts scrolling
        train_update_time = 25       # how often to refetch the schedule
        secondary_switch_time = 10   # how often to rotate the secondary line
        trains_queue = Queue()
        # pos1/pos2: scroll offsets; freeze1/freeze2: remaining pause ticks.
        pos1 = 0
        freeze1 = int(freeze_time/time_step)
        pos2 = 0
        freeze2 = int(freeze_time/time_step)
        train_update = 0
        switch_time = int(secondary_switch_time/time_step)
        trains = None
        secondary_train = 1
        primary_train = 0
        # Block until the schedule server is reachable.
        t = ServerLiveThread()
        t.start()
        server_live.wait()
        while True:
            now = time.time()
            offscreen_canvas.Clear()
            # Kick off a background refetch when due and no fetch is running.
            if train_update==0 and trains_queue.qsize()==0 and threading.active_count()==1:
                train_thread = GetTrainsThread(stations,trains_queue)
                train_thread.start()
                train_update = int(train_update_time/time_step)
            if(trains_queue.qsize()>0):
                trains = trains_queue.get()
            if trains:
                # Rotate which pair of trains is displayed.
                if switch_time==0:
                    secondary_train = max(1,(secondary_train+2)%len(trains))
                    primary_train = secondary_train-1
                    switch_time = int(secondary_switch_time/time_step)
                else:
                    switch_time-=1
                reset1 = printTrainLine(graphics, offscreen_canvas, trains[primary_train]["route_id"], font, min_font, trains[primary_train]["destination"], trains[primary_train]["mins_left"], 0, pos1)
                if len(trains) > 1:
                    reset2 = printTrainLine(graphics, offscreen_canvas, trains[secondary_train]["route_id"], font, min_font,trains[secondary_train]["destination"], trains[secondary_train]["mins_left"], 1, pos2)
                else:
                    reset2 = -1
            offscreen_canvas = self.matrix.SwapOnVSync(offscreen_canvas)
            if trains:
                # Advance each line's scroll state: hold while frozen, then
                # scroll one pixel per tick; wrap when the text has run out.
                if pos1==0 and freeze1>0:
                    freeze1-=1
                else:
                    pos1+=1
                    freeze1 = int(freeze_time/time_step)
                if pos2==0 and freeze2>0:
                    freeze2-=1
                else:
                    pos2+=1
                    freeze2 = int(freeze_time/time_step)
                if reset1<0:
                    pos1 = 0
                if reset2<0:
                    pos2 = 0
            train_update= max(0, train_update-1)
            # Sleep the remainder of the tick so each pass takes ~time_step.
            elapsed = time.time()-now
            time.sleep(max(0,time_step-elapsed))
# Main entry point
if __name__ == "__main__":
    run_text = RunText()
    # process() comes from SampleBase — presumably it parses CLI args and runs
    # the display loop, returning falsy on bad arguments (TODO confirm).
    if (not run_text.process()):
        run_text.print_help()
| aqwesd8/MTAProject | mtatext.py | mtatext.py | py | 7,270 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"... |
31008546048 | from flask import Flask
from flask_pymongo import PyMongo
from flask import Response
import random
import requests
from flask import request
import json
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
from flask import jsonify
from bson.objectid import ObjectId
from functools import wraps
import uuid
from bson import json_util
# Flask application and Mongo connection shared by all route handlers below.
app = Flask(__name__)
# NOTE(review): hard-coded secret key — should come from the environment in production.
app.config['SECRET_KEY'] = "secret"
app.config['MONGO_DBNAME'] = "tasker_db"
app.config['MONGO_URI'] = "mongodb://localhost:27017"
mongo = PyMongo(app)
def to_json(data):
    """Serialize *data* to a JSON string, handling BSON types (e.g. ObjectId)
    via bson.json_util's default encoder."""
    serialized = json.dumps(data, default=json_util.default)
    return serialized
#region Security
def generate_auth_token(id, expiration=600):
    """Create a signed, time-limited token embedding the user's id as a string.

    NOTE(review): TimedJSONWebSignatureSerializer was removed in
    itsdangerous 2.x — this requires itsdangerous < 2.0.
    """
    user_id = str(id)
    serializer = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
    return serializer.dumps({'id': user_id})
def authorize(f):
    """Decorator: require a valid token in the Authorization header.

    On success the wrapped view is called with the authenticated user
    document as its only argument; otherwise a bare 401 (missing header)
    or 404 (bad/expired token) response is returned.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        header = request.headers.get('Authorization')
        if header is None:
            return Response(status="401")
        authenticated_user = verify_auth_token(header)
        if not authenticated_user:
            return Response(status="404")
        return f(authenticated_user)
    return wrapper
def verify_auth_token(token):
    """Decode a token produced by generate_auth_token and look up its user.

    Returns the user document, or None when the token is expired or invalid.
    """
    s = Serializer(app.config['SECRET_KEY'])
    # The client sends str(bytes) — e.g. "b'abc...'" — so strip the quotes
    # and the leading "b" before decoding.  NOTE(review): fragile; better to
    # decode the token bytes server-side before handing them to the client.
    t = token.replace('\'', '')[1:]
    try:
        data = s.loads(t)
    except SignatureExpired:
        return None  # valid token, but expired
    except BadSignature:
        return None  # invalid token
    dd = data['id']
    # Token payload carries the stringified ObjectId of the user document.
    user = mongo.db.users.find_one({'_id': ObjectId(dd)})
    return user
#endregion
@app.route('/')
def home_page():
    """Health-check endpoint: confirms the app is serving requests."""
    # The previous version fetched mongo.db.users into an unused local;
    # that dead lookup has been removed.
    return "Success!"
# region Registration
@app.route('/register', methods=['POST'])
def register():
    """Start phone-number registration: store a 4-digit code and SMS it.

    Expects a JSON body with a 'phone' key; returns 409 when the phone is
    already registered.
    """
    req = request.get_json(silent=True)
    rnd = random.randrange(1000, 9999)
    if mongo.db.users.find_one({'phone': req['phone']}):
        # Bug fix: flask.Response has no `status_code` keyword (the old call
        # raised TypeError), and "utf-8" is a charset, not a content type.
        return Response(
            "Указанный номер зарегистрирован",
            status=409,
            content_type="text/plain; charset=utf-8")
    tmp_users = mongo.db.template_users
    # Upsert the fresh code; removed the unused find_one() lookup here.
    # NOTE(review): with upsert=True the update already inserts on miss, so
    # this insert() fallback can create duplicate template docs — confirm.
    if not tmp_users.find_one_and_update({'phone': req['phone']}, {'$set': {'code': rnd}}, upsert=True):
        tmp_users.insert({'phone': req['phone'], 'code': rnd})
    # Debug short-circuit kept from the original ("#remove this"): skips the
    # paid SMS call during development.  Delete this return to re-enable SMS.
    return Response(status="200")
    r = requests.post('https://sms.ru/sms/send?api_id=840B3593-66E9-5AB4-4965-0B9589019F3A&to=' + str(
        req['phone']) + '&msg=Код%20для%20регистрации:%20' + str(rnd) + '&json=1')
    return Response(
        r.text,
        status=r.status_code,
        content_type=r.headers['content-type'])
@app.route('/register/confirm', methods=['POST'])
def finish_registration():
    """Complete registration: verify the SMS code and create the user document."""
    req = request.get_json(silent=True)
    if mongo.db.users.find_one({'phone': req['phone']}):
        # Bug fix: "utf-8" is a charset, not a media type.
        return Response("Указанный номер уже зарегистирован", status="409",
                        content_type="text/plain; charset=utf-8")
    tmp_users = mongo.db.template_users
    if tmp_users.find_one({'phone': req['phone'], 'code': int(req['code'])}):
        to_insert = {'phone': req['phone'], 'profile': {'first_name': '', 'last_name': '', 'birth_date': None}}
        # Insert a shallow copy: insert_one mutates its argument by adding the
        # BSON ObjectId under '_id', which json.dumps below cannot serialize.
        # (The original duplicated the whole dict literal for the same reason.)
        mongo.db.users.insert_one(dict(to_insert))
        return Response(json.dumps(to_insert), status="200", content_type='application/json')
    else:
        return Response(status="404")
@app.route('/login', methods=['POST'])
def tasks():
    """Log in by phone number and return a signed auth token.

    NOTE(review): the name `tasks` is a misnomer for a /login endpoint, but
    renaming would change the Flask endpoint name, so it is kept.
    """
    req = request.get_json(silent=True)
    found = mongo.db.users.find_one({'phone': req['phone']})
    if not found:
        return Response(status="404")
    token = generate_auth_token(found['_id'])
    payload = json.dumps({'token': str(token)})
    return Response(payload, status="200", content_type='application/json')
# endregion
#region Task
@app.route('/tasks', methods=['GET'])
@authorize
def get_all_task(user):
    """Return all tasks belonging to the authenticated *user* as a JSON array."""
    tasks = mongo.db.tasks.find({'user_id': str(user['_id'])})
    # The cursor is iterable: materialize it directly instead of the old
    # manual append loop.
    result = to_json(list(tasks))
    return Response(result, status="200", content_type='application/json')
@app.route('/task', methods=['POST'])
@authorize
def add_task(user):
    """Create a task owned by the authenticated *user* from the JSON body
    (expects 'data', 'date' and 'guid' keys)."""
    req = request.get_json(silent=True)
    # insert() is deprecated (removed in PyMongo 4); insert_one is the
    # drop-in replacement for a single document.
    mongo.db.tasks.insert_one({'data': req['data'],
                               'date': req['date'],
                               'guid': req['guid'],
                               'user_id': str(user['_id'])})
    return Response(status="200")
@app.route('/task', methods=['PUT'])
@authorize
def update_task(user):
    """Update the data/date of the task identified by 'guid' in the JSON body."""
    req = request.get_json(silent=True)
    # update() is deprecated (removed in PyMongo 4); update_one matches the
    # original single-document semantics (update() defaulted to multi=False).
    mongo.db.tasks.update_one({'guid': req['guid']},
                              {"$set": {'data': req['data'],
                                        'date': req['date']}})
    return Response(status="200")
@app.route('/task', methods=['GET'])
@authorize
def get_task(user):
    """Fetch a single task by the `id` query parameter (matched against guid)."""
    # Removed the unused request.get_json() call: GET carries the id in the
    # query string, not the body.  Also renamed the local so it no longer
    # shadows the builtin `id`.
    task_id = request.args.get('id')
    task = mongo.db.tasks.find_one({'guid': task_id})
    return Response(to_json(task), status="200", content_type='application/json')
@app.route('/task', methods=['DELETE'])
@authorize
def delete_task(user):
    """Delete the task whose 'guid' is given in the JSON body."""
    body = request.get_json(silent=True)
    mongo.db.tasks.delete_one({'guid': body['guid']})
    return Response(status="200")
#endregion
if __name__ == "__main__":
    # Development entry point: Flask's built-in server with default host/port.
    app.run()
| SvTitov/tasker | SRV/tasker_srv/application.py | application.py | py | 5,493 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask_pymongo.PyMongo",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "bson.json_util.default",... |
20489742276 | import scipy
from scipy.special import logsumexp
from sklearn.cluster import KMeans
from sklearn.cluster import SpectralClustering
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, SVR
from ucsl.sinkhornknopp_utils import *
def one_hot_encode(y, n_classes=None):
    """Turn an integer label vector into a one-hot encoded matrix.

    Parameters
    ----------
    y : array-like of int, shape (n_samples,)
        Class labels in [0, n_classes).
    n_classes : int, optional
        Number of classes; inferred as max(y) + 1 when omitted.

    Returns
    -------
    ndarray, shape (n_samples, n_classes)
    """
    if n_classes is None:
        n_classes = np.max(y) + 1
    # np.eye(...)[y] returns a fresh array, so the defensive np.copy(y) of
    # the original was redundant; asarray also accepts plain lists.
    return np.eye(n_classes)[np.asarray(y, dtype=int)]
def sigmoid(x, lambda_=5):
    """Logistic sigmoid with slope *lambda_*: 1 / (1 + exp(-lambda_ * x))."""
    scaled = -lambda_ * x
    return 1.0 / (1.0 + np.exp(scaled))
def py_softmax(x, axis=None):
    """Numerically stable softmax along *axis* via the log-sum-exp trick."""
    log_norm = logsumexp(x, axis=axis, keepdims=True)
    return np.exp(x - log_norm)
def consensus_clustering(clustering_results, n_clusters, index_positives):
    """Fuse several clustering runs into one consensus soft assignment.

    Builds the inter-sample co-occurrence matrix across runs, takes the first
    n_clusters eigenvectors of the (normalized) graph Laplacian as spectral
    features, and K-means clusters the positive samples in that space.

    Returns an (n_samples, n_clusters) matrix S: uniform rows for samples not
    in *index_positives*, one-hot rows (from K-means) for positive samples.
    """
    S = np.ones((clustering_results.shape[0], n_clusters)) / n_clusters
    co_occurrence_matrix = np.zeros((clustering_results.shape[0], clustering_results.shape[0]))
    for i in range(clustering_results.shape[0] - 1):
        for j in range(i + 1, clustering_results.shape[0]):
            co_occurrence_matrix[i, j] = sum(clustering_results[i, :] == clustering_results[j, :])
    co_occurrence_matrix = np.add(co_occurrence_matrix, co_occurrence_matrix.transpose())
    # Unnormalized and symmetrically normalized graph Laplacians.
    Laplacian = np.subtract(np.diag(np.sum(co_occurrence_matrix, axis=1)), co_occurrence_matrix)
    Laplacian_norm = np.subtract(np.eye(clustering_results.shape[0]), np.matmul(
        np.matmul(np.diag(1 / np.sqrt(np.sum(co_occurrence_matrix, axis=1))), co_occurrence_matrix),
        np.diag(1 / np.sqrt(np.sum(co_occurrence_matrix, axis=1)))))
    # Isolated samples produce 0/0 -> NaN in the normalization; zero them out.
    Laplacian_norm = np.nan_to_num(Laplacian_norm)
    # scipy's eigh assumes symmetry; unlike MATLAB's eig this is not checked.
    e_value, e_vector = scipy.linalg.eigh(Laplacian_norm)
    # Fall back to the unnormalized Laplacian if eigenvectors came out complex.
    if np.any(np.iscomplex(e_vector)):
        e_value, e_vector = scipy.linalg.eigh(Laplacian)
    # Spectral embedding: first n_clusters eigenvectors.
    spectral_features = e_vector.real[:, :n_clusters]
    k_means = KMeans(n_clusters=n_clusters).fit(spectral_features[index_positives])
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacement.
    S[index_positives] = one_hot_encode(k_means.labels_.astype(int), n_classes=n_clusters)
    return S
def compute_similarity_matrix(consensus_assignment, clustering_assignments_to_pred=None):
    """Count per-pair agreements between two assignment collections.

    Entry [i, j] is the number of positions where consensus_assignment[i]
    equals clustering_assignments_to_pred[j]; a small epsilon is added and
    the matrix is rescaled so its maximum is 1.

    NOTE(review): the default None for the second argument is unusable as-is
    (len(None) raises); callers must always pass both collections.
    """
    n_rows = len(consensus_assignment)
    n_cols = len(clustering_assignments_to_pred)
    similarity_matrix = np.zeros((n_rows, n_cols))
    for row, reference in enumerate(consensus_assignment):
        for col, candidate in enumerate(clustering_assignments_to_pred):
            similarity_matrix[row, col] = np.sum(reference == candidate)
    similarity_matrix += 1e-3
    similarity_matrix /= np.max(similarity_matrix)
    return similarity_matrix
def compute_spectral_clustering_consensus(clustering_results, n_clusters):
    """Consensus labels via spectral clustering of the co-occurrence matrix.

    Builds the symmetric matrix of pairwise agreement counts across runs,
    rescales it to (0, 1], and feeds it to SpectralClustering as a
    precomputed affinity.  Returns the fitted label vector.
    """
    n_positives = len(clustering_results)
    similarity_matrix = np.zeros((n_positives, n_positives))
    for i in range(n_positives - 1):
        for j in range(i + 1, n_positives):
            agreement = sum(clustering_results[i, :] == clustering_results[j, :])
            similarity_matrix[i, j] = agreement
    similarity_matrix = similarity_matrix + similarity_matrix.T
    similarity_matrix += 1e-3
    similarity_matrix /= np.max(similarity_matrix)
    clusterer = SpectralClustering(n_clusters=n_clusters, affinity='precomputed')
    clusterer.fit(similarity_matrix)
    return clusterer.labels_
def launch_svc(X, y, sample_weight=None, kernel='linear', C=1):
    """Fit a support-vector classifier and return its hyperplane.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vectors.
    y : array-like, shape (n_samples,)
        Target values.
    sample_weight : array-like, shape (n_samples,), optional
        Per-sample training weights.
    kernel : str
        Kernel used for the SVM.
    C : float
        SVM regularization hyperparameter.

    Returns
    -------
    SVM_coefficient : array-like, shape (1, n_features)
        The coefficient of the resulting SVM.
    SVM_intercept : array-like, shape (1,)
        The intercept of the resulting SVM.
    """
    classifier = SVC(kernel=kernel, C=C)
    classifier.fit(X, y, sample_weight=sample_weight)
    return classifier.coef_, classifier.intercept_
def launch_svr(X, y, sample_weight=None, kernel='linear', C=1):
    """Fit a support-vector regressor and return its hyperplane.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vectors.
    y : array-like, shape (n_samples,)
        Target values.
    sample_weight : array-like, shape (n_samples,), optional
        Per-sample training weights.
    kernel : str
        Kernel used for the SVM.
    C : float
        SVM regularization hyperparameter.

    Returns
    -------
    SVM_coefficient : array-like, shape (1, n_features)
        The coefficient of the resulting SVM.
    SVM_intercept : array-like, shape (1,)
        The intercept of the resulting SVM.
    """
    regressor = SVR(kernel=kernel, C=C)
    regressor.fit(X, y, sample_weight=sample_weight)
    return regressor.coef_, regressor.intercept_
def launch_logistic(X, y, sample_weight=None):
    """Fit a logistic regression and return (coefficients, intercept).

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vectors.
    y : array-like, shape (n_samples,)
        Target values.
    sample_weight : array-like, shape (n_samples,), optional
        Per-sample training weights.

    Returns
    -------
    logistic_coefficient : array-like, shape (1, n_features)
    logistic_intercept : array-like, shape (1,)
    """
    clf = LogisticRegression(max_iter=200)
    clf.fit(X, y, sample_weight=sample_weight)
    return clf.coef_, clf.intercept_
| rlouiset/py_ucsl | ucsl/utils.py | utils.py | py | 6,772 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "scipy.special.logsumexp",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "scipy.linalg.eigh",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "scipy.linalg",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "scipy.lin... |
import joblib
# Vocabulary tokens and intent classes for the Twitter-bot NLU model,
# persisted so the chatbot can reload them without retraining.
wordsTB = ["'s", ',', 'keywords', 'Twitter', 'account', 'a', 'all', 'anyone', 'are', 'awesome', 'be', 'behavior', 'by', 'bye', 'can', 'chatting', 'check', 'could', 'data', 'day', 'detail', 'do', 'dont', 'find', 'for', 'give', 'good', 'goodbye', 'have', 'hello', 'help', 'helpful', 'helping', 'hey', 'hi', 'history', 'how', 'i', 'id', 'is', 'later', 'list', 'load', 'locate', 'log', 'looking', 'lookup', 'management', 'me', 'module', 'next', 'nice', 'of', 'offered', 'open', 'provide', 'reaction', 'related', 'result', 'search', 'searching', 'see', 'show', 'support', 'task', 'thank', 'thanks', 'that', 'there', 'till', 'time', 'to', 'transfer', 'up', 'want', 'what', 'which', 'with', 'you']
classesTB = ['goodbye', 'greeting', 'options', 'thanks', 'no_response']
joblib.dump(wordsTB, 'wordsTB.pkl')
joblib.dump(classesTB, 'classesTB.pkl')
'''
x = joblib.load('x.pkl')
print(x)
''' | kaitong-li/Twitter-Bot | Twitter Bot/generatePkl.py | generatePkl.py | py | 906 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "joblib.dump",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "joblib.dump",
"line_number": 6,
"usage_type": "call"
}
] |
27579907019 | from pyspark import SparkConf
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession
# Spark tuned for a large single-node job: 32 cores, 50 GB driver/executor heap.
# Bug fix: the original keys "spark.driver.maxResultsSize" and
# "spark.executor.memory_overhead" are not real Spark properties and were
# silently ignored; the documented names are spark.driver.maxResultSize and
# spark.executor.memoryOverhead.
conf = SparkConf().set("spark.cores.max", "32") \
    .set("spark.driver.memory", "50g") \
    .set("spark.executor.memory", "50g") \
    .set("spark.executor.memoryOverhead", "50g") \
    .set("spark.driver.maxResultSize", "16g")\
    .set("spark.executor.heartbeatInterval", "30s")
sc = SparkContext(conf=conf).getOrCreate()
spark = SparkSession(sc)
# Load the prior-orders basket table (one row per order, items as a string).
baskets = spark.read.csv('./data/baskets_prior.csv',header=True, inferSchema=True)
baskets.createOrReplaceTempView("baskets")
baskets.show(5)
print(baskets.count())
# Transform string to list: the CSV stores each basket as a JSON array
# string; parse it into a real array<string> column for FP-Growth.
import pyspark.sql.functions as F
df2 = baskets.withColumn(
    "new_items",
    F.from_json(F.col("items"), "array<string>")
)
df2 = df2.drop('items')
df2.show(5)
from pyspark.ml.fpm import FPGrowth
import time
start = time.time()
local_time = time.ctime(start)
print("Start time:", local_time)
# Mine frequent itemsets / association rules; minSupport is tiny because the
# basket table has millions of rows.
fpGrowth = FPGrowth(itemsCol="new_items", minSupport=0.000015, minConfidence=0.7)
model = fpGrowth.fit(df2)
model.associationRules.show()
print(model.associationRules.count())
assoRules = model.associationRules
freqItems = model.freqItemsets
end = time.time()
print("run time: ", (end-start)/60)  # elapsed minutes
local_time = time.ctime(end)
print("End time:", local_time)
# Frequent itemsets: sort by frequency and save through pandas.
freq_pd =freqItems.toPandas()
freq_pd = freq_pd.sort_values('freq', ascending=False)
print(freq_pd.head(5))
freq_pd.to_csv('./data/freqItems_baskets3M.csv', index=False)
# save rules
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
def array_to_string(my_list):
    """Render a list as "[a,b,c]" so an array column can be written to CSV."""
    joined = ','.join(str(elem) for elem in my_list)
    return '[' + joined + ']'
# Register the UDF and stringify the array columns: Spark's CSV writer
# cannot serialize array<string> columns directly.
array_to_string_udf = udf(array_to_string, StringType())
assoRules = assoRules.withColumn('antecedent', array_to_string_udf(assoRules["antecedent"]))
assoRules = assoRules.withColumn('consequent', array_to_string_udf(assoRules["consequent"]))
# NOTE(review): DataFrame.show() prints and returns None, so this print
# appends "None" after the table.
print('after convert string to save: ', assoRules.show(7))
assoRules.coalesce(1).write.csv('./data/assoRules_baskets3M_50_70%')
| thuy4tbn99/spark_instacart | baskets.py | baskets.py | py | 2,095 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyspark.SparkConf",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pyspark.context.SparkContext",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.session.SparkSession",
"line_number": 11,
"usage_type": "call"
},
{
"ap... |
# Write your solution here:
import pygame
pygame.init()
naytto = pygame.display.set_mode((640, 480))  # display surface
robo = pygame.image.load("robo.png")
leveys, korkeus = 640, 480  # window width, height
x = 0
y = 0
# Direction of travel around the window edge:
# 1 = right along the top, 2 = down the right side,
# 3 = left along the bottom, 4 = up the left side.
suunta = 1
kello = pygame.time.Clock()
while True:
    for tapahtuma in pygame.event.get():
        if tapahtuma.type == pygame.QUIT:
            exit()
    naytto.fill((0, 0, 0))
    naytto.blit(robo, (x, y))
    pygame.display.flip()
    # Advance one pixel in the current direction; turn clockwise when the
    # sprite's edge reaches the corresponding window edge.
    if suunta == 1:
        x += 1
        if x+robo.get_width() == leveys:
            suunta = 2
    elif suunta == 2:
        y += 1
        if y+robo.get_height() == korkeus:
            suunta = 3
    elif suunta == 3:
        x -= 1
        if x == 0:
            suunta = 4
    elif suunta == 4:
        y -= 1
        if y == 0:
            suunta = 1
    kello.tick(60)  # cap at 60 FPS
| jevgenix/Python_OOP | osa13-06_reunan_kierto/src/main.py | main.py | py | 796 | python | fi | code | 4 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.loa... |
18711654900 | import argparse
import logging
from pathlib import Path
from typing import List
import yaml
from topaz3.conversions import phase_remove_bad_values, phase_to_map
from topaz3.database_ops import prepare_labels_database, prepare_training_database
from topaz3.delete_temp_files import delete_temp_files
from topaz3.get_cc import get_cc
from topaz3.mtz_info import mtz_get_cell
from topaz3.space_group import mtz_find_space_group, textfile_find_space_group
def prepare_training_data(
    phase_directory: str,
    cell_info_directory: str,
    cell_info_path: str,
    space_group_directory: str,
    space_group_path: str,
    xyz_limits: List[int],
    output_directory: str,
    database: str = None,
    delete_temp: bool = True,
):
    """Convert both the original and inverse hands of each structure into a
    regular map file, using the structure's cell info and space group and the
    requested xyz dimensions.

    Expects *phase_directory*, *cell_info_directory* and *space_group_directory*
    to contain one sub-directory per structure, with identical structure names
    in all three.  When *database* is given, CC values are also harvested from
    the .lst files and written into the training/labels tables.  Temporary
    .phs files are removed afterwards unless *delete_temp* is False.

    Returns True if no exceptions were raised; failures are logged and
    re-raised.
    """
    logging.info("Preparing training data")
    # Check all directories exist
    try:
        phase_dir = Path(phase_directory)
        assert phase_dir.exists()
    except Exception:
        logging.error(f"Could not find phase directory at {phase_directory}")
        raise
    try:
        cell_info_dir = Path(cell_info_directory)
        assert cell_info_dir.exists()
    except Exception:
        logging.error(f"Could not find cell info directory at {cell_info_directory}")
        raise
    try:
        space_group_dir = Path(space_group_directory)
        assert space_group_dir.exists()
    except Exception:
        logging.error(
            f"Could not find space group directory at {space_group_directory}"
        )
        raise
    try:
        output_dir = Path(output_directory)
        assert output_dir.exists()
    except Exception:
        logging.error(f"Could not find output directory at {output_directory}")
        raise
    # Check xyz limits are of correct format: exactly three integers
    try:
        assert type(xyz_limits) == list or type(xyz_limits) == tuple
        assert len(xyz_limits) == 3
        assert all(type(values) == int for values in xyz_limits)
    except AssertionError:
        logging.error(
            "xyz_limits muste be provided as a list or tupls of three integer values"
        )
        raise
    # Get lists of child directories; all three trees must list the same structures
    phase_structs = [struct.stem for struct in phase_dir.iterdir()]
    cell_info_structs = [struct.stem for struct in cell_info_dir.iterdir()]
    space_group_structs = [struct.stem for struct in space_group_dir.iterdir()]
    assert (
        phase_structs == cell_info_structs == space_group_structs
    ), "Same structures not found in all given directories"
    phase_structs = sorted(phase_structs)
    logging.debug(f"Following structures found to transform: {phase_structs}")
    # Get cell info and space group for every structure before converting
    cell_info_dict = {}
    space_group_dict = {}
    # Set up function to get space group depending on suffix (.mtz vs text log)
    if Path(space_group_path).suffix == ".mtz":
        find_space_group = mtz_find_space_group
    else:
        find_space_group = textfile_find_space_group
    for struct in phase_structs:
        logging.info(
            f"Collecting info from {struct}, {phase_structs.index(struct)+1}/{len(phase_structs)}"
        )
        try:
            cell_info_file = cell_info_dir / Path(struct) / Path(cell_info_path)
            assert cell_info_file.exists()
        except Exception:
            logging.error(f"Could not find cell info file at {cell_info_dir}")
            raise
        try:
            cell_info_dict[struct] = mtz_get_cell(cell_info_file)
        except Exception:
            logging.error(f"Could not get cell info from {cell_info_file}")
            raise
        try:
            space_group_file = space_group_dir / Path(struct) / Path(space_group_path)
            assert space_group_file.exists()
        except Exception:
            logging.error(f"Could not find space group file at {space_group_dir}")
            raise
        try:
            space_group_dict[struct] = find_space_group(space_group_file)
        except Exception:
            logging.error(f"Could not get space group from {space_group_file}")
            raise
    logging.info("Collected cell info and space group")
    # Begin transformation: one .map per hand per structure
    for struct in phase_structs:
        logging.info(
            f"Converting {struct}, {phase_structs.index(struct)+1}/{len(phase_structs)}"
        )
        # Create original and inverse hands
        try:
            original_hand = Path(
                phase_dir / struct / space_group_dict[struct] / (struct + ".phs")
            )
            inverse_hand = Path(
                phase_dir / struct / space_group_dict[struct] / (struct + "_i.phs")
            )
            # Catch a weird situation where some space groups RXX can also be called RXX:H
            if (space_group_dict[struct][0] == "R") and (
                original_hand.exists() is False
            ):
                original_hand = Path(
                    phase_dir
                    / struct
                    / (space_group_dict[struct] + ":H")
                    / (struct + ".phs")
                )
                inverse_hand = Path(
                    phase_dir
                    / struct
                    / (space_group_dict[struct] + ":H")
                    / (struct + "_i.phs")
                )
            assert original_hand.exists(), f"Could not find original hand for {struct}"
            assert inverse_hand.exists(), f"Could not find inverse hand for {struct}"
        except Exception:
            logging.error(
                f"Could not find phase files of {struct} in space group {space_group_dict[struct]}"
            )
            raise
        # Convert original hand
        # Filter the phase file first; a cleaned copy goes next to output_dir
        original_hand_good = phase_remove_bad_values(
            original_hand, output_dir.parent / (original_hand.stem + "_temp.phs")
        )
        # Log the result (identity check: the helper returns the input when clean)
        if original_hand is not original_hand_good:
            logging.info(
                f"Filtered bad values from {original_hand.stem} and stored results in {original_hand_good}"
            )
        try:
            phase_to_map(
                original_hand_good,
                cell_info_dict[struct],
                space_group_dict[struct],
                xyz_limits,
                output_dir / (struct + ".map"),
            )
        except Exception:
            logging.error(f"Could not convert original hand for {struct}")
            raise
        # Convert inverse hand (same filtering/logging as above)
        # Check the phase file first
        inverse_hand_good = phase_remove_bad_values(
            inverse_hand, output_dir.parent / (inverse_hand.stem + "_temp.phs")
        )
        # Log the result
        if inverse_hand is not inverse_hand_good:
            logging.info(
                f"Filtered bad values from {inverse_hand.stem} and stored results in {inverse_hand_good}"
            )
        try:
            phase_to_map(
                inverse_hand_good,
                cell_info_dict[struct],
                space_group_dict[struct],
                xyz_limits,
                output_dir / (struct + "_i.map"),
            )
        except Exception:
            logging.error(f"Could not convert inverse hand for {struct}")
            raise
        logging.info(f"Successfully converted {struct}")
    logging.info("Finished conversions")
    # If a database file is given, attempt to provide the training and labels table
    if database is not None:
        logging.info(f"Adding to database at {database}")
        # Build up database - collect all cc information first then put it into database
        logging.info("Collecting CC information")
        # Dictionary of correlation coefficients, harvested from the .lst files
        cc_original_dict = {}
        cc_inverse_dict = {}
        for struct in phase_structs:
            # Create original and inverse hands (same R/R:H fallback as above)
            try:
                original_hand = Path(
                    phase_dir / struct / space_group_dict[struct] / (struct + ".lst")
                )
                inverse_hand = Path(
                    phase_dir / struct / space_group_dict[struct] / (struct + "_i.lst")
                )
                # Catch a weird situation where some space groups RXX can also be called RXX:H
                if (space_group_dict[struct][0] == "R") and (
                    original_hand.exists() is False
                ):
                    original_hand = Path(
                        phase_dir
                        / struct
                        / (space_group_dict[struct] + ":H")
                        / (struct + ".lst")
                    )
                    inverse_hand = Path(
                        phase_dir
                        / struct
                        / (space_group_dict[struct] + ":H")
                        / (struct + "_i.lst")
                    )
                assert (
                    original_hand.exists()
                ), f"Could not find original hand for {struct}"
                assert (
                    inverse_hand.exists()
                ), f"Could not find inverse hand for {struct}"
            except Exception:
                logging.error(
                    f"Could not find lst files of {struct} in space group {space_group_dict[struct]}"
                )
                raise
            try:
                cc_original_dict[struct] = get_cc(original_hand)
                cc_inverse_dict[struct] = get_cc(inverse_hand)
            except Exception:
                logging.error(
                    f"Could not get CC info of {struct} in space group {space_group_dict[struct]}"
                )
                raise
        try:
            database_path = Path(database)
            assert database_path.exists()
        except Exception:
            logging.error(f"Could not find database at {database}")
            raise
        # Generate list of results: (name, cc, cc_i, original_better, inverse_better)
        cc_results = []
        for struct in phase_structs:
            cc_results.append(
                (
                    struct,
                    cc_original_dict[struct],
                    cc_inverse_dict[struct],
                    (cc_original_dict[struct] > cc_inverse_dict[struct]),
                    (cc_original_dict[struct] < cc_inverse_dict[struct]),
                )
            )
        # Put in database
        prepare_training_database(str(database_path), cc_results)
        prepare_labels_database(str(database_path))
    # Delete temporary files if requested
    if delete_temp is True:
        delete_temp_files(output_directory)
        logging.info("Deleted temporary files in output directory")
    return True
def params_from_yaml(args):
    """Extract the prepare_training_data parameters from a yaml file into a dict."""
    # Verify the config file exists before trying to read it
    try:
        config_file_path = Path(args.config_file)
        assert config_file_path.exists()
    except Exception:
        logging.error(f"Could not find config file at {args.config_file}")
        raise
    # Parse the yaml into a plain dict
    try:
        with open(config_file_path, "r") as config_file:
            params = yaml.safe_load(config_file)
    except Exception:
        logging.error(
            f"Could not extract parameters from yaml file at {config_file_path}"
        )
        raise
    # Fill optional keys with their defaults
    params.setdefault("db_path", None)
    params.setdefault("delete_temp", True)
    return params
def params_from_cmd(args):
    """Build the prepare_training_data parameter dict from parsed CLI arguments.

    Bug fix: `--keep_temp` is declared with action="store_false", so
    args.keep_temp is True when the flag is absent and False when it is
    given.  The old code did `if args.keep_temp: delete_temp = False`,
    which inverted the flag (temp files were kept by default and deleted
    when --keep_temp was passed).  delete_temp now mirrors args.keep_temp,
    matching the flag's help text and the yaml default of True.
    """
    params = {
        "phase_dir": args.phase_dir,
        "cell_info_dir": args.cell_info_dir,
        "cell_info_path": args.cell_info_path,
        "space_group_dir": args.space_group_dir,
        "space_group_path": args.space_group_path,
        "xyz_limits": args.xyz,
        "db_path": args.db,
        "output_dir": args.output_dir,
        # True (no flag) -> delete temp files; False (--keep_temp) -> keep them
        "delete_temp": args.keep_temp,
    }
    return params
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(name="debug_log")
    userlog = logging.getLogger(name="usermessages")
    # Parser for command line interface: two sub-commands, "yaml" (read a
    # config file) and "cmd" (all parameters on the command line); each sets
    # `func` to the matching parameter extractor.
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    yaml_parser = subparsers.add_parser("yaml")
    yaml_parser.add_argument(
        "config_file",
        type=str,
        help="yaml file with configuration information for this program",
    )
    yaml_parser.set_defaults(func=params_from_yaml)
    cmd_parser = subparsers.add_parser("cmd")
    cmd_parser.add_argument(
        "phase_dir", type=str, help="top level directory for phase information"
    )
    cmd_parser.add_argument(
        "cell_info_dir", type=str, help="top level directory for cell info"
    )
    cmd_parser.add_argument(
        "cell_info_path", type=str, help="cell info file within each structure folder"
    )
    cmd_parser.add_argument(
        "space_group_dir", type=str, help="top level directory for space group"
    )
    cmd_parser.add_argument(
        "space_group_path",
        type=str,
        help="space group file within each structure folder",
    )
    cmd_parser.add_argument(
        "xyz", type=int, nargs=3, help="xyz size of the output map file"
    )
    cmd_parser.add_argument(
        "output_dir", type=str, help="directory to output all map files to"
    )
    cmd_parser.add_argument(
        "db",
        type=str,
        help="location of the sqlite3 database to store training information",
    )
    # NOTE(review): store_false means args.keep_temp is False when the flag
    # IS given — check how params_from_cmd maps this onto delete_temp.
    cmd_parser.add_argument(
        "--keep_temp",
        action="store_false",
        help="keep the temporary files after processing",
    )
    cmd_parser.set_defaults(func=params_from_cmd)
    # Extract the parameters based on the yaml/command line argument
    args = parser.parse_args()
    parameters = args.func(args)
    print(parameters)
    # Execute the command; a missing parameter key is logged, not raised
    try:
        prepare_training_data(
            parameters["phase_dir"],
            parameters["cell_info_dir"],
            parameters["cell_info_path"],
            parameters["space_group_dir"],
            parameters["space_group_path"],
            parameters["xyz_limits"],
            parameters["output_dir"],
            parameters["db_path"],
            parameters["delete_temp"],
        )
    except KeyError as e:
        logging.error(f"Could not find parameter {e} to prepare training data")
| mevol/python_topaz3 | topaz3/prepare_training_data.py | prepare_training_data.py | py | 14,661 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_numbe... |
69936276028 | import torch.nn as nn
import torch.optim as optimizers
from nlp.generation.models import CharLSTM
class CharLSTMTrainer:
    """Cross-entropy trainer for a character-level LSTM language model."""

    def __init__(self,
                 model: CharLSTM,
                 vocab_size: int,
                 learning_rate: float = 1e-3,
                 weights_decay: float = 1e-3,
                 epochs: int = 1,
                 logging_level: int = 0):
        self.vocab_size = vocab_size
        self.logging_level = logging_level
        self.model = model.train()  # keep the model in training mode
        self.epochs = epochs
        self.learning_rate = learning_rate
        self._loss = nn.CrossEntropyLoss()
        self._optimizer = optimizers.Adam(self.model.parameters(),
                                          lr=self.learning_rate,
                                          weight_decay=weights_decay)

    def train(self, text_dataloader):
        """Run the optimization loop over *text_dataloader* for self.epochs epochs."""
        for _ in range(self.epochs):
            for batch_inputs, batch_targets in text_dataloader:
                self._optimizer.zero_grad()
                logits = self.model(batch_inputs)
                # transpose(1, 2) puts the class dimension second, as
                # CrossEntropyLoss expects (assumes the model emits
                # (batch, seq, vocab) — TODO confirm against CharLSTM).
                batch_loss = self._loss(logits.transpose(1, 2), batch_targets)
                batch_loss.backward()
                self._optimizer.step()
| Danielto1404/bachelor-courses | python-backend/projects/nlp.ai/nlp/generation/trainers.py | trainers.py | py | 1,129 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "nlp.generation.models.CharLSTM",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torc... |
39680498179 | """ General functions for data_tables and data_table_manager
We are using a class here just to make it easier to pass around
"""
import logging
import pprint
import subprocess
from pathlib import Path
import re
from typing import Union
import matplotlib.pyplot as mpl
import numpy as np
import pandas as pd
from pylibrary.plotting import plothelpers as PH
from pylibrary.tools import cprint
from pyqtgraph.Qt import QtGui
import ephys.datareaders as DR
from ephys.ephys_analysis import spike_analysis
from ephys.tools import utilities
import ephys
UTIL = utilities.Utility()  # shared ephys utility helper instance
CP = cprint.cprint  # shorthand for colored console printing
class CustomFormatter(logging.Formatter):
    """logging.Formatter that wraps each record in an ANSI color matching its level."""

    grey = "\x1b[38;21m"
    yellow = "\x1b[33;21m"
    red = "\x1b[31;21m"
    bold_red = "\x1b[31;1m"
    white = "\x1b[37m"
    reset = "\x1b[0m"
    lineformat = "%(asctime)s - %(levelname)s - (%(filename)s:%(lineno)d) %(message)s "

    # One colorized template per severity level.
    FORMATS = {
        logging.DEBUG: grey + lineformat + reset,
        logging.INFO: white + lineformat + reset,
        logging.WARNING: yellow + lineformat + reset,
        logging.ERROR: red + lineformat + reset,
        logging.CRITICAL: bold_red + lineformat + reset,
    }

    def format(self, record):
        """Format *record* using the colored template for its level."""
        colored_fmt = self.FORMATS.get(record.levelno)
        return logging.Formatter(colored_fmt).format(record)
def get_git_hashes():
    """Return the current git HEAD hashes (as bytes) for the project repo in
    the working directory and for the installed ephys package, keyed
    'project' and 'ephys'.
    """
    # subprocess.run supersedes Popen+communicate for simple output capture.
    git_head_hash = subprocess.run(
        ["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE, check=False
    ).stdout.strip()
    # The ephys repo may live elsewhere; point git at the package directory.
    ephyspath = Path(ephys.__file__).parent
    ephys_git_hash = subprocess.run(
        ["git", "-C", str(ephyspath), "rev-parse", "HEAD"],
        shell=False,
        stdout=subprocess.PIPE,
        check=False,
    ).stdout.strip()
    return {"project": git_head_hash, "ephys": ephys_git_hash}
def create_logger(
    log_name: str = "Log Name",
    log_file: str = "log_file.log",
    log_message: str = "Starting Logging",
):
    """Build a DEBUG-level logger that writes plain text to *log_file* and
    colorized output (CustomFormatter) to the console, then logs *log_message*.
    """
    # fontTools' subset logger is noisy at DEBUG; silence it globally.
    logging.getLogger("fontTools.subset").disabled = True
    logger = logging.getLogger(log_name)
    logger.setLevel(logging.DEBUG)
    # Plain-text handler for the log file
    file_handler = logging.FileHandler(filename=log_file)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(
        logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s (%(filename)s:%(lineno)d) - %(message)s "
        )
    )
    # Colorized handler for the console
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(CustomFormatter())
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    logger.info(log_message)
    return logger
# Module-level logger shared by the spike-analysis helpers in this file.
Logger = create_logger(
    log_name="Spike Analysis",
    log_file="spike_analysis.log",
    log_message="Starting Process Spike Analysis",
)
PrettyPrinter = pprint.PrettyPrinter  # alias kept for callers importing it from here
# Column names for the measured-parameters data table.
datacols = [
    "holding",
    "RMP",
    "RMP_SD",
    "Rin",
    "taum",
    "dvdt_rising",
    "dvdt_falling",
    "current",
    "AP_thr_V",
    "AP_HW",
    "AP15Rate",
    "AdaptRatio",
    "AHP_trough_V",
    "AHP_depth_V",
    "tauh",
    "Gh",
    "FiringRate",
]
# Keys expected in the per-protocol IV analysis dictionaries.
iv_keys: list = [
    "holding",
    "WCComp",
    "CCComp",
    "BridgeAdjust",
    "RMP",
    "RMP_SD",
    "RMPs",
    "Irmp",
    "taum",
    "taupars",
    "taufunc",
    "Rin",
    "Rin_peak",
    "tauh_tau",
    "tauh_bovera",
    "tauh_Gh",
    "tauh_vss",
]
# Keys expected in the per-protocol spike analysis dictionaries.
spike_keys: list = [
    "FI_Growth",
    "AdaptRatio",
    "FI_Curve",
    "FiringRate",
    "AP1_Latency",
    "AP1_HalfWidth",
    "AP1_HalfWidth_interpolated",
    "AP2_Latency",
    "AP2_HalfWidth",
    "AP2_HalfWidth_interpolated",
    "FiringRate_1p5T",
    "AHP_Depth",
    "AHP_Trough",
    "spikes",
    "iHold",
    "pulseDuration",
    "baseline_spikes",
    "poststimulus_spikes",
]
# map spike measurements (datacols names) to top level keys
mapper: dict = {
    "AP1_HalfWidth": "halfwidth",
    "AP1_HalfWidth_interpolated": "halfwidth_interpolated",
    "AHP_trough_V": "trough_V",
    "AHP_Trough": "trough_T",
    "AHP_depth_V": "trough_V",
    "AP1_Latency": "AP_latency",
    "AP_thr_V": "AP_begin_V",
    "AP_HW": "halfwidth",
    "dvdt_rising": "dvdt_rising",
    "dvdt_falling": "dvdt_falling",
}
# map summary/not individual spike data to top level keys
mapper1: dict = {
    "AP15Rate": "FiringRate_1p5T",
    "AdaptRatio": "AdaptRatio",
}
# map IV measurements (datacols names) to iv_keys names
iv_mapper: dict = {
    "tauh": "tauh_tau",
    "Gh": "tauh_Gh",
    "taum": "taum",
    "Rin": "Rin",
    "RMP": "RMP",
}
def print_spike_keys(row):
    """Per-row hook for DataFrame.apply: returns *row* unchanged.

    The diagnostic print of row.IV is currently disabled; row.IV is still
    touched so a missing attribute surfaces as an error, as before.
    """
    _ = pd.isnull(row.IV)  # evaluated for its attribute access, result unused
    return row
class Functions:
def __init__(self):
self.textbox = None
pass
def get_row_selection(self, table_manager):
"""
Find the selected rows in the currently managed table, and if there is a valid selection,
return the index to the first row and the data from that row
"""
self.selected_index_rows = table_manager.table.selectionModel().selectedRows()
if self.selected_index_rows is None:
return None, None
else:
index_row = self.selected_index_rows[0]
selected = table_manager.get_table_data(index_row) # table_data[index_row]
if selected is None:
return None, None
else:
return index_row, selected
def get_multiple_row_selection(self, table_manager):
"""
Find the selected rows in the currently managed table, and if there is a valid selection,
return a list of indexs from the selected rows.
"""
self.selected_index_rows = table_manager.table.selectionModel().selectedRows()
if self.selected_index_rows is None:
return None, None
else:
return self.selected_index_rows
def get_datasummary_protocols(self, datasummary):
"""
Print a configuration file-like text of all the datasummary protocols, as categorized here.
"""
data_complete = datasummary["data_complete"].values
print("# of datasummary entries: ", len(data_complete))
protocols = []
for i, prots in enumerate(data_complete):
prots = prots.split(",")
for prot in prots:
protocols.append(prot[:-4].strip(" ")) # remove trailing "_000" etc
allprots = sorted(list(set(protocols)))
print("# of unique protocols: ", len(allprots))
# print(allprots)
# make a little table for config dict:
txt = "protocols:\n"
txt += " CCIV:"
ncciv = 0
prots_used = []
for i, prot in enumerate(allprots):
if "CCIV".casefold() in prot.casefold():
computes = "['RmTau', 'IV', 'Spikes', 'FI']"
if "posonly".casefold() in prot.casefold(): # cannot compute rmtau for posonly
computes = "['IV', 'Spikes', 'FI']"
txt += f"\n {prot:s}: {computes:s}"
prots_used.append(i)
ncciv += 1
if ncciv == 0:
txt += " None"
txt += "\n VCIV:"
nvciv = 0
for i, prot in enumerate(allprots):
if "VCIV".casefold() in prot.casefold():
computes = "['VC']"
txt += f"\n {prot:s}: {computes:s}"
nvciv += 1
prots_used.append(i)
if nvciv == 0:
txt += " None"
txt += "\n Maps:"
nmaps = 0
for i, prot in enumerate(allprots):
if "Map".casefold() in prot.casefold():
computes = "['Maps']"
txt += f"\n {prot:s}: {computes:s}"
nmaps += 1
prots_used.append(i)
if nmaps == 0:
txt += " None"
txt += "\n Minis:"
nminis = 0
for i, prot in enumerate(allprots):
cprot = prot.casefold()
if "Mini".casefold() in cprot or "VC_Spont".casefold() in cprot:
computes = "['Mini']"
txt += f"\n {prot:s}: {computes:s}"
nminis += 1
prots_used.append(i)
if nminis == 0:
txt += " None"
txt += "\n PSCs:"
npsc = 0
for i, prot in enumerate(allprots):
if "PSC".casefold() in prot.casefold():
computes = "['PSC']"
txt += f"\n {prot:s}: {computes:s}"
npsc += 1
prots_used.append(i)
if npsc == 0:
txt += " None"
txt += "\n Uncategorized:"
allprots = [prot for i, prot in enumerate(allprots) if i not in prots_used]
nother = 0
for i, prot in enumerate(allprots):
if len(prot) == 0 or prot == " ":
prot = "No Name"
computes = "None"
txt += f"\n {prot:s}: {computes:s}"
nother += 1
if nother == 0:
txt += "\n None"
print(f"\n{txt:s}\n")
# this print should be pasted into the configuration file (watch indentation)
def moving_average(self, data, window_size):
"""moving_average Compute a triangular moving average on the data over a window
Parameters
----------
data : _type_
_description_
window_size : _type_
_description_
Returns
-------
_type_
_description_
"""
window = np.bartlett(window_size)
# Normalize the window
window /= window.sum()
return np.convolve(data, window, "valid") / window_size
    def get_slope(self, y, x, index, window_size):
        """get_slope get slope of a smoothed curve at a given index

        Smooths both arrays with moving_average(), then estimates the slope
        dy/dx by central differences at the (shifted) index.

        Parameters
        ----------
        y : array-like
            dependent values
        x : array-like
            independent values, same length as y
        index : int
            position in the unsmoothed arrays at which the slope is wanted
        window_size : int
            smoothing window width passed to moving_average()

        Returns
        -------
        float or None
            central-difference slope at the index, or None when the shifted
            index falls too close to either end of the smoothed arrays
        """
        # Smooth the data
        y_smooth = self.moving_average(y, window_size)
        x_smooth = self.moving_average(x, window_size)
        # Adjust the index for the reduced size of the smoothed data
        index -= window_size // 2
        if index < 1 or index >= len(y_smooth) - 1:
            # Can't calculate slope at the start or end
            return None
        else:
            dy = y_smooth[index + 1] - y_smooth[index - 1]
            dx = x_smooth[index + 1] - x_smooth[index - 1]
            return dy / dx
def draw_orthogonal_line(self, x, y, index, slope, length, color, ax):
# Calculate the slope of the orthogonal line
orthogonal_slope = -1.0 / slope
# Calculate the start and end points of the orthogonal line
x_start = x[index] - length / 2
x_end = x[index] + length / 2
y_start = y[index] + orthogonal_slope * (x_start - x[index])
y_end = y[index] + orthogonal_slope * (x_end - x[index])
# Plot the orthogonal line
ax.plot([x_start, x_end], [y_start, y_end], color=color)
    def get_selected_cell_data_spikes(self, experiment, table_manager, assembleddata):
        """For each selected table row, find the spike evoked by the lowest
        current across all of the cell's protocols and plot it: waveform
        (V vs t, re the spike peak) and phase plane (dV/dt vs V), with the
        threshold point and halfwidth marked.

        Returns the last selected cell's dataframe, or None when no rows are
        selected.
        """
        self.get_row_selection(table_manager)
        if self.selected_index_rows is not None:
            for nplots, index_row in enumerate(self.selected_index_rows):
                selected = table_manager.get_table_data(index_row)
                day = selected.date[:-4]
                slicecell = selected.cell_id[-4:]
                cell_df, cell_df_tmp = self.get_cell(experiment, assembleddata, cell=selected.cell_id)
                protocols = list(cell_df["Spikes"].keys())
                min_index = None
                min_current = 1  # 1 A: effectively +infinity for comparison
                V = None
                min_protocol = None
                spike = None
                for ip, protocol in enumerate(protocols):
                    min_current_index, current, trace = self.find_lowest_current_trace(
                        cell_df["Spikes"][protocol]
                    )
                    if current < min_current:
                        # NOTE(review): if no protocol has current < 1, I (and
                        # spike) stay unbound and the print below raises —
                        # presumably spikes always exist here; confirm.
                        I = current
                        V = trace
                        min_index = min_current_index
                        min_protocol = ip
                        min_current = current
                        spike = cell_df["Spikes"][protocol]
                pp = PrettyPrinter(indent=4)
                print("spike keys: ", spike["spikes"].keys())
                print(
                    "min I : ",
                    I,
                    "min V: ",
                    V,
                    "min index: ",
                    min_index,
                    "min_current: ",
                    min_current,
                )
                pp.pprint(spike["spikes"][V][min_index])
                low_spike = spike["spikes"][V][min_index]
                if nplots == 0:
                    # lazy import; figure/axes are created once and reused
                    import matplotlib.pyplot as mpl

                    f, ax = mpl.subplots(1, 2, figsize=(10, 5))
                # time axis in ms, relative to the spike peak
                vtime = (low_spike.Vtime - low_spike.peak_T) * 1e3
                ax[0].plot(vtime, low_spike.V * 1e3)
                ax[1].plot(low_spike.V * 1e3, low_spike.dvdt)
                dvdt_ticks = np.arange(-4, 2.01, 0.1)
                t_indices = np.array([np.abs(vtime - point).argmin() for point in dvdt_ticks])
                # sample index closest to the AP threshold (latency) time
                thr_index = np.abs(vtime - (low_spike.AP_latency - low_spike.peak_T) * 1e3).argmin()
                # Create a colormap
                cmap = mpl.get_cmap("tab10")
                # Create an array of colors based on the index of each point
                colors = cmap(np.linspace(0, 1, len(t_indices)))
                # for i in range(len(t_indices)):
                #     local_slope = self.get_slope(
                #         low_spike.V * 1e3, low_spike.dvdt, t_indices[i], 7,
                #     )
                #     if local_slope is not None:
                #         self.draw_orthogonal_line(
                #             low_spike.V * 1e3,
                #             low_spike.dvdt,
                #             index=t_indices[i],
                #             slope=local_slope,
                #             length=5.0,
                #             color=colors[i],
                #             ax=ax[1],
                #         )
                #     ax[1].scatter(
                #         low_spike.V[t_indices[i]] * 1e3,
                #         low_spike.dvdt[t_indices[i]],
                #         s=12,
                #         marker='|',
                #         color=colors[i],
                #         zorder = 10
                #     )
                # Plot each point with a different color
                # ax[1].scatter(
                #     low_spike.V[t_indices] * 1e3,
                #     low_spike.dvdt[t_indices],
                #     s=12,
                #     marker='|',
                #     color=colors,
                #     zorder = 10
                # )
                # mark the threshold point on the phase plane
                ax[1].scatter(
                    low_spike.V[thr_index] * 1e3,
                    low_spike.dvdt[thr_index],
                    s=12,
                    marker="o",
                    color="r",
                    zorder=12,
                )
                latency = (low_spike.AP_latency - low_spike.peak_T) * 1e3  # in msec
                # mark AP onset on the waveform
                ax[0].plot(
                    latency,
                    low_spike.AP_begin_V * 1e3,
                    "ro",
                    markersize=2.5,
                    zorder=10,
                )
                # draw the halfwidth as a horizontal segment at halfwidth_V
                ax[0].plot(
                    [
                        (low_spike.left_halfwidth_T - low_spike.peak_T - 0.0001) * 1e3,
                        (low_spike.right_halfwidth_T - low_spike.peak_T + 0.0001) * 1e3,
                    ],
                    [  # in msec
                        low_spike.halfwidth_V * 1e3,
                        low_spike.halfwidth_V * 1e3,
                    ],
                    "g-",
                    zorder=10,
                )
                # ax[0].plot(
                #     (low_spike.right_halfwidth_T - low_spike.peak_T)
                #     * 1e3,  # in msec
                #     low_spike.halfwidth_V * 1e3,
                #     "co",
                # )
                if nplots == 0:  # annotate
                    ax[0].set_xlabel("Time (msec), re Peak")
                    ax[0].set_ylabel("V (mV)")
                    ax[1].set_xlabel("V (mV)")
                    ax[1].set_ylabel("dV/dt (mV/ms)")
                    PH.nice_plot(ax[0])
                    PH.nice_plot(ax[1])
                    PH.talbotTicks(ax[0])
                    PH.talbotTicks(ax[1])
                nplots += 1
            if nplots > 0:
                mpl.show()
            return cell_df
        else:
            return None
    def get_selected_cell_data_FI(self, experiment, table_manager, assembleddata):
        """Run compute_FI_Fits (with plotting enabled) for every selected
        table row, one figure per row.

        Returns the selected row indexes, or None when nothing is selected.
        """
        self.get_row_selection(table_manager)
        pp = PrettyPrinter(indent=4, width=120)
        if self.selected_index_rows is not None:
            for nplots, index_row in enumerate(self.selected_index_rows):
                selected = table_manager.get_table_data(index_row)
                day = selected.date[:-4]
                slicecell = selected.cell_id[-4:]
                # cell_df, _ = self.get_cell(
                #     experiment, assembleddata, cell=selected.cell_id
                # )
                fig, ax = mpl.subplots(1, 1)
                self.compute_FI_Fits(
                    experiment, assembleddata, selected.cell_id, plot_fits=True, ax=ax
                )
            # NOTE(review): nplots is the enumerate index, so show() only runs
            # when more than one row is selected — confirm this is intended.
            if nplots > 0:
                mpl.show()
            return self.selected_index_rows
        else:
            return None
def average_FI(self, FI_Data_I_, FI_Data_FR_, max_current: float = 1.0e-9):
if len(FI_Data_I_) > 0:
try:
FI_Data_I, FI_Data_FR = zip(*sorted(zip(FI_Data_I_, FI_Data_FR_)))
except:
raise ValueError("couldn't zip the data sets: ")
if len(FI_Data_I) > 0: # has data...
print("averaging FI data")
FI_Data_I_ = np.array(FI_Data_I)
FI_Data_FR_ = np.array(FI_Data_FR)
f1_index = np.where((FI_Data_I_ >= 0.0) & (FI_Data_I_ <= max_current))[
0
] # limit to 1 nA, regardless
FI_Data_I, FI_Data_FR, FI_Data_FR_Std, FI_Data_N = self.avg_group(
FI_Data_I_[f1_index], FI_Data_FR_[f1_index], ndim=FI_Data_I_.shape
)
return FI_Data_I, FI_Data_FR, FI_Data_FR_Std, FI_Data_N
    def avg_group(self, x, y, ndim=2):
        """Group y by unique x values and average the duplicates.

        Returns
        -------
        (xa, ya, ystd, yn) : tuple of numpy arrays
            unique x values; mean y per unique x; SD of y per unique x
            (0 for singletons); count of non-NaN y values per unique x
            (1 for singletons).

        NOTE(review): average_FI() passes ndim=FI_Data_I_.shape — a tuple —
        so the ndim == 2 flattening branch is never taken from that caller.
        """
        if ndim == 2:
            # flatten one level of nesting before grouping
            x = np.array([a for b in x for a in b])
            y = np.array([a for b in y for a in b])
        else:
            x = np.array(x)
            y = np.array(y)
        # x = np.ravel(x)  # np.array(x)
        # y = np.array(y)
        xa, ind, counts = np.unique(
            x, return_index=True, return_counts=True
        )  # find unique values in x
        ya = y[ind]
        ystd = np.zeros_like(ya)
        yn = np.ones_like(ya)
        for dupe in xa[counts > 1]:  # for each duplicate value, replace with mean
            # print("dupe: ", dupe)
            # print(np.where(x==dupe), np.where(xa==dupe))
            ya[np.where(xa == dupe)] = np.nanmean(y[np.where(x == dupe)])
            ystd[np.where(xa == dupe)] = np.nanstd(y[np.where(x == dupe)])
            yn[np.where(xa == dupe)] = np.count_nonzero(~np.isnan(y[np.where(x == dupe)]))
        return xa, ya, ystd, yn
# get maximum slope from fit.
    def hill_deriv(self, x: float, y0: float, ymax: float, m: float, n: float):
        """hill_deriv
        Derivative of the Hill function with respect to x;
        analytical solution computed from SageMath.

        Parameters
        ----------
        x : float
            current
        y0 : float
            baseline (accepted for signature symmetry with the fit parameters,
            but a constant offset does not appear in the derivative — unused)
        ymax : float
            maximum y value
        m : float
            growth rate
        n : float
            growth power

        Returns
        -------
        float
            dF/dx evaluated at x.
        """
        hd = m * n * ymax
        hd *= np.power(m / x, n - 1)
        hd /= (x * x) * np.power((np.power(m / x, n) + 1.0), 2.0)
        return hd
    def fit_FI_Hill(
        self,
        FI_Data_I,
        FI_Data_FR,
        FI_Data_FR_Std,
        FI_Data_I_,
        FI_Data_FR_,
        FI_Data_N,
        hill_max_derivs,
        hill_i_max_derivs,
        FI_fits,
        linfits,
        cell: str,
        celltype: str,
        plot_fits=False,
        ax: Union[mpl.Axes, None] = None,
    ):
        """Fit a Hill function to one cell's averaged FI curve and accumulate
        the results.

        Parameters
        ----------
        FI_Data_I, FI_Data_FR, FI_Data_FR_Std, FI_Data_N :
            averaged FI curve (currents, rates, SDs, counts) from average_FI()
        FI_Data_I_, FI_Data_FR_ :
            raw (unaveraged) FI points; only plotted when plot_raw is enabled
        hill_max_derivs, hill_i_max_derivs : list
            accumulators (mutated in place) for the maximum Hill slope (sp/nA)
            and the current (pA) at which it occurs
        FI_fits : dict
            accumulator with "fits" and "pars" lists
        linfits : list
            accumulator for linear FI-slope fits (0-300 pA range)
        cell, celltype : str
            identifiers used in messages and figure titles
        plot_fits : bool
            when True, plot data, Hill fit and derivative
        ax : mpl.Axes or None
            axes to plot into; a new figure is made when None

        Returns
        -------
        tuple
            (hill_max_derivs, hill_i_max_derivs, FI_fits, linfits), updated
            in place and returned; returned unchanged when the fit fails.
        """
        plot_raw = False  # only to plot the unaveraged points.
        spanalyzer = spike_analysis.SpikeAnalysis()
        spanalyzer.fitOne(
            i_inj=FI_Data_I,
            spike_count=FI_Data_FR,
            pulse_duration=None,  # protodurs[ivname],
            info="",
            function="Hill",
            fixNonMonotonic=True,
            excludeNonMonotonic=False,
            max_current=None,
        )
        try:
            fitpars = spanalyzer.analysis_summary["FI_Growth"][0]["parameters"][0]
        except:  # NOTE(review): bare except — catches any missing/empty FI_Growth result
            CP(
                "r",
                f"fitpars has no solution? : {cell!s}, {celltype:s}, {spanalyzer.analysis_summary['FI_Growth']!s}",
            )
            return (
                hill_max_derivs,
                hill_i_max_derivs,
                FI_fits,
                linfits,
            )  # no fit, return without appending a new fit
            # raise ValueError("couldn't get fitpars: no solution?")
        # Hill fit parameters: baseline, maximum, growth rate, growth power
        y0 = fitpars[0]
        ymax = fitpars[1]
        m = fitpars[2]
        n = fitpars[3]
        xyfit = spanalyzer.analysis_summary["FI_Growth"][0]["fit"]
        i_range = np.linspace(1e-12, np.max(xyfit[0]), 1000)
        # print(f"fitpars: y0={y0:.3f}, ymax={ymax:.3f}, m={m*1e9:.3f}, n={n:.3f}")
        # evaluate the analytical derivative over the fitted current range
        deriv_hill = [self.hill_deriv(x=x, y0=y0, ymax=ymax, m=m, n=n) for x in i_range]
        deriv_hill = np.array(deriv_hill) * 1e-9  # convert to sp/nA
        max_deriv = np.max(deriv_hill)
        arg_max_deriv = np.argmax(deriv_hill)
        i_max_deriv = i_range[arg_max_deriv] * (1e12)
        hill_max_derivs.append(max_deriv)
        hill_i_max_derivs.append(i_max_deriv)
        # print(f"max deriv: {max_deriv:.3f} sp/nA at {i_max_deriv:.1f} pA")
        # print(xyfit[1])
        if len(spanalyzer.analysis_summary["FI_Growth"]) > 0:
            FI_fits["fits"].append(spanalyzer.analysis_summary["FI_Growth"][0]["fit"])
            FI_fits["pars"].append(spanalyzer.analysis_summary["FI_Growth"][0]["parameters"])
        # linear fit over the low-current (0-300 pA) portion of the FI curve
        linfit = spanalyzer.getFISlope(
            i_inj=FI_Data_I,
            spike_count=FI_Data_FR,
            pulse_duration=None,  # FR is already duration
            min_current=0e-12,
            max_current=300e-12,
        )
        linfits.append(linfit)
        linx = np.arange(0, 300e-12, 10e-12)
        liny = linfit.slope * linx + linfit.intercept
        if plot_fits:
            if ax is None:
                fig, ax = mpl.subplots(1, 1)
                fig.suptitle(f"{celltype:s} {cell:s}")
            line_FI = ax.errorbar(
                np.array(FI_Data_I) * 1e9,
                FI_Data_FR,
                yerr=FI_Data_FR_Std,
                marker="o",
                color="k",
                linestyle=None,
            )
            # ax[1].plot(FI_Data_I * 1e12, FI_Data_N, marker="s")
            if plot_raw:
                for i, d in enumerate(FI_Data_I_):  # plot the raw points before combining
                    ax.plot(np.array(FI_Data_I_[i]) * 1e9, FI_Data_FR_[i], "x", color="k")
            # print("fit x * 1e9: ", spanalyzer.analysis_summary['FI_Growth'][0]['fit'][0]*1e9)
            # print("fit y * 1: ", spanalyzer.analysis_summary['FI_Growth'][0]['fit'][1])
            # ax[0].plot(linx * 1e12, liny, color="c", linestyle="dashdot")
            celln = Path(cell).name
            # NOTE(review): ">= 0" is always true; presumably "> 0" was meant
            if len(spanalyzer.analysis_summary["FI_Growth"]) >= 0:
                line_fit = ax.plot(
                    spanalyzer.analysis_summary["FI_Growth"][0]["fit"][0][0] * 1e9,
                    spanalyzer.analysis_summary["FI_Growth"][0]["fit"][1][0],
                    color="r",
                    linestyle="-",
                    zorder=100,
                )
                # derivative (in blue)
                line_deriv = ax.plot(
                    i_range * 1e9, deriv_hill, color="b", linestyle="--", zorder=100
                )
                d_max = np.argmax(deriv_hill)
                ax2 = ax.twinx()
                ax2.set_ylim(0, 500)
                ax2.set_ylabel("Firing Rate Slope (sp/s/nA)")
                # vertical marker at the current of maximum slope
                line_drop = ax2.plot(
                    [i_range[d_max] * 1e9, i_range[d_max] * 1e9],
                    [0, 1.1 * deriv_hill[d_max]],
                    color="b",
                    zorder=100,
                )
                ax.set_xlabel("Current (nA)")
                ax.set_ylabel("Firing Rate (sp/s)")
                # turn off top box
                for loc, spine in ax.spines.items():
                    if loc in ["left", "bottom"]:
                        spine.set_visible(True)
                    elif loc in ["right", "top"]:
                        spine.set_visible(False)
                for loc, spine in ax2.spines.items():
                    if loc in ["right", "bottom"]:
                        spine.set_visible(True)
                    elif loc in ["left", "top"]:
                        spine.set_visible(False)
                        # spine.set_color('none')
                        # do not draw the spine
                        # spine.set_color('none')
                        # do not draw the spine
                PH.talbotTicks(ax, density=[2.0, 2.0])
                PH.talbotTicks(ax2, density=[2.0, 2.0])
                ax.legend(
                    [line_FI, line_fit[0], line_deriv[0], line_drop[0]],
                    ["Firing Rate", "Hill Fit", "Derivative", "Max Derivative"],
                    loc="best",
                    frameon=False,
                )
                mpl.show()
        return hill_max_derivs, hill_i_max_derivs, FI_fits, linfits
def check_excluded_dataset(self, day_slice_cell, experiment, protocol):
exclude_flag = day_slice_cell in experiment["excludeIVs"]
print(" IV is in exclusion table: ", exclude_flag)
if exclude_flag:
exclude_table = experiment["excludeIVs"][day_slice_cell]
print(" excluded table data: ", exclude_table)
print(" testing protocol: ", protocol)
proto = Path(protocol).name # passed protocol has day/slice/cell/protocol
if proto in exclude_table["protocols"] or exclude_table["protocols"] == ["all"]:
CP(
"y",
f"Excluded cell/protocol: {day_slice_cell:s}, {proto:s} because: {exclude_table['reason']:s}",
)
Logger.info(
f"Excluded cell: {day_slice_cell:s}, {proto:s} because: {exclude_table['reason']:s}"
)
return True
print(" Protocol passed: ", protocol)
return False
def compute_FI_Fits(
self,
experiment,
df: pd.DataFrame,
cell: str,
protodurs: list = [1.0],
plot_fits: bool = False,
ax: Union[mpl.Axes, None] = None,
):
CP("g", f"\n{'='*80:s}\nCell: {cell!s}, {df[df.cell_id==cell].cell_type.values[0]:s}")
df_cell, df_tmp = self.get_cell(experiment, df, cell)
if df_cell is None:
return None
print(" df_tmp group>>: ", df_tmp.Group.values)
print(" df_cell group>>: ", df_cell.keys())
protocols = list(df_cell.Spikes.keys())
spike_keys = list(df_cell.Spikes[protocols[0]].keys())
iv_keys = list(df_cell.IV[protocols[0]].keys())
srs = {}
dur = {}
important = {}
# for each CCIV type of protocol that was run:
for nprot, protocol in enumerate(protocols):
if protocol.endswith("0000"): # bad protocol name
continue
day_slice_cell = str(Path(df_cell.date, df_cell.slice_slice, df_cell.cell_cell))
CP("m", f"day_slice_cell: {day_slice_cell:s}, protocol: {protocol:s}")
if self.check_excluded_dataset(day_slice_cell, experiment, protocol):
continue
fullpath = Path(experiment["rawdatapath"], experiment["directory"], protocol)
with DR.acq4_reader.acq4_reader(fullpath, "MultiClamp1.ma") as AR:
try:
AR.getData(fullpath)
sample_rate = AR.sample_rate[0]
duration = AR.tend - AR.tstart
srs[protocol] = sample_rate
dur[protocol] = duration
important[protocol] = AR.checkProtocolImportant(fullpath)
CP("g", f" Protocol {protocol:s} has sample rate of {sample_rate:e}")
except ValueError:
CP("r", f"Acq4Read failed to read data file: {str(fullpath):s}")
raise ValueError(f"Acq4Read failed to read data file: {str(fullpath):s}")
protocols = list(srs.keys()) # only count valid protocols
CP("c", f"Valid Protocols: {protocols!s}")
if len(protocols) > 1:
protname = "combined"
elif len(protocols) == 1:
protname = protocols[0]
else:
return None
# parse group correctly.
# the first point in the Group column is likely a nan.
# if it is, then use the next point.
print("Group: ", df_tmp.Group, "protoname: ", protname)
group = df_tmp.Group.values[0]
datadict = {
"ID": str(df_tmp.cell_id.values[0]),
"Subject": str(df_tmp.cell_id.values[0]),
"cell_id": cell,
"Group": group,
"Date": str(df_tmp.Date.values[0]),
"age": str(df_tmp.age.values[0]),
"weight": str(df_tmp.weight.values[0]),
"sex": str(df_tmp.sex.values[0]),
"cell_type": df_tmp.cell_type.values[0],
"protocol": protname,
"important": important,
"protocols": list(df_cell.IV),
"sample_rate": srs,
"duration": dur,
}
# get the measures for the fixed values from the measure list
for measure in datacols:
datadict = self.get_measure(df_cell, measure, datadict, protocols, threshold_slope=experiment["AP_threshold_dvdt"])
# now combine the FI data across protocols for this cell
FI_Data_I1_:list_ = []
FI_Data_FR1_:list_ = [] # firing rate
FI_Data_I4_:list_ = []
FI_Data_FR4_:list_ = [] # firing rate
FI_fits:dict = {"fits": [], "pars": [], "names": []}
linfits:list = []
hill_max_derivs:list = []
hill_i_max_derivs:list = []
protofails = 0
for protocol in protocols:
if protocol.endswith("0000"): # bad protocol name
continue
# check if duration is acceptable:
if protodurs is not None:
durflag = False
for d in protodurs:
if not np.isclose(dur[protocol], d):
durflag = True
if durflag:
CP("y", f" >>>> Protocol {protocol:s} has duration of {dur[protocol]:e}")
CP("y", f" This is not in accepted limits of: {protodurs!s}")
continue
else:
CP("g", f" >>>> Protocol {protocol:s} has acceptable duration of {dur[protocol]:e}")
# print("protocol: ", protocol, "spikes: ", df_cell.Spikes[protocol]['spikes'])
if len(df_cell.Spikes[protocol]["spikes"]) == 0:
CP("y", f" >>>> Skipping protocol with no spikes: {protocol:s}")
continue
else:
CP("g", f" >>>> Analyzing FI for protocol: {protocol:s}")
try:
fidata = df_cell.Spikes[protocol]["FI_Curve"]
except KeyError:
print("FI curve not found for protocol: ", protocol, "for cell: ", cell)
# print(df_cell.Spikes[protocol])
protofails += 1
if protofails > 4:
raise ValueError(
"FI curve data not found for protocol: ",
protocol,
"for cell: ",
cell,
)
else:
continue
if np.max(fidata[0]) > 1.01e-9: # accumulate high-current protocols
FI_Data_I4_.extend(fidata[0])
FI_Data_FR4_.extend(fidata[1] / dur[protocol])
else: # accumulate other protocols <= 1 nA
FI_Data_I1_.extend(fidata[0])
FI_Data_FR1_.extend(fidata[1] / dur[protocol])
FI_Data_I1 = []
FI_Data_FR1 = []
FI_Data_I4 = []
FI_Data_FR4 = []
if len(FI_Data_I1_) > 0:
FI_Data_I1, FI_Data_FR1, FI_Data_FR1_Std, FI_Data_N1 = self.average_FI(
FI_Data_I1_, FI_Data_FR1_, 1e-9
)
if len(FI_Data_I4_) > 0:
FI_Data_I4, FI_Data_FR4, FI_Data_FR4_Std, FI_Data_N1 = self.average_FI(
FI_Data_I4_, FI_Data_FR4_, 4e-9
)
if len(FI_Data_I1) > 0:
# do a curve fit on the first 1 nA of the protocol
hill_max_derivs, hill_i_max_derivs, FI_fits, linfits = self.fit_FI_Hill(
FI_Data_I=FI_Data_I1,
FI_Data_FR=FI_Data_FR1,
FI_Data_I_=FI_Data_I1_,
FI_Data_FR_=FI_Data_FR1_,
FI_Data_FR_Std=FI_Data_FR1_Std,
FI_Data_N=FI_Data_N1,
hill_max_derivs=hill_max_derivs,
hill_i_max_derivs=hill_i_max_derivs,
FI_fits=FI_fits,
linfits=linfits,
cell=cell,
celltype=df_tmp.cell_type.values[0],
plot_fits=plot_fits,
ax=ax,
)
# save the results
datadict["FI_Curve"] = [FI_Data_I1, FI_Data_FR1]
datadict["FI_Curve4"] = [FI_Data_I4, FI_Data_FR4]
datadict["current"] = FI_Data_I1
datadict["spsec"] = FI_Data_FR1
# datadict["Subject"] = df_tmp.cell_id.values[0]
# datadict["Group"] = df_tmp.Group.values[0]
# datadict["sex"] = df_tmp.sex.values[0]
# datadict["celltype"] = df_tmp.cell_type.values[0]
datadict["pars"] = [FI_fits["pars"]]
datadict["names"] = []
datadict["fit"] = [FI_fits["fits"]]
datadict["F1amp"] = np.nan
datadict["F2amp"] = np.nan
datadict["Irate"] = np.nan
datadict["maxHillSlope"] = np.nan
datadict["maxHillSlope_SD"] = np.nan
datadict["I_maxHillSlope"] = np.nan
datadict["I_maxHillSlope_SD"] = np.nan
if len(linfits) > 0:
datadict["FISlope"] = np.mean([s.slope for s in linfits])
else:
datadict["FISlope"] = np.nan
if len(hill_max_derivs) > 0:
datadict["maxHillSlope"] = np.mean(hill_max_derivs)
datadict["maxHillSlope_SD"] = np.std(hill_max_derivs)
datadict["I_maxHillSlope"] = np.mean(hill_i_max_derivs)
datadict["I_maxHillSlope_SD"] = np.std(hill_i_max_derivs)
if len(FI_Data_I1) > 0:
i_one = np.where(FI_Data_I1 <= 1.01e-9)[0]
datadict["FIMax_1"] = np.nanmax(FI_Data_FR1[i_one])
if len(FI_Data_I4) > 0:
i_four = np.where(FI_Data_I4 <= 4.01e-9)[0]
datadict["FIMax_4"] = np.nanmax(FI_Data_FR4[i_four])
return datadict
    def get_cell(self, experiment, df: pd.DataFrame, cell: str):
        """Find the analyzed per-cell pickle file for *cell* and load it.

        Parameters
        ----------
        experiment : dict
            configuration with 'analyzeddatapath' and 'directory'
        df : pd.DataFrame
            assembled dataset, searched by cell_id
        cell : str
            cell id, either a bare name or a path-like string

        Returns
        -------
        (df_cell, df_tmp)
            unpickled per-cell analysis and the matching dataframe rows, or
            (None, None) when the cell is unknown or has no Spikes data.

        Raises
        ------
        ValueError
            when the expected pickle file does not exist, or when it cannot
            be read with or without gzip compression.
        """
        df_tmp = df[df.cell_id == cell]  # df.copy()  # .dropna(subset=["Date"])
        print("\nGet_cell:: df_tmp head: \n", "Groups: ", df_tmp["Group"].unique(), "\n    len df_tmp: ", len(df_tmp))
        if len(df_tmp) == 0:
            return None, None
        try:
            celltype = df_tmp.cell_type.values[0]
        except ValueError:
            celltype = df_tmp.cell_type
        celltype = str(celltype).replace("\n", "")
        if celltype == " ":  # no cell type
            celltype = "unknown"
        CP("m", f"get cell: df_tmp cell type: {celltype:s}")
        # look for original PKL file for cell in the dataset
        # if it exists, use it to get the FI curve
        # base_cellname = str(Path(cell)).split("_")
        # print("base_cellname: ", base_cellname)
        # sn = int(base_cellname[-1][1])
        # cn = int(base_cellname[-1][3])
        # different way from cell_id:
        # The cell name may be a path, or just the cell name.
        # we have to handle both cases.
        parent = Path(cell).parent
        # NOTE(review): Path('.') == "." is False in pathlib (Path only
        # compares equal to another Path), so this branch can never be taken
        # and all cells go through the else path — confirm intent.  Also note
        # the regex group numbers for slice/cell are swapped here relative to
        # the else branch (group(2) is the S number, group(4) the C number).
        if parent == ".":  # just cell, not path
            cell_parts = str(cell).split("_")
            re_parse = re.compile("([Ss]{1})(\d{1,3})([Cc]{1})(\d{1,3})")
            cnp = re_parse.match(cell_parts[-1]).group(2)
            cn = int(cnp)
            snp = re_parse.match(cell_parts[-1]).group(4)
            sn = int(snp)
            cell_day_name = cell_parts[-3].split("_")[0]
            dir_path = None
        else:
            cell = Path(cell).name  # just get the name here
            cell_parts = cell.split("_")
            re_parse = re.compile("([Ss]{1})(\d{1,3})([Cc]{1})(\d{1,3})")
            # print("cell_parts: ", cell_parts[-1])
            snp = re_parse.match(cell_parts[-1]).group(2)
            sn = int(snp)
            cnp = re_parse.match(cell_parts[-1]).group(4)
            cn = int(cnp)
            cell_day_name = cell_parts[0]
            dir_path = parent
        # print("Cell name, slice, cell: ", cell_parts, sn, cn)
        # if cell_parts != ['2019.02.22', '000', 'S0C0']:
        #     return None, None
        # expected pickle name: <day>_S<s>C<c>_<celltype>_IVs.pkl
        cname2 = f"{cell_day_name.replace('.', '_'):s}_S{snp:s}C{cnp:s}_{celltype:s}_IVs.pkl"
        datapath2 = Path(experiment["analyzeddatapath"], experiment["directory"], celltype, cname2)
        # cname2 = f"{cell_day_name.replace('.', '_'):s}_S{sn:02d}C{cn:02d}_{celltype:s}_IVs.pkl"
        # datapath2 = Path(experiment["analyzeddatapath"], experiment["directory"], celltype, cname2)
        # cname1 = f"{cell_day_name.replace('.', '_'):s}_S{sn:01d}C{cn:01d}_{celltype:s}_IVs.pkl"
        # datapath1 = Path(experiment["analyzeddatapath"], experiment["directory"], celltype, cname1)
        # print(datapath)
        # if datapath1.is_file():
        #     CP("c", f"...   {datapath1!s} is OK")
        #     datapath = datapath1
        if datapath2.is_file():
            CP("c", f"...   {datapath2!s} is OK")
            datapath = datapath2
        else:
            CP("r", f"no file: matching: {datapath2!s}, \n")  # or: {datapath2!s}\n")
            print("cell type: ", celltype)
            raise ValueError
            # NOTE(review): the raise above makes this return unreachable.
            return None, None
        try:
            df_cell = pd.read_pickle(datapath, compression="gzip")
        except ValueError:
            try:
                df_cell = pd.read_pickle(datapath)  # try with no compression
            except ValueError:
                CP("r", f"Could not read {datapath!s}")
                raise ValueError("Failed to read compressed pickle file")
        if "Spikes" not in df_cell.keys() or df_cell.Spikes is None:
            CP(
                "r",
                f"df_cell: {df_cell.age!s}, {df_cell.cell_type!s}, No spike protos:",
            )
            return None, None
        # print(
        #     "df_cell: ",
        #     df_cell.age,
        #     df_cell.cell_type,
        #     "N spike protos: ",
        #     len(df_cell.Spikes),
        #     "\n",
        #     df_tmp['Group'],
        # )
        return df_cell, df_tmp
    def get_lowest_current_spike(self, row, SP):
        """Fill *row*'s AP measures from the first spike evoked at the lowest
        current across the traces of SP (a spike-analysis result object).

        Sets dvdt_rising, dvdt_falling, dvdt_current (pA), AP_thr_V (mV),
        AP_HW (ms, when an interpolated halfwidth exists) and AP_begin_V (mV)
        on *row*, then returns it (unchanged when no qualifying spikes exist).
        """
        measured_first_spike = False  # NOTE(review): set but never used
        dvdts = []
        for tr in SP.spikeShapes:  # for each trace
            # NOTE(review): the comment below says "if there is a spike" but
            # the test requires at least TWO spikes — confirm whether this
            # should be >= 1.
            if len(SP.spikeShapes[tr]) > 1:  # if there is a spike
                spk = SP.spikeShapes[tr][0]  # get the first spike in the trace
                dvdts.append(spk)  # accumulate first spike info
        if len(dvdts) > 0:
            currents = []
            for d in dvdts:  # for each first spike, make a list of the currents
                currents.append(d.current)
            min_current = np.argmin(currents)  # find spike elicited by the minimum current
            row.dvdt_rising = dvdts[min_current].dvdt_rising
            row.dvdt_falling = dvdts[min_current].dvdt_falling
            row.dvdt_current = currents[min_current] * 1e12  # put in pA
            row.AP_thr_V = 1e3 * dvdts[min_current].AP_begin_V
            if dvdts[min_current].halfwidth_interpolated is not None:
                row.AP_HW = dvdts[min_current].halfwidth_interpolated * 1e3
            row.AP_begin_V = 1e3 * dvdts[min_current].AP_begin_V
            CP(
                "y",
                f"I={currents[min_current]*1e12:6.1f} pA, dvdtRise={row.dvdt_rising:6.1f}, dvdtFall={row.dvdt_falling:6.1f}, APthr={row.AP_thr_V:6.1f} mV, HW={row.AP_HW*1e3:6.1f} usec",
            )
        return row
def find_lowest_current_trace(self, spikes):
current = []
trace = []
for sweep in spikes["spikes"]:
for spike in spikes["spikes"][sweep]:
this_spike = spikes["spikes"][sweep][spike]
current.append(this_spike.current)
trace.append(this_spike.trace)
break # only get the first one
# now find the index of the lowest current
if len(current) == 0:
return np.nan, np.nan, np.nan
min_current_index = np.argmin(current)
# print("current: ", current, "traces: ", trace)
# print(current[min_current_index], trace[min_current_index])
return min_current_index, current[min_current_index], trace[min_current_index]
def convert_FI_array(self, FI_values):
"""convert_FI_array Take a potential string representing the FI_data,
and convert it to a numpy array
Parameters
----------
FI_values : str or numpy array
data to be converted
Returns
-------
numpy array
converted data from FI_values
"""
if isinstance(FI_values, str):
fistring = FI_values.replace("[", "").replace("]", "").replace("\n", "")
fistring = fistring.split(" ")
FI_data = np.array([float(s) for s in fistring if len(s) > 0])
FI_data = FI_data.reshape(2, int(FI_data.shape[0] / 2))
else:
FI_data = FI_values
FI_data = np.array(FI_data)
return FI_data
    def get_measure(self, df_cell, measure, datadict, protocols, threshold_slope: float = 20.0):
        """get_measure : for the given cell, get the measure from the protocols

        Looks the measure up, in order, in: the IV analysis (directly or via
        iv_mapper), the Spikes analysis (directly or via mapper1), or the
        lowest-current first spike (via mapper / threshold detection), then
        stores the across-protocol nanmean in datadict[measure].

        Parameters
        ----------
        df_cell :
            per-cell analysis object with .IV and .Spikes dicts keyed by protocol
        measure : str
            name of the measure to extract (one of datacols)
        datadict : dict
            accumulator; datadict[measure] is set here
        protocols : list of str
            protocols to search
        threshold_slope : float
            dV/dt threshold for AP threshold detection (from the config file)

        Returns
        -------
        dict
            datadict with datadict[measure] set (np.nan when nothing found)
        """
        m = []
        if measure in iv_keys:
            for protocol in protocols:
                if measure in df_cell.IV[protocol].keys():
                    m.append(df_cell.IV[protocol][measure])
        elif measure in iv_mapper.keys() and iv_mapper[measure] in iv_keys:
            for protocol in protocols:
                if iv_mapper[measure] in df_cell.IV[protocol].keys():
                    m.append(df_cell.IV[protocol][iv_mapper[measure]])
        elif measure in spike_keys:
            maxadapt = 0  # NOTE(review): only used by the commented-out code below
            for protocol in protocols:
                # print("p: ", p)
                if measure == "AdaptRatio":
                    # skip pathological adaptation ratios
                    if df_cell.Spikes[protocol][mapper1[measure]] > 8.0:
                        continue
                # print("\nprot, measure: ", protocol, measure, df_cell.Spikes[protocol][mapper1[measure]])
                # print(df_cell.Spikes[protocol].keys())
                # maxadapt = np.max([maxadapt, df_cell.Spikes[protocol][mapper1['AdaptRatio']]])
                if measure in df_cell.Spikes[protocol].keys():
                    m.append(df_cell.Spikes[protocol][measure])
            # if maxadapt > 8:
            #     exit()
        elif measure in mapper1.keys() and mapper1[measure] in spike_keys:
            for protocol in protocols:
                if mapper1[measure] in df_cell.Spikes[protocol].keys():
                    m.append(df_cell.Spikes[protocol][mapper1[measure]])
        elif measure == "current":
            for protocol in protocols:  # for all protocols with spike analysis data for this cell
                if "spikes" not in df_cell.Spikes[protocol].keys():
                    continue
                # we need to get the first spike evoked by the lowest current level ...
                min_current_index, current, trace = self.find_lowest_current_trace(
                    df_cell.Spikes[protocol]
                )
                if not np.isnan(min_current_index):
                    m.append(current)
                else:
                    m.append(np.nan)
        else:
            for protocol in protocols:  # for all protocols with spike analysis data for this cell
                # we need to get the first spike evoked by the lowest current level ...
                prot_spike_count = 0
                if "spikes" not in df_cell.Spikes[protocol].keys():
                    continue
                spike_data = df_cell.Spikes[protocol]["spikes"]
                if measure in [
                    "dvdt_rising",
                    "dvdt_falling",
                    "AP_HW",
                    "AHP_trough_V",
                    "AHP_depth_V",
                ]:  # use lowest current spike
                    min_current_index, current, trace = self.find_lowest_current_trace(
                        df_cell.Spikes[protocol]
                    )
                    if not np.isnan(min_current_index):
                        spike_data = df_cell.Spikes[protocol]["spikes"][trace][0].__dict__
                        # print("spike data ", spike_data['dvdt_rising'])
                        m.append(spike_data[mapper[measure]])
                    else:
                        m.append(np.nan)
                    # print("spike data: ", spike_data.keys())
                elif measure == "AP_thr_V":  # have to try two variants. Note that threshold slope is defined in config file
                    min_current_index, current, trace = self.find_lowest_current_trace(
                        df_cell.Spikes[protocol]
                    )
                    if not np.isnan(min_current_index):
                        spike_data = df_cell.Spikes[protocol]["spikes"][trace][0].__dict__
                        # CP("c", "Check AP_thr_V")
                        # re-detect the threshold from the raw waveform using
                        # the configured dV/dt threshold slope
                        Vthr, Vthr_time = UTIL.find_threshold(
                            spike_data["V"],
                            np.mean(np.diff(spike_data["Vtime"])),
                            threshold_slope=threshold_slope,
                        )
                        m.append(Vthr)
                    else:
                        m.append(np.nan)
                elif (
                    measure in mapper.keys() and mapper[measure] in spike_data.keys()
                ):  # if the measure exists for this sweep
                    m.append(spike_data[mapper[measure]])
                else:
                    # print(measure in mapper.keys())
                    # print(spike_data.keys())
                    CP(
                        "r",
                        f"measure not found in spike_data, either: <{measure:s}>, {mapper.keys()!s}",
                    )
                    CP(
                        "r",
                        f"\n   or mapped in {mapper[measure]!s} to {spike_data.keys()!s}",
                    )
                    raise ValueError()
                    # NOTE(review): unreachable — the raise above exits first
                    exit()
                prot_spike_count += 1
        # CP("c", f"measure: {measure!s} : {m!s}")
        # else:
        #     print(
        #         f"measure {measure:s} not found in either IV or Spikes keys. Skipping"
        #     )
        # raise ValueError(f"measure {measure:s} not found in either IV or Spikes keys. Skipping")
        # sanitize: replace None entries with NaN so nanmean works
        for i, xm in enumerate(m):
            if xm is None:
                m[i] = np.nan
        # m = [u for u in m if u is not None else np.nan]  # sanitize data
        N = np.count_nonzero(~np.isnan(m))
        # print("N: ", N)
        if N > 0:
            datadict[measure] = np.nanmean(m)
        else:
            datadict[measure] = np.nan
        return datadict
    def textbox_setup(self, textbox):
        """Attach the GUI textbox used by textclear/text_get/textappend."""
        self.textbox = textbox
    def textclear(self):
        """Clear the GUI textbox; raises ValueError when textbox_setup() has
        not been called first."""
        if self.textbox is None:
            raise ValueError("datatables - functions - textbox has not been set up")
        # NOTE(review): 'self is None' can never be true for a bound method;
        # retained as-is (the commented test hints at a parallel-mode check).
        if self is None:  # or self.in_Parallel:
            return
        else:
            self.textbox.clear()
    def text_get(self):
        """Return the full plain-text contents of the GUI textbox; raises
        ValueError when textbox_setup() has not been called first."""
        if self.textbox is None:
            raise ValueError("datatables - functions - textbox has not been set up")
        return self.textbox.toPlainText()
def textappend(self, text, color="white"):
    """Append *text* to the GUI text box, mapping ANSI SGR color escapes
    to Qt text colors.

    Lines beginning with ESC (0x1b) have their color code looked up in
    `colormap`; unmatched trailing "[0m" reset sequences are stripped.
    The box color is reset to white after all lines are appended.
    """
    if self.textbox is None:
        raise ValueError("datatables - functions - textbox has not been set up")
    # ANSI SGR code (without the leading ESC) -> Qt color name.
    colormap = {
        "[31m": "red",
        "[48:5:208:0m": "orange",
        "[33m": "yellow",
        "[32m": "limegreen",
        "[34m": "pink",
        "[35m": "magenta",
        "[36m": "cyan",
        "[30m": "black",
        "[37m": "white",
        "[0m": "white",
        "[100m": "backgray",
    }
    # NOTE(review): `self is None` is unreachable for a bound method; the
    # terminal fallback below presumably dates from a standalone variant.
    if self is None:
        CP(color, text)  # just go straight to the terminal
    else:
        text = "".join(text)
        text = text.split("\n")
        for textl in text:
            # print(f"text: <{textl:s}>")
            if len(textl) > 0 and textl[0] == "\x1b":
                textl = textl[1:]  # clip the escape sequence
                for k in colormap.keys():
                    if textl.startswith(k):  # skip the escape sequence
                        textl = textl[len(k) :]
                        textl = textl.replace("[0m", "")
                        color = colormap[k]
                        self.textbox.setTextColor(QtGui.QColor(color))
                        break
            # strip any stray reset sequence left mid-line
            textl = textl.replace("[0m", "")
            self.textbox.append(textl)
        self.textbox.setTextColor(QtGui.QColor("white"))
| marsiwiec/ephys | ephys/gui/data_table_functions.py | data_table_functions.py | py | 51,215 | python | en | code | null | github-code | 6 | [
{
"api_name": "ephys.tools.utilities.Utility",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "ephys.tools.utilities",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pylibrary.tools.cprint.cprint",
"line_number": 25,
"usage_type": "attribute"
},
{... |
8056801684 | """
Static Pipeline representation to create a CodePipeline dedicated to building
Lambda Layers
"""
from troposphere import (
Parameter,
Template,
GetAtt,
Ref,
Sub
)
from ozone.handlers.lambda_tools import check_params_exist
from ozone.resources.iam.roles.pipeline_role import pipelinerole_build
from ozone.resources.devtools.pipeline import (
SourceAction,
BuildAction,
DeployAction,
InvokeAction,
CodePipeline
)
from ozone.outputs import object_outputs
def template(**kwargs):
    """Build the CloudFormation template for a Lambda-layer build pipeline.

    Required kwargs:
        BucketName, Source, LayerBuildProjects, LayersMergeProject,
        LayerName, GeneratorFunctionName, CloudformationRoleArn

    Returns:
        troposphere.Template: a template containing the pipeline IAM role
        and a CodePipeline with stages
        Source -> per-layer builds -> merge -> template generation (Lambda)
        -> CloudFormation deploy, plus outputs for the pipeline resource.
    """
    template_required_params = [
        'BucketName',
        'Source', 'LayerBuildProjects', 'LayersMergeProject',
        'LayerName', 'GeneratorFunctionName', 'CloudformationRoleArn'
    ]
    check_params_exist(template_required_params, kwargs)
    # Renamed from `template`, which shadowed this function's own name.
    tpl = Template()
    token = tpl.add_parameter(Parameter(
        'GitHubOAuthToken',
        Type="String",
        NoEcho=True
    ))
    role = pipelinerole_build(
        UseCodeCommit=True,
        UseCodeBuild=True,
        UseLambda=True,
        UseCloudformation=True,
        Bucket=kwargs['BucketName']
    )
    # GitHub sources authenticate with the (NoEcho) OAuth token parameter.
    if kwargs['Source']['Provider'].lower() == 'github':
        kwargs['Source']['Config']['OAuthToken'] = Ref(token)
    source = SourceAction(
        name='SourceCode',
        provider=kwargs['Source']['Provider'],
        config=kwargs['Source']['Config']
    )
    # One build action per layer project, all consuming the source artifact.
    build_actions = []
    for project in kwargs['LayerBuildProjects']:
        build_actions.append(BuildAction(
            project,
            source.outputs,
            project
        ))
    build_outputs = []
    for action in build_actions:
        build_outputs += action.outputs
    merge_action = BuildAction(
        'MergeAction',
        build_outputs,
        kwargs['LayersMergeProject']
    )
    # A Lambda renders the CloudFormation template from the merged artifact.
    invoke = InvokeAction(
        'GenerateTemplateForCfn',
        merge_action.outputs,
        function_name=kwargs['GeneratorFunctionName']
    )
    input_name = invoke.outputs[0].Name
    deploy = DeployAction(
        'DeployToCfn',
        invoke.outputs,
        'CloudFormation',
        StackName=f'layer-{kwargs["LayerName"]}',
        RoleArn=kwargs['CloudformationRoleArn'],
        TemplatePath=f'{input_name}::tmp/template.json'
    )
    stages = [
        ('Source', [source]),
        ('BuildLayers', build_actions),
        ('MergeLayers', [merge_action]),
        ('GenerateCfnTemplate', [invoke]),
        ('DeployWithCfn', [deploy]),
    ]
    pipeline = CodePipeline(
        'Pipeline',
        GetAtt(role, 'Arn'),
        kwargs['BucketName'],
        stages
    )
    tpl.add_resource(role)
    tpl.add_resource(pipeline)
    tpl.add_output(object_outputs(pipeline, True))
    return tpl
| lambda-my-aws/ozone | ozone/templates/awslambdalayer_pipeline.py | awslambdalayer_pipeline.py | py | 2,782 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ozone.handlers.lambda_tools.check_params_exist",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "troposphere.Template",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "troposphere.Parameter",
"line_number": 37,
"usage_type": "call"
},
... |
import json
import sys
import os.path
from mutagen.id3 import (ID3, CTOC, CHAP, TIT2, TALB,
                         TPE1, COMM, USLT, APIC, CTOCFlags)

# Usage: id3.py <mp3-file> [<json-metadata>]
# The JSON payload carries podcast/episode text, an optional cover-image
# path, and a chapter list of {id, start, end, title} (times in ms).
audio = ID3(sys.argv[1])

if len(sys.argv) > 2:
    data = json.loads(sys.argv[2])
    chapters = data["chapters"]
    ctoc_ids = list(map(lambda i: i.get("id"), chapters))

    # Replace (not merge) each frame we manage.
    audio.delall('TALB')
    audio["TALB"] = TALB(encoding=3, text=data["podcast_title"])
    audio.delall('TPE1')
    audio["TPE1"] = TPE1(encoding=3, text=data["podcast_title"])
    audio.delall('TIT2')
    audio["TIT2"] = TIT2(encoding=3, text=data["episode_title"])
    audio.delall('COMM')
    audio["COMM"] = COMM(encoding=3,
                         lang=u'eng',
                         text=data["episode_description"])
    audio.delall('USLT')
    audio["USLT"] = USLT(encoding=3,
                         lang=u'eng',
                         text=data["episode_description"])
    if "podcast_cover" in data and os.path.isfile(data["podcast_cover"]):
        # BUGFIX: the cover must be read as raw bytes ('rb'); the original
        # text-mode open fails with UnicodeDecodeError on binary JPEG data
        # (APIC's `data` field expects bytes).
        with open(data["podcast_cover"], 'rb') as cover_file:
            cover_bytes = cover_file.read()
        audio.delall('APIC')
        audio["APIC"] = APIC(encoding=3,
                             mime='image/jpeg',
                             type=3,  # 3 = front cover
                             desc=u'Cover',
                             data=cover_bytes)
    # Table of contents frame referencing every chapter id, in order.
    audio.delall('CTOC')
    audio.add(CTOC(element_id=u"toc",
                   flags=CTOCFlags.TOP_LEVEL | CTOCFlags.ORDERED,
                   child_element_ids=ctoc_ids,
                   sub_frames=[
                       TIT2(text=[u"TOC"]),
                   ]))
    audio.delall('CHAP')
    for chapter in chapters:
        audio.add(CHAP(element_id=chapter.get("id"),
                       start_time=int(chapter.get("start")),
                       end_time=int(chapter.get("end")),
                       sub_frames=[
                           TIT2(text=[chapter.get("title")]),
                       ]))

audio.save()
for key, value in audio.items():
    print(value.pprint())
| lukekarrys/audiobook | id3.py | id3.py | py | 1,967 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "mutagen.id3.ID3",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_numbe... |
35164168406 | #!/usr/bin/python3
import os
import json
import html
import random
import string
import threading
import subprocess
from bottle import app, error, post, request, redirect, route, run, static_file
from beaker.middleware import SessionMiddleware
# Beaker session configuration: file-backed sessions stored under ./cfg/.
session_opts = {
    'session.type': 'file',
    'session.data_dir': './cfg/',
    'session.auto': True,
}
sapp = SessionMiddleware(app(), session_opts)
# NOTE(review): this runs at import time, outside any request context; every
# handler below re-fetches the session itself, so this module-level value is
# never used afterwards -- confirm before removing.
sess = request.environ.get('beaker.session')
ipfspath = '/usr/local/bin/ipfs'
# Operator e-mail shown/edited in the web UI, persisted in cfg/email.cfg.
with open('cfg/email.cfg', 'r') as ecf:
    email = ecf.read()
# Read this node's IPFS peer ID from the local IPFS config, if present.
ipfs_id = ''
if os.path.exists('ipfs/config'):
    with open('ipfs/config', 'r') as ipcfg:
        ipconfig = ipcfg.read()
    jtxt = json.loads(ipconfig)
    ipfs_id = jtxt['Identity']['PeerID']
@route('/')
def index():
sess = request.environ.get('beaker.session')
sess['csrf'] = ''.join(random.choice(string.ascii_letters) for i in range(12))
sess.save()
htmlsrc = '<html><head>'
htmlsrc += '<title>IPFS Podcast Node</title>'
htmlsrc += '<meta name="viewport" content="width=device-width, initial-scale=1.0" />'
htmlsrc += '<link rel="icon" href="/favicon.png">'
htmlsrc += '<style>'
htmlsrc += 'body { background-image: url("ipfspod.png"); background-repeat: no-repeat; background-position: 50% 50%; font-family: "Helvetica Neue",Helvetica,Arial,sans-serif; font-size: 14px; margin: 1em; } '
htmlsrc += '.nfo { border-radius: 20px; background-color: darkcyan; color: white; opacity: 0.6; padding: 10px; } '
htmlsrc += 'label { display: inline-block; width: 65px; text-align: right; } '
htmlsrc += 'form#ecfg { margin-bottom: 0; } '
htmlsrc += 'form#ecfg input { margin: 4px; width: calc(100% - 150px); max-width: 200px; } '
htmlsrc += 'form#frst button { background-color: pink; border-color: indianred; margin: 4px; padding: 3px 13px; font-weight: bold; border-radius: 10px; display: inline-block; font-size: 9pt; white-space: nowrap; } '
htmlsrc += 'form#igc { display: inline-block; margin-left: 5px; } '
htmlsrc += 'div.prog { height: 5px; background-color: gray; border-radius: 0.25rem; } '
htmlsrc += 'div.prog div.used { height: 5px; background-color: lime; border-radius: 0.25rem; } '
htmlsrc += 'pre { overflow: auto; height: 50%; display: flex; flex-direction: column-reverse; white-space: break-spaces; } '
htmlsrc += 'div#links a { background-color: lightgray; margin: 4px; padding: 5px 13px; font-weight: bold; border-radius: 10px; display: inline-block; font-size: 9pt; text-decoration: none; } '
htmlsrc += 'a.ppass, a.pwarn, a.pfail { padding: 3px 8px 1px 8px; border-radius: 8px; display: inline-block; font-size: 9pt; font-weight: bold; text-decoration: none; } '
htmlsrc += 'a.ppass { background-color: lightgreen; color: green; } '
htmlsrc += 'a.pwarn { background-color: palegoldenrod; color: darkorange; } '
htmlsrc += 'a.pfail { background-color: pink; color: red; } '
htmlsrc += 'div#tmr { height: 3px; margin-bottom: 0.5em; background-color: lightblue; animation: tbar 60s linear; } '
htmlsrc += '@keyframes tbar { 0% { width: 0%; } 90% { background-color: cornflowerblue; } 100% { width: 100%; background-color: red; } } '
htmlsrc += '</style>'
htmlsrc += '</head>'
htmlsrc += '<body>'
htmlsrc += '<h2>IPFS Podcasting Node</h2>'
htmlsrc += '<div class="nfo" style="background-color: #222; overflow: hidden;">'
if ipfs_id != '':
htmlsrc += '<div style="white-space: nowrap;"><label>IPFS ID : </label> <b>' + str(ipfs_id) + '</b></div>'
htmlsrc += '<form id="ecfg" action="/" method="post">'
htmlsrc += '<input id="csrf" name="csrf" type="hidden" value="' + sess['csrf'] + '" />'
htmlsrc += '<label title="E-mail Address (optional)">E-Mail : </label><input id="email" name="email" type="email" placeholder="user@example.com" title="E-mail Address (optional)" value="' + email + '" />'
htmlsrc += '<button>Update</button><br/>'
htmlsrc += '</form>'
htmlsrc += '<label>Network : </label> '
httpstat = 'pfail'
hstat = subprocess.run('timeout 1 bash -c "</dev/tcp/ipfspodcasting.net/80"', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if hstat.returncode == 0:
httpstat = 'ppass'
htmlsrc += '<a class="' + httpstat + '" href="https://ipfspodcasting.net/Help/Network" title="Port 80 Status" target="_blank">HTTP</a> '
httpsstat = 'pfail'
hsstat = subprocess.run('timeout 1 bash -c "</dev/tcp/ipfspodcasting.net/443"', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if hsstat.returncode == 0:
httpsstat = 'ppass'
htmlsrc += '<a class="' + httpsstat + '" href="https://ipfspodcasting.net/Help/Network" title="Port 443 Status" target="_blank">HTTPS</a> '
peercnt = 0
speers = subprocess.run(ipfspath + ' swarm peers|wc -l', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if speers.returncode == 0:
peercnt = int(speers.stdout.decode().strip())
if peercnt > 400:
ipfsstat = 'ppass'
elif peercnt > 100:
ipfsstat = 'pwarn'
else:
ipfsstat = 'pfail'
htmlsrc += '<a class="' + ipfsstat + '" href="https://ipfspodcasting.net/Help/Network" title="Port 4001 Status" target="_blank">IPFS <span style="font-weight: normal; color: #222;">- ' + str(peercnt) + ' Peers</span></a><br/>'
repostat = subprocess.run(ipfspath + ' repo stat -s|grep RepoSize', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if repostat.returncode == 0:
repolen = repostat.stdout.decode().strip().split(':')
used = int(repolen[1].strip())
else:
used = 0
df = os.statvfs('/')
avail = df.f_bavail * df.f_frsize
percent = round(used/(used+avail)*100, 1)
if used < (1024*1024*1024):
used = str(round(used/1024/1024, 1)) + ' MB'
elif used < (1024*1024*1024*1024):
used = str(round(used/1024/1024/1024, 1)) + ' GB'
else:
used = str(round(used/1024/1024/1024/1024, 2)) + ' TB'
if avail < (1024*1024*1024):
avail = str(round(avail/1024/1024, 1)) + ' MB'
elif avail < (1024*1024*1024*1024):
avail = str(round(avail/1024/1024/1024, 1)) + ' GB'
else:
avail = str(round(avail/1024/1024/1024/1024, 2)) + ' TB'
htmlsrc += '<label>Storage : </label>'
htmlsrc += '<div style="display: inline-block; margin-left: 5px; position: relative; top: 5px; width: calc(100% - 150px);">'
htmlsrc += '<div class="prog"><div class="used" style="width: ' + str(percent) + '%; min-width: 4px;"></div></div>'
htmlsrc += '<div style="display: flex; margin-top: 3px;"><span style="width: 33.3%; text-align: left;">' + str(used) + ' Used</span><span style="width: 33.3%; text-align: center;">' + str(percent) + '%</span><span style="width: 33.3%; text-align: right;">' + str(avail) + ' Available</span></div>'
htmlsrc += '</div>'
#don't allow gc while pinning (or already running)
gctxt = ''
gcrun = subprocess.run('ps x|grep -E "(repo gc|ipfs pin)"|grep -v grep', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if gcrun.returncode == 0:
gctxt = gcrun.stdout.decode().strip()
if gctxt == '':
disabled = ''
title = 'Run IPFS Garbage Collection'
else:
disabled = 'disabled="disabled"'
title = 'Not available while pinning or GC already running...'
htmlsrc += '<form id="igc" action="/" method="post">'
htmlsrc += '<input id="csrf" name="csrf" type="hidden" value="' + sess['csrf'] + '" />'
htmlsrc += '<input id="rungc" name="rungc" type="hidden" value="1" />'
htmlsrc += '<button ' + disabled + ' title="' + title + '">Clean Up</button>'
htmlsrc += '</form>'
htmlsrc += '</div>'
htmlsrc += '<h3 style="margin-bottom: 0;">Activity Log</h3>'
htmlsrc += '<pre class="nfo" style="margin-top: 0;">'
with open('ipfspodcastnode.log', 'r') as pcl:
logtxt = pcl.read()
htmlsrc += html.escape(logtxt)
htmlsrc += '</pre>'
htmlsrc += '<div id="tmr"></div>'
htmlsrc += '<form id="frst" action="/" method="post" style="float: right;">'
htmlsrc += '<input id="csrf" name="csrf" type="hidden" value="' + sess['csrf'] + '" />'
htmlsrc += '<input id="reset" name="reset" type="hidden" value="1" />'
htmlsrc += '<button title="Hard reset the IPFS app (when "it\'s just not working")">Restart IPFS</button>'
htmlsrc += '</form>'
htmlsrc += '<div id="links"><a href="https://ipfspodcasting.net/Manage" target="_blank">Manage</a><a href="https://ipfspodcasting.net/faq" target="_blank">FAQ</a></div>'
#<a id="ipfsui" href="http://umbrel.local:5001/webui" target="_blank">IPFS WebUI</a><a id="ipfspn" href="http://umbrel.local:5001/webui/#/pins" target="_blank">Pinned Files</a>
htmlsrc += '<script>window.setTimeout( function() { window.location.reload(); }, 60000); </script>'
#document.getElementById("ipfsui").href=window.location.href; document.getElementById("ipfsui").href=document.getElementById("ipfsui").href.replace("8675", "5001/webui"); document.getElementById("ipfspn").href=window.location.href; document.getElementById("ipfspn").href=document.getElementById("ipfspn").href.replace("8675", "5001/webui/#/pins");
htmlsrc += '</body></html>'
return htmlsrc
@post('/')
def do_email():
    """Handle control-panel form posts: update e-mail, restart, or run GC.

    Every action requires the CSRF token from the form to match the one
    stored in the Beaker session. Always redirects back to the index page.
    """
    csrf = request.forms.get('csrf')
    sess = request.environ.get('beaker.session')
    if csrf == sess['csrf']:
        if request.forms.get('email') is not None:
            global email
            email = request.forms.get('email')
            with open('cfg/email.cfg', 'w') as ecf:
                ecf.write(email)
        if request.forms.get('reset') == '1':
            # PID 1 inside the container; killing it makes the runtime
            # restart the app. (Unused `suicide` binding removed.)
            subprocess.run('kill 1', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if request.forms.get('rungc') == '1':
            # Blocking IPFS garbage collection; output is discarded.
            subprocess.run(ipfspath + ' repo gc --silent', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    redirect('/')
@route('/ipfspod.png')
def server_static():
    """Serve the background logo image from the app directory."""
    return static_file('ipfspod.png', root='')
@route('/favicon.png')
def favicon_static():
    """Serve the favicon.

    Renamed from `server_static`, which redefined (shadowed) the handler
    above; bottle registers both routes at decoration time either way, but
    the duplicate name hid the first function at module level (flake8 F811).
    """
    return static_file('favicon.png', root='')
#run(host='0.0.0.0', port=8675, debug=True)
# Serve the Beaker-wrapped app on all interfaces in a background thread so
# the importing process is not blocked by bottle's run loop.
threading.Thread(target=run, kwargs=dict(host='0.0.0.0', port=8675, app=sapp, debug=False)).start()
| Cameron-IPFSPodcasting/podcastnode-Umbrel | webui.py | webui.py | py | 9,972 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "beaker.middleware.SessionMiddleware",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bottle.app",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bottle.request.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_nam... |
71552358588 | import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import os, os.path
import smtplib
import random
import win32gui
import win32con
try:
engine=pyttsx3.init('sapi5')
voices=engine.getProperty('voices')
print(voices[0].id)
engine.setProperty('voice',voices[0].id)
def speak(audio):
    """Speak *audio* aloud through the shared pyttsx3 engine (blocking)."""
    engine.say(audio)
    engine.runAndWait()
def chrome_webbrowser(chrome_path, url):
    """Open *url* in the browser registered at *chrome_path* (Chrome)."""
    webbrowser.get(chrome_path).open(url)
def wishme():
    """Greet the user with a time-of-day appropriate spoken message."""
    hour = int(datetime.datetime.now().hour)
    if 0 <= hour < 12:
        speak('Good Morning Vicky')
    elif 12 <= hour < 17:
        # BUGFIX: the original tested `hour >= 12 and hour >= 17`, which made
        # 12-16 fall through to "Good Evening" and 17+ say "Good Afternoon".
        speak('Good Afternoon Vicky')
    else:
        speak('Good Evening')
    print('Hellow I am Computer, How can I help you!')
    speak('Hellow I am Computer, How can I help you!')
def takecommand():
    """Listen on the microphone and return the recognized phrase.

    Returns the *string* 'None' (not the None object) when recognition or
    the network call fails; callers lowercase the result, so 'none'
    matches no command.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print('Listening....')
        r.pause_threshold=1
        # Short capture window: 1s to start speaking, 3s max phrase.
        audio=r.listen(source,timeout=1,phrase_time_limit=3)
    try:
        print('Recognizing....')
        # Google Web Speech API, Indian-English model.
        querry=r.recognize_google(audio,language='en-in')
        print(f'You said {querry}')
    except Exception:
        # Any recognition/network failure falls back to the sentinel string.
        print('Say That Again Please!')
        return 'None'
    return querry
if __name__ == "__main__":
chrome_path="C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s"
webbrowser.get(chrome_path)
while True:
querry=takecommand().lower()
# Strt
if "poweroff computer" in querry:
speak("Computer has been closed")
os.system("shutdown /s /t 1")
elif "power off computer" in querry:
speak("Computer has been closed")
os.system("shutdown /s /t 1")
elif "power of computer" in querry:
speak("Computer has been closed")
os.system("shutdown /s /t 1")
elif "shutdown computer" in querry or "shut down computer" in querry:
speak("Computer has been closed")
os.system("shutdown /s /t 1")
elif "computer shutdown" in querry or "computer shut down" in querry:
speak("Computer has been closed")
os.system("shutdown /s /t 1")
elif "quit computer" in querry:
speak("Computer has been power off")
os.system("shutdown /s /t 1")
elif "restartcomputer" in querry:
speak("We restarting your PC")
os.system("shutdown /r /t 1")
elif "restart computer" in querry:
speak("We restarting your PC")
os.system("shutdown /r /t 1")
elif "rstart computer" in querry:
speak("We restarting your PC")
os.system("shutdown /r /t 1")
elif "restart computer" in querry:
speak("We restarting your PC")
os.system("shutdown /r /t 1")
elif "hybrid" in querry or "hybernate" in querry or "hybernation" in querry or "hibernation" in querry:
speak("We set your PC to sleeping mode")
os.system("Rundll32.exe Powrprof.dll,SetSuspendState Sleep")
elif "sleep" in querry or "sleap" in querry:
speak("We set your PC to sleeping mode or turn off your screen")
win32gui.SendMessage(win32con.HWND_BROADCAST,win32con.WM_SYSCOMMAND, win32con.SC_MONITORPOWER, 2)
elif "open screen" in querry or "openscreen" in querry or "screen" in querry:
speak("We open your screen")
win32gui.SendMessage(win32con.HWND_BROADCAST,
win32con.WM_SYSCOMMAND, win32con.SC_MONITORPOWER, -1)
elif "vscode" in querry or "vs code" in querry:
speak("Vs code open to you Vicky")
os.system("code .")
# end
if "commands" in querry or "command" in querry:
i=0
while True:
if i==0:
i=1
wishme()
querry=takecommand().lower()
if 'wikipedia' in querry:
speak('Searching wikipedia...')
querry=querry.replace('wikipedia','')
querry=querry.replace('please','')
results=wikipedia.summary(querry,sentences=2)
speak('According to wikipedia, ')
print(results)
speak(results)
elif "vscode" in querry or "vs code" in querry:
speak("Vs code open to you Vicky")
os.system("code .")
elif 'who are you' in querry:
print('I am Computer Sir!')
speak('I am Computer Sir!')
elif 'made you' in querry:
print('I am made by you Sir Waqas powered by Vicky World Production')
speak('I am made by you Sir Waqas powered by Vicky World Production')
elif "sleep" in querry or "sleap" in querry:
speak("We set your PC to sleeping mode")
# os.system("Powercfg -H OFF")
os.system("rundll32.exe Powercfg -H OFF,SetSuspendState 0,1,0")
elif 'open youtube' in querry:
url=('youtube.com')
chrome_webbrowser(chrome_path,url)
# webbrowser.open('youtube.com')
speak('Youtube has been opened dear Vicky')
elif 'open google' in querry or 'open chrome' in querry:
# webbrowser.open('google.com')
url=('google.com')
chrome_webbrowser(chrome_path,url)
speak('Google Has been opened dear Vicky')
elif 'stack overflow' in querry:
# webbrowser.open('stackoverflow.com')
url=('stackoverflow.com')
chrome_webbrowser(chrome_path,url)
elif 'stackoverflow' in querry:
url=('stackoverflow.com')
chrome_webbrowser(chrome_path,url)
elif 'time' in querry:
str=datetime.datetime.now().strftime('%H:%M:%S')
print(f"Time is{str}")
speak(f"Time is {str}")
elif 'search' in querry:
querry=querry.replace('search','')
querry=querry.replace('please','')
chrome_webbrowser(chrome_path,querry)
elif 'song' in querry or 'songs' in querry:
music_dir=r'E:\D\New folder (2)'
songs=os.listdir(music_dir)
print(songs)
files_len= len([name for name in os.listdir('.') if os.path.isfile(name)])
print(files_len)
r= random.randint(0, files_len-1)
print(songs[r])
os.startfile(os.path.join(music_dir,songs[r]))
elif 'stop' in querry:
print('Commands has been stopped Thank You Sir!')
speak('Commands has been stopped Thank You Sir!')
break
elif 'quit' in querry or 'exit' in querry:
print('Commands has been stopped. Thank You Sir!')
speak('Commands has been stopped. Thank You Sir!')
break
elif "shutdown computer" in querry or "shut down computer" in querry:
speak("Computer has been closed")
os.system("shutdown /s /t 1")
elif "computer shutdown" in querry or "computer shut down" in querry:
speak("Computer has been closed")
os.system("shutdown /s /t 1")
elif "poweroff computer" in querry:
speak("Computer has been closed")
os.system("shutdown /s /t 1")
elif "power off computer" in querry:
speak("Computer has been closed")
os.system("shutdown /s /t 1")
elif "power of computer" in querry:
speak("Computer has been closed")
os.system("shutdown /s /t 1")
elif "quit Computer" in querry:
speak("Computer has been power off")
os.system("shutdown /s /t 1")
elif "restartcomputer" in querry:
speak("We restarting your PC")
os.system("shutdown /r /t 1")
elif "restart computer" in querry:
speak("We restarting your PC")
os.system("shutdown /r /t 1")
elif "rstart computer" in querry:
speak("We restarting your PC")
os.system("shutdown /r /t 1")
elif "restart Computer" in querry:
speak("We restarting your PC")
os.system("shutdown /r /t 1")
elif "sleep" in querry or "sleap" in querry:
speak("We set your PC to sleeping mode or turn off your screen")
win32gui.SendMessage(win32con.HWND_BROADCAST,win32con.WM_SYSCOMMAND, win32con.SC_MONITORPOWER, 2)
elif "open screen" in querry or "openscreen" in querry:
speak("Screen has been turn")
win32gui.SendMessage(win32con.HWND_BROADCAST,
win32con.WM_SYSCOMMAND, win32con.SC_MONITORPOWER, -1)
except Exception as e:
speak('An unknown Error has been occured Check Your Connection Please') | IamVicky90/Desktop-AI | task.py | task.py | py | 10,566 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyttsx3.init",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "webbrowser.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",... |
from flask import Flask, send_file, send_from_directory, safe_join, abort

app = Flask(__name__)

# app.config["CLIENT_IMAGES"] = "/home/mahima/console/static/client/img"
# Absolute directory that get_img() serves image downloads from.
app.config["CLIENT_IMAGES"] = "/home/lenovo/SEproject/OpsConsole/api/static"
# The absolute path of the directory containing CSV files for users to download
app.config["CLIENT_CSV"] = "/home/mahima/console/static/client/csv"
# The absolute path of the directory containing PDF files for users to download
app.config["CLIENT_PDF"] = "/home/mahima/console/static/client/pdf"
@app.route("/getimg/<img_name>")
def get_img(img_name):
    """Serve *img_name* from the CLIENT_IMAGES directory as a download.

    NOTE(review): this uses the pre-Flask-2.0 `filename=` keyword (renamed
    to `path=` in 2.0). Also, send_from_directory raises werkzeug's
    NotFound itself for a missing file, so the FileNotFoundError branch
    below likely never fires -- confirm against the installed Flask.
    """
    try:
        return send_from_directory(app.config["CLIENT_IMAGES"], filename = img_name, as_attachment=True)
    except FileNotFoundError:
        abort(404)
@app.route('/hello')
def hello():
    """Simple liveness endpoint returning a fixed greeting."""
    greeting = "Hello Lifeeee"
    return greeting
if __name__ == "__main__":
app.run(debug = True) | trishu99/Platypus | api/static/fileserver.py | fileserver.py | py | 882 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "flask.send_from_directory",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 20,
"usage_type": "call"
}
] |
71943493307 | import csv
import math
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import chi2, f_regression, mutual_info_regression
def mylog(x):
    """Natural logarithm, with a large negative sentinel in place of log(0)."""
    return -10000000000 if x == 0 else math.log(x)
def entropy(probs, neg, pos):
    """Weight-of-evidence style contribution of one feature level for a
    binary split.

    probs -- [negative_count, positive_count] for this level
    neg, pos -- total negative / positive counts over the data
    """
    rate_gap = probs[1] / pos - probs[0] / neg
    return rate_gap * mylog((probs[1] * neg) / (probs[0] * pos))
def get_bin_from_score(score):
    """Map a score to a binary bin: 0 = low (< 280), 1 = high (>= 280)."""
    bin_index = int(score // 280)
    return min(1, bin_index)
def iv(header, data):
    '''
    Information Value based feature selection.

    header -- display names for the features (plotted on the y axis)
    data   -- rows whose columns 0..8 are categorical features with levels
              1..5 and whose column 9 is the score binned by
              get_bin_from_score(). Shows a horizontal bar chart of each
              feature's summed IV-style score.
    '''
    neg,pos = 0,0
    # probs[feature][level][bin]: occurrence counts per score bin.
    probs=np.zeros((9,5,2))
    for datum in data:
        score_bin=get_bin_from_score(float(datum[9]))
        if(score_bin==0):
            neg+=1
        else:
            pos+=1
        for i in range(9):
            # feature levels are 1-based in the data, hence the -1
            probs[i][int(datum[i])-1][score_bin]+=1
    feature_score=[0 for _ in range(9)]
    for i in range(9):
        for j in range(5):
            feature_score[i]+=entropy(probs[i][j], neg, pos)
    fig = plt.figure()
    # reversed so the first feature appears at the top of the chart
    plt.barh(header[::-1],feature_score[::-1])
    plt.show()
def anova(header, regressors, target):
    '''
    ANOVA (F-test) based feature selection; plots one bar per feature.
    '''
    # f_regression returns (F-statistics, p-values); only the F values
    # are plotted, matching the original's anova_scores[0].
    f_stats, _pvalues = f_regression(regressors, target)
    fig = plt.figure()
    plt.barh(header[::-1], f_stats[::-1])
    plt.show()
def mutual_info(header, regressors, target):
    '''
    Mutual-information based feature selection; plots one bar per feature.
    '''
    scores = mutual_info_regression(regressors, target)
    fig = plt.figure()
    plt.barh(header[::-1], scores[::-1])
    plt.show()
def main():
    '''
    Reads training data from data.csv and dispatches to the feature
    selector named on the command line (iv / anova / mi).
    '''
    data,regressors,target = [],[],[]
    # data.csv: one header row, then 9 integer features + 1 float score.
    with open('data.csv','r') as csv_file:
        csv_reader=csv.reader(csv_file,delimiter=',')
        for row in csv_reader:
            data.append(row)
    header=data[0]
    data=data[1:]
    for datum in data:
        for i in range(9):
            datum[i]=int(datum[i])
        regressors.append(datum[:9])
        target.append(float(datum[9]))
    if len(sys.argv)<2:
        print('Usage: python feature_selectors.py [iv/anova/mi]')
    else:
        option=sys.argv[1]
        if option=='iv':
            iv(header, data)
        elif option=='anova':
            anova(header, regressors, target)
        elif option=='mi':
            mutual_info(header, regressors, target)
        else:
            print('Usage: python feature_selectors.py [iv/anova/mi]')
if __name__=='__main__':
main()
| Arnabjana1999/scoring_models | feature_selectors.py | feature_selectors.py | py | 2,740 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "math.log",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
27216859235 | from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Auto-register ModelAdmin classes from each installed app.
admin.autodiscover()

# Pre-Django-1.10 urlconf: patterns() with dotted-string view paths.
urlpatterns = patterns('',
    # Examples:
    (r'^$', 'rss_duna.feed.views.home'),
    # url(r'^myproject/', include('myproject.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),

    #(r'^feed/$', DunaEntriesFeed()),
    # Per-programa RSS feed (programa_id = non-digit slug) and feed index.
    (r'^duna/(?P<programa_id>\D+)/rss/$', 'rss_duna.feed.views.get_feed_rss'),
    (r'^duna/feeds/$', 'rss_duna.feed.views.list_feeds'),
    #(r'^prueba/$', 'rss_duna.feed.views.prueba')
)
#if settings.DEBUG:
# urlpatterns += patterns('',
# (r'^files/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 'show_indexes':False}),
# )
| yonsing/rss_duna | urls.py | urls.py | py | 965 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.defaults.patterns",
"line_number": 7,
"usage_type": "call"
},
{... |
73583270588 | #!/usr/bin/python
# coding: utf-8
from flask import Flask, Blueprint, flash, g, redirect, render_template, request, url_for, session
import os
app = Flask(__name__)
tests = []
class TestObj:
    """One runnable test, backed by a directory holding count.txt and run.txt."""

    def __init__(self, name, path):
        # NOTE: `path` must end with a separator; the original concatenates
        # rather than os.path.join-ing, and that contract is preserved.
        self.name = name
        self.path = path + self.name
        self.countfile = self.path + "/count.txt"
        self.count = self.read_count()
        self.run_file = self.path + "/run.txt"
        self.running = self.get_run_state()

    def read_count(self):
        """Return the iteration count stored in count.txt."""
        # BUGFIX: the original leaked the file handle (open().read()).
        with open(self.countfile) as f:
            return int(f.read())

    def get_run_state(self):
        """Return 1 if the test is flagged to run, 0 otherwise."""
        with open(self.run_file) as f:
            return int(f.read())

    def toggle_run(self):
        """Flip the 0/1 run flag persisted in run.txt."""
        newstate = str(int(not bool(self.get_run_state())))
        with open(self.run_file, 'w') as rf:
            rf.write(newstate)
@app.route('/', methods=('GET','POST'))
def main():
    """Render the test dashboard; on POST, toggle the named test's run flag."""
    # BUGFIX: the `global tests` declaration must precede any use of `tests`
    # in this scope -- the original declared it after the `for t in tests`
    # loop, which CPython rejects with a SyntaxError ("name used prior to
    # global declaration").
    global tests
    if request.method == 'POST':
        print('got request')
        # The toggled test's name arrives as the single form field key.
        test = list(dict(request.form))[0]
        testobj = None
        for t in tests:
            if t.name == test:
                testobj = t
        testobj.toggle_run()
        print('test', testobj)
    # Re-scan the test directory on every request so the page is fresh.
    tests = generate_tests()
    return render_template('index.html', tests=tests)
def generate_tests():
    """Scan the current_tests directory and build one TestObj per subdirectory."""
    base = "/home/pi/current_tests/"
    names = [n for n in os.listdir(base) if os.path.isdir(os.path.join(base, n))]
    return [TestObj(n, base) for n in names]
if __name__ == '__main__':
print(generate_tests())
app.run(host='0.0.0.0', debug=True)
| rjames711/automation | flaskweb/app.py | app.py | py | 1,568 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "flask.request.form... |
25760910262 | import random
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM
# Define the RNN model
# Tiny LSTM regressor: one timestep, one feature in -> next number out.
model = Sequential()
model.add(LSTM(64, input_shape=(1, 1)))
model.add(Dense(1, activation='linear'))
model.compile(optimizer='adam', loss='mean_squared_error')

balance = 100     # player's bankroll in dollars
bet = 1           # default stake (overwritten by the player's input below)
sim_numbers = []  # accumulated history of simulated cup draws (1..3)

while True:
    cup = random.randint(1, 3)  # winning cup for this round
    game = input("Choice ('simulate x' or 'play cups'): ")
    if game.startswith("simulate"):
        # "simulate N": append N random draws, fit on the sequence, then
        # predict the draw that would follow the last one.
        input_list = game.split(" ")
        num = int(input_list[1])
        if num >= 100000:
            print("Please wait a few minutes, numbers above 100.000 take longer for the model to simulate...")
        sim_numbers += [random.randint(1, 3) for _ in range(num)]
        print(sim_numbers)
        sim_numbers_arr = np.array(sim_numbers)
        sim_numbers_arr = sim_numbers_arr.reshape(sim_numbers_arr.shape[0], 1, 1)
        # Inputs are draws[:-1], targets draws[1:] (one-step-ahead pairs).
        model.fit(sim_numbers_arr[:-1], sim_numbers_arr[1:], epochs=10, verbose=0)
        predicted_number = model.predict(sim_numbers_arr[-1].reshape(1, 1, 1))
        print(f'Predicted next number: {predicted_number[0][0]:.0f}')
    elif game == "play cups" or game == "play":
        choice = input("What cup do you want to choose? 1, 2 or 3: ")
        bet_choice = input(f'How much do you want to bet on cup {choice}?: ')
        # NOTE(review): bet stays a string here; it is int()-converted at
        # each use below, so non-numeric input raises ValueError.
        bet = bet_choice
        if int(choice) == cup:
            balance += int(bet)
            print(f'You won ${bet}! Total balance is now ${balance}!')
        elif int(choice) != cup:
            balance -= int(bet)
            print(f'You lost ${bet}. Total balance is now ${balance}. Correct cup was cup {cup}.')
        # NOTE(review): unreachable -- the two branches above are exhaustive.
        else:
            print("Please input a valid number")
            continue
    else:
        print("Please input a valid choice...")
| atleastimnotgay/python | 3cups_prediction.py | 3cups_prediction.py | py | 1,897 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "keras.models.Sequential",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "random.rand... |
19882566170 | from jinja2 import Environment, BaseLoader, TemplateNotFound
import importlib_resources
class PackageLoader(BaseLoader):
    """Jinja2 loader that reads templates bundled in backendService.templates.

    `path` is only reported back to Jinja2 as the template's origin;
    actual lookups go through importlib_resources.
    """

    def __init__(self, path):
        self.path = path

    def get_source(self, environment, template):
        # Imported lazily to avoid a circular import at module load time.
        from backendService import templates
        try:
            source = importlib_resources.read_text(templates, template)
        except FileNotFoundError as exc:
            raise TemplateNotFound(template) from exc
        # Third element is the "is up to date" callable; packaged templates
        # never change at runtime, so always True.
        return source, self.path, lambda: True
JINJA_ENV = Environment(loader=PackageLoader("backendService.http.templates"))
def get_template(name):
    """Look up the template *name* in the module-level Jinja2 environment."""
    return JINJA_ENV.get_template(name)
| bitlogik/guardata | backendService/templates/__init__.py | __init__.py | py | 648 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "jinja2.BaseLoader",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "importlib_resources.read_text",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "backendService.templates",
"line_number": 13,
"usage_type": "name"
},
{
"api_name"... |
41014218939 | #coding=utf-8
import numpy as np
import pyten
from scipy import stats
from pyten.method.PoissonAirCP import PoissonAirCP
from pyten.method import AirCP
from pyten.tools import tenerror
from pyten.method import cp_als
from pyten.method import falrtc,TNCP
import matplotlib.pyplot as plt
#参数设置
missList = [0.7]
duplicate=1
prespecifyrank = 5
para_alpha = [1,1,1]
para_lmbda = 1
def normalize(mat):
    """Center each column of *mat* by subtracting its column mean.

    Without centering, the cosine similarities computed between rows
    downstream all come out nearly identical.
    """
    column_means = mat.mean(axis=0)
    centered = mat - column_means
    return centered
from sklearn.metrics.pairwise import cosine_similarity
def cons_similarity(dat):
    """Construct the three auxiliary similarity matrices for a 3-way tensor.

    Args:
        dat: 3-way numpy array; assumes axes are (country, time, topic)
             judging from how they are reduced below — TODO confirm.

    Returns:
        [aux0, aux1, aux2]: mode-0 similarity (Gaussian kernel of cosine
        distances between mean-centered tag vectors), mode-1 similarity
        (AR(1) autocorrelation decay over time lags) and mode-2 similarity
        (weighted exponential distance between topic slices).
    """
    siz = dat.shape
    temp = np.sum(dat, axis=1)
    tagvector = normalize(np.sum(dat, axis=1))
    # 1) Entity similarity: Gaussian kernel over pairwise cosine distances.
    cos_dist = 1 - cosine_similarity(tagvector)
    aux0 = np.exp(-(cos_dist**2))
    # 2) Time similarity: use the ACF of an AR(1) model fit to the total series.
    from statsmodels.tsa.arima_model import ARMA
    ts = np.sum(np.sum(dat, axis=0),axis = 1)
    order = (1,0)
    tempModel = ARMA(ts,order).fit()
    rho = np.abs(tempModel.arparams)
    # Build a Toeplitz-like matrix where entry (i, j) decays as rho**|i-j|.
    aux1 = np.diag(np.ones(siz[1]))
    for nn in range(1, siz[1]):
        aux1 = aux1 + np.diag(np.ones(siz[1] - nn), -nn) * rho ** nn + np.diag(np.ones(siz[1] - nn), nn) * rho ** nn
    # 3) Topic-to-topic correlation, weighted by the marginal distribution Pl.
    aux2 = np.diag(np.ones(siz[2]))
    Pl = np.sum(temp, axis=1) / np.sum(temp)
    for i in range(siz[2]):
        for j in range(siz[2]):
            aux2[i,j] = np.exp(-np.sum((((temp[:, i] - temp[:, j]) / np.max(temp, 1)) ** 2) * Pl))
    aux = [aux0, aux1, aux2]
    return (aux)
def convertMon(mat):
    """Aggregate daily data (axis 1, 365 days) into 12 monthly sums of 30 days.

    Note: days 360-364 fall outside the last 30-day "month" and are dropped,
    matching the original behavior.
    """
    month_starts = range(0, 365, 30)
    monthly = [np.sum(mat[:, month_starts[k]:month_starts[k + 1]], axis=1)
               for k in range(12)]
    return np.array(monthly).transpose((1, 0, 2))
dat =np.load('newbuild_tensor.npy')
#预处理,先筛选一次国家,0太多的的不纳入考虑,只剩下235->195个
idx = np.sum(np.sum(dat ==0,axis = 1),axis=1)>1000
dat = dat[idx]
#可供选择的调整方法,整理成月数据
dat = convertMon(dat)
siz = dat.shape
true_data = dat.copy()
true_data = pyten.tenclass.tensor.Tensor(true_data)
# 这里是为了画图比较
finalList1 = []
finalList22 = []
finalList2 = []
finalListTNCP=[]
finalListfal = []
for miss in missList:
aux = [np.diag(np.ones(siz[0])), np.diag(np.ones(siz[1])), np.diag(np.ones(siz[2]))]
RE2 = []
RE22 = []
for dup in range(duplicate):
np.random.seed(dup*4)
#每次都用同一份数据去做
data = dat.copy()
#观测值:丢失部分数据的
Omega = (np.random.random(siz) > miss) * 1
data[Omega == 0] -= data[Omega == 0]
data = pyten.tenclass.tensor.Tensor(data)
#补全时候用的rank
print('missing ratio: {0}'.format(miss))
#补全时候用的rank
com_rank = prespecifyrank
# 这部分引入了更新辅助矩阵的算法
simerror = 1
Iter = 1
while (simerror > 1e-2 and Iter < 10):
self2 = PoissonAirCP(data, omega=Omega, rank=com_rank, max_iter=3000, tol=1e-5,
OnlyObs=True, TrueValue=true_data, sim_mats=aux, alpha=para_alpha, lmbda=para_lmbda)
self2.run()
temp_aux = cons_similarity(self2.X.data)
simerror = np.max((np.linalg.norm(aux[0] - temp_aux[0]),
np.linalg.norm(aux[1] - temp_aux[1]),
np.linalg.norm(aux[2] - temp_aux[2])))
aux = temp_aux
Iter = Iter + 1
print('ExpAirCP loop with similarity error: {0}'.format(simerror))
[EEr, EReEr1, EReEr2] = tenerror(self2.X, true_data, Omega)
if Iter ==2:
RE22.append(EReEr1)
print(EReEr1)
# 到这里为止
[EErr, EReErr1, EReErr2] = tenerror(self2.X, true_data, Omega)
print ('ExpAirCP Completion Error: {0}, {1}, {2}'.format(EErr, EReErr1, EReErr2))
RE2.append(EReErr1)
finalList22.append(np.mean(RE22))
finalList2.append(np.mean(RE2))
for miss in missList:
aux = [np.diag(np.ones(siz[0])), np.diag(np.ones(siz[1])), np.diag(np.ones(siz[2]))]
RE1 = []
RE11 = []
for dup in range(duplicate):
np.random.seed(dup*4)
#每次都用同一份数据去做
data = dat.copy()
#观测值:丢失部分数据的
Omega = (np.random.random(siz) > miss) * 1
data[Omega == 0] -= data[Omega == 0]
data = pyten.tenclass.tensor.Tensor(data)
#补全时候用的rank
print('missing ratio: {0}'.format(miss))
#补全时候用的rank
com_rank = prespecifyrank
# 这部分引入了更新辅助矩阵的算法
simerror = 1
Iter = 1
while (simerror > 1e-2 and Iter < 10):
self = AirCP(data, omega=Omega, rank=com_rank, max_iter=3000, tol=1e-5, sim_mats=aux, alpha=para_alpha, lmbda=para_lmbda)
self.run()
temp_aux = cons_similarity(self.X.data)
simerror = np.max((np.linalg.norm(aux[0] - temp_aux[0]),
np.linalg.norm(aux[1] - temp_aux[1]),
np.linalg.norm(aux[2] - temp_aux[2])))
aux = temp_aux
Iter = Iter + 1
print('AirCP loop with similarity error: {0}'.format(simerror))
[EEr, EReEr1, EReEr2] = tenerror(self.X, true_data, Omega)
print(EReEr1)
# 到这里为止
#这里看对原始数据的补全准不准
[Err, ReErr1, ReErr2] = tenerror(self.X, true_data, Omega)
print ('AirCP Completion Error: {0}, {1}, {2}'.format(Err, ReErr1, ReErr2))
RE1.append(ReErr1)
finalList1.append(np.mean(RE1))
# for miss in missList:
# RETNCP = []
#
# for dup in range(duplicate):
# np.random.seed(dup*4)
# #每次都用同一份数据去做
# data = dat.copy()
# #观测值:丢失部分数据的
# Omega = (np.random.random(siz) > miss) * 1
# data[Omega == 0] -= data[Omega == 0]
# data = pyten.tenclass.tensor.Tensor(data)
#
# #补全时候用的rank
# print('missing ratio: {0}'.format(miss))
# #补全时候用的rank
# com_rank = prespecifyrank
# self3 = TNCP(data, Omega, rank=com_rank,alpha = para_alpha, lmbda=para_lmbda)
# self3.run()
# [EErrr, EReErrr1, EReErrr2] = tenerror(self3.X, true_data, Omega)
# print ('TNCP Completion Error: {0}, {1}, {2}'.format(EErrr, EReErrr1, EReErrr2))
# RETNCP.append(EReErrr1)
# finalListTNCP.append(np.mean(RETNCP))
#
#
# #对于fal不受到rank改变的影响,所以单独写出来
# for miss in missList:
# REfal = []
# for dup in range(duplicate):
# np.random.seed(dup*4)
# #每次都用同一份数据去做
# data = dat.copy()
# #观测值:丢失部分数据的
# Omega = (np.random.random(siz) > miss) * 1
# data[Omega == 0] -= data[Omega == 0]
# data = pyten.tenclass.tensor.Tensor(data)
# print('missing ratio: {0}'.format(miss))
# rX1 = falrtc(data, Omega, max_iter=100)
# [Errfal, ReErrfal, ReErr2fal] = tenerror(rX1, true_data, Omega)
# print ('falrtc Completion Error: {0}, {1}, {2}'.format(Errfal, ReErrfal, ReErr2fal))
# REfal.append(ReErrfal)
# finalListfal.append(np.mean(REfal))
#
print(finalList1)
print(finalList2)
print(finalListTNCP)
print(finalListfal)
result = [finalList1,finalList2,finalListTNCP]
result_name = 'prerank='+str(prespecifyrank)+'.csv'
#np.savetxt(result_name,result,fmt='%.4f',delimiter=',')
| yangjichen/ExpCP | realdata/GDELT_step3.py | GDELT_step3.py | py | 7,907 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.sum",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.cosine_similarity",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.... |
2958627650 | import Algorithmia
import logging
import os
LOG_FOLDER = 'logs'
if os.path.exists(LOG_FOLDER) is False:
os.mkdir(LOG_FOLDER)
logging.basicConfig(filename=LOG_FOLDER + '/' + __name__ + '.log', format='[%(asctime)s] %(message)s\n\n',
level=logging.DEBUG)
api_key = None
def get_emotion(photo: bytes) -> str or None:
    '''Return the most likely emotion detected in a face image.

    Args:
        photo: raw image bytes

    Returns:
        Lower-cased name of the highest-confidence emotion,
        or None if the remote call fails.
    '''
    try:
        client = Algorithmia.client(api_key)
        algo = client.algo('deeplearning/EmotionRecognitionCNNMBP/0.1.3')
        emotions = algo.pipe(bytearray(photo)).result['results'][0]['emotion']
        # Each entry is [confidence, label]; track the best strictly-greater one.
        best_name = str()
        best_score = 0.0
        for entry in emotions:
            if entry[0] > best_score:
                best_score = entry[0]
                best_name = entry[1]
        return best_name.lower()
    except Exception as e:
        logging.error(str(e))
        print('Algorithmia:', str(e))
        return None
def celebrities_similarity(photo: bytes) -> str or None:
    '''Return the celebrity that most resembles the face in the photo.

    Args:
        photo: raw image bytes

    Returns:
        Celebrity name with underscores turned into spaces,
        or None if the remote call fails.
    '''
    try:
        algo = Algorithmia.client(api_key).algo(
            'deeplearning/DeepFaceRecognition/0.1.1')
        results = algo.pipe(bytearray(photo)).result['results']
        # Entries are [confidence, name]; the first is the top match.
        return results[0][1].replace('_', ' ')
    except Exception as e:
        logging.error(str(e))
        return None
def verify_faces(photo1: bytes, photo2: bytes) -> float or None:
    '''Compute a similarity score between two face photos.

    Args:
        photo1: bytes of the first photo
        photo2: bytes of the second photo

    Returns:
        The service's similarity response, or None if the call fails.
    '''
    try:
        payload = [bytearray(photo1), bytearray(photo2)]
        algo = Algorithmia.client(api_key).algo('zskurultay/ImageSimilarity/0.1.2')
        return algo.pipe(payload)
    except Exception as e:
        logging.error(str(e))
        return None
def gender(photo: bytes) -> str or None:
    '''Classify the gender of the face in the photo.

    Args:
        photo: raw image bytes

    Returns:
        Lower-cased label of the higher-confidence prediction,
        or None if the remote call fails.
    '''
    try:
        img = bytearray(photo)
        client = Algorithmia.client(api_key)
        algo = client.algo('deeplearning/GenderClassification/1.0.2')
        gender_list = algo.pipe(img).result['results'][0]['gender']
        # Each entry is [confidence, label]; return the more confident label.
        # (Removed an unused `data = {'image': img}` local from the original.)
        if gender_list[0][0] > gender_list[1][0]:
            return gender_list[0][1].lower()
        else:
            return gender_list[1][1].lower()
    except Exception as e:
        logging.error(str(e))
        return None
def age(photo: bytes) -> str or None:
    '''Estimate the age interval of the face in the photo.

    Args:
        photo: raw image bytes

    Returns:
        [min_age, max_age] for the highest-confidence age bucket,
        or None if the remote call fails.
    '''
    try:
        img = bytearray(photo)
        client = Algorithmia.client(api_key)
        algo = client.algo('deeplearning/AgeClassification/1.0.3')
        ages = algo.pipe(img).result['results'][0]['age']
        # Each entry is [confidence, "(lo, hi)"]; keep the most confident bucket.
        # (Loop variable renamed from `age`, which shadowed this function's name.)
        str_age_interval = str()
        age_confidence = 0.0
        for candidate in ages:
            if candidate[0] > age_confidence:
                age_confidence = candidate[0]
                str_age_interval = candidate[1]
        age_string_interval = str_age_interval.strip('()').split(', ')
        age_interval = [int(age_string_interval[0]), int(age_string_interval[1])]
        return age_interval
    except Exception as e:
        logging.error(str(e))
        print(str(e))
        return None
| FunRobots/candybot_v2 | src/coffebot/vision/utils/algorithmia.py | algorithmia.py | py | 3,841 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_num... |
10424214131 | #-*- coding: utf-8 -*-
u"""
@author: Martí Congost
@contact: marti.congost@whads.com
@organization: Whads/Accent SL
@since: October 2008
"""
import cherrypy
from cocktail.modeling import cached_getter
from woost.controllers.publishablecontroller import PublishableController
class DocumentController(PublishableController):
    """A controller that serves rendered pages."""

    def __call__(self, **kwargs):
        # Document specified redirection: honor it before rendering anything.
        document = self.context["publishable"]

        if document.redirection_mode:
            redirection_target = document.find_redirection_target()
            if redirection_target is None:
                # A redirection is configured but resolves to nothing: 404.
                raise cherrypy.NotFound()
            raise cherrypy.HTTPRedirect(redirection_target.get_uri())

        # No redirection, serve the document normally
        return PublishableController.__call__(self)

    @cached_getter
    def page_template(self):
        # Template assigned to the published document; without one the
        # page cannot be rendered, so respond with 404.
        template = self.context["publishable"].template
        if template is None:
            raise cherrypy.NotFound()
        return template

    @cached_getter
    def view_class(self):
        # The view identifier declared by the resolved template.
        return self.page_template.identifier
| marticongost/woost | woost/controllers/documentcontroller.py | documentcontroller.py | py | 1,181 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "woost.controllers.publishablecontroller.PublishableController",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "cherrypy.NotFound",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cherrypy.HTTPRedirect",
"line_number": 29,
"usage_type": ... |
27009828768 | from sklearn.linear_model import LassoCV
def run(x_train, y_train, x_test, y_test, eps, n_alphas, alphas, fit_intercept, normalize, precompute, max_iter, tol, copy_X, cv, verbose, n_jobs,
        positive, random_state, selection):
    """Fit a cross-validated Lasso model and report predictions, scores and
    the fitted estimator attributes as JSON-friendly Python values."""
    params = dict(eps=eps,
                  n_alphas=n_alphas,
                  alphas=alphas,
                  fit_intercept=fit_intercept,
                  normalize=normalize,
                  precompute=precompute,
                  max_iter=max_iter,
                  tol=tol,
                  copy_X=copy_X,
                  cv=cv,
                  verbose=verbose,
                  n_jobs=n_jobs,
                  positive=positive,
                  random_state=random_state,
                  selection=selection)
    reg = LassoCV(**params).fit(x_train, y_train)
    result = {'train_predict': reg.predict(x_train).tolist(),
              'test_predict': reg.predict(x_test).tolist(),
              'train_score': reg.score(x_train, y_train),
              'test_score': reg.score(x_test, y_test),
              'alpha_': reg.alpha_,
              'coef_': reg.coef_.tolist(),
              'intercept_': reg.intercept_,
              'mse_path_': reg.mse_path_.tolist(),
              'alphas_': reg.alphas_.tolist(),
              'dual_gap_': reg.dual_gap_.tolist(),
              'n_iter_': reg.n_iter_}
    return result
| lisunshine1234/mlp-algorithm-python | machine_learning/regression/linear_models/lassoCV/run.py | run.py | py | 1,314 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.linear_model.LassoCV",
"line_number": 6,
"usage_type": "call"
}
] |
4993994587 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 19 13:04:11 2019
@author: Diego Wanderley
@python: 3.6
@description: Train script with training class
"""
import tqdm
import argparse
import torch
import torch.optim as optim
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from torch.autograd import Variable
from terminaltables import AsciiTable
import utils.transformations as tsfrm
from test_yolo import evaluate
from models.yolo import Darknet
from models.yolo_utils.utils import *
from utils.datasets import OvaryDataset
from utils.helper import gettrainname
class Training:
    """
    Training class

    Drives the train / validate / checkpoint loop for a YOLO v3 model.
    """
    def __init__(self, model, device, train_set, valid_set, optim,
                class_names, train_name='yolov3', logger=None,
                iou_thres=0.5, conf_thres=0.5, nms_thres=0.5):
        '''
        Training class - Constructor

        Arguments:
            model: Darknet network to train.
            device: torch device to run on.
            train_set, valid_set: datasets providing (names, imgs, targets).
            optim: torch optimizer bound to the model parameters.
            class_names: list of class label strings.
            train_name: run identifier used for weight/log file names.
            logger: optional tensorboard SummaryWriter.
            iou_thres, conf_thres, nms_thres: validation thresholds.
        '''
        self.model = model
        self.device = device
        self.train_set = train_set
        self.valid_set = valid_set
        self.optimizer = optim
        self.train_name = train_name
        # NOTE(review): assumes train_name carries two leading
        # "<date>_<time>" segments produced by gettrainname — confirm.
        self.model_name = "_".join(train_name.split('_')[2:])
        self.logger = logger
        self.class_names = class_names
        # How many batches to accumulate gradients over before stepping.
        self.gradient_accumulations = 2
        self.iou_thres = iou_thres
        self.conf_thres = conf_thres
        self.nms_thres = nms_thres
        # Names of per-YOLO-layer metrics gathered during training.
        self.metrics = [
            "grid_size",
            "loss",
            "x",
            "y",
            "w",
            "h",
            "conf",
            "cls",
            "cls_acc",
            "recall50",
            "recall75",
            "precision",
            "conf_obj",
            "conf_noobj",
        ]
        self.epoch = 0
    def _saveweights(self, state, log=None):
        '''
        Save network weights.
        Arguments:
            @state (dict): parameters of the network
            @log (str, optional): training log text written next to the weights
        '''
        path = '../weights/'
        filename = path + self.train_name + '_weights.pth.tar'
        torch.save(state, filename)
        # Save Log table (only when a string log was supplied)
        if type(log) == str:
            logname = filename.replace('.pth.tar','.log')
            logname = logname.replace('_weights','_train')
            log_file = open(logname, "w")
            log_file.write(log)
            log_file.close()
    def _iterate_train(self, data_loader):
        # Init loss count ("lotal" is a long-standing typo for "total")
        lotal_loss = 0
        data_train_len = len(self.train_set)
        # Active train
        self.model.train()
        self.model = self.model.to(self.device)
        # Batch iteration - Training dataset
        for batch_idx, (names, imgs, targets) in enumerate(tqdm.tqdm(data_loader, desc="Training epoch")):
            batches_done = len(data_loader) * self.epoch + batch_idx
            targets = Variable(targets.to(self.device), requires_grad=False)
            imgs = Variable(imgs.to(self.device))
            bs = len(imgs)
            # Forward and loss
            loss, output = self.model(imgs, targets=targets)
            loss.backward()
            if batches_done % self.gradient_accumulations:
                # Accumulates gradient before each step
                # NOTE(review): steps when batches_done is NOT a multiple of
                # gradient_accumulations — verify this cadence is intended.
                self.optimizer.step()
                self.optimizer.zero_grad()
            self.model.seen += imgs.size(0)
            # Log metrics at each YOLO layer, weighted by batch size so the
            # accumulated values form a dataset-wide average.
            batch_factor = bs / data_train_len
            for i, metric in enumerate(self.metrics):
                out_metrics = [(yolo.metrics.get(metric, 0) * batch_factor) for yolo in self.model.yolo_layers]
                # Fill average
                for j in range(len(self.avg_metrics[metric])):
                    self.avg_metrics[metric][j] += out_metrics[j]
            lotal_loss += loss.item() * batch_factor
        return lotal_loss
    def _logging(self, epoch, avg_loss_train, val_evaluation):
        # 1. Log scalar values (scalar summary)
        # NOTE: mutates val_evaluation in place (appends the train loss).
        info = val_evaluation
        info.append(('train_loss_total', avg_loss_train))
        for tag, value in info:
            self.logger.add_scalar(tag, value, epoch+1)
        # 2. Log values and gradients of the parameters (histogram summary)
        for yolo_tag, value in self.model.named_parameters():
            # Define tag name
            tag_parts = yolo_tag.split('.')
            tag = self.model_name + '/' + tag_parts[-2] + '/' + tag_parts[-1]
            # Ignore bias from batch normalization
            if (not 'batch_norm' in tag_parts[-2]) or (not 'bias' in tag_parts[-1]):
                # add data to histogram
                self.logger.add_histogram(tag, value.data.cpu().numpy(), epoch+1)
                # add gradient if exist
                #if not value.grad is None:
                #    self.logger.add_histogram(tag +'/grad', value.grad.data.cpu().numpy(), epoch+1)
    def train(self, epochs=100, batch_size=4):
        '''
        Train network function
        Arguments:
            @param net: network model
            @param epochs: number of training epochs (int)
            @param batch_size: batch size (int)
        '''
        # Load Dataset
        data_loader_train = DataLoader(self.train_set, batch_size=batch_size, shuffle=True,
                                    collate_fn=self.train_set.collate_fn_yolo)
        data_loader_val = DataLoader(self.valid_set, batch_size=1, shuffle=False,
                                    collate_fn=self.valid_set.collate_fn_yolo)
        # Define parameters
        best_loss = 1000000   # Init best loss with a too high value
        best_ap = 0           # Init best average precision as zero
        # Run epochs
        for e in range(epochs):
            self.epoch = e
            print('Starting epoch {}/{}.'.format(self.epoch + 1, epochs))
            log_str = ''
            # NOTE(review): uses the module-level global "model" instead of
            # self.model here — only works when run as a script; likely a bug.
            metric_table = [["Metrics", *["YOLO Layer " + str(i) for i in range(len(model.yolo_layers))]]]
            self.avg_metrics = { i : [0]*len(self.model.yolo_layers) for i in self.metrics }
            # ========================= Training =============================== #
            loss_train = self._iterate_train(data_loader_train)
            # Log metrics at each YOLO layer
            for i, metric in enumerate(self.metrics):
                # NOTE(review): "formats" is built but never used below.
                formats = {m: "%.6f" for m in self.metrics}
                formats["grid_size"] = "%2d"
                formats["cls_acc"] = "%.2f%%"
                row_metrics = self.avg_metrics[metric]
                metric_table += [[metric, *row_metrics]]
            log_str += AsciiTable(metric_table).table
            log_str += "\nTotal loss: %0.5f"%loss_train
            print(log_str)
            print('')
            # ========================= Validation ============================= #
            precision, recall, AP, f1, ap_class = evaluate(self.model,
                                                data_loader_val,
                                                self.iou_thres,
                                                self.conf_thres,
                                                self.nms_thres,
                                                self.device)
            # Group metrics
            evaluation_metrics = [
                ("val_precision", precision.mean()),
                ("val_recall", recall.mean()),
                ("val_mAP", AP.mean()),
                ("val_f1", f1.mean()),
            ]
            # Print class APs and mAP
            ap_table = [["Index", "Class name", "AP"]]
            for i, c in enumerate(ap_class):
                ap_table += [[c, self.class_names[c], "%.5f" % AP[i]]]
            print(AsciiTable(ap_table).table)
            print("mAP: "+ str(AP.mean()))
            print('\n')
            # ======================== Save weights ============================ #
            best_loss = loss_train if loss_train <= best_loss else best_loss
            is_best = AP.mean() >= best_ap
            if is_best:
                best_ap = AP.mean()
            # save (NOTE: the checkpoint is written every epoch, not only on
            # improvement; only best_ap tracking is conditional)
            self._saveweights({
                'epoch': self.epoch + 1,
                'state_dict': self.model.state_dict(),
                'train_loss_total': loss_train,
                'train_best_loss': best_loss,
                'val_precision': precision.mean(),
                'val_recall': recall.mean(),
                'val_mAP': AP.mean(),
                'val_f1': f1.mean(),
                'batch_size': batch_size,
                'optimizer': str(self.optimizer),
                'optimizer_dict': self.optimizer.state_dict(),
                'device': str(self.device),
                'avg_metrics': self.avg_metrics,
                'iou_thres': self.iou_thres,
                'conf_thres': self.conf_thres,
                'nms_thres': self.nms_thres
                },
                log=log_str )
            print('Model {:s} updated!'.format(self.train_name))
            print('\n')
            # ====================== Tensorboard Logging ======================= #
            if self.logger:
                self._logging(self.epoch, loss_train, evaluation_metrics)
def parse_yolo_name(backbone_name, num_anchors, num_classes):
    """
    Get the .cfg filename given the Yolo v3 hyperparameters.
    """
    if 'tiny' in backbone_name:
        variant = 'yolov3-tiny'
    elif 'spp' in backbone_name:
        variant = 'yolov3-spp'
    else:
        variant = 'yolov3'
    return '{}_a{}_c{}'.format(variant, num_anchors, num_classes)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Training parameters
parser.add_argument("--batch_size", type=int, default=4, help="size of each image batch")
parser.add_argument("--num_epochs", type=int, default=150, help="size of each image batch")
parser.add_argument("--model_name", type=str, default="yolov3", help="name of the model definition (used to load the .cfg file)")
parser.add_argument("--num_anchors", type=int, default=6, help="number of anchors")
parser.add_argument("--num_classes", type=int, default=3, help="number of classes")
# Evaluation parameters
parser.add_argument("--iou_thres", type=float, default=0.5, help="iou threshold required to qualify as detected")
parser.add_argument("--conf_thres", type=float, default=0.5, help="object confidence threshold")
parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression")
opt = parser.parse_args()
print(opt)
# Classes names
cls_names = ['background','follicle','ovary']
# Input parameters
n_classes = opt.num_classes
has_ovary = True if n_classes > 2 else False
n_epochs = opt.num_epochs
batch_size = opt.batch_size
network_name = parse_yolo_name(opt.model_name, opt.num_anchors, n_classes)
train_name = gettrainname(network_name)
mode_config_path = 'config/'+ network_name +'.cfg'
# Load network model
model = Darknet(mode_config_path)
# Load CUDA if exist
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Transformation parameters
transform = tsfrm.Compose([tsfrm.RandomHorizontalFlip(p=0.5),
tsfrm.RandomVerticalFlip(p=0.5),
tsfrm.RandomAffine(90, translate=(0.15, 0.15),
scale=(0.75, 1.5), resample=3, fillcolor=0)
])
# Dataset definitions
dataset_train = OvaryDataset(im_dir='../datasets/ovarian/im/train/',
gt_dir='../datasets/ovarian/gt/train/',
clahe=False, transform=transform,
ovary_inst=has_ovary)
dataset_val = OvaryDataset(im_dir='../datasets/ovarian/im/val/',
gt_dir='../datasets/ovarian/gt/val/',
clahe=False, transform=False,
ovary_inst=has_ovary)
# Optmization
optimizer = optim.Adam(model.parameters())
# Set logs folder
log_dir = '../logs/' + train_name + '/'
writer = SummaryWriter(log_dir=log_dir)
# Run training
training = Training(model, device, dataset_train, dataset_val,
optimizer,
logger=writer,
class_names=cls_names[:n_classes],
train_name=train_name,
iou_thres=opt.iou_thres,
conf_thres=opt.conf_thres,
nms_thres=opt.nms_thres)
training.train(epochs=n_epochs, batch_size=batch_size)
print('') | dswanderley/detntorch | python/train_yolo.py | train_yolo.py | py | 12,881 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.optim",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "torch.save",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line... |
3986831730 | """The abstract class for http routing"""
from abc import ABCMeta, abstractmethod
from typing import AbstractSet, Any, Mapping, Tuple
from .http_callbacks import HttpRequestCallback
from .http_response import HttpResponse
class HttpRouter(metaclass=ABCMeta):
"""The interface for an HTTP router"""
@property # type: ignore
@abstractmethod
def not_found_response(self) -> HttpResponse:
"""The response when a handler could not be found for a method/path
Returns:
HttpResponse: The response when a route cannot be found.
"""
@not_found_response.setter # type: ignore
@abstractmethod
def not_found_response(self, value: HttpResponse) -> None:
...
@abstractmethod
def add(
self,
methods: AbstractSet[str],
path: str,
callback: HttpRequestCallback
) -> None:
"""Add an HTTP request handler
Args:
methods (AbstractSet[str]): The supported HTTP methods.
path (str): The path.
callback (HttpRequestCallback): The request handler.
"""
@abstractmethod
def resolve(
self,
method: str,
path: str
) -> Tuple[HttpRequestCallback, Mapping[str, Any]]:
"""Resolve a request to a handler with the route matches
Args:
method (str): The HTTP method.
path (str): The path.
Returns:
Tuple[HttpRequestCallback, Mapping[str, Any]]: A handler and the route
matches.
"""
| rob-blackbourn/bareASGI | bareasgi/http/http_router.py | http_router.py | py | 1,583 | python | en | code | 26 | github-code | 6 | [
{
"api_name": "abc.ABCMeta",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "http_response.HttpResponse",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "http_respon... |
21729046794 | import firebase_admin
from firebase_admin import db
from flask import jsonify
from hashlib import md5
from random import randint
from time import time
from time import time, sleep
firebase_admin.initialize_app(options={
'databaseURL': 'https://copy-passed.firebaseio.com',
})
waitlist = db.reference('waitlist')
ids = db.reference('ids')
timeoutSeconds = 120
def put(ref, data):
    """Merge *data* into the current value of the database reference *ref*.

    Returns False (writing nothing) when the reference currently holds no
    truthy value; otherwise writes the merged mapping back and returns True.
    """
    current = ref.get()
    if not current:
        return False
    current.update(data)
    ref.update(current)
    return True
def add_user(request):
    """Register the caller on the Firebase waitlist and mint them an id.

    Flow: place the caller's id on the waitlist with placeholder "x",
    poll until the placeholder is replaced by a uuid (presumably written
    by another service — verify against the counterpart) or the timeout
    elapses, then store a fresh unique id for that uuid in the ids table.

    Returns a Flask-style (body, status) tuple.
    """
    delBlankw = False
    delBlanki = False
    if waitlist.get() and request.json["id"] in waitlist.get():
        return "Conflict", 409
    # Firebase drops empty nodes, so seed missing tables with a throwaway
    # "blank--" child and remember to delete it afterwards.
    if not waitlist.get():
        delBlankw = True
        db.reference().update({"waitlist": {"blank--": 0}})
    if not ids.get():
        delBlanki = True
        db.reference().update({"ids": {"blank--": 0}})
    put(waitlist, {request.json["id"]: "x"})
    start = time()
    # print("entering waitlist")
    while waitlist.child(request.json["id"]).get() == "x":
        sleep(.5)
        if (time() - start) >= timeoutSeconds:
            # Timed out: remove the seed children and our waitlist entry.
            if delBlanki:
                ids.child("blank--").delete()
            if delBlankw:
                waitlist.child("blank--").delete()
            waitlist.child(request.json["id"]).delete()
            return "Request Timeout", 408
    uuid = waitlist.child(request.json["id"]).get()
    waitlist.child(request.json["id"]).delete()
    if not ids.get():
        delBlanki = True
        db.reference().update({"ids": {"blank--": 0}})
    # Regenerate until the candidate id does not collide with an existing one.
    idu = get_id(uuid)
    while idu in ids.get():
        idu = get_id(uuid)
    put(ids, {idu: {"uid": uuid, "timestamp": time()}})
    # print(ids)
    if delBlanki:
        ids.child("blank--").delete()
    if delBlankw:
        waitlist.child("blank--").delete()
    return jsonify({"id": idu}), 201
def get_id(oid):
    """Derive a 32-character hex id by salting *oid* with a random integer.

    Uses an integer upper bound (10**12) instead of the original 1e12:
    random.randint rejects float arguments on modern Python (error since 3.12).
    """
    return md5((str(randint(0, 10**12)) + oid).encode()).hexdigest()
def authenticator(request):
    """HTTP entry point (cloud-function style request handler).

    POST / with JSON {"id": ...} registers the caller via add_user;
    adding "revoke": true deletes a previously issued id instead.
    Returns Flask-style (body, status) tuples.
    """
    if not request.json or 'id' not in request.json:
        return "Not Acceptable", 406
    if "revoke" in request.json and request.json["revoke"]:
        if ids.get() and ids.child(request.json["id"]).get():
            ids.child(request.json["id"]).delete()
            return "Deleted", 200
        else:
            return "Not Found", 204
    if request.path == '/' or request.path == '':
        if request.method == 'POST':
            return add_user(request)
        else:
            return 'Method not supported', 405
    return 'URL not found', 404
| ocular-data/copy-passed-firebase | python_functions/authenticator/main.py | main.py | py | 2,575 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "firebase_admin.initialize_app",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "firebase_admin.db.reference",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "firebase_admin.db",
"line_number": 13,
"usage_type": "name"
},
{
"api_na... |
15581775407 | #!/usr/bin/env python
import pygame
import constants
from network import Type
import physical_object
from physical_object import PhysicalObject
import bullet
import math
from pygame.rect import Rect
import play_sound
from pygame import mixer
from pygame.mixer import Sound
TURRET_WIDTH = 24
TURRET_HEIGHT = 28
GUN_CHARGEUP_TIME = 100
class Turret(PhysicalObject):
    """This class represents a turret

    A stationary defender-owned gun: while an attacker ship is present it
    charges up, then fires a predictively-aimed bullet and resets.
    """
    typ = Type.TURRET
    timeLeftToCharge = 0
    def __init__(self, position, level):
        PhysicalObject.__init__(self, position)
        self.level = level
        self.controllingPlayer = physical_object.OWNER_DEFENDER
        self.physicsRect = pygame.rect.Rect(self.r_x, self.r_y, TURRET_WIDTH, TURRET_HEIGHT)
        self.image = pygame.image.load('images/defenses.png')
        self.rect = self.image.get_rect()
        # Sprite-sheet sub-rectangles keyed by charge state.
        self.actions = {"charged 0": (0, 112, TURRET_WIDTH, TURRET_HEIGHT),
                        "charged 50": (TURRET_WIDTH, 112, TURRET_WIDTH, TURRET_HEIGHT),
                        "charged 100": (2*TURRET_WIDTH, 112, TURRET_WIDTH, TURRET_HEIGHT)}
        # Active region: the turret only charges/fires while intersecting it.
        self.boundsRect = Rect(level.rect.x,level.rect.y,level.rect.width,constants.SCREEN_HEIGHT)
        self.action = "charged 0"
        self.area = pygame.rect.Rect(self.actions[self.action])
        #print 'turret (x,y) = ', (self.r_x, self.r_y)
        #print 'turret owner = ', self.controllingPlayer
        self.timeLeftToCharge = GUN_CHARGEUP_TIME
    def step(self, scrollPosition):
        # translate movement boundary
        self.boundsRect.y = scrollPosition
        # update self
        PhysicalObject.step(self, scrollPosition)
        # Pick the sprite frame matching the current charge level.
        if self.timeLeftToCharge < (1/5.0)*GUN_CHARGEUP_TIME:
            self.action = "charged 100"
        elif self.timeLeftToCharge < (3/5.0)*GUN_CHARGEUP_TIME:
            self.action = "charged 50"
        else:
            self.action = "charged 0"
        self.area = pygame.rect.Rect(self.actions[self.action])
        if self.physicsRect.colliderect(self.boundsRect):
            # Scan level objects for an attacker-controlled ship to target.
            turretSeesShip = False
            target = None
            for o in self.level.physicalObjects:
                if(o.controllingPlayer == physical_object.OWNER_ATTACKER and
                    o.targetType == physical_object.TARGET_TYPE_SHIP):
                    turretSeesShip = True
                    target = o
            if turretSeesShip:
                self.timeLeftToCharge -= 1
                if self.timeLeftToCharge <= 0:
                    # it's the ship! get it!
                    soundEfx = pygame.mixer.Sound(constants.TURRET_BULLET_SFX)
                    soundEfx.set_volume(0.5)
                    play_sound.PlaySounds(soundEfx, 2)
                    theBullet = bullet.Bullet((self.rect.x + TURRET_WIDTH/2 - bullet.BULLET_WIDTH/2, self.rect.y + (bullet.BULLET_HEIGHT + 6)), "tur")
                    theBullet.controllingPlayer = self.controllingPlayer
                    # old velocity code
                    #deltaX = o.r_x - self.r_x
                    #deltaY = o.r_y - self.r_y
                    #distance = math.hypot(deltaX, deltaY)
                    #theBullet.v_x = bullet.DEFAULT_SPEED*(deltaX/distance) # v_x = speed*cos
                    #theBullet.v_y = bullet.DEFAULT_SPEED*(deltaY/distance) # v_y = speed*sin
                    # new velocity code; apparently tries to divide by zero and take the square root of a negative number
                    #timeToImpact = ((o.r_x*o.v_x + o.r_y*o.v_y + math.sqrt(-pow(o.r_y,2)*(-1 + pow(o.v_x, 2)) + o.r_x*(o.r_x + 2*o.r_y*o.v_x*o.v_y - o.r_x*pow(o.v_y, 2))))/(-1 + pow(o.v_x, 2) + pow(o.v_y, 2)))
                    #theBullet.v_x = (o.r_x + timeToImpact*o.v_x)/timeToImpact
                    #theBullet.v_y = (o.r_y + timeToImpact*o.v_y)/timeToImpact
                    # new velocity code, mk. II
                    # Predict the intercept point by fixed-point iteration:
                    # refine the target's extrapolated position four times.
                    futurepos = (target.r_x, target.r_y) # Guess that where they'll be in the future is where they are now
                    MY_SPEED = 1.5 + constants.SCROLL_RATE
                    for i in range(0, 4):
                        dist = (futurepos[0] - self.r_x, futurepos[1] - self.r_y)
                        timetotarget = math.hypot(dist[0], dist[1]) / bullet.DEFAULT_SPEED
                        distcovered = (target.v_x*timetotarget, target.v_y*timetotarget)
                        futurepos = (target.r_x + distcovered[0], target.r_y + distcovered[1])
                    dirNotNormalized = (futurepos[0] - self.r_x, futurepos[1] - self.r_y)
                    dirNormalized = ((dirNotNormalized[0]/math.hypot(dirNotNormalized[0], dirNotNormalized[1]),
                                        dirNotNormalized[1]/math.hypot(dirNotNormalized[0], dirNotNormalized[1])))
                    # NOTE(review): time-to-target uses bullet.DEFAULT_SPEED but the
                    # bullet is fired at MY_SPEED — confirm these are meant to differ.
                    theBullet.v_x = MY_SPEED*dirNormalized[0]
                    theBullet.v_y = MY_SPEED*dirNormalized[1]
                    # end of velocity code
                    self.childObjects.append(theBullet)
                    self.timeLeftToCharge = GUN_CHARGEUP_TIME
            else: # if the turret doesn't see the ship,
                self.timeLeftToCharge = GUN_CHARGEUP_TIME # then the turret should power down
| Nayruden/GameDev | turret.py | turret.py | py | 4,330 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "physical_object.PhysicalObject",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "network.Type.TURRET",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "network.Type",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "... |
39959163393 | # to build, use "cd (playsong directory)"
# pyinstaller --onefile playSong.py
#lib imports
import keyboard
import threading
import time
import os
import re
#local imports
from settings import SETTINGS,map_velocity,apply_range_bounds
# Module-level playback state shared by the keyboard callbacks below.
# NOTE(review): a `global` statement at module scope is a no-op in Python;
# the two lines below have no effect and only document intent.
global isPlaying
global midi_action_list
isPlaying = False  # True while playback is running
storedIndex = 0  # index of the next Midi_Action to play
# Shifted symbol -> unshifted digit key, used when simulating shift+digit presses.
conversionCases = {'!': '1', '@': '2', '£': '3', '$': '4', '%': '5', '^': '6', '&': '7', '*': '8', '(': '9', ')': '0'}
"""
#maps a string representing a note to a note index where C0 = 0
note_offsets = {"C":0,"D":2,"E":4,"F":5,"G":7,"A":9,"B":11}
def note_to_index(note):
is_sharp = (note[1] == "#")
note_letter = note[0]
if is_sharp:
note_number = int(note[2:])
else:
note_number = int(note[1:])
index = note_offsets[note_letter] + int(is_sharp) + 12*note_number
return index
octave_note_order = ["C","C#","D","D#","E","F","F#","G","G#","A","A#","B"]
def index_to_note(index):
base_letter = "A"
base_octave = 0
val = 21
#A0 is value 21 in midi
octave = (index - 12) // 12
letter = (index - 12) % 12
return octave_note_order[letter] + str(octave)
"""
def onDelPress(event):
    """Keyboard hook callback: toggle playback on each pause-key press.

    Flips the global isPlaying flag; when playback turns on, kicks off
    the note-scheduling loop. Always returns True so the hook remains
    installed.
    """
    global isPlaying
    isPlaying = not isPlaying
    if not isPlaying:
        print("Stopping...")
        return True
    print("Playing...")
    playNextNote()
    return True
def isShifted(charIn):
    """Return True when typing charIn requires the shift key.

    Covers key names containing "shift", uppercase ASCII letters, and the
    shifted symbol row / punctuation characters.
    """
    if "shift" in charIn:
        return True
    code = ord(charIn)
    if 65 <= code <= 90:  # 'A'..'Z'
        return True
    return charIn in "!@#$%^&*()_+{}|:\"<>?"
def pressLetter(strLetter):
    # Simulate pressing one keyboard key for a note character.
    # Shifted characters (uppercase letters / symbols) are produced by
    # holding shift around a press of the unshifted key.
    if isShifted(strLetter):
        # we have to convert all symbols to numbers
        if strLetter in conversionCases:
            strLetter = conversionCases[strLetter]
        # Release first so a repeated note re-triggers instead of being
        # treated as an already-held key.
        keyboard.release(strLetter.lower())
        keyboard.press("shift")
        keyboard.press(strLetter.lower())
        keyboard.release("shift")
        # Optionally tap instead of hold; releaseLetter() handles the
        # release otherwise.
        if SETTINGS.get("key_instant_release") == True:
            keyboard.release(strLetter.lower())
    else:
        keyboard.release(strLetter)
        keyboard.press(strLetter)
        if SETTINGS.get("key_instant_release") == True:
            keyboard.release(strLetter)
    return
def releaseLetter(strLetter):
    """Release the key previously pressed for strLetter.

    Mirrors pressLetter: shifted symbols are mapped back to their digit
    key and released in lowercase form.
    """
    if not isShifted(strLetter):
        keyboard.release(strLetter)
        return
    key = conversionCases.get(strLetter, strLetter)
    keyboard.release(key.lower())
class Midi_Action:
    """One playback event: the keys to press/release, a velocity, a timing
    offset, and an optional tempo change (in which case note_list is empty).
    """

    def __init__(self, offset, note_list, velocity, tempo_change):
        self.offset = offset
        self.note_list = note_list
        self.velocity = velocity
        self.tempo_change = tempo_change
def processFile(song_path):
    """Parse a macro text file into a list of Midi_Action objects.

    Each non-blank line looks like "<offset> <note note ...>:<velocity>"
    or "<offset> tempo=<bpm>:<velocity>".  Note numbers are clamped via
    apply_range_bounds and mapped to keyboard characters through
    SETTINGS["key_map"].
    """
    # NOTE(review): playback_speed is declared global here but never read
    # or assigned in this function.
    global playback_speed
    with open(song_path,"r") as macro_file:
        lines = macro_file.read().split("\n")
    processed_notes = []
    for line in lines:
        if len(line.strip()) == 0:
            continue
        try:
            #print(line)
            offset,note_str = line.split(" ",1)
            note_group,velocity = note_str.split(":")
            if "tempo" in note_str:
                tempo_change = int(note_group.split("tempo=")[1])
                note_list = []
            else:
                tempo_change = None
                note_list = note_group.split(" ")
            # Clamp each note into the playable range; out-of-range notes
            # (None from apply_range_bounds) are dropped.
            new_note_list = []
            for n in note_list:
                v = apply_range_bounds(int(n))
                if v is not None:
                    new_note_list.append(SETTINGS["key_map"][v])
            note_list = new_note_list
            #print(note_list)
            #input()
            m = Midi_Action( float(offset),
                                note_list,
                                int(velocity),
                                tempo_change)
            processed_notes.append(m)
        except Exception as e:
            # Malformed lines are reported; input() pauses until the user
            # acknowledges the error.
            print(f"Error reading line:: '{line}'")
            print(e)
            input()
    return processed_notes
# for this method, we instead use delays as l[0] and work using indexes with delays instead of time
# we'll use recursion and threading to press keys
def set_note_offsets(midi_action_list):
    """Convert absolute event offsets into inter-event delays (seconds).

    Tempo events are consumed (popped) as encountered: each one sets the
    seconds-per-beat multiplier applied to every following delta.
    Mutates and returns midi_action_list.
    """
    # parse time between each note
    # while loop is required because we are editing the array as we go
    i = 0
    while i < len(midi_action_list)-1:
        note = midi_action_list[i]
        nextNote = midi_action_list[i+1]
        if note.tempo_change:
            # seconds per beat from the BPM-style tempo value
            tempo = 60/float(note.tempo_change)
            midi_action_list.pop(i)
            # Re-read slot i: the next event slides into this position and
            # is processed on the following pass (i is not advanced here).
            note = midi_action_list[i]
            if i < len(midi_action_list)-1:
                nextNote = midi_action_list[i+1]
        else:
            # NOTE(review): `tempo` is only bound after the first tempo
            # event; a file whose notes precede any tempo marker would
            # raise NameError here — presumably the exporter always emits
            # a leading tempo event. TODO confirm.
            note.offset = (nextNote.offset - note.offset) * tempo
            i += 1
    # let's just hold the last note for 1 second because we have no data on it
    midi_action_list[-1].offset = 1.00
    return midi_action_list
def playNextNote():
    """Play queued Midi_Actions starting at the global storedIndex.

    Runs synchronously while the delay is zero (chords fire together) and
    re-schedules itself on a threading.Timer for the next non-zero delay,
    so playback does not block the keyboard-hook thread.
    """
    global isPlaying
    global storedIndex
    global playback_speed
    while isPlaying and storedIndex < len(midi_action_list):
        note = midi_action_list[storedIndex]
        delay = max(note.offset,0)
        if note.velocity == 0:
            #release notes
            for n in note.note_list:
                releaseLetter(n)
        else:
            #press notes
            if SETTINGS.get("alt_velocity",False) == True:
                # send alt+<velocity key> so the target app applies velocity
                vel_key = map_velocity(note.velocity)
                print("alt+",vel_key)
                keyboard.press("alt")
                keyboard.press_and_release(vel_key)
                keyboard.release("alt")
            if SETTINGS.get("hold_to_play",False) == True:
                # busy-wait (50 ms polls) until the hold key is down
                while not keyboard.is_pressed(SETTINGS.get("hold_to_play_key")):
                    time.sleep(.05)
            for n in note.note_list:
                pressLetter(n)
        # Log pressed (non-release, non-tempo) events for the console view.
        if(note.tempo_change is None and note.velocity != 0):
            print("%10.2f %15s %d" % (delay,"".join(note.note_list),note.velocity))
            #print("%10.2f %15s" % (delay/playback_speed,noteInfo[1]))
        storedIndex += 1
        if(delay != 0):
            # Hand off to a timer scaled by playback_speed and unwind.
            threading.Timer(delay/playback_speed, playNextNote).start()
            return
    if storedIndex > len(midi_action_list)-1:
        # End of song: stop and rewind for the next run.
        isPlaying = False
        storedIndex = 0
    return
#TODO (BUG)
#Rewind and Fast Forward skip over tempo events
# missing a critical tempo event will change playback significantly.
def rewind(KeyboardEvent):
    """Jump playback 10 events backwards, clamped at the start."""
    global storedIndex
    storedIndex = max(storedIndex - 10, 0)
    print("Rewound to %.2f" % storedIndex)
def skip(KeyboardEvent):
    """Jump playback 10 events forward; stop and rewind if that passes the end.

    Bug fix: the original assigned ``isPlaying`` without declaring it
    global, creating a dead local variable — reaching the end of the song
    via skip never actually stopped playback.
    """
    global storedIndex
    global isPlaying
    if storedIndex + 10 > len(midi_action_list):
        isPlaying = False
        storedIndex = 0
    else:
        storedIndex += 10
    print("Skipped to %.2f" % storedIndex)
def get_file_choice(song_dir):
    """List the .txt song files in song_dir and prompt the user to pick one.

    Returns (chosen_filename, all_song_filenames).

    Improvements: a case-insensitive endswith() replaces the old substring
    test (which also matched names like "notes.txt.bak" and redundantly
    checked ``".txt" in f`` twice), and the duplicate int() conversion of
    the user's input is gone.
    """
    songList = [f for f in os.listdir(song_dir) if f.lower().endswith(".txt")]
    print("\nType the number of a song file then press enter:\n")
    for i in range(len(songList)):
        print(i+1,":",songList[i])
    choice_index = int(input(">"))
    print()
    return songList[choice_index-1],songList
def mode_play(song_path):
    """Load song_path, install playback hotkeys, and block until Ctrl+C.

    Bug fix: ``storedIndex = 0`` previously created a dead local (only
    isPlaying/midi_action_list/playback_speed were declared global), so
    replaying after a skip/rewind could start from a stale position;
    storedIndex is now declared global so the reset takes effect.
    """
    global isPlaying
    global storedIndex
    global midi_action_list
    global playback_speed
    playback_speed = SETTINGS["playback_speed"]
    isPlaying = False
    storedIndex = 0
    midi_action_list = processFile(song_path)
    set_note_offsets(midi_action_list)
    # Hotkeys drive playback from the keyboard hook thread.
    keyboard.on_press_key(SETTINGS["pause_key"], onDelPress)
    keyboard.on_press_key(SETTINGS["rewind_key"], rewind)
    keyboard.on_press_key(SETTINGS["advance_key"], skip)
    print()
    print("Controls")
    print("-"*20)
    print(f"Press {SETTINGS['pause_key'].upper()} to play/pause")
    print(f"Press {SETTINGS['rewind_key'].upper()} to rewind")
    print(f"Press {SETTINGS['advance_key'].upper()} to advance")
    if SETTINGS.get("hold_to_play",False) == True:
        print(f"Hold {SETTINGS['hold_to_play_key'].upper()} while song is unpaused to play notes")
    while True:
        input("Press Ctrl+C to go back\n\n")
def main():
    """Song-selection loop: pick a file, play it, reset state on Ctrl+C.

    Bug fix: the finally block assigned storedIndex/isPlaying without
    global declarations, so the module-level playback state was never
    actually reset between songs; they are now declared global.
    """
    global storedIndex
    global isPlaying
    song_dir = SETTINGS["song_dir"]
    while True:
        song_choice,_ = get_file_choice(song_dir)
        song_path = os.path.join(song_dir,song_choice)
        try:
            mode_play(song_path)
        except KeyboardInterrupt:
            pass
        finally:
            # Remove hotkey hooks and reset playback state for the next song.
            keyboard.unhook_all()
            storedIndex = 0
            isPlaying = False
if __name__ == "__main__":
main()
| eddiemunson/nn | playSong.py | playSong.py | py | 7,532 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "keyboard.release",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "keyboard.press",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "keyboard.press",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "keyboard.release",
... |
74626753147 | #%%
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import matplotlib.ticker as ticker
from matplotlib import rcParams
import numpy as np
from highlight_text import fig_text
import pandas as pd
from PIL import Image
import urllib
import os
# Load the 2022/2023 success-rate long-format data and order rows so each
# year's series lines up team-by-team.
df = pd.read_csv("success_rate_2022_2023.csv", index_col = 0)
df = (
    df
    .sort_values(by = ["variable", "value"], ascending = True)
    .reset_index(drop = True)
)
fig = plt.figure(figsize=(6.5, 10), dpi = 200, facecolor="#EFE9E6")
ax = plt.subplot(111, facecolor = "#EFE9E6")
# Adjust spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.grid(True, color = "lightgrey", ls = ":")
# Define the series
teams = list(df["team_id"].unique())
Y = np.arange(len(teams))
X_xg = df[df["variable"] == "2022_success_rate"]["value"]
X_goals = df[df["variable"] == "2023_success_rate"]["value"]
# Fix axes limits
ax.set_ylim(-.5, len(teams) - .5)
ax.set_xlim(
    min(X_goals.min(), X_xg.min(), 35),
    max(X_goals.max(), X_xg.max(), 55)
)
# Scatter plots
# A filled marker plus a slightly smaller ring outline gives a two-tone dot.
ax.scatter(X_xg, Y, color = "#74959A", s = 200, alpha = 1, zorder = 3)
ax.scatter(X_goals, Y, color = "#495371", s = 200, alpha = 1, zorder = 3)
ax.scatter(X_xg, Y, color = "none", ec = "#74959A", s = 180, lw = 2.5, zorder = 3)
ax.scatter(X_goals, Y, color = "none", ec = "#495371", s = 180, lw = 2.5, zorder = 3)
# Add line chart between points and difference annotation.
# For each team, annotate the year-over-year difference next to the point
# it "points away from", and draw a connecting segment when the gap is
# large enough to be visible.
# Fix: removed the unreachable `if abs(difference) < -1.1: continue`
# branch — abs() is never negative, so it could never fire.
for index in Y:
    difference = X_xg.iloc[index] - X_goals.iloc[index]
    if difference > 0:
        color = "#74959A"
        x_adj = -1.75
        anot_position = X_xg.iloc[index]
        anot_aux_sign = "-"
    else:
        color = "#495371"
        x_adj = 1.75
        anot_position = X_goals.iloc[index]
        anot_aux_sign = "+"
    ax.annotate(
        xy = (anot_position, index),
        text = f"{anot_aux_sign} {abs(difference):.1f}",
        xytext = (13, -2),
        textcoords = "offset points",
        size = 8,
        color = color,
        weight = "bold"
    )
    # Skip the connecting line when the two points nearly overlap.
    if abs(difference) < 1.3:
        continue
    ax.plot(
        [X_xg.iloc[index] + x_adj, X_goals.iloc[index] + x_adj*(-1)],
        [index, index],
        lw = 1,
        color = color,
        zorder = 2
    )
# Transform helpers: data coordinates -> display -> normalized figure
# coordinates, used to place per-team logo axes next to the rows.
DC_to_FC = ax.transData.transform
FC_to_NFC = fig.transFigure.inverted().transform
# Native data to normalized data coordinates
DC_to_NFC = lambda x: FC_to_NFC(DC_to_FC(x))
logos_folder = "nfl_logos/"
# Modify the loop to fetch logos from the local folder
for index, team_id in enumerate(teams):
    ax_coords = DC_to_NFC([33, index - 0.55])
    logo_ax = fig.add_axes([ax_coords[0], ax_coords[1], 0.04, 0.04], anchor="C")
    # Use the local path to the logos folder
    logo_path = f"{logos_folder}{team_id:.0f}.png"
    try:
        # Check if the file exists before opening
        with Image.open(logo_path) as club_icon:
            logo_ax.imshow(club_icon.convert("LA"))
            logo_ax.axis("off")
    except FileNotFoundError:
        print(f"Logo not found for team ID {team_id}")
# Remove tick labels
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
false_ticks = ax.set_yticklabels([])
# Title with per-year colored highlights, then a source/credit line.
fig_text(
    x = 0.15, y = .9,
    s = "Through 10 weeks, only 3 NFL Teams\nhave outperformed their <2022> \noffensive success rate in <2023>",
    highlight_textprops = [
        {"color":"#74959A"},
        {"color": "#495371"}
    ],
    va = "bottom", ha = "left",
    fontsize = 14, color = "black", weight = "bold"
)
fig_text(
    x = 0.15, y = .885,
    s = "Source: rbsdm.com | Viz by Ray Carpenter | inspired by a viz by @sonofacorner",
    va = "bottom", ha = "left",
    fontsize = 8, color = "#4E616C"
)
# # ---- The League's logo
league_icon = Image.open("nfl_logos/NFL.png")
league_ax = fig.add_axes([0.055, 0.89, 0.065, 0.065], zorder=1)
league_ax.imshow(league_icon)
league_ax.axis("off")
# Export twice: once on the figure background, once transparent.
plt.savefig(
    "06202022_bundelsiga_xg.png",
    dpi = 500,
    facecolor = "#EFE9E6",
    bbox_inches="tight",
    edgecolor="none",
    transparent = False
)
plt.savefig(
    "06202022_bundelsiga_xg_tr.png",
    dpi = 500,
    facecolor = "none",
    bbox_inches="tight",
    edgecolor="none",
    transparent = True
)
| array-carpenter/14thstreetanalytics | success_rate_comparison/success_rate_comparison.py | success_rate_comparison.py | py | 4,223 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib... |
from collections import deque

# Advent of Code 2020, day 22 part 1: play a game of "Combat" between two
# decks of cards read from ulaz.txt and print the winner's score.
#
# Improvements over the original: the input file is closed via a context
# manager, cards are converted to int once at parse time instead of being
# round-tripped through strings every round, blank trailing lines are
# skipped, and the score is computed without reversing the deck in place.
with open('ulaz.txt', 'r') as ulaz:
    sve = ulaz.read()

igrači = sve.split('\n\n')
# Skip the "Player N:" header line of each deck; ignore blank lines.
prvi = deque(int(c) for c in igrači[0].split('\n')[1:] if c.strip())
drugi = deque(int(c) for c in igrači[1].split('\n')[1:] if c.strip())

# Each round both players draw their top card; the higher card's owner
# keeps both (winner's card on top). The game ends when a deck is empty.
while prvi and drugi:
    a = prvi.popleft()
    b = drugi.popleft()
    if a > b:
        prvi.extend((a, b))
    else:
        drugi.extend((b, a))

pobjednik = prvi if prvi else drugi

# Score: bottom card counts once, the next card twice, and so on upwards.
rezultat = sum(karta * faktor for faktor, karta in enumerate(reversed(pobjednik), start=1))
print(rezultat)
| bonkach/Advent-of-Code-2020 | 22a.py | 22a.py | py | 582 | python | hr | code | 1 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 7,
"usage_type": "call"
}
] |
18262922550 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import types
import re
import subprocess
import unitTestUtil
import logging
# Parsed sensor readings: driver -> {sensor: raw value} on lm-sensors
# style platforms, or sensor -> [raw values] on sensor-util platforms.
sensorDict = {}
# Platforms whose readings come from the sensor-util style command.
util_support_map = ['fbttn', 'fbtp', 'lightning', 'minipack', 'fby2', 'yosemite']
# Platforms that may report one value per host slot (1 or 4 values).
multi_host_map = ['fby2', 'yosemite']
# Platforms whose readings come from lm-sensors style output.
lm_sensor_support_map = ['wedge', 'wedge100', 'galaxy100', 'cmm']
def sensorTest(platformType, data, util):
    """
    Check that sensor data is within spec or is just present, then exit
    with status 0 on success or 1 on failure.
    """
    # util-based platforms expose flat sensor-util output; the rest expose
    # lm-sensors style per-driver output.
    checker = sensorTestUtil if platformType in util_support_map else sensorTestNetwork
    failed = checker(platformType, data, util)
    if failed:
        print("Sensor Readings on " + platformType + " for keys: " +
              str(failed) + " [FAILED]")
        sys.exit(1)
    print("Sensor Readings on " + platformType + " [PASSED]")
    sys.exit(0)
def sensorTestNetwork(platformType, data, util):
    """Validate lm-sensors style readings against the JSON spec.

    Spec entries may be "yes" (reading must exist and contain a digit) or
    [lo, hi] (first numeric value must fall inside the range). Returns a
    flat list of failing driver/reading keys.

    Bug fix: the original only fetched ``raw_value`` inside the
    ``== "yes"`` branch, so range-typed ([lo, hi]) entries — and any other
    non-"yes" value — were checked against a stale raw_value from a
    previous iteration (or raised NameError). The reading is now fetched
    once for every entry that is actually tested; other values are skipped.
    """
    failed = []
    createSensorDictNetworkLmSensors(util)
    logger.debug("Checking values against json file")
    for driver in data:
        if not isinstance(data[driver], dict):
            continue
        for reading in data[driver]:
            spec = data[driver][reading]
            # Only "yes" (presence check) and [lo, hi] (range check)
            # entries are tested.
            if spec != "yes" and not isinstance(spec, list):
                continue
            try:
                raw_value = sensorDict[driver][reading]
            except Exception:
                failed += [driver, reading]
                continue
            if isinstance(spec, list):
                values = re.findall(r"[-+]?\d*\.\d+|\d+", raw_value)
                if len(values) == 0:
                    failed += [driver, reading]
                    continue
                if float(spec[0]) > float(values[0]) or float(
                        values[0]) > float(spec[1]):
                    failed += [driver, reading]
            else:
                # Presence check: the reading must contain at least one digit.
                if not re.search(r'\d', raw_value):
                    failed += [driver, reading]
    return failed
def sensorTestUtil(platformType, data, util):
    """
    Validate sensor-util readings: every sensor named in the JSON spec
    must be present, report the expected number of values (4 host slots
    allowed on multi-host platforms), and either fall inside its [lo, hi]
    range or read 'ok'. Returns the list of failing keys.
    """
    failed = []
    createSensorDictUtil(util)
    logger.debug("checking values against json file")
    for sensor in data:
        # skip type argument in json file
        if sensor == "type":
            continue
        try:
            raw_values = sensorDict[sensor]
        except Exception:
            failed.append(sensor)
            continue
        expected_counts = [1, 4] if platformType in multi_host_map else [1]
        if len(raw_values) not in expected_counts:
            failed.append(sensor)
            continue
        if isinstance(data[sensor], list):
            rang = data[sensor]
            for raw_value in raw_values:
                values = re.findall(r"[-+]?\d*\.\d+|\d+", raw_value)
                if len(values) == 0:
                    failed.append(sensor)
                    continue
                if not (float(rang[0]) <= float(values[0]) <= float(rang[1])):
                    failed.append(sensor)
        else:
            for raw_value in raw_values:
                if 'ok' not in raw_value:
                    failed.append(sensor + raw_value)
                    break
    return failed
def createSensorDictNetworkLmSensors(util):
    """
    Creating a sensor dictionary driver -> sensor -> reading
    Supports wedge, wedge100, galaxy100, and cmm

    Bug fix: the debug log used ``"executing command: ".format(cmd)`` — a
    format call on a string with no placeholder — which silently dropped
    the command from the log; it now uses lazy %s formatting.
    """
    cmd = util.SensorCmd
    if cmd is None:
        raise Exception("sensor command not implemented")
    logger.debug("executing command: %s", cmd)
    f = subprocess.Popen(cmd,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    info, err = f.communicate()
    if len(err) != 0:
        raise Exception(err)
    logger.debug("Creating sensor dictionary")
    info = info.decode('utf-8')
    info = info.split('\n')
    currentKey = ''
    for line in info:
        if ':' in line:
            # "sensor: value" line belonging to the current driver header.
            lineInfo = line.split(':')
            key = lineInfo[0]
            val = ''.join(lineInfo[1:])
            sensorDict[currentKey][key] = val
        elif len(line) == 0 or line[0] == ' ':
            continue
        else:
            # A non-indented, colon-free line starts a new driver section.
            sensorDict[line] = {}
            currentKey = line
def createSensorDictUtil(util):
    """
    Creating a sensor dictionary sensor -> reading
    Supports fbtp and fbttn
    """
    cmd = util.SensorCmd
    if cmd is None:
        raise Exception("sensor command not implemented")
    logger.debug("executing command: " + str(cmd))
    proc = subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    info, err = proc.communicate()
    if len(err) != 0:
        raise Exception(err)
    logger.debug("creating sensor dictionary")
    for line in info.decode('utf-8').split('\n'):
        if ':' in line:
            # "sensor: value" — collect one value per host slot.
            parts = line.split(':')
            sensorDict.setdefault(parts[0], []).append(''.join(parts[1:]))
        if "timed out" in line:
            # A timed-out read means the util output is unusable.
            print(line)
            raise Exception(line)
if __name__ == "__main__":
"""
Input to this file should look like the following:
python sensorTest.py wedgeSensor.json
"""
util = unitTestUtil.UnitTestUtil()
logger = util.logger(logging.WARN)
try:
data = {}
args = util.Argparser(['json', '--verbose'], [str, None],
['json file',
'output all steps from test with mode options: DEBUG, INFO, WARNING, ERROR'])
if args.verbose is not None:
logger = util.logger(args.verbose)
data = util.JSONparser(args.json)
platformType = data['type']
utilType = util.importUtil(platformType)
sensorTest(platformType, data, utilType)
except Exception as e:
print("Sensor test [FAILED]")
print("Error code returned: {}".format(e))
sys.exit(1)
| WeilerWebServices/Facebook | openbmc/tests/common/sensorTest.py | sensorTest.py | py | 6,525 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "sys.exit",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 60,
"... |
29180093984 | from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
from datetime import datetime as dt
import re
import copy
import MySQLdb
# Alias kept for its escape_string helper used when building SQL below.
dataBase = MySQLdb

userInput1 = str(input("Please Provide with Calendar link: "))
userInput2 = str(input("Please Provide a file name ending in .sql: "))
userInput3 = str(input("Please select student type: (undergrad or graduate): "))

url = userInput1
# get data from the link
client = uReq(url)
pageHtml = client.read()
client.close()

# create parsabel html
html = soup(pageHtml, "html.parser")
# The calendar content is laid out as tables following the first h2.
header = html.h2
headTables = header.find_next_siblings("table")
list_of_rows = []
for i in headTables:
rows = i.find_all("tr")
# loop through all the tr's and find td's
for j in rows:
list_of_cells = []
cols = j.find_all("td")
# loop through all the td's and get data
for data in cols:
event = data.text.replace("\r\n\t\t\t",' ')
# assignment based on regex
time = re.match(r"[ADFJMNOS]\w* [\d]{1,2}, [\d]{4}",event)
doubleTime = re.match(r"[ADFJMNOS]\w* [\d]{1,2} to [\d]{1,2}, [\d]{4}", event)
doubleDates = re.match(r"[ADFJMNOS]\w* [\d]{1,2} and [\d]{1,2}, [\d]{4}", event)
crossYear = re.match(r"[ADFJMNOS]\w* [\d]{1,2}, [\d]{4} to [ADFJMNOS]\w* [\d]{1,2}, [\d]{4}", event)
global updateEvent
if crossYear:
dates = re.split(r"\ to \ |\ |,\ ",event)
# get the start date numbers and join
startDates = " ".join([dates[0],dates[1],dates[2]])
# get the end date values and join
endDates = " ".join([dates[4],dates[5],dates[6]])
startTime = dt.strptime(startDates, "%B %d %Y")
endTime = dt.strptime(endDates, "%B %d %Y")
finalDates = ' to '.join([str(startTime), str(endTime)])
event = re.sub(r"[ADFJMNOS]\w* [\d]{1,2}, [\d]{4} to [ADFJMNOS]\w* [\d]{1,2}, [\d]{4}",str(finalDates), event)
# print(event)
# converts Month Day, Year
elif time:
timeVal = dt.strptime(time.group(), "%B %d, %Y")
# global variable for storage of date value
updateEvent = timeVal
event = re.sub(r"[ADFJMNOS]\w* [\d]{1,2}, [\d]{4}",str(timeVal), event)
# convets date in format: Month Day to Day, Year
elif doubleTime:
dates = re.split(r"\ to\ |\ |,\ ", event)
# get the start date numbers and join
startDates = " ".join([dates[0],dates[1],dates[3]])
# get the end date values and join
endDates = " ".join([dates[0],dates[2],dates[3]])
startTime = dt.strptime(startDates, "%B %d %Y")
endTime = dt.strptime(endDates, "%B %d %Y")
#global variable for empty date
updateEvent = str(startTime)
finalDates = ' to '.join([str(startTime), str(endTime)])
# print(finalDates)
event = re.sub(r"[ADFJMNOS]\w* [\d]{1,2} to [\d]{1,2}, [\d]{4}",str(finalDates), event)
# converts Month Day and Day, Year
elif doubleDates:
dates = re.split(r"\ and\ |\ |,\ ", event)
# get the start date numbers and join
startDates = " ".join([dates[0],dates[1],dates[3]])
# get the end date values and join
endDates = " ".join([dates[0],dates[2],dates[3]])
startTime = dt.strptime(startDates, "%B %d %Y")
#global variable for empty dates
endTime = dt.strptime(endDates, "%B %d %Y")
updateEvent = str(startTime)
finalDates = ' to '.join([str(startTime), str(endTime)])
event = re.sub(r"[ADFJMNOS]\w* [\d]{1,2} and [\d]{1,2}, [\d]{4}",str(finalDates), event)
else :
# Fill in all the date values that are empty with date value
# before it
event = re.sub(r"\xa0", str(updateEvent), event)
#append to list of cols
list_of_cells.append(event)
newCells = copy.deepcopy(list_of_cells[0])
toSplit = re.match(r"[\d]{4}-[\d]{1,2}-[\d]{1,2} [\d]{1,2}:[\d]{1,2}:[\d]{1,2} to [\d]{4}-[\d]{1,2}-[\d]{1,2} [\d]{1,2}:[\d]{1,2}:[\d]{1,2}", newCells)
global endTimes
if toSplit:
# print(toSplit.group())
newSplit = toSplit.group().split(' to')
endTimes = newSplit[1].replace("00:00:00","11:59:59")
else:
endTimes = newCells.replace("00:00:00","11:59:59")
list_of_cells.append(endTimes)
startDate = list_of_cells[0].split('to')
strParts = list_of_cells[1].split('. ')
global title, description
if len(strParts) > 1:
title = str(strParts[0])
description = str(strParts[1])
else:
title= str(strParts[0])
description = str(strParts[0])
newTitle =str(dataBase.escape_string(title))
newDesc =str(dataBase.escape_string(description))
query = "INSERT INTO tbl_entries ( event_name, event_description, event_categories, event_tags, event_startdate, event_enddate, open_to, location_building, location_room, location_campus, location_other, start_hour, start_minute, start_ampm, end_hour, end_minute, end_ampm, contact_event_firstname, contact_event_lastname, contact_event_phonenumber, contact_event_phoneext, contact_event_email, contact_firstname, contact_lastname, contact_phonenumber, contact_phoneext, contact_email, event_url, event_url_protocol, upload_image, date_submitted, date_approved, repeated, repeat_freq, repeat_day, repeat_until, repeat_until_date, repeat_until_num, clickable, pending, approved, archived, cancelled, frontpage, submission_ip) VALUES "
registrars = str(dataBase.escape_string("'Registar's'"))[1:]
global values
if userInput3 == 'undergrad':
values = "(" + str(newTitle)[1:] + " ," + str(newDesc)[1:]+ ", '73', '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0', '"+ startDate[0] + "','" + endTimes + "', '29', 0, '', 2, 'Ontario Tech', 0, 0, 'am', 11, 59, 'pm', "+ registrars +", 'Office', '905.721.3190', '', 'connect@uoit.ca', " + registrars + ", 'Office', '905.721.3190', '', 'connect@uoit.ca', '" + url + "', 'https', NULL, '" + str(dt.now())+ "', '" + str(dt.now()) + "', 0, '', '', 0, '" + str(dt.now()) + "', 0, 1, 0, 1, 0, 0, 0, '00.000.0.000'),"
elif userInput3 == 'graduate':
values = "(" + str(newTitle)[1:] + " ," + str(newDesc)[1:]+ ", '74', '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0', '"+ startDate[0] + "','" + endTimes + "', '29', 0, '', 2, 'Ontario Tech', 0, 0, 'am', 11, 59, 'pm','School of Graduate', 'and Postdoctoral Studies', '905.721.8668', '6209', 'connect@uoit.ca', 'School of Graduate', 'and Postdoctoral Studies', '905.721.8668', '6209','connect@uoit.ca', '" + url + "', 'https', NULL, '" + str(dt.now())+ "', '" + str(dt.now()) + "', 0, '', '', 0, '" + str(dt.now()) + "', 0, 1, 0, 1, 0, 0, 0, '00.000.0.000'),"
#append to rows
list_of_rows.append(values)
# Replace the trailing comma on the final VALUES tuple with a semicolon to
# terminate the SQL statement.
lastString = str(list_of_rows.pop(len(list_of_rows)-1))
lastString = lastString[:-1] +';'
list_of_rows.append(lastString)
# Bug fix: the original ended with ``outfile.close`` (missing parentheses),
# so the file handle was never explicitly closed; a context manager now
# guarantees flushing and closing the output file.
with open(userInput2, "w") as outfile:
    outfile.write(query)
    for item in list_of_rows:
        outfile.write("%s\n" % item)
{
"api_name": "urllib.request.urlopen",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "re.match",
"lin... |
16799554480 | import argparse
from pathlib import Path
import sys
# Add aoc_common to the python path
file = Path(__file__)
root = file.parent.parent
sys.path.append(root.as_posix())
import re
from functools import lru_cache
from math import inf
parser = argparse.ArgumentParser()
parser.add_argument('--sample', '-s', help='Run with sample data', action='store_true', default=False)
parsed_args = parser.parse_args()
if parsed_args.sample:
print("Using sample data!")
def dprint(*args, **kwargs):
    """print() that is active only when running against the sample input."""
    if not parsed_args.sample:
        return
    print(*args, **kwargs)
with open('input.txt' if not parsed_args.sample else 'sample.txt') as f:
input_data = list(map(lambda x: x.replace('\n', ''), f.readlines()))
dprint(input_data)
valve_flow_rates = {}
valve_tunnels = {}

# Parse lines like:
#   Valve AA has flow rate=0; tunnels lead to valves DD, II, BB
input_re = re.compile(r"Valve ([A-Z]{2}) has flow rate=(\d+); tunnels? leads? to valves? (.*)")

for line in input_data:
    matcher = input_re.match(line)
    valve, rate, tunnels = matcher.groups()
    tunnels = tunnels.split(', ')
    valve_flow_rates[valve] = int(rate)
    valve_tunnels[valve] = tunnels

dprint(valve_flow_rates)
dprint(valve_tunnels)

# Valves worth opening (non-zero flow), in a stable sorted order.
potential_valves = sorted([x[0] for x in valve_flow_rates.items() if x[1] != 0])
@lru_cache(maxsize=None)
def get_max_flow_rate(current_position, opened_valves, time_left):
    """Best total pressure releasable from this state (part 1 search).

    State: current valve name, tuple of opened valve names (kept sorted so
    the memoization key is canonical), and minutes remaining.
    """
    if time_left <= 0:
        return 0
    # If the valve can open, we want to open the valve and consider the case where we can't open the valve
    # If the valve can't open, we just want to consider the adjacent spaces
    if valve_flow_rates[current_position] == 0 or current_position in opened_valves:
        best = 0
        for adjacent in valve_tunnels[current_position]:
            best = max(best, get_max_flow_rate(adjacent, opened_valves, time_left - 1))
        return best
    else:
        # Opening now yields flow for the remaining (time_left - 1) minutes.
        gained_flow = (time_left - 1) * valve_flow_rates[current_position]
        best = 0
        opened = tuple(sorted(opened_valves + (current_position,)))
        for adjacent in valve_tunnels[current_position]:
            # Either open here (1 minute) then move (1 more minute)...
            best = max(best, gained_flow + get_max_flow_rate(adjacent, opened, time_left - 2))
            # ...or skip opening and just move.
            best = max(best, get_max_flow_rate(adjacent, opened_valves, time_left - 1))
        return best
valve_distances = {}
def djikstra(valve):
    """Return a dict of shortest hop-counts from *valve* to every reachable valve.

    All tunnels cost 1, so plain breadth-first search yields exactly the
    same distances as the previous Dijkstra implementation while running
    in O(V + E) instead of the O(V^2) min-scan over the frontier.
    """
    from collections import deque
    distances = {valve: 0}
    queue = deque([valve])
    while queue:
        current = queue.popleft()
        for neighbour in valve_tunnels[current]:
            if neighbour not in distances:
                distances[neighbour] = distances[current] + 1
                queue.append(neighbour)
    return distances
valve_distances = {k: djikstra(k) for k in valve_tunnels.keys() }
max_flow_seen = 0
@lru_cache(maxsize=None)
def run_part_2(cur, other, closed_valves):
    """Best pressure two agents can release together (part 2 search).

    cur/other are (time_left, position) pairs; the agent with more time
    remaining always moves next, which keeps states canonical for the
    memoization cache. closed_valves is the frozenset still worth opening.
    """
    cur_time_left, cur_pos = cur
    other_time_left, other_pos = other
    totals = [0]
    for valve in closed_valves:
        # Walk to the valve, then +1 minute to open it.
        time_to_valve = valve_distances[cur_pos].get(valve) + 1
        time_left = cur_time_left - time_to_valve
        if time_left <= 0:
            continue # Can't get to the valve and open it
        flow_gained = time_left * valve_flow_rates[valve]
        # Move the person that has the most time left
        if time_left > other_time_left:
            totals.append(flow_gained + run_part_2((time_left, valve), other, closed_valves - {valve}))
        else:
            totals.append(flow_gained + run_part_2(other, (time_left, valve), closed_valves - {valve}))
    max_flow = max(totals)
    # Progress reporting: the search can take a while on the real input.
    global max_flow_seen
    if max_flow > max_flow_seen:
        print("New max:", max_flow)
        max_flow_seen = max_flow
    return max_flow
def part_1():
    """Single agent, 30 minutes, starting at 'AA' with no valves open."""
    return get_max_flow_rate('AA', (), 30)
def part_2():
    """Two agents, 26 minutes each, both starting at 'AA'."""
    return run_part_2((26, 'AA'), (26, 'AA'), frozenset(potential_valves))
print(f"Part 1: {part_1()}")
print(f"Part 2: {part_2()}") | mrkirby153/AdventOfCode2022 | day16/day16.py | day16.py | py | 3,973 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.