seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
41310847925 | import os
from abc import ABC
from keras import Model, layers
from keras.layers import Conv2D, BatchNormalization, Add, MaxPool2D, GlobalAveragePooling2D, Flatten, Dense, Rescaling
import tensorflow as tf
class ResnetBlock(Model, ABC):
    """A standard two-convolution residual block (ResNet v1 style).

    The main path is conv-BN-ReLU-conv-BN; the shortcut is the identity,
    or a 1x1 stride-2 projection (plus BN) when down-sampling.
    """

    def __init__(self, channels: int, down_sample=False):
        """channels: number of convolution kernels in each conv layer.
        down_sample: when True, halve the spatial size (stride 2 on the
        first conv and on the shortcut projection)."""
        super().__init__()
        self.__channels = channels
        self.__down_sample = down_sample
        self.__strides = [2, 1] if down_sample else [1, 1]

        kernel = (3, 3)
        # He initialization instead of Keras' default 'glorot_uniform',
        # as suggested in [2].
        init_scheme = "he_normal"

        self.conv_1 = Conv2D(self.__channels, strides=self.__strides[0],
                             kernel_size=kernel, padding="same",
                             kernel_initializer=init_scheme)
        self.bn_1 = BatchNormalization()
        self.conv_2 = Conv2D(self.__channels, strides=self.__strides[1],
                             kernel_size=kernel, padding="same",
                             kernel_initializer=init_scheme)
        self.bn_2 = BatchNormalization()
        self.merge = Add()
        if self.__down_sample:
            # Down-sampling shortcut: 1x1 conv with stride 2, per [1].
            self.res_conv = Conv2D(self.__channels, strides=2,
                                   kernel_size=(1, 1),
                                   kernel_initializer=init_scheme,
                                   padding="same")
            self.res_bn = BatchNormalization()

    def call(self, inputs, training=None, mask=None):
        """Forward pass: main path plus (possibly projected) shortcut."""
        shortcut = inputs
        x = tf.nn.relu(self.bn_1(self.conv_1(inputs)))
        x = self.bn_2(self.conv_2(x))
        if self.__down_sample:
            shortcut = self.res_bn(self.res_conv(shortcut))
        # Merge the shortcut back in, then apply the final activation.
        return tf.nn.relu(self.merge([x, shortcut]))
class ResNet18(Model):
def __init__(self, num_classes, **kwargs):
    """
    num_classes: number of classes in specific classification task.

    Builds the ResNet-18 stack: 7x7/2 stem conv + BN, max-pool, four
    stages of two ResnetBlocks (64/128/256/512 channels; the first block
    of stages 2-4 down-samples), global average pooling, and a softmax
    classifier.  Also builds a data-augmentation pipeline whose input
    shape comes from the img_height/img_width environment variables.
    """
    super().__init__(**kwargs)
    # Stem: 7x7 stride-2 convolution + BN (ReLU is applied in call()).
    self.conv_1 = Conv2D(64, (7, 7), strides=2,
                         padding="same", kernel_initializer="he_normal")
    self.init_bn = BatchNormalization()
    self.pool_2 = MaxPool2D(pool_size=(2, 2), strides=2, padding="same")
    # Four residual stages; first block of stages 2-4 halves H and W.
    self.res_1_1 = ResnetBlock(64)
    self.res_1_2 = ResnetBlock(64)
    self.res_2_1 = ResnetBlock(128, down_sample=True)
    self.res_2_2 = ResnetBlock(128)
    self.res_3_1 = ResnetBlock(256, down_sample=True)
    self.res_3_2 = ResnetBlock(256)
    self.res_4_1 = ResnetBlock(512, down_sample=True)
    self.res_4_2 = ResnetBlock(512)
    self.avg_pool = GlobalAveragePooling2D()
    self.flat = Flatten()
    self.fc = Dense(num_classes, activation="softmax")
    # NOTE(review): int(os.getenv(...)) raises TypeError when
    # img_height/img_width are not exported -- confirm the launcher
    # always sets them before this model is constructed.
    self.data_augmentation = tf.keras.Sequential(
        [
            layers.RandomFlip("horizontal",
                              input_shape=(int(os.getenv("img_height")),
                                           int(os.getenv("img_width")),
                                           3)),
            layers.RandomRotation(0.1),
            layers.RandomZoom(0.1),
        ]
    )
def call(self, inputs, training=None, mask=None):
out = self.data_augmentation(inputs)
out = Rescaling(scale=1.0/255)(out)
out = self.conv_1(out)
out = self.init_bn(out)
out = tf.nn.relu(out)
out = self.pool_2(out)
for res_block in [self.res_1_1, self.res_1_2, self.res_2_1, self.res_2_2, self.res_3_1, self.res_3_2,
self.res_4_1, self.res_4_2]:
out = res_block(out)
out = self.avg_pool(out)
out = self.flat(out)
out = self.fc(out)
return out | beishangongzi/graduation_internship | utils/Resnet.py | Resnet.py | py | 3,958 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "keras.Model",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "abc.ABC",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization... |
74992084668 | import cv2
import numpy as np
import os
import sys
import json
import math
import time
import argparse
from enum import Enum
import platform
class Config:
    """Key bindings and user configuration for the presenter.

    Call ``init`` once at startup: it installs platform-appropriate key
    codes as class attributes, then overrides them with any values found
    in ``./manim-presentation.json`` in the current working directory.
    """

    @classmethod
    def init(cls):
        """Populate key-code class attributes and apply JSON overrides."""
        # Character keys are the same on every platform.
        cls.QUIT_KEY = ord("q")
        cls.REWIND_KEY = ord("r")
        cls.PLAYPAUSE_KEY = 32  # spacebar
        # cv2.waitKeyEx reports platform-specific codes for arrow keys.
        if platform.system() == "Windows":
            cls.CONTINUE_KEY = 2555904  # right arrow
            cls.BACK_KEY = 2424832     # left arrow
        else:
            cls.CONTINUE_KEY = 65363   # right arrow
            cls.BACK_KEY = 65361       # left arrow
        # Optional per-directory overrides from a JSON config file.
        config_path = os.path.join(os.getcwd(), "./manim-presentation.json")
        if os.path.exists(config_path):
            # Use a context manager so the handle is closed (the original
            # json.load(open(...)) leaked the file handle).
            with open(config_path, "r") as config_file:
                json_config = json.load(config_file)
            for key, value in json_config.items():
                setattr(cls, key, value)
class State(Enum):
    """Lifecycle state of the presentation player."""
    PLAYING = 0
    PAUSED = 1
    WAIT = 2
    END = 3

    def __str__(self):
        # Human-readable label for the info panel; "..." for anything else.
        labels = {0: "Playing", 1: "Paused", 2: "Wait", 3: "End"}
        return labels.get(self.value, "...")
def now():
    """Current wall-clock time in integer milliseconds."""
    milliseconds = time.time() * 1000
    return round(milliseconds)
def fix_time(x):
    """Guard a wait duration: return x when positive, otherwise 1."""
    if x > 0:
        return x
    return 1
class Presentation:
    """Playback model for one rendered scene.

    config: dict with keys "slides" (list of slide dicts carrying
    start_animation/end_animation/type/number) and "files" (ordered list
    of per-animation video paths).  Only one cv2.VideoCapture is kept
    open at a time to bound resource usage.
    """

    def __init__(self, config, last_frame_next=False):
        # last_frame_next: show the next animation's first frame while
        # waiting on a slide boundary (workaround; see update_state).
        self.last_frame_next = last_frame_next
        self.slides = config["slides"]
        self.files = config["files"]
        self.lastframe = []
        # Lazily opened captures; every entry except the current one is None.
        self.caps = [None for _ in self.files]
        self.reset()
        self.add_last_slide()

    def add_last_slide(self):
        """Append a synthetic "last" slide spanning to the final animation."""
        last_slide_end = self.slides[-1]["end_animation"]
        last_animation = len(self.files)
        self.slides.append(dict(
            start_animation=last_slide_end,
            end_animation=last_animation,
            type="last",
            number=len(self.slides) + 1,
            terminated=False
        ))

    def reset(self):
        """Return to the first animation/slide of the scene."""
        self.current_animation = 0
        self.load_this_cap(0)
        self.current_slide_i = 0
        self.slides[-1]["terminated"] = False

    def next(self):
        """Advance to the next slide, or mark the synthetic last slide done."""
        if self.current_slide["type"] == "last":
            self.current_slide["terminated"] = True
        else:
            self.current_slide_i = min(len(self.slides) - 1, self.current_slide_i + 1)
            self.rewind_slide()

    def prev(self):
        """Step back one slide and restart it."""
        self.current_slide_i = max(0, self.current_slide_i - 1)
        self.rewind_slide()

    def rewind_slide(self):
        """Restart the current slide from its first animation's first frame."""
        self.current_animation = self.current_slide["start_animation"]
        self.current_cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

    def load_this_cap(self, cap_number):
        """Open the capture for *cap_number*, releasing all others first."""
        if self.caps[cap_number] == None:
            # unload other caps
            for i in range(len(self.caps)):
                if self.caps[i] != None:
                    self.caps[i].release()
                    self.caps[i] = None
            # load this cap
            self.caps[cap_number] = cv2.VideoCapture(self.files[cap_number])

    @property
    def current_slide(self):
        # Slide dict currently being presented.
        return self.slides[self.current_slide_i]

    @property
    def current_cap(self):
        # VideoCapture for the current animation, opened on demand.
        self.load_this_cap(self.current_animation)
        return self.caps[self.current_animation]

    @property
    def fps(self):
        # Frame rate of the current animation's video.
        return self.current_cap.get(cv2.CAP_PROP_FPS)

    # This function updates the state given the previous state.
    # It does this by reading the video information and checking if the state is still correct.
    # It returns the frame to show (lastframe) and the new state.
    def update_state(self, state):
        if state == State.PAUSED:
            if len(self.lastframe) == 0:
                _, self.lastframe = self.current_cap.read()
            return self.lastframe, state
        still_playing, frame = self.current_cap.read()
        if still_playing:
            self.lastframe = frame
        # NOTE(review): member-to-member access ``state.WAIT`` resolves to
        # State.WAIT but is deprecated in newer Python; confirm target version.
        elif state in [state.WAIT, state.PAUSED]:
            return self.lastframe, state
        elif self.current_slide["type"] == "last" and self.current_slide["terminated"]:
            return self.lastframe, State.END
        if not still_playing:
            if self.current_slide["end_animation"] == self.current_animation + 1:
                if self.current_slide["type"] == "slide":
                    # To fix "it always ends one frame before the animation", uncomment this.
                    # But then clears on the next slide will clear the stationary after this slide.
                    if self.last_frame_next:
                        # NOTE(review): ``self.next_cap`` is read here before it
                        # is ever assigned (AttributeError on the first pass),
                        # and a capture object is passed where load_this_cap
                        # expects an index.  TODO confirm against upstream
                        # before relying on --last-frame-next.
                        self.load_this_cap(self.next_cap)
                        self.next_cap = self.caps[self.current_animation + 1]
                        self.next_cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                        _, self.lastframe = self.next_cap.read()
                    state = State.WAIT
                elif self.current_slide["type"] == "loop":
                    # Loop slides restart themselves until the user advances.
                    self.current_animation = self.current_slide["start_animation"]
                    state = State.PLAYING
                    self.rewind_slide()
                elif self.current_slide["type"] == "last":
                    self.current_slide["terminated"] = True
            elif self.current_slide["type"] == "last" and self.current_slide["end_animation"] == self.current_animation:
                # Keep showing the terminal frame of the deck.
                state = State.WAIT
            else:
                # Play next video!
                self.current_animation += 1
                self.load_this_cap(self.current_animation)
                # Reset video to position zero if it has been played before
                self.current_cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
        return self.lastframe, state
class Display:
    """cv2-based player window that cycles through several Presentations."""

    def __init__(self, presentations, start_paused=False, fullscreen=False):
        self.presentations = presentations
        self.start_paused = start_paused
        self.state = State.PLAYING
        self.lastframe = None
        self.current_presentation_i = 0
        # Time spent on the previous frame; handle_key subtracts it from
        # the per-frame wait so playback keeps real-time pace.
        self.lag = 0
        self.last_time = now()
        if fullscreen:
            cv2.namedWindow("Video", cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty("Video", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

    @property
    def current_presentation(self):
        # Presentation currently on screen.
        return self.presentations[self.current_presentation_i]

    def run(self):
        """Main loop: advance state, process keys, redraw video and info."""
        while True:
            self.lastframe, self.state = self.current_presentation.update_state(self.state)
            if self.state == State.PLAYING or self.state == State.PAUSED:
                # Honour --start-paused exactly once, on the first frame.
                if self.start_paused:
                    self.state = State.PAUSED
                    self.start_paused = False
            if self.state == State.END:
                if self.current_presentation_i == len(self.presentations) - 1:
                    self.quit()
                else:
                    # Move on to the next scene.
                    self.current_presentation_i += 1
                    self.state = State.PLAYING
            self.handle_key()
            self.show_video()
            self.show_info()

    def show_video(self):
        # Record how long the last iteration took (used by handle_key).
        self.lag = now() - self.last_time
        self.last_time = now()
        cv2.imshow("Video", self.lastframe)

    def show_info(self):
        """Render the small status window (animation, state, slide, scene)."""
        info = np.zeros((130, 420), np.uint8)
        font_args = (cv2.FONT_HERSHEY_SIMPLEX, 0.7, 255)
        grid_x = [30, 230]
        grid_y = [30, 70, 110]
        cv2.putText(
            info,
            f"Animation: {self.current_presentation.current_animation}",
            (grid_x[0], grid_y[0]),
            *font_args
        )
        cv2.putText(
            info,
            f"State: {self.state}",
            (grid_x[1], grid_y[0]),
            *font_args
        )
        cv2.putText(
            info,
            f"Slide {self.current_presentation.current_slide['number']}/{len(self.current_presentation.slides)}",
            (grid_x[0], grid_y[1]),
            *font_args
        )
        cv2.putText(
            info,
            f"Slide Type: {self.current_presentation.current_slide['type']}",
            (grid_x[1], grid_y[1]),
            *font_args
        )
        cv2.putText(
            info,
            f"Scene {self.current_presentation_i + 1}/{len(self.presentations)}",
            ((grid_x[0]+grid_x[1])//2, grid_y[2]),
            *font_args
        )
        cv2.imshow("Info", info)

    def handle_key(self):
        """Wait roughly one frame period for a key and apply its binding."""
        sleep_time = math.ceil(1000/self.current_presentation.fps)
        # Subtract the time already spent this iteration (clamped positive).
        key = cv2.waitKeyEx(fix_time(sleep_time - self.lag))
        if key == Config.QUIT_KEY:
            self.quit()
        elif self.state == State.PLAYING and key == Config.PLAYPAUSE_KEY:
            self.state = State.PAUSED
        elif self.state == State.PAUSED and key == Config.PLAYPAUSE_KEY:
            self.state = State.PLAYING
        elif self.state == State.WAIT and (key == Config.CONTINUE_KEY or key == Config.PLAYPAUSE_KEY):
            self.current_presentation.next()
            self.state = State.PLAYING
        elif self.state == State.PLAYING and key == Config.CONTINUE_KEY:
            self.current_presentation.next()
        elif key == Config.BACK_KEY:
            if self.current_presentation.current_slide_i == 0:
                # Already at the first slide: jump back to the previous scene.
                self.current_presentation_i = max(0, self.current_presentation_i - 1)
                self.current_presentation.reset()
                self.state = State.PLAYING
            else:
                self.current_presentation.prev()
                self.state = State.PLAYING
        elif key == Config.REWIND_KEY:
            self.current_presentation.rewind_slide()
            self.state = State.PLAYING

    def quit(self):
        """Tear down all windows and terminate the process."""
        cv2.destroyAllWindows()
        sys.exit()
def main():
    """CLI entry point: parse arguments, load per-scene configs, and play.

    Raises a plain Exception (with guidance) when a scene's JSON config
    is missing from the presentation folder.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("scenes", metavar="scenes", type=str, nargs="+", help="Scenes to present")
    parser.add_argument("--folder", type=str, default="./presentation", help="Presentation files folder")
    parser.add_argument("--start-paused", action="store_true", help="Start paused")
    parser.add_argument("--fullscreen", action="store_true", help="Fullscreen")
    parser.add_argument("--last-frame-next", action="store_true", help="Show the next animation first frame as last frame (hack)")
    args = parser.parse_args()
    args.folder = os.path.normcase(args.folder)
    Config.init()
    presentations = list()
    for scene in args.scenes:
        config_file = os.path.join(args.folder, f"{scene}.json")
        if not os.path.exists(config_file):
            raise Exception(f"File {config_file} does not exist, check the scene name and make sure to use Slide as your scene base class")
        # Close the config deterministically (the original
        # json.load(open(...)) leaked the file handle).
        with open(config_file) as config_fh:
            config = json.load(config_fh)
        presentations.append(Presentation(config, last_frame_next=args.last_frame_next))
    display = Display(presentations, start_paused=args.start_paused, fullscreen=args.fullscreen)
    display.run()
if __name__ == "__main__":
main()
| galatolofederico/manim-presentation | manim_presentation/present.py | present.py | py | 11,126 | python | en | code | 153 | github-code | 6 | [
{
"api_name": "platform.system",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_... |
26552249009 | #!/usr/bin/env python3
import fnmatch
import os
import re
import ntpath
import sys
import argparse
# handle x64 python clipboard, ref https://forums.autodesk.com/t5/maya-programming/ctypes-bug-cannot-copy-data-to-clipboard-via-python/m-p/9197068/highlight/true#M10992
import ctypes
from ctypes import wintypes
CF_UNICODETEXT = 13  # Win32 clipboard format id for UTF-16 text

# DLL handles used for clipboard and global-memory access.
user32 = ctypes.WinDLL('user32')
kernel32 = ctypes.WinDLL('kernel32')

# Declare argtypes/restype explicitly so ctypes marshals HANDLE-sized
# values correctly on 64-bit Python (see the linked forum reference at
# the top of the file).
OpenClipboard = user32.OpenClipboard
OpenClipboard.argtypes = wintypes.HWND,
OpenClipboard.restype = wintypes.BOOL
CloseClipboard = user32.CloseClipboard
CloseClipboard.restype = wintypes.BOOL
EmptyClipboard = user32.EmptyClipboard
EmptyClipboard.restype = wintypes.BOOL
GetClipboardData = user32.GetClipboardData
GetClipboardData.argtypes = wintypes.UINT,
GetClipboardData.restype = wintypes.HANDLE
SetClipboardData = user32.SetClipboardData
SetClipboardData.argtypes = (wintypes.UINT, wintypes.HANDLE)
SetClipboardData.restype = wintypes.HANDLE

# Global-memory helpers that back the clipboard handle.
GlobalLock = kernel32.GlobalLock
GlobalLock.argtypes = wintypes.HGLOBAL,
GlobalLock.restype = wintypes.LPVOID
GlobalUnlock = kernel32.GlobalUnlock
GlobalUnlock.argtypes = wintypes.HGLOBAL,
GlobalUnlock.restype = wintypes.BOOL
GlobalAlloc = kernel32.GlobalAlloc
GlobalAlloc.argtypes = (wintypes.UINT, ctypes.c_size_t)
GlobalAlloc.restype = wintypes.HGLOBAL
GlobalSize = kernel32.GlobalSize
GlobalSize.argtypes = wintypes.HGLOBAL,
GlobalSize.restype = ctypes.c_size_t

GMEM_MOVEABLE = 0x0002  # movable global-memory block
GMEM_ZEROINIT = 0x0040  # zero-initialize the allocation
def Paste(data):
    """Copy *data* (str) onto the Windows clipboard as CF_UNICODETEXT.

    Allocates a movable global-memory block, copies the UTF-16LE bytes
    into it, and hands ownership of the block to the clipboard.
    Windows-only (relies on the user32/kernel32 bindings above).
    """
    data = data.encode('utf-16le')
    OpenClipboard(None)
    EmptyClipboard()
    # +2 bytes leave room for the UTF-16 NUL terminator (the allocation
    # is zero-initialized, so the terminator is already in place).
    handle = GlobalAlloc(GMEM_MOVEABLE | GMEM_ZEROINIT, len(data) + 2)
    pcontents = GlobalLock(handle)
    ctypes.memmove(pcontents, data, len(data))
    GlobalUnlock(handle)
    SetClipboardData(CF_UNICODETEXT, handle)
    CloseClipboard()
def getFunctions(filepath):
    """Collect fully-qualified ACE function names referenced in a file.

    The owning module is taken from the 'addons/<module>' segment of
    *filepath*.  FUNC(x) macros map to ace_<module>_fnc_x and
    EFUNC(mod,x) macros to ace_<mod>_fnc_x.  Files under compat*
    modules are skipped entirely.
    """
    module = re.search(r'addons[\W]*([_a-zA-Z0-9]*)', filepath).group(1)
    if module.startswith("compat"):
        return []
    with open(filepath, 'r') as source:
        text = source.read()
    # Local FUNC(...) calls; [^E] excludes EFUNC matches.
    local_names = sorted(set(re.findall(r'[^E]FUNC\(([_a-zA-Z0-9]*)\)', text)))
    # Cross-module EFUNC(module, name) calls.
    external_pairs = sorted(set(re.findall(r'EFUNC\(([_a-zA-Z0-9]*),([_a-zA-Z0-9]*)\)', text)))
    found = ["ace_{0}_fnc_{1}".format(module, name) for name in local_names]
    found.extend("ace_{0}_fnc_{1}".format(other, name) for other, name in external_pairs)
    return found
def getStrings(filepath):
    """Collect stringtable keys referenced via CSTRING/LSTRING macros.

    Local [CL]STRING(x) macros map to STR_ACE_<module>_x and external
    E[CL]STRING(mod,x) macros to STR_ACE_<mod>_x, where <module> comes
    from the 'addons/<module>' segment of *filepath*.  compat* modules
    are skipped.
    """
    module = re.search(r'addons[\W]*([_a-zA-Z0-9]*)', filepath).group(1)
    if module.startswith("compat"):
        return []
    with open(filepath, 'r') as source:
        text = source.read()
    # Local CSTRING/LSTRING references; [^E] excludes the E-prefixed forms.
    local_keys = sorted(set(re.findall(r'[^E][CL]STRING\(([_a-zA-Z0-9]*)\)', text)))
    # Cross-module ECSTRING/ELSTRING(module, key) references.
    external_pairs = sorted(set(re.findall(r'E[CL]STRING\(([_a-zA-Z0-9]*),([_a-zA-Z0-9]*)\)', text)))
    found = ["STR_ACE_{0}_{1}".format(module, key) for key in local_keys]
    found.extend("STR_ACE_{0}_{1}".format(other, key) for other, key in external_pairs)
    return found
def main():
    """Scan addon sources for function/string macro references and build
    an SQF snippet that checks each one in-game; the snippet is printed
    and copied to the clipboard (Windows only, via Paste)."""
    print("#########################")
    print("# All Functions #")
    print("#########################")
    sqf_list = []
    allFunctions = []
    allStrings = []
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--module', help='only search specified module addon folder', required=False, default=".")
    args = parser.parse_args()
    # Repository root is two directory levels above this script.
    addon_base_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    # Collect every .sqf/.cpp/.hpp source under addons/<module>.
    for root, dirnames, filenames in os.walk(addon_base_path + "/" + 'addons' + '/' + args.module):
        for filename in fnmatch.filter(filenames, '*.sqf'):
            sqf_list.append(os.path.join(root, filename))
        for filename in fnmatch.filter(filenames, '*.cpp'):
            sqf_list.append(os.path.join(root, filename))
        for filename in fnmatch.filter(filenames, '*.hpp'):
            sqf_list.append(os.path.join(root, filename))
    for filename in sqf_list:
        allFunctions = allFunctions + getFunctions(filename)
    for filename in sqf_list:
        allStrings = allStrings + getStrings(filename)
    # Assemble the SQF checker: a header log line, the collected name
    # lists, and forEach loops flagging nil functions / missing strings.
    codeHeader = "diag_log text '*********** Scaning for nil functions [funcs {0} / strings {1}]';".format(len(set(allFunctions)), len(set(allStrings)))
    codeFuncCheck = "{ if (isNil _x) then {systemChat format ['%1 is nil', _x]; diag_log text format ['%1 is nil', _x];}} forEach allFunctions;"
    codeStringCheck = "{ if (!isLocalized _x) then {systemChat format ['%1 is not in stringtable', _x]; diag_log text format ['%1 is not in stringtable', _x];}} forEach allStrings;"
    outputCode = "{0} allFunctions = {1}; allStrings = {2}; {3} {4}".format(codeHeader, list(set(allFunctions)), list(set(allStrings)), codeFuncCheck, codeStringCheck)
    print(outputCode)
    Paste(outputCode)  # Windows clipboard
    print("")
    print("Copied to clipboard, [funcs {0} / strings {1}]'".format(len(set(allFunctions)), len(set(allStrings))))
if __name__ == "__main__":
main()
| acemod/ACE3 | tools/search_undefinedFunctions.py | search_undefinedFunctions.py | py | 5,461 | python | en | code | 966 | github-code | 6 | [
{
"api_name": "ctypes.WinDLL",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ctypes.WinDLL",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ctypes.wintypes.HWND",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "ctypes.wintypes... |
15873196557 | from datetime import datetime
def unix_to_dt(time):
    """Format a Unix timestamp (seconds) as a UTC 'YYYY-MM-DD HH:MM:SS' string."""
    # datetime.utcfromtimestamp is deprecated (Python 3.12+); an aware UTC
    # datetime renders the identical string with this format.
    from datetime import timezone
    return datetime.fromtimestamp(time, tz=timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
class event():
def __init__(self, events):
    """Build a typed event from a raw GroupMe event dict.

    Dispatches on events['type'] to a per-type parser that stores the
    relevant fields on this instance.  'None' marks an empty event
    (self.empty = True); unknown types are reported on stdout and leave
    only self.type/self.empty set.
    """
    self.type = events['type']
    self.empty = False
    if self.type == 'None':
        self.empty = True
    elif self.type == 'membership.nickname_changed':
        self.nickname_changed(events['data'])
    elif self.type == 'membership.announce.joined':
        self.user_joined(events['data'])
    elif self.type == 'membership.notifications.exited':
        self.user_exited(events['data'])
    elif self.type == 'membership.notifications.removed':
        self.user_removed(events['data'])
    elif self.type == 'membership.notifications.autokicked':
        self.user_autokicked(events['data'])
    elif self.type == 'group.avatar_change':
        self.group_avatar_change(events['data'])
    elif self.type == 'group.like_icon_set':
        self.like_icon_change(events['data'])
    elif self.type == 'group.name_change':
        self.group_name_change(events['data'])
    elif self.type == 'poll.created':
        self.poll_created(events['data'])
    elif self.type == 'poll.reminder':
        self.poll_reminder(events['data'])
    elif self.type == 'poll.finished':
        self.poll_finished(events['data'])
    else:
        # NOTE(review): user_added is defined on this class but no type
        # dispatches to it, so "added" events land here -- confirm.
        print('Unknown event type: ' + self.type)
def nickname_changed(self, event_data):
    """Record who changed their nickname and the new name."""
    self.user = event_data['user']
    self.new_name = event_data['name']
def user_joined(self, event_data):
    """Record the user who joined the group."""
    self.user = event_data['user']
def user_added(self, event_data):
    """Record who performed the add and the list of added users.

    NOTE(review): not referenced by the __init__ dispatch in this file.
    """
    self.user = event_data['adder_user']
    self.added_users = event_data['added_users']
def user_exited(self, event_data):
    """Record an exit notification's charmap; the user field is not
    parsed here, only a literal 'X' placeholder is stored."""
    self.placeholder = 'X'
    self.charmap = event_data['charmap']
def user_removed(self, event_data):
    """Record who removed a member and which member was removed."""
    self.user = event_data['remover_user']
    self.removed_user = event_data['removed_user']
def user_autokicked(self, event_data):
    """Record the user who was automatically kicked."""
    self.user = event_data['user']
def group_avatar_change(self, event_data):
    """Record who changed the group avatar and the new avatar URL."""
    self.user = event_data['user']
    self.url = event_data['avatar_url']
def like_icon_change(self, event_data):
    """Record who set the like icon and its [pack_id, icon_index] pair."""
    self.user = event_data['user']
    self.like_icon = [event_data['pack_id'], event_data['icon_index']]
    # like_icon->type is ignored
def group_name_change(self, event_data):
    """Record who renamed the group and the new group name."""
    self.user = event_data['user']
    self.new_name = event_data['name']
'''POLL EVENTS'''
def poll_created(self, event_data):
    """Record the poll creator, its conversation, and the poll payload."""
    self.user = event_data['user']
    self.conversation = event_data['conversation']
    self.poll = event_data['poll']
def poll_reminder(self, event_data):
    """Record a poll reminder with its expiration as a formatted UTC string."""
    self.conversation = event_data['conversation']
    self.poll = event_data['poll']
    self.expiration = unix_to_dt(event_data['expiration'])
def poll_finished(self, event_data):
    """Record the conversation and final tally of a closed poll.

    Options nobody voted for lack the 'votes'/'voter_ids' keys in the
    raw payload; those default to 0 votes and an empty voter list.
    """
    self.conversation = event_data['conversation']
    self.raw_options = event_data['options']
    self.options = []
    for opt in self.raw_options:
        option = {
            "id": opt["id"],
            "title": opt["title"],
            "votes": 0,
            "voter_ids": [],
        }
        # Both keys are only present when at least one vote was cast.
        # (The original used a bare ``except:`` here, which also hid
        # unrelated errors such as malformed option entries.)
        if "votes" in opt and "voter_ids" in opt:
            option["votes"] = opt["votes"]
            option["voter_ids"] = opt["voter_ids"]
        self.options.append(option)
'''MISC EVENTS'''
def message_deleted(self, event_data):
self.message_id = event_data['message_id']
self.deleted_at = unix_to_dt(event_data['deleted_at'])
self.deleted_at_ts = event_data['deleted_at']
self.deletion_actor = event_data['deletion_actor']
self.deleter_id = event_data['deleter_id'] | theTrueEnder/GroupMe-Export-Parser | Python_Scripts/events.py | events.py | py | 4,193 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 3,
"usage_type": "name"
}
] |
35817144385 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Automatic electric field computation
------------------------------------
:download:`examples/auto_efield.py` demonstrates how
drift can be added self-consistently by calculating the
electric field generated from the concentration profile
of charged species.
::
$ python auto_efield.py --help
.. exec::
echo "::\\n\\n"
python examples/examples/auto_efield.py --help | sed "s/^/ /"
Here is an example generated by:
::
$ python auto_efield.py --plot --savefig auto_efield.png
.. image:: ../_generated/auto_efield.png
"""
from __future__ import print_function, division, absolute_import
from math import log, erf, exp
import argh
import numpy as np
from chempy.einstein_smoluchowski import electrical_mobility_from_D
from chemreac import ReactionDiffusion
from chemreac.integrate import run
from chemreac.util.plotting import save_and_or_show_plot
def sigm(x, lim=150., n=8):
    """Algebraic sigmoid saturating at +/-lim.

    Used instead of an exponential sigmoid to avoid overflow/underflow
    of 'double exp(double)' for large |x|.
    """
    scaled = (x / lim) ** n
    return x * (scaled + 1) ** (-1. / n)
# Shorthand constants used by the normalization formulas below.
sq2 = 2**0.5
pi = np.pi
sqpi = pi**0.5
def _gaussian(x, mu, sigma, logy, logx, geom, use_log2=False):
# Formula for normalization from derived in following mathematica code:
# $Assumptions = {(sigma | mu) \[Element] Reals, sigma > 0}
# 1/Integrate[E^(-1/2*((x - mu)/sigma)^2), {x, -Infinity, Infinity}]
# 1/Integrate[2*pi*x*E^(-1/2*((x - mu)/sigma)^2), {x, 0, Infinity}]
# 1/Integrate[4*pi*x^2*E^(-1/2*((x - mu)/sigma)^2), {x, 0, Infinity}]
if geom == 'f':
a = 1/sigma/(2*np.pi)**0.5
elif geom == 'c':
a = 1/pi/sigma/(2*exp(-mu**2/2/sigma**2)*sigma +
mu*sq2*sqpi*(1 + erf(mu/(sq2*sigma))))
elif geom == 's':
a = 1/2/pi/sigma/(2*exp(-mu**2/2/sigma**2)*mu*sigma +
sq2*sqpi*(mu**2 + sigma**2)*(1 + erf(mu/sq2/sigma)))
else:
raise NotImplementedError("Unkown geomtry: %s" % geom)
b = -0.5*((x-mu)/sigma)**2
logb = (lambda arg: log(arg)/log(2)) if use_log2 else log
if logy:
return logb(a) + b*logb(np.e)
else:
return a*np.exp(b)
def pair_of_gaussians(x, offsets, sigma, logy, logx, geom, use_log2=False):
    """Two normalized Gaussians centred at fractional *offsets* of the grid span.

    sigma may be a scalar (shared width) or a two-item sequence of
    per-peak widths.  When logx is True the grid is first transformed
    back to linear space (base 2 when use_log2, else e).  Returns a
    (lower, upper) tuple of _gaussian results.
    """
    try:
        sigma0, sigma1 = sigma[0], sigma[1]
    except (TypeError, IndexError):
        # Scalar (unsubscriptable) or too-short sigma: share one width.
        # The original used a bare ``except:``, which also swallowed
        # unrelated errors (e.g. KeyboardInterrupt).
        sigma0 = sigma1 = sigma
    expb = (lambda arg: 2**arg) if use_log2 else np.exp
    x = expb(x) if logx else x
    xspan = (x[-1] - x[0])
    xl = x[0] + offsets[0]*xspan  # lower centre
    xu = x[0] + offsets[1]*xspan  # upper centre
    return (
        _gaussian(x, xl, sigma0, logy, logx, geom, use_log2),
        _gaussian(x, xu, sigma1, logy, logx, geom, use_log2)
    )
def integrate_rd(D=-3e-1, t0=0.0, tend=7., x0=0.1, xend=1.0, N=1024,
base=0.5, offset=0.25, nt=25, geom='f',
logt=False, logy=False, logx=False, random=False,
nstencil=3, lrefl=False, rrefl=False,
num_jacobian=False, method='bdf', plot=False,
savefig='None', atol=1e-6, rtol=1e-6, random_seed=42,
surf_chg=(0.0, 0.0), sigma_q=101, sigma_skew=0.5,
verbose=False, eps_rel=80.10, use_log2=False):
"""
A negative D (diffusion coefficent) denotes:
mobility := -D
D := 0
A positive D calculates mobility from Einstein-Smoluchowski relation
"""
assert 0 <= base and base <= 1
assert 0 <= offset and offset <= 1
if random_seed:
np.random.seed(random_seed)
n = 2
if D < 0:
mobility = -D
D = 0
else:
mobility = electrical_mobility_from_D(D, 1, 298.15)
print(D, mobility)
# Setup the grid
logb = (lambda arg: log(arg)/log(2)) if use_log2 else log
_x0 = logb(x0) if logx else x0
_xend = logb(xend) if logx else xend
x = np.linspace(_x0, _xend, N+1)
if random:
x += (np.random.random(N+1)-0.5)*(_xend-_x0)/(N+2)
# Setup the system
stoich_active = []
stoich_prod = []
k = []
rd = ReactionDiffusion(
n, stoich_active, stoich_prod, k, N,
D=[D, D],
z_chg=[1, -1],
mobility=[mobility, -mobility],
x=x,
geom=geom,
logy=logy,
logt=logt,
logx=logx,
nstencil=nstencil,
lrefl=lrefl,
rrefl=rrefl,
auto_efield=True,
surf_chg=surf_chg,
eps_rel=eps_rel, # water at 20 deg C
faraday_const=1,
vacuum_permittivity=1,
use_log2=use_log2
)
# Initial conditions
sigma = (xend-x0)/sigma_q
sigma = [(1-sigma_skew)*sigma, sigma_skew*sigma]
y0 = np.vstack(pair_of_gaussians(
rd.xcenters, [base+offset, base-offset], sigma, logy, logx, geom, use_log2)).transpose()
if logy:
y0 = sigm(y0)
if plot:
# Plot initial E-field
import matplotlib.pyplot as plt
plt.figure(figsize=(6, 10))
rd.calc_efield((rd.expb(y0) if logy else y0).flatten())
plt.subplot(4, 1, 3)
plt.plot(rd.xcenters, rd.efield, label="E at t=t0")
plt.plot(rd.xcenters, rd.xcenters*0, label="0")
# Run the integration
tout = np.linspace(t0, tend, nt)
integr = run(rd, y0, tout,
atol=atol, rtol=rtol, sigm_damp=True,
C0_is_log=logy,
with_jacobian=(not num_jacobian), method=method)
Cout = integr.Cout
if verbose:
print(integr.info)
# Plot results
if plot:
def _plot(y, ttl=None, **kwargs):
plt.plot(rd.xcenters, y, **kwargs)
plt.xlabel((('log_%s({})' % ('2' if use_log2 else 'e')) if logx else '{}').format('x / m'))
plt.ylabel('C / M')
if ttl:
plt.title(ttl)
for i in range(nt):
plt.subplot(4, 1, 1)
c = 1-tout[i]/tend
c = (1.0-c, .5-c/2, .5-c/2)
_plot(Cout[i, :, 0], 'Simulation (N={})'.format(rd.N),
c=c, label='$z_A=1$' if i == nt-1 else None)
_plot(Cout[i, :, 1], c=c[::-1],
label='$z_B=-1$' if i == nt-1 else None)
plt.legend()
plt.subplot(4, 1, 2)
delta_y = Cout[i, :, 0] - Cout[i, :, 1]
_plot(delta_y, 'Diff',
c=[c[2], c[0], c[1]],
label='A-B (positive excess)' if i == nt-1 else None)
plt.legend(loc='best')
plt.xlabel("$x~/~m$")
plt.ylabel(r'Concentration / M')
ylim = plt.gca().get_ylim()
if N < 100:
plt.vlines(rd.x, ylim[0], ylim[1],
linewidth=1.0, alpha=0.2, colors='gray')
plt.subplot(4, 1, 3)
plt.plot(rd.xcenters, rd.efield, label="E at t=tend")
plt.xlabel("$x~/~m$")
plt.ylabel(r"$E~/~V\cdot m^{-1}$")
plt.legend()
for i in range(3):
plt.subplot(4, 1, i+1)
ylim = plt.gca().get_ylim()
for d in (-1, 1):
center_loc = [x0+(base+d*offset)*(xend-x0)]*2
plt.plot(rd.logb(center_loc) if logx else center_loc,
ylim, '--k')
plt.subplot(4, 1, 4)
for i in range(n):
amount = [rd.integrated_conc(Cout[j, :, i]) for j in range(nt)]
plt.plot(tout, amount, c=c[::(1, -1)[i]], label=chr(ord('A')+i))
plt.xlabel('Time / s')
plt.ylabel('Amount / mol')
plt.legend(loc='best')
plt.tight_layout()
save_and_or_show_plot(savefig=savefig)
return tout, Cout, integr.info, rd
if __name__ == '__main__':
argh.dispatch_command(integrate_rd, output_file=None)
| chemreac/chemreac | examples/auto_efield.py | auto_efield.py | py | 7,622 | python | en | code | 14 | github-code | 6 | [
{
"api_name": "numpy.pi",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "math.exp",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "math.erf",
"line_number": 65... |
13395399902 | from PIL import Image
def brighten_Image(pixelList):
    """Brighten pixels in place by adding 50 to each RGB channel.

    Each channel is clamped to 255 so brightening cannot produce values
    outside the valid 8-bit range (the original could yield > 255).
    Mutates *pixelList* in place and also returns it, preserving the
    original contract.
    """
    for i in range(len(pixelList)):
        pixel = pixelList[i]
        # Replace the tuple wholesale: tuples are immutable.
        pixelList[i] = (min(pixel[0] + 50, 255),
                        min(pixel[1] + 50, 255),
                        min(pixel[2] + 50, 255))
    return pixelList
def main():
    """Load a hard-coded JPEG, brighten every pixel, and display the result."""
    myFile = "/home/h702546919/Desktop/jimmy.jpg"
    my_img_obj = Image.open(myFile)
    # my_img_obj.show()
    # Per-pixel channel tuples, row-major order.
    pixelList = list(my_img_obj.getdata())
    # Brighten each pixel by +50 per channel.
    newPic = brighten_Image(pixelList)
    # Write the brightened pixels back into the image object.
    my_img_obj.putdata(newPic)
    # Display the new pic (not side by side with the old one;
    # see the original author's note about using a canvas).
    my_img_obj.show()
    return
#---#
main()
| tommulvey/CSC15_python | 10_3/brightenImage.py | brightenImage.py | py | 1,001 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 22,
"usage_type": "name"
}
] |
3727746961 | ### This file is meant to run from pc, *not* from the server. It extracts the
# data from the datafile, posts it to the database and finally runs the day
# command to add the day to the data.
import math
import pandas as pd
import requests
day_of_month = 6
def upload_data(fname):
    """Read the spreadsheet *fname*, build per-row payload dicts, and
    trigger the server's add_days command for the configured date range.

    NOTE(review): the per-row insert POST below is commented out, so as
    written this only prints the row dicts and then issues the add_days
    GET -- confirm that is intentional.
    """
    data = extract_data(fname)
    data_dict_list = []
    url = create_url()
    for _, row in data.iterrows():
        aaa = row[5]
        if math.isnan(aaa):
            # Missing ability-after-answer values are sent as 'NULL'.
            aaa = 'NULL'
        # NOTE: ``dict`` shadows the builtin of the same name in this loop.
        dict = {"SubmitDateTime": str(row[0]),
                "UserId": row[1]+1000,  # user ids are shifted by 1000 before upload
                "ExerciseId": row[2],
                "LearningObjectiveId": 8025,
                "Correct": min(row[4], 1),  # clamp correctness to 0/1
                "AbilityAfterAnswer": aaa}
        data_dict_list.append(dict)
        print(dict, ",")
    # r = requests.post(url=url + "insert/", json=data_dict_list[0],
    #                   auth=("Group2", "Group2-1234"))
    # print(r.status_code, r.reason, url + "insert/")
    r = requests.get(url + "add_days/start=2018-06-04&end=2018-06-0{}".format(str(day_of_month)), auth=("Group2", "Group2-1234"))
    print(r.status_code, r.reason,
          url + "add_days/start=2018-06-04&end=2018-06-0{}".format(
              day_of_month))
def extract_data(fname):
    """Load the results spreadsheet at *fname* into a DataFrame."""
    return pd.read_excel(fname)
def create_url():
    """Base URL of the Applab API (trailing slash included)."""
    return "http://applab.ai.ru.nl:5000/"
if __name__ == "__main__":
day_of_month = 6
upload_data("C:/Users/Rick "
"Dijkstra/Documents/Study/Applab/SnappetDataAnoniem/"
"resultaten-radboud_anoniem 4-6-18.xlsx")
| simgeekiz/ApplabAPI | group2api/utils/UploadData.py | UploadData.py | py | 1,592 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "math.isnan",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 38,
"usage_type": "call"
}
] |
72035219069 | #!/usr/bin/env python3
from bcc import BPF
from http.server import HTTPServer, BaseHTTPRequestHandler
import sys
import threading
# BPF C program: fires on every clone() syscall and pushes one data_t record
# (pid, ppid, uid, comm) to userspace over a perf buffer.
clone_ebpf = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/fs.h>
#define ARGSIZE 128
BPF_PERF_OUTPUT(events);
struct data_t {
    u32 pid; // PID as in the userspace term (i.e. task->tgid in kernel)
    u32 ppid; // Parent PID as in the userspace term (i.e task->real_parent->tgid in kernel)
    u32 uid;
    char comm[TASK_COMM_LEN];
};
int clone_ebpf(struct pt_regs *ctx) {
    struct data_t data = {};
    struct task_struct *task;
    data.uid = bpf_get_current_uid_gid() & 0xffffffff;
    data.pid = bpf_get_current_pid_tgid() >> 32;
    task = (struct task_struct *)bpf_get_current_task();
    data.ppid = task->real_parent->tgid;
    bpf_get_current_comm(&data.comm, sizeof(data.comm));
    /* Fix: perf_submit takes a POINTER to the record; the original passed
       the struct by value (`data`), which does not compile under BCC. */
    events.perf_submit(ctx, &data, sizeof(struct data_t));
    return 0;
}
"""
# Last-seen clone() event, shared between the BPF polling thread (writer,
# via collect_events) and the HTTP handler (reader).
pid = ""
ppid = ""
uid = ""
command = ""
def clone_ebpf_thread():
    """Compile/attach the clone() BPF probe and poll its perf buffer forever."""
    # Fix: `b` must be module-global -- collect_events() (the perf-buffer
    # callback) looks it up to decode incoming events; the original kept it
    # local, leaving the callback with an unresolvable name.
    global b
    b = BPF(text=clone_ebpf)
    clone_fn_name = b.get_syscall_fnname("clone")
    b.attach_kprobe(event=clone_fn_name, fn_name="clone_ebpf")
    b["events"].open_perf_buffer(collect_events)
    while 1:
        try:
            b.perf_buffer_poll()
        except KeyboardInterrupt:
            exit()
def collect_events(cpu, data, size):
    """Perf-buffer callback: decode one clone event into the module globals.

    Fixes two defects in the original: BCC invokes the callback as
    callback(cpu, data, size), so the parameters must be declared, and the
    assignments need ``global`` or they silently create locals.
    """
    global pid, ppid, uid, command
    event = b["events"].event(data)
    pid = event.pid
    ppid = event.ppid
    uid = event.uid
    command = event.comm
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
    """Serves the most recent clone() event on every GET request."""

    def do_GET(self):
        # Fix: the original passed sys.stdout as a *value* to print (so the
        # stream object itself got printed); direct the output instead.
        print("received a get message", file=sys.stdout)
        self.send_response(200)
        self.end_headers()
        # Fix: the globals hold ints/bytes once collect_events has run, and
        # the original str + int concatenation raised TypeError.
        response = ("pid: " + str(pid) + " ppid: " + str(ppid)
                    + " uid: " + str(uid) + " command: " + str(command))
        # Fix: wfile expects bytes, not str.
        self.wfile.write(response.encode("utf-8"))
# Run the BPF poller in a background thread and serve HTTP on port 8000
# from the main thread (serve_forever blocks).
x = threading.Thread(target=clone_ebpf_thread)
x.start()
httpd = HTTPServer(('0.0.0.0', 8000), SimpleHTTPRequestHandler)
httpd.serve_forever()
| madhusudanas/ebpf-mac-python | misc/hello_world1.py | hello_world1.py | py | 1,927 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "bcc.BPF",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "http.server.BaseHTTPRequestHandler",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "sys.stdout",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "threading.... |
14837490064 | from django.urls import path
from . import views
urlpatterns = [
    # Catalogue
    path('products/', views.product_list, name='product_list'),
    path('product/<int:product_pk>/', views.product_detail, name='product_detail'),
    # Basket handling
    path('basket/', views.product_basket, name='product_basket'),
    path('product/<int:product_pk>/add_to_basket', views.add_to_basket, name='add_to_basket'),
    # NOTE(review): some routes below lack a trailing slash, unlike the rest
    # of the file -- confirm this is intentional (APPEND_SLASH behaviour).
    path('product_delete/<int:product_pk>', views.remove_from_basket, name='remove_from_basket'),
    path('product_remove/<int:product_pk>', views.remove_all_products, name='remove_all_products'),
    path('delete_all/', views.delete_all, name='delete_all'),
    # Ordering
    path('order/', views.order, name='order'),
    path('orders_history/', views.orders_history, name='orders_history'),
    path('add_to_favorites/<int:product_pk>', views.add_to_favorites, name='add_to_favorites')
]
| meeeeeeeh/djangoblog | shop/urls.py | urls.py | py | 855 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
# Big Hollywood studios often watermark each copy of a script with the name
# of the actor it was given to: if the script leaks, the watermark on the
# leaked copy identifies who leaked it.  This script stamps a watermark page
# onto every page of a PDF.
import PyPDF2

# NOTE(review): PdfFileReader/PdfFileWriter are the legacy (PyPDF2 < 3.0)
# API -- confirm the installed version before migrating to PdfReader/PdfWriter.
# Keep the input files open until the output is written (PyPDF2 reads page
# data lazily); the original leaked both input handles.
with open('super.pdf', 'rb') as template_file, open('wtr.pdf', 'rb') as watermark_file:
    template = PyPDF2.PdfFileReader(template_file)
    watermark = PyPDF2.PdfFileReader(watermark_file)

    output = PyPDF2.PdfFileWriter()
    for i in range(template.getNumPages()):
        # Overlay the (single-page) watermark onto every template page.
        page = template.getPage(i)
        page.mergePage(watermark.getPage(0))
        output.addPage(page)

    with open('watermarked.pdf', 'wb') as out_file:
        output.write(out_file)

print('watermark merged with pdf')
| hyraja/python-starter | 12.scripting python (projects)/pdf with python/03.pdf watermark.py | 03.pdf watermark.py | py | 702 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyPDF2.PdfFileReader",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PyPDF2.PdfFileReader",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PyPDF2.PdfFileWriter",
"line_number": 10,
"usage_type": "call"
}
] |
import numpy as np
import logging
from pathlib import Path
# Output folder setup: make sure ./output/log exists before the
# FileHandler below tries to create energy.log inside it.
output = Path('./output/log').expanduser()
output.mkdir(parents=True, exist_ok=True)
# Module logger: a full DEBUG trace of every solver sweep, written fresh
# each run (mode='w') to ./output/log/energy.log.
en_log = logging.getLogger(__name__)
en_log.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
file_handler = logging.FileHandler('./output/log/energy.log',mode='w')
file_handler.setFormatter(formatter)
en_log.addHandler(file_handler)
def energy_init(temp, m, n, dX):
    """Fill the interior nodes with a linear initial temperature profile.

    Each interior row gets T = 1 - row*dX; boundaries are set separately
    in energy_bound.  Mutates *temp* in place and returns it.

    NOTE(review): the interior is written as temp[row, col] with col in
    range(1, m-1) and row in range(1, n-1), while the solver indexes
    temp[i, j] with i < m -- this only lines up for square grids (m == n);
    confirm the intended array layout.
    """
    for col in range(1, m - 1):
        for row in range(1, n - 1):
            temp[row, col] = 1 - row * dX
    return temp
def energy_bound(temp, m, n, iter):
    """Apply the temperature boundary conditions in place and return temp.

    Row 0 is held at T=1 and row m-1 at T=0; the two side columns get a
    3-point one-sided extrapolation.  The side update runs last, so it also
    rewrites the corner cells (same order as the original loops).
    """
    en_log.debug("Iteration No.: {}--------C".format(iter))
    en_log.debug("Temperature at bouondary calculation entry: \n{}".format(temp))
    # Isothermal top/bottom walls.
    temp[0, :n] = 1
    temp[m - 1, :n] = 0
    # One-sided extrapolation on the side columns (each row is independent,
    # so the vectorised form matches the original row-by-row loop exactly).
    temp[:m, 0] = (4 / 3) * (temp[:m, 1] - temp[:m, 2] / 4)
    temp[:m, n - 1] = (4 / 3) * (temp[:m, n - 2] - temp[:m, n - 3] / 4)
    en_log.debug("Temperature at boundary calculation exit: \n{}".format(temp))
    en_log.debug("____________________________________________________")
    return temp
def energy_bound_ur(temp_o, temp_calc, m, n, r, iter):
    """Under-relax only the boundary cells of the new temperature field.

    Returns a copy of temp_calc whose boundary rows/columns are blended as
    old + r*(new - old); interior values pass through unchanged.
    """
    temp_n = np.copy(temp_calc)
    en_log.debug("Iteration No.: {}--------D".format(iter))
    en_log.debug("Temperature at boundary UR calculation entry: \n{}".format(temp_calc))
    # Side columns first, then top/bottom rows -- the corner cells end up
    # with the same blended value either way (identical formula per cell).
    temp_n[:m, 0] = temp_o[:m, 0] + r * (temp_calc[:m, 0] - temp_o[:m, 0])
    temp_n[:m, n - 1] = temp_o[:m, n - 1] + r * (temp_calc[:m, n - 1] - temp_o[:m, n - 1])
    temp_n[0, :n] = temp_o[0, :n] + r * (temp_calc[0, :n] - temp_o[0, :n])
    temp_n[m - 1, :n] = temp_o[m - 1, :n] + r * (temp_calc[m - 1, :n] - temp_o[m - 1, :n])
    en_log.debug("Temperature at boundary UR calculation exit: \n{}".format(temp_n))
    en_log.debug("____________________________________________________")
    return temp_n
def energy(temp_o,strm,m,n,dX,dY,div,iter):
    """Perform one Jacobi sweep of the energy (temperature) equation.

    Interior nodes of a copy of temp_o are updated from the convection term
    (the Jacobian of stream function and temperature, central differences)
    plus the diffusion term, divided by `div`.  Boundary rows/columns are
    left untouched (they are handled in energy_bound).  Returns the new field.
    """
    en_log.debug("Iteration No.: {}--------A".format(iter))
    en_log.debug("Temperature at calculation entry: \n{}".format(temp_o))
    # Jacobi iteration: read only from temp_o, write only to the copy.
    temp_calc = np.copy(temp_o)
    mul = (-1/(4*dX*dY))
    for i in range(1,m-1):
        for j in range (1,n-1):
            # Central differences of stream function and temperature.
            strm_i_diff = (strm[i+1,j]-strm[i-1,j])
            strm_j_diff = (strm[i,j+1]-strm[i,j-1])
            temp_i_diff = (temp_o[i+1,j]-temp_o[i-1,j])
            temp_j_diff = (temp_o[i,j+1]-temp_o[i,j-1])
            temp_i_sum = (temp_o[i+1,j]+temp_o[i-1,j])/(dX*dX)
            temp_j_sum = (temp_o[i,j+1]+temp_o[i,j-1])/(dY*dY)
            temp_calc[i,j] = ( (mul*((strm_j_diff*temp_i_diff)-(strm_i_diff*temp_j_diff))) + temp_i_sum + temp_j_sum )/div
    en_log.debug("Temperature at calculation exit: \n{}".format(temp_calc))
    en_log.debug("____________________________________________________")
    return temp_calc
def energy_ur(temp_o, temp_calc, m, n, r, iter):
    """Under-relax the interior of the new temperature field.

    Returns a copy of temp_calc whose interior cells are blended as
    old + r*(new - old); boundary cells pass through unchanged (they are
    relaxed separately in energy_bound_ur).
    """
    en_log.debug("Iteration No.: {}--------B".format(iter))
    en_log.debug("Temperature at UR calculation entry: \n{}".format(temp_calc))
    temp_n = np.copy(temp_calc)
    inner = (slice(1, m - 1), slice(1, n - 1))
    temp_n[inner] = temp_o[inner] + r * (temp_calc[inner] - temp_o[inner])
    en_log.debug("Temperature at UR calculation exit: \n{}".format(temp_n))
    en_log.debug("____________________________________________________")
    return temp_n
def converge(temp_o,strm,m,n,dX,dY,div,iter):
    """Measure how far temp_o is from satisfying the energy stencil.

    Recomputes the same Jacobi stencil as energy() and builds the per-node
    residual (stencil value minus current value), normalised by the maximum
    absolute temperature; the std of that residual field is returned.
    """
    temp_residue = np.zeros((m,n))
    mul = (-1/(4*dX*dY))
    # For the first few sweeps the field is still close to its initial
    # guess, so a fixed normalisation of 1 is used.
    if iter<5:
        temp_max = 1
    else:
        # NOTE(review): int() truncates -- with temperatures in [0, 1] this
        # yields 0 whenever max|T| < 1, which makes the division below blow
        # up (numpy emits inf/nan).  Confirm whether int() is intentional.
        temp_max = int(np.amax(np.abs(temp_o)))
        en_log.debug("Temperature max value: \n{}".format(temp_max))
    for i in range(1,m-1):
        for j in range (1,n-1):
            # Central differences (same stencil as energy()).
            strm_i_diff = (strm[i+1,j]-strm[i-1,j])
            strm_j_diff = (strm[i,j+1]-strm[i,j-1])
            temp_i_diff = (temp_o[i+1,j]-temp_o[i-1,j])
            temp_j_diff = (temp_o[i,j+1]-temp_o[i,j-1])
            temp_i_sum = (temp_o[i+1,j]+temp_o[i-1,j])/(dX*dX)
            temp_j_sum = (temp_o[i,j+1]+temp_o[i,j-1])/(dY*dY)
            # Residual = (value the solver would assign) - (current value),
            # normalised by temp_max.
            temp_residue[i,j] = (( ( (mul*((strm_j_diff*temp_i_diff)-(strm_i_diff*temp_j_diff))) + temp_i_sum + temp_j_sum )/div )- temp_o[i,j])/temp_max
    en_log.debug("Temperature residue domain: \n{}".format(temp_residue))
return np.std(temp_residue) | amuthankural/square_cavity_Natural_Convection | energy.py | energy.py | py | 4,354 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
... |
24282938220 | from application import app, db
from flask import redirect, render_template, request, url_for, flash
from application.child.models import Child
from application.quotes.models import Quote
from application.likes.models import Likes
from application.child.forms import ChildForm, MakeSureForm
from datetime import datetime, date
from flask_login import login_required, current_user
from datetime import date
from wtforms import ValidationError
@app.route("/child", methods=["GET"])
def child_index():
return render_template("child/listchild.html", quotes = Child.query.all())
# Fetch the logged-in user's own children with a query.
@app.route("/child/userlist/", methods=["GET"])
@login_required
def child_userchildren():
    """Render the list of children belonging to the current user."""
    own_children = Child.find_users_children()
    return render_template("child/ownchildren.html", find_users_children=own_children)
@app.route("/child/newchild/")
@login_required
def child_form():
return render_template("child/newchild.html", form = ChildForm())
@app.route("/child/", methods=["GET","POST"])
@login_required
def child_create():
form = ChildForm(request.form)
if not form.validate():
return render_template("child/newchild.html", form = form)
# Tarkastetaan, ettei käyttäjällä ole samannimistä lasta
alreadyExistsChild = Child.query.filter_by(name=form.name.data, account_id=current_user.id).first()
if alreadyExistsChild:
form.name.errors.append("Sinulla on jo tämänniminen lapsi olemassa.")
return render_template("child/newchild.html", form = form)
c = Child(name = form.name.data, birthday = form.birthday.data)
c.account_id = current_user.id
db.session.add(c)
db.session().commit()
return redirect(url_for("child_userchildren"))
@app.route("/child/modifychild/<child_id>/", methods=["GET", "POST"])
@login_required
def child_modifychild(child_id):
# Asetetaan lomakkeelle valmiiksi olemassaolevat tiedot
form=ChildForm()
child = Child.query.get(child_id)
form.name.data = child.name
form.birthday.data = child.birthday
return render_template("child/modifyChild.html", form = form, child_id = child_id)
@app.route("/child/<child_id>/", methods=["POST"])
@login_required
def child_update(child_id):
child = Child.query.get(child_id)
form = ChildForm(request.form)
if not form.validate():
return render_template("child/modifyChild.html", form = form, child_id=child_id)
# Tarkastetaan, ettei käyttäjällä ole samannimistä lasta
alreadyExistsChild = Child.query.filter_by(name=form.name.data, account_id=current_user.id).first()
if alreadyExistsChild and child != alreadyExistsChild:
form.name.errors.append("Sinulla on jo tämänniminen lapsi olemassa.")
return render_template("child/modifyChild.html", form = form, child_id=child_id)
child.name = form.name.data
child.birthday =form.birthday.data
db.session().commit()
return redirect(url_for("child_userchildren"))
@app.route("/child/<child_id>/delete", methods=["POST","GET"])
@login_required
def child_delete(child_id):
# Tarkastuslomake, jottei lasta tule poistettua liian helpolla
form = MakeSureForm()
return render_template("child/deletechild.html", form = form, child_id=child_id)
@app.route("/child/<child_id>/del", methods=["POST"])
@login_required
def child_deleteConfirm(child_id):
form = MakeSureForm(request.form)
ok = form.name.data
# jos tarkastuslomakkeeseen on syötetty oikea tieto, jolla halutaan varmistaa poisto
if ok == "x":
c = Child.query.get(child_id)
# Etsitään lapsen lapsen sanonnat ja poistataan sanonnat sekä sanonnan tykkäykset
q = Quote.query.filter(Quote.child_id == child_id)
for quote in q:
likes = Likes.query.filter(Likes.quote_id==quote.id)
for like in likes:
db.session.delete(like)
db.session().commit()
db.session.delete(quote)
db.session().commit()
# Poistetaan lapsi
db.session().delete(c)
db.session().commit()
flash("Lapsi poistettu onnistuneesti", category="success")
return redirect(url_for("child_userchildren"))
flash("Lasta ei poistettu", category="warning")
return redirect(url_for("child_userchildren"))
# Show a single child's details.
@app.route("/child/showchild/<child_id>")
@login_required
def child_showOne(child_id):
    # Fetch by primary key; the showchild template renders the result.
    child = Child.query.get(child_id)
return render_template("child/showchild.html", child_id=child_id, child=child) | millalin/Kids-Say-the-Darndest-Things | application/child/views.py | views.py | py | 4,587 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.render_template",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "application.child.models.Child.query.all",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "application.child.models.Child.query",
"line_number": 14,
"usage_type": "a... |
25818064734 | #!/usr/bin/env python3
# =============================================================================
# Author: Julen Bohoyo Bengoetxea
# Email: julen.bohoyo@estudiants.urv.cat
# =============================================================================
""" Description: A set of tools for semantic image segmentations """
# =============================================================================
import os
import glob
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image
from patchify import patchify
##########################################################
######### GENERAL TOOLS #########
##########################################################
def get_class_weights(path, preprocess_function, img_size=256):
    """
    Get the balanced class weights of the masks (.png images) in *path*.

    Parameters
    ----------
    path : string
        Glob pattern / path to the directory containing the mask images.
    preprocess_function : function
        Function to preprocess data in order to get weights in the correct
        order: def preprocess_data(img, mask): return(img, mask).
    img_size : int, optional
        Image reading size. (default is 256).

    Returns
    -------
    class_weights : ndarray
        Weight of each class, in class order.
    """
    from sklearn.utils import class_weight

    # Capture mask/label info as a list
    masks = []
    for directory_path in glob.glob(path):
        paths = sorted(glob.glob(os.path.join(directory_path, "*.png")))
        for mask_path in paths:
            mask = cv2.imread(mask_path, 0)
            # INTER_NEAREST: interpolation must not invent new label values.
            mask = cv2.resize(mask, (img_size, img_size), interpolation=cv2.INTER_NEAREST)
            masks.append(mask)

    # Convert list to array for machine learning processing
    imgs = np.zeros(shape=(1, 1))  # dummy images: only the masks matter here
    masks = np.array(masks)

    # Preprocess masks the same way as in training so the weights come out
    # in the same class order.
    imgs, masks = preprocess_function(imgs, masks)
    masks = np.argmax(masks, axis=3)  # revert the one-hot encoding
    masks = masks.reshape(-1)         # class_weight expects shape (n_samples,)

    # Fix: scikit-learn >= 1.0 requires keyword arguments here; the old
    # positional call raises TypeError.
    class_weights = class_weight.compute_class_weight(class_weight='balanced',
                                                      classes=np.unique(masks),
                                                      y=masks)
    return class_weights
def drawProgressBar(percent, barLen=20):
    """
    Print an in-place progress bar on stdout.

    Parameters
    ----------
    percent : float
        Completed fraction (0-1).
    barLen : int, optional
        Width of the bar in characters. (default is 20).

    Returns
    -------
    None.
    """
    import sys
    filled = "=" * int(barLen * percent)
    bar = "[{:<{}}] {:.0f}%".format(filled, barLen, percent * 100)
    sys.stdout.write("\r")  # carriage return: overwrite the previous bar
    sys.stdout.write(bar)
    sys.stdout.flush()
##########################################################
######### PLOTTING TOOLS #########
##########################################################
def plot_legend(classes, cmap='viridis', size=2):
    """
    Plot a colour legend mapping each class index to its name.

    Parameters
    ----------
    classes : Dict
        Dict containing the number and name of each class.
    cmap : string, optional
        Color map to use in masks. (default is viridis).
    size : int, optional
        Plotting size. (default is 2).

    Returns
    -------
    None.
    """
    indices = list(range(len(classes)))
    names = [classes[i] for i in indices]

    fig = plt.figure(figsize=(size, size))
    fig.add_subplot(1, 1, 1)
    plt.yticks(indices, names)
    plt.xticks([], [])
    # One coloured cell per class, stacked vertically.
    column = np.reshape(indices, (1, len(classes))).T
    plt.imshow(column, cmap=cmap)
def plot_mask(images, masks, num_classes, num_plots=1, cmap='viridis', size=10):
    """
    Plots images and masks from lists using matplotlib.pyplot

    Parameters
    ----------
    images : list
        List with the original images (3 channel).
    masks : list
        List with the original masks (1 channel).
    num_classes : int
        Number of classes to plot
    num_plots : int, optional
        Amount of images to plot. (default is 1).
    cmap : string, optional
        Color map to use in masks. (default is viridis).
    size : int, optional
        Plotting size. (default is 10).

    Returns
    -------
    None.

    Notes
    -----
    Mutates the first row of each plotted mask IN PLACE so that every class
    value 0..num_classes-1 is present and the colormap stays consistent
    across plots.  The caller's arrays are modified.
    """
    # Place all pixel values for colour coherence
    print('Masks modified for plotting', num_classes, 'classes')
    for i in range(num_plots):
        mask=masks[i]
        for j in range(num_classes):
            mask[0,j]=j
        masks[i]=mask

    for i in range(num_plots):
        f = plt.figure(figsize = (size, size))
        f.add_subplot(1,3,1)
        plt.axis('off')
        plt. title('Original image')
        plt.imshow(images[i])

        f.add_subplot(1,3,2)
        plt.axis('off')
        plt. title('Ground truth mask')
        plt.imshow(masks[i], cmap=cmap)
        plt.show(block=True)
    # NOTE(review): plt.show is referenced but not called here -- probably
    # meant plt.show(); harmless as written.
    plt.show
def plot_prediction(images, masks, predictions, num_classes, num_plots=1, cmap='viridis', size=10, alpha=0.7):
    """
    Plots images, original masks, predicted masks and overlays from lists using matplotlib.pyplot

    Parameters
    ----------
    images : list
        List with the original images (3 channel).
    masks : list
        List with the original masks (1 channel).
    predictions : list
        List with the predicted masks (1 channel).
    num_classes : int
        Number of classes to plot
    num_plots : int, optional
        Amount of images to plot. (default is 1).
    cmap : string, optional
        Color map to use in masks. (default is viridis).
    size : int, optional
        Plotting size. (default is 10).
    alpha : float, optional
        Transparency for the prediction over image. (default is 0.7).

    Returns
    -------
    None.

    Notes
    -----
    Mutates the first row of each plotted mask AND prediction IN PLACE so
    every class value is present (colormap coherence).  The caller's arrays
    are modified.
    """
    # Place all pixel values for colour coherence
    print('Masks modified for plotting', num_classes, 'classes')
    for i in range(num_plots):
        mask=masks[i]
        prediction=predictions[i]
        for j in range(num_classes):
            mask[0,j]=j
            prediction[0,j]=j
        masks[i]=mask
        predictions[i]=prediction

    for i in range(num_plots):
        f = plt.figure(figsize = (size, size))
        f.add_subplot(1,3,1)
        plt.axis('off')
        plt. title('Original image')
        plt.imshow(images[i])

        f.add_subplot(1,3,2)
        plt.axis('off')
        plt. title('Ground truth mask')
        plt.imshow(masks[i], cmap=cmap)

        f.add_subplot(1,3,3)
        plt.axis('off')
        plt. title('Predicted mask')
        plt.imshow(predictions[i], cmap=cmap)

        f = plt.figure(figsize = (size, size))
        f.add_subplot(1,1,1)
        plt.axis('off')
        plt. title('Predicted mask over image')
        plt.imshow(images[i])
        # Masks out the whole predictions array each pass and then indexes
        # the i-th entry; the masking itself is loop-invariant.
        no_background_predictions = np.ma.masked_where(predictions == 0, predictions) # remove background(0) from prediction
        plt.imshow(no_background_predictions[i], cmap=cmap, alpha=alpha)
        plt.show(block=True)
    # NOTE(review): plt.show is referenced but not called -- probably meant
    # plt.show(); harmless as written.
    plt.show
##########################################################
######### READING IMAGES TO LISTS #########
##########################################################
def get_image_list(path, size=256):
    """
    Return an array with all the .jpg images of the specified directory
    resized to size*size and converted to RGB.

    Parameters
    ----------
    path : string
        Path to the directory containing the images.
    size : int, optional
        Size to load the images. (default is 256).

    Returns
    -------
    image_list : np.ndarray
        Array containing the images.
    """
    loaded = []
    for directory_path in glob.glob(path):
        for img_path in sorted(glob.glob(os.path.join(directory_path, "*.jpg"))):
            bgr = cv2.imread(img_path, 1)  # 1 -> 3-channel (BGR) read
            bgr = cv2.resize(bgr, (size, size))
            loaded.append(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
    # Convert list to array for machine learning processing
    return np.array(loaded)
def get_mask_list(path, size=256):
    """
    Return the masks built from all the .png images of the specified
    directory resized to size*size, plus the number of classes found.

    Parameters
    ----------
    path : string
        Path to the directory containing the masks.
    size : int, optional
        Size to load the masks. (default is 256).

    Returns
    -------
    (mask_list, num_classes) : (np.ndarray, int)
        Array containing the masks, and the count of distinct pixel values.
    """
    loaded = []
    for directory_path in glob.glob(path):
        for mask_path in sorted(glob.glob(os.path.join(directory_path, "*.png"))):
            gray = cv2.imread(mask_path, 0)  # 0 -> single-channel (greyscale) read
            # INTER_NEAREST keeps the ground-truth labels intact.
            loaded.append(cv2.resize(gray, (size, size), interpolation=cv2.INTER_NEAREST))
    # Convert list to array for machine learning processing
    mask_array = np.array(loaded)
    # Number of classes = number of distinct pixel values over all masks.
    return mask_array, len(np.unique(mask_array))
# NOT FINISHED: must check augmentation and mode handling (original author's note).
def get_generator_from_list(images, masks, mode, preprocess_function, augmentation=True,
                            val_split=0.2, batch_size=32, seed=123):
    """
    Returns a generator for both input images and masks preprocessed.

    Parameters
    ----------
    images : list
        List containing the images(not preprocessed).
    masks : list
        List containing the masks (not preprocessed).
    mode : string
        Specify whether this is the training or validation split.
        TODO: currently accepted but never used in the body.
    preprocess_function : function
        Function to preprocess data: def preprocess_data(img, mask): return(img, mask).
    augmentation : boolean, optional
        Boolean for performing data augmentation. (default is True).
    val_split : float, optional
        Percentage of the images for validation split. (default is 0.2).
        TODO: only passed to ImageDataGenerator; flow() is not given a
        subset, so the split is not actually applied.
    batch_size : int, optional
        Size of the loaded batches on each call to the generator. (default is 32).
        TODO: currently accepted but never passed to flow().
    seed : int, optional
        seed for the random transformations. (default is 123).

    Yields
    ------
    img :
        Preprocessed image batch.
    mask :
        Preprocessed mask batch.
    """
    if(augmentation):
        data_gen_args = dict(validation_split=val_split,
                             horizontal_flip=True,
                             vertical_flip=True,
                             fill_mode='reflect', #'constant','nearest','reflect','wrap'
                             )
    else: data_gen_args = dict(validation_split=val_split,
                               )

    # Identical args + identical seed so images and masks receive the same
    # random transformations.
    image_data_generator = ImageDataGenerator(**data_gen_args)
    image_data_generator.fit(images, augment=True, seed=seed)
    image_generator = image_data_generator.flow(images, seed=seed)

    mask_data_generator = ImageDataGenerator(**data_gen_args)
    mask_data_generator.fit(masks, augment=True, seed=seed)
    mask_generator = mask_data_generator.flow(masks, seed=seed)

    generator = zip(image_generator, mask_generator)
    for (img, mask) in generator:
        img, mask = preprocess_function(img, mask)
        yield (img, mask)
##########################################################
######### FLOW FROM DIRECTORY #########
##########################################################
def get_generator_from_directory(img_path, mask_path, size, mode, preprocess_function, augmentation=True,
                                 val_split=0.2, batch_size=32, seed=123):
    """
    Returns a generator for both input images and masks(hot encoded).
    dataset must be structured in "images" and "masks" directories.

    Parameters
    ----------
    img_path : string
        Path to the target dir containing images.
    mask_path : string
        Path to the target dir containing masks.
    size : int
        Image loading size.
    mode : string
        Specify whether this is the training or validation split.
        NOTE(review): passed straight to flow_from_directory(subset=...),
        so it must be exactly 'training' or 'validation' -- confirm callers.
    preprocess_function : function
        Function to preprocess data: def preprocess_data(img, mask): return(img, mask).
    augmentation : boolean, optional
        Boolean for performing data augmentation. (default is True).
    val_split : float, optional
        Percentage of the images for validation split. (default is 0.2).
    batch_size : int, optional
        Size of the loaded batches on each call to the generator. (default is 32).
    seed : int, optional
        seed for the random transformations. (default is 123).

    Yields
    ------
    img :
        Preprocessed image batch.
    mask :
        Preprocessed mask batch.
    """
    if(augmentation):
        data_gen_args = dict(validation_split=val_split,
                             horizontal_flip=True,
                             vertical_flip=True,
                             fill_mode='reflect', #'constant','nearest','reflect','wrap'
                             )
    else: data_gen_args = dict(validation_split=val_split,
                               )

    # One ImageDataGenerator with the same seed for both flows, so images
    # and masks receive identical random transformations.
    image_datagen = ImageDataGenerator(**data_gen_args)

    image_generator = image_datagen.flow_from_directory(img_path,
                                                        target_size=(size, size),
                                                        subset=mode, # train or validation
                                                        batch_size=batch_size,
                                                        shuffle=True,
                                                        class_mode=None,
                                                        seed=seed)

    # NOTE(review): flow_from_directory resizes with bilinear interpolation
    # by default, which can blend mask label values -- confirm the masks
    # survive resizing (elsewhere in this file INTER_NEAREST is used).
    mask_generator = image_datagen.flow_from_directory(mask_path,
                                                       target_size=(size, size),
                                                       subset=mode, # train or validation
                                                       batch_size=batch_size,
                                                       color_mode='grayscale',
                                                       shuffle=True,
                                                       class_mode=None,
                                                       seed=seed)

    generator = zip(image_generator, mask_generator)
    for (img, mask) in generator:
        img, mask = preprocess_function(img, mask)
        yield (img, mask)
##########################################################
######### TILE GENERATING #########
##########################################################
def get_image_tiles(path, tile_size, step=None, print_resize=False, dest_path=None):
    """
    Generates image tiles from the images on a given directory.

    Parameters
    ----------
    path : string
        Path to the original images dir.
    tile_size : int
        Size of the resulting tiles.
    step : int, optional
        Step pixel from tile to tile. (default is tile_size).
    print_resize : boolean, optional
        Option to print the cropped size of the image. (default is False).
    dest_path : string, optional
        Path to the destination dir for the tiles, not saved if None. (default is None).

    Returns
    -------
    image_array:
        Array of tiled images
    """
    print('Reading images:')
    if(not step): step=tile_size
    image_list = []
    for directory_path in glob.glob(path):
        paths = sorted(glob.glob(os.path.join(directory_path, "*.jpg")))
        for img_path in paths:
            #update progress var
            percentage = 1/(len(paths)/(paths.index(img_path)+1))
            drawProgressBar(percentage, barLen = 50)
            img = cv2.imread(img_path, 1) #1 for reading image as BGR (3 channel)
            # Cut each image to a size divisible by tile_size
            original_width=img.shape[1] # useful for crop locations
            # NOTE(review): original_height is computed but never used below.
            original_height=img.shape[0] # useful for crop locations
            width = (img.shape[1]//tile_size)*tile_size # get nearest width divisible by tile_size
            height = (img.shape[0]//tile_size)*tile_size # get nearest height divisible by tile_size
            img = Image.fromarray(img)
            #img = img.crop((0 ,0, width, height)) #Crop from top left corner ((left, top, right, bottom))
            img = img.crop((original_width-width ,0, original_width, height)) #Crop from top right corner ((left, top, right, bottom))
            img = np.array(img)
            if (print_resize): print('Cropped image size:', img.shape)
            # Extract patches from each image
            patches_img = patchify(img, (tile_size, tile_size, 3), step=step) #Step=256 for 256 patches means no overlap
            for i in range(patches_img.shape[0]):
                for j in range(patches_img.shape[1]):
                    single_patch_img = patches_img[i,j,:,:]
                    single_patch_img = single_patch_img[0] #Drop the extra unecessary dimension that patchify adds.
                    image_list.append(single_patch_img)
                    # Saving the image: "<original name> <i>-<j>.jpg" in dest_path
                    if dest_path is not None:
                        filename = img_path.rsplit( ".", 1 )[ 0 ] #remove extension
                        filename = filename.rsplit( "/")[ -1 ] #remove original path
                        filename = filename+' '+str(i)+'-'+str(j)+'.jpg' # add tile indexes
                        cv2.imwrite(dest_path+filename, single_patch_img)
    image_array = np.array(image_list)
    print('\nGot an image array of shape', image_array.shape, image_array.dtype)
    return(image_array)
def get_mask_tiles(path, tile_size, step=None, print_resize=False, dest_path=None):
    """
    Generates mask tiles from the masks on a given directory.

    Parameters
    ----------
    path : string
        Path to the original masks dir.
    tile_size : int
        Size of the resulting tiles.
    step : int, optional
        Step pixel from tile to tile. (default is tile_size).
    print_resize : boolean, optional
        Option to print the cropped size of the mask. (default is False).
    dest_path : string, optional
        Path to the destination dir for the tiles, not saved if None. (default is None).

    Returns
    -------
    mask_array:
        Array of tiled masks
    """
    print('Reading masks:')
    if(not step): step=tile_size
    mask_list = []
    for directory_path in glob.glob(path):
        paths = sorted(glob.glob(os.path.join(directory_path, "*.png")))
        for mask_path in paths:
            #update progress var
            percentage = 1/(len(paths)/(paths.index(mask_path)+1))
            drawProgressBar(percentage, barLen = 50)
            mask = cv2.imread(mask_path, 0) #0 for reading image as greyscale (1 channel)
            # Cut each mask to a size divisible by tile_size (same crop as
            # get_image_tiles so tiles stay aligned with their images).
            original_width=mask.shape[1] # useful for crop locations
            # NOTE(review): original_height is computed but never used below.
            original_height=mask.shape[0] # useful for crop locations
            width = (mask.shape[1]//tile_size)*tile_size # get nearest width divisible by tile_size
            height = (mask.shape[0]//tile_size)*tile_size # get nearest height divisible by tile_size
            mask = Image.fromarray(mask)
            #mask = mask.crop((0 ,0, width, height)) #Crop from top left corner ((left, top, right, bottom))
            mask = mask.crop((original_width-width ,0, original_width, height)) #Crop from top right corner ((left, top, right, bottom))
            mask = np.array(mask)
            if (print_resize): print('Cropped mask size:', mask.shape)
            # Extract patches from each mask
            patches_mask = patchify(mask, (tile_size, tile_size), step=step) #Step=256 for 256 patches means no overlap
            for i in range(patches_mask.shape[0]):
                for j in range(patches_mask.shape[1]):
                    single_patch_mask = patches_mask[i,j,:,:]
                    mask_list.append(single_patch_mask)
                    # Saving the mask: "<original name> <i>-<j>.png" in dest_path
                    if dest_path is not None:
                        filename = mask_path.rsplit( ".", 1 )[ 0 ] #remove extension
                        filename = filename.rsplit( "/")[ -1 ] #remove original path
                        filename = filename+' '+str(i)+'-'+str(j)+'.png' # add tile indexes
                        cv2.imwrite(dest_path+filename, single_patch_mask)
    mask_array = np.array(mask_list)
    print('\nGot a mask array of shape', mask_array.shape, mask_array.dtype, 'with values', np.unique(mask_array))
    return(mask_array)
def get_useful_images(IMG_DIR, MASK_DIR, USEFUL_IMG_DIR, USEFUL_MASK_DIR, PERCENTAGE=0.05):
    """
    Copy to the destination dirs only the image/mask tile pairs whose mask
    has more than PERCENTAGE of its pixels labelled as something other than
    0 (background).

    Parameters
    ----------
    IMG_DIR : string
        Path of the original image tiles directory.
    MASK_DIR : string
        Path of the original mask tiles directory.
    USEFUL_IMG_DIR : string
        Destination path of the filtered image tiles directory.
    USEFUL_MASK_DIR : string
        Destination path of the filtered mask tiles directory.
    PERCENTAGE : float
        The minimum non-background fraction to accept an image. (default is 0.05)

    Returns
    -------
    None.
    """
    # needs to be sorted as linux doesn't list sorted
    img_list = sorted(os.listdir(IMG_DIR))
    msk_list = sorted(os.listdir(MASK_DIR))

    useless = 0  # rejected-tile counter
    for idx in range(len(img_list)):
        percentage = 1 / (len(img_list) / (idx + 1))
        drawProgressBar(percentage, barLen=50)

        img_name = img_list[idx]
        mask_name = msk_list[idx]
        temp_image = cv2.imread(IMG_DIR + img_list[idx], 1)
        temp_mask = cv2.imread(MASK_DIR + msk_list[idx], 0)

        # Fix: the original used counts[0] from np.unique, which is the count
        # of the *smallest present value* -- that is the background count only
        # when a 0 pixel exists.  A tile with no background at all (100%
        # useful) scored 0 and was wrongly rejected.  Count background
        # pixels explicitly instead.
        background = np.count_nonzero(temp_mask == 0)
        if (1 - background / temp_mask.size) > PERCENTAGE:  # at least 5% useful (non-zero-label) area
            cv2.imwrite(USEFUL_IMG_DIR + img_name, temp_image)
            cv2.imwrite(USEFUL_MASK_DIR + mask_name, temp_mask)
        else:
            useless += 1

    print("\nTotal useful images are: ", len(img_list) - useless)
| julenbhy/biomedical_segmentation | tools/segmentation_utils.py | segmentation_utils.py | py | 23,075 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "glob.glob",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 49,
... |
5528516422 | from datetime import datetime
from finnhub import Client
from settings.constants import FINHUB_API_KEY
class FinhubFetcher:
    """Fetch crypto candle data for one symbol from the Finnhub API."""

    def __init__(self, symbol: str) -> None:
        self.symbol = symbol

    def _init_client(self) -> "Client":
        # Fix: the original annotated the return type as None although the
        # method returns the API client.  (String annotation: finnhub is an
        # optional import at type-check time.)
        return Client(FINHUB_API_KEY)

    def _get_params(self, resolution: str) -> dict:
        """Build the crypto_candles() kwargs for the window
        1 January of the current year .. now, as unix timestamps."""
        now = datetime.now()
        _from = int(datetime(now.year, 1, 1, 0, 0, 0, 0).timestamp())  # 1 January of current year
        to = int(now.timestamp())
        return {
            "symbol": f"BINANCE:{self.symbol}USDT",
            "resolution": resolution,
            "_from": _from,
            "to": to,
        }

    def get_crypto_candles(self, resolution: str = "D") -> dict:
        """Return the candle payload, or {} when Finnhub reports no data."""
        client = self._init_client()
        params = self._get_params(resolution)
        response = client.crypto_candles(**params)
        if response.get("s") == "no_data":
            return {}
        return response
| VladisIove/darkstore | portfolio_manager/services/fetchers/finnhub.py | finnhub.py | py | 949 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "finnhub.Client",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "settings.constants.FINHUB_API_KEY",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_na... |
37158346153 | import squarify
import matplotlib.pyplot as plt
import matplotlib.cm
import numpy as np
x = 0.
y = 0.
width = 950
height = 733
fig = plt.figure(figsize=(15, 12))
ax = fig.add_subplot(111, axisbg='white')
values = [285.4, 188.4, 173, 140.6, 91.4, 75.5, 62.3, 39.6, 29.4, 28.5, 26.2, 22.2]
labels = ['South Africa', 'Egypt', 'Nigeria', 'Algeria', 'Morocco', 'Angola', 'Libya', 'Tunisia', 'Kenya', 'Ethiopia', 'Ghana', 'Cameron']
colors = [0]*11
for i in range(11):
colors[i] = tuple(np.random.randint(0,255,3)/255.0)
initvalues = values
values = squarify.normalize_sizes(values, width, height)
rects = squarify.padded_squarify(values, x, y, width, height)
cmap = matplotlib.cm.get_cmap()
color = [cmap(random.random()) for i in range(len(values))]
x = [rect['x'] for rect in rects]
y = [rect['y'] for rect in rects]
dx = [rect['dx'] for rect in rects]
dy = [rect['dy'] for rect in rects]
ax.bar(x, dy, width=dx, bottom=y, color=colors, label=labels, align='edge')
va = 'center'
idx=1
for l,r,v in zip(labels, rects, initvalues):
x,y,dx,dy = r['x'], r['y'], r['dx'], r['dy']
ax.text(x+dx/2, y+dy/2+10, str(idx)+'--> '+l, va=va, ha='center', color='white', fontsize=14)
ax.text(x+dx/2, y+dy/2-12, '($'+str(v)+'b)', va=va, ha='center', color='white', fontsize=12)
idx=idx+1
ax.set_xlim(0, 1000)
ax.set_ylim(0, 1000)
plt.title('Top 12 GDP Africa Country', fontsize=20)
plt.savefig('datavis/Africa-GDP') | QiliWu/Python-datavis | datavis/Africa GDP.py | Africa GDP.py | py | 1,435 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.random.randint",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy... |
24930679414 | from datetime import datetime, timedelta
from email import message
from django.contrib.auth.models import User
from django.contrib import messages
from django.shortcuts import redirect, render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, JsonResponse
from .models import *
from .forms import questionForm
from django.core import serializers
# Create your views here.
def main(request):
# allQuestions = questions.objects.all()
# order by vote
if request.method == 'POST':
title = request.POST['title']
squestion = questions.objects.filter(title__icontains=title)
squestion = serializers.serialize('json', squestion)
return JsonResponse({'questions':squestion})
# squestions = questions.objects.filter(time__gt = datetime.now() - timedelta(days=100))[0:30]
# squestions = questions.objects.filter(answered=False)[0:30]
squestions = questions.objects.all()
context = {'questions':squestions}
return render(request,'main.html',context)
@login_required(login_url='login')
def addQuestion(request,pk):
if request.method == 'POST':
questioner = User.objects.get(id=pk)
if request.FILES:
newquestion = questions.objects.create(
questioner=questioner,
title = request.POST['title'],
question_image = request.FILES['question_image']
)
else:
newquestion = questions.objects.create(
questioner=questioner,
title = request.POST['title'],
)
messages.info(request,'Question Added Succesfully')
return redirect('main')
return render(request,'main.html')
def loadQuestion(request,pk):
question = questions.objects.get(id=pk)
sanswers = answers.objects.filter(question=question)
if request.method == 'POST':
answerer = User.objects.get(id=request.POST['userid'])
if request.FILES:
newanswer = answers.objects.create(
answerer = answerer,
question=question,
answer = request.POST['answer'],
answer_image=request.FILES['answer_image']
)
else:
newanswer = answers.objects.create(
answerer = answerer,
question=question,
answer = request.POST['answer']
)
messages.info(request,'Answer Added Succesfully')
return HttpResponseRedirect(request.path_info)
context={'question':question,'answers':sanswers}
print(request.path_info)
return render(request,'answers/question.html',context)
@login_required(login_url='login')
def userQuestions(request):
user = User.objects.get(id=request.user.id)
userquestions = questions.objects.filter(questioner=user)
context={'questions':userquestions}
return render(request,'answers/userquestion.html',context)
@login_required(login_url='login')
def editQuestion(request,pk):
question = questions.objects.get(id=pk)
form = questionForm(instance=question)
if request.method == 'POST':
editedquestion = questionForm(request.POST,request.FILES,instance=question)
if editedquestion.is_valid():
editedquestion.save()
return redirect('userquestions')
context={'form':form,'question':question}
return render(request,'answers/editquestion.html',context)
@login_required(login_url='login')
def deleteQuestion(request,pk):
question = questions.objects.get(id=pk)
question.delete()
messages.info(request,'Question deleted succesfully')
return redirect('userquestions') | SachinBhattarai0/QueAns | answers/views.py | views.py | py | 3,662 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.core.serializers.serialize",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.core.serializers",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 19,
"usage_type": "call"
},
{
... |
72789313789 | import numpy as np
import time
from scipy import ndimage
from .toolkit import vectools
from .toolkit.colors import Colors as _C
import matplotlib.pyplot as plt
import matplotlib
import math
import cv2
import sys
import os
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
class Image:
def __init__(self, fn, calibration, lock=None):
"""
Image processing class
Parameters
----------
fn: string
filename of the image to be processed
"""
# t0 = time.time()
self.fn = fn
self.fn_npy = self.fn.split('.')[0] + '.npy'
self.id = int(self.fn.split('cpt')[-1].split('.')[0])
self.calibration = calibration
# calibration_path = __location__ + '/../data/calibration.npy'
# calibration = np.load(calibration_path)
self.midpoint = calibration[self.id - 1][:-1]
# self.midpoint = calibration[0][:-1]
print(_C.YEL + 'Processing image ' + _C.BOLD + fn + _C.ENDC)
if lock is not None:
with lock:
self.image = np.load(self.fn_npy)
# self.image = cv2.imread(self.fn, cv2.IMREAD_GRAYSCALE)
else:
# self.image = cv2.imread(self.fn, cv2.IMREAD_GRAYSCALE)
self.image = np.load(self.fn_npy)
# self.image = np.rot90(self.image)
# print('Image loaded in', str(round(time.time() - t0, 2)), 's')
self.dimensions = np.shape(self.image)
self.dimy, self.dimx = self.dimensions
def transformRadial(self, env=None, midpoint=None, plot=False):
"""
Creates a transformed image where a sector is mapped to r/phi coordinates
Parameters
----------
midpoint: 2-tuple of floats, optional
Origin of the polar coordinate system. If None is given, the calibration data from the current class instantation is taken
plot: bool, optional
Plot the transformed image. Default is False
Cannot be used if multiprocessing is active
Returns
-------
transformed: 2D array
Coordinate transformed image
angles: 1D array of floats
angles between which the image is fully covered
radii: 1D array of float
distance scaling of rmax in the transformed image
"""
r = self.dimx
if midpoint is None:
midpoint = self.midpoint
# t0 = time.time()
dr = midpoint[0] - self.dimx
rmax = r + dr
hplus = midpoint[1]
hminus = self.dimy - midpoint[1]
thetaPlus = -math.asin(hplus / rmax)
thetaMinus = math.asin(hminus / rmax)
# thetaPlus, thetaMinus = -thetaMinus, -thetaPlus
thetaPlus_idx = int((thetaPlus + np.pi) / (2 * np.pi) * self.dimy)
thetaMinus_idx = int((thetaMinus + np.pi) / (2 * np.pi) * self.dimy)
# c = tuple(midpoint)
cx, cy = midpoint
c = (cx, cy)
transformed = cv2.linearPolar(self.image, c, rmax, cv2.WARP_FILL_OUTLIERS)
# Destroy the image object to free memory
del self.image
angles = np.linspace(thetaPlus, thetaMinus, thetaMinus_idx - thetaPlus_idx, endpoint=True)
radii = np.linspace(0, rmax, self.dimx)
self.dimensions = np.shape(transformed)
self.dimy, self.dimx = self.dimensions
absoluteZero = (self.dimy / 2 - thetaPlus_idx) - 1
transformed = transformed[thetaPlus_idx:thetaMinus_idx]
# Pad the transformed image with the boundary value
"""
start_idx = np.argmax(transformed > 0, axis=1)
start_idx = np.ones((len(transformed)), np.uint8) * 1000
transformed[:, :999] = 0
for i in range(len(transformed)):
transformed[i][transformed[i] == 0] = transformed[i, start_idx[i]]"""
# Remove Calibration features
calib_size_px = np.mean(np.array([x[2] for x in self.calibration]))
calib_size_mm = env.calib_size_mm # Outer radius of calibration piece
tolerance = 1.1
calib_width_mm = env.calib_width_mm * tolerance # Width of the calibration piece
# pitch_mm = self.env.pitch_mm # Nominal electrode pitch
scale = calib_size_mm / calib_size_px
self.calibrationCutoff = (calib_size_mm - calib_width_mm) / scale * r / rmax
# pitch = pitch_mm / scale
transformed[:, int(self.calibrationCutoff):] = 0
for i in range(len(transformed)):
transformed[i][transformed[i] == 0] = transformed[i, int(self.calibrationCutoff) - 1]
# plot = True
if plot:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(transformed)
ax.axhline(y=absoluteZero)
ax.set_aspect('auto')
fig.savefig(__location__ + '/../img/out/cv2transform.png', dpi=300)
# print('Coordinate transformation completed in ', str(round(time.time() - t0, 2)), 's')
return transformed, angles, radii
def detectFeatures(self, matrix, thresh_std=.5, plot=False):
"""
Distinguish band from background in binary matrix
Parameters
----------
matrix: 2D array
8Bit single channel source matrix to be processed
plot: bool, optional
Plot the transformed image. Default is False
Cannot be used if multiprocessing is active
Returns
-------
proc: 2D array
Processed binary image
"""
# t0 = time.time()
start_range = 2000
# end_range = np.shape(matrix)[1] - start_range
# Initializing Empty array in Memory
proc = np.empty(np.shape(matrix))
start_search = np.empty(np.shape(matrix))[:, :start_range]
end_search = np.empty(np.shape(matrix))[:, start_range:]
matrix = matrix.astype(np.float64, copy=False)
# print('Blurring')
# Gaussian Blur to remove fast features
cv2.GaussianBlur(src=matrix, ksize=(15, 3), dst=proc, sigmaX=1.5, sigmaY=5)
# ndimage.maximum_filter(proc, size=(5, 5), output=proc)
# cv2.GaussianBlur(src=matrix[:, :start_range], ksize=(3, 0), dst=start_search, sigmaX=0, sigmaY=3)
# cv2.GaussianBlur(src=matrix[:, start_range:], ksize=(31, 11), dst=end_search, sigmaX=0, sigmaY=0.1)
start_search = matrix[:, :start_range]
end_search = matrix[:, start_range:int(self.calibrationCutoff)]
# print('Convolving')
# Convolving with Prewitt kernel in x-direction
prewitt_kernel_x = np.tile([-1, 0, 1], (15, 1))
# prewitt_kernel_x = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
# print(prewitt_kernel_x)
# prewitt_kernel_x = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
kernel_y_width = 15
prewitt_kernel_y = np.array([[1] * kernel_y_width, [0] *
kernel_y_width, [-1] * kernel_y_width])
# prewitt_kernel_y_element = np.tile(np.ones(kernel_y_width), (15, 1))
# prewitt_kernel_y_end = np.concatenate((prewitt_kernel_y_element, np.zeros(15), -1 * prewitt_kernel_y_element))
# print(prewitt_kernel_y_end)
# print(prewitt_kernel_y_end)
cv2.threshold(src=start_search, dst=start_search, thresh=20, maxval=255, type=cv2.THRESH_TOZERO)
cv2.GaussianBlur(src=start_search, ksize=(21, 21), dst=start_search, sigmaX=50, sigmaY=0)
cv2.filter2D(src=start_search, kernel=prewitt_kernel_y, dst=start_search, ddepth=-1)
np.abs(start_search, out=start_search)
cv2.threshold(src=start_search, dst=start_search, thresh=80, maxval=1, type=cv2.THRESH_BINARY)
start_search = start_search.astype(np.uint8, copy=False)
n_labels, labels, l_stats, l_centroids = cv2.connectedComponentsWithStats(image=start_search, connectivity=4)
sizes = [s[-1] for s in l_stats]
sizes_original = sizes[:]
sizes.remove(max(sizes))
if len(sizes) == 0:
start_amp = 0
start_centroid = (0, 0)
else:
start_amp = max(sizes)
start_centroid_idx = sizes_original.index(start_amp)
start_centroid = (int(l_centroids[start_centroid_idx][1]), int(l_centroids[start_centroid_idx][0]))
cv2.threshold(src=end_search, dst=end_search, thresh=20, maxval=255, type=cv2.THRESH_TOZERO)
cv2.GaussianBlur(src=end_search, ksize=(21, 21), dst=end_search, sigmaX=50, sigmaY=0)
cv2.filter2D(src=end_search, kernel=prewitt_kernel_y, dst=end_search, ddepth=-1)
np.abs(end_search, out=end_search)
cv2.threshold(src=end_search, dst=end_search, thresh=80, maxval=1, type=cv2.THRESH_BINARY)
end_search = end_search.astype(np.uint8, copy=False)
n_labels, labels, l_stats, l_centroids = cv2.connectedComponentsWithStats(image=end_search, connectivity=4)
sizes = [s[-1] for s in l_stats]
sizes_original = sizes[:]
sizes.remove(max(sizes))
if len(sizes) == 0:
end_amp = 0
end_centroid = (0, 0)
else:
end_amp = max(sizes)
end_centroid_idx = sizes_original.index(end_amp)
end_centroid = (int(l_centroids[end_centroid_idx][1]), int(l_centroids[end_centroid_idx][0]) + start_range)
cv2.filter2D(src=proc, kernel=prewitt_kernel_x, dst=proc, ddepth=-1)
# ndimage.maximum_filter(proc, size=(5, 5), output=proc)
# cv2.GaussianBlur(src=proc, ksize=(11, 3), dst=proc, sigmaX=0, sigmaY=5)
np.abs(proc, out=proc)
#start_amp = start_search.max()
#start_idx = np.unravel_index(start_search.argmax(), start_search.shape)
start = (start_centroid, start_amp)
# end_amp = end_search.max()
# end_idx = np.unravel_index(end_search.argmax(), end_search.shape)
#ex, ey = end_idx
# if ey != 0:
# ey += start_range
#end_idx = (ex, ey)
end = (end_centroid, end_amp)
print(self.id)
print(start)
print(end)
print()
# del start_search
del end_search
# print('Thresholding')
# proc_mean = np.mean(proc)
# proc_std = np.std(proc)
# thresh = proc_mean + thresh_std * proc_std
thresh = 50.0
# thresh = proc_mean
# thresh = 0.1
#proc = proc * 255 / np.max(proc)
cv2.threshold(src=proc, dst=proc, thresh=thresh, maxval=1, type=cv2.THRESH_BINARY)
# cv2.adaptiveThreshold(src=proc,
# dst=proc,
# maxValue=1,
# thresholdType=cv2.THRESH_BINARY,
# adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C,
# blockSize=1001,
# C=0)
proc = proc.astype(np.uint8, copy=False)
Morphkernel = np.ones((11, 11), np.uint8)
cv2.dilate(proc, Morphkernel, proc)
cv2.erode(proc, Morphkernel, proc)
# print('Connecting')
# Label the complement regions of the binary image
proc_inv = 1 - proc
n_labels, labels, l_stats, l_centroids = cv2.connectedComponentsWithStats(image=proc_inv, connectivity=4)
# The maximum number of pixels in a noise field
# Everything larger is considered to be background
fieldsize = 2e4
# Label background fields
gaps = []
for i, stat in enumerate(l_stats):
if stat[-1] > fieldsize:
gaps.append(i)
# Set background fields to zero
for gap in gaps:
labels[labels == gap] = 0
# Set all forground fields to one
labels[labels != 0] = 1
labels = labels.astype(np.uint8, copy=False)
# Combine foreground noise with with thresholded image
cv2.bitwise_or(src1=proc, src2=labels, dst=proc)
filtered = np.copy(proc)
#Morphkernel = np.ones((11, 11), np.uint8)
#cv2.dilate(proc, Morphkernel, proc)
#cv2.erode(proc, Morphkernel, proc)
# plot = True
if plot:
print('Plotting')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(filtered)
# ax.plot(filtered[0], lw=0.2)
# ax.imshow(end_search)
# ax.plot(filtered[0])
ax.set_aspect('auto')
ax.set_xlabel('Radius [px]')
ax.set_ylabel('Angle [idx]')
fig.savefig(__location__ + '/../img/out/filter' + str(self.id) + '.png', dpi=300, interpolation='none')
# print('Features detected in', str(round(time.time() - t0, 2)), 's')
return proc, (start, end)
| lspgl/csat | sectorImage/core/image.py | image.py | py | 12,778 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.realpath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
38504427534 | import requests
from scraper.get_strava_access_token import refreshed_access_token
BASE_URL = 'https://www.strava.com'
ACCESS_TOKEN = refreshed_access_token()
def get_starred_segments():
print('Getting segement list')
request_dataset_url = BASE_URL + '/api/v3/segments/starred' # check https://developers.strava.com/docs/reference/ for STRAVA API REQUESTS
header = {'Authorization': 'Bearer ' + ACCESS_TOKEN}
param = {'per_page': 200, 'page': 1}
result_list = requests.get(request_dataset_url, headers=header, params=param).json()
segments_id_list = []
for i in result_list:
segments_id_list.append(i['id'])
return segments_id_list
def get_segment_details():
starred_segment_list = get_starred_segments()
detailed_segment_list = []
for i in range(len(starred_segment_list)):
request_dataset_url = BASE_URL + f'/api/v3/segments/{starred_segment_list[i]}'
header = {'Authorization': 'Bearer ' + ACCESS_TOKEN}
segment_details = requests.get(request_dataset_url, headers=header).json()
detailed_segment_list.append(segment_details)
print(f'Segment no. {starred_segment_list[i]} fetched')
return detailed_segment_list
if __name__ == '__main__':
print(get_starred_segments())
print(get_segment_details())
| ADV-111/Srodunia | scraper/request_dataset_through_api.py | request_dataset_through_api.py | py | 1,312 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scraper.get_strava_access_token.refreshed_access_token",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
}
] |
1662811500 | import csv
import os
import numpy as np
import json
from pyexcel_ods import get_data
PATTERN_TYPE = '1'
COURSE_TYPE = 'speed'
DATA_DIR = os.path.join(PATTERN_TYPE, COURSE_TYPE)
fieldnames = ['min_speed','max_speed','delay','spacing','min_angle','max_angle','num_rows','min_b_scale','max_b_scale','clear_threshold','hard_clear_threshold', 'burst_spacing', 'burst_spacing_dur', 'burst_spacing_ratio', 'bskip_hang']
# ods to csv
book = get_data(os.path.join(DATA_DIR, 's5_data.ods'))
data = list(book.values())[0]
with open('tmp.csv','w') as f:
writer = csv.writer(f)
for row in data:
writer.writerow(row)
with open('tmp.csv','r') as f:
reader = csv.DictReader(f)
for s5row in reader:
if 'X' in s5row.values():
break
s5row['num_rows'] = int(s5row['num_rows'])
for key in ['min_speed', 'max_speed', 'delay*speed']:
s5row[key] = float(s5row[key])
l1_min_speed = s5row['min_speed']*.9 - 2.5
l1_max_speed = s5row['max_speed']*.9 - 2.5
l10_min_speed = s5row['min_speed']*1.1+1.5
l10_max_speed = s5row['max_speed']*1.1+1.5
min_speeds = list(
np.append(np.linspace(
l1_min_speed, s5row['min_speed'], num=4, endpoint=False
), np.linspace(
s5row['min_speed'], l10_min_speed, num=6, endpoint=True
))
)
max_speeds = list(
np.append(np.linspace(
l1_max_speed, s5row['max_speed'], num=4, endpoint=False
), np.linspace(
s5row['max_speed'], l10_max_speed, num=6, endpoint=True
))
)
out_dir = os.path.join(DATA_DIR, 'pattern' + s5row['pattern'])
if not os.path.exists(out_dir):
os.mkdir(out_dir)
with open(os.path.join(out_dir, 'metadata.json'),'w') as f:
json.dump({
'title':'(%sS%s) Pattern %s' % (
PATTERN_TYPE
, s5row['pattern'].zfill(2)
, s5row['pattern'].zfill(2)
)
, 'pattern type': PATTERN_TYPE
, 'course type': COURSE_TYPE
}, f)
out_fn = os.path.join(out_dir, 'stages.txt')
with open(out_fn, 'w') as outf:
writer = csv.DictWriter(outf, fieldnames = fieldnames)
writer.writeheader()
for i in range(10):
min_speed = min_speeds[i]
max_speed = max_speeds[i]
avg_speed = (min_speed + max_speed)/2
delay = s5row['delay*speed'] / avg_speed
out_data = {
'min_speed': min_speed
, 'max_speed': max_speed
, 'delay': delay
, 'clear_threshold': 2000
, 'hard_clear_threshold': 4000
}
for fieldname in fieldnames:
if fieldname not in out_data:
out_data[fieldname] = s5row[fieldname]
writer.writerow(out_data)
| gebgebgeb/bdt | courses/write_speed_courses.py | write_speed_courses.py | py | 3,180 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pyexcel_ods.get_data",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line... |
1720170439 | from wazirx_sapi_client.rest import Client
from wazirx_sapi_client.websocket import WebsocketClient
import time
import websocket,json, pprint
from websocket import create_connection
from time import sleep
import logging
import pandas as pd
import asyncio
import socket, threading
import json, sys, os, time, csv, requests
from flask import Flask,request
from flask import render_template
from flask import current_app as app
from os.path import exists
file_exists = exists("config.py")
if file_exists:
import config
api_key = config.API_KEY
secret_key = config.SECRET_KEY
client = Client(api_key=api_key, secret_key=secret_key)
print(client.send("ping"))
global wes
wes = create_connection("wss://stream.wazirx.com/stream")
print(wes)
print("connection true")
# api_key = config.API_KEY
# secret_key = config.SECRET_KEY
app = Flask(__name__)
app.app_context().push()
def sellTrail(price):
return(client.send('create_order',
{"symbol": "ethinr", "side": "sell", "type": "stoplimit", "price": (price-price*0.003), "stopPrice":price,"quantity": quan, "recvWindow": 5000,
"timestamp": int(time.time()*1000)}))
def updateTrail(tick,orderID,price,trail_tage,quan):
try:
print(client.send('cancel_order',
{"symbol": tick, "orderId": orderID, "recvWindow": 5000, "timestamp": int(time.time()*1000)}))
except:
sleep(10)
print(client.send('cancel_order',
{"symbol": tick, "orderId": orderID, "recvWindow": 5000, "timestamp": int(time.time()*1000)}))
sleep(5)
try:
return(client.send('create_order',
{"symbol": tick, "side": "sell", "type": "stop_limit", "price": (price-price*trail_tage), "stopPrice":(price-price*(trail_tage-0.001)),"quantity": quan, "recvWindow": 5000,
"timestamp": int(time.time()*1000)}))
except:
sleep(5)
return(client.send('create_order',
{"symbol": tick, "side": "sell", "type": "stop_limit", "price": (price-price*trail_tage), "stopPrice":(price-price*(trail_tage-0.001)),"quantity": quan, "recvWindow": 5000,
"timestamp": int(time.time()*1000)}))
def gen_sign(query):
t=int(time.time())
echo = subprocess.Popen(['echo','-n',query], stdout=subprocess.PIPE, shell=False)
hmac_key=subprocess.Popen(['openssl','dgst','-sha256','-hmac',API_SECRET],stdin=echo.stdout,stdout=subprocess.PIPE,shell=False)
output = hmac_key.communicate()[0]
output=str(output.strip())
output=output.replace("b'(stdin)= ",'')
output=output.replace("'" ,'')
print(output)
def get_order(orderID):
try:
return(client.send('query_order',
{"orderId": orderID, "recvWindow": 10000, "timestamp": int(time.time() * 1000)}))
except:
sleep(10)
return(client.send('query_order',
{"orderId": orderID, "recvWindow": 10000, "timestamp": int(time.time() * 1000)}))
async def send_heartbeat( *args):
while True:
print(wes.send(json.dumps({'event': 'ping'})))
print("Beat sent")
await asyncio.sleep(10*60)
@app.route("/", methods=["GET"])
def home():
file_exists = exists("config.py")
if file_exists:
# api_key = config.API_KEY
# secret_key = config.SECRET_KEY
# global client
# global wes
# client = Client(api_key=api_key, secret_key=secret_key)
# print(client.send("ping"))
# wes = create_connection("wss://stream.wazirx.com/stream")
# print(wes)
open_ord=client.send('open_orders',
{"recvWindow": 5000,
"timestamp": int(time.time()*1000)})
# print("Ticker: ")
# tick=input()
# print("Quantity: ")
# quan=float(input())
# print("Trail %: ")
# trail_tage=float(input())
# print("orderId: ")
# orderId=int(input())
# print("sellPrice: ")
# sPrice=float(input())
sleep(5)
wes.send(json.dumps({
"event": "subscribe",
"streams": ["!ticker@arr"]
}))
print(file_exists)
return render_template("dashboard.html",open_ord=open_ord,action="parameters")
else:
return render_template("login.html")
# @app.route("/dashboard", methods=["GET"])
# def dashboardShow():
# return trail(tick,quan,trail_tage,orderId,sPrice,wes)
@app.route("/dashboard", methods=["POST"])
def dashboard():
global tick
global quan
global trail_tage
global orderId
global sPrice
tick=request.form['tick']
quan=float(request.form['quan'])
trail_tage=float(request.form['trail_tage'])
orderId=request.form['orderId']
sPrice=float(request.form['sPrice'])
r=get_order(orderId)
# render_template("dashboard.html",stat=r,action="display")
trail(tick,quan,trail_tage,orderId,sPrice,wes)
# render_template("dashboard.html",stat=r,action="display")
@app.route("/login", methods=["POST"])
def login():
api_key=request.form['apiKey']
secret_key=request.form['secretKey']
save=request.form['save']
if save=='True':
file = open("config.py", "w")
file.write("API-KEY = '"+api_key+"'\n")
file.write("SECRET_KEY = '"+secret_key+"'\n")
file.close()
# global client
# global wes
client = Client(api_key=api_key, secret_key=secret_key)
print(client.send("ping"))
wes = create_connection("wss://stream.wazirx.com/stream")
print(wes)
return render_template("dashboard.html",open_ord=open_ord,action="parameters")
# _thread = threading.Thread(target=asyncio.run, args=(self.send_heartbeat(),))
# _thread.start()
def trail(tick,quan,trail_tage,orderId,sPrice,wes):
# connections = dict()
# connections["websocket"] = wes
_thread = threading.Thread(target=asyncio.run, args=(send_heartbeat(),))
_thread.start()
result = wes.recv()
res = json.loads(result)
data={}
recvd=False
while not recvd:
result = wes.recv()
res = json.loads(result)
stream=res['data']
for dc in stream:
if isinstance(dc,dict):
# print(dc['s'])
# for keys in dc:
# print(keys)
if dc['s']==tick:
data=dc
recvd=True
print("data",data['b'])
col_heads=['Bought','MinSell','SoldP','Comp','BuyOrderID','BuyStatus','SellOrderID','SellStatus']
ob = []
prices=[]
buy_order={}
rows={}
# print(data)
bestSell=float(data['a'])
bestBuy=float(data['b'])
rows['serverTime']=data['E']
rows['bestBuy']=bestBuy
rows['bestSell']=bestSell
df=pd.DataFrame()
row=pd.DataFrame()
row = row.append(rows, ignore_index=True, sort=False)
row['serverTime']= pd.to_datetime(row['serverTime'], unit='ms')
df = df.append(row, ignore_index=True, sort=False)
print(row.loc[0])
row_ls=row.values.tolist()
# print(row_ls)
prices.append(row_ls[0])
print('prices',prices)
while True:
recvd=False
while not recvd:
try:
result = wes.recv()
except:
sleep(30)
wes = create_connection("wss://stream.wazirx.com/stream")
print(wes)
sleep(5)
wes.send(json.dumps({
"event": "subscribe",
"streams": ["!ticker@arr"]
}))
# connections = dict()
# connections["websocket"] = wes
res = json.loads(result)
# pprint.pprint(res)
stream=res['data']
for dc in stream:
if isinstance(dc,dict):
if dc['s']==tick:
data=dc
recvd=True
bestBuy=float(data['b'])
bestSell=float(data['a'])
times=data['E']
rows={}
rows['serverTime']=data['E']
rows['bestBuy']=bestBuy
rows['bestSell']=bestSell
row=pd.DataFrame()
row = row.append(rows, ignore_index=True, sort=False)
row['serverTime']= pd.to_datetime(row['serverTime'], unit='ms')
df = df.append(row, ignore_index=True, sort=False)
print("Best sell price",bestSell)
row_ls=row.values.tolist()
prices.append(row_ls[0])
try:
r=get_order(orderId)
except:
sleep(10)
r=get_order(orderId)
status=r[1]
# print(status)
if status['status']=="done":
print("complete")
# render_template("dashboard.html",action="complete")
break
elif bestSell>int(sPrice):
r=updateTrail(tick,orderId,bestSell,trail_tage,quan)
stat=r[1]
orderId=stat['id']
sPrice=bestSell
print(stat)
# return render_template("dashboard.html",stat=stat,action="display")
sleep(15)
sleep(5)
app.app_context().push()
if __name__ == '__main__':
app.run(host='localhost',port=8080, debug=True) | deysanjeeb/wazirX-trailstop | trail.py | trail.py | py | 9,134 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "config.API_KEY",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "config.SECRET_KEY",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "wazirx_sapi... |
16838118388 | from typing import List
from urllib.parse import urlparse
import pandas as pd
from pathlib import Path
from behave import Given, When, Then, Step
from csvcubeddevtools.behaviour.file import get_context_temp_dir_path
from csvcubeddevtools.helpers.file import get_test_cases_dir
from rdflib import Graph
from csvcubed.models.cube import *
from csvcubed.models.cube import (
ExistingQbAttribute,
NewQbAttribute,
NewQbConcept,
QbMultiMeasureDimension,
QbMultiUnits,
)
from csvcubed.models.validationerror import ValidationError
from csvcubed.models.cube.uristyle import URIStyle
from csvcubed.writers.qbwriter import QbWriter
from csvcubed.utils.qb.validation.cube import validate_qb_component_constraints
from csvcubed.utils.csvw import get_first_table_schema
from csvcubed.utils.pandas import read_csv
_test_case_dir = get_test_cases_dir()
def get_standard_catalog_metadata_for_name(
name: str, identifier: Optional[str] = None
) -> CatalogMetadata:
return CatalogMetadata(
name,
summary="Summary",
identifier=identifier,
description="Description",
creator_uri="https://www.gov.uk/government/organisations/office-for-national-statistics",
publisher_uri="https://www.gov.uk/government/organisations/office-for-national-statistics",
theme_uris=["http://gss-data.org.uk/def/gdp#some-test-theme"],
keywords=["Key word one", "Key word two"],
landing_page_uris=["http://example.org/landing-page"],
license_uri="http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/",
public_contact_point_uri="mailto:something@example.org",
)
_standard_data = pd.DataFrame(
{"A": ["a", "b", "c"], "D": ["e", "f", "g"], "Value": [1, 2, 3]}
)
@Given('a single-measure QbCube named "{cube_name}"')
def step_impl(context, cube_name: str):
context.cube = _get_single_measure_cube_with_name_and_id(cube_name, None)
@Given('a single-measure QbCube named "{cube_name}" with missing observation values')
def step_impl(context, cube_name: str):
cube = _get_single_measure_cube_with_name_and_id(cube_name, None)
cube.data["Value"] = [1, None, 3]
context.cube = cube
@Given(
'a single-measure QbCube named "{cube_name}" with missing observation values and `sdmxa:obsStatus` replacements'
)
def step_impl(context, cube_name: str):
data = pd.DataFrame(
{
"A": ["a", "b", "c"],
"D": ["e", "f", "g"],
"Marker": ["Suppressed", None, None],
"Value": [None, 2, 3],
}
)
columns = [
QbColumn("A", NewQbDimension.from_data(label="A", data=data["A"])),
QbColumn("D", NewQbDimension.from_data(label="D", data=data["D"])),
QbColumn(
"Marker",
NewQbAttribute.from_data(
"Marker",
data["Marker"],
parent_attribute_uri="http://purl.org/linked-data/sdmx/2009/attribute#obsStatus",
),
),
QbColumn(
"Value",
QbSingleMeasureObservationValue(
NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
),
),
]
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name), data, columns
)
@Given(
    'a single-measure QbCube named "{cube_name}" with missing observation values and missing `sdmxa:obsStatus` replacements'
)
def step_impl(context, cube_name: str):
    """Store a cube where a missing observation value has NO obsStatus marker.

    Row 1's Value is missing but its Marker is None (the only marker is on
    row 2), so the missing value is left unexplained — the invalid case.
    """
    data = pd.DataFrame(
        {
            "A": ["a", "b", "c"],
            "D": ["e", "f", "g"],
            "Marker": [None, "Provisional", None],
            "Value": [None, 2, 3],
        }
    )
    columns = [
        QbColumn("A", NewQbDimension.from_data(label="A", data=data["A"])),
        QbColumn("D", NewQbDimension.from_data(label="D", data=data["D"])),
        QbColumn(
            "Marker",
            NewQbAttribute.from_data(
                "Marker",
                data["Marker"],
                parent_attribute_uri="http://purl.org/linked-data/sdmx/2009/attribute#obsStatus",
            ),
        ),
        QbColumn(
            "Value",
            QbSingleMeasureObservationValue(
                NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
            ),
        ),
    ]
    context.cube = Cube(
        get_standard_catalog_metadata_for_name(cube_name), data, columns
    )
@Given(
    'a QbCube named "{cube_name}" with code-list defined in an existing CSV-W "{csvw_file_path}"'
)
def step_impl(context, cube_name: str, csvw_file_path: str):
    """Store a cube whose "D" dimension re-uses a code-list from an existing CSV-W.

    The "D" cell values are sampled from the existing code-list's "Notation"
    column so they are guaranteed to be valid codes.
    """
    tmp_dir = get_context_temp_dir_path(context)
    csvw_path = tmp_dir / csvw_file_path
    columns = [
        QbColumn("A", NewQbDimension.from_data("A code list", _standard_data["A"])),
        QbColumn(
            "D",
            NewQbDimension("D code list", code_list=NewQbCodeListInCsvW(csvw_path)),
        ),
        QbColumn(
            "Value",
            QbSingleMeasureObservationValue(
                NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
            ),
        ),
    ]
    csv_path, _ = get_first_table_schema(csvw_path)
    code_list_data, data_loading_errors = read_csv(csv=csvw_path.parent / csv_path)
    # Fixed random_state keeps the sampled codes (and so the scenario) deterministic.
    code_list_values = code_list_data["Notation"].sample(3, random_state=1)
    context.data_loading_errors = data_loading_errors
    context.cube = Cube(
        get_standard_catalog_metadata_for_name(cube_name, None),
        pd.DataFrame({"A": ["a", "b", "c"], "D": code_list_values, "Value": [1, 2, 3]}),
        columns,
    )
@Given('a single-measure QbCube with identifier "{cube_id}" named "{cube_name}"')
def step_impl(context, cube_name: str, cube_id: str):
    """Store the standard single-measure cube with an explicit catalog identifier."""
    context.cube = _get_single_measure_cube_with_name_and_id(cube_name, cube_id)
def _get_single_measure_cube_with_name_and_id(
    cube_name: str, cube_id: str, uri_style: URIStyle = URIStyle.Standard
) -> Cube:
    """Build the standard two-dimension, single-measure test cube.

    Uses the shared `_standard_data` fixture and catalog metadata derived
    from `cube_name`/`cube_id`.
    """
    observed_value = QbSingleMeasureObservationValue(
        NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
    )
    cube_columns = [
        QbColumn("A", NewQbDimension.from_data("A code list", _standard_data["A"])),
        QbColumn("D", NewQbDimension.from_data("D code list", _standard_data["D"])),
        QbColumn("Value", observed_value),
    ]
    return Cube(
        get_standard_catalog_metadata_for_name(cube_name, cube_id),
        _standard_data,
        cube_columns,
        uri_style=uri_style,
    )
@Given('a single-measure QbCube named "{cube_name}" with existing dimensions')
def step_impl(context, cube_name: str):
    """Store a cube whose dimensions are pre-existing RDF dimensions referenced by URI."""
    columns = [
        QbColumn(
            "A",
            ExistingQbDimension("http://example.org/some/dimension/a"),
            csv_column_uri_template="http://example.org/some/codelist/a",
        ),
        QbColumn(
            "D",
            ExistingQbDimension("http://example.org/some/dimension/d"),
            csv_column_uri_template="http://example.org/some/codelist/d",
        ),
        QbColumn(
            "Value",
            QbSingleMeasureObservationValue(
                NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
            ),
        ),
    ]
    context.cube = Cube(
        get_standard_catalog_metadata_for_name(cube_name), _standard_data, columns
    )
@Given('a single-measure QbCube named "{cube_name}" with duplicate rows')
def step_impl(context, cube_name: str):
    """Store a cube whose two rows are identical — used to exercise duplicate-row validation."""
    data = pd.DataFrame({"A": ["a", "a"], "Value": [1, 1]})
    columns = [
        QbColumn("A", NewQbDimension.from_data("A Dimension", data["A"])),
        QbColumn(
            "Value",
            QbSingleMeasureObservationValue(
                NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
            ),
        ),
    ]
    context.cube = Cube(
        get_standard_catalog_metadata_for_name(cube_name), data, columns
    )
@Given(
    'a single-measure QbCube named "{cube_name}" with codes not defined in the code-list'
)
def step_impl(context, cube_name: str):
    """Store a cube whose "A" code-list deliberately omits code "c" used by the data."""
    columns = [
        QbColumn(
            "A",
            NewQbDimension(
                "A code list",
                code_list=NewQbCodeList(
                    get_standard_catalog_metadata_for_name("A code list"),
                    [NewQbConcept("a"), NewQbConcept("b")],  # Deliberately missing "c"
                ),
            ),
        ),
        QbColumn("D", NewQbDimension.from_data("D code list", _standard_data["D"])),
        QbColumn(
            "Value",
            QbSingleMeasureObservationValue(
                NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
            ),
        ),
    ]
    context.cube = Cube(
        get_standard_catalog_metadata_for_name(cube_name), _standard_data, columns
    )
@Given(
    'a single-measure QbCube named "{cube_name}" with optional attribute values missing'
)
def step_impl(context, cube_name: str):
    """Store a cube with an optional attribute column containing a NaN (row 2)."""
    data = pd.DataFrame(
        {
            "Some Dimension": ["a", "b", "c"],
            "Some Attribute": ["attr-a", float("nan"), "attr-c"],
            "Value": [1, 2, 3],
        }
    )
    columns = [
        QbColumn(
            "Some Dimension",
            NewQbDimension.from_data("Some Dimension", data["Some Dimension"]),
        ),
        QbColumn(
            "Some Attribute",
            NewQbAttribute.from_data("Some Attribute", data["Some Attribute"]),
        ),
        QbColumn(
            "Value",
            QbSingleMeasureObservationValue(
                NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
            ),
        ),
    ]
    context.cube = Cube(
        get_standard_catalog_metadata_for_name(cube_name), data, columns
    )
@Given('a multi-measure QbCube named "{cube_name}"')
def step_impl(context, cube_name: str):
    """Store a cube whose measure varies per row via a measure-dimension column."""
    data = pd.DataFrame(
        {
            "A": ["a_height", "a_length"],
            "Measure": ["height", "length"],
            "Value": [1, 20],
        }
    )
    columns = [
        QbColumn("A", NewQbDimension.from_data("A Dimension", data["A"])),
        QbColumn(
            "Measure", QbMultiMeasureDimension.new_measures_from_data(data["Measure"])
        ),
        QbColumn(
            "Value",
            QbMultiMeasureObservationValue(unit=NewQbUnit("meters")),
        ),
    ]
    context.cube = Cube(
        get_standard_catalog_metadata_for_name(cube_name), data, columns
    )
@Given('a multi-measure QbCube named "{cube_name}" with duplicate rows')
def step_impl(context, cube_name: str):
    """Store a multi-measure cube whose first two rows are identical (invalid case)."""
    data = pd.DataFrame(
        {
            "A": ["a_height", "a_height", "a_length"],
            "Measure": ["height", "height", "length"],
            "Value": [1, 1, 20],
        }
    )
    columns = [
        QbColumn("A", NewQbDimension.from_data("A Dimension", data["A"])),
        QbColumn(
            "Measure", QbMultiMeasureDimension.new_measures_from_data(data["Measure"])
        ),
        QbColumn(
            "Value",
            QbMultiMeasureObservationValue(unit=NewQbUnit("meters")),
        ),
    ]
    context.cube = Cube(
        get_standard_catalog_metadata_for_name(cube_name), data, columns
    )
@Given(
    'a single-measure QbCube named "{cube_name}" with new attribute values and units'
)
def step_impl(context, cube_name: str):
    """Store a cube mixing an existing dimension with a new attribute, measure and unit."""
    data = pd.DataFrame(
        {
            "Existing Dimension": ["a", "b", "c"],
            "New Attribute": ["pending", "final", "in-review"],
            "Value": [2, 2, 2],
        }
    )
    columns = [
        QbColumn(
            "Existing Dimension", ExistingQbDimension("http://existing-dimension")
        ),
        QbColumn(
            "New Attribute",
            NewQbAttribute.from_data("New Attribute", data["New Attribute"]),
        ),
        QbColumn(
            "Value",
            QbSingleMeasureObservationValue(
                NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
            ),
        ),
    ]
    context.cube = Cube(
        get_standard_catalog_metadata_for_name(cube_name), data, columns
    )
@Given(
    'a single-measure QbCube named "{cube_name}" with one new unit extending another new unit'
)
def step_impl(context, cube_name: str):
    """Store a cube whose unit is a new unit derived (by scaling) from another new base unit."""
    columns = [
        QbColumn(
            "A",
            ExistingQbDimension("http://example.org/some/dimension/a"),
            csv_column_uri_template="http://example.org/some/codelist/a",
        ),
        QbColumn(
            "D",
            ExistingQbDimension("http://example.org/some/dimension/d"),
            csv_column_uri_template="http://example.org/some/codelist/d",
        ),
        QbColumn(
            "Value",
            QbSingleMeasureObservationValue(
                NewQbMeasure("Some Measure"),
                NewQbUnit(
                    "Some Extending Unit",
                    base_unit=NewQbUnit("Some Base Unit"),
                    base_unit_scaling_factor=1000,
                    qudt_quantity_kind_uri="http://some-quantity-kind",
                    si_base_unit_conversion_multiplier=25.123123,
                ),
            ),
        ),
    ]
    context.cube = Cube(
        get_standard_catalog_metadata_for_name(cube_name), _standard_data, columns
    )
@Then('turtle should be written to "{file}"')
def step_impl(context, file: str):
    """Write the turtle accumulated on the context out to `file` in the temp dir."""
    temp_dir = get_context_temp_dir_path(context)
    # `temp_dir / file` is already a Path — the previous extra Path(...) wrap
    # was redundant; write_text also handles open/close for us.
    (temp_dir / file).write_text(context.turtle)
@When("the cube is serialised to CSV-W")
def step_impl(context):
    """Serialise the context's cube to CSV-W in the temp dir; record the CSV name."""
    writer = QbWriter(context.cube)
    temp_dir = get_context_temp_dir_path(context)
    writer.write(temp_dir)
    context.csv_file_name = writer.csv_file_name
@When("the cube is serialised to CSV-W (suppressing missing uri value exceptions)")
def step_impl(context):
    """Serialise the context's cube, tolerating missing-URI-safe-value errors."""
    writer = QbWriter(context.cube, raise_missing_uri_safe_value_exceptions=False)
    temp_dir = get_context_temp_dir_path(context)
    writer.write(temp_dir)
    # Record the CSV file name for later steps, for consistency with the
    # plain serialisation step (which already does this).
    context.csv_file_name = writer.csv_file_name
@Step('the CSVqb should fail validation with "{validation_error}"')
def step_impl(context, validation_error: str):
    """Assert that at least one validation error message contains `validation_error`."""
    cube: Cube = context.cube
    errors = cube.validate()
    errors += validate_qb_component_constraints(context.cube)
    # Generator instead of a throwaway list inside any(); the assertion
    # message still lists every error encountered, to ease debugging.
    assert any(validation_error in e.message for e in errors), [
        e.message for e in errors
    ]
@Step("the CSVqb should pass all validations")
def step_impl(context):
    """Assert the cube (and any prior CSV loading) produced no validation errors."""
    cube: QbCube = context.cube
    data_loading_errors: List[ValidationError] = (
        context.data_loading_errors if hasattr(context, "data_loading_errors") else []
    )
    errors = cube.validate() + data_loading_errors
    errors += validate_qb_component_constraints(context.cube)
    assert len(errors) == 0, [e.message for e in errors]
    # Report the CSV-loading errors specifically; the original message
    # interpolated `errors` (already asserted empty above) by mistake.
    assert (
        len(data_loading_errors) == 0
    ), f"Errors were found in the csv: {[e.message for e in data_loading_errors]}"
@Given(
    'a single-measure QbCube named "{cube_name}" with "{type}" "{data_type}" attribute'
)
def step_impl(context, cube_name: str, type: str, data_type: str):
    """Store a cube with one literal attribute column of the requested kind.

    `type` selects "new" vs "existing" attribute classes; `data_type` selects
    which data column ("Reg"/int, "Appeared"/date, "First_Captain"/string)
    carries the attribute. The unused candidate columns are suppressed.

    NOTE(review): an unrecognised `data_type` leaves `columns` unbound and the
    Cube(...) call below raises NameError — confirm whether an explicit error
    is preferable.
    """
    data = pd.DataFrame(
        {
            "A": ["uss-cerritos", "uss-titan"],
            "Value": [1, 1],
            "Reg": [75567, 80102],
            "Appeared": ["2020-08-06", "2020-10-08"],
            "First_Captain": ["William Riker", "Carol Freeman"],
        }
    )
    dim = QbColumn("A", NewQbDimension.from_data("A Dimension", data["A"]))
    val = QbColumn(
        "Value",
        QbSingleMeasureObservationValue(
            NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
        ),
    )
    if data_type == "int":
        if type == "new":
            att = QbColumn(
                "Reg",
                NewQbAttributeLiteral(data_type="int", label="Reg"),
            )
        else:
            att = QbColumn(
                "Reg",
                ExistingQbAttributeLiteral(
                    data_type="int", attribute_uri="http://some-uri"
                ),
            )
        sp1 = SuppressedCsvColumn("Appeared")
        sp2 = SuppressedCsvColumn("First_Captain")
        columns = [dim, val, att, sp1, sp2]
    elif data_type == "date":
        sp1 = SuppressedCsvColumn("Reg")
        if type == "new":
            att = QbColumn(
                "Appeared", NewQbAttributeLiteral(data_type="date", label="Appeared")
            )
        else:
            att = QbColumn(
                "Appeared",
                ExistingQbAttributeLiteral(
                    data_type="date", attribute_uri="http://some-uri"
                ),
            )
        sp2 = SuppressedCsvColumn("First_Captain")
        columns = [dim, val, sp1, att, sp2]
    elif data_type == "string":
        sp1 = SuppressedCsvColumn("Reg")
        sp2 = SuppressedCsvColumn("Appeared")
        if type == "new":
            att = QbColumn(
                "First_Captain",
                NewQbAttributeLiteral(data_type="string", label="First Captain"),
            )
        else:
            att = QbColumn(
                "First_Captain",
                ExistingQbAttributeLiteral(
                    data_type="string", attribute_uri="http://some-uri"
                ),
            )
        columns = [dim, val, sp1, sp2, att]
    context.cube = Cube(
        get_standard_catalog_metadata_for_name(cube_name), data, columns
    )
@Given(
    'a single-measure QbCube named "{cube_name}" with all new units/measures/dimensions/attributes/codelists'
)
def step_impl(context, cube_name: str):
    """Store a single-measure cube built entirely from newly-defined components."""
    data = pd.DataFrame(
        {
            "New Dimension": ["a", "b", "c"],
            "New Attribute": ["university", "students", "masters"],
            "Observed Value": [1, 2, 3],
        }
    )
    columns = [
        QbColumn(
            "New Dimension",
            NewQbDimension(
                "a new codelist",
                code_list=NewQbCodeList(
                    get_standard_catalog_metadata_for_name("a new codelist"),
                    [NewQbConcept("a"), NewQbConcept("b"), NewQbConcept("c")],
                ),
            ),
        ),
        QbColumn(
            "New Attribute",
            NewQbAttribute.from_data("new_Qb_attribute", data["New Attribute"]),
        ),
        QbColumn(
            "Observed Value",
            QbSingleMeasureObservationValue(
                NewQbMeasure("Part-time"), NewQbUnit("Num of Students")
            ),
        ),
    ]
    cube = Cube(get_standard_catalog_metadata_for_name(cube_name), data, columns)
    # Later steps assume this fixture is itself valid, so fail fast here.
    errors = cube.validate()
    errors += validate_qb_component_constraints(cube)
    assert len(errors) == 0, [e.message for e in errors]
    context.cube = cube
@Given(
    'a multi-measure QbCube named "{cube_name}" with all new units/measures/dimensions/attributes/codelists'
)
def step_impl(context, cube_name: str):
    """Store a multi-measure cube built entirely from newly-defined components."""
    data = pd.DataFrame(
        {
            "New Dimension": ["a", "b", "c"],
            "New Attribute": ["university", "students", "masters"],
            "Observed Value": [1, 2, 3],
            "Measure": ["part-time", "full-time", "flex-time"],
        }
    )
    columns = [
        QbColumn(
            "New Dimension",
            NewQbDimension(
                "New Dimension",
                code_list=NewQbCodeList(
                    get_standard_catalog_metadata_for_name("a new codelist"),
                    [NewQbConcept("a"), NewQbConcept("b"), NewQbConcept("c")],
                ),
            ),
        ),
        QbColumn(
            "New Attribute",
            NewQbAttribute.from_data("New Attribute", data["New Attribute"]),
        ),
        QbColumn(
            "Observed Value",
            QbMultiMeasureObservationValue(unit=NewQbUnit("Num of students")),
        ),
        QbColumn(
            "Measure", QbMultiMeasureDimension.new_measures_from_data(data["Measure"])
        ),
    ]
    cube = Cube(get_standard_catalog_metadata_for_name(cube_name), data, columns)
    # Later steps assume this fixture is itself valid, so fail fast here.
    errors = cube.validate()
    errors += validate_qb_component_constraints(cube)
    assert len(errors) == 0, [e.message for e in errors]
    context.cube = cube
@Given(
    'a single measure QbCube named "{cube_name}" with existing units/measure/dimensions/attribute/codelists'
)
def step_impl(context, cube_name: str):
    """Store a single-measure cube that reuses existing RDF components by URI."""
    data = pd.DataFrame(
        {
            "Existing Dimension": ["a", "b", "c"],
            "New Dimension": ["d", "e", "f"],
            "Existing Attribute": ["university", "students", "masters"],
            "Observed Value": [1, 2, 3],
        }
    )
    columns = [
        QbColumn(
            csv_column_title="Existing Dimension",
            structural_definition=ExistingQbDimension("http://existing/dimension"),
            csv_column_uri_template="http://existing/dimension/code-list/{+existing_dimension}",
        ),
        QbColumn(
            csv_column_title="New Dimension",
            structural_definition=NewQbDimension(
                label="existing codelist",
                code_list=ExistingQbCodeList(
                    concept_scheme_uri="http://existing/concept/scheme/uri"
                ),
            ),
        ),
        QbColumn(
            csv_column_title="Existing Attribute",
            structural_definition=ExistingQbAttribute("http://existing/attribute"),
            csv_column_uri_template="http://existing/attribute/{+existing_attribute}",
        ),
        QbColumn(
            csv_column_title="Observed Value",
            structural_definition=QbSingleMeasureObservationValue(
                ExistingQbMeasure("http://existing/measure"),
                ExistingQbUnit("http://exisiting/unit"),
            ),
        ),
    ]
    cube = Cube(get_standard_catalog_metadata_for_name(cube_name), data, columns)
    # Later steps assume this fixture is itself valid, so fail fast here.
    errors = cube.validate()
    errors += validate_qb_component_constraints(cube)
    assert len(errors) == 0, [e.message for e in errors]
    context.cube = cube
@Given(
    'a multi measure QbCube named "{cube_name}" with existing units/measure/dimensions/attribute/codelists'
)
def step_impl(context, cube_name: str):
    """Store a multi-measure cube that reuses existing RDF components by URI.

    Units and measures vary per row via the "Units" and "Existing Measures"
    columns, each mapped through a csv_column_uri_template.
    """
    data = pd.DataFrame(
        {
            "Existing Dimension": ["a", "b", "c"],
            "New Dimension": ["d", "e", "f"],
            "Existing Attribute": ["university", "students", "masters"],
            "Observed Value": [1, 2, 3],
            "Units": ["gbp", "count", "count"],
            "Existing Measures": ["part-time", "full-time", "flex-time"],
        }
    )
    columns = [
        QbColumn(
            "Existing Dimension",
            ExistingQbDimension("http://existing/dimension"),
            csv_column_uri_template="http://existing/dimension/code-list/{+existing_dimension}",
        ),
        QbColumn(
            csv_column_title="New Dimension",
            structural_definition=NewQbDimension(
                label="existing codelist",
                code_list=ExistingQbCodeList(
                    concept_scheme_uri="http://gss-data.org.uk/def/concept-scheme/some-existing-codelist"
                ),
            ),
        ),
        QbColumn(
            csv_column_title="Existing Attribute",
            structural_definition=ExistingQbAttribute("http://existing/attribute"),
            csv_column_uri_template="http://existing/attribute/{+existing_attribute}",
        ),
        QbColumn(
            "Observed Value",
            QbMultiMeasureObservationValue("number"),
        ),
        QbColumn(
            "Units",
            QbMultiUnits(
                [
                    ExistingQbUnit("http://existing/unit/gbp"),
                    ExistingQbUnit("http://existing/unit/count"),
                ]
            ),
            csv_column_uri_template="http://existing/unit/{+units}",
        ),
        QbColumn(
            "Existing Measures",
            QbMultiMeasureDimension(
                [
                    ExistingQbMeasure("http://existing/measure/part-time"),
                    ExistingQbMeasure("http://existing/measure/full-time"),
                    ExistingQbMeasure("http://existing/measure/flex-time"),
                ]
            ),
            csv_column_uri_template="http://existing/measure/{+existing_measures}",
        ),
    ]
    cube = Cube(get_standard_catalog_metadata_for_name(cube_name), data, columns)
    # Later steps assume this fixture is itself valid, so fail fast here.
    errors = cube.validate()
    errors += validate_qb_component_constraints(cube)
    assert len(errors) == 0, [e.message for e in errors]
    context.cube = cube
@Given('a QbCube named "{cube_name}" which references a legacy composite code-list')
def step_impl(context, cube_name: str):
    """Store a cube whose dimension uses a composite code-list CSV-W from the test cases dir."""
    data = pd.DataFrame(
        {
            "Location": [
                "http://data.europa.eu/nuts/code/UKC",
                "http://data.europa.eu/nuts/code/UKL",
                "http://data.europa.eu/nuts/code/UKD",
            ],
            "Observed Value": [1, 2, 3],
        }
    )
    columns = [
        QbColumn(
            "Location",
            NewQbDimension(
                "Location",
                code_list=NewQbCodeListInCsvW(
                    _test_case_dir
                    / "readers"
                    / "skoscodelistreader"
                    / "location.csv-metadata.json"
                ),
            ),
        ),
        QbColumn(
            "Observed Value",
            QbSingleMeasureObservationValue(
                unit=NewQbUnit("Num of students"), measure=NewQbMeasure("Total")
            ),
        ),
    ]
    cube = Cube(get_standard_catalog_metadata_for_name(cube_name), data, columns)
    # Later steps assume this fixture is itself valid, so fail fast here.
    errors = cube.validate()
    errors += validate_qb_component_constraints(cube)
    assert len(errors) == 0, [e.message for e in errors]
    context.cube = cube
@Then("some additional turtle is appended to the resulting RDF")
def step_impl(context):
    """Append the step's multiline text (behave's `context.text`) to the turtle buffer."""
    rdf_to_add = context.text.strip()
    context.turtle += rdf_to_add
@Then("the cube's metadata should contain URLs with file endings")
def step_impl(context):
    """Assert the serialised metadata uses Standard-style URIs (with .csv/.json endings)."""
    temp_dir = get_context_temp_dir_path(context)
    assertURIStyle(URIStyle.Standard, temp_dir, context.csv_file_name)
@Then("the cube's metadata should contain URLs without file endings")
def step_impl(context):
    """Assert the serialised metadata uses extension-less URIs."""
    temp_dir = get_context_temp_dir_path(context)
    assertURIStyle(URIStyle.WithoutFileExtensions, temp_dir, context.csv_file_name)
@Given(
    'a single-measure QbCube named "{cube_name}" configured with "{uri_style}" URI style'
)
def step_impl(context, cube_name: str, uri_style: str):
    """Store the standard cube with the named URIStyle enum member applied."""
    context.cube = _get_single_measure_cube_with_name_and_id(
        cube_name, None, URIStyle[uri_style]
    )
def assertURIStyle(uri_style: URIStyle, temp_dir: Path, csv_file_name: str):
    """Parse the CSV-W metadata for `csv_file_name` and check every base-relative URI's style."""
    base_uri = "file://relative-uris/"
    metadata_file_path = temp_dir.joinpath(f"{csv_file_name}-metadata.json")
    graph = Graph()
    graph.parse(metadata_file_path, publicID=base_uri)
    for s, p, o in graph:
        # Only URIs resolved against the document base are subject to the rule.
        if s.startswith(base_uri):
            assert_uri_style_for_uri(uri_style, s, (s, p, o))
        if p.startswith(base_uri):
            assert_uri_style_for_uri(uri_style, p, (s, p, o))
def assert_uri_style_for_uri(uri_style: URIStyle, uri: str, node):
    """Assert a single URI's path matches the expected file-ending rule for `uri_style`."""
    uri_path = urlparse(uri).path
    if uri_style == URIStyle.WithoutFileExtensions:
        assert not uri_path.endswith(
            ".csv"
        ), f"expected {node} to end without a CSV file extension"
    else:
        # Standard style keeps physical file extensions in the URIs.
        assert uri_path.endswith(
            (".csv", ".json")
        ), f"expected {node} to end with .csv or .json"
| GDonRanasinghe/csvcubed-models-test-5 | csvcubed/tests/behaviour/steps/qbwriter.py | qbwriter.py | py | 27,298 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "csvcubeddevtools.helpers.file.get_test_cases_dir",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "behave.Given",
"line_number": 52,
"usage_type": "call"
},
{
"ap... |
29965694924 | import pyowm
import telebot
# Weather client (Russian descriptions) and Telegram bot client.
# NOTE(review): API key and bot token are hard-coded — they should be moved
# to environment variables/config and revoked before publishing this code.
owm = pyowm.OWM('6d00d1d4e704068d70191bad2673e0cc', language = "ru")
bot = telebot.TeleBot( "1031233548:AAFfUXO0e8bDuOTWaQbHQCCuA_YJwRbqQlY" )
@bot.message_handler(content_types=['text'])
def send_echo(message):
    """Reply to any text message with the current weather for that place name."""
    weather = owm.weather_at_place(message.text).get_weather()
    temp = weather.get_temperature('celsius')["temp"]
    # Clothing hint depends on the temperature band.
    if temp < 0:
        hint = " Очень холодно "
    elif temp < 10:
        hint = " Одевайся теплее"
    elif temp < 20:
        hint = " Прохладно "
    else:
        hint = " Можно и в шортиках "
    answer = (
        " В городе " + message.text + " сейчас " + weather.get_detailed_status() + "\n"
        + " Температура сейчас в районе " + str(temp) + "\n\n"
        + hint
    )
    bot.send_message(message.chat.id, answer)
bot.polling( none_stop = True) | Neynara/witherin | Bot.py | Bot.py | py | 886 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "pyowm.OWM",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "telebot.TeleBot",
"line_number": 5,
"usage_type": "call"
}
] |
34839501596 | import numpy as np
import torch
import torchvision
import PIL
import os
def save_video(img, outdir, drange, fname="video0.mp4", normalize=True):
    """Save a single video tensor to `outdir/fname` as an mp4 (8 fps).

    Args:
        img: video tensor of shape (1, C, T, H, W); assumes a batch of exactly
            one video since the batch axis is removed with np.squeeze — TODO confirm.
        outdir: directory the video file is written into.
        drange: (lo, hi) source value range used when `normalize` is True.
        fname: output file name.
        normalize: when True, linearly map `drange` onto [0, 255] uint8.
    """
    _, C, T, H, W = img.shape
    # Fix: the tensor must be detach()ed from the autograd graph before the
    # NumPy conversion — the original ".xdetach()" was a typo and raised
    # AttributeError at runtime.
    img = img.detach().cpu().numpy()
    if normalize:
        lo, hi = drange
        img = np.asarray(img, dtype=np.float32)
        img = (img - lo) * (255 / (hi - lo))
        img = np.rint(img).clip(0, 255).astype(np.uint8)
    img = np.squeeze(img)  # drop the singleton batch axis -> (C, T, H, W)
    img = img.transpose(1, 2, 3, 0)  # -> (T, H, W, C), the layout write_video expects
    if C == 3:
        # Only RGB videos are written; other channel counts are silently skipped.
        torchvision.io.write_video(os.path.join(outdir, fname), torch.from_numpy(img), fps=8)
| interiit-Team10/HP_BO_DIGAN | src/scripts/__init__.py | __init__.py | py | 979 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.asarray",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.rint",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_n... |
22360354761 | from dateutil.relativedelta import relativedelta
from odoo.tests import common
from odoo import fields
class TestContractPriceRevision(common.SavepointCase):
    """Tests for the contract price-revision wizard and its invoicing effects."""

    @classmethod
    def setUpClass(cls):
        """Create a partner, a product and a monthly contract with two lines.

        The first line uses `automatic_price` (pricelist-driven), the second a
        fixed manual price — both paths of the revision wizard are exercised.
        """
        super(TestContractPriceRevision, cls).setUpClass()
        partner = cls.env['res.partner'].create({
            'name': 'Partner test',
        })
        product = cls.env['product.product'].create({
            'name': 'Test Product',
        })
        cls.contract = cls.env['account.analytic.account'].create({
            'name': 'Contract test',
            'partner_id': partner.id,
            'date_start': fields.Date.today(),
            'recurring_next_date': fields.Date.to_string(
                fields.date.today() + relativedelta(days=7)),
            'recurring_rule_type': 'monthly',
            'recurring_invoice_line_ids': [(0, 0, {
                'product_id': product.id,
                'quantity': 1.0,
                'uom_id': product.uom_id.id,
                'name': product.name,
                'price_unit': 33.0,
                'automatic_price': True,
            }), (0, 0, {
                'product_id': product.id,
                'quantity': 1.0,
                'uom_id': product.uom_id.id,
                'name': product.name,
                'price_unit': 25.0,
                'automatic_price': False,
            })]
        })

    def execute_wizard(self):
        """Run the revision wizard on the contract: +100% for one year from today."""
        wizard = self.env['create.revision.line.wizard'].create({
            'date_start': fields.Date.today(),
            'date_end': fields.Date.to_string(
                fields.date.today() + relativedelta(years=1)),
            'variation_percent': 100.0,
        })
        wizard.with_context(
            {'active_ids': [self.contract.id]}).action_apply()

    def test_contract_price_revision_wizard(self):
        """The wizard adds one revised line (25.0 -> 50.0) to the manual-price line."""
        self.assertEqual(len(self.contract.recurring_invoice_line_ids.ids), 2)
        self.execute_wizard()
        self.assertEqual(len(self.contract.recurring_invoice_line_ids.ids), 3)
        lines = self.contract.mapped('recurring_invoice_line_ids').filtered(
            lambda x: x.price_unit == 50.0)
        self.assertEqual(len(lines), 1)

    def test_contract_price_revision_invoicing(self):
        """Invoicing after revision yields one invoice with the revised 50.0 line."""
        self.execute_wizard()
        self.contract.recurring_create_invoice()
        invoices = self.env['account.invoice'].search([
            ('contract_id', '=', self.contract.id)])
        self.assertEqual(len(invoices), 1)
        lines = invoices.mapped('invoice_line_ids')
        self.assertEqual(len(lines), 2)
        lines = lines.filtered(lambda x: x.price_unit == 50.0)
        self.assertEqual(len(lines), 1)
| detian08/bsp_addons | contract-11.0/contract_price_revision/tests/test_contract_price_revision.py | test_contract_price_revision.py | py | 2,670 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "odoo.tests.common.SavepointCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "odoo.tests.common",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Date.today",
"line_number": 20,
"usage_type": "call"
},
{
"api_n... |
2500846027 | # author: Tran Quang Loc (darkkcyan)
# editorial: https://codeforces.com/blog/entry/8166
# Note: I switched to python for this problem because I want my check function to always use integer number
# I tried to solve this problem using C++ and got overflow even with long long number
# (and really, never change it to unsigned long long because there are subtractions in the checking equation).
from collections import deque
class line:
    """A linear function y = k*x + b, used as one line of the convex hull."""

    def __init__(self, k, b):
        self.k = k
        self.b = b

    def get(self, x):
        """Evaluate the line at x."""
        return self.b + self.k * x
def check(l1, l2, nl):
    """Return True when l2 becomes useless on the hull once nl is appended.

    Integer-only cross-multiplication form of the intersection comparison,
    so there is no floating-point error.
    """
    lhs = (nl.b - l2.b) * (l1.k - l2.k)
    rhs = (nl.k - l2.k) * (l1.b - l2.b)
    return lhs - rhs <= 0
# Problem input: n, then arrays a (query points, non-decreasing) and b (line slopes).
n = int(input())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
ans = 0 # we don't even need the entire dp array, because the deque store the value of all previous ones.
# this variable contains the current computed dp value
hull = deque()
# Seed the lower envelope with the line for index 0 (dp[0] = 0).
hull.append(line(b[0], 0))
for i in range(1, n):
    arg = a[i]
    # Queries arrive in non-decreasing order of a[i], so lines that are no
    # longer optimal can be popped from the FRONT instead of binary-searched.
    while len(hull) > 1 and hull[0].get(arg) >= hull[1].get(arg):
        hull.popleft()
    ans = hull[0].get(arg)
    nl = line(b[i], ans)
    # Maintain the convex hull: drop back lines made useless by the new line.
    while len(hull) > 1 and check(hull[-2], hull[-1], nl):
        hull.pop()
    hull.append(nl)
print(ans)
| quangloc99/CompetitiveProgramming | Codeforces/CF319-D1-C.py | CF319-D1-C.py | py | 1,270 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 26,
"usage_type": "call"
}
] |
306333387 | from fastapi import status, HTTPException, Depends, APIRouter
from database import SessionLocal
import models, schemas, utils
# All endpoints in this module are mounted under the /merchants prefix.
router = APIRouter(
    prefix="/merchants",
    tags=['Merchants']
)
@router.post("/", status_code=status.HTTP_201_CREATED, response_model=schemas.MerchantResponse)
def create_merchant(merchant: schemas.MerchantCreate):
    """Create a new merchant record, hashing the plaintext password before storage."""
    # hash the password - user.password
    merchant.password = utils.hash(merchant.password)
    new_merchant = models.Merchant(**merchant.dict())
    # NOTE(review): SessionLocal is used here as if it were a session instance;
    # conventionally it is a sessionmaker factory requiring a per-request
    # session (e.g. Depends(get_db)) — confirm against database.py.
    SessionLocal.add(new_merchant)
    SessionLocal.commit()
    SessionLocal.refresh(new_merchant)
    return new_merchant
@router.get("/{id}", response_model=schemas.MerchantResponse)
def get_merchant(id: int):
    """Return the merchant with the given id; 404 when it does not exist."""
    merchant = SessionLocal.query(models.Merchant).filter(models.Merchant.id == id).first()
    if not merchant:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"Merchant with id:{id} was not found")
    return merchant
@router.put("/{id}",response_model=schemas.MerchantResponse)
def update_merchant(id: int, updated_merchant: schemas.MerchantCreate):
    """Replace the stored fields of merchant `id`; 404 when it does not exist."""
    merchant_query = SessionLocal.query(models.Merchant).filter(models.Merchant.id == id)
    merchant = merchant_query.first()
    if merchant == None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"Merchant with id:{id} does not exit")
    # NOTE(review): unlike create_merchant, the incoming password is stored
    # without hashing here — confirm whether that is intended.
    merchant_query.update(updated_merchant.dict(),synchronize_session=False)
    SessionLocal.commit()
    return merchant_query.first()
{
"api_name": "fastapi.APIRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "schemas.MerchantCreate",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "utils.hash",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.Mercha... |
23248264647 | # Usage:
# python advertising_email.py username email_text.txt csv_of_emails.csv attachment1 attachment2 ...
import smtplib
from getpass import getpass
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import sys
import csv
import time
SMTP_SERVER = "outgoing.mit.edu"
SMTP_SERVER_PORT = '465'
REPLY_TO = "physgaap@mit.edu"
email_account_name = sys.argv[1]
with open(sys.argv[2], 'r') as file:
message_content = file.read()
message_subject = "MIT Physics Graduate Application Assistance Program 2021"
message_cc = ""
message_bcc = "npaladin@mit.edu"
attachments = []
for arg in sys.argv[4:]:
payload = MIMEBase('application', 'octate-stream')
payload.set_payload(open(arg, 'rb').read())
encoders.encode_base64(payload)
payload.add_header('Content-Decomposition', 'attachment', filename=arg)
attachments.append(payload)
server = smtplib.SMTP_SSL('%s:%s' % (SMTP_SERVER, SMTP_SERVER_PORT))
server.login(email_account_name, getpass(prompt="Email Password: "))
with open(sys.argv[3],'r') as csv_file:
data = csv.reader(csv_file)
next(data)
for row in data:
uni_name = row[0]
contact_name = row[2]
custom_message_subject = message_subject.replace('{{University}}', uni_name).replace('{{university}}', uni_name).replace('{{Recipient}}', contact_name).replace('{{recipient}}', contact_name)
custom_message_content = message_content.replace('{{University}}', uni_name).replace('{{university}}', uni_name).replace('{{Recipient}}', contact_name).replace('{{recipient}}', contact_name)
message_to = row[1]
message_from = "%s@mit.edu" % email_account_name
message_to_all = message_to.split(",") + message_cc.split(",") + message_bcc.split(",")
message = MIMEMultipart()
message.attach(MIMEText(custom_message_content, 'html'))
message['From'] = message_from
message['Reply-To'] = REPLY_TO
message['To'] = message_to
message['Cc'] = message_cc
message['Subject'] = custom_message_subject
for attachment in attachments:
message.attach(attachment)
server.send_message(message, message_from, message_to_all)
time.sleep(2)
server.quit() | ngpaladi/PhysGAAP-Tools | mailer/advertising_email.py | advertising_email.py | py | 2,321 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "email.mime.base.MIMEBase",
... |
17657890511 | import time
import speech_recognition as sr
import pyttsx3
engine = pyttsx3.init()
r = sr.Recognizer()
voices = engine.getProperty('voices')
# to check the voices available in the system
'''for voice in voices:
print("Voice:")
print("ID: %s" %voice.id)
print("Name: %s" %voice.name)
print("Age: %s" %voice.age)
print("Gender: %s" %voice.gender)
print("Languages Known: %s" %voice.languages) '''
# female
engine.setProperty('voice', "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0")
engine.say("Hello, I am Zira.")
engine.runAndWait()
# male
engine.setProperty('voice', "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_DAVID_11.0")
engine.say("Hello,I am David.")
engine.runAndWait()
# Voice menu
# Main interactive loop: speak/print the language menu, record one utterance,
# transcribe it with Google speech recognition and read the result back.
while (1):
    engine.say("Choose the language you want to speak in")
    engine.runAndWait()
    time.sleep(0.2)
    # (menu label, google speech-recognition language code); None = ask user.
    menu = [
        ("1. English", 'en'),
        ("2. Hindi", 'hi-IN'),
        ("3. Kannada", 'kn-IN'),
        ("4. Bengali", 'bn-IN'),
        ("5. Malayalam", 'ml-IN'),
        ("6. Marathi", 'mr-IN'),
        ("7. Urdu", 'ur'),
        ("8. Others", None),
    ]
    for label, _code in menu:
        print(label)
        engine.say(label)
        engine.runAndWait()
        time.sleep(0.2)
    n = int(input("\nEnter your choice:"))
    # Default to English for any unlisted choice (matches original behaviour).
    lang = 'en'
    if n == 0:
        exit(0)
    elif n == 8:
        lang = input("Enter the google language code of the language you want to see the output in: ")
    elif 1 <= n <= 7:
        lang = menu[n - 1][1]
    with sr.Microphone() as source:
        engine.say("Mic testing..")
        engine.runAndWait()
        # Fix: adjust_for_ambient_noise calibrates the recogniser in place and
        # returns None — the old code pointlessly bound its result to `audio`.
        r.adjust_for_ambient_noise(source)
        print("Say something")
        audio = r.listen(source)
        engine.say("Time is over. Thanks.")
        engine.runAndWait()
        try:
            print("You said: ' " + r.recognize_google(audio, language=lang) + "'")
            time.sleep(5)
        # Fix: modern SpeechRecognition raises UnknownValueError/RequestError,
        # not LookupError (kept for very old library versions).
        except (LookupError, sr.UnknownValueError, sr.RequestError):
            engine.say("Could not understand audio. Do you want to try again?")
            engine.runAndWait()
    engine.say("Do you want to continue?")
    engine.runAndWait()
    y = int(input("Enter 0 to quit"))
    if y == 0:
        exit(0)
    engine.runAndWait()
{
"api_name": "pyttsx3.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Recognizer",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
24247317201 | import tkinter
import customtkinter
import random
cards = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 11]
def draw():
    """Deal one card to the player (and to the dealer while it sits at 16
    or below), then refresh both score labels.  Calls end_game() as soon
    as either side goes over 21, which resets the round state."""
    global player_score
    global enemy_score
    win_label.configure(text=" ")
    # Player always receives a card.
    card = cards[random.randint(0, len(cards) - 1)]
    player_cards.append(card)
    player_score += int(card)
    # Dealer hits only while at 16 or below (house rule).
    if enemy_score <= 16:
        card = cards[random.randint(0, len(cards) - 1)]
        enemy_cards.append(card)
        enemy_score += int(card)
    # Bust on either side ends the round immediately; end_game() zeroes
    # the scores, so the labels below then show the fresh-round values.
    if player_score > 21 or enemy_score > 21:
        end_game()
    player_card_label.configure(text="Your score: {}".format(player_score))
    enemy_card_label.configure(text="Enemy score: {}".format(enemy_score))
def end_game():
    """Settle the round and reset the shared round state.

    Outcome rules (busts must be checked before the plain score
    comparison): a player bust always loses; otherwise a dealer bust
    always wins.  The original code tested ``player_score < enemy_score``
    before ``enemy_score > 21``, so the player was told "You Lose!" even
    when the dealer had busted.
    """
    global player_score
    global enemy_score
    global player_cards
    global enemy_cards
    if player_score > 21:
        outcome = "You Lose!"
    elif enemy_score > 21:
        outcome = "You Win!"
    elif player_score < enemy_score:
        outcome = "You Lose!"
    elif player_score > enemy_score:
        outcome = "You Win!"
    else:
        outcome = "Tie!"
    win_label.configure(text=outcome)
    # Reset for the next round.
    player_score = 0
    enemy_score = 0
    player_cards = []
    enemy_cards = []
if __name__ == '__main__':
    # Round state shared (via `global`) with draw() and end_game().
    player_score = 0
    player_cards = []
    enemy_score = 0
    enemy_cards = []
    # Window setup.
    root_tk = customtkinter.CTk()
    root_tk.geometry("400x300")
    root_tk.title("Blackjack")
    customtkinter.set_appearance_mode("dark")
    customtkinter.set_default_color_theme("dark-blue")
    # Controls: deal another card, or stand and settle the round.
    draw_button = customtkinter.CTkButton(master=root_tk, text="Draw Card", command=draw)
    draw_button.place(relx=0.3, rely=0.8, anchor=tkinter.CENTER)
    stand_button = customtkinter.CTkButton(master=root_tk, text="Stand", command=end_game)
    stand_button.place(relx=0.7, rely=0.8, anchor=tkinter.CENTER)
    # Score readouts (player bottom, dealer top) and the win/lose banner.
    player_card_label = customtkinter.CTkLabel(master=root_tk, text="Blank")
    player_card_label.place(relx=0.5, rely=0.6, anchor=tkinter.CENTER)
    enemy_card_label = customtkinter.CTkLabel(master=root_tk, text="Blank")
    enemy_card_label.place(relx=0.5, rely=0.25, anchor=tkinter.CENTER)
    win_label = customtkinter.CTkLabel(master=root_tk, text=" ")
    win_label.place(relx=0.5, rely=0.35, anchor=tkinter.CENTER)
    root_tk.mainloop()
| anarkitty8/gui-blackjack | blackjack_gui.py | blackjack_gui.py | py | 2,420 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "random.randint",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTk",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "customtkinter.set_app... |
19499854601 | # -*- coding: utf-8 -*-
import pytest
from mdye_leetcode.solution_28 import Solution
# makes a Solution object b/c that's how leetcode rolls
@pytest.fixture(scope="module")
def sol():
    """Module-scoped fixture yielding one shared Solution instance."""
    yield Solution()
def test_solution_28_basic(sol: Solution):
    """LeetCode 28 (index of the first occurrence of needle in haystack):
    interior match, match at the end, match at index 0, and a miss."""
    assert sol.strStr("mississippi", "issip") == 4
    assert sol.strStr("foogzon", "zon") == 4
    assert sol.strStr("sadbutsad", "sad") == 0
    assert sol.strStr("leetcode", "leeto") == -1
# vim: autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4
| michaeldye/mdye-python-samples | src/mdye_leetcode/test/test_solution_28.py | test_solution_28.py | py | 513 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "mdye_leetcode.solution_28.Solution",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "mdye_leetcode.solution_28.Solution",
"line_number": 14,
"usage_type": "name"
}
] |
39425074538 | import discord
class DiscordClient(discord.Client):
    """Discord bot that announces the tracked players on startup and
    answers the "!rio rank" command in a single configured channel."""

    def __init__(self, channel: int, players: list):
        # channel: id of the text channel the bot talks in.
        # players: character names listed in the greeting message.
        self.channel: int = channel
        self.players: list = players
        # NOTE(review): discord.py >= 2.0 requires Client(intents=...);
        # confirm the pinned library version accepts a bare __init__.
        super().__init__()

    async def on_ready(self):
        """Send a greeting (with the player list) once connected."""
        print(f"{self.user} is connected!")
        channel = self.get_channel(self.channel)
        lines = [
            "Hello! Ready to bring you RaiderIO information 🎉",
            "I'll only list the following characters: If I'm missing any, please add them here: TBD",
            "",
            "\n".join(self.players),
        ]
        await channel.send("\n".join(lines))

    async def on_member_join(self, member):
        # Currently just logs the joining member.
        print(member)

    async def on_message(self, message):
        """Reply to "!rio rank" in the configured channel; ignore all else."""
        if message.author == self.user:
            return  # never respond to our own messages
        if str(message.channel.id) != str(self.channel):
            return  # wrong channel
        if message.content.lower().startswith("!rio rank"):
            channel = self.get_channel(self.channel)
            await channel.send("TBD, but Sylphyl Rocks!!! sozz Krugdir")
            return
| kevinrobayna/rio_discord_bot | rio_discord_bot/discord_client.py | discord_client.py | py | 1,073 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "discord.Client",
"line_number": 4,
"usage_type": "attribute"
}
] |
28359703116 | import csv
import DBN
import matplotlib.pyplot as plt
def getData(inp="../ABP_data_11traces_1min/dataset7.txt"):
    """Read space-separated observation rows from *inp*.

    Each line is expected to start with at least three numeric fields;
    only the first three are kept.  Returns a generator yielding one
    ``[float, float, float]`` row per line.
    """
    # The original called the Python-2-only builtin `file()` and never
    # closed the handle; `open()` in a context manager fixes both.
    with open(inp) as f:
        lines = f.readlines()
    return ([float(v) for v in l.split(" ")[:3]] for l in lines)
def main():
    """Filter the ABP observations through the DBN timestep by timestep,
    then plot the filtered diastolic/mean/systolic estimates (with
    uncertainty bands) against the raw data."""
    data = list(getData())
    bayesNet = DBN.DBN()
    dataOut = []
    count = 0
    for each in data:
        # for i in range(1000):
        print("timestep: " + str(count) + " Observation: " + str(each))
        # if (bayesNet.observe(each) != False):
        bayesNet.observe(each)
        bayesNet.elapseTime()
        dataOut.append(bayesNet.getStats())  # per-timestep stats dict
        count += 1
    # Unpack (estimate, error) pairs for each tracked variable.
    DiaObserved = [d["dia_bp"][0] for d in dataOut]
    MeanObserved = [d["mean_bp"][0] for d in dataOut]
    SysObserved = [d["sys_bp"][0] for d in dataOut]
    BagPressure = [d["bag_pressure"][0] for d in dataOut]
    DiaObservedErr = [d["dia_bp"][1] for d in dataOut]
    MeanObservedErr = [d["mean_bp"][1] for d in dataOut]
    SysObservedErr = [d["sys_bp"][1] for d in dataOut]
    BagPressureErr = [d["bag_pressure"][1] for d in dataOut]
    # Raw observation columns appear to be (mean, systolic, diastolic)
    # given the indices used below — TODO confirm against the dataset.
    DiaData = map(lambda x: x[2], data)
    MeanData = map(lambda x: x[0], data)
    SysData = map(lambda x: x[1], data)
    # NOTE(review): hard-coded 31 x-axis points; presumably matches the
    # dataset length — otherwise plt.plot raises a size mismatch. Verify.
    l = list(range(31))
    # Estimate vs. raw data, with a shaded +/- error band, per variable.
    plt.plot(l,DiaData)
    plt.plot(l,DiaObserved)
    plt.fill_between(l,list(x[0] - x[1] for x in zip(DiaObserved,DiaObservedErr)),list(x[0] + x[1] for x in zip(DiaObserved,DiaObservedErr)),interpolate=True)
    plt.plot(l,MeanData)
    plt.plot(l,MeanObserved)
    plt.fill_between(l,list(x[0] - x[1] for x in zip(MeanObserved,MeanObservedErr)),list(x[0] + x[1] for x in zip(MeanObserved,MeanObservedErr)),interpolate=True)
    plt.plot(l,SysData)
    plt.plot(l,SysObserved)
    plt.fill_between(l,list(x[0] - x[1] for x in zip(SysObserved,SysObservedErr)),list(x[0] + x[1] for x in zip(SysObserved,SysObservedErr)),interpolate=True)
    # plt.plot(l,BagPressure)
    # plt.fill_between(l,list(x[0] - x[1] for x in zip(BagPressure,BagPressureErr)),list(x[0] + x[1] for x in zip(BagPressure,BagPressureErr)),interpolate=True)
    plt.show()
    # return dataOut


if __name__ == "__main__":
    main()
| romiphadte/ICU-Artifact-Detection-via-Bayesian-Inference | ABP_DBN/run.py | run.py | py | 2,090 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "DBN.DBN",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pl... |
16304395489 | import cv2
import numpy as np
import apriltag
import collections
apriltag_detect_error_thres = 0.07
def draw_pose(overlay, camera_params, tag_size, pose, z_sign=1, color=(0, 255, 0)):
    """Draw the wireframe of a cube sitting on the detected tag into *overlay*.

    camera_params: (fx, fy, cx, cy) pinhole intrinsics.
    tag_size: tag edge length (same unit as the pose translation).
    pose: 4x4 homogeneous tag-to-camera transform.
    z_sign: which side of the tag plane the cube extends toward.
    """
    # Cube corners in the tag frame, scaled so the base matches the tag.
    opoints = np.array([
        -1, -1, 0,
        1, -1, 0,
        1, 1, 0,
        -1, 1, 0,
        -1, -1, -2 * z_sign,
        1, -1, -2 * z_sign,
        1, 1, -2 * z_sign,
        -1, 1, -2 * z_sign,
    ]).reshape(-1, 1, 3) * 0.5 * tag_size
    # Corner-index pairs forming the 12 cube edges.
    edges = np.array([
        0, 1,
        1, 2,
        2, 3,
        3, 0,
        0, 4,
        1, 5,
        2, 6,
        3, 7,
        4, 5,
        5, 6,
        6, 7,
        7, 4
    ]).reshape(-1, 2)
    fx, fy, cx, cy = camera_params
    K = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape(3, 3)
    # Rotation matrix -> Rodrigues vector, as cv2.projectPoints expects.
    rvec, _ = cv2.Rodrigues(pose[:3, :3])
    tvec = pose[:3, 3]
    dcoeffs = np.zeros(5)  # assume no lens distortion
    ipoints, _ = cv2.projectPoints(opoints, rvec, tvec, K, dcoeffs)
    ipoints = np.round(ipoints).astype(int)
    ipoints = [tuple(pt) for pt in ipoints.reshape(-1, 2)]
    for i, j in edges:
        cv2.line(overlay, ipoints[i], ipoints[j], color, 1, 16)
def draw_pose_axes(overlay, camera_params, tag_size, pose, center):
    """Draw the tag's coordinate axes (x red, y green, z blue) at *center*.

    center: 2D pixel coordinates of the tag center in *overlay*.
    """
    fx, fy, cx, cy = camera_params
    K = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape(3, 3)
    rvec, _ = cv2.Rodrigues(pose[:3, :3])
    tvec = pose[:3, 3]
    dcoeffs = np.zeros(5)  # assume no lens distortion
    # Unit axis endpoints in the tag frame, scaled to the tag size.
    opoints = np.float32([[1, 0, 0],
                          [0, -1, 0],
                          [0, 0, -1]]).reshape(-1, 3) * tag_size
    ipoints, _ = cv2.projectPoints(opoints, rvec, tvec, K, dcoeffs)
    ipoints = np.round(ipoints).astype(int)
    center = np.round(center).astype(int)
    center = tuple(center.ravel())
    cv2.line(overlay, center, tuple(ipoints[0].ravel()), (0, 0, 255), 2)
    cv2.line(overlay, center, tuple(ipoints[1].ravel()), (0, 255, 0), 2)
    cv2.line(overlay, center, tuple(ipoints[2].ravel()), (255, 0, 0), 2)
def annotate_detection(overlay, detection, center):
    """Write the tag id at the center of the detected tag in *overlay*.

    The font size is scaled from the tag's apparent size in pixels.
    NOTE(review): the `center` parameter is never used (the caller
    actually passes tag_size here) — consider removing or renaming.
    """
    text = str(detection.tag_id)
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Apparent tag edge length in pixels, from two adjacent corners.
    tag_size_px = np.sqrt((detection.corners[1][0] - detection.corners[0][0]) ** 2 + \
                          (detection.corners[1][1] - detection.corners[0][1]) ** 2)
    font_size = tag_size_px / 22
    text_size = cv2.getTextSize(text, font, font_size, 2)[0]
    # Center the text on the tag center.
    tag_center = [detection.center[0], detection.center[1]]
    text_x = int(tag_center[0] - text_size[0] / 2)
    text_y = int(tag_center[1] + text_size[1] / 2)
    cv2.putText(overlay, text, (text_x, text_y), font, font_size, (0, 255, 255), 2)
def detect_april_tag(orig, camera_params, tag_size, visualize=False, save_path=None, verbose=False):
    """Detect AprilTags in an image and optionally estimate their poses.

    orig: image as a numpy array, either 3-channel color or grayscale.
    camera_params: (fx, fy, cx, cy) intrinsics, or None to skip poses.
    tag_size: tag edge length in meters.
    Returns (poses, overlay) where poses is a list of
    (tag_id, 4x4 pose, final_error) tuples and overlay is the
    annotated image.
    """
    # Accept both color and already-grayscale inputs.  The original only
    # assigned `gray` in the 3-channel branch, so a grayscale image
    # raised NameError at detector.detect().
    if len(orig.shape) == 3:
        gray = cv2.cvtColor(orig, cv2.COLOR_RGB2GRAY)
    else:
        gray = orig
    detector = apriltag.Detector()
    detections, dimg = detector.detect(gray, return_image=True)
    num_detections = len(detections)
    if verbose:
        print(f'Detected {num_detections} tags')
    # Blend the detector's debug image into the original for the overlay.
    if num_detections == 0:
        overlay = orig
    elif len(orig.shape) == 3:
        overlay = orig // 2 + dimg[:, :, None] // 2
    else:
        overlay = orig // 2 + dimg // 2
    poses = []
    for i, detection in enumerate(detections):
        if verbose:
            print()
            print('Detection {} of {}:'.format(i + 1, num_detections))
            print(detection.tostring(indent=2))
        if camera_params is not None:
            pose, e0, ef = detector.detection_pose(detection, camera_params, tag_size)
            poses.append((detection.tag_id, pose, ef))
            draw_pose(overlay, camera_params, tag_size, pose)
            draw_pose_axes(overlay, camera_params, tag_size, pose, detection.center)
            annotate_detection(overlay, detection, tag_size)
            if verbose:
                print(detection.tostring(collections.OrderedDict([('Pose', pose),
                    ('InitError', e0), ('FinalError', ef)]), indent=2))
    if visualize:
        cv2.imshow('apriltag', overlay)
        while cv2.waitKey(5) < 0:  # press any key to continue
            continue
        cv2.destroyAllWindows()
    if save_path is not None:
        cv2.imwrite(save_path, overlay)
    return poses, overlay
if __name__ == '__main__':
    imagepath = '/home/gdk/Documents/data/1652826411/827312071624/000000_color.png'
    camera_params = (765.00, 764.18, 393.72, 304.66)
    tag_size = 0.06  # tag edge length in meters
    # Load the image first: detect_april_tag expects a numpy array, not a
    # file path (the original passed the path string straight through,
    # which fails at `orig.shape`).
    image = cv2.imread(imagepath)
    detect_april_tag(image, camera_params, tag_size, visualize=True, save_path=None, verbose=True)
| dkguo/Pushing-Imitation | apriltag_detection.py | apriltag_detection.py | py | 4,540 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.Rodrigues",
"line_number"... |
16194751087 | import urllib
import json
import pandas as pd
from pandas.io.json import json_normalize
from rdflib import URIRef, BNode, Literal, Graph
from rdflib import Namespace
from rdflib.namespace import RDF, FOAF, RDFS, XSD
from datetime import datetime
#api key = 57ab2bbab8dda80e00969c4ea12d6debcaddd956 for jsdeux api
#let's create RDF in TURTLE------------------------------------
# namesoaces we will use
# Namespaces used throughout the bike-station graph.
ex = Namespace('http://www.semweb.com/2001-schema#')
mobVoc = Namespace('http://schema.mobivoc.org/')
geoNames = Namespace('http://www.geonames.org/ontology#')
addr = Namespace('http://schemas.tails.com/2005#adresss/schema#')
geo = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
vcard = Namespace('http://www.w3.org/2006/vcard/ns#')
stPty = Namespace('http://www.semweb.org/2006/BycicleStation/property#')
# create default graph
g = Graph()
cities = ['valence', 'marseille', 'lyon', 'nantes', 'toulouse']
for city in cities:
    # Fetch all stations for this contract from the JCDecaux API.
    url = urllib.request.urlopen('https://api.jcdecaux.com/vls/v1/stations?contract='+str(city)+'&apiKey=57ab2bbab8dda80e00969c4ea12d6debcaddd956')
    # Decode using the charset the server reports (UTF-8 fallback).
    data = json.loads(url.read().decode(url.info().get_param('charset') or 'utf-8'))
    # Parse the payload and emit one station resource per record.
    for i in range(len(data)):
        URIReff = URIRef('http://www.semweb.com/URIRef/'+data[i]['contract_name']+'/'+str(data[i]['number']))
        name = Literal(data[i]['name'], datatype=XSD.string)
        # NOTE: this shadows the outer loop variable `city`; harmless
        # because the `for` statement rebinds it each iteration, but
        # confusing — consider a different name.
        city = Literal(data[i]['contract_name'], lang='fr')
        address = Literal(data[i]['address'], lang="fr")
        lat = Literal(data[i]['position']['lat'], datatype = XSD.decimal)
        lon = Literal(data[i]['position']['lng'], datatype = XSD.decimal)
        # Blank node grouping the dynamic (availability) facts.
        avaibility = BNode()
        avail_bikes = Literal(data[i]['available_bikes'], datatype = XSD.integer)
        total_bikes = Literal(data[i]['bike_stands'], datatype = XSD.integer)
        banking = Literal(data[i]['banking'], datatype = XSD.boolean)
        date = Literal("12-09-2019T13:05", datatype = XSD.date)  # NOTE(review): unused below
        status = Literal(data[i]['status'], datatype = XSD.string)
        # API timestamps are epoch milliseconds.
        last_update = Literal(datetime.fromtimestamp(data[i]['last_update']/1000).strftime('%Y-%m-%dT%I:%M:%S'), datatype = XSD.dateTime)
        # Bind readable prefixes for the Turtle serialization.
        g.namespace_manager.bind('geo', geo, override=False)
        g.namespace_manager.bind('vcard', vcard, override=False)
        g.namespace_manager.bind('geoNames', geoNames, override=False)
        g.namespace_manager.bind('addr', addr, override=False)
        g.namespace_manager.bind('mobVoc', mobVoc, override=False)
        g.namespace_manager.bind('ex', ex, override=False)
        g.namespace_manager.bind('stPty', stPty, override=False)
        # Static station facts.
        g.add((URIReff, RDF.type, mobVoc.BikeParkingStation))
        g.add((URIReff, RDFS.label, name))
        g.add((URIReff, addr.streetAdress, address))
        g.add((URIReff, vcard.inCity, city))
        g.add((URIReff, geo.lat, lat))
        g.add((URIReff, geo.lon, lon))
        # Dynamic availability facts, attached through the blank node.
        g.add((URIReff, ex.hasAvaibility, avaibility))
        g.add((avaibility, RDF.type, mobVoc.Avaibility))
        g.add((avaibility, stPty.avBicyce, avail_bikes))
        g.add((avaibility, stPty.totBicycle, total_bikes))
        g.add((avaibility, stPty.paymentCard, banking))
        g.add((avaibility, stPty.status, status))
        g.add((avaibility, stPty.lastUpdate, last_update))
# Serialize once after all cities have been processed.
g.serialize(destination='byke_data.ttl',format="turtle")
print('byke_data.ttl generated')
# ---- Terrace (outdoor seating) permits in Toulouse: a small, sparse
# geojson dataset; records missing key fields are skipped below.
with open('terrasses-autorisees-ville-de-toulouse.geojson') as f:
    data = json.load(f)
# Namespaces for the terrace graph.
ex = Namespace('http://www.semweb.com/2001-schema#')
tur = Namespace('http://schema.tur.org/')
addr = Namespace('http://schemas.tails.com/2005#adresss/schema#')
geo = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
vcard = Namespace('http://www.w3.org/2006/vcard/ns#')
g = Graph()
from urllib.parse import quote
for i in range(len(data[0]['features'])):
    # Skip features without an internal code (used as the URI key).
    # NOTE(review): the literal '<' and '>' are percent-encoded into the
    # URI here; that looks unintended — verify the desired URI shape.
    try:
        URIReff = URIRef(quote('<http://semweb.com/get/'+data[0]['features'][i]['properties']['code_int']['id']+'>'))
    except KeyError:
        continue
    city = Literal(data[0]['features'][i]['properties']['commune'], lang='fr')
    domain_activite = (data[0]['features'][i]['properties']['domaine_activite'])  # NOTE(review): unused below
    address = Literal(data[0]['features'][i]['properties']['nom_voie'], lang="fr")
    # Skip features without an activity description.
    try:
        nature_activite = Literal(data[0]['features'][i]['properties']['nature_activite'], lang='fr')
    except KeyError:
        continue
    lat = Literal(data[0]['features'][i]['properties']['x'], datatype = XSD.decimal)
    lon = Literal(data[0]['features'][i]['properties']['y'], datatype = XSD.decimal)
    # Bind readable prefixes for the Turtle serialization.
    g.namespace_manager.bind('geo', geo, override=False)
    g.namespace_manager.bind('vcard', vcard, override=False)
    g.namespace_manager.bind('addr', addr, override=False)
    g.namespace_manager.bind('tur', tur, override=False)
    g.namespace_manager.bind('ex', ex, override=False)
    g.add((URIReff, RDF.type, tur.Restraunte))
    g.add((URIReff, addr.streetAdress, address))
    g.add((URIReff, vcard.inCity, city))
    g.add((URIReff, geo.lat, lat))
    g.add((URIReff, geo.lon, lon))
    g.add((URIReff, RDFS.comment, nature_activite))
# Write the terrace triples to their own Turtle file.
g.serialize(destination='terasse.ttl',format="turtle")
| zhantileuov/rdf_project | generate.py | generate.py | py | 5,906 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rdflib.Namespace",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "rdflib.Namespace",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "rdflib.Namespace",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "rdflib.Namespace",... |
42663409589 | import copy
import math #needed for calculation of weight and bias initialization
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
import torch, torch.nn as nn, torch.nn.functional as F
import torchvision
from torchvision import transforms, models, utils
#Set seeds
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.cuda.manual_seed_all(0)
#Import components
from . import components as cts
from . import custom_models_diseasereps as cmdr
class AxialNet_Mask(nn.Module):
    """Identical implementation to the one in custom_models_base.py except
    that it returns an intermediate calculation of the convolution step
    which will be used in calculating a mask-related loss.
    (1) ResNet18 [slices, 512, 14, 14]
    (2) conv_final to [slices, 16, 6, 6]
    (3) FC layer (implemented via conv) to [n_outputs, 1, 1]
    (4) Avg pooling over slices to get [n_outputs]"""
    def __init__(self, n_outputs, slices):
        super(AxialNet_Mask, self).__init__()
        self.slices = slices #equal to 15 for 9 projections
        self.n_outputs = n_outputs
        self.features = cts.resnet_features()
        self.conv2d = cts.final_conv()
        # A conv whose kernel covers the full 6x6 spatial map acts as a
        # per-slice fully-connected classification layer.
        self.fc = nn.Conv2d(16, n_outputs, kernel_size = (6,6), stride=(6,6), padding=0)
        self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
    def forward(self, x):
        """Return {'out': [1, n_outputs] predictions,
        'x_perslice_scores': [1, n_outputs, slices],
        'disease_reps': [n_outputs, slices, 16, 6, 6] fc-weighted feature
        maps consumed by the mask loss}."""
        assert list(x.shape)==[1,self.slices,3,420,420]
        x1 = x.squeeze() #out shape [slices,3,420,420]
        x1 = self.features(x1) #out shape [slices,512,14,14]
        x1f = self.conv2d(x1) #out shape [slices, 16, 6, 6]
        x2 = self.fc(x1f) #out shape [slices,n_outputs,1,1]
        x2 = torch.squeeze(x2) #out shape [slices, n_outputs]
        x2_perslice_scores = x2.transpose(0,1).unsqueeze(0) #out shape [1, n_outputs, slices]
        x2 = self.avgpool_1d(x2_perslice_scores) #out shape [1, n_outputs, 1]
        x2f = torch.squeeze(x2, dim=2) #out shape [1, n_outputs]
        #Now calculate what the disease specific representation is in the
        #intermediate calculation of the fc layer.
        #First, make n_outputs copies of the slices x 16 x 6 x 6 representation:
        x1_repeated = x1f.repeat(self.n_outputs,1,1,1,1) #out shape [n_outputs, slices, 16, 6, 6]
        #Now select the fc_weights:
        fc_weights = self.fc.weight #shape [132, 16, 6, 6], where 132 is n_outputs
        fc_weights_unsq = fc_weights.unsqueeze(dim=1) #out shape [n_outputs, 1, 16, 6, 6]
        #Now multiply element wise. Broadcasting will occur.
        #we have [n_outputs, slices, 16, 6, 6] x [n_outputs, 1, 16, 6, 6]
        disease_reps = torch.mul(x1_repeated, fc_weights_unsq) #out shape [n_outputs, slices, 16, 6, 6]
        out = {'out':x2f,
               'x_perslice_scores':x2_perslice_scores,
               'disease_reps':disease_reps}
        return out
class AxialNet_Mask_VanillaGradCAM(nn.Module):
    """Identical implementation to the one in custom_models_base.py except
    that it returns an intermediate calculation of the convolution step
    which will be used in calculating a mask-related loss; this intermediate
    calculation is based on vanilla Grad-CAM.
    (1) ResNet18 [slices, 512, 14, 14]
    (2) conv_final to [slices, 16, 6, 6]
    (3) FC layer (implemented via conv) to [n_outputs, 1, 1]
    (4) Avg pooling over slices to get [n_outputs]"""
    def __init__(self, n_outputs, slices):
        super(AxialNet_Mask_VanillaGradCAM, self).__init__()
        self.slices = slices #equal to 15 for 9 projections
        self.n_outputs = n_outputs
        self.features = cts.resnet_features()
        self.conv2d = cts.final_conv()
        self.fc = nn.Conv2d(16, n_outputs, kernel_size = (6,6), stride=(6,6), padding=0)
        self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
    def forward(self, x):
        """Same outputs as AxialNet_Mask, but 'disease_reps' weights the
        feature maps by Grad-CAM style alpha_k coefficients (spatial mean
        of the fc weights) instead of the full fc weight maps."""
        assert list(x.shape)==[1,self.slices,3,420,420]
        x1 = x.squeeze() #out shape [slices,3,420,420]
        x1 = self.features(x1) #out shape [slices,512,14,14]
        x1f = self.conv2d(x1) #out shape [slices, 16, 6, 6]
        x2 = self.fc(x1f) #out shape [slices,n_outputs,1,1]
        x2 = torch.squeeze(x2) #out shape [slices, n_outputs]
        x2_perslice_scores = x2.transpose(0,1).unsqueeze(0) #out shape [1, n_outputs, slices]
        x2 = self.avgpool_1d(x2_perslice_scores) #out shape [1, n_outputs, 1]
        x2f = torch.squeeze(x2, dim=2) #out shape [1, n_outputs]
        #Now calculate what the disease specific representation is in the
        #intermediate calculation of the fc layer.
        #First, make n_outputs copies of the slices x 16 x 6 x 6 representation:
        x1_repeated = x1f.repeat(self.n_outputs,1,1,1,1) #out shape [n_outputs, slices, 16, 6, 6]
        #Now select the fc_weights. These weights are also the gradients leaving
        #the last layer.
        fc_weights = self.fc.weight #shape [80, 16, 6, 6], where 80 is n_outputs
        #To calculate the alpha_ks, we need to take the mean across the height
        #and width so that we get one alpha_k per feature per disease:
        #(confirmed that this is the mean across the 6x6 in the gradcam code)
        alpha_ks = torch.mean(fc_weights,dim=(2,3)) #out shape [n_outputs, 16]
        alpha_ks_unsq = alpha_ks.unsqueeze(dim=1).unsqueeze(dim=3).unsqueeze(dim=3) #out shape [n_outputs, 1, 16, 1, 1]
        #Now multiply element wise. Broadcasting will occur.
        #we have [n_outputs, slices, 16, 6, 6] x [n_outputs, 1, 16, 1, 1]
        disease_reps = torch.mul(x1_repeated, alpha_ks_unsq) #out shape [n_outputs, slices, 16, 6, 6]
        #the summing over the feature dimension takes place in the loss
        #calculation
        out = {'out':x2f,
               'x_perslice_scores':x2_perslice_scores,
               'disease_reps':disease_reps}
        return out
class AxialNet_Mask_Final3DConv(nn.Module):
    """Identical implementation to the one in custom_models_base.py except
    that it returns an intermediate calculation of the convolution step
    which will be used in calculating a mask-related loss.
    (1) ResNet18 [slices, 512, 14, 14]
    (2) conv_final to [slices, 16, 6, 6]
    (3) Final FC layer implemented via 3D convolution to produce [n_outputs]"""
    def __init__(self, n_outputs, slices):
        super(AxialNet_Mask_Final3DConv, self).__init__()
        self.slices = slices #equal to 15 for 9 projections
        self.n_outputs = n_outputs
        self.features = cts.resnet_features()
        self.conv2d = cts.final_conv() #out shape [slices, 16, 6, 6]
        #Final step is 3D convolution!
        #Rep is first reshaped to [1, 16, slices, 6, 6]
        self.fc = nn.Conv3d(16, n_outputs, kernel_size=(self.slices,6,6), stride=(self.slices,6,6), padding=0)
    def forward(self, x):
        """Return {'out': [1, n_outputs] predictions,
        'disease_reps': [n_outputs, slices, 16, 6, 6] fc-weighted feature
        maps (transposed to match the attention ground-truth layout)}."""
        assert list(x.shape)==[1,self.slices,3,420,420]
        x1 = x.squeeze() #out shape [slices,3,420,420]
        x1 = self.features(x1) #out shape [slices,512,14,14]
        x1 = self.conv2d(x1) #out shape [slices, 16, 6, 6]
        #Reshape:
        x1f = x1.transpose(0,1).unsqueeze(0) #out shape [1, 16, slices, 6, 6]
        #Final classification
        x2 = self.fc(x1f) #out shape [1,n_outputs,1,1,1]
        x2f = x2.squeeze(dim=2).squeeze(dim=2).squeeze(dim=2) #out shape [1,n_outputs]
        #TODO TEST THIS (or at least make visualizations of disease_reps)
        #Now calculate what the disease specific representation is in the
        #intermediate calculation of the fc layer.
        #First, make n_outputs copies of the 16 x slices x 6 x 6 representation:
        x1_repeated = x1f.squeeze(dim=0).repeat(self.n_outputs,1,1,1,1) #out shape [n_outputs, 16, slices, 6, 6]
        #Now select the fc_weights:
        fc_weights = self.fc.weight #shape [n_outputs, 16, slices, 6, 6]
        assert x1_repeated.shape==fc_weights.shape
        #Now multiply element wise. Broadcasting will occur.
        #we have [n_outputs, 16, slices, 6, 6] x [n_outputs, 16, slices, 6, 6]
        disease_reps_orig = torch.mul(x1_repeated, fc_weights) #out shape [n_outputs, 16, slices, 6, 6]
        #But for the attention ground truth calculation we assume that the
        #disease_reps has shape [n_outputs, slices, 16, 6, 6], so transpose!
        disease_reps = disease_reps_orig.transpose(1,2) #out shape [n_outputs, slices, 16, 6, 6]
        out = {'out':x2f,
               'disease_reps':disease_reps}
        return out
class BodyLocationAttn3Mask(nn.Module): #7/2/2020, updated 7/7/2020, redone for mask 8/27/2020
    """Modification on 8/27 involves the shape of the attention calculated.
    Old version calculated [1,1,1,6,6 attention]. This version calculates
    [1,slices,1,6,6] attention (i.e. fully 3d spatially.)
    There is also a special loss associated with this model which requires the
    model to match the organ attention to ground truth organ masks.

    OLD DOCUMENTATION from model that this model was based on,
    BodyLocationAttn3 in custom_models_diseasereps.py:
    See AxialNetDiseaseFeatureAttn for more documentation including code comments.
    Difference from AxialNetDiseaseFeatureAttn: uses spatial attention instead of
    feature attention. Specifically there is right lung, heart, and left lung
    spatial attention. Also, instead of being fixed weights every time, the
    weights are learned based on using the center slices (since the center
    slices are most indicative of where the right lung, heart, and left
    lung are located.) So this is trainable soft self-attention."""
    def __init__(self, n_outputs_lung, n_outputs_heart):
        super(BodyLocationAttn3Mask, self).__init__()
        self.slices = 15 #9 projections
        # Separate predictions per lung, so lung outputs are doubled.
        self.n_outputs = (2*n_outputs_lung)+n_outputs_heart
        self.n_outputs_lung = n_outputs_lung
        self.n_outputs_heart = n_outputs_heart
        self.features = cts.resnet_features()
        self.conv2d = cts.final_conv()
        #Calculate the spatial attention based on ALL the slices
        in_size = self.slices*16*6*6
        out_size = self.slices*6*6
        # One sigmoid-gated spatial map per organ, predicted from the
        # flattened whole-volume representation.
        self.heart_attn_fc = nn.Sequential(nn.Linear(in_size, out_size),nn.Sigmoid())
        self.left_lung_attn_fc = nn.Sequential(nn.Linear(in_size, out_size),nn.Sigmoid())
        self.right_lung_attn_fc = nn.Sequential(nn.Linear(in_size, out_size),nn.Sigmoid())
        # One independent FC classification layer per output disease.
        self.fclayers_weights, self.fclayers_biases = cmdr.init_stacked_fc_layers(total_independent_fc_layers = self.n_outputs, in_features = 16*6*6)
        self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
    def forward(self, x):
        """Return {'out': [1, n_outputs] predictions, plus the three
        per-organ spatial attention maps (each [slices,1,1,6,6]) used by
        the organ-mask loss."""
        x = cts.reshape_x(x, self.slices)
        x = self.features(x) #out shape [slices,512,14,14]
        x = self.conv2d(x) #out shape [slices, 16, 6, 6]
        #Calculate attention mask based on all slices
        #This attention mask is basically doing low-dimensional organ
        #segmentation. The nice thing about doing the segmentation this way
        #is that the model can still look at both lungs when predicting a
        #lung disease but it's forced to look MORE at the relevant lung.
        all_slices_flat = x.flatten().unsqueeze(dim=0) #out shape [1,8640]
        #The spatial maps must be able to be broadcast multiplied against
        #a Tensor of shape [slices, n_outputs_organ, 16, 6, 6]
        self.heart_spatial = self.heart_attn_fc(all_slices_flat).reshape(self.slices,1,1,6,6) #out shape [slices,1,1,6,6]
        self.left_lung_spatial = self.left_lung_attn_fc(all_slices_flat).reshape(self.slices,1,1,6,6) #out shape [slices,1,1,6,6]
        self.right_lung_spatial = self.right_lung_attn_fc(all_slices_flat).reshape(self.slices,1,1,6,6) #out shape [slices,1,1,6,6]
        #Repeat x
        x = x.repeat(self.n_outputs,1,1,1,1) #out shape [n_outputs, slices, 16, 6, 6]
        x = x.transpose(0,1) #out shape [slices, n_outputs, 16, 6, 6]
        #Apply the attention maps
        #Must follow ground truth label order, which is heart, left_lung, right_lung
        x_heart = torch.mul(x[:,0:self.n_outputs_heart,:,:,:],self.heart_spatial)
        x_left_lung = torch.mul(x[:,self.n_outputs_heart:self.n_outputs_heart+self.n_outputs_lung,:,:,:],self.left_lung_spatial)
        x_right_lung = torch.mul(x[:,-1*self.n_outputs_lung:,:,:,:],self.right_lung_spatial)
        x = torch.cat((x_heart,x_left_lung,x_right_lung),dim=1) #out shape [slices, n_outputs, 16, 6, 6]
        x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, n_outputs, 16*6*6] = [slices, n_outputs, 576]
        slice_preds = cmdr.apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
        x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, n_outputs, slices]
        x = self.avgpool_1d(x) #out shape [1, n_outputs, 1]
        x = torch.squeeze(x, dim=2) #out shape [1, n_outputs]
        out = {'out':x,
               'heart_spatial':self.heart_spatial,
               'left_lung_spatial':self.left_lung_spatial,
               'right_lung_spatial':self.right_lung_spatial}
        return out
class BodyDiseaseSpatialAttn4Mask(nn.Module): #7/7/2020 #TODO test this #Updated 8/27/2020 for mask
    """In this model a 3D attention mask of shape [slices,6,6] is calculated for
    each disease, before the classification step.

    Note that this model is identical to BodyDiseaseSpatialAttn4 except for
    its usage:
    (a) custom loss function: in the loss, the location information is used to
        determine what locations the disease-specific attention is allowed to
        look at. e.g. if there is atelectasis only in the left lung then the
        attention for atelectasis for that scan should be only in the place
        demarcated as left lung in the segmentation ground truth.
        Furthermore, if there is NO atelectasis present, then the attention
        for atelectasis should all be zero.
        In order to calculate this custom loss, this model has to return
        the attention maps in addition to the predictions.
    (b) custom labels: this model is different from everything else I have
        been doing because it assumes that we just want to predict lung
        diseases generically and so it only makes
        n_outputs_lung+n_outputs_heart predictions, rather than
        (2*n_outputs_lung+n_outputs_heart) predictions.

    OLD DOCUMENTATION from model that this model was based on,
    BodyDiseaseSpatialAttn4 in custom_models_diseasereps.py
    See AxialNetDiseaseFeatureAttn for more documentation including code comments.
    Difference from BodyLocationAttn3: while 4 also uses spatial
    attention (like 3), 4 does spatial attention per disease instead of per
    location."""
    def __init__(self, n_outputs_lung, n_outputs_heart):
        super(BodyDiseaseSpatialAttn4Mask, self).__init__()
        self.slices = 15 #9 projections
        #NOTE that here, we have only n_outputs_lung overall! We are not doing
        #separate predictions for the right and left lungs!
        self.n_outputs = n_outputs_lung+n_outputs_heart
        self.n_outputs_lung = n_outputs_lung
        self.n_outputs_heart = n_outputs_heart
        self.features = cts.resnet_features()
        self.conv2d = cts.final_conv()
        #Calculate per-disease spatial attention based on ALL the slices
        #Repeated representation: [slices, n_outputs, 16, 6, 6]
        #Attention shape we want: [slices, n_outputs, 1, 6, 6]
        self.nonlinearity = nn.Sigmoid()
        #FC layers for calculating the disease-specific spatial attention
        #For each disease and each element of the 6x6 I learn a different FC layer:
        self.fcattns_weights, self.fcattns_biases = cmdr.init_stacked_fc_layers(total_independent_fc_layers = self.n_outputs*6*6, in_features = 16)
        #FC layers for calculating the final disease predictions
        self.fclayers_weights, self.fclayers_biases = cmdr.init_stacked_fc_layers(total_independent_fc_layers = self.n_outputs, in_features = 16*6*6)
        self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
    def forward(self, x):
        """Return {'out': [1, n_outputs] predictions,
        'attn': [slices, n_outputs, 1, 6, 6] per-disease attention maps
        consumed by the attention-mask loss}."""
        x = cts.reshape_x(x, self.slices)
        x = self.features(x) #out shape [slices,512,14,14]
        x = self.conv2d(x) #out shape [slices, 16, 6, 6]
        x = x.repeat(self.n_outputs,1,1,1,1) #out shape [n_outputs, slices, 16, 6, 6]
        x = x.transpose(0,1) #out shape [slices, n_outputs, 16, 6, 6]
        #Calculate the disease-specific spatial attention:
        attn_raw_list = []
        for slice_num in range(self.slices):
            slice_data = x[slice_num,:,:,:,:] #out shape [n_outputs, 16, 6, 6]
            slice_data = slice_data.flatten(start_dim=2,end_dim=3).transpose(1,2) #out shape [n_outputs, 6*6, 16]
            slice_data = slice_data.flatten(start_dim=0,end_dim=1) #out shape [n_outputs*6*6, 16]
            # Per-(disease, spatial position) independent linear layer:
            # elementwise multiply with the stacked weights, sum over the
            # 16 features, then add the stacked biases.
            temp1 = torch.mul(slice_data,self.fcattns_weights) #out shape [n_outputs*6*6, 16]
            temp2 = torch.sum(temp1,dim=1) #out shape [n_outputs*6*6]
            temp3 = (temp2+self.fcattns_biases).unsqueeze(0) #out shape [n_outputs*6*6]
            attn_raw_list.append(temp3)
        attn_raw = torch.cat(attn_raw_list,dim=0) #out shape [slices, n_outputs*6*6]
        attn_raw = torch.reshape(attn_raw,(self.slices,self.n_outputs,6*6)) #out shape [slices, n_outputs, 6*6]
        attn = self.nonlinearity(attn_raw) #out shape [slices, n_outputs, 6*6]
        attn = torch.reshape(attn,(self.slices,self.n_outputs,6,6)).unsqueeze(2) #out shape [slices, n_outputs, 1, 6, 6]
        #Apply the attention
        x_times_attn = torch.mul(x, attn) #out shape [slices, n_outputs, 16, 6, 6]
        #Disease predictions
        # NOTE(review): the classification below uses `x`, not
        # `x_times_attn` — the attended representation appears unused for
        # prediction (it is only trained through the attn loss). Confirm
        # this is intentional.
        x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, n_outputs, 16*6*6] = [slices, n_outputs, 576]
        slice_preds = cmdr.apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
        x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, n_outputs, slices]
        x = self.avgpool_1d(x) #out shape [1, n_outputs, 1]
        x = torch.squeeze(x, dim=2) #out shape [1, n_outputs]
        out = {'out':x,
               'attn':attn} #attn out shape [slices, n_outputs, 1, 6, 6]
        return out
class BodyDiseaseSpatialAttn5Mask(nn.Module): #7/7/2020 #TODO test this
    #On the natural images dataset, this model had better performance
    #than model 4
    """Exactly the same as the BodyDiseaseSpatialAttn5 model except that
    this returns the attn so that it can be trained with a loss function that
    acts on the attn as well.
    OLD DOCUMENTATION from model that this model was based on,
    BodyDiseaseSpatialAttn5 in custom_models_diseasereps.py:
    See AxialNetDiseaseFeatureAttn for more documentation including code comments.
    Difference from BodyDiseaseSpatialAttn4: whereas 4 learns a different
    mapping of 16 features -> 1 spatial attn value for each element of the 6x6
    square, 5 uses a convolution layer such that the mapping of 16 -> 1 is
    the same for each element of the 6x6 square"""
    def __init__(self, n_outputs, nonlinearity):
        """
        n_outputs: number of disease classes predicted per scan.
        nonlinearity: 'softmax' or 'sigmoid'. NOTE(review): any other value
            leaves ``self.nonlinearity`` unset and the next line raises
            AttributeError — confirm whether an explicit error was intended.
        """
        super(BodyDiseaseSpatialAttn5Mask, self).__init__()
        self.slices = 15 # slices per input volume (original comment said "9 projections" - TODO confirm)
        self.n_outputs = n_outputs
        self.features = cts.resnet_features()
        self.conv2d = cts.final_conv()
        #Nonlinearity that normalizes the raw spatial attention scores
        if nonlinearity == 'softmax':
            self.nonlinearity = nn.Softmax(dim=2)
        elif nonlinearity == 'sigmoid':
            self.nonlinearity = nn.Sigmoid()
        #Conv layer for calculating the disease-specific spatial attention:
        #a shared 16 -> n_outputs 1x1 convolution (same mapping at every
        #element of the 6x6 square, unlike model 4)
        self.attn_conv = nn.Sequential(
            nn.Conv2d(16, self.n_outputs, kernel_size = (1,1), stride=(1,1), padding=0),
            self.nonlinearity)
        #FC layers for calculating the final disease predictions (one
        #independent 576 -> 1 layer per disease)
        self.fclayers_weights, self.fclayers_biases = cmdr.init_stacked_fc_layers(total_independent_fc_layers = self.n_outputs, in_features = 16*6*6)
        self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
    def forward(self, x):
        """Return {'out': predictions [1, n_outputs],
        'attn': attention maps [slices, n_outputs, 1, 6, 6]}."""
        x = cts.reshape_x(x, self.slices)
        x = self.features(x) #out shape [slices,512,14,14]
        x = self.conv2d(x) #out shape [slices, 16, 6, 6]
        #Calculate the disease-specific spatial attention:
        attn = self.attn_conv(x).unsqueeze(2) #out shape [slices, n_outputs, 1, 6, 6]
        #Apply the attention
        x = x.repeat(self.n_outputs,1,1,1,1) #out shape [n_outputs, slices, 16, 6, 6]
        x = x.transpose(0,1) #out shape [slices, n_outputs, 16, 6, 6]
        x_times_attn = torch.mul(x, attn) #out shape [slices, n_outputs, 16, 6, 6]
        #Disease predictions
        # NOTE(review): as in the sibling models, the un-attended ``x`` is
        # flattened below and ``x_times_attn`` is never used — confirm intent.
        x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, n_outputs, 16*6*6] = [slices, n_outputs, 576]
        slice_preds = cmdr.apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
        x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, n_outputs, slices]
        x = self.avgpool_1d(x) #out shape [1, n_outputs, 1]
        x = torch.squeeze(x, dim=2) #out shape [1, n_outputs]
        out = {'out':x,
               'attn':attn} #attn out shape [slices, n_outputs, 1, 6, 6]
        return out | rachellea/explainable-ct-ai | src/models/custom_models_mask.py | custom_models_mask.py | py | 21,512 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manu... |
73026312507 | from typing import DefaultDict
import sys
import os
import csv

# ``list.append`` takes exactly one argument; prepending to sys.path needs
# ``insert`` (the original ``sys.path.append(0, ...)`` raised TypeError).
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('./src'))
sys.path.insert(0, os.path.abspath('./src/utilities'))

from src.utilities import SCRIPT_HOME
from src.utilities.post_process import post_proc_timeseries
from net_sim import Attack_Sim
from time import time
from sys import argv
def main(argv):
    """Run one baseline traffic-engineering simulation iteration.

    :param argv: ``sys.argv``-style list. With no extra arguments a set of
        development defaults is used; otherwise exactly 13 positional
        parameters are expected (see the unpacking below).
    """
    # typing.DefaultDict is a generic alias and cannot be instantiated;
    # the concrete container lives in ``collections``.
    from collections import defaultdict

    if len(argv) == 1:
        # Development defaults when no CLI arguments are supplied.
        network = 'Comcast'
        hosts = 149
        te_method = "-semimcfraeke"
        traffic_type = "FlashCrowd"
        iter_i = 1
        experiment_tag = "NoFailures"
        network_instance = network
        repeat = "False"
        traffic_file = "/home/mhall/OLTE/data/traffic/{}_x100_10000_pareto-matrix.txt".format(network)
        n_fallow_transponders = "20"
        # The original code left these three undefined on this path, which
        # raised NameError further down. TODO(review): confirm defaults.
        optical_strategy = "baseline"
        fallow_tx_allocation = "static"
        ftx_file = ""
    else:
        (_,
         network,
         hosts,
         te_method,
         traffic_type,
         iter_i,
         experiment_tag,
         network_instance,
         repeat,
         traffic_file,
         n_fallow_transponders,
         optical_strategy,
         fallow_tx_allocation,
         ftx_file) = argv
    iter_i = int(iter_i)
    hosts = int(hosts)
    # Count traffic-matrix rows in pure Python instead of shelling out to
    # ``wc -l`` (no shell dependency, handles spaces in the path).
    with open(traffic_file) as tf:
        iterations = sum(1 for _ in tf)
    data = defaultdict(list)

    ########################## Baseline ########################
    attack_sim = Attack_Sim(network_instance,
                            hosts,
                            "_".join([traffic_type, experiment_tag]),
                            iterations=iterations,
                            te_method=te_method,
                            method="none",
                            traffic_file=traffic_file,
                            strategy=optical_strategy,
                            use_heuristic='no',
                            fallow_transponders=n_fallow_transponders,
                            fallow_tx_allocation_strategy=fallow_tx_allocation,
                            fallow_tx_allocation_file=ftx_file,
                            salt=str(iter_i))
    if repeat == "repeat":
        result = attack_sim.perform_sim(circuits=1, start_iter=iter_i, end_iter=iter_i, repeat=True)
    else:
        result = attack_sim.perform_sim(circuits=1, start_iter=iter_i, end_iter=iter_i)
    for key in result:
        data[key].extend(result[key])


if __name__ == "__main__":
    main(argv)
| mattall/topology-programming | scripts/TDSC/sim_event.py | sim_event.py | py | 3,375 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
8708126012 | from __future__ import unicode_literals
import datetime
import logging
import os
import tweepy as tp
from twiker.modules.tauth import Auth
class Engine(object):
"""
The main engine class for the Twiker Bot.This class includes all the api methods
Copyright (c) 2021 The Knight All rights reserved.
"""
    def __init__(self, config, verbose=False):
        """Configure file logging and authenticate against the twitter API.

        :param config: configuration object handed to :class:`Auth`
        :param verbose: when True, log at DEBUG level instead of INFO
        """
        auth = Auth(config)
        # configure logging: log files live in a "logs" dir next to this module
        log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs")
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        # every log file will be named with the current date and would be name differently
        date = str(datetime.datetime.now().strftime("%Y-%m-%d"))
        time = str(datetime.datetime.now().strftime("%H-%M-%S"))
        log_file = os.path.join(log_dir, "twiker_bot_" + date + "_" + time + ".log")
        if auth.file_exists(log_file):
            # NOTE(review): this recomputes the exact same file name, so the
            # "already exists" branch is a no-op — confirm whether a counter
            # suffix was intended here.
            log_file = os.path.join(log_dir, "twiker_bot_" + date + "_" + time + ".log")
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                            datefmt='%m-%d %H:%M',
                            filename=log_file,
                            filemode='w')
        # NOTE(review): this grabs the *root* logger, so the level set below
        # affects all logging in the process.
        self.logger = logging.getLogger()
        if verbose:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)
        self.logger.info("Starting Twiker Bot")
        # authenticated tweepy API handle used by every other method
        self.api = auth.access()
# print all information about a user
def user_info(self, username=None):
"""
Get User info by username
:param username: username to get info on twitter
:return:
"""
if username is None:
username = self.api.me().screen_name
try:
info = self.api.get_user(screen_name=username)
print("Name: " + str(info.name))
print("Screen Name: " + str(info.screen_name))
print("User ID: " + str(info.id))
print("Location: " + str(info.location))
print("Description: " + str(info.description))
print("URL: " + str(info.url))
print("Followers: " + str(info.followers_count))
print("Following: " + str(info.friends_count))
print("Tweets: " + str(info.statuses_count))
print("Favorites: " + str(info.favourites_count))
print("Created at: " + str(info.created_at))
print("Time zone: " + str(info.time_zone))
print("Geo enabled: " + str(info.geo_enabled))
print("Verified: " + str(info.verified))
print("Lang: " + str(info.lang))
try:
print("Status: " + str(info.status.text))
except:
print("Status: " + "None")
print("Profile background color: " + str(info.profile_background_color))
print("Profile background image: " + str(info.profile_background_image_url))
print("Profile background image url: " + str(info.profile_background_image_url_https))
print("Profile background tile: " + str(info.profile_background_tile))
print("Profile link color: " + str(info.profile_link_color))
print("Profile sidebar border color: " + str(info.profile_sidebar_border_color))
print("Profile sidebar fill color: " + str(info.profile_sidebar_fill_color))
print("Profile text color: " + str(info.profile_text_color))
print("Profile use background image: " + str(info.profile_use_background_image))
print("Profile image: " + str(info.profile_image_url))
print("Profile image url: " + str(info.profile_image_url_https))
print("Profile image url: " + str(info.profile_background_image_url_https))
print("Profile image url: " + str(info.profile_background_image_url))
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# tweet a message
def tweet(self, message, media=None):
"""
Tweet a message
:param message: message to tweet
:param media: media to tweet
:return:
"""
self.logger.debug("Tweeting message: %s", message)
self.logger.info("Tweeting message: %s", message)
try:
if media is None:
self.api.update_status(status=message)
else:
self.api.update_with_media(media, status=message)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# get user timeline
def get_timeline(self, username):
"""
Get user timeline
:param username: username to get timeline
:return:
"""
def retweet(self, tweet_id):
"""
Retweet a tweet by tweet.id
:param tweet_id: tweet id to retweet a tweet
:return:
"""
self.logger.debug("Retweeting tweet with id: %s", tweet_id)
try:
self.api.retweet(tweet_id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def reply(self, message, tweet_id):
"""
Reply to a tweet by tweet.id
:param message: message to reply
:param tweet_id: tweet id to reply
:return:
"""
try:
self.api.update_status(status=message,
in_reply_to_status_id=tweet_id,
auto_populate_reply_metadata=True)
logging.debug("Replied to tweet with id: %s", tweet_id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def follow(self, username):
"""
Follow a user on twitter
:param username: username to follow on twitter
:return:
"""
try:
self.api.create_friendship(screen_name=username)
logging.debug("Followed user: %s", username)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def unfollow(self, username):
"""
Unfollow a user on twitter
:param username: username to unfollow on twitter
:return:
"""
try:
self.api.destroy_friendship(screen_name=username)
logging.debug("Unfollowed user: %s", username)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def block(self, username):
"""
Block a user on twitter
:param username: username to block on twitter
:return:
"""
try:
self.api.create_block(screen_name=username)
logging.debug("Blocked user: %s", username)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def unblock(self, username):
"""
Unblock a user on twitter
:param username: username to unblock on twitter
:return:
"""
try:
self.api.destroy_block(screen_name=username)
logging.debug("Unblocked user: %s", username)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def get_user_id(self, username):
"""
Get User id by username
:param username: username to get id on twitter
"""
try:
return self.api.get_user(screen_name=username).user_id
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# reply on a hashtag
def reply_hashtag(self, message, hashtag):
"""
Reply to all tweet on a hashtag
:param message: message to in reply with hashtag
:param hashtag: hashtag on which method have to reply
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
self.reply(message, tweet_id.id)
logging.debug("Replied to tweet on hashtag: %s tweet_id: %s", hashtag, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# retweet on a hashtag
def retweet_hashtag(self, hashtag):
"""
Retweet all tweet on a hashtag
:param hashtag: hashtag on which method have to retweet
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
self.retweet(tweet_id.id)
logging.debug("Retweeted tweet on hashtag: %s tweet_id: %s", hashtag, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def unretweet_hashtag(self, hashtag):
"""
Unretweet all tweet on a hashtag
:param hashtag: hashtag on which method have to unretweet
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
self.unretweet(tweet_id.id)
logging.debug("Unretweeted tweet on hashtag: %s tweet_id: %s", hashtag, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# like all tweet on a hashtag
def like_hashtag(self, hashtag):
"""
Like all tweet on a hashtag
:param hashtag: hashtag on which method have to like
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
try:
self.api.create_favorite(tweet_id.id)
except Exception as e:
logging.error("Error: %s", e)
print(e)
logging.debug("Liked tweet on hashtag: %s tweet_id: %s", hashtag, tweet_id.id)
logging.info("Liked tweet on hashtag: %s tweet_id: %s", hashtag, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def unlike_hashtag(self, hashtag):
"""
Unlike all tweet on a hashtag
:param hashtag: hashtag on which method have to unlike
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
self.api.destroy_favorite(tweet_id.id)
logging.debug("Unliked tweet on hashtag: %s tweet_id: %s", hashtag, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# follow all user on a hashtag
def follow_hashtag(self, hashtag):
"""
Follow all user on a hashtag
:param hashtag: hashtag on which method have to follow
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
self.follow(tweet_id.user.screen_name)
logging.debug("Followed user: %s", tweet_id.user.screen_name)
logging.info("Followed user: %s", tweet_id.user.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def unfollow_hashtag(self, hashtag):
"""
Unfollow all user on a hashtag
:param hashtag: hashtag on which method have to unfollow
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
self.unfollow(tweet_id.user.screen_name)
logging.debug("Unfollowed user: %s", tweet_id.user.screen_name)
logging.info("Unfollowed user: %s", tweet_id.user.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# like all tweet of a user
def like_user(self, username):
"""
Like all tweet of a user
:param username: username on which method have to like
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.user_timeline, screen_name=username).items():
self.api.create_favorite(tweet_id.id)
logging.debug("Liked tweet of user: %s tweet_id: %s", username, tweet_id.id)
logging.info("Liked tweet of user: %s tweet_id: %s", username, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# unlike all tweet of a user
def unlike_user(self, username):
"""
Unlike all tweet of a user
:param username: username on which method have to unlike
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.user_timeline, screen_name=username).items():
self.api.destroy_favorite(tweet_id.id)
logging.debug("Unliked tweet of user: %s tweet_id: %s", username, tweet_id.id)
logging.info("Unliked tweet of user: %s tweet_id: %s", username, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# retweet all tweet of a user
def retweet_user(self, username):
"""
Retweet all tweet of a user
:param username: username on which method have to retweet
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.user_timeline, screen_name=username).items():
self.api.retweet(tweet_id.id)
logging.debug("Retweeted tweet of user: %s tweet_id: %s", username, tweet_id.id)
logging.info("Retweeted tweet of user: %s tweet_id: %s", username, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# dm to single user
def dm(self, username, message, media=None):
"""
Direct message to single user
:param username: username on which method have to dm
:param message: message to dm
:param media: media to dm
:return:
"""
try:
recipient = self.api.get_user(username)
self.api.send_direct_message(recipient_id=recipient.id_str, text=message)
logging.debug("Direct message to user sent: %s", recipient.screen_name)
logging.info("Direct message to user sent: %s", recipient.screen_name)
if media:
self.api.media_upload(media)
self.api.send_direct_message(recipient_id=recipient.id_str, text=message, attachment_type="media",
attachment_media_id=self.api.media_upload(media).media_id)
logging.debug("Direct message to user with media sent: %s", recipient.screen_name)
logging.info("Direct message to user with media sent: %s", recipient.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# dm to multiple user
def dm_multiple(self, usernames, message, media=None):
"""
Direct message to multiple user
:param usernames: list(usernames) on which method have to dm ["username1", "username2", ...]
:param message: message to dm
:return:
"""
try:
for user in usernames:
recipient = self.api.get_user(user)
try:
self.api.send_direct_message(recipient_id=recipient.id, text=message)
logging.debug("Direct message to user sent: %s", recipient.screen_name)
logging.info("Direct message to user sent: %s", recipient.screen_name)
except Exception as e:
logging.error("Error: %s", e)
print(e)
if media:
try:
self.api.media_upload(media)
self.api.send_direct_message(recipient_id=recipient.id_str, text=message,
attachment_type="media",
attachment_media_id=self.api.media_upload(media).media_id)
except Exception as e:
logging.error("Error: %s", e)
print(e)
logging.debug("Direct message to user with media sent: %s", recipient.screen_name)
logging.info("Direct message to user with media sent: %s", recipient.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# dm all user of a hashtag
def dm_hashtag(self, hashtag, message, media=None):
"""
Direct message to all user of a hashtag
:param hashtag: hashtag on which method have to dm
:param message: message to dm
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
recipient = self.api.get_user(tweet_id.user.screen_name)
#
# check if user is protected or dm is disabled
users = []
if not recipient.protected:
users.append(recipient.id)
for user in users:
try:
self.api.send_direct_message(recipient_id=user, text=message)
except Exception as e:
logging.error("Error: %s", e)
print(e)
logging.debug("Direct message to user sent: %s", recipient.screen_name)
logging.info("Direct message to user sent: %s", recipient.screen_name)
print(recipient.screen_name)
if media:
try:
self.api.media_upload(media)
self.api.send_direct_message(recipient_id=user, text=message, attachment_type="media",
attachment_media_id=self.api.media_upload(media).media_id)
except Exception as e:
logging.error("Error: %s", e)
print(e)
logging.debug("Direct message to user with media sent: %s", recipient.screen_name)
logging.info("Direct message to user with media sent: %s", recipient.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def update_profile(self, *args):
"""
Update profile
possible args:
name, url, location, description, profile_link_color, include_entities, skip_status
:param args:
:return:
"""
try:
self.api.update_profile(*args)
logging.debug("Profile updated")
logging.info("Profile updated")
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# follow all followers who have followed you
def follow_followers(self):
"""
Follow all followers who have followed you
:return:
"""
try:
for follower in tp.Cursor(self.api.followers).items():
if not follower.following:
follower.follow()
logging.debug("Followed: %s", follower.screen_name)
logging.info("Followed: %s", follower.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# unfollow all followers who have followed you
def unfollow_followers(self):
"""
Unfollow all followers who have followed you
:return:
"""
try:
for follower in tp.Cursor(self.api.followers).items():
if follower.following:
follower.unfollow()
logging.debug("Unfollowed: %s", follower.screen_name)
logging.info("Unfollowed: %s", follower.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# follow users with a keyword
def follow_keyword(self, keyword, count=1):
"""
Follow users with a keyword
:param keyword: keyword to search
:param count: number of users to follow
:return:
"""
try:
for tweet in tp.Cursor(self.api.search, q=keyword).items(count):
if not tweet.user.following:
tweet.user.follow()
logging.debug("Followed: %s", tweet.user.screen_name)
logging.info("Followed: %s", tweet.user.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# get latest tweets from twitter
def get_tweets(self, count=1):
"""
Get latest tweets from twitter
:param count: number of tweets to get
:return:
"""
try:
for tweet in tp.Cursor(self.api.home_timeline).items(count):
print("Tweet: %s" % tweet.text)
print("User: %s" % tweet.user.screen_name)
print("User id: %s" % tweet.user.id)
print("Date: %s" % tweet.created_at)
logging.debug("Tweet: Feched")
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# get all followers of a user
def get_followers(self, username=None):
"""
Get all followers of a user
Note: twitter api only have limit of of fetching limited requests at a time
:param username: username of user
:param count: number of followers to get
:return:
"""
if username is None:
username = self.api.me().screen_name
try:
count = 1
for follower in tp.Cursor(self.api.followers, screen_name=username).items():
# get total followers
print("Follower: %s" % follower.screen_name)
print("Follower Count: %s" % count)
print("Follower id: %s" % follower.id)
print("Follower date: %s" % follower.created_at)
count += 1
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def get_following(self, username=None):
"""
Get all following of a user
Note: twitter api only have limit of of fetching limited requests at a time
:param username: username of user
"""
if username is None:
username = self.api.me().screen_name
try:
count = 1
for follower in tp.Cursor(self.api.friends, screen_name=username).items():
# get total followers
print("Following: %s" % follower.screen_name)
print("Following Count: %s" % count)
print("Following id: %s" % follower.id)
print("Following date: %s" % follower.created_at)
count += 1
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def search_user(self, username):
"""
Search a user on twitter by username
:param username: username to search
:return:
"""
try:
self.user_info(username=username)
logging.debug("User searched: %s", username)
logging.info("User searched: %s", username)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# get random user and return username
def get_random_user(self, *args, count=1):
"""
Get random user
:param args:
pssible args:
keyword
:param count: number of users to get
:return: username
"""
try:
# get random user
for tweet in tp.Cursor(self.api.search, q=args).items(count):
return tweet.user.screen_name
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def get_hashtag_tweets(self, hashtag):
"""
Get tweets by hashtag
:param hashtag: hashtag to search
:return:
"""
try:
for tweet in tp.Cursor(self.api.search, q=hashtag).items():
print("Tweet: %s" % tweet.text)
print("User: %s" % tweet.user.screen_name)
print("User id: %s" % tweet.user.id)
print("Date: %s" % tweet.created_at)
logging.debug("Tweet: Feched")
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# get all direct messages
def get_messages(self, count=1):
"""
Get all direct messages
:param count: number of messages to get
:return:
"""
try:
message = self.api.list_direct_messages(count=count)
for msg in reversed(message):
sender = msg.message_create['sender_id']
recipient = msg.message_create['target']['recipient_id']
sender_name = self.api.get_user(sender).screen_name
recipient_name = self.api.get_user(recipient).screen_name
print("Sender: %s" % sender)
print("Sender name: %s" % sender_name)
print("Recipient: %s" % recipient)
print("Recipient name: %s" % recipient_name)
print("Message: %s" % msg.message_create['message_data']['text'])
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# | Twiker-Bot/twiker | twiker/modules/engine.py | engine.py | py | 26,243 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "twiker.modules.tauth.Auth",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",... |
26039112676 | from __future__ import annotations
from dataclasses import dataclass
from pants.core.goals.package import BuiltPackageArtifact
from pants.util.strutil import bullet_list, pluralize
@dataclass(frozen=True)
class BuiltDockerImage(BuiltPackageArtifact):
    # We don't really want a default for this field, but the superclass has a field with
    # a default, so all subsequent fields must have one too. The `create()` method below
    # will ensure that this field is properly populated in practice.
    image_id: str = ""
    tags: tuple[str, ...] = ()

    @classmethod
    def create(
        cls, image_id: str, tags: tuple[str, ...], metadata_filename: str
    ) -> BuiltDockerImage:
        """Build an instance with log lines describing the image and its tags."""
        if len(tags) == 1:
            tags_string = tags[0]
        else:
            tags_string = f"\n{bullet_list(tags)}"
        log_lines = (
            f"Built docker {pluralize(len(tags), 'image', False)}: {tags_string}",
            f"Docker image ID: {image_id}",
        )
        return cls(
            image_id=image_id,
            tags=tags,
            relpath=metadata_filename,
            extra_log_lines=log_lines,
        )
| pantsbuild/pants | src/python/pants/backend/docker/package_types.py | package_types.py | py | 1,072 | python | en | code | 2,896 | github-code | 6 | [
{
"api_name": "pants.core.goals.package.BuiltPackageArtifact",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.bullet_list",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pants.util.strutil.pluralize",
"line_number": 27,
"usage_typ... |
41939702684 | # translate exercise in python
# translate the file into Japanese
# requires the third-party package: pip install translate
from translate import Translator
translator = Translator(to_lang='ja')
try:
with open('test.txt', mode='r') as my_file:
text = my_file.read()
translation = translator.translate(text)
with open('./test-ja.txt', mode='w') as my_file2:
my_file2.write(translation)
except FileNotFoundError as e:
print('check your file silly!')
raise e
| hyraja/python-starter | 09.FILE I-O python/03.exercise_translator.py | 03.exercise_translator.py | py | 481 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "translate.Translator",
"line_number": 8,
"usage_type": "call"
}
] |
39920879314 | """
This module implement the ServiceProxy class.
This class is used to provide a local proxy to a remote service for a ZeroRobot.
When a service or robot ask the creation of a service to another robot, a proxy class is created locally
so the robot see the service as if it as local to him while in reality the service is managed by another robot.
"""
import urllib
from requests.exceptions import HTTPError
from jose import jwt
from js9 import j
from zerorobot.task import (TASK_STATE_ERROR, TASK_STATE_NEW, TASK_STATE_OK,
TASK_STATE_RUNNING, Task, TaskNotFoundError)
from zerorobot.template.state import ServiceState
class ServiceProxy():
    """
    This class is used to provide a local proxy to a remote service for a ZeroRobot.
    When a service or robot ask the creation of a service to another robot, a proxy class is created locally
    so the robot see the service as if it as local to him while in reality the service is managed by another robot.
    """

    def __init__(self, name, guid, zrobot_client):
        """
        @param name: name of the service
        @param guid: guid of the service
        @param zrobot_client: Instance of ZeroRobotClient that talks to the robot on which the
                              service is actually running
        """
        self._zrobot_client = zrobot_client
        self.name = name
        self.guid = guid
        # filled in by whoever instantiates the proxy
        self.template_uid = None
        # a proxy service doesn't have direct access to the data of it's remote homologue
        # cause data are always only accessible by the service itself and locally
        self._data = None
        self.task_list = TaskListProxy(self)

    def __repr__(self):
        # Provide a nice representation in tools like IPython / js9
        return "robot://%s/%s?%s" % (self._zrobot_client.instance, self.template_uid, urllib.parse.urlencode(dict(name=self.name, guid=self.guid)))

    @property
    def data(self):
        """Always None on a proxy: service data never leaves the owning robot."""
        return self._data

    @property
    def state(self):
        """Fetch the current state of the remote service (one API call)."""
        # TODO: handle exceptions
        service, _ = self._zrobot_client.api.services.GetService(self.guid)
        s = ServiceState()
        for state in service.state:
            s.set(state.category, state.tag, state.state.value)
        return s

    @property
    def actions(self):
        """
        list available actions of the services
        """
        actions, _ = self._zrobot_client.api.services.ListActions(self.guid)
        return sorted([a.name for a in actions])

    @property
    def logs(self):
        """Fetch the logs of the remote service.

        Raises RuntimeError with the server message on HTTP 400; other HTTP
        errors propagate unchanged.
        """
        try:
            logs, resp = self._zrobot_client.api.services.GetLogs(self.guid)
        except HTTPError as err:
            if err.response.status_code == 400:
                raise RuntimeError(err.response.json()['message'])
            raise err
        return logs.logs

    def schedule_action(self, action, args=None):
        """
        Do a call on a remote ZeroRobot to add an action to the task list of
        the corresponding service

        @param action: action is the name of the action to add to the task list
        @param args: dictionnary of the argument to pass to the action
        """
        req = {
            "action_name": action,
        }
        if args:
            req["args"] = args
        task, _ = self._zrobot_client.api.services.AddTaskToList(req, service_guid=self.guid)
        return _task_proxy_from_api(task, self)

    def delete(self):
        """Delete the remote service and drop its secret from the local client config."""
        self._zrobot_client.api.services.DeleteService(self.guid)
        # clean up secret from zrobot client
        for secret in list(self._zrobot_client.config.data['secrets_']):
            try:
                claims = jwt.get_unverified_claims(secret)
            except jwt.JWTError:
                # Only malformed tokens should be skipped; the original bare
                # ``except`` also swallowed KeyboardInterrupt and SystemExit.
                continue
            if claims.get('service_guid') == self.guid:
                self._zrobot_client.config.data['secrets_'].remove(secret)
                self._zrobot_client.config.save()
                return
class TaskListProxy:
    """Proxy to the task list of a remote service."""

    def __init__(self, service_proxy):
        self._service = service_proxy

    def empty(self):
        """Return True when the remote task list has no pending task."""
        pending, _ = self._service._zrobot_client.api.services.getTaskList(
            service_guid=self._service.guid, query_params={'all': False})
        return len(pending) <= 0

    def list_tasks(self, all=False):
        """Return the tasks of the remote service (all=True includes finished ones)."""
        tasks, _ = self._service._zrobot_client.api.services.getTaskList(
            service_guid=self._service.guid, query_params={'all': all})
        return [_task_proxy_from_api(task, self._service) for task in tasks]

    def get_task_by_guid(self, guid):
        """
        return a task from the list by it's guid
        """
        try:
            task, _ = self._service._zrobot_client.api.services.GetTask(
                service_guid=self._service.guid, task_guid=guid)
        except HTTPError as err:
            if err.response.status_code == 404:
                raise TaskNotFoundError("no task with guid %s found" % guid)
            raise err
        return _task_proxy_from_api(task, self._service)
class TaskProxy(Task):
    """
    class that represent a task on a remote service
    the state attribute is an property that do an API call to get the
    actual state of the task on the remote ZeroRobot
    """
    def __init__(self, guid, service, action_name, args, created):
        # func=None: a proxy task is never executed locally (see execute()).
        super().__init__(func=None, args=args)
        self.action_name = action_name
        self.service = service
        self.guid = guid
        self._created = created
    def execute(self):
        # Execution always happens on the robot that owns the service.
        raise RuntimeError("a TaskProxy should never be executed")
    @property
    def result(self):
        # Lazily fetched from the remote robot and cached locally once set.
        if self._result is None:
            task, _ = self.service._zrobot_client.api.services.GetTask(task_guid=self.guid, service_guid=self.service.guid)
            if task.result:
                self._result = j.data.serializer.json.loads(task.result)
        return self._result
    @property
    def duration(self):
        # Lazily fetched and cached; stays None while the task is running.
        if self._duration is None:
            task, _ = self.service._zrobot_client.api.services.GetTask(task_guid=self.guid, service_guid=self.service.guid)
            self._duration = task.duration
        return self._duration
    @property
    def state(self):
        # Never cached: the remote state can change between reads.
        task, _ = self.service._zrobot_client.api.services.GetTask(task_guid=self.guid, service_guid=self.service.guid)
        return task.state.value
    @state.setter
    def state(self, value):
        # State is owned by the remote robot; local mutation is forbidden.
        raise RuntimeError("you can't change the statet of a TaskProxy")
    @property
    def eco(self):
        # Error condition object of a failed task; lazily fetched and cached.
        if self._eco is None:
            task, _ = self.service._zrobot_client.api.services.GetTask(task_guid=self.guid, service_guid=self.service.guid)
            if task.eco:
                d_eco = task.eco.as_dict()
                d_eco['_traceback'] = task.eco._traceback
                self._eco = j.core.errorhandler.getErrorConditionObject(ddict=d_eco)
        return self._eco
def _task_proxy_from_api(task, service):
    """Build a TaskProxy from an API task object, pre-seeding the cached
    duration and error-condition object when the API already returned them."""
    t = TaskProxy(task.guid, service, task.action_name, task.args, task.created)
    if task.duration:
        t._duration = task.duration
    if task.eco:
        d_eco = task.eco.as_dict()
        d_eco['_traceback'] = task.eco._traceback
        t._eco = j.core.errorhandler.getErrorConditionObject(ddict=d_eco)
    return t
| BolaNasr/0-robot | zerorobot/service_proxy.py | service_proxy.py | py | 7,252 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "urllib.parse.urlencode",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "zerorobot.template.state.ServiceState",
"line_number": 55,
"usage_type": "call"
},
{
"ap... |
23313489127 | import bpy
class SFX_Socket_Float(bpy.types.NodeSocket):
    '''Custom node socket carrying a single float value.'''
    bl_idname = 'SFX_Socket_Float'
    bl_label = "Float"

    # Value carried by the socket. `float` is the property name the node tree
    # exposes, so shadowing the builtin is intentional here.
    float: bpy.props.FloatProperty(name = "Float",
                                   description = "Float",
                                   default = 0.0)
def draw(self, context, layout, node, text):
if self.is_output:
col = layout.column(align = True)
col1 = col.split(factor = 0.85)
col2 = col1.split(factor = 0.85)
col3 = col2.split(factor = 1)
col1.prop(self, "float", text='')
col2.label(text = text)
else:
col = layout.column(align = True)
col1 = col.split(factor = 0.30)
col2 = col1.split(factor = 0.5)
col3 = col2.split(factor = 1)
col1.label(text = '')
col3.prop(self, "float", text='')
col2.label(text = text)
# Socket color
def draw_color(self, context, node):
return (1.0, 0.4, 0.216, 0.5) | wiredworks/wiredworks_winches | sockets/SFX_Socket_Float.py | SFX_Socket_Float.py | py | 1,073 | python | en | code | 12 | github-code | 6 | [
{
"api_name": "bpy.types",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "bpy.props.FloatProperty",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bpy.props",
"line_number": 8,
"usage_type": "attribute"
}
] |
18003867595 | import torch
import torch.nn as nn
import torch.nn.functional as F
from algo.pn_utils.maniskill_learn.networks import build_model, hard_update, soft_update
from algo.pn_utils.maniskill_learn.optimizers import build_optimizer
from algo.pn_utils.maniskill_learn.utils.data import to_torch
from ..builder import MFRL
from algo.pn_utils.maniskill_learn.utils.torch import BaseAgent
@MFRL.register_module()
class TD3(BaseAgent):
    """Twin Delayed DDPG (TD3) model-free RL agent.

    Keeps target copies of the policy and the critic, updates the critic every
    step and the policy only every `policy_update_interval` steps.
    """

    def __init__(self, policy_cfg, value_cfg, obs_shape, action_shape, action_space, batch_size=128, gamma=0.99,
                 update_coeff=0.005, action_noise=0.2, noise_clip=0.5, policy_update_interval=2):
        super(TD3, self).__init__()
        # optimizer configs are popped so they are not passed to build_model
        policy_optim_cfg = policy_cfg.pop("optim_cfg")
        value_optim_cfg = value_cfg.pop("optim_cfg")

        self.gamma = gamma                                    # discount factor
        self.batch_size = batch_size
        self.update_coeff = update_coeff                      # target soft-update coefficient
        self.policy_update_interval = policy_update_interval  # delayed policy updates
        self.action_noise = action_noise                      # std of target-action smoothing noise
        self.noise_clip = noise_clip                          # clip range for that noise

        policy_cfg['obs_shape'] = obs_shape
        policy_cfg['action_shape'] = action_shape
        policy_cfg['action_space'] = action_space

        value_cfg['obs_shape'] = obs_shape
        value_cfg['action_shape'] = action_shape

        self.policy = build_model(policy_cfg)
        self.critic = build_model(value_cfg)

        self.target_policy = build_model(policy_cfg)
        self.target_critic = build_model(value_cfg)
        # start the targets as exact copies of the online networks
        hard_update(self.target_critic, self.critic)
        hard_update(self.target_policy, self.policy)

        self.policy_optim = build_optimizer(self.policy, policy_optim_cfg)
        self.critic_optim = build_optimizer(self.critic, value_optim_cfg)

    def update_parameters(self, memory, updates):
        """Run one TD3 update on a batch sampled from `memory`.

        Returns a dict of scalar diagnostics (losses and Q statistics).
        """
        sampled_batch = memory.sample(self.batch_size)
        sampled_batch = to_torch(sampled_batch, dtype='float32', device=self.device, non_blocking=True)
        # ensure 1-D tensors (rewards/dones) broadcast as column vectors
        for key in sampled_batch:
            if not isinstance(sampled_batch[key], dict) and sampled_batch[key].ndim == 1:
                sampled_batch[key] = sampled_batch[key][..., None]
        with torch.no_grad():
            # target-policy smoothing: perturb the target action with clipped noise
            _, _, next_mean_action, _, _ = self.target_policy(sampled_batch['next_obs'], mode='all')
            noise = (torch.randn_like(next_mean_action) * self.action_noise).clamp(-self.noise_clip, self.noise_clip)
            next_action = self.target_policy['policy_head'].clamp_action(next_mean_action + noise)
            q_next_target = self.target_critic(sampled_batch['next_obs'], next_action)
            # clipped double-Q: take the minimum over the critic heads
            min_q_next_target = torch.min(q_next_target, dim=-1, keepdim=True).values
            q_target = sampled_batch['rewards'] + (1 - sampled_batch['dones']) * self.gamma * min_q_next_target
        q = self.critic(sampled_batch['obs'], sampled_batch['actions'])
        # regress every critic head onto the shared target
        critic_loss = F.mse_loss(q, q_target.repeat(1, q.shape[-1])) * q.shape[-1]
        self.critic_optim.zero_grad()
        critic_loss.backward()
        self.critic_optim.step()

        if updates % self.policy_update_interval == 0:
            # delayed policy update: maximize the first critic head's value
            policy_loss = -self.critic(sampled_batch['obs'], self.policy(sampled_batch['obs'], mode='eval'))[
                ..., 0].mean()
            self.policy_optim.zero_grad()
            policy_loss.backward()
            self.policy_optim.step()
            # soft-update the targets towards the online networks
            soft_update(self.target_critic, self.critic, self.update_coeff)
            soft_update(self.target_policy, self.policy, self.update_coeff)
        else:
            policy_loss = torch.zeros(1)

        return {
            'critic_loss': critic_loss.item(),
            'q': torch.min(q, dim=-1).values.mean().item(),
            'q_target': torch.mean(q_target).item(),
            'policy_loss': policy_loss.item(),
        }
| PKU-EPIC/UniDexGrasp | dexgrasp_policy/dexgrasp/algo/pn_utils/maniskill_learn/methods/mfrl/td3.py | td3.py | py | 3,767 | python | en | code | 63 | github-code | 6 | [
{
"api_name": "algo.pn_utils.maniskill_learn.utils.torch.BaseAgent",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "algo.pn_utils.maniskill_learn.networks.build_model",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "algo.pn_utils.maniskill_learn.networks.bui... |
20602544780 | from sqlalchemy import create_engine, Column, Integer, String, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///test.db', echo=True)
Base = declarative_base(engine)
########################################################################
class CompanyModel(Base):
    """Declarative ORM model mapped onto the existing COMPANY table."""
    __tablename__ = 'COMPANY'
    # __table_args__ = {'autoload':True} # when auto is enabled manual mapping is not required
    ############BEGIN MANUAL MAPPING ################################################
    ID = Column(Integer, primary_key=True)
    NAME = Column(String)
    AGE = Column(Integer)
    ADDRESS = Column(String)
    SALARY = Column(Float)

    def __init__(self, id, name, age, address, salary):
        # fixed: the parameter was misspelled "addreess" while the body read
        # "address", so constructing this model always raised NameError
        # NOTE(review): these lowercase attributes do not feed the uppercase
        # Column attributes above, so ORM inserts would store NULLs -- confirm
        # whether overriding the declarative __init__ is intended
        self.id = id
        self.name = name
        self.age = age
        self.address = address
        self.salary = salary

    def __repr__(self):
        return "<Company - '%s': '%s' >" % (self.id, self.name )
# ######################END MANUAL MAPPING #############################################
#----------------------------------------------------------------------
def loadSession():
    """Create and return a new SQLAlchemy session bound to the module engine."""
    # removed unused local `metadata = Base.metadata`, a leftover from an
    # earlier reflection-based version
    Session = sessionmaker(bind=engine)
    session = Session()
    return session
if __name__ == "__main__":
    # smoke test: dump every COMPANY row to stdout
    session = loadSession()
    res = session.query(CompanyModel).all()
    for i in res:
        print (i.NAME+' '+ str(i.AGE) + ' ' + i.ADDRESS + ' ' + str(i.SALARY))
'''
could also do it using mapper
from sqlalchemy import create_engine, Column, MetaData, Table
from sqlalchemy import Integer, String, Text
from sqlalchemy.orm import mapper, sessionmaker
class Bookmarks(object):
pass
#----------------------------------------------------------------------
def loadSession():
""""""
dbPath = 'places.sqlite'
engine = create_engine('sqlite:///%s' % dbPath, echo=True)
metadata = MetaData(engine)
moz_bookmarks = Table('moz_bookmarks', metadata,
Column('id', Integer, primary_key=True),
Column('type', Integer),
Column('fk', Integer),
Column('parent', Integer),
Column('position', Integer),
Column('title', String),
Column('keyword_id', Integer),
Column('folder_type', Text),
Column('dateAdded', Integer),
Column('lastModified', Integer)
)
mapper(Bookmarks, moz_bookmarks)
Session = sessionmaker(bind=engine)
session = Session()
if __name__ == "__main__":
session = loadSession()
res = session.query(Bookmarks).all()
print res[1].title
'''
| BhujayKumarBhatta/flask-learning | flaskr/db/mysqlalchemy.py | mysqlalchemy.py | py | 2,971 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 14,
"usage_type": "call"
},
{
... |
23185192152 | #!/usr/bin/env python3
"""
Download the name of all games in the bundle.
Download their info and scrore from opencritic if they exist there.
Sort by score.
"""
import json
import urllib.request
import urllib.parse
from typing import List
from bs4 import BeautifulSoup
from typing_extensions import TypedDict
# One bundle entry plus the OpenCritic data filled in later; sentinel values
# ("", -1, []) mean "not fetched / no match found".
Game = TypedDict(
    "Game",
    {
        "name": str,          # title as listed on itch.io
        "itch": str,          # itch.io store URL
        "opencritic": str,    # OpenCritic page URL
        "steam": str,         # Steam store URL
        "score": int,         # OpenCritic median score
        "correct": float,     # search distance (lower = closer match)
        "description": str,
        "genres": List[str],
    },
)
def get_game_list() -> List[Game]:
    """
    Get the game list from the bundle.

    Parses a locally saved copy of the bundle page (itchio_520.html), because
    the live page needs scrolling before all entries are loaded.
    """
    # bundle_url = "https://itch.io/b/520/bundle-for-racial-justice-and-equality"
    # As I need to scroll to the bottom of the page I just used javascript to do
    # that and saved the resulted html into a file.
    with open("itchio_520.html", "r",) as inf:
        soup = BeautifulSoup(inf, "html.parser")
    games: List[Game] = []
    games_soup = soup.find_all("div", class_="index_game_cell_widget game_cell")
    for game in games_soup:
        info = game.find("div", class_="label").a.attrs
        # placeholder fields get filled in later by get_opencritic_info()
        games.append(
            {
                "name": info["title"],
                "itch": info["href"],
                "opencritic": "",
                "steam": "",
                "score": -1,
                "correct": -1,
                "description": "",
                "genres": [],
            }
        )
    return games
def get_opencritic_info(games: List[Game]) -> List[Game]:
    """
    Get information from opencritic regarding the game.

    Mutates each entry in place (score, links, description, genres) when a
    sufficiently close match is found, then returns the same list.
    """
    url_api_search = "https://api.opencritic.com/api/game/search?"
    url_api_game = "https://api.opencritic.com/api/game/{}"
    url_opencritic = "https://www.opencritic.com/game/{}/{}"
    url_steam = "https://store.steampowered.com/app/{}"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0"
    }
    for game in games:
        print("Getting info for: {}".format(game["name"]))
        # Search for the game; assume the first result is the best match
        url1 = url_api_search + urllib.parse.urlencode({"criteria": game["name"]})
        request1 = urllib.request.Request(url1, None, headers)
        results = json.loads(
            urllib.request.urlopen(request1).read().decode("utf-8")
        )
        if not results:
            # fixed: the original indexed [0] unconditionally and crashed
            # with IndexError when the search returned no candidates
            continue
        respond1 = results[0]
        # From 0.2 to 0.3 some games were correct some not
        if respond1["dist"] > 0.3:
            continue
        # Get game info
        url2 = url_api_game.format(respond1["id"])
        request2 = urllib.request.Request(url2, None, headers)
        respond2 = json.loads(
            urllib.request.urlopen(request2).read().decode("utf-8")
        )
        game["correct"] = respond1["dist"]
        game["score"] = respond2.get("medianScore", -1)
        game["opencritic"] = url_opencritic.format(respond1["id"], respond1["name"])
        game["steam"] = url_steam.format(respond2.get("steamId", ""))
        game["description"] = respond2.get("description", "")
        game["genres"] = [val["name"] for val in respond2.get("Genres", [])]
    return games
def sort_by_score(games: "List[Game]") -> "List[Game]":
    """Return the games ordered best-first by their 'score' value (stable sort)."""
    def score_of(game):
        return game['score']
    return sorted(games, key=score_of, reverse=True)
if __name__ == "__main__":
    print("Getting the game list")
    my_games = get_game_list()
    # keep a raw snapshot before enrichment, useful for debugging/reruns
    with open("all_games.json", "w") as outf:
        json.dump(my_games, outf, indent=2)
    print("Getting info from opencritic")
    my_games = get_opencritic_info(my_games)
    print("Sorting games")
    my_games = sort_by_score(my_games)
    with open("games.json", "w") as outf:
        json.dump(my_games, outf, indent=2)
| Hyerfatos/itchio_bundle_games | itch.py | itch.py | py | 3,789 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing_extensions.TypedDict",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "typing.List... |
28773188393 | """
examples
@when('the user searches for "{phrase}"')
def step_impl(context, phrase):
search_input = context.browser.find_element_by_name('q')
search_input.send_keys(phrase + Keys.RETURN)
@then('results are shown for "{phrase}"')
def step_impl(context, phrase):
links_div = context.browser.find_element_by_id('links')
assert len(links_div.find_elements_by_xpath('//div')) > 0
search_input = context.browser.find_element_by_name('q')
assert search_input.get_attribute('value') == phrase
"""
import time
from behave import *
from selenium.common.exceptions import (NoAlertPresentException,
NoSuchElementException,
TimeoutException)
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as cond
from selenium.webdriver.support.ui import WebDriverWait
ROCKETMILES_HOME = "https://www.rocketmiles.com"
LOCATION_TO_SELECT = "Peoria, IL, USA"
OPENING_MODAL_CLOSE_XPATH = '//div[@id="new-sign-up-modal"]//button[@class="close"]'
SUBMIT_BUTTON = (
'//button[@class="rm-btn-orange search-submit-btn"]/span[@class="ng-scope"]'
)
@given("a web browser is at the rocketmiles home page")
def step_impl(context):
context.browser.get(ROCKETMILES_HOME)
time.sleep(20)
try:
# Remove initial modal if present
context.browser.find_element(By.XPATH, '//div[@id="new-sign-up-modal"]')
context.browser.find_element(By.XPATH, OPENING_MODAL_CLOSE_XPATH).click()
except NoSuchElementException:
# This modal is only displayed on first startup so
# we don't expect to see it for subsequent tests
pass
@given("the location is blank")
def step_impl(context):
location_dropdown = context.browser.find_element(By.NAME, "locationSearch")
location_dropdown.clear()
wait_for_it(context.browser, location_dropdown)
selected_location = location_dropdown.text
assert selected_location == ""
@when("a search is initiated")
def step_impl(context):
submit_btn = context.browser.find_element(By.XPATH, SUBMIT_BUTTON)
click_on_it(context.browser, submit_btn)
time.sleep(4)
@then("the missing location error is shown")
def step_impl(context):
location_error_modal = context.browser.find_element(
By.XPATH, '//div[@class="popover-inner"]//div[@class="popover-content ng-binding"]'
)
wait_for_it(context.browser, location_error_modal)
time.sleep(4)
@given("the reward program is blank")
def step_impl(context):
rewards_dropdown = context.browser.find_element(By.NAME, "programAutosuggest")
rewards_dropdown.clear()
wait_for_it(context.browser, rewards_dropdown)
selected_program = rewards_dropdown.text
assert selected_program == ""
@given("the location is not blank")
def step_impl(context):
location_dropdown = context.browser.find_element(By.NAME, "locationSearch")
location_dropdown.click()
time.sleep(4)
location_dropdown.send_keys(LOCATION_TO_SELECT[:-4])
time.sleep(4)
location_dropdown.send_keys(Keys.ARROW_DOWN + Keys.RETURN)
time.sleep(4)
selected_location = location_dropdown.text
print("selected_location is {}".format(selected_location))
assert selected_location == LOCATION_TO_SELECT
@then("the missing reward program error is shown")
def step_impl(context):
reward_error_modal = context.browser.find_element(
By.XPATH, '//div[@class-="popover-title"]a//div[@class="popover-content"]'
)
wait_for_it(context.browser, reward_error_modal)
@When("blank")
def step_impl(context):
raise NotImplementedError("STEP: blank")
pass
def click_on_it(driver, element):
    """Move the mouse to *element* and click it via a single action chain."""
    ActionChains(driver).move_to_element(element).click().perform()
def wait_for_it(driver, element):
    """Wait up to 10s for *element* to become visible, then return it."""
    try:
        WebDriverWait(driver, 10).until(cond.visibility_of(element))
    except (NoAlertPresentException, TimeoutException) as py_ex:
        # NOTE(review): the message is misleading -- this branch usually fires
        # on a visibility timeout, not an alert problem; consider rewording
        print("Alert not present")
        print(py_ex)
        print(py_ex.args)
    return element
| kevindvaf/rocketmiles | features/steps/search.py | search.py | py | 4,218 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "time.sleep",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 43,
"usage_type": "name"
},
{
... |
34421435243 | import numpy as np
import pandas as pd
import json
import argparse
import catboost
from catboost import CatBoostClassifier, Pool, metrics, cv
from catboost.utils import get_roc_curve, get_confusion_matrix, eval_metric
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
def build_model(**kwargs):
    """Create a CatBoost classifier that additionally tracks Accuracy,
    forwarding every keyword argument to the constructor."""
    return CatBoostClassifier(custom_loss=[metrics.Accuracy()], **kwargs)
def plot_roc_curves(model_list, X, Y, labels, cat_features_indices):
    """Plot ROC curves for up to three fitted models on one figure.

    Writes roc_curve_result.png to the working directory.
    """
    k = len(model_list)
    assert(k <= 3)  # only three plot colors are defined below
    color_list = ['blue', 'green', 'red']
    plt.title('ROC for models')
    for m, color, label in zip(model_list, color_list[:k], labels[:k]):
        fpr, tpr, _ = get_roc_curve(m, Pool(X, Y, cat_features=cat_features_indices))
        plt.plot(fpr, tpr, color=color, label=label, linewidth=0.5)
    plt.legend()
    plt.grid(True, linewidth=0.75)
    plt.savefig('roc_curve_result.png', dpi=150)
def stats(model_list, X_test, Y_test, cat_features_indices):
    """Print confusion matrix, AUC and feature importances for each model.

    Returns the list of AUC scores, one per model in model_list.
    """
    auc_scores = []
    print('Models info:')
    for k in range(0, len(model_list)):
        m = model_list[k]
        pr_prob = m.predict_proba(X_test)
        pr = m.predict(X_test)
        ans = Y_test.to_numpy()
        # idiom fix: `True if i == j else False` is just `i == j`
        check = [i == j for i, j in zip(ans, pr)]
        cm = get_confusion_matrix(m, Pool(X_test, Y_test, cat_features=cat_features_indices))
        auc_scores.append(eval_metric(ans, pr_prob, 'AUC')[0])
        print(f'\nModel {k} confusion_matrix:\n', cm)
        print('AUC:', auc_scores[k])
        print(f'Correct predictions: {check.count(True)}/{Y_test.shape[0]}\n')
        print(m.get_feature_importance(prettified=True))
    return auc_scores
def cv_models(model_list, X, Y, cat_features_indices):
    """Cross-validate each model and print mean/std accuracy at its best step."""
    print('CV:')
    for k in range(0, len(model_list)):
        m = model_list[k]
        cv_params = m.get_params()
        # cv() requires an explicit loss; reuse Logloss for the binary target
        cv_params.update(
            {'loss_function': metrics.Logloss()
            })
        cv_data = cv(Pool(X, Y, cat_features = cat_features_indices), cv_params, logging_level='Silent')
        # pick the iteration with the best mean CV accuracy
        best_step = np.argmax(cv_data['test-Accuracy-mean'])
        print('- Mean: ', cv_data['test-Accuracy-mean'][best_step])
        print('- Std: ', cv_data['test-Accuracy-std'][best_step])
        print('- Best step: ', best_step)
def split_dataset(df):
    """Split into train/val/test as 80/10/10 with a fixed seed.

    Drops the raw 'score' column from the features; 'class' is the target.
    """
    seed = 123
    Y = df['class']
    X = df.drop(['class','score'], axis=1)
    X_train, X_, Y_train, Y_ = train_test_split(X, Y, train_size=0.8, random_state=seed)
    # second split halves the held-out 20% into validation and test
    X_val, X_test, Y_val, Y_test = train_test_split(X_, Y_, train_size=0.5, random_state=seed)
    return X_train, X_val, X_test, Y_train, Y_val, Y_test
def build_dataset(dataset_filename):
    """Load the scraped CSV and derive an integer class label.

    The label is floor(score / 50), so scores below 50 map to class 0.
    """
    threshold_score = 50
    frame = pd.read_csv(dataset_filename, index_col=0)
    frame['class'] = frame['score'] // threshold_score
    return frame
if __name__ == "__main__":
    # CLI entry point: train the configured models, report their quality,
    # plot ROC curves and finally cross-validate them
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_file', type=str, required=True)
    parser.add_argument('--model_params_file', type=str, required=True)
    args = parser.parse_args()

    dataset_filename = args.dataset_file
    models_params_filename = args.model_params_file

    df = build_dataset(dataset_filename)
    X_train, X_val, X_test, Y_train, Y_val, Y_test = split_dataset(df)
    # treat every non-float column as categorical for CatBoost
    cat_features_indices = np.where(X_test.dtypes != float)[0]

    labels, params, models = [], {}, []
    with open(models_params_filename, 'r') as f:
        params = json.load(f)
    labels = list(params.keys())
    for label in labels:
        models.append(build_model(**params[label]))
    for m in models:
        m.fit(X_train, Y_train, eval_set=(X_val, Y_val), cat_features=cat_features_indices)

    auc_scores = stats(models, X_test, Y_test, cat_features_indices)
    # append each model's AUC to its legend label
    for i in range(0, len(labels)):
        labels[i] = labels[i] + f': AUC={auc_scores[i]:.3f}'
    plot_roc_curves(models, X_test, Y_test, labels, cat_features_indices)
    cv_models(models, X_train, Y_train, cat_features_indices)
| mihael-tunik/SteppingStonesCatboost | classifier.py | classifier.py | py | 4,248 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "catboost.CatBoostClassifier",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "catboost.metrics.Accuracy",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "catboost.metrics",
"line_number": 17,
"usage_type": "name"
},
{
"api_name":... |
16208817026 | #-*- coding: UTF-8 -*-
'''
@author: chenwuji
读取原始文件 将脚本保存为按照天的文件
'''
import tools
alldata = {}
map_dict = {}
global_count = 1
def read_data(filename):
    """Parse a raw SQL dump of traffic records and append one CSV line per
    record into data_sort_by_date/<date>.csv, keyed by the record's date.

    Vehicle ids are anonymized through the module-level map_dict, which maps
    each raw id to a sequential integer taken from global_count.
    (Python 2 code: relies on str.decode('GBK').)
    """
    # fixed: `global` must be declared before global_count is first read or
    # assigned in this scope -- the original declaration sat after the first
    # read, which is a SyntaxError on Python 3
    global global_count
    f = open(filename)
    for eachline in f:
        # keep only INSERT lines carrying a "values (" payload
        if len(eachline.split('values (')) < 2:
            continue
        eachline = eachline.decode('GBK').encode('UTF-8')
        basic_list1 = eachline.split('\n')[0].split('\t')[0].split('values (')[1].split('to_timestamp')[0].split(',')
        intersection_name = basic_list1[0].split('\'')[1]
        lane_num = basic_list1[1]
        if len(basic_list1[2].split('\'')) > 1:
            direction = basic_list1[2].split('\'')[1]
        else:
            direction = basic_list1[2]
        id = basic_list1[3].split('\'')[1]
        # anonymize the raw vehicle id with a sequential integer
        # (idiom fix: `in` instead of calling __contains__ directly)
        if id in map_dict:
            id = map_dict[id]
        else:
            map_dict[id] = global_count
            id = global_count
            global_count = global_count + 1
        vehicle_color = basic_list1[4].split('\'')[1]
        time = eachline.split('to_timestamp(\'')[1].split('.')[0]
        speed = int(eachline.split('HH24:MI:SS.ff\'),\'')[1].split('\'')[0])
        tools.writeToFile('data_sort_by_date/' + time.split(' ')[0] + '.csv', str(id) + ',' + intersection_name + ',' + lane_num
                          + ',' + direction + ',' + vehicle_color + ',' + time + ',' + str(speed))
    # fixed: close the dump file instead of leaking the handle
    f.close()
if __name__ == '__main__':
    # process the three monthly dump files, then persist the id mapping
    filename = 'data/20160301-10.sql'
    read_data(filename)
    filename = 'data/20160311-20.sql'
    read_data(filename)
    filename = 'data/20160320-31.sql'
    read_data(filename)
    tools.toFileWithPickle('mapping_dict', map_dict)
| chenwuji91/vehicle | src_1_sql_to_day_data/data_process.py | data_process.py | py | 1,769 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tools.writeToFile",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tools.toFileWithPickle",
"line_number": 53,
"usage_type": "call"
}
] |
37056623803 | import os
import numpy as np
from sklearn import datasets
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.svm import SVC
from sklearn.externals import joblib
from utils import save_answer
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
# cached GridSearchCV dump, reused by find_best_C to skip refitting
MODEL_DUMP_PATH = os.path.join(BASE_DIR, 'clf.joblib')
# NOTE(review): filename typo ("anwser") kept as-is; run() writes 'answer.txt'
ANSWER_PATH = os.path.join(BASE_DIR, 'anwser.txt')

# binary subset of 20newsgroups: atheism vs space
newsgroups = datasets.fetch_20newsgroups(
    subset='all',
    categories=['alt.atheism', 'sci.space']
)
def initialize_classifier(X, y) -> GridSearchCV:
    """Grid-search C for a linear SVM with 5-fold CV; dump and return the search."""
    grid = {'C': np.power(10.0, np.arange(-5, 6))}  # C in 1e-5 .. 1e5
    cv = KFold(n_splits=5, shuffle=True, random_state=241)
    clf = SVC(kernel='linear', random_state=241)
    gs = GridSearchCV(clf, grid, scoring='accuracy', cv=cv)
    gs.fit(X, y)
    # cache the fitted search so later runs can skip the expensive fit
    # NOTE(review): sklearn.externals.joblib is removed in modern sklearn;
    # prefer the standalone joblib package
    joblib.dump(gs, MODEL_DUMP_PATH)
    return gs
def load_classifier() -> GridSearchCV:
    """Load the previously dumped grid search from disk."""
    return joblib.load(MODEL_DUMP_PATH)
def find_best_C(X, y):
    """Fit (or load a cached) grid search and print each tried C together
    with its mean CV accuracy."""
    if os.path.exists(MODEL_DUMP_PATH):
        clf = load_classifier()
    else:
        clf = initialize_classifier(X, y)
    # idiom fix: plain subscription instead of calling __getitem__ directly
    print(clf.cv_results_['params'])
    print(clf.cv_results_['mean_test_score'])
def run():
    """Train a linear SVM on TF-IDF features and report the ten words with
    the largest absolute weights, saved alphabetically to answer.txt."""
    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(newsgroups.data)
    y = newsgroups.target

    clf = SVC(kernel='linear', random_state=241)
    clf.fit(X, y)

    names = vectorizer.get_feature_names()
    arr = clf.coef_.toarray()
    # work with absolute weights; row 0 is the only row for a binary problem
    arr[0] = [abs(v) for v in arr[0]]
    # reorder columns by descending absolute weight
    sorted_weights = arr[::, arr[0, :].argsort()[::-1]]
    top_10_weights = sorted_weights[0, :10]
    words = list()
    for w in top_10_weights:
        # NOTE(review): np.where may match several columns when two weights
        # tie exactly; only the first hit is used here
        index = np.where(arr == w)
        word_index = index[1][0]
        words.append(names[word_index])
    words.sort()
    print('Most weight words are:', words)
    save_answer(os.path.join(BASE_DIR, 'answer.txt'), ','.join(words))
| Nick-Omen/coursera-yandex-introduce-ml | lessons/article/main.py | main.py | py | 1,903 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"lin... |
30066696464 | import logging
import os
from PIL import Image
from PIL.ExifTags import TAGS
class Utils:
    """Stateless helpers for EXIF extraction and image-file discovery."""

    @staticmethod
    def extract_exif_data(image: Image) -> dict:
        """Return the image's EXIF data as a {tag_name: value} dict.

        (Annotation fixed from the literal `{}` to `dict`.)
        """
        map_tag_dict = {}
        exif_data = image.getexif()
        for tag_id in exif_data:
            # translate the numeric tag id to a readable name when known
            tag = TAGS.get(tag_id, tag_id)
            data = exif_data.get(tag_id)
            map_tag_dict[tag] = data
        return map_tag_dict

    @staticmethod
    def gather_images_from_path(path: str) -> list:
        """Return full paths of files in *path* with a known image extension.

        (Annotation fixed from the literal `[]` to `list`.)
        """
        images = []
        valid_images = [".jpg", ".gif", ".png", ".tga"]
        for file in os.listdir(path):
            extension = os.path.splitext(file)[1]
            if extension.lower() not in valid_images:
                continue
            images.append(os.path.join(path, file))
        return images
class CustomFormatter(logging.Formatter):
    """logging.Formatter that wraps each record in a per-level ANSI color."""

    grey = "\x1b[38;21m"
    yellow = "\x1b[33;21m"
    red = "\x1b[31;21m"
    blue = "\x1b[34m"
    bold_red = "\x1b[31;1m"
    reset = "\x1b[0m"
    # fixed: the layout string used to be named `format`, confusingly shadowing
    # (and then being shadowed by) the format() method below
    _FMT = "[%(filename)s:%(lineno)d] %(levelname)s: %(message)s "

    FORMATS = {
        logging.DEBUG: blue + _FMT + reset,
        logging.INFO: grey + _FMT + reset,
        logging.WARNING: yellow + _FMT + reset,
        logging.ERROR: red + _FMT + reset,
        logging.CRITICAL: bold_red + _FMT + reset
    }

    def format(self, record):
        """Format *record* with the color template matching its level."""
        log_fmt = self.FORMATS.get(record.levelno)
        formatter = logging.Formatter(log_fmt)
        return formatter.format(record)
def get_logging_handler():
    """Return a stderr StreamHandler wired to the colored CustomFormatter."""
    handler = logging.StreamHandler()
    handler.setFormatter(CustomFormatter())
    return handler
| greencashew/image-captioner | imagecaptioner/utils.py | utils.py | py | 1,630 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PIL.Image",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PIL.ExifTags.TAGS.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PIL.ExifTags.TAGS",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"l... |
11814433027 | from get_url import GetUrl
import requests
from bs4 import BeautifulSoup
class GetText():
def __init__(self, area):
self.got_url = GetUrl()
self.url = self.got_url.get_url(area)
def get_url(self):
url = self.url
return url
def get_text(self):
err_text = '以下の地域から選んでください\n北海道\n東北\n関東\n信越・北陸\n東海\n近畿\n中国\n四国\n九州\n沖縄\n'
url = self.get_url()
if url is None:
return err_text
# else:
def get_info(self, url):
html = requests.get(url)
soup = BeautifulSoup(html.text, "html.parser")
text_list = soup.select("#main > div > p")
text_sorce = str(text_list[0])
text = text_sorce.strip('<p class="gaikyo">').strip('</p>').replace('<br/>\n', '')
return text
| yutatakaba/weather_apr | get_text.py | get_text.py | py | 761 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "get_url.GetUrl",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 24,
"usage_type": "call"
}
] |
74160534269 | import datetime
import enum
import os
import signal
import subprocess
import sys
import time
import typing
from logging import getLogger
from threading import Thread
import requests
from slugify import slugify
from config import Setting, client_id, client_secret
from util import file_size_mb, get_setting
logger = getLogger(__name__)
class TwitchResponseStatus(enum.Enum):
    """Outcome of querying the Twitch Helix streams endpoint."""
    ONLINE = 0
    OFFLINE = 1
    NOT_FOUND = 2      # user lookup returned 404
    UNAUTHORIZED = 3   # token rejected (401)
    ERROR = 4          # any other failure
class RecordingDetail(typing.TypedDict):
    """Shape of one entry produced by Twitch.get_recordings()."""
    path: str        # file name relative to recorded/<user>/
    processed: bool  # True once a copy exists under processed/<user>/
    # added: get_recordings() also emits this key (via file_size_mb,
    # presumably megabytes) but it was missing from the declared shape
    size: float
class Twitch:
def __init__(self) -> None:
# twitch stuff
self.client_id = client_id
self.client_secret = client_secret
self.api_url = "https://api.twitch.tv/helix/streams"
self.token_url = f"https://id.twitch.tv/oauth2/token?client_id={self.client_id}&client_secret={self.client_secret}&grant_type=client_credentials"
self.access_token = self.fetch_access_token()
# streams
self.root_path = os.getcwd()
self.streams: dict[str, subprocess.Popen[bytes]] = {}
self.processing_loop = self.init_process_loop()
# setup directories
[os.makedirs(path) for path in [os.path.join(self.root_path, "recorded"), os.path.join(self.root_path, "processed")] if not os.path.isdir(path)]
    def fetch_access_token(self) -> str:
        """
        Fetch a fresh OAuth2 access token from Twitch

        NOTE(review): on a request failure the error is only logged and the
        method implicitly returns None, which callers then embed in the
        Authorization header -- consider re-raising instead.
        """
        try:
            token_response = requests.post(self.token_url, timeout=15)
            token_response.raise_for_status()
            token = token_response.json()
            logger.info(f'Connected to Twitch with client_id={self.client_id}')
            return token["access_token"]
        except requests.exceptions.RequestException as e:
            logger.error(e)
def stream_status(self, username: str):
"""
Determine if a Twitch user is streaming
"""
info = None
status = TwitchResponseStatus.ERROR
try:
headers = {"Client-ID": self.client_id,
"Authorization": f"Bearer {self.access_token}"}
r = requests.get(
f"{self.api_url}?user_login={username}", headers=headers, timeout=15)
r.raise_for_status()
info = r.json()
if info is None or not info["data"]:
status = TwitchResponseStatus.OFFLINE
logger.info(f"Streamer {username} is offline")
else:
status = TwitchResponseStatus.ONLINE
logger.info(f"Streamer {username} is online and streaming")
except requests.exceptions.RequestException as e:
if e.response:
if e.response.status_code == 401:
status = TwitchResponseStatus.UNAUTHORIZED
if e.response.status_code == 404:
status = TwitchResponseStatus.NOT_FOUND
return status, info
    def start_watching(self, username: str):
        """
        Start watching a Twitch stream

        Spawns a streamlink subprocess recording into recorded/<username>/.

        Returns:
            bool indicate if stream started recording or not
            str | None any errors if bool was false
        """
        status, info = self.stream_status(username)
        if status is not TwitchResponseStatus.ONLINE:
            logger.error('{} is not online'.format(username))
            return False, f"{username} is not streaming"
        else:
            recorded_path = os.path.join(self.root_path, "recorded", username)
            if not os.path.isdir(recorded_path):
                os.makedirs(recorded_path)
            channels = info["data"]
            channel = next(iter(channels), None)
            # file name: <timestamp>_<slugified stream title>.mp4
            filename = '_'.join([datetime.datetime.now().strftime(
                "%Y-%m-%d_%H-%M-%S"), slugify(channel.get("title"))]) + '.mp4'
            recorded_filename = os.path.join(recorded_path, filename)

            self.streams[username] = subprocess.Popen(
                ["streamlink", "--twitch-disable-ads", f"twitch.tv/{username}", "best", "-o", recorded_filename])

            # keep checking until file exists or timer runs out
            start = time.time()
            while True:
                if os.path.isfile(recorded_filename) or time.time() - start > 5:
                    break
                else:
                    time.sleep(0.5)
                    continue
            return True, None
    def stop_watching(self, username):
        """
        Stop watching a Twitch stream

        Terminates the streamlink subprocess and optionally remuxes the
        latest recording when AutoProcessRecordings is enabled.
        """
        if username not in self.streams.keys():
            logger.error('could not stop, not watching {}'.format(username))
            return False, f"Not watching {username}"
        else:
            # stop recording process
            proc = self.streams[username]
            proc.send_signal(signal.SIGTERM)
            self.streams.pop(username)
            time.sleep(0.5)

            # process recording
            if get_setting(Setting.AutoProcessRecordings) == True:
                recordings = self.get_recordings()
                if username in recordings.keys():
                    # NOTE(review): "latest" is the last entry as listed by
                    # os.listdir, whose order is not guaranteed -- confirm
                    video = recordings[username][len(recordings[username]) - 1]
                    self.process_recording(f"{username}/{video['path']}")
            return True, None
def get_recordings(self):
    """Return a mapping of username -> list of recording details.

    Each detail dict has keys ``path`` (file name), ``processed``
    (whether an ffmpeg-processed copy exists) and ``size`` (MB of the
    processed copy when present, else of the raw recording).
    """
    recordings: dict[str, list[RecordingDetail]] = {}
    recordings_dir = os.path.join(self.root_path, "recorded")
    # nothing has been recorded yet -> nothing to report (the original
    # raised FileNotFoundError from os.listdir here)
    if not os.path.isdir(recordings_dir):
        return recordings

    def is_processed(user, video):
        return os.path.isfile(os.path.join(self.root_path, "processed", user, video))

    def size_of(user, video):
        # prefer the processed copy's size when it exists
        subdir = "processed" if is_processed(user, video) else "recorded"
        return file_size_mb(os.path.join(self.root_path, subdir, user, video))

    for user in os.listdir(recordings_dir):
        # skip macOS Finder metadata entries
        if user == '.DS_Store':
            continue
        recordings[user] = [
            dict(path=f, processed=is_processed(user, f), size=size_of(user, f))
            for f in os.listdir(os.path.join(recordings_dir, user))
            if f.endswith('.mp4')
        ]
    return recordings
def process_recording(self, file_path: str):
    """Re-mux a raw recording with ffmpeg into the processed folder.

    Args:
        file_path: relative path like ``tsm_imperialhal/some_video.mp4``.

    Raises:
        FileNotFoundError: when the raw recording does not exist.
    """
    source = os.path.join(self.root_path, "recorded", file_path)
    if not os.path.isfile(source):
        raise FileNotFoundError(
            f"source file {source} does not exist, cannot process")
    user = file_path.split('/')[0]
    # exist_ok avoids a race between checking and creating the directory
    os.makedirs(os.path.join(self.root_path, "processed", user), exist_ok=True)
    dest = os.path.join(self.root_path, "processed", file_path)
    try:
        # copy streams without re-encoding; check=True surfaces a
        # non-zero ffmpeg exit code instead of silently ignoring it
        subprocess.run(['ffmpeg', '-err_detect', 'ignore_err',
                        '-i', source, '-c', 'copy', dest, '-y'], check=True)
    except Exception as e:
        logger.error(e)
def delete_recording(self, file_path: str):
    """Delete both the raw and the processed copy of a recording.

    ``file_path`` is relative, e.g. ``tsm_imperialhal/some_video.mp4``;
    copies that do not exist are silently skipped.
    """
    for subdir in ("recorded", "processed"):
        candidate = os.path.join(self.root_path, subdir, file_path)
        if os.path.isfile(candidate):
            os.remove(candidate)
def init_process_loop(self):
    """Start a daemon thread that periodically processes recordings.

    Only active when the AutoProcessRecordings setting is enabled.
    Streamers that are currently being recorded are skipped until
    their stream ends.
    """
    if not get_setting(Setting.AutoProcessRecordings):
        return

    def run_loop():
        logger.info('starting background processing loop...')
        while True:
            for streamer, recordings in self.get_recordings().items():
                # never touch files that are still being written to
                if streamer in self.streams:
                    continue
                for r in recordings:
                    if not r["processed"]:
                        logger.info(
                            f"processing saved stream: [{streamer}] -> {r['path']}")
                        self.process_recording(f"{streamer}/{r['path']}")
            time.sleep(10)

    # daemon=True so this endless loop does not block interpreter exit
    self.processing_loop = Thread(target=run_loop, daemon=True)
    self.processing_loop.start()
| bcla22/twitch-multistream-recorder | twitch.py | twitch.py | py | 8,358 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "typing.TypedDict",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "config.client_... |
13412454502 | # 自己设计的CNN模型
import torch.nn as nn
import torch.nn.functional as F
class ConvolutionalNetwork(nn.Module):
    """A small CNN classifier: two conv/pool stages followed by three
    fully-connected layers, ending in log-softmax over 6 classes.

    Expects RGB input; the 54*54*16 flatten size corresponds to
    224x224 input images.
    """

    def __init__(self):
        super().__init__()
        # feature extractor: 3 (RGB) -> 6 -> 16 feature maps, 3x3 kernels
        self.conv1 = nn.Conv2d(3, 6, 3, 1)
        self.conv2 = nn.Conv2d(6, 16, 3, 1)
        # classifier head
        self.fc1 = nn.Linear(54 * 54 * 16, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 6)  # 6 output classes

    def forward(self, X):
        # two conv -> ReLU -> 2x2 max-pool stages
        for conv in (self.conv1, self.conv2):
            X = F.max_pool2d(F.relu(conv(X)), 2, 2)
        X = X.view(-1, 54 * 54 * 16)  # flatten each sample
        X = F.relu(self.fc1(X))
        X = F.relu(self.fc2(X))
        return F.log_softmax(self.fc3(X), dim=1)
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numbe... |
35835057096 | from re import compile
from utils import BasicError
# Class for Tokens
class Token():
    """A lexical token: a type name, a value and source positions."""

    def __init__(self, type_name, value, pos_start, pos_end):
        self.type = type_name
        self.value = value
        self.pos_start = pos_start
        self.pos_end = pos_end

    def __repr__(self):
        return f"{self.type}:{self.value}"


# Class for Lexer
class Lexer():
    """Regex-driven lexer.

    Token types are registered with :meth:`register`; :meth:`tokenize`
    then turns a program string into a list of :class:`Token` objects.
    ``skip`` is a regex for character runs to ignore (whitespace by default).
    """

    def __init__(self, token_types=None, skip=r"\s+"):
        # A fresh list per instance: the original used `token_types=list()`
        # as the default, a mutable default shared by every Lexer, so
        # register() on one lexer leaked token types into all others.
        self.token_types = [] if token_types is None else token_types
        self.skip = compile(skip)

    def next_token(self):
        """Return the next Token, or None at end of input.

        Raises:
            BasicError: when no registered token type matches.
        """
        # Ignore skippable characters (whitespace by default)
        skip_match = self.skip.match(self.program, self.position)
        if skip_match:
            self.position = skip_match.end()
        # Checks if we are at the very end of the program to be lexed
        if self.position >= len(self.program):
            return None
        # Try each registered token type, in registration order
        for tkn_t in self.token_types:
            result = tkn_t["regx"].match(self.program, self.position)
            if result:
                lexeme = result.group(0)
                # Create a Token covering the matched span
                tkn = Token(tkn_t["name"], lexeme,
                            self.position,
                            self.position + len(lexeme) - 1)
                # Apply the user-provided modifier function, if any
                if tkn_t["mod"]:
                    tkn.value = tkn_t["mod"](tkn.value)
                self.position = result.end()
                return tkn
        raise BasicError(f"Lexer Error: Unknown Token at {self.position + 1}",
                         self.position, self.position)

    def tokenize(self, program):
        """Return the list of Tokens for *program*, lexing until EOL."""
        self.program = program
        self.position = 0
        list_token = []
        while True:
            tkn = self.next_token()
            if tkn is None:
                break
            list_token.append(tkn)
        return list_token

    def register(self, name, regx, modifier=None):
        """Register a token type by name and regex; *modifier*, when
        given, post-processes the matched string into the token value."""
        self.token_types.append({"name": name, "regx": compile(regx),
                                 "mod": modifier})
| shaleen111/pyqb | lexer.py | lexer.py | py | 2,416 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "utils.BasicError",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 71,
"usage_type": "call"
}
] |
11463544163 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 11 12:08:53 2022
@author: sampasmann
"""
import sys
sys.path.append("../../")
import os
from src.init_files.mg_init import MultiGroupInit
import numpy as np
import matplotlib.pyplot as plt
# Number of spatial cells for each multigroup initialization
Nx = 1
# Reference solutions for the 12-, 70- and 618-group HDPE problems
data12 = MultiGroupInit(numGroups=12, Nx=Nx)
data70 = MultiGroupInit(numGroups=70, Nx=Nx)
data618 = MultiGroupInit(numGroups=618, Nx=Nx)
# Resolve the HDPE material-data directory relative to this script
script_dir = os.path.dirname(__file__)
rel_path = "../../src/materials/HDPE/"
abs_file_path = os.path.join(script_dir, rel_path)
# Energy-group center points (MeV) for each group structure
centers12 = np.genfromtxt(abs_file_path+"group_centers_12G_HDPE.csv", delimiter=",")
centers70 = np.genfromtxt(abs_file_path+"group_centers_70G_HDPE.csv", delimiter=",")
centers618 = np.genfromtxt(abs_file_path+"group_centers_618G_HDPE.csv", delimiter=",")
# Energy-group edges (MeV) for each group structure
edges12 = np.genfromtxt(abs_file_path+"group_edges_12G_HDPE.csv", delimiter=",")
edges70 = np.genfromtxt(abs_file_path+"group_edges_70G_HDPE.csv", delimiter=",")
edges618 = np.genfromtxt(abs_file_path+"group_edges_618G_HDPE.csv", delimiter=",")
# Energy bin widths per group
dE12 = abs(edges12[1:] - edges12[:-1])
dE70 = abs(edges70[1:] - edges70[:-1])
dE618 = abs(edges618[1:] - edges618[:-1])
# Flux per unit energy in the first spatial cell, normalized by total flux
y12 = (data12.true_flux[0,:]/dE12)
y12 /= np.sum(data12.true_flux[0,:])
y70 = (data70.true_flux[0,:]/dE70)
y70 /= np.sum(data70.true_flux[0,:])
y618 = (data618.true_flux[0,:]/dE618)
y618 /= np.sum(data618.true_flux[0,:])
# Align the lowest-energy center of the coarse grids with the 618-group grid
centers12[-1] = np.min(centers618)
centers70[-1] = np.min(centers618)
# Log-log step plot comparing the three group structures
plt.figure(dpi=300)
plt.suptitle('HDPE Group Centers Divided by Energy Bin Width')
size = 3
where = 'mid'
drawstyle='steps-mid'
plt.step(centers12, y12, '-o', where=where,drawstyle=drawstyle, markersize=size,label='G=12')
plt.step(centers70, y70, where=where, drawstyle=drawstyle, markersize=size,label='G=70')
plt.step(centers618, y618,where=where, drawstyle=drawstyle, markersize=size,label='G=618')
plt.legend()
plt.xscale('log')
plt.yscale('log')
plt.xlabel('E (MeV)')
plt.ylabel(r'$\phi(E)/E$')
| spasmann/iQMC | post_process/plotting/mg_solutions.py | mg_solutions.py | py | 1,957 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "src.init_files.mg_init.MultiGroupInit",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "s... |
39691118341 | #!/usr/bin/env python
import sys
from xml.etree import ElementTree
def run(files):
    """Merge the root elements of several XML files and print the result.

    The first file's root is kept; the children of every subsequent
    root are appended to it. Nothing is printed when *files* is empty.
    """
    roots = (ElementTree.parse(path).getroot() for path in files)
    merged = next(roots, None)
    for extra in roots:
        merged.extend(extra)
    if merged is not None:
        print(ElementTree.tostring(merged).decode('utf-8'))


if __name__ == "__main__":
    run(sys.argv[1:])
| cheqd/cheqd-node | .github/scripts/xml_combine.py | xml_combine.py | py | 412 | python | en | code | 61 | github-code | 6 | [
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.tostring",
"line_number": 14,
"usage_type": "call"
},
{
"ap... |
71432253948 | import click
group = click.Group("jaqsmds")
@group.command(help="Run auth server for jaqs.data.DataApi client.")
@click.argument("variables", nargs=-1)
@click.option("-a", "--auth", is_flag=True, default=False)
def server(variables, auth):
    """Collect KEY=VALUE pairs from the CLI and launch the data service."""
    from jaqsmds.server.server import start_service
    env = {}
    for item in variables:
        parts = item.split("=")
        # only well-formed KEY=VALUE pairs are kept; anything else is ignored
        if len(parts) == 2:
            key, value = parts
            env[key] = value
    start_service(auth, **env)
def catch_db(string):
    """Parse 'k1=v1&k2=v2' (spaces ignored) into a dict; falsy input -> {}."""
    if not string:
        return {}
    pairs = string.replace(" ", "").split("&")
    return dict(pair.split("=") for pair in pairs)
if __name__ == '__main__':
group() | cheatm/jaqsmds | jaqsmds/entry_point.py | entry_point.py | py | 642 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "click.Group",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "jaqsmds.server.server.start_service",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "click.op... |
8662253484 | import logging
import sys
import tarfile
import tempfile
from urllib.request import urlopen
from zipfile import ZipFile
from pathlib import Path
TF = "https://github.com/tensorflow/tflite-micro/archive/80cb11b131e9738dc60b2db3e2f1f8e2425ded52.zip"
CMSIS = "https://github.com/ARM-software/CMSIS_5/archive/a75f01746df18bb5b929dfb8dc6c9407fac3a0f3.zip"
CMSIS_DSP = "https://github.com/ARM-software/CMSIS-DSP/archive/refs/tags/v1.15.0.zip"
CMSIS_NN = "https://github.com/ARM-software/CMSIS-NN/archive/refs/tags/23.08.zip"
CMSIS_ENSEMBLE = "https://github.com/alifsemi/alif_ensemble-cmsis-dfp/archive/5bfce4020fa27d91fcd950725d35ecee8ba364ad.zip"
CMSIS_ENSEMBLE_B = "https://github.com/alifsemi/alif_ensemble-cmsis-dfp/archive/refs/tags/v0.9.6.zip"
BOARDLIB = "https://github.com/alifsemi/alif_boardlib/archive/64067e673171fb7a80272421e537a5f8064bb323.zip"
ETHOS_U_CORE_DRIVER = "https://git.mlplatform.org/ml/ethos-u/ethos-u-core-driver.git/snapshot/ethos-u-core-driver-23.08.tar.gz"
ETHOS_U_CORE_PLATFORM = "https://git.mlplatform.org/ml/ethos-u/ethos-u-core-platform.git/snapshot/ethos-u-core-platform-23.08.tar.gz"
LVGL = "https://github.com/lvgl/lvgl/archive/refs/tags/v8.3.7.zip"
ARM2D = "https://github.com/ARM-software/Arm-2D/archive/refs/tags/v1.1.3.zip"
def download(url_file: str, post_process=None):
    """Fetch *url_file* into a temporary file.

    The temporary file is rewound and handed to *post_process* (if
    given) before it is deleted on context exit.
    """
    with urlopen(url_file) as response:
        with tempfile.NamedTemporaryFile() as tmp:
            logging.info(f"Downloading {url_file} ...")
            tmp.write(response.read())
            tmp.seek(0)
            logging.info(f"Finished downloading {url_file}.")
            if post_process is not None:
                post_process(tmp)
def unzip(file, to_path):
    """Extract a zip archive into *to_path*, dropping the top-level
    directory that GitHub/GitLab source archives wrap contents in.

    Unix permission bits stored in the archive are restored.
    """
    with ZipFile(file) as archive:
        for info in archive.infolist():
            # drop the first path component; names without '/' are kept
            # unchanged (find() returns -1, so the slice starts at 0)
            info.filename = info.filename[info.filename.find("/") + 1:]
            if not info.filename:
                continue
            archive.extract(info, to_path)
            # the upper 16 bits of external_attr hold the Unix file mode
            mode = info.external_attr >> 16
            if mode != 0:
                (to_path / info.filename).chmod(mode)
def untar(file, to_path):
    """Extract a tar archive into *to_path*, dropping the top-level
    directory component; entries without one are skipped entirely."""
    with tarfile.open(file) as archive:
        for member in archive.getmembers():
            slash = member.name.find("/")
            if slash < 0:
                # no directory prefix to strip -> not part of the payload
                continue
            member.name = member.name[slash + 1:]
            if member.name:
                archive.extract(member, to_path)
def main(dependencies_path: Path):
    """Download and unpack every dependency into its own sub-directory
    of *dependencies_path*; zips and tarballs are handled alike."""
    targets = [
        (CMSIS, unzip, "cmsis"),
        (CMSIS_DSP, unzip, "cmsis-dsp"),
        (CMSIS_NN, unzip, "cmsis-nn"),
        (CMSIS_ENSEMBLE, unzip, "cmsis-ensemble"),
        (CMSIS_ENSEMBLE_B, unzip, "cmsis-ensemble-b"),
        (BOARDLIB, unzip, "boardlib"),
        (ETHOS_U_CORE_DRIVER, untar, "core-driver"),
        (ETHOS_U_CORE_PLATFORM, untar, "core-platform"),
        (TF, unzip, "tensorflow"),
        (LVGL, unzip, "lvgl"),
        (ARM2D, unzip, "Arm-2D"),
    ]
    for url, extract, subdir in targets:
        # defaults bind extract/subdir now, avoiding late-binding closures
        download(url,
                 lambda f, extract=extract, subdir=subdir:
                     extract(f.name, to_path=dependencies_path / subdir))
if __name__ == '__main__':
    # log everything to a file and mirror it to stdout
    logging.basicConfig(filename='download_dependencies.log', level=logging.DEBUG, filemode='w')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    download_dir = Path(__file__).parent.resolve() / "dependencies"
    if download_dir.is_dir():
        # dependencies were fetched on a previous run; do nothing
        logging.info(f'{download_dir} exists. Skipping download.')
    else:
        main(download_dir)
| alifsemi/alif_ml-embedded-evaluation-kit | download_dependencies.py | download_dependencies.py | py | 4,091 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "loggi... |
37009441089 | # coding=utf-8
import pymysql
from com.petstore.dao.base_dao import BaseDao
"""订单明细管理DAO"""
class OrderDetailDao(BaseDao):
    """DAO for managing order detail records."""

    def __init__(self):
        super().__init__()

    def create(self, orderdetail):
        """Create an order detail and insert it into the database.

        Args:
            orderdetail: sequence of (orderid, productid, quantity,
                unitcost) bound to the SQL placeholders, in order.
        """
        try:
            with self.conn.cursor() as cursor:
                sql = 'insert into orderdetails (orderid, productid,quantity,unitcost) ' \
                      'values (%s,%s,%s,%s)'
                affectedcount = cursor.execute(sql, orderdetail)
                print('成功插入{0}条数据'.format(affectedcount))
                # commit the database transaction
                self.conn.commit()
        except pymysql.DatabaseError as e:
            # roll back the database transaction on any DB error
            self.conn.rollback()
            print(e)
        finally:
            # NOTE(review): closes the connection after every insert --
            # presumably BaseDao reopens it per DAO instance; confirm.
            self.close()
| wanglun0318/petStore | com/petstore/dao/order_detail_dao.py | order_detail_dao.py | py | 862 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "com.petstore.dao.base_dao.BaseDao",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pymysql.DatabaseError",
"line_number": 21,
"usage_type": "attribute"
}
] |
32915067412 | from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
class Group(models.Model):
    """A community/group that posts can be assigned to."""

    # display name of the group
    title = models.TextField(max_length=200, verbose_name='Название')
    # unique URL identifier
    slug = models.SlugField(unique=True, verbose_name='Идентификатор')
    description = models.TextField(verbose_name='Описание')

    class Meta:
        verbose_name = 'Группа'
        verbose_name_plural = 'Группы'

    def __str__(self):
        return self.title
class Post(models.Model):
    """A user-authored publication, optionally attached to a Group."""

    text = models.TextField(verbose_name='Текст')
    # set automatically when the post is created
    pub_date = models.DateTimeField(
        verbose_name='Дата публикации',
        auto_now_add=True,)
    author = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='posts',
        verbose_name='Автор')
    # optional illustration
    image = models.ImageField(
        upload_to='posts/',
        null=True, blank=True,
        verbose_name='Изображение')
    # optional group the post belongs to
    group = models.ForeignKey(
        Group, on_delete=models.CASCADE, related_name='posts',
        blank=True, null=True, verbose_name='Группа'
    )

    class Meta:
        verbose_name = 'Пост'
        verbose_name_plural = 'Посты'

    def __str__(self):
        return self.text
class Comment(models.Model):
    """A user's comment on a Post."""

    author = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='comments',
        verbose_name='Автор')
    post = models.ForeignKey(
        Post, on_delete=models.CASCADE,
        related_name='comments',
        verbose_name='Пост')
    text = models.TextField()
    # indexed so comment feeds can be sorted by creation time efficiently
    created = models.DateTimeField(
        verbose_name='Дата добавления',
        auto_now_add=True, db_index=True)

    class Meta:
        verbose_name = 'Комментарий'
        verbose_name_plural = 'Комментарии'

    def __str__(self):
        return self.text
class Follow(models.Model):
    """A subscription of one user to another user's posts."""

    user = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='follows_user',
        verbose_name='Подписавшийся пользователь')
    following = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='follows', blank=False,
        verbose_name='Пользователь, на которого подписаны')

    class Meta:
        verbose_name = 'Подписка'
        verbose_name_plural = 'Подписки'
        # A user may follow a given author only once. UniqueConstraint is
        # the recommended replacement for the legacy unique_together option.
        constraints = [
            models.UniqueConstraint(
                fields=['user', 'following'], name='unique_follow'),
        ]

    def __str__(self):
        return f'{self.user.username} отслеживает {self.following.username}'
| dew-77/api_final_yatube | yatube_api/posts/models.py | models.py | py | 2,618 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_... |
3400836706 | # -*- coding: utf-8 -*-
from flask import Flask
from pydoc import locate
class ConstructApp(object):
    """Builds and configures the Flask application plus its extensions."""

    def __init__(self):
        self.extensions = {}
        self.web_app = self.init_web_app()

    def __call__(self, settings, force_init_web_app=False):
        # re-create the Flask app from scratch only when explicitly asked
        if force_init_web_app is True:
            self.web_app = self.init_web_app()
        self.set_settings(settings)

    @staticmethod
    def init_web_app():
        """Create the Flask application instance."""
        return Flask(__name__, static_url_path='/static',
                     static_folder='static')  # create an instance of the Flask application class

    def set_settings(self, settings):
        """Apply a settings object to the Flask application."""
        self.web_app.url_map.strict_slashes = settings.TRAILING_SLASH  # ignore trailing slashes in URLs
        self.web_app.config.from_object(settings)  # pass the remaining settings to the application

    def init_extensions(self):
        """Instantiate and attach every extension listed in APP_EXTENSIONS."""
        extensions = self.web_app.config['APP_EXTENSIONS']
        if not isinstance(extensions, tuple):
            raise TypeError('The extensions must be a tuple')
        for path in extensions:
            # `path` is a dotted path; locate() resolves it to a class,
            # which is instantiated with this ConstructApp immediately
            ex = locate(path)(self)
            if ex.extension is NotImplemented:
                raise NotImplementedError('The extension is not implemented')
            else:
                # NOTE(review): the duplicate check inspects self.web_app
                # but the attribute is set on self below -- confirm which
                # object is the intended home of extensions.
                if hasattr(self.web_app, ex.name):
                    raise AttributeError(f'The base application already has extension "{ex.name}"')
                setattr(self, ex.name, ex.extension)
                self.extensions[ex.name] = ex
                ex.configurate_extension()
APP = ConstructApp()
| tigal/mooc | application.py | application.py | py | 1,660 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pydoc.locate",
"line_number": 36,
"usage_type": "call"
}
] |
17016755905 | import urllib.request as request
import json
src="https://padax.github.io/taipei-day-trip-resources/taipei-attractions-assignment.json"
# Download and parse the attractions JSON feed
with request.urlopen(src) as response:
    data=json.load(response)
spot_data=data["result"]["results"]
# Write one CSV row per attraction: name, district, longitude, latitude, first image URL
# (UTF-8-sig so Excel opens the Chinese text correctly)
with open("data.csv","w",encoding="UTF-8-sig") as file:
    for spot_item in spot_data:
        # keep only the 3-character district name from the full address
        # NOTE(review): assumes a fixed address prefix of 5 characters -- verify
        spot_item['address']=spot_item['address'][5:8]
        img_list=spot_item['file']
        # image URLs are concatenated with no separator; split on the scheme
        img=img_list.split('https://')
        file.write(spot_item['stitle']+","+spot_item['address']+","+spot_item['longitude']+","+spot_item['latitude']+","+"https://"+img[1]+"\n")
| ba40431/wehelp-assignments | week_3/week_3.py | week_3.py | py | 629 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 5,
"usage_type": "call"
}
] |
74048870269 | import pandas as pd
import os
from calendar import monthrange
from datetime import datetime,timedelta
import re
import numpy as np
from models.fed_futures_model.backtestloader import BacktestLoader
from models.fed_futures_model.fff_model import FederalFundsFuture
class Backtest():
    """Backtest driver for the federal-funds-futures rate-hike model."""

    def __init__(self, path):
        self.loader = BacktestLoader(path)

    def load_month(self, meeting_date: datetime):
        """Build a FederalFundsFuture model for the FOMC meeting in
        *meeting_date*'s month from prior/current/next-month futures."""
        ff_curr = self.loader.get_curr_data(meeting_date)
        ff_prior = self.loader.ff_month_before(meeting_date)
        ff_after = self.loader.ff_month_after(meeting_date)
        prev_month_date = self.cycle_month(meeting_date, step=-1).strftime("%Y-%m")
        # type 1 when the previous month also had a meeting, else type 2
        fomc_type = 1 if len(self.loader.fomc_dates.loc[prev_month_date]) > 0 else 2
        meeting_date = self.loader.fomc_dates.loc[meeting_date.strftime("%Y-%m")].index[0]
        fff = FederalFundsFuture()
        fff.initiate_model(meeting_date, ff_prior, ff_curr, ff_after, meeting_date, fomc_type)
        return fff

    def run_month(self, meeting_date: datetime):
        """Return ([P(no hike), P(hike)], model) for the meeting month."""
        fff = self.load_month(meeting_date)
        no_hike_prob, hike_prob = fff.calculate_hike_prob()
        prob_change = [no_hike_prob, hike_prob]
        return prob_change, fff

    def find_range(self, implied_rate, probs):
        """Distribute *probs* over the fixed 25bp target-rate buckets.

        The first probability lands in the bucket containing
        *implied_rate*; subsequent ones fill the following buckets.
        Returns a list of 8 bucket values.
        """
        int_ranges = [0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2]
        values = [0] * len(int_ranges)
        # default to the last bucket: the original left `level` unbound
        # (NameError) whenever implied_rate >= 1.75
        level = len(int_ranges) - 1
        for i in range(len(int_ranges) - 1):
            if int_ranges[i] > implied_rate:
                level = i
                break
        for offset, prob in enumerate(probs):
            idx = level + offset
            if idx < len(values):  # clip probabilities past the top bucket
                values[idx] = prob
        return values

    def predict(self):
        """Run the model for every upcoming FOMC meeting.

        Returns:
            (bucket probabilities DataFrame, predicted FFER DataFrame,
            raw no-hike/hike probabilities DataFrame).
        """
        today = datetime.now().strftime("%Y-%m")
        meeting_dates = self.loader.fomc_dates.loc[today:]
        all_predictions = {}
        pred_values = {}
        raw_probs = {}
        for dt in meeting_dates.index:
            print(f"Loading: {dt} FOMC Meeting...")
            dt = pd.to_datetime(dt)
            probs, fff = self.run_month(dt)
            # cascade probability mass above 1 into the next bucket
            result = self.carry(probs)
            implied_rate = fff.implied_rate
            v = self.find_range(implied_rate, result)
            all_predictions[dt] = v
            pred_values[dt] = [fff.ffer_end]
            raw_probs[dt] = probs
        final_result = pd.DataFrame.from_dict(all_predictions).T
        final_result.columns = ['0-25 BPS', '25-50 BPS', '50-75 BPS',
                                '75-100 BPS', '100-125 BPS', '125-150 BPS',
                                '150-175 BPS', '175-200 BPS']
        pred_values = pd.DataFrame.from_dict(pred_values).T
        pred_values = pred_values.reset_index()
        pred_values.columns = ['Date', 'Prediction']
        raw_probs = pd.DataFrame.from_dict(raw_probs).T.reset_index()
        raw_probs.columns = ['Date', 'No Hike', 'Hike']
        return final_result, pred_values, raw_probs

    def carry(self, sample, cap=1):
        """Cap each probability at *cap*, carrying any excess into the
        next entry, and clamp a negative leading value to zero."""
        result = sample.copy()
        if result[0] < 0:
            result.append(0)
        result = np.array([result])
        for c in range(1, result.shape[1]):
            # push the excess above `cap` one bucket to the right
            result[:, c] += np.maximum(result[:, c - 1] - cap, 0)
        result[:, :-1] = np.minimum(result[:, :-1], cap)
        if result[:, 0] < 0:
            result[:, 0] = 0
        for i in range(1, result.shape[1] - 1):
            if result[:, i] == 1:
                result[:, i] = 1 - result[:, i + 1]
        return result[0].tolist()

    def cycle_month(self, date: datetime, step):
        """Shift *date* by `step` times the length of its own month."""
        new_date = date + step * timedelta(days=monthrange(date.year, date.month)[1])
        return new_date
| limjoobin/bt4103-rate-decision-index | rate_decision_index/models/fed_futures_model/backtest.py | backtest.py | py | 3,664 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.fed_futures_model.backtestloader.BacktestLoader",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "models.fed_futures_model.fff_model.FederalFundsFuture",
"line_number... |
10420450933 | from __future__ import annotations
from typing import TYPE_CHECKING
from randovania.games.prime1.layout.hint_configuration import PhazonSuitHintMode
from randovania.games.prime1.layout.prime_configuration import (
LayoutCutsceneMode,
PrimeConfiguration,
RoomRandoMode,
)
from randovania.layout.preset_describer import (
GamePresetDescriber,
fill_template_strings_from_tree,
message_for_required_mains,
)
if TYPE_CHECKING:
from randovania.layout.base.base_configuration import BaseConfiguration
_PRIME1_CUTSCENE_MODE_DESCRIPTION = {
LayoutCutsceneMode.MAJOR: "Major cutscene removal",
LayoutCutsceneMode.MINOR: "Minor cutscene removal",
LayoutCutsceneMode.COMPETITIVE: "Competitive cutscene removal",
LayoutCutsceneMode.SKIPPABLE: None,
LayoutCutsceneMode.SKIPPABLE_COMPETITIVE: "Competitive cutscenes",
LayoutCutsceneMode.ORIGINAL: "Original cutscenes",
}
_PRIME1_PHAZON_SUIT_HINT = {
PhazonSuitHintMode.DISABLED: None,
PhazonSuitHintMode.HIDE_AREA: "Area only",
PhazonSuitHintMode.PRECISE: "Area and room",
}
_PRIME1_ROOM_RANDO_MODE_DESCRIPTION = {
RoomRandoMode.NONE: None,
RoomRandoMode.ONE_WAY: "One-way Room Rando",
RoomRandoMode.TWO_WAY: "Two-way Room Rando",
}
class PrimePresetDescriber(GamePresetDescriber):
def format_params(self, configuration: BaseConfiguration) -> dict[str, list[str]]:
assert isinstance(configuration, PrimeConfiguration)
template_strings = super().format_params(configuration)
cutscene_removal = _PRIME1_CUTSCENE_MODE_DESCRIPTION[configuration.qol_cutscenes]
ingame_difficulty = configuration.ingame_difficulty.description
phazon_hint = _PRIME1_PHAZON_SUIT_HINT[configuration.hints.phazon_suit]
room_rando = _PRIME1_ROOM_RANDO_MODE_DESCRIPTION[configuration.room_rando]
def describe_probability(probability, attribute):
if probability == 0:
return None
return f"{probability / 10:.1f}% chance of {attribute}"
superheated_probability = describe_probability(configuration.superheated_probability, "superheated")
submerged_probability = describe_probability(configuration.submerged_probability, "submerged")
def attribute_in_range(rand_range, attribute):
if rand_range[0] == 1.0 and rand_range[1] == 1.0:
return None
elif rand_range[0] > rand_range[1]:
rand_range = (rand_range[1], rand_range[0])
return f"Random {attribute} within range {rand_range[0]} - {rand_range[1]}"
def different_xyz_randomization(diff_xyz):
if enemy_rando_range_scale is None:
return None
elif diff_xyz:
return "Enemies will be stretched randomly"
if configuration.enemy_attributes is not None:
enemy_rando_range_scale = attribute_in_range(
[
configuration.enemy_attributes.enemy_rando_range_scale_low,
configuration.enemy_attributes.enemy_rando_range_scale_high,
],
"Size",
)
enemy_rando_range_health = attribute_in_range(
[
configuration.enemy_attributes.enemy_rando_range_health_low,
configuration.enemy_attributes.enemy_rando_range_health_high,
],
"Health",
)
enemy_rando_range_speed = attribute_in_range(
[
configuration.enemy_attributes.enemy_rando_range_speed_low,
configuration.enemy_attributes.enemy_rando_range_speed_high,
],
"Speed",
)
enemy_rando_range_damage = attribute_in_range(
[
configuration.enemy_attributes.enemy_rando_range_damage_low,
configuration.enemy_attributes.enemy_rando_range_damage_high,
],
"Damage",
)
enemy_rando_range_knockback = attribute_in_range(
[
configuration.enemy_attributes.enemy_rando_range_knockback_low,
configuration.enemy_attributes.enemy_rando_range_knockback_high,
],
"Knockback",
)
enemy_rando_diff_xyz = different_xyz_randomization(configuration.enemy_attributes.enemy_rando_diff_xyz)
else:
enemy_rando_range_scale = None
enemy_rando_range_health = None
enemy_rando_range_speed = None
enemy_rando_range_damage = None
enemy_rando_range_knockback = None
enemy_rando_diff_xyz = None
extra_message_tree = {
"Difficulty": [
{f"Heat Damage: {configuration.heat_damage:.2f} dmg/s": configuration.heat_damage != 10.0},
{f"{configuration.energy_per_tank} energy per Energy Tank": configuration.energy_per_tank != 100},
],
"Gameplay": [
{
f"Elevators: {configuration.teleporters.description('elevators')}": (
not configuration.teleporters.is_vanilla
)
},
{
"Dangerous Gravity Suit Logic": configuration.allow_underwater_movement_without_gravity,
},
],
"Quality of Life": [{f"Phazon suit hint: {phazon_hint}": phazon_hint is not None}],
"Game Changes": [
message_for_required_mains(
configuration.ammo_pickup_configuration,
{
"Missiles needs Launcher": "Missile Expansion",
"Power Bomb needs Main": "Power Bomb Expansion",
},
),
{
"Progressive suit damage reduction": configuration.progressive_damage_reduction,
},
{
"Warp to start": configuration.warp_to_start,
"Final bosses removed": configuration.teleporters.skip_final_bosses,
"Unlocked Vault door": configuration.main_plaza_door,
"Unlocked Save Station doors": configuration.blue_save_doors,
"Phazon Elite without Dynamo": configuration.phazon_elite_without_dynamo,
},
{
"Small Samus": configuration.small_samus,
"Large Samus": configuration.large_samus,
},
{
"Shuffle Item Position": configuration.shuffle_item_pos,
"Items Every Room": configuration.items_every_room,
},
{
"Random Boss Sizes": configuration.random_boss_sizes,
"No Doors": configuration.no_doors,
},
{
room_rando: room_rando is not None,
},
{
superheated_probability: superheated_probability is not None,
submerged_probability: submerged_probability is not None,
},
{
"Spring Ball": configuration.spring_ball,
},
{
cutscene_removal: cutscene_removal is not None,
},
{
ingame_difficulty: ingame_difficulty is not None,
},
{
enemy_rando_range_scale: enemy_rando_range_scale is not None,
enemy_rando_range_health: enemy_rando_range_health is not None,
enemy_rando_range_speed: enemy_rando_range_speed is not None,
enemy_rando_range_damage: enemy_rando_range_damage is not None,
enemy_rando_range_knockback: enemy_rando_range_knockback is not None,
enemy_rando_diff_xyz: enemy_rando_diff_xyz is not None,
},
],
}
if enemy_rando_range_scale is not None:
for listing in extra_message_tree["Game Changes"]:
if "Random Boss Sizes" in listing.keys():
listing["Random Boss Sizes"] = False
fill_template_strings_from_tree(template_strings, extra_message_tree)
backwards = [
message
for flag, message in [
(configuration.backwards_frigate, "Frigate"),
(configuration.backwards_labs, "Labs"),
(configuration.backwards_upper_mines, "Upper Mines"),
(configuration.backwards_lower_mines, "Lower Mines"),
]
if flag
]
if backwards:
template_strings["Game Changes"].append("Allowed backwards: {}".format(", ".join(backwards)))
if configuration.legacy_mode:
template_strings["Game Changes"].append("Legacy Mode")
# Artifacts
template_strings["Item Pool"].append(
f"{configuration.artifact_target.num_artifacts} Artifacts, "
f"{configuration.artifact_minimum_progression} min actions"
)
return template_strings
| randovania/randovania | randovania/games/prime1/layout/preset_describer.py | preset_describer.py | py | 9,313 | python | en | code | 165 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "randovania.games.prime1.layout.prime_configuration.LayoutCutsceneMode.MAJOR",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "randovania.games.prime1.layout.prime_config... |
8224455984 | # MAC0318 Intro to Robotics
# Please fill-in the fields below with every team member info
#
# Name: José Lucas Silva Mayer
# NUSP: 11819208
#
# Name: Willian Wang
# NUSP: 11735380
#
# Any supplemental material for your agent to work (e.g. neural networks, data, etc.) should be
# uploaded elsewhere and listed down below together with a download link.
#
#
#
# ---
#
# Final Project - The Travelling Mailduck Problem
#
# Don't forget to run this file from the Duckievillage root directory path (example):
# cd ~/MAC0318/duckievillage
# conda activate duckietown
# python3 assignments/challenge/challenge.py assignments/challenge/examples/challenge_n
#
# Submission instructions:
# 0. Add your names and USP numbers to the file header above.
# 1. Make sure that any last change hasn't broken your code. If the code crashes without running you'll get a 0.
# 2. Submit this file via e-disciplinas.
import pyglet
from pyglet.window import key
import numpy as np
import math
import random
from duckievillage import create_env, FRONT_VIEW_MODE
import cv2
import tensorflow
class Agent:
def __init__(self, env):
    """Set up kinematic constants and load the pretrained policies.

    Args:
        env: Duckievillage environment wrapper.
    """
    self.env = env
    self.radius = 0.0318  # wheel radius
    self.baseline = env.unwrapped.wheel_dist/2  # half the wheel-to-wheel distance
    # motor calibration constants (gain and trim)
    self.motor_gain = 0.68*0.0784739898632288
    self.motor_trim = 0.0007500911693361842
    self.initial_pos = env.get_position()
    # load model of object detection
    self.model_od = tensorflow.keras.models.load_model('project/models/od.h5')
    # load model of dodge
    self.model_dodge = tensorflow.keras.models.load_model('project/models/ddg.h5')
    # define steps of dodge (number of control steps the maneuver lasts)
    self.dodge_steps = 7
    self.dodge_count = 0
    # load lane following model
    self.model_lf = tensorflow.keras.models.load_model('project/models/lf.h5')
    self.score = 0
    key_handler = key.KeyStateHandler()
    env.unwrapped.window.push_handlers(key_handler)
    self.key_handler = key_handler
    # Color segmentation hyperspace: HSV bounds used by preprocess() --
    # inner = dashed yellow center line, outer = solid white border line
    self.inner_lower = np.array([22, 93, 160])
    self.inner_upper = np.array([45, 255, 255])
    self.outer_lower = np.array([0, 0, 130])
    self.outer_upper = np.array([179, 85, 255])
def preprocess(self, image):
""" Returns a 2D array mask color segmentation of the image """
hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) # obtain HSV representation of image
# filter out dashed yellow "inner" line
inner_mask = cv2.inRange(hsv, self.inner_lower, self.inner_upper)//255
# filter out solid white "outer" line
outer_mask = cv2.inRange(hsv, self.outer_lower, self.outer_upper)//255
# Note: it is possible to filter out pixels in the RGB format
# by replacing `hsv` with `image` in the commands above
# produces combined mask (might or might not be useful)
mask = cv2.bitwise_or(inner_mask, outer_mask)
self.masked = cv2.bitwise_and(image, image, mask=mask)
return inner_mask, outer_mask
def get_pwm_control(self, v: float, w: float)-> (float, float):
''' Takes velocity v and angle w and returns left and right power to motors.'''
V_l = (self.motor_gain - self.motor_trim)*(v-w*self.baseline)/self.radius
V_r = (self.motor_gain + self.motor_trim)*(v+w*self.baseline)/self.radius
return V_l, V_r
def send_commands(self, dt: float):
''' Agent control loop '''
# acquire front camera image
img = self.env.front()
# run image processing routines
P, Q = self.preprocess(img) # returns inner and outter mask matrices
# transform image to shape (60, 80, 3)
img_inference = cv2.resize(img, (80, 60))
img_inference = np.expand_dims(img_inference, axis=0)
# if the duckie is in the middle of a dodge, continue it
if self.dodge_count > 0:
prediction = self.model_dodge.predict(img_inference, verbose=False)
v, w = prediction[0][0] * 0.03125, prediction[0][1] * 0.022
pwm_left, pwm_right = self.get_pwm_control(v, w)
self.env.step(pwm_left, pwm_right)
self.dodge_count -= 1
return
# predict object detection directives
prediction = self.model_od.predict(img_inference, verbose=False)
take_care = prediction[0][0] > 0.7
if take_care:
# activate the dodge mode if image has a duckie
# set the dodge count to the number of steps
self.dodge_count = self.dodge_steps
return
# resize masks P and Q to (60, 80)
p_resized = cv2.resize(P, (80, 60))
q_resized = cv2.resize(Q, (80, 60))
# create a 2-channel image with the masks
mask = np.zeros((60, 80, 2))
mask[:, :, 0] = p_resized
mask[:, :, 1] = q_resized
# cut off the 30% top pixels
mask = mask[(3 * mask.shape[0])//10:, :, :]
masks = np.expand_dims(mask, axis=0)
# predict lane following directives
prediction_lf = self.model_lf.predict(masks, verbose=False)
v, w = prediction_lf[0][0] * 1.25, prediction_lf[0][1]
pwm_left, pwm_right = self.get_pwm_control(v, w)
self.env.step(pwm_left, pwm_right)
# for visualization
self.env.render('human')
| josemayer/pato-wheels | project/agent.py | agent.py | py | 5,379 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 51,
"usage_type": "call"
}... |
13438082129 | """
Boston house prices dataset
"""
import sklearn.datasets
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures
# Load the Boston house prices dataset
skl_data = sklearn.datasets.load_boston(return_X_y=False)
print(type(skl_data))  # Bunch: a dict-like container type
print(skl_data.keys())
print(skl_data.feature_names)
# Split the bunch into data matrix and target vector
X = skl_data.data
y = skl_data.target
print('X shape: ', X.shape)
print('y shape: ', y.shape)
print('len(X):', len(X))
print('len(y):', len(y))
features = skl_data.feature_names
# Data exploration -> scatter plot of y against every feature
fig, ax = plt.subplots(3, 5)
# ax: 2-D ndarray (3x5) of subplot axes
print('fig: ', fig)
# print('ax: ', ax)
ax_flat = ax.flatten()
for i in range(len(features)):
    subplot = ax_flat[i]
    subplot.scatter(X[:, i], y)
    subplot.set_title(features[i])
plt.show()
# Split into training / test sets
np.random.seed(1217)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
print(f'X_train len: {len(X_train)}, X_test len:{len(X_test)}, y_train len:{len(y_train)}, y_test len:{len(y_test)}')
# Linear regression on the training set - simple and multiple
# price = b0 + b1 * rm: house price ~ number of rooms (RM)
X_train_rm = X_train[:, np.newaxis, 5]  # np.newaxis:
X_test_rm = X_test[:, np.newaxis, 5]    # keeps the slice as a 2-D column array
print(f'X_train_rm: {X_train_rm.shape}, X_test_rm: {X_test_rm.shape} ')
lin_reg = LinearRegression()      # create the LinearRegression estimator
lin_reg.fit(X_train_rm, y_train)  # fit (train) -> finds b0, b1
print(f'intercept: {lin_reg.intercept_}, coefficient: {lin_reg.coef_}')
# Predict on the test set -> plot
y_pred_rm = lin_reg.predict(X_test_rm)
# Actual values (scatter) vs predicted values (line)
plt.scatter(X_test_rm, y_test)  # actual values y_test
plt.plot(X_test_rm, y_pred_rm, 'r-')
plt.title('Price ~ RM')
plt.xlabel('RM')
plt.ylabel('Price')
plt.show()
# MSE: Mean Square Error
# mean of the squared errors:
# error = y - y_hat, error**2 = (y-y_hat)**2
# MSE = sum(error**2 = (y-y_hat)**2) / count
mse = mean_squared_error(y_test, y_pred_rm)
# RMSE(Squared-Root MSE)
rmse = np.sqrt(mse)
print('Price ~ RMSE=', rmse)
# R2-score
r2_1 = lin_reg.score(X_test_rm, y_test)  # score(): computes the R-squared value
print('Price ~ r2_1: ', r2_1)
r2_2 = r2_score(y_test, y_pred_rm)  # coefficient of determination
print('Price ~ r2_2: ', r2_2)
# Price ~ LSTAT linear regression: price = b0 + b1 * lstat
# b0, b1 ?
X_train_lstat = X_train[:, np.newaxis, 12]  # training set
X_test_lstat = X_test[:, np.newaxis, 12]    # test set
lin_reg.fit(X_train_lstat, y_train)  # fit / train the model
print(f'intercept:{lin_reg.intercept_}, coefficients: {lin_reg.coef_}')
y_pred_lstat = lin_reg.predict(X_test_lstat)  # predict / test
plt.scatter(X_test_lstat, y_test)  # scatter plot of actual values
plt.plot(X_test_lstat, y_pred_lstat, 'r-')
plt.title('Price ~ LSTAT')
plt.xlabel('LSTAT')
plt.ylabel('Price')
plt.show()
mse = mean_squared_error(y_test, y_pred_lstat)
rmse = np.sqrt(mse)
print('Price ~ RMSE=', rmse)
r2_1 = lin_reg.score(X_test_lstat, y_test)  # score(): R-squared
print('Price ~ r2_1: ', r2_1)
r2_2 = r2_score(y_test, y_pred_lstat)  # coefficient of determination
print('Price ~ r2_2: ', r2_2)
# Price ~ LSTAT + LSTAT**2 polynomial regression
# Price = b0 + b1 * lstat + b2 * lstat**2
poly = PolynomialFeatures(degree=2, include_bias=False)
# Transformer that appends polynomial terms to the data as extra columns
X_train_lstat_poly = poly.fit_transform(X_train_lstat)
# Append the polynomial terms to the test set as well
X_test_lstat_poly = poly.fit_transform(X_test_lstat)
lin_reg.fit(X_train_lstat_poly, y_train)
print(f'intercept:{lin_reg.intercept_}, coefficient:{lin_reg.coef_}')
y_pred_lstat_poly = lin_reg.predict(X_test_lstat_poly)
plt.scatter(X_test_lstat, y_test)  # actual values
xs = np.linspace(X_test_lstat.min(), X_test_lstat.max(), 100).reshape((100, 1))
xs_poly = poly.fit_transform(xs)
ys = lin_reg.predict(xs_poly)
plt.plot(xs, ys, 'r')
# plt.plot(X_test_lstat, y_pred_lstat_poly, 'r')  # predicted values
plt.title('Price ~ lstat + lstat^2')
plt.xlabel('LSTAT')
plt.ylabel('Price')
plt.show()
mse = mean_squared_error(y_test, y_pred_lstat_poly)
rmse = np.sqrt(mse)
print('Price ~ RMSE=', rmse)
r2_1 = lin_reg.score(X_test_lstat_poly, y_test)  # score(): R-squared
print('Price ~ r2_1: ', r2_1)
r2_2 = r2_score(y_test, y_pred_lstat_poly)  # coefficient of determination
print('Price ~ r2_2: ', r2_2)
# Price ~ RM + LSTAT multiple regression: price = b0 + b1 * rm + b2 * lstat
X_train_rm_lstat = X_train[:, [5, 12]]
X_test_rm_lstat = X_test[:, [5, 12]]
print(X_train_rm_lstat[:5])
lin_reg.fit(X_train_rm_lstat, y_train)  # fit/train
print(f'intercept: {lin_reg.intercept_}, coefficients: {lin_reg.coef_}')
y_pred_rm_lstat = lin_reg.predict(X_test_rm_lstat)  # predict/test
print(y_test[:5], y_pred_rm_lstat[:5])
mse = mean_squared_error(y_test, y_pred_rm_lstat)
rmse = np.sqrt(mse)
r2 = r2_score(y_test, y_pred_rm_lstat)
print(f'Price ~ RM + LSTAT: RMSE = {rmse}, R**2 = {r2}')
print('-====================================')
# Price ~ RM + LSTAT + RM**2 + RM * LSTAT + LSTAT**2
# Price = b0 + b1 * rm + b2 * lstat + b3 * rm**2 + b4 * rm * lstat + b5 * lstat **2
# Append polynomial columns to the training set
X_train_rm_lstat_poly = poly.fit_transform(X_train_rm_lstat)
# Append polynomial columns to the test set
X_test_rm_lstat_poly = poly.fit_transform(X_test_rm_lstat)
print(X_test_rm_lstat_poly[:2])
lin_reg.fit(X_train_rm_lstat_poly, y_train)
print(f'intercept: {lin_reg.intercept_}, coef: {lin_reg.coef_}')
y_pred_rm_lstat_poly = lin_reg.predict(X_test_rm_lstat_poly)
mse = mean_squared_error(y_test, y_pred_rm_lstat_poly)
rmse = np.sqrt(mse)
r2 = r2_score(y_test, y_pred_rm_lstat_poly)
print(f'Price ~ RM + LSTAT: RMSE = {rmse}, R**2 = {r2}')
print('y true:', y_test[:5])
print('y pred:', y_pred_rm_lstat_poly[:5])
# Price ~ RM + LSTAT + LSTAT**2
# Price = b0 + b1 * rm + b2 * lstat + b3 * lstat**2
X_train_last = np.c_[X_train_rm, X_train_lstat_poly]
X_test_last = np.c_[X_test_rm, X_test_lstat_poly]
print('X_train_last:', X_train_last[:2], '\n X_test_last: ', X_test_last[:2])
lin_reg.fit(X_train_last, y_train)  # fit/train
print(f'Price ~ RM + LSTAT + LSTAT**2: intercept: {lin_reg.intercept_}, coef {lin_reg.coef_}')
y_pred_last = lin_reg.predict(X_test_last)  # predict/test
print('y true:', y_test[:5])
print('y predict:', y_pred_last[:5].round(2))
mse = mean_squared_error(y_test, y_pred_last)
rmse = np.sqrt(mse)
r2 = r2_score(y_test, y_pred_last)
print(f'Price ~ RM + LSTAT: RMSE = {rmse}, R**2 = {r2}')
| i-hs/lab-python | scratch13/ex05.py | ex05.py | py | 6,999 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.datasets.datasets.load_boston",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.datasets",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sklearn.datasets",
"line_number": 15,
"usage_type": "name"
},
{
... |
9622430105 | #!/usr/bin/env python3
import base64
import c3, c5
from itertools import combinations
def beautify(candidates: list):
    '''
    Pretty prints the candidates returned, one "Keysize/Hamming Distance"
    line per candidate.
    '''
    lines = [
        'Keysize: {}\tHamming Distance: {}\n'.format(entry['keysize'], entry['normalized_distance'])
        for entry in candidates
    ]
    return ''.join(lines)
def hamming_distance(str1: str, str2: str) -> int:
    '''
    Calculates the total number of differing bits between two
    hex-encoded byte strings (compared pairwise up to the shorter one).
    returns (int) distance
    '''
    left = bytes.fromhex(str1)
    right = bytes.fromhex(str2)
    # XOR each byte pair and count the set bits of the result.
    return sum(bin(a ^ b).count("1") for a, b in zip(left, right))
def generate_keysize_candidates(data: str) -> list:
    '''
    Scores every keysize from 2 to 40 by the average, keysize-normalized
    Hamming distance between the first four ciphertext blocks.
    returns the list of candidates sorted best-first
    '''
    raw = base64.b64decode(data)
    candidates = []
    for keysize in range(2, 41):
        # First four keysize-sized blocks of the ciphertext.
        chunks = [raw[start:start + keysize] for start in range(0, len(raw), keysize)][:4]
        pairs = tuple(combinations(chunks, 2))
        # Average pairwise Hamming distance over all block pairs.
        total = 0
        for left, right in pairs:
            total += hamming_distance(left.hex(), right.hex())
        average = total / len(pairs)
        candidates.append({
            'keysize': keysize,
            'normalized_distance': average / keysize,
        })
    return sorted(candidates, key=lambda item: item['normalized_distance'])
def generate_blocks(filepath: str, keysize: int) -> list:
    '''
    Partitions the base64 encoded file into blocks of exactly `keysize`
    bytes; a short final block is zero-padded up to `keysize`.

    Fixes two defects of the previous version: the loop condition
    `len(...) // keysize > 1` could leave a trailing chunk of up to
    2*keysize-1 bytes, and the padding computation then went NEGATIVE,
    silently emitting an oversized, unpadded final block.

    returns list of byte blocks (list)
    '''
    blocks = []
    # Context manager closes the file even if read()/decode raises.
    with open(filepath, 'r') as file:
        b64_ciphertext = file.read()
    ciphertext_bytes = base64.b64decode(bytes(b64_ciphertext, 'utf-8'))
    # Consume every complete keysize-sized block.
    while len(ciphertext_bytes) >= keysize:
        blocks.append(ciphertext_bytes[:keysize])
        ciphertext_bytes = ciphertext_bytes[keysize:]
    # Zero-pad whatever remains so every block has the same length.
    if ciphertext_bytes:
        padding = keysize - len(ciphertext_bytes)
        blocks.append(bytes(ciphertext_bytes + b'\x00' * padding))
    return blocks
def transpose_blocks(blocks: list, keysize: int) -> list:
    '''
    Transposes the blocks: column i of every input block is gathered
    into output block i, so each output block holds all bytes that were
    encrypted with the same key byte.
    returns a list of bytearrays
    '''
    columns = [bytearray() for _ in range(keysize)]
    for block in blocks:
        for position in range(keysize):
            columns[position].append(block[position])
    return columns
def solve_repeating_xor(filepath: str, keysize: int) -> str:
    '''
    Generates the best key candidate for the repeating-XOR ciphertext in
    `filepath` by solving each transposed column as a single-byte XOR.
    returns str
    '''
    ciphertext_blocks = generate_blocks(filepath, keysize)
    key_chars = []
    for column in transpose_blocks(ciphertext_blocks, keysize):
        best = c3.singlebyte_xor_solve(column.hex())
        key_chars.append(chr(best['byte']))
    return ''.join(key_chars)
if __name__ == "__main__":
from pathlib import Path
path = str(Path(__file__).parent.absolute())
file = open(path + '/' + 'c6_input.txt', 'r')
txt = file.read()
file.close()
candidates = generate_keysize_candidates(txt)[:1]
keys = []
for candidate in candidates:
tmp_key = solve_repeating_xor(path + '/c6_input.txt', candidate['keysize'])
keys.append(tmp_key)
for key in keys:
print('\033[35mUsing Key:\033[39m', key)
print('\033[35mKeysize:\033[39m', len(key))
print('\033[35mResulting XOR\'d text:\033[39m', c5.repeating_xor(base64.b64decode(txt).decode(), key).decode())
| oatovar/Cryptopals-Solutions | c06.py | c06.py | py | 4,159 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "base64.b64decode",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "c3.singlebyt... |
5593654816 | import telegram
import google
import logging
import base64
import io
from requests_html import HTMLSession
from google.cloud import firestore
from bs4 import BeautifulSoup
from PIL import Image
from time import sleep
from Spider import get_all_course
from UESTC_Login import _login, get_captcha
def __Bot_token():
    """Read the Telegram bot token from token.txt in the working directory.

    Returns:
        str: the raw file contents (may include a trailing newline).
    """
    # Context manager guarantees the handle is closed even if read() raises
    # (the previous open/read/close sequence leaked it on error).
    with open("token.txt", "r") as token_file:
        return token_file.read()
def course_print(mycourse, update):  # Build a readable string per course and send it to the user
    """Format every course and send each one as a Telegram message.

    mycourse: iterable of (info, time) pairs.
      info -- assumes [name, field1, field2, 21-entry week bitmap]; verify
              against Spider.get_all_course.
      time -- list of (weekday_digit, class_index) pairs.
    """
    chat_id = update.message.chat_id
    # Weekday digit -> English name. NOTE: the misspelled values are
    # runtime strings shown to users; kept byte-identical on purpose.
    week={
        "0":"Monday",
        "1":"Tuesday",
        "2":"Wednsday",
        "3":"Thirsday",
        "4":"Friday",
        "5":"Saturday",
        "6":"Sunday",
    }
    for course in mycourse:
        info = course[0]
        time = course[1]
        out = "{}\n{} {}\nweek:".format(info[0],info[1],info[2])
        i = 0
        # Append every week number (0..20) whose flag is set in the bitmap.
        while(i <= 20):
            if(info[3][i] == 1):
                out = out + " {}".format(i)
            i += 1
        # Weekday of the first slot, then the 1-based class numbers.
        out = out + '\n' + week[str(time[0][0])] + " "
        out = out + "class no."
        for classes in time:
            out = out + " {}".format(classes[1]+1)
        #print(out)
        _bot_send_text(chat_id, out)
    """
    print(info)
    #out = out + str(info) + "\n"
    out = out + str(info[0]) + '\n' + str(info[1]) + str(info[2]) +'\n' + str(info[3]) + "\n"
    for t in time:
        out = out + str(t) + "\n"
    out = out + "\n"
    """
    _bot_send_text(chat_id, "Demo version. To be continued....")
    #bot.send_message(chat_id=update.message.chat_id, text=out)
def _bot_send_text(chat_id, text):
    """Send a plain-text message to `chat_id` via the module-level bot."""
    global bot
    bot.send_message(chat_id = chat_id, text = text)
def _firestore_update(chat_id ,dict):
    # Merge the given mapping into this chat's document in Google Firestore.
    # NOTE(review): parameter name `dict` shadows the builtin; kept as-is
    # to avoid changing the interface.
    global db
    doc_ref = db.collection(u'uestc_calendar_bot').document(str(chat_id))
    doc_ref.set(dict, merge=True)
def _firestore_read(chat_id):
    """Return this chat's Firestore document as a dict (None if absent)."""
    global db
    doc_ref = db.collection(u'uestc_calendar_bot').document(str(chat_id))
    doc = doc_ref.get().to_dict()
    return doc
def _Process_Start(update):
    # Show the welcome screen and reset the chat's state machine to 0.
    chat_id = update.message.chat_id
    dicts = {
        'user_id': chat_id,
        'status': 0
    }
    _firestore_update(chat_id, dicts)
    _bot_send_text(chat_id,
        text=""" Welcome to YouESTC alarm clock!
This bot is used to query your timetable and alarm you before class.
Commands:
/login : to login into uestc""")
def _Process_Login(update):
    # /login entry point: advance the state machine and ask for the account.
    chat_id = update.message.chat_id
    dicts = {'status': 1}
    _firestore_update(chat_id, dicts)
    _bot_send_text(chat_id, "please input your UESTC student number:")
def _Process_Account(update):
    # Handle the student number the user just typed: persist it and
    # advance the state machine to the password prompt.
    chat_id = update.message.chat_id
    dicts = {
        'status': 2,
        'account': update.message.text
    }
    _firestore_update(chat_id, dicts)
    _bot_send_text(chat_id, "please input your password:")
def _Process_Password(update):
    # Handle the password the user just typed: store it (base64-obfuscated),
    # fetch a captcha image from the UESTC login page and send it to the user.
    chat_id = update.message.chat_id
    doc = _firestore_read(chat_id)
    account = doc['account']
    passwd = update.message.text
    dicts = {'passwd': base64.b64encode(passwd.encode('utf-8'))}
    _firestore_update(chat_id, dicts)
    bot.send_message(chat_id=update.message.chat_id, text="please input your captcha below:")
    bot.send_message(chat_id=update.message.chat_id, text="Pulling captcha photo...")
    form, img, new_session = get_captcha(account, passwd)  # request the captcha image
    #f = open("captcha.png", "wb")
    #f.write(img)
    #f.close()
    # NOTE(review): the encode/decode round trip below is a data no-op and
    # assumes get_captcha returns `img` as a str — confirm its return type.
    img_b64encode = base64.b64encode(img.encode('utf-8')) # base64 encode
    img_b64decode = base64.b64decode(img_b64encode) # base64 decode
    image = io.BytesIO(img_b64decode)
    #f = open("captcha.png", "rb")
    bot.send_photo(chat_id=chat_id, photo=image)
    # Send the captcha image to the user
    dicts = {
        'form': form,
        'cookies': new_session.cookies.get_dict(),
        'status': 3
    }
    _firestore_update(chat_id, dicts)
def _Process_Captcha(update):
    # Handle the captcha the user typed: attempt the real login and,
    # on success, pull and print the timetable.
    chat_id = update.message.chat_id
    _bot_send_text(chat_id, "Attempting to login...")
    doc = _firestore_read(chat_id)
    cookies = doc['cookies']
    form = doc['form']
    captcha = update.message.text
    new_session, res = _login(form, captcha, cookies)
    if(res == 0):
        _bot_send_text(chat_id, "Login success! Pulling data...")
        mycourse = get_all_course(new_session)
        course_print(mycourse, update)
    elif(res == 1):
        _bot_send_text(chat_id, "Password wrong!")
    elif(res == 2):
        _bot_send_text(chat_id, "Captcha wrong!")
    else:
        _bot_send_text(chat_id, "Student number wrong!")
    # Reset the state machine regardless of the outcome.
    dicts = {'status': 0}
    _firestore_update(chat_id, dicts)
def Text_Process(update):
    """Route free-form text to a handler according to the stored chat state."""
    doc_ref = db.collection(u'uestc_calendar_bot').document(str(update.message.chat_id))
    try:  # If there is no previous record, jump to the start menu
        doc = doc_ref.get().to_dict()
    except google.cloud.exceptions.NotFound:
        _Process_Start(update)
        return
    status = doc['status']
    # State machine: 0 = idle, 1 = awaiting account, 2 = awaiting password,
    # 3 = awaiting captcha.
    if(status == 0):
        _Process_Start(update)
    elif(status == 1):
        _Process_Account(update)
    elif(status == 2):
        _Process_Password(update)
    elif(status == 3):
        _Process_Captcha(update)
    _bot_send_text(update.message.chat_id, "收到啦!")
def Command_Process(update):
    """Dispatch one incoming update.

    Known commands run their handler; an unknown command falls back to the
    start menu; anything else is routed to the text state machine.
    """
    text = update.message.text
    handlers = {
        '/start': _Process_Start,
        '/login': _Process_Login
    }
    handler = handlers.get(text)
    if handler is not None:
        handler(update)
    elif text[0] == '/':
        # Unknown command -> show the start menu
        _Process_Start(update)
    else:
        # Not a command -> hand over to the text-processing state machine
        Text_Process(update)
if(__name__ == "__main__"):
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',level=logging.INFO)
# 记录日志
global bot
global db
#定义全局变量,使得所有函数用一个变量
token = __Bot_token()
bot = telegram.Bot(token = token)#bot
print(bot.get_me())
# 登录telegram
db = firestore.Client()
# 登录google filestore
while(1):
updates = bot.get_updates()
if(updates != []):
for update in updates:
Command_Process(update)
bot.get_updates(limit = 1, offset = update.update_id+1)
print(update.message.text, " ", update.message.chat_id)
else:
sleep(0.01)
#$env:GOOGLE_APPLICATION_CREDENTIALS="G:\github\telebot\key\My First Project-2035ff2d3024.json" | mrh929/uestc_calendar_bot | calendar/main.py | main.py | py | 6,925 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "base64.b64encode",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "UESTC_Login.get_captcha",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "base64.b... |
12789805626 | from errno import EIO, ENOSPC, EROFS
import sys
import os
import traceback
import glob
from decimal import Decimal, getcontext
# Decimal arithmetic in this module uses 6 significant digits.
getcontext().prec = 6
# Hard environment requirements: raw disk devices and FUSE mounts need
# Linux, a modern Python and root privileges.
assert sys.platform == 'linux', 'This script must be run only on Linux'
assert sys.version_info.major >= 3 and sys.version_info.minor >= 5, 'This script requires Python 3.5+'
assert os.geteuid() == 0, 'This script must be run as root'
try:
import sh
import time
import tempfile
import re
import logzero
import numpy
import threading
import threading
import hashlib
import ctypes
from collections import OrderedDict
from io import StringIO
from typing import Optional, List, Dict
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn
from errno import ENOENT
from stat import S_IFDIR, S_IFREG
from pynput.keyboard import Key, Listener
from utils import (
mute_system_sound,
unmute_system_sound,
enable_power_led,
disable_power_led,
init_simple_mixer_control,
save_replace_file,
file_read_bytes,
file_write_bytes,
file_read_bytes_direct
)
except ImportError as xie:
traceback.print_exc()
sys.exit(1)
# Application identity and log locations.
APP_UNIXNAME = 'amiga_disk_devices'
APP_VERSION = '0.1'
TMP_PATH_PREFIX = os.path.join(tempfile.gettempdir(), APP_UNIXNAME)
LOG_PATHNAME = os.path.join(TMP_PATH_PREFIX, 'amiga_disk_devices.log')
# Feature toggles.
ENABLE_LOGGER = False
ENABLE_REINIT_HANDLE_AFTER_SECS = 0
ENABLE_FLOPPY_DRIVE_READ_A_HEAD = True
ENABLE_SET_CACHE_PRESSURE = False
ENABLE_ADF_CACHING = True
DISABLE_SWAP = False
DEFAULT_READ_A_HEAD_SECTORS = 24 # 256 system default, 44 seems ok, 24 seems best
SYNC_DISKS_SECS = 60 * 3
# Disk-device type identifiers exposed to consumers of this filesystem.
AMIGA_DISK_DEVICE_TYPE_ADF = 1
AMIGA_DISK_DEVICE_TYPE_HDF_HDFRDB = 8
AMIGA_DISK_DEVICE_TYPE_HDF_DISKIMAGE = 2
AMIGA_DISK_DEVICE_TYPE_HDF = 5
AMIGA_DISK_DEVICE_TYPE_ISO = 10
# Floppy geometry: raw device size vs. ADF image size.
FLOPPY_DEVICE_SIZE = 1474560
FLOPPY_ADF_SIZE = 901120
FLOPPY_DEVICE_LAST_SECTOR = 1474048
FLOPPY_ADF_EXTENSION = '.adf'
HD_HDF_EXTENSION = '.hdf'
CD_ISO_EXTENSION = '.iso'
# Layout of the leading fields of an ADF boot block.
ADF_BOOTBLOCK = numpy.dtype([
    ('DiskType', numpy.byte, (4, ) ),
    ('Chksum', numpy.uint32 ),
    ('Rootblock', numpy.uint32 )
])
SYSTEM_INTERNAL_SD_CARD_NAME = 'mmcblk0'
PHYSICAL_SECTOR_SIZE = 512
# A read slower than this (ms) is treated as a real physical floppy read.
PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS = 100
STATUS_FILE_NAME = 'status.log'
CACHE_DATA_BETWEEN_SECS = 3
CACHED_ADFS_MAX_DIR_SIZE = 1073741824 # 1GB
CACHED_ADFS_DIR = os.path.realpath('./cached_adfs')
# Magic values written into CachedADFHeader records.
CACHED_ADF_SIGN = 'AMIPI400'
CACHED_ADF_HEADER_TYPE = 'CachedADFHeader'
CACHED_ADF_STR_ENCODING = 'ascii'
SHA512_LENGTH = 128
MAIN_LOOP_MAX_COUNTER = 0
# Mutable module-level state.
fs_instance = None
key_cmd_pressed = False
key_delete_pressed = False
key_shift_pressed = False
# Serializes every raw os.read/os.write on disk devices.
os_read_write_mutex = threading.Lock()
devices_read_a_head_sectors = {}
def os_read(handle, offset, size):
    """Seek to `offset` and read `size` bytes, serialized on the global lock."""
    with os_read_write_mutex:
        os.lseek(handle, offset, os.SEEK_SET)
        return os.read(handle, size)
def os_write(handle, offset, data):
    """Seek to `offset` and write `data`, serialized on the global lock;
    returns the number of bytes written."""
    with os_read_write_mutex:
        os.lseek(handle, offset, os.SEEK_SET)
        return os.write(handle, data)
class CachedADFHeader(ctypes.Structure):
    """Binary header record attached to cached ADF images on disk."""
    _fields_ = [
        ('sign', ctypes.c_char * 32),         # magic, see CACHED_ADF_SIGN
        ('header_type', ctypes.c_char * 32),  # see CACHED_ADF_HEADER_TYPE
        ('sha512', ctypes.c_char * 129),      # hex digest (SHA512_LENGTH chars) + NUL
        ('mtime', ctypes.c_int64)             # source modification timestamp
    ]
class AsyncFileOps(threading.Thread):
    """Background worker that serializes file operations addressed by pathname:
    O_DIRECT reads, queued writes, and deferred one-time writes that are only
    flushed once the worker has been idle for a while."""

    def __init__(self):
        self._running = False
        # Queued direct-read requests (dicts; see read_direct_by_pathname).
        self._pathname_direct_readings = []
        # Queued write requests (dicts; see write_by_pathname).
        self._pathname_writings = []
        # Deferred one-time writes keyed by pathname (latest wins).
        self._pathname_deferred_writings = {}
        threading.Thread.__init__(self)

    def _direct_readings_by_pathname(self):
        """Drain the direct-read queue; returns the number of items processed."""
        processed = 0
        handles = {}
        while self._pathname_direct_readings:
            reading_data = self._pathname_direct_readings.pop(0)
            try:
                processed += 1
                use_fd = None
                use_fo = None
                use_m = None
                # Reuse the (fd, file object, mmap) triple cached per pathname.
                if reading_data['pathname'] in handles:
                    use_fd = handles[reading_data['pathname']][0]
                    use_fo = handles[reading_data['pathname']][1]
                    use_m = handles[reading_data['pathname']][2]
                handles[reading_data['pathname']] = file_read_bytes_direct(
                    reading_data['pathname'],
                    reading_data['offset'],
                    reading_data['size'],
                    0,
                    use_fd,
                    use_fo,
                    use_m
                )
                if reading_data['read_handler_func']:
                    read_handler_func = reading_data['read_handler_func']
                    read_handler_func(
                        reading_data['pathname'],
                        reading_data['offset'],
                        reading_data['size']
                    )
            except Exception as x:
                traceback.print_exc()
                print_log('_process_direct_readings_by_pathname', x)
                print_log()
        # Release every cached handle triple.
        for pathname, handle_tuples in handles.items():
            os.close(handle_tuples[0])
            # handle_tuples[1].close()
            handle_tuples[2].close()
        return processed

    # def _direct_readings_by_pathname(self):
    #     processed = 0
    #     while self._pathname_direct_readings:
    #         reading_data = self._pathname_direct_readings.pop(0)
    #         try:
    #             processed += 1
    #             file_read_bytes_direct(
    #                 reading_data['pathname'],
    #                 reading_data['offset'],
    #                 reading_data['size']
    #             )
    #             if reading_data['read_handler_func']:
    #                 read_handler_func = reading_data['read_handler_func']
    #                 read_handler_func(
    #                     reading_data['pathname'],
    #                     reading_data['offset'],
    #                     reading_data['size']
    #                 )
    #         except Exception as x:
    #             print_log('_process_direct_readings_by_pathname', x)
    #     return processed

    def _writings_by_pathname(self):
        """Drain the write queue; returns the number of items processed."""
        handles = {}
        processed = 0
        while self._pathname_writings:
            disable_power_led()
            write_data = self._pathname_writings.pop(0)
            # One O_WRONLY descriptor per pathname, reused across the batch.
            if write_data['pathname'] not in handles:
                handles[write_data['pathname']] = os.open(write_data['pathname'], os.O_WRONLY)
            fd = handles[write_data['pathname']]
            try:
                processed += 1
                disable_power_led()
                file_write_bytes(
                    write_data['pathname'],
                    write_data['offset'],
                    write_data['data'],
                    use_fd=fd
                )
            except Exception as x:
                print_log('_process_writings_by_pathname', x)
            disable_power_led()
        for pathname, fd in handles.items():
            os.close(fd)
        return processed

    def _deferred_one_time_writings_by_pathname(self, idle_total_secs):
        """Flush deferred one-time writes once the worker has been idle."""
        handles = {}
        for pathname, write_data in self._pathname_deferred_writings.copy().items():
            if not write_data:
                continue
            # NOTE(review): this guard skips the write when idle_total_secs
            # EXCEEDS idle_min_secs, which looks inverted relative to the
            # "write after being idle long enough" intent — verify.
            if write_data['idle_min_secs'] < idle_total_secs:
                continue
            # print('write_data', write_data)
            disable_power_led()
            if write_data['pathname'] not in handles:
                handles[write_data['pathname']] = os.open(write_data['pathname'], os.O_WRONLY)
            fd = handles[write_data['pathname']]
            try:
                disable_power_led()
                file_write_bytes(
                    write_data['pathname'],
                    write_data['offset'],
                    write_data['data'],
                    use_fd=fd
                )
            except Exception as x:
                print_log('_process_writings_by_pathname', x)
            disable_power_led()
            if write_data['done_handler']:
                write_data['done_handler'](
                    write_data,
                    write_data['done_handler_args']
                )
            # One-time write: mark the slot as consumed.
            self._pathname_deferred_writings[pathname] = None
        for pathname, fd in handles.items():
            os.close(fd)

    def run(self):
        """Worker loop: drain queues, track idle time, pace with short sleeps."""
        idle_start_ts = 0
        while self._running:
            processed = 0
            processed += self._direct_readings_by_pathname()
            processed += self._writings_by_pathname()
            if not processed:
                # idle, process deferred one-time writings
                if not idle_start_ts:
                    idle_start_ts = time.time()
                idle_total_secs = time.time() - idle_start_ts
                self._deferred_one_time_writings_by_pathname(idle_total_secs)
            else:
                idle_start_ts = 0
            time.sleep(10 / 1000)
        time.sleep(0)

    def read_direct_by_pathname(self, pathname: str, offset, size, read_handler_func=None, max_at_a_time=None):
        """Queue an O_DIRECT read; optionally cap the queue at max_at_a_time."""
        if max_at_a_time is not None:
            if len(self._pathname_direct_readings) >= max_at_a_time:
                return
        self._pathname_direct_readings.append({
            'pathname': pathname,
            'offset': offset,
            'size': size,
            'read_handler_func': read_handler_func
        })

    def write_by_pathname(self, pathname: str, offset, data):
        """Queue a write of `data` at `offset` for the given pathname."""
        self._pathname_writings.append({
            'pathname': pathname,
            'offset': offset,
            'data': data
        })

    def deferred_one_time_write_by_pathname(
        self,
        pathname,
        offset,
        data,
        idle_min_secs,
        done_handler=None,
        done_handler_args=None
    ):
        """Schedule a one-time write, replacing any pending one for the pathname."""
        self._pathname_deferred_writings[pathname] = {
            'pathname': pathname,
            'offset': offset,
            'data': data,
            'idle_min_secs': idle_min_secs,
            'done_handler': done_handler,
            'done_handler_args': done_handler_args
        }

    def start(self):
        """Mark the worker as running and start the thread."""
        self._running = True
        return super().start()

    def stop(self):
        """Request the worker loop to exit (thread drains and stops)."""
        self._running = False
class AmigaDiskDevicesFS(LoggingMixIn, Operations):
_handles: Dict[str, int]
_access_times: Dict[str, float]
_modification_times: Dict[str, float]
def __init__(self, disk_devices: dict, async_file_ops: AsyncFileOps):
self._instance_time = time.time()
self._disk_devices = disk_devices
self._static_files = {
'/': dict(
st_mode=(S_IFDIR | 0o444),
st_ctime=self._instance_time,
st_mtime=self._instance_time,
st_atime=self._instance_time,
st_nlink=2,
st_size=4096
),
'/' + STATUS_FILE_NAME: dict(
st_mode=(S_IFREG | 0o444),
st_ctime=self._instance_time,
st_mtime=self._instance_time,
st_atime=self._instance_time,
st_nlink=1
)
}
self._handles = {}
self._mutex = threading.Lock()
self._access_times = {}
self._modification_times = {}
self._last_write_ts = 0
self._async_file_ops = async_file_ops
self._status_log_content = None
access = None
flush = None
getxattr = None
listxattr = None
open = None
opendir = None
release = None
releasedir = None
statfs = None
def _add_defaults(self, ipart_data):
if 'fully_cached' not in ipart_data:
ipart_data['fully_cached'] = False
if 'last_caching_ts' not in ipart_data:
ipart_data['last_caching_ts'] = 0
if 'enable_spinning' not in ipart_data:
ipart_data['enable_spinning'] = True
if 'cached_adf_pathname' not in ipart_data:
ipart_data['cached_adf_pathname'] = ''
def set_disk_devices(self, disk_devices: dict):
with self._mutex:
for ipart_dev, ipart_data in disk_devices.items():
self._add_defaults(ipart_data)
self._disk_devices = disk_devices
self._status_log_content = None
self._flush_handles()
def _flush_handles(self):
for device_pathname in list(self._handles.keys()):
if device_pathname not in self._disk_devices:
self._close_handle(device_pathname)
def _close_handles(self):
for device_pathname in list(self._handles.keys()):
self._close_handle(device_pathname)
def _close_handle(self, device_pathname: str):
handle = None
try:
handle = self._handles[device_pathname]
os.close(handle)
except:
pass
try:
del self._handles[device_pathname]
except:
pass
try:
del self._access_times[device_pathname]
except:
pass
try:
del self._modification_times[device_pathname]
except:
pass
return handle
def _open_handle(self, ipart_data: dict) -> Optional[int]:
device_pathname = ipart_data['device']
if device_pathname in self._handles:
return self._handles[device_pathname]
self._set_fully_cached(ipart_data, False)
is_readable = ipart_data['is_readable']
is_writable = ipart_data['is_writable']
mode = os.O_SYNC | os.O_RSYNC
if is_readable and is_writable:
mode |= os.O_RDWR
else:
mode |= os.O_RDONLY
try:
self._handles[device_pathname] = os.open(device_pathname, mode)
except:
return None
return self._handles[device_pathname]
def _find_file(self, public_name: str) -> Optional[dict]:
for ipart_dev, ipart_data in self._disk_devices.items():
if ipart_data['public_name'] == public_name:
return ipart_data
return None
def _save_file_access_time(self, device_pathname: str, _time: float = None) -> float:
if _time is None:
_time = time.time()
self._access_times[device_pathname] = _time
return _time
def _save_file_modification_time(self, device_pathname: str) -> float:
current_time = time.time()
self._modification_times[device_pathname] = current_time
self._last_write_ts = current_time
return current_time
def _get_file_access_time(self, device: str) -> float:
try:
return self._access_times[device]
except:
return self._save_file_access_time(device)
def _get_file_modification_time(self, device: str) -> float:
try:
return self._modification_times[device]
except:
return self._save_file_modification_time(device)
def _clear_pathname(self, pathname: str) -> str:
if pathname.startswith(os.path.sep):
pathname = pathname[1:]
return pathname
def _genrate_perm_int_mask(self,
user_can_read: bool,
user_can_write: bool,
user_can_execute: bool,
group_can_read: bool,
group_can_write: bool,
group_can_execute: bool,
other_can_read: bool,
other_can_write: bool,
other_can_execute: bool
) -> int:
bin_string = ''
bin_string += str(int(user_can_read))
bin_string += str(int(user_can_write))
bin_string += str(int(user_can_execute))
bin_string += str(int(group_can_read))
bin_string += str(int(group_can_write))
bin_string += str(int(group_can_execute))
bin_string += str(int(other_can_read))
bin_string += str(int(other_can_write))
bin_string += str(int(other_can_execute))
return int(bin_string, 2)
def getattr(self, path, fh=None):
with self._mutex:
self._flush_handles()
if path in self._static_files:
return self._static_files[path]
name = self._clear_pathname(path)
ipart_data = self._find_file(name)
if not ipart_data:
raise FuseOSError(ENOENT)
access_time = self._get_file_access_time(ipart_data['device'])
modification_time = self._get_file_modification_time(ipart_data['device'])
is_readable = ipart_data['is_readable']
is_writable = ipart_data['is_writable']
perm_int_mask = self._genrate_perm_int_mask(
is_readable, is_writable, False,
is_readable, is_writable, False,
is_readable, is_writable, False
)
return dict(st_mode=(S_IFREG | perm_int_mask),
st_nlink=1,
st_size=ipart_data['size'],
st_ctime=self._instance_time,
st_atime=access_time,
st_mtime=modification_time
)
def _partial_read(
        self,
        handle,
        offset,
        size,
        max_read_size = None,
        min_total_read_time_ms = None,
        pre_read_callback = None,
        post_read_callback = None,
        callback_user_data = None
):
    """Read *size* bytes from *handle* one physical sector at a time.

    While reading, per-sector wall-clock time is measured; a sector that
    takes longer than PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS is counted as a
    "real" (uncached) device read. Optional limits:

    - max_read_size: stop after at least this many bytes were read
    - min_total_read_time_ms: keep reading past *size* until this much
      total read time accumulated (used as a cache probe by callers)

    Returns a dict with keys 'all_data' (trimmed to *size*), 'ex' (the
    exception that aborted the loop, or None), 'total_read_time_ms' and
    'count_real_read_sectors'. Exceptions are captured, not raised.
    """
    ex = None
    to_read_size = size
    all_data = bytes()
    dynamic_offset = offset
    read_time_ms = 0
    total_read_time_ms = 0
    count_real_read_sectors = 0
    total_len_data = 0
    while True:
        try:
            if pre_read_callback:
                pre_read_callback(
                    read_time_ms,
                    total_read_time_ms,
                    callback_user_data
                )
            start_time = time.time()
            data = os_read(handle, dynamic_offset, PHYSICAL_SECTOR_SIZE)
            len_data = len(data)
            dynamic_offset += len_data
            total_len_data += len_data
            read_time_ms = int((time.time() - start_time) * 1000)
            total_read_time_ms += read_time_ms
            if post_read_callback:
                post_read_callback(
                    read_time_ms,
                    total_read_time_ms,
                    callback_user_data
                )
            # slower than one floppy-sector time => served by the physical
            # medium rather than the kernel cache
            if read_time_ms > PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS:
                count_real_read_sectors += 1
            all_data += data
            to_read_size -= len_data
            if len_data < PHYSICAL_SECTOR_SIZE:
                # short read -> end of device reached
                break
            if max_read_size is not None:
                if total_len_data >= max_read_size:
                    break
            if to_read_size <= 0:
                if min_total_read_time_ms is not None:
                    if total_read_time_ms < min_total_read_time_ms:
                        # requested size satisfied, but keep reading until
                        # enough wall time was spent (cache-probe mode)
                        continue
                break
        except Exception as x:
            print_log('_partial_read', x)
            ex = x
            break
    # trim anything read past the requested size
    all_data = all_data[:size]
    return {
        'all_data': all_data,
        'ex': ex,
        'total_read_time_ms': total_read_time_ms,
        'count_real_read_sectors': count_real_read_sectors
    }
def _set_fully_cached(self, ipart_data, fully_cached_status):
if ipart_data['fully_cached'] != fully_cached_status:
ipart_data['fully_cached'] = fully_cached_status
self._status_log_content = None
def _pre_read_callback(self, read_time_ms, total_read_time_ms, callback_user_data):
    """Hook run by ``_partial_read`` before each sector read.

    *callback_user_data* is the device's ipart_data dict. While the ADF is
    not fully cached the system sound stays muted (real floppy access is
    noisy), and the device's access time is refreshed on every sector.
    """
    ipart_data = callback_user_data
    if not ipart_data['fully_cached']:
        mute_system_sound(4)
    self._save_file_access_time(ipart_data['device'])
def _floppy_read(self, handle, offset, size, ipart_data):
    """Read from a physical floppy, tracking whether its data is cached.

    A read slower than one physical sector time means the kernel had to
    touch the medium, so the device is marked not-fully-cached; a fast read
    while not fully cached triggers a periodic full-disk cache probe and,
    when the probe is also fast, caching of the whole ADF.
    """
    current_time = time.time()
    if not ipart_data['last_caching_ts']:
        ipart_data['last_caching_ts'] = current_time
    if not ipart_data['fully_cached']:
        # real drive access is imminent - mute the (noisy) system sound
        mute_system_sound(4)
    read_result = self._partial_read(
        handle,
        offset,
        size,
        None,
        None,
        self._pre_read_callback,
        None,
        ipart_data
    )
    if read_result['total_read_time_ms'] > PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS:
        # slow read => data came from the medium, not the kernel cache
        self._set_fully_cached(ipart_data, False)
    # set_numlock_state(ipart_data['fully_cached'])
    if ipart_data['fully_cached']:
        if ipart_data['enable_spinning']:
            # fire an async direct read purely to keep the drive spinning
            self._async_file_ops.read_direct_by_pathname(
                ipart_data['device'],
                offset,
                size,
                None,
                1
            )
        if read_result['ex'] is not None:
            raise read_result['ex']
        return read_result['all_data']
    # NOTE(review): the inner ``if not ipart_data['fully_cached']`` below is
    # redundant - the outer condition already guarantees it; kept as-is.
    if read_result['total_read_time_ms'] < PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS \
        and not ipart_data['fully_cached']:
        if not ipart_data['fully_cached']:
            # probe the cache at most once per CACHE_DATA_BETWEEN_SECS
            if current_time - ipart_data['last_caching_ts'] >= CACHE_DATA_BETWEEN_SECS:
                read_result2 = self._partial_read(
                    handle,
                    0,
                    PHYSICAL_SECTOR_SIZE,
                    FLOPPY_ADF_SIZE,
                    PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS,
                    self._pre_read_callback,
                    None,
                    ipart_data
                )
                ipart_data['last_caching_ts'] = current_time
                if read_result2['total_read_time_ms'] < PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS:
                    # whole probe stayed fast => kernel has the disk cached
                    self._set_fully_cached(ipart_data, True)
                    self._floppy_cache_adf(handle, ipart_data)
    self._save_file_access_time(ipart_data['device'])
    if read_result['ex'] is not None:
        raise read_result['ex']
    return read_result['all_data']
def _floppy_cache_adf(self, handle, ipart_data):
    """Save a full copy of the floppy's ADF into the cache directory.

    Reads the whole ADF, determines its SHA512 id (reusing an existing id
    from the on-disk cache header when present), writes the image into
    CACHED_ADFS_DIR and finally stamps a CachedADFHeader into the floppy's
    last physical sector so the cache can be matched on next insertion.
    """
    # should be called only once when saving cached ADF
    # since read() and write() will not call
    # _floppy_read()
    if not ENABLE_ADF_CACHING:
        return
    # read whole ADF
    read_result3 = self._partial_read(
        handle,
        0,
        FLOPPY_ADF_SIZE,
        FLOPPY_ADF_SIZE,
        PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS,
        self._pre_read_callback,
        None,
        ipart_data
    )
    # NOTE(review): the '(unknown)' log strings below look like mangled
    # '{filename}' placeholders (an unused filename= kwarg is passed to
    # .format()); left byte-identical to preserve runtime output.
    if ipart_data['cached_adf_sha512']:
        # use existing sha512 ID
        sha512_id = ipart_data['cached_adf_sha512']
        print_log('Using existing SHA512 ID={sha512_id} for (unknown) '.format(
            filename=ipart_data['device'],
            sha512_id=sha512_id
        ))
    else:
        # calculate sha512 hash from readed ADF
        adf_hash = hashlib.sha512()
        adf_hash.update(read_result3['all_data'])
        sha512_id = adf_hash.hexdigest()
        print_log('Calculated SHA512 ID={sha512_id} for (unknown) '.format(
            filename=ipart_data['device'],
            sha512_id=sha512_id
        ))
    # cached file is named '<sha512>.adf' inside CACHED_ADFS_DIR
    cached_adf_pathname = os.path.join(
        CACHED_ADFS_DIR,
        build_cached_adf_filename(
            sha512_id,
            FLOPPY_ADF_EXTENSION
        )
    )
    if not os.path.exists(cached_adf_pathname) or os.path.getsize(cached_adf_pathname) != FLOPPY_ADF_SIZE:
        # save a copy of the ADF file in the cache dir
        # sha512 + '.adf'
        save_replace_file(
            cached_adf_pathname,
            read_result3['all_data'],
            CACHED_ADFS_MAX_DIR_SIZE
        )
        os.sync()
    # next call to read() or write() will be redirected to
    # _floppy_read_cached() or _floppy_write_cached()
    ipart_data['cached_adf_pathname'] = cached_adf_pathname
    # # close the handle, it would not be needed anymore
    # self._close_handle(ipart_data['device'])
    print_log('(unknown) saved cached ADF as {cached_adf_pathname}'.format(
        filename=ipart_data['device'],
        cached_adf_pathname=cached_adf_pathname
    ))
    # stamp the cache header (sha512 + mtime) into the last device sector
    header = build_CachedADFHeader(sha512_id, int(os.path.getmtime(cached_adf_pathname)))
    os_write(handle, FLOPPY_DEVICE_LAST_SECTOR, header)
    # close the handle, it would not be needed anymore
    self._close_handle(ipart_data['device'])
def _generate_status_log(self):
if self._status_log_content:
return self._status_log_content
content = ''
for ipart_dev, ipart_data in self._disk_devices.items():
content += 'device:' + ipart_dev + ', '
content += 'public_name:' + ipart_data['public_name'] + ', '
content += 'fully_cached:' + str(int(ipart_data['fully_cached']))
content += '\n'
self._status_log_content = content
return content
def _status_log_read(self, offset, size):
    """Return *size* bytes of the status log starting at *offset*, UTF-8 encoded."""
    chunk = self._generate_status_log()[offset:offset + size]
    return chunk.encode('utf-8')
def _generic_read(self, handle, offset, size, ipart_data):
    """Plain device read for non-floppy devices.

    Refreshes the access time, turns off the power LED for hard-disk
    devices (visual activity indicator) and reads directly from *handle*.
    """
    self._save_file_access_time(ipart_data['device'])
    if ipart_data['is_disk_drive']:
        disable_power_led()
    return os_read(handle, offset, size)
def _open_cached_adf_handle(self, ipart_data: dict) -> Optional[int]:
pathname = ipart_data['cached_adf_pathname']
if pathname in self._handles:
return self._handles[pathname]
mode = os.O_SYNC | os.O_RSYNC | os.O_RDWR
try:
self._handles[pathname] = os.open(pathname, mode)
except:
return None
return self._handles[pathname]
def _floppy_read_cached(self, offset, size, ipart_data):
    """Serve a floppy read from the cached ADF file instead of the medium.

    Optionally issues an async direct read against the real device purely
    to keep the drive spinning (audible/mechanical feedback).
    """
    self._save_file_access_time(ipart_data['device'])
    self._set_fully_cached(ipart_data, True)
    if ipart_data['enable_spinning']:
        # async no-op read against the physical device to keep it spinning
        self._async_file_ops.read_direct_by_pathname(
            ipart_data['device'],
            offset,
            size,
            None,
            2
        )
    fd = self._open_cached_adf_handle(ipart_data)
    # TODO use use_fd
    return file_read_bytes(
        ipart_data['cached_adf_pathname'],
        offset,
        size,
        use_fd=fd
    )
def _floppy_write_cached(self, offset, data, ipart_data):
    """Write *data* to the cached ADF copy and, asynchronously, the device.

    The write is applied synchronously to the cached ADF file (which is
    what the return value reflects) and queued asynchronously against the
    physical floppy; afterwards the on-device cache header is refreshed
    with the cached file's new mtime.
    """
    self._save_file_modification_time(ipart_data['device'])
    self._set_fully_cached(ipart_data, True)
    # queue the same write against the physical floppy in the background
    self._async_file_ops.write_by_pathname(
        ipart_data['device'],
        offset,
        data
    )

    def write_done_handler(write_data, done_handler_args):
        # Intentionally empty: the deferred header write needs no follow-up.
        # (A leftover debug ``print(time.time(), 'data', locals())`` was
        # removed here.)
        pass

    fd = self._open_cached_adf_handle(ipart_data)
    # TODO use use_fd
    write_result = file_write_bytes(
        ipart_data['cached_adf_pathname'],
        offset,
        data,
        0,
        use_fd=fd
    )
    # refresh the on-device cache header (sha512 + cached-file mtime)
    header = build_CachedADFHeader(
        ipart_data['cached_adf_sha512'],
        int(os.path.getmtime(ipart_data['cached_adf_pathname']))
    )
    self._async_file_ops.deferred_one_time_write_by_pathname(
        ipart_data['device'],
        FLOPPY_DEVICE_LAST_SECTOR,
        header,
        1,
        done_handler=write_done_handler,
        done_handler_args=(ipart_data,)
    )
    return write_result
def read(self, path, size, offset, fh):
    """FUSE read() handler.

    Serves the status pseudo-file from memory, clamps reads to the device
    size, and dispatches to the cached-ADF, floppy or generic read path.
    Raises ENOENT for unknown paths and EIO when the device can't be opened.
    """
    with self._mutex:
        self._flush_handles()
        name = self._clear_pathname(path)
        if name == STATUS_FILE_NAME:
            return self._status_log_read(offset, size)
        ipart_data = self._find_file(name)
        if not ipart_data:
            raise FuseOSError(ENOENT)
        file_size = ipart_data['size']
        # clamp the request to the device boundary
        if offset + size > file_size:
            size = file_size - offset
        if offset >= file_size or size <= 0:
            self._save_file_access_time(ipart_data['device'])
            return b''
        if ENABLE_ADF_CACHING:
            if ipart_data['is_floppy_drive'] and ipart_data['cached_adf_pathname']:
                # ADF already cached on disk: no physical access needed
                return self._floppy_read_cached(offset, size, ipart_data)
        handle = self._open_handle(ipart_data)
        if handle is None:
            self._save_file_access_time(ipart_data['device'])
            raise FuseOSError(EIO)
        if ipart_data['is_floppy_drive']:
            return self._floppy_read(
                handle,
                offset,
                size,
                ipart_data
            )
        return self._generic_read(
            handle,
            offset,
            size,
            ipart_data
        )
def truncate(self, path, length, fh=None):
    """No-op FUSE truncate(): block devices have a fixed size."""
    return None
def write(self, path, data, offset, fh):
    """FUSE write() handler.

    Validates writability and bounds, routes cached-ADF floppies to the
    cached-write path, otherwise writes straight to the device handle.
    Raises ENOENT/EROFS/ENOSPC/EIO as appropriate.
    """
    with self._mutex:
        self._flush_handles()
        name = self._clear_pathname(path)
        ipart_data = self._find_file(name)
        if not ipart_data:
            raise FuseOSError(ENOENT)
        if not ipart_data['is_writable']:
            raise FuseOSError(EROFS)
        # device contents are about to change: cache state is stale now
        self._set_fully_cached(ipart_data, False)
        self._save_file_modification_time(ipart_data['device'])
        max_file_size = ipart_data['size']
        len_data = len(data)
        # block devices cannot grow: reject writes past the end
        if offset + len_data > max_file_size or offset >= max_file_size:
            self._save_file_modification_time(ipart_data['device'])
            raise FuseOSError(ENOSPC)
        if len_data == 0:
            self._save_file_modification_time(ipart_data['device'])
            return b''
        if ENABLE_ADF_CACHING:
            if ipart_data['is_floppy_drive'] and ipart_data['cached_adf_pathname']:
                return self._floppy_write_cached(offset, data, ipart_data)
        handle = self._open_handle(ipart_data)
        if handle is None:
            self._save_file_modification_time(ipart_data['device'])
            raise FuseOSError(EIO)
        if ipart_data['is_floppy_drive']:
            # real floppy access is noisy - mute the system sound
            mute_system_sound(4)
        if ipart_data['is_disk_drive']:
            disable_power_led()
        ex = None
        try:
            result = os_write(handle, offset, data)
            self._save_file_modification_time(ipart_data['device'])
            if ipart_data['is_floppy_drive']:
                mute_system_sound(4)
        except Exception as x:
            print_log('write', x)
            ex = x
        self._save_file_modification_time(ipart_data['device'])
        if ex is not None:
            raise ex
        return result
def readdir(self, path, fh):
    """FUSE readdir() handler.

    Always lists '.', '..' and the status pseudo-file; the per-device
    public names are only listed at the filesystem root.
    """
    with self._mutex:
        self._flush_handles()
        entries = [
            '.',
            '..',
            STATUS_FILE_NAME
        ]
        if path != '/':
            return entries
        entries.extend(
            device_data['public_name']
            for device_data in self._disk_devices.values()
        )
        return entries
def destroy(self, path):
    """FUSE teardown hook: close every open device handle."""
    with self._mutex:
        self._close_handles()
def print_log(*args):
    """Log *args* via logzero when logging is enabled, else print to stdout.

    With the logger enabled an empty call is silently ignored (the file
    logger has no use for blank lines); without it, ``print()`` still emits
    a blank line, which the console output relies on for spacing.
    """
    if not ENABLE_LOGGER:
        print(*args)
        return
    if args:
        logzero.logger.info(*args)
def init_logger():
    """Set up the rotating logzero file logger (when ENABLE_LOGGER is set).

    Rotates at ~1 MB with 3 backups and disables stderr echoing.
    """
    if not ENABLE_LOGGER:
        return
    print('Logging to ' + LOG_PATHNAME)
    logzero.logfile(LOG_PATHNAME, maxBytes=1e6, backupCount=3, disableStderrLogger=True)
def print_app_version():
    """Print the application name (upper-cased) and its version string."""
    print(f'{APP_UNIXNAME.upper()} v{APP_VERSION}')
def check_pre_requirements():
    """Verify required external binaries exist; exits the process if not."""
    check_system_binaries()
def configure_system():
    """Apply system-level tweaks: disable swap and raise VFS cache pressure."""
    print_log('Configuring system')
    disable_swap()
    set_cache_pressure()
def disable_swap():
    """Turn off all swap devices (guarded by the DISABLE_SWAP setting)."""
    if DISABLE_SWAP:
        print_log('Disable swap')
        os.system('swapoff -a')
def set_cache_pressure():
    """Raise vm.vfs_cache_pressure (guarded by ENABLE_SET_CACHE_PRESSURE)."""
    if ENABLE_SET_CACHE_PRESSURE:
        print_log('Set cache pressure')
        os.system('sysctl -q vm.vfs_cache_pressure=200')
def check_system_binaries():
    """Exit with status 1 when any required external tool is missing."""
    print_log('Checking system binaries')
    required = (
        'lsblk',
        'sysctl',
        'swapoff',
        'blockdev',
        'umount',
        'hwinfo'
    )
    for binary in required:
        if not sh.which(binary):
            print_log(binary + ': command not found')
            sys.exit(1)
def is_device_physical_floppy(
    device_pathname: str,
    device_data: dict,
    physical_floppy_drives: dict
) -> bool:
    """True when the device is a known physical floppy drive of floppy size."""
    if device_pathname not in physical_floppy_drives:
        return False
    return device_data['type'] == 'disk' and \
        device_data['size'] == FLOPPY_DEVICE_SIZE
def is_device_physical_cdrom(
    device_pathname: str,
    device_data: dict,
    physical_cdrom_drives: dict
) -> bool:
    """True when the device is a known physical cd-rom ('rom' type) drive."""
    if device_pathname not in physical_cdrom_drives:
        return False
    return device_data['type'] == 'rom'
def is_device_physical_disk(device_data: dict) -> bool:
    """True when the device is a plain 'disk' that is neither floppy nor cd-rom."""
    if device_data['is_floppy_drive'] or device_data['is_cdrom_drive']:
        return False
    return device_data['type'] == 'disk'
def get_partitions2(physical_cdrom_drives, physical_floppy_drives) -> 'OrderedDict[str, dict]':
    """Enumerate block devices via lsblk and classify each one.

    Returns an OrderedDict mapping device path to a metadata dict
    (mountpoint, label, size, type, fstype, pttype, drive-kind flags,
    readability/writability), or None when lsblk itself fails. The system
    SD card and "unknown" cd-rom media are excluded.
    """
    lsblk_buf = StringIO()
    # one capture group per -o column, in order
    pattern = r'NAME="(\w*)" SIZE="(\d*)" TYPE="(\w*)" MOUNTPOINT="(.*)" LABEL="(.*)" PATH="(.*)" FSTYPE="(.*)" PTTYPE="(.*)" RO="(.*)"'
    ret: OrderedDict[str, dict] = OrderedDict()
    try:
        # lsblk -P -o name,size,type,mountpoint,label,path,fstype,pttype,ro -n -b
        sh.lsblk('-P', '-o', 'name,size,type,mountpoint,label,path,fstype,pttype,ro', '-n', '-b', _out=lsblk_buf)
    except Exception as x:
        print_log('get_partitions2 lsblk', x)
        return None
    for line in lsblk_buf.getvalue().splitlines():
        line = line.strip()
        if not line:
            continue
        search_result = re.search(pattern, line)
        if not search_result:
            continue
        found = search_result.groups()
        full_path = found[5]
        device_basename = os.path.basename(full_path)
        # never touch the internal SD card the system runs from
        if device_basename.startswith(SYSTEM_INTERNAL_SD_CARD_NAME):
            continue
        device_data = {
            'mountpoint': found[3],
            'label': found[4],
            'config': None,
            'device': full_path,
            'device_basename': device_basename,
            'is_floppy_drive': False,
            'is_cdrom_drive': False,
            'is_disk_drive': False,
            'size': int(found[1]) if found[1] else 0,
            'type': found[2],
            'fstype': found[6],
            'pttype': found[7],
            'is_readable': True, # in Linux device is reabable by default
            # RO="1" means read-only, so writable iff the flag is 0
            'is_writable': bool(int(found[8])) == False
        }
        device_data['is_floppy_drive'] = is_device_physical_floppy(
            full_path,
            device_data,
            physical_floppy_drives
        )
        device_data['is_cdrom_drive'] = is_device_physical_cdrom(
            full_path,
            device_data,
            physical_cdrom_drives
        )
        device_data['is_disk_drive'] = is_device_physical_disk(
            device_data
        )
        if device_data['is_cdrom_drive']:
            device_data['is_writable'] = False
            if is_unknown_disk(device_data):
                # do not add unknown cd/dvd
                continue
        ret[full_path] = device_data
    return ret
def print_partitions(partitions: dict):
    """Log every known partition and its interesting attributes."""
    if not partitions:
        return
    print_log('Known partitions:')
    fields = (
        'mountpoint',
        'label',
        'is_floppy_drive',
        'is_cdrom_drive',
        'is_disk_drive',
        'size',
        'type',
        'pttype',
        'fstype'
    )
    for pathname, device_data in partitions.items():
        print_log(pathname)
        for field in fields:
            print_log(' {field}: {value}'.format(
                field=field,
                value=str(device_data[field])
            ))
        # blank separator line after each partition
        print_log()
def device_get_public_name(ipart_data: dict):
    """Map a device pathname to the file name exported via FUSE.

    Path separators become '__' and an extension is appended according to
    the Amiga device type (.adf / .hdf / .iso).
    """
    public_name = ipart_data['device'].replace(os.path.sep, '__')
    device_type = ipart_data['amiga_device_type']
    if device_type == AMIGA_DISK_DEVICE_TYPE_ADF:
        public_name += FLOPPY_ADF_EXTENSION
    elif device_type in (
        AMIGA_DISK_DEVICE_TYPE_HDF_DISKIMAGE,
        AMIGA_DISK_DEVICE_TYPE_HDF_HDFRDB
    ):
        public_name += HD_HDF_EXTENSION
    # kept as a separate ``if`` (not elif) to mirror the original dispatch
    if device_type == AMIGA_DISK_DEVICE_TYPE_ISO:
        public_name += CD_ISO_EXTENSION
    return public_name
def get_hdf_type(pathname: str) -> int:
    """Classify an HDF-style disk image by its first sector.

    Returns one of the AMIGA_DISK_DEVICE_TYPE_HDF_* constants, or None when
    the image cannot be identified.
    """
    # TODO test me
    file_stat = os.stat(pathname)
    data = file_read_bytes(pathname, 0, PHYSICAL_SECTOR_SIZE)
    if len(data) < 4:
        return None
    signature = ''.join(chr(byte) for byte in data[:4])
    if signature == 'RDSK':
        return AMIGA_DISK_DEVICE_TYPE_HDF_HDFRDB
    if signature.startswith('DOS'):
        # small DOS images are treated as plain disk images, larger ones
        # as real HDF hard-disk files
        if file_stat.st_size < 4 * 1024 * 1024:
            return AMIGA_DISK_DEVICE_TYPE_HDF_DISKIMAGE
        return AMIGA_DISK_DEVICE_TYPE_HDF
    return None
def hdf_type_to_str(hdf_type: int):
    """Human-readable name for an AMIGA_DISK_DEVICE_TYPE_HDF_* constant.

    Returns None for unrecognized values.
    """
    names = {
        AMIGA_DISK_DEVICE_TYPE_HDF_HDFRDB: 'RDSK',
        AMIGA_DISK_DEVICE_TYPE_HDF_DISKIMAGE: 'DISKIMAGE',
        AMIGA_DISK_DEVICE_TYPE_HDF: 'HDF'
    }
    return names.get(hdf_type)
def remove_known_disk_devices(partitions: dict, disk_devices: dict):
    """Drop devices that were force-added but turn out to be 'known' disks.

    A device is removed when its current partition data shows a recognized
    filesystem/partition table (not "unknown"), it is not a cd-rom, and it
    was not explicitly force-added by the user. Returns the removal count
    so the caller knows whether to refresh the FUSE view.
    """
    count_removed = 0
    # iterate over a copy: entries may be deleted during iteration
    for device_pathname, device_data in disk_devices.copy().items():
        if device_pathname not in partitions:
            continue
        ipart_data = partitions[device_pathname]
        remove = not is_unknown_disk(ipart_data) and \
            not ipart_data['is_cdrom_drive'] and \
            not device_data['force_add']
        if remove:
            print_log(device_pathname, 'removing incorrectly added device')
            del disk_devices[device_pathname]
            count_removed += 1
    return count_removed
def cleanup_disk_devices(partitions: dict, disk_devices: dict):
    """Remove devices that are no longer present in *partitions* (ejected)."""
    ejected = [dev for dev in disk_devices if dev not in partitions]
    for dev in ejected:
        del disk_devices[dev]
        print_log(dev, 'ejected')
def add_adf_disk_device(
    ipart_dev: str,
    ipart_data: dict,
    disk_devices: dict,
    force_add: bool = False
):
    """Register *ipart_dev* as an ADF (floppy) device in *disk_devices*.

    Configures the kernel read-ahead, fills in the Amiga device metadata
    (type, public name, fixed ADF size) and tries to match an existing
    cached ADF for the inserted disk.
    """
    # NOTE(review): '(unknown)' below looks like a mangled '{filename}'
    # placeholder (unused filename= kwarg); string kept byte-identical.
    print_log('(unknown) using as ADF'.format(
        filename=ipart_dev
    ))
    if ENABLE_FLOPPY_DRIVE_READ_A_HEAD:
        set_device_read_a_head_sectors(ipart_dev, DEFAULT_READ_A_HEAD_SECTORS)
    else:
        set_device_read_a_head_sectors(ipart_dev, 0)
    disk_devices[ipart_dev] = ipart_data.copy()
    disk_devices[ipart_dev]['amiga_device_type'] = AMIGA_DISK_DEVICE_TYPE_ADF
    disk_devices[ipart_dev]['public_name'] = device_get_public_name(disk_devices[ipart_dev])
    # exported size is always one full ADF, regardless of raw device size
    disk_devices[ipart_dev]['size'] = FLOPPY_ADF_SIZE
    disk_devices[ipart_dev]['force_add'] = force_add
    disk_devices[ipart_dev]['cached_adf_pathname'] = ''
    disk_devices[ipart_dev]['cached_adf_sha512'] = ''
    update_cached_adf_data(ipart_dev, disk_devices[ipart_dev])
def build_CachedADFHeader(sha512_id, mtime):
    """Serialize a CachedADFHeader (ctypes struct) to raw bytes.

    The header carries a fixed signature, a header-type tag, the ADF's
    SHA512 id and the cached file's mtime; it is written into the floppy's
    last physical sector to link the disk with its cached image.
    """
    header = CachedADFHeader()
    header.sign = bytes(CACHED_ADF_SIGN, CACHED_ADF_STR_ENCODING)
    header.header_type = bytes(CACHED_ADF_HEADER_TYPE, CACHED_ADF_STR_ENCODING)
    header.sha512 = bytes(sha512_id, CACHED_ADF_STR_ENCODING)
    header.mtime = mtime
    return bytes(header)
def build_cached_adf_filename(sha512_id, ext):
    """Join the ADF's SHA512 id with the cached-file extension."""
    return '{}{}'.format(sha512_id, ext)
def update_cached_adf_data(ipart_dev: str, ipart_data: dict):
    """Match an inserted floppy against the on-disk ADF cache.

    Reads the CachedADFHeader from the floppy's last sector; when the
    signature is valid, stores the SHA512 id in *ipart_data* and, if a
    valid cached file exists (right size, mtime not older than the header's),
    records its pathname so reads/writes can be served from the cache.
    Stale/corrupt cached files are deleted.
    """
    if not ENABLE_ADF_CACHING:
        return
    last_sector_data = file_read_bytes(ipart_dev, FLOPPY_DEVICE_LAST_SECTOR, PHYSICAL_SECTOR_SIZE)
    adf_header = CachedADFHeader.from_buffer_copy(last_sector_data)
    decoded_sign = ''
    decoded_header_type = ''
    decoded_sha512 = ''
    try:
        decoded_sign = str(adf_header.sign, CACHED_ADF_STR_ENCODING)
        decoded_header_type = str(adf_header.header_type, CACHED_ADF_STR_ENCODING)
        decoded_sha512 = str(adf_header.sha512, CACHED_ADF_STR_ENCODING)
    except UnicodeDecodeError:
        # garbage in the last sector => treated as "not cached" below
        pass
    if adf_header.mtime < 0:
        adf_header.mtime = 0
    if decoded_sign != CACHED_ADF_SIGN or \
        decoded_header_type != CACHED_ADF_HEADER_TYPE or \
        not decoded_sha512 or \
        len(decoded_sha512) < SHA512_LENGTH:
        # ADF not cached
        return
    ipart_data['cached_adf_sha512'] = decoded_sha512
    cached_adf_pattern = os.path.join(
        CACHED_ADFS_DIR,
        build_cached_adf_filename(
            decoded_sha512,
            FLOPPY_ADF_EXTENSION
        )
    )
    # NOTE(review): '(unknown)' in the log strings below looks like a
    # mangled '{filename}' placeholder; strings kept byte-identical.
    print_log('(unknown) looking for {cached_adf_pattern}'.format(
        filename=ipart_dev,
        cached_adf_pattern=cached_adf_pattern
    ))
    # glob over an exact filename: returns [path] iff the file exists
    found_cached_adfs = list(glob.glob(cached_adf_pattern))
    if not found_cached_adfs or \
        not os.path.exists(found_cached_adfs[0]):
        print_log('(unknown) is cached ADF (ID={sha512_id}, mtime={mtime}, cached file does not exists, existing ID will be used)'.format(
            filename=ipart_dev,
            sha512_id=decoded_sha512,
            mtime=adf_header.mtime
        ))
        return
    if os.path.getsize(found_cached_adfs[0]) != FLOPPY_ADF_SIZE:
        print_log('(unknown) is cached ADF (ID={sha512_id}, mtime={mtime}, cached file has incorrect size, removing, existing ID will be used)'.format(
            filename=ipart_dev,
            sha512_id=decoded_sha512,
            mtime=adf_header.mtime
        ))
        os.remove(found_cached_adfs[0])
        return
    # if Decimal(os.path.getmtime(found_cached_adfs[0])) < Decimal(adf_header.mtime):
    # cached file older than the header's timestamp => stale copy
    if int(os.path.getmtime(found_cached_adfs[0])) < adf_header.mtime:
        print_log('(unknown) is cached ADF (ID={sha512_id}, mtime={mtime}, cached file has incorrect mtime, removing, existing ID will be used)'.format(
            filename=ipart_dev,
            sha512_id=decoded_sha512,
            mtime=adf_header.mtime
        ))
        os.remove(found_cached_adfs[0])
        return
    ipart_data['cached_adf_pathname'] = found_cached_adfs[0]
    print_log('(unknown) is cached ADF (ID={sha512_id}, as {cached_adf_pathname})'.format(
        filename=ipart_dev,
        sha512_id=decoded_sha512,
        cached_adf_pathname=found_cached_adfs[0]
    ))
def add_hdf_disk_device(
    ipart_dev: str,
    ipart_data: dict,
    disk_devices: dict,
    _type: int,
    force_add: bool = False
):
    """Register *ipart_dev* as an HDF-type disk device in *disk_devices*."""
    print_log('(unknown) using as HDF'.format(
        filename=ipart_dev
    ))
    entry = ipart_data.copy()
    entry['amiga_device_type'] = _type
    entry['public_name'] = device_get_public_name(entry)
    entry['force_add'] = force_add
    disk_devices[ipart_dev] = entry
def add_bigger_disk_device(
    ipart_dev: str,
    ipart_data: dict,
    disk_devices: dict,
    force_add: bool = False
):
    """Classify a non-floppy disk device and register it as an HDF.

    Devices whose first sector cannot be classified default to DISKIMAGE;
    unsupported HDF sub-types are logged and skipped.
    """
    hdf_type = get_hdf_type(ipart_dev)
    if not hdf_type:
        # could be iso
        print_log('(unknown) cannot determine disk device type, using DISKIMAGE by default'.format(
            filename=ipart_dev
        ))
        hdf_type = AMIGA_DISK_DEVICE_TYPE_HDF_DISKIMAGE
    if hdf_type != AMIGA_DISK_DEVICE_TYPE_HDF_DISKIMAGE and \
        hdf_type != AMIGA_DISK_DEVICE_TYPE_HDF_HDFRDB and \
        hdf_type != AMIGA_DISK_DEVICE_TYPE_HDF:
        print_log('(unknown) {_type} is not supported'.format(
            filename=ipart_dev,
            _type=hdf_type_to_str(hdf_type)
        ))
        return
    add_hdf_disk_device(
        ipart_dev,
        ipart_data,
        disk_devices,
        hdf_type,
        force_add
    )
def add_iso_disk_device(ipart_dev: str, ipart_data: dict, disk_devices: dict):
    """Register *ipart_dev* as an ISO (cd-rom) device in *disk_devices*."""
    print_log('(unknown) using as ISO'.format(
        filename=ipart_dev
    ))
    entry = ipart_data.copy()
    entry['amiga_device_type'] = AMIGA_DISK_DEVICE_TYPE_ISO
    entry['public_name'] = device_get_public_name(entry)
    entry['force_add'] = False
    disk_devices[ipart_dev] = entry
def is_unknown_disk(ipart_data: dict) -> bool:
    """A disk is 'unknown' when lsblk reported neither a filesystem nor a
    partition-table type for it."""
    has_filesystem = ipart_data['fstype'] != ''
    has_partition_table = ipart_data['pttype'] != ''
    return not has_filesystem and not has_partition_table
def add_disk_devices2(partitions: dict, disk_devices: dict):
    """Attach newly seen partitions to *disk_devices* by their drive kind.

    Floppy and plain disks are normally only attached when they are
    "unknown" (no recognized filesystem); holding CMD+SHIFT force-adds
    recognized ones too. Cd-roms are always attached as ISOs.
    """
    force_add = is_cmd_shift_pressed()
    clear_pressed_keys()
    for ipart_dev, ipart_data in partitions.items():
        if ipart_dev in disk_devices:
            continue
        unknown = is_unknown_disk(ipart_data)
        if ipart_data['is_floppy_drive']:
            if not unknown and not force_add:
                continue
            add_adf_disk_device(
                ipart_dev,
                ipart_data,
                disk_devices,
                force_add
            )
            if not disk_devices[ipart_dev]['cached_adf_pathname']:
                # ADF is not cached, need to mute the system sound
                mute_system_sound(6)
        elif ipart_data['is_disk_drive']:
            if not unknown and not force_add:
                continue
            add_bigger_disk_device(
                ipart_dev,
                ipart_data,
                disk_devices,
                force_add
            )
        elif ipart_data['is_cdrom_drive']:
            add_iso_disk_device(
                ipart_dev,
                ipart_data,
                disk_devices
            )
def is_adf_header(header: bytes) -> bool:
    """Heuristically detect an Amiga ADF bootblock in *header*.

    Accepts headers whose DiskType starts with 'DOS' and whose fourth
    DiskType byte uses only its low three bits (the filesystem-flag bits).
    """
    # TODO provide better method to detect ADF header
    parsed_header = numpy.frombuffer(header, ADF_BOOTBLOCK, 1)[0]
    disk_type = parsed_header['DiskType'].tobytes().decode('ascii', 'ignore').rstrip('\0')
    if disk_type != 'DOS':
        return False
    # any bit above bit 2 set in the flags byte => not a plain AmigaDOS disk
    disk_type_other_bits = clear_bits(
        parsed_header['DiskType'][3],
        [0, 1, 2]
    )
    if disk_type_other_bits != 0:
        return False
    return True
def clear_bits(i: int, bits: list) -> int:
    """Return *i* with each bit position listed in *bits* cleared to zero."""
    mask = 0
    for bit in bits:
        mask |= 1 << bit
    return i & ~mask
def update_disk_devices(partitions: dict, disk_devices: dict):
    """Synchronize *disk_devices* with the current partition list:
    drop ejected devices first, then attach newly inserted ones."""
    cleanup_disk_devices(partitions, disk_devices)
    add_disk_devices2(partitions, disk_devices)
def run_fuse(disk_devices: dict, async_file_ops: AsyncFileOps):
    """Create the FUSE filesystem and run its (blocking) main loop.

    Stores the instance in the module-global ``fs_instance`` so other
    threads can push device updates into it.
    """
    global fs_instance
    fs_instance = AmigaDiskDevicesFS(disk_devices, async_file_ops)
    # foreground=True: blocks this (background) thread until unmounted;
    # direct_io=True: bypass the page cache so device reads stay live
    FUSE(
        fs_instance,
        TMP_PATH_PREFIX,
        foreground=True,
        allow_other=True,
        direct_io=True
    )
def init_fuse(disk_devices: dict, async_file_ops: AsyncFileOps):
    """Start the FUSE filesystem in a background thread; return the thread."""
    print_log('Init FUSE')
    fuse_thread = threading.Thread(
        target=run_fuse,
        args=(disk_devices, async_file_ops)
    )
    fuse_thread.start()
    return fuse_thread
def unmount_fuse_mountpoint():
    """Unmount the FUSE mountpoint (best effort, errors are ignored)."""
    print_log('Unmounting FUSE mountpoint')
    os.system('umount ' + TMP_PATH_PREFIX)
def mkdir_fuse_mountpoint():
    """Ensure the FUSE mountpoint directory exists."""
    os.makedirs(TMP_PATH_PREFIX, exist_ok=True)
def affect_fs_disk_devices(disk_devices: dict):
    """Push a snapshot of *disk_devices* into the running FUSE instance.

    Does nothing while the FUSE thread has not created the instance yet.
    """
    global fs_instance
    if fs_instance:
        fs_instance.set_disk_devices(disk_devices.copy())
def set_device_read_a_head_sectors(device: str, sectors: int):
    """Set the block-device read-ahead, skipping redundant blockdev calls.

    The last value applied per device is remembered in the module-global
    ``devices_read_a_head_sectors`` dict.
    """
    global devices_read_a_head_sectors
    previous = devices_read_a_head_sectors.setdefault(device, None)
    if previous == sectors:
        return
    devices_read_a_head_sectors[device] = sectors
    os.system('blockdev --setra {sectors} {device}'.format(
        sectors=sectors,
        device=device
    ))
def find_new_devices(partitions: dict, old_partitions: dict) -> List[str]:
    """Return device pathnames present in *partitions* but absent from
    *old_partitions* (everything counts as new when there is no old state)."""
    if not old_partitions:
        return list(partitions)
    return [dev for dev in partitions if dev not in old_partitions]
def quick_format_single_device(device: str):
    """Write a blank 1 KiB 'DOS' boot signature to *device*.

    Returns True on success, False (after logging) on an OS-level error.
    """
    blank_dos = bytearray(1024)
    blank_dos[0:3] = b'DOS'
    try:
        file_write_bytes(device, 0, blank_dos, os.O_SYNC | os.O_CREAT)
    except OSError as ex:
        print_log(str(ex))
        return False
    return True
def rescan_device(device_basename: str):
    """Ask the kernel to rescan the block device via sysfs (size/partition
    changes become visible to lsblk afterwards)."""
    os.system(f'echo 1 > /sys/class/block/{device_basename}/device/rescan')
def format_devices(partitions: dict, old_partitions: dict, loop_counter: int):
    """Quick-format a newly attached writable disk while CMD+DELETE is held.

    Only considers devices that just appeared (vs. *old_partitions*), only
    whole 'disk' devices, only writable ones, and at most one per call.
    After a successful format the device is rescanned and removed from
    *partitions* so the next poll re-reads its fresh state.
    """
    if not is_cmd_delete_pressed():
        return
    clear_pressed_keys()
    if not loop_counter:
        # do not format on the very first iteration - the shortcut could
        # still be held over from before the service started
        return
    new_devices = find_new_devices(partitions, old_partitions)
    if not new_devices:
        return
    to_format = []
    for ipart_dev in new_devices:
        ipart_data = partitions[ipart_dev]
        if ipart_data['type'] != 'disk':
            continue
        if not ipart_data['is_writable']:
            continue
        print_log(ipart_dev, 'new')
        print_log(ipart_dev, 'quick-formatting device')
        to_format.append(ipart_dev)
        # only one disk device at a time
        break
    if not to_format:
        return
    ipart_dev = to_format[0]
    if quick_format_single_device(ipart_dev):
        print_log(ipart_dev, 'scanning')
        # BUG FIX: look up the basename from the selected device instead of
        # relying on the loop variable ``ipart_data`` still pointing at it.
        rescan_device(partitions[ipart_dev]['device_basename'])
        del partitions[ipart_dev]
def is_cmd_delete_pressed() -> bool:
    """True while both CMD and DELETE are held (format shortcut)."""
    return key_cmd_pressed and key_delete_pressed
def is_cmd_shift_pressed() -> bool:
    """True while both CMD and SHIFT are held (force-add shortcut)."""
    return key_cmd_pressed and key_shift_pressed
def clear_pressed_keys():
    """Reset all tracked modifier/key states to 'not pressed'."""
    global key_cmd_pressed, key_delete_pressed, key_shift_pressed
    key_cmd_pressed = key_delete_pressed = key_shift_pressed = False
def on_key_press(key):
    """pynput callback: record CMD/DELETE/SHIFT presses in module globals."""
    global key_cmd_pressed, key_delete_pressed, key_shift_pressed
    if key == Key.cmd:
        key_cmd_pressed = True
    elif key == Key.delete:
        key_delete_pressed = True
    elif key == Key.shift:
        key_shift_pressed = True
def on_key_release(key):
    """pynput callback: record CMD/DELETE/SHIFT releases in module globals."""
    global key_cmd_pressed, key_delete_pressed, key_shift_pressed
    if key == Key.cmd:
        key_cmd_pressed = False
    elif key == Key.delete:
        key_delete_pressed = False
    elif key == Key.shift:
        key_shift_pressed = False
def init_keyboard_listener():
    """Start the global pynput keyboard listener feeding the key globals."""
    Listener(
        on_press=on_key_press,
        on_release=on_key_release
    ).start()
def init_async_file_ops():
    """Create and start the background AsyncFileOps worker; return it."""
    print_log('Init AsyncFileOps')
    worker = AsyncFileOps()
    worker.start()
    return worker
def find_physical_cdrom_drives():
    """Parse ``hwinfo --cdrom --short`` output into a list of device paths."""
    hwinfo_buf = StringIO()
    cdrom_data_started = False
    ret = []
    # hwinfo --cdrom --short
    sh.hwinfo('--cdrom', '--short', _out=hwinfo_buf)
    for line in hwinfo_buf.getvalue().splitlines():
        line = line.strip()
        if not line:
            continue
        if line == 'cdrom:':
            # everything after this marker line describes cd-rom drives
            cdrom_data_started = True
            continue
        if not cdrom_data_started:
            continue
        if not line.startswith('/dev/'):
            continue
        parts = line.split(maxsplit=1)
        if len(parts) != 2:
            continue
        device = parts[0]
        # NOTE(review): the intent is presumably "exists and is NOT a regular
        # file" (i.e. a block device); as written this also accepts paths
        # that do not exist at all - confirm before changing.
        if not os.path.exists(device) or not os.path.isfile(device):
            ret.append(device)
    return ret
def update_physical_cdrom_drives(physical_cdrom_drives):
    """(Re)index detected physical cd-rom drives into *physical_cdrom_drives*,
    sorted by device path."""
    print_log('Getting information about physical cd-rom drives')
    for index, device in enumerate(sorted(find_physical_cdrom_drives())):
        physical_cdrom_drives[device] = {
            'index': index,
            'device': device
        }
def print_physical_cdrom_drives(physical_cdrom_drives):
    """Log every detected physical cd-rom drive with its index."""
    print_log('Physical cd-rom drives:')
    for pathname, drive_data in physical_cdrom_drives.items():
        print_log(pathname)
        print_log(' index: ' + str(drive_data['index']))
        print_log(' device: ' + drive_data['device'])
        # blank separator line after each drive
        print_log()
def find_physical_floppy_drives():
    """Parse ``ufiformat --inquire --quiet`` output into a list of device paths."""
    ufiformat_buf = StringIO()
    ret = []
    # ufiformat --inquire --quiet
    sh.ufiformat('--inquire', '--quiet', _out=ufiformat_buf)
    for line in ufiformat_buf.getvalue().splitlines():
        line = line.strip()
        if not line:
            continue
        parts = line.split()
        if len(parts) != 2:
            continue
        device = parts[0]
        # NOTE(review): the intent is presumably "exists and is NOT a regular
        # file" (i.e. a block device); as written this also accepts paths
        # that do not exist at all - confirm before changing.
        if not os.path.exists(device) or not os.path.isfile(device):
            ret.append(device)
    return ret
def update_physical_floppy_drives(physical_floppy_drives):
    """(Re)index detected physical floppy drives into *physical_floppy_drives*,
    sorted by device path."""
    print_log('Getting information about physical floppy drives')
    for index, device in enumerate(sorted(find_physical_floppy_drives())):
        physical_floppy_drives[device] = {
            'index': index,
            'device': device
        }
def print_physical_floppy_drives(physical_floppy_drives):
    """Log every detected physical floppy drive with its index."""
    print_log('Physical floppy drives:')
    for pathname, drive_data in physical_floppy_drives.items():
        print_log(pathname)
        print_log(' index: ' + str(drive_data['index']))
        print_log(' device: ' + drive_data['device'])
        # blank separator line after each drive
        print_log()
def main():
    """Entry point: set up logging, FUSE and input listeners, then poll.

    The main loop re-reads the partition list (~10x/second), formats /
    attaches / detaches devices accordingly and mirrors the result into
    the FUSE filesystem until interrupted with Ctrl+C.
    """
    partitions = None
    old_partitions = None
    disk_devices = {}
    loop_counter = 0
    physical_floppy_drives = OrderedDict()
    physical_cdrom_drives = OrderedDict()
    print_app_version()
    check_pre_requirements()
    init_logger()
    # start from a clean mountpoint (a previous run may have left one)
    unmount_fuse_mountpoint()
    mkdir_fuse_mountpoint()
    # # uncomment this to enable FUSE logging
    # logging.basicConfig(level=logging.DEBUG)
    configure_system()
    init_simple_mixer_control()
    async_file_ops = init_async_file_ops()
    init_fuse(disk_devices, async_file_ops)
    update_physical_floppy_drives(physical_floppy_drives)
    print_physical_floppy_drives(physical_floppy_drives)
    update_physical_cdrom_drives(physical_cdrom_drives)
    print_physical_cdrom_drives(physical_cdrom_drives)
    init_keyboard_listener()
    os.makedirs(CACHED_ADFS_DIR, exist_ok=True)
    try:
        while True:
            # MAIN_LOOP_MAX_COUNTER falsy => no iteration limit (debug knob)
            if not MAIN_LOOP_MAX_COUNTER or loop_counter < MAIN_LOOP_MAX_COUNTER:
                partitions = get_partitions2(
                    physical_cdrom_drives,
                    physical_floppy_drives
                )
                if partitions is not None:
                    if partitions != old_partitions:
                        # something changed
                        print_partitions(partitions)
                        format_devices(partitions, old_partitions, loop_counter)
                    update_disk_devices(partitions, disk_devices)
                    affect_fs_disk_devices(disk_devices)
                    if remove_known_disk_devices(partitions, disk_devices):
                        # devices were dropped: push the new set to FUSE
                        affect_fs_disk_devices(disk_devices)
                    old_partitions = partitions
                    loop_counter += 1
                unmute_system_sound()
                enable_power_led()
            # ~100 ms between device scans
            time.sleep(100 / 1000)
            time.sleep(0)
    except KeyboardInterrupt as ex:
        print_log('KeyboardInterrupt')
        unmute_system_sound()
        enable_power_led()
        unmount_fuse_mountpoint()
        async_file_ops.stop()
        sys.exit()
# run the service only when executed directly (not when imported)
if __name__ == '__main__':
    main()
| skazanyNaGlany/amipi400 | amiga_disk_devices.py | amiga_disk_devices.py | py | 56,855 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "decimal.getcontext",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.version_info",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.geteuid... |
34670378486 | import sys
from math import log
from copy import deepcopy
from typing import Dict, List
from lib.graph import Graph, read_input_csv
class TraceNode:
    """A node in a reconstructed attack trace.

    Holds the attack-graph node id plus the list of predecessor trace
    nodes, which is filled in while the trace is being built.
    """

    def __init__(self, id):
        self.id = id
        self.preds = []
def shortest_trace(graph, node):
    """
    Compute the shortest attack trace to the specified attack goal node
    Args:
        graph (): a Graph object
        node (): a specified attack goal node
    Returns:
        a tuple of (the minimum depth, the actual trace)
    """
    trace_node = TraceNode(node)
    node_type = graph.nodes[node].node_type
    if node_type == 'primitive':
        # leaf fact: depth zero, no predecessors
        return 0, trace_node
    # recursively resolve every predecessor once
    pred_results = [
        shortest_trace(graph, pred)
        for pred in graph.nodes[node].preds
    ]
    if node_type == 'OR':
        # an OR node needs only its cheapest predecessor
        # (min() keeps the first minimum, matching a strict '<' scan)
        best_depth, best_trace = min(pred_results, key=lambda result: result[0])
        trace_node.preds.append(best_trace)
        return best_depth + 1, trace_node
    if node_type == 'AND':
        # an AND node needs all predecessors; its depth is the deepest one
        trace_node.preds.extend(trace for _, trace in pred_results)
        deepest = max(depth for depth, _ in pred_results)
        return deepest + 1, trace_node
def blast_radius(graph):
    """
    Compute the blast radius of each vulnerability in the attack graph
    Args:
        graph (): a Graph object representing the attack graph
    Returns:
        a dictionary from `vulnerability node id` to `the list of derivation nodes`
    """
    class Vul:
        # Lightweight record pairing a vulnerability node id with its description.
        def __init__(self, node_id, desc):
            self.node_id = node_id
            self.desc = desc
    # Collect every primitive `vulExists(...)` fact as a known vulnerability.
    vul_list: List[Vul] = []
    for node in graph.nodes:
        if graph.nodes[node].node_type == 'primitive' and 'vulExists(' in graph.nodes[node].desc:
            vul_list.append(Vul(node, graph.nodes[node].desc))
    queue = []
    # node -> list of "footprints"; each footprint maps a vulnerability index
    # (its position in vul_list) to 0/1, marking which vulnerabilities the
    # node's derivation relies on.
    node_vul_evidences: Dict[int, List[Dict[int, int]]] = {}
    # Initialize node_vul_evidences for all of the primitive fact nodes
    for node in graph.nodes:
        node_vul_evidences[node] = [dict(zip(range(len(vul_list)), [0] * len(vul_list)))]
        if graph.nodes[node].node_type == 'primitive' and 'vulExists(' in graph.nodes[node].desc:
            for i in range(len(vul_list)):
                if graph.nodes[node].desc == vul_list[i].desc:
                    node_vul_evidences[node][0][i] += 1
            # Seed the work list with the successors of every vulnerability fact.
            for child in graph.nodes[node].succ:
                if child not in queue:
                    queue.append(child)
    # Iteratively update the `node_vul_evidences` for nodes in the `queue`,
    # propagating evidence BFS-style from facts towards derived nodes.
    # NOTE(review): a node's evidence is computed from whatever its
    # predecessors hold at pop time — presumably the traversal order
    # guarantees predecessors are ready; verify for cyclic/long graphs.
    while len(queue) != 0:
        cur_node = queue.pop(0)
        cur_vul_evidence = deepcopy(node_vul_evidences[graph.nodes[cur_node].preds[0]])
        for i in range(1, len(graph.nodes[cur_node].preds)):
            if graph.nodes[cur_node].node_type == 'AND':
                cur_vul_evidence = merge_ve_and(cur_vul_evidence, node_vul_evidences[graph.nodes[cur_node].preds[i]], vul_list)
            elif graph.nodes[cur_node].node_type == 'OR':
                cur_vul_evidence = merge_ve_or(cur_vul_evidence, node_vul_evidences[graph.nodes[cur_node].preds[i]])
        node_vul_evidences[cur_node] = cur_vul_evidence
        for child in graph.nodes[cur_node].succ:
            if child not in queue:
                queue.append(child)
    return determine_br(graph, node_vul_evidences, vul_list)
def merge_ve_or(vul_evidence1, vul_evidence2):
    """Merge vulnerability evidences of two parents of an `OR` node.

    The child inherits every footprint of the first parent, plus every
    footprint of the second parent that the first one does not already
    contain (a footprint is a dict mapping vulnerability index -> 0/1).
    Args:
        vul_evidence1 (): vulnerability evidence for parent node 1
        vul_evidence2 (): vulnerability evidence for parent node 2
    Returns:
        the merged vulnerability evidence for the child `OR` node
    """
    merged_vul_evidence = deepcopy(vul_evidence1)
    # Membership is tested against the *original* first list, mirroring a
    # set-union over footprints.
    merged_vul_evidence.extend(
        foot_print for foot_print in vul_evidence2
        if foot_print not in vul_evidence1)
    return merged_vul_evidence
def merge_ve_and(vul_evidence1, vul_evidence2, vul_list):
    """Merge vulnerability evidences of two parents of an `AND` node.

    Every pairing of one footprint from each parent is combined
    element-wise with max(); duplicate combined footprints are dropped.
    Args:
        vul_evidence1 (): vulnerability evidence for parent node 1
        vul_evidence2 (): vulnerability evidence for parent node 2
        vul_list (): the list of all of the vulnerabilities in the given attack graph
    Returns:
        the merged vulnerability evidence for the child `AND` node
    """
    merged_vul_evidence = []
    for foot_print1 in vul_evidence1:
        for foot_print2 in vul_evidence2:
            combined = {idx: max(foot_print1[idx], foot_print2[idx])
                        for idx in range(len(vul_list))}
            if combined not in merged_vul_evidence:
                merged_vul_evidence.append(combined)
    return merged_vul_evidence
def determine_br(graph, node_vul_evidences, vul_list):
    """Determine the blast radius for each vulnerability in the attack graph.

    A derivation (`OR`) node belongs to the blast radius of a vulnerability
    when it carries a footprint supported by that vulnerability alone, i.e.
    exactly one evidence bit is set.
    Args:
        node_vul_evidences (): vulnerability evidence for all of the nodes
        vul_list (): the list of all of the vulnerabilities in the attack graph
    Returns:
        dict mapping vulnerability node id -> list of derivation node ids
    """
    br = {vul.node_id: [] for vul in vul_list}
    for node, evidence in node_vul_evidences.items():
        if graph.nodes[node].node_type != 'OR':
            continue
        for foot_print in evidence:
            if sum(foot_print.values()) == 1:
                vul_index = find_key_from_dict(foot_print)
                br[vul_list[vul_index].node_id].append(node)
    return br


def find_key_from_dict(d):
    """Return the first key of *d* whose value equals 1 (None when absent).

    Args:
        d (): a dictionary such as {0: 1, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
    Returns:
        0
    """
    for key, value in d.items():
        if value == 1:
            return key
| pmlab-ucd/IOTA | python/graph_analyzer.py | graph_analyzer.py | py | 7,605 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number"... |
34348826764 | from datetime import datetime
from django.contrib.auth.models import AbstractUser
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
# Validation messages shown when a title's year or a review's score is out of
# range (used by the validators on Title.year and Review.score below).
YEAR_VALIDATION_ERROR = 'Нельзя добавить произведение из будущего'
SCORE_VALIDATION_ERROR = 'Оценка должна быть от 1 до 10'
class User(AbstractUser):
    """Custom user model with a biography, unique e-mail and a role."""

    ADMIN = 'admin'
    MODERATOR = 'moderator'
    USER = 'user'
    ROLE = (
        (ADMIN, 'admin'),
        (MODERATOR, 'moderator'),
        (USER, 'user'),
    )
    bio = models.TextField(verbose_name='О себе', blank=True)
    email = models.EmailField(verbose_name='Адрес электронной почты', unique=True)
    role = models.CharField(max_length=15, choices=ROLE, default=USER)

    class Meta:
        ordering = ('-pk',)
        verbose_name = 'пользователь'
        verbose_name_plural = 'пользователи'

    @property
    def is_moderator(self):
        """True when the account holds the moderator role."""
        return self.role == self.MODERATOR

    @property
    def is_admin(self):
        """True for the admin role as well as staff and superuser accounts."""
        return any((self.role == self.ADMIN, self.is_staff, self.is_superuser))

    def __str__(self):
        return self.username
class Category(models.Model):
    """A category (e.g. film, book) that a title can belong to."""

    name = models.CharField(
        max_length=200,
        verbose_name='Название категории',
    )
    slug = models.SlugField(
        unique=True,
        verbose_name='Уникальный идентификатор категории',
    )

    class Meta:
        ordering = ('-pk',)
        # Fixed typo: was 'категирия'.
        verbose_name = 'категория'
        verbose_name_plural = 'категории'

    def __str__(self):
        return self.name
class Genre(models.Model):
    """A genre label that can be attached to titles via GenreTitle."""

    name = models.CharField(verbose_name='Название жанра', max_length=200)
    slug = models.SlugField(verbose_name='Уникальный идентификатор жанра', unique=True)

    class Meta:
        ordering = ('-pk',)
        verbose_name = 'жанр'
        verbose_name_plural = 'жанры'

    def __str__(self):
        return self.name
class Title(models.Model):
    """A creative work that users can review; categorized and tagged with genres."""

    name = models.CharField(
        max_length=200,
        verbose_name='Название произведения',
    )
    year = models.PositiveSmallIntegerField(
        verbose_name='Год создания',
        validators=[
            # NOTE(review): datetime.now().year is evaluated once, when this
            # module is imported, so the upper bound stays fixed until the
            # process restarts (and the year gets baked into migrations).
            # Consider a callable limit_value instead — confirm before changing.
            MaxValueValidator(
                datetime.now().year,
                message=YEAR_VALIDATION_ERROR
            )
        ]
    )
    # Category is optional; deleting a category keeps its titles (SET_NULL).
    category = models.ForeignKey(
        Category,
        on_delete=models.SET_NULL,
        related_name='titles',
        blank=True,
        null=True,
        verbose_name='Категория',
    )
    description = models.TextField(
        verbose_name='Описание',
        blank=True,
        null=True,
    )
    # Many-to-many through an explicit join model (GenreTitle).
    genre = models.ManyToManyField(
        Genre,
        through='GenreTitle',
        through_fields=['title', 'genre']
    )

    class Meta:
        ordering = ('-pk', 'name',)
        verbose_name = 'произведение'
        verbose_name_plural = 'произведения'

    def __str__(self):
        return (f'{self.name} '
                f'({self.category})')
class GenreTitle(models.Model):
    """Through model linking a title to one of its genres."""

    title = models.ForeignKey(
        Title,
        on_delete=models.CASCADE,
        verbose_name='Произведение',
    )
    genre = models.ForeignKey(
        Genre,
        on_delete=models.CASCADE,
        blank=True,
        null=True,
        verbose_name='Жанр',
    )

    class Meta:
        ordering = ('-pk',)
        verbose_name = 'Привязка жанров'
        verbose_name_plural = 'Привязки жанров'

    def __str__(self):
        return (f'({self.title}->{self.genre})')
class Review(models.Model):
    """A user's review of a title with a 1-10 score.

    The UniqueConstraint below enforces at most one review per
    (author, title) pair.
    """

    title = models.ForeignKey(
        Title,
        blank=True,
        on_delete=models.CASCADE,
        null=False,
        related_name='reviews',
        verbose_name='Произведение'
    )
    text = models.TextField(
        verbose_name='Текст',
        help_text='Заполните поле.',
    )
    author = models.ForeignKey(
        User,
        blank=True,
        verbose_name='Автор',
        on_delete=models.CASCADE,
        null=False,
        related_name='reviews',
    )
    # Score is clamped to [1, 10] by validators; both bounds reuse the same
    # error message.
    score = models.PositiveSmallIntegerField(
        help_text='Введите от 1 до 10',
        default=10,
        verbose_name='Оценка',
        validators=(MinValueValidator(1, message=SCORE_VALIDATION_ERROR),
                    MaxValueValidator(10, message=SCORE_VALIDATION_ERROR))
    )
    pub_date = models.DateTimeField(
        verbose_name='Дата публикации',
        auto_now_add=True,
        db_index=True,
    )

    def __str__(self):
        # Short preview of the review text.
        return self.text[:15]

    class Meta:
        ordering = ('-pub_date',)
        verbose_name_plural = 'Отзывы'
        constraints = [
            models.UniqueConstraint(fields=['author', 'title'],
                                    name='title_review')
        ]
class Comment(models.Model):
    """A comment left by a user under a review."""

    text = models.TextField(
        null=False,
        verbose_name='Текст',
        help_text='Заполните поле.',
    )
    author = models.ForeignKey(
        User,
        blank=False,
        verbose_name='Автор',
        on_delete=models.CASCADE,
        null=False,
        related_name='comments',
    )
    pub_date = models.DateTimeField(
        verbose_name='Дата публикации',
        auto_now_add=True,
        db_index=True,
    )
    # Deleting the parent review removes its comments (CASCADE).
    review = models.ForeignKey(
        Review,
        blank=False,
        verbose_name='Отзыв',
        on_delete=models.CASCADE,
        null=False,
        related_name='comments',
    )

    class Meta:
        ordering = ('-pub_date',)
        verbose_name_plural = 'Коментарии'
| RomanK74/api_yamdb | api/models.py | models.py | py | 6,054 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
... |
20856359623 | """
Training code for harmonic Residual Networks.
Licensed under the BSD License [see LICENSE for details].
Written by Matej Ulicny, based on pytorch example code:
https://github.com/pytorch/examples/tree/master/imagenet
"""
import argparse
import os
import random
import shutil
import time
import warnings
import csv
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
# Discover every lowercase, callable constructor exposed by the local
# `models` package; these names become the valid --arch choices.
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))

# Command-line interface for ImageNet training of (harmonic) ResNets.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                    choices=model_names,
                    help='model architecture: ' +
                         ' | '.join(model_names) +
                         ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
# Harmonic-network specific options (see models package).
parser.add_argument('--harm_root', action='store_true',
                    help='whether to use harmonic block instead of root conv layer.')
parser.add_argument('--harm_res_blocks', action='store_true',
                    help='whether to use harmonic blocks instead of residual blocks.')
parser.add_argument('--pool', default='', type=str,
                    help="pooling type after the first layer: 'avg' or 'max', if none"
                         " specified increased stride is used instead of pooling.")
parser.add_argument('--levels', default=[None, None, None, None], nargs='+',
                    help="a list of lambda values used to compress harmonic blocks"
                         " specified for each of the 4 sets of blocks")

# Best top-1 validation accuracy seen so far (updated in main_worker).
best_acc1 = 0
def main():
    """Entry point: parse CLI args, optionally seed RNGs, launch the worker."""
    args = parser.parse_args()
    if args.seed is not None:
        # Deterministic mode: fix Python and torch RNGs, disable cuDNN autotuning.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    ngpus_per_node = torch.cuda.device_count()
    main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Build the model, optionally resume from a checkpoint, then train/evaluate.

    Args:
        gpu: specific GPU id or None (None enables DataParallel over all GPUs).
        ngpus_per_node: number of visible CUDA devices (currently unused here).
        args: parsed command-line namespace from `parser`.
    """
    global best_acc1
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    # Convert --levels values to int where possible, None otherwise.
    for i in range(len(args.levels)):
        if not args.levels[i] is None:
            try:
                args.levels[i] = int(args.levels[i])
            except ValueError:
                args.levels[i] = None
    # create model
    print("=> creating model '{}'".format(args.arch))
    model = models.__dict__[args.arch](pretrained=args.pretrained, harm_root=args.harm_root, harm_res_blocks=args.harm_res_blocks, pool=args.pool, levels=args.levels)
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('vgg'):
            # VGG: parallelize only the convolutional feature extractor.
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                best_acc1 = best_acc1.to(args.gpu)
            # Load only the weights whose names exist in the current model,
            # so partially matching checkpoints can still be resumed.
            state_dict = model.state_dict()
            #loaded_dict = {'module.'+k: v for k, v in checkpoint['state_dict'].items() if 'module.'+k in state_dict}
            loaded_dict = {k: v for k, v in checkpoint['state_dict'].items() if k in state_dict}
            state_dict.update(loaded_dict)
            model.load_state_dict(state_dict)
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code: standard ImageNet folder layout and normalization.
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_acc1': best_acc1,
            'optimizer' : optimizer.state_dict(),
        }, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch; print progress every args.print_freq batches
    and append the epoch's average top-1/top-5 accuracy to a CSV log."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
    # Persist the per-epoch training accuracies for later plotting.
    save_score([top1.avg.cpu().data.numpy(), top5.avg.cpu().data.numpy()], args.arch+'_train.csv')
def validate(val_loader, model, criterion, args):
    """Evaluate the model on the validation set and return the average top-1
    accuracy; also appends the averages to a CSV log."""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0], input.size(0))
            top5.update(acc5[0], input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       i, len(val_loader), batch_time=batch_time, loss=losses,
                       top1=top1, top5=top5))
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    # Persist the validation accuracies for later plotting.
    save_score([top1.avg.cpu().data.numpy(), top5.avg.cpu().data.numpy()], args.arch+'_val.csv')
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; when it is the best so far, mirror
    it to 'model_best.pth.tar'."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
def save_score(score, filename):
    """Append *score* (a sequence of values) as one row to a CSV file.

    Args:
        score: iterable of values forming one CSV row.
        filename: path of the CSV file (created if missing).
    """
    # newline='' is required by the csv module; without it an extra blank
    # line is written after every row on Windows.
    with open(filename, 'a', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(score)
class AverageMeter(object):
    """Tracks the most recent value together with its sum, count and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, args):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    decayed = args.lr * 0.1 ** (epoch // 30)
    for group in optimizer.param_groups:
        group['lr'] = decayed
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        list of 1-element tensors, one accuracy percentage per k in topk.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # .reshape instead of .view: slicing the transposed tensor yields
            # a non-contiguous view, on which .view raises a RuntimeError for
            # k > 1 in PyTorch >= 1.3.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
# Script entry point: parse CLI arguments and start training/evaluation.
if __name__ == '__main__':
    main()
| matej-ulicny/harmonic-networks | imagenet/main.py | main.py | py | 13,809 | python | en | code | 55 | github-code | 6 | [
{
"api_name": "models.__dict__",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "models.__dict__",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "ran... |
import argparse
import atexit
import json
import subprocess
import textwrap
from dataclasses import dataclass, field

import yaml
@dataclass
class Config:
    """Runtime options for the heiko docker test harness.

    The previous hand-written __init__ defeated the @dataclass decorator;
    the auto-generated constructor keeps the same positional call shape
    (number of nodes, config path).
    """
    num_nodes: int
    config_path: str
    # Per-instance placeholder job commands; default_factory avoids sharing
    # one list across instances.
    place_holder_commands: list = field(
        default_factory=lambda: ["cd ~/Downloads", "touch sample.txt"])
def make_parser():
    """Build the command-line parser for heiko-docker-test."""
    description = textwrap.dedent(
        """
        heiko-docker-test allows you test your heiko config
        locally with just docker.\n
        It takes 2 arguments, number of nodes (containers) and
        config_path (the path where the config is written to).\n
        Using the provided args, it generates a config file which
        connects to the containers. The config can then be further
        modified to provide the required jobs to be run.\n
        After modifying the config, deploy heiko to test.
        """
    )
    parser_ = argparse.ArgumentParser(
        prog="heiko-docker-test",
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Both arguments are mandatory; values are stored as strings.
    parser_.add_argument(
        "-n", "--number",
        dest="number_of_nodes",
        action="store",
        required=True,
        help="Number of nodes",
    )
    parser_.add_argument(
        "-c", "--config_path",
        dest="config",
        action="store",
        required=True,
        help="Path where the config should be generated",
    )
    return parser_
def genYAML(path, nodes, command):
    """Write a heiko config (nodes plus one placeholder job) to *path*.

    The generated YAML is also echoed to stdout for inspection.

    Args:
        path: destination file path for the YAML config.
        nodes: list of node dicts (name/username/password/host).
        command: list of shell commands for the placeholder job.
    """
    config = {"nodes": nodes, "jobs": [{"name": "job_1", "commands": command}]}
    # Serialize once, then both write and print the same text; the previous
    # version dumped twice and never closed the file on error.
    text = yaml.dump(config)
    with open(path, "w") as stream:
        stream.write(text)
    print(text)
# --- Script body: spawn N docker containers and emit a heiko config -------
parser = make_parser()
args = parser.parse_args()
n = int(args.number_of_nodes)
c = args.config
config = Config(n, c)
# Containers are named heiko_node0, heiko_node1, ...
name = "heiko_node"
nodes = list()
print("Starting containers ..........")
for i in range(config.num_nodes):
    nodes.append(dict())
    node_name = name + str(i)
    # spawn containers (detached, from the local 'heiko-node' image)
    p = subprocess.Popen(
        ["docker", "run", "-it", "-d", "--name", node_name, "heiko-node", "/bin/bash"]
    )
    p.wait()
    # Hard-coded credentials baked into the heiko-node image.
    nodes[i]["name"] = node_name
    nodes[i]["username"] = "root"
    nodes[i]["password"] = "yabe"
# Resolve each container's IP on the default bridge network.
print("Network extraction")
out = subprocess.check_output(["docker", "network", "inspect", "bridge"])
network = json.loads(out)
for i in range(config.num_nodes):
    node_name = name + str(i)
    # Full (untruncated) container id keys the Containers map below.
    cid = subprocess.check_output(
        ["docker", "ps", "-a", "-q", "--no-trunc", "--filter", f"name={node_name}"]
    )
    # print(cid)
    cid = cid.decode().strip()
    # Strip the CIDR suffix (e.g. '172.17.0.2/16' -> '172.17.0.2').
    nodes[i]["host"] = network[0]["Containers"][cid]["IPv4Address"].split("/")[0]
print()
print("YAML CONFIG")
genYAML(config.config_path, nodes, config.place_holder_commands)
def cleanup():
    """Stop and then remove every spawned heiko node container."""
    print("Stopping containers .........")
    for idx in range(config.num_nodes):
        subprocess.Popen(["docker", "stop", name + str(idx)]).wait()
    print("Removing containeres ............")
    for idx in range(config.num_nodes):
        subprocess.Popen(["docker", "rm", name + str(idx)]).wait()
# Ensure the containers are stopped/removed however the script exits.
atexit.register(cleanup)
# Keep the containers alive until the operator presses enter.
input("Press enter to stop workers")
| pesos/heiko | docker-networks.py | docker-networks.py | py | 3,338 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "textwrap.dedent",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "argpars... |
36830731053 | import json
import time
from threading import Thread
import pika
from pika.exceptions import ConnectionClosed
from utils import Logging
class RabbitMQClient(Logging):
    """Thin wrapper around a blocking pika connection/channel.

    Declares one exchange and offers publishing, topic subscription with a
    background consumer thread, and a blocking consume loop with an
    inactivity timeout.
    """

    # Lazily-created channel; populated on first access via `_channel`.
    _channel_impl = None

    def __init__(self, address, credentials, exchange, exchange_type='topic'):
        # address: (host, port); credentials: (username, password).
        super(RabbitMQClient, self).__init__()
        self._address = address
        self._exchange = exchange
        self._credentials = credentials
        self._exchange_type = exchange_type
        # Prepare (but do not start) the consumer thread; it is started on
        # the first subscription.
        self._reset_consumer_thread(start=False)
        self._declare_exchange()

    def send(self, topic, message):
        """Publish *message* on the exchange under routing key *topic*."""
        self._channel.basic_publish(exchange=self._exchange,
                                    routing_key=topic,
                                    body=message)

    def subscribe(self, topic, handler):
        """Bind a fresh exclusive queue to *topic* and deliver messages to
        *handler* on a background daemon thread."""
        queue_name = self._channel.queue_declare(exclusive=True).method.queue
        self._channel.queue_bind(exchange=self._exchange,
                                 queue=queue_name,
                                 routing_key=topic)
        self._channel.basic_consume(handler, queue=queue_name)
        if not self._consumer_thread.is_alive():
            self._reset_consumer_thread(start=True)

    def consume(self, inactivity_timeout, handler, timeout_handler):
        """Blocking consume loop on a fresh exclusive queue.

        Calls *handler(channel, message)* for each delivery and
        *timeout_handler()* whenever *inactivity_timeout* seconds pass
        without a message.
        """
        queue_name = self._channel.queue_declare(exclusive=True).method.queue
        self._channel.queue_bind(exchange=self._exchange,
                                 queue=queue_name)
        for message in self._channel.consume(queue=queue_name,
                                             inactivity_timeout=inactivity_timeout):
            # pika yields None when the inactivity timeout elapses.
            if message is not None:
                handler(self._channel, message)
            else:
                timeout_handler()

    def _declare_exchange(self):
        # Idempotent: re-declaring an existing exchange with the same type is a no-op.
        self._channel.exchange_declare(exchange=self._exchange,
                                       exchange_type=self._exchange_type)

    def _reset_consumer_thread(self, start):
        # A thread can only be started once, so a new Thread object is built
        # for every (re)start of the consumer loop.
        self._consumer_thread = Thread(target=self._channel.start_consuming)
        self._consumer_thread.daemon = True
        if start:
            assert not self._consumer_thread.is_alive()
            self._consumer_thread.start()

    @property
    def _channel(self):
        # Lazily open the connection/channel on first use.
        if not self._channel_impl:
            connection = self._establish_connection_to_mq(self._address, self._credentials)
            self._channel_impl = connection.channel()
        return self._channel_impl

    @staticmethod
    def _establish_connection_to_mq(address, credentials):
        # Retry forever with a 1 s backoff until RabbitMQ accepts the connection.
        while True:
            try:
                return pika.BlockingConnection(
                    pika.ConnectionParameters(host=address[0], port=address[1],
                                              credentials=pika.PlainCredentials(credentials[0], credentials[1])))
            except ConnectionClosed:
                time.sleep(1)
class RabbitMQJsonSender(Logging):
    """Serializes messages to JSON and publishes them on a fixed topic."""

    def __init__(self, rabbit_mq_client, topic):
        super(RabbitMQJsonSender, self).__init__()
        self._rabbit_mq_client = rabbit_mq_client
        self._topic = topic

    def send(self, message):
        """Publish *message* as JSON; drop it with a debug log entry when it
        cannot be serialized."""
        try:
            payload = json.dumps(message)
        except Exception as e:
            self.logger.debug('JSON serialization failed: {}. Message: {}'.format(e, message))
            return
        self._rabbit_mq_client.send(topic=self._topic,
                                    message=payload)
class RabbitMQJsonReceiver(Logging):
    """Subscribes to topics and hands decoded JSON payloads to user handlers."""

    def __init__(self, rabbit_mq_client):
        super(RabbitMQJsonReceiver, self).__init__()
        self._rabbit_mq_client = rabbit_mq_client

    def subscribe(self, topic, handler):
        """Register *handler* to receive JSON-decoded messages from *topic*."""
        self._rabbit_mq_client.subscribe(topic, self._wrapped_handler(handler))
        self.logger.debug('Subscribed to topic {}'.format(topic))

    @staticmethod
    def _wrapped_handler(actual_handler):
        """Adapt a pika delivery callback into a JSON-object handler."""
        # noinspection PyUnusedLocal
        def handle(ch, method, properties, body):
            return actual_handler(json.loads(body))
        return handle
| deepsense-ai/seahorse | remote_notebook/code/rabbit_mq_client.py | rabbit_mq_client.py | py | 4,047 | python | en | code | 104 | github-code | 6 | [
{
"api_name": "utils.Logging",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pika.BlockingConnection",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pika.Connectio... |
74434743869 | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from test_generator.models import *
# Create your views here.
def home(request):
    """Render the landing page, reporting whether the visitor is logged in."""
    status = ("You're currently logged in."
              if request.user.is_authenticated
              else "You're not currently logged in.")
    return render(request, "tester_services/home.html", {'status': status})
# may be deleted later (def testujeme)
def testujeme(request):
    # Throwaway experiment: creates a Themes row on every call. NOTE(review):
    # the view returns None, which is presumably not a valid response —
    # confirm before wiring this into urls.
    thema = Themes.objects.create(theme_name="Biology")
@login_required
def my_tests(request):
    """List the generated tests owned by the signed-in user."""
    tests = GTest.objects.filter(user_id=request.user.id)
    return render(request, 'tester_services/my_tests.html', {'my_tests': tests})
| alenamedzova/final_project | tester_services/views.py | views.py | py | 767 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.render",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 22,
"usage_type": "name"
... |
1005006973 | import socket
import json
class pyWave:
    """Reads eSense attention values from a ThinkGear Connector socket."""

    # JSON-format handshake sent to the ThinkGear Connector on connect.
    configStr = "{ 'enableRawOutput': 'enableRawOutput', 'format': 'Json'}"
    configByte = configStr.encode()
    # Last attention value seen; returned again when a packet is unusable.
    val = 0

    def __init__(self, _host, _port):
        self.host = _host
        self.port = _port

    def connect(self):
        """Open a TCP socket to the connector, send the handshake and return
        the connected socket."""
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect((self.host, self.port))
        client.send(self.configByte)
        return client

    def readData(self, _client):
        """Read one packet and return the current attention value.

        On any failure (partial packet, non-JSON data, missing 'eSense'
        key) the last known value is returned unchanged. The previous
        version compared str(e) against "'eSense'" but both branches
        returned self.val anyway, so the dead distinction was removed.
        """
        try:
            data = _client.recv(1024)
            data_json = json.loads(data)
            self.val = data_json["eSense"]["attention"]
        except Exception:
            # Keep the last known value on malformed input.
            pass
        return self.val
# testing code
if __name__ == '__main__':
    # Manual smoke test: connect to a local ThinkGear Connector and stream
    # attention values forever (interrupt with Ctrl-C).
    pywave = pyWave("localhost", 13854)
    client = pywave.connect()
    print("Waiting for data")
    while True:
        val = pywave.readData(client)
        print(val)
| kittom/Mind-Control-Car | BrainWaveReader/pywave.py | pywave.py | py | 1,713 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "socket.socket",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "json.loads"... |
38008765446 | """
# Analysis utilities
This script belongs to the following manuscript:
- Mathôt, Berberyan, Büchel, Ruuskanen, Vilotjević, & Kruijne (in prep.)
*Causal effects of pupil size on visual ERPs*
This module contains various constants and functions that are used in the main
analysis scripts.
"""
import random
import sys
import multiprocessing as mp
import mne; mne.set_log_level(False)
import eeg_eyetracking_parser as eet
from eeg_eyetracking_parser import braindecode_utils as bdu, \
_eeg_preprocessing as epp
import numpy as np
import time_series_test as tst
from datamatrix import DataMatrix, convert as cnv, operations as ops, \
functional as fnc, SeriesColumn, io, MultiDimensionalColumn
from mne.time_frequency import tfr_morlet
import matplotlib as mpl
from matplotlib import pyplot as plt
from scipy.stats import mode
import logging; logging.basicConfig(level=logging.INFO, force=True)
FIXATION_TRIGGER = 1
CUE_TRIGGER = 2
INTERVAL_TRIGGER = 3
TARGET_TRIGGER = 4
RESPONSE_TRIGGER = 5
N_CHANNELS = 26
# Occipital
LEFT_OCCIPITAL = 'O1',
RIGHT_OCCIPITAL = 'O2',
MIDLINE_OCCIPITAL = 'Oz',
# Parietal
LEFT_PARIETAL = 'P3', 'P7', 'CP1'
RIGHT_PARIETAL = 'P4', 'P8', 'CP2'
MIDLINE_PARIETAL = 'Pz', 'POz'
# Central
LEFT_CENTRAL = 'T7', 'C3'
RIGHT_CENTRAL = 'T8', 'C4'
MIDLINE_CENTRAL = 'Cz',
# Frontal
LEFT_FRONTAL = 'FC1', 'F3', 'F7', 'FP1'
RIGHT_FRONTAL = 'FC2', 'F4', 'F8', 'FP2'
MIDLINE_FRONTAL = 'Fz', 'FPz'
# Only CP1 and CP2, which were the best channels
LEFT_CP = 'CP1',
RIGHT_CP = 'CP2',
MIDLINE_CP = tuple()
# Pz, POz, Oz
LEFT_OPM = 'Pz',
RIGHT_OPM = 'POz',
MIDLINE_OPM = 'Oz',
# Select a channel group for further processing. The main analyses focus on the
# the parietal group.
CHANNEL_GROUPS = 'parietal', 'occipital', 'frontal', 'central', 'CP', \
'occipital-parietal-midline'
# Allow the channel group to be specified on the command line
CHANNEL_GROUP = next(
    (arg for arg in sys.argv if arg in CHANNEL_GROUPS), 'parietal')
# Map each group name onto its (left, right, midline) channel tuples.
_GROUP_CHANNELS = {
    'parietal': (LEFT_PARIETAL, RIGHT_PARIETAL, MIDLINE_PARIETAL),
    'occipital': (LEFT_OCCIPITAL, RIGHT_OCCIPITAL, MIDLINE_OCCIPITAL),
    'frontal': (LEFT_FRONTAL, RIGHT_FRONTAL, MIDLINE_FRONTAL),
    'central': (LEFT_CENTRAL, RIGHT_CENTRAL, MIDLINE_CENTRAL),
    'CP': (LEFT_CP, RIGHT_CP, MIDLINE_CP),
    'occipital-parietal-midline': (LEFT_OPM, RIGHT_OPM, MIDLINE_OPM),
}
if CHANNEL_GROUP not in _GROUP_CHANNELS:
    raise ValueError(f'Invalid channel group: {CHANNEL_GROUP}')
LEFT_CHANNELS, RIGHT_CHANNELS, MIDLINE_CHANNELS = _GROUP_CHANNELS[CHANNEL_GROUP]
ALL_CHANNELS = LEFT_CHANNELS + RIGHT_CHANNELS + MIDLINE_CHANNELS
FACTORS = ['inducer', 'bin_pupil', 'intensity', 'valid']
LABELS = ['00:blue:0:100:no',
'01:blue:0:100:yes',
'02:blue:0:255:no',
'03:blue:0:255:yes',
'04:blue:1:100:no',
'05:blue:1:100:yes',
'06:blue:1:255:no',
'07:blue:1:255:yes',
'08:red:0:100:no',
'09:red:0:100:yes',
'10:red:0:255:no',
'11:red:0:255:yes',
'12:red:1:100:no',
'13:red:1:100:yes',
'14:red:1:255:no',
'15:red:1:255:yes']
ALPHA = .05
N_CONDITIONS = 16 # 4 factors with 2 levels each
FULL_FREQS = np.arange(4, 30, 1)
NOTCH_FREQS = np.exp(np.linspace(np.log(4), np.log(30), 15))
DELTA_FREQS = np.arange(.5, 4, .5)
THETA_FREQS = np.arange(4, 8, .5)
ALPHA_FREQS = np.arange(8, 12.5, .5)
BETA_FREQS = np.arange(13, 30, .5)
PERTURB_TIMES = [(-.1, .47),
(.18, .74)]
SUBJECTS = list(range(1, 34))
SUBJECTS.remove(7) # technical error
SUBJECTS.remove(5) # negative inducer effect
SUBJECTS.remove(18) # negative inducer effect
DATA_FOLDER = 'data'
EPOCHS_KWARGS = dict(tmin=-.1, tmax=.75, picks='eeg',
preload=True, reject_by_annotation=False,
baseline=None)
# Plotting colors
RED = 'red'
BLUE = 'blue'
FACTOR_COLORS = {
'inducer': '#B71C1C',
'bin_pupil': '#4A148C',
'intensity': '#263238',
'valid': '#1B5E20'
}
# TFR plotting parameters
Y_FREQS = np.array([0, 4, 9, 25])
VMIN = -.2
VMAX = .2
CMAP = 'coolwarm'
# Plotting style
plt.style.use('default')
mpl.rcParams['font.family'] = 'Roboto Condensed'
# DATA_CHECKPOINT = 'checkpoints/18012023.dm'
DATA_CHECKPOINT = f'checkpoints/19072023-{CHANNEL_GROUP}.dm'
def read_subject(subject_nr):
    """A simple wrapper function that calls eet.read_subject() with the
    parameters used throughout this project.

    Parameters
    ----------
    subject_nr: int

    Returns
    -------
    tuple
        A (raw, events, metadata) tuple
    """
    subject_kwargs = dict(subject_nr=subject_nr,
                          saccade_annotation='BADS_SACCADE',
                          min_sacc_size=128)
    return eet.read_subject(**subject_kwargs)
def get_tgt_epoch(raw, events, metadata, channels=None, tmin=-.1, tmax=.5,
                  baseline=(None, 0)):
    """Wrapper around eet.autoreject_epochs() that epochs the signal around
    target onset.

    Parameters
    ----------
    raw: Raw
    events: tuple
    metadata: DataFrame
    channels: list or None, optional
        A list of channel indices or None to select all channels
    tmin: float, optional
    tmax: float, optional
    baseline: tuple, optional

    Returns
    -------
    Epochs
    """
    target_events = eet.epoch_trigger(events, TARGET_TRIGGER)
    return eet.autoreject_epochs(raw, target_events, tmin=tmin, tmax=tmax,
                                 metadata=metadata, picks=channels,
                                 baseline=baseline, ar_kwargs=dict(n_jobs=8))
def get_fix_epoch(raw, events, metadata, channels=None):
    """Wrapper around eet.autoreject_epochs() that epochs the signal around
    fixation onset (-.5 to 2.5 s, no baseline correction).

    Parameters
    ----------
    raw: Raw
    events: tuple
    metadata: DataFrame
    channels: list or None, optional
        A list of channel indices or None to select all channels

    Returns
    -------
    Epochs
    """
    fixation_events = eet.epoch_trigger(events, FIXATION_TRIGGER)
    return eet.autoreject_epochs(raw, fixation_events, tmin=-.5, tmax=2.5,
                                 metadata=metadata, picks=channels,
                                 ar_kwargs=dict(n_jobs=8))
def get_morlet(epochs, freqs, crop=(0, 2), decim=8, n_cycles=2):
    """Wrapper around tfr_morlet() that extracts single-trial time-frequency
    data for all channels.

    Parameters
    ----------
    epochs: Epochs
    freqs: array
        An array of frequencies
    crop: tuple, optional
        A time window to crop after extracting the time-frequency data to
        reduce edge artifacts.
    decim: int, optional
        Downsampling factor to reduce memory consumption
    n_cycles: int, optional
        The number of cycles of the morlet wavelet

    Returns
    -------
    EpochsTFR
    """
    all_channels = np.arange(len(epochs.info['ch_names']))
    tfr = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, n_jobs=-1,
                     return_itc=False, use_fft=True, average=False,
                     decim=decim, picks=all_channels)
    tfr.crop(*crop)
    return tfr
def z_by_freq(col):
    """Performs z-scoring across trials, channels, and time points but
    separately for each frequency (the third dimension of the column).

    Parameters
    ----------
    col: MultiDimensionalColumn

    Returns
    -------
    MultiDimensionalColumn
        A copy of the column in which each frequency slice has zero mean and
        unit standard deviation (NaNs ignored).
    """
    zcol = col[:]  # copy, so the input column is left untouched
    n_freqs = zcol.shape[2]
    for freq_index in range(n_freqs):
        freq_slice = zcol._seq[:, :, freq_index]
        mu = np.nanmean(freq_slice)
        sigma = np.nanstd(freq_slice)
        zcol._seq[:, :, freq_index] = (freq_slice - mu) / sigma
    return zcol
def subject_data(subject_nr):
    """Performs preprocessing for a single participant. This involves basic
    EEG preprocessing and subsequent epoching and extraction of TFR data. The
    result is a single DataMatrix that contains all information for final
    analysis.

    Parameters
    ----------
    subject_nr: int

    Returns
    -------
    DataMatrix
    """
    print(f'Processing subject {subject_nr}')
    raw, events, metadata = read_subject(subject_nr)
    # Convert pupil size from arbitrary units to mm of diameter.
    raw['PupilSize'] = area_to_mm(raw['PupilSize'][0])
    dm = cnv.from_pandas(metadata)
    print('- eeg')
    # Target-locked ERPs (baseline-corrected epochs around target onset).
    tgt_epoch = get_tgt_epoch(raw, events, metadata)
    dm.tgt_erp = cnv.from_mne_epochs(tgt_epoch)
    # Target-locked TFRs: epoched without baseline and with a longer window,
    # then cropped to 0 - .5 s to reduce edge artifacts.
    tgt_tfr = get_morlet(
        get_tgt_epoch(raw, events, metadata, baseline=None, tmax=1),
        FULL_FREQS, crop=(0, .5), decim=4)
    dm.tgt_tfr = cnv.from_mne_tfr(tgt_tfr)
    dm.tgt_tfr = z_by_freq(dm.tgt_tfr)
    # Fixation-locked ERPs and TFRs.
    fix_epoch = get_fix_epoch(raw, events, metadata)
    fix_tfr = get_morlet(fix_epoch, FULL_FREQS)
    dm.fix_erp = cnv.from_mne_epochs(fix_epoch)
    dm.fix_tfr = cnv.from_mne_tfr(fix_tfr)
    dm.fix_tfr = z_by_freq(dm.fix_tfr)
    print('- pupils')
    # Pupil traces locked to fixation onset (no baseline) and target onset.
    pupil_fix = eet.PupilEpochs(
        raw, eet.epoch_trigger(events, FIXATION_TRIGGER), tmin=0, tmax=2,
        metadata=metadata, baseline=None)
    pupil_target = eet.PupilEpochs(
        raw, eet.epoch_trigger(events, TARGET_TRIGGER), tmin=-.05, tmax=2,
        metadata=metadata)
    # Free the raw recording before assembling the (large) DataMatrix.
    del raw
    dm.pupil_fix = cnv.from_mne_epochs(pupil_fix, ch_avg=True)
    dm.pupil_target = cnv.from_mne_epochs(pupil_target, ch_avg=True)
    return dm
@fnc.memoize(persistent=True, key='merged-data')
def get_merged_data():
    """Merges data for all participants into a single DataMatrix. Uses
    multiprocessing for performance. The result is persistently memoized
    under the key 'merged-data'.

    Returns
    -------
    DataMatrix
    """
    return fnc.stack_multiprocess(subject_data, SUBJECTS, processes=10)
def add_bin_pupil(raw, events, metadata):
    """Adds bin pupil to the metadata. This is a patch to allow decoding to
    take bin pupil as a decoding factor into account.

    Parameters
    ----------
    raw: Raw
    events: tuple
    metadata: DataFrame

    Returns
    -------
    tuple
        A (raw, events, metadata) tuple where bin_pupil has been added as a
        column to metadata.
    """
    # This adds the bin_pupil pseudo-factor to the data. This requires that
    # this has been generated already by `analyze.py`.
    dm = io.readtxt('output/bin-pupil.csv')
    # Select only the rows of the current participant.
    dm = dm.subject_nr == metadata.subject_nr[0]
    # NOTE(review): the first 16 rows are skipped — presumably practice
    # trials; confirm against the experiment script.
    metadata.loc[16:, 'bin_pupil'] = dm.bin_pupil
    # A shuffled 50/50 dummy factor (2 x 192 trials) that can serve as a
    # chance-level reference for decoding.
    dummy_factor = 192 * [0] + 192 * [1]
    random.shuffle(dummy_factor)
    metadata.loc[16:, 'dummy_factor'] = dummy_factor
    return raw, events, metadata
def decode_subject(subject_nr):
    """Wrapper around bdu.decode_subject() that performs overall decoding of
    all factors for one subject.

    Parameters
    ----------
    subject_nr: int

    Returns
    -------
    DataMatrix
        See bdu.decode_subject()
    """
    subject_kwargs = dict(subject_nr=subject_nr,
                          saccade_annotation='BADS_SACCADE',
                          min_sacc_size=128)
    return bdu.decode_subject(read_subject_kwargs=subject_kwargs,
                              factors=FACTORS, epochs_kwargs=EPOCHS_KWARGS,
                              trigger=TARGET_TRIGGER, window_stride=1,
                              window_size=200, n_fold=4, epochs=4,
                              patch_data_func=add_bin_pupil)
def crossdecode_subject(subject_nr, from_factor, to_factor):
    """A wrapper function around bdu.decode_subject() that performs
    cross-decoding for one subject: a classifier is trained on one factor
    and tested on another.

    Parameters
    ----------
    subject_nr: int
    from_factor: str
        The factor to train on
    to_factor: str
        The factor to test on

    Returns
    -------
    DataMatrix
        See bdu.decode_subject()
    """
    read_subject_kwargs = dict(subject_nr=subject_nr,
                               saccade_annotation='BADS_SACCADE',
                               min_sacc_size=128)
    # The two original code paths were identical except for patch_data_func,
    # so build the kwargs once and add the patch function only when needed.
    decode_kwargs = dict(
        read_subject_kwargs=read_subject_kwargs, factors=from_factor,
        crossdecode_factors=to_factor, epochs_kwargs=EPOCHS_KWARGS,
        trigger=TARGET_TRIGGER, window_stride=1, window_size=200, n_fold=4,
        epochs=4)
    # bin_pupil is a pseudo-factor that has to be patched into the metadata.
    if 'bin_pupil' in (from_factor, to_factor):
        decode_kwargs['patch_data_func'] = add_bin_pupil
    return bdu.decode_subject(**decode_kwargs)
@fnc.memoize(persistent=True)
def blocked_decode_subject(subject_nr, factor, query1, query2):
    """Decodes a factor for a single subject, using two different queries to
    separate the training and testing data.

    Parameters
    ----------
    subject_nr: int
    factor: str
    query1: str
        A pandas-style query to select the training set
    query2: str
        A pandas-style query to select the testing set

    Returns
    -------
    float
        Decoding accuracy
    """
    read_subject_kwargs = dict(subject_nr=subject_nr,
                               saccade_annotation='BADS_SACCADE',
                               min_sacc_size=128)
    train_data, train_labels, train_metadata = bdu.read_decode_dataset(
        read_subject_kwargs, factor, EPOCHS_KWARGS, TARGET_TRIGGER, query1)
    test_data, test_labels, test_metadata = bdu.read_decode_dataset(
        read_subject_kwargs, factor, EPOCHS_KWARGS, TARGET_TRIGGER, query2)
    # NOTE(review): bdu.train() is called with (train_data, test_data) —
    # verify that its second argument is a validation/test set and not a
    # labels argument.
    clf = bdu.train(train_data, test_data)
    y_pred = clf.predict(test_data)
    # Predictions are made per window; reshape to (n_trials, n_windows) and
    # take the modal prediction per trial.
    resized_pred = y_pred.copy()
    resized_pred.resize(
        (len(test_data.datasets), len(test_data.datasets[0])))
    resized_pred = mode(resized_pred, axis=1)[0].flatten()
    # The true label of each trial is the label of its first window.
    y_true = [d.y[0] for d in test_data.datasets]
    # Proportion of trials whose modal prediction matches the true label.
    return np.mean([p == t for p, t in zip(resized_pred, y_true)])
def statsplot(rm):
    """A simple wrapper function that plots statistical values as a function of
    time and factors.

    Parameters
    ----------
    rm: DataMatrix
        A DataMatrix with statistical results as returned by time_series_test
        functions.
    """
    rm = rm[:]  # work on a copy so the caller's DataMatrix is untouched
    rm.sign = SeriesColumn(depth=rm.p.depth)
    colors = ['red', 'green', 'blue', 'orange']
    # Skip the first row (presumably the intercept — confirm) and draw one
    # line per effect; thicker line segments indicate stricter significance
    # thresholds.
    for y, row in enumerate(rm[1:]):
        for linewidth, alpha in [(1, .05), (2, .01), (4, .005), (8, .001)]:
            # Mask non-significant samples; significant samples are drawn at
            # height y so that the effects are vertically separated.
            row.sign[row.p >= alpha] = np.nan
            row.sign[row.p < alpha] = y
            plt.plot(row.sign, '-', label=f'{row.effect}, p < {alpha}',
                     linewidth=linewidth, color=colors[y])
    plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
def select_ica(raw, events, metadata, exclude_component=0):
    """A helper function that excludes (rather than selects, as the name
    suggests) an independent component from the signal.

    Parameters
    ----------
    raw: Raw
    events: tuple
    metadata: DataFrame
    exclude_component: int, optional
        The index of the excluded independent component

    Returns
    -------
    tuple
        A (raw, events, metadata) tuple where the independent component has
        been excluded from the `raw` object.
    """
    # The per-channel weights of the excluded component are stored in this
    # global so that ica_perturbation_decode() can retrieve them afterwards.
    global weights_dict
    raw, events, metadata = add_bin_pupil(raw, events, metadata)
    print(f'running ica to exclude component {exclude_component}')
    # Persistently memoized so the (expensive) ICA decomposition is only run
    # once per raw object.
    @fnc.memoize(persistent=True)
    def run_ica(raw):
        return epp.run_ica(raw)
    # run_ica.clear()
    raw.info['bads'] = []
    ica = run_ica(raw)
    print('applying ica')
    ica.apply(raw, exclude=[exclude_component])
    # Project the component back through the PCA components to get
    # per-channel loadings for the excluded component.
    weights = np.dot(ica.mixing_matrix_[:, exclude_component].T,
                     ica.pca_components_[:ica.n_components_])
    weights_dict = {ch_name: weight
                    for ch_name, weight in zip(ica.ch_names, weights)}
    print(f'weights: {weights_dict} (len={len(weights_dict)})')
    return raw, events, metadata
@fnc.memoize(persistent=True)
def ica_perturbation_decode(subject_nr, factor):
    """Performs the ICA perturbation analysis: decoding is run once on the
    full data, and then once per independent component with that component
    excluded from the signal.

    Parameters
    ----------
    subject_nr: int
    factor: str

    Returns
    -------
    tuple
        The first element of the tuple is a DataMatrix with the regular
        decoding results. The second element is a dict with independent
        component indices as keys and (dm, weights_dict) tuples as values.
        Here, dm is the DataMatrix with the decoding results after excluding
        the independent component, and weights_dict is a mapping with channel
        names as keys and weights (loading of the channel of the independent
        component) as values.
    """
    read_subject_kwargs = dict(subject_nr=subject_nr,
                               saccade_annotation='BADS_SACCADE',
                               min_sacc_size=128)
    fdm = bdu.decode_subject(
        read_subject_kwargs=read_subject_kwargs, factors=factor,
        epochs_kwargs=EPOCHS_KWARGS, trigger=TARGET_TRIGGER, window_stride=1,
        window_size=200, n_fold=4, epochs=4, patch_data_func=add_bin_pupil)
    print(f'full-data accuracy: {fdm.braindecode_correct.mean}')
    perturbation_results = {}
    for exclude_component in range(N_CHANNELS):
        # Clear the memoization cache so that decoding is actually re-run on
        # the perturbed data.
        bdu.decode_subject.clear()
        # The loop variable is bound as a default argument to avoid the
        # late-binding-closure pitfall (a plain closure would see whatever
        # value exclude_component has when the lambda is eventually called).
        dm = bdu.decode_subject(
            read_subject_kwargs=read_subject_kwargs, factors=factor,
            epochs_kwargs=EPOCHS_KWARGS, trigger=TARGET_TRIGGER,
            window_stride=1, window_size=200, n_fold=4, epochs=4,
            patch_data_func=lambda raw, events, metadata, _c=exclude_component:
                select_ica(raw, events, metadata, _c))
        # select_ica() stores the channel weights of the excluded component
        # in the global weights_dict.
        perturbation_results[exclude_component] = dm, weights_dict
        print(f'perturbation accuracy({exclude_component}): '
              f'{dm.braindecode_correct.mean}')
    return fdm, perturbation_results
def notch_filter(raw, events, metadata, freq):
    """A helper function that excludes a frequency from the signal using a
    notch filter.

    Parameters
    ----------
    raw: Raw
    events: tuple
    metadata: DataFrame
    freq: float
        The frequency to remove.

    Returns
    -------
    tuple
        A (raw, events, metadata) tuple where the frequency has been removed
        from the `raw` object using a notch filter.
    """
    # (The original `global weights_dict` declaration was a copy-paste
    # leftover from select_ica(); nothing is assigned to it here.)
    raw, events, metadata = add_bin_pupil(raw, events, metadata)
    # The notch width scales linearly with the frequency: a quarter of it.
    # The original np.exp(np.log(freq / 4)) was an identity round-trip.
    width = freq / 4
    print(f'notch-filtering frequency band: {freq:.2f} / {width:.2f}')
    raw.notch_filter(freq, notch_widths=width, trans_bandwidth=width)
    return raw, events, metadata
@fnc.memoize(persistent=True)
def freq_perturbation_decode(subject_nr, factor):
    """Performs the frequency perturbation analysis: decoding is run once on
    the full data, and then once per notch frequency with that frequency
    filtered out of the signal.

    Parameters
    ----------
    subject_nr: int
    factor: str

    Returns
    -------
    tuple
        The first element of the tuple is a DataMatrix with the regular
        decoding results. The second element is a dict with frequencies
        as keys and the DataMatrix objects with the decoding results after
        excluding the frequencies as values.
    """
    read_subject_kwargs = dict(subject_nr=subject_nr,
                               saccade_annotation='BADS_SACCADE',
                               min_sacc_size=128)
    fdm = bdu.decode_subject(
        read_subject_kwargs=read_subject_kwargs, factors=factor,
        epochs_kwargs=EPOCHS_KWARGS, trigger=TARGET_TRIGGER, window_stride=1,
        window_size=200, n_fold=4, epochs=4, patch_data_func=add_bin_pupil)
    print(f'full-data accuracy: {fdm.braindecode_correct.mean}')
    perturbation_results = {}
    for freq in NOTCH_FREQS:
        # Clear the memoization cache so that decoding is re-run on the
        # filtered data.
        bdu.decode_subject.clear()
        # The loop variable is bound as a default argument to avoid the
        # late-binding-closure pitfall.
        dm = bdu.decode_subject(
            read_subject_kwargs=read_subject_kwargs, factors=factor,
            epochs_kwargs=EPOCHS_KWARGS, trigger=TARGET_TRIGGER,
            window_stride=1, window_size=200, n_fold=4, epochs=4,
            patch_data_func=lambda raw, events, metadata, _freq=freq:
                notch_filter(raw, events, metadata, _freq))
        perturbation_results[freq] = dm
        print(f'perturbation accuracy({freq}): {dm.braindecode_correct.mean}')
    return fdm, perturbation_results
def area_to_mm(au):
    """Converts pupil area in arbitrary units to millimeters of diameter.
    This is specific to the recording set-up.

    Parameters
    ----------
    au: float

    Returns
    -------
    float
    """
    # Empirical linear calibration on the square root of the area.
    slope, intercept = 0.1275, -0.9904
    return intercept + slope * au ** .5
def pupil_plot(dm, dv='pupil_target', **kwargs):
    """A simple wrapper function that plots pupil size over time.

    Parameters
    ----------
    dm: DataMatrix
    dv: str, optional
        The pupil-trace column to plot. For 'pupil_target' the trace is
        drawn as baseline-corrected (with a zero line); otherwise it is
        drawn on a 2 - 8 mm scale.
    **kwargs: dict, optional
        Passed on to tst.plot().
    """
    tst.plot(dm, dv=dv, legend_kwargs={'loc': 'lower left'},
             **kwargs)
    # Convert sample indices to millisecond tick labels.
    # NOTE(review): 250 samples appear to span 1000 ms here — confirm the
    # sampling rate.
    x = np.linspace(12, 262, 6)
    t = [f'{int(s)}' for s in np.linspace(0, 1000, 6)]
    plt.xticks(x, t)
    plt.xlabel('Time (ms)')
    if dv == 'pupil_target':
        plt.axhline(0, linestyle=':', color='black')
        plt.ylim(-.6, .2)
    else:
        plt.ylim(2, 8)
    plt.xlim(0, 250)
    plt.ylabel('Baseline-corrected pupil size (mm)')
def erp_plot(dm, dv='lat_erp', ylim=None, **kwargs):
    """A simple wrapper function that plots ERPs with time (ms) on the x-axis
    and a dotted marker at target onset and at zero voltage.

    Parameters
    ----------
    dm: DataMatrix
    dv: str, optional
    ylim: tuple or None, optional
        Optional (min, max) limits for the y-axis.
    **kwargs: dict, optional
        Passed on to tst.plot().
    """
    tst.plot(dm, dv=dv, **kwargs)
    sample_ticks = np.arange(25, 150, 25)
    ms_labels = np.arange(0, 500, 100)
    plt.xticks(sample_ticks, ms_labels)
    plt.axvline(25, linestyle=':', color='black')
    plt.axhline(0, linestyle=':', color='black')
    plt.xlabel('Time (ms)')
    if ylim:
        plt.ylim(*ylim)
def tfr_plot(dm, dv):
    """A simple wrapper function that creates a multipanel TFR plot, one
    panel per factor (inducer, bin_pupil, intensity, valid), each showing
    the difference between the two factor levels.

    Parameters
    ----------
    dm: DataMatrix
    dv: str
        The name of the column that holds the TFR data.
    """
    plt.figure(figsize=(12, 4))
    plt.subplots_adjust(wspace=0)
    plt.subplot(141)
    # NOTE(review): panels a/b average with `[dv][...]` whereas panels c/d
    # use `[dv].mean` — confirm that both spellings denote the same
    # across-trial average in datamatrix.
    tfr_red = (dm.inducer == 'red')[dv][...]
    tfr_blue = (dm.inducer == 'blue')[dv][...]
    plt.title('a) Induced Pupil Size (Large - Small)')
    plt.imshow(tfr_red - tfr_blue, aspect='auto', vmin=VMIN, vmax=VMAX,
               cmap=CMAP, interpolation='bicubic')
    plt.yticks(Y_FREQS, FULL_FREQS[Y_FREQS])
    plt.xticks(np.arange(0, 31, 6.25), np.arange(0, 499, 100))
    plt.xlabel('Time (ms)')
    plt.ylabel('Frequency (Hz)')
    plt.subplot(142)
    tfr_large = (dm.bin_pupil == 1)[dv][...]
    tfr_small = (dm.bin_pupil == 0)[dv][...]
    plt.title('b) Spontaneous Pupil Size (Large - Small)')
    plt.imshow(tfr_large - tfr_small, aspect='auto', vmin=VMIN, vmax=VMAX,
               cmap=CMAP, interpolation='bicubic')
    # The y-axis is shared with the first panel, so hide it here.
    plt.gca().get_yaxis().set_visible(False)
    plt.xticks(np.arange(0, 31, 6.25), np.arange(0, 499, 100))
    plt.xlabel('Time (ms)')
    plt.subplot(143)
    tfr_bright = (dm.intensity == 255)[dv].mean
    tfr_dim = (dm.intensity == 100)[dv].mean
    plt.title('c) Stimulus Intensity (Bright - Dim)')
    plt.imshow(tfr_bright - tfr_dim, aspect='auto', vmin=VMIN, vmax=VMAX,
               cmap=CMAP, interpolation='bicubic')
    plt.gca().get_yaxis().set_visible(False)
    plt.xticks(np.arange(0, 31, 6.25), np.arange(0, 499, 100))
    plt.xlabel('Time (ms)')
    plt.subplot(144)
    tfr_attended = (dm.valid == 'yes')[dv].mean
    tfr_unattended = (dm.valid == 'no')[dv].mean
    plt.title('d) Covert Visual Attention (Attended - Unattended)')
    plt.imshow(tfr_attended - tfr_unattended, aspect='auto', vmin=VMIN,
               vmax=VMAX, cmap=CMAP, interpolation='bicubic')
    plt.gca().get_yaxis().set_visible(False)
    plt.xticks(np.arange(0, 31, 6.25), np.arange(0, 499, 100))
    plt.xlabel('Time (ms)')
| smathot/causal-pupil | analysis_utils.py | analysis_utils.py | py | 23,689 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "mne.set_log_level",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
... |
10140997594 | # -*- coding: utf-8 -*-
# @Time : 19-1-24 下午9:35
# @Author : ccs
import json
from django.http import HttpResponse
def calc(request):
    """Demo view: reads GET parameters a, b, c and returns them combined as
    JSON ({"m": a+b+c, "n": b+a}).

    NOTE(review): request.GET values are strings, so `a+b+c` and `b+a`
    perform string concatenation, not numeric addition — confirm this is
    intended.
    """
    a = request.GET['a']
    b = request.GET['b']
    c = request.GET['c']
    print(a,b,c)
    m = a+b+c
    n = b+a
    rets = {"m":m,"n":n}
    retsj = json.dumps(rets)
    return HttpResponse(retsj)
| ccs258/python_code | learn_api.py | learn_api.py | py | 348 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.dumps",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 18,
"usage_type": "call"
}
] |
32108433366 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from temba_client.v2 import TembaClient
from django.conf import settings
from django.db import migrations
from ureport.utils import datetime_to_json_date, json_date_to_datetime
logger = logging.getLogger(__name__)
class Migration(migrations.Migration):
    """Data migration that backfills Poll.poll_date from the flow's
    created_on date in RapidPro, falling back to the poll's own created_on
    when no matching flow is found."""

    def populate_poll_poll_date(apps, schema_editor):
        # NOTE: deliberately no `self` — this is used as a plain callable by
        # migrations.RunPython below.
        Poll = apps.get_model("polls", "Poll")
        Org = apps.get_model("orgs", "Org")
        agent = getattr(settings, "SITE_API_USER_AGENT", None)
        host = settings.SITE_API_HOST
        for org in Org.objects.all():
            # One API client per org, authenticated with the org's token.
            temba_client = TembaClient(host, org.api_token, user_agent=agent)
            api_flows = temba_client.get_flows()
            # Map flow UUID -> JSON-serialized creation date.
            flows_date = dict()
            for flow in api_flows:
                flows_date[flow.uuid] = datetime_to_json_date(flow.created_on)
            for poll in Poll.objects.filter(org=org):
                json_date = flows_date.get(poll.flow_uuid, None)
                if json_date:
                    date = json_date_to_datetime(json_date)
                else:
                    # No matching flow: fall back to the poll's creation date.
                    logger.info("using created_on for flow_date on poll with id %s" % poll.pk)
                    date = poll.created_on
                poll.poll_date = date
                poll.save()

    dependencies = [("polls", "0022_poll_flow_date")]

    operations = [migrations.RunPython(populate_poll_poll_date)]
| rapidpro/ureport | ureport/polls/migrations/0023_populate_flow_date.py | 0023_populate_flow_date.py | py | 1,499 | python | en | code | 23 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.db.migrations.Migration",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 16,
"usage_type": "name"
},
{
"api_na... |
34702779543 | from firebase_admin import firestore
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def get_user_skills(userid):
    """Fetch the user's skills list from Firestore and join it into a single
    space-separated string.

    Parameters
    ----------
    userid: str
        Firestore document id of the user.

    Returns
    -------
    str
    """
    user_doc = firestore.client().collection('user').document(userid).get()
    skills = user_doc.to_dict()['skills']
    return ' '.join(str(skill) for skill in skills)
def get_result(field, user_skill_string, keyword_string):
    """Compute the cosine similarity between the user's skills and the job
    keywords, returned as a percentage string truncated to its integer part.

    Parameters
    ----------
    field: str
        Job field name (currently unused; kept for the message helpers).
    user_skill_string: str
        Space-separated user skills.
    keyword_string: str
        Space-separated job keywords.

    Returns
    -------
    str
        Integer part of the similarity percentage, e.g. '73'.
    """
    vectorizer = CountVectorizer()
    counts = vectorizer.fit_transform((user_skill_string, keyword_string))
    similarity = cosine_similarity(counts)
    percentage = str(similarity[1][0]*100)
    # Keep only the digits before the decimal point.
    return percentage.split('.', 1)[0]
def check_result(field, result):
return 'You scored {}% in {} jobs.'.format(result, field) | prajwol-manandhar/resume-analysis-website | analysis.py | analysis.py | py | 869 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "firebase_admin.firestore.client",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "firebase_admin.firestore",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 14,
"usage_type":... |
26848770395 | import sys
from PySide6.QtWidgets import QApplication, QPushButton
from PySide6.QtCore import Slot
# This example demonstrates Signals and Slots (the signal/slot mechanism)
# Use @Slot() to mark a function as a slot
# @Slot()  # NOTE: surprisingly, the connection also works without this decorator
def say_hello():
    """Slot: print a greeting when the button is clicked."""
    print("Button, clicked, hello!")
app = QApplication([])
# The argument to QPushButton is the text displayed on the button.
button = QPushButton("点我!")
# Clicking the button triggers the say_hello slot.
button.clicked.connect(say_hello)
button.show()
app.exec()
| RamboKingder/PySide6 | button-2.py | button-2.py | py | 524 | python | zh | code | 2 | github-code | 6 | [
{
"api_name": "PySide6.QtWidgets.QApplication",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PySide6.QtWidgets.QPushButton",
"line_number": 16,
"usage_type": "call"
}
] |
3151231607 | import sys
import form
from PyQt4 import QtCore, QtGui
import letters
import pygame
class ConnectorToMainWindow(QtGui.QMainWindow):
    """Main window of a letter-learning game: shows a letter, plays its
    audio, and checks the answer typed by the user."""

    def __init__(self, parent = None):
        super(ConnectorToMainWindow, self).__init__()
        self.expected_letter = ''  # the letter the user is asked to type
        self.timer = QtCore.QTimer()
        self.ui = form.Ui_MainWindow()
        self.ui.setupUi(self)
        self.my_letters = letters.Letters()
        # Old-style PyQt4 signal/slot connections for the two buttons.
        QtCore.QObject.connect(self.ui.restart_button, QtCore.SIGNAL(form._fromUtf8("clicked()")),
                               self.on_restartbutton_click)
        QtCore.QObject.connect(self.ui.audio_button, QtCore.SIGNAL(form._fromUtf8("clicked()")),
                               self.on_audiobutton_click)
        QtCore.QMetaObject.connectSlotsByName(self)
        # Start the first round immediately.
        self.on_restartbutton_click()

    def on_restartbutton_click(self) :
        """Reset the game state and show a fresh letter."""
        self.my_letters.re_init()
        self.ui.answer_window.clear()
        self.ui.img_display.clear()
        self.generate_a_letter()

    def on_audiobutton_click(self) :
        """Replay the audio for the current letter."""
        self.play_audio("./audio/letters/{}.mp3".format(self.expected_letter))

    def generate_a_letter(self) :
        """Pick a new letter, play its audio, and display it."""
        self.ui.img_display.clear()
        letter = self.my_letters.pick_a_letter()
        self.play_audio("./audio/letters/{}.mp3".format(letter))
        self.expected_letter = letter
        # Show the letter in both upper and lower case.
        self.ui.question_window.setText('{0} {1}'.format(letter.upper(), letter))
        self.ui.answer_window.setFocus()

    def verify_result(self) :
        """Compare the typed answer with the expected letter, give visual
        feedback, and on success move on to the next letter."""
        if str(self.ui.answer_window.toPlainText()).lower() == self.expected_letter :
            self.display_image(QtGui.QPixmap("./imgs/happy.png"))
            self.ui.answer_window.clear()
            # Show the happy image for 2 s before generating the next letter.
            QtCore.QTimer.singleShot(2000, self.generate_a_letter)
        else :
            self.display_image(QtGui.QPixmap("./imgs/try_again.png"))
            self.ui.answer_window.clear()
            self.retry()

    def retry(self) :
        """Re-show the current letter and replay its audio."""
        self.ui.question_window.setText('{0} {1}'.format(self.expected_letter.upper(),
                                                         self.expected_letter))
        self.on_audiobutton_click()
        self.ui.answer_window.setFocus()

    def display_image(self, img) :
        """Show the given pixmap in the image display area."""
        self.ui.img_display.setPixmap(img)
        self.ui.img_display.show()

    def play_audio(self, path) :
        """Play an mp3 file through pygame's mixer."""
        pygame.mixer.init(frequency = 8000, channels = 1)
        pygame.mixer.music.load(path)
        pygame.mixer.music.play()

    def keyPressEvent(self, e):
        """Uppercase typed characters, handle backspace, and verify the
        answer when Enter/Return is pressed."""
        self.ui.img_display.clear()
        if e.key() != QtCore.Qt.Key_Return and e.key() != QtCore.Qt.Key_Enter:
            if e.key() != QtCore.Qt.Key_Backspace:
                self.ui.answer_window.insertPlainText(str(e.text()).upper())
            else:
                # Backspace: rebuild the text without its last character.
                mstr = str(self.ui.answer_window.toPlainText())[:-1]
                self.ui.answer_window.clear()
                self.ui.answer_window.insertPlainText(mstr)
        else:
            self.verify_result()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
myapp = ConnectorToMainWindow()
myapp.show()
sys.exit(app.exec_()) | Tal-Levy/homeSchooling | first_steps.py | first_steps.py | py | 3,110 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyQt4.QtGui.QMainWindow",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtCore.QTimer",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtCo... |
9721588822 | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
ICONSIZE = Gtk.IconSize.SMALL_TOOLBAR
class controlBar(Gtk.HeaderBar):
    """Application header bar: a file-menu button on the left, and
    settings/help menu buttons grouped on the right."""

    def __init__(self):
        Gtk.HeaderBar.__init__(self)
        self.set_show_close_button(True)
        self.props.title = "PyFlowChart"
        # Box that groups the settings and help buttons on the right.
        self.info_box = Gtk.Box(spacing=10,orientation=Gtk.Orientation.HORIZONTAL)
        self.file_button = Gtk.Button.new_from_icon_name("open-menu-symbolic", ICONSIZE)
        self.settings_button = Gtk.Button.new_from_icon_name("preferences-system", ICONSIZE)
        self.help_button = Gtk.Button.new_from_icon_name("help-about", ICONSIZE)
        self.main_menu = self.init_main_menu()
        self.settings_menu = self.init_settings_menu()
        self.help_menu = self.init_help_menu()
        #self.init_edit_menu()
        # Each button pops up its corresponding menu when clicked.
        self.file_button.connect('clicked', self.file_clicked)
        self.settings_button.connect('clicked', self.settings_clicked)
        self.help_button.connect('clicked', self.help_clicked)
        self.info_box.add(self.settings_button)
        self.info_box.add(self.help_button)
        self.pack_start(self.file_button)
        self.pack_end(self.info_box)
        self.buttons = []
        # self.populate_buttons()
        self.show_all()

    def init_main_menu(self):
        """Build the File menu (New/Open/Import, a View submenu, Save and
        Quit). Items are stored on self so the application can connect
        handlers to them; returns the Gtk.Menu."""
        main_menu = Gtk.Menu()
        self.new_button = Gtk.MenuItem.new_with_label('New')
        self.open_button = Gtk.MenuItem.new_with_label('Open')
        self.open_stock_button = Gtk.MenuItem.new_with_label('Import Stock')
        view_button = Gtk.MenuItem.new_with_label('View')
        # Submenu for switching between the Viewer and Builder views.
        self.view_menu = Gtk.Menu()
        self.viewer_button = Gtk.MenuItem.new_with_label('Viewer')
        self.builder_button = Gtk.MenuItem.new_with_label('Builder')
        self.view_menu.append(self.viewer_button)
        self.view_menu.append(self.builder_button)
        self.view_menu.show_all()
        view_button.set_submenu(self.view_menu)
        self.save_button = Gtk.MenuItem.new_with_label('Save')
        self.save_as_button = Gtk.MenuItem.new_with_label('Save As...')
        self.quit_button = Gtk.MenuItem.new_with_label('Quit')
        main_menu.append(self.new_button)
        main_menu.append(self.open_button)
        main_menu.append(self.open_stock_button)
        main_menu.append(Gtk.SeparatorMenuItem())
        main_menu.append(view_button)
        main_menu.append(Gtk.SeparatorMenuItem())
        main_menu.append(self.save_button)
        main_menu.append(self.save_as_button)
        main_menu.append(Gtk.SeparatorMenuItem())
        main_menu.append(self.quit_button)
        main_menu.show_all()
        return main_menu

    def init_settings_menu(self):
        """Build the settings menu (currently only Preferences)."""
        settings_menu = Gtk.Menu()
        self.preferences_button = Gtk.MenuItem.new_with_label('Preferences')
        settings_menu.append(self.preferences_button)
        settings_menu.show_all()
        return settings_menu

    def init_help_menu(self):
        """Build the help menu (About and Help)."""
        help_menu = Gtk.Menu()
        self.app_help_button = Gtk.MenuItem.new_with_label('Help')
        self.about_button = Gtk.MenuItem.new_with_label('About')
        help_menu.append(self.about_button)
        help_menu.append(self.app_help_button)
        help_menu.show_all()
        return help_menu

    def file_clicked(self, widget):
        """Pop up the File menu."""
        self.main_menu.popup( None, None, None, None, 0, Gtk.get_current_event_time())

    def settings_clicked(self, widget):
        """Pop up the settings menu."""
        self.settings_menu.popup( None, None, None, None, 0, Gtk.get_current_event_time())

    def help_clicked(self, widget):
        """Pop up the help menu."""
        self.help_menu.popup( None, None, None, None, 0, Gtk.get_current_event_time())
| steelcowboy/PyFlowChart | pyflowchart/interface/control_bar.py | control_bar.py | py | 3,719 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "gi.require_version",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.IconSize",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "gi.... |
764504946 | from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster import DBSCAN, KMeans, SpectralClustering
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelBinarizer
class ClusterTransformer(TransformerMixin, BaseEstimator):
    """Turns sklearn clustering algorithm into a transformer component that
    you can use in a pipeline.
    If the clustering method cannot be used for prediction
    (aka. it does not have a predict method), then a nearest neighbour
    vote will be used to infer cluster labels for unseen samples.

    Parameters
    ----------
    model: ClusterMixin
        Sklearn clustering model.
    n_neighbors: int
        Number of neighbours to use for inference.
    metric: str
        Metric to use for determining nearest neighbours.

    Attributes
    ----------
    labeler: LabelBinarizer
        Component that turns cluster labels into one-hot embeddings.
    neighbors: KNeighborsClassifier
        Classifier to use for out of sample prediction.
    """

    def __init__(
        self, model: ClusterMixin, n_neighbors: int = 5, metric: str = "cosine"
    ):
        self.model = model
        self.labeler = LabelBinarizer()
        self.neighbors = KNeighborsClassifier(
            n_neighbors=n_neighbors, metric=metric
        )

    def fit(self, X, y=None):
        """Fits the clustering algorithm and label binarizer.

        Parameters
        ----------
        X: ndarray of shape (n_observations, n_features)
            Observations to cluster.
        y: None
            Ignored, exists for compatiblity.

        Returns
        -------
        self
        """
        labels = self.model.fit_predict(X)
        # Models without a predict() method need the KNN fallback for
        # out-of-sample inference.
        if not hasattr(self.model, "predict"):
            self.neighbors.fit(X, labels)
        self.labeler.fit(labels)
        return self

    def transform(self, X):
        """Infers cluster labels for given data points.

        Parameters
        ----------
        X: ndarray of shape (n_observations, n_features)
            Observations to cluster.

        Returns
        -------
        ndarray of shape (n_observations, n_clusters)
            One-hot encoding of cluster labels.
        """
        if hasattr(self.model, "predict"):
            labels = self.model.predict(X)
        else:
            labels = self.neighbors.predict(X)
        return self.labeler.transform(labels)

    def get_feature_names_out(self, input_features=None):
        """Returns the cluster classes for each dimension.

        Parameters
        ----------
        input_features: array-like of str or None, optional
            Ignored. Accepted for compatibility with the sklearn
            get_feature_names_out() convention, which passes input feature
            names through pipelines (the original signature without this
            parameter breaks Pipeline.get_feature_names_out()).

        Returns
        -------
        ndarray of shape (n_clusters)
            Cluster names.
        """
        return self.labeler.classes_
def DBSCANTransformer(
    eps: float = 0.5, min_samples: int = 5, metric: str = "cosine"
) -> ClusterTransformer:
    """Convenience function for creating a DBSCAN transformer.

    Parameters
    ----------
    eps : float, default 0.5
        The maximum distance between two samples for one to be considered
        as in the neighborhood of the other. This is not a maximum bound
        on the distances of points within a cluster. This is the most
        important DBSCAN parameter to choose appropriately for your data set
        and distance function.
    min_samples : int, default 5
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.
    metric : str, default 'cosine'
        The metric to use when calculating distance between instances in a
        feature array.

    Returns
    -------
    ClusterTransformer
        Sklearn transformer component that wraps DBSCAN.
    """
    model = DBSCAN(eps=eps, min_samples=min_samples, metric=metric)
    # Bug fix: forward the caller's metric to the nearest-neighbour fallback
    # instead of hard-coding "cosine" (DBSCAN has no predict method, so the
    # fallback is always used for out-of-sample points and must agree with
    # the clustering metric).
    return ClusterTransformer(model, metric=metric)
def KMeansTransformer(n_clusters: int) -> ClusterTransformer:
    """Build a :class:`ClusterTransformer` wrapping a KMeans model.

    Parameters
    ----------
    n_clusters: int
        Number of clusters.

    Returns
    -------
    ClusterTransformer
        Sklearn transformer component that wraps KMeans.
    """
    kmeans_model = KMeans(n_clusters=n_clusters)
    return ClusterTransformer(kmeans_model, metric="cosine")
def SpectralClusteringTransformer(n_clusters: int) -> ClusterTransformer:
    """Build a :class:`ClusterTransformer` wrapping a SpectralClustering model.

    Parameters
    ----------
    n_clusters: int
        Number of clusters.

    Returns
    -------
    ClusterTransformer
        Sklearn transformer component that wraps SpectralClustering.
    """
    spectral_model = SpectralClustering(n_clusters=n_clusters)
    return ClusterTransformer(spectral_model, metric="cosine")
| x-tabdeveloping/blackbert | blackbert/cluster.py | cluster.py | py | 4,673 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.base.TransformerMixin",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sklearn.base.BaseEstimator",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sklearn.base.ClusterMixin",
"line_number": 32,
"usage_type": "name"
},
{
"... |
21610135351 | import requests
import re
import json
from nonebot import on_command, CommandSession
@on_command('lol新闻', aliases=('lol新闻'))
async def weather(session: CommandSession):
    """Scrape the LOL news page and send the five newest headlines.

    Each line of the reply is "<title> <url>".
    NOTE(review): aliases=('lol新闻') is a plain string, not a 1-tuple — confirm intended.
    """
    news_url = "http://l.zhangyoubao.com/news/"
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.6) Gecko/20040206 Firefox/0.8'
    }
    response = requests.get(news_url, headers=request_headers)
    response.encoding = "utf-8"
    page = response.text
    if response.status_code == 200:
        # Extract (href, title) pairs from the headline anchors.
        link_pattern = re.compile('<h2><a class="omit" target="_blank" href="(.*)" title=".*">(.*)</a></h2>')
        entries = re.findall(link_pattern, page)
        headline_lines = [title.strip() + " " + href for href, title in entries]
        reply = '\n'.join(headline_lines[0:5])
        await session.send(reply)
{
"api_name": "nonebot.CommandSession",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_... |
9766823930 | from collections import *
import numpy as np
from common.session import AdventSession
session = AdventSession(day=20, year=2017)
data = session.data.strip()
data = data.split('\n')
p1, p2 = 0, 0
class Particle:
    """A particle moving in 3-D: each tick, acceleration is added to velocity
    and velocity is added to position."""

    def __init__(self, p, v, a, _id):
        self.p = np.array(p)
        self.v = np.array(v)
        self.a = np.array(a)
        # Manhattan magnitudes, used to rank particles by long-term distance.
        self.p_sum = np.abs(self.p).sum()
        self.v_sum = np.abs(self.v).sum()
        self.a_sum = np.abs(self.a).sum()
        self.id = _id

    def update(self):
        """Advance the simulation by one tick."""
        self.v = self.v + self.a
        self.p = self.p + self.v
# Parse each input line "p=<..>, v=<..>, a=<..>": turning the angle brackets
# into parentheses makes the line a valid Particle(...) argument list.
# (eval is acceptable here only because the input is a trusted puzzle file.)
particles = []
for i, line in enumerate(data):
    line = line.replace('<', '(').replace('>', ')')
    particles.append(eval(f'Particle({line}, _id={i})'))
# Part 1: rank particles by (|a|, |v|, |p|) Manhattan sums; the minimum is the
# particle that stays closest to the origin in the long run.
sorted_particles = sorted(particles, key=lambda p: (
    p.a_sum, p.v_sum, p.p_sum))
p1 = sorted_particles[0].id
# Part 2: simulate 1000 ticks, removing every group of particles that occupy
# the same position in the same tick (collisions).
for _ in range(1000):
    positions = defaultdict(list)
    for p in particles:
        positions[tuple(p.p)].append(p)
    for parts in positions.values():
        if len(parts) > 1:
            for part in parts:
                particles.remove(part)
    for p in particles:
        p.update()
p2 = len(particles)
session.submit(p1, part=1)
session.submit(p2, part=2)
| smartspot2/advent-of-code | 2017/day20.py | day20.py | py | 1,196 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "common.session.AdventSession",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.array",
... |
36697678270 | #Coded by: QyFashae
import os
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import HashingVectorizer, TfidfTransformer
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn import tree
# Paths to the directories containing the emails
s_ep = os.path.join("training_data_for_ml_emails", "training_data_ML00")
v_ep = os.path.join("training_data_for_ml_emails", "training_data_ML01")
# List of file directories and corresponding labels
l_fd = [(s_ep, 0), (v_ep, 1)]
e_cs = []
l_bs = []
# Function to read email content and labels
def cve_sh_trfm(s_ep, v_ep, l_fd, e_cs, l_bs):
    """Read every email file under the labelled directories and collect texts/labels.

    Parameters
    ----------
    s_ep, v_ep : unused
        Kept for backward compatibility with existing callers.
    l_fd : list[tuple[str, int]]
        (directory, label) pairs; every file in each directory is read.
    e_cs : list[str]
        Output (mutated in place): email contents with newlines stripped.
    l_bs : list[int]
        Output (mutated in place): label for each entry appended to ``e_cs``.
    """
    for cfs, lbs in l_fd:
        for file in os.listdir(cfs):
            file_path = os.path.join(cfs, file)
            try:
                with open(file_path, "r") as current_file:
                    eml_ctt = current_file.read().replace("\n", "")
            except (OSError, UnicodeDecodeError):
                # Skip unreadable/undecodable files; previously a bare
                # ``except: pass`` silently swallowed every error.
                continue
            e_cs.append(eml_ctt)
            l_bs.append(lbs)
# Splitting the dataset and training the model
def vary_trmd(cve_sh_trfm, s_ep, v_ep, l_fd, e_cs, l_bs):
    """Train and evaluate a hashing-vectorizer + TF-IDF + decision-tree classifier.

    Splits ``e_cs``/``l_bs`` 60/40, fits the pipeline on the training part and
    writes the held-out accuracy and confusion matrix to text files.

    Parameters other than ``e_cs`` (email texts) and ``l_bs`` (labels) are
    unused; they are kept for backward compatibility with existing callers.
    """
    x_train, x_test, y_train, y_test = train_test_split(
        e_cs, l_bs, test_size=0.4, random_state=17
    )
    nlp_followed_by_dt = Pipeline(
        [
            # Bug fix: HashingVectorizer's ``input`` must be one of
            # 'filename', 'file' or 'content'. The texts in e_cs are already
            # in memory, so 'content' is correct ("eml_ctt" was invalid).
            ("vect", HashingVectorizer(input="content", ngram_range=(1, 4))),
            ("tfidf", TfidfTransformer(use_idf=True)),
            ("dt", tree.DecisionTreeClassifier(class_weight="balanced")),
        ]
    )
    nlp_followed_by_dt.fit(x_train, y_train)
    # Predicting and evaluating the model on the held-out 40%.
    y_test_predict = nlp_followed_by_dt.predict(x_test)
    accuracy = accuracy_score(y_test, y_test_predict)
    confusion = confusion_matrix(y_test, y_test_predict)
    # Persist the metrics to files.
    with open("accuracy_score.txt", "w") as acc_file:
        acc_file.write("Accuracy: " + str(accuracy))
    with open("confusion_matrix.txt", "w") as conf_file:
        conf_file.write("Confusion Matrix:\n" + str(confusion))
| Qyfashae/ML_IDS_EmailSec_Spam | smtp_assasin.py | smtp_assasin.py | py | 2,150 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1... |
40941005277 | # github üzerinden yapılan arama sonuçlarını consola yazdırma
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# Open GitHub, search for "python" and print each repository hit to the console.
driver = webdriver.Chrome()
url = "https://github.com"
driver.get(url)

search_box = driver.find_element_by_name("q")
time.sleep(1)
print("\n" + driver.title + "\n")

search_box.send_keys("python")
time.sleep(1)
search_box.send_keys(Keys.ENTER)
time.sleep(2)

# Each anchor inside a result row links to a repository.
for repo_link in driver.find_elements_by_css_selector(".repo-list-item div div a"):
    print(repo_link.text)

time.sleep(2)
driver.close()
| furkan-A/Python-WS | navigate.py | navigate.py | py | 588 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
36659620035 |
# finding the right modules/packages to use is not easy
import os
import pyodbc
from numpy import genfromtxt
import pandas as pd
import sqlalchemy as sa # use sqlalchemy for truncating etc
from sqlalchemy import Column, Integer, Float, Date, String, BigInteger
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.dialects import mssql
import urllib
import csv
import fnmatch2
from time import time
## DATABASE STUFF
# server + instance = DESKTOP-A6J6D7Q\LEROYB_INSTANCE
# database = Tracks
server_name='DESKTOP-A6J6D7Q\LEROYB_INSTANCE'
db_name='Tracks'
path = 'F:\export_6642035' # location of all the files related to STRAVA export
pathArchive = 'F:\export_6642035_archive' # this is the archive folder that will contain all the processed files
searchstring = '*' # we could limit the files to a particular string pattern, however in this case we want to go through all the files
# https://docs.sqlalchemy.org/en/13/dialects/mssql.html#connecting-to-pyodbc
# https://docs.sqlalchemy.org/en/13/core/connections.html
# https://www.pythonsheets.com/notes/python-sqlalchemy.html
# https://auth0.com/blog/sqlalchemy-orm-tutorial-for-python-developers/
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.truncate.html
params = urllib.parse.quote_plus('Driver={SQL Server};'
'Server='+server_name +';'
'Database='+db_name+';'
'Trusted_Connection=yes;')
engine = sa.create_engine("mssql+pyodbc:///?odbc_connect=%s" % params,fast_executemany=True, echo=True)
# https://chrisalbon.com/python/data_wrangling/pandas_dataframe_importing_csv/
def Load_Data(file_name):
    """Read a CSV file and return its data rows as a list of lists.

    The first line of the file is treated as the header row and is not
    included in the returned rows.
    """
    data = pd.read_csv(file_name)
    return data.values.tolist()
Base = declarative_base()
BaseGPX = declarative_base()
class Activities(Base):
    """Staging table STG.Activities, mirroring the columns of Strava's activities.csv export."""
    # Tell SQLAlchemy the table name and table-specific arguments (schema).
    __tablename__ = 'Activities'
    __table_args__ = {'schema':'STG'}
    # Column definitions; names match the CSV header from the Strava export.
    Activity_ID = Column(BigInteger, primary_key=True, nullable=False) #
    Activity_Date = Column(Date)
    # NOTE(review): String() without a length may be problematic for MSSQL DDL
    # emission on some dialect versions — confirm against the target server.
    Activity_Name = Column(String)
    Activity_Type = Column(String)
    Activity_Description = Column(String)
    Elapsed_Time = Column(Integer)  # seconds, per the int() cast in the loader
    Distance = Column(Float)
    Commute = Column(String)
    Activity_Gear = Column(String)
    Filename = Column(String)  # relative path of the GPX/FIT file in the export
class GPXFiles(BaseGPX):
    """Staging table STG.GPXFiles holding raw GPX file names and contents."""
    # Tell SQLAlchemy the table name and table-specific arguments (schema).
    __tablename__ = 'GPXFiles'
    __table_args__ = {'schema':'STG'}
    # Column definitions; the surrogate key is generated by the database.
    GPXFile_ID = Column(BigInteger, primary_key=True, autoincrement=True, nullable=False) #
    GPXFile_Name = Column(String, nullable=True)
    GPXFile_Contents = Column(String, nullable=True)
    #GPXFile_XMLContents = Column(String, nullable=True)
    #GPXFile_Route_ID = Column(BigInteger, nullable=True)
# Walk the Strava export folder and stage every supported file into SQL Server.
# r = root directory, d = sub-directories, f = files in the current directory.
for r, d, f in os.walk(path):
    for file in f:
        if fnmatch2.fnmatch2(file, searchstring):
            if file.endswith('.gpx'):
                print('gpxfile')
                # Raw GPX files go straight into the STG.GPXFiles staging table.
                GPXFiles.__table__.create(bind=engine, checkfirst=True)  # create the table only if it does not exist yet
                session = sessionmaker(bind=engine)
                session.configure(bind=engine)
                s = session()
                try:
                    # NOTE(review): Load_Data parses with pandas.read_csv, which
                    # looks questionable for GPX (XML) content — confirm intent.
                    data = Load_Data(os.path.join(r, file))
                    record = GPXFiles(**{
                        'GPXFile_Name' : file,
                        'GPXFile_Contents' : data
                    })
                    s.add(record)  # stage the record
                    s.commit()  # attempt to commit
                except Exception as e:
                    print(e)
                    s.rollback()  # roll back the changes on error
                s.close()  # close the connection
                continue
            elif file.endswith('.gpx.gz'):
                # TODO: unzip first, then load into STG.GPXFiles (not implemented).
                continue
            elif file.endswith('.csv'):
                # The only CSV we need is activities.csv from the Strava export.
                if file == 'activities.csv':
                    Activities.__table__.create(bind=engine, checkfirst=True)  # create the table only if it does not exist yet
                    session = sessionmaker(bind=engine)
                    print(session)
                    session.configure(bind=engine)
                    s = session()
                    try:
                        data = Load_Data(os.path.join(r, file))
                        for i in data:
                            # Map each CSV row onto the Activities ORM columns,
                            # stripping thousands separators before casting.
                            record = Activities(**{
                                'Activity_ID' : i[0],
                                'Activity_Date' : i[1].replace(',', ''),
                                'Activity_Name' : str(i[2]),
                                'Activity_Type' : str(i[3]),
                                'Activity_Description' : str(i[4]),
                                'Elapsed_Time' : int(i[5]),
                                'Distance' : float(i[6].replace(',', '')),
                                'Commute' : str(i[7]),
                                'Activity_Gear' : str(i[8]),
                                'Filename' : str(i[9])
                            })
                            s.add(record)  # stage every row
                        s.commit()  # attempt to commit all the rows at once
                    except Exception as e:
                        print(e)
                        s.rollback()  # roll back the changes on error
                    s.close()  # close the connection
                else:
                    continue
                # TODO: move the processed file to the archive folder at the same
                # level as the source folder (create the folder first if missing).
                continue
#conn.close
# now that this is done, we need to run the stored procedures to get the data from the staging tbales into the production conformed tables
# first to brng the data from staging into conformed area
# then run a bunch of procs to update some of th emissing attributes
# don't forget to clean up in some way
| BertrandLeroy/GPXReader | ProcessStravaGPX.py | ProcessStravaGPX.py | py | 7,471 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "urllib.parse.quote_plus",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pa... |
40615421063 | import os
import time
from collections import Counter
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import core.leading_tree as lt
import core.lmca as lm
from core.delala_select import DeLaLA_select
from utils import common
def load_parameters(param_path):
    """Load per-subtree tuning parameters from a CSV file.

    The file's columns are, in order: dc (float), lt_num (int) and
    length_scale (float); one row per subtree to be processed.
    """
    columns = common.load_csv(param_path)
    dc_values = columns[:, 0].astype(float)
    tree_counts = columns[:, 1].astype(int)
    scales = columns[:, 2].astype(float)
    return dc_values, tree_counts, scales
def removeMinorData(X, Labels, k):
    """Remove the samples whose class has at most ``k`` members.

    Classes with <= k samples cannot supply enough neighbours, so all of
    their entries are dropped from both ``X`` and ``Labels``.

    :param X: array of samples (or of dataset indices, as used by the caller)
    :param Labels: class label for each entry of X
    :param k: minimum class size; classes with <= k members are removed
    :return: (X, Labels, removed) where ``removed`` is the array of removed
        X entries, or -1 when nothing was removed.
    """
    # Count every class once instead of calling list.count per class (O(n*k) -> O(n)).
    counts = Counter(list(Labels))
    rmInds = np.array(
        [idx for idx, lab in enumerate(Labels) if counts[lab] <= k],
        dtype=int,
    )
    if len(rmInds) > 0:
        Remove_index = X[rmInds]
        X = np.delete(X, rmInds, axis=0)
        Labels = np.delete(Labels, rmInds)
        return X, Labels, Remove_index
    else:
        # Sentinel kept for backward compatibility: callers filter out -1 later.
        return X, Labels, -1
order = 0
param_path = os.path.join("data", "parameters.csv")
dc_all, lt_num_all, length_scale_all = load_parameters(param_path)
SelectedInds_all = np.zeros(0, dtype=int)
def PredictLabel(train_AL, label_num, layer, dimension):
    # Predict labels for the samples in the subtree index set ``train_AL``.
    # Strategy depends on how many distinct classes the subtree contains:
    #   0 -> nothing to do; 1 -> propagate the root node's label;
    #   2-3 (or max layer 3 reached) -> select landmarks, learn an LMCA
    #   embedding and classify each remaining sample by its nearest landmark;
    #   otherwise return 1 so the caller partitions the subtree again.
    global order, SelectedInds_all
    if label_num == 0:
        pass
    elif label_num == 1:  # predict that every subtree sample has the same label as its root node
        y_test = np.delete(y[train_AL], 0)  # test set labels (all but the root)
        # NOTE(review): y_predict is computed but never used in this branch.
        y_predict = np.zeros(len(y_test), dtype=int) + y[train_AL[0]]
        y_predict_all[train_AL] = y[train_AL[0]]
        SelectedInds_all = np.append(SelectedInds_all, train_AL[0])
        return 0
    elif 2 <= label_num <= 3 or layer == 3:  # prediction with LMCA
        # Restrict the precomputed distance matrix to this subtree.
        D_A = D[train_AL]
        D_A = D_A[:, train_AL]
        LT = lt.LeadingTree(X_train=X[train_AL], dc=dc_all[order], lt_num=lt_num_all[order], D=D_A)
        LT.fit()
        LTgammaPara = LT.density * LT.delta  # leading-tree ranking score (density * delta)
        selectedInds = DeLaLA_select(LTgammaPara, LT.density, LT.layer, y[train_AL], 2, label_num * 2, 0.5)
        selectedInds_universe = train_AL[selectedInds]  # map local indices back to the full dataset
        X_train = X[selectedInds_universe]
        y_train = y[selectedInds_universe]
        lmca = lm.LMCA(dimension=dimension, init_method="kpca", verbose=True, max_iter=100, stepsize=1.E-2,
                       nn_active=False, length_scale=length_scale_all[order], k=1)
        lmca.fit(X_train, y_train)
        order += 1  # consume the next row of per-subtree parameters
        X_test = np.delete(X[train_AL], selectedInds, axis=0)  # removing the training set leaves the test samples
        y_test = np.delete(y[train_AL], selectedInds, axis=0)  # test set labels
        y_predict = np.zeros(len(y_test), dtype=int) - 1  # predicted labels (-1 = unset)
        MatDist = common.euclidian_dist_square(X_train, X_test)  # squared distances between train and test sets
        test_bnd_K = np.exp(-1 * lmca.length_scale * MatDist).T  # RBF kernel between test and training samples
        B = test_bnd_K.dot(lmca.Omega)  # test set after dimensionality reduction
        A = lmca.K.dot(lmca.Omega)  # training set after dimensionality reduction
        # Find the training sample with the closest Euclidean distance for each
        # test sample, and predict that both have the same label.
        D_temp = common.euclidian_dist(B, A)
        Pa = np.zeros(len(y_predict), dtype=int)  # Pa[i] = index of the training sample closest to test sample i
        for j in range(len(y_predict)):
            index1 = np.argmin(D_temp[j])
            Pa[j] = index1
        y_predict = y_train[Pa]
        index_predict = np.delete(train_AL, selectedInds, axis=0)
        y_predict_all[index_predict] = y_predict
        SelectedInds_all = np.append(SelectedInds_all, selectedInds_universe)
        return 0
    else:
        return 1
if __name__ == "__main__":
    dataset_path = os.path.join("data", "letter.csv")
    X, y = common.load_data(dataset_path, label_index=0, map_label=False)
    t1 = time.time()
    scalar = MinMaxScaler()
    X = scalar.fit_transform(X)  # scale all features into [0, 1]
    D = common.euclidian_dist(X, X)  # full pairwise distance matrix
    remove_index_all = np.zeros(0, dtype=int)
    y_predict_all = np.zeros(len(y), dtype=int) - 1  # -1 marks "not predicted yet"
    # Per-layer settings: [dc, lt_num, layer number, LMCA dimension].
    dc_lt_num_arr = [[0.12, 45, 1, 2], [0.19, 10, 2, 2], [0.12, 5, 3, 8]]
    def recursive_partitioning(_X, _D, _layer=0, _train_AL=None):
        # Build a leading tree over _X and predict each subtree; subtrees with
        # too many classes are partitioned again one layer deeper.
        global remove_index_all
        lt1 = lt.LeadingTree(X_train=_X, dc=dc_lt_num_arr[_layer][0],
                             lt_num=dc_lt_num_arr[_layer][1], D=_D)  # leading tree over the current subset
        lt1.fit()
        for i in range(dc_lt_num_arr[_layer][1]):
            # Drop classes with too few members before predicting the subtree.
            if _layer == 0:
                _train_AL2, _y_AL, _remove_index = removeMinorData(lt1.AL[i], y[lt1.AL[i]], 2)
            else:
                _train_AL2, _y_AL, _remove_index = removeMinorData(_train_AL[lt1.AL[i]], y[_train_AL[lt1.AL[i]]], 2)
            _label_num = len(np.unique(y[_train_AL2]))
            remove_index_all = np.append(remove_index_all, _remove_index)
            _a = PredictLabel(_train_AL2, _label_num, dc_lt_num_arr[_layer][2], dc_lt_num_arr[_layer][3])
            if _a == 1:
                print(f"The {_layer}th layer case3: The number of subtree {i} categories is {_label_num}, which needs to be divided again.")
                _D_2 = D[_train_AL2]
                _D_2 = _D_2[:, _train_AL2]
                recursive_partitioning(X[_train_AL2], _D_2, _layer + 1, _train_AL=_train_AL2)
        return lt1
    lt0 = recursive_partitioning(X, D)
    # removeMinorData returns -1 when nothing was removed; strip those sentinels.
    index_None = np.zeros(0, dtype=int)
    for i in range(len(remove_index_all)):
        if remove_index_all[i] == -1:
            index_None = np.append(index_None, i)
    remove_index_all = np.delete(remove_index_all, index_None)
    y_remove_Select = np.delete(y_predict_all, np.append(SelectedInds_all, remove_index_all))
    # Assign each removed sample the label of its nearest selected landmark.
    D = common.euclidian_dist(X[remove_index_all], X[SelectedInds_all])
    Pa = np.zeros(len(remove_index_all), dtype=int)
    for i in range(len(remove_index_all)):
        index1 = np.argmin(D[i])
        Pa[i] = index1
    y_predict_all[remove_index_all] = y[SelectedInds_all][Pa]
    arr1 = y[remove_index_all] - y_predict_all[remove_index_all]
    count0 = Counter(arr1)[0]  # a zero difference means a correct prediction
    print(f'The accuracy of the remove sample prediction is {count0 / len(arr1)}, {count0}/{len(arr1)}')
    # Subtree accuracy (45 = lt_num of the first layer).
    for i in range(45):
        temp = np.setdiff1d(lt0.AL[i], SelectedInds_all)
        arr2 = y[temp] - y_predict_all[temp]
        count1 = Counter(arr2)[0]
        print(f'The accuracy of subtree {i} is {count1 / len(temp)}, {count1}/{len(temp)}')
    # Overall accuracy on everything that was not selected as a landmark.
    y_predict_all = np.delete(y_predict_all, SelectedInds_all)
    y_test_all = np.delete(y, SelectedInds_all)
    arr = y_test_all - y_predict_all
    count = Counter(arr)[0]
    t2 = time.time()
    print(f'A total of {len(SelectedInds_all)} points are selected, with an accuracy of {count / len(y_test_all)}, {count}/{len(y_test_all)}')
    print(f'Takes {t2 - t1} seconds')
| alanxuji/DeLaLA | DeLaLA/DeLaLA-Letter.py | DeLaLA-Letter.py | py | 7,574 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "utils.common.load_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "utils.common",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"lin... |
11275131068 |
from django.contrib import admin
from django.urls import path, include
from basesite import views
# URL routes for the public site; each route's ``name`` mirrors its path so it
# can be reversed in templates via {% url %}.
urlpatterns = [
    path('', views.index, name='index'),
    path('academics', views.academics, name='academics'),
    path('labs', views.labs, name='labs'),
    path('committee', views.committee, name='committee'),
    path('gallery', views.gallery, name='gallery'),
    path('hostel', views.hostel, name='hostel'),
    path('placements', views.placements, name='placements'),
    path('alumni', views.alumni, name='alumni'),
    path('library', views.library, name='library'),
    path('about', views.about, name='about'),
    path('contact', views.contact, name='contact'),
    path('coursempe', views.coursempe, name='coursempe'),
    path('coursemae', views.coursemae, name='coursemae'),
    path('coursecse', views.coursecse, name='coursecse'),
    path('insert', views.insert, name="insert")
]
| Mr-vabs/GPA | basesite/urls.py | urls.py | py | 923 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "basesite.views.index",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "basesite.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.pa... |
11169933239 | # from django.shortcuts import render
from rest_framework import generics
from review_app.models import FarmersMarket, Vendor
from fm_api.serializers import FarmersMarketSerializer, VendorSerializer
# Create your views here.
class FarmersMarketListAPIView(generics.ListAPIView):
    """GET: list all farmers markets."""
    queryset = FarmersMarket.objects.all()
    serializer_class = FarmersMarketSerializer
class FarmersMarketRetrieveAPIView(generics.RetrieveAPIView):
    """GET: retrieve a single farmers market by primary key."""
    queryset = FarmersMarket.objects.all()
    serializer_class = FarmersMarketSerializer
class VendorListAPIView(generics.ListAPIView):
    """GET: list all vendors."""
    queryset = Vendor.objects.all()
    serializer_class = VendorSerializer
class VendorRetrieveAPIView(generics.RetrieveAPIView):
    """GET: retrieve a single vendor by primary key."""
    queryset = Vendor.objects.all()
    serializer_class = VendorSerializer
| dhcrain/FatHen | fm_api/views.py | views.py | py | 783 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "review_app.models.FarmersMarket.objects.all",
"line_number": 9,
"usage_typ... |
8179016390 | import json
import logging
# Configure the root logger so AWS Lambda captures INFO-level messages.
logger = logging.getLogger()
logger.setLevel(logging.INFO)


def hello(event, context):
    """AWS Lambda handler: log the incoming event and echo it in a 200 response."""
    logger.info("AWS Lambda processing message from GitHub: %s.", event)
    payload = {
        "message": "Your function executed successfully!",
        "input": event
    }
    return {
        "statusCode": 200,
        "body": json.dumps(payload)
    }
| Qif-Equinor/serverless-edc2021 | aws-demo/handler.py | handler.py | py | 396 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 19,
"usage_type": "call"
}
] |
1868384059 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
# Create your models here.
class Country(models.Model):
    """Country reference data — presumably ISO 3166 (alpha-2/alpha-3/numeric codes); confirm source."""
    name = models.CharField(_("Name"), db_column='name', max_length = 150, null=True, blank=True)
    code2 = models.CharField(_("Code2"), db_column='code2', max_length = 2, unique = True)  # two-letter code
    code3 = models.CharField(_("Code3"), db_column='code3', max_length = 3, unique = True)  # three-letter code
    number = models.CharField(_("Number"), db_column='number', max_length = 3, unique = True)  # numeric code, stored as text
class Document(models.Model):
    """An uploaded document image, stored under settings.UPLOAD_TO."""
    image = models.ImageField(_("Document image"), upload_to=settings.UPLOAD_TO, null=True, blank=True)
| amlluch/vectorai | vectorai/restapi/models.py | models.py | py | 696 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": ... |
70728232508 | import frida
import sys
package_name = "com.jni.anto.kalip"
def get_messages_from_js(message, data):
    """Frida on('message') callback: print the raw message and its payload.

    Bug fix: error messages from Frida carry no 'payload' key, so using
    ``message['payload']`` raised KeyError whenever the injected script threw.
    ``dict.get`` prints None instead.
    """
    print(message)
    print(message.get('payload'))
def instrument_debugger_checks():
    """Return the Frida JS snippet that hooks android.os.Debug.isDebuggerConnected
    so it always reports False, hiding the attached debugger from the app.

    NOTE(review): 'Dalvik' is the legacy Frida JS namespace (renamed 'Java' in
    modern Frida releases) — confirm the Frida version in use.
    """
    hook_code = """
    setTimeout(function(){
        Dalvik.perform(function () {
            var TM = Dalvik.use("android.os.Debug");
            TM.isDebuggerConnected.implementation = function () {
                send("Called - isDebuggerConnected()");
                return false;
            };
        });
    },0);
    """
    return hook_code
# Attach to the target package on the last enumerated device (typically the USB device).
process = frida.get_device_manager().enumerate_devices()[-1].attach(package_name)
# Inject the anti-debug-detection hook and wire up the message callback.
script = process.create_script(instrument_debugger_checks())
script.on('message',get_messages_from_js)
script.load()
# Block on stdin so the injected script keeps running until EOF (Ctrl-D).
sys.stdin.read()
| antojoseph/frida-android-hooks | debugger.py | debugger.py | py | 800 | python | en | code | 371 | github-code | 6 | [
{
"api_name": "frida.get_device_manager",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sys.stdin.read",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 38,
"usage_type": "attribute"
}
] |
36035072095 | import tweepy
import pandas as pd
import re
import time
from textblob import TextBlob
from sqlalchemy import create_engine
import yaml
import json
TWITTER_CONFIG_FILE = '../auth.yaml'

# Load Twitter API credentials from the YAML config.
# Bug fix: yaml.load() without an explicit Loader is unsafe and raises a
# TypeError on PyYAML >= 6; safe_load never constructs arbitrary objects.
with open(TWITTER_CONFIG_FILE, 'r') as config_file:
    config = yaml.safe_load(config_file)

consumer_key = config['twitter']['consumer_key']
consumer_secret = config['twitter']['consumer_secret']
access_token = config['twitter']['access_token']
access_token_secret = config['twitter']['access_token_secret']

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)

# Geo coordinates for the top 50 US cities by population.
with open('cities_trunc.json') as cities_file:
    data = json.load(cities_file)


def get_tweet_payload(d):
    """Map a city record to {city: 'lat,lon,10mi'} for the Twitter search geocode."""
    return {d['city']: '%s,%s,%s' % (d['latitude'], d['longitude'], '10mi')}


geo = {}
for city_record in data:
    geo.update(get_tweet_payload(city_record))

# Manually setting queries against a set of popular competitive games —
# leverage twitch_top.py to keep this dynamic.
api = tweepy.API(auth, wait_on_rate_limit=True)
query = ['fortnite', 'overwatch', 'starcraft', 'dota', 'league of legends', 'CSGO', 'hearthstone', 'pubg', 'tekken', 'ssbm']
d = []
# Standard APIs limit data volume; adding geo regions and/or queries can
# result in a long run-time.
for game in query:
    for city, coords in geo.items():
        public_tweets = [status for status in tweepy.Cursor(api.search, q=game, geocode=coords, count=100).items(1000)]
        for tweet in public_tweets:
            analysis = TextBlob(tweet.text)
            TweetText = re.sub('[^A-Za-z0-9]+', ' ', tweet.text)  # keep alphanumerics only
            polarity = analysis.sentiment.polarity
            subjectivity = analysis.sentiment.subjectivity
            d.append((TweetText,
                      polarity,
                      subjectivity,
                      game,
                      city))

# Format the analyzed tweets into a timestamped CSV for appending to a database.
timestr = time.strftime("%Y%m%d-%H%M%S")
filename = timestr + "_tweets.csv"
cols = ['Tweet', 'polarity', 'subjectivity', 'game', 'city']
df = pd.DataFrame(d, columns=cols)
df = df[['Tweet', 'polarity', 'subjectivity', 'game', 'city']]
# Bug fix: drop_duplicates returns a new DataFrame; the result was previously discarded.
df = df.drop_duplicates(['Tweet'], keep='last')
df.to_csv(filename, encoding='utf-8-sig')
{
"api_name": "yaml.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tweepy.OAuthHandler",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_numbe... |
39304842298 | import os
from flask import Flask, jsonify, Blueprint
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bcrypt import Bcrypt
import flask_restplus
from werkzeug.contrib import fixers
# instantiate the extensions
db = SQLAlchemy()
migrate = Migrate()
bcrypt = Bcrypt()
def register_api(_app):
    """Build the flask-restplus API blueprint and register it on ``_app``.

    Namespaces are mounted under /<API_VERSION>/auth and /<API_VERSION>/users.
    """
    # Imported inside the function — presumably to avoid a circular import
    # with the package's own modules; confirm before moving to module level.
    from users.api.users import ns as users_ns
    from users.api.auth import ns as auth_ns
    blueprint = Blueprint('api', __name__)
    api = flask_restplus.Api(
        app=blueprint,
        doc=_app.config['SWAGGER_PATH'],
        version=_app.config['API_VERSION'],
        title='Shows On Demand - Users Service REST API',
        description='Shows on deman Users service API for users access.',
        validate=_app.config['RESTPLUS_VALIDATE']
    )
    # Mount both namespaces under the configured API version, e.g. /v1/auth.
    api.add_namespace(auth_ns, path='/{}/auth'.format(_app.config['API_VERSION']))
    api.add_namespace(users_ns, path='/{}/users'.format(_app.config['API_VERSION']))
    _app.register_blueprint(blueprint)
def create_app():
    """Application factory: build and configure the Flask app.

    The configuration object is selected via the APP_SETTINGS environment
    variable; extensions (db, bcrypt, migrate) are bound to the new app.
    """
    # instantiate the app
    app = Flask(__name__)
    # enable CORS
    CORS(app)
    # set config from the class path in APP_SETTINGS
    app_settings = os.getenv('APP_SETTINGS')
    app.config.from_object(app_settings)
    app_config = app.config  # NOTE(review): assigned but never used
    # set up extensions
    db.init_app(app)
    bcrypt.init_app(app)
    migrate.init_app(app, db)
    # trust X-Forwarded-* headers when running behind a reverse proxy
    app.wsgi_app = fixers.ProxyFix(app.wsgi_app)
    # register blueprints
    register_api(app)
    return app
| guidocecilio/shows-on-demand-users | src/users/__init__.py | __init__.py | py | 1,480 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask_migrate.Migrate",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask_bcrypt.Bcrypt",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": ... |
12932593468 | from django.db import models
class UserMailmapManager(models.Manager):
    """A queryset manager which defers all :class:`models.DateTimeField` fields, to avoid
    resetting them to an old value involuntarily."""
    @classmethod
    def deferred_fields(cls):
        # Return (and cache on the class) the names of all DateTimeFields of
        # UserMailmap that are not auto_now (auto_now fields update themselves).
        try:
            return cls._deferred_fields
        except AttributeError:
            # Computed lazily because UserMailmap is defined after this manager.
            cls._deferred_fields = [
                field.name
                for field in UserMailmap._meta.get_fields()
                if isinstance(field, models.DateTimeField) and not field.auto_now
            ]
            return cls._deferred_fields
    def get_queryset(self):
        # Defer the date fields so ordinary model saves cannot clobber them.
        return super().get_queryset().defer(*self.deferred_fields())
class UserMailmap(models.Model):
    """
    Model storing mailmap settings submitted by users.
    """

    user_id = models.CharField(max_length=50, null=True)
    """Optional user id from Keycloak"""
    from_email = models.TextField(unique=True, null=False)
    """Email address to find author in the archive"""
    from_email_verified = models.BooleanField(default=False)
    """Indicates if the from email has been verified"""
    from_email_verification_request_date = models.DateTimeField(null=True)
    """Last from email verification request date"""
    display_name = models.TextField(null=False)
    """Display name to use for the author instead of the archived one"""
    display_name_activated = models.BooleanField(default=False)
    """Indicates if the new display name should be used"""
    to_email = models.TextField(null=True)
    """Optional new email to use in the display name instead of the archived one"""
    to_email_verified = models.BooleanField(default=False)
    """Indicates if the to email has been verified"""
    to_email_verification_request_date = models.DateTimeField(null=True)
    """Last to email verification request date"""
    mailmap_last_processing_date = models.DateTimeField(null=True)
    """Last mailmap synchronisation date with swh-storage"""
    last_update_date = models.DateTimeField(auto_now=True)
    """Last date that mailmap model was updated"""

    class Meta:
        app_label = "swh_web_mailmap"
        db_table = "user_mailmap"

    # Defer _date fields by default to avoid updating them by mistake
    objects = UserMailmapManager()

    @property
    def full_display_name(self) -> str:
        """Return ``"display_name <to_email>"`` when a verified replacement
        email exists, otherwise just the display name."""
        if self.to_email is not None and self.to_email_verified:
            return "%s <%s>" % (self.display_name, self.to_email)
        else:
            return self.display_name
class UserMailmapEvent(models.Model):
    """
    Represents an update to a mailmap object
    """

    timestamp = models.DateTimeField(auto_now=True, null=False)
    """Timestamp of the moment the event was submitted"""
    user_id = models.CharField(max_length=50, null=False)
    """User id from Keycloak of the user who changed the mailmap.
    (Not necessarily the one who the mail belongs to.)"""
    request_type = models.CharField(max_length=50, null=False)
    """Either ``add`` or ``update``."""
    request = models.TextField(null=False)
    """JSON dump of the request received."""
    successful = models.BooleanField(default=False, null=False)
    """If False, then the request failed or crashed before completing,
    and may or may not have altered the database's state."""

    class Meta:
        # Events are queried chronologically; index the timestamp column.
        indexes = [
            models.Index(fields=["timestamp"]),
        ]
        app_label = "swh_web_mailmap"
        db_table = "user_mailmap_event"
| SoftwareHeritage/swh-web | swh/web/mailmap/models.py | models.py | py | 3,525 | python | en | code | 11 | github-code | 6 | [
{
"api_name": "django.db.models.Manager",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 16,
"usage_type": "attribute"
},
{
"... |
24685520932 | from . import views
from django.urls import path
app_name = 'bankapp' #namespace: reverse URLs as 'bankapp:<name>'

# Route table for the bank app; each path maps to a view in views.py.
urlpatterns = [
    path('',views.home,name='home'),
    path('login/',views.login,name='login'),
    path('register/',views.register,name='register'),
    path('logout/',views.logout,name='logout'),
    path('user/',views.user,name='user'),
    path('form/',views.form,name="form"),
]
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
20468728047 | # sourcery skip: do-not-use-staticmethod
"""
A module that contains the AIConfig class object that contains the configuration
"""
from __future__ import annotations
import os
from typing import Type
import yaml
class AIConfig:
"""
A class object that contains the configuration information for the AI
Attributes:
ai_name (str): El nombre de la IA.
ai_role (str): La descripción de la función de la IA.
ai_goals (list): La lista de objetivos que la IA debe cumplir.
"""
def __init__(
self, ai_name: str = "", ai_role: str = "", ai_goals: list | None = None
) -> None:
"""
Initialize a class instance
Parameters:
ai_name (str): El nombre de la IA.
ai_role (str): La descripción de la función de la IA.
ai_goals (list): La lista de objetivos que la IA debe cumplir.
Returns:
None
"""
if ai_goals is None:
ai_goals = []
self.ai_name = ai_name
self.ai_role = ai_role
self.ai_goals = ai_goals
# Soon this will go in a folder where it remembers more stuff about the run(s)
SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml")
@staticmethod
def load(config_file: str = SAVE_FILE) -> "AIConfig":
"""
Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from
yaml file if yaml file exists,
else returns class with no parameters.
Parameters:
config_file (int): The path to the config yaml file.
DEFAULT: "../ai_settings.yaml"
Returns:
cls (object): An instance of given cls object
"""
try:
with open(config_file, encoding="utf-8") as file:
config_params = yaml.load(file, Loader=yaml.FullLoader)
except FileNotFoundError:
config_params = {}
ai_name = config_params.get("ai_name", "")
ai_role = config_params.get("ai_role", "")
ai_goals = config_params.get("ai_goals", [])
# type: Type[AIConfig]
return AIConfig(ai_name, ai_role, ai_goals)
def save(self, config_file: str = SAVE_FILE) -> None:
"""
Saves the class parameters to the specified file yaml file path as a yaml file.
Parameters:
config_file(str): The path to the config yaml file.
DEFAULT: "../ai_settings.yaml"
Returns:
None
"""
config = {
"ai_name": self.ai_name,
"ai_role": self.ai_role,
"ai_goals": self.ai_goals,
}
with open(config_file, "w", encoding="utf-8") as file:
yaml.dump(config, file, allow_unicode=True)
def construct_full_prompt(self) -> str:
"""
Returns a prompt to the user with the class information in an organized fashion.
Parameters:
None
Returns:
full_prompt (str): A string containing the initial prompt for the user
including the ai_name, ai_role and ai_goals.
"""
prompt_start = (
"Sus decisiones deben tomarse siempre de forma independiente, sin"
" buscar asistencia al usuario. Aprovecha tus puntos fuertes como LLM y persigue"
" estrategias sencillas y sin complicaciones jurídicas."
""
)
from nancy.prompt import get_prompt
# Construct full prompt
full_prompt = (
f"Usted es {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nOBJETIVOS:\n\n"
)
for i, goal in enumerate(self.ai_goals):
full_prompt += f"{i+1}. {goal}\n"
full_prompt += f"\n\n{get_prompt()}"
return full_prompt
| badboytuba/nancy | nancy/config/ai_config.py | ai_config.py | py | 3,801 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_numbe... |
16106061785 |
def _extend_pre_ranges(df, upstream:int=0, downstream:int=0, start:str="Start", end:str="End", strand:str="Strand"):
strand_rm = False
if strand not in df.columns:
strand_rm = True
df[strand] = "+"
df.loc[df[strand] == "+", start] -= upstream
df.loc[df[strand] == "-", start] -= downstream
df.loc[df[strand] == "+", end] += downstream
df.loc[df[strand] == "-", end] += upstream
if strand_rm:
del df[strand]
return df
def _extend_ranges(gf,
                   min_upstream: int = 1000,
                   max_upstream: int = 100000,
                   min_downstream: int = 1000,
                   max_downstream: int = 100000,
                   gene_upstream: int = 5000,
                   gene_downstream: int = 0,
                   gene_scale_factor: float = 5.):
    """Annotate gene ranges with a length-based weight scale and two levels
    of strand-aware extension (gene-body extension plus a minimum window).

    Adds columns: gene_length, log_gene_scale, MinStart/MinEnd,
    interval and min_interval. Mutates and returns *gf*.

    NOTE(review): max_upstream / max_downstream are accepted but never
    used here — presumably meant to clamp the extension; confirm against
    the ArchR reference implementation.
    """
    import numpy as np
    gf["gene_length"] = gf["End"] - gf["Start"]
    # Duplicated indices (one gene split across rows): total the lengths
    # per index so every row of that gene carries the summed length.
    totals = gf.groupby(level=0).agg(gene_length=("gene_length", sum))
    gf["gene_length"] = totals.loc[gf.index.values, "gene_length"].values
    # Scale by inverse gene length, min-max normalised; the epsilon keeps
    # the smallest value strictly positive (avoids divide-by-zero later).
    inv_len = 1 / gf["gene_length"].values
    inv_len = (inv_len - np.min(inv_len)) / (np.max(inv_len) - np.min(inv_len)) + 1e-300
    # Map into [1, gene_scale_factor] on the log scale:
    # log1p((gsf - 1) * s) == log(1 + (gsf - 1) * s).
    gf["log_gene_scale"] = np.log1p((gene_scale_factor - 1) * inv_len)
    # First extend the gene body itself, then derive the minimum window
    # from the already-extended coordinates.
    gf = _extend_pre_ranges(gf, upstream=gene_upstream, downstream=gene_downstream)
    gf["MinStart"] = gf["Start"].values
    gf["MinEnd"] = gf["End"].values
    gf = _extend_pre_ranges(gf, upstream=min_upstream, downstream=min_downstream, start="MinStart", end="MinEnd")
    gf["interval"] = ["%s:%d-%d" % coords for coords in zip(gf["Chromosome"], gf["Start"], gf["End"])]
    gf["min_interval"] = ["%s:%d-%d" % coords for coords in zip(gf["Chromosome"], gf["MinStart"], gf["MinEnd"])]
    return gf
def estimate_features_archr(adata, feature_df,
                            min_upstream:int=1000,
                            max_upstream:int=100000,
                            min_downstream:int=1000,
                            max_downstream:int=100000,
                            gene_upstream:int=5000,
                            gene_downstream:int=0,
                            target_sum:int=None,
                            gene_scale_factor:float=5.,
                            peak_column:str=None, ## If not provided, will use peak index
                            feature_column:str=None,
                            var_column_tolerance:float=0.999, ### Tolerance used for determining if a feature should be kept, in case of weird things
                            distal:bool=True, ### Use nearest gene to a peak if unassigned
                            log1p:bool=False,
                            save_raw:bool=False,
                            layer:str=None):
    """ArchR-style feature activity estimation from peak accessibility.

    Assigns each peak to nearby features with a distance-decayed,
    gene-length-scaled weight, builds a sparse peak-by-feature weight
    matrix, and multiplies the peak count matrix by it to produce a new
    AnnData of per-feature accessibility scores (optionally normalised
    and log1p-transformed).

    Parameters:
        adata: peak-level AnnData; var rows are "chrom:start-end" peaks
            (or see peak_column).
        feature_df (pd.DataFrame): must contain Chromosome/Start/End
            columns; one row per feature (e.g. genes from a GTF).
        target_sum: passed to scanpy normalize_total; None/<=0 means
            median normalization.
        layer: adata layer to use instead of .X, if present.

    Returns:
        anndata.AnnData of shape (n_obs, n_features).

    NOTE(review): max_upstream/max_downstream are forwarded to
    _extend_ranges but appear unused there — confirm intended clamping.
    """
    import numpy as np
    import pandas as pd
    import scipy.sparse
    import pyranges
    import anndata
    import scanpy as sc
    from .timer import template
    sw = template()
    if not isinstance(feature_df, pd.DataFrame):
        raise ValueError("Feature_df is not a dataframe")
    if not np.all(np.isin(["Chromosome", "Start", "End"], feature_df.columns)):
        raise ValueError("Feature_df does not have ranges")
    if feature_column is not None:
        # Re-index features by the requested column (e.g. gene_id).
        feature_df.index = feature_df[feature_column].values
    with sw("Extending ranges"):
        gf = _extend_ranges(feature_df, min_upstream=min_upstream, min_downstream=min_downstream,
                            max_upstream=max_upstream, max_downstream=max_downstream,
                            gene_upstream=gene_upstream, gene_downstream=gene_downstream,
                            gene_scale_factor=gene_scale_factor)
        # gr: extended gene-body ranges; mingr: minimum-window ranges.
        gr = pyranges.from_dict({"Chromosome": gf["Chromosome"],
                                 "Start": gf["Start"],
                                 "End": gf["End"],
                                 "feature_index": gf.index.values,
                                 "feature_interval": gf["interval"].values})
        mingr = pyranges.from_dict({"Chromosome": gf["Chromosome"],
                                    "Start": gf["MinStart"],
                                    "End": gf["MinEnd"],
                                    "feature_index": gf.index.values,
                                    "feature_interval": gf["interval"].values})
    ##
    ## Now, get peak ranges (pr) from peak frame (pf)
    ##
    with sw("Extracting peak ranges"):
        if peak_column is None:
            pstr = adata.var_names.values
        else:
            pstr = adata.var[peak_column].values
        # Peaks are "chrom:start-end" strings; split into range columns.
        pf = pd.DataFrame([x.replace(":", "-", 1).split("-") for x in pstr], columns=["Chromosome", "Start", "End"], index=adata.var_names)
        pr = pyranges.from_dict({"Chromosome": pf["Chromosome"], "Start": pf["Start"], "End": pf["End"], "peak_name": pf.index.values})
    with sw("Calculating overlaps"):
        # iif: one row per unique feature interval, for fast lookups below.
        iif = gf.drop_duplicates("interval")
        iif.index = iif["interval"]
        ## Once peak ranges are gathered, find intersecting gene bodies:
        inter_df = pr.join(gr).df.loc[:, ["peak_name", "feature_index", "feature_interval"]]
        inter_df["Distance"] = 0
        ## Then, find genes with minimum upstream/downstream distance away
        min_df = pr.join(mingr).df.loc[:, ["peak_name", "feature_index", "feature_interval"]]
        ## diff. is accurate, unless overlapping intervals. Do not need to worry, as duplicates from inter_df will take care
        diff = pf.loc[min_df["peak_name"].values, ["Start", "Start", "End", "End"]].values.astype(int) - iif.loc[min_df["feature_interval"].values, ["Start", "End", "Start", "End"]].values.astype(int)
        min_df["Distance"] = np.abs(diff).min(1) + 1
        ## Finally, find distal. Only need nearest gene
        if distal:
            distance_df = pr.nearest(gr).df.loc[:, ["peak_name", "feature_index", "feature_interval", "Distance"]]
            ## Concat such that 1) prioritized intersections, then 2) minimum distance away, then 3) distal
            df = pd.concat([inter_df, min_df, distance_df]).drop_duplicates(["peak_name", "feature_index"])
        else:
            df = pd.concat([inter_df, min_df]).drop_duplicates(["peak_name", "feature_index"])
        # ArchR-style weight: exp(-1 - |distance|/5kb) scaled by gene size.
        df["weight"] = np.exp(-1 - np.abs(df["Distance"]) / 5000. + iif.loc[df["feature_interval"].values, "log_gene_scale"].values)
    with sw("Calculating accessibility"):
        if gf.index.duplicated().sum() > 0:
            ### Get columns that are the same across repeated indices
            nf = gf.groupby(level=0).nunique()
            gf = gf.loc[~gf.index.duplicated(keep="first"), nf.columns[(nf==1).mean() >= var_column_tolerance]]
        # Sparse (n_peaks x n_features) weight matrix from the triplets.
        S = scipy.sparse.csr_matrix((df["weight"].values,
                                     (pf.index.get_indexer(df["peak_name"].values),
                                      gf.index.get_indexer(df["feature_index"].values))),
                                    shape=(pf.shape[0], gf.shape[0]))
        if layer is not None and layer in adata.layers:
            X = adata.layers[layer]
        else:
            X = adata.X
        # Feature score = peak matrix @ weights; carry over obs/embeddings.
        gdata = anndata.AnnData(X.dot(S), obs=adata.obs, var=gf, dtype=np.float32, obsm=adata.obsm, obsp=adata.obsp,
                                uns={k: v for k, v in adata.uns.items() if k in ["neighbors", "files", "lsi", "pca", "umap", "leiden"]})
        if save_raw:
            gdata.layers["raw"] = gdata.X.copy()
        if target_sum is not None and target_sum > 0:
            sc.pp.normalize_total(gdata, target_sum=target_sum)
        else:
            print("Using median normalization")
            sc.pp.normalize_total(gdata)
        if log1p:
            sc.pp.log1p(gdata)
    return gdata
def estimate_genes_archr(adata, gtf:str,
                         min_upstream:int=1000,
                         max_upstream:int=100000,
                         min_downstream:int=1000,
                         max_downstream:int=100000,
                         gene_upstream:int=5000,
                         gene_downstream:int=0,
                         target_sum:int=None,
                         gene_scale_factor:float=5.,
                         peak_column:str=None, ## If not provided, will use peak index
                         feature_column:str="gene_id", ### If not provided, will use feature index
                         log1p:bool=True,
                         distal:bool=True,
                         save_raw:bool=False,
                         layer:str=None):
    """ArchR-style gene activity scores from a peak AnnData and a GTF.

    Reads gene records from *gtf*, delegates the weighting and
    accessibility computation to estimate_features_archr, then tidies the
    result: var is indexed by (unique-ified) gene names with a "gene_ids"
    column plus gene_length/strand annotations from the same GTF.

    Note: the original imported numpy, pandas, scipy.sparse, anndata and
    scanpy locally without using them; those dead imports were removed.

    Returns:
        anndata.AnnData of per-gene accessibility scores.
    """
    import pyranges
    from .timer import template
    sw = template()
    with sw("Reading GTF"):
        gf = pyranges.read_gtf(gtf).df
        gf = gf.loc[gf["Feature"] == "gene", :]
    if feature_column in gf.columns:
        # Index features by gene_id so downstream lookups are stable.
        gf.index = gf[feature_column].values.astype(str)
    gdata = estimate_features_archr(adata, feature_df=gf,
                                    min_upstream=min_upstream, min_downstream=min_downstream,
                                    max_upstream=max_upstream, max_downstream=max_downstream,
                                    gene_upstream=gene_upstream, gene_downstream=gene_downstream,
                                    target_sum=target_sum, gene_scale_factor=gene_scale_factor,
                                    peak_column=peak_column,
                                    feature_column=feature_column,
                                    log1p=log1p, layer=layer, save_raw=save_raw)
    # Keep only id/name, use (unique) gene names as var_names, and leave
    # the gene_id behind in the conventional "gene_ids" column.
    gdata.var = gdata.var.loc[:, ["gene_id", "gene_name"]]
    gdata.var_names = gdata.var["gene_name"].values
    gdata.var_names_make_unique()
    del gdata.var["gene_name"]
    gdata.var.columns = ["gene_ids"]
    add_gene_length(gdata.var, gtf)
    return gdata
def get_tss(tss: str):
    """Read a 6-column TSS BED file and collapse it to one row per gene.

    Returns a frame indexed by gene_id with the leftmost Start ("left"),
    rightmost End ("right") and a "chrom:left-right" interval string per
    (Chromosome, gene_id, strand) group.
    """
    import pandas as pd
    bed = pd.read_csv(tss, sep="\t", header=None,
                      names=["Chromosome", "Start", "End", "gene_id", "score", "strand"])
    collapsed = (bed.groupby(["Chromosome", "gene_id", "strand"])
                    .agg(left=("Start", "min"), right=("End", "max"))
                    .reset_index())
    collapsed["interval"] = collapsed["Chromosome"] + ":" + collapsed["left"].astype(str) + "-" + collapsed["right"].astype(str)
    collapsed.index = collapsed["gene_id"].values
    return collapsed
def add_interval(var, tss: str, inplace=True):
    """Attach a "chrom:left-right" interval string to each row of *var*.

    Intervals come from get_tss(tss), matched on the "gene_ids" column;
    genes missing from the TSS file get "NA". With inplace=False a
    pd.Series is returned instead of mutating *var*.
    """
    lookup = get_tss(tss)["interval"]
    intervals = [lookup.get(gene, "NA") for gene in var["gene_ids"]]
    if not inplace:
        import pandas as pd
        return pd.Series(intervals, index=var.index, name="interval")
    var["interval"] = intervals
def add_gene_length(var, gtf: str = None, inplace=True):
    """Annotate *var* with gene_length and strand looked up from a GTF.

    Matches on the "gene_ids" column; genes absent from the GTF get
    length -1 and strand "*". With inplace=False a copy restricted to the
    two new columns is returned; otherwise *var* is mutated in place.
    """
    import pandas as pd
    import pyranges
    from .timer import template
    sw = template()
    with sw("Reading GTF"):
        genes = pyranges.read_gtf(gtf).df
    genes = genes.loc[genes["Feature"] == "gene", :]
    genes["gene_length"] = genes["End"] - genes["Start"]
    genes.index = genes["gene_id"].values
    lengths = [genes["gene_length"].get(g, -1) for g in var["gene_ids"]]
    strands = [genes["Strand"].get(g, "*") for g in var["gene_ids"]]
    if inplace:
        var["gene_length"] = lengths
        var["strand"] = strands
    else:
        out = var.copy()
        out["gene_length"] = lengths
        out["strand"] = strands
        return out.loc[:, ["gene_length", "strand"]]
def add_gene_info(var, gene_info: str = None, inplace=True):
    """Add a STAR geneInfo.tab file to .var.

    The file's first line (the gene count) is skipped; column 0 is the
    gene id and column 2 the gene type. Genes not present in the file get
    "NA". With inplace=False a pd.Series is returned instead of mutating
    *var*.
    """
    import pandas as pd
    info = pd.read_csv(gene_info, sep="\t", skiprows=[0], header=None)
    info.index = info[0].values
    types_col = info[2]
    gene_types = [types_col.get(g, "NA") for g in var["gene_ids"]]
    if not inplace:
        return pd.Series(gene_types, index=var.index, name="gene_type")
    var["gene_type"] = gene_types
| KellisLab/benj | benj/gene_estimation.py | gene_estimation.py | py | 11,891 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "numpy.min",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.log1p",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "timer.template",
"line_number": 6... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.