seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14794717807 | import os
# force TF to use CPU instead of GPU (sadly my discrete card is not support the latest CUDA version)
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import gym
from gym import envs
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import time
# build DQN model from keras layers
def dqn_model(input_layer_size, output_layer_size, learning_rate=0.001):
    """Build and compile a fully-connected DQN.

    :param input_layer_size: number of observation features fed into the network
    :param output_layer_size: number of discrete actions (one Q-value per action)
    :param learning_rate: Adam learning rate used when compiling the model
    :return: compiled Keras Sequential model with MSE loss
    """
    net = Sequential()
    # two hidden ReLU layers; linear output so Q-values stay unbounded
    for layer in (Dense(128, input_dim=input_layer_size, activation='relu'),
                  Dense(64, activation='relu'),
                  Dense(output_layer_size, activation='linear')):
        net.add(layer)
    net.compile(loss='mse', optimizer=Adam(lr=learning_rate))
    return net
if __name__ == "__main__":
    # list all registered gym environments (debug aid)
    print(envs.registry.all())
    env = gym.make('LunarLander-v2')
    print(env.action_space)
    print(env.observation_space)
    # environment dimensions taken from the env itself
    OBSERVATION_SPACE_SIZE = env.observation_space.shape[0]
    ACTION_SPACE_SIZE = env.action_space.n
    model = dqn_model(OBSERVATION_SPACE_SIZE, ACTION_SPACE_SIZE)
    # training hyper parameters
    NUMBER_OF_EPISODES = 1000
    MAX_NUMBER_OF_STEPS_IN_EPISODE = 750
    BATCH_SIZE = 64
    DISCOUNT_RATE = 0.99
    # epsilon-greedy exploration settings
    exploration_rate = 1.0
    EXPLORATION_RATE_MINIMUM = 0.1
    # NOTE(review): linear decay step, only used by the commented-out decay further below
    EXPLORATION_RATE_CHANGE = (exploration_rate - EXPLORATION_RATE_MINIMUM) / ((NUMBER_OF_EPISODES / 2) * 100)
    # replay buffer kept as parallel numpy arrays (grows without bound)
    obs_arr = np.array([])
    next_obs_arr = np.array([])
    actions_arr = np.array([], dtype=int)
    rewards_arr = np.array([])
    done_arr = np.array([])
    total_rewards = np.array([])  # per-episode reward history
    for episode_idx in range(NUMBER_OF_EPISODES):
        start_time = time.time()
        # Restart the environment to start a new episode
        obs = env.reset()
        obs = np.reshape(obs, (1, OBSERVATION_SPACE_SIZE))
        episode_total_reward = 0
        explore_count = 0
        exploit_count = 0
        for step_idx in range(MAX_NUMBER_OF_STEPS_IN_EPISODE):
            # render a view with the environment current state
            # env.render()
            # choose action greedily (exploration-exploitation trade-off)
            if np.random.random() < exploration_rate:
                # explore: pick a random action
                explore_count += 1
                action = env.action_space.sample()
            else:
                # exploit: ask DQN to make a prediction of which action is preferred here
                exploit_count += 1
                prediction = model.predict(obs)[0]
                # print("prediction: ", prediction)
                action = np.argmax(prediction)
            # do the action
            next_obs, reward, is_done, info = env.step(action)
            next_obs = np.reshape(next_obs, (1, OBSERVATION_SPACE_SIZE))
            episode_total_reward += reward
            # store the transition in the replay buffer
            if len(obs_arr) == 0:
                obs_arr = np.array(obs)
                next_obs_arr = np.array(next_obs)
            else:
                obs_arr = np.vstack((obs_arr, obs))
                next_obs_arr = np.vstack((next_obs_arr, next_obs))
            actions_arr = np.append(actions_arr, action)
            rewards_arr = np.append(rewards_arr, reward)
            done_arr = np.append(done_arr, is_done)
            obs = next_obs
            # train on a random minibatch once the buffer is large enough
            if len(obs_arr) >= BATCH_SIZE:
                # print(episode_idx, ":", step_idx)
                indexes = np.random.randint(len(done_arr), size=BATCH_SIZE)
                batch_obs = np.squeeze(obs_arr[indexes])
                batch_next_obs = np.squeeze(next_obs_arr[indexes])
                batch_action = actions_arr[indexes]
                batch_reward = rewards_arr[indexes]
                batch_done = done_arr[indexes]
                # Q-learning targets: r + gamma * max_a' Q(s', a'); the (1 - done)
                # factor drops the future term for terminal transitions
                all_targets = model.predict_on_batch(batch_obs)
                targets = batch_reward + DISCOUNT_RATE * (np.amax(model.predict_on_batch(batch_next_obs), axis=1)) * (1 - batch_done)
                # overwrite the target only for the action that was actually taken
                all_targets[[np.array([i for i in range(BATCH_SIZE)])], batch_action] = targets
                # train the model
                model.fit(batch_obs, all_targets, verbose=0)
            # change exploration rate at every step until it becomes less or eq than exploration_rate_min
            # if exploration_rate > EXPLORATION_RATE_MINIMUM:
            #     exploration_rate -= EXPLORATION_RATE_CHANGE
            # multiplicative epsilon decay, floored at 0.01
            if exploration_rate > 0.01:
                exploration_rate *= 0.99
            # print("ep#%d:step#%d, obs=%s, reward=%s, done=%s" % (episode_idx, step_idx, observation, reward, is_done))
            if is_done:
                break
        total_rewards = np.append(total_rewards, episode_total_reward)
        # episode statistics
        print("----------------------")
        print("Episode %d reward: %d" % (episode_idx, episode_total_reward))
        print("Last step: %d" % (step_idx))
        print("Exploration rate: %f" % (exploration_rate))
        print("Explore count: %d" % (explore_count))
        print("Exploit count: %d" % (exploit_count))
        print("Time for episode: ", (time.time() - start_time), " sec.")
        # lunar lander problem is considered solved when total reward is 200+ points;
        # so we check if the last episodes was successful, the training would be stopped
        if np.mean(total_rewards[-100:]) >= 200:
            break
    # plot training progress
    x = list(range(len(total_rewards)))
    plt.plot(x, total_rewards)
    plt.xlabel("Episode")
    plt.ylabel("Total reward")
    plt.show()
    # save the model for future testing
    model.save('model.h5')
    # close gym environment
    env.close()
| Ielay/ml-lunar-lander-problem-rl | src/train_model.py | train_model.py | py | 5,554 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "keras.models.Sequential",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "keras.layer... |
33460982801 | import bpy
import os
from io_scs_tools.utils.printout import lprint
from io_scs_tools.utils import get_scs_globals as _get_scs_globals
def strip_sep(path):
    """Strip path separators (slashes and backslashes) from both ends of the given path.

    Separators are stripped in a single pass so that mixed runs such as
    "/\\path\\/" are fully removed, which the previous two-step strip
    (backslashes first, then slashes) missed.

    :param path: path to strip separators from
    :type path: str
    :return: new stripped path
    :rtype: str
    """
    # str.strip treats the argument as a set of characters, so this removes
    # any run of "/" and "\" characters from both ends at once
    return path.strip("\\/")
def get_filename(path, with_ext=True):
    """Return the file name portion of *path*, optionally without its extension.

    The tail after the last separator ("os.sep", falling back to "/") is taken
    as the file name.

    :param path: path with file name
    :type path: str
    :param with_ext: return file name with extension or not
    :type with_ext: bool
    :return: file name with or without extension
    :rtype: str
    """
    # prefer the platform separator, falling back to a forward slash
    sep_idx = path.rfind(os.sep)
    if sep_idx == -1:
        sep_idx = path.rfind("/")
    name = path[sep_idx + 1:]
    if not with_ext:
        dot_idx = name.rfind(".")
        # a dot at index 0 marks a hidden file, not an extension
        if dot_idx > 0:
            name = name[:dot_idx]
    return name
def repair_path(filepath):
    """Convert a Blender file path into a normalized absolute path.

    Empty input is returned unchanged.

    :param filepath: Blender style file path (may start with "//")
    :type filepath: str
    :return: normalized absolute path, or the unchanged empty string
    :rtype: str
    """
    if filepath == '':
        return filepath
    # expand Blender's "//" prefix into an absolute location
    absolute = bpy.path.abspath(filepath, start=None, library=None)
    # collapse redundant components like "...\dir\dir\..\dir\..."
    return os.path.normpath(absolute)
def relative_path(base_path, path):
    """Return *path* expressed relative to *base_path* when possible.

    Falls back to the repaired absolute path when no base path is set or the
    two paths live on different mount points.

    :param base_path: base directory the result should be relative to
    :type base_path: str
    :param path: path to convert
    :type path: str
    :return: relative path prefixed with "//", or the absolute path on failure
    :rtype: str
    """
    fixed_base = repair_path(base_path)
    fixed_path = repair_path(path)
    if len(fixed_base) <= 2:
        lprint("W No base path specified! It's not possible to create a relative path! Returning absolute path (%s)", (fixed_path,))
        return fixed_path
    # presuming that equality of the first two chars means same mount point
    if not startswith(fixed_path[:2], fixed_base[:2]):
        lprint("W Not possible to create a relative path! Returning absolute path (%s)", (fixed_path,))
        return fixed_path
    rel_path = os.path.relpath(fixed_path, fixed_base).replace("\\", "/")
    if not rel_path.startswith("//"):
        rel_path = "//" + rel_path
    return rel_path
def get_abs_path(path_in, subdir_path='', is_dir=False, skip_mod_check=False):
    """Gets absolute path to the "SCS Project Base Path" if given path is relative (starts with: "//"),
    otherwise original path is returned.
    If relative path is existing and valid, it returns the absolute path, otherwise None.
    Optionally a subdir_path can be provided, which will be added to the 'SCS Project Base Path'.
    If skipping of mod check is not specified then function will also try to look in two
    parent base directories, in the case "SCS Project Base Path" is currently set to mod/dlc package.
    :param path_in: Absolute or relative path to current 'SCS Project Base path'
    :type path_in: str
    :param subdir_path: Additional subdirs can be provided, they will be added to the 'SCS Project Base Path'
    :type subdir_path: str
    :param is_dir: flag specifying if given path should be directory
    :type is_dir: bool
    :param skip_mod_check: flag specifying if check for dlc/mod should be skipped
    :type skip_mod_check: bool
    :return: Absolute path or None
    :rtype: str
    """
    root_path = _get_scs_globals().scs_project_path
    if subdir_path != '':
        root_path = os.path.join(root_path, subdir_path)
    if path_in.startswith("//"):
        # relative path: can only be resolved when a usable base path is set
        if len(root_path) > 2:
            result = os.path.join(root_path, path_in[2:])
        else:
            result = None
    else:
        # absolute path: returned as-is (existence fallbacks below may still apply)
        result = path_in
    existance_check = os.path.isdir if is_dir else os.path.isfile
    # mod/dlc fallbacks: when the resolved path does not exist, retry against
    # "../base" and "../../base"; skip_mod_check=True in the recursive calls
    # prevents further recursion
    if result is not None and not existance_check(result) and not skip_mod_check:
        result = get_abs_path(path_in, subdir_path="../base", is_dir=is_dir, skip_mod_check=True)
    if result is not None and not existance_check(result) and not skip_mod_check:
        result = get_abs_path(path_in, subdir_path="../../base", is_dir=is_dir, skip_mod_check=True)
    # finally if file/dir not found return correct abs path, not the one from parent dirs
    if result is not None and not existance_check(result) and not skip_mod_check:
        result = get_abs_path(path_in, subdir_path=subdir_path, is_dir=is_dir, skip_mod_check=True)
    return result
def is_valid_shader_texture_path(shader_texture):
    """Tell whether the given SCS texture path points to an existing file.

    :param shader_texture: SCS texture path, can be absolute or relative
    :type shader_texture: str
    :return: True if there is valid Shader Texture file, otherwise False
    :rtype: bool
    """
    if shader_texture == "":
        return False
    if shader_texture.startswith("//"):
        # relative path: resolve it against the SCS Project Base Path first
        abs_path = get_abs_path(shader_texture)
        return bool(abs_path and os.path.isfile(abs_path))
    # absolute path: check it directly
    return os.path.isfile(shader_texture)
def is_valid_shader_presets_library_path():
    """Return True when the configured Shader Presets "*.txt" file exists,
    otherwise False."""
    presets_path = _get_scs_globals().shader_presets_filepath
    if presets_path == "":
        return False
    if presets_path.startswith("//"):
        # relative path: resolve against the SCS Project Base Path
        abs_path = get_abs_path(presets_path)
        return bool(abs_path and os.path.isfile(abs_path))
    # absolute path
    return os.path.isfile(presets_path)
def is_valid_trigger_actions_rel_path():
    """Return True when the Trigger Actions "*.sii" file exists, otherwise False."""
    abs_path = get_abs_path(_get_scs_globals().trigger_actions_rel_path)
    # get_abs_path may return None when the relative path can't be resolved
    return bool(abs_path and os.path.isfile(abs_path))
def is_valid_sign_library_rel_path():
    """Return True when the Sign Library "*.sii" file exists, otherwise False."""
    abs_path = get_abs_path(_get_scs_globals().sign_library_rel_path)
    # get_abs_path may return None when the relative path can't be resolved
    return bool(abs_path and os.path.isfile(abs_path))
def is_valid_tsem_library_rel_path():
    """Return True when the Traffic Semaphore Profile Library "*.sii" file exists,
    otherwise False."""
    abs_path = get_abs_path(_get_scs_globals().tsem_library_rel_path)
    # get_abs_path may return None when the relative path can't be resolved
    return bool(abs_path and os.path.isfile(abs_path))
def is_valid_traffic_rules_library_rel_path():
    """Return True when the Traffic Rules Library "*.sii" file exists, otherwise False."""
    abs_path = get_abs_path(_get_scs_globals().traffic_rules_library_rel_path)
    # get_abs_path may return None when the relative path can't be resolved
    return bool(abs_path and os.path.isfile(abs_path))
def is_valid_hookup_library_rel_path():
    """Return True when the Hookup Library directory holds at least one "*.sii"
    file (searched recursively), otherwise False."""
    hookup_dir = get_abs_path(_get_scs_globals().hookup_library_rel_path, is_dir=True)
    if not hookup_dir:
        return False
    for _root, _dirs, files in os.walk(hookup_dir):
        if any(name.endswith(".sii") for name in files):
            return True
    return False
def is_valid_matsubs_library_rel_path():
    """Return True when the Material Substance Library "*.db" file exists,
    otherwise False."""
    abs_path = get_abs_path(_get_scs_globals().matsubs_library_rel_path)
    # get_abs_path may return None when the relative path can't be resolved
    return bool(abs_path and os.path.isfile(abs_path))
def get_addon_installation_paths():
    """Return all detected installation directories of the io_scs_tools addon.

    Warnings are printed when none or more than one installation is found.

    :return: existing 'io_scs_tools' directories inside Blender's script paths
    :rtype: list[str]
    """
    candidate_dirs = []
    for scripts_dir in bpy.utils.script_paths():
        for addons_subdir in ('addons', 'addons_contrib'):
            candidate_dirs.append(os.path.join(scripts_dir, addons_subdir, 'io_scs_tools'))
    # keep only the candidates that actually exist on disk
    scs_installation_dirs = [location for location in candidate_dirs if os.path.isdir(location)]
    if len(scs_installation_dirs) == 0:
        lprint('''\n\nE The installation directory of "SCS Blender Tools" couldn't be detected! (Shouldn't happen!)\n''')
    elif len(scs_installation_dirs) > 1:
        lprint('\n\nW More than one installation of "SCS Blender Tools" detected!\n\t Please remove redundant installations so the only one '
               'remain.\n')
    return scs_installation_dirs
def get_shader_presets_filepath():
    """Return the path to "shader_presets.txt" inside the addon installation.

    When the file doesn't exist, an empty string is returned and Shader
    Presets won't be available.

    :return: path to the presets file, or '' when missing
    :rtype: str
    """
    for install_dir in get_addon_installation_paths():
        candidate = os.path.join(install_dir, 'shader_presets.txt')
        if os.path.isfile(candidate):
            return candidate
    return ''
def get_texture_path_from_tobj(tobj_filepath, raw_value=False):
    """Get absolute path of texture from given tobj filepath.
    If raw value is requested returned path is direct value written in TOBJ.
    NOTE: there is no safety check if file exists.
    :param tobj_filepath: absolute tobj file path
    :type tobj_filepath: str
    :param raw_value: flag for indicating if texture path shall be returned as it's written in TOBJ
    :type raw_value: bool
    :return: absolute texture file path if found or None
    :rtype: str | None
    """
    # local import -- presumably to avoid a circular import with the containers package; TODO confirm
    from io_scs_tools.internals.containers.tobj import TobjContainer
    container = TobjContainer.read_data_from_file(tobj_filepath, skip_validation=raw_value)
    tobj_dir, tobj_filename = os.path.split(tobj_filepath)
    if container is None:
        return None
    if raw_value:
        # return the first map name exactly as written in the TOBJ
        return container.map_names[0]
    # a leading "/" marks a path relative to the SCS Project Base Path
    if container.map_names[0][0] == "/":
        return get_abs_path("//" + container.map_names[0][1:])
    # otherwise the texture path is relative to the TOBJ's own directory
    return os.path.join(tobj_dir, container.map_names[0])
def get_texture_extens_and_strip_path(texture_path):
    """Collect supported texture extensions and strip any of them from the given path.

    When the path ends with a known extension, that extension is additionally
    inserted at the front of the returned extension list, so callers try it first.

    :param texture_path: shader texture raw path value
    :type texture_path: str
    :return: list of extensions and stripped path as tuple
    :rtype: tuple[list[str], str]
    """
    extensions = [".tobj", ".tga", ".png"]
    # endswith is the safest check here because of possible multiple extensions
    for known_ext in (".tobj", ".tga", ".png"):
        if texture_path.endswith(known_ext):
            extensions.insert(0, texture_path[-len(known_ext):])
            texture_path = texture_path[:-len(known_ext)]
            break
    return extensions, texture_path
def get_scs_texture_str(texture_string):
    """Get texture string as presented in SCS files: "/material/environment/vehicle_reflection"
    without any file extensions. Input path can also have texture object extension or supported images extensions.
    Path will be searched and returned in this order:
    1. relative path on current SCS Project Base Path
    2. relative path on parent base dirs of current SCS Project Base Path in the case of mod/dlc
    3. find absolute file path
    4. return unchanged texture string path
    :param texture_string: texture string for which texture should be found e.g.: "/material/environment/vehicle_reflection"
    :type texture_string: str
    :return: relative path to texture object or absolute path to texture object or uncanged texture string
    :rtype: str
    """
    scs_project_path = _get_scs_globals().scs_project_path
    orig_texture_string = texture_string
    # remove any directory separators left overs from different platform
    texture_string = texture_string.replace("/", os.sep).replace("\\", os.sep)
    extensions, texture_string = get_texture_extens_and_strip_path(texture_string)
    # if texture string starts with scs project path we can directly strip of project path
    if startswith(texture_string, scs_project_path):
        texture_string = texture_string[len(scs_project_path):]
    else:  # check if texture string came from base project while scs project path is in dlc/mod folder
        # first find longest matching path prefix between project path and texture string
        i = 1
        while startswith(scs_project_path, texture_string[:i]) and i < len(texture_string):
            i += 1
        # now check if provided texture string is the same as:
        # current scs project path + one or two directories up + non matched path of the part
        for infix in ("..", ".." + os.sep + ".."):
            nonmatched_path_part = texture_string[i - 2:]
            modif_texture_string = os.path.join(scs_project_path, infix + nonmatched_path_part)
            # if one or two directories up is the same path as texture string
            # and non matched path part is starting with /base we got a hit:
            # resulting relative path is non matched path part with stripped "/base" start
            if is_samepath(modif_texture_string, texture_string) and startswith(nonmatched_path_part, os.sep + "base"):
                texture_string = nonmatched_path_part[5:]
                break
    # check for relative TOBJ, TGA, PNG
    for ext in extensions:
        texture_path = get_abs_path("//" + texture_string.strip(os.sep) + ext)
        if texture_path and os.path.isfile(texture_path):
            return "//" + texture_string.replace(os.sep, "/").strip("/") + ext
    # check for absolute TOBJ, TGA, PNG
    for ext in extensions:
        texture_path = get_abs_path(texture_string + ext, skip_mod_check=True)
        if texture_path and os.path.isfile(texture_path):
            return texture_string.replace(os.sep, "/") + ext
    # nothing found: fall back to the unchanged input value
    return orig_texture_string
def get_tobj_path_from_shader_texture(shader_texture, check_existance=True):
    """Get TOBJ path derived from a shader texture value.

    :param shader_texture: shader texture raw path value
    :type shader_texture: str
    :param check_existance: flag indicating if tobj path should be also checked for existance
    :type check_existance: bool
    :return: TOBJ absolute path or None if not found
    :rtype: str | None
    """
    # endswith is the safest check here because of possible multiple extensions
    if shader_texture.endswith(".tobj"):
        tobj_path = shader_texture
    elif shader_texture.endswith((".tga", ".png")):
        tobj_path = shader_texture[:-4] + ".tobj"
    else:
        tobj_path = shader_texture + ".tobj"
    # NOTE: if there is no existence check then we also shouldn't check for mods file system structure
    tobj_path = get_abs_path(tobj_path, skip_mod_check=not check_existance)
    if not check_existance:
        return tobj_path
    if tobj_path and os.path.isfile(tobj_path):
        return tobj_path
    return None
def get_skeleton_relative_filepath(armature, directory, default_name):
    """Get skeleton path relative to the given directory, for linking
    skeletons in PIM and PIA files.

    :param armature: armature object which will be used as scs skeleton
    :type armature: bpy.types.Object
    :param directory: directory from which relative path of skeleton should be gotten
    :type directory: str
    :param default_name: if custom name is empty this name will be used as the name of pis file
    :type default_name: str
    :return: relative path to predicted PIS file of given armature
    :rtype: str
    """
    custom_dir = armature.scs_props.scs_skeleton_custom_export_dirpath
    custom_name = armature.scs_props.scs_skeleton_custom_name
    skeleton_dir = ""
    if custom_dir != "":
        if custom_dir.startswith("//"):
            # custom dir is relative to the project base -> re-express it relative to *directory*
            base_path = _get_scs_globals().scs_project_path
            skeleton_dir = os.path.relpath(os.path.join(base_path, custom_dir[2:]), directory)
        else:
            lprint("E Custom skeleton export path is not relative to SCS Project Base Path.\n\t " +
                   "Custom path will be ignored, which might lead to wrongly linked skeleton file inside PIM and PIA files.")
    pis_name = (custom_name if custom_name != "" else default_name) + ".pis"
    return os.path.join(skeleton_dir, pis_name)
def get_animations_relative_filepath(scs_root, directory):
    """Get animations export path relative to the given directory, for linking
    animations in PIM and PIA files.

    :param scs_root: scs root object of this animation
    :type scs_root: bpy.types.Object
    :param directory: directory from which relative path of animation should be gotten
    :type directory: str
    :return: relative animations path; "" when no custom path applies; None when
        the custom path is not relative to the project base
    :rtype: str | None
    """
    if not scs_root.scs_props.scs_root_object_allow_anim_custom_path:
        return ""
    custom_dir = scs_root.scs_props.scs_root_object_anim_export_filepath
    if custom_dir == "":
        return ""
    if not custom_dir.startswith("//"):
        # custom path is not relative to the project base -> signal the problem with None
        return None
    return os.path.relpath(os.path.join(_get_scs_globals().scs_project_path, custom_dir[2:]), directory)
def get_global_export_path():
    """Get the global export path.

    If the default export path is empty and the blend file is saved inside the
    current scs project path, the blend file directory is returned; otherwise
    the scs project path combined with the default export path.

    :return: global export path defined by directory of saved blend file and default export path from settings
    :rtype: str
    """
    scs_project_path = _get_scs_globals().scs_project_path
    blend_in_base = bpy.data.filepath != "" and startswith(bpy.data.filepath, scs_project_path)
    default_export_path = bpy.context.scene.scs_props.default_export_filepath
    # when no default export path is set, fall back to the saved blend file location
    if default_export_path == "" and blend_in_base:
        return os.path.dirname(bpy.data.filepath)
    return os.path.join(scs_project_path, default_export_path.strip("//"))
def get_custom_scs_root_export_path(root_object):
    """Get the custom export directory for the given SCS Root Object.

    If the custom export path is empty and the blend file is saved inside the
    current scs project path, the blend file directory is used; otherwise the
    scs project path combined with the custom scs root export path.

    :param root_object: scs root object
    :type root_object: bpy.types.Object
    :return: custom export directory path of given SCS Root Object; None if custom export for SCS Root is disabled
    :rtype: str | None
    """
    if not root_object.scs_props.scs_root_object_allow_custom_path:
        return None
    scs_project_path = _get_scs_globals().scs_project_path
    root_export_path = root_object.scs_props.scs_root_object_export_filepath
    # when no custom path is set, fall back to the saved blend file location
    if root_export_path == "" and bpy.data.filepath != "" and startswith(bpy.data.filepath, scs_project_path):
        return os.path.dirname(bpy.data.filepath)
    return os.path.join(scs_project_path, root_export_path.strip("//"))
def get_all_infixed_file_paths(filepath, include_given_path=True):
    """Collect sibling files that add one dot-free infix to the given file name.

    A candidate matches when it shares the given file's stem and extension and
    contains exactly two dots (stem + infix + extension).

    :param filepath: absolute filepath which shall be checked for any infixed files
    :type filepath: str
    :param include_given_path: if True given file path will be included in returning list otherwise no
    :type include_given_path: bool
    :return: list of all infixed files; optionally given filepath can be added to result list too
    :rtype: list[str]
    """
    results = [filepath] if include_given_path else []
    directory, filename = os.path.split(filepath)
    # if the original directory doesn't exist there is nothing to search
    if not os.path.isdir(directory):
        return results
    dot_idx = filename.rfind(".")
    prefix = filename[:dot_idx]
    postfix = filename[dot_idx:]
    for candidate in os.listdir(directory):
        # the given file itself is never treated as an infixed variant
        if candidate == filename:
            continue
        if candidate.startswith(prefix) and candidate.endswith(postfix) and candidate.count(".") == 2:
            results.append(os.path.join(directory, candidate))
    return results
def startswith(path1, path2):
    """Check whether the first given path starts with the second one.

    Both paths are fully normalized first, which also unifies the Windows
    drive letter case.

    :param path1: first path
    :type path1: str
    :param path2: second path
    :type path2: str
    :return: True if path1 starts with path2; False otherwise
    :rtype: bool
    """
    return full_norm(path1).startswith(full_norm(path2))
def is_samepath(path1, path2):
    """Check whether two paths point at the same location.

    Both paths are fully normalized first, which also unifies the Windows
    drive letter case.

    :param path1: first path
    :type path1: str
    :param path2: second path
    :type path2: str
    :return: True if both paths are equal after normalization; False otherwise
    :rtype: bool
    """
    return full_norm(path1) == full_norm(path2)
def full_norm(path1):
    """Return the fully normalized form of the given path.

    Combines ``os.path.normpath`` with ``os.path.normcase``, so on Windows
    drive letter case differences are ignored as well.

    :param path1: path
    :type path1: str
    :return: normalized path
    :rtype: str
    """
    return os.path.normcase(os.path.normpath(path1))
def readable_norm(path):
    """Normalize a path into a nice human readable form.

    Backslashes are converted to forward slashes so that the output looks the
    same on Windows and POSIX systems.

    :param path: path to normalize
    :type path: str
    :return: normalized path
    :rtype: str
    """
    return os.path.normpath(path).replace("\\", "/")
| paypink/BlenderTools | addon/io_scs_tools/utils/path.py | path.py | py | 24,096 | python | en | code | null | github-code | 1 | [
{
"api_name": "os.sep",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "bpy.path.abspath",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "bpy.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "os.path.normpath",
"li... |
9519485154 | from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
from ..models import Posts
from django.db.models import Q, Count
def index(request, category=None):
    """List posts of the given category (or all posts), filtered by an optional
    search keyword and paginated 10 per page."""
    page = request.GET.get('page', '1')  # requested page number (?page=), defaults to 1
    kw = request.GET.get('kw', '')  # search keyword
    # map URL category names to their category ids
    category_ids = {'freeboard': 1, 'diary': 2, 'QnA': 3}
    if category in category_ids:
        posts_list = Posts.objects.filter(category_id=category_ids[category]).order_by('-create_date')
    else:
        posts_list = Posts.objects.order_by('-create_date')
    if kw:
        # icontains -> case-insensitive match over subject, content, replies and authors
        posts_list = posts_list.filter(
            Q(subject__icontains=kw) |
            Q(content__icontains=kw) |
            Q(reply__content__icontains=kw) |
            Q(reply__author__username__icontains=kw) |
            Q(author__username__icontains=kw)
        ).distinct()  # drop duplicates produced by the reply joins
    paginator = Paginator(posts_list, 10)  # 10 posts per page
    page_obj = paginator.get_page(page)
    context = {'category': category, 'posts_list': page_obj, 'page': page, 'kw': kw}
    return render(request, 'main/posts_list.html', context)
def detail(request, posts_id):
    """Show a single post together with its replies, sorted by the requested
    mode and paginated 10 per page."""
    posts = get_object_or_404(Posts, pk=posts_id)
    page = request.GET.get('page', '1')
    kw = request.GET.get('kw', 'first')  # reply ordering mode
    if kw == 'voter':
        # most voted replies first
        reply_qs = posts.reply_set.all().annotate(num_votes=Count('voter')).order_by('-num_votes')
    elif kw == 'latest':
        # newest replies first
        reply_qs = posts.reply_set.all().order_by('-id')
    else:
        # default: registration order
        reply_qs = posts.reply_set.all()
    paginator = Paginator(reply_qs, 10)
    page_obj = paginator.get_page(page)
    context = {'posts': posts, 'reply_list': page_obj, 'page': page, 'kw': kw}
    return render(request, 'main/posts_detail.html', context)
{
"api_name": "models.Posts.objects.filter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.Posts.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "models.Posts",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "mo... |
14247111818 | from typing import Text
import requests
import json
import pprint as pp
from datetime import datetime
import re
def get_suggestions(search_string: str):
    """Query the ArcGIS geocoder suggest endpoint for address completions.

    :param search_string: free-form address text to complete
    :return: suggestion entries whose text mentions Curitiba
    """
    base_url = 'https://utility.arcgis.com/usrsvcs/servers/b89ba3a68c664268b9bdea76948b4f11/rest/services/World/GeocodeServer/suggest'
    params = {
        'f': 'json',
        'text': search_string,
        'maxSuggestions': 60,
        'countryCode': 'BRA'
    }
    response = json.loads(requests.get(base_url, params=params).content)
    # keep only suggestions located in Curitiba
    return [entry for entry in response['suggestions'] if 'Curitiba' in entry['text']]
def find_adress_candidates(single_line: str):
    """Query the ArcGIS geocoder for address candidates matching *single_line*.

    :param single_line: single-line address text (e.g. a suggestion from the suggest endpoint)
    :return: decoded JSON response with up to 6 candidates
    """
    base_url = 'https://utility.arcgis.com/usrsvcs/servers/b89ba3a68c664268b9bdea76948b4f11/rest/services/World/GeocodeServer/findAddressCandidates'
    query_data = {
        'f': 'json',
        # request output coordinates in spatial reference wkid 102100 (Web Mercator)
        'outSR': json.dumps({
            'wkid': 102100
        }),
        'outFields': '*',
        'countryCode': 'BRA',
        'maxLocations': 6,
        'SingleLine': single_line
    }
    req = requests.get(base_url, params=query_data)
    res = json.loads(req.content)
    return res
def get_codope(geometry):
    """Return the "codope" attribute of the supply region containing *geometry*.

    :param geometry: esriGeometryPoint dict used as the spatial query filter
    :return: codope of the first matching feature
        (NOTE(review): raises IndexError when no feature matches -- not handled here)
    """
    base_url = 'https://services1.arcgis.com/46Oage49MS2a3O6A/arcgis/rest/services/Mapa_Rodizio_Abastecimento_RMC_View/FeatureServer/1/query'
    query_data = {
        'f': 'json',
        'returnGeometry': False,
        'geometryType': 'esriGeometryPoint',
        'outFields': '*',
        'geometry': json.dumps(geometry)
    }
    req = requests.get(base_url, params=query_data)
    res = json.loads(req.content)
    # take the codope of the first (and expected only) matching feature
    codope = res['features'][0]['attributes']['codope']
    return codope
def get_by_codope(codope: str):
    """Fetch water-supply restriction features matching the given CODOPE.

    :param codope: operation code to filter by
    :return: decoded JSON response of the feature query
    """
    base_url = 'https://services1.arcgis.com/46Oage49MS2a3O6A/arcgis/rest/services/Mapa_Rodizio_Abastecimento_RMC_View/FeatureServer/2/query'
    params = {
        'f': 'json',
        'returnGeometry': False,
        'outFields': '*',
        'where': "(CODOPE = '{}')".format(codope)
    }
    reply = requests.get(base_url, params=params)
    return json.loads(reply.content)
def print_time_stamps(query_data):
    """Print the INICIO / NORMALIZACAO / RETOMADA timestamps of every feature.

    The attribute values are epoch milliseconds and are converted to local
    datetimes before printing.

    :param query_data: decoded feature-query response holding a 'features' list
    """
    for feature in query_data['features']:
        attrs = feature['attributes']
        ts_retomada = datetime.fromtimestamp(attrs['RETOMADA'] / 1000)
        ts_norm = datetime.fromtimestamp(attrs['NORMALIZACAO'] / 1000)
        ts_init = datetime.fromtimestamp(attrs['INICIO'] / 1000)
        print('INICIO: {} / NORMALIZACAO: {} / RETOMADA: {}'.format(ts_init, ts_norm, ts_retomada))
if __name__ == '__main__':
    # look up address suggestions for a street and keep the first one
    suggestions = get_suggestions(search_string='Jose Clementino Bettega')
    # pp.pprint(suggestions)
    single_line = suggestions[0]['text']
    res = find_adress_candidates(single_line=single_line)
    # take the best candidate's location and tag it with the spatial reference
    # the candidates were requested in (wkid 102100)
    geometry = res['candidates'][0]['location']
    geometry['spatialReference'] = {
        'wkid': 102100
    }
    # resolve the supply-region operation code for that point and query its data
    codope = get_codope(geometry)
    res = get_by_codope(codope)
    print('Results query by codope')
    pp.pprint(res)
    # print_time_stamps(res)
    print('CODOPE: {}'.format(codope))
    # pp.pprint(res)
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": ... |
27387352934 | from __future__ import print_function
import os.path
import sys
import densenet
import numpy as np
import sklearn.metrics as metrics
from keras.datasets import cifar10
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TensorBoard
from keras import backend as K
# Training hyper-parameters.
batch_size = 100
nb_classes = 10
nb_epoch = 40

# CIFAR-10 image geometry; channel ordering follows the active Keras backend.
img_rows, img_cols = 32, 32
img_channels = 3
img_dim = (img_channels, img_rows, img_cols) if K.image_dim_ordering() == "th" else (img_rows, img_cols, img_channels)

# DenseNet architecture parameters (depth 40, 3 dense blocks, growth rate 12).
depth = 40
nb_dense_block = 3
growth_rate = 12
nb_filter = -1
dropout_rate = 0.0  # 0.0 for data augmentation

# Optional CLI flag: pass 'true' as the first argument to enable augmentation.
if len(sys.argv) > 1:
    augment = sys.argv[1]
else:
    augment = 'false'

# Load CIFAR-10, normalise with the densenet helper, one-hot encode labels.
(trainX, trainY), (testX, testY) = cifar10.load_data()
trainX = trainX.astype('float32')
testX = testX.astype('float32')
trainX = densenet.preprocess_input(trainX)
testX = densenet.preprocess_input(testX)
Y_train = np_utils.to_categorical(trainY, nb_classes)
Y_test = np_utils.to_categorical(testY, nb_classes)

# GENERATOR
generator = ImageDataGenerator(rotation_range=15,
                               width_shift_range=5. / 32,
                               height_shift_range=5. / 32,
                               horizontal_flip=True)
generator.fit(trainX, seed=0)
# MODELS
# Train and evaluate one DenseNet variant per growth-rate factor.
#
# The original script repeated this section four times (2k/4k/6k/8k) with
# copy-paste drift: the 6k and 8k runs reused the ReduceLROnPlateau instance
# created for the 4k run, carrying its internal wait/cooldown state across
# unrelated models.  Each run now builds fresh callbacks, which fixes that
# bug and removes the duplication.
yTrue = testY  # ground-truth labels shared by every evaluation


def run_experiment(factor):
    """Build, train and evaluate a DenseNet with the given growth-rate factor.

    factor -- growth_rate_factor passed to densenet.DenseNet (2, 4, 6 or 8)
    Returns the test-set accuracy in percent.
    """
    print("Building model {}k...".format(factor))
    model = densenet.DenseNet(img_dim, classes=nb_classes, depth=depth, nb_dense_block=nb_dense_block,
                              growth_rate=growth_rate, nb_filter=nb_filter, dropout_rate=dropout_rate,
                              weights=None, bottleneck=True, growth_rate_factor=factor)
    model.summary()
    # Using Adam instead of SGD to speed up training.
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-3), metrics=["accuracy"])
    print("Finished compiling")

    weights_file = "weights/DenseNet-40-12-CIFAR10-{}K.h5".format(factor)
    if os.path.exists(weights_file):
        # model.load_weights(weights_file, by_name=True)
        print("Model loaded.")

    # Fresh callbacks per run so no learning-rate-scheduler state leaks
    # from one model into the next.
    lr_reducer = ReduceLROnPlateau(monitor='val_acc', factor=np.sqrt(0.1),
                                   cooldown=0, patience=5, min_lr=1e-5, verbose=1)
    model_checkpoint = ModelCheckpoint(weights_file, monitor="val_acc", save_best_only=True,
                                       save_weights_only=True, verbose=1)
    callbacks = [lr_reducer, model_checkpoint]

    try:
        if augment == 'true':
            print("Training with data augmentation...")
            model.fit_generator(generator.flow(trainX, Y_train, batch_size=batch_size),
                                steps_per_epoch=len(trainX) // batch_size, epochs=nb_epoch,
                                callbacks=callbacks,
                                validation_data=(testX, Y_test),
                                validation_steps=testX.shape[0] // batch_size, verbose=1)
        else:
            print("Training without data augmentation...")
            model.fit(trainX, Y_train, batch_size=batch_size, epochs=nb_epoch, callbacks=callbacks,
                      validation_data=(testX, Y_test), verbose=2)
    except KeyboardInterrupt:
        print("Training interrupted")
        sys.exit(1)

    yPred = np.argmax(model.predict(testX), axis=1)
    accuracy = metrics.accuracy_score(yTrue, yPred) * 100
    print("{}K Accuracy : ".format(factor), accuracy)
    print("{}K Error : ".format(factor), 100 - accuracy)
    return accuracy


for growth_rate_factor in (2, 4, 6, 8):
    run_experiment(growth_rate_factor)
del model8k | nima200/densenet-ablation | src/cifar10/BNLGrowthRate/cifar10.py | cifar10.py | py | 10,546 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keras.backend.image_dim_ordering",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "sys.argv... |
1952095518 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import collections
from util import compute_aggreeings, AverageMeter, get_mask, mask_tokens
def eval(model, val_loader, a2v, args, test=False):
    """Evaluate `model` on `val_loader` and return top-1 accuracy.

    a2v  -- answer-vocabulary embeddings, used only in the open-ended setting
    args -- args.mc selects multiple-choice (truthy) vs open-ended evaluation;
            args.dataset toggles the iVQA-specific target handling
    test -- only changes the label used in the log lines ("val" vs "test")
    """
    model.eval()
    count = 0
    # NOTE(review): `counts` is created but never updated below.
    metrics, counts = collections.defaultdict(int), collections.defaultdict(int)
    with torch.no_grad():
        if not args.mc:
            # Cache the answer-vocabulary embeddings once for open-ended QA.
            model.module._compute_answer_embedding(a2v)
        for i, batch in enumerate(val_loader):
            answer_id, answer, video, question = (
                batch["answer_id"],
                batch["answer"],
                batch["video"].cuda(),
                batch["question"].cuda(),
            )
            video_len = batch["video_len"]
            question_mask = (question > 0).float()  # 1 for non-pad tokens
            video_mask = get_mask(video_len, video.size(1)).cuda()
            count += answer_id.size(0)
            if not args.mc:
                # Open-ended: the model scores every vocabulary answer.
                predicts = model(
                    video,
                    question,
                    text_mask=question_mask,
                    video_mask=video_mask,
                )
                topk = torch.topk(predicts, dim=1, k=10).indices.cpu()
                if args.dataset != "ivqa":
                    answer_id_expanded = answer_id.view(-1, 1).expand_as(topk)
                else:
                    # iVQA: answer_id holds per-answer counts; /2 then clamp
                    # turns them into binary targets — presumably "chosen by
                    # at least 2 annotators"; confirm against the dataset code.
                    answer_id = (answer_id / 2).clamp(max=1)
                    answer_id_expanded = answer_id
                metrics = compute_aggreeings(
                    topk,
                    answer_id_expanded,
                    [1, 10],
                    ["acc", "acc10"],
                    metrics,
                    ivqa=(args.dataset == "ivqa"),
                )
            else:
                # Multiple-choice: score each candidate answer against the
                # fused video-question representation via batched dot products.
                fusion_proj, answer_proj = model(
                    video,
                    question,
                    text_mask=question_mask,
                    video_mask=video_mask,
                    answer=answer.cuda(),
                )
                fusion_proj = fusion_proj.unsqueeze(2)
                predicts = torch.bmm(answer_proj, fusion_proj).squeeze()
                predicted = torch.max(predicts, dim=1).indices.cpu()
                metrics["acc"] += (predicted == answer_id).sum().item()
    step = "val" if not test else "test"
    for k in metrics:
        v = metrics[k] / count
        logging.info(f"{step} {k}: {v:.2%}")
    return metrics["acc"] / count
def train(model, train_loader, a2v, optimizer, criterion, scheduler, epoch, args):
    """Train `model` for one epoch over `train_loader`.

    a2v       -- answer-vocabulary embeddings (open-ended setting only)
    criterion -- VideoQA loss
    args      -- args.mc selects multiple-choice vs open-ended; args.mlm_prob
                 truthy adds an auxiliary masked-language-modelling loss;
                 args.clip enables gradient-norm clipping
    """
    model.train()
    running_vqa_loss, running_acc, running_mlm_loss = (
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
    )
    for i, batch in enumerate(train_loader):
        answer_id, answer, video, question = (
            batch["answer_id"],
            batch["answer"],
            batch["video"].cuda(),
            batch["question"].cuda(),
        )
        video_len = batch["video_len"]
        question_mask = (question > 0).float()  # 1 for non-pad tokens
        video_mask = (
            get_mask(video_len, video.size(1)).cuda() if args.max_feats > 0 else None
        )
        N = answer_id.size(0)  # batch size
        if not args.mc:
            # Open-ended: answer embeddings are recomputed every step since
            # they move as the model trains.
            model.module._compute_answer_embedding(a2v)
            predicts = model(
                video,
                question,
                text_mask=question_mask,
                video_mask=video_mask,
            )
        else:
            # Multiple-choice: score each candidate via batched dot products.
            fusion_proj, answer_proj = model(
                video,
                question,
                text_mask=question_mask,
                video_mask=video_mask,
                answer=answer.cuda(),
            )
            fusion_proj = fusion_proj.unsqueeze(2)
            predicts = torch.bmm(answer_proj, fusion_proj).squeeze()
        if args.dataset == "ivqa":
            # iVQA: answer_id holds per-answer counts; /2 then clamp yields
            # binary multi-label targets — presumably "chosen by >= 2
            # annotators"; confirm against the dataset code.
            a = (answer_id / 2).clamp(max=1).cuda()
            vqa_loss = criterion(predicts, a)
            predicted = torch.max(predicts, dim=1).indices.cpu()
            predicted = F.one_hot(predicted, num_classes=len(a2v))
            running_acc.update((predicted * a.cpu()).sum().item() / N, N)
        else:
            vqa_loss = criterion(predicts, answer_id.cuda())
            predicted = torch.max(predicts, dim=1).indices.cpu()
            running_acc.update((predicted == answer_id).sum().item() / N, N)
        if args.mlm_prob:
            # Auxiliary masked-language-modelling objective on the question.
            inputs = batch["question"]
            inputs, labels = mask_tokens(
                inputs, model.module.bert.bert_tokenizer, mlm_probability=0.15
            )
            mlm_loss = model(
                video,
                question=inputs.cuda(),
                labels=labels.cuda(),
                text_mask=question_mask,
                video_mask=video_mask,
                mode="mlm",
            )
            mlm_loss = mlm_loss.mean()
            loss = mlm_loss + vqa_loss
        else:
            loss = vqa_loss
        optimizer.zero_grad()
        loss.backward()
        if args.clip:
            # Clip the global gradient norm for stability.
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=args.clip)
        optimizer.step()
        scheduler.step()
        running_vqa_loss.update(vqa_loss.detach().cpu().item(), N)
        if args.mlm_prob:
            running_mlm_loss.update(mlm_loss.detach().cpu().item(), N)
        # Periodic progress logging, args.freq_display times per epoch;
        # running meters are reset after each report.
        if (i + 1) % (len(train_loader) // args.freq_display) == 0:
            if args.mlm_prob:
                logging.info(
                    f"Epoch {epoch + 1}, Epoch status: {float(i + 1) / len(train_loader):.4f}, Training VideoQA loss: "
                    f"{running_vqa_loss.avg:.4f}, Training acc: {running_acc.avg:.2%}, Training MLM loss: {running_mlm_loss.avg:.4f}"
                )
            else:
                logging.info(
                    f"Epoch {epoch + 1}, Epoch status: {float(i + 1) / len(train_loader):.4f}, Training VideoQA loss: "
                    f"{running_vqa_loss.avg:.4f}, Training acc: {running_acc.avg:.2%}"
                )
            running_acc.reset()
            running_vqa_loss.reset()
            running_mlm_loss.reset()
running_mlm_loss.reset()
| antoyang/just-ask | train/train_videoqa.py | train_videoqa.py | py | 6,219 | python | en | code | 103 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "util.get_mask",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.topk",
... |
6759682425 | import json
class Kasir:
    """Simple console cashier.

    Stock (product -> price) is persisted in stok.json and the shopper's
    cart in beli.json.  `stokBarang` / `barangPembeli` are class-level
    caches shared by every instance.
    """

    stokBarang = {}     # product name -> price; only used when first creating stok.json
    barangPembeli = {}  # product name -> {"jumlah": quantity, "harga": unit price}

    def __init__(self):
        pass

    def simpanBeliJson(self):
        # Persist the current cart to beli.json.
        with open("beli.json", "w") as beliFileJson:
            json.dump(Kasir.barangPembeli, beliFileJson)

    def isiStok(self):
        # Print every stocked product and its price from stok.json.
        with open("stok.json", "r") as stokReadJson:
            stokDict = json.load(stokReadJson)
            for key, value in stokDict.items():
                print("Barang :", key, "Harga:", value)

    def isiKranjang(self):
        # Print the cart contents: product name plus its quantity and price.
        print("Isi Belanjaan Anda: ")
        for key, value in Kasir.barangPembeli.items():
            print("BARANG \t:", key)
            for key2 in value:
                print(key2.upper(), "\t:", value[key2])

    def tambahStok(self):
        # Interactively add a product to the stock.  Answering 'y' (re)creates
        # stok.json from scratch; otherwise the product is appended, retrying
        # recursively if the name already exists.
        stokTrue = input("Apa Anda Belum Punya Database Stock?(y/t) ")
        keyStok = input("Masukkan Barang: ")
        valueStok = int(input("Masukkan Harga: "))
        if stokTrue.lower() == "y":
            Kasir.stokBarang.update({keyStok: valueStok})
            with open("stok.json", "w") as stokFileJson:
                json.dump(Kasir.stokBarang, stokFileJson)
            print("=== Database Stock Sudah diBuat!!!===")
        else:
            with open("stok.json", "r") as stokReadJson:
                stokDict = json.load(stokReadJson)
                if keyStok.lower() in stokDict.keys():
                    print("Barang Sudah Ada!!! \nTambahkan Barang Lain!!!")
                    Kasir.tambahStok(self)
                else:
                    stokDict.update({keyStok: valueStok})
                    with open("stok.json", "w") as stokFileJson:
                        json.dump(stokDict, stokFileJson)
                    print("=== Stock Berhasil Ditambahkan :) ===")

    def editStok(self):
        # Interactively change the price of an existing product, retrying
        # recursively if the product is not found.
        keyStokEdit = input("Masukkan Barang yang Dirubah: ")
        valueStokEdit = int(input("Masukkan Harga: "))
        with open("stok.json", "r") as stokReadJson:
            stokDict = json.load(stokReadJson)
            if keyStokEdit.lower() in stokDict.keys():
                stokDict.update({keyStokEdit: valueStokEdit})
                with open("stok.json", "w") as stokFileJson:
                    json.dump(stokDict, stokFileJson)
                print("=== Stock Berhasil DiRubah :) ===")
            else:
                print("===Stock Barang Tidak Ditemukan!!!===")
                print("===Edit Barang Yang Ada===!!!")
                Kasir.editStok(self)

    def hapusStok(self):
        # Interactively delete a product, retrying recursively if not found.
        keyStockHapus = input("Masukkan Nama Barang yang Dihapus: ")
        with open("stok.json", "r") as stokReadJson:
            stokDict = json.load(stokReadJson)
            if keyStockHapus.lower() in stokDict.keys():
                del stokDict[keyStockHapus]
                with open("stok.json", "w") as stokFileJson:
                    json.dump(stokDict, stokFileJson)
                print("=== Stock Berhasil Dihapus :) ===")
            else:
                print("===Stock Barang Tidak Ditemukan!!!===")
                print("===Hapus Barang Yang Ada===!!!")
                Kasir.hapusStok(self)

    def beliStock(self, keyStokBeli, jmlhStokBeli):
        # Put `jmlhStokBeli` units of a stocked product into the cart and
        # persist the cart to disk.
        with open("stok.json", "r") as stokReadJson:
            stokDict = json.load(stokReadJson)
            Kasir.barangPembeli.update(
                {keyStokBeli: {"jumlah": jmlhStokBeli, "harga": stokDict[keyStokBeli]}})
            print("===Belanja Ditambahkan Kekranjang===")
            Kasir.simpanBeliJson(self)

    def totalIsiKranjang(self):
        # Print each cart line (price * quantity) and the grand total.
        totalHarga = 0
        hargaKali = 0
        with open("beli.json", "r") as beliJson:
            stokDict = json.load(beliJson)
            for key, value in stokDict.items():
                harga = float(value["harga"])
                jumlah = int(value["jumlah"])
                hargaKali = harga * jumlah
                print(f"{key} \t: {harga} * {jumlah} = {hargaKali}")
                totalHarga += hargaKali
            print("Harga Total Barang: ", totalHarga)

    # Helpers used for testing: same logic as above, but returning values
    # instead of only printing.
    def testTotalIsiKranjang(self):
        # Like totalIsiKranjang, but returns the grand total.
        totalHarga = 0
        hargaKali = 0
        with open("beli.json", "r") as beliJson:
            stokDict = json.load(beliJson)
            for key, value in stokDict.items():
                harga = float(value["harga"])
                jumlah = int(value["jumlah"])
                hargaKali = harga * jumlah
                print(f"{key} \t: {harga} * {jumlah} = {hargaKali}")
                totalHarga += hargaKali
            return totalHarga

    def testIsiKranjang(self, cariVaribel):
        # True if `cariVaribel` (lower-cased) is a key of the persisted cart.
        with open("beli.json", "r") as beliJson:
            stokDict = json.load(beliJson)
            if cariVaribel.lower() in stokDict.keys():
                return True
            else:
                return False

    def testIsiStok(self, cariVaribel):
        # True if `cariVaribel` (lower-cased) is a key of the persisted stock.
        with open("stok.json", "r") as stokReadJson:
            stokDict = json.load(stokReadJson)
            if cariVaribel.lower() in stokDict.keys():
                return True
            else:
                return False
| TakayumAja/ipplUasSem5 | Kasir.py | Kasir.py | py | 5,060 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.dump",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 40,
... |
33198791197 | from datetime import datetime
from django.core.paginator import Paginator
from django.db.models import Q
from django.http import JsonResponse
from django.core.serializers import serialize
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView
from .models import MatsterApplication
from .forms import MasterForm
from Appartament.models import Appartament
from House.models import House
from User.models import User
# Create your views here.
class MasterList(ListView):
    """List every master (repair) application."""
    model = MatsterApplication
    template_name = 'Master_application/master_list.html'
    queryset = MatsterApplication.objects.all()

    def get_context_data(self, *, object_list=None, **kwargs):
        # Expose all users (owner filter) and the subset that can act as
        # masters (users whose role links them to a user) for the filters.
        context = super(MasterList, self).get_context_data(**kwargs)
        context['owners'] = User.objects.all()
        context['masters'] = User.objects.filter(role__user__isnull=False).distinct()
        return context
class MasterCreate(CreateView):
    """Create a new master application via MasterForm."""
    form_class = MasterForm
    template_name = 'Master_application/master_create.html'
    success_url = reverse_lazy('master_list')

    def get_context_data(self, **kwargs):
        # No extra context yet; override kept as an extension point.
        context = super(MasterCreate, self).get_context_data(**kwargs)
        return context
class MasterUpdate(UpdateView):
    """Edit an existing master application via MasterForm."""
    form_class = MasterForm
    model = MatsterApplication
    template_name = 'Master_application/master_update.html'
    success_url = reverse_lazy('master_list')

    def get_context_data(self, **kwargs):
        # No extra context yet; override kept as an extension point.
        context = super(MasterUpdate, self).get_context_data(**kwargs)
        return context

    def form_valid(self, form):
        # Delegates to the default save behaviour; kept as an extension point.
        return super().form_valid(form=form)
class MasterDetail(DetailView):
    """Show one master application."""
    model = MatsterApplication
    template_name = 'Master_application/master_detail.html'
class MasterDelete(DeleteView):
    """Delete a master application.

    A plain GET performs the deletion immediately (no confirmation page).
    """
    model = MatsterApplication
    success_url = reverse_lazy('master_list')

    def get(self, request, *args, **kwargs):
        # Bug fix: the original called self.delete(self, request, *args, *kwags),
        # which passed the view instance where the request is expected and
        # unpacked the keyword dict as extra positionals.
        return self.delete(request, *args, **kwargs)
def filter_master_appartament(request):
    """AJAX endpoint: apartments of the selected owner plus all houses.

    Returns JSON with two serialised querysets, or HTTP 400 when no
    owner_id was provided.
    """
    owner_id = request.GET.get('owner_id')
    # Bug fix: the original condition (`!= '' or ... is not None`) was always
    # true, so a missing/empty owner_id reached the queryset filter and the
    # view could fall through and return None (a server error).  It also
    # printed the raw parameter on every call; the debug print is removed.
    if owner_id:
        appartaments = serialize('json', Appartament.objects.filter(owner_id=owner_id))
        houses = serialize('json', House.objects.all())
        return JsonResponse({'appartaments': appartaments, 'houses': houses}, status=200)
    return JsonResponse({'error': 'owner_id is required'}, status=400)
def master_appartament_list(request):
    """Server-side DataTables endpoint: filter, paginate and serialise
    master applications as JSON rows."""
    masters = MatsterApplication.objects.all()
    # Search parameters sent by the DataTables filter widgets.
    search_master_num = request.GET.get('master_num')
    search_master_time = request.GET.get('master_time')
    search_master_type = request.GET.get('master_type')
    search_master_description = request.GET.get('master_description')
    search_master_flat = request.GET.get('master_flat')
    search_master_owner = request.GET.get('master_owner')
    search_master_phone = request.GET.get('master_phone')
    search_master_name = request.GET.get('master_name')
    search_master_status = request.GET.get('master_status')
    # All provided filters are combined with AND.
    query = Q()
    if search_master_num:
        query &= Q(id__icontains=search_master_num)
    if search_master_time:
        # The widget sends a fixed-width range like "01.02.2023 - 28.02.2023";
        # both dates are sliced out by position.
        start = datetime.strptime(search_master_time[0:10], '%d.%m.%Y').date()
        end = datetime.strptime(search_master_time[13:23], '%d.%m.%Y').date()
        # Debug output left as-is.
        print()
        print('date:', start, '+', end)
        print()
        query &= Q(date_master__gte=start.strftime('%Y-%m-%d'), date_master__lte=end.strftime('%Y-%m-%d'))
    if search_master_type:
        query &= Q(typeMaster__name=search_master_type)
    if search_master_description:
        query &= Q(description_problem__icontains=search_master_description)
    if search_master_flat:
        query &= Q(appartament__number_appartament__icontains=search_master_flat)
    if search_master_owner:
        query &= Q(ownerAppartament=search_master_owner)
    if search_master_phone:
        query &= Q(name_master__phone_number__icontains=search_master_phone)
    if search_master_name:
        query &= Q(name_master_id=search_master_name)
    if search_master_status:
        query &= Q(status=search_master_status)
    masters = masters.filter(query)
    # DataTables pagination: `start` row offset and page `length`.
    start = int(request.GET.get('start', 0))
    length = int(request.GET.get('length', 10))
    paginator = Paginator(masters, length)
    masters = paginator.get_page(start // length + 1)
    # Serialise the current page into the row format the table expects.
    data = []
    for master in masters:
        data.append({
            'id': master.id,
            'time': f'{master.date_master.strftime("%d.%m.%Y")}-{master.time_master.strftime("%H:%M")}',
            'type': master.typeMaster.name,
            'description': master.description_problem,
            'flat': master.appartament.number_appartament,
            'owner': master.ownerAppartament.get_full_name(),
            'phone': master.name_master.phone_number,
            'master': master.name_master.get_full_name(),
            'status': master.status
        })
    # Standard DataTables response envelope.
    response = {
        'draw': request.GET.get('draw'),
        'recordsTotal': MatsterApplication.objects.all().count(),
        'recordsFiltered': masters.paginator.count,
        'data': data
    }
return JsonResponse(response) | Sampleeeees/MyHouse24 | Master_application/views.py | views.py | py | 5,264 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.views.generic.ListView",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "models.MatsterApplication",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "models.MatsterApplication.objects.all",
"line_number": 20,
"usage_type": "call"
... |
41244035127 | import glob
import argparse
import re
import os.path
import shutil
import sys
import tempfile
import itertools
import json
VALID_PROTOCOLS = ['git', 'ssh', 'http', 'https', 'ssh-colon', 'file', 'relative']
VERSION = '0.2.0'
parser = argparse.ArgumentParser(
description = 'Recursively search directory for .git/config files and update all remote URIs',
formatter_class = argparse.RawDescriptionHelpFormatter,
epilog = '''
%(prog)s expects a JSON file containing search paramaters and a substitution
dictionary. A simple example:
{
"search": {
"hostname": "oldgit|oldgit.example.org",
"path": "/+(?:var|srv)/+git"
},
"replace": {
"hostname": "newgit.example.com",
"username": "git",
"protocol": "ssh-colon",
"substitutions": {
"oldproject1": "new/path/project1",
"oldproject2": "oldproject2"
}
}
}
With this file search for .git/config files recursively and replace all
occurrences matching the provided search rules and replace the URLs.
.gitmodules can also be modified.
(Note that search.hostname and search.path are regular expressions, which have
to be escaped to be valid JSON. These expressions are part of a bigger
expression and must not contain anchors like '^' and '$'.)
%(prog)s provides several ways to test your rules before changing your files.
In all cases %(prog)s expects a JSON file as the first argument and one or
several files or directories. The files are expected to be .git/config files
(and/or .gitmodules files if --modules is given) and/or directories that will
be searched recursively for mentioned files.
%(prog)s --list-configs
Simply list all the .git/config files found (with matching URIs inside)
%(prog)s --list-projects
List all the project names matching the search criteria. If the project does
not have a substitution rule, a warning will be issued.
%(prog)s --list-projects --list-categorised --show-new-path
List all found projects, indented by config file name, along with the new
name it will be substituted with (not the full URI, but the new path).
%(prog)s --dry-run
Print the contents of all the files that would have been modified.
The resulting URI must be specified with hostname, username and protocol. All
of these can be overridden by the command line. When using the protocol
'relative' the hostname may be of the form '../..' and username is ignored.
TIP: Before changing .gitmodules files, it might be a good idea to run %(prog)s
--list-config --modules-only to get a list of the modified projects. This can
be handy for an automated script running 'git commit' of the changes later:
%(prog)s --list-config config.json --modules-only my_path
This is done by the helper script 'list_affected_gitmodules.sh'. Another
script, commit_gitmodules_changes.sh, will add and commit all affected
.gitmodules files and push them to the remote 'origin'. The complete list of
actions for renaming all URIs in local .git/configs as well as updating all
submodules and committing and pushing the changes are as follows:
# Do a dry run first:
%(prog)s --modules --dry-run config.json my_paths
# Store a list of the projects that will be changed and needs to be commited:
/usr/share/doc/%(prog)s/scripts/list_affected_gitmodules.sh config.json my_paths > modified_projects
# Replace URIs:
%(prog)s --modules config.json my_paths
# Add, commit and push changes to submodule URIs:
/usr/share/doc/%(prog)s/scripts/commit_gitmodules_changes.sh modified_projects
'''
)
parser.add_argument('--list-configs', action = 'store_true', help = 'List all matching .git/config files and do nothing else')
parser.add_argument('--list-projects', action = 'store_true', help = 'List all projects found in all found .git/config files and do nothing else')
parser.add_argument('--show-new-path', const = ' → ', default = None, nargs = '?', help = 'When listing projects, also show their new paths, separated by a delimiter (default " → ")')
parser.add_argument('--list-categorised', action = 'store_true', help = 'When listing projects, print them indented under the config file they are found in')
parser.add_argument('--dry-run', '-d', action = 'store_true', help = 'Instead of substituting paths just print what would have been done (same as --list-projects --show-new-path --list-categorised')
parser.add_argument('--protocol', '-p', choices = VALID_PROTOCOLS, help = 'Protocol to use in the new URI, default ssh (overrides config)')
parser.add_argument('--username', '-u', help = 'Username to use in the new URI (overrides config)')
parser.add_argument('--modules', '-m', action = 'store_true', help = 'Replace/inspect URIs in .gitmodules files as well')
parser.add_argument('--modules-only', action = 'store_true', help = 'Replace/inspect URIs in .gitmodules files only')
parser.add_argument('--hostname', help = 'Hostname in the new URI (overrides config)')
parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
parser.add_argument('config', help = 'JSON configuration file')
parser.add_argument('targets', nargs = '+', metavar = 'file|directory', help = 'One or several directories containing git projects, or one or several git project files, to be updated')
def printIfMatching(gitConfigFile):
    """Print the file name if its contents match the module-level `regex`."""
    with open(gitConfigFile, 'r') as config:
        if regex.search(config.read()):
            print(gitConfigFile)
def listProjects(gitConfigFile, subst, categorise = False, showNew = None):
    """Print every project URI in gitConfigFile matched by the global `regex`.

    subst      -- mapping old project name -> new path; projects without an
                  entry are reported on stderr and skipped
    categorise -- print the config file name first and indent each project
    showNew    -- if not None, also print the new path, separated by this string
    """
    if categorise:
        print(gitConfigFile, ':', sep='')
    with open(gitConfigFile, 'r') as config:
        for match in regex.finditer(config.read()):
            project = match.group('project')
            try:
                newName = subst[project]
            except KeyError :
                print('WARNING: The project "{0}" does not have a defined new path'.format(project), file = sys.stderr)
                continue
            if categorise:
                print('\t', end='')
            if showNew is not None:
                print('{0}{1}{2}'.format(project, showNew, newName))
            else:
                print(match.group('project'))
def replace(gitConfigFile, subst, host, proto, username = None, dryRun = False):
    """Rewrite every matching remote URI in gitConfigFile.

    subst    -- mapping old project name -> new path; unmapped projects are
                warned about on stderr and left unchanged
    host     -- new hostname (or relative prefix for proto == 'relative')
    proto    -- one of VALID_PROTOCOLS; 'ssh-colon' uses host:path form,
                'relative' uses host/path with no user or scheme
    username -- optional user inserted before the hostname
    dryRun   -- print the would-be file contents instead of writing them

    Bug fixes vs. the original:
    * on a missing substitution the re.sub callback returned match.string
      (the ENTIRE file contents) instead of the unchanged match,
    * the callback also wrote into the temp file directly, corrupting the
      output,
    * dry runs created a temporary file and never removed it.
    """
    if dryRun:
        print('In {0} the following would be the new contents:'.format(gitConfigFile))

    def substitute(match):
        # Build the replacement URI for one matched "url = ..." line.
        project = match.group('project')
        try:
            newName = subst[project]
        except KeyError:
            print('WARNING: The project "{0}" does not have a defined new path and will not be replaced'.format(project), file = sys.stderr)
            return match.group(0)  # keep the original line untouched
        user = username + '@' if username is not None else ''
        if proto == 'ssh-colon':
            uri = '{user}{host}:{path}'.format(
                user = user,
                host = host,
                path = newName
            )
        elif proto == 'relative':
            uri = '{host}/{path}'.format(
                host = host,
                path = newName
            )
        else:
            uri = '{proto}://{user}{host}/{path}'.format(
                proto = proto,
                user = user,
                host = host,
                path = newName
            )
        return match.group('key') + uri

    try:
        with open(gitConfigFile, 'r') as configOrig:
            result = regex.sub(substitute, configOrig.read())
    except IOError as e:
        print('Failed to read config {}: {}'.format(e.filename, e.strerror), file = sys.stderr)
        return
    if dryRun:
        print(result)
        return
    # Write to a temp file first, then atomically move it over the original.
    (fd, tempFile) = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'w') as temp:
            temp.write(result)
    except IOError as e:
        print('Failed to replace config / creating temporary file {}: {}'.format(e.filename, e.strerror), file = sys.stderr)
        try:
            os.remove(tempFile)
        except OSError:
            pass
    else:
        shutil.move(tempFile, gitConfigFile)
def confFiles(path, includeGitModules = False, gitModulesOnly = False):
    """Yield the git configuration files found recursively below *path*.

    includeGitModules -- also yield .gitmodules files
    gitModulesOnly    -- yield only .gitmodules files (takes precedence)

    Bug fix: the original globbed the module-level variable `target` instead
    of the `path` parameter, so the argument was silently ignored.
    """
    if gitModulesOnly:
        return glob.iglob(path + '/**/.gitmodules', recursive = True)
    elif includeGitModules:
        return itertools.chain(
            glob.iglob(path + '/**/.git/config', recursive = True),
            glob.iglob(path + '/**/.git/modules/**/config', recursive = True),
            glob.iglob(path + '/**/.gitmodules', recursive = True)
        )
    else:
        return itertools.chain(
            glob.iglob(path + '/**/.git/config', recursive = True),
            glob.iglob(path + '/**/.git/modules/**/config', recursive = True)
        )
def validateJSONConfig(json):
    """Validate the loaded JSON configuration.

    :raises ValueError: for an unknown replace/protocol, or for an invalid
        regular expression in search/hostname or search/path.
    """
    if 'protocol' in json['replace'] and json['replace']['protocol'] not in VALID_PROTOCOLS:
        raise ValueError('replace/protocol "{}" is invalid (valid protocols: {})'
                .format(json['replace']['protocol'], ', '.join(VALID_PROTOCOLS)))
    try:
        re.compile(json['search']['hostname'])
        if 'path' in json['search']:
            re.compile(json['search']['path'])
    except re.error as e:
        # Previously this only printed a warning, so the invalid pattern
        # crashed later (with a raw traceback) when the search regex was
        # actually compiled.  Fail fast and consistently instead.
        raise ValueError('Config contains invalid regex "{}": {}'.format(e.pattern, e.msg)) from e
if __name__ == '__main__':
    args = parser.parse_args()
    with open(args.config) as confFile:
        # NOTE: this rebinds the name `json` from the module to the parsed
        # configuration dict; the json module is not used after this point.
        json = json.load(confFile)
    validateJSONConfig(json)
    # The patterns below are raw strings so regex escapes such as \s, \. and
    # \n are not (mis)interpreted as invalid Python string escapes.
    if 'path' in json['search']:
        # TODO: relative path is way to simplified:
        regex = re.compile(r"""
            ^(?P<key>\s*url\s*=\s*)
            (?:
            # Either an URI:
            (?:(file|ssh|git|http|https)://)? # protocol, optional
            (?:[a-z_][a-z0-9_-]*[$]?@)?? # Linux username, followed by '@', optional
            (?:{hosts}) # Accepted hostnames
            :?(?:{paths}) # Common paths
            | # or a relative path
            \.\.)
            /+(?P<project>[^\.\n]+) # project name
            (?:\.git)?? # optionally followed by ".git"
            \s*$""".format(
                hosts = json['search']['hostname'],
                paths = json['search']['path']), re.MULTILINE | re.VERBOSE)
    else:
        regex = re.compile(r"""
            ^(?P<key>\s*url\s*=\s*)
            (?:(file|ssh|git|http|https)://)? # protocol, optional
            (?:[a-z_][a-z0-9_-]*[$]?@)?? # Linux username, followed by '@', optional
            (?:{hosts}) # Accepted hostnames
            :?
            (?P<project>[^\.\n]+) # project path/name
            (?:\.git)?? # optionally followed by ".git"
            \s*$""".format(hosts = json['search']['hostname']), re.MULTILINE | re.VERBOSE)
    for target in args.targets:
        if os.path.isdir(target):
            # Directory target: recurse and process every git config found.
            for config in confFiles(target, args.modules, args.modules_only):
                if args.list_configs:
                    printIfMatching(config)
                elif args.list_projects:
                    listProjects(
                        config,
                        subst = json['replace']['substitutions'],
                        categorise = args.list_categorised,
                        showNew = args.show_new_path
                    )
                else:
                    replace(
                        config,
                        subst = json['replace']['substitutions'],
                        host = args.hostname if args.hostname else json['replace']['hostname'],
                        proto = args.protocol if args.protocol else json['replace']['protocol'],
                        username = args.username if args.username else json['replace'].get('username'),
                        dryRun = args.dry_run
                    )
        else:
            # Single-file target: process it directly.
            if args.list_configs:
                printIfMatching(target)
            elif args.list_projects:
                listProjects(
                    target,
                    subst = json['replace']['substitutions'],
                    categorise = args.list_categorised,
                    showNew = args.show_new_path
                )
            else:
                replace(
                    target,
                    subst = json['replace']['substitutions'],
                    host = args.hostname if args.hostname else json['replace']['hostname'],
                    proto = args.protocol if args.protocol else json['replace']['protocol'],
                    username = args.username if args.username else json['replace'].get('username'),
                    dryRun = args.dry_run
                )
| misje/git-rename-uri | git-rename-uri.py | git-rename-uri.py | py | 13,138 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "argparse.RawDescriptionHelpFormatter",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 118,
"usage_type": "attribute"
},
{
... |
17335067722 | # -*- coding: utf-8 -*-
# Module docstring.  The original used "==" -- a comparison whose result was
# discarded -- so __doc__ was never actually assigned.
__doc__ = """
Simple view that exposes a REST API over HTTP
to retrieve the list of last read books from
a bookshelf.

To run it:

.. code-block:: python

    $ python last_read.py --topic mytopic --broker <BROKER_ADDR>:9092 --port 8080 --name lastread --id lastread1 --tags book last

This will listen for HTTP requests on `127.0.0.1:8080`
and will return a JSON encoded list of book
documents.

The service will be automatically registered to the local consul
agent (assuming running localhost:8500) using the
name, id and tags provided

When the process is terminated, the service is deregistered
automatically.
"""
import asyncio
import json
import signal
from aiohttp import web
from discolib import register_service, deregister_service
from eventlib import consume_events, stop_consuming_events
from restlib import webserver, route_to_resource
from utils import get_cli_parser
# internal state
bookshelf = []
loop = asyncio.get_event_loop()
async def bookshelf_view(request):
    """
    View to see the current list of books
    in your bookshelf.

    The in-memory bookshelf is serialized to JSON and returned as the
    HTTP response body.
    """
    payload = json.dumps(bookshelf).encode('utf-8')
    return web.Response(body=payload,
                        headers={"content-type": "application/json"})
async def event_handler(message):
    """
    Called whenever a new event was received from
    the event store.

    Decodes the event payload and stores it in the in-memory bookshelf.
    """
    decoded = message.value.decode('utf-8')
    bookshelf.append(decoded)
def run():
    """
    Entry point to this microservice.

    Wires everything together: schedules the Kafka consumer, starts the
    REST server, registers the service with consul, then blocks on the
    event loop until interrupted and tears everything down in reverse
    order.
    """
    args = get_cli_parser().parse_args()

    # schedule the internal event consumer
    # that will run until we terminate this service
    asyncio.ensure_future(consume_events(topic=args.topic.encode('utf-8'),
                                         group=args.group,
                                         addr=args.broker,
                                         callback=event_handler))

    # let's start the REST server that will
    # serve the view's resource
    srv = loop.run_until_complete(webserver(args.addr, args.port))
    loop.run_until_complete(route_to_resource(bookshelf_view))

    # finally, let's advertise this service
    # to the rest of the world (consul registration)
    svc = loop.run_until_complete(register_service(
        id=args.id,
        name=args.name,
        port=args.port,
        address=args.addr,
        tags=args.tags
        )
    )

    try:
        # block here until Ctrl-C / process termination
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        # teardown: deregister first so no new traffic is routed our way,
        # then stop consuming and shut the web server down
        loop.run_until_complete(deregister_service(svc))
        loop.run_until_complete(stop_consuming_events())
        srv.close()
        loop.run_until_complete(srv.wait_closed())
        # give the time for remaining requests to complete
        loop.run_until_complete(asyncio.sleep(2))
        loop.close()


if __name__ == '__main__':  # pragma: no cover
    run()
| antifragilesoftware/sandbox | kafka/microservices/last_read.py | last_read.py | py | 2,901 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "asyncio.get_event_loop",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "aiohttp.web.Response",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "aiohttp.web",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "json.dumps",
... |
20165127873 | from django.conf.urls import url
from . import views
# URL routes for the administrator app.
# NOTE(review): django.conf.urls.url() is deprecated/removed in modern Django
# (use django.urls.re_path/path) -- this file targets an older release.
urlpatterns = [
    # dashboard and account creation
    url(r'^$', views.dashboard, name='dashboard'),
    url(r'^add_mentor$', views.add_mentor, name='add_mentor'),
    url(r'^add_investor$', views.add_investor, name='add_investor'),
    # listing views
    url(r'^show_startups$', views.show_startups, name='show_startups'),
    url(r'^show_investors$', views.show_investors, name='show_investors'),
    url(r'^show_mentors$', views.show_mentors, name='show_mentors'),
    #url(r'^test$', views.test),
    url(r'^upload_documents$', views.upload_documents, name='upload_documents'),
    url(r'^update_info$', views.update_info, name='update_info'),
    url(r'^show_tickets$', views.show_tickets, name='show_tickets'),
    # NOTE(review): route name 'show_incunbation' is misspelled; kept as-is
    # because templates may reverse it by this exact name -- confirm before fixing.
    url(r'^show_incubation$', views.show_incubation, name='show_incunbation'),
    # funding / incubation request handling (pk = request id -- confirm in views)
    url(r'^accept_fund/(?P<pk>[0-9]+)$',views.accept_fund,name='accept_fund'),
    url(r'^reject_fund/(?P<pk>[0-9]+)$',views.reject_fund,name='reject_fund'),
    url(r'^accept_incubation/(?P<pk>[0-9]+)$',views.accept_incubation,name='accept_incubation'),
    url(r'^reject_incubation/(?P<pk>[0-9]+)$',views.reject_incubation,name='reject_incubation'),
    # ticket handling
    url(r'^show_ticket/(?P<pk>[0-9]+)$',views.show_ticket,name='show_ticket'),
    url(r'^solve_ticket/(?P<pk>[0-9]+)$',views.solve_ticket,name='solve_ticket'),
    url(r'^assign_mentor$', views.assign_mentor, name='assign_mentor'),
    url(r'^reviews$', views.reviews, name='reviews'),
    # milestone tracking
    url(r'^set_milestone/(?P<pk>[0-9]+)$',views.set_milestone,name='set_milestone'),
    url(r'^show_milestone/(?P<pk>[0-9]+)$',views.show_milestone,name='show_milestone'),
    url(r'^show_funded_startups$', views.show_funded_startups, name='show_funded_startups'),
    url(r'^complete_milestone/(?P<pk>[0-9]+)$',views.complete_milestone,name='complete_milestone'),
]
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.co... |
43220039182 | import yaml
from modulemd.components import ModuleComponents
from modulemd.content import ModuleContent
from modulemd.rpms import ModuleRPMs
from modulemd.profile import ModuleProfile
supported_mdversions = ( 0, )
class ModuleMetadata(object):
"""Class representing the whole module."""
REPODATA_FILENAME = "modulemd"
    def __init__(self):
        """Creates a new ModuleMetadata instance."""
        # Metadata format version; default to the newest supported one.
        self.mdversion = max(supported_mdversions)
        # Name-version-release identification of the module
        self.name = ""
        self.version = ""
        self.release = ""
        # Human-readable texts
        self.summary = ""
        self.description = ""
        # Licensing of the module itself and of its packaged content
        self.module_licenses = set()
        self.content_licenses = set()
        # Dependencies: module name -> minimum version
        self.buildrequires = dict()
        self.requires = dict()
        # Upstream reference URLs
        self.community = ""
        self.documentation = ""
        self.tracker = ""
        # Free-form, user-defined extra data
        self.xmd = dict()
        # Installation profiles: profile name -> ModuleProfile
        self.profiles = dict()
        # ModuleComponents instance, or None when no components are defined
        self.components = None
def load(self, f):
"""Loads a metadata file into the instance.
:param str f: File name to load
"""
with open(f, "r") as infile:
data = infile.read()
self.loads(data)
    def loads(self, s):
        """Loads metadata from a string.

        :param str s: Raw metadata in YAML
        :raises ValueError: If the metadata is invalid or unsupported.
        """
        yml = yaml.safe_load(s)
        # Header validation: refuse anything that is not a supported
        # modulemd document before touching the payload.
        if "document" not in yml or yml["document"] != "modulemd":
            raise ValueError("The supplied data isn't a valid modulemd document")
        if "version" not in yml:
            raise ValueError("version is required")
        if yml["version"] not in supported_mdversions:
            raise ValueError("The supplied metadata version isn't supported")
        self.mdversion = yml["version"]
        # Everything below is optional; a missing or malformed "data"
        # section leaves the instance with its defaults.
        if "data" not in yml or not isinstance(yml["data"], dict):
            return
        # Simple scalar fields
        if "name" in yml["data"]:
            self.name = yml["data"]["name"]
        if "version" in yml["data"]:
            self.version = yml["data"]["version"]
        if "release" in yml["data"]:
            self.release = yml["data"]["release"]
        if "summary" in yml["data"]:
            self.summary = yml["data"]["summary"]
        if "description" in yml["data"]:
            self.description = str(yml["data"]["description"]).strip()
        # Licenses: module licenses must be non-empty to be taken over;
        # content licenses are optional.
        if ("license" in yml["data"]
                and isinstance(yml["data"]["license"], dict)
                and "module" in yml["data"]["license"]
                and yml["data"]["license"]["module"]):
            self.module_licenses = set(yml["data"]["license"]["module"])
        if ("license" in yml["data"]
                and isinstance(yml["data"]["license"], dict)
                and "content" in yml["data"]["license"]):
            self.content_licenses = set(yml["data"]["license"]["content"])
        # Build-time and runtime dependencies
        if ("dependencies" in yml["data"]
                and isinstance(yml["data"]["dependencies"], dict)):
            if ("buildrequires" in yml["data"]["dependencies"]
                    and yml["data"]["dependencies"]["buildrequires"]):
                self.buildrequires = yml["data"]["dependencies"]["buildrequires"]
            if ("requires" in yml["data"]["dependencies"]
                    and yml["data"]["dependencies"]["requires"]):
                self.requires = yml["data"]["dependencies"]["requires"]
        # Upstream reference links
        if "references" in yml["data"] and yml["data"]["references"]:
            if "community" in yml["data"]["references"]:
                self.community = yml["data"]["references"]["community"]
            if "documentation" in yml["data"]["references"]:
                self.documentation = yml["data"]["references"]["documentation"]
            if "tracker" in yml["data"]["references"]:
                self.tracker = yml["data"]["references"]["tracker"]
        # Free-form, user-defined extra data
        if "xmd" in yml["data"]:
            self.xmd = yml["data"]["xmd"]
        # Installation profiles
        if ("profiles" in yml["data"]
                and isinstance(yml["data"]["profiles"], dict)):
            for profile in yml["data"]["profiles"].keys():
                self.profiles[profile] = ModuleProfile()
                if "description" in yml["data"]["profiles"][profile]:
                    self.profiles[profile].description = \
                        str(yml["data"]["profiles"][profile]["description"])
                if "rpms" in yml["data"]["profiles"][profile]:
                    self.profiles[profile].rpms = \
                        set(yml["data"]["profiles"][profile]["rpms"])
        # Component (RPM) definitions
        if ("components" in yml["data"]
                and isinstance(yml["data"]["components"], dict)):
            self.components = ModuleComponents()
            if "rpms" in yml["data"]["components"]:
                self.components.rpms = ModuleRPMs()
                if "dependencies" in yml["data"]["components"]["rpms"]:
                    self.components.rpms.dependencies = \
                        yml["data"]["components"]["rpms"]["dependencies"]
                if "api" in yml["data"]["components"]["rpms"]:
                    self.components.rpms.api = \
                        set(yml["data"]["components"]["rpms"]["api"])
                if "packages" in yml["data"]["components"]["rpms"]:
                    for p, e in yml["data"]["components"]["rpms"]["packages"].items():
                        # Only the recognised per-package extras are copied
                        # through; "rationale" is mandatory (KeyError otherwise).
                        extras = dict()
                        extras["rationale"] = e["rationale"]
                        if "repository" in e:
                            extras["repository"] = e["repository"]
                        if "cache" in e:
                            extras["cache"] = e["cache"]
                        if "commit" in e:
                            extras["commit"] = e["commit"]
                        if "arches" in e:
                            extras["arches"] = e["arches"]
                        if "multilib" in e:
                            extras["multilib"] = e["multilib"]
                        self.components.rpms.add_package(p, **extras)
                if "filter" in yml["data"]["components"]["rpms"]:
                    self.components.rpms.filter = \
                        set(yml["data"]["components"]["rpms"]["filter"])
def dump(self, f):
"""Dumps the metadata into the supplied file.
:param str f: File name of the destination
"""
data = self.dumps()
with open(f, "w") as outfile:
outfile.write(data)
def dumps(self):
"""Dumps te metadata into a string.
:rtype: str
:raises Exception: If metadata validation fails
"""
if not self.validate:
raise Exception("Metadata validation failed")
data = dict()
# header
data["document"] = "modulemd"
data["version"] = self.mdversion
# data
data["data"] = dict()
data["data"]["name"] = self.name
data["data"]["version"] = self.version
data["data"]["release"] = self.release
data["data"]["summary"] = self.summary
data["data"]["description"] = self.description
data["data"]["license"] = dict()
data["data"]["license"]["module"] = list(self.module_licenses)
if self.content_licenses:
data["data"]["license"]["content"] = list(self.content_licenses)
if self.buildrequires or self.requires:
data["data"]["dependencies"] = dict()
if self.buildrequires:
data["data"]["dependencies"]["buildrequires"] = self.buildrequires
if self.requires:
data["data"]["dependencies"]["requires"] = self.requires
if self.community or self.documentation or self.tracker:
data["data"]["references"] = dict()
if self.community:
data["data"]["references"]["community"] = self.community
if self.documentation:
data["data"]["references"]["documentation"] = self.documentation
if self.tracker:
data["data"]["references"]["tracker"] = self.tracker
if self.xmd:
data["data"]["xmd"] = self.xmd
if self.profiles:
data["data"]["profiles"] = dict()
for profile in self.profiles.keys():
if self.profiles[profile].description:
if profile not in data["data"]["profiles"]:
data["data"]["profiles"][profile] = dict()
data["data"]["profiles"][profile]["description"] = \
str(self.profiles[profile].description)
if self.profiles[profile].rpms:
if profile not in data["data"]["profiles"]:
data["data"]["profiles"][profile] = dict()
data["data"]["profiles"][profile]["rpms"] = \
list(self.profiles[profile].rpms)
if self.components:
data["data"]["components"] = dict()
if self.components.rpms:
data["data"]["components"]["rpms"] = dict()
data["data"]["components"]["rpms"]["dependencies"] = \
self.components.rpms.dependencies
data["data"]["components"]["rpms"]["api"] = \
list(self.components.rpms.api)
if self.components.rpms.packages:
data["data"]["components"]["rpms"]["packages"] = dict()
for p, e in self.components.rpms.packages.items():
extra = dict()
extra["rationale"] = e["rationale"]
if "commit" in e:
extra["commit"] = e["commit"]
if "repository" in e:
extra["repository"] = e["repository"]
if "cache" in e:
extra["cache"] = e["cache"]
if "arches" in e:
extra["arches"] = e["arches"]
if "multilib" in e:
extra["multilib"] = e["multilib"]
data["data"]["components"]["rpms"]["packages"][p] = \
extra
if self.components.rpms.filter:
data["data"]["components"]["rpms"]["filter"] = \
list(self.components.rpms.filter)
return yaml.safe_dump(data)
    def validate(self):
        """Performs an in-depth validation of the metadata instance.

        First verifies the type of every property, then checks that all
        required values (name, version, release, summary, description and
        at least one module license) are present.

        :rtype: bool
        :raises TypeError: If properties are holding data of incorrect type
        :raises ValueError: If properties are holding invalid data
        """
        # --- type validation --------------------------------------------
        if not isinstance(self.mdversion, int):
            raise TypeError("mdversion must be an integer")
        if not isinstance(self.name, str):
            raise TypeError("name must be a string")
        if not isinstance(self.version, str):
            raise TypeError("version must be a string")
        if not isinstance(self.release, str):
            raise TypeError("release must be a string")
        if not isinstance(self.summary, str):
            raise TypeError("summary must be a string")
        if not isinstance(self.description, str):
            raise TypeError("description must be a string")
        if not isinstance(self.module_licenses, set):
            raise TypeError("module_licenses must be a set")
        for l in self.module_licenses:
            if not isinstance(l, str):
                raise TypeError("module_licenses must be a set of strings")
        if not isinstance(self.content_licenses, set):
            raise TypeError("content_licenses must be a set")
        for l in self.content_licenses:
            if not isinstance(l, str):
                raise TypeError("content_licenses must be a set of strings")
        if not isinstance(self.buildrequires, dict):
            raise TypeError("buildrequires must be a dictionary")
        for n, v in self.buildrequires.items():
            if not isinstance(n, str) or not isinstance(v, str):
                raise TypeError("buildrequires keys and values must be strings")
        if not isinstance(self.requires, dict):
            raise TypeError("requires must be a dictionary")
        for n, v in self.requires.items():
            if not isinstance(n, str) or not isinstance(v, str):
                raise TypeError("requires keys and values must be strings")
        if not isinstance(self.community, str):
            raise TypeError("community must be a string")
        if not isinstance(self.documentation, str):
            raise TypeError("documentation must be a string")
        if not isinstance(self.tracker, str):
            raise TypeError("tracker must be a string")
        # xmd may be None or a dict
        if self.xmd is not None and not isinstance(self.xmd, dict):
            raise TypeError(
                "xmd must be a dictionary or null, current value: {!r}".format(self.xmd)
            )
        if not isinstance(self.profiles, dict):
            raise TypeError("profiles must be a dictionary")
        for p in self.profiles.keys():
            if not isinstance(p, str):
                raise TypeError("profiles keys must be strings")
            if not isinstance(self.profiles[p], ModuleProfile):
                raise TypeError("profiles values must be instances of ModuleProfile")
            if not isinstance(self.profiles[p].description, str):
                raise TypeError("profile description must be string")
            if not isinstance(self.profiles[p].rpms, set):
                raise TypeError("profile rpms must be sets")
            for ps in self.profiles[p].rpms:
                if not isinstance(ps, str):
                    raise TypeError("profile rpms must be sets of strings")
        # components may be None (no component section at all)
        if not isinstance(self.components, ModuleComponents) \
                and self.components is not None:
            raise TypeError("components must be an instance of ModuleComponents")
        if self.components:
            if self.components.rpms:
                if not isinstance(self.components.rpms, ModuleRPMs):
                    raise TypeError("rpms must be an instance of ModuleRPMs")
                if not isinstance(self.components.rpms.dependencies, bool):
                    raise TypeError("rpms.dependencies must be a boolean")
                if not isinstance(self.components.rpms.api, set):
                    raise TypeError("rpms.api must be a set")
                for a in self.components.rpms.api:
                    if not isinstance(a, str):
                        raise TypeError("rpms.api must be a set of strings")
                if not isinstance(self.components.rpms.filter, set):
                    raise TypeError("rpms.filter must be a set")
                for a in self.components.rpms.filter:
                    if not isinstance(a, str):
                        raise TypeError("rpms.filter must be a set of strings")
                if self.components.rpms.packages:
                    if not isinstance(self.components.rpms.packages, dict):
                        raise TypeError("rpms.packages must be a dictionary")
                    for p, e in self.components.rpms.packages.items():
                        if not isinstance(p, str):
                            raise TypeError("rpms.packages keys must be strings")
                        if not isinstance(e, dict):
                            raise TypeError("rpms.packages values must dictionaries")
                        # per-package extras: only the known keys are checked
                        for k, v in e.items():
                            if not isinstance(k, str):
                                raise TypeError("rpms extras keys must be strings")
                            if k == "rationale" and v:
                                if not isinstance(v, str):
                                    raise TypeError("rpms rationale must be a string")
                            if k == "commit" and v:
                                if not isinstance(v, str):
                                    raise TypeError("rpms commit must be a string")
                            if k == "repository" and v:
                                if not isinstance(v, str):
                                    raise TypeError("rpms repository must be a string")
                            if k == "cache" and v:
                                if not isinstance(v, str):
                                    raise TypeError("rpms cache must be a string")
                            if k == "arches" and v:
                                if not isinstance(v, list):
                                    raise TypeError("rpms arches must be a list")
                                for s in v:
                                    if not isinstance(s, str):
                                        raise TypeError("arches must be a list of strings")
                            if k == "multilib" and v:
                                if not isinstance(v, list):
                                    raise TypeError("rpms multilib must be a list")
                                for s in v:
                                    if not isinstance(s, str):
                                        raise TypeError("multilib must be a list of strings")
        # --- required-value validation ----------------------------------
        if not self.name:
            raise ValueError("name is required")
        if not self.version:
            raise ValueError("version is required")
        if not self.release:
            raise ValueError("release is required")
        if not self.summary:
            raise ValueError("summary is required")
        if not self.description:
            raise ValueError("description is required")
        if not self.module_licenses:
            raise ValueError("at least one module license is required")
        if self.components:
            if self.components.rpms:
                for p, e in self.components.rpms.packages.items():
                    if "rationale" not in e:
                        raise ValueError(p, "has no rationale")
        # TODO: Validate dependency version formats
        return True
@property
def mdversion(self):
"""An int property representing the metadata format version used.
This is automatically set to the highest supported version for
new objects or set by the loaded document. This value can be
changed to one of the supported_mdversions to alter the output
format.
"""
return self._mdversion
@mdversion.setter
def mdversion(self, i):
if i not in supported_mdversions:
raise ValueError("Unsupported metadata version")
self._mdversion = int(i)
@property
def name(self):
"""A string property representing the name of the module."""
return self._name
@name.setter
def name(self, s):
self._name = str(s)
@property
def version(self):
"""A string property representing the version of the module."""
return self._version
@version.setter
def version(self, s):
self._version = str(s)
@property
def release(self):
"""A string property representing the release of the module."""
return self._release
@release.setter
def release(self, s):
self._release = str(s)
@property
def summary(self):
"""A string property representing a short summary of the module."""
return self._summary
@summary.setter
def summary(self, s):
self._summary = str(s)
@property
def description(self):
"""A string property representing a detailed description of the
module."""
return self._description
@description.setter
def description(self, s):
self._description = str(s)
@property
def module_licenses(self):
"""A set of strings, a property, representing the license terms
of the module itself."""
return self._module_licenses
@module_licenses.setter
def module_licenses(self, ss):
if not isinstance(ss, set):
raise TypeError("module_licenses requires a set")
self._module_licenses = set([str(x) for x in ss])
def add_module_license(self, s):
"""Adds a module license to the set.
:param str s: License name
"""
self._module_licenses.add(str(s))
def del_module_license(self, s):
"""Removes the supplied license from the module licenses set.
:param str s: License name
"""
self._module_licenses.discard(str(s))
def clear_module_licenses(self):
"""Clears the module licenses set."""
self._module_licenses.clear()
@property
def content_licenses(self):
"""A set of strings, a property, representing the license terms
of the module contents."""
return self._content_licenses
@content_licenses.setter
def content_licenses(self, ss):
if not isinstance(ss, set):
raise TypeError("content_licenses requires a set")
self._content_licenses = set([str(x) for x in ss])
def add_content_license(self, s):
"""Adds a content license to the set.
:param str s: License name
"""
self._content_licenses.add(str(s))
def del_content_license(self, s):
"""Removes the supplied license from the content licenses set.
:param str s: License name
"""
self._content_licenses.discard(str(s))
def clear_content_licenses(self):
"""Clears the content licenses set."""
self._content_licenses.clear()
@property
def requires(self):
"""A dictionary property representing the required dependencies of
the module.
Keys are the required module names (strings), values are their
mininum required versions (also strings).
"""
return self._requires
@requires.setter
def requires(self, d):
if d and not isinstance(d, dict):
raise TypeError("Incorrect data type passed for requires")
if d:
self._requires = { str(k) : str(v) for k, v in d.items() }
else:
self._requires = dict()
def add_requires(self, n, v):
"""Adds a required module dependency.
:param str n: Required module name
:param str v: Required module version
"""
self._requires[str(n)] = str(v)
update_requires = add_requires
def del_requires(self, n):
"""Deletes the dependency on the supplied module.
:param str n: Required module name
"""
if str(n) in self._requires:
del self._requires[str(n)]
def clear_requires(self):
"""Removes all required runtime dependencies."""
self._requires = dict()
@property
def buildrequires(self):
"""A dictionary property representing the required build dependencies
of the module.
Keys are the required module names (strings), values are their
minimum required versions (also strings).
"""
return self._buildrequires
@buildrequires.setter
def buildrequires(self, d):
if d and not isinstance(d, dict):
raise TypeError("Incorrect data type passed for buildrequires")
self._buildrequires = { str(k) : str(v) for k, v in d.items() }
def add_buildrequires(self, n, v):
"""Adds a module build dependency.
:param str n: Required module name
:param str v: Required module version
"""
self._buildrequires[str(n)] = str(v)
update_buildrequires = add_buildrequires
def del_buildrequires(self, n):
"""Deletes the build dependency on the supplied module.
:param str n: Required module name
"""
if str(n) in self._buildrequires:
del self._buildrequires[str(n)]
def clear_buildrequires(self):
"""Removes all build dependencies."""
self._buildrequires = dict()
@property
def community(self):
"""A string property representing a link to the upstream community
for this module."""
return self._community
@community.setter
def community(self, s):
self._community = str(s)
@property
def documentation(self):
"""A string property representing a link to the upstream
documentation for this module."""
return self._documentation
@documentation.setter
def documentation(self, s):
self._documentation = str(s)
@property
def tracker(self):
"""A string property representing a link to the upstream bug tracker
for this module."""
return self._tracker
@tracker.setter
def tracker(self, s):
self._tracker = str(s)
@property
def xmd(self):
"""A dictionary property containing user-defined data."""
return self._xmd
@xmd.setter
def xmd(self, d):
if d and not isinstance(d, dict):
raise TypeError("Incorrect data supplied for xmd")
self._xmd = d
@property
def profiles(self):
"""A dictionary property representing the module profiles."""
return self._profiles
@profiles.setter
def profiles(self, o):
if not isinstance(o, dict):
raise TypeError("Incorrect data types passed for profiles")
self._profiles = o
@property
def components(self):
"""A ModuleComponents instance property representing the components
defining the module."""
return self._components
@components.setter
def components(self, o):
if o and not isinstance(o, ModuleComponents):
raise TypeError("Incorrect data type passed for components")
self._components = o
| xsuchy/modulemd | modulemd/__init__.py | __init__.py | py | 25,774 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "yaml.safe_load",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "modulemd.profile.ModuleProfile",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "modulemd.components.ModuleComponents",
"line_number": 107,
"usage_type": "call"
},
{
... |
20828379656 | from users.models import MossaicUser
from risk_models.models import *
from projects.models import *
from communities.models import *
from django import forms
from django.forms.models import *
from django.forms import ModelForm, Textarea
from django.forms.widgets import HiddenInput
from django.forms.models import inlineformset_factory
from django.forms.models import modelformset_factory
from django.forms.models import BaseInlineFormSet
from django.forms import ModelForm, Textarea
from django.forms.widgets import HiddenInput
# TODO: refactor this form -- the metric-type dispatch is duplicated between
# is_filled() and clean(), and the user lookup in save() is clumsy.
class SurveyItem(ModelForm):
    """ModelForm for a single survey Observation.

    The bookkeeping fields (community, user, timestamp, metric) are hidden;
    only the answer field matching the metric's type is filled in by the
    user.  The observation is stamped with the requesting user on save.
    """

    class Meta:
        model = Observation
        # NOTE(review): modern Django requires an explicit `fields`/`exclude`
        # on ModelForm Meta -- confirm against the Django version in use.
        widgets = {
            'community': HiddenInput,
            'user': HiddenInput,
            'timestamp': HiddenInput,
            'metric': HiddenInput
        }

    def __init__(self, *args, **kwargs):
        # Pop our extra kwarg before ModelForm sees it; the request is kept
        # so save() can attribute the observation to the logged-in user.
        self.request = kwargs.pop('request', None)
        return super(SurveyItem, self).__init__(*args, **kwargs)

    def is_filled(self):
        """Return True if the answer matching the metric's type was given."""
        cleaned_data = self.cleaned_data
        metric = cleaned_data.get("metric")
        if metric.metricType == 'M':
            # multiple-choice metric: a choice must be selected
            return cleaned_data.get("mcValue") is not None
        if metric.metricType == 'D':
            # direct-value metric: a value must be entered
            return cleaned_data.get("value") is not None
        return False

    def clean(self):
        """Validate that the field matching the metric's type was answered."""
        cleaned_data = self.cleaned_data
        metric = cleaned_data.get("metric")
        if metric.metricType == 'M':
            if cleaned_data.get("mcValue") is None:
                raise forms.ValidationError('No choice made')
        elif metric.metricType == 'D':
            if cleaned_data.get("value") is None:
                raise forms.ValidationError('No choice made')
        # Always hand the cleaned data back (the original returned None for
        # unrecognised metric types).
        return cleaned_data

    def save(self, *args, **kwargs):
        """Save the observation and return it.

        Forces commit=False so the user can be attached before the final
        save.  Bug fix: the original never returned the instance, breaking
        the ModelForm.save() contract for callers using its return value.
        """
        kwargs['commit'] = False
        obs = super(SurveyItem, self).save(*args, **kwargs)
        if self.request:
            obs.user = MossaicUser.objects.get(pk=self.request.user.id)  # there must be a better way...
        obs.save()
        return obs
{
"api_name": "django.forms.ModelForm",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.forms.widgets.HiddenInput",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.forms.widgets.HiddenInput",
"line_number": 25,
"usage_type": "name"
},
... |
13035732107 | from pyramid.httpexceptions import HTTPError
from pyramid.httpexceptions import HTTPNotFound
from oekocms.views import error_view
from oekocms.views import exception_decorator
from fanstatic import Library
from fanstatic import Resource
import kotti.static as ks
# Fanstatic resource library serving this package's static assets.
lib_oekocms = Library('oekocms', 'static')
# Stylesheets layered on top of Kotti's own view/edit CSS (see `depends`).
view_css = Resource(lib_oekocms, "layout.css", depends=[ks.view_css])
edit_css = Resource(lib_oekocms, "edit.css", depends=[ks.edit_css])
nav_css = Resource(lib_oekocms, "primary-navigation.css")
def add_fanstatic_resources(config):
    """Queue this package's stylesheets for Kotti's view and edit modes."""
    for needed, resource in (
            (ks.view_needed, view_css),
            (ks.view_needed, nav_css),
            (ks.edit_needed, edit_css),
    ):
        needed.add(resource)
def includeme(config):
    """Pyramid include hook: wire up this package's configuration.

    Loads the ZCML configuration, registers custom error views, exposes
    the static assets and overrides Kotti's assets with local ones.
    """
    config.include('pyramid_zcml')
    config.load_zcml('configure.zcml')
    # Custom error pages: a dedicated 404 template, a generic template for
    # other HTTP errors, and a catch-all (wrapped by exception_decorator)
    # for uncaught exceptions.
    config.add_view(
        error_view,
        context=HTTPNotFound,
        renderer='oekocms:templates/view/error-404.pt',
    )
    config.add_view(
        error_view,
        context=HTTPError,
        renderer='oekocms:templates/view/error.pt',
    )
    config.add_view(
        error_view,
        decorator=exception_decorator,
        context=Exception,
        renderer='oekocms:templates/view/error.pt',
    )
    config.add_static_view('static-oekocms', 'oekocms:static')
    # Prefer this package's Kotti template/asset overrides over Kotti's own.
    config.override_asset('kotti', 'oekocms:kotti-overrides/')
    add_fanstatic_resources(config)
{
"api_name": "fanstatic.Library",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "fanstatic.Resource",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "kotti.static.view_css",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "kotti... |
import sys
from collections import deque

# Josephus problem (BOJ 11866): N people stand in a circle and every K-th
# person is removed until nobody is left; print the removal order.
N, K = map(int, sys.stdin.readline().rstrip().split(' '))

circle = deque(range(1, N + 1))
eliminated = []
while circle:
    # Move the K-1 people in front of the victim to the back of the circle,
    # then remove the K-th person.
    circle.rotate(-(K - 1))
    eliminated.append(circle.popleft())

# Output format required by the judge: "<a, b, c>"
print('<' + ', '.join(map(str, eliminated)) + '>')
{
"api_name": "collections.deque",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 4,
"usage_type": "attribute"
}
] |
41321025697 | import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import pickle
from joblib import dump, load
from sklearn import ensemble
from sklearn import metrics
import numpy as np
def main():
GrBosstClass = load_GRB()
Bagging = load_Bagging()
data = loadSet()
defaulData = loadDefaultSet()
currentModelSelected = 0
vl15H = 0
speedWind = 0
cloydy3H = 0
vl9H = 0
rainToday = 1
sun = 0
tempAir15H = 0
dav15H = 0
minTemp = 0
maxTemp = 0
st.title("Модуль С")
st.markdown("<style> .big-font {font-size:30px !important; }</style>", unsafe_allow_html=True)
st.sidebar.markdown('<p class="big-font"> <b>Модели</b> </p>', unsafe_allow_html=True)
modelSelect = st.sidebar.radio("Выберите модель машинного обучения", ["Градиентный бустинг", "Бэггиннг"])
st.sidebar.markdown('<p class="big-font"> <b>Дата сеты</b> </p>', unsafe_allow_html=True)
dataSetSelect = st.sidebar.radio("Выберите дата сет", ["Исходный", "После препроцесса"])
if dataSetSelect == "После препроцесса":
'''В датасете после препроцесса остались только самые важные десять полей. Также перекодированы столбцы: локация, направление ветра и был ли сегодня дождь.'''
st.write(data.head(10))
if dataSetSelect == "Исходный":
''' Исходный датасет имеет полный список полей. Также в нём не заполнены пустые значения и ни один признак не перекодирован.'''
st.write(defaulData.head(10))
if modelSelect == "Градиентный бустинг":
currentModelSelected = GrBosstClass
"Предсказание градиентного бустинга"
"Градиентный бустинг предсказывает с точностью в 85%"
if modelSelect == "Бэггиннг":
currentModelSelected = Bagging
"Предсказание бэггиннга"
"Бэггинг предсказывает с точностью в 84%"
"Для предсказания дождя на завтрашний день исопльзуются основные 10 параметров."
""
inpType = st.radio("Как будете вводить данные?", ['Строкой', "Буду заполнять каждое поле отдельно!"])
if inpType == "Буду заполнять каждое поле отдельно!":
vl15H = st.number_input("Влажность воздуха в 15:00")
speedWind = st.number_input("Скорость порыва ветра")
cloydy3H = st.number_input("Облачность в 15:00 (от 0 до 9)")
vl9H = st.number_input("Влажность в 9 утра")
rainTodayStr = st.radio("Был ли сегодня дождь?", ["Да" , "Нет"])
rainToday = 1
if rainTodayStr =="Да":
rainToday = 1
else:
rainToday = 0
sun = st.number_input("Солнечный свет (от 0 до 13,9)")
tempAir15H = st.number_input("Температура воздуха в 15:00")
dav15H = st.number_input("Давление в 15:00")
minTemp = st.number_input("Минимальная температура воздуха")
maxTemp = st.number_input("Максимальная температура воздуха")
if inpType == "Строкой":
a = st.text_input('Ввести данные строкой:', help= "Ввдетие числа через запятую. Без точек и запятых в конце строки!")
if a:
a = a.split(',')
a = [float(i) for i in a]
polya = a
else:
polya = int(currentModelSelected.predict([[vl15H,speedWind,cloydy3H,vl9H,rainToday,sun,tempAir15H,dav15H,minTemp,maxTemp]]))
"Введенные данные: " + str(vl15H) + ", " + str(speedWind) + ", " + str(cloydy3H) + ", " + str(vl9H) + ", " +str(rainToday) + ", " + str(sun) + ", " + str(tempAir15H) + ", " + str(dav15H) + ", " + str(minTemp) + ", " + str(maxTemp) + "."
if st.button("Рассчитать прогноз", key=None, help="Предсказать будет ли дождь завтра при помощи метода " + modelSelect):
res = polya
if res == 0:
st.markdown("<p style=\"font-size:30px\">Скорее всего завтра не будет дождя 😄</p>", unsafe_allow_html=True)
else:
st.markdown("<p style=\"font-size:30px\">Скорее всего завтра будет дождь... 😔</p>", unsafe_allow_html=True)
@st.cache
def loadSet():
df = pd.read_csv("data/watherPreProcces.csv")
return df
### models
@st.cache
def load_GRB():
with open('models/GrBosstClass.pkl', 'rb') as pkl_file:
grb = pickle.load(pkl_file)
return grb
@st.cache(allow_output_mutation=True)
def load_Bagging():
with open('models/Bagging.pkl', 'rb') as pkl_file:
Bagging = pickle.load(pkl_file)
return Bagging
@st.cache
def loadDefaultSet():
df = pd.read_csv("data/weather.csv")
return df
if __name__ == "__main__":
main()
| sanloid/WheaterPrediction | App.py | App.py | py | 5,315 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.title",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.markdown",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "streaml... |
73631206115 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="outlierpack-NG",
version="0.1.0",
author="Nikhil Gupta",
author_email="ngupta_be17@thapar.edu",
description="Removing outliers from a pandas dataframe",
url='https://github.com/CachingNik/OutlierPack',
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
entry_points = {
'console_scripts': ['Outcli=outlib.outcli:main'],
},
keywords = ['CLI', 'OUTLIER', 'Data', 'outlier removal'],
python_requires='>=2.7',
)
| CachingNik/OutlierPack | setup.py | setup.py | py | 629 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 15,
"usage_type": "call"
}
] |
27990442786 | import requests
import threading
import sys
import os
from bs4 import BeautifulSoup
from pandas import DataFrame
from functools import reduce
if len(sys.argv) == 1:
CSV_FNAME = 'volunteermatch_data.csv'
else:
CSV_FNAME = sys.argv[1]
URL_PART1 = 'https://www.volunteermatch.org/search/'
locations = ['New+York%2C+NY%2C+USA', 'Chicago%2C+IL%2C+USA',
'San+Francisco%2C+CA%2C+USA', 'Boston%2C+MA%2C+USA',
'Houston%2C+TX%2C+USA', 'Los+Angeles%2C+CA%2C+USA',
'Philadelphia%2C+PA%2C+USA', 'Seattle%2C+WA%2C+USA',
'Atlanta%2C+GA%2C+USA', 'Dallas%2C+TX%2C+USA',
'Portland%2C+OR%2C+USA', 'Cleveland%2C+OH%2C+USA',
'Denver%2C+CO%2C+USA', 'Washington%2C+DC%2C+USA']
categories = {
'community': 25,
'arts_culture': 34,
'advocacy': 23,
'seniors': 12,
'homeless_housing': 7,
'health_med': 11,
'child_youth': 22,
'animals': 30,
'edu_lit': 15,
'crisis_support': 14,
'women': 3
}
dfs = []
def url_to_df(urls, cat):
opps = []
for url in urls:
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html5lib')
table = soup.find('div', attrs={'class': 'search-page'})
for row in table.findAll('div', attrs={'class': 'searchitem PUBLIC'}):
opp = {}
opp['title'] = row.h3.text.strip()
opp['org'] = row.find_all('a')[1].text.strip()
desc_search = row.find_all('p')
desc_index = 0
for i in range(len(desc_search)):
if len(desc_search[i].getText()) > 100:
desc_index = i
opp['desc'] = row.find_all('p')[desc_index].getText().strip()
opp['desc'] = opp['desc'].split('\n')[0]
opp[cat] = True
opps.append(opp)
df = DataFrame(opps)
df = df.drop_duplicates(keep=False)
dfs.append(df)
threads = []
for cat, cat_v in categories.items():
urls = []
for loc in locations:
url = URL_PART1 + f'?categories={cat_v}&' \
+ f'l={loc}'
urls += [url + f'&s={s}' for s in [x*10+1 for x in range(100)]]
t = threading.Thread(target=url_to_df, args=(urls, cat))
threads.append(t)
t.start()
for t in threads:
t.join()
final_df = reduce(lambda df1, df2: df1.merge(df2, left_on=['title', 'org', 'desc'],
right_on=['title', 'org', 'desc'],
how='outer'), dfs)
final_df = final_df.fillna(False)
final_df.to_csv(CSV_FNAME)
| UChicago-Tech-Team-In2It/scraping | scrape.py | scrape.py | py | 2,580 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line... |
10273949450 | """
wordcloud.py: A reusable library for word cloud visualizations
"""
"""
from PIL import Image
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd
def make_wordcloud(results, vid_dict):
cloud = WordCloud(background_color="white",width=1000,height=1000, max_words=10,relative_scaling=0.5,
normalize_plurals=False).generate_from_frequencies(results[vid_dict])
plt.imshow(cloud)
"""
import matplotlib.pyplot as plt
from wordcloud import WordCloud
word_could_dict = {'Git':100, 'GitHub':100, 'push':50, 'pull':10, 'commit':80, 'add':30, 'diff':10,
'mv':5, 'log':8, 'branch':30, 'checkout':25}
print(word_could_dict['Git'])
wordcloud = WordCloud(width = 1000, height = 500).generate_from_frequencies(word_could_dict)
plt.figure(figsize=(15,8))
plt.imshow(wordcloud)
plt.show() | johnmccarthy23/textbeast | wc.py | wc.py | py | 871 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "wordcloud.WordCloud",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplo... |
18204561723 | """URL patterns for Directory Services"""
from django.urls import path
# Must be full path import to allow including url patterns in project urls
from os2datascanner.projects.admin.import_services import views
urlpatterns = [
path('ldap/add/<uuid:org_id>',
views.LDAPAddView.as_view(),
name='add-ldap'),
path('ldap/edit/<uuid:pk>',
views.LDAPUpdateView.as_view(),
name='edit-ldap'),
path('ldap/test/connection',
views.verify_connection,
name='test-ldap-connection'),
path('ldap/test/authentication',
views.verify_authentication,
name='test-ldap-authentication'),
path('ldap/import/<uuid:pk>',
views.LDAPImportView.as_view(),
name='import-ldap'),
path('msgraph-organization/add/<uuid:org_id>',
views.MSGraphAddView.as_view(),
name='add-msgraph'),
path('msgraph-organization/add/<uuid:org_id>/',
views.MSGraphAddView.as_view(),
name='add-msgraph'),
path('msgraph-organization/edit/<uuid:pk>',
views.MSGraphUpdateView.as_view(),
name='edit-msgraph'),
path('msgraph-organization/import/<uuid:pk>',
views.MSGraphImportView.as_view(),
name='import-msgraph'),
path('os2mo-organization/add/<uuid:org_id>',
views.OS2moAddView.as_view(),
name='add-os2mo'),
path('os2mo-organization/import/<uuid:pk>',
views.OS2moImportView.as_view(),
name='import-os2mo'),
]
| os2datascanner/os2datascanner | src/os2datascanner/projects/admin/import_services/urls.py | urls.py | py | 1,526 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os2datascanner.projects.admin.import_services.views.LDAPAddView.as_view",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os2datascanner.projects.admin.import_services.views.LDAPA... |
33519675992 | import wx
from wx.lib.agw import customtreectrl
from wx.lib.agw.aui import GetManager
from ..controller.project import Project
from ..pluginapi import Plugin
from ..pluginapi.plugin import ActionInfo
class FileExplorerPlugin(Plugin):
"""Provides a tree view for Files and Folders. Opens selected item with mouse right-click."""
datafile = property(lambda self: self.get_selected_datafile())
defaults = {"opened": True,
"docked": True,
"own colors": False
}
def __init__(self, application, controller=None):
Plugin.__init__(self, application, default_settings=self.defaults)
self._app = application
self.settings = self._app.settings.config_obj['Plugins']['File Explorer']
self._parent = wx.App.Get().GetTopWindow()
self._filemgr = self.filemgr
self._filemgr.SetThemeEnabled(True)
self._mgr = GetManager(self._filemgr)
self._controller = controller
self._pane = None
self._filetreectrl = None
self.font = self._filemgr.GetFont()
def register_frame(self, parent=None):
if parent:
self._parent = parent
if self._mgr.GetPane("file_manager") in self._mgr._panes:
register = self._mgr.InsertPane
else:
register = self._mgr.AddPane
register(self._filemgr, wx.lib.agw.aui.AuiPaneInfo().Name("file_manager").
Caption("Files").LeftDockable(True).CloseButton(True))
self._mgr.Update()
def enable(self):
self.register_action(ActionInfo('View', 'View File Explorer', self.on_show_file_explorer,
shortcut='F11',
doc='Show File Explorer panel',
position=1))
# self.save_setting('opened', True)
if self.opened:
self.on_show_file_explorer(None)
def close_tree(self):
self._mgr.DetachPane(self._filemgr)
self._filemgr.Hide()
self._mgr.Update()
self.save_setting('opened', False)
def disable(self):
self.close_tree()
# self.save_setting('opened', False)
self.unsubscribe_all()
self.unregister_actions()
def is_focused(self):
return self._filemgr.HasFocus()
def on_show_file_explorer(self, event):
_ = event
if not self._parent:
self._parent = wx.App.Get().GetWindow() # self.frame
if not self._filemgr: # This is not needed because file explorer is always created
self._filemgr = FileExplorer(self._parent, self._controller)
self._pane = self._mgr.GetPane(self._filemgr)
global_settings = self._app.settings.config_obj['General']
apply_global = global_settings['apply to panels']
use_own = self.settings['own colors']
if apply_global or not use_own:
html_background = self.settings.get('background help', (240, 242, 80))
html_foreground = self.settings.get('foreground text', (7, 0, 70))
else:
html_background = self.settings.get('background', (240, 242, 80))
html_foreground = self.settings.get('foreground', (7, 0, 70))
html_font_face = self.settings.get('font face', '')
html_font_size = self.settings.get('font size', 11)
self._filetreectrl = self._filemgr.GetTreeCtrl()
self._filemgr.Show(True)
self._filemgr.SetMinSize(wx.Size(200, 225))
self._mgr.DetachPane(self._filemgr)
self._mgr.AddPane(self.filemgr,
wx.lib.agw.aui.AuiPaneInfo().Name("file_manager").
Caption("Files").LeftDockable(True).
CloseButton(True))
self._filemgr.SetBackgroundStyle(wx.BG_STYLE_SYSTEM)
self._filemgr.SetBackgroundColour(html_background)
self._filemgr.SetForegroundColour(html_foreground)
self.font = self._filemgr.GetFont()
self.font.SetFaceName(html_font_face)
self.font.SetPointSize(html_font_size)
self._filemgr.SetFont(self.font)
self._filemgr.Refresh()
self._filetreectrl.SetBackgroundColour(html_background)
self._filetreectrl.SetForegroundColour(html_foreground)
self._filetreectrl.SetFont(self.font)
self._filetreectrl.Refresh()
self._filemgr.Raise()
self._mgr.Update()
self.save_setting('opened', True)
self.update_tree()
def update_tree(self):
if not self._filemgr:
return
self._filemgr.update_tree()
class FileExplorer(wx.GenericDirCtrl):
def __init__(self, parent, controller=None):
wx.GenericDirCtrl.__init__(self, parent, id=-1, size=(200, 225), style=wx.DIRCTRL_3D_INTERNAL)
self._controller = controller
self.SetThemeEnabled(True)
self.Refresh()
def update_tree(self):
if isinstance(self._controller, Project):
if self._controller.data and len(self._controller.data.directory) > 1:
self.SelectPath(self._controller.data.source)
try:
self.ExpandPath(self._controller.data.source)
except Exception:
pass
self.Refresh()
self.Update()
| robotframework/RIDE | src/robotide/ui/fileexplorerplugin.py | fileexplorerplugin.py | py | 5,378 | python | en | code | 910 | github-code | 1 | [
{
"api_name": "pluginapi.Plugin",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pluginapi.Plugin.__init__",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pluginapi.Plugin",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "wx.App.Ge... |
7757712695 | #coding=utf-8
import sys
import pygame
from Bullet import Bullet
from Alien import Alien
from time import sleep
def check_keydown_events(event,ai_settings,screen,ship,bullets,stats,play_Button,aliens,sb):
"""按键响应"""
#按下的是方向键右
if event.key == pygame.K_RIGHT:
ship.moving_right = True
#按下的是方向键左
elif event.key == pygame.K_LEFT:
ship.moving_left = True
#按下的是空格
elif event.key == pygame.K_SPACE:
fire_bullet(ai_settings,screen,ship,bullets)
#按下q
elif event.key == pygame.K_q:
sys.exit()#退出游戏
#按下p
elif event.key == pygame.K_p:
start_game(ai_settings,screen,stats,play_Button,ship,aliens,bullets,sb)
def check_play_button(ai_settings,screen,stats,play_Button,ship,aliens,bullets,sb,mouse_x,mouse_y):
"""鼠标点击play按钮时的相应"""
if play_Button.rect.collidepoint(mouse_x,mouse_y) and not stats.game_action:
start_game(ai_settings,screen,stats,play_Button,ship,aliens,bullets,sb)
def start_game(ai_settings,screen,stats,play_Button,ship,aliens,bullets,sb):
"""开始游戏"""
#改变游戏是否开开始的标识为true
stats.game_action = True
#重置游戏
stats.reset_stats()
#清空外星人和子弹
aliens.empty()
bullets.empty()
#创建一组外星人
create_fleet(ai_settings,screen,ship,aliens)
#创建飞船
ship.center_ship()
#隐藏光标
pygame.mouse.set_visible(False)
#重置速度
ai_settings.initialize_dynamic_settings()
#重新设置计分
sb.prep_level()
sb.prep_score()
sb.prep_high_score()
sb.prep_ship()#显示剩余飞机数量
def fire_bullet(ai_settings,screen,ship,bullets):
"""发射子弹"""
if len(bullets) < ai_settings.bullet_allowed:#如果子弹数量没有超过限制
new_bullet = Bullet(ai_settings,screen,ship)#创建一个新子弹
bullets.add(new_bullet)#添加到
def check_keyup_events(event,ship):
"""按键松开"""
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
def check_events(ai_settings,aliens,screen,ship,bullets,stats,play_Button,sb):
"""按键响应事件"""
for event in pygame.event.get():
if event.type == pygame.QUIT:#点击窗口关闭按钮
sys.exit()#退出游戏
#检测键盘有按键按下
elif event.type == pygame.KEYDOWN:
check_keydown_events(event,ai_settings,screen,ship,bullets,stats,play_Button,aliens,sb)
#检测键盘是否有按键松开
elif event.type == pygame.KEYUP:
check_keyup_events(event,ship)
#检测是否有鼠标按下
elif event.type == pygame.MOUSEBUTTONDOWN:
#获取鼠标点击的位置
mouse_x,mouse_y = pygame.mouse.get_pos()
check_play_button(ai_settings,screen,stats,play_Button,ship,aliens,bullets,sb,mouse_x,mouse_y)
def update_screen(ai_settings,screen,ship,stats,aliens,bullets,play_Button,sb):
"""更新动画"""
screen.fill(ai_settings.screen_color)#为屏幕填充颜色,RGB
#根据子弹的大小位置等信息绘制子弹
for bullet in bullets:
bullet.draw_bullet()
#绘制飞船
ship.blitem()
#自动根据编组内每个成员的rect绘制
aliens.draw(screen)
if not stats.game_action:
play_Button.draw_button()
#刷新分数
sb.show_score()
#让最近绘制的屏幕可见
pygame.display.flip()#不断擦去旧屏幕,显示新屏幕,不断刷新,形成动画
def update_bullet(ai_settings,screen,ship,stats,aliens,bullets,sb):
"""刷新子弹"""
#自动对编组中的每一个成员执行update
bullets.update()
#对消失的子弹进行删除
for bullet in bullets:
if bullet.rect.bottom <= 0:
bullets.remove(bullet)
check_bullet_alien_collisions(ai_settings,screen,ship,stats,aliens,bullets,sb)
def check_bullet_alien_collisions(ai_settings,screen,ship,stats,aliens,bullets,sb):
"""外星人与子弹发生碰撞"""
#检查外星人与子弹是否产生碰撞
collisions = pygame.sprite.groupcollide(bullets,aliens,True,True)#后面两个布尔值的意思是若发生碰撞则两个元素都删除,
#若改为false,true则发生碰撞时子弹不消失,外星人消失
if collisions:
for aliens in collisions.values():
stats.score+=ai_settings.alien_points * len(aliens)
sb.prep_score()
check_high_score(stats,sb)
if len(aliens) == 0:#如果外星人全部消失则重新创建一组外星人
bullets.empty()#清除子弹
ai_settings.increase_speed()
create_fleet(ai_settings,screen,ship,aliens)
stats.level+=1#消灭一组外星人 等级加一
sb.prep_level()#刷新等级
ai_settings.bullet_with+=0.5
def update_aliens(ai_settings,stats,screen,ship,aliens,bullets,sb):
"""更新外星人"""
check_fleet_edges(ai_settings,aliens)
aliens.update()
#如果外星人与飞船相撞
if pygame.sprite.spritecollideany(ship,aliens):
ship_hit(ai_settings,stats,screen,ship,aliens,bullets,sb)
#到屏幕底端
check_aliens_bottom(ai_settings,stats,screen,ship,aliens,bullets,sb)
def ship_hit(ai_settings,stats,screen,ship,aliens,bullets,sb):
"""外星人与飞船相撞"""
if stats.ships_left > 1:
stats.ships_left-=1#飞船数减一
aliens.empty()#外星人重置
bullets.empty()#子弹重置
create_fleet(ai_settings,screen,ship,aliens)#创建新的外星人
ship.center_ship()#重置飞船位置
stats.game_action = True
sleep(0.1)#暂停0.1s
sb.prep_ship()#更新当前飞机图标
else :
stats.game_action = False
pygame.mouse.set_visible(True)
def get_number_aliens_x(ai_settings,alien_width):
"""计算每行外星人数"""
available_space_x = ai_settings.screen_width - (3 * alien_width)#减去一个外星人的位置
number_aliens_x = int(available_space_x / (2 * alien_width))#能放多少个外星人
return number_aliens_x
def create_alien(ai_settings,screen,aliens,alien_number,number_row):
"""创建一个外星人,并将它放到分组里"""
alien = Alien(ai_settings,screen)#创建一个外星人
alien_width = alien.rect.width
alien.x = alien_width + 2 * alien_width * alien_number#位置,每个外星人之间间隔一个外星人的距离
alien.rect.x = alien.x#设置位置,外星人的位置由rect中的值决定
alien.rect.y =3* alien.rect.height + 2 * alien.rect.height * number_row
aliens.add(alien)#添加到编组中
def get_number_rows(ai_settings,ship_height,alien_height):
"""计算可容纳多少行外星人"""
available_space_y = ai_settings.screen_height - 4* alien_height - ship_height
#窗口高度减去外星人与最上面边距之间的距离,外星人与飞船之间的距离(外星人高度的三倍),飞船的高度
number_rows = available_space_y / (2 * alien_height)#减后的距离除以外星人所占的高度(每行外星人之间有一行外星人高度的距离)
return number_rows
def create_fleet(ai_settings,screen,ship,aliens):
"""创建一组外星人"""
alien = Alien(ai_settings,screen)#生成外星人单个实例
alien_width = alien.rect.width#外星人宽
number_aliens_x = get_number_aliens_x(ai_settings,alien_width)#一行创建多少个外星人
number_rows = int(get_number_rows(ai_settings,ship.rect.height,alien.rect.height))#创建多少行外星人
for number_row in range(number_rows):
for alien_number in range(number_aliens_x):#从0开始计数
create_alien(ai_settings,screen,aliens,alien_number,number_row)#创建外星人
def check_fleet_edges(ai_settings,aliens):
"""外星人到达屏幕边缘做出的反应"""
for alien in aliens.sprites():
if alien.check_edges():
change_fleet_direction(ai_settings,aliens)
break
def change_fleet_direction(ai_settings,aliens):
"""外星人向下移动,并改变移动方向"""
for alien in aliens.sprites():
alien.rect.y+=ai_settings.fleet_drop_speed#向下移动
ai_settings.fleet_direction *= -1#改变方向
def check_aliens_bottom(ai_settings,stats,screen,ship,aliens,bullets,sb):
"""检查是否有外星人到达屏幕底端"""
screen_rect = screen.get_rect()#获取屏幕rect对象
for alien in aliens.sprites():
if alien.rect.bottom >= screen_rect.bottom:#当外星人的底部大于或等于窗口的底部时,说明外星人撞到屏幕底部
ship_hit(ai_settings,stats,screen,ship,aliens,bullets,sb)
break
def check_high_score(stats,sb):
"""检查分数,获取最高分"""
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_high_score() | zXin1112/Python-Practice | alien_invasion/alien_invasion/game_function.py | game_function.py | py | 9,113 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.K_RIGHT",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_q... |
35733666865 | # -*- coding: utf-8 -*-
# @Time : 2020/11/22 8:50
# @Author : Zhongyi Hua
# @FileName: ssr_utils.py
# @Usage:
# @Note:
# @E-mail: njbxhzy@hotmail.com
from Bio import SeqIO
from os import remove as del_file
import multiprocessing as multi
from tqdm import tqdm
def build_rep_set(repeat_file, unit_cutoff=None, motif_length_cutoff=None):
"""
Outputs the repeats info dictionary used by the get_ssrs function.
Takes list of repeat motifs from repeats file(output by generate_repeats function) as input.
Creates a dictionary with expanded repeat as the key and (class, motif_length, strand) as values.
Works either by "length_cutoff=" or by "unit_cutoff=" arguments.
"""
repeats_out = dict()
cutoffs = set()
if unit_cutoff is None:
unit_cutoff = {1: 9, 2: 6, 3: 6, 4: 6, 5: 5, 6: 4}
for line in repeat_file:
motif_dict = dict()
L = line.strip().split('\t')
motif_length = int(L[2])
if motif_length < motif_length_cutoff:
continue
motif = L[0]
motif = motif*unit_cutoff[motif_length]
cutoffs.add(len(motif))
motif_dict['class'] = L[1]
motif_dict['motif_length'] = motif_length
motif_dict['strand'] = L[3]
repeats_out[motif] = motif_dict
repeats_out['cutoff'] = sorted(list(cutoffs))
return repeats_out
def get_ssrs(seq_record, repeats_info, out):
"""Native function that identifies repeats in fasta files."""
if type(out) == str:
out_file = open(out, 'w')
else:
out_file = out
length_cutoffs = repeats_info['cutoff']
input_seq = str(seq_record.seq).upper()
input_seq_length = len(input_seq)
for length_cutoff in length_cutoffs:
fallback = length_cutoff - 1
sub_start = 0 # substring start
sub_stop = sub_start + length_cutoff # substring stop
while sub_stop <= input_seq_length:
sub_stop = sub_start + length_cutoff
sub_seq = input_seq[sub_start:sub_stop]
if sub_seq in repeats_info:
match = True
repeat_data = repeats_info[sub_seq]
motif_length = repeat_data['motif_length']
rep_class = repeat_data['class']
strand = repeat_data['strand']
offset = length_cutoff % motif_length
repeat_seq = input_seq[sub_start+offset:sub_start+offset+motif_length]
i = 0
while match:
j = sub_stop
if sub_stop >= input_seq_length:
match = False
match_length = sub_stop - sub_start
num_units = int(match_length/motif_length)
print(seq_record.id, sub_start + 1, sub_stop + 1, rep_class, match_length, strand, num_units, sub_seq[:motif_length], seq_record.id + '_' + str(sub_start + 1), sep="\t", file=out_file)
sub_start = sub_stop - fallback
elif input_seq[j] == repeat_seq[i]:
sub_stop += 1
i += 1
if i >= motif_length:
i = 0
else:
match = False
match_length = sub_stop - sub_start
num_units = int(match_length/motif_length)
print(seq_record.id, sub_start + 1, sub_stop + 1, rep_class, match_length, strand, num_units, sub_seq[:motif_length], seq_record.id + '_' + str(sub_start + 1), sep="\t", file=out_file)
sub_start = sub_stop - fallback
else:
sub_start += 1
if type(out) == str:
out_file.close()
def fasta_ssrs(args, repeats_info):
"""
:param args:
:param repeats_info: return from build_rep_set function
:return:
"""
handle = open(args.input, 'r')
records = [ _ for _ in SeqIO.parse(handle, 'fasta')]
num_records = len(records)
file_output = open(args.output, 'w')
print('\t'.join(['seqid', 'start', 'end', 'class', 'length', 'strand', 'units', 'motif', 'ID']), file=file_output)
if args.threads > 1:
i = 0
pool = multi.Pool(processes=args.threads)
for record in records:
out_name = './temp_%s.tsv' % (i)
i += 1
pool.apply_async(get_ssrs, (record, repeats_info, out_name,))
pool.close()
pool.join()
# Concat all the output files into one.
temp_outs = tqdm(range(num_records), total=num_records)
for o in temp_outs:
name = './temp_%s.tsv' % (o)
temp_outs.set_description("Concatenating file: %d " % (o))
with open(name, 'r') as fh:
for line in fh:
print(line.strip(), file=file_output)
del_file(name)
elif args.threads == 1:
records = tqdm(records, total=num_records)
for record in records:
records.set_description("Processing %s" % (record.id))
get_ssrs(record, repeats_info, file_output)
| Hua-CM/IdenSSR | ssr_utils.py | ssr_utils.py | py | 5,154 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "Bio.SeqIO.parse",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"li... |
38907288611 | import math
import random
import pygame
import sys
from pygame.locals import *
from copy import deepcopy
pygame.init()
pygame.mixer.init()
width, height = 640, 480
screen = pygame.display.set_mode((width, height))
player1keys = [False, False, False, False]
player2keys = [False, False, False, False]
player1pos = [0] #Min: 0 Max: 2130
player2pos = [[615, 150, 3]]
ballpos = [300, 300]
ballvel = [-1, 1]
for num in range(0, 6):
player1pos.append(player1pos[num] + 17)
player2pos.append([player2pos[num][0], player2pos[num][1] - 17, 3])
UP, LEFT, DOWN, RIGHT = 0, 1, 2, 3
xMAX, yMAX, xMIN, yMIN = 614, 451, 0, 0
posMIN, posMAX = 0, 2130
hasBounced = False
player1 = pygame.image.load("sprites/player1.png")
player2 = pygame.image.load("sprites/player2.png")
ball = pygame.image.load("sprites/ball.png")
running = 1
exitcode = 0
while running:
screen.fill(0)
for box in player1pos:
if(box <= xMAX):
screen.blit(player1, [box + 1, yMIN + 1])
elif(box <= xMAX + yMAX):
screen.blit(player1, [xMAX + 1, box - xMAX + 1])
elif(box <= xMAX*2 + yMAX):
screen.blit(player1, [xMAX*2 + yMAX - box + 1, yMAX + 1])
else:
screen.blit(player1, [xMIN + 1, xMAX*2 + yMAX*2 - box + 1])
for box in player2pos:
if(box[2] == UP):
if(box[0] > xMAX):
box[2] = RIGHT
box[0] = xMAX
elif(box[0] < xMIN):
box[2] = LEFT
box[0] = xMIN
if(box[2] == LEFT):
if(box[1] > yMAX):
box[2] = DOWN
box[1] = yMAX
elif(box[1] < yMIN):
box[2] = UP
box[1] = yMIN
if(box[2] == DOWN):
if(box[0] > xMAX):
box[2] = RIGHT
box[0] = xMAX
elif(box[0] < xMIN):
box[2] = LEFT
box[0] = xMIN
if(box[2] == RIGHT):
if(box[1] > yMAX):
box[2] = DOWN
box[1] = yMAX
elif(box[1] < yMIN):
box[2] = UP
box[1] = yMIN
screen.blit(player2, [box[0], box[1]])
screen.blit(ball, ballpos)
ballpos[0] += ballvel[0]
ballpos[1] += ballvel[1]
#if(player1pos[0][2] == 1 and ballpos[0] <= player1pos[0][0] and ballpos[1] <= player1pos[5][1] and ballpos[1] >= player1pos[0][1]):
# ballvel[0] *= -1
#if(player1pos[0][2] == 3 and ballpos[0] >= player1pos[0][0] - 17 and ballpos[1] >= player1pos[5][1] and ballpos[1] <= player1pos[0][1]):
# ballvel[0] *= -1
if(ballpos[0] > xMAX - 8 or ballpos[0] < xMIN - 10):
if not hasBounced:
ballvel[0] *= -1
hasBounced = True
else:
hasBounced = False
if(ballpos[1] > yMAX - 5 or ballpos[1] < yMIN - 10):
if not hasBounced:
ballvel[1] *= -1
hasBounced = True
else:
hasBounced = False
pygame.display.flip()
for ev in pygame.event.get():
if ev.type == KEYDOWN:
if ev.key == K_w:
player1keys[0] = True
elif ev.key == K_a:
player1keys[1] = True
elif ev.key == K_s:
player1keys[2] = True
elif ev.key == K_d:
player1keys[3] = True
if ev.key == K_UP:
player2keys[0] = True
elif ev.key == K_LEFT:
player2keys[1] = True
elif ev.key == K_DOWN:
player2keys[2] = True
elif ev.key == K_RIGHT:
player2keys[3] = True
if ev.type == KEYUP:
if ev.key == K_w:
player1keys[0] = False
elif ev.key == K_a:
player1keys[1] = False
elif ev.key == K_s:
player1keys[2] = False
elif ev.key == K_d:
player1keys[3] = False
elif ev.key == K_UP:
player2keys[0] = False
elif ev.key == K_LEFT:
player2keys[1] = False
elif ev.key == K_DOWN:
player2keys[2] = False
elif ev.key == K_RIGHT:
player2keys[3] = False
if ev.type == pygame.QUIT:
pygame.quit()
sys.exit(0)
#Checks Player #1's Key inputs
if player1keys[0]:
if player1pos[6] > xMAX and player1pos[0] <= xMAX + yMAX:
for num in range(0,7):
player1pos[num] -= 2
if player1pos[6] > xMAX*2 + yMAX:
for num in range(0,7):
player1pos[num] += 2
if player1pos[num] > 2130:
player1pos[num] -= 2130
elif player1keys[2]:
if player1pos[6] > xMAX and player1pos[0] <= xMAX + yMAX:
for num in range(0,7):
player1pos[num] += 2
if player1pos[6] > xMAX*2 + yMAX:
for num in range(0,7):
player1pos[num] -= 2
elif player1keys[1]:
if player1pos[6] > xMAX + yMAX and player1pos[0] <= xMAX*2 + yMAX:
for num in range(0,7):
player1pos[num] += 2
if player1pos[0] < xMAX or player1pos[0] > 2015:
for num in range(0,7):
player1pos[num] -= 2
if player1pos[num] < 0:
player1pos[num] += 2130
elif player1keys[3]:
if player1pos[6] > xMAX + yMAX and player1pos[0] <= xMAX*2 + yMAX:
for num in range(0,7):
player1pos[num] -= 2
if player1pos[0] < xMAX or player1pos[0] > 2010:
for num in range(0,7):
player1pos[num] += 2
#Checks Player #2's key inputs
if player2keys[0]:
if player2pos[6][2] == LEFT:
for box in player2pos:
if(box[2] == LEFT):
box[1] -= 2
elif(box[2] == UP):
box[0] += 2
elif player2pos[0][2] == RIGHT:
for box in player2pos:
if(box[2] == RIGHT):
box[1] -= 2
elif(box[2] == UP):
box[0] -= 2
elif player2keys[2]:
if player2pos[0][2] == LEFT:
for box in player2pos:
if(box[2] == LEFT):
box[1] += 2
elif(box[2] == DOWN):
box[0] += 2
elif player2pos[6][2] == RIGHT:
for box in player2pos:
if(box[2] == RIGHT):
box[1] += 2
elif(box[2] == DOWN):
box[0] -= 2
elif player2keys[1]:
if player2pos[0][2] == UP:
for box in player2pos:
if(box[2] == UP):
box[0] -= 2
elif(box[2] == LEFT):
box[1] += 2
elif player2pos[6][2] == DOWN:
for box in player2pos:
if(box[2] == DOWN):
box[0] -= 2
elif(box[2] == LEFT):
box[1] -= 2
elif player2keys[3]:
if player2pos[6][2] == UP:
for box in player2pos:
if(box[2] == UP):
box[0] += 2
elif(box[2] == RIGHT):
box[1] += 2
elif player2pos[0][2] == DOWN:
for box in player2pos:
if(box[2] == DOWN):
box[0] += 2
elif(box[2] == RIGHT):
box[1] -= 2
pygame.time.delay(6)
| prfy9b/Ponkle | Ponkle/main.py | main.py | py | 7,606 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode"... |
11807782156 | import os
import re
import csv
import spacy
import convert2txt
nlp = spacy.load('skill-cv-bn')
def create_dic(filename):
    """Return a fresh result record for one CV file.

    The record holds the filename plus an empty list for each entity
    field the NER model can emit (filled later by get_info()).
    """
    fields = (
        'Name', 'Number', 'Email', 'Experience', 'Companies',
        'Designation', 'Responsibilities', 'Skills', 'Certification',
        'Degree', 'Institution', 'Edu_subject', 'Graduation_year',
    )
    record = {'Filename': filename}
    for field in fields:
        record[field] = []
    return record
def get_info(text, dic_name):
    """Run the NER model over *text* and collect entities into *dic_name*.

    *dic_name* is a record built by create_dic(); each recognised entity's
    text is appended to the list stored under its label.  Labels with no
    list-valued field (e.g. 'Filename', or labels the record does not
    know) are ignored — same behavior as the original 13-branch elif chain,
    but driven by the record's own keys.
    """
    nlp_text = nlp(text)
    for ent in nlp_text.ents:
        bucket = dic_name.get(ent.label_)
        # Only labels mapping to list-valued fields are collected.
        if isinstance(bucket, list):
            bucket.append(ent.text)
def extract_info(cv_dir):
    """Extract NER info from every supported CV file in *cv_dir*.

    .pdf/.doc/.docx files go through convert2txt; .txt files are read
    directly (utf-8-sig).  Files with other extensions are skipped.
    Returns a list of result records, one per processed file.
    Note: *cv_dir* is concatenated directly with the filename, so it is
    expected to end with a path separator (as in the call at the bottom
    of the file).
    """
    extracted_info = []
    converted_exts = ('.pdf', '.doc', '.docx')
    for file in os.listdir(cv_dir):
        text = None
        # Previously three identical branches; collapsed into one loop.
        for ext in converted_exts:
            if file.endswith(ext):
                text = convert2txt.extract_text(cv_dir + file, ext)
                break
        else:
            if file.endswith('.txt'):
                with open(cv_dir + file, encoding='utf-8-sig') as f:
                    text = f.read()
        if text is None:
            continue  # unsupported extension
        result = create_dic(file)
        get_info(text, result)
        extracted_info.append(result)
    return extracted_info
def process(txt):
    """Strip mojibake artefacts from *txt* and collapse whitespace runs.

    Bug/cleanup: the original also substituted r'â€+' (twice) and a second
    r'â+', but those patterns can never match after the very first r'â+'
    pass removed every 'â' character, so the dead substitutions are
    dropped here; the output is unchanged.
    """
    data = txt
    data = re.sub(r'â+', ' ', data)   # runs of the mojibake lead character
    data = re.sub(r'ï+', ' ', data)
    data = re.sub(r'\\u+', ' ', data)  # literal backslash-u escape residue
    data = re.sub(r'™+', ' ', data)
    data = re.sub(r'\s+', ' ', data)   # collapse all whitespace to one space
    return data
def create_csv_row_dic(headers, dic):
    """Build one CSV row dict from a result record.

    The first header (Filename) is copied as-is; every other field is a
    list whose entries are cleaned with process() and joined into a
    single comma-separated cell.
    """
    row = {}
    for idx, header in enumerate(headers):
        if idx == 0:
            row[header] = dic[header]
        else:
            # Fix: the original comprehension reused the outer loop
            # variable name ('i') for its element, shadowing it.
            row[header] = ', '.join(process(value) for value in dic[header])
    return row
def export_to_csv(csv_name, extracted_info):
    """Write all extracted CV records to *csv_name* as a UTF-8 CSV file."""
    headers = [
        'Filename',
        'Name',
        'Number',
        'Email',
        'Experience',
        'Companies',
        'Designation',
        'Responsibilities',
        'Skills',
        'Certification',
        'Degree',
        'Institution',
        'Edu_subject',
        'Graduation_year'
    ]
    with open(csv_name, 'w', newline='', encoding="utf-8") as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=headers)
        writer.writeheader()
        for record in extracted_info:
            writer.writerow(create_csv_row_dic(headers, record))
export_to_csv('skills_report.csv', extract_info('../cv/txt/')) | remon-rakibul/FitFinder | test-skill-cv-bn.py | test-skill-cv-bn.py | py | 4,365 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "spacy.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "convert2txt.extract_text",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "convert2txt.extract_tex... |
41055718886 | import json
from test_tools.example_stubber import ExampleStubber
class OrganizationsStubber(ExampleStubber):
    """
    Stub functions used by the AWS Organizations unit tests.

    Each stub expects specific request parameters and raises an error
    when the actual parameters differ from the expected ones.
    """

    def __init__(self, client, use_stubs=True):
        """
        Set up the stubber with a specific client.

        :param client: A Boto3 Organizations client.
        :param use_stubs: When True, intercept requests with stubs.
                          Otherwise, pass requests through to AWS.
        """
        super().__init__(client, use_stubs)

    @staticmethod
    def _make_policy_summary(policy):
        # Summary structure shared by several stubbed responses.
        summary = {
            "Id": policy["id"],
            "Arn": f'arn:aws:organizations::111111111111:policy/{policy["name"]}',
            "Name": policy["name"],
            "Description": policy["description"],
            "Type": policy["type"],
            "AwsManaged": False,
        }
        return summary

    def stub_create_policy(self, policy, error_code=None):
        params = {
            "Name": policy["name"],
            "Description": policy["description"],
            "Content": json.dumps(policy["content"]),
            "Type": policy["type"],
        }
        resp = {
            "Policy": {
                "PolicySummary": self._make_policy_summary(policy),
                "Content": json.dumps(policy["content"]),
            }
        }
        self._stub_bifurcator("create_policy", params, resp, error_code=error_code)

    def stub_list_policies(self, policy_filter, policies, error_code=None):
        params = {"Filter": policy_filter}
        resp = {"Policies": [self._make_policy_summary(p) for p in policies]}
        self._stub_bifurcator("list_policies", params, resp, error_code=error_code)

    def stub_describe_policy(self, policy, error_code=None):
        params = {"PolicyId": policy["id"]}
        resp = {
            "Policy": {
                "PolicySummary": self._make_policy_summary(policy),
                "Content": json.dumps(policy["content"]),
            }
        }
        self._stub_bifurcator("describe_policy", params, resp, error_code=error_code)

    def stub_attach_policy(self, policy_id, target_id, error_code=None):
        self._stub_bifurcator(
            "attach_policy",
            {"PolicyId": policy_id, "TargetId": target_id},
            error_code=error_code,
        )

    def stub_detach_policy(self, policy_id, target_id, error_code=None):
        self._stub_bifurcator(
            "detach_policy",
            {"PolicyId": policy_id, "TargetId": target_id},
            error_code=error_code,
        )

    def stub_delete_policy(self, policy_id, error_code=None):
        self._stub_bifurcator(
            "delete_policy", {"PolicyId": policy_id}, error_code=error_code
        )
| awsdocs/aws-doc-sdk-examples | python/test_tools/organizations_stubber.py | organizations_stubber.py | py | 3,333 | python | en | code | 8,378 | github-code | 1 | [
{
"api_name": "test_tools.example_stubber.ExampleStubber",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "json.d... |
19509604543 | import json
import pytest
from api.processing import units
from unyt import unyt_quantity
def test_unyt_encoder():
    """UnytEncoder should serialise a unyt_quantity as its string form."""
    payload = {"test_quantity": unyt_quantity(5, "Mpc")}
    encoded = json.dumps(payload, cls=units.UnytEncoder)
    assert encoded == '{"test_quantity": "5 Mpc"}'
def test_convert_swift_units_dict_types_success():
    """Every quantity (including nested 'units') should become a string."""
    units_dict = {
        "filename": "/a/test/file/path",
        "units": {
            "Unit current in cgs (U_I)": unyt_quantity(1.0, "statA"),
            "Unit length in cgs (U_L)": unyt_quantity(1e24, "cm"),
            "Unit mass in cgs (U_M)": unyt_quantity(1.9e43, "g"),
            "Unit temperature in cgs (U_T)": unyt_quantity(1.0, "K"),
            "Unit time in cgs (U_t)": unyt_quantity(1.09e19, "s"),
        },
        "mass": unyt_quantity(1.0e10, "Msun"),
        "length": unyt_quantity(1.0, "Mpc"),
        "time": unyt_quantity(1000.0, "Gyr"),
        "current": unyt_quantity(1.0, "statA"),
        "temperature": unyt_quantity(1.0, "K"),
    }
    converted = units.convert_swift_units_dict_types(units_dict)
    for value in converted.values():
        if isinstance(value, dict):
            assert all(isinstance(sub, str) for sub in value.values())
        else:
            assert isinstance(value, str)
def test_convert_swift_units_dict_types_failure():
    """A dict missing the nested 'units' section should be rejected."""
    incomplete = {
        "filename": "/a/test/file/path",
        "mass": unyt_quantity(1.0e10, "Msun"),
        "length": unyt_quantity(1.0, "Mpc"),
        "time": unyt_quantity(1000.0, "Gyr"),
        "current": unyt_quantity(1.0, "statA"),
        "temperature": unyt_quantity(1.0, "K"),
    }
    with pytest.raises(units.SWIFTUnytException):
        units.convert_swift_units_dict_types(incomplete)
def test_create_unyt_quantities_success():
    """String-valued quantities should round-trip into unyt_quantity objects."""
    raw = {
        "filename": "/a/test/file/path",
        "units": {
            "Unit current in cgs (U_I)": "1.0 statA",
            "Unit length in cgs (U_L)": "1e+24 cm",
            "Unit mass in cgs (U_M)": "1.9e+43 g",
            "Unit temperature in cgs (U_T)": "1.0 K",
            "Unit time in cgs (U_t)": "1.09e+19 s",
        },
        "mass": "10000000000.0 Msun",
        "length": "1.0 Mpc",
        "time": "1000.0 Gyr",
        "current": "1.0 statA",
        "temperature": "1.0 K",
    }
    converted = units.create_unyt_quantities(raw)
    for key, value in converted.items():
        if isinstance(value, dict):
            assert all(isinstance(sub, unyt_quantity) for sub in value.values())
        elif key != "filename":
            assert isinstance(value, unyt_quantity)
def test_create_unyt_quantities_failure():
    """A dict without the nested 'units' section should be rejected."""
    incomplete = {
        "filename": "/a/test/file/path",
        "mass": "10000000000.0 Msun",
        "length": "1.0 Mpc",
        "time": "1000.0 Gyr",
        "current": "1.0 statA",
        "temperature": "1.0 K",
    }
    with pytest.raises(units.SWIFTUnytException):
        units.create_unyt_quantities(incomplete)
| UCL-ARC/dirac-swift-api | tests/test_units.py | test_units.py | py | 3,144 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "unyt.unyt_quantity",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "api.processing.units.UnytEncoder",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "a... |
31518563478 | import flask
from flask import Flask, render_template, request, redirect
from web3 import Web3
app = Flask(__name__)

# Candidate id -> full name; vote() on the contract takes the id.
candidates = {1: "badi mohammad", 2: "hamza saht", 3: "omar hussein"}

# Connect to the Sepolia network
# SECURITY NOTE(review): the Alchemy API key is hard-coded here; it should
# be loaded from an environment variable / config instead of source control.
web3 = Web3(
    Web3.HTTPProvider(
        "https://eth-sepolia.g.alchemy.com/v2/ji4q9T9gKbrr9hk41agAd6JvmuvKhDJk"
    )
)
# Load the contract ABI and address
# Deployed Voting contract on Sepolia.
contract_address = "0x9eE6c65458701930770ADE3ACC9dE59437640b83"
# Minimal ABI: vote(), candidates(), candidatesCount(),
# getCandidatesVoteCount(), voters().
contract_abi = [
    {"inputs": [], "stateMutability": "nonpayable", "type": "constructor"},
    {
        "inputs": [{"internalType": "uint8", "name": "_candidateId", "type": "uint8"}],
        "name": "vote",
        "outputs": [],
        "stateMutability": "nonpayable",
        "type": "function",
    },
    {
        "inputs": [{"internalType": "uint256", "name": "", "type": "uint256"}],
        "name": "candidates",
        "outputs": [
            {"internalType": "uint256", "name": "id", "type": "uint256"},
            {"internalType": "string", "name": "name", "type": "string"},
            {"internalType": "uint256", "name": "voteCount", "type": "uint256"},
        ],
        "stateMutability": "view",
        "type": "function",
    },
    {
        "inputs": [],
        "name": "candidatesCount",
        "outputs": [{"internalType": "uint8", "name": "", "type": "uint8"}],
        "stateMutability": "view",
        "type": "function",
    },
    {
        "inputs": [{"internalType": "uint8", "name": "_candidateId", "type": "uint8"}],
        "name": "getCandidatesVoteCount",
        "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}],
        "stateMutability": "view",
        "type": "function",
    },
    {
        "inputs": [{"internalType": "address", "name": "", "type": "address"}],
        "name": "voters",
        "outputs": [{"internalType": "bool", "name": "", "type": "bool"}],
        "stateMutability": "view",
        "type": "function",
    },
]
# Instantiate the contract
contract = web3.eth.contract(address=contract_address, abi=contract_abi)
# Define the home route
@app.route("/")
def home():
    """Render the landing page with the voting form."""
    return render_template("index.html")
# Define a route to interact with the smart contract
@app.route("/interact", methods=["GET", "POST"])
def interact():
    """GET: show current vote counts.  POST: register a vote by name."""
    if request.method == "GET":
        # Read the tally for each of the three fixed candidate ids.
        badi = contract.functions.getCandidatesVoteCount(1).call()
        hamza = contract.functions.getCandidatesVoteCount(2).call()
        omar = contract.functions.getCandidatesVoteCount(3).call()
        return render_template("result.html", Badi=badi, Hamza=hamza, Omar=omar)
    # POST: map the submitted name (case-insensitive substring match)
    # onto a candidate id.
    voted_name = request.form.get("name")
    if not voted_name:
        # Robustness fix: a missing form field previously crashed with
        # AttributeError on None.lower().
        return render_template("index.html")
    candidate_id = None
    for key, name in candidates.items():
        if voted_name.lower() in name:
            candidate_id = key
            break
    if candidate_id is None:
        return render_template("index.html")
    # NOTE(review): .call() only *simulates* the vote transaction; it is
    # never mined, so the vote is not persisted on-chain.  A signed
    # .transact() / send_raw_transaction is needed — confirm intent.
    print(contract.functions.vote(candidate_id).call())
    return redirect("/interact", code=302)
if __name__ == "__main__":
    # Development server only; use a production WSGI server for deployment.
    app.run(debug=True)
| NaturalT314/Dapp-Voting-System | app.py | app.py | py | 3,156 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "web3.Web3",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "web3.Web3.HTTPProvider",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "web3.Web3",
"line_num... |
73152578915 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db.models.signals import m2m_changed
from django.core.mail import send_mail
import uuid
from time import sleep
import geocoder
import urllib3
urllib3.disable_warnings()
# Choice pairs for Player.gender: (stored value, display label).
GENDER = (
    ('male', 'MALE'),
    ('female', 'FEMALE')
)
class Player(AbstractUser):
    """A registered user of the app, extending Django's built-in user model."""

    # Filename of the avatar image; a bundled default until one is uploaded.
    avatar = models.CharField(max_length=100, default="default_avatar.png")
    birthday = models.DateField(null=True, blank=True)
    gender = models.CharField(max_length=20, choices=GENDER, default='male')
    address = models.CharField(max_length=255, blank=True, null=True)
    # Preferred venue; referenced by name ("Location") because the model is
    # defined later in this module.
    home_field = models.ForeignKey("Location", blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        verbose_name = 'Player'
        verbose_name_plural = 'Players'

    def __str__(self):
        return self.email
# Choice pairs for Location.facility_type.
FACILITY_TYPE = (
    ('indoor', 'Indoor'),
    ('outdoor', 'Outdoor')
)
# Choice pairs for Location.field_type (playing surface).
FIELD_TYPE = (
    ('grass', 'Grass'),
    ('turf', 'Turf'),
    ('futsal', 'Futsal')
)
class Location(models.Model):
    """A playing venue; coordinates are geocoded from the address on save."""

    name = models.CharField(max_length=50)
    address = models.CharField(max_length=255)
    facility_type = models.CharField(max_length=20, choices=FACILITY_TYPE, default='outdoor')
    field_type = models.CharField(max_length=20, choices=FIELD_TYPE, default='grass')
    url = models.CharField(max_length=120, null=True, blank=True)
    lat = models.FloatField(default=0)
    lng = models.FloatField(default=0)
    created_by = models.ForeignKey(Player, related_name="field_creator")
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.name

    def save(self, **kwargs):
        """Geocode the address (best effort, up to 3 attempts), then save."""
        address = self.address.encode('utf-8')
        for _ in range(3):
            sleep(0.05)  # throttle requests to the geocoding service
            try:
                g = geocoder.google(address)
                # geojson coordinates come back as [lng, lat].
                coords = g.geojson['features'][0]['geometry']['coordinates']
                self.lat = coords[1]
                self.lng = coords[0]
                break
            except Exception:
                # Best effort: keep whatever lat/lng is already set.
                continue
        # Bug fix: forward save() options (force_insert, using, ...) to
        # Django; they were previously dropped by super().save().
        super(Location, self).save(**kwargs)
class GameEvent(models.Model):
    """A scheduled game at a Location."""

    location = models.ForeignKey(Location, related_name="events")
    datetime = models.DateTimeField()
    description = models.CharField(max_length=255, blank=True, null=True)
    # Players who have joined the game.
    players = models.ManyToManyField(Player)
    created_by = models.ForeignKey(Player, related_name="game_creator")

    def __str__(self):
        return self.location.name
class GameInvitation(models.Model):
    """An invitation linking a player to a game event."""

    game = models.ForeignKey(GameEvent)
    player = models.ForeignKey(Player)
    # Opaque token embedded in the invitation link.
    code = models.CharField(max_length=50)
    is_accepted = models.BooleanField(default=False)
def notify_players(sender, instance, action, reverse, model, pk_set, **kwargs):
    """m2m_changed handler: email players newly added to a GameEvent.

    Only fires on 'post_add'; *pk_set* holds the ids of the added players.
    NOTE(review): currently not connected — the connect() call below is
    commented out.
    """
    if action == 'post_add':
        send_mail(
            'Game Event Notification',
            'You are invited to the game ({}) at {} {}'.format(instance.description, instance.location, instance.datetime),
            'info@goodfoot.club',
            [ii.email for ii in Player.objects.filter(id__in=pk_set)],
            fail_silently=False,
        )
# m2m_changed.connect(notify_players, sender=GameEvent.players.through)
| donlafranchi/gfcmap | general/models.py | models.py | py | 3,400 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 21,
"usage_type": "call"
}... |
27969864189 | # coding=utf-8
from django import forms
from django.db.models import Q
from django.forms.models import formset_factory
from BanBanTong.db import models
class RolePrivilegesForm(forms.ModelForm):
class Meta:
model = models.RolePrivilege
exclude = ['uuid', 'role']
class RoleForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(RoleForm, self).__init__(*args, **kwargs)
self.fields['name'].error_messages = {
'required': u'请输入角色名!'
}
def clean(self):
cleaned_data = super(RoleForm, self).clean()
name = cleaned_data.get('name')
# 判断角色名是否重复
if name:
existing = False
if self.instance._state.adding:
if models.Role.objects.filter(name=name).count() > 0:
existing = True
else:
if models.Role.objects.filter(~Q(uuid=self.instance.uuid),
name=name).count() > 0:
existing = True
if existing:
self._errors['name'] = self.error_class([u'角色名已存在!'])
return cleaned_data
class Meta:
model = models.Role
exclude = ['uuid']
RolePrivilegesFormSet = formset_factory(RolePrivilegesForm)
| xiaolin0199/bbt | apps/BanBanTong/forms/role.py | role.py | py | 1,348 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "BanBanTong.db.models.RolePrivilege",
"line_number": 12,
"usage_type": "attribute"
},
{
"ap... |
39893956481 | import ipaddress
import math
class FirstSpyEstimator:
    """First-spy deanonymisation estimator.

    Attributes each transaction to the IP address from which the *first*
    spy node observed it, then scores that mapping against the ground
    truth with macro-averaged precision (``p``) and recall (``r``).
    """

    def __init__(self, tx_maps, true_sources: {ipaddress.ip_address: [int]}):
        """
        :param tx_maps: per-spy observation logs; assumed shape
            {spy_id: {tx_id: (source_ip, observation_time)}} — confirm
            against the caller.
        :param true_sources: ground truth, IP -> list of tx ids originated
            by that IP.
        """
        self.p = 0.0      # macro-averaged precision over sources
        self.r = 0.0      # macro-averaged recall over sources
        self.r_old = 0.0  # kept for interface compatibility (unused here)

        self.observer_map = {}  # tx id -> spy that observed it first
        all_txs = sorted(tx for txs in true_sources.values() for tx in txs)

        # For each transaction, pick the spy with the earliest observation
        # and attribute the tx to the IP that spy received it from.
        tx_ip_map = {}
        for tx in all_txs:
            earliest = math.inf
            for spy, observations in tx_maps.items():
                src_ip, seen_at = observations[tx]
                if seen_at < earliest:
                    earliest = seen_at
                    tx_ip_map[tx] = src_ip
                    # Bug fix: the original assigned observer_map[tx]
                    # *after* the loop, recording the last spy iterated
                    # rather than the first observer.
                    self.observer_map[tx] = spy

        # Invert to the adversary's mapping: IP -> attributed tx ids.
        self.ip_tx_map = {ip: [] for ip in true_sources}
        for tx, ip in tx_ip_map.items():
            self.ip_tx_map.setdefault(ip, []).append(tx)

        # Score each true source, then macro-average over all sources.
        for ip, true_txs in true_sources.items():
            attributed = self.ip_tx_map[ip]
            tp = sum(1 for tx in true_txs if tx in attributed)
            fn = len(true_txs) - tp
            if tp == 0:
                # Sources with no correct attribution contribute 0 to both
                # averages (original behavior preserved).
                continue
            fp = sum(1 for tx in attributed if tx not in true_txs)
            self.r += tp / (tp + fn)
            self.p += tp / (tp + fp)

        num_sources = len(true_sources)
        self.r = self.r / num_sources
        self.p = self.p / num_sources
| jansp/kadcast-privacy | kadcast/estimators.py | estimators.py | py | 1,833 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "ipaddress.ip_address",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "math.inf",
"line_number": 17,
"usage_type": "attribute"
}
] |
41425319510 | import numpy as np
import pandas as pd
import csv
from datetime import datetime, timedelta
from polygon import RESTClient
import yfinance as yf
import matplotlib.pyplot as plt
# Date window for market-data queries: d1..d2 spans the past year,
# d3..d2 the past month (d3/rsi_d intended for the RSI query below).
d2 = datetime.today() - timedelta(days=1)
d1 = d2 - timedelta(days=365)
d3 = d2 - timedelta(days=30)
start_d = d1.strftime('%Y-%m-%d')
end_d = d2.strftime('%Y-%m-%d')
rsi_d = d3.strftime('%Y-%m-%d')
def movingAverages(ticker, time_frame, key, s_sma_days = 50, l_sma_days = 200, s_ema_days = 12, l_ema_days = 26, smoothing = 2):
    """Fetch a year of daily closes from Polygon and compute moving averages.

    Returns [sma, ema, volume] where sma = [short_sma, long_sma] and
    ema = [short_ema, long_ema], each a list of *time_frame* values ordered
    newest-first.  On insufficient data returns [sma, ema] with empty
    inner lists — NOTE(review): that early return has no volume element,
    so callers must check for empty lists before indexing ma[2].

    :param ticker: stock symbol
    :param time_frame: number of most-recent days to compute averages for
    :param key: Polygon API key
    """
    #dates for polygon api
    start_d = datetime.today() - timedelta(days=365)
    end_d = datetime.today()
    #array with short and long simple moving average
    sma = [[], []]
    #array with short and long exponential moving average
    ema = [[], []]
    #use polygon to get market data from start to end date
    with RESTClient(key) as client:
        bars = client.stocks_equities_aggregates(ticker, 1, 'day', start_d.strftime('%Y-%m-%d'), end_d.strftime('%Y-%m-%d'))
        try:
            data = bars.results
            prices = []
            for dataset in data:
                prices.append(dataset['c'])
            # second-to-last bar's volume; presumably the last full session
            # — TODO confirm
            volume = data[-2]['v']
        except:
            # NOTE(review): bare except silently maps any API failure to
            # "no data"; the insufficient-data branch below then triggers.
            prices = []
            volume = 0
    #reverses array so newer prices occur first
    prices = prices[::-1]
    #checks if stock has enough historical data / enough data points
    if len(prices) < l_sma_days+time_frame+1:
        #returns empty arrays to be checked later
        return [sma, ema]
    #calculates the short simple ma for the time designated in time_frame
    # NOTE(review): the local name 'sum' shadows the builtin throughout.
    for x in range(time_frame):
        sum = 0
        i = 0
        for y in range(x, s_sma_days + x):
            if i == s_sma_days:
                break
            sum += prices[y]
            i += 1
        sma[0].append(sum/s_sma_days)
    #calculates the long simple ma for the time designated in time_frame
    for x in range(time_frame):
        sum = 0
        i = 0
        for y in range(x, l_sma_days + x):
            if i == l_sma_days:
                break
            sum += prices[y]
            i += 1
        sma[1].append(sum/l_sma_days)
    #sample short sma to be used in ema calculation
    sum = 0
    for x in range(time_frame, time_frame + s_ema_days):
        sum += prices[x]
    samp_s_sma = sum / (s_ema_days)
    #sample long sma to be used in ema calculation
    sum = 0
    for x in range(time_frame, time_frame + l_ema_days):
        sum += prices[x]
    samp_l_sma = sum / (l_ema_days)
    # standard EMA smoothing factors: smoothing / (1 + period)
    s_factor = (smoothing / (1 + s_ema_days))
    l_factor = (smoothing / (1 + l_ema_days))
    #goes backwards through time_frame, calculating each more recent short term EMA
    for x in reversed(range(0, time_frame)):
        if x == time_frame - 1:
            ema[0].append(prices[x] * s_factor + samp_s_sma * (1 - s_factor))
        else:
            ema[0].append(prices[x] * s_factor + ema[0][-1] * (1 - s_factor))
    #same process for long term EMAs
    for x in reversed(range(0, time_frame)):
        if x == time_frame - 1:
            ema[1].append(prices[x] * l_factor + samp_l_sma * (1 - l_factor))
        else:
            ema[1].append(prices[x] * l_factor + ema[1][-1] * (1 - l_factor))
    #reverses ema lists
    ema[0] = ema[0][::-1]
    ema[1] = ema[1][::-1]
    #plt.figure(1)
    #plt.plot(ema[0])
    #plt.plot(ema[1])
    #plt.show()
    #returns SMA and EMA
    return [sma, ema, volume]
def buySignal(ticker, time_frame, key, smoothing=2):
    """Return 'buy' when MACD, golden-cross and volume conditions align.

    Returns None when data is insufficient or the conditions are not met.
    All moving-average inputs come from movingAverages() (newest-first).
    """
    #get simple moving averages and assign each into list for comprehensibility
    ma = movingAverages(ticker, time_frame, key)
    s_sma = ma[0][0]
    l_sma = ma[0][1]
    s_ema = ma[1][0]
    l_ema = ma[1][1]
    #makes sure data isnt invalid
    if len(s_sma) == 0:
        return None
    if len(s_ema) == 0:
        return None
    #getting the moving average convergence divergence (macd) line
    macd_line = []
    for x in range(len(s_ema)):
        macd_line.append(s_ema[x] - l_ema[x])
    #prior macd used for last signal line calculation
    # (linear extrapolation one step beyond the oldest MACD value)
    samp_macd = macd_line[-1] - (macd_line[-2] - macd_line[-1])
    #create signal line as ema of past 9 macds
    sig_line = []
    sig_days = 9
    for x in reversed(range(0, time_frame)):
        if x == time_frame - 1:
            sig_line.append(macd_line[x] * (smoothing / (1 + sig_days)) + samp_macd * (1 - (smoothing / (1 + sig_days))))
        else:
            sig_line.append(macd_line[x] * (smoothing / (1 + sig_days)) + sig_line[time_frame-2-x] * (1 - (smoothing / (1 + sig_days))))
    #turn macd and signal to displau old --> new on graph
    macd_line = macd_line[::-1]
    #plt.figure(1)
    #plt.plot(macd_line, 'r')
    #plt.plot(sig_line, 'b')
    #plt.show()
    #check if macd is increasing
    # None = no crossover seen; True = last cross was bullish (MACD rose
    # above signal); False = last cross was bearish.
    macd_signal = None
    #see if macd crosses signal within time frame
    for x in range(1, time_frame):
        if macd_line[x-1] - sig_line[x-1] < 0 and macd_line[x] - sig_line[x] > 0:
            macd_signal = True
        elif macd_line[x-1] - sig_line[x-1] > 0 and macd_line[x] - sig_line[x] < 0:
            macd_signal = False
    #check if golden cross happened recently
    golden_sma = s_sma[0] > l_sma[0]
    golden_ema = s_ema[0] > s_sma[0]
    #checks if these crosses happened recentl
    rcent1 = False
    rcent2 = False
    for x in range(time_frame):
        if golden_sma and (s_sma[x] <= l_sma[x]) and rcent1 == False:
            rcent1 = True
        elif golden_ema and (s_ema[x] <= l_ema[x]) and rcent2 == False:
            rcent2 = True
    #if short-term mas are increasing
    inc_sma = ((s_sma[0] - s_sma[time_frame - 1]) / time_frame) > 0
    # NOTE(review): this mixes s_ema[0] with s_sma[...]; possibly meant
    # s_ema[time_frame - 1] — confirm intent (inc_ema is unused below).
    inc_ema = ((s_ema[0] - s_sma[time_frame - 1]) / time_frame) > 0
    #makes sure there is enough volume
    vol = ma[2] > 75000
    #print(golden_sma)
    #print(rcent1)
    #print(golden_ema)
    #print(rcent2)
    #print(inc_sma)
    #print(inc_ema)
    #print(vol)
    #print(macd_signal)
    #plt.figure(2)
    #plt.plot(s_sma, 'r')
    #plt.plot(l_sma, 'b')
    #plt.show()
    if (golden_sma and rcent1) and inc_sma and vol and macd_signal == True: #or (golden_ema and rcent2))
        return 'buy'
def sellSignal(ticker, time_frame, key):
    """Return 'sell' when the short EMA is stalling and the slow stochastic
    oscillator exceeded 85 within the time frame; otherwise return None.

    The RSI condition is stubbed to True (see comment below).
    """
    #Stochastic is above 85ish
    #RSI above 70%
    #stop loss under 20%
    #is the ema stalling
    ma = movingAverages(ticker, time_frame, key)
    short_ema = ma[1][0]
    sma = ma[0][0]
    ema_stall = False
    # short_ema is newest-first, so [0] < [-1] means the EMA has declined
    # over the window.
    if short_ema[0] < short_ema[-1]:
        ema_stall = True
    #get data for stochastic oscillator
    data = yf.download(tickers = ticker, period= '1mo', interval='30m')
    prices = data['Close'].tolist()
    prices = prices[::-1]
    #calculate fast stochastic oscillator, slow stochastic oscillator
    # NOTE(review): 142 ≈ 14 trading days of 30-min bars and 39 ≈ 3 days
    # appear to be the %K/%D windows — TODO confirm these magic numbers.
    fast_indexs = []
    days= 0
    for x in range(1, len(prices)-1):
        if days == time_frame*16:
            break
        lastC = prices[x-1]
        low14 = min(prices[x:142+x])
        high14 = max(prices[x:142+x])
        stoch_ind = ((lastC - low14) / (high14 - low14)) * 100
        fast_indexs.append(stoch_ind)
        days+=1
    slow_indexs = []
    for x in range(0, time_frame):
        slow_indexs.append(sum(fast_indexs[x:39+x]) / 39)
    stochastic = False
    for val in slow_indexs:
        if val > 85:
            stochastic = True
    #get RSI
    rsi = True #change to false once finished
    #with RESTClient(key) as client:
    #    bars = client.stocks_equities_aggregates(ticker, 1, 'day', rsi_d.strftime('%Y-%m-%d'), end_d.strftime('%Y-%m-%d'))
    #    try:
    #        data = bars.results
    #        opens = []
    #        closes = []
    #        for dataset in data:
    #            opens.append(dataset['o'])
    #            closes.append(dataset['c'])
    #    except:
    #        opens = []
    #        closes = []
    #for x in range(opens):
    if ema_stall and stochastic == True and rsi == True:
        return 'sell'
    return None
| amoszczynski/Mid-Term_Algo-Trader | v1/signaling.py | signaling.py | py | 7,971 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "datetime.datetime.today",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.ti... |
2350374306 | '''
Lex Strings
Name: <your name>
'''
from collections import Counter, defaultdict
#
# Complete the 'rearrangedString' function below.
#
# The function is expected to return a STRING.
# The function accepts following parameters:
# 1. STRING s
#
# 1. This function will filter the alphanumeric characters from the string and sort them.
def filter_and_sort(s):
    """Return the alphanumeric characters of *s* as a list, sorted descending."""
    kept = ''.join(filter(str.isalnum, s))
    return sorted(kept, reverse=True)
# 2. This function will take the sorted string and group the characters by their count.
def group_by_count(sorted_chars):
    """Group characters by frequency: {count: [chars]}, chars ascending
    within each count group."""
    freq = Counter(sorted_chars)
    grouped = defaultdict(list)
    ordered = sorted(freq.items(), key=lambda item: (-item[1], item[0]))
    for ch, cnt in ordered:
        grouped[cnt].append(ch)
    return grouped
# 3. This function will take the groups and apply the alternating sort order.
def alternate_sort(groups):
    """Flatten *groups* (highest count first), alternating the character
    order (ascending, then descending, ...) between successive groups."""
    result = []
    ascending = True
    for cnt, chars in sorted(groups.items(), reverse=True):
        text = ''.join(chars)
        result.append((cnt, text if ascending else text[::-1]))
        ascending = not ascending
    return result
# 4.
def format_result(sorted_groups):
    """Render (count, chars) pairs as 'count<chars>' segments, comma-joined."""
    parts = [f"{cnt}{chars}" for cnt, chars in sorted_groups]
    return ','.join(parts)
def rearrangedString(s):
    """Rearrange the alphanumeric characters of *s* into count groups.

    Characters are grouped by frequency (highest count first); each group
    is rendered as '<count><chars>' and groups are comma-joined, with the
    character order alternating (ascending, then descending, ...) between
    successive groups — see the expected outputs in the docstring below.
    """
    sorted_chars = filter_and_sort(s)
    groups = group_by_count(sorted_chars)
    sorted_groups = alternate_sort(groups)
    result = format_result(sorted_groups)
    # Bug fix: *result* was computed but never returned, so every caller
    # received None (the sample output below clearly expects a string).
    return result
if __name__ == '__main__':
    import sys
    # Redirect stdin so input() reads the bundled sample data file.
    sys.stdin = open('lex_strings_sample_data.txt', 'r')
    for tc in range(10):
        s = input()
        result = rearrangedString(s)
        print('Test Case #{}: {}'.format(tc + 1, result))
'''
Your output on console should be:
Test Case #1: 6in,4ts,3aegr,2o,1ESTfhlmpx
Test Case #2: 5a,4se,3r,2tonkihc,1ACFHLRSdflmuwy
Test Case #3: 2135ei,1tsrohgfdaTPI9642
Test Case #4: 7es,4lh,2a,1ytrobS
Test Case #5: 8a,5n,4gu,3rlic,2CPSdehmosty,1vbVRJIBA
Test Case #6: 7ae,6ni,5t,4rhd,3gl,2vsoc,119CDIOVbmpuwy
Test Case #7: 9e,5nica,4or,3th,202CTsu,1vpmlfdSA
Test Case #8: 53,472,345689deo,2trni1,1IPTafghsu
Test Case #9: 14e,13p,7i,6r,5cdk,4P,2alost,1ywnmfH
Test Case #10: 8o,7e,5hn,3wtsrda,2ikp,1ylfbT10
'''
| tkong9/ACSL-Sr | Assignments/lex.py | lex.py | py | 2,876 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 66,
"usage_type": "attribute"
}
] |
33357721855 | import pygame
from pygame.surface import Surface
import sys
from Screens.Content import Content, State
from Options.Options import KEY_REPEAT_DELAY, KEY_REPEAT_INTERVAL, TITLE_H_START, TITLE_W_START, TITLE_H_SIZE, BUTTON_H, BUTTON_W, FONT, BRICK_SIZE, ICON_H, ICON_W
from Options.Colors import Colors, Color_mod
class Settings(Content):
    """Settings screen: two mirrored columns of option buttons.

    Any mouse click returns to the title screen (per-option handling is
    still a TODO — see the note in update()).
    """

    def __init__(self) -> None:
        # Left-hand column of options.
        self.options_1: list[str] = [
            'Next tetr',
            'Tetr. shadow',
            'Fps',
            'Fall Speed',
            'LockDelay'
        ]
        # Right-hand column of options.
        self.options_2: list[str] = [
            'Key Rep. Interval',
            'Key Rep. Delay',
            'Soft Drop',
            'Return'
        ]
        # Vertical gap sized so the left column fills TITLE_H_SIZE evenly.
        self.border: int = (TITLE_H_SIZE - (len(self.options_1) * BUTTON_H)) // (2 * len(self.options_1))

    def update(self, display: Surface) -> State:
        """Handle input; any mouse click returns to the title screen."""
        # TODO: move this handling into per-option actions on Title()
        # (original note, translated: "remove and put into Title() options").
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONDOWN:
                return State.Title
        pygame.key.set_repeat(KEY_REPEAT_DELAY, KEY_REPEAT_INTERVAL)
        return State.Stay

    def draw(self, display: Surface) -> None:
        """Render the settings screen."""
        self.draw_buttons(display)

    def draw_buttons(self, display) -> None:
        """Draw both option columns (deduplicated via _draw_column)."""
        # The two columns mirror each other around TITLE_W_START,
        # offset by 3/4 of a button width.
        offset = 3 * BUTTON_W // 4
        self._draw_column(display, self.options_1, TITLE_W_START - offset)
        self._draw_column(display, self.options_2, TITLE_W_START + offset)

    def _draw_column(self, display, options, x_start) -> None:
        """Draw one vertical column of labelled buttons starting at *x_start*."""
        for i, label in enumerate(options):
            y_start = TITLE_H_START // 2 + (i * (BUTTON_H + self.border))
            # Top half of the button in the base color...
            pygame.draw.rect(display,
                             Colors.PURPLE.value,
                             (x_start, y_start, BUTTON_W, BUTTON_H // 2))
            # ...bottom half in its shadow shade...
            pygame.draw.rect(display,
                             Color_mod().get_shadow_from_color(Colors.PURPLE),
                             (x_start, y_start + (BUTTON_H // 2), BUTTON_W, BUTTON_H // 2))
            # ...and a light outline around the whole button.
            pygame.draw.rect(display,
                             Color_mod().get_light_from_color(Colors.PURPLE),
                             (x_start, y_start, BUTTON_W, BUTTON_H),
                             width=4)
            button_text: Surface = FONT.render(label, True, Colors.WHITE.value)
            display.blit(button_text, (x_start + (BUTTON_H // 2), y_start))
| HTsuyoshi/py-tetris | src/Screens/Settings.py | Settings.py | py | 4,030 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "Screens.Content.Content",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "Options.Options.TITLE_H_SIZE",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "Options.Options.BUTTON_H",
"line_number": 25,
"usage_type": "name"
},
{
"api... |
6585776436 | import threading
import time
import serial.tools.list_ports
from os import environ
from re import search
from PyQt6.QtCore import QObject, pyqtSignal
from UM.Platform import Platform
from UM.Signal import Signal, signalemitter
from UM.OutputDevice.OutputDevicePlugin import OutputDevicePlugin
from UM.i18n import i18nCatalog
from cura.PrinterOutput.PrinterOutputDevice import ConnectionState
from . import USBPrinterOutputDevice
# Translation catalog for user-facing strings in this plugin.
i18n_catalog = i18nCatalog("cura")
@signalemitter
class USBPrinterOutputDeviceManager(QObject, OutputDevicePlugin):
    """Manager class that ensures that an USBPrinterOutput device is created for every connected USB printer."""

    # Emitted from the polling thread so that device creation happens on the
    # main (QML) thread instead.
    addUSBOutputDeviceSignal = Signal()
    progressChanged = pyqtSignal()

    def __init__(self, application, parent = None):
        # Enforce singleton usage: only one manager may exist per process.
        if USBPrinterOutputDeviceManager.__instance is not None:
            raise RuntimeError("Try to create singleton '%s' more than once" % self.__class__.__name__)
        super().__init__(parent = parent)
        USBPrinterOutputDeviceManager.__instance = self
        self._application = application
        # Serial port names seen on the last poll.
        self._serial_port_list = []
        # Maps serial port name -> USBPrinterOutputDevice.
        self._usb_output_devices = {}
        self._usb_output_devices_model = None
        # Background daemon thread that polls for (dis)connected printers.
        self._update_thread = threading.Thread(target = self._updateThread)
        self._update_thread.daemon = True
        # Flag checked by the polling loop; cleared by stop().
        self._check_updates = True
        self._application.applicationShuttingDown.connect(self.stop)
        # Because the model needs to be created in the same thread as the QMLEngine, we use a signal.
        self.addUSBOutputDeviceSignal.connect(self.addOutputDevice)
        self._application.globalContainerStackChanged.connect(self.updateUSBPrinterOutputDevices)

    # The method updates/reset the USB settings for all connected USB devices
    def updateUSBPrinterOutputDevices(self):
        for device in self._usb_output_devices.values():
            if isinstance(device, USBPrinterOutputDevice.USBPrinterOutputDevice):
                device.resetDeviceSettings()

    def start(self):
        """Begin polling for USB printers on the background thread."""
        self._check_updates = True
        self._update_thread.start()

    def stop(self, store_data: bool = True):
        # Signals the polling loop to exit after its current iteration.
        # NOTE(review): store_data is currently unused here -- presumably kept
        # for signature compatibility with the shutdown callback; confirm.
        self._check_updates = False

    def _onConnectionStateChanged(self, serial_port):
        # Registers/unregisters the device with the output device manager as
        # its connection comes and goes.
        if serial_port not in self._usb_output_devices:
            return
        changed_device = self._usb_output_devices[serial_port]
        if changed_device.connectionState == ConnectionState.Connected:
            self.getOutputDeviceManager().addOutputDevice(changed_device)
        else:
            self.getOutputDeviceManager().removeOutputDevice(serial_port)

    def _updateThread(self):
        # Poll loop (runs on the daemon thread): every 5 seconds, list serial
        # ports for the active machine and reconcile devices against them.
        while self._check_updates:
            container_stack = self._application.getGlobalContainerStack()
            if container_stack is None:
                time.sleep(5)
                continue
            port_list = []  # Just an empty list; all USB devices will be removed.
            if container_stack.getMetaDataEntry("supports_usb_connection"):
                machine_file_formats = [file_type.strip() for file_type in container_stack.getMetaDataEntry("file_formats").split(";")]
                if "text/x-gcode" in machine_file_formats:
                    # We only limit listing usb on windows is a fix for connecting tty/cu printers on MacOS and Linux
                    port_list = self.getSerialPortList(only_list_usb=Platform.isWindows())
            self._addRemovePorts(port_list)
            time.sleep(5)

    def _addRemovePorts(self, serial_ports):
        """Helper to identify serial ports (and scan for them)"""

        # First, find and add all new or changed keys
        for serial_port in list(serial_ports):
            if serial_port not in self._serial_port_list:
                self.addUSBOutputDeviceSignal.emit(serial_port)  # Hack to ensure its created in main thread
                continue
        self._serial_port_list = list(serial_ports)
        # Close devices whose port has disappeared.
        for port, device in self._usb_output_devices.items():
            if port not in self._serial_port_list:
                device.close()

    def addOutputDevice(self, serial_port):
        """Because the model needs to be created in the same thread as the QMLEngine, we use a signal."""
        device = USBPrinterOutputDevice.USBPrinterOutputDevice(serial_port)
        device.connectionStateChanged.connect(self._onConnectionStateChanged)
        self._usb_output_devices[serial_port] = device
        device.connect()

    def getSerialPortList(self, only_list_usb = False):
        """Create a list of serial ports on the system.

        :param only_list_usb: If true, only usb ports are listed
        """
        base_list = []
        try:
            port_list = serial.tools.list_ports.comports()
        except TypeError:  # Bug in PySerial causes a TypeError if port gets disconnected while processing.
            port_list = []
        for port in port_list:
            if not isinstance(port, tuple):
                port = (port.device, port.description, port.hwid)
            if not port[2]:  # HWID may be None if the device is not USB or the system doesn't report the type.
                continue
            if only_list_usb and not port[2].startswith("USB"):
                continue

            # To prevent cura from messing with serial ports of other devices,
            # filter by regular expressions passed in as environment variables.
            # Get possible patterns with python3 -m serial.tools.list_ports -v

            # set CURA_DEVICENAMES=USB[1-9] -> e.g. not matching /dev/ttyUSB0
            pattern = environ.get('CURA_DEVICENAMES')
            if pattern and not search(pattern, port[0]):
                continue

            # set CURA_DEVICETYPES=CP2102 -> match a type of serial converter
            pattern = environ.get('CURA_DEVICETYPES')
            if pattern and not search(pattern, port[1]):
                continue

            # set CURA_DEVICEINFOS=LOCATION=2-1.4 -> match a physical port
            # set CURA_DEVICEINFOS=VID:PID=10C4:EA60 -> match a vendor:product
            pattern = environ.get('CURA_DEVICEINFOS')
            if pattern and not search(pattern, port[2]):
                continue

            base_list += [port[0]]
        return list(base_list)

    __instance = None  # type: USBPrinterOutputDeviceManager

    @classmethod
    def getInstance(cls, *args, **kwargs) -> "USBPrinterOutputDeviceManager":
        return cls.__instance
| Ultimaker/Cura | plugins/USBPrinting/USBPrinterOutputDeviceManager.py | USBPrinterOutputDeviceManager.py | py | 6,487 | python | en | code | 5,387 | github-code | 1 | [
{
"api_name": "UM.i18n.i18nCatalog",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtCore.QObject",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "UM.OutputDevice.OutputDevicePlugin.OutputDevicePlugin",
"line_number": 22,
"usage_type": "name"
... |
11510469822 | # Released under the MIT License. See LICENSE for details.
#
"""Playlist related functionality."""
from __future__ import annotations
import copy
import logging
from typing import Any, TYPE_CHECKING
import babase
if TYPE_CHECKING:
from typing import Sequence
from bascenev1._session import Session
PlaylistType = list[dict[str, Any]]
# Translation table from legacy game-type paths (old 'CamelCase.*',
# 'bsFoo.*', 'bs_foo.*' and 'bastd.game.*' module layouts) to the modern
# 'bascenev1lib.game.*' paths. The legacy-name sets are pairwise disjoint and
# the modern names never appear as keys, so a single lookup is equivalent to
# the old sequential if-chains.
_LEGACY_GAME_TYPE_ALIASES: dict[str, str] = {
    old: new
    for new, olds in {
        'bascenev1lib.game.assault.AssaultGame': (
            'Assault.AssaultGame',
            'Happy_Thoughts.HappyThoughtsGame',
            'bsAssault.AssaultGame',
            'bs_assault.AssaultGame',
            'bastd.game.assault.AssaultGame',
        ),
        'bascenev1lib.game.kingofthehill.KingOfTheHillGame': (
            'King_of_the_Hill.KingOfTheHillGame',
            'bsKingOfTheHill.KingOfTheHillGame',
            'bs_king_of_the_hill.KingOfTheHillGame',
            'bastd.game.kingofthehill.KingOfTheHillGame',
        ),
        'bascenev1lib.game.capturetheflag.CaptureTheFlagGame': (
            'Capture_the_Flag.CTFGame',
            'bsCaptureTheFlag.CTFGame',
            'bs_capture_the_flag.CTFGame',
            'bastd.game.capturetheflag.CaptureTheFlagGame',
        ),
        'bascenev1lib.game.deathmatch.DeathMatchGame': (
            'Death_Match.DeathMatchGame',
            'bsDeathMatch.DeathMatchGame',
            'bs_death_match.DeathMatchGame',
            'bastd.game.deathmatch.DeathMatchGame',
        ),
        'bascenev1lib.game.chosenone.ChosenOneGame': (
            'ChosenOne.ChosenOneGame',
            'bsChosenOne.ChosenOneGame',
            'bs_chosen_one.ChosenOneGame',
            'bastd.game.chosenone.ChosenOneGame',
        ),
        'bascenev1lib.game.conquest.ConquestGame': (
            'Conquest.Conquest',
            'Conquest.ConquestGame',
            'bsConquest.ConquestGame',
            'bs_conquest.ConquestGame',
            'bastd.game.conquest.ConquestGame',
        ),
        'bascenev1lib.game.elimination.EliminationGame': (
            'Elimination.EliminationGame',
            'bsElimination.EliminationGame',
            'bs_elimination.EliminationGame',
            'bastd.game.elimination.EliminationGame',
        ),
        'bascenev1lib.game.football.FootballTeamGame': (
            'Football.FootballGame',
            'bsFootball.FootballTeamGame',
            'bs_football.FootballTeamGame',
            'bastd.game.football.FootballTeamGame',
        ),
        'bascenev1lib.game.hockey.HockeyGame': (
            'Hockey.HockeyGame',
            'bsHockey.HockeyGame',
            'bs_hockey.HockeyGame',
            'bastd.game.hockey.HockeyGame',
        ),
        'bascenev1lib.game.keepaway.KeepAwayGame': (
            'Keep_Away.KeepAwayGame',
            'bsKeepAway.KeepAwayGame',
            'bs_keep_away.KeepAwayGame',
            'bastd.game.keepaway.KeepAwayGame',
        ),
        'bascenev1lib.game.race.RaceGame': (
            'Race.RaceGame',
            'bsRace.RaceGame',
            'bs_race.RaceGame',
            'bastd.game.race.RaceGame',
        ),
        'bascenev1lib.game.easteregghunt.EasterEggHuntGame': (
            'bsEasterEggHunt.EasterEggHuntGame',
            'bs_easter_egg_hunt.EasterEggHuntGame',
            'bastd.game.easteregghunt.EasterEggHuntGame',
        ),
        'bascenev1lib.game.meteorshower.MeteorShowerGame': (
            'bsMeteorShower.MeteorShowerGame',
            'bs_meteor_shower.MeteorShowerGame',
            'bastd.game.meteorshower.MeteorShowerGame',
        ),
        'bascenev1lib.game.targetpractice.TargetPracticeGame': (
            'bsTargetPractice.TargetPracticeGame',
            'bs_target_practice.TargetPracticeGame',
            'bastd.game.targetpractice.TargetPracticeGame',
        ),
    }.items()
    for old in olds
}


def filter_playlist(
    playlist: PlaylistType,
    sessiontype: type[Session],
    add_resolved_type: bool = False,
    remove_unowned: bool = True,
    mark_unowned: bool = False,
    name: str = '?',
) -> PlaylistType:
    """Return a filtered version of a playlist.

    Strips out or replaces invalid or unowned game types, makes sure all
    settings are present, and adds in a 'resolved_type' which is the actual
    type.
    """
    # pylint: disable=too-many-branches
    from bascenev1._map import get_filtered_map_name
    from bascenev1._gameactivity import GameActivity

    assert babase.app.classic is not None

    goodlist: list[dict] = []
    unowned_maps: Sequence[str]
    available_maps: list[str] = list(babase.app.classic.maps.keys())
    if (remove_unowned or mark_unowned) and babase.app.classic is not None:
        unowned_maps = babase.app.classic.store.get_unowned_maps()
        unowned_game_types = babase.app.classic.store.get_unowned_game_types()
    else:
        unowned_maps = []
        unowned_game_types = set()

    for entry in copy.deepcopy(playlist):
        # 'map' used to be called 'level' here.
        if 'level' in entry:
            entry['map'] = entry['level']
            del entry['level']

        # We now stuff map into settings instead of it being its own thing.
        if 'map' in entry:
            entry['settings']['map'] = entry['map']
            del entry['map']

        # Update old map names to new ones.
        entry['settings']['map'] = get_filtered_map_name(
            entry['settings']['map']
        )
        if remove_unowned and entry['settings']['map'] in unowned_maps:
            continue

        # Ok, for each game in our list, try to import the module and grab
        # the actual game class. add successful ones to our initial list
        # to present to the user.
        if not isinstance(entry['type'], str):
            raise TypeError('invalid entry format')
        try:
            # Translate legacy game-type paths for backwards compat.
            entry['type'] = _LEGACY_GAME_TYPE_ALIASES.get(
                entry['type'], entry['type']
            )

            gameclass = babase.getclass(entry['type'], GameActivity)

            if entry['settings']['map'] not in available_maps:
                raise babase.MapNotFoundError()

            if remove_unowned and gameclass in unowned_game_types:
                continue
            if add_resolved_type:
                entry['resolved_type'] = gameclass
            if mark_unowned and entry['settings']['map'] in unowned_maps:
                entry['is_unowned_map'] = True
            if mark_unowned and gameclass in unowned_game_types:
                entry['is_unowned_game'] = True

            # Make sure all settings the game defines are present.
            neededsettings = gameclass.get_available_settings(sessiontype)
            for setting in neededsettings:
                if setting.name not in entry['settings']:
                    entry['settings'][setting.name] = setting.default

            goodlist.append(entry)

        except babase.MapNotFoundError:
            logging.warning(
                'Map \'%s\' not found while scanning playlist \'%s\'.',
                entry['settings']['map'],
                name,
            )
        except ImportError as exc:
            logging.warning(
                'Import failed while scanning playlist \'%s\': %s', name, exc
            )
        except Exception:
            logging.exception('Error in filter_playlist.')

    return goodlist
def get_default_free_for_all_playlist() -> PlaylistType:
    """Return a default playlist for free-for-all mode."""

    # NOTE: these are currently using old type/map names,
    # but filtering translates them properly to the new ones.
    # (is kinda a handy way to ensure filtering is working).
    # Eventually should update these though.
    return [
        {
            'settings': {
                'Epic Mode': False,
                'Kills to Win Per Player': 10,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'Doom Shroom',
            },
            'type': 'bs_death_match.DeathMatchGame',
        },
        {
            'settings': {
                'Chosen One Gets Gloves': True,
                'Chosen One Gets Shield': False,
                'Chosen One Time': 30,
                'Epic Mode': 0,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'Monkey Face',
            },
            'type': 'bs_chosen_one.ChosenOneGame',
        },
        {
            'settings': {
                'Hold Time': 30,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'Zigzag',
            },
            'type': 'bs_king_of_the_hill.KingOfTheHillGame',
        },
        {
            'settings': {'Epic Mode': False, 'map': 'Rampage'},
            'type': 'bs_meteor_shower.MeteorShowerGame',
        },
        {
            'settings': {
                'Epic Mode': 1,
                'Lives Per Player': 1,
                'Respawn Times': 1.0,
                'Time Limit': 120,
                'map': 'Tip Top',
            },
            'type': 'bs_elimination.EliminationGame',
        },
        {
            'settings': {
                'Hold Time': 30,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'The Pad',
            },
            'type': 'bs_keep_away.KeepAwayGame',
        },
        {
            'settings': {
                'Epic Mode': True,
                'Kills to Win Per Player': 10,
                'Respawn Times': 0.25,
                'Time Limit': 120,
                'map': 'Rampage',
            },
            'type': 'bs_death_match.DeathMatchGame',
        },
        {
            'settings': {
                'Bomb Spawning': 1000,
                'Epic Mode': False,
                'Laps': 3,
                'Mine Spawn Interval': 4000,
                'Mine Spawning': 4000,
                'Time Limit': 300,
                'map': 'Big G',
            },
            'type': 'bs_race.RaceGame',
        },
        {
            'settings': {
                'Hold Time': 30,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'Happy Thoughts',
            },
            'type': 'bs_king_of_the_hill.KingOfTheHillGame',
        },
        {
            'settings': {
                'Enable Impact Bombs': 1,
                'Enable Triple Bombs': False,
                'Target Count': 2,
                'map': 'Doom Shroom',
            },
            'type': 'bs_target_practice.TargetPracticeGame',
        },
        {
            'settings': {
                'Epic Mode': False,
                'Lives Per Player': 5,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'Step Right Up',
            },
            'type': 'bs_elimination.EliminationGame',
        },
        {
            'settings': {
                'Epic Mode': False,
                'Kills to Win Per Player': 10,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'Crag Castle',
            },
            'type': 'bs_death_match.DeathMatchGame',
        },
        {
            # NOTE(review): redundant top-level 'map' key; filter_playlist
            # folds it into settings, where the same value already exists.
            'map': 'Lake Frigid',
            'settings': {
                'Bomb Spawning': 0,
                'Epic Mode': False,
                'Laps': 6,
                'Mine Spawning': 2000,
                'Time Limit': 300,
                'map': 'Lake Frigid',
            },
            'type': 'bs_race.RaceGame',
        },
    ]
def get_default_teams_playlist() -> PlaylistType:
    """Return a default playlist for teams mode."""

    # NOTE: these are currently using old type/map names,
    # but filtering translates them properly to the new ones.
    # (is kinda a handy way to ensure filtering is working).
    # Eventually should update these though.
    return [
        {
            'settings': {
                'Epic Mode': False,
                'Flag Idle Return Time': 30,
                'Flag Touch Return Time': 0,
                'Respawn Times': 1.0,
                'Score to Win': 3,
                'Time Limit': 600,
                'map': 'Bridgit',
            },
            'type': 'bs_capture_the_flag.CTFGame',
        },
        {
            'settings': {
                'Epic Mode': False,
                'Respawn Times': 1.0,
                'Score to Win': 3,
                'Time Limit': 600,
                'map': 'Step Right Up',
            },
            'type': 'bs_assault.AssaultGame',
        },
        {
            'settings': {
                'Balance Total Lives': False,
                'Epic Mode': False,
                'Lives Per Player': 3,
                'Respawn Times': 1.0,
                'Solo Mode': True,
                'Time Limit': 600,
                'map': 'Rampage',
            },
            'type': 'bs_elimination.EliminationGame',
        },
        {
            'settings': {
                'Epic Mode': False,
                'Kills to Win Per Player': 5,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'Roundabout',
            },
            'type': 'bs_death_match.DeathMatchGame',
        },
        {
            'settings': {
                'Respawn Times': 1.0,
                'Score to Win': 1,
                'Time Limit': 600,
                'map': 'Hockey Stadium',
            },
            'type': 'bs_hockey.HockeyGame',
        },
        {
            'settings': {
                'Hold Time': 30,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'Monkey Face',
            },
            'type': 'bs_keep_away.KeepAwayGame',
        },
        {
            'settings': {
                'Balance Total Lives': False,
                'Epic Mode': True,
                'Lives Per Player': 1,
                'Respawn Times': 1.0,
                'Solo Mode': False,
                'Time Limit': 120,
                'map': 'Tip Top',
            },
            'type': 'bs_elimination.EliminationGame',
        },
        {
            'settings': {
                'Epic Mode': False,
                'Respawn Times': 1.0,
                'Score to Win': 3,
                'Time Limit': 300,
                'map': 'Crag Castle',
            },
            'type': 'bs_assault.AssaultGame',
        },
        {
            'settings': {
                'Epic Mode': False,
                'Kills to Win Per Player': 5,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'Doom Shroom',
            },
            'type': 'bs_death_match.DeathMatchGame',
        },
        {
            'settings': {'Epic Mode': False, 'map': 'Rampage'},
            'type': 'bs_meteor_shower.MeteorShowerGame',
        },
        {
            'settings': {
                'Epic Mode': False,
                'Flag Idle Return Time': 30,
                'Flag Touch Return Time': 0,
                'Respawn Times': 1.0,
                'Score to Win': 2,
                'Time Limit': 600,
                'map': 'Roundabout',
            },
            'type': 'bs_capture_the_flag.CTFGame',
        },
        {
            'settings': {
                'Respawn Times': 1.0,
                'Score to Win': 21,
                'Time Limit': 600,
                'map': 'Football Stadium',
            },
            'type': 'bs_football.FootballTeamGame',
        },
        {
            'settings': {
                'Epic Mode': True,
                'Respawn Times': 0.25,
                'Score to Win': 3,
                'Time Limit': 120,
                'map': 'Bridgit',
            },
            'type': 'bs_assault.AssaultGame',
        },
        {
            # NOTE(review): redundant top-level 'map' key; filter_playlist
            # folds it into settings, where the same value already exists.
            'map': 'Doom Shroom',
            'settings': {
                'Enable Impact Bombs': 1,
                'Enable Triple Bombs': False,
                'Target Count': 2,
                'map': 'Doom Shroom',
            },
            'type': 'bs_target_practice.TargetPracticeGame',
        },
        {
            'settings': {
                'Hold Time': 30,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'Tip Top',
            },
            'type': 'bs_king_of_the_hill.KingOfTheHillGame',
        },
        {
            'settings': {
                'Epic Mode': False,
                'Respawn Times': 1.0,
                'Score to Win': 2,
                'Time Limit': 300,
                'map': 'Zigzag',
            },
            'type': 'bs_assault.AssaultGame',
        },
        {
            'settings': {
                'Epic Mode': False,
                'Flag Idle Return Time': 30,
                'Flag Touch Return Time': 0,
                'Respawn Times': 1.0,
                'Score to Win': 3,
                'Time Limit': 300,
                'map': 'Happy Thoughts',
            },
            'type': 'bs_capture_the_flag.CTFGame',
        },
        {
            'settings': {
                'Bomb Spawning': 1000,
                'Epic Mode': True,
                'Laps': 1,
                'Mine Spawning': 2000,
                'Time Limit': 300,
                'map': 'Big G',
            },
            'type': 'bs_race.RaceGame',
        },
        {
            'settings': {
                'Epic Mode': False,
                'Kills to Win Per Player': 5,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'Monkey Face',
            },
            'type': 'bs_death_match.DeathMatchGame',
        },
        {
            'settings': {
                'Hold Time': 30,
                'Respawn Times': 1.0,
                'Time Limit': 300,
                'map': 'Lake Frigid',
            },
            'type': 'bs_keep_away.KeepAwayGame',
        },
        {
            'settings': {
                'Epic Mode': False,
                'Flag Idle Return Time': 30,
                'Flag Touch Return Time': 3,
                'Respawn Times': 1.0,
                'Score to Win': 2,
                'Time Limit': 300,
                'map': 'Tip Top',
            },
            'type': 'bs_capture_the_flag.CTFGame',
        },
        {
            'settings': {
                'Balance Total Lives': False,
                'Epic Mode': False,
                'Lives Per Player': 3,
                'Respawn Times': 1.0,
                'Solo Mode': False,
                'Time Limit': 300,
                'map': 'Crag Castle',
            },
            'type': 'bs_elimination.EliminationGame',
        },
        {
            'settings': {
                'Epic Mode': True,
                'Respawn Times': 0.25,
                'Time Limit': 120,
                'map': 'Zigzag',
            },
            'type': 'bs_conquest.ConquestGame',
        },
    ]
| efroemling/ballistica | src/assets/ba_data/python/bascenev1/_playlist.py | _playlist.py | py | 20,107 | python | en | code | 468 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "bascenev1._session.Session",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "babase.app... |
28833725406 | import sqlite3
import typing
from timetable_loader import BaseTimetableLoader
def add_new_user(cursor: sqlite3.Cursor, user_id, group_id: int):
    """Register *user_id* as belonging to *group_id* in the users table."""
    row = (user_id, group_id)
    cursor.execute("""
        INSERT INTO users VALUES (?, ?)
    """, row)
def delete_timetable_for_group(cursor: sqlite3.Cursor, group_id: int):
    """Remove every timetable row belonging to *group_id*."""
    cursor.execute(
        """
        DELETE FROM timetable WHERE group_id = ?
    """,
        (group_id,),
    )
def upload_timetable_for_group(cursor: sqlite3.Cursor, timetable: "BaseTimetableLoader"):
    """Bulk-insert every 6-tuple row yielded by *timetable* into the timetable table.

    The loader must yield rows matching the table's six columns.
    """
    # Plain string here: the original used an f-string with no placeholders.
    cursor.executemany("""
        INSERT INTO timetable VALUES (?, ?, ?, ?, ?, ?)
    """, timetable)
def get_timetable_for_group_and_day(cursor: sqlite3.Cursor, group_id: int, weekday: str):
    """Fetch all (subject, tutor, place, time) rows for one group on one weekday."""
    query = """
        SELECT subject, tutor, place, time
        FROM timetable
        WHERE group_id = ? AND weekday = ?
    """
    result = cursor.execute(query, (group_id, weekday))
    return result.fetchall()
| just3mhz/timetable_bot | sql_queries.py | sql_queries.py | py | 874 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sqlite3.Cursor",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.Cursor",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.Cursor",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "timetable_... |
32396154456 | import scipy.stats as sts
import argparse
import pandas as pd
import glob
def ParseArguments():
    """Parse command-line options; return (input_file, input_dir, pval_file)."""
    parser = argparse.ArgumentParser(description="Kolmogorov–Smirnov test")
    parser.add_argument('--input-file', default="generated_numbers.pkl", required=False,
                        help='input file (default: %(default)s)')
    parser.add_argument('--input-dir', default="pickles_dir", required=False,
                        help='input directory with .pkl files (default: %(default)s)')
    parser.add_argument('--pval-file', default="p-values.csv", required=False,
                        help='output file with p-values (default: %(default)s)')
    parsed = parser.parse_args()
    return parsed.input_file, parsed.input_dir, parsed.pval_file
# Read CLI options: either a single .pkl file or a directory of .pkl files.
input_file, input_dir, pval_file = ParseArguments()

if input_dir == "":
    # Single-file mode: run one KS test and print a full report.
    numbers_info = pd.read_pickle(input_file)
    M = int(numbers_info['Modulus'])
    numbers = numbers_info['numbers']  # (pseudo)random numbers
    numbers = list(map(lambda x: x / M, numbers))  # rescale into the interval (0, 1)
    test = sts.kstest(numbers, cdf='uniform')
    print("Kolmogorov–Smirnov test results:")
    print("p-value: " + str(test[1]))
    print("Statistic: " + str(test[0]))
    print("PARAMETERS:")
    print("M: " + str(numbers_info['Modulus']))
    print("n: " + str(numbers_info['n']))
    print("Numbers have beed generated with: " + str(numbers_info['PRNG']))
    pvals = []
    pvals.append(test[1])
    print("Saving p-value to ", pval_file)
    df = pd.DataFrame(pvals, columns=["p-value"])
    df.to_csv(pval_file, index=False)
else:
    # Directory mode: run the KS test on every .pkl file and collect p-values.
    print("input_dir = ", input_dir)
    pvals = []
    file_list = list((glob.glob(input_dir + "/**.pkl")))
    file_list.sort()
    for file_name in file_list:
        print("Processing file ", file_name, " ...")
        numbers_info = pd.read_pickle(file_name)
        # NOTE(review): unlike the single-file branch, Modulus is not cast to
        # int here -- presumably it is already numeric; confirm.
        M = numbers_info['Modulus']
        numbers = numbers_info['numbers']
        numbers = list(map(lambda x: x / M, numbers))  # rescale into (0, 1)
        test = sts.kstest(numbers, cdf='uniform')
        pvals.append(test[1])
    print("Saving p-values to ", pval_file)
    df = pd.DataFrame(pvals, columns=["p-value"])
    df.to_csv(pval_file, index=False)
| lorek/ZPS2021 | scripts/z3KStest.py | z3KStest.py | py | 2,157 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.read_pickle",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "scipy.stats.kstest",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "scipy.st... |
32952521600 | import binascii
import base58
import hashlib
from hashlib import sha256
from ecdsa import SECP256k1,VerifyingKey,util,BadSignatureError
from six import b
# Byte-wise reversal of a hex string (endianness swap). eg: "123456" -> "563412"
def str_reverse_on_byte(data):
    raw = binascii.unhexlify(data)
    return binascii.hexlify(raw[::-1]).decode()
# Convert a hex string to an integer. eg: "11d" -> 285
# Odd-length inputs are left-padded with '0' first: binascii.a2b_hex requires
# an even number of hex digits, so the original raised on the documented
# example "11d".
def hexstr2int(hexstr, byteorder='big'):
    if len(hexstr) % 2:
        hexstr = '0' + hexstr
    return int.from_bytes(binascii.a2b_hex(hexstr), byteorder=byteorder)
###########################################################################################################
# get_varint(data):
#   Parse a Bitcoin-style varint from the front of a hex string.
#   params:
#     data: hex string beginning with the varint. type: str
#   return:
#     A 2-tuple. tuple[0] is the value portion as a hex string (byte-reversed
#     to big-endian; for one-byte varints it is the byte itself), tuple[1] is
#     the length of the value portion in hex characters (0 for one-byte
#     varints).
###########################################################################################################
def get_varint(data):
    flag = hexstr2int(data[0:2])
    # Values below 253 are encoded directly in the first byte.
    if flag < 253:
        return data[0:2], 0
    # Marker byte selects the payload width (in hex characters).
    size_for_marker = {253: 4, 254: 8, 255: 16}
    varint_size = size_for_marker[flag]
    return str_reverse_on_byte(data[2:varint_size + 2]), varint_size
###########################################################################################################
# single_sha256(data,byteorder = 'big'):
#   Compute SHA-256 of the bytes represented by a hex string.
#   params:
#     data: hex string of the input bytes. type: str
#     byteorder: 'big' or 'little'; 'little' returns the digest byte-reversed. type: str
#   return:
#     Hex string of the digest. type: str
###########################################################################################################
def single_sha256(data, byteorder='big'):
    digest = sha256(binascii.unhexlify(data)).digest()
    if byteorder == 'little':
        digest = digest[::-1]
    return binascii.hexlify(digest).decode()
###########################################################################################################
# double_sha256(data,byteorder = 'big'):
#   Compute SHA-256 twice over the bytes represented by a hex string.
#   params:
#     data: hex string of the input bytes. type: str
#     byteorder: 'big' or 'little'; 'little' returns the digest byte-reversed. type: str
#   return:
#     Hex string of the double digest. type: str
###########################################################################################################
def double_sha256(data, byteorder='big'):
    digest = sha256(sha256(binascii.unhexlify(data)).digest()).digest()
    if byteorder == 'little':
        digest = digest[::-1]
    return binascii.hexlify(digest).decode()
###########################################################################################################
# single_ripemd160(data,byteorder = 'big'):
#   Compute RIPEMD-160 of the bytes represented by a hex string.
#   params:
#     data: hex string of the input bytes. type: str
#     byteorder: 'big' or 'little'; 'little' returns the digest byte-reversed. type: str
#   return:
#     Hex string of the digest. type: str
#   Fix: the 'little' branch previously returned bytes while the 'big' branch
#   returned str; both now return str, consistent with single_sha256/double_sha256.
###########################################################################################################
def single_ripemd160(data, byteorder='big'):
    digest = hashlib.new('ripemd160', binascii.unhexlify(data)).digest()
    if byteorder == 'little':
        digest = digest[::-1]
    return binascii.hexlify(digest).decode()
###########################################################################################################
# single_base58(data):
#   Base58-encode the bytes represented by a hex string.
#   params:
#     data: hex string to encode. type: str
#   return:
#     Base58-encoded data (as produced by base58.b58encode).
###########################################################################################################
def single_base58(data):
    raw = binascii.unhexlify(data)
    return base58.b58encode(raw)
###########################################################################################################
# pubkey_2_btc_addr(pubkey,ver):
#   Derive a Bitcoin address from a public key.
#   params:
#     pubkey: public key in DER format, hex string. type: str
#     ver: version byte, usually "00". type: str
#   return:
#     Bitcoin address. type: str
###########################################################################################################
def pubkey_2_btc_addr(pubkey, ver):
    # hash160 = RIPEMD-160(SHA-256(pubkey)), prefixed with the version byte.
    payload = ver + single_ripemd160(single_sha256(pubkey))
    # Checksum: first 4 bytes (8 hex chars) of double SHA-256 of the payload.
    checksum = double_sha256(payload)[:8]
    # Base58-encode payload + checksum.
    return single_base58(payload + checksum)
###########################################################################################################
# pkhash_2_btc_addr(pkhash,ver):
#   Derive a Bitcoin address from a public-key hash.
#   params:
#     pkhash: public-key hash (SHA-256 then RIPEMD-160 of the key), hex string. type: str
#     ver: version byte, usually "00". type: str
#   return:
#     Bitcoin address. type: str
###########################################################################################################
def pkhash_2_btc_addr(pkhash, ver):
    # Prefix the hash with the version byte.
    payload = ver + pkhash
    # Checksum: first 4 bytes (8 hex chars) of double SHA-256 of the payload.
    checksum = double_sha256(payload)[:8]
    # Base58-encode payload + checksum.
    return single_base58(payload + checksum)
###########################################################################################################
# get_merkle_tree(merkle_leaves):
#   Compute a Merkle tree; returns all tree nodes appended to the input list.
#   The last element is the Merkle root. Note: the input list is mutated.
#   params:
#     merkle_leaves: leaf hashes as hex strings. type: list
#   return:
#     All Merkle tree nodes. type: list
###########################################################################################################
def get_merkle_tree(merkle_leaves):
    leaf_count = len(merkle_leaves)
    parents = []
    # Hash adjacent pairs (double SHA-256 over byte-reversed inputs);
    # an odd trailing leaf is paired with itself.
    for idx in range(0, leaf_count, 2):
        left = binascii.unhexlify(merkle_leaves[idx])[::-1]
        partner = idx + 1 if idx + 1 < leaf_count else idx
        right = binascii.unhexlify(merkle_leaves[partner])[::-1]
        combined = sha256(sha256(left + right).digest()).digest()
        parents.append(binascii.hexlify(combined[::-1]).decode())
    if len(parents) == 1:
        # Reached the root.
        merkle_leaves.append(parents[0])
    else:
        # Recurse: append every higher-level node (root last).
        for node in get_merkle_tree(parents):
            merkle_leaves.append(node)
    return merkle_leaves
###########################################################################################################
# op_scriptsig_ecdsa_verify(sig,msg,pubkey,sigdecode = 'der'):
#   Verify an op_scriptsig ECDSA signature.
#   params:
#     sig: signature, hex string. type: str
#     msg: message. type: str
#     pubkey: public key, hex string. type: str
#     sigdecode: signature format; defaults to DER. type: str
#   return:
#     Verification result. True or False
###########################################################################################################
def op_scriptsig_ecdsa_verify(sig, msg, pubkey, sigdecode='der'):
    try:
        vk = VerifyingKey.from_string(bytes.fromhex(pubkey), curve=SECP256k1)
        sig_bytes = binascii.unhexlify(sig.encode('ascii'))
        if sigdecode == 'der':
            return vk.verify(sig_bytes, b(msg), hashfunc=hashlib.sha256,
                             sigdecode=util.sigdecode_der)
        return vk.verify(sig_bytes, b(msg), hashfunc=hashlib.sha256)
    except BadSignatureError:
        return False
    except Exception:
        # Treat any malformed key/signature input as a failed verification.
        return False
{
"api_name": "binascii.hexlify",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "binascii.unhexlify",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "binascii.a2b_hex",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "binascii.hexlify... |
27982625722 | from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, TIMESTAMP, Float, Table
from sqlalchemy.orm import relationship
from sqlalchemy.sql.sqltypes import Text
from database import Base
class User(Base):
    """A registered customer; owns orders and reviews."""
    __tablename__ = "users"
    # user_id, roll_no, name, email, phone_number, password, can make reviews and orders
    # NOTE(review): despite the comment above, no password column is defined
    # here -- presumably authentication data lives elsewhere; confirm.
    user_id = Column(Integer, primary_key=True, index=True)
    roll_no = Column(String, unique=True, index=True)
    name = Column(String, index=True)
    email = Column(String, unique=True, index=True)
    phone_number = Column(String, unique=True, index=True)
    # one to many relationship with orders
    orders = relationship("Order", back_populates="customer")
    # one to many relationship with reviews
    reviews = relationship("Review", back_populates="reviewer")
# Association table for the many-to-many Shop <-> RawMaterial relationship;
# ``available`` flags whether the material is currently in stock at the shop.
materials_stores = Table('materials_stores', Base.metadata,
                         Column('store_id', Integer, ForeignKey('shops.shop_id')),
                         Column('material_id', Integer, ForeignKey('raw_materials.id')),
                         Column('available', Boolean, nullable=False),
                         )
class Shop(Base):
    """A food shop: menu items, orders, reviews and stocked raw materials."""
    __tablename__ = "shops"
    shop_id = Column(Integer, primary_key=True, index=True)
    name = Column(String, index=True)
    address = Column(String, index=True)
    description = Column(Text)
    contact = Column(String, index=True)
    # create a column tags which stores an array of texts
    # NOTE(review): stored as a single String -- presumably a delimited list;
    # confirm the encoding used by callers.
    tags = Column(String)
    # create a column image which stores the image url
    image_url = Column(String)
    # NOTE(review): average rating kept as a String while Item.item_rating
    # is a Float -- confirm whether the asymmetry is intentional.
    avg_rating = Column(String)
    orders = relationship("Order", back_populates="shop")
    reviews = relationship("Review", back_populates="reviewed_shop")
    items = relationship("Item", back_populates="shop")
    # many-to-many with RawMaterial through the materials_stores table
    materials = relationship("RawMaterial", secondary=materials_stores, back_populates="stores")
class Order(Base):
    """An order placed by a user at a shop; line items live in OrderItem."""
    __tablename__ = "orders"
    # id, user_id, shop_id, created_at, status
    id = Column(Integer, primary_key=True, index=True)
    user_id = Column(Integer, ForeignKey("users.user_id"))
    shop_id = Column(Integer, ForeignKey("shops.shop_id"))
    total = Column(Float)
    # NOTE(review): created_at is a String here but Review.created_at is a
    # TIMESTAMP -- confirm which representation callers expect.
    created_at = Column(String)
    status = Column(String)
    # many to one relationship with users
    customer = relationship("User", back_populates="orders")
    # many to one relationship with shops
    shop = relationship("Shop", back_populates="orders")
    # one to many relationship with order_items
    items = relationship("OrderItem", back_populates="order")
class OrderItem(Base):
    """One line of an order: which item, how many, and the line total."""
    __tablename__ = "order_items"
    # id, order_id, item_id, quantity
    id = Column(Integer, primary_key=True, index=True)
    order_id = Column(Integer, ForeignKey("orders.id"))
    item_id = Column(Integer, ForeignKey("food_item.item_id"))
    quantity = Column(Integer)
    # NOTE(review): line total is an Integer while Order.total is a Float --
    # confirm whether fractional line totals are possible.
    total = Column(Integer)
    # many to one relationship with orders
    order = relationship("Order", back_populates="items")
    # many to one relationship with items
    item = relationship("Item", back_populates="order_items")
class Review(Base):
    """A user's rating/comment for a shop and a specific item."""
    __tablename__ = "reviews"
    # id, user_id, shop_id, created_at, rating, comment
    id = Column(Integer, primary_key=True, index=True)
    user_id = Column(Integer, ForeignKey("users.user_id"))
    shop_id = Column(Integer, ForeignKey("shops.shop_id"))
    item_id = Column(Integer, ForeignKey("food_item.item_id"))
    created_at = Column(TIMESTAMP)
    rating = Column(Integer)
    comment = Column(String)
    # many to one relationship with users
    reviewer = relationship("User", back_populates="reviews")
    # many to one relationship with shops
    reviewed_shop = relationship("Shop", back_populates="reviews")
    # many to one relationship with items
    reviewed_item = relationship("Item", back_populates="reviews")
class Item(Base):
    """A menu (food) item sold by a shop."""
    __tablename__ = "food_item"
    item_id = Column(Integer, primary_key=True, index=True)
    name = Column(String)
    veg_or_nonveg = Column(String)
    description = Column(String)
    price = Column(Integer)
    # NOTE(review): items already reach orders through OrderItem; this extra
    # direct FK to orders (with no matching relationship) looks redundant --
    # confirm before relying on it.
    order_id = Column(Integer, ForeignKey("orders.id"))
    shop_id = Column(Integer, ForeignKey("shops.shop_id"))
    available = Column(Boolean)
    item_rating = Column(Float)
    category = Column(String)
    shop = relationship("Shop", back_populates="items")
    reviews = relationship("Review", back_populates="reviewed_item")
    order_items = relationship("OrderItem", back_populates="item")
class RawMaterial(Base):
    """An ingredient/raw material that shops may stock (see materials_stores)."""
    __tablename__ = "raw_materials"
    id = Column(Integer, primary_key=True, index=True)
    name = Column(String)
    # many-to-many back to Shop through the materials_stores association table
    stores = relationship("Shop", secondary=materials_stores, back_populates="materials")
| Gautam-Nanda/FEAST | models.py | models.py | py | 4,781 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "database.Base",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Colu... |
70852848035 | import torch
import torchvision
from torch import nn
class MyBetaModel(nn.Module):
    """Small CNN for 3x32x32 (CIFAR-sized) inputs producing 10 logits.

    Three conv + max-pool stages reduce 32x32 spatial size down to 4x4 with
    64 channels; the flattened 1024 features pass through two linear layers.
    Attribute names (``convModel``/``convFlatten``) are kept so that saved
    state dicts stay compatible.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: each stage keeps spatial size (5x5 kernel,
        # padding 2) then halves it with a 2x2 max-pool.
        self.convModel = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
        )
        # Classifier head: 64 * 4 * 4 = 1024 features in, 10 logits out.
        self.convFlatten = nn.Sequential(
            nn.Flatten(),
            nn.Linear(1024, 64, bias=True),
            nn.Linear(64, 10, bias=True),
        )

    def forward(self, inputs):
        """Run the feature extractor, then the classifier head."""
        return self.convFlatten(self.convModel(inputs))
if __name__ == '__main__':
    import os

    # Build a freshly initialised (untrained) model and persist it.
    processModel = MyBetaModel()
    # torch.save does not create parent directories itself.
    os.makedirs('./BetaModel', exist_ok=True)
    torch.save(processModel, './BetaModel/myBetaModel.pth')  # save model
    # process validation: smoke-test the forward pass on one random input.
    # (torch.rand already returns a Tensor; the original wrapped it in
    # torch.Tensor(...) again, which only made a useless copy.)
    processVAL = torch.rand(1, 3, 32, 32)
    result = processModel(processVAL)
    print(result.shape)
    print(result.size())  # fixed: bare `.size` printed the bound method object
    print(result)
| JevonLiuZz/PyTorchBaseStone | Models/BetaModel.py | BetaModel.py | py | 1,062 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
41858320835 | import json
# Shared module-level scratch lists: ``notas`` and ``medias`` are empty
# templates copied into each new student record, while ``auxiliar`` is used
# to assemble one record ([nome, notas, medias]) and cleared after each use.
notas = list()
medias = list()
auxiliar = list()
def cadastro_aluno(ficha):
    """Interactively register one new student in ``ficha``.

    Prompts for a name; if it already exists the user may retry (S/N).
    New records are assembled through the module-level scratch lists as
    ``[nome, [], []]`` (name, grades, average).
    """
    cont = 0
    resp = 'S'
    print('\nCADASTRAR ALUNO\n')
    while cont < 1 and resp in 'Ss' :
        aluno = input('Digite o nome do aluno: ')
        cad = 0
        for alunos in ficha:
            # Compares the typed name against every field of the record;
            # only the name element (index 0) can equal a string.
            for nome in alunos:
                if nome == aluno:
                    print('Aluno já cadastrado. Deseja cadastrar outro aluno? (S/N)')
                    resp = input()
                    cad += 1
        if cad == 0:
            # Record layout: [nome, notas(list), medias(list)].
            auxiliar.append(aluno)
            auxiliar.append(notas[:])
            auxiliar.append(medias[:])
            ficha.append(auxiliar[:])
            auxiliar.clear()
            cont += 1
def adicionar_nota(ficha):
    """Interactively add one grade (0.0-10.0, at most 3 per student) to a
    student found by exact name, then recompute and store their average
    in ``aluno[2]``.  On a name miss the user may retry (S/N).
    """
    cont = 0
    resp = 'S'
    print('\nADICIONANDO NOTA\n')
    while cont < 1 and resp in 'Ss':
        cad = 0
        if len(ficha) <= 0:
            print('Ficha ainda não possui alunos cadastrados.\n')
            cont += 1
        else:
            nome_pesquisado = input('Digite o nome do aluno: ')
            for aluno in ficha:
                if aluno[0] == nome_pesquisado:
                    cont2 = True
                    while cont2:
                        try :
                            nota = float(input('Digite a nota do aluno (0.0 - 10.0): '))
                            if (10 >= nota >= 0) and len(aluno[1]) < 3:
                                aluno[1].append(nota)
                                cad += 1
                                cont += 1
                                cont2 = False
                                # Recompute the stored average from scratch.
                                # (``nota`` is deliberately reused as the loop var.)
                                media = 0
                                for nota in aluno[1]:
                                    media += nota
                                media /= len(aluno[1])
                                aluno[2].clear()
                                aluno[2].append(media)
                            elif len(aluno[1]) >= 3:
                                print('Aluno já possui 3 notas')
                                print('Utilize a opção 6 se deseja editar a nota do aluno')
                                cont2 = False
                            else:
                                print('Nota fora do intervalo. Tente novamente')
                        except Exception as erro:
                            # Non-numeric input: report and re-prompt.
                            print('Erro ao digitar nota')
                            print(f'O erro foi {erro}')
            if cad >= 1:
                print('Nota adicionada\n')
            else:
                print('Nota não cadastrada. Verifique se o nome do aluno foi digitado corretamente.\n')
                resp = input('Deseja tentar novamente? (S/N): ')
def remover_aluno(ficha):
    """Interactively remove a student (found by exact name) from ``ficha``.

    Typing ``sair`` returns to the menu without removing anything.
    """
    print('\nREMOVER ALUNO\n')
    cont = 0
    while cont < 1:
        if len(ficha) <= 0:
            print('Ficha ainda não possui alunos cadastrados.\n')
            cont += 1
        else:
            cad = 0
            nome_pesquisado = input('Digite o nome do aluno: ')
            # NOTE(review): removing from ``ficha`` while iterating it skips
            # the element after the removed one -- harmless here only because
            # names are unique, but worth confirming.
            for aluno in ficha:
                if aluno[0] == nome_pesquisado:
                    ficha.remove(aluno)
                    cad += 1
                    cont += 1
            if cad == 1:
                print(f'O(A) aluno(a) {nome_pesquisado} removido(a)')
            elif nome_pesquisado == 'sair':
                cont +=1
            else:
                print(f'O nome {nome_pesquisado} não foi encontrado')
                print('Digite sair para voltar ao menu')
def remover_nota(ficha):
    """Interactively remove the Nth grade (1-based) from a student found by
    exact name, then recompute their average (cleared when no grades remain).
    """
    print('\nREMOVER NOTA\n')
    cont = 0
    while cont < 1:
        if len(ficha) <= 0:
            print('Ficha ainda não possui alunos cadastrados.\n')
            cont += 1
        else:
            cad = 0
            id_nota = True
            nome_pesquisado = input('Digite o nome do aluno: ')
            for aluno in ficha:
                if aluno[0] == nome_pesquisado:
                    try :
                        nota_pesquisada = int(input('Digite número da nota que deseja remover (1ª, 2ª ou 3ª nota): '))
                        if len(aluno[1]) >= nota_pesquisada:
                            aluno[1].pop(nota_pesquisada - 1)
                            cont += 1
                            cad += 1
                            # Recompute the average over the remaining grades.
                            media = 0
                            for nota in aluno[1]:
                                media += nota
                            if len(aluno[1]) > 0:
                                media /= len(aluno[1])
                                aluno[2].clear()
                                aluno[2].append(media)
                            else:
                                # No grades left: student has no average.
                                aluno[2].clear()
                        else:
                            id_nota = False
                    except Exception as erro:
                        # Non-numeric position input.
                        print('Erro ao digitar a nota')
                        print(f'O erro foi {erro}')
                        id_nota = False
            if cad == 1:
                print(f'A nota {nota_pesquisada} foi removida.')
            elif not(id_nota):
                print(f'A nota não foi não encontrada')
            else:
                print(f'{nome_pesquisado} não foi encontrado')
def editar_nome(ficha):
    """Interactively rename a student found by exact name.

    Typing ``sair`` returns to the menu without changing anything.
    """
    print('\nEDITAR NOME\n')
    cont = 0
    while cont < 1:
        if len(ficha) <= 0:
            print('Ficha ainda não possui alunos cadastrados.\n')
            cont += 1
        else:
            cad = 0
            nome_pesquisado = input('Digite o nome do aluno: ')
            for aluno in ficha:
                if aluno[0] == nome_pesquisado:
                    nome = input('Digite o novo nome: ')
                    aluno[0] = nome
                    cont += 1
                    cad += 1
            if cad == 1:
                print('Nome editado')
            elif nome_pesquisado == 'sair':
                cont += 1
            else:
                print('Aluno não encontrado\n Digite sair para voltar ao menu')
def editar_nota(ficha):
    """Interactively replace the Nth grade (1-based) of a student found by
    exact name, then recompute their average.

    NOTE(review): unlike the other editors there is no ``sair`` escape --
    a miss on the name re-prompts indefinitely; confirm this is intended.
    """
    print('\nEDITAR NOTA\n')
    if len(ficha) <= 0:
        print('Ficha ainda não possui alunos cadastrados.\n')
    else:
        cont = 0
        while cont < 1:
            cad = 0
            id_nota = True
            nome_pesquisado = input('Digite o nome do aluno: ')
            for aluno in ficha:
                if aluno[0] == nome_pesquisado:
                    try :
                        nota_pesquisada = int(input('Digite número da nota que deseja editar (1ª, 2ª ou 3ª nota): '))
                        if len(aluno[1]) >= nota_pesquisada:
                            nota = float(input('Digite a nova nota: '))
                            # Replace in place: pop the old grade, insert the new one.
                            aluno[1].pop(nota_pesquisada - 1)
                            aluno[1].insert(nota_pesquisada - 1, nota)
                            cont += 1
                            cad += 1
                            # Recompute the stored average.
                            media = 0
                            for nota in aluno[1]:
                                media += nota
                            if len(aluno[1]) > 0:
                                media /= len(aluno[1])
                                aluno[2].clear()
                                aluno[2].append(media)
                            else:
                                aluno[2].clear()
                        else:
                            id_nota = False
                    except Exception as erro:
                        print('Ocorreu erro ao buscar nota')
                        print(f'O erro foi {erro}')
                        id_nota = False
            if cad == 1:
                print(f'A nota {nota_pesquisada} foi alterada com sucesso')
            elif not(id_nota):
                print(f'A nota não foi encontrada')
            else:
                print(f'{nome_pesquisado} não foi encontrado')
    # NOTE(review): dumps the raw data structure -- looks like a debug leftover.
    print(ficha)
def buscar_aluno(ficha):
    """Interactively search students by name *substring* and print each match
    (name capitalised, grades, average).  Typing ``SAIR`` exits the loop.
    """
    print('\nBUSCAR ALUNO POR NOME\n')
    cont = 0
    while cont < 1:
        if len(ficha) <= 0:
            print('Ficha ainda não possui alunos cadastrados.\n')
            cont += 1
        else:
            cad = False
            nome_pesquisado = input('Digite o nome do aluno: ')
            for aluno in ficha:
                # Substring match, not exact equality.
                if nome_pesquisado in aluno[0]:
                    # Local ``notas`` shadows the module-level template list.
                    notas = str()
                    media = 0.0
                    for nota in aluno[1]:
                        notas += str(nota) + " "
                    for nota in aluno[2]:
                        media += nota
                    print('Nome: {0}. Notas: {1}. Média: {2:.2f}'.format(str(aluno[0]).lower().capitalize(), notas, media))
                    cont += 1
                    cad = True
            if not(cad) and nome_pesquisado != 'SAIR':
                print('Aluno não encontrado\nTente novamente ou digite SAIR para voltar ao menu inicial')
            elif nome_pesquisado == 'SAIR':
                cont += 1
def media_turma(ficha):
    """Print the class-wide average: sum of every stored student average
    divided by the number of registered students (students without grades
    still count in the denominator).
    """
    print('\nMÉDIA DA TURMA\n')
    if not ficha:
        print('Ficha ainda não possui alunos cadastrados.')
        return
    # Each record keeps its average in aluno[2] (a list with 0 or 1 entries).
    total = sum(media for aluno in ficha for media in aluno[2])
    print('A media da turma é {0:.2f}\n'.format(total / len(ficha)))
def melhor_aluno(ficha):
    """Print the student(s) holding the highest recorded average.

    Bug fix: the original advanced its loop counter only when a matching
    average was printed, so a non-empty ficha in which no student had any
    grade yet looped forever.  The scan now always runs exactly once.
    """
    print('\nMELHOR ALUNO\n')
    if len(ficha) <= 0:
        print('Ficha ainda não possui alunos cadastrados.')
    else:
        # First pass: highest average on record (aluno[2] holds 0 or 1 values).
        maior = 0
        for aluno in ficha:
            for media in aluno[2]:
                if maior < media:
                    maior = media
        # Second pass: print every student whose average equals the maximum.
        for aluno in ficha:
            notas = str()
            for nota in aluno[1]:
                notas += str(nota) + " "
            for media in aluno[2]:
                if maior == media:
                    print('{0}. Notas: {1}. Media: {2:.2f}'.format(str(aluno[0]).lower().capitalize(), notas, media))
    print()
def classificacao_alfabetica(ficha):
    """Sort ``ficha`` in place by student name (ascending) and print each record.

    The hand-rolled insertion sort was replaced by the built-in stable
    ``list.sort``; the resulting order is identical.
    """
    print('\nALUNOS EM ORDEM ALFABÉTICA\n')
    if len(ficha) <= 0:
        print('Ficha ainda não possui alunos cadastrados.\n')
    else:
        ficha.sort(key=lambda aluno: aluno[0])
        for aluno in ficha:
            if len(aluno[1]) > 0:
                notas = ''.join(str(nota) + ' ' for nota in aluno[1])
                media = sum(aluno[2])
            else:
                # No grades yet: show a dash and a zero average.
                media = 0
                notas = '- '
            print('Nome: {0} Notas: {1} Média: {2:.2f}'.format(aluno[0], notas, media))
        print()
def classificacao_media(ficha):
    """Sort ``ficha`` in place by average (descending) and print each record.

    ``list.sort`` with ``reverse=True`` replaces the hand-rolled insertion
    sort; both are stable, so ties keep their original order.  The sort key
    is the ``aluno[2]`` list itself, matching the original list comparison
    (an empty list orders below any non-empty one).
    """
    print('\nALUNOS CLASSIFICADOS POR MEDIA\n')
    if len(ficha) <= 0:
        print('Ficha ainda não possui alunos cadastrados.\n')
    else:
        ficha.sort(key=lambda aluno: aluno[2], reverse=True)
        for aluno in ficha:
            if len(aluno[1]) > 0:
                notas = ''.join(str(nota) + ' ' for nota in aluno[1])
                media = sum(aluno[2])
            else:
                notas = '- '
                media = 0
            print('Nome: {0} Notas: {1} Média: {2:.2f}'.format(str(aluno[0]).lower().capitalize(), notas, media))
        print()
def aprovados(ficha):
    """Print every student whose average is at least 7 (approved).

    Bug fix: the original incremented its loop counter only once per
    *approved* student, so a non-empty ficha with no approved students
    looped forever.  The scan now runs exactly once (same structure as
    the sibling ``alunos_final``).
    """
    print('\nAPROVADOS POR MEDIA\n')
    if len(ficha) <= 0:
        print('Ficha ainda não possui alunos cadastrados.\n')
    else:
        cad = False
        for aluno in ficha:
            media = 0
            notas = str()
            for nota in aluno[1]:
                notas += str(nota) + ' '
            for nota in aluno[2]:
                media += nota
            if media >= 7:
                print('{0}. Notas: {1}. Média: {2}'.format(aluno[0], notas, media))
                cad = True
        if not(cad):
            print('Não houve alunos aprovados\n')
        print()
def alunos_final(ficha):
    """Print every student whose average lies in [5, 7): the final-exam band."""
    print('\nALUNOS NA FINAL\n')
    if not ficha:
        print('Ficha ainda não possui alunos cadastrados.\n')
        return
    encontrou = False
    for registro in ficha:
        # registro layout: [nome, notas(list), media(list of 0 or 1 values)].
        media = sum(registro[2])
        notas = ''.join(str(nota) + ' ' for nota in registro[1])
        if 5 <= media < 7:
            print('{0}. Notas: {1}. Média: {2}'.format(registro[0], notas, media))
            encontrou = True
    if not encontrou:
        print('Não houve alunos na final\n')
def reprovados(ficha):
    """Print every *graded* student whose average is below 5 (failed).

    Students with no grades yet are skipped rather than counted as failed.
    """
    print('\nALUNOS REPROVADOS\n')
    if not ficha:
        print('Ficha ainda não possui alunos cadastrados.\n')
        return
    achou = False
    for registro in ficha:
        if len(registro[1]) > 0:
            media = sum(registro[2])
            notas = ''.join(str(nota) + ' ' for nota in registro[1])
            if media < 5:
                print('{0}. Notas: {1}. Média: {2}'.format(registro[0], notas, media))
                achou = True
    if not achou:
        print('Não houveram alunos reprovados\n')
def salvar_json(ficha) :
    """Serialize ``ficha`` to ./dados.json as {"Ficha": [{nome, notas, media}, ...]}.

    Fixes: the file is now opened with ``with`` so the handle is closed even
    when writing fails (the original leaked it on error), and the error
    message now says "salvar" instead of the misleading "carregar" (load).
    """
    lista_salvar = [
        dict(nome = aluno[0], notas = aluno[1], media = aluno[2])
        for aluno in ficha
    ]
    dict_salvar = {"Ficha": lista_salvar}
    dict_salvar = json.dumps(dict_salvar, indent = 4, sort_keys = False)
    try:
        with open("dados.json", "w") as arquivo_json:
            arquivo_json.write(dict_salvar)
    except Exception as erro:
        print('Ocorreu um erro ao salvar o arquivo.')
        print('O erro é: {}'.format(erro))
def importar_json(ficha):
    """Load ./dados.json (as written by salvar_json) and append its records
    to ``ficha`` as ``[nome, notas, media]`` lists.

    Fixes: the file handle is now closed via ``with`` (the original never
    closed it), and each record is built locally instead of through the
    shared module-level ``auxiliar`` scratch list, so a mid-loop failure
    cannot leave stale global state behind.
    """
    with open('dados.json', 'r') as arquivo_json:
        dados_json = json.load(arquivo_json)
    alunos = dados_json['Ficha']
    try :
        for aluno in alunos :
            ficha.append([aluno['nome'], aluno['notas'], aluno['media']])
    except Exception as erro:
        print('Ocorreu um erro')
print(f'O erro é {erro}') | niverton-felipe/unifacisa | cadastro.py | cadastro.py | py | 16,107 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "json.dumps",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 442,
"usage_type": "call"
}
] |
20564522844 | from re import search
import hashlib
from Crypto.Cipher import AES
from Crypto import Random
import os
from configparser import ConfigParser
from locale import getdefaultlocale
import gettext
from subprocess import check_output, CalledProcessError
import logging
from logging.handlers import TimedRotatingFileHandler
import warnings
from tkinter import TclVersion
from tkinter.messagebox import showwarning
# --- paths
# Decide between "running from a source checkout" (everything next to this
# file) and "installed system-wide" (config under ~/.checkmails, shared data
# under /usr/share).
PATH = os.path.dirname(__file__)
if os.access(PATH, os.W_OK) and os.path.exists(os.path.join(PATH, "images")):
    # the app is not installed
    # local directory containing config files
    LOCAL_PATH = os.path.join(PATH, "config")
    PATH_LOCALE = os.path.join(PATH, "locale")
    PATH_IMAGES = os.path.join(PATH, "images")
else:
    # local directory containing config files
    LOCAL_PATH = os.path.join(os.path.expanduser("~"), ".checkmails")
    PATH_LOCALE = "/usr/share/locale"
    PATH_IMAGES = "/usr/share/checkmails/images"

if not os.path.isdir(LOCAL_PATH):
    os.mkdir(LOCAL_PATH)

PATH_CONFIG = os.path.join(LOCAL_PATH, "checkmails.ini")
LOG_PATH = os.path.join(LOCAL_PATH, "checkmails.log")

# --- log
# Daily-rotating log file (7 backups) plus echo to stderr.
handler = TimedRotatingFileHandler(LOG_PATH, when='midnight',
                                   interval=1, backupCount=7)
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)-15s %(levelname)s: %(message)s',
                    handlers=[handler])
logging.getLogger().addHandler(logging.StreamHandler())

# --- ttf fonts
# Map font name -> file path, user fonts (~/.fonts) first, then system fonts.
# NOTE(review): the regex dot in r".(ttf|TTF)$" is unescaped, so it matches
# any character before "ttf" -- confirm whether a literal "." was intended.
local_path = os.path.join(os.path.expanduser("~"), ".fonts")
try:
    local_fonts = os.listdir(local_path)
except FileNotFoundError:
    local_fonts = []
TTF_FONTS = {f.split(".")[0]: os.path.join(local_path, f)
             for f in local_fonts if search(r".(ttf|TTF)$", f)}
for root, dirs, files in os.walk("/usr/share/fonts"):
    for f in files:
        if search(r".(ttf|TTF)$", f):
            TTF_FONTS[f.split(".")[0]] = os.path.join(root, f)
if "LiberationSans-Bold" in TTF_FONTS:
    default_font = "LiberationSans-Bold"
elif TTF_FONTS:
    default_font = list(TTF_FONTS.keys())[0]
else:
    default_font = ""

# --- read config file
# Load the existing ini (filling any missing options with defaults) or
# create a fresh configuration from scratch.
CONFIG = ConfigParser()
if os.path.exists(PATH_CONFIG):
    CONFIG.read(PATH_CONFIG)
    LANGUE = CONFIG.get("General", "language")
    if not CONFIG.has_option("General", "font"):
        CONFIG.set("General", "font", default_font)
    elif CONFIG.get("General", "font") not in TTF_FONTS:
        CONFIG.set("General", "font", default_font)
    if not CONFIG.has_option("General", "check_update"):
        CONFIG.set("General", "check_update", "True")
    if not CONFIG.has_option("General", "trayicon"):
        CONFIG.set("General", "trayicon", "")
else:
    LANGUE = ""
    CONFIG.add_section("General")
    CONFIG.add_section("Mailboxes")
    # time in ms between two checks
    CONFIG.set("General", "time", "300000")
    CONFIG.set("General", "timeout", "60000")
    CONFIG.set("General", "font", default_font)
    CONFIG.set("General", "check_update", "True")
    CONFIG.set("Mailboxes", "active", "")
    CONFIG.set("Mailboxes", "inactive", "")
    CONFIG.set("General", "trayicon", "")
def save_config():
    """Persist the in-memory CONFIG to the user's ini file (PATH_CONFIG)."""
    with open(PATH_CONFIG, 'w') as stream:
        CONFIG.write(stream)
# --- system tray icon
def get_available_gui_toolkits():
    """Check which gui toolkits are available to create a system tray icon.

    Returns a dict {'gtk': bool, 'qt': bool, 'tk': bool}; raises ImportError
    when none of the three can provide a tray icon.
    """
    toolkits = {'gtk': True, 'qt': True, 'tk': True}
    # ``b`` tracks whether at least one toolkit is usable.
    b = False
    # The imports below are availability probes only; the modules themselves
    # are not used here.
    try:
        import gi
        b = True
    except ImportError:
        toolkits['gtk'] = False
    try:
        import PyQt5
        b = True
    except ImportError:
        try:
            import PyQt4
            b = True
        except ImportError:
            try:
                import PySide
                b = True
            except ImportError:
                toolkits['qt'] = False
    # Tk support requires the external 'tktray' Tcl package; packages.tcl
    # prints the available Tcl package names.
    tcl_packages = check_output(["tclsh",
                                 os.path.join(PATH, "packages.tcl")]).decode().strip().split()
    toolkits['tk'] = "tktray" in tcl_packages
    b = b or toolkits['tk']
    if not b:
        raise ImportError("No GUI toolkits available to create the system tray icon.")
    return toolkits
# Pick the toolkit for the tray icon: honour the configured choice when it is
# available, otherwise fall back by desktop environment (Qt on KDE, GTK first
# elsewhere) and store the final choice back into the configuration.
TOOLKITS = get_available_gui_toolkits()
GUI = CONFIG.get("General", "trayicon").lower()
if not TOOLKITS.get(GUI):
    DESKTOP = os.environ.get('XDG_CURRENT_DESKTOP')
    if DESKTOP == 'KDE':
        if TOOLKITS['qt']:
            GUI = 'qt'
        else:
            warnings.warn("No version of PyQt was found, falling back to another GUI toolkits so the system tray icon might not behave properly in KDE.")
            GUI = 'gtk' if TOOLKITS['gtk'] else 'tk'
    else:
        if TOOLKITS['gtk']:
            GUI = 'gtk'
        elif TOOLKITS['qt']:
            GUI = 'qt'
        else:
            GUI = 'tk'
    CONFIG.set("General", "trayicon", GUI)
# --- Translation
# Resolve the UI language (config value, else system locale, defaulting to
# English) and install gettext so that ``_()`` becomes a builtin used by the
# rest of the application (e.g. in decrypt()).
APP_NAME = "checkmails"
if LANGUE not in ["en", "fr"]:
    # Check the default locale
    lc = getdefaultlocale()[0][:2]
    if lc == "fr":
        # If we have a default, it's the first in the list
        LANGUE = "fr_FR"
    else:
        LANGUE = "en_US"
    CONFIG.set("General", "language", LANGUE[:2])
gettext.find(APP_NAME, PATH_LOCALE)
# NOTE(review): bind_textdomain_codeset is deprecated in recent Python
# versions -- confirm the minimum supported Python for this project.
gettext.bind_textdomain_codeset(APP_NAME, "UTF-8")
gettext.bindtextdomain(APP_NAME, PATH_LOCALE)
gettext.textdomain(APP_NAME)
LANG = gettext.translation(APP_NAME, PATH_LOCALE,
                           languages=[LANGUE], fallback=True)
LANG.install()
# --- Cryptographic functions to safely store login information
def decrypt(mailbox, pwd):
    """Returns the login and password for the mailbox that where encrypted using pwd.

    The mailbox file (under LOCAL_PATH) starts with the AES IV, followed by
    the AES-CFB ciphertext of "server\\nlogin\\npassword\\nfolder"; the key is
    SHA-256(pwd).  Returns (server, login, password, folder), or four Nones
    after cleaning the configuration when the mailbox file is missing.
    """
    key = hashlib.sha256(pwd.encode()).digest()
    try:
        with open(os.path.join(LOCAL_PATH, mailbox), 'rb') as fich:
            # The IV was written as the file prefix by encrypt().
            iv = fich.read(AES.block_size)
            cipher = AES.new(key, AES.MODE_CFB, iv)
            server, login, password, folder = cipher.decrypt(fich.read()).decode().split("\n")
    except FileNotFoundError:
        # Stale configuration entry: warn the user (``_`` is the gettext
        # builtin installed above) and drop the mailbox from the config.
        showwarning(_("Warning"), _("Unknown mailbox %(name)r will be removed from configuration file.") % {'name': mailbox})
        active = CONFIG.get("Mailboxes", "active").split(", ")
        inactive = CONFIG.get("Mailboxes", "inactive").split(", ")
        while "" in active:
            active.remove("")
        while "" in inactive:
            inactive.remove("")
        if mailbox in active:
            active.remove(mailbox)
        if mailbox in inactive:
            inactive.remove(mailbox)
        CONFIG.set("Mailboxes", "active", ", ".join(active))
        CONFIG.set("Mailboxes", "inactive", ", ".join(inactive))
        save_config()
        return None, None, None, None
    return server, login, password, folder
def encrypt(mailbox, pwd, server, login, password, folder):
    """Encrypt the mailbox connection information using pwd.

    Writes the mailbox file read back by decrypt(): a random AES IV prefix
    followed by the AES-CFB ciphertext of "server\\nlogin\\npassword\\nfolder",
    keyed with SHA-256(pwd).
    """
    key = hashlib.sha256(pwd.encode()).digest()
    iv = Random.new().read(AES.block_size)
    cipher = AES.new(key, AES.MODE_CFB, iv)
    info = [server, login, password, folder]
    with open(os.path.join(LOCAL_PATH, mailbox), 'wb') as fich:
        fich.write(iv)
        # Some crypto backends require bytes input, others accept str:
        # fall back to encoding when the str form is rejected.
        try:
            fich.write(cipher.encrypt("\n".join(info)))
        except TypeError:
            fich.write(cipher.encrypt("\n".join(info).encode()))
# --- Images
# Paths to the application's icons; ICON lives in the writable config dir
# because it is (re)generated, the rest ship with the app.
ICON = os.path.join(LOCAL_PATH, "icon_mail.png")
# The Tk tray icon is small, the gtk/qt ones use a larger base image and a
# correspondingly larger overlay font size.
if GUI == 'tk':
    IMAGE = os.path.join(PATH_IMAGES, "mail.png")
    FONTSIZE = 10
else:
    IMAGE = os.path.join(PATH_IMAGES, "mail128.png")
    FONTSIZE = 70

ICON_48 = os.path.join(PATH_IMAGES, "mail48.png")
IMAGE2 = os.path.join(PATH_IMAGES, "mail.svg")
ADD = os.path.join(PATH_IMAGES, "add.png")
DEL = os.path.join(PATH_IMAGES, "del.png")
EDIT = os.path.join(PATH_IMAGES, "edit.png")
IM_ERROR = os.path.join(PATH_IMAGES, "error.png")
IM_QUESTION = os.path.join(PATH_IMAGES, "question.png")
def internet_on():
    """Check the Internet connexion by pinging a well-known host.

    Returns True when the single ping probe succeeds, False when it fails
    or does not answer in time.

    Fix: the original call had no timeout, so a hanging ``ping`` froze the
    caller indefinitely.
    """
    from subprocess import TimeoutExpired  # local: keeps top-level imports untouched
    try:
        # -c 1: a single probe is enough to establish connectivity.
        check_output(["ping", "-c", "1", "www.google.com"], timeout=10)
        return True
    except (CalledProcessError, TimeoutExpired):
        return False
# --- compatibility
if TclVersion < 8.6:
    # then tkinter cannot import PNG files directly, we need to use PIL
    # but to create an image from a string with the data keyword, we still need
    # the regular tkinter.PhotoImage
    from PIL import ImageTk
    from tkinter import PhotoImage as TkPhotoImage

    class MetaPhotoImage(type):
        # Metaclass dispatch: instantiating PhotoImage(...) routes file-based
        # images through PIL (PNG support) and data-based ones through Tk.
        def __call__(cls, *args, **kwargs):
            if 'file' in kwargs:
                return ImageTk.PhotoImage(*args, **kwargs)
            else:
                return TkPhotoImage(*args, **kwargs)

    class PhotoImage(metaclass=MetaPhotoImage):
        # Facade class: never instantiated itself, see MetaPhotoImage.__call__.
        pass
else:
    # no need of ImageTk dependency
    from tkinter import PhotoImage
| j4321/CheckMails | checkmailslib/constants.py | constants.py | py | 8,761 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.access",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.W_OK",
"line_number": 1... |
3588852824 | import logging
from typing import Any, Dict
from sqlalchemy import NUMERIC, DateTime, bindparam, case, func, select
from sqlalchemy.dialects.postgresql import INTERVAL
from sqlalchemy.sql import Select
from sqlalchemy.sql.functions import concat
from execution_engine.constants import CohortCategory
from execution_engine.omop.concepts import Concept
from execution_engine.omop.criterion.abstract import Criterion
from execution_engine.util import Interval, ValueNumber, value_factory
from execution_engine.util.sql import SelectInto
__all__ = ["DrugExposure"]
class DrugExposure(Criterion):
    """A drug exposure criterion in a cohort definition.

    Selects drug_exposure rows for a fixed list of drug concept ids and,
    when a dose is given, aggregates administered quantity per regular time
    interval so the dose/frequency requirement can be checked in SQL.
    """

    def __init__(
        self,
        name: str,
        exclude: bool,
        category: CohortCategory,
        drug_concepts: list[str],
        ingredient_concept: Concept,
        dose: ValueNumber | None,
        frequency: int | None,
        interval: Interval | str | None,
        route: Concept | None,
    ) -> None:
        """
        Initialize the drug administration action.
        """
        super().__init__(name=name, exclude=exclude, category=category)
        self._set_omop_variables_from_domain("drug")

        self._drug_concepts = drug_concepts
        self._ingredient_concept = ingredient_concept
        self._dose = dose
        self._frequency = frequency
        if interval is not None:
            # Accept both Interval instances and their string names.
            if isinstance(interval, str):
                interval = Interval(interval)
            assert isinstance(interval, Interval), "interval must be an Interval or str"
        self._interval = interval
        self._route = route

    @property
    def concept(self) -> Concept:
        """Get the concept of the ingredient associated with this DrugExposure"""
        return self._ingredient_concept

    def _sql_filter_concept(self, query: Select) -> Select:
        """
        Return the SQL to filter the data for the criterion.
        """
        drug_exposure = self._table

        query = query.where(drug_exposure.c.drug_concept_id.in_(self._drug_concepts))

        return query

    def _sql_generate(self, query: Select) -> Select:
        """
        Build the criterion query; with a dose requirement this becomes a
        chain of CTEs that buckets exposures into fixed-length intervals,
        pro-rates each exposure's quantity by interval overlap, and filters
        the per-interval totals against dose and frequency.
        """
        drug_exposure = self._table

        query = self._sql_filter_concept(query)

        if self._dose is not None:
            if self._route is not None:
                # route is not implemented yet because it uses HemOnc codes in the standard vocabulary
                # (cf concept_class_id = 'Route') but these are not standard codes and HemOnc might not be
                # addressable in FHIR
                logging.warning("Route specified, but not implemented yet")

            # todo: this won't work if no interval is specified (e.g. when just looking for a single dose)
            # e.g. "1 day" as a PostgreSQL INTERVAL, and its length in seconds.
            interval = func.cast(concat(1, self._interval.name), INTERVAL)  # type: ignore
            interval_length_seconds = func.cast(
                func.extract("EPOCH", interval), NUMERIC
            ).label("interval_length_seconds")
            one_second = func.cast(concat(1, "second"), INTERVAL)

            # Filter only drug_exposures that are inbetween the start and end date of the cohort
            query = super()._insert_datetime(query)

            # CTE 1: align each exposure's start onto the interval grid that
            # begins at midnight of the observation start date.
            interval_starts = query.add_columns(
                (
                    func.date_trunc(
                        "day", bindparam("observation_start_datetime", type_=DateTime)
                    )
                    + interval_length_seconds
                    * (
                        func.floor(
                            func.extract(
                                "EPOCH",
                                (
                                    drug_exposure.c.drug_exposure_start_datetime
                                    - func.date_trunc(
                                        "day",
                                        bindparam(
                                            "observation_start_datetime", type_=DateTime
                                        ),
                                    )
                                ),
                            )
                            / interval_length_seconds
                        )
                        * one_second
                    )
                ).label("interval_start"),
                drug_exposure.c.drug_exposure_start_datetime.label("start_datetime"),
                drug_exposure.c.drug_exposure_end_datetime.label("end_datetime"),
                drug_exposure.c.quantity.label("quantity"),
            ).cte("interval_starts")

            # CTE 2: one row per interval covered by each exposure.
            date_ranges = select(
                interval_starts.c.person_id,
                func.generate_series(
                    interval_starts.c.interval_start,
                    interval_starts.c.end_datetime,
                    interval,
                ).label("interval_start"),
                interval_starts.c.start_datetime,
                interval_starts.c.end_datetime,
                interval_starts.c.quantity,
            ).cte("date_ranges")

            # CTE 3: overlap duration between each exposure and each interval.
            interval_quantities = (
                select(
                    date_ranges.c.person_id,
                    date_ranges.c.interval_start,
                    (
                        func.least(
                            date_ranges.c.end_datetime,
                            date_ranges.c.interval_start + interval,
                        )
                        - func.greatest(
                            date_ranges.c.start_datetime,
                            date_ranges.c.interval_start,
                        )
                    ).label("time_diff"),
                    date_ranges.c.start_datetime,
                    date_ranges.c.end_datetime,
                    date_ranges.c.quantity,
                )
                .select_from(date_ranges)
                .cte("interval_quantities")
            )

            # Calculate the ratio of the interval that the drug was taken and handle the case where the
            # interval is 0 (set ratio to 1 explicitly, this is a "bolus" dose)
            ir_ratio_num = func.extract("EPOCH", interval_quantities.c.time_diff)
            ir_ratio_denom = func.extract(
                "EPOCH", interval_quantities.c.end_datetime
            ) - func.extract("EPOCH", interval_quantities.c.start_datetime)
            ir_ratio = case(
                (ir_ratio_denom == 0, 1), else_=ir_ratio_num / ir_ratio_denom
            ).label("ratio")

            # CTE 4: fraction of each exposure's quantity attributable to
            # each interval.
            interval_ratios = (
                select(
                    interval_quantities.c.person_id,
                    interval_quantities.c.interval_start,
                    ir_ratio,
                    interval_quantities.c.quantity,
                )
                .select_from(interval_quantities)
                .cte("interval_ratios")
            )

            c_interval_quantity = func.sum(
                interval_ratios.c.quantity * interval_ratios.c.ratio
            ).label("interval_quantity")
            c_interval_count = func.count().label("interval_count")

            # Final query: per (person, interval) totals, kept only when both
            # the dose condition and the minimum administration count hold.
            query = (
                select(
                    interval_ratios.c.person_id,
                    interval_ratios.c.interval_start.label("valid_from"),
                    (interval_ratios.c.interval_start + interval - one_second).label(
                        "valid_to"
                    ),
                    c_interval_quantity,
                    c_interval_count,
                )
                .select_from(interval_ratios)
                .where(interval_ratios.c.ratio > 0)
                .group_by(interval_ratios.c.person_id, interval_ratios.c.interval_start)
                .having(
                    self._dose.to_sql(column_name=c_interval_quantity, with_unit=False)
                )
                .having(c_interval_count >= self._frequency)
                .order_by(interval_ratios.c.person_id, interval_ratios.c.interval_start)
            )

        return query

    def _insert_datetime(self, query: SelectInto) -> SelectInto:
        """
        Return the SQL to insert the datetime for the criterion.

        Nothing to do here, because the datetime is already inserted by the
        _sql_generate method.
        """
        return query

    def _sql_select_data(self, query: Select) -> Select:
        """
        Return the SQL to select the data for the criterion.
        """
        drug_exposure = self._table

        query = query.add_columns(
            drug_exposure.c.drug_concept_id.label("parameter_concept_id"),
            drug_exposure.c.drug_exposure_start_datetime.label("start_datetime"),
            drug_exposure.c.drug_exposure_end_datetime.label("end_datetime"),
            drug_exposure.c.quantity.label("drug_dose_as_number"),
        )

        return query

    def description(self) -> str:
        """
        Get a human-readable description of the criterion.
        """
        return (
            f"{self.__class__.__name__}['{self._name}']("
            f"ingredient={self._ingredient_concept.concept_name}, "
            f"dose={str(self._dose)}, frequency={self._frequency}/{self._interval}, "
            f"route={self._route.concept_name if self._route is not None else None} "
            f")"
        )

    def dict(self) -> dict[str, Any]:
        """
        Return a dictionary representation of the criterion.
        """
        return {
            "name": self._name,
            "exclude": self._exclude,
            "category": self._category.value,
            "drug_concepts": self._drug_concepts,
            "ingredient_concept": self._ingredient_concept.dict(),
            "dose": self._dose.dict() if self._dose is not None else None,
            "frequency": self._frequency,
            "interval": self._interval,
            "route": self._route.dict() if self._route is not None else None,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "DrugExposure":
        """
        Create a drug exposure criterion from a dictionary representation.
        """

        dose = value_factory(**data["dose"]) if data["dose"] is not None else None

        assert dose is None or isinstance(dose, ValueNumber), "Dose must be a number"

        return cls(
            name=data["name"],
            exclude=data["exclude"],
            category=CohortCategory(data["category"]),
            drug_concepts=data["drug_concepts"],
            ingredient_concept=Concept(**data["ingredient_concept"]),
            dose=dose,
            frequency=data["frequency"],
            interval=data["interval"],
            route=Concept(**data["route"]) if data["route"] is not None else None,
        )
| CODEX-CELIDA/execution-engine | execution_engine/omop/criterion/drug_exposure.py | drug_exposure.py | py | 10,683 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "execution_engine.omop.criterion.abstract.Criterion",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "execution_engine.constants.CohortCategory",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "execution_engine.omop.concepts.Concept",
"line_n... |
32081852087 | # tests.py
from unittest import TestCase, main as unittest_main
from app import app
from unittest import TestCase, main as unittest_main, mock
from bson.objectid import ObjectId
# Fixture data for the tests below.
# NOTE(review): ``sample_id`` is assigned twice (the ...a3 value is
# immediately replaced by ...a4) and ``sample_item`` is rebuilt from itself
# with identical keys -- both look like copy-paste leftovers; confirm which
# values are actually intended.
sample_id = ObjectId('5d55cffc4a3d4031f42827a3')
sample_deck = { 'img': "static/red.jpeg", 'description': 'Red playing cards' }
sample_data = {
    'img': sample_deck['img'],
    'description': sample_deck['description']
}
sample_id = ObjectId('5d55cffc4a3d4031f42827a4')
sample_item = { 'img': "static/red.jpeg", 'description': 'Red playing cards', 'quantity': 1}
sample_item = {
    'img': sample_item['img'],
    'description': sample_item['description'],
    'quantity': sample_item['quantity']
}
class PlaylistsTests(TestCase):
"""Flask tests."""
def setUp(self):
"""Stuff to do before every test."""
# Get the Flask test client
self.client = app.test_client()
# Show Flask errors that happen during tests
app.config['TESTING'] = True
#tests shop home page
def test_index(self):
result = self.client.get('/index')
self.assertEqual(result.status, '200 OK')
self.assertIn(b'Deck of Cards', result.data)
#tests cart view page
def test_cart(self):
result = self.client.get('/cart')
self.assertEqual(result.status, '200 OK')
self.assertIn(b'Currently in cart', result.data)
#tests checkout page
def test_new(self):
result = self.client.get('/cart/checkout')
self.assertEqual(result.status, '405 METHOD NOT ALLOWED')
if __name__ == '__main__':
unittest_main() | Gaoyagi/Contractor | test.py | test.py | py | 1,596 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "bson.objectid.ObjectId",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bson.objectid.ObjectId",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "app.ap... |
71710416995 | import json
import requests
import pandas as pd
import sweetviz as sv
import pandas_profiling
def extract_data(url = "http://api.tvmaze.com/schedule/web?date=2020-12-"):
data = []
for i in range(1,32):
url_api = url + str(i).zfill(2)
result = requests.get(url_api)
dato = json.loads(result.text)
with open("./json/2020-12-" + str(i) +'.json', 'w') as json_file:
json.dump(dato, json_file)
data.extend(dato)
return data
def dataframe_episodes(data):
episodes = []
for episode in data:
show_id = episode["_embedded"]["show"]["id"]
episode_id = episode["id"]
episode_name = episode["name"]
episode_url = episode["url"]
episode_season = episode["season"]
episode_number = episode["number"]
episode_type = episode["type"]
episode_airdate = episode["airdate"]
episode_airtime = episode["airtime"]
episode_airstamp = episode["airstamp"]
episode_runtime = episode["runtime"]
episode_rating = episode["rating"]["average"]
episode = {
'show_id': show_id,
'episode_id': episode_id,
'name': episode_name,
'url': episode_url,
'season': episode_season,
'number': episode_number,
'type': episode_type,
'airdate': episode_airdate,
'airtime': episode_airtime,
'airstamp': episode_airstamp,
'runtime': episode_runtime,
'rating': episode_rating,
}
episodes.append(episode)
df_episodes = pd.DataFrame(episodes).reset_index(drop=True)
return df_episodes
def dataframe_shows(data):
shows = []
for episode in data:
show_id = episode["_embedded"]["show"]["id"]
show_name = episode["_embedded"]["show"]["name"]
show_url = episode["_embedded"]["show"]["url"]
show_name = episode["_embedded"]["show"]["name"]
show_type = episode["_embedded"]["show"]["type"]
show_language = episode["_embedded"]["show"]["language"]
show_genres = episode["_embedded"]["show"]["genres"]
show_status = episode["_embedded"]["show"]["status"]
show_runtime = episode["_embedded"]["show"]["runtime"]
show_averageRuntime = episode["_embedded"]["show"]["averageRuntime"]
show_premiered = episode["_embedded"]["show"]["premiered"]
show_ended = episode["_embedded"]["show"]["ended"]
show_officialSite = episode["_embedded"]["show"]["officialSite"]
show_schedule_time = episode["_embedded"]["show"]["schedule"]["time"]
show_schedule_days = episode["_embedded"]["show"]["schedule"]["days"]
show_rating = episode["_embedded"]["show"]["rating"]["average"]
show_weight = episode["_embedded"]["show"]["weight"]
show_network = episode["_embedded"]["show"]["network"]
try:
show_webChannel_id = episode["_embedded"]["show"]["webChannel"]["id"]
except:
show_webChannel_id = []
show_externals = episode["_embedded"]["show"]["externals"]
try:
show_image = episode["_embedded"]["show"]["image"]["original"]
except:
show_image = []
show_summary = episode["_embedded"]["show"]["summary"]
show = {
'show_id': show_id,
'name': show_name,
"url": show_url,
"type": show_type,
"language": show_language,
"genres": show_genres,
"status": show_status,
"runtime": show_runtime,
"averageRuntime": show_averageRuntime,
"premiered":show_premiered,
"ended": show_ended,
"officialSite": show_officialSite,
"schedule_time": show_schedule_time,
"schedule_days": show_schedule_days,
"rating": show_rating,
"weight": show_weight,
"network": show_network,
"webchannnel_id": show_webChannel_id,
"externals": show_externals,
"image": show_image,
"summary": show_summary
}
shows.append(show)
df_shows = pd.DataFrame(shows).drop_duplicates("show_id").reset_index(drop=True)
return df_shows
def dataframe_webchannel(data):
web_channels = []
for episode in data:
try:
show_webChannel_id = episode["_embedded"]["show"]["webChannel"]["id"]
show_webChannel_name = episode["_embedded"]["show"]["webChannel"]["name"]
show_webChannel_country_name = episode["_embedded"]["show"]["webChannel"]["country"]["name"]
show_webChannel_country_code = episode["_embedded"]["show"]["webChannel"]["country"]["code"]
show_webChannel_country_timezone = episode["_embedded"]["show"]["webChannel"]["country"]["timezone"]
show_webChannel_officialSite = episode["_embedded"]["show"]["webChannel"]["officialSite"]
except:
pass
web_channel = {
'id': show_webChannel_id,
'name': show_webChannel_name,
'country': show_webChannel_country_name,
'code': show_webChannel_country_code,
'timezone': show_webChannel_country_timezone,
'officialSite': show_webChannel_officialSite,
}
web_channels.append(web_channel)
df_webchannel = pd.DataFrame(web_channels).drop_duplicates("id").reset_index(drop=True)
return df_webchannel
def profile_sv(df):
analysis = sv.analyze(df)
return analysis
def profile_shows(df):
df = df[["show_id", "name", "url", "type", "language", "status", "runtime", "averageRuntime", "premiered"]]
profile_shows= pandas_profiling.ProfileReport(df)
profile_shows.to_file("df_shows.html")
return df
def drop_missing(df):
thresh = len(df) * 0.6
df.dropna(axis=1, thresh=thresh, inplace=True)
return df
def to_date(df):
df["airstamp"] = df['airstamp'].str[:10]
df["airstamp"] = df["airstamp"].apply(pd.to_datetime)
df["airdate"] = df['airdate'].apply(pd.to_datetime)
return df
def format_shows(df):
df["premiered"] = df["premiered"].apply(pd.to_datetime)
df['summary'] = df_shows['summary'].str.replace(r'<[^<]+?>', '')
return df
def to_category(df):
cols = df.select_dtypes(include='object').columns
for col in cols:
ratio = len(df[col].value_counts()) / len(df)
if ratio < 0.05:
df[col] = df[col].astype('category')
return df
if __name__ == '__main__':
datos = extract_data()
#Dataframe Episodes
dataframe_episodes = dataframe_episodes(datos)
dataframe_episodes = drop_missing(dataframe_episodes)
dataframe_episodes = to_date(dataframe_episodes)
dataframe_episodes = to_category(dataframe_episodes)
dataframe_episodes.to_csv('dataframe_episodes.csv', index=False)
profile_episodes = profile_sv(dataframe_episodes)
profile_episodes.show_html('dataframe_episodes.html')
#Dataframe Shows
df_shows = dataframe_shows(datos)
df_shows = drop_missing(df_shows)
df_shows = format_shows(df_shows)
df_shows = to_category(df_shows)
df_shows.to_csv('df_shows.csv', index=False)
#profile_shows = profile_shows(df_shows)
#Dataframe webchannel
df_webchannel = dataframe_webchannel(datos)
df_webchannel = drop_missing(df_webchannel)
df_webchannel = to_category(df_webchannel)
df_webchannel.to_csv('df_webchannel.csv', index=False)
profile_webchannel = profile_sv(df_webchannel)
profile_webchannel.show_html('df_webchannel.html') | Harolencio/lulo_bank_data_test | src/automatic_process.py | automatic_process.py | py | 7,651 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number... |
21061957738 | import numpy as np
import glob
import sys
import cv2
import os
import json
import operator
from matplotlib import pyplot as plt
def get_data(directory):
MIN_MATCH_COUNT = 10
img1 = cv2.imread('data/original.jpg',0) #queryImage
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 5)
flann = cv2.FlannBasedMatcher(index_params, search_params)
all_images_to_compare = []
allmatches={}
titles = []
for f in glob.iglob("database\*"):
image = cv2.imread(f)
titles.append(f)
all_images_to_compare.append(image)
for image_to_compare, title in zip(all_images_to_compare, titles):
kp2, des2 = sift.detectAndCompute(image_to_compare,None)
matches = flann.knnMatch(des1,des2,k=2)
# store all the good matches as per Lowe's ratio test.
good = []
#print(matches)
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
'''allmatches.append(good)
best=allmatches.sort()
for i in best:
print(i)
print(type(allmatches))'''
if len(good)>0:
#print("Enough matches are found - %d" % (len(good)))
#print(title)
allmatches[title]=len(good)
sorted_d = sorted(allmatches.items(),key = operator.itemgetter(1), reverse=True)
arr = []
for s in sorted_d[:3]:
arr.append(s[0])
#cv2.imshow('Output',s[0])
arr1 = []
for x in arr:
y = x.split("\\")
arr1.append(y)
return json.dumps(arr1)
if __name__ == "__main__":
print (get_data(sys.argv[1])) | aishwaryaanaidu/sketch-recognition | sift/looping.py | looping.py | py | 1,926 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.xfeatures2d.SIFT_create",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.xfeatures2d",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "cv2.Flann... |
42926702835 | import requests
import re
url = "http://46.101.60.26:31068/question3/"
session = requests.Session()
with open("top-usernames-shortlist.txt", "r") as f:
wordlist = f.readlines()
time_taken = {}
print("Testing the time taken")
print("======================")
for word in wordlist:
word = word.rstrip()
data = {
"userid": word,
"passwd": "asdfqwerqwerqwerqwersdfsdafsdfssdfsdfadsfsdfsdfsfsdfwqeasdfwefasdfefasdfef"
}
r = session.post(url, data=data)
print(f"[!] {word: <15} -> {r.elapsed.total_seconds()}")
time_taken[word] = r.elapsed.total_seconds()
print("======================")
print(time_taken)
| singha-brother/Web_Security_Notes | HTB_Academy/BROKEN_AUTHENTICATION/questions/username_bf03.py | username_bf03.py | py | 619 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "requests.Session",
"line_number": 6,
"usage_type": "call"
}
] |
73732029792 | from django.urls import path
from tasks.views import complete, create, create_task_to_card, delete, list_all, update, find_by_id
app_name = 'tasks'
urlpatterns = [
path('create-task/', create, name='create'),
path('tasks/', list_all, name='list_all'),
path('tasks/<int:pk>/', find_by_id, name='find_by_id'),
path('task-to-card/<int:pk>/', create_task_to_card, name='create_task_to_card'),
path('delete-task/<int:pk>/', delete, name='delete'),
path('update-task/<int:pk>/', update, name='update'),
path('complete-task/<int:pk>/', complete, name='complete'),
]
| l-eduardo/potential-pancake | tasks/urls.py | urls.py | py | 590 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tasks.views.create",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tasks.views.lis... |
27706601958 | from datetime import datetime, date, timedelta
from datetime import timedelta
import tweepy
from tweepy import OAuthHandler
import json
from lab9_config import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET
# Authorization setup to access the Twitter API
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
tweets = api.user_timeline(id = 'umsi')
i = 0
for tweet in tweets:
print (i, tweet.id)
i += 1
today = datetime.now()
before = today - timedelta(weeks = 2)
d = date(2016, 9, 1)
t= datetime(2016, 9, 1, 0, 0)
t1 = datetime(2016, 9, 1, 0, 1)
print (type(t.year), type(t.month))
print (today>t)
print (before.day)
print (today>before)
print (t<t1)
# f = open('tweets.json', 'w')
# f.write('\naaaaaa')
| wudixiaoyu008/twitter-api-and-database | test.py | test.py | py | 821 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tweepy.OAuthHandler",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lab9_config.CONSUMER_KEY",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "lab9_config.CONSUMER_SECRET",
"line_number": 13,
"usage_type": "argument"
},
{
"... |
425665856 | from typing import Union
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import patches
from matplotlib import lines
# Custom imports:
from Synthetic_Sequencer import synthtools as syn
# matrix_imager(tp_matrix, classes, facies_dict, layout, filepath, title=None):
# ======================================================================================================================
# INPUT:
# ======================================================================================================================
## tp_matrix: a transition probability matrix with shape (F, F).
## classes: a list of all unique facies classes, size F.
## facies_dict: a dictionary with as key:value pairs 'lithology:code'.
## layout: a dictionary containing as key:value pairs 'facies class:[color, hatch]'.
## ideal [optional]: bool; if True, plots ideal sequence lithology bar corresponding to the j=1 or j=-1 diagonal pairs.
## filepath [optional]: string containing the directory and filename to which the figure is saved. Default = None.
## title [optional]: string; sets the title of the figure. Default = None.
## cmap [optional]: matplotlib colormap used for the TP matrix visualization. Default = 'Greens'.
# ======================================================================================================================
# OUTPUT:
# ======================================================================================================================
## Visualizes 'tp_matrix' with probability values, colormap, lithology labels and an ideal sequence lithology bar.
def matrix_imager(tp_matrix: np.ndarray, classes: list, facies_dict: dict, layout: dict, ideal: bool = False,
filepath: str = None, title: str = None, cmap: str = 'Greens') -> Union[list, None]:
# The number of facies classes F:
F = len(classes)
# Create figure and axes:
if ideal:
fig, axes = plt.subplots(nrows=1, ncols=2, gridspec_kw={'width_ratios': [F, 1]})
else:
fig, axes = plt.subplots(nrows=1, ncols=1)
axes = [axes]
fig.set_size_inches(5, 5)
# Visualize the matrix:
axes[0].imshow(tp_matrix, cmap=cmap)
## Create grid:
axes[0].set_xticks(np.arange(0, F, 1))
axes[0].set_yticks(np.arange(0, F, 1))
axes[0].set_xticks(np.arange(-0.5, F, 0.5), minor='true')
axes[0].set_yticks(np.arange(-0.5, F, 0.5), minor='true')
axes[0].grid(which='minor', color='black', lw=2)
## Create lithology labels:
x_labels = []
rev_facies_dict = syn.reverse_dict(facies_dict)
for i in range(F):
x_labels.append(rev_facies_dict[str(i)])
axes[0].set_xticklabels(x_labels, weight='bold', fontsize='small')
x_labels.reverse()
axes[0].set_yticklabels(x_labels, weight='bold', fontsize='small')
## Patch out the j=0 diagonal:
for i in range(F + 1):
axes[0].add_patch(patches.Rectangle(((-1.5 + i), ((F - 0.5) - i)), 1, 1, edgecolor='black',
facecolor='darkgray', hatch='\/x', lw=2))
## Display the matrix values:
for i in range(F):
for j in range(F):
if (F - (i + 1)) != j:
axes[0].text(j, i, str(round(tp_matrix[i, j], 2)), va='center', ha='center', fontsize='large')
## Add in the j-diagonals with labels:
j = 0
for i in range(F * 2 - 1):
if i <= (F - 1):
if i < (F - 1):
axes[0].add_artist(
lines.Line2D([-0.5, 0.5 + i], [0.5 + i, -0.5], lw=1, linestyle='--', color='gray', alpha=0.6))
if i == (F - 1):
axes[0].add_artist(
lines.Line2D([-0.5, 0.5 + i], [0.5 + i, -0.5], lw=1, linestyle='--', color='white', alpha=0.9))
axes[0].text(0.5 + i - 0.15, -0.5 - 0.05, 'j=' + str(-(F - (i + 1))))
else:
axes[0].add_artist(
lines.Line2D([0.5 + j, F - 0.5], [F - 0.5, 0.5 + j], lw=1, linestyle='--', color='gray', alpha=0.6))
axes[0].text(F - 0.5 + 0.05, 0.5 + j + 0.025, 'j=' + str(j + 1))
j += 1
## Add lithology bar next to matrix if ideal = True:
if ideal:
lith_bar = np.ones((F, 1))
axes[1].imshow(lith_bar)
### Find the sum value in the (j=1,j=-F) and (j=-1,j=F) diagonal pairs:
diag_sum_pos = tp_matrix[0, 0]
diag_sum_neg = tp_matrix[F-1, F-1]
for i in range(F-1):
#### The j=1th diagonal:
diag_sum_pos += tp_matrix[(F-1)-i, 1+i]
#### The j=-1th diagonal:
diag_sum_neg += tp_matrix[(F-2)-i, i]
### If the (j=1,j=-F) diagonal has the highest sum value, reverse the row labels:
if diag_sum_pos > diag_sum_neg:
x_labels.reverse()
### Add lithology bar in order of the row labels:
for i in range(len(x_labels)):
axes[1].add_patch(patches.Rectangle((-0.5, -0.5 + i), 1, 1, edgecolor='black',
hatch=layout[x_labels[i]][1], facecolor=layout[x_labels[i]][0]))
axes[1].text(1.1, (-0.5 + i) + 0.5, x_labels[i], weight='semibold', ha='center', va='center')
### Remove ticks from the lithology bar:
axes[1].set_xticks([])
axes[1].set_yticks([])
## Set titles:
axes[0].set_title(title, y=1.05, weight='bold', fontsize='large')
if ideal:
axes[1].set_title("Ideal \n Sequence:", y=1.03, weight='semibold', fontsize='medium')
# Save figure to selected filepath:
plt.savefig(filepath, bbox_inches='tight')
plt.close(fig)
# If ideal = True, then return the ideal sequence order:
if ideal:
return x_labels
return
| Ruben0-0/bep | Visualization_Tools/matrix_visualizers.py | matrix_visualizers.py | py | 5,698 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.ndarray",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matpl... |
43343485878 | #python script to concatenate tiff files of split timepoint movies
#Author: M. Secchi with the help of N. Gogoberidze
#Date: 2022-12-06
#Lines 16 and 39 have to be edited to match the path of the folder containing the split movies and the path of the folder where the concatenated movies will be saved
import os
import natsort
from natsort import natsorted
import numpy as np
from skimage import io
from tifffile import imwrite
filename =[]
#copy your path here
filepath='/rds/general/user/mas515/home/CP/output_data_movie_test'
for root, dirs, files in os.walk(filepath): # will open up all the folders, dirs is all the name of the folder it finds, files will contain all the filenames it finds
for file in files:
if file.endswith(" - T=0.tiff"):
ind=file.index("_Ch3_xyzCorrected.tif - T=0.tiff")#_Ch3_xyzCorrected.tif - T=0.tiff
rep=file[ind:]
file_init=file.replace(rep, '_')
filename.append(file_init)
filename
sequence=[]
im_data_sequences = []
for root, dirs, files in os.walk(filepath):
for unique_file in filename:
sequence=[os.path.join(root, i) for i in files if unique_file in i]# i is representing an individual file name
x=natsorted(sequence)
im_data_sequence = [io.imread(fn) for fn in x]
im_data_sequences.append((unique_file, im_data_sequence)) #the parenthesis is creating a tuple which has 2 elements
print(x)
for movie_name, movie_timepoints in im_data_sequences:#because tuple : first element will be movie name, second element will be movie_timepoint
# try:
out_image = io.concatenate_images(movie_timepoints)
imwrite(f'/rds/general/user/mas515/home/CP/concatenated_KuO_output/{movie_name}Ch8_xyzCorrected.tiff', out_image, imagej=True, metadata={'axes': 'TZYX'})
print(f'did write movies/{movie_name}.tiff')
# except:
# print(f'failed to write movies/{movie_name}.tiff') | Mas515/Image_analysis | concatenate.py | concatenate.py | py | 1,913 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.walk",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"us... |
9295811562 | import numpy as np
from gym.utils import seeding
import math
import matplotlib.pyplot as plt
from maze import MazeWorldSmall
def randargmax(b, np_random=None):
if np_random is None:
np_random = np.random
return np_random.choice(np.flatnonzero(b == b.max()))
class Agent(object):
def __init__(self, env, seed=None):
self.env = env
self.np_random, _ = seeding.np_random(seed)
@property
def n_states(self):
return self.env.observation_space.n
@property
def n_actions(self):
return self.env.action_space.n
def act(self, s, explore):
raise NotImplementedError
def update(self, s, a, s1, r, done, verbose=False):
raise NotImplementedError
def close(self):
pass
# No function approximation
# So it's just thinking about value and RL
class TabularQLearningAgent(Agent):
def __init__(
self,
action_space,
observation_space,
gamma,
lr,
seed=None):
super().__init__(env=None, seed=seed)
n = observation_space.n
m = action_space.n
self.Q = np.zeros((n, m))
self.training_episode = 0
self.lr = lr
# self._boltzmann_schedule = boltzmann_schedule
self.gamma = gamma
def act(self, s, eps):
# Break ties randomly.
best_a = randargmax(self.Q[s, :], self.np_random)
if eps == 0:
return best_a
# Epsilon-greedy action selection
if self.np_random.random_sample() > eps:
return best_a
else:
return self.np_random.choice(range(len(self.Q[s, :])))
def step(self, s, a, r, s1, done):
self.update(s, a, r, s1, done)
def get_q_values(self, s):
return self.Q[s, :]
def update(self, s, a, r, s1, done, verbose=False):
if verbose:
print(a)
print({
'before': self.Q[s, :],
})
# update_q_value = q_table[action, state] + alpha * (reward + (gamma_discount * next_state_value) - q_table[action, state])
self.Q[s, a] += self.lr * (r + self.gamma * np.max(self.Q[s1, :]) - self.Q[s, a])
if verbose:
print({'after': self.Q[s, :]})
if done:
self.training_episode += 1
def save(self, path):
# we are not saving
np.savez_compressed(path, Q=self.Q, training_episodes=self.training_episode)
def load(self, path):
obj = np.load(path)
self.Q = obj['Q']
self.training_episode = obj['training_episodes']
def visualize(self):
print(self.Q)
# we pretend this is the Stable-baselines training loop that we are not allowed to change
def train(agent, env, n_episodes=20, max_step=10,
eps_start=1.0, eps_end=0.01, eps_decay=0.9, eval_render=True):
eps = eps_start
rewards = []
for episode in range(n_episodes):
state = env.reset()
score, done, step_cnt = 0, False, 0
while not done and step_cnt <= max_step:
action = agent.act(state, eps=eps)
next_state, reward, done, _ = env.step(action)
agent.step(state, action, reward, next_state, done)
score += reward
step_cnt += 1
state = next_state
eps = max(eps_end, eps_decay * eps)
rewards.append(score)
if eval_render:
state = env.reset()
score, step_cnt, done = 0, 0, False
env.render()
while not done and step_cnt < max_step:
action = agent.act(state, eps=0)
next_state, reward, done, _ = env.step(action)
score += reward
state = next_state
step_cnt += 1
env.render()
# a plot on reward should be average across all training instances
return rewards
def plot_average_corr_training_reward():
n_episodes = 100
num_of_runs = 100
avg_rewards = np.zeros(n_episodes)
for _ in range(num_of_runs):
env = MazeWorldSmall(program=None) # correct env
agent = TabularQLearningAgent(env.action_space,
env.observation_space,
gamma=0.9, lr=0.5,
seed=12345)
rewards = train(agent, env, n_episodes, eps_end=0, eval_render=False, max_step=10) # max_step=8
avg_rewards += np.array(rewards)
# expected highest reward is 9 (because we have 10% of not getting any reward)
avg_rewards = avg_rewards / num_of_runs
plt.plot(avg_rewards)
plt.hlines(y=9, xmin=0, xmax=n_episodes, linestyle='--') # color ='r',
plt.xlabel("Episodes")
plt.ylabel("Reward")
plt.title(f"Average across {n_episodes} runs, episodic reward")
plt.show()
def run_once():
env = MazeWorldSmall(program=None) # correct env
agent = TabularQLearningAgent(env.action_space,
env.observation_space,
gamma=0.9, lr=0.5,
seed=12345)
# 20 episodes won't work...ha
train(agent, env, 100, eval_render=True, max_step=8)
# The purpose to reset (evaluate continously) is because
# some games after failing, does not respawn correctly, or reset the game into playable condition
def evaluate_continuously(env, agent, max_step, num_balls_win, eps, finish_reward=100):
total_reward, step_cnt = 0, 0
total_win, total_loss = 0, 0
while (max(total_win, total_loss) < num_balls_win) and step_cnt < max_step:
score, done = 0, False
state = env.reset()
while not done and step_cnt <= max_step:
action = agent.act(state, eps=eps)
next_state, reward, done, _ = env.step(action)
score += reward
total_reward += reward
state = next_state
step_cnt += 1
# print(step_cnt, score)
if reward == 10:
total_win += 1
elif reward == -5:
total_loss += 1
if total_win == 3:
total_reward += finish_reward
elif total_loss == 3:
total_reward -= finish_reward
return total_reward, total_win, total_loss
# now we are outside the training loop, we can collect data however we want
def execute_agent_collect_data(env, agent, max_step, num_balls_win, eps):
# finish_reward=100
# We have not implemented this...
total_reward, step_cnt = 0, 0
total_win, total_loss = 0, 0
trajectory = []
while (max(total_win, total_loss) < num_balls_win) and step_cnt < max_step:
score, done = 0, False
state = env.reset()
while not done and step_cnt <= max_step:
action = agent.act(state, eps=eps)
next_state, reward, done, _ = env.step(action)
trajectory.append((state, action, reward, next_state))
score += reward
total_reward += reward
state = next_state
step_cnt += 1
# print(step_cnt, score)
if reward == 10:
total_win += 1
elif reward == -5:
total_loss += 1
# Not implemented yet
# if total_win == 3:
# total_reward += finish_reward
# elif total_loss == 3:
# total_reward -= finish_reward
return trajectory
def test_evaluate_continuously():
env = MazeWorldSmall(program=None) # correct env
agent = TabularQLearningAgent(env.action_space,
env.observation_space,
gamma=0.9, lr=0.5,
seed=12345)
# 20 episodes won't work...ha
train(agent, env, 150, eval_render=True, max_step=10) # max_step=8
for _ in range(30):
# max_step = 30
total_reward = evaluate_continuously(env, agent, max_step=30, num_balls_win=3, eps=0., finish_reward=20)
print(total_reward)
def test_collect_traj():
env = MazeWorldSmall(program=None) # correct env
agent = TabularQLearningAgent(env.action_space,
env.observation_space,
gamma=0.9, lr=0.5,
seed=12345)
# 20 episodes won't work...ha
train(agent, env, 150, eval_render=True, max_step=10) # max_step=8
# max_step = 30
traj = execute_agent_collect_data(env, agent, max_step=30, num_balls_win=3, eps=0.)
print(traj[-5:])
if __name__ == '__main__':
pass
# run_once()
# plot_average_corr_training_reward()
# test_evaluate_continuously()
test_collect_traj() | windweller/AutoGrade | autograde/toy/q_learning.py | q_learning.py | py | 8,613 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.flatnonzero",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "gym.utils.seeding.np_random",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "gym.u... |
22575362577 | import sys
import itertools
def solution(mylist):
number= [x for x in range(1,mylist+1)]
answer= list(map(list, itertools.permutations(number)))
answer.sort()
return answer
n = int(input())
results = solution(n)
for i in results:
for j in i:
print(j,end=" ")
print("") | dydwkd486/coding_test | baekjoon/python/baekjoon10974.py | baekjoon10974.py | py | 303 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.permutations",
"line_number": 6,
"usage_type": "call"
}
] |
70957373473 | """create relation between users and papers
Revision ID: 4e860e37bb37
Revises: 1bd8afe10204
Create Date: 2013-10-03 20:59:42.374353
"""
# revision identifiers, used by Alembic.
revision = '4e860e37bb37'
down_revision = '1bd8afe10204'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('userpapers',
sa.Column('id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('paper_id', sa.Integer(), nullable=False),
sa.Column('read_at', sa.DateTime(), nullable=True),
sa.Column('score', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['paper_id'], ['papers.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('user_id', 'paper_id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('userpapers')
### end Alembic commands ###
| dedalusj/PaperChase | backend/alembic/versions/4e860e37bb37_create_relation_betw.py | 4e860e37bb37_create_relation_betw.py | py | 1,028 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
19780094728 | # bot.py
import discord # IMPORT DISCORD.PY. ALLOWS ACCESS TO DISCORD'S API.
import os # IMPORT THE OS MODULE.
import sqlite3
import time
from datetime import datetime, date
from dotenv import load_dotenv # IMPORT LOAD_DOTENV FUNCTION FROM DOTENV MODULE.
from discord.ext import commands, tasks # IMPORT COMMANDS FROM THE DISCORD.EXT MODULE.
load_dotenv() # LOADS THE .ENV FILE THAT RESIDES ON THE SAME LEVEL AS THE SCRIPT.
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN") # GRAB THE API TOKEN FROM THE .ENV FILE.
intents = discord.Intents.default()
intents.members = True
intents.message_content = True
bot = commands.Bot(command_prefix="!", intents = intents) # CREATES A NEW BOT OBJECT WITH A SPECIFIED PREFIX.
conn = None
conn = sqlite3.connect('rent.db') # Connect to rent database
cur = conn.cursor()
@bot.event
async def on_ready():
print(bot.user.name + " is here!")
binLoop.start()
reminder_channel_id = 1086391871439384686
monthDays = {
"01":31,
"02":28, # Assumes not a leap year for February
"03":31,
"04":30,
"05":31,
"06":30,
"07":31,
"08":31,
"09":30,
"10":31,
"11":30,
"12":31
}
#?############################## UTILITY FUNCTIONS ###############################
def format_date(date):
"""
Formats the given date to dd-mm-yyyy.
Args:
date (str): date of rent payment due (yyyy-mm-dd)
Returns:
formated_date (str): Formatted date using (dd-mm-yyyy)
"""
str_as_date = datetime.strptime(date, "%Y-%m-%d")
formated_date = str_as_date.strftime("%d-%m-%Y")
return (formated_date)
def get_next_rent():
"""
Returns the details of the next rent payment due.
Returns:
next_rent (list): Date and amount due for next rent payment
"""
today = date.today()
cur.execute("SELECT * FROM rent_payments")
entries = cur.fetchall()
next_rent = nextEntry(entries, today, 0)
return (next_rent)
def get_next_bin():
"""
Returns the next bin collection.
Returns:
next_rent (list): Date and type for next bin collection
"""
today = date.today()
cur.execute("SELECT * FROM bin_collections")
collectionEntries = cur.fetchall()
next_bin = nextEntry(collectionEntries, today, 0)
return (next_bin)
def nextEntry(entries, dateToday, format):
    """
    Return the upcoming entry (or entries) strictly later than ``dateToday``.

    Args:
        entries (list): rows of (date_str, info) from the bin/rent tables,
            with date_str formatted YYYY-MM-DD.
        dateToday (date): today's date.
        format (int): 0 -> return only the next due entry,
                      1 -> return all future entries, soonest first.

    Returns:
        tuple | list: the next entry, or the sorted list of future entries.

    Raises:
        ValueError: if ``format`` is neither 0 nor 1 (the original returned
            the string "Invalid type input!", which callers could silently
            mistake for a data row).
        IndexError: if ``format`` is 0 and there are no future entries.
    """
    upcomingEntries = [
        entry for entry in entries
        if datetime.strptime(entry[0], '%Y-%m-%d').date() > dateToday
    ]
    # ISO-formatted date strings sort chronologically as plain strings.
    sortedUpcoming = sorted(upcomingEntries)
    if format == 0:
        # Renamed away from the builtin name ``next`` used previously.
        return sortedUpcoming[0]
    if format == 1:
        return sortedUpcoming
    raise ValueError("format must be 0 (next entry) or 1 (all future entries)")
#?############################## BOT LOOPS ###############################
@tasks.loop(seconds=30)
async def binLoop():
    """
    Checks the next bin collection every 30s, sending a reminder at 7PM the
    evening before a collection is due.

    Fixes over the original:
    * The end-of-month check compared a string day ("31") against an int from
      monthDays, so it could never be true; both sides are now ints.
    * time.sleep() blocked the whole event loop for 70s; asyncio.sleep() keeps
      the bot responsive while still suppressing duplicate reminders.
    """
    import asyncio  # local import: only this loop needs it

    channel = bot.get_channel(reminder_channel_id)
    currentDate = datetime.now()  # FORMAT: YYYY-MM-DD HH:MM:SS
    currentDay = currentDate.strftime("%d")
    currentMonth = currentDate.strftime("%m")
    nextBin = get_next_bin()  # Format (String date, String type)
    binDate = nextBin[0]
    binType = nextBin[1]
    # Day-of-month of the next collection.
    binDayT = datetime.strptime(format_date(binDate), '%d-%m-%Y')
    dayOfNextCollection = binDayT.strftime("%d")
    # Remind the evening before: either tomorrow is the collection day, or the
    # collection is on the 1st and today is the last day of the current month.
    if (int(currentDay) == (int(dayOfNextCollection) - 1) or
            (dayOfNextCollection == "01" and
             int(currentDay) == monthDays[currentMonth])):
        currentTime = currentDate.strftime("%H:%M")
        if currentTime == '19:00':
            await channel.send("@everyone Reminder, **" + binType + "** is being collected tomorrow on **"
                               + format_date(binDate) + "**")
            await asyncio.sleep(70)  # sleep past 19:00 to avoid duplicate reminders
#?############################## BOT COMMANDS ###############################
# Command !next_rent: no arguments; posts the next rent payment due.
@bot.command(
    help = "Prints the next rent payment due back to the channel.", # Adds to '!help next_rent' message
    brief = "Prints the next rent payment due back to the channel." # Adds to '!help' message
)
async def next_rent(ctx):
    """
    Posts the next rent payment due to the invoking channel.

    Args:
        ctx (Class): Represents the context in which a command is being invoked under.
    """
    due = get_next_rent()
    message = ("The next rent is due on: **" + format_date(due[0]) +
               "** (dd-mm-yyyy) for **£" + str(due[1]) + "**")
    await ctx.channel.send(message)
# Command !all_rent. This takes 0 arguments from the user and prints all future rent payments into the channel.
@bot.command(
    help="Prints all remaining rent payment due back to the channel.", # Adds to '!help all_rent' message
    brief="Prints all remaining rent payment due back to the channel." # Adds to '!help' message
)
async def all_rent(ctx):
    """
    Prints all remaining rent payments.

    Args:
        ctx (Class): Represents the context in which a command is being invoked under.
    """
    today = date.today()
    # Fixed: the original concatenated the date unquoted into the SQL
    # ("... > 2023-01-01"), which SQLite evaluates as integer subtraction, so
    # the WHERE clause never filtered correctly.  Bind it as a parameter.
    cur.execute("SELECT * FROM rent_payments WHERE date_due > ?", (str(today),))
    rows = cur.fetchall()
    rents = nextEntry(rows, today, 1) # 1 == all future entries
    for rent in rents:
        await ctx.channel.send("Rent due: **" + format_date(rent[0]) + "** (dd-mm-yyyy) for **£" + str(rent[1]) + "**")
# Command !next_bin: no arguments; posts the next bin collection due.
@bot.command(
    help = "Prints the next rent bin collection back to the channel.", # Adds to '!help next_bin' message
    brief = "Prints the next rent bin collection back to the channel." # Adds to '!help' message
)
async def next_bin(ctx):
    """
    Posts the next bin collection due to the invoking channel.

    Args:
        ctx (Class): Represents the context in which a command is being invoked under.
    """
    collection = get_next_bin()
    message = ("The next bin collection is **" + str(collection[1]) + "** collected on **"
               + format_date(collection[0]) + "** (dd-mm-yyyy)")
    await ctx.channel.send(message)
# Command !all_bins. This takes 0 arguments from the user and prints all future bin collections into the channel.
@bot.command(
    help="Prints all future bin collections back to the channel.", # Adds to '!help all_bin' message
    brief="Prints all future bin collections back to the channel." # Adds to '!help' message
)
async def all_bins(ctx):
    """
    Prints all bin collections.

    Args:
        ctx (Class): Represents the context in which a command is being invoked under.
    """
    today = date.today()
    # Fixed: bind the date as a parameter; the original concatenated it
    # unquoted into the SQL, where SQLite treats 2023-01-01 as subtraction.
    cur.execute("SELECT * FROM bin_collections WHERE collection_day > ?", (str(today),))
    rows = cur.fetchall()
    bins = nextEntry(rows, today, 1) # 1 == all future entries
    for collection in bins:  # renamed from 'bin', which shadowed the builtin
        await ctx.channel.send(str(collection[1] + " collected on " + format_date(collection[0])))
bot.run(DISCORD_TOKEN) # Executes the bot with the specified token. | JoelW2003/Bin-Reminder-Bot | cindyBot/bot.py | bot.py | py | 7,980 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "discord.Intents.default",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "discord.Intents"... |
72772102433 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import KFold
# --- data loading & preparation ---
def create_data():
    """Fetch the two-category 20-newsgroups subset used throughout this script."""
    categories = ['alt.atheism', 'soc.religion.christian']
    return fetch_20newsgroups(data_home='./naiveBayes', subset='all',
                              categories=categories,
                              remove=('headers', 'footers', 'quotes'))
# --- vectorisation ---
def vectorizer_data(data):
    """TF-IDF vectorise ``data.data`` and return it as a dense matrix."""
    vectorizer = TfidfVectorizer(stop_words='english', lowercase=True)
    sparse = vectorizer.fit_transform(data.data)
    return sparse.todense()  # convert the sparse matrix to a regular matrix
# Load the corpus once at import time and TF-IDF vectorise it for the
# cross-validation loop below.
X = create_data()
X_vector = vectorizer_data(X)
class NaiveBayes:
    """Hand-rolled Gaussian naive Bayes classifier."""

    def __init__(self):
        # Maps class label -> list of per-feature (mean, std); filled by fit().
        self.model = None

    # Sample mean (mathematical expectation).
    @staticmethod
    def mean(x):
        return sum(x)/float(len(x))

    # Standard deviation.  NOTE(review): this is the population std
    # (divides by n, not n-1).
    def std(self, x):
        avg = self.mean(x)
        return np.sqrt(sum(np.power(x_i-avg, 2) for x_i in x)/float(len(x)))

    # Gaussian probability density; the 1e-5 terms guard against division by
    # zero when a feature's std is 0.
    def gaussian_prob(self, x, mean, std):
        exp = np.exp(-1*(np.power(x-mean, 2))/(2*np.power(std, 2)+(1e-5)))
        return (1/(np.sqrt(2*np.pi*math.pow(std, 2))+(1e-5)))*exp

    # Per-feature (mean, std) pairs; zip(*x) transposes samples into feature columns.
    def mean_and_std(self, x):
        mean_and_std = [(self.mean(i), self.std(i)) for i in zip(*x)]
        return mean_and_std

    # Fit: group the samples by label, then store per-label feature statistics.
    def fit(self, x, y):
        labels = list(set(y))
        data = {label: [] for label in labels}
        for f, label in zip(x, y):
            data[label].append(f)
        self.model = {label: self.mean_and_std(value) for label,value in data.items()}
        return "GaussianNB train Done!"

    # Likelihood of one sample under each class: naive independence assumption,
    # i.e. the product of the per-feature Gaussian densities.
    def prob(self, data):
        probability = {}
        for label, value in self.model.items():
            probability[label] = 1
            for i in range(len(value)):
                mean, std = value[i]
                probability[label] *= self.gaussian_prob(data[i], mean, std)
        return probability

    # Predicted label = class with the highest likelihood.
    def predict(self, x_test):
        label = sorted(self.prob(x_test).items(), key=lambda x: x[-1])[-1][0]
        return label

    # Accuracy: fraction of test samples predicted correctly.
    def score(self, x_test, y_test):
        right = 0
        for x, y in zip(x_test, y_test):
            label = self.predict(x)
            if label == y:
                right += 1
        return right / float(len(x_test))
# 5-fold cross-validation over the vectorised corpus.
kf = KFold(n_splits=5, random_state=1, shuffle=True)
k = 0  # fold counter, used only in the progress prints below
for Xtrain, Xtest in kf.split(X_vector):
    # Slice the dense matrix and the targets with this fold's indices.
    X_train = np.array(X_vector)[Xtrain, :]
    X_test = np.array(X_vector)[Xtest, :]
    y_train = np.array(X.target)[Xtrain]
    y_test = np.array(X.target)[Xtest]
    model = NaiveBayes()
    model.fit(X_train, y_train)
    k = k + 1
    predictRes = 0
    scoreRes = 0
    for j in range(len(Xtest)):  # j deliberately distinct from the loop variable i used elsewhere
        predictRes += model.predict(X_test[j, :])
        scoreRes += model.score([X_test[j, :].tolist()], [y_test[j].tolist()])  # must be wrapped in lists
    print("第" + str(k) + "次交叉验证的预测结果为:" ,predictRes/len(Xtest))
    print("第" + str(k) + "次交叉验证测试集的精确度为:" ,scoreRes/len(Xtest))
    print("第" + str(k) + '次交叉验证的测试集的score为: ', model.score(X_test, y_test))
| kildallithro/HW_ML-2020-2021_1 | naiveBayes_20newsgroups/naiveBayes.py | naiveBayes.py | py | 3,569 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.datasets.fetch_20newsgroups",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 42,
"usage_type": "call"
... |
89305565 | # -*- coding: utf-8 -*-
import os
import re
import ssl
import threading
from math import pi
import rospy
from geometry_msgs.msg import Twist
import paho.mqtt.client as mqtt
from fiware_ros_turtlesim.params import getParams, findItem
from fiware_ros_turtlesim.logging import getLogger
logger = getLogger(__name__)
class CommandSender(object):
    """Bridges MQTT command messages to turtlesim movement via ROS Twist messages.

    Commands arrive as MQTT payloads matching the configured regex (groups
    ``device_id`` and ``cmd``); each recognised command spawns a worker thread
    that publishes Twist messages, and an acknowledgement is published back
    over MQTT.
    """

    def __init__(self, node_name):
        self.node_name = node_name
        self.__client = mqtt.Client(protocol=mqtt.MQTTv311)
        self.__client.on_connect = self._on_connect
        self.__client.on_message = self._on_message
        # Tear down cleanly on ROS shutdown: stop motion first, then MQTT.
        rospy.on_shutdown(self._do_stop)
        rospy.on_shutdown(self.__client.disconnect)
        rospy.on_shutdown(self.__client.loop_stop)
        self._params = getParams(rospy.get_param("~"))
        topic = findItem(self._params.ros.topics, 'key', 'turtlesim')
        self.__ros_pub = rospy.Publisher(topic.name, Twist, queue_size=10)
        self.__do_move = False          # True while a movement thread is running
        self.__lock = threading.Lock()  # guards __do_move
        self._cmd_payload_re = re.compile(findItem(self._params.mqtt.topics, 'key', 'command_sender').re)

    def connect(self):
        """Connect to the MQTT broker (TLS and auth if configured) and start its loop."""
        logger.infof('Connect mqtt broker')
        if hasattr(self._params.mqtt, 'cafile'):
            cafile_path = self._params.mqtt.cafile.strip()
            if len(cafile_path) > 0 and os.path.isfile(cafile_path):
                self.__client.tls_set(cafile_path, tls_version=ssl.PROTOCOL_TLSv1_2)
        if hasattr(self._params.mqtt, 'username') and hasattr(self._params.mqtt, 'password'):
            username = self._params.mqtt.username.strip()
            password = self._params.mqtt.password.strip()
            if len(username) > 0 and len(password) > 0:
                self.__client.username_pw_set(username, password)
        self.__client.connect(self._params.mqtt.host, port=self._params.mqtt.port, keepalive=60)
        self.__client.loop_start()
        return self

    def start(self):
        """Block until ROS shutdown."""
        logger.infof('CommandSender start : {}', self.node_name)
        rospy.spin()
        logger.infof('CommandSender stop : {}', self.node_name)

    def nodetest(self):
        """Self-test: synthesise a 'circle' command every 2s (for rostest publishtest)."""
        from collections import namedtuple
        logger.warnf('Test publish using publishtest of rostest')
        r = rospy.Rate(0.5)
        while not rospy.is_shutdown():
            self._on_message(None, None, namedtuple('msg', ('payload',))(payload='device_id@move|circle'))
            r.sleep()

    def _on_connect(self, client, userdata, flags, response_code):
        # Subscribe once the broker acknowledges the connection.
        logger.infof('mqtt connect status={}', response_code)
        client.subscribe(findItem(self._params.mqtt.topics, 'key', 'command_sender').name)

    def _on_message(self, client, userdata, msg):
        """Parse one MQTT payload, dispatch the movement, and publish an ack."""
        payload = str(msg.payload)
        logger.infof('received message from mqtt: {}', payload)
        matcher = self._cmd_payload_re.match(payload)
        if matcher:
            cmd = matcher.group('cmd')
            device_id = matcher.group('device_id')
            if cmd == 'circle':
                self._do_circle()
            elif cmd == 'square':
                self._do_square()
            elif cmd == 'triangle':
                self._do_triangle()
            elif cmd == 'cross':
                self._do_stop()
            elif cmd == 'up':
                self._do_forward()
            elif cmd == 'down':
                self._do_backward()
            elif cmd == 'left':
                self._do_turnleft()
            elif cmd == 'right':
                self._do_turnright()
            else:
                logger.warnf('unknown cmd: {}', payload)
                cmd = 'UNKNOWN CMD: {}'.format(cmd)
            # Acknowledge execution (or the UNKNOWN marker) back over MQTT.
            topic = findItem(self._params.mqtt.topics, 'key', 'command_sender_exec').name
            fmt = findItem(self._params.mqtt.topics, 'key', 'command_sender_exec').format
            self.__client.publish(topic, fmt.format(device_id=device_id, cmd=cmd))
        else:
            logger.warnf('unkown payload: {}', payload)  # NOTE(review): typo "unkown" is in the log text only
        logger.debugf('active threds = {}', threading.active_count())

    def _do_circle(self):
        """Queue one full circular revolution (simultaneous linear + angular motion)."""
        logger.infof('do circle')
        def move(self):
            self.__circle(int(2 * pi * self._params.ros.rate))
        return self._do_move(move)

    def _do_square(self):
        """Queue a square path: four straight edges with 90-degree turns."""
        logger.infof('do square')
        def move(self):
            self.__linear(2 * self._params.ros.rate)
            self.__rotate(pi / 2)
            self.__linear(2 * self._params.ros.rate)
            self.__rotate(pi / 2)
            self.__linear(2 * self._params.ros.rate)
            self.__rotate(pi / 2)
            self.__linear(2 * self._params.ros.rate)
            self.__rotate(pi / 2)
        return self._do_move(move)

    def _do_triangle(self):
        """Queue a triangular path: three straight edges with 120-degree turns."""
        logger.infof('do triangle')
        def move(self):
            self.__linear(2 * self._params.ros.rate)
            self.__rotate(pi * 2 / 3)
            self.__linear(2 * self._params.ros.rate)
            self.__rotate(pi * 2 / 3)
            self.__linear(2 * self._params.ros.rate)
            self.__rotate(pi * 2 / 3)
        return self._do_move(move)

    def _do_forward(self):
        """Queue a short nudge forward (rate * 0.2 ticks = 0.2s of motion)."""
        logger.infof('do forward')
        def move(self):
            self.__linear(int(self._params.ros.rate * 0.2))
        return self._do_move(move)

    def _do_backward(self):
        """Queue a short nudge backward."""
        logger.infof('do backward')
        def move(self):
            self.__linear(int(self._params.ros.rate * 0.2), reverse=True)
        return self._do_move(move)

    def _do_turnleft(self):
        """Queue a small left turn (pi/16 radians)."""
        logger.infof('do turn left')
        def move(self):
            self.__rotate(pi / 16)
        return self._do_move(move)

    def _do_turnright(self):
        """Queue a small right turn (pi/16 radians)."""
        logger.infof('do turn right')
        def move(self):
            self.__rotate(pi / 16, reverse=True)
        return self._do_move(move)

    def _do_stop(self):
        """Abort any in-flight movement by clearing the shared flag."""
        with self.__lock:
            self.__do_move = False
        logger.infof('sotp moving')  # NOTE(review): typo "sotp" is in the log text only

    def _do_move(self, callback):
        """Run ``callback`` on a worker thread unless a movement is already in progress."""
        def func():
            if not callable(callback):
                return
            if self.__do_move:
                # One movement at a time: reject while another thread runs.
                logger.infof('now moving')
                return
            with self.__lock:
                self.__do_move = True
            callback(self)
            with self.__lock:
                self.__do_move = False
        thread = threading.Thread(target=func)
        thread.start()
        return thread

    def __circle(self, ticks):
        # Constant combined linear + angular velocity traces a circle.
        move_cmd = Twist()
        move_cmd.linear.x = 1.0
        move_cmd.angular.z = 1.0
        self.__move(ticks, move_cmd)

    def __linear(self, ticks, reverse=False):
        move_cmd = Twist()
        move_cmd.linear.x = 1.0 if not reverse else -1.0
        self.__move(ticks, move_cmd)

    def __rotate(self, angle, reverse=False):
        move_cmd = Twist()
        move_cmd.angular.z = 1.0 if not reverse else -1.0
        # At 1 rad/s, angle radians take angle seconds = angle * rate ticks.
        ticks = int(angle * self._params.ros.rate)
        self.__move(ticks, move_cmd)

    def __move(self, ticks, move_cmd):
        # Publish move_cmd at the configured rate for ``ticks`` cycles, aborting
        # early if _do_stop() clears the flag; finish with a zero Twist to halt.
        r = rospy.Rate(self._params.ros.rate)
        for t in range(ticks):
            if not self.__do_move:
                break
            self.__ros_pub.publish(move_cmd)
            r.sleep()
        self.__ros_pub.publish(Twist())
| tech-sketch/fiware-ros-turtlesim | src/fiware_ros_turtlesim/command_sender.py | command_sender.py | py | 7,227 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "fiware_ros_turtlesim.logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "paho.mqtt.client.Client",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "paho.mqtt.client",
"line_number": 21,
"usage_type": "name"
},
{
"a... |
import requests
from bs4 import BeautifulSoup as bs

# Scrape the UMG Gaming leaderboard and print place, username and XP per row.
page = requests.get('https://umggaming.com/leaderboards')
soup = bs(page.text, 'html.parser')

# The leaderboard rows live in the <tbody> of the table with id 'leaderboard-table'.
leaderboards = soup.find('table', {'id': 'leaderboard-table'})
tbody = leaderboards.find('tbody')

for tr in tbody.find_all('tr'):
    # Query the row's cells once; the original called find_all('td') three
    # times per row, re-walking the tree for every column.
    cells = tr.find_all('td')
    place = cells[0].text.strip()
    username = cells[1].text.strip()
    xp = cells[3].text.strip()  # cells[2] is unused — presumably a non-data column; TODO confirm
    print(place, username, xp)
| edwardspresume/Sandbox | python/Data-scraper/leaderboard.py | leaderboard.py | py | 599 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 5,
"usage_type": "call"
}
] |
24981871542 | # encoding='utf-8'
import contextlib
import functools
import inspect
from multiprocessing import Lock as m_lock
class TestDecorator:
    """
    Marks a function as a runnable test case.

    Decorating attaches ``test_decorator`` (a fresh instance carrying the same
    name), ``mark`` and ``run`` attributes, then returns the function unchanged.
    """
    def __init__(self, name):
        self.name = name
        self.mark = "test"

    def __call__(self, *args, **kwargs):
        # Only act when used as a plain decorator on a function.
        if len(args) == 1 and len(kwargs) == 0:
            if inspect.isfunction(args[0]):
                setattr(args[0], "test_decorator", self.__class__(self.name))
                setattr(args[0], "mark", self.mark)
                setattr(args[0], "run", True)
                return args[0]


test = TestDecorator


class SkipDecorator:
    """
    Disables a previously marked test: sets ``run = False`` on construction.
    """
    def __init__(self, test_item: test = None):
        self.test_item = test_item
        self.test_item.run = False
        self.mark = "test"

    def __call__(self):
        setattr(self.test_item, "mark", self.mark)
        return self.test_item


skip = SkipDecorator


class ParametrizeDecorator:
    """
    Attaches a parameter list to a test function.
    """
    def __init__(self, params: list = None):
        # Fixed: the original used a mutable default argument ([]), which is
        # shared between every instance created without arguments.
        self.params = [] if params is None else params
        self.mark = "test"

    def __call__(self, *args, **kwargs):
        func = args[0]
        setattr(func, "mark", self.mark)
        setattr(func, "params", self.params)
        setattr(func, "run", True)
        return func


parameterize = ParametrizeDecorator


class StressDecorator:
    """
    Attaches stress-run settings (thread count, repetitions, duration).
    """
    def __init__(self, threads=1, count=1, time=1):
        self.stress = {
            "threads": threads,
            "count": count,
            "time": time
        }
        self.mark = "test"

    def __call__(self, *args, **kwargs):
        func = args[0]
        setattr(func, "mark", self.mark)
        setattr(func, "stress", self.stress)
        setattr(func, "run", True)
        return func


stress = StressDecorator
class Locker:
    """
    Optional wrapper around a local (threading/multiprocessing) lock.

    When no local lock is supplied, acquire/release are no-ops, so callers use
    one code path whether or not they need mutual exclusion.
    """
    def __init__(self, bus_client, key, lock: m_lock = None):
        self.bus_client = bus_client
        self.key = key
        self.local_lock = lock

    def get_lock(self):
        """Acquire the local lock, if one was provided."""
        if self.local_lock:
            self.local_lock.acquire()

    def release_lock(self):
        """Release the local lock, if one was provided."""
        if self.local_lock:
            self.local_lock.release()
# Decorator that serialises access to the wrapped method via a Locker.
def locker(func):
    """Wrap a method in Locker acquire/release.

    The decorated method must accept ``key`` (first positional or keyword)
    and an optional ``lock`` (second positional or keyword).

    Fix: release now happens in a ``finally`` block, so an exception raised
    inside the wrapped method no longer leaves the lock held forever.
    """
    @functools.wraps(func)
    def lock(self, *args, **kwargs):
        if len(args) > 0:
            key = args[0]
            local_lock = args[1] if len(args) > 1 else kwargs.get("lock", None)
        else:
            key = kwargs.get("key")
            local_lock = kwargs.get("lock", None)
        # Renamed from 'locker', which shadowed the decorator itself.
        guard = Locker(self.bus_client, key, local_lock)
        guard.get_lock()
        try:
            return func(self, *args, **kwargs)
        finally:
            guard.release_lock()
    return lock
# Context-manager flavour of Locker: usable directly in a 'with' statement.
@contextlib.contextmanager
def new_locker(bus_client, key, lock: m_lock = None):
    """Acquire the lock on entry and always release it on exit."""
    guard = Locker(bus_client, key, lock)
    guard.get_lock()
    try:
        yield
    finally:
        guard.release_lock()
| hautof/haf | haf/mark.py | mark.py | py | 3,472 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "inspect.isfunction",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Lock",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "multiprocess... |
13518570965 | import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading
import time
import pprint
from bs4 import BeautifulSoup
import csv
import os
class WebSpider(threading.Thread):
    """Crawler for the qingarchives.npm.edu.tw archive listing/detail APIs.

    NOTE(review): subclasses threading.Thread but never calls
    threading.Thread.__init__, so instances cannot actually be start()ed as
    threads; the class is only used as a plain object below.
    """

    def __init__(self, cookies, headers):
        self.cookies = cookies
        self.headers = headers

    def page_code(self):
        # Build [start, end] archive-list index pairs, 20 items per page.
        a1 = 1
        page_num = 20
        page_size = 1001
        total = page_size / page_num  # NOTE(review): unused
        data = []
        all_data = []                 # NOTE(review): unused
        for i in range(1, page_size):
            list_number = a1 + (i - 1) * page_num
            step = (list_number + page_num) - 1
            data.append([list_number, step])
        return data

    # CSV writing pattern per
    # https://www.pythontutorial.net/python-basics/python-write-csv-file/
    def save_to_csv(self, fileName, mode, contents):
        # Write each row of ``contents`` to <fileName>.csv using the given mode.
        with open(f'{fileName}.csv', mode, encoding='UTF8') as f:
            writer = csv.writer(f)
            for i in contents:
                writer.writerow(i)

    def get_api_params(self):
        # One listing URL per [start, end] pair from page_code().
        urls = []
        for i in self.page_code():
            step = '-'.join([str(ii) for ii in i])
            url = f'https://qingarchives.npm.edu.tw/index.php?act=Archive//{step}'
            urls.append(url)
        return urls

    def get_acckey_and_accnum(self, url):
        # Scrape one listing page for its [acckey, accnum] pairs.
        data_list = []
        all_list = []
        r = requests.get(url, headers=self.headers, cookies=self.cookies)
        print(r.cookies)
        soup = BeautifulSoup(r.text, 'lxml')
        access_keys = soup.find_all("a", class_="act_content_display")
        accnum = soup.find(id='result_access_num').get('value')
        for access in access_keys:
            data_list = [access.get('acckey'), accnum]
            all_list.append(data_list)
        # NOTE(review): 'urls' here is the module-level list, not this url.
        print("download url {} finished at {}".format(urls, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))
        return all_list

    def saveAcckey2cvs(self, cookies, headers, urls):
        """Fetch every listing URL on a 20-worker thread pool and append the
        scraped (acckey, accnum) rows to data.csv.

        Threading reference (from the original author):
        https://cloud.tencent.com/developer/article/1597890
        NOTE(review): uses the module-level ``obj`` rather than ``self``.
        """
        executor = ThreadPoolExecutor(max_workers=20)
        all_task = [executor.submit(obj.get_acckey_and_accnum, (url)) for url in urls]
        for task in as_completed(all_task):
            data = task.result()
            obj.save_to_csv('data', 'a+', data)

    def get_page_api_data(self, headers, cookies):
        """POST each (acckey, accnum) row from data.csv to the Display endpoint.

        NOTE(review): the original author notes this approach does not return
        the API data (see e.py in the repo for the working variant).  Also, the
        only call site passes (cookies, headers), so the two parameters arrive
        swapped at runtime — confirm before relying on the names.
        """
        with open("data.csv", mode="r", encoding="utf-8") as ff:
            read = csv.reader(ff)
            for i in read:
                data = {
                    'act': f'Display/initial/{i[0]}/{i[1]}'
                }
                root_url = 'https://qingarchives.npm.edu.tw/index.php'
                r = requests.post(root_url, cookies=cookies, headers=headers, data=data)
                print(r.text)
if __name__ == '__main__':
    # Session cookies / headers captured from a logged-in browser session.
    cookies = {
        'PHPSESSID': 'olukv0ldhcv0bu1ojg9qb59raj',
        '_ga': 'GA1.1.1303102741.1658115604',
        '_ga_91MJR5YCWN': 'GS1.1.1658115604.1.1.1658118253.0',
    }
    headers = {
        'authority': 'qingarchives.npm.edu.tw',
        'accept': 'application/json, text/javascript, */*; q=0.01',
        'accept-language': 'zh-CN,zh;q=0.9',
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'origin': 'https://qingarchives.npm.edu.tw',
        'referer': 'https://qingarchives.npm.edu.tw/index.php?act=Archive',
        'sec-ch-ua': '".Not/A)Brand";v="99", "Google Chrome";v="103", "Chromium";v="103"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': 'macOS',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': '"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36,"',
        'x-requested-with': 'XMLHttpRequest',
    }
    obj = WebSpider(cookies, headers)
    urls = obj.get_api_params()
    # If the acckey/accnum pairs were already scraped, reuse data.csv;
    # otherwise crawl the site to build it.
    if os.path.exists('data.csv'):
        print('读取本地的data.csv,获取acckey和accnum')
        # NOTE(review): signature is (headers, cookies) — arguments arrive swapped.
        local_data = obj.get_page_api_data(cookies, headers)
    else:
        print('开始去网站爬取acckey 和accnum....')
        obj.saveAcckey2cvs(cookies, headers, urls)
| huchiwen/LearnSpider | qa_crawl.py | qa_crawl.py | py | 5,023 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "threading.Thread",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
... |
31966512950 | from typing import List
from synt.terminals import *
from lex.tokens import *
TERMINAL_COLOR = "\033[36m"
NONTERMINAL_COLOR = "\033[37m"
RESET_COLOR = "\033[0m"
class Rule:
    """One grammar production: ``result`` is derived from the sequence ``parts``."""

    def __init__(self, result, parts):
        self.result, self.parts = result, parts
def loadGrammar() -> list:
    """Parse synt/grammar.txt into a list of Rule productions.

    Each line is stripped, '//' comments are removed and blank lines skipped.
    The first surviving token is the rule's result; the remaining tokens
    (minus the operator markers) are its parts.

    Fix: the file is now opened with a context manager so the handle is
    always closed (the original leaked it).
    """
    with open("synt/grammar.txt", "r") as file:
        raw = file.readlines()
    grammar = []
    for row in raw:
        row = row.strip()
        row = row.split('//')[0]
        if row == "":
            continue
        parts = row.split()
        args = []
        for part in parts:
            # Skip the punctuation markers used in the grammar notation.
            if part not in ["=", "+", "-x-", "-?-", "---", "-!-"]:
                args.append(part)
        grammar.append(Rule(args[0], args[1:]))
    return grammar
def build_tree(tokens: List[Token], grammar: List[Rule]):
    """Shift-reduce parse: push tokens (in reverse) and greedily apply rules.

    NOTE(review): reconstructed from an indentation-stripped source — the
    nesting of the two special-case guards below was ambiguous and should be
    checked against the original repository.
    """
    stack = []
    print('\n\n\n_________ tokens _________\n')
    for i in range(len(tokens)):
        # Tokens are consumed from the end of the list backwards.
        nextToken = tokens[-i-1]
        print(f"{NONTERMINAL_COLOR}{nextToken.__class__.__name__}:{RESET_COLOR}")
        stack.append(nextToken)
        flag = True
        # Keep reducing until no rule matches the top of the stack.
        while flag:
            flag = False
            for rule in grammar:
                # Rule matches if the top-of-stack class names equal its parts.
                if len(stack) >= len(rule.parts) and all(
                    stack[-j-1].__class__.__name__
                    ==
                    word for j, word in enumerate(rule.parts)
                ):
                    # NOTE(review): grammar[15] special-cases one production by
                    # its position in grammar.txt — fragile; confirm which rule.
                    if rule == grammar[15]:
                        if (i < len(tokens)-1) and (isinstance(tokens[-i-2], Type)):
                            break
                    if rule.result == "FunctionCall":
                        if (i < len(tokens)-1) and (isinstance(tokens[-i-2], Type)):
                            break
                    if (i < len(tokens)-1) and \
                       (isinstance(nextToken, Identifier)) and \
                       (isinstance(tokens[-i-2], SBClose)):
                        break
                    # Reduce: pop the matched parts and push the nonterminal,
                    # instantiated by name from the module globals.
                    class_name = rule.result
                    parts = []
                    for j in range(len(rule.parts)):
                        parts.append(stack.pop())
                    nonterminal = globals()[class_name](parts)
                    stack.append(nonterminal)
                    flag = True
                    break
    print(*stack)
    return stack, 0
| PaulCh4/compiler-labs | compiler/synt/syntax.py | syntax.py | py | 2,323 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 39,
"usage_type": "name"
}
] |
10317462795 | import uproot4
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.collections import PatchCollection
from matplotlib import cm
from matplotlib.colors import LogNorm, Normalize
import awkward1 as ak
from lxml import etree as ET
def get_module_positions(base_name, tree):
    """Return {module_id: (x, y)} parsed from GDML <physvol>/<position> pairs.

    Example source XML:
        <physvol copynumber="372" name="crystal_module_3720x55600cea9670">
            <volumeref ref="crystal_module0x556007d83b50"/>
            <position name="crystal_module_3720x55600cea9670_pos" unit="mm" x="..." y="..." z="..."/>
        </physvol>
    """
    # XPath by name-prefix: an order of magnitude faster than walking the tree.
    xml_phys_volumes = tree.xpath(f"//physvol[starts-with(@name,'{base_name}')]")
    xml_positions = tree.xpath(f"//position[starts-with(@name,'{base_name}')]")
    positions_by_id = {}
    for index in range(len(xml_phys_volumes)):
        volume = xml_phys_volumes[index]
        position = xml_positions[index]
        name = volume.attrib['name']
        copynumber = volume.attrib['copynumber']
        # The module id is the hex suffix after "<base_name><copynumber>_".
        hex_suffix = name[len(base_name) + len(copynumber) + 1:]  # +1 for '_'
        positions_by_id[int(hex_suffix, 16)] = (
            float(position.attrib['x']),
            float(position.attrib['y']),
        )
    return positions_by_id
def get_module_geometry(base_name, tree):
    """Return (size_x, size_y, size_z, unit) of the first <box> whose name starts with base_name."""
    box = tree.xpath(f"//box[starts-with(@name,'{base_name}')]")[0]
    size_x, size_y, size_z = (float(box.attrib[axis]) for axis in ('x', 'y', 'z'))
    return size_x, size_y, size_z, box.attrib['lunit']
def build_calorimeter_section(ax, positions, size_x, size_y):
    """Draw one gray, black-edged rectangle per module position onto ``ax``."""
    half_w, half_h = size_x / 2.0, size_y / 2.0
    module_rects = [
        patches.Rectangle((x - half_w, y - half_h), width=size_x, height=size_y,
                          edgecolor='black', facecolor='gray')
        for x, y in positions
    ]
    ax.add_collection(PatchCollection(module_rects, match_original=True))
    ax.autoscale()
    ax.axis('equal')
    return ax
def plot_calorimeter_hits(root_file, ax, pos_by_id, size_x, size_y, start_event, process_events=1):
    """Overlay per-module summed ADC weights (log-normalised colours) onto ``ax``.

    Args:
        root_file: an opened uproot file containing an "events" tree.
        ax: matplotlib axes to draw on.
        pos_by_id: {module_id: (x, y)} as produced by get_module_positions().
        size_x, size_y: module dimensions.
        start_event, process_events: event range [start, start + count).

    Returns:
        (norm, cmap, ax) so the caller can build a matching colourbar.
    """
    tree = root_file["events"]
    entry_start = start_event
    entry_stop = start_event + process_events
    events = tree.arrays(['ce_emcal_id', 'ce_emcal_adc'],
                         library="ak", how="zip", entry_start=entry_start, entry_stop=entry_stop)
    print(events.type)
    ids = ak.flatten(events.ce_emcal.id)
    weights = ak.flatten(events.ce_emcal.adc)

    # Background: all modules in flat gray.
    build_calorimeter_section(ax, pos_by_id.values(), size_x, size_y)

    norm = LogNorm()
    norm.autoscale(weights)
    cmap = cm.get_cmap('inferno')

    # Sum ADC weights per module.  (Renamed from 'id', which shadowed the
    # builtin, and replaced the `in dict.keys()` branch with dict.get.)
    weights_by_id = {}
    for module_id, weight in zip(ids, weights):
        weights_by_id[module_id] = weights_by_id.get(module_id, 0) + weight

    dx = size_x / 2.0
    dy = size_y / 2.0
    module_rects = []
    for module_id, weight in weights_by_id.items():
        if module_id > 1000000:
            # Ids above 1e6 are skipped — presumably hits with no physical
            # module position; TODO confirm against the detector geometry.
            continue
        x, y = pos_by_id[module_id]
        patch = patches.Rectangle((x - dx, y - dy), size_x, size_y,
                                  edgecolor='black', facecolor=cmap(norm(weight)))
        module_rects.append(patch)
    col = PatchCollection(module_rects, match_original=True)
    ax.add_collection(col)
    return norm, cmap, ax
| eic/hybrid_calorimeter_tools | event_display.py | event_display.py | py | 3,470 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "matplotlib.patches.Rectangle",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.collections.PatchCollection",
"line_number": 57,
"usage_type": "call"
},
{... |
3560320164 | # In this Bite you calculate the total amount of points earned with Ninja Belts
# by accessing the given ninja_belts dict.
#
# You learn how to access score and ninjas (= amount of belt owners) from
# no less than a namedtuple (if you're new to them, check out the basic Point example in the docs).
#
# Why a namedtuple, you did not even mention a tuple yet?!
#
# Good point, well in our Bites we might actually use them even more so let's
# get to know them here (if you have a free evening read up on the collections module as well and thank us later).
#
# The function returns the total score int. You learn to write generic
# code because we test for an updated ninja_belts dict as well, see the TESTS tab.
from collections import namedtuple
BeltStats = namedtuple('BeltStats', 'score ninjas')

# Belt colour -> (points per belt, number of ninjas holding it).
ninja_belts = {'yellow': BeltStats(50, 11),
               'orange': BeltStats(100, 7),
               'green': BeltStats(175, 1),
               'blue': BeltStats(250, 5)}


def get_total_points(belts=ninja_belts):
    """Total points earned across all belts: sum of score * ninjas per belt.

    Uses the namedtuple's named fields (score, ninjas) instead of positional
    indexing — which is the point of using a namedtuple — and replaces the
    manual accumulator loop with sum() over a generator.
    """
    return sum(stats.score * stats.ninjas for stats in belts.values())

# tests for this exercise in tests.py
| panpusto/codewars_exercises | exercises/intro_bites_08_by_PyBites.py | intro_bites_08_by_PyBites.py | py | 1,186 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 18,
"usage_type": "call"
}
] |
40882243652 | import click
import dotenv
from rich import print
from rich.console import Console
console = Console()
from dotenv import load_dotenv
load_dotenv()
import os
import pynetbox
import requests
import re
import yaml
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Connection defaults pulled from the environment (.env loaded above).
NETBOX_TOKEN = os.getenv("NETBOX_TOKEN")
NETBOX_HOST = os.getenv("NETBOX_HOST")
NETBOX_PORT = os.getenv("NETBOX_PORT", 443)  # NOTE(review): default is int 443 but env values arrive as str
# NOTE(review): eval() on an environment variable executes arbitrary code if
# the variable is attacker-controlled; prefer comparing the string against
# "true"/"false" explicitly.
NETBOX_SSL_VERIFY = eval(os.getenv("NETBOX_SSL_VERIFY", "True"))
TESTBED_USER = os.getenv("TESTBED_USER")
TESTBED_PASS = os.getenv("TESTBED_PASS")
TESTBED_ENABLE = os.getenv("TESTBED_ENABLE")
DEBUG = False  # toggled by the --debug CLI flag
def get_testbed_details(nb_device):
    """Map a Netbox device record to pyATS testbed os/platform/type fields."""
    if DEBUG:
        print(f"DEBUG: Device={nb_device.name}, Device Model={nb_device.device_type.model}")
    model = nb_device.device_type.model.lower()
    if model.startswith("ie-3"):
        return {"os": "iosxe", "platform": "cat3k", "type": "switch"}
    if model.startswith("ir1101"):
        return {"os": "iosxe", "platform": "sdwan", "type": "router"}
    # Fallback for device models we do not recognise.
    return {"os": "nos", "platform": "network", "type": "device"}
@click.command("get-devices-by-tag")
@click.option("--tag", required=True)
@click.option("--debug/--no-debug", required=False, default=False)
@click.option("--netbox-token", required=False, default=NETBOX_TOKEN)
@click.option("--netbox-host", required=False, default=NETBOX_HOST)
@click.option("--netbox-port", required=False, default=NETBOX_PORT)
@click.option("--netbox-ssl-verify/--no-netbox-ssl-verify", required=False, default=NETBOX_SSL_VERIFY)
@click.option("--testbed-user", required=False, default=TESTBED_USER)
@click.option("--testbed-pass", required=False, default=TESTBED_PASS)
@click.option("--testbed-enable", required=False, default=TESTBED_ENABLE)
def get_devices_by_tag(tag, debug, netbox_token, netbox_host, netbox_port, netbox_ssl_verify, testbed_user, testbed_pass, testbed_enable):
    """Build and print a pyATS testbed YAML for every Netbox device carrying ``tag``."""
    global DEBUG
    DEBUG = debug
    # Fail fast with a rich-formatted error when connection basics are missing.
    if not netbox_token or len(netbox_token) == 0:
        print("[bold][red]Netbox API token not defined. Add environment variable NETBOX_TOKEN or use --netbox-token command line option.[/red][/bold]")
        exit()
    if not netbox_host or len(netbox_host) == 0:
        print("[bold][red]Netbox host not defined. Add environment variable NETBOX_HOST or use --netbox-host command line option.[/red][/bold]")
        exit()
    # Netbox client with a requests session carrying the SSL-verify setting.
    nb = pynetbox.api(f"https://{netbox_host}:{netbox_port}", token=netbox_token)
    nb_session = requests.session()
    nb_session.verify = netbox_ssl_verify
    nb.http_session = nb_session
    device_list = nb.dcim.devices.filter(tag=tag.lower())
    # Shared credentials block of the generated testbed.
    testbed_testbed = {
        "name": tag,
        "credentials": {
            "default": {
                "username": testbed_user,
                "password": testbed_pass
            },
            "enable": {
                "password": testbed_enable
            }
        }
    }
    testbed_devices = {}
    testbed_topology = {}
    try:
        for device in device_list:
            device.full_details()
            details = get_testbed_details(device)
            testbed_devices[device.name] = {
                "type": details["type"],
                "os": details["os"],
                "platform": details["platform"],
                "connections": {
                    "cli": {
                        "protocol": "ssh",
                        # Strip the CIDR suffix from the primary IPv4 address.
                        # NOTE(review): "\/" is an unneeded escape in a regular
                        # string (DeprecationWarning on newer Pythons).
                        "ip": re.sub("\/.*$", "", device.primary_ip4.address)
                    }
                }
            }
            # Optional per-device pyATS overrides from Netbox config context.
            if device.config_context.get("pyats_custom"):
                testbed_devices[device.name]["custom"] = device.config_context["pyats_custom"]
            # Topology: one link entry per cabled interface.
            for interface in nb.dcim.interfaces.filter(device=device.name):
                if interface.cable:
                    interface.connected_endpoint.device.full_details()
                    if device.name not in testbed_topology.keys():
                        testbed_topology[device.name] = { "interfaces": {} }
                    testbed_topology[device.name]["interfaces"][interface.name] = {
                        "link": f"cable-{interface.cable.id}",
                        "type": "ethernet"
                    }
                    # Attach the interface's IPv4 address when one is assigned.
                    if nb.ipam.ip_addresses.get(device=device.name, interface=interface.name):
                        testbed_topology[device.name]["interfaces"][interface.name]["ipv4"] = nb.ipam.ip_addresses.get(device=device.name, interface=interface.name).address
    except Exception as e:
        # NOTE(review): 'e' is unused; the interactive traceback prompt makes
        # this command unsuitable for non-interactive use.
        print("[bold][red]Error retrieving device list from Netbox[/red][/bold]")
        print_traceback = input("Print traceback? [Y/N]: ")
        if print_traceback == "Y":
            console.print_exception(show_locals=True)
        exit()
    testbed = {
        "testbed": testbed_testbed,
        "devices": testbed_devices,
        "topology": testbed_topology
    }
    testbed_yaml = yaml.dump(testbed)
    print(testbed_yaml)
# Root click command group; subcommands are registered below via cli.add_command().
# The docstring doubles as the CLI help text shown by `--help`.
@click.group()
def cli():
    """A tool for generating a testbed yaml from Netbox inventory"""
    pass
# Register the subcommand and dispatch the CLI when run as a script.
cli.add_command(get_devices_by_tag)
if __name__ == "__main__":
    cli()
| jeremypng/netbox-to-pyats | netbox_to_testbed.py | netbox_to_testbed.py | py | 5,377 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "rich.console.Console",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "urllib3.disable_warnings",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib... |
10380546163 | #! /usr/bin/env python3
import rospy
import tf
def main():
    """Broadcast a fixed base_link -> base_laser transform at 100 Hz until shutdown."""
    rospy.init_node("robot_tf_broadcaster")
    broadcaster = tf.TransformBroadcaster()
    loop_rate = rospy.Rate(100)
    # Static pose of the laser frame relative to the robot base:
    # 0.1 m forward, 0.2 m up, no rotation (identity quaternion x,y,z,w).
    translation = (0.1, 0.0, 0.2)
    rotation = (0, 0, 0, 1)
    while not rospy.is_shutdown():
        broadcaster.sendTransform(translation, rotation, rospy.Time.now(),
                                  "base_laser", "base_link")
        loop_rate.sleep()
# Run the broadcaster only when executed directly (not on import).
if __name__ == "__main__":
    main()
| Jieshoudaxue/ros_senior | learning_tf_homework/scripts/tf_broadcaster.py | tf_broadcaster.py | py | 430 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "rospy.init_node",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tf.TransformBroadcaster",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "rospy.Rate",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "rospy.is_shutdown",
... |
37338999785 | from concurrent import futures
import grpc
import servicos_pb2
import servicos_pb2_grpc
import threading
import fila
import banco
import time
import os
import config
import random
# Sleep granularity for the gRPC serve loop keep-alive.
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# Shared work queues: f1 receives raw requests, f2 feeds the log thread,
# f3 feeds the database thread. NOTE(review): f4 appears unused here.
f1 = fila.Fila()
f2 = fila.Fila()
f3 = fila.Fila()
f4 = fila.Fila()
# Shared in-memory key/value store backing the CRUD operations.
bd = banco.Banco()
class Requisicao(servicos_pb2_grpc.RequisicaoServicer):
    """gRPC servicer that encodes each CRUD call as a text command
    "<op> <key> [<value>]" (1=create, 2=read, 3=update, 4=delete) and
    pushes it onto the shared queue f1 for asynchronous processing.

    NOTE(review): none of these handlers returns a response message;
    gRPC expects a servicos_pb2.Resultado (see the commented-out return
    below) -- confirm clients tolerate an empty reply.
    """
    def Create(self,request,context):
        requisicao = "1 " + request.chave + " " + request.valor
        #handle responsibility here (TODO, translated from Portuguese)
        f1.insere(requisicao)
    def Read(self, request, context):
        requisicao = "2 " + request.chave
        f1.insere(requisicao)
    def Update(self, request, context):
        requisicao = "3 " + request.chave + " " + request.valor
        f1.insere(requisicao)
    def Delete(self, request, context):
        requisicao = "4 " + request.chave
        f1.insere(requisicao)
        #return servicos_pb2.Resultado(resposta="OK\n")
class Server(object):
    """gRPC key/value server node.

    Picks a random port from the configured range, serves Requisicao over
    gRPC, and runs three daemon worker threads that drain the shared
    queues (duplicate -> log -> database).

    NOTE(review): the worker threads busy-wait (spin on vazia()), which
    burns a full CPU core each; a blocking queue would be preferable.
    """
    def __init__(self):
        # Ring-topology neighbours (unused in this file) -- TODO confirm.
        self.antecessor = None
        self.sucessor = None
        self.cfg = config.Config()
        self.host = self.cfg.getHost().strip("\n")
        # Random port inside the configured [min, max] range.
        self.port = random.randint(int(self.cfg.getMinPort().strip("\n")), int(self.cfg.getMaxPort().strip("\n")))
        print("host = " + self.host +"\nport= " + str(self.port))
    def main(self):
        # Start the gRPC server and block forever (Ctrl+C to stop).
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        servicos_pb2_grpc.add_RequisicaoServicer_to_server(Requisicao(), server)
        server.add_insecure_port(self.host+':'+str(self.port))
        server.start()
        try:
            while True:
                time.sleep(_ONE_DAY_IN_SECONDS)
        except KeyboardInterrupt:
            server.stop(0)
    def duplica_thread(self):
        # Fan out each incoming command from f1 to the log (f2) and
        # database (f3) queues. NOTE(review): bare except hides errors.
        while True:
            while not f1.vazia():
                try:
                    comando = f1.retira()
                    f2.insere(comando)
                    f3.insere((comando))
                except:
                    pass
    def log_thread(self):
        # Placeholder: drains nothing -- spins while f2 has items.
        # NOTE(review): f2 is never emptied, so this loop can spin forever.
        while True:
            while not f2.vazia():
                pass #
    def banco_thread(self):
        # Apply queued commands to the shared store `bd`.
        # Command format: "<op> <key> [<value>]" split into at most 3 parts.
        # NOTE(review): `msg` is computed but never sent anywhere (see
        # responder()); bare except silently drops malformed commands.
        while True:
            while not f3.vazia():
                try:
                    cm = f3.retira()
                    cm = str(cm)
                    cm = cm.split(' ',2)
                    ok = False
                    if int(cm[0]) == 1 :
                        if bd.create(int(cm[1]),cm[2]):
                            ok = True
                            msg = "OK"
                    elif int(cm[0]) == 2 :
                        read = bd.read(int(cm[1]))
                        if read:
                            msg = "Chave:" + str(cm[1]) +" Valor: " + read
                            ok = True
                    elif int(cm[0]) == 3 :
                        if bd.update(int(cm[1]),cm[2]):
                            msg = "OK"
                            ok = True
                    elif int(cm[0]) == 4 :
                        if bd.delete(int(cm[1])):
                            msg = "OK"
                            ok = True
                    if not ok:
                        msg = "NOK"
                except:
                    pass
    def responder(self,msg):
        # Wrap a plain-text result in the protobuf response type.
        return servicos_pb2.Resultado(resposta=msg)
    def run(self):
        # Launch the three worker threads as daemons, then block in main().
        duplica = threading.Thread(target=self.duplica_thread, name="duplica",args=())
        duplica.setDaemon(True)
        duplica.start()
        log = threading.Thread(target=self.log_thread, name="log",args=())
        log.setDaemon(True)
        log.start()
        banco = threading.Thread(target=self.banco_thread, name="banco",args=())
        banco.setDaemon(True)
        banco.start()
        self.main()
def run_server():
    """Create a Server instance and start it (worker threads + gRPC loop)."""
    Server().run()
if __name__ == '__main__':
run_server() | ruehara/SdPython | protos/server_grpc.py | server_grpc.py | py | 4,072 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fila.Fila",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "fila.Fila",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "fila.Fila",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "fila.Fila",
"line_number": 17,
... |
27405001850 | import numpy as np
import torch
import torch.nn as nn
import random
from collections import deque
from vicero.policy import Policy
from copy import deepcopy
from vicero.algorithms.common.neuralnetwork import NeuralNetwork, NetworkSpecification
# DQN (Deep Q Networks)
# DQN is an approximated variant of Q-learning
# Significant differences:
# - The table is replaced by a neural network
# - Experiences are stored in a replay buffer, which is used to train the network
# The update rule is similarly to Q-learning based on the Bellman equation
# In vicero, certain shared traits of different algorithms are pulled out
# and placed in the common module. Neural networks and replay buffers are
# among those. This allows for this module to be written more cleanly, with
# a more pure focus on the reinforcement learning.
class DQN:
    def __init__(self, env, spec=None, alpha=1e-3, gamma=.95, epsilon_start=0.9, epsilon_end=1e-3, memory_length=2000, state_to_reward=None, render=True, qnet_path=None, qnet=None, plotter=None, caching_interval=1000, plot_durations=False):
        """Set up the DQN agent.

        env: gym-style environment (needs action_space.n / observation_space.shape).
        spec: NetworkSpecification used to build a fresh Q-network when
              `qnet` is not supplied; one of `qnet`/`spec` must be given.
        qnet_path: optional state_dict file loaded into the Q-network.
        """
        self.plot_durations = plot_durations
        # learning rate
        self.alpha = alpha
        # discount factor
        self.gamma = gamma
        # exploration rate
        self.epsilon_start = epsilon_start
        self.epsilon_end = epsilon_end
        self.epsilon = self.epsilon_start
        self.env = env
        self.state_to_reward = state_to_reward
        self.plotter = plotter
        self.device = torch.device('cpu')
        # the following 4 lines should be elegantly generalized
        torch.set_default_tensor_type('torch.DoubleTensor')
        optimizer = torch.optim.Adam
        loss_fct = nn.MSELoss
        self.n_actions = env.action_space.n
        # Either take the caller-provided network or build one from `spec`.
        if qnet is not None:
            self.qnet = qnet
        elif spec is not None:
            self.feature_size = env.observation_space.shape[0]
            self.qnet = NeuralNetwork(self.feature_size, self.n_actions, spec).to(self.device)
        else:
            raise Exception('The qnet, qnet_path and spec argument cannot all be None!')
        if qnet_path is not None:
            self.qnet.load_state_dict(torch.load(qnet_path))
        # Bounded replay buffer of (s, a, r, s', done) transitions.
        self.memory = deque(maxlen=memory_length)
        self.optimizer = optimizer(self.qnet.parameters(), lr=self.alpha)
        self.criterion = loss_fct()
        self.render = render
        self.state_visits = {}
        # Per-episode diagnostics (score/duration, max Q, averaged loss).
        self.history = []
        self.maxq_history = []
        self.maxq_temp = float('-inf')
        self.loss_history = []
        self.loss_temp = 0
        self.loss_count = 0
        # Frozen target network, refreshed every `caching_interval` steps.
        self.cached_qnet = deepcopy(self.qnet)
        self.caching_interval = caching_interval
        self.total_iterations = 0
    def train_episode(self, e, num_episodes, batch_size, training_iter=5000, completion_reward=None, verbose=False, plot=False, eps_decay=True):
        """Run one training episode: interact, store transitions, replay-train.

        Epsilon is linearly annealed from epsilon_start to epsilon_end as a
        function of e/num_episodes. Episode score (or duration, when
        plot_durations is set) is appended to self.history.
        """
        self.epsilon = self.epsilon_start - (self.epsilon_start - self.epsilon_end) * (e / num_episodes)
        state = self.env.reset()
        state = torch.from_numpy(state).to(self.device)#torch.from_numpy(np.flip(state,axis=0).copy())
        done = False
        score = 0
        progress = 0  # NOTE(review): unused local
        self.maxq_temp = float('-inf')
        duration = 0
        # NOTE(review): loop variable `time` shadows the `time` module name
        # inside this method body.
        for time in range(training_iter):
            duration = time
            if self.render: self.env.render()
            # Visit counter keyed on the state tensor (identity-hash based).
            if state in self.state_visits.keys():
                self.state_visits[state] += 1
            else:
                self.state_visits[state] = 1
            action = self.exploratory_action(state, record_maxq=True)
            next_state, reward, done, _ = self.env.step(action)
            self.total_iterations += 1
            # Optional reward shaping from the raw next state.
            if self.state_to_reward:
                reward = self.state_to_reward(next_state)
            if completion_reward is not None and done:
                reward = completion_reward
            score += reward
            next_state = torch.from_numpy(next_state).to(self.device)#np.flip(next_state,axis=0).copy()).to(self.device)
            self.remember(state, action, reward, next_state, done)
            state = next_state
            if done: break
            # Periodically refresh the frozen target network.
            if self.total_iterations % self.caching_interval == 0:
                self.cached_qnet = deepcopy(self.qnet)
            if len(self.memory) > batch_size:
                self.replay(batch_size, eps_decay)
        self.history.append(duration if self.plot_durations else score)
        self.maxq_history.append(self.maxq_temp)
        if self.plotter is not None:
            self.plotter(self.history)
        if verbose:
            print("episode: {}/{}, score: {:.2}, e: {:.2}, maxQ={:.2}, len(memory)={}".format(e, num_episodes, float(score), self.epsilon, self.maxq_temp, len(self.memory)))
        # Flush the running loss average for this episode.
        if self.loss_count > 0:
            self.loss_history.append(self.loss_temp / self.loss_count)
            self.loss_temp = 0
            self.loss_count = 0
def train(self, num_episodes, batch_size, training_iter=5000, completion_reward=None, verbose=False, plot=False, eps_decay=True):
for e in range(num_episodes):
self.train_episode(e, num_episodes, batch_size, training_iter, completion_reward, verbose, plot, eps_decay)
    def replay(self, batch_size, eps_decay):
        """One SGD pass over a random minibatch from the replay buffer.

        Target per transition: r + gamma * max_a Q_cached(s', a), written
        into the predicted Q-vector at the taken action's index.

        NOTE(review): `eps_decay` is unused here; the terminal flag `done`
        is not used to mask the bootstrap term -- confirm intended.
        """
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            state = state.to(self.device)
            reward = torch.tensor(reward, dtype=torch.double, requires_grad=False)
            target = reward
            # Bootstrap from the frozen target network on the next state.
            outputs = self.cached_qnet(next_state)
            target = (reward + self.gamma * torch.max(outputs))
            target_f = self.qnet(state)
            target_f[action] = target
            # Second forward pass produces the prediction compared to target_f.
            prediction = self.qnet(state)
            loss = self.criterion(prediction, target_f)
            self.loss_temp += float(loss)
            self.loss_count += 1
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
def exploratory_action(self, state, record_maxq=False):
if np.random.rand() <= self.epsilon:
return np.random.choice(range(self.n_actions))
outputs = self.qnet(state)
if record_maxq:
self.maxq_temp = max([self.maxq_temp] + list(outputs))
return outputs.max(0)[1].numpy()
def greedy_action(self, state):
outputs = self.qnet(state)
return outputs.max(0)[1].numpy()
def action_distribution(self, state):
state = torch.from_numpy(state).to(self.device)
out = self.qnet(state)
return nn.Softmax(dim=0)(out)
    def remember(self, state, action, reward, next_state, done):
        # Append one transition to the bounded replay deque (oldest dropped).
        self.memory.append((state, action, reward, next_state, done))
    def save(self, name):
        # Persist the Q-network weights (state_dict) to the given path.
        torch.save(self.qnet.state_dict(), name)
def copy_target_policy(self, verbose=False):
cpy = deepcopy(self.qnet)
device = self.device
def policy(state):
state = torch.from_numpy(state).to(device)
#state = torch.from_numpy(np.flip(state,axis=0).copy())
#state = state.to(device)
distribution = cpy(state)
if verbose:
print('state:', state)
print('Q(state):', distribution)
return distribution.max(0)[1].numpy()
return Policy(policy) | CogitoNTNU/vicero | vicero/algorithms/deepqlearning.py | deepqlearning.py | py | 7,626 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.set_default_tensor_type",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.... |
1707036596 | """Team and Tessera models
Revision ID: 20c506662a93
Revises: 089ccde49d20
Create Date: 2021-10-06 13:50:48.166391
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "20c506662a93"
down_revision = "089ccde49d20"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create team/tessera tables, their indexes, and the two association
    tables linking teams to asana projects and to tesserae."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "team",
        sa.Column("id", sa.CHAR(length=36), nullable=False),
        sa.Column("name", sa.String(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_team_name"), "team", ["name"], unique=True)
    op.create_table(
        "tessera",
        sa.Column("id", sa.CHAR(length=36), nullable=False),
        sa.Column("name", sa.String(), nullable=False),
        sa.Column("asana_id", sa.String(), nullable=False),
        sa.Column("github_handle", sa.String(), nullable=False),
        sa.Column("slack_id", sa.String(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        op.f("ix_tessera_asana_id"), "tessera", ["asana_id"], unique=False
    )
    op.create_index(
        op.f("ix_tessera_github_handle"),
        "tessera",
        ["github_handle"],
        unique=False,
    )
    op.create_index(op.f("ix_tessera_name"), "tessera", ["name"], unique=False)
    op.create_index(
        op.f("ix_tessera_slack_id"), "tessera", ["slack_id"], unique=False
    )
    # Many-to-many: teams <-> asana projects.
    op.create_table(
        "team_project_association",
        sa.Column("team_id", sa.CHAR(length=36), nullable=True),
        sa.Column("project_id", sa.CHAR(length=36), nullable=True),
        sa.ForeignKeyConstraint(
            ["project_id"],
            ["asana_project.id"],
            name="team_project_project_fk",
        ),
        sa.ForeignKeyConstraint(
            ["team_id"], ["team.id"], name="team_project_team_fk"
        ),
    )
    # Many-to-many: teams <-> tesserae (members).
    op.create_table(
        "team_tessera_association",
        sa.Column("team_id", sa.CHAR(length=36), nullable=True),
        sa.Column("tessera_id", sa.CHAR(length=36), nullable=True),
        sa.ForeignKeyConstraint(
            ["team_id"], ["team.id"], name="team_tessera_team_fk"
        ),
        sa.ForeignKeyConstraint(
            ["tessera_id"], ["tessera.id"], name="team_tessera_tessera_fk"
        ),
    )
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop everything created by upgrade(), in reverse dependency order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("team_tessera_association")
    op.drop_table("team_project_association")
    op.drop_index(op.f("ix_tessera_slack_id"), table_name="tessera")
    op.drop_index(op.f("ix_tessera_name"), table_name="tessera")
    op.drop_index(op.f("ix_tessera_github_handle"), table_name="tessera")
    op.drop_index(op.f("ix_tessera_asana_id"), table_name="tessera")
    op.drop_table("tessera")
    op.drop_index(op.f("ix_team_name"), table_name="team")
    op.drop_table("team")
    # ### end Alembic commands ###
| tesselo/giges | migrations/versions/202110061350_20c506662a93_team_and_tessera_models.py | 202110061350_20c506662a93_team_and_tessera_models.py | py | 3,004 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.CHAR"... |
# Fetch public open data over HTTP and export selected fields to CSV.
import urllib.request as request #1. networking module for the HTTP request
import json #2. json module to parse the JSON payload
src="https://padax.github.io/taipei-day-trip-resources/taipei-attractions-assignment.json"
with request.urlopen(src) as response: #3. read the whole payload from the URL
    data=json.load(response) #4. decode the JSON into Python objects
# print(int(data["result"]["results"][0]["xpostDate"][:4])>2015)
## Original version (printed to stdout instead of writing a file)
# scopeList=data["result"]["results"]
# # print(scopeList)
# for eachScope in scopeList:
#     # print(eachScope)
#     if int(eachScope["xpostDate"][:4])>=2015:
#         allLink=eachScope["file"]
#         jpgPos=allLink.lower().find(".jpg")
#         # print(allLink[:jpgPos+4])
#         jpgLink=allLink[:jpgPos+4]
#         print(eachScope["stitle"]+","+eachScope["address"][5:8]+","+eachScope["longitude"]+","+eachScope["latitude"]+","+jpgLink)
## Write to CSV (utf-8-sig so Excel opens it correctly)
scopeList=data["result"]["results"]
with open("data.csv","w",encoding="utf-8-sig") as file:
    for eachScope in scopeList:
        # print(eachScope)
        # Keep attractions posted in 2015 or later (first 4 chars = year).
        if int(eachScope["xpostDate"][:4])>=2015:
            allLink=eachScope["file"]
            # Truncate the concatenated link list to the first .jpg URL.
            jpgPos=allLink.lower().find(".jpg")
            # print(allLink[:jpgPos+4])
            jpgLink=allLink[:jpgPos+4]
            # print(eachScope["stitle"]+","+eachScope["address"][5:8]+","+eachScope["longitude"]+","+eachScope["latitude"]+","+jpgLink)
            file.write(eachScope["stitle"]+","+eachScope["address"][5:8]+","+eachScope["longitude"]+","+eachScope["latitude"]+","+jpgLink+"\n")
# for eachCompany in clist:
# file.write(eachCompany["公司名稱"]+"\n") | TaiLinChung/week-3.github.io | week03/w0301.py | w0301.py | py | 1,742 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 6,
"usage_type": "call"
}
] |
31588169742 | from selenium import webdriver
from bs4 import BeautifulSoup as bs
import pandas as pd
import requests, re
import os
import time
import pandas as pd
# Number of review pages to scrape.
page = 15
# Shop name used in the output CSV filename (Beijing Chaoyang Joy City).
name = '北京朝阳大悦城'
# Result table; columns: nickname, taste, ambience, service, time, comment.
info_table = pd.DataFrame(columns=['昵称', '口味', '环境', '服务', '时间', '评论'])
css_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}
# Geometry of the obfuscation SVG font: glyph cell size and base y offset.
font_size = 14
start_y = 23
def get_font_dict_by_offset(url):
    global css_headers, start_y, font_size
    """
    Build the {y_offset: {x_offset: char}} dictionary from the anti-scraping
    SVG font file; at least two SVG layouts exist (only two encountered so far).
    """
    res = requests.get(url, headers=css_headers)
    html = res.text
    font_dict = {}
    # Layout 1: glyph rows defined by <path d="M0 <y> ..."> + <textPath>.
    y_list = re.findall(r'd="M0 (\d+?) ', html)
    if y_list:
        font_list = re.findall(r'<textPath .*?>(.*?)<', html)
        for i, string in enumerate(font_list):
            y_offset = start_y - int(y_list[i])
            sub_font_dict = {}
            for j, font in enumerate(string):
                # Each glyph occupies font_size px; offsets are negative.
                x_offset = -j * font_size
                sub_font_dict[x_offset] = font
            font_dict[y_offset] = sub_font_dict
    else:
        # Layout 2: glyph rows defined directly by <text y="...">.
        font_list = re.findall(r'<text.*?y="(.*?)">(.*?)<', html)
        for y, string in font_list:
            y_offset = start_y - int(y)
            sub_font_dict = {}
            for j, font in enumerate(string):
                x_offset = -j * font_size
                sub_font_dict[x_offset] = font
            font_dict[y_offset] = sub_font_dict
    # print(font_dict)
    return font_dict
def get_css_info(url):
    """Download the obfuscation CSS and map each glyph class name to its
    real character via the background-position offsets."""
    global css_headers
    res = requests.get(url, headers=css_headers)
    html = res.text
    # Collect the SVG font URLs referenced by the CSS (protocol-relative).
    background_image_link = re.findall(r'background-image:.*?\((.*?svg)\)', html)
    # print(background_image_link)
    background_image_link_list = []
    for i in background_image_link:
        url = 'http:' + i
        background_image_link_list.append(url)
    # print(background_image_link_list)
    # Drop span rules, then grab (class, x, y) background offsets.
    html = re.sub(r'span.*?\}', '', html)
    group_offset_list = re.findall(r'\.([a-zA-Z0-9]{5,6}).*?round:(.*?)px (.*?)px;', html)
    '''
    Merge the offset dictionaries of every SVG file into one.
    '''
    font_dict_by_offset_list = {}
    for i in background_image_link_list:
        font_dict_by_offset_list.update(get_font_dict_by_offset(i))
    font_dict_by_offset = font_dict_by_offset_list
    # print(font_dict_by_offset)
    font_dict = {}
    for class_name, x_offset, y_offset in group_offset_list:
        x_offset = x_offset.replace('.0', '')
        y_offset = y_offset.replace('.0', '')
        try:
            font_dict[class_name] = font_dict_by_offset[int(y_offset)][int(x_offset)]
        except:
            # Unknown offset -> map the class to an empty string.
            font_dict[class_name] = ''
    return font_dict
def getdata():
    """Scrape one review page via the global selenium `driver`.

    Returns a list of 15 rows: [nickname, taste, ambience, service, date].
    Each field falls back to an alternate xpath (reviews with/without an
    expandable body shift the div index), then to 'none'.
    """
    pre = '//*[@id="review-list"]/div[2]/div[3]/div[3]/div[3]/ul/li['
    # a = driver.find_element_by_xpath('//*[@id="review-list"]/div[2]/div[3]/div[3]/div[3]/ul/li[1]/div/div[4]')
    # print(a.text)
    thelist = []
    for i in range(1, 16):
        alist = []
        name_xpath = pre + str(i) + ']/div/div[1]/a'
        # kouwei = taste, huanjing = ambience, fuwu = service scores.
        kouwei_xpath = pre + str(i) + ']/div/div[2]/span[2]/span[1]'
        huanjing_xpath = pre + str(i) + ']/div/div[2]/span[2]/span[2]'
        fuwu_xpath = pre + str(i) + ']/div/div[2]/span[2]/span[3]'
        time_xpath = pre + str(i) + ']/div/div[7]/span[1]'
        name = driver.find_element_by_xpath(name_xpath).text
        time.sleep(0.5)
        try:
            kouwei = driver.find_element_by_xpath(kouwei_xpath).text
        except:
            try:
                kouwei_xpath = pre + str(i) + ']/div/div[3]/span[2]/span[1]'
                kouwei = driver.find_element_by_xpath(kouwei_xpath).text
            except:
                kouwei = 'none'
        time.sleep(0.5)
        try:
            huanjing = driver.find_element_by_xpath(huanjing_xpath).text
        except:
            try:
                huanjing_xpath = pre + str(i) + ']/div/div[3]/span[2]/span[2]'
                huanjing = driver.find_element_by_xpath(huanjing_xpath).text
            except:
                huanjing = 'none'
        time.sleep(0.5)
        try:
            fuwu = driver.find_element_by_xpath(fuwu_xpath).text
        except:
            try:
                fuwu_xpath = pre + str(i) + ']/div/div[3]/span[2]/span[3]'
                fuwu = driver.find_element_by_xpath(fuwu_xpath).text
            except:
                fuwu = 'none'
        time.sleep(0.5)
        try:
            date = driver.find_element_by_xpath(time_xpath).text
        except:
            try:
                time_xpath = pre + str(i) + ']/div/div[6]/span[1]'
                date = driver.find_element_by_xpath(time_xpath).text
            except:
                date = 'none'
        time.sleep(0.5)
        alist.append(name)
        # Strip the Chinese field labels, keeping just the score text.
        alist.append(str(kouwei).replace('口味:', ''))
        alist.append(str(huanjing).replace('环境:', ''))
        alist.append(str(fuwu).replace('服务:', ''))
        alist.append(date)
        # info_table.loc[i] = alist
        thelist.append(alist)
        time.sleep(1)
    return thelist
def next_page():
    """Advance the review list to the next page via the pagination button."""
    # driver.find_element_by_xpath('//*[@id="review-list"]/div[2]/div[3]/div[3]/div[4]/div/a[10]').click()
    driver.find_element_by_class_name('NextPage').click()
# Expand collapsed review bodies
def zhankai():
    """Click every 'expand' link on the current page (xpath differs for
    reviews with/without a rating block; both variants are tried)."""
    pre = '//*[@id="review-list"]/div[2]/div[3]/div[3]/div[3]/ul/li['
    for i in range(1, 16):
        try:
            zhankai_xpath = pre + str(i) + ']/div/div[3]/div/a'
            driver.find_element_by_xpath(zhankai_xpath).click()
        except:
            try:
                zhankai_xpath = pre + str(i) + ']/div/div[4]/div/a'
                driver.find_element_by_xpath(zhankai_xpath).click()
            except:
                print('没能成功展开')
        time.sleep(0.5)
# Scroll the page gradually so lazy-loaded content is fetched.
def drop_down():
    for x in range(1, 15, 2):
        time.sleep(0.5)# throttle to avoid anti-scraping detection
        h = x/14
        js = 'document.documentElement.scrollTop = document.documentElement.scrollHeight * %f' % h
        driver.execute_script(js)
    # Jump back near the top when done.
    h = 0.1
    js = 'document.documentElement.scrollTop = document.documentElement.scrollHeight * %f' % h
    driver.execute_script(js)
def login():
    """Open the login dialog and wait 15 s for the user to log in manually."""
    driver.find_element_by_xpath('//*[@id="top-nav"]/div/div[2]/span[1]/a[1]').click()
    time.sleep(15) # manual login window
def get_comment():
    """Parse the current page source and return 15 complete review rows:
    [nickname, taste, ambience, service, date, comment text].

    Obfuscated glyphs (<svgmtsi class=...>) are decoded through the global
    `font_dict` built from the site's CSS/SVG cipher.
    """
    a = driver.page_source
    html_tree = bs(a, 'lxml')
    name_texts = html_tree.find_all("a", class_ = "name") # all reviewer names
    item_texts = html_tree.find_all("span", class_ = "item") # all score spans
    item_list = []
    for each in item_texts:
        # Skip the per-capita price span; strip the field labels/whitespace.
        if '人均' not in str(each.get_text()):
            item_list.append(str(each.get_text()).replace('口味:', '').replace('环境:', '').replace('服务:', '').replace('\n', '').replace(' ', ''))
    date_texts = html_tree.find_all("span", class_ = "time") # all timestamps
    alist = []
    comment_texts = html_tree.find_all("div", class_="review-words") # all review bodies
    comment_list = []
    for comment_text in comment_texts:
        comment_text = str(comment_text)
        class_set = []
        # print(html_text)
        # Collect obfuscated glyph class names used in this comment.
        for span in re.findall(r'<svgmtsi class="([a-zA-Z0-9]{5,6})"></svgmtsi>', comment_text):
            class_set.append(span)
        for class_name in class_set:
            try:
                # Substitute each glyph tag with its decoded character.
                comment_text = re.sub(r'<svgmtsi class="%s"></svgmtsi>' % class_name, font_dict[class_name], comment_text)
                # print('{}已替换完毕_______________________________'.format(font_dict[class_name]))
            except:
                comment_text = re.sub(r'<svgmtsi class="%s"></svgmtsi>' % class_name, '', comment_text)
                print('替换失败…………………………………………………………………………&&&&&&&&&&&&&&&&&&&&&&&&')
        # Keep only CJK runs, dropping markup and boilerplate words.
        b = str(re.findall('[\u4e00-\u9fa5]+', comment_text)).replace('收起评论', '').replace('文字', '').replace('[', '').replace(']', '').replace('\'', '')
        comment_list.append(b)
    # Assemble rows; scores come in groups of three per review.
    for i in range(1, 16):
        clist = []
        clist.append(str(name_texts[i].get_text()).replace(' ', '').replace('\n', ''))
        clist.append(item_list[i * 3])
        clist.append(item_list[i * 3 + 1])
        clist.append(item_list[i * 3 + 2])
        clist.append(str(date_texts[i - 1].get_text()).replace(' ', '').replace('\n', '')[:10])
        clist.append(comment_list[i - 1])
        alist.append(clist)
    print(alist)
    return alist
if __name__ == '__main__':
    # Launch Chrome, open the shop's review page, and let the user log in.
    driver = webdriver.Chrome(r'D:\360极速浏览器下载\chromedriver_win32\chromedriver.exe')
    driver.get('http://www.dianping.com/shop/6026269/review_all')
    time.sleep(3)
    login()
    # Build the glyph cipher table from the site's obfuscation CSS.
    css_link = driver.find_element_by_xpath('/html/head/link[4]').get_attribute('href')
    font_dict = get_css_info(css_link)
    time.sleep(2)
    # drop_down()
    # Scrape `page` pages of 15 reviews each into info_table.
    for i in range(0, page):
        zhankai()
        alist = get_comment()
        for j in range(0, 15):
            try:
                info_table.loc[i * 15 + j] = alist[j]
                print(' 已完成' + str(i * 15 + j) + '个')
            except:
                # On failure, flush what we have so far to CSV.
                info_table.to_csv('西贝筱面村' + name + '.csv', encoding='gbk')
        print('已完成' + str(i + 1) + '页')
        next_page()
    # print(info_table)
info_table.to_csv('西贝筱面村' + name + '.csv', encoding='gbk') | moyuweiqing/dazhongdianping | by-selenium.py | by-selenium.py | py | 9,781 | python | en | code | 35 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_numbe... |
45470229472 | import queue
import sys
import threading
import time
import os
import cv2
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QDialog, QApplication, QMessageBox
from PyQt5.uic import loadUi
import serial
import csv
from serialThreadFile import serialThreadClass
class FrambotUI(QDialog):
def __init__(self):
super(FrambotUI, self).__init__()
loadUi('FrambotUI.ui', self)
self.index= 0
self.image = None
self.image1 = None
self.writer = None
self.processedImage = None
self.path = None
self.ImageNumber = 0
self.Enabled = False
self.capture = None
self.capture1 = None
self.Save1 = False
self.out = None
self.out1 = None
self.port = 'COM4'
self.port_2 = 'COM6'
self.isPort1connected = False
self.isPort2connected = False
self.pushButton_Up.clicked.connect(self.advance)
self.pushButton_Down.clicked.connect(self.retreat)
self.pushButton_Left.clicked.connect(self.turn_left)
self.pushButton_Right.clicked.connect(self.turn_right)
self.feedstartButton.clicked.connect(self.feed_start)
self.feedendButton.clicked.connect(self.feed_end)
self.feedstartButton_2.clicked.connect(self.feed_start_2)
self.feedendButton_2.clicked.connect(self.feed_end_2)
self.startButton.clicked.connect(self.start_webcam) ## add function ID
self.stopButton.clicked.connect(self.stop_webcam)
self.indexButton.clicked.connect(self.index_up)
self.indexDownButton.clicked.connect(self.index_down)
self.saveButton.clicked.connect(self.save_video)
self.sendButton.clicked.connect(self.send_msg)
self.sendButton_2.clicked.connect(self.send_msg_2)
self.comportBox.activated.connect(self.update_comport)
self.comportBox_2.activated.connect(self.update_comport_2)
self.mySerial = serialThreadClass()
self.mySerial_2 = serialThreadClass()
self.mySerial.msg.connect(self.textEdit.append)
self.mySerial_2.msg.connect(self.textEdit_2.append)
def advance(self):
"""
Sending Msg to Arudino for advance
"""
msg = 'K11 D2'
try:
self.mySerial.sendSerial(msg)
except serial.SerialException:
print('Port1 - Failed to send a message ')
def retreat(self):
msg = 'K13 D2'
try:
self.mySerial.sendSerial(msg)
except serial.SerialException:
print('Port1 - Failed to send a message ')
def turn_left(self):
msg = 'K14 D2'
try:
self.mySerial.sendSerial(msg)
except serial.SerialException:
print('Port1 - Failed to send a message ')
def turn_right(self):
msg = 'K15 D2'
try:
self.mySerial.sendSerial(msg)
except serial.SerialException:
print('Port1 - Failed to send a message ')
def feed_end_2(self):
self.mySerial_2.terminate()
def feed_start_2(self):
try:
self.mySerial_2.open()
self.mySerial_2.start() # run thread
self.isPort2connected = True
except serial.SerialException:
print('Failed to open Comport.')
def update_comport_2(self, index):
self.port_2 = self.comportBox_2.currentText()
self.mySerial_2.updateport(self.port_2)
print('Current port for comport2 is :' + self.comportBox.currentText())
def send_msg_2(self):
try:
msg_2 = self.lineEdit_2.text()
# self.setFocus() # For keyboard control
self.mySerial_2.sendSerial(msg_2)
except serial.SerialException:
print('Port2 - Failed to send a message ')
def feed_end(self):
self.mySerial.terminate()
def feed_start(self):
try:
self.mySerial.open()
self.mySerial.start() # run thread
self.isPort1connected = True
except serial.SerialException:
print('Failed to open Comport.')
def update_comport(self, index):
self.port = self.comportBox.currentText()
self.mySerial.updateport(self.port)
print('Current port for comport1 is :' + self.comportBox.currentText())
def send_msg(self):
try:
msg = self.lineEdit.text()
# self.setFocus() # For keyboard control
self.mySerial.sendSerial(msg)
except serial.SerialException:
print('Port1 - Failed to send a message ')
def save_video(self):
# Save Data Button
if self.Save1:
self.Save1 = False
self.out.release()
self.out1.release()
print('Stop Saving')
else:
##Create video file name
localtime = time.localtime(time.time())
if localtime.tm_min < 10:
minuteWithZero = '0' + str(localtime.tm_min)
else:
minuteWithZero = str(localtime.tm_min)
video1 = 'FrontView_' + str(localtime.tm_hour) + minuteWithZero + str(localtime.tm_sec) + '.wma'
video2 = 'TopView_' + str(localtime.tm_hour) + minuteWithZero + str(localtime.tm_sec) + '.wma'
##Create path, directory for saving
directory = str(localtime.tm_hour) + str(localtime.tm_min) + str(localtime.tm_sec)
self.path = os.path.join(os.getcwd(), directory)
video1 = os.path.join(self.path, video1)
video2 = os.path.join(self.path, video2)
csvName = os.path.join(self.path, 'Data.csv')
if not os.path.exists(self.path):
os.makedirs(self.path)
##Write Header for CSV file
myFile = open(csvName, 'w+', newline='')
header = ['Index','FrontImageName', 'TopImageName', 'FrontBack1', 'FrontBack2', 'LeftRight1', 'LeftRigh2',
'ax(ms2)', 'ay(ms2)', 'az(ms2)','gx(rad/s)','gy(rad/s)','gz(rad/s)',
'mx(nT)','my(nT)','mz(nT)', 'latitude' ,'longitude','altitude','Heading(IMU(rad))','Heading(GPS)']
with myFile:
self.writer = csv.writer(myFile)
self.writer.writerow(header)
try:
self.out = cv2.VideoWriter(video1, cv2.VideoWriter_fourcc('W', 'M', 'V', '1'), 30, (640, 480))
self.out1 = cv2.VideoWriter(video2, cv2.VideoWriter_fourcc('W', 'M', 'V', '1'), 30, (640, 480))
print('Video Recorders have been created')
# preparing saving. The image data are saved in update_frame()
self.Save1 = True
except cv2.error:
print('Failed to create Camera has not been initiated')
def index_up(self):
"""
update index
used in go-stop-go testing
used in initial soil sensor testing
:return:
"""
self.index = self.index + 1
self.indexButton.setText(str(self.index))
def index_down(self):
"""
decrease index
used in go-stop-go testing
used in initial soil sensor testing
:return:
"""
self.index = self.index - 1
self.indexButton.setText(str(self.index))
def start_webcam(self):
self.capture = cv2.VideoCapture(0)
self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
self.capture1 = cv2.VideoCapture(1)
self.capture1.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
self.capture1.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
self.timer = QTimer(self)
self.timer.timeout.connect(self.update_frame)
self.timer.start(200) #call update_frame function every 200ms 5hz
def update_frame(self):
    """Grab one frame from each camera, display them, and optionally log data.

    Called by the QTimer every 200 ms (5 Hz). Steps:
      1. Read and mirror a frame from each capture device and display it.
      2. When saving is enabled (self.Save1) and the drive values indicate
         forward motion, append a CSV row and save both frames as PNGs.
      3. Append both frames to the open video writers.
    """
    # VideoCapture.read() does not raise cv2.error on failure; it returns
    # (False, None). The old try/except never fired and left self.image
    # undefined (or stale) on a failed grab — check the flag instead.
    ret, frame = self.capture.read()
    if ret and frame is not None:
        self.image = cv2.flip(frame, 1)  # mirror horizontally for display
        self.disply_image(self.image, 1)
    else:
        print('Failed to capture images from Camera0')
    ret1, frame1 = self.capture1.read()
    if ret1 and frame1 is not None:
        self.image1 = cv2.flip(frame1, 1)
        self.disply_image(self.image1, 2)
    else:
        print('Failed to capture images from Camera1')
    if self.Save1:
        a = self.mySerial.getmsgstr()
        b = a[2:-5]  # Remove b' at the Front and /r/n at the End
        msg_list = b.split(',')
        # Average of the first two drive channels; > 140 means the robot
        # is moving forward, so only then is the sample worth saving.
        threshold = (int(msg_list[0]) + int(msg_list[1])) / 2
        if threshold > 140.0:  # save image and data only while moving forward
            self.ImageNumber = self.ImageNumber + 1  # number of images captured
            front_image_name = 'FrontImage' + str(self.ImageNumber)
            top_image_name = 'TopImage' + str(self.ImageNumber)
            front_image_path = os.path.join(self.path, front_image_name) + '.png'
            top_image_path = os.path.join(self.path, top_image_name) + '.png'
            csv_out = str(self.index) + ',' + front_image_name + ',' + top_image_name + ',' + b
            # Columns: index, Imagename1, Imagename2, L/R1, L/R2, F/B1, F/B2, Roll, Pitch, Heading
            my_list = csv_out.split(',')
            csv_name = os.path.join(self.path, 'Data.csv')
            with open(csv_name, 'a+', newline='') as myFile:
                self.writer = csv.writer(myFile)
                self.writer.writerow(my_list)
            cv2.imwrite(front_image_path, self.image)  # save PNG image
            cv2.imwrite(top_image_path, self.image1)  # save PNG image
    # Save video: append the current frames to both writers.
    self.out.write(self.image)
    self.out1.write(self.image1)
def stop_webcam(self):
    """Stop the frame-update timer; the capture devices themselves stay open."""
    self.timer.stop()
def disply_image(self, img, window=1):
    """Render an OpenCV image into one of the two preview labels.

    img: numpy array from cv2 (grayscale, BGR, or BGRA).
    window: 1 -> imgLabel (camera 0), 2 -> imgLabel2 (camera 1).
    """
    qformat = QImage.Format_Indexed8  # default for single-channel images
    if len(img.shape) == 3: # [0]=rows, [1]=cols [2]=channels
        if (img.shape[2]) == 4:
            qformat = QImage.Format_RGBA8888
            print('RGBA')
        else:
            qformat = QImage.Format_RGB888
            # print('RGB')
    outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
    # BGR -> RGB: OpenCV stores channels as BGR but Qt expects RGB.
    outImage = outImage.rgbSwapped()
    if window == 1:
        self.imgLabel.setPixmap(QPixmap.fromImage(outImage))
        self.imgLabel.setScaledContents(True)
    if window == 2:
        self.imgLabel2.setPixmap(QPixmap.fromImage(outImage))
        self.imgLabel2.setScaledContents(True)
# Application entry point: create the Qt application and show the main window.
app = QApplication(sys.argv)
window = FrambotUI()
window.setWindowTitle('Frambot UI test')
# window.setGeometry(100,100,400,200)
window.show()
window.setFocus()
sys.exit(app.exec_())  # run the Qt event loop until the window closes
| jaehyunShinRmit/OpencvTest | main2.py | main2.py | py | 11,014 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "serialThreadFile.serialThreadClass",
"line_number": 57,
"usage_type": "call"
},
{
"api_n... |
44303046716 | import gym
import numpy as np
from keras_ppo_icm import Agent
if __name__ == '__main__':
    # Train a PPO agent with an Intrinsic Curiosity Module (ICM) on CartPole.
    #env_name = 'MountainCar-v0'
    env_name = 'CartPole-v1'
    env = gym.make(env_name)
    # NOTE(review): hyperparameter meanings inferred from names (alpha/beta as
    # actor/critic learning rates, eta as intrinsic-reward scale) — confirm
    # against the Agent implementation in keras_ppo_icm.
    agent = Agent(
        input_shape=env.observation_space.shape,
        action_num=env.action_space.n,
        alpha=1e-3,
        beta=5e-3,
        gamma=0.99,
        eta=30,
        icm_alpha=1e-3,
        icm_beta=0.2,
        entropy_coef=.1,
        entropy_decay=0.999)

    # Save architecture diagrams for the three networks.
    from tensorflow.keras.utils import plot_model
    plot_model(agent.actor, to_file='actor_model.png', show_shapes=True)
    plot_model(agent.critic, to_file='critic_model.png', show_shapes=True)
    plot_model(agent.icm, to_file='icm_model.png', show_shapes=True)

    episode_num = 0
    solved = False
    total_steps = 0
    total_rewards = []  # rolling window of the last 100 episode returns
    while not solved:
        state = env.reset()
        done = False
        running_reward = 0
        train_step = 0
        while not done:
            state, reward, done, _ = agent.step(env, state)
            running_reward += reward
            train_step += 1
            #env.render()
        # One PPO/ICM learning update per finished episode.
        actor_loss, critic_loss, icm_loss = agent.learn()
        total_rewards.append(running_reward)
        total_rewards = total_rewards[-100:]
        mean_reward = np.mean(total_rewards)
        print(
            f'Episode: {episode_num: 6d} ({train_step: 3d}), '
            f'Mean Reward: {mean_reward: 4.0f} ({running_reward: 4.0f}), '
            f'Actor Loss: {actor_loss:0.5f}, '
            f'Critic Loss: {critic_loss:0.5f}, '
            f'ICM Loss: {icm_loss:0.5f}'
        )
        if episode_num % 100 == 0:
            # Periodic checkpoint every 100 episodes.
            agent.save_model(
                f'{env_name.lower()}_actor.h5',
                f'{env_name.lower()}_critic.h5',
                f'{env_name.lower()}_icm.h5'
            )
        if mean_reward >= 500:
            # Mean of last 100 returns at 500 (CartPole-v1 max) counts as solved.
            solved = True
            agent.save_model(
                f'{env_name.lower()}_actor.h5',
                f'{env_name.lower()}_critic.h5',
                f'{env_name.lower()}_icm.h5'
            )
            # NOTE(review): total_steps is printed before the final episode's
            # train_step is added below, so the reported count is low by one
            # episode's worth of steps.
            print(f'Solved after {episode_num} ({total_steps} steps)')
        total_steps += train_step
        episode_num += 1
| Techno263/rl-exploration | keras_ppo_icm/keras_ppo_learn.py | keras_ppo_learn.py | py | 2,196 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gym.make",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "keras_ppo_icm.Agent",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.utils.plot_model",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tensorf... |
1959000648 | # -*- coding: utf-8 -*-
from googleapiclient.http import MediaFileUpload
import pandas as pd
import os
from googleapiclient.discovery import build
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from oauth2client import file, client, tools
from apiclient import errors, discovery
from httplib2 import Http
from typing import *
from pprint import pprint
import traceback
import string
import getpass
# HOME_PATH = '/ldap_home/{}/chat_keywords_screening/keyword-screening/'.format(getpass.getuser())
HOME_PATH = './'
TOKEN_PATH = HOME_PATH + 'token.json'
CREDENTIAL_PATH = HOME_PATH + 'credentials.json'
SCOPES=['https://www.googleapis.com/auth/spreadsheets','https://mail.google.com/','https://www.googleapis.com/auth/drive']
# assert os.path.exists(TOKEN_PATH), "could not find %s" % (TOKEN_PATH)
assert os.path.exists(CREDENTIAL_PATH), "could not find %s" % (CREDENTIAL_PATH)
class GService:
sheet_service = None
mail_service = None
drive_service = None
def __init__(self) -> None:
"""
Service Provider Object for Google Sheet, Drive, and Mail
"""
creds = Credentials.from_authorized_user_file(TOKEN_PATH, SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(CREDENTIAL_PATH, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(TOKEN_PATH, 'w') as token:
token.write(creds.to_json())
self.sheet_service = build('sheets', 'v4', credentials=creds)
self.mail_service = build('gmail', 'v1', credentials=creds)
self.drive_service = build('drive', 'v3', credentials=creds)
def add_dropdown_validation(
self,
gsheet_id: str,
sheet_id: int,
start_row: int,
end_row: int,
start_col: int,
end_col: int,
values: Iterable,
input_msg: str = '',
strict: bool = False
) -> dict:
"""
Will apply dropdown style validation on cells bounded by starting & ending rows & columns. \n
Row & column indexes start from 0 (cell A1 is row 0 column 0). \n
`start_row` and `start_col` are inclusive. \n
`end_row` and `end_col` are exclusive. \n
Example
-------
`start_row` = 0 \n
`end_row` = 5 \n
`start_col` = 0 \n
`end_col` = 2 \n
Validation will be applied on cell range A1:B5
"""
service = self.sheet_service
spreadsheets = service.spreadsheets()
request_body = {
'requests': [{
'setDataValidation': {
'range': {
'sheetId': sheet_id,
'startRowIndex': start_row,
'endRowIndex': end_row,
'startColumnIndex': start_col,
'endColumnIndex': end_col
},
'rule': {
'condition': {
'type': 'ONE_OF_LIST',
'values':[{'userEnteredValue':v} for v in values]
# 'values': [
# {'userEnteredValue': 'Banned'},
# {'userEnteredValue': 'Normal'}
# ]
},
'inputMessage': input_msg,
'showCustomUi': True,
'strict': strict
},
}
}]
}
response = spreadsheets.batchUpdate(
spreadsheetId=gsheet_id,
body=request_body
).execute()
pprint(response)
return response
def get_sheet_id(self, gsheet_id: str, sheet_name: str) -> int:
service = self.sheet_service
sheet_metadata = service.spreadsheets().get(spreadsheetId=gsheet_id).execute()
properties = sheet_metadata.get('sheets')
id: int = [p.get('properties').get('sheetId') for p in properties if p.get('properties').get('title') == sheet_name][0]
return id
def add_sheet(self, gsheet_id: str, name: str, row_cnt: int = 100, col_cnt: int = 10) -> dict:
service = self.sheet_service
spreadsheets = service.spreadsheets()
request_body = {
'requests': [{
'addSheet': {
'properties': {
'title': name,
'gridProperties': {
'columnCount': col_cnt,
'rowCount': row_cnt
}
}
}
}]
}
response = spreadsheets.batchUpdate(
spreadsheetId=gsheet_id,
body=request_body
).execute()
pprint(response)
return response
def delete_sheet(self, gsheet_id: str, sheet_id: int) -> dict:
service = self.sheet_service
spreadsheets = service.spreadsheets()
request_body = {
'requests': [{
'deleteSheet': {
'sheetId': sheet_id
}
}]
}
response = spreadsheets.batchUpdate(
spreadsheetId=gsheet_id,
body=request_body
).execute()
pprint(response)
return response
def get_sheet_names(self, gsheet_id: str) -> List[str]:
service = self.sheet_service
sheet_metadata = service.spreadsheets().get(spreadsheetId=gsheet_id).execute()
properties = sheet_metadata.get('sheets')
titles = [p.get('properties').get('title') for p in properties]
return titles
def read_google_sheet(self, gsheet_id: str, cell_range: str) -> pd.DataFrame:
    """Read a cell range into a DataFrame, using the first row as the header.

    Rows shorter than the header are right-padded with None (the Sheets API
    omits trailing empty cells). Returns None (after logging a traceback)
    when the range is empty or has no header row.
    """
    service = self.sheet_service
    result = service.spreadsheets().values() \
        .get(spreadsheetId=gsheet_id, range=cell_range) \
        .execute() \
        .get('values', ())
    try:
        header, rows = result[0], result[1:]
        numColumn = len(header)
    except IndexError as e:
        traceback.print_exc()
        print('Error opening sheet_id {} cell_range {}'.format(gsheet_id,cell_range))
        return None
    series = list()
    for row in rows:
        # fill the end of the list with enough data
        row = row + ([None] * (numColumn - len(row)))
        series.append(row)
    return pd.DataFrame(series, columns=header)
def write_google_sheet(self, gsheet_id: str, cell_range: str, values: List[List[Union[str,int]]], input_option: str = 'USER_ENTERED') -> dict:
"""
`input_option` -> 'RAW' or 'USER_ENTERED'
USER_ENTERED -> gsheet takes the cell values as if typed in by a human (e.g. '2021-11-01' will be auto converted to date)
RAW -> gsheet takes the cell values directly (e.g. '2021-11-01' will remain as string)
"""
body = {'values':values}
service = self.sheet_service
result = service.spreadsheets().values() \
.update(spreadsheetId=gsheet_id, range=cell_range, valueInputOption=input_option, body=body) \
.execute()
print('{0} cells updated'.format(result.get('updatedCells')))
return result
def clear_google_sheet(self, gsheet_id: str, cell_range: str) -> dict:
service = self.sheet_service
request = service.spreadsheets().values().clear(spreadsheetId=gsheet_id, range=cell_range)
response = request.execute()
pprint(response)
return response
def append_google_sheet(self, gsheet_id: str, cell_range: str, values: List[List[Union[str,int]]], insert_new_row: bool=False, input_option: str = 'USER_ENTERED') -> dict:
body = {'values':values}
service = self.sheet_service
if insert_new_row:
insert_data_option = 'INSERT_ROWS'
else:
insert_data_option = 'OVERWRITE'
result = service.spreadsheets().values() \
.append(spreadsheetId=gsheet_id, range=cell_range, insertDataOption=insert_data_option, valueInputOption=input_option, body=body) \
.execute()
pprint(result)
return result
def append_column(self, gsheet_id: str, sheet_id: int, n_cols: int) -> dict:
service = self.sheet_service
body = {
"requests": [
{
"appendDimension": {
"sheetId": sheet_id,
"dimension": "COLUMNS",
"length": n_cols
}
}
]
}
result = service.spreadsheets().batchUpdate(spreadsheetId=gsheet_id, body=body).execute()
pprint(result)
return result
def append_row(self, gsheet_id: str, sheet_id: int, n_rows: int) -> dict:
service = self.sheet_service
body = {
"requests": [
{
"appendDimension": {
"sheetId": sheet_id,
"dimension": "ROWS",
"length": n_rows
}
}
]
}
result = service.spreadsheets().batchUpdate(spreadsheetId=gsheet_id, body=body).execute()
pprint(result)
return result
def delete_column(self, gsheet_id: str, sheet_id: int, start_col: int, end_col: int) -> dict:
"""
Row & column indexes start from 0 (cell A1 is row 0 column 0). \n
`start_col` is inclusive. \n
`end_col` is exclusive. \n
Example
-------
`start_col` = 0 \n
`end_col` = 3 \n
Columns A-C will be deleted
"""
service = self.sheet_service
body = {
"requests": [
{
"deleteDimension": {
"range": {
"sheetId": sheet_id,
"dimension": "COLUMNS",
"startIndex": start_col,
"endIndex": end_col
}
}
}
],
}
result = service.spreadsheets().batchUpdate(spreadsheetId=gsheet_id, body=body).execute()
pprint(result)
return result
def delete_row(self, gsheet_id: str, sheet_id: int, start_row: int, end_row: int) -> dict:
"""
Row & column indexes start from 0 (cell A1 is row 0 column 0). \n
`start_row` is inclusive. \n
`end_row` is exclusive. \n
Example
-------
`start_row` = 0 \n
`end_row` = 5 \n
Rows 1-5 will be deleted
"""
service = self.sheet_service
body = {
"requests": [
{
"deleteDimension": {
"range": {
"sheetId": sheet_id,
"dimension": "ROWS",
"startIndex": start_row,
"endIndex": end_row
}
}
}
],
}
result = service.spreadsheets().batchUpdate(spreadsheetId=gsheet_id, body=body).execute()
pprint(result)
return result
def get_sheet_dimension(self, gsheet_id: str, sheet_name: str) -> Tuple[int, int]:
"""return -> tuple(row_count, column_count)"""
service = self.sheet_service
sheet_metadata = service.spreadsheets().get(spreadsheetId=gsheet_id).execute()
properties = sheet_metadata.get('sheets')
grid_properties = [p.get('properties').get('gridProperties') for p in properties if p.get('properties').get('title') == sheet_name][0]
row_count, column_count = grid_properties.get('rowCount'), grid_properties.get('columnCount')
return row_count, column_count
def create_folder(self, folder_name: str, parent_folder_id: str = None) -> dict:
service = self.drive_service
if parent_folder_id is None:
metadata = {
'name': folder_name,
'mimeType': 'application/vnd.google-apps.folder'
}
else:
metadata = {
'name': folder_name,
'mimeType': 'application/vnd.google-apps.folder',
'parents': [parent_folder_id]
}
folder = service.files().create(
body=metadata,
fields='id'
).execute()
print('Folder ID:', folder.get('id'))
return folder
def upload_file(self, local_file_path: str, gdrive_file_name: str, parent_gdrive_folder_id: str = None, file_type: str = None, google_docs_type: str = None) -> dict:
service = self.drive_service
metadata = {'name': gdrive_file_name}
if parent_gdrive_folder_id is not None:
metadata.update({'parents': [parent_gdrive_folder_id]})
if google_docs_type is not None:
metadata.update({'mimeType': google_docs_type})
media = MediaFileUpload(local_file_path, mimetype=file_type)
gdrive_file = service.files().create(
body=metadata,
media_body=media,
fields='id'
).execute()
print('File ID:', gdrive_file.get('id'))
return gdrive_file
def create_file(self, gdrive_file_name: str, parent_gdrive_folder_id: str, google_docs_type: str = None) -> dict:
service = self.drive_service
metadata = {
'name': gdrive_file_name,
'parents': [parent_gdrive_folder_id],
'mimeType': google_docs_type
}
gdrive_file = service.files().create(
body=metadata,
fields='id'
).execute()
print('File ID:', gdrive_file.get('id'))
return gdrive_file
def list_files(self, gdrive_folder_id: str) -> List[dict]:
service = self.drive_service
response = service.files().list(
q="'{}' in parents".format(gdrive_folder_id)
).execute()
return response.get('files', [])
def df_to_value_range(df: pd.DataFrame, include_header=True) -> List[List[str]]:
    """Convert a DataFrame to the list-of-rows form used by the Sheets API.

    NaN/None cells become empty strings and every cell is stringified.

    :param df: source frame; left unmodified (the previous implementation
        mutated it via fillna(inplace=True) as a hidden side effect).
    :param include_header: prepend the column names as the first row.
    :return: list of rows, each a list of strings.
    """
    # Work on a filled copy instead of mutating the caller's frame.
    filled = df.fillna('')
    headers = [str(h) for h in filled.columns]
    content = [[str(c) for c in filled.loc[i]] for i in filled.index]
    return [headers] + content if include_header else content
def numeric_col_to_letter_col(col_index: int):
    """Convert a 0-based column index to its spreadsheet letter name.

    Examples
    -------------
    0 --> A \n
    25 --> Z \n
    26 --> AA \n
    and so on ...
    """
    # Iterative base-26 conversion (bijective numeration: there is no zero
    # digit, hence the "- 1" when stepping to the next letter position).
    letters = ''
    index = col_index
    while True:
        letters = string.ascii_uppercase[index % 26] + letters
        if index < 26:
            return letters
        index = index // 26 - 1
# Testing sheet: https://docs.google.com/spreadsheets/d/1CH5nkNA5-zeeA3wYuU0WRF6Fg4zxg2iJoL3Ti_HGbXc/
if __name__ == "__main__":
gs = GService()
g_files = gs.list_files('1XMOvZNlUMGPWPfMC_oW31g1kgbI-Ox86')
print(g_files) | StevePrat/google_service_python | google_service.py | google_service.py | py | 15,535 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "google.oauth2.credentials.Credentials.from_authorized_user_file",
"line_number": 37,
"usage_type": "call"
}... |
41334978552 | import base64
import json
import jwt
import os
import re
import time
from urllib.parse import urlencode
from jupyterhub.handlers.login import LogoutHandler
from jupyterhub.utils import url_path_join
from oauthenticator.generic import GenericOAuthenticator
from tornado.httpclient import HTTPClientError, HTTPRequest, AsyncHTTPClient
from traitlets import default, Bool, Int, Unicode
from .config import OPENSTACK_RC_AUTH_STATE_KEY
class LogoutRedirectHandler(LogoutHandler):
    """Redirect user to IdP logout page to clean upstream session."""

    async def render_logout_page(self):
        # Instead of rendering the local logout page, bounce the browser to
        # the IdP so the upstream session is cleared as well.
        # NOTE(review): this redirects to the authenticator's token_url —
        # confirm it is the intended logout/end-session endpoint.
        self.redirect(self.authenticator.token_url, permanent=False)
class SessionRefreshHandler(LogoutHandler):
    """Redirect user back to internal page after clearing their session.

    This allows an effective "refresh" flow, where if the user is already
    logged in to the IdP, they can proceed directly back to where they were
    before, but with a refreshed session.
    """

    async def render_logout_page(self, next_page=None):
        # Default to the "next" query argument, falling back to the hub root.
        if next_page is None:
            next_page = self.get_argument("next", "/")
        # Only relative paths are allowed: blocks open-redirect abuse.
        if not next_page.startswith("/"):
            self.log.warning(f"Redirect to non-relative location {next_page} blocked.")
            next_page = "/"
        # Redirect relative to the hub prefix. This is very important! The reason is,
        # only by redirecting to a hub-owned path will it understand that user needs
        # to be logged in again, because we explicitly killed the Hub session/login.
        # If we directly go back to a user server, the server's session is actually
        # still alive! It's not totally clear how we can tell that cookie to get cleared
        # on logout; it seems the assumption is that users always get there via some
        # other Hub handler.
        next_page = url_path_join(self.app.hub_prefix, next_page)
        html = await self.render_template("auth_refresh.html", next_page=next_page)
        self.finish(html)
class OpenstackOAuthenticator(GenericOAuthenticator):
"""
A generic authenticator that supports getting and refreshing user tokens.
"""
login_service = "Chameleon"
# Force auto_login so that we don't render the default login form.
auto_login = Bool(True)
# The user's Keystone/Keycloak tokens are stored in auth_state and the
# authenticator is not very useful without it.
enable_auth_state = Bool(True)
# Check state of authentication token before allowing a new spawn.
# The Keystone authenticator will fail if the user's unscoped token has
# expired, forcing them to log in, which is the right thing.
refresh_pre_spawn = Bool(True)
# Automatically check the auth state this often.
# This isn't very useful for us, since we can't really do anything if
# the token has expired realistically (can we?), so we increase the poll
# interval just to reduce things the authenticator has to do.
# TODO(jason): we could potentially use the auth refresh mechanism to
# generate a refresh auth token from Keycloak (and then exchange it for
# a new Keystone token.)
auth_refresh_age = Int(60 * 60)
@default("scope")
def _scope_default(self):
return [
"openid",
"profile",
]
#hub_public_url = Unicode(
# os.getenv("JUPYTERHUB_PUBLIC_URL"),
# config=True,
# help="""
# The full (public) base URL of the JupyterHub
# server. JupyterHub should really provide this to
# managed services, but it doesn't, so we have to. The
# issue is that we are behind a reverse proxy, so we need
# to inform JupyterHub of this.
# """,
#)
keystone_auth_url = Unicode(
os.getenv("OS_AUTH_URL", ""),
config=True,
help="""
Keystone authentication URL
""",
)
keystone_interface = Unicode(
os.getenv("OS_INTERFACE", "public"),
config=True,
help="""
Keystone endpoint interface
""",
)
keystone_identity_api_version = Unicode(
os.getenv("OS_IDENTITY_API_VERSION", "3"),
config=True,
help="""
Keystone API version (default=v3)
""",
)
keystone_identity_provider = Unicode(
os.getenv("OS_IDENTITY_PROVIDER", "chameleon"),
config=True,
help="""
Keystone identity provider name. This identity provider must have its
client ID included as an additional audience in tokens generated for
the client ID specified in `keycloak_client_id`. This allows the token
generated for one client to be re-used to authenticate against another.
""",
)
keystone_protocol = Unicode(
os.getenv("OS_PROTOCOL", "openid"),
config=True,
help="""
Keystone identity protocol name
""",
)
keystone_project_domain_name = Unicode(
os.getenv("OS_PROJECT_DOMAIN_NAME", "chameleon"),
config=True,
help="""
Keystone domain name for federated domain
""",
)
keystone_default_region_name = Unicode(
os.getenv("OS_REGION_NAME", ""),
config=True,
help="""
A default region to use when choosing Keystone endpoints
""",
)
async def authenticate(self, handler, data=None):
# TODO fix overrides here
"""Authenticate with Keycloak."""
auth_dict = await super().authenticate(handler, data)
auth_state = auth_dict["auth_state"]
access_token = auth_state["access_token"]
decoded_access_token = jwt.decode(
access_token, options={"verify_signature": False}
)
refresh_token = auth_state["refresh_token"]
decoded_refresh_token = jwt.decode(
refresh_token, options={"verify_signature": False}
)
expires_at = decoded_access_token.get("exp")
refresh_expires_at = decoded_refresh_token.get("exp")
user_headers = self._get_default_headers()
user_headers["Authorization"] = "Bearer {}".format(access_token)
req = HTTPRequest(self.userdata_url, method="GET", headers=user_headers)
try:
http_client = AsyncHTTPClient()
user_resp = await http_client.fetch(req)
except HTTPClientError as err:
self.log.error(f"Unexpected HTTP error fetching user data: {err}")
return None
user_json = json.loads(user_resp.body.decode("utf8", "replace"))
username = user_json.get("preferred_username")
# TODO override this in keycloak
is_admin = os.getenv("OAUTH_ADMIN_PROJECT", "Chameleon") in map(
lambda x : x.get("id"), user_json.get("projects", [])
)
user_projects = user_json.get("projects", [])
has_active_allocations = len(user_projects) > 0
if not has_active_allocations:
self.log.info(f"User {username} does not have any active allocations")
return None
if self._has_keystone_config():
openstack_rc = {
"OS_AUTH_URL": self.keystone_auth_url,
"OS_INTERFACE": self.keystone_interface,
"OS_IDENTITY_API_VERSION": self.keystone_identity_api_version,
"OS_ACCESS_TOKEN": access_token,
"OS_IDENTITY_PROVIDER": self.keystone_identity_provider,
"OS_PROTOCOL": self.keystone_protocol,
"OS_AUTH_TYPE": "v3oidcaccesstoken",
"OS_PROJECT_DOMAIN_NAME": self.keystone_project_domain_name,
}
if self.keystone_default_region_name:
openstack_rc["OS_REGION_NAME"] = self.keystone_default_region_name
if user_projects:
openstack_rc["OS_PROJECT_NAME"] = user_projects[0]["id"]
else:
self.log.warning(
(
"No Keystone configuration available, cannot set OpenStack "
"RC variables"
)
)
openstack_rc = None
auth_state["is_federated"] = True
auth_state["expires_at"] = expires_at
auth_state["refresh_expires_at"] = refresh_expires_at
auth_state[OPENSTACK_RC_AUTH_STATE_KEY] = openstack_rc
return {
"name": username,
"admin": is_admin,
"auth_state": auth_state,
}
async def pre_spawn_start(self, user, spawner):
"""Fill in OpenRC environment variables from user auth state."""
auth_state = await user.get_auth_state()
if not auth_state:
# auth_state not enabled
self.log.error(
"auth_state is not enabled! Cannot set OpenStack RC parameters"
)
return
openrc_vars = auth_state.get(OPENSTACK_RC_AUTH_STATE_KEY, {})
for rc_key, rc_value in openrc_vars.items():
spawner.environment[rc_key] = rc_value
def get_handlers(self, app):
"""Override the default handlers to include a custom logout handler."""
# Override the /logout handler; because our handlers are installed
# first, and the first match wins, our logout handler is preferred,
# which is good, because JupyterLab can only invoke this handler
# when the user wants to log out, currently.
handlers = [
("/logout", LogoutRedirectHandler),
("/auth/refresh", SessionRefreshHandler),
]
handlers.extend(super().get_handlers(app))
return handlers
def _has_keystone_config(self):
return (
self.keystone_auth_url
and self.keystone_identity_provider
and self.keystone_protocol
)
def _get_default_headers(self):
return {
"Accept": "application/json",
"User-Agent": "JupyterHub",
}
def _get_client_credential_headers(self):
headers = self._get_default_headers()
b64key = base64.b64encode(
bytes("{}:{}".format(self.client_id, self.client_secret), "utf8")
)
headers["Authorization"] = "Basic {}".format(b64key.decode("utf8"))
return headers
| ChameleonCloud/jupyterhub-chameleon | jupyterhub_chameleon/authenticator/openstack_oauth.py | openstack_oauth.py | py | 10,242 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "jupyterhub.handlers.login.LogoutHandler",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "jupyterhub.handlers.login.LogoutHandler",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "jupyterhub.utils.url_path_join",
"line_number": 47,
"usag... |
33939740836 | import numpy as np
import matplotlib.pyplot as plt
# Load precomputed 2-D MDS embeddings: one built from Euclidean distances
# between temperature fields, one from Grassmann distances.
tempMDS = np.load("MDS_temperature.npy")
grassmannMDS = np.load("MDS_Grassmann.npy")

# Scatter the first two MDS coordinates of each embedding in its own figure.
plt.figure()
plt.title("MDS plot (from euclidean distances between T fields)")
plt.scatter(tempMDS[:,0],tempMDS[:,1], c = 'b', marker = 'o', s=30)
plt.show()

plt.figure()
plt.title("MDS plot (from Grassmann distances)")
plt.scatter(grassmannMDS[:,0],grassmannMDS[:,1], c = 'b', marker = 'o', s=30)
plt.show()
| siyuanhenpc/HESIYUAN | TP3/04-visu_MDS.py | 04-visu_MDS.py | py | 453 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.load",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"... |
30469737676 | # Find unique
# Given the list of IDs, which contains many duplicate integers and one unique
# integer, find the unique integer.
from functools import reduce
from typing import List
import unittest
def find_unique(arr: List[int]) -> int:
    """Return the single integer that appears only once in arr.

    XOR of all elements cancels every value that occurs an even number of
    times, leaving the unique integer.
    """
    acc = 0
    for value in arr:
        acc ^= value
    return acc
class TestFindUnique(unittest.TestCase):
    """Unit tests for find_unique."""

    def test_find_unique(self):
        # Each case: [input list with exactly one unique element, expected value].
        tests = [
            [[1, 9, 7, 9, 3, 1, 3], 7],
            [[-1, 4, 2, 9, 9, -1, 4], 2]
        ]
        for arr, expected in tests:
            self.assertEqual(find_unique(arr), expected)
if __name__ == "__main__":
unittest.main()
| kchenx/interview-practice | 11-bit-manipulation/find_unique.py | find_unique.py | py | 610 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "functools.reduce",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "unittest.main",
... |
8604062010 | import kivy
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.uix.tabbedpanel import TabbedPanelItem, TabbedPanel
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.checkbox import CheckBox
from kivy.uix.spinner import Spinner
from kivy.core.window import Window
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg
import ast
import json
from kivy.factory import Factory
import os
import matplotlib.pyplot as plt
from definitions import PARAMETERS_VARIATIONS_INFO_DIR, MODEL_TRAININGS_DIR, getModelDir
class TabWithInfo(TabbedPanelItem):
def __init__(self, process, runs_len, **kwargs):
super(TabWithInfo, self).__init__(**kwargs)
self.process = process
self.runs_len = runs_len
self.last_graphs = []
self.param_value_model = None
def updateTab(self, info):
if "Defaults" in info.keys():
self.param_value_model = info["Defaults"].items()
self.showParams()
else:
distance, torque, velocity = info["best_multi_fitness"]
status = " (Running)" if not info["terminate"] else " (Finished)"
self.ids.model_label.text = f"{info['model'][1]}/{self.runs_len}" + status
self.ids.generation_label.text = str(info["generation"])
self.ids.fitness_label.text = str(info["best_fitness"])
self.ids.distance_label.text = str(distance)
self.ids.torque_label.text = str(torque)
self.ids.velocity_label.text = str(velocity)
self.ids.mean_fitness_label.text = str(info["mean_fitness"])
self.ids.time_label.text = "%.2f s" % info["time_elapsed"]
if self.last_graphs is not None:
for graph in self.last_graphs:
plt.close(graph)
self.last_graphs = [info["fitness_graph"], info["individual_graph"], info["pareto_graph"]]
self.showGraphs()
def showParams(self):
self.ids.params_tab.clear_widgets()
for param, value in sorted(self.param_value_model):
self.ids.params_tab.add_widget(Label(text=str(param)))
self.ids.params_tab.add_widget(Label(text=str(value)))
self.ids.params_tab.add_widget(Factory.SmallYSeparator())
self.ids.params_tab.add_widget(Factory.SmallYSeparator())
def showGraphs(self):
if self.last_graphs is not None:
self.ids.general_graphs.clear_widgets()
self.ids.pareto_graph.clear_widgets()
fitness_graph = FigureCanvasKivyAgg(self.last_graphs[0])
individual_graph = FigureCanvasKivyAgg(self.last_graphs[1])
pareto_graph = FigureCanvasKivyAgg(self.last_graphs[2])
self.ids.general_graphs.add_widget(fitness_graph)
fitness_graph.draw()
self.ids.general_graphs.add_widget(individual_graph)
individual_graph.draw()
self.ids.pareto_graph.add_widget(pareto_graph)
pareto_graph.draw()
class MainGrid(GridLayout):
def __init__(self, queue, runs_len, **kwargs):
super(MainGrid, self).__init__(**kwargs)
self.queue = queue
self.runs_len = runs_len
self.cores = []
self.tabs = []
self.cols = 1
self.tabbedPanel = TabbedPanel()
self.tabbedPanel.do_default_tab = False
self.add_widget(self.tabbedPanel)
Clock.schedule_interval(self.update_info, 0.1)
def update_info(self, dt):
if not self.queue.empty():
cores = [tab.process for tab in self.tabs]
element = self.queue.get()
process, model = element["model"]
if process not in cores:
new_tab = TabWithInfo(process, self.runs_len)
new_tab.text = f"Process {len(cores) + 1}"
self.tabs.append(new_tab)
self.tabbedPanel.add_widget(new_tab)
self.tabbedPanel.switch_to(new_tab)
current_tab = list(filter(lambda x: x.process == process, self.tabs))[0]
current_tab.updateTab(element)
class MainWindowGrid(GridLayout):
pass
class InformationWindow(App):
    """Kivy app that displays live training progress fed through a queue."""

    def __init__(self, queue, title, runs_len, **kwargs):
        super(InformationWindow, self).__init__(**kwargs)
        # queue: inter-process channel carrying per-generation progress dicts.
        self.queue = queue
        self.title = 'Model: ' + title
        # runs_len: total number of runs, shown as "current/total" in the UI.
        self.runs_len = runs_len
        # Set when the user closes the window so the caller can stop training.
        self.interrupted = False

    def on_request_close(self, *args):
        # Mark the session as interrupted and stop the app; returning True
        # tells Kivy we handled the close request ourselves.
        self.interrupted = True
        self.stop()
        return True

    def build(self):
        Window.bind(on_request_close=self.on_request_close)
        # Fixed minimum window size matching the designed layout.
        w_width = 1080
        w_height = 607
        Window.minimum_width = w_width
        Window.minimum_height = w_height
        Window.size = (w_width, w_height)
        return MainGrid(self.queue, self.runs_len)
class MainWindow(App):
    """Main configuration window of the Robotic Manipulator app.

    The user picks one action (run all, initialize, profile, render,
    Pareto frontier, tune) and fills in its options; everything is
    collected into ``self.information`` for the caller to read after the
    Kivy event loop stops.  Widget ids (``info_layout``, ``run_button``,
    ...) are assumed to come from the accompanying .kv file -- not
    visible here.
    """

    def __init__(self, default_params, **kwargs):
        super(MainWindow, self).__init__(**kwargs)
        # Run
        self.chosen_option = None  # numeric code of the selected action (1..6)
        self.default_params = default_params
        self.text_inputs = {}  # param name -> TextInput widget shown in the run pane
        self.use_defaults = False
        self.all_combinations = False
        self.continue_tuning = False
        self.run_window = None
        self.cores = 1
        self.repetitions = 1
        # Render
        self.render_window = None
        self.individuals_checkboxes = []
        self.render_model_name = ''
        self.render_run = 0
        self.all_runs = False
        # Tuning
        self.tuning_window = None
        self.tuning_run = None
        self.information = {}
        # Each default value is wrapped in a list so defaults and
        # parameter *variations* share the same {name: [values]} shape.
        for key, value in self.default_params.items():
            self.default_params[key] = [value]
        with open(PARAMETERS_VARIATIONS_INFO_DIR, 'r') as f:
            self.parameters_variations = json.load(f)

    def build(self):
        """Create the root widget defined in the .kv file."""
        self.title = 'Robotic Manipulator'
        return MainWindowGrid()

    def runAll(self):
        """Option 1: full run -- relabel the button and open the run pane."""
        self.chosen_option = 1
        self.root.ids.run_button.text = "Run (Run All)"
        self.root.ids.info_layout.clear_widgets()
        self.runWindow()

    def initializeOnly(self):
        """Option 2: only initialize (no extra options pane)."""
        self.chosen_option = 2
        self.root.ids.run_button.text = "Run (Initialize Only)"
        self.root.ids.info_layout.clear_widgets()

    def profiling(self):
        """Option 3: profiling run."""
        self.chosen_option = 3
        self.root.ids.run_button.text = "Run (Profiling)"
        self.root.ids.info_layout.clear_widgets()

    def render(self):
        """Option 4: render a trained model -- opens the render pane."""
        self.chosen_option = 4
        self.root.ids.run_button.text = "Run (Render)"
        self.root.ids.info_layout.clear_widgets()
        self.renderWindow()

    def findParetoFrontier(self):
        """Option 5: compute the Pareto frontier."""
        self.chosen_option = 5
        self.root.ids.run_button.text = "Run (Find Pareto Frontier)"
        self.root.ids.info_layout.clear_widgets()

    def tuneModel(self):
        """Option 6: inspect tuning results -- opens the tuning pane."""
        self.chosen_option = 6
        self.root.ids.run_button.text = "Close"
        self.root.ids.info_layout.clear_widgets()
        self.tuningWindow()

    def runButton(self):
        """Collect the options of the chosen action into
        ``self.information`` and stop the app so the caller can act."""
        if self.chosen_option == 1:
            # Parse each comma-separated text field into a Python list,
            # e.g. "1, 2" -> [1, 2], and persist the variations to disk.
            self.parameters_variations = {key: ast.literal_eval('[' + t_input.text + ']') for key, t_input in self.text_inputs.items()}
            with open(PARAMETERS_VARIATIONS_INFO_DIR, 'w') as f:
                json.dump(self.parameters_variations, f, indent=4)
            # Non-numeric user input falls back to 1 core / 1 repetition.
            try:
                self.cores = int(self.run_window.ids.cores.text)
            except ValueError:
                self.cores = 1
            try:
                self.repetitions = int(self.run_window.ids.repetitions.text)
            except ValueError:
                self.repetitions = 1
            self.information = {
                'parameters_variations': self.parameters_variations,
                'cores': self.cores,
                'run_name': self.run_window.ids.run_name.text,
                'all_combinations': self.all_combinations,
                'continue_tuning': self.continue_tuning,
                'repetitions': self.repetitions
            }
        elif self.chosen_option == 4:
            self.information = {
                'render_model_name': self.render_model_name,
                'render_run': self.render_run,
                # Indices of the checked individuals only.
                'render_individuals': [i for i, val in enumerate([chbox.active for chbox in self.individuals_checkboxes]) if val],
                'all_runs': self.all_runs
            }
        self.information['final_option'] = self.chosen_option
        self.stop()

    def on_checkbox_active(self, checkbox, value):
        """'Use defaults' toggle: refresh the parameter fields."""
        self.use_defaults = value
        self.showParameters()

    def allRunsCheckBox(self, checkbox, value):
        """'All runs' toggle: disables per-individual selection."""
        self.render_window.ids.individuals_layout.disabled = value
        self.all_runs = value

    def tuneParametersCheckBox(self, checkbox, value):
        self.all_combinations = value

    def continueTuningCheckBox(self, checkbox, value):
        self.continue_tuning = value

    def runWindow(self):
        """Build the run-options pane and show the parameter fields."""
        self.run_window = Factory.RunWindow()
        self.root.ids.info_layout.add_widget(self.run_window)
        self.use_defaults = self.run_window.ids.use_defaults.active
        self.showParameters()

    def showParameters(self):
        """(Re)populate the parameter grid with label + text-input rows,
        either from the defaults or from the saved variations."""
        self.run_window.ids.parameters_layout.clear_widgets()
        the_params = self.default_params if self.use_defaults else self.parameters_variations
        for param, value in sorted(the_params.items()):
            self.text_inputs[param] = TextInput(text=', '.join(map(str, value)), multiline=False)
            self.run_window.ids.parameters_layout.add_widget(Label(text=str(param)))
            self.run_window.ids.parameters_layout.add_widget(self.text_inputs[param])

    def renderWindow(self):
        """Build the render pane and list the trained model directories."""
        self.render_window = Factory.RenderWindow()
        self.root.ids.info_layout.add_widget(self.render_window)
        models = [name for name in os.listdir(MODEL_TRAININGS_DIR) if os.path.isdir(os.path.join(MODEL_TRAININGS_DIR, name))]
        self.render_window.ids.model_selection.values = models

    def tuningWindow(self):
        """Build the tuning-results pane and list the trained models."""
        self.tuning_window = Factory.TuningResultsWindow()
        self.root.ids.info_layout.add_widget(self.tuning_window)
        models = [name for name in os.listdir(MODEL_TRAININGS_DIR) if os.path.isdir(os.path.join(MODEL_TRAININGS_DIR, name))]
        self.tuning_window.ids.tuning_model_selection.values = models

    def selectedModel(self, instance, model):
        """A model was picked for rendering: offer its runs, newest first
        (run count = number of per-run Individuals graph directories)."""
        self.render_model_name = model
        self.render_window.ids.individuals_selection.clear_widgets()
        amount_of_runs = len([name for name in os.listdir(os.path.join(MODEL_TRAININGS_DIR, model, 'Graphs', 'Individuals')) if os.path.isdir(os.path.join(MODEL_TRAININGS_DIR, model, 'Graphs', 'Individuals', name))])
        self.render_window.ids.run_selection.values = list(map(str, sorted(range(amount_of_runs), reverse=True)))

    def selectedRun(self, instance, run):
        """A run was picked: list its best individuals with checkboxes.
        ind[2] is read as the individual's fitness -- assumption from the
        display format; confirm against the model JSON schema."""
        self.render_run = int(run)
        self.render_window.ids.individuals_selection.clear_widgets()
        with open(getModelDir(self.render_model_name)) as f:
            model_data = json.load(f)
        run_data = model_data["Best Individuals"][self.render_run]
        individuals = run_data["Genes"]
        self.individuals_checkboxes = []
        for i, ind in enumerate(individuals):
            self.individuals_checkboxes.append(CheckBox())
            self.render_window.ids.individuals_selection.add_widget(Label(text=f'Individual {i} (fit: %.4f)' % ind[2]))
            self.render_window.ids.individuals_selection.add_widget(self.individuals_checkboxes[i])

    def selectedModelForTuning(self, instance, run):
        self.tuning_run = str(run)

    def selectAllIndividuals(self):
        """Toggle all individual checkboxes: clear when all are set,
        otherwise set them all."""
        if all([chbox.active for chbox in self.individuals_checkboxes]):
            for chbox in self.individuals_checkboxes:
                chbox.active = False
        else:
            for chbox in self.individuals_checkboxes:
                chbox.active = True

    def generateTuningDict(self):
        """Recompute the tuning summary for the selected run and display
        one widget per parameter from the resulting 'best' dict."""
        if self.tuning_window is not None:
            self.tuning_window.ids.tuning_results.clear_widgets()
        if self.tuning_run is None:
            return
        else:
            # Local imports -- presumably to avoid a circular import at
            # module load time; confirm.
            import main
            from definitions import getTuningDict
            import json
            main.findDominantsFromTuning(self.tuning_run)
            with open(getTuningDict(self.tuning_run)) as f:
                tuning_dict = json.load(f)
            for i, (key, val) in enumerate(tuning_dict["best"].items()):
                widg = self.singleTuningInfo(**{"name": key, "values": val})
                self.tuning_window.ids.tuning_results.add_widget(widg)
                # Row height scales with how many value/score pairs it holds.
                self.tuning_window.ids.tuning_results.rows_minimum[i] = self.tuning_window.ids.tuning_scroll_view.height * len(val) * 0.06

    def singleTuningInfo(self, **kwargs):
        """Build one result row: the parameter name next to a two-column
        grid of (value, score) pairs, framed by separators.

        Expects kwargs["name"] (str) and kwargs["values"] (iterable of
        (value, score) pairs -- score formatted to 4 decimals).
        """
        top_layout = GridLayout()
        top_layout.cols = 1
        top_layout.add_widget(Factory.SeparatorY())
        inside_layout = GridLayout()
        inside_layout.cols = 2
        param_name = Label()
        param_name.text = kwargs["name"]
        param_values = GridLayout()
        param_values.cols = 2
        for val in kwargs["values"]:
            l_1 = Label()
            l_1.text = str(val[0])
            l_2 = Label()
            l_2.text = "%.4f" % val[1]
            param_values.add_widget(l_1)
            param_values.add_widget(l_2)
        inside_layout.add_widget(param_name)
        inside_layout.add_widget(param_values)
        top_layout.add_widget(inside_layout)
        top_layout.add_widget(Factory.SeparatorY())
        return top_layout
def runInfoDisplay(queue, title, event, runs_len):
    """Run the (blocking) information window; if the user closed it
    early, signal *event* so worker processes can shut down."""
    window = InformationWindow(queue=queue, title=title, runs_len=runs_len)
    window.run()
    user_interrupted = window.interrupted
    if user_interrupted and event is not None:
        event.set()
def runMainWindow(default_params):
    """Run the main configuration window (blocking) and return the
    ``information`` dict it collected before closing."""
    window = MainWindow(default_params)
    window.run()
    return window.information
| jurrutiag/Robotic-Manipulator | InfoDisplay/InformationWindow.py | InformationWindow.py | py | 14,142 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "kivy.uix.tabbedpanel.TabbedPanelItem",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"ap... |
72367971553 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 10 20:16:54 2020
Various preprocessing functions
@author: kfinity
"""
import pandas as pd
import numpy as np
import json
import nltk
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api._errors import TranscriptsDisabled, NoTranscriptFound, VideoUnavailable
import os
import googleapiclient.discovery
import googleapiclient.errors
from google.oauth2 import service_account
# Given the full JSON contents of our "speeches.json" file, return a dataframe
def create_bow(json):
    """Flatten the speeches JSON into a DataFrame, one row per speech.

    Entries without captions are skipped.  The result is indexed by
    video id, de-duplicated on id, and sorted by publication date.

    :param json: parsed contents of "speeches.json" (list of API result
        pages, each with an 'items' list).  NOTE: the parameter shadows
        the stdlib ``json`` module inside this function.
    :return: DataFrame with columns speaker/date/speech/title/
        transcript_type (plus the 'index' column left by reset_index).
    """
    json = tag_interviews(json)
    # Accumulate plain dicts and build the frame once: DataFrame.append
    # was removed in pandas 2.0 and was O(n^2) anyway.
    rows = []
    for entry in json:  # for each API result page...
        for item in entry['items']:  # for each speech...
            captions = item.get('captions')
            if captions is not None:
                rows.append({
                    'id': item['id'],
                    'speaker': item['candidate'],
                    'date': item['snippet']['publishedAt'],
                    'speech': ' '.join(c['text'] for c in captions),
                    'title': item['snippet']['title'],
                    'transcript_type': item['type'],
                })
    df = pd.DataFrame(rows, columns=['id', 'speaker', 'date', 'speech', 'title', 'transcript_type'])
    # drop speeches with no transcript (defensive; caption-less entries
    # were never appended above)
    df = df.dropna()
    # drop any duplicate videoIds
    df = df.reset_index().drop_duplicates(subset=['id'])\
        .sort_values('date').set_index('id')
    return df
# Given a dataframe, break up the speech into tokens
def tokenize(df, remove_pos_tuple=False):
    """Explode each speech into one POS-tagged token per row.

    :param df: DataFrame from create_bow (indexed by id, with 'speaker'
        and 'speech' columns).
    :param remove_pos_tuple: drop the intermediate (token, tag) tuple
        column when True.
    :return: DataFrame indexed by (id, speaker, token_id) with columns
        pos / token_str / term_str (and pos_tuple unless removed).
    """
    df = df.reset_index().set_index(['id', 'speaker'])
    # One pd.Series per speech -> stack() gives one row per token.
    df = df.speech\
        .apply(lambda x: pd.Series(nltk.pos_tag(nltk.WhitespaceTokenizer().tokenize(x))))\
        .stack()\
        .to_frame()\
        .rename(columns={0: 'pos_tuple'})
    # Grab info from tuple
    df['pos'] = df.pos_tuple.apply(lambda x: x[1])
    df['token_str'] = df.pos_tuple.apply(lambda x: x[0])
    if remove_pos_tuple:
        # keyword form: positional `axis` was removed in pandas 2.0
        df = df.drop(columns='pos_tuple')
    # Normalized term: lowercase, all non-word characters stripped.
    # regex=True must be explicit -- pandas >= 2.0 defaults to literal
    # replacement, which would silently change behavior.
    df['term_str'] = df['token_str'].str.lower().str.replace(r'[\W_]', '', regex=True)
    df.index.names = ['id', 'speaker', 'token_id']
    return df
# Create a TFIDF table
def create_tfidf(token, bag, count_method='n', tf_method='sum', idf_method='standard', tf_norm_k=0.5):
    """Build a TFIDF matrix (documents x terms) from a token table.

    :param token: DataFrame with a 'term_id' column plus the columns
        named in *bag*.
    :param bag: list of column names that define a "document".
    :param count_method: 'n' raw counts or 'c' binary presence.
    :param tf_method: 'sum' | 'max' | 'log' | 'raw' | 'double_norm' | 'binary'.
    :param idf_method: 'standard' | 'max' | 'smooth'.
    :param tf_norm_k: smoothing constant K for double normalization
        (0.5 is the conventional default).  New parameter with a default,
        so existing callers are unaffected.
    :return: DataFrame of TFIDF weights indexed by *bag*, columns term_id.
    """
    BOW = token.groupby(bag + ['term_id']).term_id.count().to_frame().rename(columns={'term_id': 'n'})
    BOW['c'] = BOW.n.astype('bool').astype('int')
    # Document-term count matrix.
    DTCM = BOW[count_method].unstack().fillna(0).astype('int')
    if tf_method == 'sum':
        TF = DTCM.T / DTCM.T.sum()
    elif tf_method == 'max':
        TF = DTCM.T / DTCM.T.max()
    elif tf_method == 'log':
        TF = np.log10(1 + DTCM.T)
    elif tf_method == 'raw':
        TF = DTCM.T
    elif tf_method == 'double_norm':
        TF = DTCM.T / DTCM.T.max()
        # BUG FIX: tf_norm_k was referenced but never defined (NameError);
        # it is now a parameter.  May defeat purpose of norming.
        TF = tf_norm_k + (1 - tf_norm_k) * TF[TF > 0]
    elif tf_method == 'binary':
        TF = DTCM.T.astype('bool').astype('int')
    TF = TF.T
    DF = DTCM[DTCM > 0].count()
    N = DTCM.shape[0]
    if idf_method == 'standard':
        IDF = np.log10(N / DF)
    elif idf_method == 'max':
        # BUG FIX: this branch tested the undefined name `idf`, so
        # idf_method='max' always raised NameError.
        IDF = np.log10(DF.max() / DF)
    elif idf_method == 'smooth':
        IDF = np.log10((1 + N) / (1 + DF)) + 1
    TFIDF = TF * IDF
    return TFIDF
#Identify which speeches are interviews
def find_interviews(json):
    """Return the video ids whose title contains 'interview'
    (case-insensitive), in encounter order."""
    return [
        item['id']
        for entry in json
        for item in entry['items']
        if 'interview' in item['snippet']['title'].lower()
    ]
def tag_interviews(json):
    """Label every video entry in place: type 'interview' when its title
    matches, otherwise 'speech'.  Returns the same structure."""
    interview_ids = set(find_interviews(json))
    for entry in json:
        for item in entry['items']:
            item['type'] = 'interview' if item['id'] in interview_ids else 'speech'
    return json
def remove_speech(videoID, json):
    """Delete (in place) the first speech entry whose id matches
    *videoID*; a no-op when no entry matches."""
    found = False
    for entry in json:
        for idx, item in enumerate(entry['items']):
            if item['id'] == videoID:
                del entry['items'][idx]
                found = True
                break
        if found:
            break
    print("Don't forget to save your new json!")
# download captions and youtube details for a single videoId
# match the json structure from "create_records.py"
def build_video_details(videoId, candidate):
    """Fetch YouTube metadata and captions for one video, shaped like
    the records produced by "create_records.py".

    :param videoId: YouTube video id.
    :param candidate: candidate name to attach to the record.
    :return: the API response dict with 'candidate' and 'captions'
        (None when no transcript is available) added to items[0].
    """
    captions = None
    # BUG FIX: the original wrapped this setup in
    # `if 'youtube' not in locals():`, which is always true at function
    # entry (and, had it ever been false, `youtube` would have been
    # unbound below), so the client is simply built unconditionally.
    SCOPES = ["https://www.googleapis.com/auth/youtube.readonly"]
    os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
    api_service_name = "youtube"
    api_version = "v3"
    service_account_file = "service_account.json"
    # Get credentials and create an API client
    credentials = service_account.Credentials.from_service_account_file(
        service_account_file, scopes=SCOPES)
    youtube = googleapiclient.discovery.build(
        api_service_name, api_version, credentials=credentials)
    request = youtube.videos().list(
        part="snippet,contentDetails,statistics",
        id=videoId
    )
    response = request.execute()
    try:
        captions = YouTubeTranscriptApi.get_transcript(videoId)
    except TranscriptsDisabled:
        print("Transcripts disabled for video: %s" % videoId)
    except NoTranscriptFound:
        print("No transcript found for video: %s" % videoId)
    except VideoUnavailable:
        print("Video no longer available: %s" % videoId)
    response['items'][0]['candidate'] = candidate
    response['items'][0]['captions'] = captions
    return response
# add a single speech to the json
def add_speech(videoId, candidate, json):
    """Download one speech's details and append them to the records,
    unless the video id is already present.

    Entries whose video has no captions are fetched but not appended.
    """
    for entry in json:
        for item in entry['items']:
            if item['id'] == videoId:
                # BUG FIX: the original printed the placeholder literally
                # (no .format) and then evaluated the bare name `exit`
                # (a no-op), so duplicates fell through and were
                # re-downloaded and appended anyway.
                print("videoID {} already exists in speeches json".format(videoId))
                return
    # is a new speech, can add it...
    resp = build_video_details(videoId, candidate)
    if resp['items'][0]['captions'] is not None:
        json.append(resp)
| kfinity/capstone-speeches | preproc.py | preproc.py | py | 6,682 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "nltk.pos_tag",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "nltk.WhitespaceTokenizer"... |
5562710262 | """Gateway Class Module"""
from typing import Dict
import internal_schema
import stores.chirpstack.base
import stores.chirpstack.gateway_profile
import stores.chirpstack.network_server
import stores.chirpstack.organization
import stores.chirpstack.service_profile
import exception
class Gateway(stores.chirpstack.base.IdBasedResource[internal_schema.Gateway]):
    """Implementation of Gateway: converts between the spreadsheet-style
    ``internal_schema.Gateway`` records and the ChirpStack REST payload,
    resolving human-readable names to ChirpStack ids and back."""
    _RESOURCE_NAME_IN_URL = "gateways"
    _RESOURCE_NAME_IN_REQ = "gateway"

    @classmethod
    def _excel_to_rest_format(cls, item: internal_schema.Gateway) -> Dict:
        """Turn an internal Gateway record into a REST payload: replace
        the *name* fields (gatewayProfile, networkServer, organization,
        serviceProfile) with their ChirpStack *ID* counterparts."""
        payload = item.dict()
        gw_store = stores.chirpstack.gateway_profile.GatewayProfile()
        gw_id = gw_store.get_id_by_name(item.gatewayProfile)
        ns_store = stores.chirpstack.network_server.NetworkServer()
        ns_id = ns_store.get_id_by_name(item.networkServer)
        org_store = stores.chirpstack.organization.Organization()
        org_id = org_store.get_id_by_name(item.organization)
        svc_store = stores.chirpstack.service_profile.ServiceProfile()
        svc_id = svc_store.get_id_by_name(item.serviceProfile)
        payload.update(
            {
                "gatewayProfileID": gw_id,
                "networkServerID": ns_id,
                "organizationID": org_id,
                "serviceProfileID": svc_id,
            }
        )
        # No location data in the internal schema; send an empty object.
        payload["location"] = {}
        # The name-based fields were replaced by the *ID fields above.
        payload.pop("gatewayProfile")
        payload.pop("networkServer")
        payload.pop("organization")
        payload.pop("serviceProfile")
        return payload

    def _rest_to_excel_format(self, item: Dict) -> internal_schema.Gateway:
        """Inverse of _excel_to_rest_format: look up every referenced id
        and put the human-readable names back on the record.

        :raises exception.ResourceNotFoundError: when any referenced
            resource id cannot be resolved.
        """
        gw = stores.chirpstack.gateway_profile.GatewayProfile().read_by_id(
            item["gatewayProfileID"]
        )
        ns = stores.chirpstack.network_server.NetworkServer().read_by_id(
            item["networkServerID"]
        )
        org = stores.chirpstack.organization.Organization().read_by_id(
            item["organizationID"]
        )
        svc = stores.chirpstack.service_profile.ServiceProfile().read_by_id(
            item["serviceProfileID"]
        )
        if gw is None:
            raise exception.ResourceNotFoundError("Gateway Profile not found")
        if ns is None:
            raise exception.ResourceNotFoundError("Network Server not found")
        if org is None:
            raise exception.ResourceNotFoundError("Organization not found")
        if svc is None:
            raise exception.ResourceNotFoundError("Service Profile not found")
        item.update(
            {
                "gatewayProfile": gw.name,
                "networkServer": ns.name,
                "organization": org.name,
                "serviceProfile": svc.name,
            }
        )
        return internal_schema.Gateway(**item)
| williamlun/fastapi_playground | keycloak_import/src/stores/chirpstack/gateway.py | gateway.py | py | 2,806 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "stores.chirpstack.base.chirpstack",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "stores.chirpstack.base",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "internal_schema.Gateway",
"line_number": 13,
"usage_type": "attribute"
},... |
9526298834 | """
@author: Aashis Khanal
@email: sraashis@gmail.com
"""
from collections import OrderedDict as _ODict
import os as _os
import torch as _torch
import coinstac_sparse_dinunet.config as _conf
import coinstac_sparse_dinunet.metrics as _base_metrics
import coinstac_sparse_dinunet.utils as _utils
import coinstac_sparse_dinunet.utils.tensorutils as _tu
import coinstac_sparse_dinunet.vision.plotter as _plot
from coinstac_sparse_dinunet.config.keys import *
from coinstac_sparse_dinunet.utils import stop_training_
from coinstac_sparse_dinunet.utils.logger import *
import torch
import torch as _torch
import copy
import torch.nn.functional as F
import torch.nn as nn
import types
class NNTrainer:
    """Base trainer for COINSTAC sparse dinunet.

    Holds the models (``self.nn``), optimizers and devices in ordered
    dicts, and implements local training / evaluation loops plus
    SNIP-style mask generation.  Subclasses must implement
    ``_init_nn_model`` and ``iteration``.
    """

    def __init__(self, data_handle=None, **kw):
        self.cache = data_handle.cache
        # input/state are wrapped read-only (FrozenDict).
        self.input = _utils.FrozenDict(data_handle.input)
        self.state = _utils.FrozenDict(data_handle.state)
        self.nn = _ODict()
        self.device = _ODict()
        self.optimizer = _ODict()
        self.data_handle = data_handle

    def _init_nn_model(self):
        r"""
        User can override and initialize required models in self.nn dict.
        """
        raise NotImplementedError('Must be implemented in child class.')

    def _init_nn_weights(self, **kw):
        r"""
        By default, will initialize network with Kaimming initialization.
        If path to pretrained weights are given, it will be used instead.
        """
        if self.cache.get('pretrained_path') is not None:
            self.load_checkpoint(self.cache['pretrained_path'])
        elif self.cache['mode'] == Mode.TRAIN:
            # Seed before init so runs are reproducible.
            _torch.manual_seed(self.cache['seed'])
            for mk in self.nn:
                _tu.initialize_weights(self.nn[mk])

    def _init_optimizer(self):
        r"""
        Initialize required optimizers here. Default is Adam,
        applied to the first registered model only.
        """
        first_model = list(self.nn.keys())[0]
        self.optimizer['adam'] = _torch.optim.Adam(self.nn[first_model].parameters(),
                                                   lr=self.cache['learning_rate'])

    def init_nn(self, init_model=False, init_optim=False, set_devices=False, init_weights=False):
        # Orchestrates the individual init steps; each is opt-in.
        if init_model: self._init_nn_model()
        if init_optim: self._init_optimizer()
        if init_weights: self._init_nn_weights(init_weights=init_weights)
        if set_devices: self._set_gpus()

    def snip_forward_linear(self, params, x):
        # Forward pass with the SNIP weight_mask applied multiplicatively.
        return F.linear(x, params.weight * params.weight_mask, params.bias)

    def snip_forward_conv2d(self, params, x):
        # Conv2d forward with the SNIP weight_mask applied multiplicatively.
        return F.conv2d(x, params.weight * params.weight_mask, params.bias,
                        params.stride, params.padding, params.dilation, params.groups)

    def apply_mask_to_model(self, model, mask):
        """Zero out masked weights, pairing masks with the prunable
        (Conv2d/Linear) layers in module order.  Biases are untouched."""
        prunable_layers = filter(
            lambda layer: isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear),
            model.modules()
        )
        for layer, keep_mask in zip(prunable_layers, mask):
            assert (layer.weight.shape == keep_mask.shape)
            # Set the masked weights to zero (NB the biases are ignored)
            layer.weight.data[keep_mask == 0.] = 0.
        return model

    @staticmethod
    def _forward_pre_hook(module, x):
        # Re-applies the mask before every forward so pruned weights stay
        # zero even after optimizer updates.
        module.mask.requires_grad_(False)
        mask = module.mask
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        module.weight.data.mul_(mask.to(module.weight.to(device)))

    def register_pre_hook_mask(self, masks=None):
        """Attach one mask per prunable layer (module order across all
        models) and register the masking forward pre-hook."""
        masks_count = 0
        if masks is not None:
            print("Registering Mask!")
        # NOTE(review): this assert fires whenever masks is None.
        assert masks is not None, 'Masks should be generated first.'
        for model_key in self.nn:
            for name, module in self.nn[model_key].named_modules():
                if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
                    module.mask = nn.Parameter(masks[masks_count]).requires_grad_(False).to(
                        module.weight.to(self.device['gpu']))
                    masks_count += 1
                    module.register_forward_pre_hook(self._forward_pre_hook)

    # def apply_mask_to_model(self, model, mask):
    #     prunable_layers = filter(
    #         lambda layer: isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear),
    #         model.modules()
    #     )
    #     # print(torch.sum(torch.cat([torch.flatten(x == 1) for x in mask])))
    #     total_non_zero = 0
    #     for layer, keep_mask in zip(prunable_layers, mask):
    #         assert (layer.weight.shape == keep_mask.shape)
    #
    #         def hook_factory(keep_mask):
    #             """
    #             The hook function can't be defined directly here because of Python's
    #             late binding which would result in all hooks getting the very last
    #             mask! Getting it through another function forces early binding.
    #             """
    #
    #             def hook(grads):
    #                 return grads * keep_mask
    #
    #             return hook
    #
    #         # Step 1: Set the masked weights to zero (NB the biases are ignored)
    #         # Step 2: Make sure their gradients remain zero
    #         layer.weight.data[keep_mask == 0.] = 0.
    #         total_non_zero += torch.sum(torch.cat([torch.flatten(x != 0.) for x in layer.weight.data]))
    #         layer.weight.register_hook(hook_factory(keep_mask))
    #
    #     return model

    def apply_snip_pruning(self, dataset_cls):
        """SNIP-style one-shot pruning: score connections by the gradient
        of a per-weight mask on a single mini-batch, keep the top
        fraction, and zero the rest in the live model(s).

        Returns the keep-masks (of the last model, when several exist).
        """
        train_dataset = self.data_handle.get_train_dataset_for_masking(dataset_cls=dataset_cls)
        loader = self.data_handle.get_loader('train', dataset=train_dataset, drop_last=True, shuffle=True)
        mini_batch = next(iter(loader))  # inputs, labels, ix
        # inputs, labels = mini_batch['inputs'], mini_batch['labels']
        for model_key in self.nn:
            # Score on a deep copy so the live model's weights survive
            # the re-initialization below.
            net = copy.deepcopy(self.nn[model_key])
            device = next(iter(self.nn[model_key].parameters())).device
            for layer in net.modules():
                if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
                    layer.weight_mask = nn.Parameter(_torch.ones_like(layer.weight)).to(device)
                    nn.init.xavier_normal(layer.weight)
                    # Only the mask receives gradients, not the weights.
                    layer.weight.requires_grad = False
                    if isinstance(layer, nn.Linear):
                        layer.forward = types.MethodType(self.snip_forward_linear, layer)
                    if isinstance(layer, nn.Conv2d):
                        layer.forward = types.MethodType(self.snip_forward_conv2d, layer)
            net.to(device)
            net.zero_grad()
            it = self.single_iteration_for_masking(net, mini_batch)
            # it['sparsity_level'] is read as the fraction to prune;
            # keep-fraction = |1 - sparsity| -- confirm convention.
            sparsity_level = abs(1 - it['sparsity_level'])
            it['loss'].backward()
            grads_abs = []
            for layer in net.modules():
                if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
                    grads_abs.append(torch.abs(layer.weight_mask.grad))
            # Gather all scores in a single vector and normalise
            all_scores = torch.cat([torch.flatten(x) for x in grads_abs])
            norm_factor = torch.sum(all_scores)
            all_scores.div_(norm_factor)
            num_params_to_keep = int(len(all_scores) * sparsity_level)
            threshold, _ = torch.topk(all_scores, num_params_to_keep, sorted=True)
            acceptable_score = threshold[-1]
            keep_masks = []
            for g in grads_abs:
                keep_masks.append(((g / norm_factor) >= acceptable_score).float())
            # initialize_mult = []
            # for i in range(len(grads_abs)):
            #     initialize_mult.append(grads_abs[i] / norm_factor)
            self.nn[model_key] = self.apply_mask_to_model(self.nn[model_key], keep_masks)
        return keep_masks

    def _set_gpus(self):
        """Pick the compute device from cache['gpus'] (CPU fallback) and
        move every model there; wrap in DataParallel for multi-GPU."""
        self.device['gpu'] = _torch.device("cpu")
        if self.cache.get('gpus') is not None and len(self.cache['gpus']) > 0:
            if _conf.CUDA_AVAILABLE:
                self.device['gpu'] = _torch.device(f"cuda:{self.cache['gpus'][0]}")
                if len(self.cache['gpus']) >= 2:
                    for mkey in self.nn:
                        self.nn[mkey] = _torch.nn.DataParallel(self.nn[mkey], self.cache['gpus'])
            else:
                pass
                # raise Exception(f'*** GPU not detected in {self.state["clientId"]}. ***')
        for model_key in self.nn:
            self.nn[model_key] = self.nn[model_key].to(self.device['gpu'])

    def load_checkpoint(self, file_path):
        """Load weights (and, for COINSTAC checkpoints, optimizer state).

        The try/except pairs handle both DataParallel-wrapped (.module)
        and plain models; non-COINSTAC files are treated as a bare
        state_dict for the first model.
        """
        try:
            chk = _torch.load(file_path)
        except:
            # e.g. CUDA checkpoint loaded on a CPU-only machine.
            chk = _torch.load(file_path, map_location='cpu')
        if chk.get('source', 'Unknown').lower() == 'coinstac':
            for m in chk['models']:
                try:
                    self.nn[m].module.load_state_dict(chk['models'][m])
                except:
                    self.nn[m].load_state_dict(chk['models'][m])
            for m in chk['optimizers']:
                try:
                    self.optimizer[m].module.load_state_dict(chk['optimizers'][m])
                except:
                    self.optimizer[m].load_state_dict(chk['optimizers'][m])
        else:
            mkey = list(self.nn.keys())[0]
            try:
                self.nn[mkey].module.load_state_dict(chk)
            except:
                self.nn[mkey].load_state_dict(chk)

    def save_checkpoint(self, file_path, src='coinstac'):
        """Save all model/optimizer state dicts under a tagged dict."""
        checkpoint = {'source': src}
        # NOTE(review): the 'models' (and 'optimizers') sub-dict is
        # re-created on every loop iteration, so with more than one
        # model/optimizer only the last one survives -- confirm intended.
        for k in self.nn:
            checkpoint['models'] = {}
            try:
                checkpoint['models'][k] = self.nn[k].module.state_dict()
            except:
                checkpoint['models'][k] = self.nn[k].state_dict()
        for k in self.optimizer:
            checkpoint['optimizers'] = {}
            try:
                checkpoint['optimizers'][k] = self.optimizer[k].module.state_dict()
            except:
                checkpoint['optimizers'][k] = self.optimizer[k].state_dict()
        _torch.save(checkpoint, file_path)

    def evaluation(self, mode='eval', dataset_list=None, save_pred=False, use_padded_sampler=False):
        """Run inference over each dataset in dataset_list (grad-free)
        and return accumulated (averages, metrics)."""
        for k in self.nn:
            self.nn[k].eval()
        eval_avg, eval_metrics = self.new_averages(), self.new_metrics()
        eval_loaders = []
        for d in dataset_list:
            if d and len(d) > 0:
                eval_loaders.append(
                    self.data_handle.get_loader(handle_key=mode, dataset=d, shuffle=False,
                                                use_padded_sampler=use_padded_sampler)
                )

        def _update_scores(_out, _it, _avg, _metrics):
            # save_predictions may override the iteration's scores.
            if _out is None:
                _out = {}
            _avg.accumulate(_out.get('averages', _it['averages']))
            _metrics.accumulate(_out.get('metrics', _it['metrics']))

        with _torch.no_grad():
            for loader in eval_loaders:
                its = []
                metrics = self.new_metrics()
                avg = self.new_averages()
                for i, batch in enumerate(loader, 1):
                    it = self.iteration(batch)
                    if save_pred:
                        if self.cache['load_sparse']:
                            # Sparse loading: save once for the whole
                            # dataset after reducing all iterations.
                            its.append(it)
                        else:
                            _update_scores(self.save_predictions(loader.dataset, it), it, avg, metrics)
                    else:
                        _update_scores(None, it, avg, metrics)
                    if self.cache['verbose'] and len(eval_loaders) <= 1 and lazy_debug(i):
                        info(
                            f" Itr:{i}/{len(loader)}, "
                            f"Averages:{it.get('averages').get()}, Metrics:{it.get('metrics').get()}"
                        )
                if save_pred and self.cache['load_sparse']:
                    its = self.reduce_iteration(its)
                    _update_scores(self.save_predictions(loader.dataset, its), its, avg, metrics)
                if self.cache['verbose'] and len(eval_loaders) > 1:
                    info(f" {mode}, {avg.get()}, {metrics.get()}")
                eval_metrics.accumulate(metrics)
                eval_avg.accumulate(avg)
        info(f"{mode} metrics: {eval_avg.get()}, {eval_metrics.get()}", self.cache.get('verbose'))
        return eval_avg, eval_metrics

    def training_iteration_local(self, i, batch):
        r"""
        Learning step for one batch.
        We decoupled it so that user could implement any complex/multi/alternate training strategies.
        Gradients accumulate; the (first) optimizer only steps every
        'local_iterations' batches.
        """
        it = self.iteration(batch)
        it['loss'].backward()
        if i % self.cache.get('local_iterations', 1) == 0:
            first_optim = list(self.optimizer.keys())[0]
            self.optimizer[first_optim].step()
            self.optimizer[first_optim].zero_grad()
        return it

    def init_training_cache(self):
        # Fresh logs and a best-score sentinel whose sense follows
        # cache['metric_direction'].
        self.cache[Key.TRAIN_LOG] = []
        self.cache[Key.VALIDATION_LOG] = []
        self.cache['best_val_epoch'] = 0
        self.cache.update(best_val_score=0.0 if self.cache['metric_direction'] == 'maximize' else _conf.max_size)

    def train_local(self, train_dataset, val_dataset):
        """Local (non-federated) training loop with periodic validation,
        best-checkpointing, progress plots and early stopping."""
        out = {}
        if not isinstance(val_dataset, list):
            val_dataset = [val_dataset]
        loader = self.data_handle.get_loader('train', dataset=train_dataset, drop_last=True, shuffle=True)
        local_iter = self.cache.get('local_iterations', 1)
        tot_iter = len(loader) // local_iter
        for ep in range(1, self.cache['epochs'] + 1):
            for k in self.nn:
                self.nn[k].train()
            _metrics, _avg = self.new_metrics(), self.new_averages()
            ep_avg, ep_metrics, its = self.new_averages(), self.new_metrics(), []
            for i, batch in enumerate(loader, 1):
                its.append(self.training_iteration_local(i, batch))
                # Every local_iter batches, fold the collected iterations
                # into the running epoch/log accumulators.
                if i % local_iter == 0:
                    it = self.reduce_iteration(its)
                    ep_avg.accumulate(it['averages']), ep_metrics.accumulate(it['metrics'])
                    _avg.accumulate(it['averages']), _metrics.accumulate(it['metrics'])
                    _i, its = i // local_iter, []
                    if lazy_debug(_i) or _i == tot_iter:
                        info(f"Ep:{ep}/{self.cache['epochs']},Itr:{_i}/{tot_iter},{_avg.get()},{_metrics.get()}",
                             self.cache.get('verbose'))
                        self.cache[Key.TRAIN_LOG].append([*_avg.get(), *_metrics.get()])
                        _metrics.reset(), _avg.reset()
                    self.on_iteration_end(i=_i, ep=ep, it=it)
            if val_dataset and ep % self.cache.get('validation_epochs', 1) == 0:
                info('--- Validation ---', self.cache.get('verbose'))
                val_averages, val_metric = self.evaluation(mode='validation', dataset_list=val_dataset,
                                                           use_padded_sampler=True)
                self.cache[Key.VALIDATION_LOG].append([*val_averages.get(), *val_metric.get()])
                out.update(**self._save_if_better(ep, val_metric))
                self._on_epoch_end(ep=ep, ep_averages=ep_avg, ep_metrics=ep_metrics,
                                   val_averages=val_averages, val_metrics=val_metric)
                if lazy_debug(ep):
                    self._save_progress(self.cache, epoch=ep)
                if self._stop_early(ep, val_metric, val_averages=val_averages,
                                    epoch_averages=ep_avg, epoch_metrics=ep_metrics):
                    break
        self._save_progress(self.cache, epoch=ep)
        _utils.save_cache(self.cache, self.cache['log_dir'])
        return out

    def iteration(self, batch):
        r"""
        Left for user to implement one mini-bath iteration:
        Example:{
                    inputs = batch['input'].to(self.device['gpu']).float()
                    labels = batch['label'].to(self.device['gpu']).long()
                    out = self.nn['model'](inputs)
                    loss = F.cross_entropy(out, labels)
                    out = F.softmax(out, 1)
                    _, pred = torch.max(out, 1)
                    sc = self.new_metrics()
                    sc.add(pred, labels)
                    avg = self.new_averages()
                    avg.add(loss.item(), len(inputs))
                    return {'loss': loss, 'averages': avg, 'output': out, 'metrics': sc, 'predictions': pred}
                }
        Note: loss, averages, and metrics are required, whereas others are optional
            -we will have to do backward on loss
            -we need to keep track of loss
            -we need to keep track of metrics
        """
        return {}

    def single_iteration_for_masking(self, model, batch):
        r"""
        Left for user to implement one mini-batch iteration used during
        SNIP mask generation (must also return 'sparsity_level'):
        Example:{
                    inputs = batch['input'].to(self.device['gpu']).float()
                    labels = batch['label'].to(self.device['gpu']).long()
                    out = model(inputs)
                    loss = F.cross_entropy(out, labels)
                    out = F.softmax(out, 1)
                    _, pred = torch.max(out, 1)
                    sc = self.new_metrics()
                    sc.add(pred, labels)
                    avg = self.new_averages()
                    avg.add(loss.item(), len(inputs))
                    return {'loss': loss, 'averages': avg, 'output': out, 'metrics': sc, 'predictions': pred}
                }
        Note: loss, averages, and metrics are required, whereas others are optional
            -we will have to do backward on loss
            -we need to keep track of loss
            -we need to keep track of metrics
        """
        return {}

    def save_predictions(self, dataset, its):
        # Optional hook: persist predictions during evaluation.
        pass

    def reduce_iteration(self, its):
        """Merge a list of iteration dicts into one: averages/metrics are
        accumulated; every other value becomes a lazy ``collect``
        callable that concatenates leaf tensors (or returns a list)."""
        reduced = {}.fromkeys(its[0].keys(), None)
        for key in reduced:
            if isinstance(its[0][key], _base_metrics.COINNAverages):
                reduced[key] = self.new_averages()
                [reduced[key].accumulate(ik[key]) for ik in its]
            elif isinstance(its[0][key], _base_metrics.COINNMetrics):
                reduced[key] = self.new_metrics()
                [reduced[key].accumulate(ik[key]) for ik in its]
            else:
                def collect(k=key, src=its):
                    _data = []
                    # Only concatenate detached leaf tensors.
                    is_tensor = isinstance(src[0][k], _torch.Tensor)
                    is_tensor = is_tensor and not src[0][k].requires_grad and src[0][k].is_leaf
                    for ik in src:
                        if is_tensor:
                            # 0-dim tensors need a batch dim before cat.
                            _data.append(ik[k] if len(ik[k].shape) > 0 else ik[k].unsqueeze(0))
                        else:
                            _data.append(ik[k])
                    if is_tensor:
                        return _torch.cat(_data)
                    return _data
                reduced[key] = collect
        return reduced

    def _save_if_better(self, epoch, val_metrics):
        # Hook: checkpoint when validation improves; default no-op.
        return {}

    def new_metrics(self):
        # Factory for the metrics accumulator; override for task metrics.
        return _base_metrics.COINNMetrics()

    def new_averages(self):
        # Factory for the (single-value) loss averages accumulator.
        return _base_metrics.COINNAverages(num_averages=1)

    def _on_epoch_end(self, ep, **kw):
        r"""
        Any logic to run after an epoch ends.
        """
        return {}

    def on_iteration_end(self, i, ep, it):
        r"""
        Any logic to run after an iteration ends.
        """
        return {}

    def _save_progress(self, cache, epoch):
        # Plot training and validation curves; the validation x-axis is
        # scaled down by the validation interval.
        _plot.plot_progress(cache, self.cache['log_dir'], plot_keys=[Key.TRAIN_LOG], epoch=epoch)
        _plot.plot_progress(cache, self.cache['log_dir'], plot_keys=[Key.VALIDATION_LOG],
                            epoch=epoch // self.cache['validation_epochs'])

    def _stop_early(self, epoch, val_metrics=None, **kw):
        # Delegates to the shared early-stopping helper.
        return stop_training_(epoch, self.cache)
| bishalth01/coinstac_sparse_dinunet | coinstac_sparse_dinunet/nn/basetrainer.py | basetrainer.py | py | 20,152 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "coinstac_sparse_dinunet.utils.FrozenDict",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "coinstac_sparse_dinunet.utils",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "coinstac_sparse_dinunet.utils.FrozenDict",
"line_number": 30,
"usa... |
5264072017 | #! /usr/bin/env python
# coding:utf-8
# Author qingniao
from logging.handlers import RotatingFileHandler
from logging import getLogger, ERROR
from os.path import join, isdir
from sys import stdout
from os import mkdir
def log_out(conf):
    """Configure the root logger from a config object.

    When ``conf.log_file`` is a string, logs rotate in
    ``conf.log_path/conf.log_file`` (capped at ``conf.log_maxsize``);
    otherwise ``conf.log_output`` is attached directly (assumed to be a
    logging.Handler instance -- TODO confirm).  ``conf.log_debug`` adds
    a stdout handler; level falls back to ERROR.

    :return: False when logging is disabled, otherwise None.
    """
    try:
        if not conf.log:
            return False
        Log = getLogger()
        if isinstance(conf.log_file, str):
            if not isdir(conf.log_path):
                mkdir(conf.log_path)
            path = join(conf.log_path, conf.log_file)
            f = RotatingFileHandler(path, mode='a+', maxBytes=conf.log_maxsize)
            Log.addHandler(f)
        else:
            Log.addHandler(conf.log_output)
        if conf.log_debug:
            # BUG FIX: sys.stdout is a file object, not a logging.Handler;
            # passing it to addHandler broke logging. Wrap it properly.
            from logging import StreamHandler
            Log.addHandler(StreamHandler(stdout))
        if conf.log_level:
            Log.setLevel(conf.log_level)
        else:
            Log.setLevel(ERROR)
    except IOError:
        Log = getLogger()
        Log.error('Log file error, No such file or directory')
| XorgX304/xss_fuzz | lib/log.py | log.py | py | 1,034 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_num... |
38169728179 | import requests
rhost = "http://localhost:"
red = "\033[0;31m"
green = "\033[0;32m"
blue = "\033[0;34m"
nc = "\033[0m"
def getRequest(port, uri, expected):
    """GET a page from the local test server and print the expected
    outcome next to the status, headers and body actually received."""
    url = rhost + str(port) + uri
    print(red + "GET " + url + nc)
    print(green + "Expected: " + expected + nc)
    resp = requests.get(url)
    print(blue + "I received:" + nc)
    print("Status Code: " + str(resp.status_code))
    for header in resp.headers:
        print(header + ":" + resp.headers[header])
    print()
    print(resp.text)
def classic(port):
    """Run the battery of plain GET checks against the server on *port*.

    Pauses for Enter after each request so the output can be inspected.
    """
    cases = (
        ("/index.html", "Content of index.html"),
        ("/noperm.html", "Error"),
        ("/", "Autoindex"),
        ("", "Autoindex"),
        ("/dir/indexDir.html", "Content of indexDir.html"),
        ("/ipointtodir/indexDir.html", "Content of indexDir.html"),
        ("/dir/", "autoindex of dir"),
        ("/dir/subdir/emptyFile.html", "No Body - No Crash - No Infinite Loop"),
        ("/text.txt", "Content of text.txt"),
        ("/idonotexist", "Error"),
    )
    for uri, expected in cases:
        getRequest(port, uri, expected)
        input("Press Enter: ")
| acoudert/webserv_tester | test/get/classic.py | classic.py | py | 1,135 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
}
] |
70887149155 | from __future__ import absolute_import, division, unicode_literals, print_function, nested_scopes
import logging
import os
import subprocess
from sshutil import conn
from sshutil.cache import _setup_travis
__author__ = 'Christian Hopps'
__version__ = '1.0'
__docformat__ = "restructuredtext en"
logger = logging.getLogger(__name__)
def setup_module(_):
    # pytest module-level fixture: prepare the sshutil connection cache
    # for the CI (travis) environment before any test in this module runs.
    _setup_travis()
class CalledProcessError(subprocess.CalledProcessError):
    """subprocess.CalledProcessError that always carries stderr.

    Older Pythons' base __init__ does not accept the error argument, so the
    four-argument call is retried with three on TypeError.
    """

    def __init__(self, code, command, output=None, error=None):
        base_init = super(CalledProcessError, self).__init__
        try:
            base_init(code, command, output, error)
        except TypeError:
            # Base class without a stderr parameter (older Python).
            base_init(code, command, output)
        self.stderr = error
        self.args = [code, command, output, error]
def read_to_eof(recvmethod):
    """Yield chunks from *recvmethod* until it returns an empty buffer (EOF)."""
    while True:
        chunk = recvmethod(conn.MAXSSHBUF)
        if not chunk:
            break
        yield chunk
def terminal_size():
    """Return (width, height) of the controlling terminal via TIOCGWINSZ on fd 0."""
    import fcntl
    import struct
    import termios

    zeros = struct.pack('HHHH', 0, 0, 0, 0)
    rows, cols, _xpixel, _ypixel = struct.unpack(
        'HHHH', fcntl.ioctl(0, termios.TIOCGWINSZ, zeros))
    return cols, rows
def shell_escape_single_quote(command):
    """Escape embedded single quotes for use inside a single-quoted shell string.

    Each ``'`` becomes ``'"'"'``: (1) close the current single-quoted string,
    (2) emit a double-quoted literal single quote, (3) reopen single quotes.
    With no whitespace between the pieces the shell glues them into one word.
    """
    return '\'"\'"\''.join(command.split("'"))
class SSHCommand(conn.SSHConnection):
    """A single shell command executed over a (possibly cached) SSH connection."""

    def __init__(self,
                 command,
                 host,
                 port=22,
                 username=None,
                 password=None,
                 debug=False,
                 cache=None,
                 proxycmd=None):
        """A command to execute over an ssh connection.

        :param command: The shell command to execute.
        :param host: The host to execute the command on.
        :param port: The ssh port to use.
        :param username: The username to authenticate with if `None` getpass.get_user() is used.
        :param password: The password or public key to authenticate with.
            If `None` given will also try using an SSH agent.
        :type password: str or ssh.PKey
        :param debug: True to enable debug level logging.
        :param cache: A connection cache to use.
        :type cache: SSHConnectionCache
        :param proxycmd: Proxy command to use when making the ssh connection.
        """
        # Results of the (single) execution; populated by run_status_stderr().
        self.command = command
        self.exit_code = None
        self.output = ""
        self.debug = debug
        self.error_output = ""
        super(SSHCommand, self).__init__(host, port, username, password, debug, cache, proxycmd)

    def _get_pty(self):
        """Request a PTY on the channel sized to the local terminal."""
        width, height = terminal_size()
        # NOTE(review): os.environ['TERM'] raises KeyError when TERM is
        # unset (e.g. non-interactive environments) -- confirm callers
        # only reach this from a terminal.
        return self.chan.get_pty(term=os.environ['TERM'], width=width, height=height)

    def run_status_stderr(self):
        """Run the command returning exit code, stdout and stderr.
        :return: (returncode, stdout, stderr)
        >>> status, output, error = SSHCommand("ls -d /etc", "localhost").run_status_stderr()
        >>> status
        0
        >>> print(output, end="")
        /etc
        >>> print(error, end="")
        >>> status, output, error = SSHCommand("grep foobar doesnt-exist", "localhost").run_status_stderr()
        >>> status
        2
        >>> print(output, end="")
        >>>
        >>> print(error, end="")
        grep: doesnt-exist: No such file or directory
        """
        if self.debug:
            logger.debug("RUNNING: %s", str(self.command))
        try:
            # PTY subclasses allocate a terminal before executing.
            if isinstance(self, SSHPTYCommand):
                self._get_pty()
            self.chan.exec_command(self.command)
            # NOTE(review): exit status is collected before stdout/stderr are
            # drained; presumably fine for small outputs, but could stall if
            # the remote fills its channel window before exiting -- verify.
            self.exit_code = self.chan.recv_exit_status()
            self.output = "".join([x.decode('utf-8') for x in read_to_eof(self.chan.recv)])
            self.error_output = "".join(
                [x.decode('utf-8') for x in read_to_eof(self.chan.recv_stderr)])
            if self.debug:
                logger.debug("RESULT: exit: %s stdout: '%s' stderr: '%s'", str(self.exit_code),
                             str(self.output), str(self.error_output))
            return (self.exit_code, self.output, self.error_output)
        finally:
            # Always release the channel/connection back to the cache.
            self.close()

    def run_stderr(self):
        """
        Run a command, return stdout and stderr,
        :return: (stdout, stderr)
        :raises: CalledProcessError
        >>> cmd = SSHCommand("ls -d /etc", "localhost")
        >>> output, error = cmd.run_stderr()
        >>> print(output, end="")
        /etc
        >>> print(error, end="")
        >>> cmd = SSHCommand("grep foobar doesnt-exist", "localhost")
        >>> cmd.run_stderr() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        CalledProcessError: Command 'grep foobar doesnt-exist' returned non-zero exit status 2
        """
        status, unused, unused = self.run_status_stderr()
        if status != 0:
            raise CalledProcessError(self.exit_code, self.command, self.output, self.error_output)
        return self.output, self.error_output

    def run_status(self):
        """
        Run a command, return exitcode and stdout.
        :return: (status, stdout)
        >>> status, output = SSHCommand("ls -d /etc", "localhost").run_status()
        >>> status
        0
        >>> print(output, end="")
        /etc
        >>> status, output = SSHCommand("grep foobar doesnt-exist", "localhost").run_status()
        >>> status
        2
        >>> print(output, end="")
        """
        return self.run_status_stderr()[0:2]

    def run(self):
        """
        Run a command, return stdout.
        :return: stdout
        :raises: CalledProcessError
        >>> cmd = SSHCommand("ls -d /etc", "localhost")
        >>> print(cmd.run(), end="")
        /etc
        >>> cmd = SSHCommand("grep foobar doesnt-exist", "localhost")
        >>> cmd.run() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        CalledProcessError: Command 'grep foobar doesnt-exist' returned non-zero exit status 2
        """
        return self.run_stderr()[0]
class SSHPTYCommand(SSHCommand):
    """Instances of this class also obtain a PTY prior to executing the command"""
    # No extra state: SSHCommand.run_status_stderr() checks isinstance() of
    # this class to decide whether to allocate a PTY.
class ShellCommand(object):
    """Run a local command through ``/bin/sh -c`` with the SSHCommand API.

    Mirrors SSHCommand's run()/run_status()/run_stderr()/run_status_stderr()
    interface so callers can treat local and remote commands uniformly.
    """

    def __init__(self, command, debug=False):
        self.command_list = ["/bin/sh", "-c", command]
        self.debug = debug
        self.exit_code = None
        self.output = ""
        self.error_output = ""

    def run_status_stderr(self):
        """Execute the command locally.

        :return: (exit_code, stdout, stderr); exit_code is 1 on OSError.

        >>> status, output, error = ShellCommand("ls -d /etc").run_status_stderr()
        >>> status
        0
        >>> print(output, end="")
        /etc
        >>> print(error, end="")
        """
        try:
            if self.debug:
                logger.debug("RUNNING: %s", str(self.command_list))
            proc = subprocess.Popen(self.command_list,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    close_fds=True)
            raw_out, raw_err = proc.communicate()
            self.output = raw_out.decode('utf-8')
            self.error_output = raw_err.decode('utf-8')
            self.exit_code = proc.returncode
        except OSError as error:
            # Failed to launch /bin/sh at all; report a generic failure code.
            logger.debug("RESULT: OSError: %s stdout: '%s' stderr: '%s'", str(error),
                         str(self.output), str(self.error_output))
            self.exit_code = 1
        else:
            if self.debug:
                logger.debug("RESULT: exit: %s stdout: '%s' stderr: '%s'", str(self.exit_code),
                             str(self.output), str(self.error_output))
        return (self.exit_code, self.output, self.error_output)

    def run_stderr(self):
        """Execute; return (stdout, stderr) or raise CalledProcessError.

        >>> cmd = ShellCommand("ls -d /etc")
        >>> output, error = cmd.run_stderr()
        >>> print(output, end="")
        /etc
        >>> print(error, end="")
        >>> cmd = ShellCommand("grep foobar doesnt-exist")
        >>> cmd.run_stderr() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        CalledProcessError: Command 'grep foobar doesnt-exist' returned non-zero exit status 2
        """
        code, _out, _err = self.run_status_stderr()
        if code != 0:
            raise CalledProcessError(self.exit_code, self.command_list, self.output,
                                     self.error_output)
        return self.output, self.error_output

    def run_status(self):
        """Execute; return (exit_code, stdout).

        >>> status, output = ShellCommand("ls -d /etc").run_status()
        >>> status
        0
        >>> print(output, end="")
        /etc
        """
        code, out, _err = self.run_status_stderr()
        return (code, out)

    def run(self):
        """Execute; return stdout or raise CalledProcessError on failure.

        >>> print(ShellCommand("ls -d /etc", False).run(), end="")
        /etc
        """
        stdout_text, _stderr_text = self.run_stderr()
        return stdout_text
if __name__ == "__main__":
    import time
    import gc
    # Ad-hoc smoke test: run the same remote command repeatedly with
    # explicit garbage collection and a pause in between -- presumably to
    # exercise reuse/reclamation of cached SSH connections.
    cmd = SSHCommand("ls -d /etc", "localhost", debug=True)
    print(cmd.run())
    gc.collect()
    print(SSHCommand("ls -d /etc", "localhost", debug=True).run())
    gc.collect()
    print("Going to sleep for 2")
    time.sleep(2)
    gc.collect()
    print("Waking up")
    print(SSHCommand("ls -d /etc", "localhost", debug=True).run())
    gc.collect()
    print("Exiting")
| choppsv1/pysshutil | sshutil/cmd.py | cmd.py | py | 11,231 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sshutil.cache._setup_travis",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 19,
"usage_type": "attribute"
},
{
"... |
29160921048 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 19 17:15:14 2019
@author: KUSHAL
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sg
# Harris Corner Detector
class HarrisCornerDetector:
    """Harris corner detector: Q = det(M) - alpha * trace(M)^2, alpha = 0.06.

    Usage: ``InputImage(img, thres)`` then ``Detect()``.
    """

    def InputImage(self, img, thres=117):
        """Store the grayscale input image and the corner-strength threshold."""
        self.img = img
        self.Thres = thres

    def Gradients(self):
        """Return Sobel gradients (Ix, Iy) of the [0, 1]-normalized image."""
        self.img = np.float64(self.img)
        Ix = cv2.Sobel(self.img / 255, cv2.CV_64F, 1, 0, ksize=3)
        Iy = cv2.Sobel(self.img / 255, cv2.CV_64F, 0, 1, ksize=3)
        return Ix, Iy

    def StructureMatrix(self, Ix, Iy):
        """Return the per-pixel structure tensor entries [[Ixx, Ixy], [Iyx, Iyy]]."""
        Ixx = np.multiply(Ix, Ix)
        Ixy = np.multiply(Ix, Iy)
        Iyx = np.multiply(Iy, Ix)
        Iyy = np.multiply(Iy, Iy)
        return [[Ixx, Ixy], [Iyx, Iyy]]

    def Gaussian(self, image_matrix, sigma):
        """Gaussian-blur (5x5 kernel, given sigma) every image in a matrix of images."""
        gauss_matrix = []
        for image_list in image_matrix:
            gauss_list = []
            for image in image_list:
                gauss_list.append(cv2.GaussianBlur(image, (5, 5), sigma, sigma))
            gauss_matrix.append(gauss_list)
        return gauss_matrix

    # (sic) method name kept misspelled for interface compatibility.
    def CorenerStrength(self, gauss_matrix):
        """Return the Harris response det(M) - 0.06 * trace(M)^2 per pixel."""
        det_M = np.multiply(gauss_matrix[0][0], gauss_matrix[1][1]) - np.multiply(gauss_matrix[0][1], gauss_matrix[1][0])
        trace = gauss_matrix[0][0] + gauss_matrix[1][1]
        alpha_trace = 0.06 * np.multiply(trace, trace)
        return det_M - alpha_trace

    def ReturnCorners(self, Q_uv, image):
        """Threshold + 5x5 non-maximum suppression of the corner strength.

        :return: (BGR image with corners drawn in green, array of (y, x) corners)
        """
        # Rescale the response to [0, 255] so the threshold is comparable
        # across images, then zero out sub-threshold responses.
        corner1 = (Q_uv.copy() - np.min(Q_uv)) * 255 / (np.max(Q_uv) - np.min(Q_uv))
        corner = corner1.copy()
        corner1[corner1 > self.Thres] = 255
        corner1[corner1 <= self.Thres] = 0
        corner = corner * corner1
        GREEN = (0, 255, 0)
        corners = []
        img = image.copy()
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        # 5x5 non-maximum suppression: keep only local maxima.
        for x in range(2, image.shape[1] - 2):
            for y in range(2, image.shape[0] - 2):
                window = corner[y-2:y+3, x-2:x+3]
                if corner[y, x] != np.max(np.max(window)):
                    corner[y, x] = 0
        for x in range(0, image.shape[1]):
            for y in range(0, image.shape[0]):
                if corner[y, x] != 0:
                    cv2.circle(img, (x, y), 3, GREEN)
                    img[y, x] = np.array([0, 255, 0])
                    corners.append((y, x))
        return img, np.asarray(corners)

    def Detect(self):
        """Run the full pipeline; write CornerDetection.png and return (image, corners)."""
        Ix, Iy = self.Gradients()
        M = self.StructureMatrix(Ix, Iy)
        M_bar = self.Gaussian(M, 3)
        Q_uv = self.CorenerStrength(M_bar)
        # BUG FIX: the original passed the module-level global ``img`` here,
        # silently ignoring the image stored via InputImage().  Use self.img
        # (cast back to uint8, its original dtype before Gradients()).
        Result, Corners = self.ReturnCorners(Q_uv, np.uint8(self.img))
        Result = np.uint8(Result)
        cv2.imwrite('CornerDetection.png', Result)
        return Result, Corners
if __name__ == "__main__":
    # Detect Harris corners on the first frame of the hotel sequence.
    img = cv2.imread(r"C:\Users\Kushal Patel\Desktop\Courses\Computer Vision\Homework 2\hotelImages\hotel.seq0.png", cv2.IMREAD_GRAYSCALE)
    HD = HarrisCornerDetector()
    HD.InputImage(img, thres = 89)
    Result, Corners = HD.Detect()
    #print(Corners)
    # Half-width of the tracking window; corners closer than this to the
    # image border are discarded below.
    w = 7
    # Load all 50 frames of the sequence as grayscale images.
    image_list = []
    for image_ind in range(50):
        image_list.append(cv2.imread(r"C:\Users\Kushal Patel\Desktop\Courses\Computer Vision\Homework 2\hotelImages\hotel.seq"+str(image_ind) + ".png", cv2.IMREAD_GRAYSCALE))
    image_list = np.asarray(image_list)
    del(image_ind)
    # Keep only corners whose full tracking window lies inside the image;
    # Corners1 ends up as an (N, 2) array of (row, col) locations.
    Corners1 = np.array([])
    for i in range(Corners.shape[0]):
        x_i = Corners[i, 0]
        y_i = Corners[i, 1]
        if x_i - w >= 0 and y_i - w >= 0 and x_i +w <= img.shape[0] and y_i+w <= img.shape[1]:
            Corners1 = np.append(Corners1, Corners[i])
    Corners1 = Corners1.reshape(int(Corners1.shape[0] / 2), 2)
def interpolated_window(image, pixel_loction, window_size, bounds=(480, 512)):
    """Sample a window_size x window_size patch of *image* centred on a
    (possibly fractional) (row, col) location via bilinear interpolation.

    :param image: 2-D array.
    :param pixel_loction: (row, col) centre; may be non-integer.
    :param window_size: odd patch side length.
    :param bounds: (max_row, max_col) exclusive index bounds; defaults to the
        hard-coded hotel-sequence frame size (480, 512) the original used,
        so existing callers are unaffected.
    :return: (window_size, window_size) array of sampled values.  Entries
        whose neighbours fall outside *bounds* keep their initial offset
        values (quirk preserved from the original implementation).
    """
    max_row, max_col = bounds
    # Build a matrix of column offsets (-half .. +half) per row.
    window = np.zeros((window_size, window_size))
    for i in range(window.shape[0]):
        window[i] = window[i] + np.arange(window_size) - int(window_size/2)
    x_add = window
    y_add = np.flip(window.T)
    interpol_x = pixel_loction[0] + x_add
    interpol_y = pixel_loction[1] + y_add
    # Integer neighbour coordinates bracketing each fractional sample.
    x1 = np.floor(interpol_x).astype(int)
    y1 = np.floor(interpol_y).astype(int)
    x2 = np.ceil(interpol_x).astype(int)
    y2 = np.ceil(interpol_y).astype(int)
    x = pixel_loction[0]
    y = pixel_loction[1]
    for i in range(x1.shape[0]):
        for j in range(x1.shape[1]):
            if x1[i][j] != x2[i][j] and y1[i][j] != y2[i][j]:
                if x1[i][j] < max_row and x2[i][j] < max_row and y1[i][j] < max_col and y2[i][j] < max_col:
                    # Standard bilinear blend of the four neighbours.
                    window[i][j] = (image[x1[i][j]][y1[i][j]] * (x2[i][j] - x) * (y2[i][j] - y) +
                                    image[x2[i][j]][y1[i][j]] * (x - x1[i][j]) * (y2[i][j] - y) +
                                    image[x1[i][j]][y2[i][j]] * (x2[i][j] - x) * (y - y1[i][j]) +
                                    image[x2[i][j]][y2[i][j]] * (x - x1[i][j]) * (y - y1[i][j])
                                    ) / ((x2[i][j] - x1[i][j]) * (y2[i][j] - y1[i][j]))
            else:
                # Sample lies on an integer grid line: take the floor pixel.
                if x1[i][j] < max_row and x2[i][j] < max_row and y1[i][j] < max_col and y2[i][j] < max_col:
                    window[i][j] = image[x1[i][j]][y1[i][j]]
    return window
def Tracker(I1g, I2g, window_size, tau=1, corners=None):
    """Lucas-Kanade optical flow evaluated at the given corner locations.

    :param I1g: first grayscale frame.
    :param I2g: second grayscale frame.
    :param window_size: side length of the interpolated patch per corner.
    :param tau: eigenvalue threshold; flow stays zero where the normal
        matrix A^T A is ill-conditioned (smallest |eigenvalue| < tau).
    :param corners: (N, 2) array of (row, col) points to track.  Defaults to
        the module-level ``Corners1`` for backward compatibility (the
        original read that global directly).
    :return: (N, 2) array of per-corner flow vectors (u, v).
    """
    if corners is None:
        corners = Corners1  # legacy global fallback
    kernel_x = np.array([[-1., 1.], [-1., 1.]])
    kernel_y = np.array([[-1., -1.], [1., 1.]])
    kernel_t = np.array([[1., 1.], [1., 1.]])#*.25
    I1g = I1g / 255.
    I2g = I2g / 255.
    mode = 'same'
    # Spatial gradients from frame 1; temporal difference between frames.
    fx = sg.convolve2d(I1g, kernel_x, boundary='symm', mode=mode)
    fy = sg.convolve2d(I1g, kernel_y, boundary='symm', mode=mode)
    ft = sg.convolve2d(I2g, kernel_t, boundary='symm', mode=mode) + sg.convolve2d(I1g, -kernel_t, boundary='symm', mode=mode)
    u = np.zeros(corners.shape[0])
    v = np.zeros(corners.shape[0])
    for k in range(corners.shape[0]):
        i = corners[k, 0]
        j = corners[k, 1]
        Ix = interpolated_window(fx, (i, j), window_size).flatten()
        Iy = interpolated_window(fy, (i, j), window_size).flatten()
        It = interpolated_window(ft, (i, j), window_size).flatten()
        b = np.reshape(It, (It.shape[0], 1))
        A = np.vstack((Ix, Iy)).T
        # Least-squares solve of A @ nu = b, only where well conditioned.
        if np.min(abs(np.linalg.eigvals(np.matmul(A.T, A)))) >= tau:
            nu = np.matmul(np.linalg.pinv(A), b)
            u[k] = nu[1]
            v[k] = nu[0]
    return np.asarray((u, v)).T
# Track the surviving corners through the 50-frame sequence using
# Lucas-Kanade flow between consecutive frames.
u_v = Tracker(image_list[0], image_list[1], 15, tau=1e-5)
x_y_corners = np.zeros((len(image_list), Corners1.shape[0], 2))
x_y_corners[0, :, :] = Corners1
x_y_second = x_y_corners[0, :, :] + u_v
for i in range(1, len(image_list)):
    x_y_corners[i, :, :] = x_y_corners[i-1, :, :] - u_v
    u_v = Tracker(image_list[i], image_list[i-1], 15, tau= 1e-5)
# Draw the trajectories of 20 randomly chosen corners in red.
img_res = Result.copy()
IMG = cv2.cvtColor(image_list[49] , cv2.COLOR_GRAY2BGR)
for k in range(20):
    # NOTE(review): 693 looks like a hard-coded corner count for this
    # particular run -- presumably should be Corners1.shape[0]; verify.
    j = np.random.randint(0, 693)
    for i in range(x_y_corners.shape[0]):
        x = x_y_corners[i][j][0]
        y = x_y_corners[i][j][1]
        if x < 480 and y < 512:
            img_res[int(x), int(y)][2] = 255
            IMG[int(x), int(y)][2] = 255
            cv2.circle(IMG, (int(y), int(x)), 1, (0, 0, 255))
            cv2.circle(img_res, (int(y), int(x)), 1, (0, 0, 255))
cv2.imwrite("Motion_Tracking1.png", img_res)
cv2.imwrite("Test1.png", IMG)
# Also mark the corner positions predicted for the second frame.
Image_second = Result.copy()
#Image_second = cv2.cvtColor(Image_second , cv2.COLOR_GRAY2BGR)
for i in range(x_y_second.shape[0]):
    x = x_y_second[i][0]
    y = x_y_second[i][1]
    cv2.circle(Image_second, (int(y), int(x)), 1, (0, 0, 255))
cv2.imwrite("Motion_Tracking_second1.png", Image_second) | kushal123478/Motion-Tracking | MotionTracking.py | MotionTracking.py | py | 9,112 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.float64",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.Sobel",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.CV_64F",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "cv2.Sobel",
"line_number"... |
16452225862 | import torch
import unittest
import sys
import torch_testing as tt
sys.path.append("..")
from layer_sim.networks import *
from layer_sim import datasets
class TestCNNLeNet(unittest.TestCase):
    # Sanity tests for LeNet5 and its activation-hooking machinery.
    def setUp(self):
        self.lenet = LeNet5(10)
        # Batch of two single-channel 28x28 inputs (MNIST-shaped).
        self.x = torch.rand((2,1,28,28))
    def test_output_comp(self):
        # The final classifier layer must emit one logit per class.
        self.assertEqual(self.lenet.classifier[-1].out_features, 10)
    def test_forward(self):
        x = self.lenet(self.x)
        self.assertEqual(list(x.shape), [2,10])
    def test_hooks(self):
        self.lenet.eval()
        # Hook ReLU/AvgPool activations through each sub-module in turn and
        # check both the count of hooked layers and the carried-forward shape.
        x, out = self.lenet._hook_layers(self.lenet.features, self.x, layer_types_to_hook=(nn.ReLU, nn.AvgPool2d))
        self.assertEqual(len(out), 4)
        self.assertEqual(list(x.shape), [2, 16, 4, 4])
        x, out = self.lenet._hook_layers(self.lenet.flat, x, layer_types_to_hook=(nn.ReLU, nn.AvgPool2d))
        self.assertEqual(len(out), 0)
        self.assertEqual(list(x.shape), [2, 256])
        x, out = self.lenet._hook_layers(self.lenet.classifier, x, layer_types_to_hook=(nn.ReLU, nn.AvgPool2d))
        self.assertEqual(len(out), 2)
        self.assertEqual(list(x.shape), [2, 10])
    def test_forward_with_hooks(self):
        self.lenet.eval()
        out = self.lenet.forward_with_hooks(self.x, layer_types_to_hook=(nn.ReLU, nn.AvgPool2d))
        X = self.lenet(self.x)
        self.assertEqual(len(out), 7)
        # The last hooked activation must equal the plain forward output.
        tt.assert_almost_equal(X, out[-1])
    def test_representations(self):
        self.lenet.eval()
        # NOTE(review): downloads/reads MNIST from ../data -- requires the
        # dataset to be present locally.
        loader, _ = datasets.MNIST("../data", 128, train=False, num_workers=4)
        rep = self.lenet.extract_network_representation(loader, limit_datapoints=500, device="cpu", layer_types_to_hook=(nn.ReLU, nn.AvgPool2d))
        self.assertEqual(len(rep), 7)
        for r in rep:
            self.assertEqual(r.size(0), 500)
        # testing some specific dims
        self.assertEqual(list(rep[0].shape), [500, 6, 24, 24])
        self.assertEqual(list(rep[3].shape), [500, 16, 4, 4])
        self.assertEqual(list(rep[4].shape), [500, 120])
        self.assertEqual(list(rep[5].shape), [500, 84])
class TestVGGSVHN(unittest.TestCase):
    """Sanity checks for the SVHN VGG variant and its hook machinery."""

    def setUp(self):
        self.net = VGG_SVHN(5)
        self.x = torch.rand((5, 3, 32, 32))

        class RandomImageSet(torch.utils.data.Dataset):
            """Tensor-backed dataset; every sample is paired with label 0."""

            def __init__(self, tensors):
                self.data = tensors

            def __getitem__(self, idx):
                return self.data[idx], 0

            def __len__(self):
                return len(self.data)

        images = torch.rand((100, 3, 32, 32))
        self.loader = torch.utils.data.DataLoader(RandomImageSet(images), batch_size=10)

    def test_forward(self):
        prediction = self.net.forward(self.x)
        self.assertEqual(list(prediction.shape), [5, 5])

    def test_forward_with_hooks(self):
        self.net.eval()
        hooked = self.net.forward_with_hooks(self.x)
        self.assertEqual(len(hooked), 12)
        plain = self.net(self.x)
        # The last hooked activation must equal the plain forward output.
        tt.assert_almost_equal(plain, hooked[-1])

    @unittest.skip("data download too heavy")
    def test_representations(self):
        rep = self.net.extract_network_representation(self.loader, limit_datapoints=25, device="cpu")
        self.assertEqual(len(rep), 12)
        for layer_output in rep:
            self.assertEqual(layer_output.size(0), 25)

    def test_failure_pts(self):
        # Asking for more datapoints than the loader holds must raise.
        self.assertRaises(RuntimeError, self.net.extract_network_representation, self.loader, limit_datapoints=500)
| marcozullich/pruned_layer_similarity | tests/test_networks.py | test_networks.py | py | 3,634 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.rand",
"... |
588989209 | import matplotlib.pyplot as plt
import numpy as np
def dft(samples: np.ndarray, bins: int) -> np.ndarray:
    """Discrete Fourier transform of *samples* at *bins* normalized frequencies.

    Computes X[k] = sum_n x[n] * exp(-2j*pi*k*n/bins) for k = 0..bins-1,
    i.e. the textbook DFT; equivalent to ``np.fft.fft(samples)`` when
    ``bins == samples.size``.

    :param samples: 1-D array of real or complex samples.
    :param bins: number of frequency bins to evaluate.
    :return: complex array of length *bins*.
    """
    n = np.arange(samples.size)
    k = np.arange(bins).reshape(-1, 1)
    # The outer product k*n forms the full phase matrix; a single vectorized
    # matrix-vector product replaces the original O(bins * N) Python loops.
    return np.exp(-2j * np.pi * k * n / bins) @ samples
if __name__ == "__main__":
    # Demo: two superimposed sines, plotted alongside this DFT and NumPy's FFT.
    sample_rate = 10 # samples per unit time
    time = 10 # units of time
    t = np.arange(sample_rate * time)
    # 0.10 and 0.24 are normalized frequencies (cycles per sample).
    sine1 = 4 * np.sin(2 * np.pi * 0.10 * t)
    sine2 = np.sin(2 * np.pi * 0.24 * t)
    samples = sine1 + sine2
    plt.figure(0)
    plt.title("Samples")
    plt.plot(t, samples)
    frequency_components = dft(samples, sample_rate * time)
    plt.figure(1)
    plt.title("DFT")
    plt.plot(np.arange(0, sample_rate, sample_rate/100), np.abs(frequency_components))
    # Reference plot: NumPy's FFT, shifted so zero frequency is centred.
    plt.figure(2)
    plt.title("NumPy FFT")
    frequency_components_np_fft = np.fft.fftshift(np.fft.fft(samples))
    plt.plot(np.arange(sample_rate/-2, sample_rate/2, sample_rate/100), np.abs(frequency_components_np_fft))
    plt.show()
| lenka98/digital_signal_processing | discrete_fourier_transform.py | discrete_fourier_transform.py | py | 1,241 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.ndarray",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.exp",
"line_numb... |
21767862234 | import glob
import json
import os
import unittest
from pytext.builtin_task import register_builtin_tasks
from pytext.config import LATEST_VERSION, pytext_config_from_json
from pytext.config.config_adapter import ADAPTERS, upgrade_one_version
from pytext.utils.file_io import PathManager
# Import-time side effect: populate the task registry so that configs
# referencing built-in tasks can be instantiated by the tests below.
register_builtin_tasks()
class ConfigAdapterTest(unittest.TestCase):
    """Checks adapter coverage and upgradeability of historical configs."""

    def _iter_test_cases(self):
        """Yield every {"original", "adapted"} case from json_config/*.json.

        Factored out of the two upgrade tests below, which previously
        duplicated the same glob/open/load loop.
        """
        for path in glob.iglob(
            os.path.join(os.path.dirname(__file__), "json_config/*.json")
        ):
            print("Trying to upgrade file:" + path)
            with PathManager.open(path) as f:
                for test_case in json.load(f):
                    yield test_case

    def test_has_all_adapters(self):
        self.assertEqual(
            LATEST_VERSION,
            max(ADAPTERS.keys()) + 1,
            # BUG FIX: this was an f-string with no placeholder; interpolate
            # the version so the failure message is actionable.
            f"Missing adapter for LATEST_VERSION={LATEST_VERSION}",
        )
        # Adapters must form a contiguous 0..LATEST_VERSION-1 range.
        for i, v in enumerate(sorted(ADAPTERS.keys())):
            self.assertEqual(i, v, f"Missing adapter for version {i}")

    def test_upgrade_one_version(self):
        # Always show the full diff, easier to debug when getting a failed log
        self.maxDiff = None
        for test_case in self._iter_test_cases():
            old_config = test_case["original"]
            new_config = upgrade_one_version(old_config)
            self.assertEqual(new_config, test_case["adapted"])

    # ensure every historical config can be upgraded to latest
    def test_upgrade_to_latest(self):
        for test_case in self._iter_test_cases():
            # make sure the config can be instantiated, don't need return value
            pytext_config_from_json(test_case["original"])
| amohamedwa/pytext | pytext/config/test/config_adapter_test.py | config_adapter_test.py | py | 1,855 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pytext.builtin_task.register_builtin_tasks",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pytext.config.LATEST_VERSION",
"line_number": 18,
"usage_type": "argume... |
31636778041 | # Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
# Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
# Modifications to create of the HMMLearn module: Gael Varoquaux
# More API changes: Sergei Lebedev <superbobry@gmail.com>
# FSHMM code: Elizabeth Fons <elifons@gmail.com>
# Alejandro Sztrajman <asztrajman@gmail.com>
# FSHMM algorithm from:
# Adams, Stephen & Beling, Peter & Cogill, Randy. (2016).
# Feature Selection for Hidden Markov Models and Hidden Semi-Markov Models.
"""
The :mod:`hmmlearn.hmm` module implements hidden Markov models.
"""
import numpy as np
import pandas as pd
from sklearn import cluster
from sklearn.mixture import (
GMM, sample_gaussian,
log_multivariate_normal_density,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from scipy.misc import logsumexp
from sklearn.base import BaseEstimator, _pprint
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import check_is_fitted
from base import _BaseHMM
from hmmlearn.utils import normalize, iter_from_X_lengths, normalize
from base import ConvergenceMonitor
__all__ = ["GMMHMM", "GaussianHMM", "MultinomialHMM", "GaussianFSHHM"]
COVARIANCE_TYPES = frozenset(("spherical", "diag", "full", "tied"))
class GaussianHMM(_BaseHMM):
"""Hidden Markov Model with Gaussian emissions.
Parameters
----------
n_components : int
Number of states.
covariance_type : string
String describing the type of covariance parameters to
use. Must be one of
* "spherical" --- each state uses a single variance value that
applies to all features;
* "diag" --- each state uses a diagonal covariance matrix;
* "full" --- each state uses a full (i.e. unrestricted)
covariance matrix;
* "tied" --- all states use **the same** full covariance matrix.
Defaults to "diag".
min_covar : float
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
startprob_prior : array, shape (n_components, )
Initial state occupation prior distribution.
transmat_prior : array, shape (n_components, n_components)
Matrix of prior transition probabilities between states.
algorithm : string
Decoder algorithm. Must be one of "viterbi" or "map".
Defaults to "viterbi".
random_state: RandomState or an int seed
A random number generator instance.
n_iter : int, optional
Maximum number of iterations to perform.
tol : float, optional
Convergence threshold. EM will stop if the gain in log-likelihood
is below this value.
verbose : bool, optional
When ``True`` per-iteration convergence reports are printed
to :data:`sys.stderr`. You can diagnose convergence via the
:attr:`monitor_` attribute.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means and 'c' for covars. Defaults
to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means and 'c' for covars.
Defaults to all parameters.
Attributes
----------
n_features : int
Dimensionality of the Gaussian emissions.
monitor\_ : ConvergenceMonitor
Monitor object used to check the convergence of EM.
transmat\_ : array, shape (n_components, n_components)
Matrix of transition probabilities between states.
startprob\_ : array, shape (n_components, )
Initial state occupation distribution.
means\_ : array, shape (n_components, n_features)
Mean parameters for each state.
covars\_ : array
Covariance parameters for each state.
The shape depends on ``covariance_type``::
(n_components, ) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
Examples
--------
>>> from hmmlearn.hmm import GaussianHMM
>>> GaussianHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GaussianHMM(algorithm='viterbi',...
"""
    def __init__(self, n_components=1, covariance_type='diag',
                 min_covar=1e-3,
                 startprob_prior=1.0, transmat_prior=1.0,
                 means_prior=0, means_weight=0,
                 covars_prior=1e-2, covars_weight=1,
                 algorithm="viterbi", random_state=None,
                 n_iter=400, tol=1e-9, verbose=False,
                 params="stmc", init_params="stmc"):
        # See the class docstring for parameter documentation.
        # NOTE(review): n_iter=400 / tol=1e-9 differ from upstream hmmlearn
        # defaults -- presumably tuned for the FSHMM experiments; confirm
        # before reuse elsewhere.
        _BaseHMM.__init__(self, n_components,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior, algorithm=algorithm,
                          random_state=random_state, n_iter=n_iter,
                          tol=tol, params=params, verbose=verbose,
                          init_params=init_params)
        self.covariance_type = covariance_type
        self.min_covar = min_covar
        # Priors/weights for the MAP re-estimation in _do_mstep().
        self.means_prior = means_prior
        self.means_weight = means_weight
        self.covars_prior = covars_prior
        self.covars_weight = covars_weight
def _get_covars(self):
"""Return covars as a full matrix."""
if self.covariance_type == 'full':
return self._covars_
elif self.covariance_type == 'diag':
return np.array([np.diag(cov) for cov in self._covars_])
elif self.covariance_type == 'tied':
return np.array([self._covars_] * self.n_components)
elif self.covariance_type == 'spherical':
return np.array(
[np.eye(self.n_features) * cov for cov in self._covars_])
    def _set_covars(self, covars):
        # Copy so later in-place EM updates cannot mutate the caller's array.
        self._covars_ = np.asarray(covars).copy()
    # Public accessor; getter expands per covariance_type (see _get_covars).
    covars_ = property(_get_covars, _set_covars)
    def _check(self):
        """Validate model parameters (means shape, covariance type/values)."""
        super(GaussianHMM, self)._check()
        self.means_ = np.asarray(self.means_)
        self.n_features = self.means_.shape[1]
        if self.covariance_type not in COVARIANCE_TYPES:
            raise ValueError('covariance_type must be one of {0}'
                             .format(COVARIANCE_TYPES))
        _validate_covars(self._covars_, self.covariance_type,
                         self.n_components)
    def _init(self, X, lengths=None):
        """Initialize means (k-means) and covariances before EM.

        Covariances are overridden with a constant 4.0, following the FSHMM
        paper initialization referenced in the module header.
        """
        super(GaussianHMM, self)._init(X, lengths=lengths)
        _, n_features = X.shape
        if hasattr(self, 'n_features') and self.n_features != n_features:
            raise ValueError('Unexpected number of dimensions, got %s but '
                             'expected %s' % (n_features, self.n_features))
        self.n_features = n_features
        if 'm' in self.init_params or not hasattr(self, "means_"):  # paper: mu initialized randomly
            kmeans = cluster.KMeans(n_clusters=self.n_components,
                                    random_state=self.random_state)
            kmeans.fit(X)
            self.means_ = kmeans.cluster_centers_
        if 'c' in self.init_params or not hasattr(self, "covars_"):  # paper: sigma initialized with 4
            cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
            if not cv.shape:
                cv.shape = (1, 1)
            self._covars_ = distribute_covar_matrix_to_match_covariance_type(
                cv, self.covariance_type, self.n_components).copy()
            # Discard the data-driven estimate; use the paper's constant 4.0.
            self._covars_ = np.ones(self._covars_.shape)*4.0
def _compute_log_likelihood(self, X):
return log_multivariate_normal_density(
X, self.means_, self._covars_, self.covariance_type)
def _generate_sample_from_state(self, state, random_state=None):
if self.covariance_type == 'tied':
cv = self._covars_
else:
cv = self._covars_[state]
return sample_gaussian(self.means_[state], cv, self.covariance_type,
random_state=random_state)
    def _initialize_sufficient_statistics(self):
        """Zeroed E-step accumulators: posterior mass and weighted observations."""
        stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
        stats['post'] = np.zeros(self.n_components)
        stats['obs'] = np.zeros((self.n_components, self.n_features))
        stats['obs**2'] = np.zeros((self.n_components, self.n_features))
        if self.covariance_type in ('tied', 'full'):
            # Full outer products only needed for non-diagonal covariances.
            stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
                                           self.n_features))
        return stats
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice):
        """Fold one sequence's posteriors into the EM sufficient statistics."""
        super(GaussianHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice)
        if 'm' in self.params or 'c' in self.params:
            stats['post'] += posteriors.sum(axis=0)
            stats['obs'] += np.dot(posteriors.T, obs)
        if 'c' in self.params:
            if self.covariance_type in ('spherical', 'diag'):
                stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
            elif self.covariance_type in ('tied', 'full'):
                # posteriors: (nt, nc); obs: (nt, nf); obs: (nt, nf)
                # -> (nc, nf, nf)
                stats['obs*obs.T'] += np.einsum(
                    'ij,ik,il->jkl', posteriors, obs, obs)
def _do_mstep(self, stats):
    """M-step: re-estimate Gaussian emission parameters from ``stats``.

    Means and covariances get MAP updates regularized by
    (means_prior, means_weight) and (covars_prior, covars_weight).
    """
    super(GaussianHMM, self)._do_mstep(stats)
    means_prior = self.means_prior
    means_weight = self.means_weight
    # TODO: find a proper reference for estimates for different
    # covariance models.
    # Based on Huang, Acero, Hon, "Spoken Language Processing",
    # p. 443 - 445
    denom = stats['post'][:, np.newaxis]
    if 'm' in self.params:
        # MAP mean: prior pseudo-observations blended with the weighted sums.
        self.means_ = ((means_weight * means_prior + stats['obs'])
                       / (means_weight + denom))
    if 'c' in self.params:
        covars_prior = self.covars_prior
        covars_weight = self.covars_weight
        meandiff = self.means_ - means_prior
        if self.covariance_type in ('spherical', 'diag'):
            # E[(x - mu)^2] expanded via the accumulated first/second moments.
            cv_num = (means_weight * meandiff**2
                      + stats['obs**2']
                      - 2 * self.means_ * stats['obs']
                      + self.means_**2 * denom)
            cv_den = max(covars_weight - 1, 0) + denom
            # np.maximum guards against division by a near-zero occupancy.
            self._covars_ = \
                (covars_prior + cv_num) / np.maximum(cv_den, 1e-5)
            if self.covariance_type == 'spherical':
                # Spherical: replace per-feature variances with their mean.
                self._covars_ = np.tile(
                    self._covars_.mean(1)[:, np.newaxis],
                    (1, self._covars_.shape[1]))
        elif self.covariance_type in ('tied', 'full'):
            cv_num = np.empty((self.n_components, self.n_features,
                               self.n_features))
            for c in range(self.n_components):
                # Full-matrix analogue of the scalar expansion above.
                obsmean = np.outer(stats['obs'][c], self.means_[c])
                cv_num[c] = (means_weight * np.outer(meandiff[c],
                                                     meandiff[c])
                             + stats['obs*obs.T'][c]
                             - obsmean - obsmean.T
                             + np.outer(self.means_[c], self.means_[c])
                             * stats['post'][c])
            cvweight = max(covars_weight - self.n_features, 0)
            if self.covariance_type == 'tied':
                # One shared covariance: pool numerators over all states.
                self._covars_ = ((covars_prior + cv_num.sum(axis=0)) /
                                 (cvweight + stats['post'].sum()))
            elif self.covariance_type == 'full':
                self._covars_ = ((covars_prior + cv_num) /
                                 (cvweight + stats['post'][:, None, None]))
#init values
#self.startprob_ -> pi_i (i: state)
#self.transmat_ -> a_ij (i, j: states)
#self.means_ -> mu_il (i: state, l: feature/series)
#self._covars_ -> sigma2_il (i: state, l: feature/series)
#self.rho_ -> rho_l (l: feature/series)
#self.epsilon_ -> epsilon_l (l: feature/series)
#self.tau_ -> tau_l (l: feature/series)
#self.n_features = X.shape[1], with dimension L
#self.n_components -> number of states
#FIXME: double-check that assigning tau (or std()) to that variable is correct, and that it should not have been _covars_ or something else instead.
#FIXME: verify that the final computations using np.dot(np.sum()) and np.sum(np.sum()) do what they are supposed to do.
def gaussiana(x, mu, sigma2):
    """Univariate normal pdf N(x; mu, sigma2); ``sigma2`` is the variance."""
    norm_const = np.sqrt(2.0 * np.pi * sigma2)
    exponent = -((x - mu) ** 2) / (2.0 * sigma2)
    return np.exp(exponent) / norm_const
class GaussianFSHMM(GaussianHMM):
    """Gaussian HMM with feature saliency.

    Each feature l has a saliency rho_l: with probability rho_l an
    observation of feature l comes from the state-dependent Gaussian
    (means_, _covars_), otherwise from a state-independent "noise"
    Gaussian (epsilon_l, tau_l). EM alternates the usual forward/backward
    pass with closed-form updates of the saliency parameters.
    """

    def __init__(self, k, **kwargs):
        super(GaussianFSHMM, self).__init__(**kwargs)
        self.k_factor_ = k  # strength of the saliency (k) hyperparameter

    def init_values_FS(self, X, epsilon=None, tau=None, rho=None): #default initialization is for the first example of the paper
        # rho_l: feature saliency; epsilon_l / tau_l: mean / std of the
        # state-independent noise component for feature l.
        self.rho_ = np.ones(self.n_features)*0.5 if (rho is None) else rho
        self.epsilon_ = pd.DataFrame(X).mean().values if (epsilon is None) else epsilon
        self.tau_ = pd.DataFrame(X).std().values if (tau is None) else tau

    def select_hyperparams(self, X): #pass hyperparameters as arguments of this function
        self.p_ = np.ones(self.n_components)*2
        self.a_ = np.ones((self.n_components, self.n_components))*2
        self.b_ = pd.DataFrame(X).mean().values # self.b_ = X.mean().values
        self.m_ = self.means_.copy()
        # NOTE(review): only states 0 and 1 get offset prior means here —
        # appears to assume exactly two states; confirm for n_components > 2.
        for l in range(self.n_features):
            self.m_[0, l] = self.b_[l] - self.tau_[l]
            self.m_[1, l] = self.b_[l] + self.tau_[l]
        self.s_ = np.ones(self.m_.shape)*0.5
        self.zeta_ = np.ones(self.m_.shape)*0.5
        self.eta_ = np.ones(self.m_.shape)*0.5
        self.c_ = np.ones(self.n_features)*1.0
        self.nu_ = np.ones(self.n_features)*0.5
        self.psi_ = np.ones(self.n_features)*0.5
        self.k_ = np.ones(self.n_features)*self.k_factor_

    def compute_FS_ESTEP(self, X, gamma):
        """Compute the per-(state, feature, time) responsibilities u and v.

        u[i,l,t] is the posterior mass assigned to "feature l is salient"
        and v[i,l,t] the complementary mass, given state posterior gamma.
        """
        I = self.n_components
        L = self.n_features
        T = X.shape[0]
        self.uilt = np.zeros((I, L, T))
        self.vilt = np.zeros((I, L, T))
        for i in range(I):
            for l in range(L):
                for t in range(T):
                    # Mixture of the salient (state) and noise densities.
                    eilt = self.rho_[l]*gaussiana(X[t, l], self.means_[i, l], self._covars_[i, l])
                    hilt = (1.0-self.rho_[l])*gaussiana(X[t, l], self.epsilon_[l], self.tau_[l])
                    gilt = eilt + hilt
                    uilt = gamma[t, i]*eilt/gilt # does gamma have this index order?
                    vilt = gamma[t, i] - uilt
                    self.uilt[i, l, t] = uilt
                    self.vilt[i, l, t] = vilt

    def fit(self, X, lengths=None):
        """Estimate model parameters.
        An initialization step is performed before entering the
        EM algorithm. If you want to avoid this step for a subset of
        the parameters, pass proper ``init_params`` keyword argument
        to estimator's constructor.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Feature matrix of individual samples.
        lengths : array-like of integers, shape (n_sequences, )
            Lengths of the individual sequences in ``X``. The sum of
            these should be ``n_samples``.
        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X)
        self._init(X, lengths=lengths)
        self._check()
        self.init_values_FS(X)
        self.select_hyperparams(X)
        self.monitor_ = ConvergenceMonitor(self.tol, self.n_iter, self.verbose)
        for iter in range(self.n_iter):
            stats = self._initialize_sufficient_statistics()
            curr_logprob = 0
            for i, j in iter_from_X_lengths(X, lengths):
                framelogprob = self._compute_log_likelihood(X[i:j])
                logprob, fwdlattice = self._do_forward_pass(framelogprob)
                curr_logprob += logprob
                bwdlattice = self._do_backward_pass(framelogprob)
                posteriors = self._compute_posteriors(fwdlattice, bwdlattice) #posteriors <- gamma
                self._accumulate_sufficient_statistics(
                    stats, X[i:j], framelogprob, posteriors, fwdlattice,
                    bwdlattice)
            # NOTE(review): uses the posteriors of the *last* sequence with
            # the full X — only consistent when lengths is None (single
            # sequence); confirm for the multi-sequence case.
            self.compute_FS_ESTEP(X, posteriors)
            # XXX must be before convergence check, because otherwise
            # there won't be any updates for the case ``n_iter=1``.
            self._do_mstep(X, stats)
            self.monitor_.report(curr_logprob)
            if self.monitor_.converged:
                break
        return self

    def _do_mstep(self, X, stats):
        """Performs the M-step of EM algorithm.
        Parameters
        ----------
        stats : dict
            Sufficient statistics updated from all available samples.
        """
        # The ``np.where`` calls guard against updating forbidden states
        # or transitions in e.g. a left-right HMM.
        I = self.n_components
        L = self.n_features
        T = X.shape[0]
        if 's' in self.params:
            startprob_ = self.startprob_prior - 1.0 + stats['start'] # dimensions? we assume these are fine
            self.startprob_ = np.where(self.startprob_ == 0.0,
                                       self.startprob_, startprob_)
            normalize(self.startprob_)
        if 't' in self.params:
            transmat_ = self.transmat_prior - 1.0 + stats['trans'] # dimensions? we assume these are fine
            self.transmat_ = np.where(self.transmat_ == 0.0,
                                      self.transmat_, transmat_)
            normalize(self.transmat_, axis=1)
        means_prior = self.means_prior
        means_weight = self.means_weight
        # TODO: find a proper reference for estimates for different
        # covariance models.
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        denom = stats['post'][:, np.newaxis]
        if 'm' in self.params:
            # Mean update weighted by the saliency responsibilities u.
            for i in range(I):
                for l in range(L):
                    sil2 = self.s_[i,l]**2
                    sigmail2 = self._covars_[i,l]
                    term1 = sil2*np.dot(self.uilt[i, l, :], X[:,l])
                    num = term1 + self.m_[i, l]*sigmail2
                    den = sil2*np.sum(self.uilt[i, l, :]) + sigmail2
                    self.means_[i, l] = num/den
            #self.means_ = ((means_weight * means_prior + stats['obs'])
            #               / (means_weight + denom))
        if 'c' in self.params:
            covars_prior = self.covars_prior
            covars_weight = self.covars_weight
            meandiff = self.means_ - means_prior
            if self.covariance_type in ('spherical', 'diag'):
                # Variance update with inverse-gamma prior (eta, zeta).
                for i in range(I):
                    for l in range(L):
                        term1 = np.dot(self.uilt[i, l, :], (X[:,l] - self.means_[i, l])**2)
                        num = term1 + 2 * self.eta_[i, l]
                        den = np.sum(self.uilt[i, l, :]) + 2*(self.zeta_[i, l] + 1.0)
                        self._covars_[i, l] = num/den
        # Per-feature updates of the noise distribution (epsilon, tau) and
        # the saliency rho, driven by the complementary responsibilities v.
        for l in range(L):
            cl2 = self.c_[l]**2
            bl = self.b_[l]
            taul2 = self.tau_[l]**2
            psil = self.psi_[l]
            nul = self.nu_[l]
            epsilonl = self.epsilon_[l]
            kl = self.k_[l]
            hatT = T + 1 + kl
            epsilonl_num = cl2*np.dot(np.sum(self.vilt[:, l, :], axis=0), X[:, l]) + taul2*bl #????
            epsilonl_den = cl2*np.sum(np.sum(self.vilt[:, l, :], axis=0), axis=-1) + taul2
            self.epsilon_[l] = epsilonl_num / epsilonl_den
            taul_num = np.dot(np.sum(self.vilt[:, l, :], axis=0), (X[:, l] - epsilonl)**2) + 2*psil #????
            taul_den = np.sum(np.sum(self.vilt[:, l, :], axis=0), axis=-1) + 2 * (nul + 1.0)
            self.tau_[l] = np.sqrt(taul_num / taul_den)
            # Closed-form root of the quadratic stationarity condition in rho.
            self.rho_[l] = (hatT - np.sqrt(hatT**2 - 4*kl*np.sum(np.sum(self.uilt[:, l, :], axis=0), axis=-1))) / (2*kl)
| elifons/FeatureSaliencyHMM | hmm.py | hmm.py | py | 20,656 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "base._BaseHMM",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "base._BaseHMM.__init__",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "base._BaseHMM",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "numpy.array",
... |
42389999243 | """
Homework4.
Replace 'pass' by your implementation.
"""
import numpy as np
import helper
# Insert your package here
from sympy import *
from scipy.ndimage.filters import gaussian_filter
from scipy.optimize import leastsq, minimize
'''
Q2.1: Eight Point Algorithm
Input: pts1, Nx2 Matrix
pts2, Nx2 Matrix
M, a scalar parameter computed as max (imwidth, imheight)
Output: F, the fundamental matrix
'''
def eightpoint(pts1, pts2, M):
    """Normalized eight-point estimate of the fundamental matrix F.

    Coordinates are scaled by 1/M before solving so the homogeneous linear
    system is well conditioned; the estimate is un-normalized at the end.
    """
    # Normalization transform: scale both image coordinates by 1/M.
    T = np.eye(3) / M
    T[2, 2] = 1;
    pts1 = pts1.astype('float')/M
    pts2 = pts2.astype('float')/M
    # Each correspondence contributes one row of A in A f = 0,
    # where f are the 9 entries of F.
    A = np.vstack([
        pts1[:, 0]*pts2[:, 0],pts1[:, 0]*pts2[:, 1], pts1[:, 0],
        pts1[:, 1]*pts2[:, 0],pts1[:, 1]*pts2[:, 1], pts1[:, 1],
        pts2[:, 0],pts2[:, 1], np.ones(pts1.shape[0])
    ]).T
    [U, S, V] = np.linalg.svd(A)
    # The least-squares f is the right singular vector for the smallest
    # singular value.
    F = np.reshape(V[-1,:], (3,3))
    # Project helper: enforces the rank-2 constraint / refines numerically.
    F = helper.refineF(F, pts1, pts2)
    # Undo the coordinate normalization.
    F = T.T @ F @ T
    return F
'''
Q2.2: Seven Point Algorithm
Input: pts1, Nx2 Matrix
pts2, Nx2 Matrix
M, a scalar parameter computed as max (imwidth, imheight)
Output: Farray, a list of estimated fundamental matrix.
'''
def sevenpoint(pts1, pts2, M):
    """Seven-point algorithm: return a list of candidate fundamental matrices.

    Builds the 7x9 homogeneous system, takes the two basis vectors {F1, F2}
    of its null space, and solves det(F1 + alpha*F2) = 0 (a cubic in alpha)
    symbolically; each real root yields one candidate F.
    """
    # Normalize coordinates by 1/M for conditioning (undone at the end).
    T = np.eye(3) / M
    T[2, 2] = 1;
    pts1 = pts1.astype('float')/M
    pts2 = pts2.astype('float')/M
    Fs = []
    A = np.vstack([
        pts1[:, 0]*pts2[:, 0],pts1[:, 0]*pts2[:, 1], pts1[:, 0],
        pts1[:, 1]*pts2[:, 0],pts1[:, 1]*pts2[:, 1], pts1[:, 1],
        pts2[:, 0],pts2[:, 1], np.ones(pts1.shape[0])
    ]).T
    [U, S, V] = np.linalg.svd(A)
    # With 7 constraints the null space is two-dimensional.
    F1 = np.reshape(V[-1,:], (3,3))
    F2 = np.reshape(V[-2,:], (3,3))
    # Impose the rank-2 constraint: det(F1 + alpha*F2) = 0.
    alpha = Symbol('alpha')
    eqn = Matrix(F1 + alpha*F2).det()
    solns = roots(eqn)
    for i, sol in enumerate(solns):
        if re(sol)==sol:  # keep only the real roots
            sol = float(sol)
            F = F1 + sol*F2
            F = helper.refineF(F, pts1, pts2)
            Fs.append(T.T @ F @ T)  # un-normalize each candidate
    return Fs
'''
Q3.1: Compute the essential matrix E.
Input: F, fundamental matrix
K1, internal camera calibration matrix of camera 1
K2, internal camera calibration matrix of camera 2
Output: E, the essential matrix
'''
def essentialMatrix(F, K1, K2):
    """Essential matrix from the fundamental matrix and camera intrinsics:
    E = K2^T F K1."""
    E = np.dot(K2.T, np.dot(F, K1))
    return E
'''
Q3.2: Triangulate a set of 2D coordinates in the image to a set of 3D points.
Input: C1, the 3x4 camera matrix
pts1, the Nx2 matrix with the 2D image coordinates per row
C2, the 3x4 camera matrix
pts2, the Nx2 matrix with the 2D image coordinates per row
Output: P, the Nx3 matrix with the corresponding 3D points per row
err, the reprojection error.
'''
def triangulate(C1, pts1, C2, pts2):
    """Triangulate 2D correspondences into 3D points via the DLT.

    Input: C1, C2 are 3x4 camera matrices; pts1, pts2 are Nx2 image points.
    Output: (P, err) where P is Nx3 and err is the total squared
    reprojection error over both views.
    """
    points_3d = []
    total_err = 0
    for pt1, pt2 in zip(pts1, pts2):
        u1, v1 = pt1[0], pt1[1]
        u2, v2 = pt2[0], pt2[1]
        # Each correspondence yields four homogeneous constraints A w = 0
        # on the 3D point w.
        A = np.stack([u1 * C1[2] - C1[0],
                      v1 * C1[2] - C1[1],
                      u2 * C2[2] - C2[0],
                      v2 * C2[2] - C2[1]])
        _, _, V = np.linalg.svd(A)
        # Least-squares null vector, scaled so the homogeneous coord is 1.
        w = V[-1] / V[-1, -1]
        # Accumulate the squared reprojection error in both images.
        for C, pt in ((C1, pt1), (C2, pt2)):
            proj = C @ w
            proj = proj / proj[-1]
            total_err += np.linalg.norm(proj[:2] - pt) ** 2
        points_3d.append(w[:3])
    return np.vstack(points_3d), total_err
'''
Q4.1: 3D visualization of the temple images.
Input: im1, the first image
im2, the second image
F, the fundamental matrix
x1, x-coordinates of a pixel on im1
y1, y-coordinates of a pixel on im1
Output: x2, x-coordinates of the pixel on im2
y2, y-coordinates of the pixel on im2
'''
def epipolarCorrespondence(im1, im2, F, x1, y1):
    """Find the best match in ``im2`` for pixel (x1, y1) of ``im1``.

    Candidate pixels along the epipolar line l = F [x1, y1, 1]^T are scored
    by Gaussian-weighted SSD between S x S colour patches; the candidate
    with the smallest distance wins.

    Returns (x2, y2), or (None, None) if no candidate patch fits in im2.
    Raises ValueError when the epipolar line is degenerate (zero vector).
    """
    sy, sx, _ = im2.shape
    v = np.array([x1, y1, 1])
    l = F.dot(v)  # epipolar line in im2, homogeneous coefficients
    s = np.sqrt(l[0]**2 + l[1]**2)
    if s == 0:
        # Bug fix: the original called an undefined ``error(...)`` (a
        # MATLAB-ism) which raised NameError; raise explicitly instead.
        raise ValueError('Zero line vector in displayEpipolar')
    l = l / s
    # Sample the line once per row (or per column when it is vertical-free).
    if l[0] != 0:
        ye = sy - 1
        ys = 0
        Y = np.arange(np.ceil(min(ys, ye)), np.ceil(max(ys, ye)))
        X = np.round(-(l[1]*Y + l[2])/l[0])
    else:
        xe = sx - 1
        xs = 0
        X = np.arange(np.ceil(min(xs, xe)), np.ceil(max(xs, xe)))
        Y = np.round(-(l[0]*X + l[2])/l[1])
    X = np.round(X).astype('int')
    Y = np.round(Y).astype('int')
    S = 15  # odd patch size
    patch1 = getPatch(im1, y1, x1, S)
    x2, y2 = None, None
    min_dist = np.inf
    # Weight central pixels more heavily than the patch borders.
    g_kernel = gaussian(S, 15)
    g_kernel_3d = np.repeat(g_kernel[:, :, np.newaxis], 3, axis=2)
    for i, j in zip(X, Y):
        patch2 = getPatch(im2, j, i, S)
        if patch2 is None:  # candidate window falls off the image
            continue
        dist = np.linalg.norm(g_kernel_3d * (patch1 - patch2))
        if dist < min_dist:
            min_dist = dist
            [x2, y2] = i, j
    return x2, y2
def getPatch(im, x, y, S):
    """Return the S x S window of ``im`` centred at (x, y), or None when the
    window would touch or cross an image border."""
    h, w = im.shape[:2]
    half = S // 2
    inside = (x - half > 0 and x + half + 1 < h and
              y - half > 0 and y + half + 1 < w)
    if not inside:  # no valid patch near the borders
        return None
    return im[x - half:x + half + 1, y - half:y + half + 1, :]
def gaussian(size, sigma):
    """Normalised 2-D Gaussian kernel of shape (size, size)."""
    coords = np.linspace(-size // 2 + 1, size // 2 + 1, size)
    xx, yy = np.meshgrid(coords, coords)
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    return kernel / kernel.sum()
'''
Q5.1: RANSAC method.
Input: pts1, Nx2 Matrix
pts2, Nx2 Matrix
M, a scaler parameter
Output: F, the fundamental matrix
'''
def ransacF(pts1, pts2, M):
    """RANSAC estimation of the fundamental matrix via the 7-point solver.

    Input: pts1, pts2 are Nx2 correspondences; M is the normalization scale.
    Output: (F, inlier_indices) where inlier_indices is the tuple produced
    by ``np.where`` on the final boolean inlier mask.
    """
    p = pts1.shape[0]
    bestF = None
    bestF_inlier_count = 0
    bestF_inliers = None
    tol = 0.0022   # threshold on the algebraic epipolar error |x2^T F x1|
    num_iter = 100
    pts1_h = np.hstack((pts1, np.ones((p, 1))))
    pts2_h = np.hstack((pts2, np.ones((p, 1))))
    for iter in range(num_iter):
        # Minimal 7-point sample; sevenpoint may return up to 3 candidates.
        randPtsIdx = np.random.choice(p, 7, replace=False)
        randLoc1, randLoc2 = pts1_h[randPtsIdx, :], pts2_h[randPtsIdx, :]
        Fs = sevenpoint(randLoc1[:, :2], randLoc2[:, :2], M)
        for F in Fs:
            dst = np.diag(pts2_h @ F @ pts1_h.T)
            inliers = np.abs(dst) < tol
            inliers_count = np.sum(inliers)
            if inliers_count > bestF_inlier_count:
                bestF_inlier_count = inliers_count
                bestF_inliers = inliers
                bestF = F
                print('#'*100)
                print('iter: ', iter, ' inliers: ', int(100*inliers_count/p), '/', int(100*bestF_inlier_count/p))
                print('#'*100)
        if (bestF_inlier_count / p) >= .75:   # early exit on strong consensus
            break
    # Re-fit on the inliers of the best hypothesis and keep the candidate
    # with the largest consensus.
    bestFs = sevenpoint(pts1[bestF_inliers, :2], pts2[bestF_inliers, :2], M)
    # Bug fix: the counter was previously reset to 0 *inside* the loop below
    # (after computing inliers_count), which made the comparison trivially
    # true so the last candidate always won. Reset once, before the loop.
    bestF_inlier_count = 0
    for F in bestFs:
        dst = np.diag(pts2_h @ F @ pts1_h.T)
        inliers = np.abs(dst) < tol
        inliers_count = np.sum(inliers)
        if inliers_count > bestF_inlier_count:
            bestF_inlier_count = inliers_count
            bestF = F
            bestF_inliers = inliers
    bestF = helper.refineF(bestF, pts1[bestF_inliers, :2], pts2[bestF_inliers, :2])
    return bestF, np.where(bestF_inliers)
'''
Q5.2: Rodrigues formula.
Input: r, a 3x1 vector
Output: R, a rotation matrix
'''
def rodrigues(r):
    """Rodrigues' formula: axis-angle vector -> 3x3 rotation matrix.

    Accepts ``r`` of shape (3,), (3, 1) or (1, 3); the direction of r is
    the rotation axis, its norm the rotation angle. Always returns (3, 3).

    Bug fix: the original reshaped r to (3, 1) and built the cross-product
    matrix from shape-(1,) slices ``u[0]`` etc., so ``u_x`` had shape
    (3, 3, 1) and the final sum broadcast to (3, 3, 3) instead of a 3x3
    rotation matrix. Flattening to scalars restores the intended shape.
    """
    r = np.asarray(r, dtype=float).reshape(3)
    th = np.linalg.norm(r)
    if th == 0:
        return np.eye(3)  # zero rotation
    u = r / th            # unit rotation axis
    a1, a2, a3 = u        # plain scalars, so u_x is a true (3, 3) matrix
    u_x = np.array([[0, -a3, a2],
                    [a3, 0, -a1],
                    [-a2, a1, 0]])
    u = u.reshape(3, 1)
    # R = I cos(th) + (1 - cos(th)) u u^T + sin(th) [u]_x
    R = np.eye(3) * np.cos(th) + (1 - np.cos(th)) * (u @ u.T) + np.sin(th) * u_x
    return R
'''
Q5.2: Inverse Rodrigues formula.
Input: R, a rotation matrix
Output: r, a 3x1 vector
'''
def invRodrigues(R):
    """Inverse Rodrigues: 3x3 rotation matrix -> axis-angle vector.

    Returns a (3, 1) zero column for the identity (or numerically
    degenerate) rotation, otherwise a (3,) vector whose direction is the
    rotation axis and whose norm is the angle.
    """
    cos_th = (np.trace(R) - 1) / 2
    th = np.arccos(cos_th)
    if th == 0 or np.isnan(th):
        return np.zeros(3).reshape(-1, 1)
    # The skew-symmetric part of R encodes sin(th) * axis.
    skew = (R - R.T) / 2
    axis_unnorm = np.array([skew[2, 1], skew[0, 2], skew[1, 0]])
    s = np.linalg.norm(axis_unnorm)
    # atan2 recovers the angle robustly from (sin, cos).
    angle = np.arctan2(s, cos_th)
    return (axis_unnorm / s) * angle
'''
Q5.3: Rodrigues residual.
Input: K1, the intrinsics of camera 1
M1, the extrinsics of camera 1
p1, the 2D coordinates of points in image 1
K2, the intrinsics of camera 2
p2, the 2D coordinates of points in image 2
x, the flattened concatenationg of P, r2, and t2.
Output: residuals, the difference between original and estimated projections
'''
def rodriguesResidual(K1, M1, p1, K2, p2, x):
    """Reprojection residuals for bundle adjustment.

    ``x`` packs the N 3D points followed by the second camera's axis-angle
    rotation r2 and translation t2. Returns the flattened differences
    between observed and reprojected 2D points in both images.
    """
    N = p1.shape[0]
    P = x[:-6].reshape(N, 3)
    r2 = x[-6:-3].reshape(3, 1)
    t2 = x[-3:].reshape(3, 1)
    M2 = np.hstack((rodrigues(r2), t2))
    P_hom = np.hstack((P, np.ones((N, 1))))

    def project(K, M):
        # Project all homogeneous points and de-homogenise to Nx2.
        proj = (K @ M) @ P_hom.T
        return (proj / proj[2, :])[:2, :].T

    residuals = np.concatenate([(p1 - project(K1, M1)).reshape([-1]),
                                (p2 - project(K2, M2)).reshape([-1])])
    return residuals
'''
Q5.3 Bundle adjustment.
Input: K1, the intrinsics of camera 1
M1, the extrinsics of camera 1
p1, the 2D coordinates of points in image 1
K2, the intrinsics of camera 2
M2_init, the initial extrinsics of camera 1
p2, the 2D coordinates of points in image 2
P_init, the initial 3D coordinates of points
Output: M2, the optimized extrinsics of camera 1
P2, the optimized 3D coordinates of points
'''
def bundleAdjustment(K1, M1, p1, K2, M2_init, p2, P_init):
    """Jointly refine the second camera pose and the 3D points.

    Minimizes the summed squared reprojection residual over the packed
    vector [P.ravel(), r2, t2] starting from (M2_init, P_init).
    Returns (M2, P2): the optimized 3x4 extrinsics and Nx3 points.
    """
    N = p1.shape[0]
    R2 = M2_init[:, :3]
    t2 = M2_init[:, 3]
    r2 = invRodrigues(R2)
    # Bug fix: invRodrigues returns a (3, 1) column for the identity
    # rotation but a (3,) vector otherwise; np.concatenate requires equal
    # ndim, so ravel both pieces to 1-D before packing.
    x_init = np.concatenate([P_init.reshape([-1]), np.ravel(r2), np.ravel(t2)])
    # Scalar objective: sum of squared reprojection residuals.
    rod_func = lambda x: (rodriguesResidual(K1, M1, p1, K2, p2, x) ** 2).sum()
    res = minimize(rod_func, x_init)
    x_star = res.x
    P_star = x_star[:-6].reshape(N, 3)
    R2_star = rodrigues(x_star[-6:-3])
    t2_star = x_star[-3:].reshape(-1, 1)
    M2 = np.hstack((R2_star, t2_star))
    return M2, P_star
| danenigma/Traditional-Computer-Vision | 3D-Reconstruction-bundle-adjustment/code/submission.py | submission.py | py | 9,561 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.eye",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.svd",
"line_number... |
19158925299 | """
Logic for transferring a subunit (aka XBlock, XModule) from Open edX to a Blockstore Bundle.
"""
from __future__ import absolute_import, print_function, unicode_literals
import json
import logging
from django.utils.translation import gettext as _
from . import compat
from .block_serializer import XBlockSerializer
from .blockstore_client import add_file_to_draft, commit_draft, create_bundle, create_draft
log = logging.getLogger(__name__)
BUNDLE_DRAFT_NAME = 'relay_import'
BUNDLE_SCHEMA_VERSION = 0.1
def _bundle_type(block_type):
"""
Return the manifest type to set for the given block_type.
"""
if block_type in ['course']:
bundle_type = 'course'
elif block_type in ['chapter', 'sequential']:
bundle_type = 'section'
else:
bundle_type = 'unit'
return 'olx/{}'.format(bundle_type)
def transfer_to_blockstore(root_block_key, bundle_uuid=None, collection_uuid=None):
    """
    Transfer the given block (and its children) to Blockstore.
    Args:
    * block_key: usage key of the Open edX block to transfer
    * bundle_uuid: UUID of the destination block
    * collection_uuid: UUID of the destination collection
    If no bundle_uuid provided, then a new bundle will be created here and that becomes the destination bundle.
    """
    # Step 1: Serialize the XBlocks to OLX files + static asset files
    serialized_blocks = {}  # Key is each XBlock's original usage key

    def serialize_block(block_key):
        """ Inner method to recursively serialize an XBlock to OLX """
        if block_key in serialized_blocks:
            return  # already visited (shared child)
        block = compat.get_block(block_key)
        serialized_blocks[block_key] = XBlockSerializer(block)
        if block.has_children:
            for child_id in block.children:
                serialize_block(child_id)

    serialize_block(root_block_key)
    root_block = compat.get_block(root_block_key)
    # Step 2: Create a bundle and draft to hold the incoming data:
    if bundle_uuid is None:
        log.debug('Creating bundle')
        bundle_data = create_bundle(
            collection_uuid=collection_uuid,
            title=getattr(root_block, 'display_name', root_block_key),
            slug=root_block_key.block_id,
            description=_("Transferred to Blockstore from Open edX {block_key}").format(block_key=root_block_key),
        )
        bundle_uuid = bundle_data["uuid"]
    log.debug('Creating "%s" draft to hold incoming files', BUNDLE_DRAFT_NAME)
    draft_data = create_draft(
        bundle_uuid=bundle_uuid,
        name=BUNDLE_DRAFT_NAME,
        title="OLX imported via openedx-blockstore-relay",
    )
    bundle_draft_uuid = draft_data['uuid']
    # Step 3: Upload files into the draft
    manifest = {
        'schema': BUNDLE_SCHEMA_VERSION,
        'type': _bundle_type(root_block_key.block_type),
        'assets': [],
        'components': [],
        'dependencies': [],
    }
    # For each XBlock that we're exporting:
    for data in serialized_blocks.values():
        # Add the OLX to the draft:
        folder_path = '{}/'.format(data.def_id)
        path = folder_path + 'definition.xml'
        log.info('Uploading {} to {}'.format(data.orig_block_key, path))
        add_file_to_draft(bundle_draft_uuid, path, data.olx_str)
        manifest['components'].append(path)
        # If the block depends on any static asset files, add those too:
        for asset_file in data.static_files:
            asset_path = folder_path + 'static/' + asset_file.name
            add_file_to_draft(bundle_draft_uuid, asset_path, asset_file.data)
            manifest['assets'].append(asset_path)
    # Commit the manifest file. TODO: do we actually need this?
    add_file_to_draft(bundle_draft_uuid, 'bundle.json', json.dumps(manifest, ensure_ascii=False))
    # Step 4: Commit the draft
    commit_draft(bundle_draft_uuid)
    log.info('Finished import into bundle {}'.format(bundle_uuid))
| open-craft/openedx-blockstore-relay | openedx_blockstore_relay/transfer_data.py | transfer_data.py | py | 3,962 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "block_serializer.XBlockSerializer",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "blockstore_client.create_bundle",
"line_number": 67,
"usage_type": "call"
},
{
... |
10597139413 | import csv
import codecs
from itertools import zip_longest
from datetime import datetime, date
from typing import List, Dict
class InvalidDatasetError(Exception):
    """
    Raised when a dataset's format is invalid.
    """

    def __init__(self, reason):
        # Stored on the instance (not passed to Exception.__init__) so
        # existing callers that read ``exc.message`` keep working.
        self.message = f'Invalid format for dataset: {reason}'
class MissingDatasetError(Exception):
    """
    Raised when one or both of the required datasets could not be identified.
    """

    def __init__(self, dataset_names: list):
        self.dataset_names = dataset_names
        joined = ", ".join(dataset_names)
        self.message = f'Datasets were not received for {joined}'
class Transform:
    """
    Handles all data transformation logic.

    Given two raw CSV-derived datasets (New York Times and John Hopkins
    COVID rows, supplied in either order), identifies which is which,
    normalizes the fields, and merges them keyed on date.
    """

    # Required fields for each dataset. (The JH attribute name keeps its
    # original spelling to avoid breaking any external references.)
    _JOHN_HOPKINS_FIEILDS = ('Date', 'Country/Region', 'Province/State', 'Lat', 'Long', 'Confirmed', 'Recovered', 'Deaths')
    _NYT_FIELDS = ('date', 'cases', 'deaths')

    def __init__(self, datasets: list):
        """
        Constructor.
        Store a list of datasets to transform.
        At this stage we do not know/care what the data represents.
        """
        self._datasets = datasets
        self._identified_datasets = {
            'NYT': None,
            'JohnHopkins': None
        }

    def _datum_has_fields(self, datum: dict, fields: tuple) -> bool:
        """
        Test whether the given dict contains all given fields.
        """
        return all(f in datum.keys() for f in fields)

    def _is_nyt_data(self, datum: dict) -> bool:
        """
        Test whether the given dict contains all expected NYT fields.
        """
        return self._datum_has_fields(datum, self._NYT_FIELDS)

    def _is_jh_data(self, datum: dict) -> bool:
        """
        Test whether the given dict contains all expected John Hopkins fields.
        """
        return self._datum_has_fields(datum, self._JOHN_HOPKINS_FIEILDS)

    def _identify_datasets(self):
        """
        From the list of datasets passed to the constructor, identify which is which.
        Raises InvalidDatasetError for malformed input and MissingDatasetError
        when one or both datasets are absent.
        :returns: self (for method chaining)
        """
        for dataset in [ds for ds in self._datasets if ds]:
            if not isinstance(dataset, list):
                raise InvalidDatasetError(f'Expected dataset to be a list, but found {type(dataset)}')
            # Inspect the first record to decide which source this is.
            datum = dataset[0]
            if not isinstance(datum, dict):
                raise InvalidDatasetError('Cannot find a dict record')
            if self._is_nyt_data(datum):
                self._identified_datasets['NYT'] = dataset
            elif self._is_jh_data(datum):
                self._identified_datasets['JohnHopkins'] = dataset
            else:
                raise InvalidDatasetError('Required columns are missing')
        # Check we received both
        missing_data = [key for key in self._identified_datasets.keys() if not self._identified_datasets[key]]
        if missing_data:
            raise MissingDatasetError(missing_data)
        return self

    def _transform_johnhopkins(self):
        """
        Transform JH data to the required fields, filtering to US rows and
        converting date strings to date objects.
        :returns: self (for method chaining)
        """
        try:
            self._identified_datasets['JohnHopkins'] = [
                {
                    'date': datetime.strptime(row['Date'], '%Y-%m-%d').date(),
                    'recovered': int(row['Recovered']),
                }
                for row in self._identified_datasets['JohnHopkins']
                if row['Country/Region'] == 'US'
            ]
        except ValueError as e:
            # Bad date format or non-numeric count.
            raise InvalidDatasetError(f'{e}') from e
        return self

    def _transform_nyt(self):
        """
        Transform NYT data to the required fields, converting date strings
        to date objects.
        :returns: self (for method chaining)
        """
        try:
            self._identified_datasets['NYT'] = [
                {
                    'date': datetime.strptime(row['date'], '%Y-%m-%d').date(),
                    'cases': int(row['cases']),
                    'deaths': int(row['deaths']),
                }
                for row in self._identified_datasets['NYT']
            ]
        except ValueError as e:
            raise InvalidDatasetError(f'{e}') from e
        return self

    def _merge_datasets(self) -> List[dict]:
        """
        Merge the two datasets keyed on the ``date`` property, dropping rows
        from either that have no matching key in the other.

        Bug fix: the previous implementation filtered both lists to the
        common dates and then paired rows *positionally* with zip_longest,
        silently merging rows with different dates whenever the two sources
        listed dates in a different order. Rows are now joined on the date
        key; John Hopkins row order is preserved in the output.
        :returns: merged dataset
        """
        nyt_by_date = {datum['date']: datum for datum in self._identified_datasets['NYT']}
        merged = []
        for datum in self._identified_datasets['JohnHopkins']:
            match = nyt_by_date.get(datum['date'])
            if match is not None:
                merged.append({**datum, **match})
        return merged

    def transform_data(self):
        """
        Perform the entire data transformation and return the merged data.
        :returns: merged dataset
        """
        return self._identify_datasets()._transform_johnhopkins()._transform_nyt()._merge_datasets()
| fireflycons/acg-challenge-2020-09 | src/transform.py | transform.py | py | 5,600 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 124,
"usage_type": "call"
},
{
"api_na... |
20808228948 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3;
import uuid;
import datetime;
from storyedit.user import *;
from storyedit.schema import *;
from storyedit.error import *;
from storyedit.story import *;
STORAGEFILE = "db/data.sqlite3";
class SETable(object):
    """Thin wrapper naming a storage table."""

    __slots__ = ['name']

    def __init__(self, name):
        self.name = name

    def addColumn(self):
        # Not implemented yet.
        pass
class SEStorage(object):
    """sqlite3-backed persistence layer for users, treesets, trees, stories.

    NOTE(review): most queries below interpolate values directly into SQL
    strings; if any of those values can be user-controlled this is an SQL
    injection risk — parameterized ("?") queries should be used instead.
    """
    __slots__ = ['con'];

    def __init__(self):
        # Open (or create) the database file and make sure every table exists.
        self.con = sqlite3.connect(STORAGEFILE);
        schema = UserSchema();
        sql = schema.getSchemaString();
        self.con.execute(sql);
        schema = TreesetSchema();
        sql = schema.getSchemaString();
        self.con.execute(sql);
        schema = TreeSchema();
        sql = schema.getSchemaString();
        self.con.execute(sql);
        schema = StorySchema();
        sql = schema.getSchemaString();
        self.con.execute(sql);
        # self._makeTestData();

    def _makeTestData(self):
        # Seed one user row and one tree row for manual testing.
        userid = uuid.uuid1();
        treesetid = '';
        sql = """insert into user_tbl values('%s', '', 123)
        """ % (userid);
        self.con.execute(sql);
        storyid = 1;
        sql = """insert into tree_tbl values('%s', 1, 100, '%s')
        """ % (userid, storyid);
        self.con.execute(sql);
        self.con.commit();

    # def insertUser(self, userid, treeid):
    #     sql = """insert into user_tbl(userid, treeid, createdate)
    #     values(%s,%s, %s)""";
    #     sql % (123, 234, 456);
    #     self.con.execute(sql);
    #     self.con.commit();

    def _insertTree(self):
        # Placeholder: not implemented yet.
        pass;

    def _insertStory(self, story):
        # Placeholder: not implemented yet.
        pass;

    def _updateStory(self):
        # Placeholder: not implemented yet.
        pass;

    def getStory(self, storyid):
        """Return (title, body) for the story with the given id.

        Asserts that exactly one matching row exists.
        """
        sql = """select * from story_tbl where storyid='%s'""" % (storyid);
        c = self.con.cursor();
        c.execute(sql);
        count = 0
        for row in c:
            count += 1;
            s = (row[1], row[2]); # supposed to be (title, body);
        assert(count == 1);
        return s;

    def getTreeSetByUserID(self, userid):
        """Return the treeset id owned by ``userid``, or -1 when not found."""
        sql = "select treesetid from user_tbl where userid='" + str(userid) + "'";
        c = self.con.cursor();
        num = c.execute(sql);
        treesetid = -1;
        for row in c:
            treesetid = row[0];
        return treesetid;

    def saveNewUser(self, userid, treesetid):
        #save new user
        sql = UserSchema().getInsertString();
        self.con.execute(sql, (userid, treesetid, userid));
        self.con.commit();

    def saveNewStorySet(self, storyset):
        """Persist a new storyset along with its default tree and story."""
        # save storyset
        sql = TreesetSchema().getInsertString();
        assert(storyset.child[0] != None);
        blankstory = storyset.child[0];
        self.con.execute(sql,
                         (storyset.treesetID,
                          blankstory.getStoryID(),
                          blankstory.getStoryTitle(),
                          '2012-11-29',
                          '2012-11-29')
                         );
        self.con.commit();
        #save default tree
        sql = TreeSchema().getInsertString();
        # firststory should be 1.0 -- 100.0
        storyid = 1;
        self.con.execute(sql, (blankstory.getStoryID(), 1.0, 100.0, storyid));
        self.con.commit();
        #save default story
        sql = StorySchema().getInsertString();
        self.con.execute(sql, (None, blankstory.getStoryTitle(), blankstory.getStoryBody()));
        self.con.commit();
        return Success;

    def _getTreeSetFromTreeSetID(self, treesetid):
        # Placeholder: not implemented yet.
        pass;

    def getFirstTreeFromTreeSetID(self, treesetid):
        """Return the (unique) first tree id stored for a treeset."""
        sql = "select * from treeset_tbl where treesetid='%s'" % treesetid;
        c = self.con.cursor();
        c.execute(sql);
        count = 0;
        treeid = '';
        for row in c:
            count += 1;
            treeid = row[1]; # FIXME it must be array.
        assert(count == 1); # it should be unique
        return treeid;

    def getFirstStoryFromTreeID(self, treeid):
        """Load the first story of a tree and return it as an SEStory."""
        sql = "select * from tree_tbl where treeid='%s'" % treeid;
        c = self.con.cursor();
        c.execute(sql);
        count = 0;
        storyid = '';
        for row in c:
            count += 1;
            storyid = row[3];
        assert(count == 1);
        # now, get contents;
        sql = "select * from story_tbl where storyid='%d'" % storyid;
        c.execute(sql);
        storyTitle = '';
        storyBody = '';
        for row in c:
            storyTitle = row[1];
            storyBody = row[2];
        story = SEStory();
        story.setStoryTitle(storyTitle);
        story.setStoryBody(storyBody);
        return story;

    def _getTreeDepth(self, treeid):
        # Placeholder: not implemented yet.
        pass;

    def validateUser(self, user):
        """Return Success if the user exists in user_tbl, else InvalidUserError."""
        sql = "select * from user_tbl where userid='" + user.getUserID() + "'";
        c = self.con.cursor();
        c.execute(sql);
        count = 0;
        for row in c:
            count += 1;
        status = None;
        if count >= 1 :
            status = Success;
        else:
            status = InvalidUserError;
            status.setDetail(status.getDetail() + ':your userid=%s, but count=%d' % (user.getUserID(), count));
        return status

    def saveStory(self, user, treenode, story):
        # NOTE(review): this method looks unfinished — ``_updateStory(self)``
        # passes self twice and ``status`` is never assigned here, so the
        # return raises NameError; confirm and fix upstream.
        self._validateStory(user, treenode);
        self._updateStory(self);
        return status;

    def _validateStory(self, user, treenode):
        # NOTE(review): ``sql.con.cursor()`` below calls an attribute on a
        # *string*; this should almost certainly be ``self.con.cursor()``.
        sql = "select * from tree_tbl where treeid='%s'" % treenode.getTreeID();
        c = sql.con.cursor();
        c.execute(sql);
        count = 0;
        for row in c:
            count += 1;
        status = None;
        if count >= 1 :
            status = Success;
        else:
            status = InvalidUserError;
            status.setDetail(status.getDetail() + ':your userid=%s, but count=%d' % (user.getUserID(), count));

    def finalize(self):
        """Close the underlying sqlite3 connection."""
        self.con.close();
| welovy/storyedit-ss | storyedit/storage.py | storage.py | py | 4,918 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.connect",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "uuid.uuid1",
"line_number": 43,
"usage_type": "call"
}
] |
36440192858 | import torch
from image_filter_transformer import Captioner
device = 'cuda'
from torch.utils.data import DataLoader
from captioning_dataset import CaptioningDataset, MyCollate
from tokenizers import Tokenizer
# --- Inference-time setup: tokenizer, test data, and model ------------------
# Tokenizer trained on the Flickr captions; pad with token id 4 so variable
# length captions can be batched.
tokenizer_path ="/home/ivlabs/Documents/Kshitij/archive/Flickr_tokenizer.json"
tokenizer = Tokenizer.from_file(tokenizer_path)
print(tokenizer.get_vocab_size())
tokenizer.enable_padding(pad_id=4)
# Held-out split; MyCollate pads/stacks (image, caption, location) batches.
test_dataset = CaptioningDataset(split='test')
test_loader = DataLoader(test_dataset, batch_size=46, shuffle=False, collate_fn=MyCollate())
# Filter-based captioning transformer. These hyper-parameters must match the
# ones used at training time or the checkpoints below will fail to load.
model = Captioner(img_dim=512, #image encoder
                  num_proj_layers=1, #image encoder
                  num_filters=4,
                  num_layers=3,
                  enc_heads=2,
                  enc_pff_dim=128,
                  enc_dropout=0.1,
                  topk=3,
                  tok_vocab_size=2706, #output vocab size
                  pos_vocab_size=5000, #max possible length of sentence
                  hidden_dim=512,
                  dec_heads=4,
                  dec_pff_dim=128,
                  dec_dropout=0.1)
# model.load_state_dict(torch.load('/home/ivlabs/Documents/Kshitij/ResNet_Transformer.pth'))
model = model.to(device)
def testing(model, iterator, tokenizer):
    """Run the model over *iterator* and collect decoded results.

    Returns three parallel lists: decoded predictions, image locations, and
    the decoded ground-truth captions.

    Cleanups: iterate the batches directly instead of indexing into
    ``enumerate`` tuples, and drop the unused ``batch_size`` local.
    """
    predictions = []
    locations = []
    captions = []
    model.eval()
    with torch.no_grad():
        for batch in iterator:
            # Batch layout: index 0 = images, index 1 = token ids,
            # last element = file locations.
            batch_locations = batch[-1]
            img = batch[0].to(device)
            text = batch[1].to(device)
            # NOTE(review): train=True during evaluation looks like teacher
            # forcing inside the model — confirm this is intended for testing.
            output, _ = model(img, text, train=True)
            output = torch.softmax(output, dim=-1)
            output = torch.argmax(output, dim=-1)
            predictions.extend(tokenizer.decode_batch(output.tolist()))
            captions.extend(tokenizer.decode_batch(text.tolist()))
            locations.extend(batch_locations)
    return predictions, locations, captions
# Text-generation metrics from the HuggingFace `evaluate` package.
import evaluate
meteor = evaluate.load('meteor')
rouge = evaluate.load('rouge')
bleu = evaluate.load('bleu')
import os
MODEL_TYPE = "Filter"
OUTPUT_PATH = f"/home/ivlabs/Documents/Kshitij/thanmay/models/{MODEL_TYPE}"
# The literal "epoch" in the filename is replaced by the epoch number below.
MODEL_STORE_PATH = os.path.join(OUTPUT_PATH, f"{MODEL_TYPE}_checkpoint_epoch.pth")
EPOCH_SAVE = 4 # Save the model every EPOCH_SAVE epochs
NUM_EPOCHS = 40
# `with` guarantees the scores file is closed even if an epoch fails mid-loop.
with open(os.path.join(OUTPUT_PATH, f"{MODEL_TYPE}_scores.txt"), "w") as outfile:
    # Bug fixed: the header used "\n" between ROUGE1 and ROUGE2, splitting the
    # TSV header across two lines; every separator is now a tab.
    outfile.write("EPOCH\tBLEU\tMETEOR\tROUGE1\tROUGE2\tROUGE_L\tROUGE_Lsum\n")
    # Evaluate each saved checkpoint (every EPOCH_SAVE epochs).
    for epoch in range(EPOCH_SAVE, NUM_EPOCHS + 1, EPOCH_SAVE):
        model.load_state_dict(torch.load(MODEL_STORE_PATH.replace("epoch", str(epoch))))
        predictions, locations, captions = testing(model, test_loader, tokenizer)
        bleu_results = bleu.compute(predictions=predictions, references=captions)
        meteor_results = meteor.compute(predictions=predictions, references=captions)
        rouge_results = rouge.compute(predictions=predictions, references=captions)
        outfile.write(f"{epoch}\t{bleu_results['bleu']}\t{meteor_results['meteor']}\t{rouge_results['rouge1']}\t{rouge_results['rouge2']}\t{rouge_results['rougeL']}\t{rouge_results['rougeLsum']}\n")
| Kshitij-Ambilduke/Image-Captioning | filter_captioning/testing.py | testing.py | py | 3,164 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tokenizers.Tokenizer.from_file",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tokenizers.Tokenizer",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "captioning_dataset.CaptioningDataset",
"line_number": 14,
"usage_type": "call"
},
{... |
20293381596 | import math as math
import numpy as np
import utm
import json
from .optimalControl import OptimalControl
class Navigation:
    """Waypoint-following navigator.

    Turns a list of UTM waypoints (``path``) into velocity / rotation
    references for an OptimalControl controller using an aim-point
    ("carrot") algorithm, and can dump its state as lat-lon JSON for a
    Google-Map overlay.
    """
    # Object for vehicle controller
    # NOTE(review): class-level attribute — every Navigation instance shares
    # this single OptimalControl object.
    controller = OptimalControl()
    path = None # Trajectory goals
    endReached = True
    pathCount = 1 # Counter to select which goal to go to
    maxAimLen = 4.0 # Length from the point orthogonal to the route for which to aim
    minAimLen = 2.0 # Minimum length as to not slow down entirely
    deadzone = 4 # Deadzone, when the vehicle is within the radius of the target - change target.
    # Simple P-controller gains (unit conversion)
    velGain = 0.4 # velGain * maxAimLen = maxSpeed
    rotGain = 1.0
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear the path and counters, marking the run as finished."""
        self.path = None
        self.pathCount = 1
        self.endReached = True
    def retry(self):
        """Re-arm the navigator after the end was reached."""
        self.endReached = False
    def setPath(self, path):
        """Install a new list of waypoints and log it for the map overlay."""
        self.path = path
        # Show the path on Google Map
        self.logToMap()
    def run(self, actualPos, actualHeading, actualVel, actualRot):
        """One control step: convert pose and measured speeds into the
        controller's output via the path-follow algorithm."""
        # Use the pathfollower algorithm to calculate desired speeds for navigating
        velRef, rotRef = self.pathFollow(actualPos, actualHeading)
        # Map distance to linear speed, and map error in radians to rad/s
        velRef = self.velGain * velRef
        rotRef = self.rotGain * rotRef
        # Input to the controller
        return self.controller.run( velRef, rotRef, actualVel, actualRot)
    def pathFollow(self, actualPos, heading):
        """Carrot-on-a-stick path follower.

        Returns ``(aimDistance, thetaError)``: how far ahead along the route
        the aim point lies, and the shortest-angle heading error (radians)
        toward it. Returns ``(0, 0)`` once the final waypoint is reached.
        """
        # Convert lists to numpy arrays for easy vector calculations
        actualPos = np.array( actualPos )
        targetPos = np.array( self.path[self.pathCount] )
        startPos = np.array( self.path[self.pathCount-1] )
        # Calculate vector between target and actual postion to calculate the length
        target = targetPos - actualPos
        distance = math.sqrt( pow(target[0],2) + pow(target[1], 2))
        # If vehicle is close to targetPos, update the goal counter
        if( abs(distance) <= self.deadzone ):
            # Check if there still are a new target
            if self.pathCount < (len(self.path) - 1):
                self.pathCount = self.pathCount + 1
                # NOTE(review): targetPos/startPos are not recomputed after
                # this increment, so the current step still aims at the old
                # segment — confirm the one-cycle lag is acceptable.
            else:
                # Final goal reached, stop moving
                return 0, 0
        ###### Use Helmans method to calculate movement ######
        # Calculate vector between start and the target position (the wanted route)
        route = targetPos - startPos
        # Calculate the point which is orthorgonal to the route from the actualPos
        orthPoint = startPos + ( ((actualPos - startPos) @ route)/(route @ route)) * route
        ### Calculate the aiming point on the route ###
        # First calculate distance from orthPoint to targetPos
        orthRoute = targetPos - orthPoint
        distance = math.sqrt( pow(orthRoute[0],2) + pow(orthRoute[1], 2))
        # Depending on the distance, calculate the distance to next aim point
        if distance < self.maxAimLen:
            aimDistance = self.minAimLen if (distance < self.minAimLen) else distance
        else:
            aimDistance = self.maxAimLen
        ### Calculate the target heading for the vehicle ###
        # Calculate the lenght of the route
        routeLength = math.sqrt( pow(route[0],2) + pow(route[1], 2))
        # Calculate the aiming point by adding the route vector scaled by the aimDistance
        aimPoint = orthPoint + (aimDistance * route)/routeLength
        # Find the vector between actualPos and aimPoint
        aimRoute = aimPoint - actualPos
        # Calculate the angle of the aimRoute vector
        angle = math.atan2(aimRoute[1], aimRoute[0])
        # Map atan2 angle to two pi
        thetaRef = (angle) % (2 * math.pi)
        # Find shortest route in degress and map to radians
        thetaError = (thetaRef*180/math.pi - heading*180/math.pi + 540) % 360 - 180
        thetaError = thetaError*(math.pi/180)
        # Log the path-algorithm to Google Map
        # self.logToMap( actualPos, heading, orthPoint, aimPoint )
        return aimDistance, thetaError
    def logToMap(self, actualPos = None, heading = None, orthPoint = None, aimPoint = None):
        """Write route / pose / algorithm points (converted to lat-lon) to a
        JSON file consumed by the Google-Map overlay.

        UTM zone 32 'V' is hard-coded in every conversion.
        """
        route = list()
        actual, orth, aim = None, None, None
        if self.path:
            for i in range(len(self.path)):
                route.append( utm.to_latlon(self.path[i][0], self.path[i][1], 32, 'V') )
        # Only log actual position if it is different than zero
        if actualPos and all(i != 0 for i in actualPos):
            actual = utm.to_latlon(actualPos[0], actualPos[1], 32, 'V')
        # Only add algorithm data if it is available
        if orthPoint and aimPoint:
            orth = utm.to_latlon(orthPoint[0], orthPoint[1], 32, 'V')
            aim = utm.to_latlon(aimPoint[0], aimPoint[1], 32, 'V')
        with open("map/pathlog.json", "w") as file:
            data = json.dumps({'route': route, 'actual':actual, 'orth':orth, 'aim': aim, 'heading': heading})
            file.write(data)
| ZultTheReal/RobuROC---Python | control/navigation.py | navigation.py | py | 5,349 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "optimalControl.OptimalControl",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.array",
... |
15556401986 | from cProfile import label
import math
import matplotlib.pyplot as plt
class Perceptron():
    def perceptron(self):
        """Train a single sigmoid neuron on a user-chosen truth table (OR,
        AND or XOR) with the delta rule, then plot weight and per-row error
        histories.

        Bug fixed: choosing an invalid menu option used to print a message
        and then crash with NameError (``tabla`` was never assigned); the
        method now returns early instead.
        """
        tabla_or = [[0, 0, 0],[0, 1, 1],[1, 0, 1],[1, 1, 1]]
        tabla_and = [[0,0,0], [0,1,0],[1,0,0],[1,1,1]]
        tabla_xor = [[0,0,0],[0,1,1],[1,0,1],[1,1,0]]
        # Histories for plotting.
        lista_w0 = []
        lista_w1 = []
        lista_w2 = []
        errorFila1 = []
        errorFila2 = []
        errorFila3 = []
        errorFila4 = []
        listacontador = []
        listacontadorpeso = []
        # Initial weights (w0 is the bias weight) and a dummy error to enter the loop.
        w0=0.9
        w1=0.66
        w2=-0.2
        error=1
        print("Opciones de tablas para trabajar: \n1)Tabla OR \n2)Tabla AND \n3)Tabla XOR")
        pregunta = (int(input("¿Quiere usar la opcion 1, 2 o 3?", )))
        if pregunta == 1:
            tabla = tabla_or
            print ("\nTABLA OR")
            for i in tabla:
                print(i)
        elif pregunta == 2:
            tabla = tabla_and
            print ("\nTABLA AND")
            for j in tabla:
                print(j)
        elif pregunta == 3:
            tabla = tabla_xor
            print ("\nTABLA XOR")
            for k in tabla:
                print(k)
        else:
            print("Esa no es una opcion")
            # Bug fix: bail out instead of continuing with `tabla` undefined.
            return
        contador = 0
        contador_peso = 0
        # Train until the error of the LAST row processed is below 10%.
        # NOTE(review): XOR is not linearly separable, so a single neuron may
        # take very long (or fail) to reach this threshold.
        while error > 0.1:
            contador +=1
            listacontador.append(contador)
            for fila in tabla:
                contador_peso +=1
                e = 1
                e1 = fila[0]
                e2 = fila[1]
                sd = fila[2] # desired output
                x = (w0 * e) + (w1 * e1) + (w2 * e2)
                y = 1 / (1 + (math.exp(-x))) # actual (sigmoid) output
                error = sd - y # error = desired output - actual output
                delta = y*(1-y)* error # delta rule: sigmoid derivative times error
                lr = 0.1 # learning rate
                delta_w0 = lr * e * delta
                w0 = w0 + delta_w0
                lista_w0.append(w0)
                delta_w1 = lr * e1 * delta
                w1 = w1 + delta_w1
                lista_w1.append(w1)
                delta_w2 = lr * e2 * delta
                w2 = w2 + delta_w2
                lista_w2.append(w2)
                if fila == tabla[0]:
                    errorFila1.append(error)
                elif fila == tabla[1]:
                    errorFila2.append(error)
                elif fila == tabla[2]:
                    errorFila3.append(error)
                elif fila == tabla[3]:
                    errorFila4.append(error)
                listacontadorpeso.append(contador_peso)
        print(f"\nEn la iteracion {contador} el error es menor al 10% donde: \nw0 = {w0} \nw1 = {w1} \nw2 = {w2} \nError = {error} ")
        # PLOTS
        # Weights as a function of per-row update count.
        print("IMPRIMIENDO GRAFICOS")
        fig, ax = plt.subplots()
        ax.plot(listacontadorpeso, lista_w0, label="w0")
        ax.plot(listacontadorpeso, lista_w1, label="w1")
        ax.plot(listacontadorpeso, lista_w2, label="w2")
        ax.set_xlabel("ITERACIONES", fontdict = {'fontsize':14, 'fontweight':'bold', 'color':'tab:green'})
        ax.set_ylabel("PESOS", fontdict = {'fontsize':14, 'fontweight':'bold', 'color':'tab:blue'})
        plt.title("Peso en base de las iteracion")
        plt.legend()
        plt.savefig('grafigoPesoEnBaseIteracion')
        plt.show()
        # Per-row errors as a function of epoch count.
        fig, er = plt.subplots()
        er.plot(listacontador, errorFila1, label="Error fila 1")
        er.plot(listacontador, errorFila2, label="Error fila 2")
        er.plot(listacontador, errorFila3, label="Error fila 3")
        er.plot(listacontador, errorFila4, label="Error fila 4")
        er.set_xlabel("ITERACIONES", fontdict = {'fontsize':14, 'fontweight':'bold', 'color':'tab:green'})
        er.set_ylabel("ERRORES", fontdict = {'fontsize':14, 'fontweight':'bold', 'color':'tab:blue'})
        plt.title("Error en base de las iteracion")
        plt.legend()
        plt.savefig('grafigoErrorEnBaseIteracion')
        plt.show()
if __name__ == '__main__':
Perceptron().perceptron() | militomba/inteligenciaArtificial | TP2/perceptron.py | perceptron.py | py | 4,318 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "math.exp",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "matplotlib.py... |
74395377633 | import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN_1D(nn.Module):
    """Two-stage 1-D convolutional classifier.

    Expected input shape is (batch, 20, 160); output is 2 logits:
    (batch, 20, 160) -> conv+pool -> (batch, 32, 64) -> conv+pool ->
    (batch, 16, 24) -> flatten -> fully-connected head -> (batch, 2).
    """

    def __init__(self):
        super(CNN_1D, self).__init__()
        # Stage 1: 160x20 -> 128x32 (conv, k=33) -> 64x32 (pool).
        self.conv_layer1 = nn.Sequential(
            nn.Conv1d(in_channels=20, out_channels=32, kernel_size=33),
            nn.MaxPool1d(kernel_size=2, stride=2),
            nn.ReLU(),
        )
        # Stage 2: 64x32 -> 48x16 (conv, k=17) -> 24x16 (pool).
        self.conv_layer2 = nn.Sequential(
            nn.Conv1d(in_channels=32, out_channels=16, kernel_size=17),
            nn.MaxPool1d(kernel_size=2, stride=2),
            nn.ReLU(),
        )
        # Classifier head: 24*16 features -> 48 -> 8 -> 2, with dropout
        # before every linear layer.
        self.fc_layer = nn.Sequential(
            nn.Flatten(),
            nn.Dropout(p=0.5),
            nn.Linear(24 * 16, 48),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(48, 8),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(8, 2),
        )

    def forward(self, x):
        """Run both conv stages, then the fully-connected head."""
        features = self.conv_layer2(self.conv_layer1(x))
        return self.fc_layer(features)
class Attention_CNN(nn.Module):
    """Pointwise (kernel-size-1) channel-mixing layer over 20 channels."""

    def __init__(self):
        super(Attention_CNN, self).__init__()
        # 1x1 convolution without bias: a learned 20x20 channel mix applied
        # independently at every time step.
        self.att = nn.Conv1d(in_channels=20, out_channels=20, kernel_size=1, bias=0)

    def forward(self, x):
        """Apply the channel-mixing convolution to *x*."""
        return self.att(x)
| BrightSoulXYHY/pattern-recognition-hw01 | bs_nets.py | bs_nets.py | py | 1,386 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
74540182432 | from tornado import ioloop, options, web
options.define("port", default=8080, help="port to listen on")
options.define(
"host", default="127.0.0.1", help="host interface to connect on (0.0.0.0 is all)"
)
options.define("path", help="the files to serve")
# Base Tornado application settings shared by every invocation.
SETTINGS = dict(
    # enabling compression can have security impacts if not done correctly
    compress_response=True,
    # not really useful for production
    autoreload=True,
)


def make_settings(path):
    """Return a copy of SETTINGS with ``static_path`` set to *path*.

    Raises RuntimeError when *path* is empty/falsy. SETTINGS itself is
    never mutated.
    """
    if not path:
        raise RuntimeError("path is required")
    return {**SETTINGS, "static_path": path}
def make_app(settings):
    """Create and return (but do not start) a tornado app.

    A single catch-all StaticFileHandler serves everything under
    ``settings["static_path"]``, defaulting to index.html.
    """
    static_route = (
        r"^/(.*)",
        web.StaticFileHandler,
        dict(path=settings["static_path"], default_filename="index.html"),
    )
    return web.Application([static_route], **settings)
def main(path, port, host):
    """start a tornado app on the desired port"""
    settings = make_settings(path)
    app = make_app(settings)
    # Bind the listener before starting the IOLoop so bad ports fail fast.
    app.listen(port, host)
    url = "http://{}:{}/".format(host, port)
    print("Watching files: \t\t{static_path}".format(**settings))
    print("Hosting site on:\t\t{}".format(url))
    print("\nPress `Ctrl+C` to stop")
    try:
        # Blocks until interrupted; Ctrl+C stops the loop cleanly below.
        ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        ioloop.IOLoop.current().stop()
        print("The server was stopped")
if __name__ == "__main__":
    # CLI entry point: tornado's option parser fills options.options from argv.
    options.parse_command_line()
    main(
        path=options.options.path, port=options.options.port, host=options.options.host
    )
| jupyter/accessibility | pa11y-jupyter/serve.py | serve.py | py | 1,663 | python | en | code | 63 | github-code | 1 | [
{
"api_name": "tornado.options.define",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "tornado.options",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "tornado.options.define",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "tornado.op... |
75080076513 | from fractions import Fraction
from .Belief import Belief
# fstring -- String -- String representation of formula (in s-expression style) (e.g. (Believes!4 alice now phi))
# fstring_no_ann -- String -- String form with no likelihood
# justification -- Justification || None -- Links the formula to its justification
# agent -- String
# time -- String
# formula -- Formula -- Sub-formula (object of modal operator)
class LBelief(Belief):
    """Belief formula annotated with an uncertainty: a likelihood level
    (int) and/or a probability (Fraction). Renders in s-expression style,
    e.g. ``(Believes!4 alice now phi)`` or
    ``(Believes![l=4,p=1/3] alice now phi)``.
    """
    def __init__(self, fstring, justification, agent, time, formula, likelihood, probability):
        super().__init__(fstring, justification, agent, time, formula)
        self.likelihood = likelihood
        self.probability = probability
        # fstring_no_ann drops the likelihood/probability annotation,
        # recursing into nested LBelief sub-formulas.
        if isinstance(formula, LBelief): subf = formula.fstring_no_ann
        else: subf = str(formula)
        self.fstring_no_ann = "(Believes! " + agent + " " + time + " " + subf + ")"
    @classmethod
    def from_string(cls, fstring, justification=None):
        """Parse an LBelief from its s-expression string form."""
        from .Parser import parse_fstring # Needs to be in function to avoid circular import
        # Split off the agent and time, leaving the sub-formula intact
        args = fstring.split(maxsplit=3)
        agent = args[1]
        time = args[2]
        formula = parse_fstring(args[3][:-1]) # Pass sub-formula to parser
        # Defaults in case either are not set later
        likelihood = ''
        probability = ''
        # The part of the string containing the likelihood and/or probability
        uncertainty = fstring.split()[0].split('!')[1]
        # Default: Belief has likelihood, no probability
        # e.g. (Believes!4 a t phi)
        try:
            likelihood = int(uncertainty)
        # If the exception hits, we may be in the other case, or may have an improperly formatted belief
        # e.g. (Believes![l=4,p=1/3] a t phi)
        except ValueError:
            try:
                if uncertainty[0] == '[' and uncertainty[-1] == ']':
                    uncertainty = uncertainty[1:-1] # Remove square brackets
                    for x in uncertainty.split(','):
                        y = x.split('=')[1]
                        if x.startswith('l'): likelihood = int(y)
                        elif x.startswith('p'): probability = Fraction(y)
                else:
                    # NOTE(review): exit() in library code kills the whole
                    # process; raising ValueError would be kinder to callers.
                    print("Improperly formatted LBelief 1")
                    exit(1)
            except ValueError:
                print("Improperly formatted LBelief 2")
                exit(1)
        return cls(fstring, justification, agent, time, formula, likelihood, probability)
    @classmethod
    def from_args(cls, agent, time, formula, likelihood='', justification=None, probability=''):
        """Build an LBelief from components, assembling the fstring in the
        format matching whichever of likelihood/probability is provided."""
        if probability == '': fstring = "(Believes!" + str(likelihood) + " " + agent + " " + time + " " + str(formula) + ")"
        elif likelihood == '': fstring = "(Believes![p=" + str(probability) + "] " + agent + " " + time + " " + str(formula) + ")"
        else: fstring = "(Believes![l=" + str(likelihood) + ",p=" + str(probability) + "] " + agent + " " + time + " " + str(formula) + ")"
        return cls(fstring, justification, agent, time, formula, likelihood, probability)
    def is_annotated(self):
        """LBelief always carries an annotation."""
        return True
    def __str__(self):
        return super().__str__()
    def __eq__(self, other):
        # Equal when the base Belief parts match and both annotations agree.
        return super().__eq__(other) and self.likelihood == other.likelihood and self.probability == other.probability
    def __hash__(self):
        # Hash deliberately ignores the annotation (base-class hash), so
        # equal objects hash equally; differently-annotated beliefs may
        # collide, which is allowed.
        return super().__hash__()
| RAIRLab/ShadowAdjudicator | adjudicator/formula/LBelief.py | LBelief.py | py | 3,346 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "Belief.Belief",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "Parser.parse_fstring",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "fractions.Fraction",
"line_number": 58,
"usage_type": "call"
}
] |
28195259379 | from flask import Flask, jsonify, request, make_response
import jwt
import datetime
from functools import wraps
# import pliku
app = Flask(__name__)
app.config['SECRET_KEY'] ='thisisthesecretkey'
def token_required(f):
    """Decorator: require a valid JWT passed as ``?token=...`` on the request.

    Responds 403 when the token is missing or fails verification; otherwise
    calls the wrapped view unchanged.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = request.args.get('token') #http://127.0.0.1:5000/route?token=alsberJkaekfm233545ur
        if not token:
            return jsonify({'message' :'Token is missing!'}), 403
        try:
            # Security fix: pin the accepted algorithm. Leaving `algorithms`
            # unset permits algorithm-confusion attacks and is rejected
            # outright by PyJWT >= 2.0. HS256 matches the symmetric-key
            # encode in login().
            data = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed.
            return jsonify({'message ' : 'Token is invalid'}), 403
        return f(*args, **kwargs)
    return decorated
@app.route('/unprotected')
def unprotected():
    # Open endpoint: reachable without any token.
    return jsonify({'message' : 'Aing teu ngarti!!'})
@app.route('/protected')
# @token_required
def protected():
    # NOTE(review): the @token_required decorator is commented out, so this
    # endpoint is currently NOT protected despite its name — confirm intent.
    return jsonify({'message' : 'Cuman bisa dipake orang yang punya token bener!'})
# @app.route('/api/<crot>',methods=['POST', 'GET'])
# @token_required
# def crot(crot):
# if request.method=='POST':
# msg = request.form['msg']
# pwd = request.form['pwd']
# return pliku.AESCipher(pwd).encrypt(msg).decode('utf-8')
# else:
# return "error"
@app.route('/login')
def login():
    """Issue a 15-minute JWT when the HTTP Basic-auth password is 'admin'.

    Bug fixed: ``jwt.encode`` returns ``bytes`` under PyJWT < 2 but ``str``
    under PyJWT >= 2, so the unconditional ``token.decode('UTF-8')`` raised
    AttributeError on newer PyJWT; the result is now normalised.
    """
    auth = request.authorization
    if auth and auth.password == 'admin':
        token = jwt.encode({'user' : auth.username, 'exp' : datetime.datetime.utcnow() + datetime.timedelta(minutes=15)}, app.config['SECRET_KEY'])
        if isinstance(token, bytes):
            token = token.decode('UTF-8')
        return jsonify({'token' : token})
    return make_response('Could not verify', 401, {'WWW-Authenticate' : 'Basic realm="Login required!"'})
if __name__ == '__main__':
app.run(debug=True) | san24id/Arduino-Webservice | bbs.py | bbs.py | py | 1,725 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.reque... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.