seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
18932220810 | import os
import time
import datetime
from pathlib import Path
from datetime import datetime
import psutil
import math
class Location(str):
    """A path template string; calling it fills in str.format placeholders."""

    def __call__(self, *args) -> 'Location':
        """Return a new Location with *args substituted into the template."""
        return Location(self.format(*args))

    def exists(self):
        """True if something exists on the filesystem at this path."""
        return Path(self).exists()

    def read(self):
        """Return the entire text content of the file at this path."""
        return Path(self).read_text()

    def write(self, value):
        """Replace the file at this path with the text *value*."""
        Path(self).write_text(value)
class Locations:
    """Path templates for the Linux /proc and cgroup-v2 filesystems.

    Each attribute is a Location template; call it with the missing piece
    (a pid for the /proc paths, a cgroup sub-path for the memory.* files).
    """
    PROC_CMDLINE = Location("/proc/{}/cmdline")
    PROC_CGROUP = Location("/proc/{}/cgroup")
    PROC_SMAP_ROLLUP = Location("/proc/{}/smaps_rollup")
    CGROUP_DIR = Location("/sys/fs/cgroup{}")
    MEM_CURRENT = Location("{}/memory.current")
    MEM_STAT = Location("{}/memory.stat")
    MEM_PRESSURE = Location("{}/memory.pressure")
    MEM_HIGH = Location("{}/memory.high")
    MEMINFO = Location("/proc/meminfo")
class ProcessList:
    """Registry of processes discovered under /proc, keyed by integer pid."""

    def __init__(self):
        # Maps pid (int) -> {"pid": int, "cmdline": str}.
        self.pids = {}

    def refresh(self):
        """Scan /proc and record the cmdline of each not-yet-known process.

        Processes that vanish between listing and reading are skipped.
        """
        for entry in os.listdir("/proc/"):
            if not entry.isdigit():
                continue
            pid = int(entry)
            # Bug fix: the original tested `entry in self.pids`, comparing the
            # *string* directory name against integer keys -- that never
            # matched, so every refresh re-read every process from scratch.
            if pid in self.pids:
                continue
            if not Locations.PROC_CMDLINE(pid).exists():
                continue
            try:
                self.pids[pid] = {
                    "pid": pid,
                    "cmdline": Locations.PROC_CMDLINE(pid).read().replace('\0', ' ').strip(),
                }
            except OSError:
                # The process exited between exists() and read().
                continue

    def find(self, name):
        """Return recorded processes whose cmdline is exactly *name*."""
        return [process for process in self.pids.values() if name == process["cmdline"]]

    def find_in(self, name):
        """Return recorded processes whose cmdline contains *name*."""
        return [process for process in self.pids.values() if name in process["cmdline"]]

    def __str__(self):
        return "\n".join(
            "pid: {}\t cmdline: {}".format(pid['pid'], pid['cmdline'])
            for pid in self.pids.values()
        )
# func found on https://sleeplessbeastie.eu/2019/06/10/how-to-display-memory-used-by-processes-in-human-readable-form-using-python-script/
def pretty(nbytes):
    """Format a byte count as a human-readable string, e.g. 1536 -> '1.50 kB'.

    Values of a petabyte and above are expressed in TB: the original indexed
    the unit tuple directly and raised IndexError for nbytes >= 1024**5, so
    the unit index is now clamped to the largest known unit.
    """
    metric = ("B", "kB", "MB", "GB", "TB")
    if nbytes == 0:
        return "0 B"
    nunit = int(math.floor(math.log(nbytes, 1024)))
    # Clamp so huge values stay expressible instead of overrunning `metric`.
    nunit = min(nunit, len(metric) - 1)
    nsize = round(nbytes / math.pow(1024, nunit), 2)
    return '%s %s' % (format(nsize, ".2f"), metric[nunit])
if __name__ == "__main__":
    import sys

    process_list = ProcessList()
    process_list.refresh()
    # The wasm-operator controller process we want to profile.
    container = process_list.find("/controller /")
    if not container:
        # Original crashed with IndexError on container[0]; fail clearly.
        sys.exit("Could not find a process with cmdline '/controller /'.")

    output_file = sys.argv[1]
    with open(output_file, 'w') as file_object:
        file_object.write('time;uss;pss\n')

    # NOTE: `from datetime import datetime` at the top shadows the earlier
    # `import datetime`, so datetime.now() below is datetime.datetime.now().
    while True:
        p = psutil.Process(container[0]["pid"])
        # Take a single snapshot per sample; the original called
        # memory_full_info() three times per iteration (one expensive
        # /proc/<pid>/smaps_rollup read each).
        meminfo = p.memory_full_info()
        uss = meminfo.uss
        pss = meminfo.pss
        print(f"uss {pretty(uss)}, pss={pretty(pss)} ")
        # Re-open in append mode each sample so rows survive a hard kill.
        with open(output_file, 'a') as file_object:
            # Decimal points become commas for locale-style CSV consumers.
            file_object.write(f'{datetime.now()};{uss};{pss}\n'.replace(".", ","))
        time.sleep(0.2)
| idlab-discover/wasm-operator | profile/wasmProfiler.py | wasmProfiler.py | py | 3,043 | python | en | code | 47 | github-code | 36 |
33634152186 | from re import A
import time, random
from Yinsh.yinsh_model import YinshGameRule
from template import Agent
from copy import deepcopy
from collections import deque
import numpy as np
THINKTIME = 0.5
C_PARAM = 1.414
class Node():
    """One node of a Monte Carlo search tree over Yinsh game states.

    NOTE(review): backpropagate() does ``self._results[result] += result``,
    so the loss bucket at key -1 accumulates *negative* values, and q()
    then computes wins - loses = wins + number_of_losses.  This looks like
    a sign bug (``+= 1`` or ``wins + loses`` was probably intended) --
    confirm before changing, since tuning may have compensated for it.
    """

    def __init__(self, state, game_rule=None, agent_id=None, parent=None, parent_action=None):
        self.state = state
        self.parent = parent
        self.id = agent_id
        # Action that led from the parent node to this node.
        self.parent_action = parent_action
        self.children = []
        self.game_rule = game_rule
        # Legal actions not yet expanded into child nodes.
        self._untried_actions = self.get_legal_actions(self.state)
        self._number_of_visits = 0
        # Rollout outcomes keyed by +1 (win) and -1 (loss).
        self._results = {}
        self._results[1] = 0
        self._results[-1] = 0
        # Our agent's score in this state (3 rings removed wins in Yinsh).
        self._score = self.state.agents[self.id].score

    # Current node, victory count minus failure count
    def q(self):
        wins = self._results[1]
        loses = self._results[-1]
        return wins - loses

    # Number of visits to the current node
    def n(self):
        return self._number_of_visits

    # Expand child nodes
    def expand(self):
        """Pop one untried action, apply it, and attach the resulting child."""
        action = self._untried_actions.pop()
        # Deep copy so generateSuccessor cannot mutate this node's state.
        current_state = deepcopy(self.state)
        next_state = self.game_rule.generateSuccessor(current_state, action, self.id)
        child_node = Node(next_state, parent=self, parent_action=action, agent_id=self.id, game_rule=self.game_rule)
        self.children.append(child_node)
        return child_node

    # The logic of generating the tree is to first determine whether the node is the final state
    # if not then determine whether it is fully expanded
    # if not then continue to expand the children of the node
    # otherwise choose a random node from the child nodes as the next node to be expanded
    def run_tree_policy(self):
        """Descend the tree and return the next node to simulate from."""
        current_node=self
        while not current_node.is_terminal_node():
            if not current_node.is_fully_expanded():
                return current_node.expand()
            else:
                current_node=current_node.get_random_child()
        return current_node

    def is_terminal_node(self):
        """A node is terminal when almost no moves remain or a score hit 3.

        The root (no parent) is never treated as terminal so the search
        always expands at least one level.
        """
        if not self.parent:
            return False
        elif len(self.get_legal_actions(self.state)) == 1:
            return True
        else:
            return self._score == 3 or self.parent._score == 3

    def is_fully_expanded(self):
        """True once every legal action has been turned into a child."""
        return len(self._untried_actions) == 0

    # Select an optimal node from all the child nodes (next state)
    def choose_best_child(self):
        # UCT algorithm
        try:
            # Exploitation term (q/n) plus exploration term scaled by C_PARAM.
            choices_weights = [(c.q() / c.n()) + C_PARAM * np.sqrt((2*np.log(self.n()) / c.n())) for c in self.children]
            if self.id == 0:
                # If the current player is the first player,
                # the child node with the greatest weight is selected as the optimal action
                print("mct best action so far")
                return self.children[np.argmax(choices_weights)]
            else:
                # If the current player is a backhand,
                # the child node with the smallest weight (the state with the lowest first-hand win rate)
                # is selected as the optimal action
                print("mct found")
                return self.children[np.argmin(choices_weights)]
        except:
            # Error fallback: a division by a zero visit count (or an empty
            # children list) lands here; fall back to a random child.
            return self.get_random_child()

    def get_random_child(self):
        return random.choice(self.children)

    def get_legal_actions(self, current_state):
        # Deep-copied so callers may pop/mutate the list freely.
        return deepcopy(self.game_rule.getLegalActions(current_state, self.id))

    # Self-play simulation, random selection of actions for child nodes until the endgame
    def rollout(self):
        """Play random moves from this state until a result; return +1/-1/0."""
        current_rollout_state = deepcopy(self.state)
        final_result = 0
        while final_result == 0:
            possible_moves = self.get_legal_actions(current_rollout_state)
            action = random.choice(possible_moves)
            current_rollout_state = deepcopy(self.game_rule.generateSuccessor(current_rollout_state, action, self.id))
            # reward?
            new_score = current_rollout_state.agents[self.id].score
            if new_score == 3 or self._score == 3:
                # +1 if the rollout improved on this node's score, else -1.
                final_result = 1. if new_score > self._score else -1.
                break
            if len(self.get_legal_actions(current_rollout_state)) == 1:
                # Dead end with no meaningful choice left; result stays 0.
                break
        return final_result

    # Goes back up and passes the win/loss information to the parent nodes
    def backpropagate(self, result):
        self._number_of_visits += 1.
        if result == 0:
            return
        # See the class-level NOTE(review): adding `result` (not 1) makes the
        # -1 bucket go negative.
        self._results[result] += result
        if self.parent:
            self.parent.backpropagate(result)

    # Each node calculates the best action by playing itself
    # and records the result, from which the best child node is selected
    def calc_best_action(self,stime):
        """Run select/rollout/backpropagate until THINKTIME since *stime*."""
        cnt = 0
        while time.time()-stime < THINKTIME:
            cnt += 1
            node = self.run_tree_policy()
            reward = node.rollout()
            node.backpropagate(reward)
        print("mct state",cnt)
        return self.choose_best_child()
# Agent class
# Agent class
class myAgent(Agent):
    """Yinsh agent: BFS when the branching factor is large, MCTS otherwise."""

    def __init__(self, _id):
        super().__init__(_id)
        self.id = _id
        # Rules object for a two-player Yinsh game.
        self.game_rule = YinshGameRule(2)

    # BFS search algorithm for len(actions) > 70
    def SelectAction_BFS(self, actions, rootstate):
        """Breadth-first search for the first action path that raises our score.

        Returns the first action of the found path, or a random action when
        the THINKTIME budget expires before anything scoring is found.
        """
        start_time = time.time()
        queue = deque([ (deepcopy(rootstate),[]) ])
        count =0
        # Conduct BFS starting from rootstate.
        while len(queue) and time.time()-start_time < THINKTIME:
            count +=1
            state, path = queue.popleft()
            new_actions = self.game_rule.getLegalActions(state, self.id)
            for a in new_actions:
                next_state = deepcopy(state)
                next_path = path + [a]
                score = state.agents[self.id].score
                new_state = self.game_rule.generateSuccessor(next_state, a, self.id)
                # True when applying `a` increased our score.
                reward = new_state.agents[self.id].score > score
                if reward:
                    print("BFS found",count)
                    return next_path[0]
                else:
                    queue.append((next_state, next_path))
        print("BFS random",count)
        return random.choice(actions)

    # MCTS algorithm for len(actions) <= 70
    def SelectAction(self, actions, rootstate):
        """Entry point invoked by the game runner each turn.

        NOTE(review): both branches of the loop body return, so the while
        executes at most once and effectively acts as an `if`; the trailing
        random fallback is reachable only if THINKTIME elapsed before the
        loop condition was first evaluated.
        """
        start_time = time.time()
        while time.time()-start_time < THINKTIME:
            if len(actions) > 70:
                return self.SelectAction_BFS(actions, rootstate)
            else:
                tree = Node(rootstate, game_rule=self.game_rule, agent_id=self.id)
                return tree.calc_best_action(start_time).parent_action
        print('mct random')
        return random.choice(actions)
| bzr1/automated-agent-for-a-board-game-yinsh- | agents/t_056/mcts.py | mcts.py | py | 7,015 | python | en | code | 0 | github-code | 36 |
5925495311 | """
Spyral, an awesome library for making games.
"""
__version__ = '0.2'
__license__ = 'MIT'
__author__ = 'Robert Deaton'
from types import ModuleType
import sys
import compat
import pygame
# Import mapping: which spyral submodule defines each lazily exported object.
all_by_module = {
    'spyral.sprite' : ['Sprite', 'Group', 'AggregateSprite'],
    'spyral.scene' : ['Scene', 'director'],
    'spyral.image' : ['Image'],
    'spyral.vector' : ['Vec2D'],
    'spyral.signal' : ['Signal'],
    'spyral.rect' : ['Rect'],
    'spyral.animation' : ['Animation'],
    'spyral.core' : ['init', 'quit'],
    'spyral.font' : ['Font'],
    'spyral.camera' : ['Camera'],
    'spyral.clock' : ['GameClock'],
    'spyral.event' : ['keys']
}
# modules that should be imported when accessed as attributes of spyral
attribute_modules = frozenset(['memoize', 'point', 'camera', 'animator', 'event', '_lib', 'color', 'font'])
# Reverse index built from all_by_module: object name -> defining module.
# NOTE: dict.iteritems() makes this file Python 2 only.
object_origins = {}
for module, items in all_by_module.iteritems():
    for item in items:
        object_origins[item] = module
class module(ModuleType):
    """Automatically import objects from the modules.

    This is the lazy-export pattern popularised by werkzeug: attribute
    access triggers the import of the submodule that actually defines the
    requested name, keeping `import spyral` itself cheap.
    """

    def __getattr__(self, name):
        if name in object_origins:
            # Import the defining submodule and cache *all* of its exported
            # names on this module so later accesses bypass __getattr__.
            module = __import__(object_origins[name], None, None, [name])
            for extra_name in all_by_module[module.__name__]:
                setattr(self, extra_name, getattr(module, extra_name))
            return getattr(module, name)
        elif name in attribute_modules:
            # Importing the submodule registers it as an attribute of spyral.
            __import__('spyral.' + name)
        # Fall back to ordinary attribute lookup, which raises
        # AttributeError for genuinely unknown names.
        return ModuleType.__getattribute__(self, name)

    def __dir__(self):
        """Just show what we want to show."""
        result = list(new_module.__all__)
        result.extend(('__file__', '__path__', '__doc__', '__all__',
                       '__docformat__', '__name__', '__path__',
                       '__package__', '__version__'))
        return result
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules['spyral']
# Set up the lazy module instance and patch it into sys.modules so that
# `import spyral` from now on yields the lazy-loading `module` defined above.
new_module = sys.modules['spyral'] = module('spyral')
new_module.__dict__.update({
    '__file__': __file__,
    '__package__': 'spyral',
    '__path__': __path__,
    '__doc__': __doc__,
    '__version__': __version__,
    # Public API: every lazily mapped object plus the attribute submodules.
    '__all__': tuple(object_origins) + tuple(attribute_modules),
    '__docformat__': 'restructuredtext en'
})
| rdeaton/spyral | __init__.py | __init__.py | py | 2,378 | python | en | code | 3 | github-code | 36 |
74962738343 | from rest_framework.serializers import ModelSerializer
from rest_framework import exceptions
from api.models import VDO
from django_celery_results.models import TaskResult
class VDOSerializer(ModelSerializer):
    """Serializer exposing every field of the VDO model."""

    def create(self, validated_data):
        """Persist and return a new VDO row built from the validated payload."""
        return self.Meta.model.objects.create(**validated_data)

    class Meta:
        model = VDO
        fields = '__all__'
class StatusSerializer(ModelSerializer):
    """Serializer for celery TaskResult rows: task id, status and result."""

    def create(self, validated_data):
        """Persist and return a new TaskResult row from the validated payload."""
        return self.Meta.model.objects.create(**validated_data)

    class Meta:
        model = TaskResult
        fields = ('task_id', 'status', 'result')
| 6410615147/cartrack | cartrack/api/serializer.py | serializer.py | py | 688 | python | en | code | 0 | github-code | 36 |
# Site configuration for the "redmesa" cluster: slurm submission defaults.
debug_queue = "queue", "inter"
""" How to select the debug queue.
    First part of the tuple is the keyword argument to modify when calling
    the pbs job, and the second is its value.
"""
accounts = ["BES000"]
""" List of slurm or pbs accounts allowed for use.
    This is used by ipython's %launch magic function.
    It is not required for slurm systems.
    If empty, then %launch will not have a queue option.
"""
qsub_exe = "sbatch"
""" Qsub executable. """
default_pbs = { 'account': accounts[0], 'walltime': "06:00:00", 'nnodes': 1, 'ppn': 8}
""" Defaults parameters filling the pbs script. """
# The placeholders ({account}, {walltime}, ...) are filled from default_pbs
# plus per-job values when the script is generated.
pbs_string = "#! /bin/bash\n"\
             "#SBATCH --account={account}\n"\
             "#SBATCH --time={walltime}\n"\
             "#SBATCH -N {nnodes}\n"\
             "#SBATCH -e \"{err}.%j\"\n"\
             "#SBATCH -o \"{out}.%j\"\n"\
             "#SBATCH -J {name}\n"\
             "#SBATCH -D {directory}\n\n"\
             "python {scriptcommand}\n"
""" Default slurm script. """
default_comm = { 'n': 2, 'ppn': default_pbs['ppn'] }
""" Default mpirun parameters. """
mpirun_exe = "mpirun -np {n} {placement} numa_wrapper -ppn={ppn} {program}"
""" Command-line to launch external mpi programs. """
def ipython_qstat(self, arg):
    """ squeue --user=`whoami` -o "%7i %.3C %3t -- %50j" """
    # Lists the current user's slurm jobs and greps them for *arg*.
    # NOTE(review): squeue.stdout.read() returns bytes on Python 3, so the
    # str operations below suggest this is Python 2 code -- confirm before
    # running under Python 3.
    from subprocess import Popen, PIPE
    from IPython.utils.text import SList
    from getpass import getuser
    # finds user name.
    whoami = getuser()
    squeue = Popen(["squeue", "--user=" + whoami, "-o", "\"%7i %.3C %3t %j\""], stdout=PIPE)
    result = squeue.stdout.read().rstrip().split('\n')
    # Drop the header row and strip the surrounding quotes that the -o
    # format string wraps around each line.
    result = SList([u[1:-1] for u in result[1:]])
    # arg[1:-1] presumably strips delimiters added by the magic-command
    # machinery -- TODO confirm against the caller.
    return result.grep(str(arg[1:-1]))
| mdavezac/LaDa | config/redmesa_mpi.py | redmesa_mpi.py | py | 1,720 | python | en | code | 5 | github-code | 36 |
73434596264 | import pandas as pd
import json
import requests
# # ----------from fred---------------
# api_keys = ["36211f27396765eca92b93f01dca74db", "4808384bf945022005347fcf2f6957fb",
# "a66fc3e61d360c6088b022f2c06c831c", "1836f996f9157acd994d59547bb0f65c",
# "4e2f7a3a68190b6584419017414974d5", "bc4c30a690776f1662cd6e8f5d30f3ce",
# "3cd3452ab1a374f571db4822fd6f359a", "ad8557a75ffc5f367492cb34b67539e8"]
#
# api_endpoint = "https://api.stlouisfed.org/fred/series/observations"
# params = {
# "series_id": 'GNPCA',
# "api_key": api_keys[1],
# "file_type": "json"
# }
# response = requests.get(api_endpoint, params=params)
# data_dict = response.json()
# print(data_dict)
# # Extract observations
# observations = data_dict['observations']
#
# # Create DataFrame
# df = pd.DataFrame(observations)
# df_final = pd.DataFrame()
#
# # Convert columns to appropriate data types
# df_final['date'] = pd.to_datetime(df['date'])
# df_final['value'] = pd.to_numeric(df['value'])
#
# print(df_final)
# df_final.to_csv('economic_data.csv', index=False)
# loaded_df_csv = pd.read_csv('economic_data.csv')
# print("Loaded DataFrame from CSV:")
# print(loaded_df_csv)
# ----------from nasdaq---------------
# Download every FRED series listed in the metadata CSV via Nasdaq Data Link
# and round-trip the result through a CSV file.
import nasdaqdatalink
# API key lives in a local file so it is not committed with the code.
nasdaqdatalink.read_key(filename="../api_keys/nasdaq1.txt")
# nasdaqdatalink.bulkdownload('FRED')
df1 = pd.read_csv('../../files/data/FRED_metadata_plus_categories.csv')
codes = df1['code']
# print(df1['code'])
# Nasdaq Data Link addresses FRED series as "FRED/<series code>".
nasdaq_code = []
for code in codes:
    nasdaq_code.append('FRED/'+code)
data = nasdaqdatalink.get(nasdaq_code)#['FRED/00XALCATM086NEST', 'FRED/00XALCBEM086NEST'])
print(data.info())
print(data)
# Save, then re-load to verify the download is round-trippable from disk.
data.to_csv('economic_data2.csv', index=True)
loaded_df_csv = pd.read_csv('economic_data2.csv')
print("Loaded DataFrame from CSV:")
print(loaded_df_csv)
| stergioa/masterThesis4 | src/download_data/download_timeseries_values.py | download_timeseries_values.py | py | 1,818 | python | en | code | 0 | github-code | 36 |
40152772858 | """Display a rotomap."""
import enum
import functools
import sys
import cv2
import numpy
import mel.lib.common
import mel.lib.fullscreenui
import mel.lib.image
import mel.rotomap.detectmoles
import mel.rotomap.mask
import mel.rotomap.moles
import mel.rotomap.tricolour
DEFAULT_MASKER_RADIUS = 200
_WHITE = (255, 255, 255)
_BLACK = (0, 0, 0)
def draw_mole(image, x, y, colours):
def circle(radius, col):
cv2.circle(image, (x, y), radius, col, -1)
circle(20, _WHITE)
circle(18, _BLACK)
radius = 16
for index in range(3):
circle(radius, colours[index])
radius -= 4
def draw_non_canonical_mole(image, x, y, colours):
def rect(size, col):
top_left = (x - size, y - size)
bottom_right = (x + size, y + size)
cv2.rectangle(image, top_left, bottom_right, col, -1)
rect(20, _WHITE)
rect(18, _BLACK)
draw_mole(image, x, y, colours)
def draw_crosshair(image, x, y):
inner_radius = 16
outer_radius = 24
directions = [(1, 0), (0, -1), (-1, 0), (0, 1)] # Right, down, left, up
size_color_list = [(3, _WHITE), (2, _BLACK)]
for size, color in size_color_list:
for d in directions:
cv2.line(
image,
(x + (inner_radius * d[0]), y + (inner_radius * d[1])),
(x + (outer_radius * d[0]), y + (outer_radius * d[1])),
color,
size,
)
class Display(mel.lib.fullscreenui.ZoomableMixin):
def __init__(self, screen):
super().__init__()
self._image_display = screen
self._rect = numpy.array((screen.width, screen.height))
title_height, _ = mel.lib.image.measure_text_height_width("abc")
self._spacer_height = 10
self._image_rect = self._rect - numpy.array(
(0, title_height + self._spacer_height)
)
self._title = ""
def show_current(self, image, overlay):
self.zoomable_transform_update(image, self._image_rect)
image = self.zoomable_transform_render()
if overlay is not None:
image = overlay(image, self._transform)
caption = mel.lib.image.render_text_as_image(self._title)
image = mel.lib.image.montage_vertical(
self._spacer_height, image, caption
)
self._image_display.show_opencv_image(image)
def set_title(self, title):
self._title = title
def make_composite_overlay(*overlays):
"""Return an overlay, which will composite the supplied overlays in turn.
:*overlays: The overlay callables to composite.
:returns: A function which will composite *overlays and return the image.
"""
def do_overlay(image, transform):
for o in overlays:
image = o(image, transform)
return image
return do_overlay
class StatusOverlay:
def __init__(self):
self.text = ""
def __call__(self, image, transform):
if self.text:
text_image = mel.lib.image.render_text_as_image(self.text)
mel.lib.common.copy_image_into_image(text_image, image, 0, 0)
return image
class MoleMarkerOverlay:
def __init__(self, uuid_to_tricolour):
self._is_showing_markers = True
self._is_faded_markers = True
self._highlight_uuid = None
self._uuid_to_tricolour = uuid_to_tricolour
if self._uuid_to_tricolour is None:
self._uuid_to_tricolour = (
mel.rotomap.tricolour.uuid_to_tricolour_first_digits
)
self.moles = None
def toggle_markers(self):
self._is_showing_markers = not self._is_showing_markers
def set_highlight_uuid(self, highlight_uuid):
self._highlight_uuid = highlight_uuid
def toggle_faded_markers(self):
self._is_faded_markers = not self._is_faded_markers
def __call__(self, image, transform):
if not self._is_showing_markers:
return image
highlight_mole = None
if self._highlight_uuid is not None:
for m in self.moles:
if m["uuid"] == self._highlight_uuid:
highlight_mole = m
break
marker_image = image
if self._is_faded_markers:
marker_image = image.copy()
for mole in self.moles:
x, y = transform.imagexy_to_transformedxy(mole["x"], mole["y"])
if mole is highlight_mole:
draw_crosshair(marker_image, x, y)
colours = self._uuid_to_tricolour(mole["uuid"])
if mole[mel.rotomap.moles.KEY_IS_CONFIRMED]:
draw_mole(marker_image, x, y, colours)
else:
draw_non_canonical_mole(marker_image, x, y, colours)
if self._is_faded_markers:
image = cv2.addWeighted(image, 0.75, marker_image, 0.25, 0.0)
return image
class MarkedMoleOverlay:
"""An overlay to make marked moles obvious, for checking mark positions."""
def __init__(self):
self.moles = None
self._highlight_uuid = None
self.is_accentuate_marked_mode = False
def set_highlight_uuid(self, highlight_uuid):
self._highlight_uuid = highlight_uuid
def __call__(self, image, transform):
if self.is_accentuate_marked_mode:
return self._draw_accentuated(image, transform)
else:
return self._draw_markers(image, transform)
def _draw_accentuated(self, image, transform):
# Reveal the moles that have been marked, whilst still showing
# markers. This is good for verifying that markers are actually
# positioned on moles.
mask_radius = 50
image = image.copy() // 2
mask = numpy.zeros((*image.shape[:2], 1), numpy.uint8)
for mole in self.moles:
x, y = transform.imagexy_to_transformedxy(mole["x"], mole["y"])
cv2.circle(mask, (x, y), mask_radius, 255, -1)
masked_faded = cv2.bitwise_and(image, image, mask=mask)
image = cv2.add(masked_faded, image)
highlight_mole = None
if self._highlight_uuid is not None:
for m in self.moles:
if m["uuid"] == self._highlight_uuid:
highlight_mole = m
break
for mole in self.moles:
x, y = transform.imagexy_to_transformedxy(mole["x"], mole["y"])
kind = mole.get("kind", None)
looks_like = mole.get("looks_like", None)
colour = (128, 0, 0)
if mole[mel.rotomap.moles.KEY_IS_CONFIRMED]:
colour = (255, 0, 0)
if kind == "mole":
if looks_like == "mole":
colour = (255, 255, 255)
elif looks_like == "non-mole":
colour = (255, 255, 0)
elif looks_like == "unsure":
colour = (255, 0, 128)
else:
raise Exception(f"Mole looks_like is invalid: {mole}")
elif kind == "non-mole":
if looks_like == "mole":
colour = (0, 255, 255)
elif looks_like == "non-mole":
colour = (0, 0, 255)
elif looks_like == "unsure":
colour = (128, 0, 255)
else:
raise Exception(f"Mole looks_like is invalid: {mole}")
cv2.circle(image, (x, y), mask_radius, colour, 2)
if mole is highlight_mole:
draw_crosshair(image, x, y)
return image
def _draw_markers(self, image, transform):
# Hide the moles that have been marked, showing markers
# distinctly from moles. This is good for marking moles that
# haven't been marked, without worrying about the ones that
# have been marked.
for mole in self.moles:
x, y = transform.imagexy_to_transformedxy(mole["x"], mole["y"])
draw_mole(image, x, y, [[255, 0, 0], [255, 128, 128], [255, 0, 0]])
return image
class BoundingAreaOverlay:
"""An overlay to show the bounding area, if any."""
def __init__(self):
self.bounding_box = None
def __call__(self, image, transform):
image //= 2
if self.bounding_box is not None:
color = (0, 0, 255)
size = 2
space = mel.lib.ellipsespace.Transform(self.bounding_box)
def toimage(point):
point = space.from_space((point))
point = transform.imagexy_to_transformedxy(*point)
return point
border = [
toimage((-1, -1)),
toimage((1, -1)),
toimage((1, 1)),
toimage((-1, 1)),
toimage((-1, -1)),
]
border = numpy.array(border)
centre = [
toimage((0, 0.1)),
toimage((0, -0.1)),
toimage((0.05, 0)),
toimage((0.1, 0)),
toimage((-0.1, 0)),
toimage((0, 0)),
toimage((0, 0.1)),
]
centre = numpy.array(centre)
cv2.drawContours(image, [border, centre], -1, color, size)
return image
class EditorMode(enum.Enum):
edit_mole = 1
edit_mask = 2
bounding_area = 3
mole_mark = 4
debug_automole = 0
class Editor:
def __init__(self, directory_list, screen):
self._uuid_to_tricolour = mel.rotomap.tricolour.UuidTriColourPicker()
self.display = Display(screen)
self.moledata_list = [MoleData(x.image_paths) for x in directory_list]
self._mode = EditorMode.edit_mole
self.moledata_index = 0
self.moledata = self.moledata_list[self.moledata_index]
self._follow = None
self._mole_overlay = MoleMarkerOverlay(self._uuid_to_tricolour)
self.marked_mole_overlay = MarkedMoleOverlay()
self.bounding_area_overlay = BoundingAreaOverlay()
self._status_overlay = StatusOverlay()
self.show_current()
self.masker_radius = DEFAULT_MASKER_RADIUS
def set_smaller_masker(self):
self.masker_radius //= 2
def set_larger_masker(self):
self.masker_radius *= 2
def set_default_masker(self):
self.masker_radius = DEFAULT_MASKER_RADIUS
def set_automoledebug_mode(self):
self._mode = EditorMode.debug_automole
self.show_current()
def set_editmole_mode(self):
self._mode = EditorMode.edit_mole
self.show_current()
def set_editmask_mode(self):
self._mode = EditorMode.edit_mask
self.show_current()
def set_boundingarea_mode(self):
self._mode = EditorMode.bounding_area
self.show_current()
def set_molemark_mode(self):
self._mode = EditorMode.mole_mark
self.show_current()
def set_status(self, text):
self._status_overlay.text = text
def visit(self, visit_target_str):
# Expect a string formatted like this:
#
# path/to/jpg:uuid
#
# Anything after the expected bits is ignored.
#
path, visit_uuid, *_ = visit_target_str.split(":")
print(path, visit_uuid)
for _ in range(len(self.moledata_list)):
if self.moledata.try_jump_to_path(str(path)):
for m in self.moledata.moles:
if m["uuid"] == visit_uuid:
self.moledata.get_image()
self._follow = visit_uuid
self._mole_overlay.set_highlight_uuid(self._follow)
self.marked_mole_overlay.set_highlight_uuid(
self._follow
)
self.show_zoomed_display(m["x"], m["y"])
return
self.show_current()
return
self.moledata_index += 1
self.moledata_index %= len(self.moledata_list)
self.moledata = self.moledata_list[self.moledata_index]
print("Could not find:", path, ":", visit_uuid, file=sys.stderr)
self.show_current()
def follow(self, uuid_to_follow):
self._follow = uuid_to_follow
self._mole_overlay.set_highlight_uuid(self._follow)
self.marked_mole_overlay.set_highlight_uuid(self._follow)
follow_mole = None
for m in self.moledata.moles:
if m["uuid"] == self._follow:
follow_mole = m
break
if follow_mole is not None:
self.show_zoomed_display(follow_mole["x"], follow_mole["y"])
def skip_to_mole(self, uuid_to_skip_to):
original_index = self.moledata.index()
done = False
while not done:
for m in self.moledata.moles:
if m["uuid"] == uuid_to_skip_to:
return
self.moledata.increment()
self.moledata.get_image()
if self.moledata.index() == original_index:
return
def toggle_markers(self):
self._mole_overlay.toggle_markers()
self.show_current()
def toggle_faded_markers(self):
self._mole_overlay.toggle_faded_markers()
self.show_current()
def set_mask(self, mouse_x, mouse_y, enable):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
value = 255 if enable else 0
radius = self.masker_radius
cv2.circle(self.moledata.mask, (image_x, image_y), radius, value, -1)
self.moledata.save_mask()
self.show_current()
def show_current(self):
self.display.set_title(self.moledata.current_image_path())
image = self.moledata.get_image()
if self._mode is EditorMode.edit_mole:
self._mole_overlay.moles = self.moledata.moles
self.display.show_current(
image,
make_composite_overlay(
self._mole_overlay, self._status_overlay
),
)
elif self._mode is EditorMode.debug_automole:
image = image[:]
image = mel.rotomap.detectmoles.draw_debug(
image, self.moledata.mask
)
self.display.show_current(image, None)
elif self._mode is EditorMode.edit_mask:
mask = self.moledata.mask
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray_image = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR)
gray_image[:, :, 2] = mask
self.display.show_current(gray_image, None)
elif self._mode is EditorMode.bounding_area:
box = self.moledata.metadata.get("ellipse", None)
self.bounding_area_overlay.bounding_box = box
self.display.show_current(image, self.bounding_area_overlay)
elif self._mode is EditorMode.mole_mark:
self.marked_mole_overlay.moles = self.moledata.moles
self.display.show_current(image, self.marked_mole_overlay)
else:
raise Exception("Unknown mode", self._mode)
def show_fitted(self):
self.display.set_fitted()
self.show_current()
def set_zoom_level(self, zoom_level):
self.display.set_zoom_level(zoom_level)
self.show_current()
def show_zoomed(self, mouse_x, mouse_y, zoom_level=None):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
self.display.set_zoomed(image_x, image_y, zoom_level)
self.show_current()
def show_zoomed_display(self, image_x, image_y, zoom_level=None):
self.display.set_zoomed(image_x, image_y, zoom_level)
self.show_current()
def show_prev_map(self):
def transition():
self.moledata_index -= 1
self.moledata_index %= len(self.moledata_list)
self.moledata = self.moledata_list[self.moledata_index]
self._adjusted_transition(transition)
self.show_current()
def show_next_map(self):
def transition():
self.moledata_index += 1
self.moledata_index %= len(self.moledata_list)
self.moledata = self.moledata_list[self.moledata_index]
self._adjusted_transition(transition)
self.show_current()
def show_prev(self):
self._adjusted_transition(self.moledata.decrement)
self.show_current()
def show_next(self):
self._adjusted_transition(self.moledata.increment)
self.show_current()
def _adjusted_transition(self, transition_func):
if self.display.is_zoomed() and "ellipse" in self.moledata.metadata:
pos = self.display.get_zoom_pos()
ellipse = self.moledata.metadata["ellipse"]
pos = mel.lib.ellipsespace.Transform(ellipse).to_space(pos)
transition_func()
self.moledata.ensure_loaded()
if "ellipse" in self.moledata.metadata:
ellipse = self.moledata.metadata["ellipse"]
pos = mel.lib.ellipsespace.Transform(ellipse).from_space(pos)
self.display.set_zoomed(pos[0], pos[1])
else:
transition_func()
def show_next_n(self, number_to_advance):
for i in range(number_to_advance):
self.moledata.increment()
self.moledata.get_image()
self.show_current()
def add_mole(self, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
mel.rotomap.moles.add_mole(self.moledata.moles, image_x, image_y)
self.moledata.save_moles()
self.show_current()
def add_mole_display(self, image_x, image_y, mole_uuid=None):
mel.rotomap.moles.add_mole(
self.moledata.moles, image_x, image_y, mole_uuid
)
self.moledata.save_moles()
self.show_current()
def confirm_mole(self, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
mole_uuid = mel.rotomap.moles.get_nearest_mole_uuid(
self.moledata.moles, image_x, image_y
)
mel.rotomap.moles.set_nearest_mole_uuid(
self.moledata.moles, image_x, image_y, mole_uuid, is_canonical=True
)
self.moledata.save_moles()
self.show_current()
def confirm_all(self):
for m in self.moledata.moles:
m["is_uuid_canonical"] = True
self.moledata.save_moles()
self.show_current()
def set_mole_uuid(self, mouse_x, mouse_y, mole_uuid, is_canonical=True):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
mel.rotomap.moles.set_nearest_mole_uuid(
self.moledata.moles, image_x, image_y, mole_uuid, is_canonical
)
self.moledata.save_moles()
self.show_current()
def get_mole_uuid(self, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
return mel.rotomap.moles.get_nearest_mole_uuid(
self.moledata.moles, image_x, image_y
)
def get_nearest_mole(self, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
nearest_index = mel.rotomap.moles.nearest_mole_index(
self.moledata.moles, image_x, image_y
)
mole = None
if nearest_index is not None:
mole = self.moledata.moles[nearest_index]
return mole
def move_nearest_mole(self, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
mel.rotomap.moles.move_nearest_mole(
self.moledata.moles, image_x, image_y
)
self.moledata.save_moles()
self.show_current()
def remove_mole(self, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
mel.rotomap.moles.remove_nearest_mole(
self.moledata.moles, image_x, image_y
)
self.moledata.save_moles()
self.show_current()
def crud_mole(self, mole_uuid, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
i = mel.rotomap.moles.uuid_mole_index(self.moledata.moles, mole_uuid)
if i is not None:
self.moledata.moles[i]["x"] = image_x
self.moledata.moles[i]["y"] = image_y
else:
mel.rotomap.moles.add_mole(
self.moledata.moles, image_x, image_y, mole_uuid
)
self.moledata.save_moles()
self.show_current()
def remap_uuid(self, from_uuid, to_uuid):
print(f"Remap globally {from_uuid} to {to_uuid}.")
self.moledata.remap_uuid(from_uuid, to_uuid)
self.show_current()
class MoleData:
    """Mutable state for browsing a list of rotomap images.

    Tracks the current image's pixel data, mole list, metadata and mask, and
    lazily (re)loads that state whenever the current index changes.
    """

    def __init__(self, path_list):
        # Make an instance-specific cache of images. Note that this means that
        # mel will need to be re-run in order to pick up changes to mole
        # images. This seems to be fine for use-cases to date, only the mole
        # data seems to change from underneath really.
        @functools.lru_cache()
        def load_image(image_path):
            return mel.lib.image.load_image(image_path)
        self._load_image = load_image
        self.moles = []  # mole dicts for the current image
        self.metadata = {}  # metadata dict for the current image
        self.image = None  # pixel data for the current image
        self.mask = None  # mask pixel data for the current image
        self._mask_path = None
        self._path_list = path_list
        self._list_index = 0
        self.image_path = self._path_list[self._list_index]
        self._num_images = len(self._path_list)
        # None forces ensure_loaded() to actually load on first use.
        self._loaded_index = None
        self.ensure_loaded()

    def get_image(self):
        """Return the current image's pixel data, loading it if necessary."""
        self.ensure_loaded()
        return self.image

    def reload(self):
        """Force a reload of the current image's state from disk."""
        self._loaded_index = None
        self.ensure_loaded()

    def ensure_loaded(self):
        """Load image, moles, metadata and mask for the current index.

        No-op when the current index is already loaded.
        """
        if self._loaded_index == self._list_index:
            return
        image_path = self._path_list[self._list_index]
        self.image = self._load_image(image_path)
        self.image_path = image_path
        self.moles = mel.rotomap.moles.load_image_moles(image_path)
        self.metadata = mel.rotomap.moles.load_image_metadata(image_path)
        height, width = self.image.shape[:2]
        self._mask_path = mel.rotomap.mask.path(image_path)
        self.mask = mel.rotomap.mask.load_or_none(image_path)
        if self.mask is None:
            # No saved mask yet: start from an all-zero mask of matching size.
            self.mask = numpy.zeros((height, width), numpy.uint8)
        self._loaded_index = self._list_index

    def remap_uuid(self, from_uuid, to_uuid):
        """Rewrite from_uuid to to_uuid in every image's mole list, marking the
        remapped moles as confirmed, then reload the current image's moles."""
        for image_path in self._path_list:
            moles = mel.rotomap.moles.load_image_moles(image_path)
            for m in moles:
                if m["uuid"] == from_uuid:
                    m["uuid"] = to_uuid
                    m[mel.rotomap.moles.KEY_IS_CONFIRMED] = True
            mel.rotomap.moles.save_image_moles(moles, image_path)
        image_path = self._path_list[self._list_index]
        self.moles = mel.rotomap.moles.load_image_moles(image_path)

    def decrement(self):
        """Step to the previous image, wrapping past the start."""
        new_index = self._list_index + self._num_images - 1
        self._list_index = new_index % self._num_images

    def increment(self):
        """Step to the next image, wrapping past the end."""
        self._list_index = (self._list_index + 1) % self._num_images

    def index(self):
        """Return the current position within the path list."""
        return self._list_index

    def save_mask(self):
        """Persist the current mask to its mask path."""
        mel.lib.common.write_image(self._mask_path, self.mask)

    def save_moles(self):
        """Normalise and persist the current image's mole list."""
        image_path = self._path_list[self._list_index]
        mel.rotomap.moles.normalise_moles(self.moles)
        mel.rotomap.moles.save_image_moles(self.moles, image_path)

    def current_image_path(self):
        """Return the path of the current image."""
        return self._path_list[self._list_index]

    def try_jump_to_path(self, path):
        """Jump to *path* if it is in the path list; return True on success."""
        for i, image_path in enumerate(self._path_list):
            if str(path) == str(image_path):
                if self._list_index != i:
                    self._list_index = i
                    self.ensure_loaded()
                return True
        return False
# -----------------------------------------------------------------------------
# Copyright (C) 2016-2018 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| aevri/mel | mel/rotomap/display.py | display.py | py | 24,857 | python | en | code | 8 | github-code | 36 |
17895408730 | import tensorflow as tf
import utils # local file import from baselines.toxic_comments
class UtilsTest(tf.test.TestCase):
    """Tests for the cross-validation split helpers in utils."""

    def test_make_cv_train_and_eval_splits(self):
        # 10-fold CV where folds 2 and 5 form the train split and the
        # remaining eight folds form the eval split.
        num_folds = 10
        train_fold_ids = ['2', '5']
        (train_split, eval_split, train_folds, eval_folds,
         eval_fold_ids) = utils.make_cv_train_and_eval_splits(
             num_folds, train_fold_ids, return_individual_folds=True)

        # Each fold is a 10% TFDS slice of the train split.
        expected_train_folds = ['train[20%:30%]', 'train[50%:60%]']
        expected_eval_folds = [
            'train[0%:10%]', 'train[10%:20%]', 'train[30%:40%]', 'train[40%:50%]',
            'train[60%:70%]', 'train[70%:80%]', 'train[80%:90%]', 'train[90%:100%]'
        ]
        expected_eval_fold_ids = [0, 1, 3, 4, 6, 7, 8, 9]

        # Combined splits are the '+'-joined union of the individual folds.
        self.assertEqual(train_split, 'train[20%:30%]+train[50%:60%]')
        self.assertEqual(eval_split, '+'.join(expected_eval_folds))
        self.assertListEqual(train_folds, expected_train_folds)
        self.assertListEqual(eval_folds, expected_eval_folds)
        self.assertListEqual(eval_fold_ids, expected_eval_fold_ids)
if __name__ == '__main__':
tf.test.main()
| google/uncertainty-baselines | baselines/toxic_comments/utils_test.py | utils_test.py | py | 1,078 | python | en | code | 1,305 | github-code | 36 |
71946585384 | from pyspark.sql import SparkSession
from pyspark.sql.functions import lit
# Build a Hive-enabled Spark session. Dynamic partition-overwrite mode means
# repeated runs replace only the partitions they actually write.
spark = (SparkSession
         .builder
         .appName("files_creator")
         .config("spark.sql.sources.partitionOverwriteMode", "dynamic")
         .config("hive.exec.dynamic.partition", "true")
         .config("hive.exec.dynamic.partition.mode", "nonstrict")
         .enableHiveSupport()
         .getOrCreate()
         )
# Generate 100k rows, tag them all with a single partition value, and write
# 200 files into that partition of the Hive table.
df = spark.range(100000).cache()
df2 = df.withColumn("partitionCol", lit("p1"))
df2.repartition(200).write.partitionBy("partitionCol").saveAsTable("schema.table")
| actweird/data_repository | Python/files_creator.py | files_creator.py | py | 574 | python | en | code | 0 | github-code | 36 |
40302056937 | import sys
import datetime
from project import app
from common_utilities import Constant
from project.users.models import Counter, Users
from flask_login import login_required, logout_user, current_user
from flask import Blueprint, render_template, session, make_response, jsonify, request, redirect, url_for
sys.path.append('../../')
from project.users.memcache_ctrl import client, CONSTANT
users_blueprint = Blueprint('users', __name__, template_folder='templates')
@users_blueprint.route('/request_accepted_counter', methods=['GET', 'POST'])
@login_required
def request_accepted_counter():
    """Report progress of the current follow-request acceptance job as JSON.

    Prefers the live counters held in memcache; falls back to the persisted
    Counter row when the cache entry is missing or not a dict.
    """
    if request.method == "POST":
        instagram_username = session.get("username").get("username")
        counter_id = session.get("current_counter_id")
        if counter_id is None:
            # No counter in the session yet: fall back to the most recent
            # counter row recorded for this user.
            counter_id_obj = Counter().get_last_counter_info(instagram_username)
            if counter_id_obj is not None:
                counter_id = counter_id_obj.id
                session["current_counter_id"] = counter_id_obj.id
                session.modified = True
        total_success_request, total_request, failed_request = 0, 0, 0
        is_complete = False
        last_date_time = None
        try:
            dict_index_name = instagram_username.lower() + Constant.CONSTANT().ALL_INFO
            all_info = client.get(dict_index_name)
            if isinstance(all_info, dict):
                # Live counts from memcache.
                total_request = all_info[Constant.CONSTANT().TOTAL_REQUEST_TO_BE_ACCEPT]
                total_success_request = all_info[Constant.CONSTANT().SUCCESSFUL_ACCEPTED]
                failed_request = all_info[Constant.CONSTANT().REQUEST_FAILED]
                is_complete = all_info[Constant.CONSTANT().IS_REQUEST_COMPLETE]
                if is_complete == True:
                    last_date_time = all_info.get("update_date")
            else:
                # Cache miss: read the persisted counter row instead.
                counter_stats_row = Counter.get_one_counter(counter_id)
                if counter_stats_row is not None:
                    total_success_request = counter_stats_row.total_accepted_request
                    total_request = counter_stats_row.input_request_count
                    # BUG FIX: previously this read input_request_count,
                    # reporting the requested total as the failure count.
                    failed_request = counter_stats_row.total_failed_request
                    is_complete = counter_stats_row.is_request_complete
                    last_date_time = counter_stats_row.update_date
        except Exception as error:
            # Best-effort endpoint: fall through with the zeroed defaults.
            print("Not able to get count ", error)
        response_dict = {"successful": total_success_request,
                         "username": instagram_username.upper(),
                         "failed": failed_request,
                         "isComplete": is_complete,
                         "total": total_request,
                         "lastDateTime": last_date_time.strftime("%a %B %d %Y %I:%M:%S %p") if last_date_time is not None else None}
        return make_response(jsonify(response_dict), 200)
@users_blueprint.route('/accept_pending_requests', methods=["GET", "POST"])
@login_required
def accept():
    """GET: render the accept-requests page.

    POST: start accepting up to `customUserInputNumber` pending follow
    requests via the cached bot object, tracking progress in memcache and in
    a new Counter row.
    """
    if request.method == "POST":
        instagram_username = session["username"]["username"]
        # The logged-in bot instance is cached in memcache at login time.
        bot_obj = client.get(instagram_username)["bot_obj"]
        no_to_accept = request.form.get("customUserInputNumber", 0)
        # Seed the live progress counters for this run.
        init_dict_items = {
            Constant.CONSTANT().TOTAL_REQUEST_TO_BE_ACCEPT: no_to_accept,
            Constant.CONSTANT().IS_REQUEST_COMPLETE: False,
            Constant.CONSTANT().SUCCESSFUL_ACCEPTED: 0,
            Constant.CONSTANT().REQUEST_FAILED: 0,
        }
        dict_get_index = instagram_username.lower() + Constant.CONSTANT().ALL_INFO
        client.set(dict_get_index, init_dict_items)
        # Persist a matching counter row so progress survives a cache miss.
        new_user_count_req = Counter(
            insta_username=instagram_username,
            input_request_count=no_to_accept,
            total_accepted_request=0,
            total_failed_request=0
        )
        new_user_count_req.save()
        counter_id = new_user_count_req.id
        session["current_counter_id"] = counter_id
        session.modified = True
        ctr_item = Counter.get_one_counter(session["current_counter_id"])
        # Blocking call: runs the acceptance loop, updating cache + row.
        resp = bot_obj.approve_pending_follow_requests(number_of_requests=int(no_to_accept), ctr_item=ctr_item, init_dict_items=init_dict_items, dict_get_index=dict_get_index, counter_ctr=0)
        if resp == "No request to accept":
            return "No request to accept"
        if resp == None:
            return "True"
        return "True"
    elif request.method == "GET":
        instagram_username = session.get("username").get("username")
        user_obj = Users.query.filter_by(insta_username=instagram_username).first()
        # Days remaining on the user's subscription window.
        last_day = str(days_between(user_obj.till_date)) + " days"
        return render_template("AcceptRequests.html", last_day=last_day)
@users_blueprint.route('/logout', methods=["GET", "POST"])
@login_required
def logout():
    """Log the current user out, clearing their cached bot and session."""
    try:
        instagram_username = current_user.insta_username
        # NOTE(review): on recent flask_login versions is_authenticated is a
        # property, so calling it raises TypeError (swallowed below) —
        # confirm the installed version.
        if current_user.is_authenticated():
            client.delete(instagram_username)
            session.clear()
        logout_user()
        client.delete(instagram_username)
        session.clear()
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Logout stays best-effort: always redirect.
        pass
    return redirect(url_for('core.index'))
def days_between(d1):
    """Return the whole number of days between *d1* and the current UTC date.

    The result is always non-negative; times of day are ignored.
    """
    # Compare calendar dates directly instead of round-tripping each datetime
    # through str()/strptime() as the original did.
    return abs((datetime.datetime.utcnow().date() - d1.date()).days)
# Shared template context, filled in once before the first request.
default_args = {}
footer_var = {"cp_year": datetime.datetime.now().year}
@app.before_first_request
def load_default():
    # Pre-render the shared footer once per process and cache it in the
    # module-level default_args dict.
    default_args["footer_content"] = render_template("footer.html", **footer_var)
return default_args | PatelFarhaan/ACCEPTME-PROD | project/users/views.py | views.py | py | 5,641 | python | en | code | 0 | github-code | 36 |
29884391783 | """ Run Length Decoding """
def main():
""" print decoded password """
text = input()
result = ""
for i in range(len(text)):
if ord(text[i]) > 65:
result += text[i]
temp = len(result) - 1
print(int(result[:temp])*result[-1], end="")
result = ""
else:
result += text[i]
main()
| DefinitelyNotJay/ejudge | Run Length Decoding.py | Run Length Decoding.py | py | 368 | python | en | code | 0 | github-code | 36 |
34694883753 |
import os,sys
sys.path.append("..")
from tools.iPrint import *
from misc.Color import Colors
class Shell:
    """Minimal interactive shell: forwards each input line to os.system."""

    def __init__(self) -> None:
        # Constructing a Shell immediately starts the loop, preserving the
        # original `Shell()` entry-point behaviour.
        self.runShell()

    def runShell(self):
        """Prompt/execute loop; returns 0 on quit command or Ctrl-C."""
        while True:
            try:
                command = input(Colors.RED + "IShell ~$shell> " + Colors.END)
                if command in ["q", "quit", "exit", "exit()"]:
                    return 0
                else:
                    os.system(command)
            except KeyboardInterrupt:
                # Ctrl-C exits the shell cleanly instead of tracebacking.
                return 0
            except Exception as e:
                # BUG FIX: the original did `"Error: " + e`, concatenating a
                # str with an Exception, which itself raises TypeError.
                print(Colors.RED + "[!]" + Colors.END + "Error: " + str(e))
Shell()
| lanbinshijie/IceShell | models/shell.py | shell.py | py | 639 | python | en | code | 18 | github-code | 36 |
import requests
from bs4 import BeautifulSoup

# Map each zodiac sign name to the numeric code used by the site's URLs.
astrosDict = {"牡羊座":"0","金牛座":"1","雙子座":"2","巨蟹座":"3","獅子座":"4","處女座":"5","天秤座":"6","天蠍座":"7","射手座":"8","摩羯座":"9","水瓶座":"10","雙魚座":"11"}
# Ask the user which zodiac sign to look up.
astrosInput = input("請輸入欲查詢的星座名稱:")
# Build the request URL: observation shows that both the `daily_N` path
# segment and the `iAstro=N` query parameter encode the sign's number.
url = "https://astro.click108.com.tw/daily_"+astrosDict[astrosInput]+".php?iAstro="+astrosDict[astrosInput]
r = requests.get(url)
# Parse the returned HTML so it can be queried with CSS selectors.
soup = BeautifulSoup(r.text, 'html.parser')
# Select today's overall-fortune paragraphs.
data = soup.select(".TODAY_CONTENT > p")
# Print today's horoscope for the chosen sign.
print("\n"+astrosInput+"今日的整體運勢為:\n")
print(data[0].text)
print(data[1].text)
# astros[0]="牡羊座";
# astros[1]="金牛座";
# astros[2]="雙子座";
# astros[3]="巨蟹座";
# astros[4]="獅子座";
# astros[5]="處女座";
# astros[6]="天秤座";
# astros[7]="天蠍座";
# astros[8]="射手座";
# astros[9]="摩羯座";
# astros[10]="水瓶座";
# astros[11]="雙魚座";
| byunli/python | 參考_星座.py | 參考_星座.py | py | 1,621 | python | en | code | 0 | github-code | 36 |
38516771350 | import boto.exception
from awscompat import config, util
from awscompat.connections import ec2_conn
from awscompat.tests.base import TestNode
class TestDescribeImages(TestNode):
    """Failing test for https://bugs.launchpad.net/nova/+bug/755829"""

    def pre(self):
        # The configured test image must come back with a non-empty name.
        assert ec2_conn.get_all_images(
            [config['ec2']['test_image_id']]
        )[0].name
class TestSecurityGroups(TestNode):
    """Test security group generation."""

    def pre(self):
        # Create a uniquely-named security group and poll until it shows up.
        self.group_name = self.make_uuid('group_name')
        self.group_desc = self.make_uuid('group_desc')
        self.group = ec2_conn.create_security_group(
            self.group_name,
            self.group_desc
        )
        groups = util.retry(
            lambda: ec2_conn.get_all_security_groups(
                groupnames=[self.group_name])
        )
        assert len(groups)

    def post(self):
        # Delete the group, then verify that querying it now errors.
        self.group.delete()
        # TODO: this might not raise because of delay.
        # so I can't use the retry controller
        # I should write a general purpose request wrapper
        # which polls until it gets a different response.
        @self.assert_raises(boto.exception.EC2ResponseError)
        def test_throw():
            ec2_conn.get_all_security_groups(groupnames=[self.group_name])
class TestKeyPairs(TestNode):
    """Test keypair generation."""

    def pre(self):
        # Create a uniquely-named keypair and check it is listable.
        self.key_name = self.make_uuid('key_name')
        self.keypair = ec2_conn.create_key_pair(self.key_name)
        assert len(ec2_conn.get_all_key_pairs(keynames=[self.key_name]))

    def post(self):
        # Delete the keypair, then verify that querying it now errors.
        ec2_conn.delete_key_pair(self.key_name)
        @self.assert_raises(boto.exception.EC2ResponseError)
        def test_boto_throw():
            ec2_conn.get_all_key_pairs(keynames=[self.key_name])
class TestInstance(TestNode):
    """Test EC2 image launch and termination."""

    # Reuses the keypair and security group created by the dependency nodes.
    depends = {
        'key_pairs': TestKeyPairs,
        'security_group': TestSecurityGroups
    }

    def pre(self, key_pairs=None, security_group=None):
        image_id = config['ec2']['test_image_id']
        instance_config = config['ec2']['test_instance']
        self.image = ec2_conn.get_all_images(image_ids=[image_id])[0]
        self.security_group = security_group
        self.key_pairs = key_pairs
        # Open SSH (tcp/22) to the world so connectivity can be checked below.
        self.security_group.group.authorize('tcp', 22, 22, '0.0.0.0/0')
        self.reservation = self.image.run(
            security_groups=[self.security_group.group_name],
            key_name=self.key_pairs.key_name,
            **instance_config
        )
        # Wait up to 3 minutes for the instance to report 'running'...
        util.wait(
            lambda: self.reservation.instances[0].update() == 'running',
            timeout=60 * 3
        )
        # ...then up to 5 minutes for port 22 to accept connections...
        # NOTE(review): testTelnet/testSSH are not defined here — presumably
        # inherited from TestNode; confirm in the base class.
        util.wait(
            lambda: self.testTelnet(
                self.reservation.instances[0].public_dns_name,
                22
            ),
            timeout=60 * 5
        )
        # ...and finally for a real SSH login with the generated key material.
        assert util.retry(
            lambda: self.testSSH(
                self.key_pairs.keypair.material.encode('ascii'),
                config['ec2']['test_username'],
                self.reservation.instances[0].public_dns_name
            ),
            wait_exp=2
        )

    def post(self):
        # Terminate and wait up to 2 minutes for the state to confirm it.
        self.reservation.instances[0].terminate()
        #ec2_conn.terminate_instances([self.reservation.instances[0].id])
        util.wait(
            lambda: self.reservation.instances[0].update() == 'terminated',
            timeout=60 * 2
        )
        # After termination, SSH access must eventually stop working.
        assert util.retry(
            lambda: not self.testSSH(
                self.key_pairs.keypair.material.encode('ascii'),
                config['ec2']['test_username'],
                self.reservation.instances[0].public_dns_name
            ),
            wait_exp=2
        )
def lying_down(R):
    """Count the horizontal spots in room *R* where a person of length >= 2
    can lie down (maximal runs of '.' of length at least 2, per row).

    R is a sequence of equal-length rows ('.' free, anything else blocked).
    """
    tot = 0
    for row in R:
        # IMPROVEMENT: derive the width from the row itself instead of
        # relying on the module-level global N (rows are length N anyway).
        width = len(row)
        cnt = 0
        for j in range(width):
            if row[j] == '.':
                cnt += 1
                # A run that reaches the wall still counts if long enough.
                if j == width - 1 and cnt >= 2:
                    tot += 1
            else:
                # Run ended at an obstacle: count it if long enough.
                if cnt >= 2:
                    tot += 1
                cnt = 0
    return tot
# Read the N x N room, one row per line.
N = int(input())
room = [list(input()) for _ in range(N)]
# Rotate 90 degrees so the column runs can be counted with the row routine.
room90 = list(map(list, zip(*reversed(room))))
vertical = lying_down(room90)
horizontal = lying_down(room)
print(horizontal, vertical) | yeon-june/BaekJoon | 1652.py | 1652.py | py | 530 | python | en | code | 0 | github-code | 36 |
70233514344 | from flask import render_template,request,redirect,url_for
from .import main
from ..request import get_sources,get_articles,search_news
from ..models import Source
@main.route("/")
def index():
    """
    View root function that returns the index page and its data
    """
    popular_news = get_sources("popular")
    title = "Welcome to the best news outlet"
    # BUG FIX: the local was named `search_news`, shadowing the imported
    # search_news() helper used by the search view in this module.
    article_query = request.args.get("article_query")
    if article_query:
        return redirect(url_for('main.search', news_feed=article_query))
    else:
        return render_template("index.html", title=title, popular=popular_news)
@main.route("/sources/<sources_id>")
def sources(sources_id):
    '''
    Render the articles page for a single news source.
    '''
    title = f"{sources_id}"
    news_source = get_articles(sources_id)
    return render_template("news.html", id=sources_id, title=title, news=news_source)
@main.route("/search/<news_feed>")
def search(news_feed):
    '''
    View function to display the search results
    '''
    # Convert whitespace-separated terms into the 'a+b' query format the
    # news API expects.
    query = "+".join(news_feed.split(" "))
    searched_news = search_news(query)
    # IMPROVEMENT: removed the unused `title` local — the template only
    # receives `article`.
    return render_template("search.html", article=searched_news)
| alexmwaura/NewsApp | app/main/views.py | views.py | py | 1,389 | python | en | code | 0 | github-code | 36 |
72163194664 | # quizz, homework, test college calculator
# 12/09/20
# luis Velasquez
students = {
"frank": {"name": "frank",
"homework": [90.0, 97.0, 75.0, 92.0],
"quizzes": [88.0, 40.0, 94.0],
"tests": [75.0, 90.0]},
"alice": {"name": "alice",
"homework": [100.0, 92.0, 98.0, 100.0],
"quizzes": [82.0, 83.0, 91.0],
"tests": [89.0, 97.0]},
"sarah": {"name": "sarah",
"homework": [0.0, 87.0, 75.0, 22.0],
"quizzes": [0.0, 75.0, 78.0],
"tests": [100.0, 100.0]},
}
for d in students:
print(students[d]["name"])
sumofhw = sum(students[d]["homework"])
hwavg = sumofhw / len(students[d]["homework"])
quizzes = sum(students[d]["quizzes"])
quizavg = quizzes / len(students[d]["quizzes"])
tests = sum(students[d]["tests"])
testavg = tests / len(students[d]["tests"])
hwfinal = 0.10 * hwavg + 0.30 * quizavg + 0.60 * testavg
if hwfinal >= 90:
print("A", )
elif hwfinal >= 80:
print("B")
elif hwfinal >= 70:
print("C")
elif hwfinal >= 60:
print("D")
else:
print("F")
| alejjuuu/Code-work | Python/CCM/grading_list_students.py | grading_list_students.py | py | 1,179 | python | en | code | 2 | github-code | 36 |
7775666459 | import sys
n = int(input())
l = []
for i in range(n):
l.append(int(input()))
lsf = sys.maxsize
pist = 0
maxprofit = 0
for i in l:
if(i < lsf):
lsf = i
pist = i - lsf
if(pist > maxprofit):
maxprofit = pist
print(maxprofit)
| nishu959/Pepcodingdynamicprogramming | busandsellstocksoneyransaction2.py | busandsellstocksoneyransaction2.py | py | 257 | python | en | code | 0 | github-code | 36 |
39114860473 | """
The basic framework of the Iterative Closest Points Matching is provided by Albert-Ludwigs-Universität Freiburg,
the course Introduction to Mobile Robotics (engl.) - Autonomous Mobile Systems
Lecturer: Prof. Dr. Wolfram Burgard, Dr. Michael Tangermann, Dr. Daniel Büscher, Lukas Luft
Co-organizers: Marina Kollmitz, Iman Nematollahi
"""
import numpy as np
import math
import matplotlib.pyplot as plt
import time
def plot_icp(X, P, P0, i, rmse):
    """Draw one ICP iteration: reference X (black), current estimate P (red),
    initial point set P0 (blue), with lines joining current correspondences.

    Blocks briefly (0.5 s) so the animation is visible.
    """
    plt.cla()
    plt.scatter(X[0,:], X[1,:], c='k', marker='o', s=50, lw=0)
    plt.scatter(P[0,:], P[1,:], c='r', marker='o', s=50, lw=0)
    plt.scatter(P0[0,:], P0[1,:], c='b', marker='o', s=50, lw=0)
    plt.legend(('X', 'P', 'P0'), loc='lower left')
    # One line segment per assumed correspondence X[:,k] <-> P[:,k].
    plt.plot(np.vstack((X[0,:], P[0,:])), np.vstack((X[1,:], P[1,:])), c='k')
    plt.title("Iteration: " + str(i) + " RMSE: " + str(rmse))
    plt.axis([-10, 15, -10, 15])
    plt.gca().set_aspect('equal', adjustable='box')
    plt.draw()
    plt.pause(0.5)
    return
def generate_data():
    """Build the demo point sets for ICP.

    Returns (X, P1, P2, P3, P4): the L-shaped reference X, two noisy
    translated+rotated copies with known correspondences (P1: 10 deg,
    P2: 110 deg), and the same two with their columns shuffled (P3, P4).
    """
    # L-shaped reference point set (2 x 16).
    X = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9],
                  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,-1,-2,-3,-4,-5]])
    # Perturb with Gaussian noise, then translate by (1, 1).
    P = X + 0.05 * np.random.normal(0, 1, X.shape)
    P[0,:] = P[0,:] + 1
    P[1,:] = P[1,:] + 1
    # Rotation angles in radians.
    theta1 = ( 10.0 / 360) * 2 * np.pi
    theta2 = (110.0 / 360) * 2 * np.pi

    def rotation(theta):
        # 2-D rotation matrix for the given angle.
        return np.array([[math.cos(theta), -math.sin(theta)],
                         [math.sin(theta), math.cos(theta)]])

    # Sets with known correspondences...
    P1 = np.dot(rotation(theta1), P)
    P2 = np.dot(rotation(theta2), P)
    # ...and with shuffled (unknown) correspondences.
    P3 = np.random.permutation(P1.T).T
    P4 = np.random.permutation(P2.T).T
    return X, P1, P2, P3, P4
def closest_point_matching(X, P):
    """
    Performs closest point matching of two point sets.

    Greedily pairs the globally closest (reference, candidate) columns first,
    removing both from consideration, until every column of X has a match.

    Arguments:
    X -- reference point set, shape (d, n)
    P -- point set to be matched with the reference, shape (d, n)

    Output:
    P_matched -- reordered P, so that the elements in P match the elements in X
    """
    num_pts = X.shape[1]
    P_matched = np.empty(P.shape)
    # IMPROVEMENT: vectorised pairwise distance matrix (was a double loop).
    dist_mat = np.linalg.norm(X[:, :, None] - P[:, None, :], axis=0)
    # IMPROVEMENT: instead of re-scanning with used-row/column membership
    # lists, invalidate a whole row and column per assignment — exactly
    # num_pts iterations instead of up to num_pts**2.
    for _ in range(num_pts):
        i, j = np.unravel_index(np.argmin(dist_mat, axis=None), dist_mat.shape)
        P_matched[:, i] = P[:, j]
        dist_mat[i, :] = math.inf
        dist_mat[:, j] = math.inf
    return P_matched
def icp(X, P, do_matching):
    """Run 10 iterations of ICP aligning P onto the reference X.

    When do_matching is True the correspondences are re-estimated each
    iteration with closest_point_matching; otherwise column k of P is assumed
    to correspond to column k of X. Each iteration is plotted.
    """
    P0 = P
    for i in range(10):
        # calculate RMSE over the current correspondences
        rmse = 0
        for j in range(P.shape[1]):
            rmse += math.pow(P[0,j] - X[0,j], 2) + math.pow(P[1,j] - X[1,j], 2)
        rmse = math.sqrt(rmse / P.shape[1])

        # print and plot
        print("Iteration:", i, " RMSE:", rmse)
        plot_icp(X, P, P0, i, rmse)

        # data association
        if do_matching:
            P = closest_point_matching(X, P)

        # subtract center of mass from each set
        mx = np.transpose([np.mean(X, 1)])
        mp = np.transpose([np.mean(P, 1)])
        X_prime = X - mx
        P_prime = P - mp

        # singular value decomposition of the cross-covariance
        W = np.dot(X_prime, P_prime.T)
        U, _, V = np.linalg.svd(W)

        # calculate rotation and translation (Horn's closed-form solution)
        R = np.dot(U, V.T)
        t = mx - np.dot(R, mp)

        # apply transformation to the whole point set
        P = np.dot(R, P) + t
    return
def main():
    """Demo: run ICP with matching on the shuffled, heavily-rotated set."""
    X, _, _, _, P4 = generate_data()
    # The other variants (known correspondences, small rotation) can be run
    # by passing P1..P3 to icp() instead.
    icp(X, P4, True)
    plt.waitforbuttonpress()
if __name__ == "__main__":
main()
| SiweiGong/mobile_robot_framework | icp_matching.py | icp_matching.py | py | 4,026 | python | en | code | 1 | github-code | 36 |
25051037536 | import sys
import math
from math import *
from numpy import *
import matplotlib.pyplot as plt
from D52 import R_lst, t_lst, L_lst
from mat import A2195_T84 as mat
from D53 import buckling_opt
from D54 import getConfigs
def format_list(beam_configurations):
    """Return the mass entry (index 3) of each beam configuration."""
    # BUG FIX: the loop previously ranged over len(m_viable) — a module-level
    # global — instead of the parameter it was indexing into.
    return [config[3] for config in beam_configurations]
def tankandbeam_opt(m_viable, beam_viable):
    """Find the tank+beam combination with the lowest combined mass.

    Returns (min mass, L, R, t, index) for the best configuration, with the
    dimensions taken from the baseline buckling_opt run.
    """
    viable_list = buckling_opt(1590, R_lst, t_lst, L_lst, mat)[0]  # get list of all dimensions
    m_final = []
    m_beam_config = []
    m_viable_int = []
    for k in range(len(m_viable)):
        m_total = 8 * beam_viable[k] + 1590  # adjust total supported mass for 8 beams
        m_tank = buckling_opt(m_total, R_lst, t_lst, L_lst, mat)[0][3]  # take specific tank-mass list
        if k >= len(m_tank):  # ignore indices that don't exist in the filtered list
            break
        m_viable_int.append(m_tank[k])
        m_beam_config.append(8*beam_viable[k])
    for i in range(len(m_viable_int)):
        tank_int = m_viable_int[i]  # tank mass
        beam_int = m_beam_config[i]  # total beam mass
        m_int = tank_int + beam_int  # combined mass of this configuration
        m_final.append(m_int)
    # Pick the lightest combination and look up its dimensions.
    m_min = min(m_final)
    m_min_index = m_final.index(m_min)
    L_min = viable_list[2][m_min_index]
    R_min = viable_list[0][m_min_index]
    t_min = viable_list[1][m_min_index]
    return(m_min, L_min, R_min, t_min, m_min_index)
m_viable = buckling_opt(1590,R_lst, t_lst, L_lst, mat)[0][3] #get the tank mass list from D53
beam_configurations = getConfigs(1)
beam_viable = format_list(beam_configurations) #get the beam mass list from D54
print(tankandbeam_opt(m_viable, beam_viable))
| C08-System-Design/WP5 | D55.py | D55.py | py | 1,865 | python | en | code | 0 | github-code | 36 |
8797239686 |
import json
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import os
import argparse
from pathlib import Path
import tqdm
#import models
from models.backbone import Backbone
from models.classification_head import ClassificationHead
#import dataloaders
from dataloader.cifar10 import CIFAR10
from dataloader.fashion_mnist import FashionMNIST
from dataloader.multi_task_batch_scheduler import BatchSchedulerSampler
# import dataset classes for concatenation purposes
from torch.utils.data.dataset import ConcatDataset
#import progressbar
from utils.utils import progress_bar
from utils.variables import classifier_dict
# trying to figure out how to enumerate over the two dataloaders
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument("--batch-size", type=int, default=128,
help="Training Batch size")
parser.add_argument("--n_epochs", type=int, default=700,
help="No of epochs")
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument("--cifar_checkpoint_path", type=str, default="",
help="CIFAR10's checkpoint")
parser.add_argument("--fashion_mnist_checkpoint_path", type=str, default="",
help="FASHION-MNIST's checkpoint")
parser.add_argument("--training_type", type=str, default="conditioned",
help="type of training (conditioned")
parser.add_argument("--num-workers", type=int, default=2,
help="Number of workers for dataloaders")
parser.add_argument("--backbone", type=str, default="resnet18",
help="BACKBONE TO TRAIN WITH:resnet18/resnet50/resnest50")
args = parser.parse_args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#global variables
best_cifar_acc = 0 # best test accuracy for cifar
best_fashion_mnist_acc = 0 # best test accuracy for fashion mnist
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# define learning rate
learning_rate = 0
# returns trainloader and two testloaders
def get_dataloaders():
    """Build the multi-task train loader and the two per-dataset test loaders.

    Returns (trainloader, testloader_cifar, testloader_fashion_mnist). The
    train loader concatenates CIFAR10 and FashionMNIST and uses
    BatchSchedulerSampler so each batch comes from a single dataset.
    """
    # combined trainloader for cifar & fash-mnist
    # samples alternately from the two batches
    cifar = CIFAR10(data_root="dataset/cifar10",
                    transform=None,
                    mode='train',
                    )
    fashion_mnist = FashionMNIST(data_root="dataset/fashion-mnist",
                                 transform=None,
                                 mode='train',
                                 )
    concat_dataset = ConcatDataset([cifar, fashion_mnist])
    trainloader = torch.utils.data.DataLoader(dataset=concat_dataset,
                                              sampler=BatchSchedulerSampler(dataset=concat_dataset,
                                                                            batch_size=args.batch_size),
                                              batch_size=args.batch_size,
                                              )
    # separate test loaders so each dataset is evaluated on its own.
    testset_cifar = CIFAR10(data_root="dataset/cifar10",
                            transform=None,
                            mode='test')
    testloader_cifar = torch.utils.data.DataLoader(
        testset_cifar, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    testset_fashion_mnist = FashionMNIST(data_root="dataset/fashion-mnist",
                                         transform=None,
                                         mode='test')
    testloader_fashion_mnist = torch.utils.data.DataLoader(
        testset_fashion_mnist, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    return trainloader, testloader_cifar, testloader_fashion_mnist
# conditional training
# conditional training
def train(epoch):
    """Run one conditioned training epoch over the combined loader.

    Each batch comes from a single dataset (enforced by the batch sampler);
    the conditioning label of the batch's first sample selects which
    classification head (and loss) is used, and only that head plus the
    shared backbone are stepped.
    """
    print('\nEpoch: %d' % epoch)
    print('\nLearning Rate: %f'%args.lr)
    # print('Total Epochs: %d' % args.n_epochs)
    print('Training Type: : %s' % args.training_type)
    model.train()
    classifier_cifar.train()
    classifier_fashion_mnist.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets, meta) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        # NHWC -> NCHW for the conv backbone.
        inputs = inputs.permute(0, 3, 1, 2)
        embedding_labels = meta['conditioning_label'].to(device)
        embedding_labels = embedding_labels.unsqueeze(1)
        optim_model.zero_grad()
        optim_classifier_cifar.zero_grad()
        optim_classifier_fashion_mnist.zero_grad()
        # fwd pass through common backbone, then both heads.
        output = model(inputs, embedding_labels)
        output_cifar = classifier_cifar(output)
        output_fashion_mnist = classifier_fashion_mnist(output)
        loss_cifar = criterion(output_cifar, targets)
        loss_fashion_mnist = criterion(output_fashion_mnist, targets)
        # print(meta['conditioning_label'][0],"label")
        # The first sample's label stands for the whole batch — assumes the
        # sampler never mixes datasets within a batch.
        #case: dataset is cifar
        if meta['conditioning_label'][0] == 0:
            #print("dataset: cifar")
            loss = loss_cifar
            outputs = output_cifar
            loss.backward()
            optim_classifier_cifar.step()
            optim_model.step()
        #case: dataset is fashion_mnist
        if meta['conditioning_label'][0] == 1:
            #print("dataset: fashion_mnist")
            loss = loss_fashion_mnist
            outputs = output_fashion_mnist
            loss.backward()
            optim_classifier_fashion_mnist.step()
            optim_model.step()
        train_loss += loss.item()
        _, predicted = outputs.max(1)
        # print("predicted",predicted)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
        # break for debugging
# code to dump config at the path
# code to dump config at the path
def dump_config(epoch, save_dir):
    """Write the current training state (epoch, lr, best accuracies, total
    epochs) to <save_dir>/config.json."""
    config = {
        # BUG FIX: key was previously 'epoch:' — the stray colon was baked
        # into the JSON key name.
        'epoch': epoch,
        'learning_rate': learning_rate,
        'cifar_acc': best_cifar_acc,
        'fashion_mnist_acc': best_fashion_mnist_acc,
        'n_epochs': args.n_epochs
    }
    with open(save_dir+'/config.json', 'w') as fp:
        json.dump(config, fp)
def _evaluate_split(epoch, loader, classifier, best_acc, acc_key, subdir):
    """Evaluate one dataset's head; checkpoint + dump config on improvement.

    Returns the (possibly updated) best accuracy for this dataset.
    """
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets, meta) in enumerate(loader):
            inputs, targets = inputs.to(device), targets.to(device)
            # NHWC -> NCHW for the conv backbone.
            inputs = inputs.permute(0, 3, 1, 2)
            embedding_labels = meta['conditioning_label'].to(device)
            embedding_labels = embedding_labels.unsqueeze(1)
            outputs = model(inputs, embedding_labels)
            outputs = classifier(outputs)
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(batch_idx, len(loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
    # Save checkpoint when this dataset's accuracy improves.
    acc = 100.*correct/total
    if acc > best_acc:
        print('Saving..')
        state = {
            'model': model.state_dict(),
            'classifier_cifar': classifier_cifar.state_dict(),
            'classifier_fashion_mnist': classifier_fashion_mnist.state_dict(),
            acc_key: acc,
            'epoch': epoch,
        }
        torch.save(state, str(save_dir/subdir/'checkpoint.pth'))
        best_acc = acc
        dump_config(epoch, str(save_dir/subdir))
    return best_acc


def test(epoch):
    """Evaluate both heads on their test sets, checkpointing each on
    improvement.

    IMPROVEMENT: the original duplicated ~50 lines of evaluation code per
    dataset; both passes now share _evaluate_split.
    """
    print("in testing code")
    global best_cifar_acc
    global best_fashion_mnist_acc
    model.eval()
    classifier_cifar.eval()
    classifier_fashion_mnist.eval()
    ########## EVALUATE ON CIFAR TESTLOADER ONCE ############################
    best_cifar_acc = _evaluate_split(
        epoch, testloader_cifar, classifier_cifar,
        best_cifar_acc, 'cifar_acc', 'cifar')
    ########## EVALUATE ON FASHION MNIST TESTLOADER ONCE ####################
    best_fashion_mnist_acc = _evaluate_split(
        epoch, testloader_fashion_mnist, classifier_fashion_mnist,
        best_fashion_mnist_acc, 'fashion_mnist_acc', 'fashion_mnist')
###################################### TRAINING STARTS HERE ############################
# Top-level experiment setup: directories, dataloaders, model, heads, loss, optimizers.
local_data_path = Path('.').absolute()
# create experiment
experiment = args.training_type
save_dir = (local_data_path/'experiments'/args.backbone/experiment)
# One checkpoint directory per dataset.
(save_dir/'cifar').mkdir(exist_ok=True, parents=True)
(save_dir/'fashion_mnist').mkdir(exist_ok=True, parents=True)
# get dataloaders
trainloader, testloader_cifar, testloader_fashion_mnist = get_dataloaders()
# get model with embedding
model = Backbone(backbone=args.backbone,apply_embedding=True).to(device)
# get two separate classifiers
# Both heads output 10 classes; input width depends on the chosen backbone.
classifier_cifar = ClassificationHead(num_classes=10,in_channels=classifier_dict[args.backbone]).to(device)
classifier_fashion_mnist = ClassificationHead(num_classes=10,in_channels=classifier_dict[args.backbone]).to(device)
# create loss
criterion = nn.CrossEntropyLoss()
# create optimizers
# Separate SGD optimizers so backbone and each head can be stepped independently.
optim_model = optim.SGD(model.parameters(), lr=args.lr,
                      momentum=0.9, weight_decay=5e-4)
optim_classifier_cifar = optim.SGD(classifier_cifar.parameters(), lr=args.lr,
                      momentum=0.9, weight_decay=5e-4)
optim_classifier_fashion_mnist = optim.SGD(classifier_fashion_mnist.parameters(), lr=args.lr,
                      momentum=0.9, weight_decay=5e-4)
############ CODE FOR RESUMING THE TRAINING ###########################################
# Resuming requires BOTH checkpoint paths; the backbone weights come from the CIFAR one.
if args.cifar_checkpoint_path != "" and args.fashion_mnist_checkpoint_path!= "":
    # Load data from cifar checkpoint.
    print('==> Resuming from cifar..')
    checkpoint = torch.load(args.cifar_checkpoint_path)
    # LOAD THE MODEL FROM CIFAR BEST WEIGHT FOR NOW, TRY LOADING FROM FASHION-MNIST IN ANOTHER EXPERIMENT
    model.load_state_dict(checkpoint['model'])
    classifier_cifar.load_state_dict(checkpoint['classifier_cifar'])
    best_cifar_acc = checkpoint['cifar_acc']
    cifar_epoch = checkpoint['epoch']
    # Load data from fashion-mnist checkpoint.
    print('==> Resuming from fashion mnist..')
    checkpoint = torch.load(args.fashion_mnist_checkpoint_path)
    #model.load_state_dict(checkpoint['model'])
    classifier_fashion_mnist.load_state_dict(checkpoint['classifier_fashion_mnist'])
    best_fashion_mnist_acc = checkpoint['fashion_mnist_acc']
    fashion_mnist_epoch = checkpoint['epoch']
    # Resolve conflicts in loading data from two separate checkpoints
    # Restart from the earlier of the two epochs so neither dataset skips training.
    start_epoch= min(cifar_epoch, fashion_mnist_epoch)
def update_learning_rate(epoch, n_epochs):
    """Step-decay LR schedule applied to the backbone and both classifier heads.

    Sets the module-level ``learning_rate`` to 0.1 / 0.01 / 0.001 depending on
    training progress (epoch / n_epochs) and writes it into every param group
    of the three module-level optimizers.
    """
    global learning_rate
    progress = epoch / n_epochs
    if progress < 0.4:
        learning_rate = 0.1
    elif progress < 0.7:
        learning_rate = 0.01
    else:
        learning_rate = 0.001
    # The same rate is used for the backbone and both heads.
    for optimizer in (optim_model,
                      optim_classifier_cifar,
                      optim_classifier_fashion_mnist):
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate
    print("ratio: ", progress, " lr: ", learning_rate)
def main():
    """Run the joint CIFAR / Fashion-MNIST training loop for 400 epochs from start_epoch."""
    # apply the training schedue
    for epoch in range(start_epoch, start_epoch+400):
        # call train
        #update_learning_rate(epoch, args.n_epochs)
        train(epoch)
        test(epoch)
        # Best accuracies are module-level globals updated inside test().
        print("epoch: ", epoch, "Cifar best accuracy found is: ", best_cifar_acc,
            "fashion mnist best accuracy found is: ", best_fashion_mnist_acc)
    print("Cifar best accuracy found is: ", best_cifar_acc,
        "fashion mnist best accuracy found is: ", best_fashion_mnist_acc)
if __name__ == '__main__':
    main()
| rajatmodi62/multi-purpose-networks | train_conditioned.py | train_conditioned.py | py | 14,471 | python | en | code | 0 | github-code | 36 |
#!/usr/bin/env python
# ROS node that logs the robot pose (map -> base_footprint) to a text file.
import rospy
import tf
if __name__ == '__main__':
    rospy.init_node('pose_saver')
    listener = tf.TransformListener()
    # Block until the transform is available at all before entering the loop.
    listener.waitForTransform('/map', '/base_footprint', rospy.Time(0), rospy.Duration(1.0))
    rate = rospy.Rate(20.0)
    # NOTE(review): hard-coded absolute output path -- adjust per machine.
    with open('/media/hao/hao/dataset/ros_pose.txt', 'w') as f:
        while not rospy.is_shutdown():
            now = rospy.Time.now()
            # Wait for the transform at this exact timestamp (up to 10 s).
            listener.waitForTransform('/map', '/base_footprint', now, rospy.Duration(10.0))
            (trans, rot) = listener.lookupTransform('/map', '/base_footprint', now)
            euler = tf.transformations.euler_from_quaternion(rot)
            # One line per sample: timestamp, x, y, yaw.
            f.writelines(str(now) + '\t' + str(trans[0]) + '\t' + str(trans[1]) + '\t' + str(euler[2]) + '\n')
            rate.sleep()
| QinHarry/CNN_SLAM | data/ros/src/mit_data/src/pose_saver.py | pose_saver.py | py | 788 | python | en | code | 6 | github-code | 36 |
11903172959 | import time
from flask import request
from data_sheet import session, ShortMessage
from utils.tool import short_message
from ..user import bp
@bp.route('/mobile_text')
def test():
    """Send an SMS verification code to `phone` and persist it in ShortMessage.

    Response codes: 200 ok, 201 missing phone, 202 send failed, 203 DB error.
    """
    phone = request.json.get("phone")
    if phone is None:
        return {'code':201,'message':'请输入手机号码'}
    try:
        # Calls the SMS provider; returns the generated code.
        indonesia = short_message(phone)
    except Exception as e:
        print(e)
        # NOTE(review): key 'meaasge' is a typo, inconsistent with 'message'
        # used by every other branch -- confirm clients before renaming.
        return {'code':202,'meaasge':'发送失败,请稍后再试'}
    try:
        # Upsert: one ShortMessage row per phone number.
        result = session.query(ShortMessage).filter(ShortMessage.phonenumber == phone).first()
        if result is None:
            # 'meaasge' here matches the (misspelled) model column name.
            newMessage = ShortMessage(phonenumber=phone,meaasge=indonesia,time=str(time.time()))
            session.add(newMessage)
            session.commit()
        else:
            result.meaasge = indonesia
            result.time = str(time.time())
            session.add(result)
            session.commit()
    except Exception as e:
        session.rollback()
        return {'code':203,'message':'验证码无效'}
    return {'code':200,'message':'success'}
31949200751 |
# Escreva um programa para aprovar o empréstimo bancario para a compra de uma casa. O valor da casa,
# o sálario do comprador e em quantos anos ela vai pagar.
# calcule o valor da prestação mensal, sabendo que ela não pode excerder 30 % do sálario ou então o
# emprestimo será negado.
############################################################################################################################
# Read house price, monthly income and payback period (years).
valor_casa = float(input('\033[37;1m Digite o valor da casa:\033[m '))
renda = float(input('\033[37;1m Digite o valor da sua renda:\033[m '))
tempo = int(input('\033[37;1m Digite quantos anos você que pagar:\033[m'))
# Monthly installment over `tempo` years.
valor_prestaçao = valor_casa / (tempo * 12)
# The installment may use at most 30% of the income.
renda = (renda * 30) / 100
if valor_prestaçao <= renda:
    print('\033[34;1m O emprestimo foi aprovado!!\033[m')
else:
    print('\033[31;1m O emprestimo foi negado\n sua renda não e o suficiente!!\033[m')
| Valdo04/Curso_em_videos_python | ex0036.py | ex0036.py | py | 901 | python | pt | code | 0 | github-code | 36 |
36918913878 | # based on A Plus Coding's tutorial at: https://www.youtube.com/watch?v=GKe1aGQlKDY&list=PLryDJVmh-ww1OZnkZkzlaewDrhHy2Rli2
import pygame
import sys
from game_window_class import *
from button_class import *
FPS = 60 # max frames per second
EVALUATE_DAMPER = 10 # decrease to fo evaluations faster
WIDTH = 1150
HEIGHT = 800
GAME_WIN_X = 25
GAME_WIN_Y = 75
BG_COLOUR = (59, 55, 53)
RUN_BUTTON_COLOUR = (72, 107, 79)
RUN_BUTTON_HOVER_COLOUR = (82, 125, 91)
RUN_BUTTON_BORDER_COLOUR = (0,0,0) # (33,33,33)
PAUSE_BUTTON_COLOUR = (130, 113, 77)
PAUSE_BUTTON_HOVER_COLOUR = (150, 130, 87)
PAUSE_BUTTON_BORDER_COLOUR = (0,0,0)
RESET_BUTTON_COLOUR = (110, 69, 69)
RESET_BUTTON_HOVER_COLOUR = (135, 84, 84)
RESET_BUTTON_BORDER_COLOUR = (0,0,0)
#-------------------- SETTING FUNCTIONS --------------------#
def get_events():
    """Handle input in the 'setting' state: quit, grid-cell toggling, button clicks."""
    # NOTE(review): byte-identical to running_get_events/paused_get_events --
    # candidates for consolidation into one handler.
    global running
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            mouse_pos = pygame.mouse.get_pos()
            if mouse_on_grid(mouse_pos):
                click_cell(mouse_pos)
            else:
                for button in buttons:
                    button.click()
def update():
    """Per-frame update for the 'setting' state (no simulation steps)."""
    game_window.update()
    for button in buttons:
        # mouse_pos and state are module-level globals set by the main loop.
        button.update(mouse_pos, game_state=state)
def draw():
    """Draw background, buttons and the grid for the 'setting' state."""
    window.fill(BG_COLOUR)
    for button in buttons:
        button.draw()
    game_window.draw()
#-------------------- RUNNING FUNCTIONS --------------------#
def running_get_events():
    """Handle input while the simulation runs (same behaviour as get_events)."""
    global running
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            mouse_pos = pygame.mouse.get_pos()
            if mouse_on_grid(mouse_pos):
                click_cell(mouse_pos)
            else:
                for button in buttons:
                    button.click()
def running_update():
    """Per-frame update while running; advances the simulation at a damped rate."""
    game_window.update()
    for button in buttons:
        button.update(mouse_pos, game_state=state)
    # Evaluate one generation every FPS // EVALUATE_DAMPER frames.
    if frame_count%(FPS//EVALUATE_DAMPER) == 0:
        game_window.evaluate()
def running_draw():
    """Draw background, buttons and the grid for the 'running' state."""
    window.fill(BG_COLOUR)
    for button in buttons:
        button.draw()
    game_window.draw()
#-------------------- PAUSED FUNCTIONS --------------------#
def paused_get_events():
    """Handle input while paused (same behaviour as get_events)."""
    global running
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            mouse_pos = pygame.mouse.get_pos()
            if mouse_on_grid(mouse_pos):
                click_cell(mouse_pos)
            else:
                for button in buttons:
                    button.click()
def paused_update():
    """Per-frame update while paused (no simulation steps)."""
    game_window.update()
    for button in buttons:
        button.update(mouse_pos, game_state=state)
def paused_draw():
    """Draw background, buttons and the grid for the 'paused' state."""
    window.fill(BG_COLOUR)
    for button in buttons:
        button.draw()
    game_window.draw()
def mouse_on_grid(pos):
    """Return True when the (x, y) screen position lies inside the grid area."""
    x, y = pos
    inside_x = GAME_WIN_X < x < WIDTH - GAME_WIN_X
    inside_y = GAME_WIN_Y < y < GAME_WIN_Y + WIN_HEIGHT
    return inside_x and inside_y
def click_cell(pos):
    """Toggle the alive state of the grid cell under the given screen position."""
    col = (pos[0] - GAME_WIN_X) // CELL_SIZE
    row = (pos[1] - GAME_WIN_Y) // CELL_SIZE
    cell = game_window.grid[row][col]
    cell.alive = not cell.alive
def make_buttons():
    """Create the four UI buttons; visibility is driven by each button's `state`."""
    buttons = []
    # RUN
    buttons.append(Button(window, WIDTH//2-50, 25, 100, 30, text='RUN',
                          colour=RUN_BUTTON_COLOUR, hover_colour=RUN_BUTTON_HOVER_COLOUR,
                          border_colour=RUN_BUTTON_BORDER_COLOUR, bold_text=True,
                          function=run_game, state='setting'))
    # PAUSE
    buttons.append(Button(window, WIDTH//2-50, 25, 100, 30, text='PAUSE',
                          colour=PAUSE_BUTTON_COLOUR, hover_colour=PAUSE_BUTTON_HOVER_COLOUR,
                          border_colour=PAUSE_BUTTON_BORDER_COLOUR, bold_text=True,
                          function=pause_game, state='running'))
    # RESUME
    buttons.append(Button(window, WIDTH//5-50, 25, 100, 30, text='RESUME',
                          colour=RUN_BUTTON_COLOUR, hover_colour=RUN_BUTTON_HOVER_COLOUR,
                          border_colour=RUN_BUTTON_BORDER_COLOUR, bold_text=True,
                          function=run_game, state='paused'))
    # RESET
    buttons.append(Button(window, WIDTH//1.25-50, 25, 100, 30, text='RESET',
                          colour=RESET_BUTTON_COLOUR, hover_colour=RESET_BUTTON_HOVER_COLOUR,
                          border_colour=RESET_BUTTON_BORDER_COLOUR, bold_text=True,
                          function=reset_grid, state='paused'))
    return buttons
def run_game():
    """Switch the global game state to 'running'."""
    global state
    state = 'running'
def pause_game():
    """Switch the global game state to 'paused'."""
    global state
    state = 'paused'
def reset_grid():
    """Clear the grid and return to the 'setting' state."""
    global state
    state = 'setting'
    game_window.reset_grid()
# Initialise pygame and the module-level objects shared by the state handlers.
pygame.init()
window = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
game_window = GameWindow(window, GAME_WIN_X, GAME_WIN_Y)
buttons = make_buttons()
state = 'setting'
frame_count = 0
running = True
# Main loop: dispatch events/update/draw based on the current state.
while running:
    frame_count += 1
    mouse_pos = pygame.mouse.get_pos()
    if state == 'setting':
        get_events()
        update()
        draw()
    if state == 'running':
        running_get_events()
        running_update()
        running_draw()
    if state == 'paused':
        paused_get_events()
        paused_update()
        paused_draw()
    pygame.display.update()
    clock.tick(FPS)
pygame.quit()
sys.exit()
| ruankie/game-of-life | main.py | main.py | py | 5,738 | python | en | code | 0 | github-code | 36 |
37362852715 | import PAsearchSites
import PAgenres
def search(results,encodedTitle,title,searchTitle,siteNum,lang,searchByDateActor,searchDate,searchAll,searchSiteID):
    """Scrape the site's search page and append scored MetadataSearchResult entries.

    HTML, Log, Util, MetadataSearchResult and datetime are injected by the Plex
    plugin runtime, not imported here.
    """
    searchResults = HTML.ElementFromURL(PAsearchSites.getSearchSearchURL(siteNum) + encodedTitle)
    for searchResult in searchResults.xpath('//div[@class="video-item-big"]'):
        #Log(searchResult.text_content())
        titleNoFormatting = searchResult.xpath('.//a[@class="v-title"]')[0].text_content()
        Log("Result Title: " + titleNoFormatting)
        # '/' -> '+' so the relative URL survives being embedded in the result id.
        curID = searchResult.xpath('.//a')[0].get("href")
        curID = curID.replace('/','+')
        Log("ID: " + curID)
        releasedDate = searchResult.xpath('.//span[@class="v-stat"]//span[@class="txt"]')[0].text_content()
        releasedDate = datetime.strptime(releasedDate, '%d.%m.%y').strftime('%Y-%m-%d')
        Log(releasedDate)
        Log(str(curID))
        # NOTE(review): lowerResultTitle is never used.
        lowerResultTitle = str(titleNoFormatting).lower()
        # Score by title similarity, or by date match when searching by date/actor.
        if searchByDateActor != True:
            score = 102 - Util.LevenshteinDistance(searchTitle.lower(), titleNoFormatting.lower())
        else:
            searchDateCompare = datetime.strptime(searchDate, '%Y-%m-%d').strftime('%Y-%m-%d')
            score = 102 - Util.LevenshteinDistance(searchDateCompare.lower(), releasedDate.lower())
        titleNoFormatting = "[" + releasedDate + "] " + titleNoFormatting + " [" + PAsearchSites.searchSites[siteNum][1] + "]"
        results.Append(MetadataSearchResult(id = curID + "|" + str(siteNum), name = titleNoFormatting, score = score, lang = lang))
    return results
def update(metadata,siteID,movieGenres):
    """Populate Plex metadata (summary, date, genres, actors, art) from the scene page.

    HTML, Log, Proxy, HTTP and datetime are injected by the Plex plugin runtime.
    """
    Log('******UPDATE CALLED*******')
    # The search phase stored the relative URL with '/' encoded as '+'.
    temp = str(metadata.id).split("|")[0].replace('+','/')
    url = PAsearchSites.getSearchBaseURL(siteID) + temp
    detailsPageElements = HTML.ElementFromURL(url)
    # Summary
    metadata.studio = "Porndoe Premium"
    metadata.summary = detailsPageElements.xpath('//p[@class="description"]')[0].text_content()
    metadata.title = detailsPageElements.xpath('//h1[@class="big-container-title"]')[0].text_content()
    # NOTE(review): fixed-offset slice [71:-37] is brittle against page layout changes.
    releasedDate = detailsPageElements.xpath('//div[@class="col date"]')[0].text_content()[71:-37]
    Log(releasedDate)
    date_object = datetime.strptime(releasedDate, '%d.%m.%y')
    metadata.originally_available_at = date_object
    metadata.year = metadata.originally_available_at.year
    metadata.tagline = detailsPageElements.xpath('//div[@class="col channel"]//a')[0].text_content()
    Log(metadata.tagline)
    metadata.collections.clear()
    metadata.collections.add(metadata.tagline)
    # Genres
    movieGenres.clearGenres()
    genres = detailsPageElements.xpath('//a[contains(@href,"/videos/category/")]')
    if len(genres) > 0:
        for genreLink in genres:
            genreName = genreLink.text_content().strip('\n').lower()
            movieGenres.addGenre(genreName)
    genres = detailsPageElements.xpath('//a[contains(@href,"/videos/tag/")]')
    if len(genres) > 0:
        for genreLink in genres:
            genreName = genreLink.text_content().strip('\n').lower()
            movieGenres.addGenre(genreName)
    # Actors
    metadata.roles.clear()
    actors = detailsPageElements.xpath('//a[contains(@class,"pornstar")]')
    if len(actors) > 0:
        for actorLink in actors:
            role = metadata.roles.new()
            actorName = actorLink.text_content()
            role.name = actorName
            # Fetch each actor's page for a headshot.
            actorPageURL = "https://porndoepremium.com" + actorLink.get("href")
            actorPage = HTML.ElementFromURL(actorPageURL)
            actorPhotoURL = actorPage.xpath('//img[@alt="PS"]')[0].get("src")
            role.photo = actorPhotoURL
    # Posters/Background
    valid_names = list()
    metadata.posters.validate_keys(valid_names)
    metadata.art.validate_keys(valid_names)
    background = detailsPageElements.xpath('//img[@class="owl-lazy"]')[0].get("data-src").replace("thumb/0x250/","crop/1920x1080/")
    try:
        metadata.art[background] = Proxy.Preview(HTTP.Request(background).content, sort_order = 1)
    except:
        # Best-effort: a missing background must not abort the whole update.
        pass
    posters = detailsPageElements.xpath('//img[@class="owl-lazy"]')
    posterNum = 1
    for poster in posters:
        posterURL = poster.get("data-src")
        metadata.posters[posterURL] = Proxy.Preview(HTTP.Request(posterURL).content, sort_order = posterNum)
        posterNum += 1
    return metadata
| PhoenixPlexCode/PhoenixAdult.bundle | Contents/Code/sitePorndoePremium.py | sitePorndoePremium.py | py | 4,420 | python | en | code | 102 | github-code | 36 |
25325013946 | from rest_framework import status
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from rest_framework.generics import RetrieveAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from accounts.models import Profile
from rates.api.serializers import FiatRateListSerializer
from rates.models import FiatRate
class FiatRateAPIView(RetrieveAPIView):
    """Return the fiat/dollar rate for the requesting user's country.

    Falls back to the 'United States Of America' rate when the user's country
    has no FiatRate entry (or any lookup error occurs).
    """
    permission_classes = (IsAuthenticatedOrReadOnly,)
    authentication_classes = [BasicAuthentication, SessionAuthentication, JSONWebTokenAuthentication]

    def get(self, request):
        """Build the rate payload for request.user.

        The original duplicated the entire response construction in its except
        branch; both paths are collapsed here, differing only in which FiatRate
        row is used.
        """
        # The original re-fetched the profile inside except too, so a missing
        # profile always propagated; fetching it once preserves that.
        user_profile_obj = Profile.objects.get(user=request.user)
        try:
            fiat_rate = FiatRate.objects.get(country=user_profile_obj.country)
        except Exception:
            # Unknown/missing country rate -> fall back to the USD entry.
            fiat_rate = FiatRate.objects.get(country='United States Of America')
        status_code = status.HTTP_200_OK
        response = {
            'success': True,
            'status code': status_code,
            'message': 'User Fiat Fetched',
            'data': [{
                'updated': fiat_rate.updated,
                'timestamp': fiat_rate.timestamp,
                'country': user_profile_obj.get_country(),
                'dollar_rate': fiat_rate.dollar_rate
            }]
        }
        return Response(response, status=status_code)
class FiatListView(ListAPIView):
    """Read-only list of all FiatRate rows, 15 per page."""
    authentication_classes = [BasicAuthentication, SessionAuthentication, JSONWebTokenAuthentication]
    serializer_class = FiatRateListSerializer
    queryset = FiatRate.objects.all()
    permission_classes = (IsAuthenticatedOrReadOnly,)
    # NOTE(review): paginate_by is a legacy setting; modern DRF uses pagination_class.
    paginate_by = 15
| mathemartins/vendescrow | rates/api/views.py | views.py | py | 2,400 | python | en | code | 0 | github-code | 36 |
12134276149 | import logging
from rest_framework import viewsets, mixins, serializers, generics
from apps.proceso.models.forms.campo import Campo
from .campo_validation_view import CampoValidationSerializer
from .validation_view import ValidationSerializer
log = logging.getLogger(__name__)
class CampoSerializer(serializers.ModelSerializer):
    """Serializer for Campo form fields, embedding their validations read-only."""
    campovalidation_set = CampoValidationSerializer(many=True, read_only=True)
    # validation = ValidationSerializer(many=True, read_only=True)
    class Meta:
        model = Campo
        fields = ('id',
                  'label', 'name', 'type', 'required',
                  'width', 'placeholder',
                  'model_name',
                  'model_pk',
                  'model_label',
                  'json', 'formulario', 'icon',
                  'prefix', 'hint_start', 'hint_end_count_text', 'disabled',
                  'multiselect', 'order', 'accept_fileinput', 'multiple_fileinput',
                  'campovalidation_set',
                  # 'validation',
                  'tipo_validador',
                  'roles_validadores',
                  'documento',
                  'campos_validados',
                  'fecha_creacion', 'fecha_actualizacion')
        read_only_fields = ('id', 'fecha_creacion', 'fecha_actualizacion',)
class CampoViewSet(viewsets.ModelViewSet):
    """
    A simple ViewSet for listing or retrieving Campo.
    """
    queryset = Campo.objects.all()
    serializer_class = CampoSerializer
class CampoList(generics.ListCreateAPIView):
    """List/create Campo records, filtered by the optional `formulario_id` URL kwarg."""
    serializer_class = CampoSerializer

    def get_queryset(self):
        """Return the form's fields ordered by `order`; all fields if no form id.

        Uses kwargs.get(): the original `self.kwargs['formulario_id']` raised
        KeyError before the None check, so the unfiltered branch was unreachable.
        """
        formulario_id = self.kwargs.get('formulario_id')
        if formulario_id is not None:
            return Campo.objects.filter(formulario__id=formulario_id).order_by('order')
        return Campo.objects.all().order_by('order')
4824133992 | import pygame
from pygame import font
class Button():
    """Green start button rendered at the centre of the screen."""
    def __init__(self,alien_setting,screen,msg):
        # Initialize button attributes.
        # NOTE(review): alien_setting is accepted but never used here.
        self.screen = screen
        self.screenRect = screen.get_rect()
        # Button dimensions, colours and font.
        self.width,self.height = 200,50
        self.buttonColor = (0,255,0)
        self.textColor = (255,255,255)
        self.font = font.SysFont(None,48)
        # Create the button's rect object and centre it on screen.
        self.rect = pygame.Rect(0,0,self.width,self.height)
        self.rect.center = self.screenRect.center
        # The button label only needs to be rendered once.
        self.prepMsg(msg)
    def prepMsg(self,msg):
        '''Render msg into an image and centre it on the button.'''
        self.msgImage = self.font.render(msg,True,self.textColor,self.buttonColor)
        self.msgImageRect = self.msgImage.get_rect()
        self.msgImageRect.center = self.rect.center
    def drawButton(self):
        # Fill the button area, then blit the label on top.
        self.screen.fill(self.buttonColor,self.rect)
        self.screen.blit(self.msgImage,self.msgImageRect)
| hongnn/myRemoteWarehouse | alien_game/button.py | button.py | py | 1,077 | python | en | code | 0 | github-code | 36 |
28630512846 | import traceback
from icebox.billing.biller import BaseBiller
from icebox.billing.biller import RESOURCE_TYPE_BANDWIDTH
from densefog import logger
logger = logger.getChild(__file__)
class EipBiller(BaseBiller):
    """Reports elastic-IP bandwidth usage to the billing service.

    All remote calls are best-effort: failures are logged and swallowed so
    billing problems never break IaaS operations (callers get None back).
    """
    def _collect_usages(self, project_id, eip_ids):
        # Imported locally to avoid a circular import with the iaas model layer.
        from icebox.model.iaas import eip as eip_model
        page = eip_model.limitation(project_ids=[project_id],
                                    eip_ids=eip_ids)
        eips = page['items']
        resource_usages = []
        for eip in eips:
            resource_usages.append({
                'resource_id': eip['id'],
                'resource_name': eip['name'],
                # Usage string the billing API expects, e.g. '10Mbps'.
                'resource_usage': '%dMbps' % eip['bandwidth'],
            })
        return resource_usages
    def allocate_eips(self, project_id, eip_ids):
        """Register the given EIPs as billable bandwidth resources."""
        logger.info('biller to allocate eips: %s' % eip_ids)
        if not project_id or not eip_ids:
            return
        resource_usages = self._collect_usages(project_id, eip_ids)
        try:
            resp = self.create_resources(project_id,
                                         RESOURCE_TYPE_BANDWIDTH,
                                         None,
                                         resource_usages)
            logger.info('create_resources resp code: %s, message: %s' % (
                resp['retCode'], resp['message']))
            return resp
        except Exception:
            # Deliberate best-effort: log the traceback and continue.
            stack = traceback.format_exc()
            logger.trace(stack)
            pass
    def release_eips(self, project_id, eip_ids):
        """Stop billing for the given EIPs."""
        logger.info('biller to delete eips: %s' % eip_ids)
        if not project_id or not eip_ids:
            return
        try:
            resp = self.delete_resources(project_id, eip_ids)
            logger.info('delete_resources resp code: %s, message: %s' % (
                resp['retCode'], resp['message']))
            return resp
        except Exception:
            stack = traceback.format_exc()
            logger.trace(stack)
    def update_bandwidth(self, project_id, eip_ids):
        """Push the current bandwidth of each EIP to billing, one call per resource."""
        logger.info('biller to update brandwidth eips: %s' % eip_ids)
        if not project_id or not eip_ids:
            return
        resource_usages = self._collect_usages(project_id, eip_ids)
        try:
            resps = []
            for resource_usage in resource_usages:
                resource_id = resource_usage['resource_id']
                usage = resource_usage['resource_usage']
                resp = self.modify_resource_attributes(project_id,
                                                       resource_id,
                                                       None,
                                                       usage)
                logger.info('modify_resource_attributes resp code: %s, '
                            'message: %s' % (resp['retCode'], resp['message']))
                resps.append(resp)
            return resps
        except Exception:
            stack = traceback.format_exc()
            logger.trace(stack)
| hashipod/icebox | core/icebox/billing/eips.py | eips.py | py | 3,051 | python | en | code | 0 | github-code | 36 |
36522588108 | import numpy as np
import os
def read_obj(filename):
    """Parse a Wavefront OBJ file and return (vertices, faces) as numpy arrays.

    Only geometric vertices (`v`) and faces (`f`) are read; `vt`/`vn` records
    and other statements are ignored.  Face indices are converted to 0-based;
    negative (relative) indices are resolved against the vertices seen so far.

    Returns:
        (vertices, faces): float array of shape (V, 3) and int array (F, k).
    """
    faces = []
    vertices = []
    node_counter = 0
    # `with` guarantees the file is closed even if parsing raises.
    with open(filename, "r") as fid:
        while True:
            line = fid.readline()
            if line == "":
                break
            # NOTE(review): readline keeps the trailing newline, so this
            # continuation handling only triggers on a final line ending in '\'.
            while line.endswith("\\"):
                # Remove backslash and concatenate with next line
                line = line[:-1] + fid.readline()
            if line.startswith("v "):
                # "v " (not "v"): the old prefix also matched "vt"/"vn" records,
                # silently importing normals/texcoords as vertices.
                coord = line.split()
                coord.pop(0)
                node_counter += 1
                vertices.append(np.array([float(c) for c in coord]))
            elif line.startswith("f "):
                fields = line.split()
                fields.pop(0)
                # in some obj faces are defined as -70//-70 -69//-69 -62//-62
                cleaned_fields = []
                for f in fields:
                    f = int(f.split("/")[0]) - 1
                    if f < 0:
                        f = node_counter + f
                    cleaned_fields.append(f)
                faces.append(np.array(cleaned_fields))
    # vstack: row_stack was removed in NumPy 2.0 and is an alias of vstack.
    faces_np = np.vstack(faces)
    vertices_np = np.vstack(vertices)
    return vertices_np, faces_np
def write_to_obj(filename, vertices, faces=None):
    """Write a triangle mesh to a Wavefront OBJ file, creating directories as needed.

    Args:
        filename: output path; '.obj' is appended when missing.
        vertices: (N, 3) array of vertex coordinates.
        faces: (M, 3) array of vertex indices written verbatim (OBJ faces are
            1-based -- callers are expected to pass 1-based indices; TODO confirm).
            When None, indices are loaded from './{N}face.txt'.
    """
    if not filename.endswith('obj'):
        filename += '.obj'
    # os.path.dirname instead of filename.strip(name): str.strip removes a *set
    # of characters*, which mangled any directory sharing letters with the name.
    path = os.path.dirname(filename)
    if path == '':
        path = './'
    if not os.path.exists(path):
        os.makedirs(path)
    num = vertices.shape[0]
    if faces is None:
        # builtin int: np.int was removed from NumPy >= 1.24.
        faces = np.loadtxt('./{:d}face.txt'.format(num), dtype=int)
    num_face = faces.shape[0]
    with open(filename, 'w') as f:
        f.write(('v {:f} {:f} {:f}\n'*num).format(*vertices.reshape(-1).tolist()))
        f.write(('f {:d} {:d} {:d}\n'*num_face).format(*faces.reshape(-1).tolist()))
def cal_rotation_matrix(rotation_angle=0, axis='x'):
    """Return the 3x3 matrix for a rotation of `rotation_angle` radians about `axis`.

    Args:
        rotation_angle: angle in radians.
        axis: one of 'x', 'y', 'z'.

    Raises:
        ValueError: for any other axis.  (The original printed a message and
        then crashed with UnboundLocalError on the return statement.)
    """
    cos_value = np.cos(rotation_angle)
    sin_value = np.sin(rotation_angle)
    if axis == 'x':
        return np.array(
            [
                [1., 0., 0.],
                [0., cos_value, -1*sin_value],
                [0., 1*sin_value, cos_value]
            ]
        )
    if axis == 'y':
        return np.array(
            [
                [cos_value, 0., sin_value],
                [0., 1., 0.],
                [-1*sin_value, 0., cos_value]
            ]
        )
    if axis == 'z':
        return np.array(
            [
                [cos_value, -1*sin_value, 0],
                [1*sin_value, cos_value, 0.],
                [0., 0., 1.]
            ]
        )
    raise ValueError('axis input should in [\'x\', \'y\', \'z\']')
def mesh_3x(vert, faces):
    """Split every triangle into three by inserting the triangle centroid.

    Returns (new_vertices, new_faces): the original vertices followed by one
    centroid per face, and 3x as many faces referencing them.
    """
    combined = vert.tolist()
    refined_faces = []
    centroid_idx = len(combined)
    for a, b, c in faces.tolist():
        centroid = (vert[a] + vert[b] + vert[c]) / 3
        combined.append([centroid[0], centroid[1], centroid[2]])
        # Replace (a, b, c) with the three sub-triangles around the centroid.
        refined_faces += [[a, b, centroid_idx],
                          [a, centroid_idx, c],
                          [centroid_idx, b, c]]
        centroid_idx += 1
    return np.array(combined), np.array(refined_faces, dtype=int)
def cal_distance(p1, p2):
    """Euclidean distance between two points given as numpy arrays."""
    return np.sqrt(((p1 - p2) ** 2).sum())
if __name__ == "__main__":
pass | wangkangkan/3DClothedHumans | vertex-level_regression/write2obj.py | write2obj.py | py | 3,378 | python | en | code | 5 | github-code | 36 |
4809944238 | '''
Generating and sampling shapes
u,v \in [0,1]
'''
# TODO Derive each class from PhiFlow Geometry class
from phi.torch.flow import *
# Helper function to sample initial and target position of 2 shapes
# Get O overlapping, and U non-necessarily overlapping ('unique') sample points
def get_points_for_shapes(shape_0, shape_target, O=30, U=30):
    """Sample matched point sets on an initial and a target shape.

    O points come from the shapes' overlap region, U extra points from each
    shape individually; both sets are mapped into world space via
    create_points.  Returns (p_0, p_t) as phiflow tensors.
    """
    sampler_union = ShapeSampler(shape_0, shape_target, N=O, h1=2, h2=7)
    sampler_0 = ShapeSampler(shape_0, N=U, h1=3, h2=11)
    sampler_target = ShapeSampler(shape_target, N=U, h1=3, h2=11)
    # Concatenate both the union and non-union points
    p_0 = math.concat((sampler_union.p, sampler_0.p), instance('i'))
    p_0 = shape_0.create_points(p_0)
    p_t = math.concat((sampler_union.p, sampler_target.p), instance('i'))
    p_t = shape_target.create_points(p_t)
    return (p_0, p_t)
'''
N: number of sample points
h1, h2: primes for halton sequence
A, B: shapes, of which the subsection is sampled
if B is None, then only A is sampled
'''
class ShapeSampler:
    """Quasi-random sampler of N points inside shape A (optionally inside A∩B).

    Uses two Halton sequences (bases h1, h2) for the u/v coordinates and keeps
    a candidate only when the shape SDF(s) report it as inside.
    """
    def __init__(self, A, B=None, N=16, h1=5, h2=3):
        self.A = A # shape 1
        self.B = B # shape 2
        self.N = N
        self.h1 = h1
        self.h2 = h2
        self.p = []
        # generate N points in [0,1]x[0,1]
        # Check wheter (u,v) is inside the shape
        # if not, then throw it away, and try another one
        # NOTE(review): the halton generators are infinite, so this loop only
        # terminates via the break below -- a degenerate intersection can hang.
        for u, v in zip(self.halton(self.N, h1), self.halton(self.N, h2)):
            if A.sdf(u,v) < 0.0:
                if B == None or B.sdf(u,v) < 0.0:
                    # (u, v) is both in A and B (or either B is not present)
                    self.p.append([u,v])
            if len(self.p) == self.N:
                break
        self.p = math.tensor(self.p,
                instance(i=N) & channel(vector='x,y'))
    def get_sample_points(self):
        """Return the sampled points as a phiflow tensor (instance 'i', channel 'x,y')."""
        return self.p
    # Generates the b base Halton-sequence
    # source: https://en.wikipedia.org/wiki/Halton_sequence
    def halton(self, n, b):
        # NOTE(review): the `n` parameter is unused -- the generator is infinite
        # and the caller stops consuming it once enough points are collected.
        n, d = 0, 1
        while True:
            x = d - n
            if x == 1:
                n = 1
                d *= b
            else:
                y = d // b
                while x <= y:
                    y //= b
                n = (b + 1) * y - x
            yield n / d
'''
pos: lower left corner of the encompassing rectangle
size: size by which to scale encompassing rectangle
'''
class Circle:
    """Unit circle in [0,1]^2, scaled by `size` and translated by `pos`."""
    def __init__(self, pos=(1,1), size=1):
        self.pos = pos
        self.size = size
        self.radius = size/2
    '''
    (u, v): sample point in [0,1]x[0,1]
    return: SDF(u,v) which is 
        - > 0 outside the shape
        - = 0 on the border of the shape
        - < 0 inside the shape
      with a sphere r=0.5, centered at o=[0.5, 0.5]
    '''
    def sdf(self, u, v):
        o = [0.5, 0.5]
        r = 0.5
        # distance from center of the circle
        dist = math.sqrt((u-o[0])**2 + (v-o[1])**2)
        return dist - r
    def create_points(self, p_sample):
        # Map unit-square samples into world space and cache them on self.p.
        self.p = self.f(p_sample)
        return self.p
    def f(self, p):
        # Affine map: scale unit-square coordinates by size, then translate to pos.
        scale = tensor([self.size, self.size], channel(p))
        tx, ty = self.pos
        translate = tensor([tx, ty], channel(p))
        return (p * scale) + translate
    def get_smoke(self, domain=Box(x=math.PI, y=math.PI), x=100, y=100):
        """Rasterize the circle as a CenteredGrid marker field on the given domain."""
        r = self.radius
        center = tensor([self.pos[0]+r, self.pos[1]+r], channel(vector='x,y'))
        smoke = CenteredGrid(
            Sphere(center=center, radius=r),
            extrapolation.BOUNDARY,
            x=x, y=y,
            bounds=(domain)
        )
        return smoke
    def get_trivial_points(self):
        # Five hand-picked landmarks in unit-square coordinates.
        # NOTE(review): the 0.885/0.115 corners lie slightly outside the r=0.5
        # circle -- confirm whether boundary proximity is intended.
        return math.tensor([
            [0.5, 0.5], # center
            [0.885, 0.885], # upper right
            [0.115, 0.885], # upper left
            [0.115, 0.115], # lower left
            [0.885, 0.115], # lower right
        ], instance(i=5) & channel(vector='x,y'))
'''
pos: (x,y) position of lower left corner
size: side length of the square
'''
class Square:
    """Axis-aligned square with lower-left corner `pos` and side length `size`."""
    def __init__(self, pos=(1,1), size=1):
        self.pos = pos
        self.size = size
    '''
    (u, v): sample point in [0,1]x[0,1]
    return: SDF(u,v) which is 
        - > 0 outside the shape
        - = 0 on the border of the shape
        - < 0 inside the shape
      with a sphere r=0.5, centered at o=[0.5, 0.5]
    '''
    def sdf(self, u, v):
        # All of the unit square is inside
        return -1
    def create_points(self, p_sample):
        # Map unit-square samples into world space and cache them on self.p.
        self.p = self.f(p_sample)
        return self.p
    def f(self, p):
        # Affine map: scale unit-square coordinates by size, then translate to pos.
        scale = tensor([self.size, self.size], channel(p))
        tx, ty = self.pos
        translate = tensor([tx, ty], channel(p))
        return (p * scale) + translate
    def get_smoke(self, domain=Box(x=math.PI, y=math.PI), x=100, y=100):
        """Rasterize the square as a CenteredGrid marker field on the given domain."""
        lower = tensor([self.pos[0], self.pos[1]], channel(vector='x,y'))
        upper = tensor([self.pos[0]+self.size, self.pos[1]+self.size], channel(vector='x,y'))
        smoke = CenteredGrid(
            Box(lower=lower,
                upper=upper),
            extrapolation.BOUNDARY,
            x=x, y=y,
            bounds=(domain)
        )
        return smoke
    def get_trivial_points(self,):
        # Five landmarks in unit-square coordinates: center plus the four corners.
        return math.tensor([
            [0.5, 0.5], # center
            [1, 1], # upper right
            [0, 1], # upper left
            [0, 0], # lower left
            [1, 0], # lower right
        ], instance(i=5) & channel(vector='x,y'))
'''
pos: lower left corner of the encompassing rectangle
'''
class Triangle():
    """Isoceles triangle with base on the bottom edge of its bounding square (`pos`, `size`)."""
    def __init__(self, pos=(1,1), size=1.0):
        self.pos = pos
        self.size = size
    '''
    (u, v): sample point in [0,1]x[0,1]
    return: SDF(u,v) which is 
        - > 0 outside the shape
        - = 0 on the border of the shape
        - < 0 inside the shape
      with a sphere r=0.5, centered at o=[0.5, 0.5]
    '''
    def sdf(self, u, v):
        # needs to be under both the left and right sides
        # Left edge: y = 2u; right edge: y = -2u + 2 (apex at (0.5, 1)).
        y_left = +2*u
        y_right = -2*u + 2
        if v < y_left and v < y_right and v > 0:
            return -1 #inside triangle
        return 1 #outside triangle
    def create_points(self, p_sample):
        # Map unit-square samples into world space and cache them on self.p.
        self.p = self.f(p_sample)
        return self.p
    def f(self, p):
        # Affine map: scale unit-square coordinates by size, then translate to pos.
        scale = tensor([self.size, self.size], channel(p))
        tx, ty = self.pos
        translate = tensor([tx, ty], channel(p))
        return (p * scale) + translate
    def get_smoke(self,domain=Box(x=math.PI, y=math.PI), x=100, y=100):
        """Rasterize the triangle as a CenteredGrid marker field on the given domain."""
        smoke = CenteredGrid(
            self.get_marker,
            extrapolation.BOUNDARY,
            x=x, y=y,
            bounds=(domain)
        )
        return smoke
    def get_marker(self, p):
        # Vectorized inside-test in world coordinates (used by CenteredGrid sampling).
        # NOTE(review): unlike sdf(), this does not scale by self.size -- confirm
        # whether non-unit sizes are supported here.
        u, v = p.vector['x'], p.vector['y']
        y_left = +2*(u-self.pos[0]) + self.pos[1]
        y_right = -2*(u-self.pos[0]) + 2 + self.pos[1]
        bool_inside = (v < y_left) & (v < y_right) & (v > self.pos[1])
        bool_inside = math.all(bool_inside, 'vector')
        return bool_inside
    def get_trivial_points(self):
        # Five landmarks in unit-square coordinates (apex listed twice).
        return math.tensor([
            [0.5, 0.5], # center
            [0.5, 1], # upper right
            [0.5, 1], # upper left
            [0, 0], # lower left
            [1, 0], # lower right
        ], instance(i=5) & channel(vector='x,y'))
# TODO
def get_f_moon():
return
# TODO BME logo
| bobarna/eigenfluid-control | src/shapes.py | shapes.py | py | 7,366 | python | en | code | 1 | github-code | 36 |
35015760399 | import argparse
import numpy as np
import torch
from copy import deepcopy
from gluonts.dataset.multivariate_grouper import MultivariateGrouper
from gluonts.dataset.repository.datasets import get_dataset
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import MultivariateEvaluator
from tsdiff.forecasting.models import (
ScoreEstimator,
TimeGradTrainingNetwork_AutoregressiveOld, TimeGradPredictionNetwork_AutoregressiveOld,
TimeGradTrainingNetwork_Autoregressive, TimeGradPredictionNetwork_Autoregressive,
TimeGradTrainingNetwork_All, TimeGradPredictionNetwork_All,
TimeGradTrainingNetwork_RNN, TimeGradPredictionNetwork_RNN,
TimeGradTrainingNetwork_Transformer, TimeGradPredictionNetwork_Transformer,
TimeGradTrainingNetwork_CNN, TimeGradPredictionNetwork_CNN,
)
from tsdiff.utils import NotSupportedModelNoiseCombination, TrainerForecasting
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def energy_score(forecast, target):
    """Sample-based energy score of *forecast* against *target*.

    ES = E||X - y|| - 0.5 * E||X - X'||, where norms are Euclidean over the
    last axis and expectations are estimated by averaging over all samples
    (and all sample pairs, respectively).  Lower is better.
    """
    # Mean distance between forecast samples and the observed target.
    term_obs = np.linalg.norm(forecast - target, axis=-1).mean()
    # Mean pairwise distance between forecast samples (all-vs-all).
    diffs = forecast[:, np.newaxis, ...] - forecast
    term_pair = np.linalg.norm(diffs, axis=-1).mean()
    return term_obs - 0.5 * term_pair
def train(
    seed: int,
    dataset: str,
    network: str,
    noise: str,
    diffusion_steps: int,
    epochs: int,
    learning_rate: float,
    batch_size: int,
    num_cells: int,
    hidden_dim: int,
    residual_layers: int,
):
    """Train a score-based diffusion forecasting model and evaluate it.

    Loads the named GluonTS dataset, groups it into one multivariate
    series, trains the chosen TimeGrad variant, and returns a dict of
    CRPS / ND / NRMSE (per-series and summed) plus the energy score,
    all converted to plain floats.
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    # NOTE(review): the -4 adjustment for exchange_rate_nips is inherited
    # from the model's input feature layout (see input_size below) --
    # confirm against ScoreEstimator's feature construction.
    covariance_dim = 4 if dataset != 'exchange_rate_nips' else -4
    # Load data
    dataset = get_dataset(dataset, regenerate=False)
    target_dim = int(dataset.metadata.feat_static_cat[0].cardinality)
    train_grouper = MultivariateGrouper(max_target_dim=min(2000, target_dim))
    test_grouper = MultivariateGrouper(num_test_dates=int(len(dataset.test) / len(dataset.train)), max_target_dim=min(2000, target_dim))
    dataset_train = train_grouper(dataset.train)
    dataset_test = test_grouper(dataset.test)
    # Hold out the last 20 prediction windows of each series as validation,
    # and strip them from the training targets.
    val_window = 20 * dataset.metadata.prediction_length
    dataset_train = list(dataset_train)
    dataset_val = []
    for i in range(len(dataset_train)):
        x = deepcopy(dataset_train[i])
        x['target'] = x['target'][:,-val_window:]
        dataset_val.append(x)
        dataset_train[i]['target'] = dataset_train[i]['target'][:,:-val_window]
    # Load model
    # The two autoregressive TimeGrad variants only support Gaussian noise.
    if network == 'timegrad':
        if noise != 'normal':
            raise NotSupportedModelNoiseCombination
        training_net, prediction_net = TimeGradTrainingNetwork_Autoregressive, TimeGradPredictionNetwork_Autoregressive
    elif network == 'timegrad_old':
        if noise != 'normal':
            raise NotSupportedModelNoiseCombination
        training_net, prediction_net = TimeGradTrainingNetwork_AutoregressiveOld, TimeGradPredictionNetwork_AutoregressiveOld
    elif network == 'timegrad_all':
        training_net, prediction_net = TimeGradTrainingNetwork_All, TimeGradPredictionNetwork_All
    elif network == 'timegrad_rnn':
        training_net, prediction_net = TimeGradTrainingNetwork_RNN, TimeGradPredictionNetwork_RNN
    elif network == 'timegrad_transformer':
        training_net, prediction_net = TimeGradTrainingNetwork_Transformer, TimeGradPredictionNetwork_Transformer
    elif network == 'timegrad_cnn':
        training_net, prediction_net = TimeGradTrainingNetwork_CNN, TimeGradPredictionNetwork_CNN
    # NOTE(review): an unrecognized network name would fall through and hit
    # a NameError below; argparse's `choices` guards this in practice.
    estimator = ScoreEstimator(
        training_net=training_net,
        prediction_net=prediction_net,
        noise=noise,
        target_dim=target_dim,
        prediction_length=dataset.metadata.prediction_length,
        context_length=dataset.metadata.prediction_length,
        cell_type='GRU',
        num_cells=num_cells,
        hidden_dim=hidden_dim,
        residual_layers=residual_layers,
        input_size=target_dim * 4 + covariance_dim,
        freq=dataset.metadata.freq,
        loss_type='l2',
        scaling=True,
        diff_steps=diffusion_steps,
        beta_end=20 / diffusion_steps,
        beta_schedule='linear',
        num_parallel_samples=100,
        pick_incomplete=True,
        trainer=TrainerForecasting(
            device=device,
            epochs=epochs,
            learning_rate=learning_rate,
            num_batches_per_epoch=100,
            batch_size=batch_size,
            patience=10,
        ),
    )
    # Training
    predictor = estimator.train(dataset_train, dataset_val, num_workers=8)
    # Evaluation
    forecast_it, ts_it = make_evaluation_predictions(dataset=dataset_test, predictor=predictor, num_samples=100)
    forecasts = list(forecast_it)
    targets = list(ts_it)
    # Energy score over raw forecast samples vs. the held-out windows.
    score = energy_score(
        forecast=np.array([x.samples for x in forecasts]),
        target=np.array([x[-dataset.metadata.prediction_length:] for x in targets])[:,None,...],
    )
    evaluator = MultivariateEvaluator(quantiles=(np.arange(20)/20.0)[1:], target_agg_funcs={'sum': np.sum})
    agg_metric, _ = evaluator(targets, forecasts, num_series=len(dataset_test))
    metrics = dict(
        CRPS=agg_metric['mean_wQuantileLoss'],
        ND=agg_metric['ND'],
        NRMSE=agg_metric['NRMSE'],
        CRPS_sum=agg_metric['m_sum_mean_wQuantileLoss'],
        ND_sum=agg_metric['m_sum_ND'],
        NRMSE_sum=agg_metric['m_sum_NRMSE'],
        energy_score=score,
    )
    # Plain floats so the result is print/JSON friendly.
    metrics = { k: float(v) for k,v in metrics.items() }
    return metrics
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train forecasting model.')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--dataset', type=str)
    parser.add_argument('--network', type=str, choices=[
        'timegrad', 'timegrad_old', 'timegrad_all', 'timegrad_rnn', 'timegrad_transformer', 'timegrad_cnn'
    ])
    parser.add_argument('--noise', type=str, choices=['normal', 'ou', 'gp'])
    parser.add_argument('--diffusion_steps', type=int, default=100)
    parser.add_argument('--epochs', type=int, default=100)
    # Bug fix: this was declared type=int with a float default, so any
    # explicit value on the command line (e.g. --learning_rate 1e-3)
    # crashed in int(); the learning rate is a float.
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--num_cells', type=int, default=100)
    parser.add_argument('--hidden_dim', type=int, default=100)
    parser.add_argument('--residual_layers', type=int, default=8)
    args = parser.parse_args()
    # vars(args) is the idiomatic spelling of args.__dict__.
    metrics = train(**vars(args))
    for key, value in metrics.items():
        print(f'{key}:\t{value:.4f}')

# Example:
# python -m tsdiff.forecasting.train --seed 1 --dataset electricity_nips --network timegrad_rnn --noise ou --epochs 100
| morganstanley/MSML | papers/Stochastic_Process_Diffusion/tsdiff/forecasting/train.py | train.py | py | 6,723 | python | en | code | 12 | github-code | 36 |
247176838 | from trytond.model import ModelView, ModelSQL, fields
from trytond.pool import Pool
from sql import Table, Column, Literal, Desc, Asc, Expression, Flavor
from sql.functions import Now, Extract
from sql.operators import Or, And, Concat, ILike, Operator
from sql.conditionals import Coalesce
from sql.aggregate import Count, Max
from nereid import (
request, abort, render_template, login_required, url_for, flash, jsonify,
current_app, route
)
from nereid.contrib.pagination import Pagination, BasePagination
from nereid.ctx import has_request_context
__all__ = ['Artist']
class Artist(ModelSQL, ModelView):
    "Artist"
    # NOTE: in Tryton the class docstring is the model's human-readable
    # label, so it is deliberately left as-is.
    __name__ = "songbook.artist"
    _rec_name = 'full_name'   # records are displayed by their full name

    # Stored columns.
    last_name = fields.Char('Last Name', required=True, select=True)
    first_name = fields.Char('First Name', select=True)
    # Computed fields derived from the two name columns.
    full_name = fields.Function(
        fields.Char('Full Name'), 'get_full_name',
        searcher='search_full_name'
    )
    rev_name = fields.Function(
        fields.Char('Reversed Name'), 'get_rev_name'
    )
    songs = fields.One2Many(
        "songbook.song",
        'artist',
        'Songs by This Artist'
    )

    def serialize(self):
        """
        Serialize the artist object and return a dictionary.

        The dict is JSON-ready and includes the artist's detail-page URL so
        API clients can navigate to the HTML view.
        """
        object_json = {
            "url": url_for(
                'songbook.artist.render_html',
                id=self.id,
            ),
            "objectType": self.__name__,
            "id": self.id,
            "lastName": self.last_name,
            "firstName": self.first_name,
            "fullName": self.full_name,
        }
        return object_json

    @classmethod
    def __setup__(cls):
        # Enforce (last_name, first_name) uniqueness at the SQL level and
        # order listings by last name then first name.
        super(Artist, cls).__setup__()
        cls._sql_constraints = [
            ('name_uniq', 'UNIQUE(last_name, first_name)',
                'An artist with that name already exists.')
        ]
        cls._order.insert(0, ('last_name', 'ASC'))
        cls._order.insert(1, ('first_name', 'ASC'))

    @classmethod
    def search_full_name(cls, name, clause):
        "Search Full Name"
        # Translate a domain clause on the computed full_name field into a
        # SQL subquery over the stored columns ("first last" via Concat).
        _, operator, value = clause
        # NOTE(review): this local shadows the sql.operators.Operator
        # import; it holds the python-sql operator class matching the
        # domain operator (e.g. ilike).
        Operator = fields.SQL_OPERATORS[operator]
        table = cls.__table__()
        fullname = Concat(
            Coalesce(table.first_name, Literal('')),
            Concat(
                Literal(' '),
                Coalesce(table.last_name, Literal(''))
            )
        )
        query = table.select(table.id, where=Operator(fullname, value))
        return [('id', 'in', query)]

    def get_full_name(self, name):
        # "First Last", or just the last name when first_name is unset.
        if self.first_name is None:
            fullname = self.last_name
        else:
            fullname = "%s %s" % (self.first_name, self.last_name)
        return fullname.strip(" ")

    def get_rev_name(self, name):
        # "Last, First", or just the last name when first_name is unset.
        if self.first_name is None:
            revname = self.last_name
        else:
            revname = "%s, %s" % (self.last_name, self.first_name)
        return revname.strip(", ")

    @classmethod
    @route('/songbook/api/artists', methods=['GET', 'POST'])
    def call_api_index(cls):
        """
        JSON-formatted REST API to support 3rd party integration, apps
        and web page javascript such as search-as-you-type.

        Query params: ``namecontains`` (substring filter, default empty)
        and ``limit`` (max results, default 5).
        """
        name_filter = '%' + request.args.get('namecontains', '') + '%'
        domain = [
            ('full_name', 'ilike', name_filter)
        ]
        artists = cls.search(domain, limit=int(request.args.get('limit', '5')))
        return jsonify(
            artists=[a.serialize() for a in artists]
        )

    @classmethod
    @route('/songbook/artists/<int:id>', methods=['GET'])
    def render_html(cls, id=0):
        """
        output details of a selected artist to web client
        """
        # NOTE(review): browse() does not verify existence -- an unknown id
        # raises IndexError here instead of returning a 404; confirm that
        # is acceptable.
        artist=cls.browse([id])[0]
        return render_template(
            'songbook_artist-detail.jinja',
            artist=artist
        )

    @classmethod
    @route('/songbook/artists', methods=['GET', 'POST'])
    def render_html_index(cls):
        """
        output artist list to web client

        Supports the same ``namecontains`` filter as the API plus a
        ``page`` query parameter; 25 results per page.
        """
        name_filter = '%' + request.args.get('namecontains', '') + '%'
        page = request.args.get('page', 1, int)
        domain = [
            ('full_name', 'ilike', name_filter)
        ]
        artists = Pagination(
            cls, domain, page, 25
        )
        return render_template(
            'songbook_artist-list.jinja',
            artists=artists
        )
| coalesco/trytond_songbook | artist.py | artist.py | py | 4,420 | python | en | code | 0 | github-code | 36 |
7706902084 | from bert_models.base_bert_model import BaseBertModel
import joblib
from sklearn.ensemble import GradientBoostingClassifier
import os
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import f1_score
class BaseBertModelWithBoost(BaseBertModel):
    """BERT classifier with a gradient-boosting meta-classifier on top.

    The boosting model is trained on the BERT model's per-class probability
    outputs (optionally head+tail concatenated) as its feature matrix.
    """

    def __init__(self, output_dir, label_list, boost_model = "", gpu_device_num_hub=0,gpu_device_num = 1, batch_size = 16, max_seq_length = 256,\
        bert_model_hub = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1", model_folder = "", label_column = "label",
        use_concat_results=False):
        # Delegate all BERT setup to the base class, then attach (or freshly
        # create) the boosting model.
        BaseBertModel.__init__(self, output_dir, label_list, gpu_device_num_hub=gpu_device_num_hub,
            gpu_device_num = gpu_device_num, batch_size = batch_size, max_seq_length = max_seq_length,
            bert_model_hub = bert_model_hub, model_folder = model_folder, label_column = label_column,
            use_concat_results = use_concat_results)
        self.load_boost_model(boost_model)

    def load_boost_model(self, folder):
        # Load a previously saved booster; start untrained when no folder given.
        self.sgBoost = joblib.load(os.path.join(folder,'GradientBoost.joblib')) if folder != "" else GradientBoostingClassifier()

    def save_boost_model(self, folder):
        # Persist the booster with joblib, creating the folder if needed.
        if not os.path.exists(folder):
            os.makedirs(folder)
        joblib.dump(self.sgBoost, os.path.join(folder,'GradientBoost.joblib'))

    def prepare_dataset_for_boosting(self, train, use_tail = False):
        """Build the booster's feature matrix from BERT class probabilities,
        optionally concatenated with probabilities from the text tail."""
        es_train_prob_study_type_11, res_train_full_study_type_11, train_y_study_type_11 = self.evaluate_model(train)
        if use_tail:
            es_train_prob_study_type_11_tail, res_train_full_study_type_11_tail, train_y_study_type_11_tail = self.evaluate_model(train, False)
            sg_boost_x = np.concatenate([ es_train_prob_study_type_11, es_train_prob_study_type_11_tail],axis=1)
            return sg_boost_x
        return es_train_prob_study_type_11

    def prepare_datasets_for_boosting(self, train, test, study_df, use_tail = False):
        # Convenience wrapper: feature matrices for all three splits.
        sg_boost_x = self.prepare_dataset_for_boosting(train, use_tail = use_tail)
        sg_boost_test_x = self.prepare_dataset_for_boosting(test, use_tail = use_tail)
        sg_boost_study_x = self.prepare_dataset_for_boosting(study_df, use_tail = use_tail)
        return sg_boost_x, sg_boost_test_x, sg_boost_study_x

    def train_boost_model(self, train, test, study_df, use_tail = False, n_estimators = 60, max_depth = 8, for_train = True):
        """Fit (when for_train) and report the booster on all three splits.

        NOTE(review): the booster is fitted on the *test* split and then
        scored on train/test/study -- presumably deliberate because the
        underlying BERT model was fitted on train, but worth confirming.
        """
        sg_boost_x, sg_boost_test_x, sg_boost_study_x = self.prepare_datasets_for_boosting(train, test, study_df, use_tail = use_tail)
        train_y = list(train[self.label_column].values)
        test_y = list(test[self.label_column].values)
        study_df_y = list(study_df[self.label_column].values)
        if for_train:
            self.sgBoost = GradientBoostingClassifier(n_estimators = n_estimators, max_depth=max_depth)
            self.sgBoost.fit(sg_boost_test_x, test_y)
        print(self.sgBoost.score(sg_boost_test_x, test_y))
        print(self.sgBoost.score(sg_boost_x, train_y))
        print(self.sgBoost.score(sg_boost_study_x, study_df_y))
        print(confusion_matrix(study_df_y, self.sgBoost.predict(sg_boost_study_x)))
        print(classification_report(study_df_y, self.sgBoost.predict(sg_boost_study_x)))

    def evaluate_boost_model(self, test, use_tail = False):
        # Accuracy + confusion matrix + per-class report on one split.
        sg_boost_test_x =self.prepare_dataset_for_boosting(test, use_tail = use_tail)
        test_y = list(test[self.label_column].values)
        print(self.sgBoost.score(sg_boost_test_x, test_y))
        print(confusion_matrix(test_y, self.sgBoost.predict(sg_boost_test_x)))
        print(classification_report(test_y, self.sgBoost.predict(sg_boost_test_x)))

    def predict_with_boosting(self, df, with_head_tail = False):
        """Predict labels for *df* via BERT probabilities -> booster.

        Returns (softmaxed decision scores, predicted labels, gold labels).
        """
        res_prob, res_label, res_y = self.predict_for_df(df)
        if with_head_tail:
            res_prob_tail, res_label_tail, res_y = self.predict_for_df(df, is_head = False)
            res_prob = np.concatenate([ res_prob, res_prob_tail],axis=1)
        res_prob, res_label = [self.softmax(x) for x in self.sgBoost.decision_function(res_prob)], self.sgBoost.predict(res_prob)
        return res_prob, res_label, res_y
| MariyaIvanina/articles_processing | src/bert_models/base_bert_model_with_boosting.py | base_bert_model_with_boosting.py | py | 4,205 | python | en | code | 3 | github-code | 36 |
44217228363 | """
@author: Miguel Taibo Martínez
Date: Nov 2021
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import gpflow
import pandas as pd
import sobol_seq
from gpflow.utilities import print_summary
from frontutils import get_pareto_undominated_by
class frontGP(object):
    """Multi-output GP surrogate with Pareto-front plotting/export helpers.

    O: number of objectives, C: number of constraints, d: input dimension.
    X/Y accumulate evaluated samples; multiGPR wraps one gpflow GPR per
    objective column of Y.
    """

    def __init__(self, O:int, C:int, d:int, lowerBounds: float, upperBounds: float, X = None, Y = None, noise_variance=0.01):
        self.O = O
        self.C = C
        self.d = d
        self.lowerBounds = lowerBounds
        self.upperBounds = upperBounds
        self.X = X
        self.Y = Y
        self.noise_variance = noise_variance
        # Built lazily by updateGP() once samples exist.
        self.multiGPR : MultiGPR = None

    def addSample(self, x, y, save=False, filename=None):
        # First sample initializes the arrays; later ones are appended.
        if self.X is None or self.Y is None:
            self.X = np.array([x])
            self.Y = np.array([y])
            return
        self.X = np.append(self.X, [x], axis=0)
        self.Y = np.append(self.Y, [y], axis=0)
        if save and filename is not None:
            # NOTE(review): writeSample is not defined in this class --
            # presumably provided by a subclass or mixin; confirm.
            self.writeSample(filename, x,y)

    def updateGP(self):
        # Rebuild the per-objective GPs from the current sample set.
        self.multiGPR = MultiGPR(X = self.X, Y = self.Y, noise_variance = self.noise_variance)

    def optimizeKernel(self):
        self.multiGPR.optimizeKernel()

    ## Visualization methods
    def plot(self):
        """Plot each objective's GP mean +/- 2 std along each input axis.

        For d > 1, each column varies one input over its bounds while the
        other inputs are held at the midpoint of their bounds.
        """
        fig, axs = plt.subplots(nrows = self.O, ncols=self.d, figsize=(10,5))
        if self.d >1:
            for j in range(self.d):
                # All inputs at their midpoints except dimension j.
                grid = np.ones((10_000,self.d))
                for k in range(self.d):
                    grid[:,k]=grid[:,k]*(self.upperBounds[k]+self.lowerBounds[k])/2
                xx = np.linspace(self.lowerBounds[j], self.upperBounds[j], 10_000).reshape(10_000, 1)
                grid[:,j]=xx[:,0]
                mean, var = self.multiGPR.predict_y(grid)
                if self.O==1:
                    # Single objective: axs is 1-D, indexed by input dim only.
                    axs[j].plot(self.X[:,j], self.Y[:,0], 'kx', mew=2)
                    axs[j].plot(grid[:,j], mean[:,0], 'C0', lw=2)
                    axs[j].fill_between(grid[:,j],
                            mean[:,0] - 2*np.sqrt(var[:,0]),
                            mean[:,0] + 2*np.sqrt(var[:,0]),
                            color='C0', alpha=0.2)
                else:
                    for i in range(self.O):
                        axs[i, j].plot(self.X[:,j], self.Y[:,i], 'kx', mew=2)
                        axs[i, j].plot(grid[:,j], mean[:,i], 'C0', lw=2)
                        axs[i, j].fill_between(grid[:,j],
                                mean[:,i] - 2*np.sqrt(var[:,i]),
                                mean[:,i] + 2*np.sqrt(var[:,i]),
                                color='C0', alpha=0.2)
        else:
            # 1-D input: plot directly over the single bounded axis.
            xx = np.linspace(self.lowerBounds[0], self.upperBounds[0], 10_000).reshape(10_000, 1)
            mean, var = self.multiGPR.predict_y(xx)
            if self.O==1:
                axs.plot(self.X, self.Y[:,0], 'kx', mew=2)
                axs.plot(xx[:,0], mean[:,0], 'C0', lw=2)
                axs.fill_between(xx[:,0],
                        mean[:,0] - 2*np.sqrt(var[:,0]),
                        mean[:,0] + 2*np.sqrt(var[:,0]),
                        color='C0', alpha=0.2)
            else:
                for i in range(self.O):
                    axs[i].plot(self.X, self.Y[:,i], 'kx', mew=2)
                    axs[i].plot(xx[:,0], mean[:,i], 'C0', lw=2)
                    axs[i].fill_between(xx[:,0],
                            mean[:,i] - 2*np.sqrt(var[:,i]),
                            mean[:,i] + 2*np.sqrt(var[:,i]),
                            color='C0', alpha=0.2)
        return fig, axs

    def plotParetos(self, state):
        """Return (set figure, front figure) comparing the estimated Pareto
        set/front held in *state* against the best known one derived from
        the evaluated samples.

        Objectives flagged as maximization in state.objective_mms were
        stored negated and are flipped back for display.
        """
        pareto_front = np.array(state.pareto_front)
        pareto_set = np.array(state.pareto_set)
        for idx, mm in enumerate(state.objective_mms):
            if mm:
                pareto_front[:,idx]=-pareto_front[:,idx]
        best_known_pareto_front = get_pareto_undominated_by(self.Y)
        best_known_pareto_set = getSetfromFront(self.X, self.Y, best_known_pareto_front)
        fig1, axs1 = plt.subplots(figsize=(8,8))
        if self.d>1:
            # Project the set onto the two user-selected input dimensions.
            axs1.plot(pareto_set[:,state.input_names.index(state.setx)],pareto_set[:,state.input_names.index(state.sety)], 'bx', markersize=3, label=r"Estimated Pareto Set")
            axs1.plot(best_known_pareto_set[:,state.input_names.index(state.setx)], best_known_pareto_set[:,state.input_names.index(state.sety)], 'gx', markersize=10, label=r"Best Known Pareto Set")
            axs1.set_ylabel(state.sety, fontsize=14)
            axs1.set_xlabel(state.setx, fontsize=14)
        else:
            # 1-D input: plot along a dummy y=0 line.
            axs1.plot(pareto_set[:,0], [0 for _ in pareto_set[:,0]],'bx', markersize=3, label=r"Estimated Pareto Set")
            axs1.plot(best_known_pareto_set[:,0], [0 for _ in best_known_pareto_set[:,0]],'gx', markersize=10, label=r"Best Known Pareto Set")
            axs1.set_xlabel(state.input_names[0], fontsize=14)
            axs1.set_yticks(ticks = [])
        axs1.legend(fontsize=14)
        fig2, axs2 = plt.subplots(figsize=(8,8))
        axs2.plot(pareto_front[:,state.objective_names.index(state.frontx)], pareto_front[:,state.objective_names.index(state.fronty)], 'xb', markersize=3, label=r"Estimated Pareto Front")
        axs2.plot(best_known_pareto_front[:,state.objective_names.index(state.frontx)],best_known_pareto_front[:,state.objective_names.index(state.fronty)], 'xg', markersize=10, label=r"Best Known Pareto Front")
        axs2.set_xlabel(state.frontx, fontsize=14)
        axs2.set_ylabel(state.fronty, fontsize=14)
        axs2.legend(fontsize=14)
        return fig1,fig2

    def plotMetrics(self, state):
        # Convergence metrics per iteration (AGD and ADH vs. best known front).
        fig, axs = plt.subplots(figsize=(8,8))
        axs.plot(state.ns, state.agd, label=r"$AGD_1(\mathcal{Y}_E^*, \mathcal{Y}_{BK}^*)$")
        axs.plot(state.ns, state.adh, label=r"$d_{ADH}(\mathcal{Y}_E^*, \mathcal{Y}_{BK}^*)$")
        axs.set_ylabel("Log Metrics",fontsize=14)
        axs.set_xlabel("Algorithm Iteration",fontsize=14)
        axs.legend(fontsize=14)
        return fig

    def dlParetos(self, state):
        # CSV export of the estimated Pareto set + front (inputs then objectives).
        pareto_front = np.array(state.pareto_front)
        pareto_set = np.array(state.pareto_set)
        df =pd.DataFrame(data = np.append(pareto_set,pareto_front,axis=1),columns=state.input_names+state.objective_names)
        return df.to_csv()

    def dlParetoBestKnown(self, state):
        # CSV export of the best known Pareto set + front from the samples.
        pareto_front = get_pareto_undominated_by(self.Y)
        pareto_set = getSetfromFront(self.X, self.Y, pareto_front)
        df =pd.DataFrame(data = np.append(pareto_set,pareto_front,axis=1),columns=state.input_names+state.objective_names)
        return df.to_csv()
class MultiGPR(object):
    """Bundle of independent gpflow GPR models, one per output column of Y."""

    def __init__(self, X = None, Y = None, noise_variance=0.01):
        # One single-output GPR (squared-exponential kernel, constant mean
        # function) per column of Y.
        self.GPRs = [
            gpflow.models.GPR(
                [X, Y[:,i:i+1]],
                kernel = gpflow.kernels.SquaredExponential(),
                mean_function = gpflow.mean_functions.Constant(),
                noise_variance = noise_variance
            )
            for i in range(Y.shape[-1])
        ]
        self.opt = gpflow.optimizers.Scipy()

    def optimizeKernel(self):
        # Maximize each GP's marginal likelihood independently.
        for GPR in self.GPRs:
            self.opt.minimize(
                GPR.training_loss,
                variables=GPR.trainable_variables)

    def predict_y(self, xx):
        # Each predict_y returns a (mean, var) pair; concatenating the pairs
        # on the last axis stacks them into [2, N, n_outputs], so index 0 is
        # the means and index 1 the variances across all outputs.
        # NOTE(review): this relies on tf.concat converting each tuple into
        # a tensor -- fragile if output shapes ever differ; confirm.
        mean_vars = tf.concat([GPR.predict_y(xx) for GPR in self.GPRs], axis=-1)
        mean = mean_vars[0]
        var = mean_vars[1]
        return mean, var

    def predict_f_samples(self, xx, n_samples):
        # Draw n_samples latent-function samples per GP and join them on the
        # last (output) axis.
        presamples = [GPR.predict_f_samples(xx, n_samples) for GPR in self.GPRs]
        samples = tf.concat(presamples[:], axis=-1)
        return samples

    def printGPRs(self):
        # Dump gpflow's parameter summary for every wrapped model.
        for GPR in self.GPRs:
            print_summary(GPR)
def getSetfromFront(xvalues, yvalues, front):
    """Map each objective vector in *front* back to its input point(s).

    For every row of *front*, every row of *xvalues* whose corresponding
    *yvalues* row matches it exactly is collected (duplicates preserved,
    in front order).  Returns None when *front* is empty.
    """
    collected = None
    for objective in front:
        # Rows of yvalues equal to this front point, as integer indices.
        mask = np.all(yvalues == objective, axis=1)
        matches = xvalues[np.where(mask)[0]]
        if collected is None:
            collected = np.array(matches)
        else:
            collected = np.append(collected, matches, axis=0)
    return collected
| MiguelTaibo/DashboardMOO | streamlit-front/frontGP.py | frontGP.py | py | 8,161 | python | en | code | 0 | github-code | 36 |
4224070148 | import os
import argparse
import gzip
import sys
import time
import numpy as np
from multiprocessing import Pool
from contextlib import closing
import csv
import tensorflow as tf
from six.moves import urllib
# Spectrogram geometry: 23 frequency bins per frame, 100 frames per window
# (see load_from_file, which pads/crops to window_size rows of bin_freq).
bin_freq = 23
spect_width = bin_freq # Don't add one pixel of zeros on either side of the image
window_size = 100
# NOTE(review): presumably the label-vector dimension (10 digits + blank?);
# confirm against the model that consumes it.
dim_Y = 11
# Mean spectrogram value; mean-centering is currently disabled in
# load_from_file, so this is kept for reference only.
MEAN_SPEC = 10.786225977
# For MNIST
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
# MNIST embeddings directory
dataMnistdir = '../mnistAct/'
MNIST_DIM = 512
WORK_DIRECTORY = 'data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
# mnist_data = {digit: np.loadtxt(dataMnistdir + str(digit) + ".txt") for digit in range(10)}
# Filename label character -> digit; 'o' (oh) and 'z' (zero) both map to 0.
label_map = {'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'o':0,'z':0}
def maybe_download(filename):
  """Download the data from Yann's website, unless it's already here.

  Returns the local path under WORK_DIRECTORY; the file is fetched at most
  once (later calls hit the cached copy on disk).
  """
  # Create the local work directory on first use.
  if not tf.gfile.Exists(WORK_DIRECTORY):
    tf.gfile.MakeDirs(WORK_DIRECTORY)
  filepath = os.path.join(WORK_DIRECTORY, filename)
  if not tf.gfile.Exists(filepath):
    filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
    with tf.gfile.GFile(filepath) as f:
      size = f.size()
    print('Successfully downloaded', filename, size, 'bytes.')
  return filepath
def extract_data(filename, num_images):
  """Extract the images into a 4D tensor [image index, y, x, channels].
  Values are rescaled from [0, 255] down to [-0.5, 0.5].
  """
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    # Skip the 16-byte IDX header (magic number, count, rows, cols).
    bytestream.read(16)
    buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)
    data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
    # Center pixel values around zero: (v - 127.5) / 255.
    data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
    data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
    return data
def extract_labels(filename, num_images):
  """Read *num_images* labels from a gzipped MNIST label file.

  The 8-byte IDX header (magic number + item count) is skipped; each of
  the following bytes is one class label.  Returns a 1-D int64 array.
  """
  print('Extracting', filename)
  header_size = 8
  with gzip.open(filename) as stream:
    stream.read(header_size)
    raw = stream.read(num_images)
  return np.frombuffer(raw, dtype=np.uint8).astype(np.int64)
# Organizing MNIST data into mnist images and labels
# (downloads the 10k-image test split on import -- requires network access).
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
mnist_values = extract_data(test_data_filename, 10000)
mnist_labels = extract_labels(test_labels_filename, 10000)
# Bucket the images by digit: mnist_data[d] is a list of (28, 28, 1) arrays.
mnist_data = {i:[] for i in range(10)}
for i in range(len(mnist_labels)):
    lab = mnist_labels[i]
    val = mnist_values[i]
    mnist_data[lab].append(val)
print(mnist_data[7][0].shape)
def load_from_file(f, bin_freq=23, window_size=100):
    '''Parse one spectrogram file into a flat list of floats.

    *f* is an iterable of text lines, each holding ``bin_freq`` numbers
    (optionally wrapped in square brackets).  The parsed rows are
    center-cropped or symmetrically zero-padded to exactly ``window_size``
    rows, flipped vertically, and returned flattened as a list of length
    ``window_size * bin_freq``.

    ``bin_freq`` and ``window_size`` were previously read from the
    hard-coded module constants; they are now parameters with the same
    defaults, so existing callers are unaffected.

    Raises AssertionError (the historical exception type) when a non-empty
    line does not contain exactly ``bin_freq`` values.
    '''
    data = []
    for line in f:
        cleaned = line.replace("[", "").replace("]", "")
        vector = [float(tok) for tok in cleaned.split()]
        if len(vector) == bin_freq:
            data.append(vector)
        elif len(vector) != 0:
            # Malformed row; keep the original exception type so any caller
            # catching AssertionError still works.
            raise AssertionError
    if len(data) > window_size:
        # Center-crop excess rows.
        cut = 1. * (len(data) - window_size)
        data = data[int(np.floor(cut / 2)):-int(np.ceil(cut / 2))]
    else:
        # Zero-pad symmetrically about the center.
        cut = 1. * (window_size - len(data))
        data = [[0] * bin_freq] * int(np.floor(cut / 2)) + data \
            + [[0] * bin_freq] * int(np.ceil(cut / 2))
    # Flip vertically (spectrogram rows are stored top-down) and flatten.
    data = np.flipud(np.array(data, dtype=np.float32))
    return data.flatten().tolist()
def ld(rootdir, target):
    """Walk *rootdir*, parse every file with load_from_file, and write one
    CSV row per file to *target*: [label] + flattened spectrogram.

    The label is the 4th character of the file name (how this corpus
    encodes the spoken digit -- see label_map).

    Bug fix: the output file was opened in binary mode ('wb'), which makes
    csv.writer raise TypeError on Python 3; csv output must be a text file
    opened with newline=''.  The input files are now also closed via
    context managers.
    """
    with open(target, 'w', newline='') as datafile:
        writer = csv.writer(datafile)
        for subdir, dirs, files in os.walk(rootdir):
            for filename in files:
                y = filename[3]  # digit label encoded in the file name
                with open(os.path.join(subdir, filename)) as f:
                    row = load_from_file(f)
                writer.writerow([y] + row)
def get_mnist_embedding(label):
    """Given (index, digit), return (index, random MNIST image of that digit)."""
    index, digit = label
    samples = mnist_data[digit]
    pick = np.random.randint(0, len(samples))
    return index, samples[pick]
def get_label_map(s):
    """Map a filename label character ('1'-'9', 'o', 'z') to its digit (o/z -> 0)."""
    return label_map[s]
def get_mismatch_mnist_embedding(label):
    """Given (index, digit), return (index, flattened vertical stack of one
    random MNIST image for each of the 9 OTHER digits)."""
    i = label[1]
    out = None
    for j in range(10):
        if i != j:
            # Reuse the matching sampler for each non-matching digit.
            _, data = get_mnist_embedding((label[0],j))
            if out is None:
                out = data
            else:
                out = np.vstack([out, data])
    return label[0],out.flatten()
def generate_mnist_set(labels):
    """For every (byte-string) spoken-digit label, draw one matching MNIST
    image and nine mismatching ones (one per other digit), in parallel.

    Returns (matches, mismatches): one image per label, and the mismatches
    reshaped to (n_labels, 9, image_size).
    """
    matches = []
    mismatches = []
    # Pair each label with its position so results can be sorted back into
    # input order after the worker pool.
    labels = [(i, label_map[labels[i].decode('utf-8')]) for i in range(len(labels))]
    with closing(Pool()) as pool:
        matches = pool.map(get_mnist_embedding, labels)
        mismatches = pool.map(get_mismatch_mnist_embedding, labels)
    matches = np.array([match[1] for match in sorted(matches)])
    mismatches = np.array([mismatch[1] for mismatch in sorted(mismatches)])
    return matches, mismatches.reshape((len(labels),9,-1))
| KaranKash/DigitSpeak | untrained/load_data.py | load_data.py | py | 5,542 | python | en | code | 2 | github-code | 36 |
16221940900 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
import unittest
from nose.tools import assert_in, assert_raises
from wechatsogou.const import WechatSogouConst
from wechatsogou.request import WechatSogouRequest
class TestBasicGenSearchArticleURL(unittest.TestCase):
    """Unit tests for WechatSogouRequest.gen_hot_url."""

    def test_gen_hot_url(self):
        # Iterate every public hot-index constant (skip dunder attributes).
        for hot_index in filter(lambda x: not x.startswith('__'), dir(WechatSogouConst.hot_index)):
            url = WechatSogouRequest.gen_hot_url(hot_index)
            assert_in('http://weixin.sogou.com/wapindex/wap/0612/wap_', url)
            assert_in('0.html', url)

            # Page numbers must be >= 1; page 0 is rejected by an assertion.
            with assert_raises(AssertionError):
                WechatSogouRequest.gen_hot_url(hot_index, 0)

            # Page N maps to the zero-based suffix "N-1.html".
            for page in range(1, 5):
                url = WechatSogouRequest.gen_hot_url(hot_index, page)
                assert_in('http://weixin.sogou.com/wapindex/wap/0612/wap_', url)
                assert_in('{}.html'.format(page - 1), url)
# Allow running this test module directly (outside a nose/pytest runner).
if __name__ == '__main__':
    unittest.main()
| chyroc/WechatSogou | test/test_request_gen_hot_url.py | test_request_gen_hot_url.py | py | 1,032 | python | en | code | 5,658 | github-code | 36 |
# One indentation level rendered as spaces, and its width.
SPACE4 = " "*4
SPACE_SIZE = 4
# NOTE(review): presumably a display/truncation limit for rendered cells --
# confirm against the consumer of this constant.
MAX_CELL_LENGTH = 50
# Keywords that open an indented block and therefore affect scope tracking.
CONTROL_STATEMENTS = 'if,elif,else,while,def,for,try,except'.split(",")
REMOVE_VARS = ["_input_as_print", "_this_func", "_params", "_namespace", "_inputs", "_code"] # helpers for identifying scope
# Sentinel prefix used to tag input captured as print output.
PRINT_INPUT_DEC = "!@#$@###"
# Operator/bracket characters -> safe textual tokens ($-prefixed mnemonics).
SYMBOL_REPS = {">":"$gt",
               "<":"$lt",
               "!=":"$ineq",
               "==":"$eq",
               "+":"$add",
               "-": "$sub",
               "*": "$mul",
               "/": "$div",
               "%": "$mod",
               "(": "$oprb",
               ")": "$clrb",
               "[": "$opsb",
               "]": "$clsb"}
73408801703 | from http.server import BaseHTTPRequestHandler, HTTPServer
import os
import cgi
import string
import random
from controller import *
# Network binding for the demo server.
hostname = "localhost"
serverport = 8000
# Single hard-coded "logged in" user for demonstration purposes.  The
# original module-level `global userid` statement was removed: `global`
# is a no-op at module scope.
userid = 1
class server(BaseHTTPRequestHandler):
    """Hand-rolled router for the demo app, dispatching on self.path.

    GET routes: /, /post/create/, /post/<id>/, /search/,
    /search/results/<query>/, /user/<id>/, /messages/, /messages/<id>/.
    POST routes: /post/create/, /search/, /messages/<id>/.
    View/DB logic lives in the handlers imported from controller.
    """
    def do_GET(self):
        # Home page.
        if self.path =='/':
            self.send_response(200)
            self.send_header('content-type','text/html')
            self.end_headers()
            output=HomeViewHandler()
            self.wfile.write(output.encode())
        # New-post form.
        if self.path=='/post/create/':
            self.send_response(200)
            self.send_header('content-type','text/html')
            self.end_headers()
            output=PostCreateForm()
            self.wfile.write(output.encode())
        # Post detail: /post/<numeric id>/.
        if self.path.startswith('/post/') and self.path[6:-1].isdigit() and self.path.endswith('/'):
            self.send_response(200)
            self.send_header('content-type','text/html')
            self.end_headers()
            print(self.path[6:-1])
            postID=self.path[6:-1]
            #need some place holder code to check if the post exists or not
            output=PostViewHandler(postID)
            self.wfile.write(output.encode())
        # Search form.
        if self.path=='/search/':
            self.send_response(200)
            self.send_header('content-type','text/html')
            self.end_headers()
            # Bug fix: SearchViewHandler was assigned without being called,
            # so output.encode() raised AttributeError on the function object.
            output=SearchViewHandler()
            self.wfile.write(output.encode())
        # Search results: /search/results/<query>/.
        if self.path.startswith('/search/results/') and self.path[-1:]=='/':
            self.send_response(200)
            self.send_header('content-type','text/html')
            self.end_headers()
            lookup= self.path[16:-1]
            output=SearchResultHandler(lookup)
            self.wfile.write(output.encode())
        # User profile: /user/<numeric id>/ (placeholder page).
        if self.path.startswith('/user/') and self.path[6:-1].isdigit() and self.path.endswith('/'):
            self.send_response(200)
            self.send_header('content-type','text/html')
            self.end_headers()
            print(self.path[6:-1])
            #need some place holder code to check if the user exists or not
            output=''' <div>
                        <h2> This is a User </h2>
                    </div>
                    '''
            self.wfile.write(output.encode())
        # Conversation list (placeholder page).
        if self.path=='/messages/':
            self.send_response(200)
            self.send_header('content-type','text/html')
            self.end_headers()
            output=''' <div>
                        <h2> This page lists all the conversations</h2>
                    </div>
                    '''
            self.wfile.write(output.encode())
        # Single conversation room: /messages/<numeric room id>/.
        if self.path.startswith('/messages/') and self.path[10:-1].isdigit() and self.path.endswith('/'):
            self.send_response(200)
            self.send_header('content-type','text/html')
            self.end_headers()
            print(self.path[10:-1])
            roomid=self.path[10:-1]
            #need some place holder code to check if the room exists or not
            output=''' <div>
                        <h2> This is a Specific Conversation room </h2>
                        <form method="POST" enctype="multipart/form-data" action="/messages/%s/">
                        <input name="msgContent"type="text" placeholder="Send something awesome for others to view">
                        <br>
                        <input type="submit" value="Send">
                        </form>
                    </div>
                    ''' % (self.path[10:-1])
            self.wfile.write(output.encode())
    def do_POST(self):
        # Create a post from the multipart form, then redirect to it.
        if self.path=='/post/create/':
            ctype,pdict = cgi.parse_header(self.headers.get('content-type'))
            pdict['boundary'] = bytes(pdict['boundary'],"utf-8")
            #for demonstration purposes we have the user id hardcoded but irl that hardcoded value would be the userid
            # NOTE(review): unlike the /search/ branch below, CONTENT-LENGTH
            # is not forwarded to parse_multipart here -- confirm whether
            # that is required for this cgi version.
            if ctype=='multipart/form-data':
                fields=cgi.parse_multipart(self.rfile,pdict)
                # Field values come back as lists; str()[3:-2] strips the
                # "['...']" wrapping to recover the raw text.
                new_caption = fields.get('caption')
                new_caption=str(new_caption)
                new_caption=new_caption[3:-2]
                new_title = fields.get('postTitle')
                new_title=str(new_title)
                new_title=new_title[3:-2]
                new_file= fields.get('filename')
                file_type=str(fields.get('type'))
                fileformat=0
                if file_type[3:-2] == 'on':
                    fileformat=1
                new_file=str(new_file)
                new_file=new_file[3:-2]
                # (Removed a dead `mediaID = 100` placeholder assignment that
                # was immediately overwritten.)
                mediaID=MediaHandler(fileformat,new_file,userid)
                #ideally when a user is created, it would also make a repository with user's unique id and all the media is stored in it
                #in this line call a function to send data to db
                postID=PostCreateHandler(mediaID, userid, new_title, new_caption)
                redirect='/post/'+postID+'/'
            # NOTE(review): `redirect` is only bound when the request was
            # multipart; a non-multipart POST raises NameError below.
            self.send_response(301)
            self.send_header('content-type','text/html')
            self.send_header('Location', redirect)
            self.end_headers()
        # Run a search and redirect to its results page.
        if self.path=='/search/':
            redirect='/search/results/'
            ctype,pdict = cgi.parse_header(self.headers.get('content-type'))
            pdict['boundary'] = bytes(pdict['boundary'],"utf-8")
            content_len= int(self.headers.get('Content-length'))
            pdict['CONTENT-LENGTH']=content_len
            if ctype=='multipart/form-data':
                fields=cgi.parse_multipart(self.rfile,pdict)
                results = fields.get('lookup')
                print(results)
                results=str(results)
                results=results[3:-2]
                # Spaces become dashes so the query can live in the URL path.
                results = results.replace(' ','-')
                redirect+=results+'/'
                #in this line call a function to send data to db
                print(results)
            self.send_response(301)
            self.send_header('content-type','text/html')
            self.send_header('Location', redirect)
            self.end_headers()
        # Post a message into a conversation room, then redirect back to it.
        if self.path.startswith('/messages/') and self.path[10:-1].isdigit() and self.path.endswith('/'):
            ctype,pdict = cgi.parse_header(self.headers.get('content-type'))
            pdict['boundary'] = bytes(pdict['boundary'],"utf-8")
            redirect=self.path
            if ctype=='multipart/form-data':
                fields=cgi.parse_multipart(self.rfile,pdict)
                new_caption = fields.get('msgContent')
                #in this line call a function to send data to db
                print(new_caption)
            self.send_response(301)
            self.send_header('content-type','text/html')
            self.send_header('Location', redirect)
            self.end_headers()
# Bind on hostname:serverport and serve until interrupted.
httpd = HTTPServer((hostname, serverport),server)
httpd.serve_forever()
| crash1604/PurePythonBackend | server.py | server.py | py | 5,714 | python | en | code | 0 | github-code | 36 |
35058044672 | from flask import Flask, request, jsonify
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.engine import Engine
from sqlalchemy import event
import os
import sqlite3
# Shared SQLAlchemy handle; bound to the app by init_app() below.
database = SQLAlchemy()

app = Flask(__name__)
# NOTE(review): SECRET_KEY='dev' is a development placeholder -- replace
# for any deployment.  The raw-string URI keeps the doubled backslashes
# literally; verify the path still resolves on the target machine.
app.config.from_mapping(
    SECRET_KEY = 'dev',
    SQLALCHEMY_DATABASE_URI = r"sqlite:///C:\\Users\syedr\Documents\databaseFlask\backend\sqlite.db"
)
@event.listens_for(Engine, 'connect')
def activate_foreign_keys(connection, connection_record):
    """Enable SQLite foreign-key enforcement on every new connection.

    SQLite has foreign keys disabled per-connection by default, so this
    SQLAlchemy engine hook issues PRAGMA foreign_keys=ON each time a raw
    sqlite3 connection is opened; other DBAPIs are left untouched.
    """
    if type(connection) is sqlite3.Connection:
        cursor = connection.cursor()
        cursor.execute('PRAGMA foreign_keys=ON')
        cursor.close()
database.init_app(app)
with app.app_context():
    # Load table metadata from the existing database file rather than from
    # declarative models, then log what was found.
    database.reflect()
    print(database.metadata.tables.keys())

# Ensure the Flask instance folder exists (ignore if already present).
try:
    os.makedirs(app.instance_path)
except OSError:
    pass
from .routes import * | Talha7011235/studentCourseInstructorDatabaseFlaskIndividualAssignment | backend/__init__.py | __init__.py | py | 857 | python | en | code | 0 | github-code | 36 |
def has_valid_checksum(card_number):
    """Luhn checksum for a card number given as a string of digits.

    For a full 16-digit number the last digit is treated as the check
    digit and True/False is returned depending on whether it matches the
    checksum computed over the first 15 digits.  For any other length
    the computed check digit itself (an int 0-9) is returned, so the
    function can also be used to generate one.
    """
    check_digit = None
    if len(card_number) == 16:
        check_digit = int(card_number[-1])
        card_number = card_number[:-1]
    # Luhn: double the digits at odd positions (1-indexed from the left
    # of the remaining digits), then subtract 9 from any result over 9.
    doubled = []
    for i, digit in enumerate(card_number, start=1):
        value = int(digit)
        doubled.append(value * 2 if i % 2 else value)
    total = sum(d - 9 if d > 9 else d for d in doubled)
    compare_digit = 10 - total % 10 if total % 10 else 0
    # BUG FIX: the original tested `if check_digit:`, which is False when
    # the check digit is 0 and made valid cards ending in 0 fail validation.
    if check_digit is not None:
        return compare_digit == check_digit
    return compare_digit
print(has_valid_checksum('3000003972196503'))
| stasshi/SimpleBankingApp | banking/Test.py | Test.py | py | 718 | python | en | code | 0 | github-code | 36 |
28524863095 | import numpy as np
# Read matrix A and vector b from user input
n = int(input("Enter the size of the matrix: "))
A = np.zeros((n,n))
b = np.zeros(n)
for i in range(n):
    # One whitespace-separated row of coefficients per line.
    row = input(f"Enter the coefficients of row {i+1} of matrix A, separated by spaces: ")
    A[i,:] = np.array([float(x) for x in row.split()])
    b[i] = float(input(f"Enter the constant term for row {i+1} of vector b: "))
def gauss_elimination(A, b):
    """Solve the linear system A x = b by Gaussian elimination.

    A (n x n ndarray) and b (length-n ndarray) are modified in place
    during forward elimination.  No pivoting is performed, so a zero
    pivot A[k, k] causes a division error -- TODO: add partial pivoting.
    Prints the intermediate matrix/vector after each elimination step,
    then returns the solution vector x.
    """
    n = len(b)
    # Forward elimination: eliminate the entries below each pivot.
    for k in range(n-1):
        for i in range(k+1, n):
            factor = A[i,k] / A[k,k]
            # Vectorized row update (replaces the inner j-loop); A[i, k]
            # itself is intentionally left untouched, as in the original.
            A[i, k+1:] -= factor * A[k, k+1:]
            b[i] -= factor * b[k]
            # Print intermediate results
            print("Step ", k*n+i-k*(k+1)//2+1, ":")
            print(A)
            print(b)
    # Back substitution, from the last row upwards.
    x = np.zeros(n)
    x[n-1] = b[n-1] / A[n-1,n-1]
    for i in range(n-2, -1, -1):
        # `acc` avoids shadowing the builtin `sum` (original code smell);
        # the dot product replaces the manual accumulation loop.
        acc = b[i] - A[i, i+1:] @ x[i+1:]
        x[i] = acc / A[i,i]
    return x
# Call the Gauss Elimination function and print the result
# Note: A and b are modified in place by the elimination.
x = gauss_elimination(A, b)
print("Solution: ", x)
| umang27102001/AssignmentsMCA | assignmentNM/.py/assignment3A.py | assignment3A.py | py | 1,183 | python | en | code | 0 | github-code | 36 |
# Sample inputs used when smallest_pair() is called with no arguments.
a = [11,2,1,15]
b = [235,127,19,4,12,23]
def smallest_pair(xs=None, ys=None):
    """Print and return the closest pair (one value from each list).

    Defaults to the module-level lists ``a`` and ``b`` (which are sorted
    in place, as the original did); explicit arguments are copied via
    sorted() and left untouched.

    Fixes two bugs in the original scan: candidate pairs were only
    examined when the scan alternated between the two lists (missing
    valid pairs), and a difference of exactly 0 was treated as
    "no minimum yet" by the truthiness test `if not minimum`.
    Leftover per-iteration debug prints were removed.
    """
    if xs is None:
        a.sort()  # preserve the original's in-place sort side effect
        xs = a
    else:
        xs = sorted(xs)
    if ys is None:
        b.sort()
        ys = b
    else:
        ys = sorted(ys)
    i = j = 0
    best_diff = None
    best_pair = None
    # Classic two-pointer sweep: compare the current heads of both sorted
    # lists, then advance the pointer at the smaller value.
    while i < len(xs) and j < len(ys):
        diff = abs(xs[i] - ys[j])
        if best_diff is None or diff < best_diff:
            best_diff = diff
            best_pair = (xs[i], ys[j])
        if xs[i] <= ys[j]:
            i += 1
        else:
            j += 1
    print(best_pair)
    return best_pair
smallest_pair()
| puyuan/codejam | practice/smallestdiffpairs.py | smallestdiffpairs.py | py | 1,219 | python | en | code | 0 | github-code | 36 |
71032555624 | """add adventure logs
Revision ID: 36dc23330424
Revises: 0db346b0362b
Create Date: 2022-02-06 21:15:27.347180
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '36dc23330424'
down_revision = '0db346b0362b'
branch_labels = None
depends_on = None
def upgrade():
    """Create the adventure_log table (FKs to item/location/game) and its id index."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('adventure_log',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('discovered_locations_id', sa.Integer(), nullable=True),
    sa.Column('discoverable_locations_id', sa.Integer(), nullable=True),
    sa.Column('discovered_items_id', sa.Integer(), nullable=True),
    sa.Column('discoverable_items_id', sa.Integer(), nullable=True),
    sa.Column('game_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['discoverable_items_id'], ['item.id'], name=op.f('fk_adventure_log_discoverable_items_id_item')),
    sa.ForeignKeyConstraint(['discoverable_locations_id'], ['location.id'], name=op.f('fk_adventure_log_discoverable_locations_id_location')),
    sa.ForeignKeyConstraint(['discovered_items_id'], ['item.id'], name=op.f('fk_adventure_log_discovered_items_id_item')),
    sa.ForeignKeyConstraint(['discovered_locations_id'], ['location.id'], name=op.f('fk_adventure_log_discovered_locations_id_location')),
    sa.ForeignKeyConstraint(['game_id'], ['game.id'], name=op.f('fk_adventure_log_game_id_game')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_adventure_log'))
    )
    op.create_index(op.f('ix_adventure_log_id'), 'adventure_log', ['id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the adventure_log index and table (exact reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_adventure_log_id'), table_name='adventure_log')
    op.drop_table('adventure_log')
    # ### end Alembic commands ###
| lauraturnbull/griddle-earth | engine/migrations/alembic/versions/36dc23330424_add_adventure_logs.py | 36dc23330424_add_adventure_logs.py | py | 1,863 | python | en | code | 0 | github-code | 36 |
19565236986 | import spacy
# Medium English model: needed for word vectors that back .similarity().
nlp = spacy.load('en_core_web_md')

# One candidate movie description per line.
with open('movies.txt', 'r') as f_movies:
    movies = f_movies.readlines()

compare_movie = ('''Planet Hulk: Will he save
their world or destroy it? When the Hulk becomes too dangerous for the
Earth, the Illuminati trick Hulk into a shuttle and launch him into space to a
planet where the Hulk can live in peace. Unfortunately, Hulk land on the
planet Sakaar where he is sold into slavery and trained as a gladiator.''')

nlp_compare_movie = nlp(compare_movie)

# Linear scan keeping the best-scoring description seen so far.
most_similar_score = -1
most_similar_movie = ""
for doc in (nlp(line) for line in movies):
    score = nlp_compare_movie.similarity(doc)
    # Strict '>' keeps the earliest movie on ties, same as the original.
    if score > most_similar_score:
        most_similar_score, most_similar_movie = score, doc.text

print("Most similar movie:", most_similar_movie)
print("Similarity score:", most_similar_score)
| vho1988/semantic_similarity | watch_next.py | watch_next.py | py | 921 | python | en | code | 0 | github-code | 36 |
20584236046 | from typing import *
class Solution:
    def bestTeamScore(self, scores: List[int], ages: List[int]) -> int:
        """Maximum total score of a conflict-free team.

        A conflict is a younger player with a strictly higher score than
        an older one.  Sorting players by (score, age) reduces this to a
        max-sum "non-decreasing subsequence" DP over ages, O(n^2).
        """
        players = sorted(zip(scores, ages))
        best_overall = 0
        best_ending_here = [0] * len(players)
        for i, (score_i, age_i) in enumerate(players):
            # Best team score among earlier players this one can join.
            best_prev = 0
            for j in range(i):
                if players[j][1] <= age_i and best_ending_here[j] > best_prev:
                    best_prev = best_ending_here[j]
            best_ending_here[i] = best_prev + score_i
            if best_ending_here[i] > best_overall:
                best_overall = best_ending_here[i]
        return best_overall
# Quick check: all scores equal means every player fits (expected sum: 10).
a = Solution()
print(a.bestTeamScore([1,1,1,1,1,1,1,1,1,1], [811,364,124,873,790,656,581,446,885,134]))
| RabbltMan/leetcode_dump | 1626/1626.py | 1626.py | py | 714 | python | en | code | 0 | github-code | 36 |
37636283080 | # Given a set of points in the xy-plane, determine the minimum area of a rectangle formed from these points, with sides parallel to the x and y axes.
# If there isn't any rectangle, return 0.
# Example 1:
# Input: [[1,1],[1,3],[3,1],[3,3],[2,2]]
# Output: 4
# Example 2:
# Input: [[1,1],[1,3],[3,1],[3,3],[4,1],[4,3]]
# Output: 2
# Note:
# 1 <= points.length <= 500
# 0 <= points[i][0] <= 40000
# 0 <= points[i][1] <= 40000
# All points are distinct.
class Solution:
    def minAreaRect(self, points: List[List[int]]) -> int:
        """Minimum area of an axis-aligned rectangle formed by the points.

        Treats every unordered pair of points as a candidate diagonal and
        checks whether the two opposite corners also exist -- a clean
        O(n^2) scan with O(1) set lookups, replacing the original triple
        loop over per-coordinate buckets.  Returns 0 when no rectangle
        can be formed.
        """
        pts = [tuple(p) for p in points]
        seen = set(pts)
        best = float("inf")
        for idx in range(len(pts)):
            x1, y1 = pts[idx]
            for jdx in range(idx):
                x2, y2 = pts[jdx]
                # A valid diagonal needs distinct x and y coordinates.
                if x1 != x2 and y1 != y2 and (x1, y2) in seen and (x2, y1) in seen:
                    area = abs(x1 - x2) * abs(y1 - y2)
                    if area < best:
                        best = area
        return 0 if best == float("inf") else best
| sunnyyeti/Leetcode-solutions | 939 Minimum Area Rectangle.py | 939 Minimum Area Rectangle.py | py | 1,086 | python | en | code | 0 | github-code | 36 |
8012209011 | from __future__ import absolute_import, print_function, division
from deepmedic.frontEnd.configParsing.utils import getAbsPathEvenIfRelativeIsGiven, parseAbsFileLinesInList, parseFileLinesInList, check_and_adjust_path_to_ckpt
from deepmedic.dataManagement import samplingType
class TrainSessionParameters(object) :
#To be called from outside too.
    @staticmethod
    def getSessionName(sessionName) :
        """Return the configured session name, or the default "trainSession"."""
        return sessionName if sessionName is not None else "trainSession"
#REQUIRED:
@staticmethod
def errorRequireChannelsTraining():
print("ERROR: Parameter \"channelsTraining\" needed but not provided in config file. This parameter should provide paths to files, as many as the channels (modalities) of the task. Each of the files should contain a list of paths, one for each case to train on. These paths in a file should point to the .nii(.gz) files that are the corresponding channel for a patient. Please provide it in the format: channelsTraining = [\"path-to-file-for-channel1\", ..., \"path-to-file-for-channelN\"]. The paths should be given in quotes, separated by commas (list of strings, python-style). Exiting."); exit(1)
errReqChansTr = errorRequireChannelsTraining
@staticmethod
def errorRequireGtLabelsTraining():
print("ERROR: Parameter \"gtLabelsTraining\" needed but not provided in config file. This parameter should provide the path to a file. That file should contain a list of paths, one for each case to train on. These paths should point to the .nii(.gz) files that contain the corresponding Ground-Truth labels for a case. Please provide it in the format: gtLabelsTraining = \"path-to-file\". The path should be given in quotes (a string, python-style). Exiting."); exit(1)
errReqGtTr = errorRequireGtLabelsTraining
@staticmethod
def errorRequireSamplMasksAreProbabMapsTrain() :
print("ERROR: Parameter \"samplingMasksAreProbabMapsTrain\" needed but not provided in config file. This parameters is needed when parameter \"useDefaultTrainingSamplingFromGtAndRoi\" = False, in order to know whether the provided masks are probability maps or segmentation labels. Please provide parameter in the form: samplingMasksAreProbabMapsTrain = True/False. True if the masks given at \"masksForPos(Neg)SamplingTrain\" are probability maps (can be non-normalized, like weights), or False if they are binary segmentation masks. Exiting."); exit(1)
errReqMasksTypeTr = errorRequireSamplMasksAreProbabMapsTrain
@staticmethod
def warnDefaultPosSamplMasksTrain() :
print("WARN: Parameter \"weightedMapsForPosSamplingTrain\" was not provided in config file, even though advanced training options were triggered by setting \"useDefaultTrainingSamplingFromGtAndRoi\" = False. This parameter should point to a file that lists the paths (per case/patient) to weighted-maps that indicate where to perform the sampling of training samples. Can be provided in the format: weightedMapsForPosSamplingTrain = \"path-to-file\". The target file should have one entry per case (patient). Each of them should be pointing to .nii(.gz) files that indicate where to extract the positive samples. \n\tDEFAULT: In the case it is not provided (like now!) the corresponding samples are extracted uniformly from the whole volume!")
return None
warnDefPosMasksTr = warnDefaultPosSamplMasksTrain
@staticmethod
def warnDefaultNegSamplMasksTrain() :
print("WARN: Parameter \"weightedMapsForNegSamplingTrain\" was not provided in config file, even though advanced training options were triggered by setting \"useDefaultTrainingSamplingFromGtAndRoi\" = False. This parameter should point to a file that lists the paths (per case/patient) to weighted-maps that indicate where to perform the sampling of training samples. Can be provided in the format: weightedMapsForNegSamplingTrain = \"path-to-file\". The target file should have one entry per case (patient). Each of them should be pointing to .nii(.gz) files that indicate where to extract the negative samples. \n\tDEFAULT: In the case it is not provided (like now!) the corresponding samples are extracted uniformly from the whole volume!")
return None
warnDefNegMasksTr = warnDefaultNegSamplMasksTrain
@staticmethod
def errorRequirePredefinedLrSched() :
print("ERROR: Parameter \"typeOfLearningRateSchedule\" was set to \"predefined\", but no predefined schedule was given. Please specify at which epochs to lower the Learning Rate, by providing the corresponding parameter in the format: predefinedSchedule = [epoch-for-1st-decrease, ..., epoch-for-last-decrease], where the epochs are specified by an integer > 0. Exiting."); exit(1)
errReqPredLrSch = errorRequirePredefinedLrSched
@staticmethod
def errorAutoRequiresValSamples() :
print("ERROR: Parameter \"typeOfLearningRateSchedule\" was set to \"auto\". This requires performing validation on samples throughout training, because this schedule lowers the Learning Rate when validation-accuracy plateaus. However the parameter \"performValidationOnSamplesThroughoutTraining\" was set to False in the configuration file, or was ommitted, which triggers the default value, False! Please set the parameter performValidationOnSamplesThroughoutTraining = True. You will then need to provide the path to the channels of the validation cases in the format: channelsValidation = [\"path-to-file-that-lists-paths-to-channel-1-for-every-case\", ..., \"path-to-file-that-lists-paths-to-channel-N-for-every-case\"] (python style list-of-strings)."+\
"\t Also, you will need to provide the Ground-Truth for the validation cases, in the format: gtLabelsValidation = \"path-to-file\", where the file lists the paths to the GT labels of each validation case. Exiting!"); exit(1)
@staticmethod
def errorRequireChannelsVal() :
print("ERROR: Parameter \"channelsValidation\" was not provided, although it is required to perform validation, although validation was requested (parameters \"performValidationOnSamplesThroughoutTraining\" or \"performFullInferenceOnValidationImagesEveryFewEpochs\" was set to True). You will need to provide a list with path to files that list where the channels for each validation case can be found. The corresponding parameter must be provided in the format: channelsValidation = [\"path-to-file-that-lists-paths-to-channel-1-for-every-case\", ..., \"path-to-file-that-lists-paths-to-channel-N-for-every-case\"] (python style list-of-strings). Exiting."); exit(1)
errReqChannsVal = errorRequireChannelsVal
@staticmethod
def errorReqGtLabelsVal() :
print("ERROR: Parameter \"gtLabelsValidation\" was not provided, although it is required to perform validation on training-samples, which was requested (parameter \"performValidationOnSamplesThroughoutTraining\" was set to True). It is also useful so that the DSC score is reported if full-inference on the validation samples is performed (when parameter \"performFullInferenceOnValidationImagesEveryFewEpochs\" is set to True)! You will need to provide the path to a file that lists where the GT labels for each validation case can be found. The corresponding parameter must be provided in the format: gtLabelsValidation = \"path-to-file-that-lists-GT-labels-for-every-case\" (python style string). Exiting."); exit(1)
#VALIDATION
@staticmethod
def errorReqNumberOfEpochsBetweenFullValInfGreaterThan0() :
print("ERROR: It was requested to perform full-inference on validation images by setting parameter \"performFullInferenceOnValidationImagesEveryFewEpochs\" to True. For this, it is required to specify the number of epochs between two full-inference procedures. This number was given equal to 0. Please specify a number greater than 0, in the format: numberOfEpochsBetweenFullInferenceOnValImages = 1 (Any integer. Default is 1). Exiting!"); exit(1)
@staticmethod
def errorRequireNamesOfPredictionsVal() :
print("ERROR: It was requested to perform full-inference on validation images by setting parameter \"performFullInferenceOnValidationImagesEveryFewEpochs\" to True and then save some of the results (segmentation maps, probability maps or feature maps), either manually or by default. For this, it is required to specify the path to a file, which should contain names to give to the results. Please specify the path to such a file in the format: namesForPredictionsPerCaseVal = \"./validation/validationNamesOfPredictionsSimple.cfg\" (python-style string). Exiting!"); exit(1)
@staticmethod
def errorRequirePercentOfPosSamplesVal():
print("ERROR: Advanced sampling was enabled by setting: useDefaultUniformValidationSampling = False. This requires providing the percentage of validation samples that should be extracted as positives (from the positive weight-map). Please specify a float between 0.0 and 1.0, eg in the format: percentOfSamplesToExtractPositiveVal = 0.5. Exiting!"); exit(1)
errReqPercPosTrVal = errorRequirePercentOfPosSamplesVal
@staticmethod
def warnDefaultPercentOfPosSamplesVal():
print("WARN: Advanced sampling was enabled by setting: useDefaultUniformValidationSampling = False. This requires providing the percentage of validation samples that should be extracted as positives (from the positive weight-map). Please specify a float between 0.0 and 1.0, eg in the format: percentOfSamplesToExtractPositiveVal = 0.5. \n\tDEFAULT: In the case not given (like now!) default value of 0.5 is used!")
return 0.5
warnDefPercPosTrVal = warnDefaultPercentOfPosSamplesVal
@staticmethod
def warnDefaultPosSamplMasksVal() :
print("WARN: Parameter \"weightedMapsForPosSamplingVal\" was not provided in config file, even though advanced validation-sampling options were triggered by setting \"useDefaultUniformValidationSampling\" = False. This parameter should point to a file that lists the paths (per case/patient) to weighted-maps that indicate where to perform the sampling of validation samples. Can be provided in the format: weightedMapsForPosSamplingVal = \"path-to-file\". The target file should have one entry per case (patient). Each of them should be pointing to .nii(.gz) files that indicate where to extract the positive samples. \n\tDEFAULT: In the case it is not provided (like now!) the corresponding samples are extracted uniformly from the whole volume!")
return None
warnDefPosMasksVal = warnDefaultPosSamplMasksVal
@staticmethod
def warnDefaultNegSamplMasksVal() :
print("WARN: Parameter \"weightedMapsForNegSamplingVal\" was not provided in config file, even though advanced validation-sampling options were triggered by setting \"useDefaultUniformValidationSampling\" = False. This parameter should point to a file that lists the paths (per case/patient) to weighted-maps that indicate where to perform the sampling of validation samples. Can be provided in the format: weightedMapsForNegSamplingVal = \"path-to-file\". The target file should have one entry per case (patient). Each of them should be pointing to .nii(.gz) files that indicate where to extract the negative samples. \n\tDEFAULT: In the case it is not provided (like now!) the corresponding samples are extracted uniformly from the whole volume!")
return None
warnDefNegMasksVal = warnDefaultNegSamplMasksVal
@staticmethod
def errorRequireOptimizer012() :
print("ERROR: The parameter \"sgd0orAdam1orRms2\" must be given 0,1 or 2. Omit for default. Exiting!"); exit(1)
@staticmethod
def errorRequireMomentumClass0Nestov1() :
print("ERROR: The parameter \"classicMom0OrNesterov1\" must be given 0 or 1. Omit for default. Exiting!"); exit(1)
@staticmethod
def errorRequireMomValueBetween01() :
print("ERROR: The parameter \"momentumValue\" must be given between 0.0 and 1.0 Omit for default. Exiting!"); exit(1)
@staticmethod
def errorRequireMomNonNorm0Norm1() :
print("ERROR: The parameter \"momNonNorm0orNormalized1\" must be given 0 or 1. Omit for default. Exiting!"); exit(1)
# Deprecated :
@staticmethod
def errorDeprecatedPercPosTraining():
print("ERROR: Parameter \"percentOfSamplesToExtractPositiveTrain\" in the config file is now Deprecated! "+\
" Please remove this entry from the train-config file. If you do not want the default behaviour but "+\
"instead wish to specify the proportion of Foreground and Background samples yourself, please "+\
" activate the \"Advanced Sampling\" options (useDefaultTrainingSamplingFromGtAndRoi=False), "+\
" choose type-of-sampling Foreground/Background (typeOfSamplingForTraining = 0) and use the new "+\
"variable \"proportionOfSamplesToExtractPerCategoryTraining\" which replaces the functionality of the deprecated "+\
"(eg. proportionOfSamplesToExtractPerCategoryTraining = [0.3, 0.7]). Exiting."); exit(1)
errDeprPercPosTr = errorDeprecatedPercPosTraining
def __init__(self,
log,
mainOutputAbsFolder,
folderForSessionCnnModels,
folderForPredictionsVal,
folderForFeaturesVal,
num_classes,
model_name,
cfg):
#Importants for running session.
# From Session:
self.log = log
self.mainOutputAbsFolder = mainOutputAbsFolder
# From Config:
self.sessionName = self.getSessionName( cfg[cfg.SESSION_NAME] )
abs_path_to_cfg = cfg.get_abs_path_to_cfg()
abs_path_to_saved = getAbsPathEvenIfRelativeIsGiven( cfg[cfg.SAVED_MODEL], abs_path_to_cfg ) if cfg[cfg.SAVED_MODEL] is not None else None # Load pretrained model.
self.savedModelFilepath = check_and_adjust_path_to_ckpt( self.log, abs_path_to_saved) if abs_path_to_saved is not None else None
#====================TRAINING==========================
self.filepath_to_save_models = folderForSessionCnnModels + "/" + model_name + "." + self.sessionName
if cfg[cfg.CHANNELS_TR] is None:
self.errReqChansTr()
if cfg[cfg.GT_LABELS_TR] is None:
self.errReqGtTr()
#[[case1-ch1, ..., caseN-ch1], [case1-ch2,...,caseN-ch2]]
listOfAListPerChannelWithFilepathsOfAllCasesTrain = [parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(channelConfPath, abs_path_to_cfg)) for channelConfPath in cfg[cfg.CHANNELS_TR]]
self.channelsFilepathsTrain = [ list(item) for item in zip(*tuple(listOfAListPerChannelWithFilepathsOfAllCasesTrain)) ] #[[case1-ch1, case1-ch2], ..., [caseN-ch1, caseN-ch2]]
self.gtLabelsFilepathsTrain = parseAbsFileLinesInList( getAbsPathEvenIfRelativeIsGiven(cfg[cfg.GT_LABELS_TR], abs_path_to_cfg) )
#[Optionals]
#~~~~~~~~~Sampling~~~~~~~
self.providedRoiMasksTrain = True if cfg[cfg.ROI_MASKS_TR] else False
self.roiMasksFilepathsTrain = parseAbsFileLinesInList( getAbsPathEvenIfRelativeIsGiven(cfg[cfg.ROI_MASKS_TR], abs_path_to_cfg) ) if self.providedRoiMasksTrain else []
if cfg[cfg.PERC_POS_SAMPLES_TR] is not None : #Deprecated. Issue error and ask for correction.
self.errDeprPercPosTr()
#~~~~~~~~~Advanced Sampling~~~~~~~
#ADVANCED CONFIG IS DISABLED HERE IF useDefaultSamplingFromGtAndRoi = True!
self.useDefaultTrainingSamplingFromGtAndRoi = cfg[cfg.DEFAULT_TR_SAMPLING] if cfg[cfg.DEFAULT_TR_SAMPLING] is not None else True
DEFAULT_SAMPLING_TYPE_TR = 0
if self.useDefaultTrainingSamplingFromGtAndRoi :
self.samplingTypeInstanceTrain = samplingType.SamplingType( self.log, DEFAULT_SAMPLING_TYPE_TR, num_classes )
numberOfCategoriesOfSamplesTr = self.samplingTypeInstanceTrain.getNumberOfCategoriesToSample()
self.samplingTypeInstanceTrain.setPercentOfSamplesPerCategoryToSample( [1.0/numberOfCategoriesOfSamplesTr]*numberOfCategoriesOfSamplesTr )
self.forEachSamplingCategory_aListOfFilepathsToWeightMapsOfEachPatientTraining = None
else :
samplingTypeToUseTr = cfg[cfg.TYPE_OF_SAMPLING_TR] if cfg[cfg.TYPE_OF_SAMPLING_TR] is not None else DEFAULT_SAMPLING_TYPE_TR
self.samplingTypeInstanceTrain = samplingType.SamplingType( self.log, samplingTypeToUseTr, num_classes)
if samplingTypeToUseTr in [0,3] and cfg[cfg.PROP_OF_SAMPLES_PER_CAT_TR] is not None :
self.samplingTypeInstanceTrain.setPercentOfSamplesPerCategoryToSample( cfg[cfg.PROP_OF_SAMPLES_PER_CAT_TR] )
else :
numberOfCategoriesOfSamplesTr = self.samplingTypeInstanceTrain.getNumberOfCategoriesToSample()
self.samplingTypeInstanceTrain.setPercentOfSamplesPerCategoryToSample( [1.0/numberOfCategoriesOfSamplesTr]*numberOfCategoriesOfSamplesTr )
# This could be shortened.
if cfg[cfg.WEIGHT_MAPS_PER_CAT_FILEPATHS_TR] is not None :
#[[case1-weightMap1, ..., caseN-weightMap1], [case1-weightMap2,...,caseN-weightMap2]]
listOfAListPerWeightMapCategoryWithFilepathsOfAllCasesTrain = [parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(weightMapConfPath, abs_path_to_cfg)) for weightMapConfPath in cfg[cfg.WEIGHT_MAPS_PER_CAT_FILEPATHS_TR]]
else :
listOfAListPerWeightMapCategoryWithFilepathsOfAllCasesTrain = None
self.forEachSamplingCategory_aListOfFilepathsToWeightMapsOfEachPatientTraining = listOfAListPerWeightMapCategoryWithFilepathsOfAllCasesTrain #If None, following bool will turn False.
self.providedWeightMapsToSampleForEachCategoryTraining = self.forEachSamplingCategory_aListOfFilepathsToWeightMapsOfEachPatientTraining is not None
#~~~~~~~~ Training Cycle ~~~~~~~~~~~
self.numberOfEpochs = cfg[cfg.NUM_EPOCHS] if cfg[cfg.NUM_EPOCHS] is not None else 35
self.numberOfSubepochs = cfg[cfg.NUM_SUBEP] if cfg[cfg.NUM_SUBEP] is not None else 20
self.numOfCasesLoadedPerSubepoch = cfg[cfg.NUM_CASES_LOADED_PERSUB] if cfg[cfg.NUM_CASES_LOADED_PERSUB] is not None else 50
self.segmentsLoadedOnGpuPerSubepochTrain = cfg[cfg.NUM_TR_SEGMS_LOADED_PERSUB] if cfg[cfg.NUM_TR_SEGMS_LOADED_PERSUB] is not None else 1000
self.num_parallel_proc_sampling = cfg[cfg.NUM_OF_PROC_SAMPL] if cfg[cfg.NUM_OF_PROC_SAMPL] is not None else 1
self.nepsilon = cfg[cfg.N_EPS] if cfg[cfg.N_EPS] is not None else 10 ** (-6)
self.nxi = cfg[cfg.N_XI] if cfg[cfg.N_XI] is not None else 2.5
self.nside = cfg[cfg.N_SIDE] if cfg[cfg.N_SIDE] is not None else 1
self.probaugmentbackground = cfg[cfg.PROB_AUGBG] if cfg[cfg.PROB_AUGBG] is not None else 1
#~~~~~~~ Learning Rate Schedule ~~~~~~~~
assert cfg[cfg.LR_SCH_TYPE] in ['stable', 'predef', 'poly', 'auto', 'expon']
self.lr_sched_params = {'type': cfg[cfg.LR_SCH_TYPE] if cfg[cfg.LR_SCH_TYPE] is not None else 'poly',
'predef': { 'epochs': cfg[cfg.PREDEF_SCH],
'div_lr_by': cfg[cfg.DIV_LR_BY] if cfg[cfg.DIV_LR_BY] is not None else 2.0 },
'auto': { 'min_incr_of_val_acc_considered': cfg[cfg.AUTO_MIN_INCR_VAL_ACC] if cfg[cfg.AUTO_MIN_INCR_VAL_ACC] is not None else 0.0,
'epochs_wait_before_decr': cfg[cfg.NUM_EPOCHS_WAIT] if cfg[cfg.NUM_EPOCHS_WAIT] is not None else 5,
'div_lr_by': cfg[cfg.DIV_LR_BY] if cfg[cfg.DIV_LR_BY] is not None else 2.0 },
'poly': { 'epochs_wait_before_decr': cfg[cfg.NUM_EPOCHS_WAIT] if cfg[cfg.NUM_EPOCHS_WAIT] is not None else self.numberOfEpochs/3,
'final_ep_for_sch': self.numberOfEpochs },
'expon': { 'epochs_wait_before_decr': cfg[cfg.NUM_EPOCHS_WAIT] if cfg[cfg.NUM_EPOCHS_WAIT] is not None else self.numberOfEpochs/3,
'final_ep_for_sch': self.numberOfEpochs,
'lr_to_reach_at_last_ep': cfg[cfg.EXPON_SCH][0] if cfg[cfg.EXPON_SCH] is not None else 1.0/(2**(8)),
'mom_to_reach_at_last_ep': cfg[cfg.EXPON_SCH][1] if cfg[cfg.EXPON_SCH] is not None else 0.9 }
}
#Predefined.
if self.lr_sched_params['type'] == 'predef' and self.lr_sched_params['predef']['epochs'] is None :
self.errReqPredLrSch()
#~~~~~~~~~~~~~~ Augmentation~~~~~~~~~~~~~~
self.reflectImagesPerAxis = cfg[cfg.REFL_AUGM_PER_AXIS] if cfg[cfg.REFL_AUGM_PER_AXIS] else [False, False, False]
self.performIntAugm = cfg[cfg.PERF_INT_AUGM_BOOL] if cfg[cfg.PERF_INT_AUGM_BOOL] is not None else False
if self.performIntAugm :
self.sampleIntAugmShiftWithMuAndStd = cfg[cfg.INT_AUGM_SHIF_MUSTD] if cfg[cfg.INT_AUGM_SHIF_MUSTD] else [0.0 , 0.05]
self.sampleIntAugmMultiWithMuAndStd = cfg[cfg.INT_AUGM_MULT_MUSTD] if cfg[cfg.INT_AUGM_MULT_MUSTD] else [1.0 , 0.01]
self.doIntAugm_shiftMuStd_multiMuStd = [True, self.sampleIntAugmShiftWithMuAndStd, self.sampleIntAugmMultiWithMuAndStd]
else :
self.doIntAugm_shiftMuStd_multiMuStd = [False, 'plcholder', [], []]
#===================VALIDATION========================
self.val_on_samples_during_train = cfg[cfg.PERFORM_VAL_SAMPLES] if cfg[cfg.PERFORM_VAL_SAMPLES] is not None else False
if self.lr_sched_params['type'] == 'auto' and not self.val_on_samples_during_train :
self.errorAutoRequiresValSamples()
self.val_on_whole_volumes = cfg[cfg.PERFORM_VAL_INFERENCE] if cfg[cfg.PERFORM_VAL_INFERENCE] is not None else False
#Input:
if self.val_on_samples_during_train or self.val_on_whole_volumes :
if cfg[cfg.CHANNELS_VAL] :
listOfAListPerChannelWithFilepathsOfAllCasesVal = [parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(channelConfPath, abs_path_to_cfg)) for channelConfPath in cfg[cfg.CHANNELS_VAL]]
#[[case1-ch1, case1-ch2], ..., [caseN-ch1, caseN-ch2]]
self.channelsFilepathsVal = [ list(item) for item in zip(*tuple(listOfAListPerChannelWithFilepathsOfAllCasesVal)) ]
else :
self.errReqChannsVal()
else :
self.channelsFilepathsVal = []
if self.val_on_samples_during_train :
self.gtLabelsFilepathsVal = parseAbsFileLinesInList( getAbsPathEvenIfRelativeIsGiven(cfg[cfg.GT_LABELS_VAL], abs_path_to_cfg) ) if cfg[cfg.GT_LABELS_VAL] is not None else self.errorReqGtLabelsVal()
elif self.val_on_whole_volumes :
self.gtLabelsFilepathsVal = parseAbsFileLinesInList( getAbsPathEvenIfRelativeIsGiven(cfg[cfg.GT_LABELS_VAL], abs_path_to_cfg) ) if cfg[cfg.GT_LABELS_VAL] is not None else []
else : # Dont perform either of the two validations.
self.gtLabelsFilepathsVal = []
self.providedGtVal = True if self.gtLabelsFilepathsVal is not None else False
#[Optionals]
self.providedRoiMasksVal = True if cfg[cfg.ROI_MASKS_VAL] is not None else False #For fast inf.
self.roiMasksFilepathsVal = parseAbsFileLinesInList( getAbsPathEvenIfRelativeIsGiven(cfg[cfg.ROI_MASKS_VAL], abs_path_to_cfg) ) if self.providedRoiMasksVal else []
#~~~~~Validation on Samples~~~~~~~~
self.segmentsLoadedOnGpuPerSubepochVal = cfg[cfg.NUM_VAL_SEGMS_LOADED_PERSUB] if cfg[cfg.NUM_VAL_SEGMS_LOADED_PERSUB] is not None else 3000
#~~~~~~~~~Advanced Validation Sampling~~~~~~~~~~~
#ADVANCED OPTION ARE DISABLED IF useDefaultUniformValidationSampling = True!
self.useDefaultUniformValidationSampling = cfg[cfg.DEFAULT_VAL_SAMPLING] if cfg[cfg.DEFAULT_VAL_SAMPLING] is not None else True
DEFAULT_SAMPLING_TYPE_VAL = 1
if self.useDefaultUniformValidationSampling :
self.samplingTypeInstanceVal = samplingType.SamplingType( self.log, DEFAULT_SAMPLING_TYPE_VAL, num_classes )
numberOfCategoriesOfSamplesVal = self.samplingTypeInstanceVal.getNumberOfCategoriesToSample()
self.samplingTypeInstanceVal.setPercentOfSamplesPerCategoryToSample( [1.0/numberOfCategoriesOfSamplesVal]*numberOfCategoriesOfSamplesVal )
self.perSamplingCat_aListOfFilepathsToWeightMapsOfEachCaseVal = None
else :
samplingTypeToUseVal = cfg[cfg.TYPE_OF_SAMPLING_VAL] if cfg[cfg.TYPE_OF_SAMPLING_VAL] is not None else DEFAULT_SAMPLING_TYPE_VAL
self.samplingTypeInstanceVal = samplingType.SamplingType( self.log, samplingTypeToUseVal, num_classes)
if samplingTypeToUseVal in [0,3] and cfg[cfg.PROP_OF_SAMPLES_PER_CAT_VAL] is not None:
self.samplingTypeInstanceVal.setPercentOfSamplesPerCategoryToSample( cfg[cfg.PROP_OF_SAMPLES_PER_CAT_VAL] )
else :
numberOfCategoriesOfSamplesVal = self.samplingTypeInstanceVal.getNumberOfCategoriesToSample()
self.samplingTypeInstanceVal.setPercentOfSamplesPerCategoryToSample( [1.0/numberOfCategoriesOfSamplesVal]*numberOfCategoriesOfSamplesVal )
# TODO: Shorten this
if cfg[cfg.WEIGHT_MAPS_PER_CAT_FILEPATHS_VAL] is not None:
#[[case1-weightMap1, ..., caseN-weightMap1], [case1-weightMap2,...,caseN-weightMap2]]
self.perSamplingCat_aListOfFilepathsToWeightMapsOfEachCaseVal = [parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(weightMapConfPath, abs_path_to_cfg)) for weightMapConfPath in cfg[cfg.WEIGHT_MAPS_PER_CAT_FILEPATHS_VAL]]
else :
self.perSamplingCat_aListOfFilepathsToWeightMapsOfEachCaseVal = None
self.providedWeightMapsToSampleForEachCategoryValidation = self.perSamplingCat_aListOfFilepathsToWeightMapsOfEachCaseVal is not None
#~~~~~~Full inference on validation image~~~~~~
self.num_epochs_between_val_on_whole_volumes = cfg[cfg.NUM_EPOCHS_BETWEEN_VAL_INF] if cfg[cfg.NUM_EPOCHS_BETWEEN_VAL_INF] is not None else 1
if self.num_epochs_between_val_on_whole_volumes == 0 and self.val_on_whole_volumes :
self.errorReqNumberOfEpochsBetweenFullValInfGreaterThan0()
#predictionsf
self.saveSegmentationVal = cfg[cfg.SAVE_SEGM_VAL] if cfg[cfg.SAVE_SEGM_VAL] is not None else True
self.saveProbMapsBoolPerClassVal = cfg[cfg.SAVE_PROBMAPS_PER_CLASS_VAL] if (cfg[cfg.SAVE_PROBMAPS_PER_CLASS_VAL] is not None and cfg[cfg.SAVE_PROBMAPS_PER_CLASS_VAL] != []) else [True]*num_classes
self.filepathsToSavePredictionsForEachPatientVal = None #Filled by call to self.makeFilepathsForPredictionsAndFeatures()
self.suffixForSegmAndProbsDictVal = cfg[cfg.SUFFIX_SEGM_PROB_VAL] if cfg[cfg.SUFFIX_SEGM_PROB_VAL] is not None else {"segm": "Segm", "prob": "ProbMapClass"}
#features:
self.saveIndividualFmImagesVal = cfg[cfg.SAVE_INDIV_FMS_VAL] if cfg[cfg.SAVE_INDIV_FMS_VAL] is not None else False
self.saveMultidimensionalImageWithAllFmsVal = cfg[cfg.SAVE_4DIM_FMS_VAL] if cfg[cfg.SAVE_4DIM_FMS_VAL] is not None else False
if self.saveIndividualFmImagesVal == True or self.saveMultidimensionalImageWithAllFmsVal == True:
indices_fms_per_pathtype_per_layer_to_save = [cfg[cfg.INDICES_OF_FMS_TO_SAVE_NORMAL_VAL]] +\
[cfg[cfg.INDICES_OF_FMS_TO_SAVE_SUBSAMPLED_VAL]] +\
[cfg[cfg.INDICES_OF_FMS_TO_SAVE_FC_VAL]]
self.indices_fms_per_pathtype_per_layer_to_save = [item if item is not None else [] for item in indices_fms_per_pathtype_per_layer_to_save] #By default, save none.
else:
self.indices_fms_per_pathtype_per_layer_to_save = None
self.filepathsToSaveFeaturesForEachPatientVal = None #Filled by call to self.makeFilepathsForPredictionsAndFeatures()
#Output:
#Given by the config file, and is then used to fill filepathsToSavePredictionsForEachPatient and filepathsToSaveFeaturesForEachPatient.
self.namesToSavePredictionsAndFeaturesVal = parseFileLinesInList( getAbsPathEvenIfRelativeIsGiven(cfg[cfg.NAMES_FOR_PRED_PER_CASE_VAL], abs_path_to_cfg) ) if cfg[cfg.NAMES_FOR_PRED_PER_CASE_VAL] else None #CAREFUL: Here we use a different parsing function!
if not self.namesToSavePredictionsAndFeaturesVal and self.val_on_whole_volumes and (self.saveSegmentationVal or True in self.saveProbMapsBoolPerClassVal or self.saveIndividualFmImagesVal or self.saveMultidimensionalImageWithAllFmsVal) :
self.errorRequireNamesOfPredictionsVal()
#===================== OTHERS======================
#Preprocessing
self.padInputImagesBool = cfg[cfg.PAD_INPUT] if cfg[cfg.PAD_INPUT] is not None else True
#Others useful internally or for reporting:
self.numberOfCasesTrain = len(self.channelsFilepathsTrain)
self.numberOfCasesVal = len(self.channelsFilepathsVal)
self.run_input_checks = cfg[cfg.RUN_INP_CHECKS] if cfg[cfg.RUN_INP_CHECKS] is not None else True
#HIDDENS, no config allowed for these at the moment:
self.useSameSubChannelsAsSingleScale = True
self.subsampledChannelsFilepathsTrain = "placeholder" #List of Lists with filepaths per patient. Only used when above is False.
self.subsampledChannelsFilepathsVal = "placeholder" #List of Lists with filepaths per patient. Only used when above is False.
# Re-weight samples in the cost function *on a per-class basis*: Type of re-weighting and training schedule.
# E.g. to exclude a class, or counter class imbalance.
# "type": string/None, "prms": any/None, "schedule": [ min_epoch, max_epoch ]
# Type, prms combinations: "freq", None || "per_c", [0., 2., 1., ...] (as many as classes)
# "schedule": Constant before epoch [0], linear change towards equal weight (=1) until epoch [1], constant equal weights (=1) afterwards.
self.reweight_classes_in_cost = cfg[cfg.W_C_IN_COST] if cfg[cfg.W_C_IN_COST] is not None else {"type": None, "prms": None, "schedule": [0, self.numberOfEpochs]}
if self.reweight_classes_in_cost["type"] == "per_c":
assert len(self.reweight_classes_in_cost["prms"]) == num_classes
self._makeFilepathsForPredictionsAndFeaturesVal( folderForPredictionsVal, folderForFeaturesVal )
#====Optimization=====
self.learningRate = cfg[cfg.LRATE] if cfg[cfg.LRATE] is not None else 0.001
self.optimizerSgd0Adam1Rms2 = cfg[cfg.OPTIMIZER] if cfg[cfg.OPTIMIZER] is not None else 2
if self.optimizerSgd0Adam1Rms2 == 0 :
self.b1Adam = "placeholder"; self.b2Adam = "placeholder"; self.eAdam = "placeholder";
self.rhoRms = "placeholder"; self.eRms = "placeholder";
elif self.optimizerSgd0Adam1Rms2 == 1 :
self.b1Adam = cfg[cfg.B1_ADAM] if cfg[cfg.B1_ADAM] is not None else 0.9 #default in paper and seems good
self.b2Adam = cfg[cfg.B2_ADAM] if cfg[cfg.B2_ADAM] is not None else 0.999 #default in paper and seems good
self.eAdam = cfg[cfg.EPS_ADAM] if cfg[cfg.EPS_ADAM] is not None else 10**(-8)
self.rhoRms = "placeholder"; self.eRms = "placeholder";
elif self.optimizerSgd0Adam1Rms2 == 2 :
self.b1Adam = "placeholder"; self.b2Adam = "placeholder"; self.eAdam = "placeholder";
self.rhoRms = cfg[cfg.RHO_RMS] if cfg[cfg.RHO_RMS] is not None else 0.9 #default in paper and seems good
self.eRms = cfg[cfg.EPS_RMS] if cfg[cfg.EPS_RMS] is not None else 10**(-4) # 1e-6 was the default in the paper, but blew up the gradients in first try. Never tried 1e-5 yet.
else :
self.errorRequireOptimizer012()
self.classicMom0Nesterov1 = cfg[cfg.MOM_TYPE] if cfg[cfg.MOM_TYPE] is not None else 1
if self.classicMom0Nesterov1 not in [0,1]:
self.errorRequireMomentumClass0Nestov1()
self.momNonNormalized0Normalized1 = cfg[cfg.MOM_NORM_NONNORM] if cfg[cfg.MOM_NORM_NONNORM] is not None else 1
if self.momNonNormalized0Normalized1 not in [0,1] :
self.errorRequireMomNonNorm0Norm1()
self.momentumValue = cfg[cfg.MOM] if cfg[cfg.MOM] is not None else 0.6
if self.momentumValue < 0. or self.momentumValue > 1:
self.errorRequireMomValueBetween01()
#==Regularization==
self.L1_reg_weight = cfg[cfg.L1_REG] if cfg[cfg.L1_REG] is not None else 0.000001
self.L2_reg_weight = cfg[cfg.L2_REG] if cfg[cfg.L2_REG] is not None else 0.0001
self.marginm = cfg[cfg.MARGIN] if cfg[cfg.MARGIN] is not None else 0
self.mixup_rate = cfg[cfg.MIX_RATE] if cfg[cfg.MIX_RATE] is not None else 0
self.mixup_biasmargin = cfg[cfg.MIX_MAR] if cfg[cfg.MIX_MAR] is not None else 0
#============= HIDDENS ==============
# Indices of layers that should not be trained (kept fixed).
layersToFreezePerPathwayType = [cfg[cfg.LAYERS_TO_FREEZE_NORM],
cfg[cfg.LAYERS_TO_FREEZE_SUBS],
cfg[cfg.LAYERS_TO_FREEZE_FC]]
indicesOfLayersToFreezeNorm = [ l-1 for l in layersToFreezePerPathwayType[0] ] if layersToFreezePerPathwayType[0] is not None else []
indicesOfLayersToFreezeSubs = [ l-1 for l in layersToFreezePerPathwayType[1] ] if layersToFreezePerPathwayType[1] is not None else indicesOfLayersToFreezeNorm
indicesOfLayersToFreezeFc = [ l-1 for l in layersToFreezePerPathwayType[2] ] if layersToFreezePerPathwayType[2] is not None else []
# Three sublists, one per pathway type: Normal, Subsampled, FC. eg: [[0,1,2],[0,1,2],[]
self.indicesOfLayersPerPathwayTypeToFreeze = [ indicesOfLayersToFreezeNorm, indicesOfLayersToFreezeSubs, indicesOfLayersToFreezeFc ]
self.losses_and_weights = cfg[cfg.LOSSES_WEIGHTS] if cfg[cfg.LOSSES_WEIGHTS] is not None else {"xentr": 1.0, "iou": None, "dsc": None, "focaloneside": None, "focalonesidegama": 2.0}
assert True in [self.losses_and_weights[k] is not None for k in ["xentr", "iou", "dsc", "focaloneside"]]
"""
#NOTES: variables that have to do with number of pathways:
self.indicesOfLayersPerPathwayTypeToFreeze (="all" always currently. Hardcoded)
self.useSameSubChannelsAsSingleScale, (always True currently)
self.subsampledChannelsFilepathsTrain,
self.subsampledChannelsFilepathsVal, (Deprecated. But I should support it in future, cause it works well for non-scale pathways)
indices_fms_per_pathtype_per_layer_to_save (Repeat subsampled!)
"""
def _makeFilepathsForPredictionsAndFeaturesVal( self,
absPathToFolderForPredictionsFromSession,
absPathToFolderForFeaturesFromSession
) :
self.filepathsToSavePredictionsForEachPatientVal = []
self.filepathsToSaveFeaturesForEachPatientVal = []
if self.namesToSavePredictionsAndFeaturesVal is not None : # standard behavior
for case_i in range(self.numberOfCasesVal) :
filepathForCasePrediction = absPathToFolderForPredictionsFromSession + "/" + self.namesToSavePredictionsAndFeaturesVal[case_i]
self.filepathsToSavePredictionsForEachPatientVal.append( filepathForCasePrediction )
filepathForCaseFeatures = absPathToFolderForFeaturesFromSession + "/" + self.namesToSavePredictionsAndFeaturesVal[case_i]
self.filepathsToSaveFeaturesForEachPatientVal.append( filepathForCaseFeatures )
else : # Names for predictions not given. Special handling...
if self.numberOfCasesVal > 1 : # Many cases, create corresponding namings for files.
for case_i in range(self.numberOfCasesVal) :
self.filepathsToSavePredictionsForEachPatientVal.append( absPathToFolderForPredictionsFromSession + "/pred_case" + str(case_i) + ".nii.gz" )
self.filepathsToSaveFeaturesForEachPatientVal.append( absPathToFolderForPredictionsFromSession + "/pred_case" + str(case_i) + ".nii.gz" )
else : # Only one case. Just give the output prediction folder, the io.py will save output accordingly.
self.filepathsToSavePredictionsForEachPatientVal.append( absPathToFolderForPredictionsFromSession )
self.filepathsToSaveFeaturesForEachPatientVal.append( absPathToFolderForPredictionsFromSession )
def get_path_to_load_model_from(self):
return self.savedModelFilepath
    def print_params(self) :
        """Log every parameter of this training session, grouped by topic.

        Purely informational: writes one line per parameter via the session
        logger (self.log.print3) and returns nothing.
        """
        logPrint = self.log.print3
        logPrint("")
        logPrint("=============================================================")
        logPrint("========= PARAMETERS FOR THIS TRAINING SESSION ==============")
        logPrint("=============================================================")
        logPrint("Session's name = " + str(self.sessionName))
        logPrint("Model will be loaded from save = " + str(self.savedModelFilepath))
        logPrint("~~Output~~")
        logPrint("Main output folder = " + str(self.mainOutputAbsFolder))
        logPrint("Path and filename to save trained models = " + str(self.filepath_to_save_models))
        logPrint("~~~~~~~~~~~~~~~~~~Generic Information~~~~~~~~~~~~~~~~")
        logPrint("Number of Cases for Training = " + str(self.numberOfCasesTrain))
        logPrint("Number of Cases for Validation = " + str(self.numberOfCasesVal))
        logPrint("~~~~~~~~~~~~~~~~~~Training parameters~~~~~~~~~~~~~~~~")
        logPrint("Filepaths to Channels of the Training Cases = " + str(self.channelsFilepathsTrain))
        logPrint("Filepaths to Ground-Truth labels of the Training Cases = " + str(self.gtLabelsFilepathsTrain))
        logPrint("~~Sampling~~")
        logPrint("Region-Of-Interest Masks provided = " + str(self.providedRoiMasksTrain))
        logPrint("Filepaths to ROI Masks of the Training Cases = " + str(self.roiMasksFilepathsTrain))
        logPrint("~~Advanced Sampling~~")
        logPrint("Using default sampling = " + str(self.useDefaultTrainingSamplingFromGtAndRoi) + ". NOTE: Adv.Sampl.Params are auto-set to perform default sampling if True.")
        logPrint("Type of Sampling = " + str(self.samplingTypeInstanceTrain.getStringOfSamplingType()) + " ("+ str(self.samplingTypeInstanceTrain.getIntSamplingType()) + ")")
        logPrint("Sampling Categories = " + str(self.samplingTypeInstanceTrain.getStringsPerCategoryToSample()) )
        logPrint("Percent of Samples to extract per Sampling Category = " + str(self.samplingTypeInstanceTrain.getPercentOfSamplesPerCategoryToSample()))
        logPrint("Provided Weight-Maps, pointing where to focus sampling for each category (if False, samples will be extracted based on GT and ROI) = " + str(self.providedWeightMapsToSampleForEachCategoryTraining))
        logPrint("Paths to weight-maps for sampling of each category = " + str(self.forEachSamplingCategory_aListOfFilepathsToWeightMapsOfEachPatientTraining))
        logPrint("~~Training Cycle~~")
        logPrint("Number of Epochs = " + str(self.numberOfEpochs))
        logPrint("Number of Subepochs per epoch = " + str(self.numberOfSubepochs))
        logPrint("Number of cases to load per Subepoch (for extracting the samples for this subepoch) = " + str(self.numOfCasesLoadedPerSubepoch))
        logPrint("Number of Segments loaded on GPU per subepoch for Training = " + str(self.segmentsLoadedOnGpuPerSubepochTrain) + ". NOTE: This number of segments divided by the batch-size defines the number of optimization-iterations that will be performed every subepoch!")
        logPrint("Number of parallel processes for sampling = " + str(self.num_parallel_proc_sampling))
        logPrint("~~Learning Rate Schedule~~")
        logPrint("Type of schedule = " + str(self.lr_sched_params['type']))
        logPrint("[Predef] Predefined schedule of epochs when the LR will be lowered = " + str(self.lr_sched_params['predef']['epochs']))
        logPrint("[Predef] When decreasing Learning Rate, divide LR by = " + str(self.lr_sched_params['predef']['div_lr_by']) )
        logPrint("[Poly] Initial epochs to wait before lowering LR = " + str(self.lr_sched_params['poly']['epochs_wait_before_decr']) )
        logPrint("[Poly] Final epoch for the schedule = " + str(self.lr_sched_params['poly']['final_ep_for_sch']) )
        logPrint("[Auto] Initial epochs to wait before lowering LR = " + str(self.lr_sched_params['auto']['epochs_wait_before_decr']) )
        logPrint("[Auto] When decreasing Learning Rate, divide LR by = " + str(self.lr_sched_params['auto']['div_lr_by']) )
        logPrint("[Auto] Minimum increase in validation accuracy (0. to 1.) that resets the waiting counter = " + str(self.lr_sched_params['auto']['min_incr_of_val_acc_considered']))
        logPrint("[Expon] (Deprecated) parameters = " + str(self.lr_sched_params['expon']))
        logPrint("~~Data Augmentation During Training~~")
        logPrint("Reflect images per axis = " + str(self.reflectImagesPerAxis))
        logPrint("Perform intensity-augmentation [I'= (I+shift)*mult] = " + str(self.performIntAugm))
        logPrint("[Int. Augm.] Sample Shift from N(mu,std) = " + str(self.doIntAugm_shiftMuStd_multiMuStd[1]))
        logPrint("[Int. Augm.] Sample Multi from N(mu,std) = " + str(self.doIntAugm_shiftMuStd_multiMuStd[2]))
        logPrint("[Int. Augm.] (DEBUGGING:) full parameters [ doIntAugm, shift, mult] = " + str(self.doIntAugm_shiftMuStd_multiMuStd))
        logPrint("Noise epsilon (train) = " + str(self.nepsilon))
        logPrint("Noise initial stength (train) = " + str(self.nxi))
        logPrint("Adversarial direction mode (train) = " + str(self.nside))
        logPrint("The probability of background sample being augmented = " + str(self.probaugmentbackground))
        logPrint("~~~~~~~~~~~~~~~~~~Validation parameters~~~~~~~~~~~~~~~~")
        logPrint("Perform Validation on Samples throughout training? = " + str(self.val_on_samples_during_train))
        logPrint("Perform Full Inference on validation cases every few epochs? = " + str(self.val_on_whole_volumes))
        logPrint("Filepaths to Channels of the Validation Cases (Req for either of the above) = " + str(self.channelsFilepathsVal))
        logPrint("Provided Ground-Truth for Validation = " + str(self.providedGtVal) + ". NOTE: Required for Val on samples. Not Req for Full-Inference, but DSC will be reported if provided.")
        logPrint("Filepaths to Ground-Truth labels of the Validation Cases = " + str(self.gtLabelsFilepathsVal))
        logPrint("Provided ROI masks for Validation = " + str(self.providedRoiMasksVal) + ". NOTE: Validation-sampling and Full-Inference will be limited within this mask if provided. If not provided, Negative Validation samples will be extracted from whole volume, except if advanced-sampling is enabled, and the user provided separate weight-maps for sampling.")
        logPrint("Filepaths to ROI masks for Validation Cases = " + str(self.roiMasksFilepathsVal))
        logPrint("~~~~~~~Validation on Samples throughout Training~~~~~~~")
        logPrint("Number of Segments loaded on GPU per subepoch for Validation = " + str(self.segmentsLoadedOnGpuPerSubepochVal))
        logPrint("~~Advanced Sampling~~")
        logPrint("Using default uniform sampling for validation = " + str(self.useDefaultUniformValidationSampling) + ". NOTE: Adv.Sampl.Params are auto-set to perform uniform-sampling if True.")
        logPrint("Type of Sampling = " + str(self.samplingTypeInstanceVal.getStringOfSamplingType()) + " ("+ str(self.samplingTypeInstanceVal.getIntSamplingType()) + ")")
        logPrint("Sampling Categories = " + str(self.samplingTypeInstanceVal.getStringsPerCategoryToSample()) )
        logPrint("Percent of Samples to extract per Sampling Category = " + str(self.samplingTypeInstanceVal.getPercentOfSamplesPerCategoryToSample()))
        logPrint("Provided Weight-Maps, pointing where to focus sampling for each category (if False, samples will be extracted based on GT and ROI) = " + str(self.providedWeightMapsToSampleForEachCategoryValidation))
        logPrint("Paths to weight-maps for sampling of each category = " + str(self.perSamplingCat_aListOfFilepathsToWeightMapsOfEachCaseVal))
        logPrint("~~~~~Validation with Full Inference on Validation Cases~~~~~")
        logPrint("Perform Full-Inference on Val. cases every that many epochs = " + str(self.num_epochs_between_val_on_whole_volumes))
        logPrint("~~Predictions (segmentations and prob maps on val. cases)~~")
        logPrint("Save Segmentations = " + str(self.saveSegmentationVal))
        logPrint("Save Probability Maps for each class = " + str(self.saveProbMapsBoolPerClassVal))
        logPrint("Filepaths to save results per case = " + str(self.filepathsToSavePredictionsForEachPatientVal))
        logPrint("Suffixes with which to save segmentations and probability maps = " + str(self.suffixForSegmAndProbsDictVal))
        logPrint("~~Feature Maps~~")
        logPrint("Save Feature Maps = " + str(self.saveIndividualFmImagesVal))
        logPrint("Save FMs in a 4D-image = " + str(self.saveMultidimensionalImageWithAllFmsVal))
        logPrint("Min/Max Indices of FMs to visualise per pathway-type and per layer = " + str(self.indices_fms_per_pathtype_per_layer_to_save))
        logPrint("Filepaths to save FMs per case = " + str(self.filepathsToSaveFeaturesForEachPatientVal))
        logPrint("~~Optimization~~")
        logPrint("Initial Learning rate = " + str(self.learningRate))
        logPrint("Optimizer to use: SGD(0), Adam(1), RmsProp(2) = " + str(self.optimizerSgd0Adam1Rms2))
        logPrint("Parameters for Adam: b1= " + str(self.b1Adam) + ", b2=" + str(self.b2Adam) + ", e= " + str(self.eAdam) )
        logPrint("Parameters for RmsProp: rho= " + str(self.rhoRms) + ", e= " + str(self.eRms) )
        logPrint("Momentum Type: Classic (0) or Nesterov (1) = " + str(self.classicMom0Nesterov1))
        logPrint("Momentum Non-Normalized (0) or Normalized (1) = " + str(self.momNonNormalized0Normalized1))
        logPrint("Momentum Value = " + str(self.momentumValue))
        logPrint("~~Costs~~")
        logPrint("Loss functions and their weights = " + str(self.losses_and_weights))
        logPrint("Reweight samples in cost on a per-class basis = " + str(self.reweight_classes_in_cost))
        logPrint("L1 Regularization term = " + str(self.L1_reg_weight))
        logPrint("L2 Regularization term = " + str(self.L2_reg_weight))
        logPrint("Margin term = " + str(self.marginm))
        logPrint("Mixup Rate term = " + str(self.mixup_rate))
        logPrint("Mixup Margin term = " + str(self.mixup_biasmargin))
        logPrint("~~Freeze Weights of Certain Layers~~")
        logPrint("Indices of layers from each type of pathway that will be kept fixed (first layer is 0):")
        logPrint("Normal pathway's layers to freeze = "+ str(self.indicesOfLayersPerPathwayTypeToFreeze[0]))
        logPrint("Subsampled pathway's layers to freeze = "+ str(self.indicesOfLayersPerPathwayTypeToFreeze[1]))
        logPrint("FC pathway's layers to freeze = "+ str(self.indicesOfLayersPerPathwayTypeToFreeze[2]))
        logPrint("~~~~~~~~~~~~~~~~~~Other Generic Parameters~~~~~~~~~~~~~~~~")
        logPrint("Check whether input data has correct format (can slow down process) = " + str(self.run_input_checks))
        logPrint("~~Pre Processing~~")
        logPrint("Pad Input Images = " + str(self.padInputImagesBool))
        logPrint("========== Done with printing session's parameters ==========")
        logPrint("=============================================================\n")
def get_args_for_train_routine(self) :
args = [self.log,
self.filepath_to_save_models,
self.val_on_samples_during_train,
{"segm": self.saveSegmentationVal, "prob": self.saveProbMapsBoolPerClassVal},
self.filepathsToSavePredictionsForEachPatientVal,
self.suffixForSegmAndProbsDictVal,
self.channelsFilepathsTrain,
self.channelsFilepathsVal,
self.gtLabelsFilepathsTrain,
self.providedGtVal,
self.gtLabelsFilepathsVal,
self.providedWeightMapsToSampleForEachCategoryTraining, #Always true, since either GT labels or advanced-mask-where-to-pos
self.forEachSamplingCategory_aListOfFilepathsToWeightMapsOfEachPatientTraining,
self.providedWeightMapsToSampleForEachCategoryValidation, #If false, corresponding samples will be extracted uniformly from whole image.
self.perSamplingCat_aListOfFilepathsToWeightMapsOfEachCaseVal,
self.providedRoiMasksTrain, # also used for int-augm.
self.roiMasksFilepathsTrain,# also used for int-augm
self.providedRoiMasksVal, # also used for fast inf
self.roiMasksFilepathsVal, # also used for fast inf and also for uniform sampling of segs.
self.numberOfEpochs,
self.numberOfSubepochs,
self.numOfCasesLoadedPerSubepoch,
self.segmentsLoadedOnGpuPerSubepochTrain,
self.segmentsLoadedOnGpuPerSubepochVal,
self.num_parallel_proc_sampling,
#-------Sampling Type---------
self.samplingTypeInstanceTrain,
self.samplingTypeInstanceVal,
#-------Preprocessing-----------
self.padInputImagesBool,
self.mixup_rate,
self.mixup_biasmargin,
#-------Data Augmentation-------
self.doIntAugm_shiftMuStd_multiMuStd,
self.reflectImagesPerAxis,
self.useSameSubChannelsAsSingleScale,
self.subsampledChannelsFilepathsTrain,
self.subsampledChannelsFilepathsVal,
# Validation
self.val_on_whole_volumes,
self.num_epochs_between_val_on_whole_volumes,
#--------For FM visualisation---------
self.saveIndividualFmImagesVal,
self.saveMultidimensionalImageWithAllFmsVal,
self.indices_fms_per_pathtype_per_layer_to_save,
self.filepathsToSaveFeaturesForEachPatientVal,
#-------- Others --------
self.run_input_checks,
self.nepsilon,
self.nxi,
self.probaugmentbackground
]
return args
def get_args_for_trainer(self) :
args = [self.log,
self.indicesOfLayersPerPathwayTypeToFreeze,
self.losses_and_weights,
# Regularisation
self.L1_reg_weight,
self.L2_reg_weight,
# Cost Schedules
#Weighting Classes differently in the CNN's cost function during training:
self.reweight_classes_in_cost,
self.marginm,
self.mixup_biasmargin
]
return args
def get_args_for_optimizer(self) :
args = [self.log,
self.optimizerSgd0Adam1Rms2,
self.lr_sched_params,
self.learningRate,
self.momentumValue,
self.classicMom0Nesterov1,
self.momNonNormalized0Normalized1,
self.b1Adam,
self.b2Adam,
self.eAdam,
self.rhoRms,
self.eRms
]
return args
def get_args_for_adversarial(self) :
args = [self.log,
self.nside
]
return args
| ZerojumpLine/OverfittingUnderClassImbalance | DeepMedic/deepmedic/frontEnd/configParsing/trainSessionParams.py | trainSessionParams.py | py | 52,975 | python | en | code | 21 | github-code | 36 |
31045721366 | import sys, getopt
import os, shutil
from subprocess import Popen, PIPE
def main(argv):
    """Apply an svn:externals property file to a working-copy directory.

    Parses -d/--dir (target working-copy dir) and -p/--prop (file holding the
    svn:externals definition) and runs `svn pset` to set the property.
    Exits with status 2 on bad arguments or a missing target directory.
    """
    targetDir = ""
    propFile = ""
    usage = "LockExternalsRevision.py -d <TargetDir> -p <SvnPropsFile>"
    try:
        opts, args = getopt.getopt(argv, "hd:p:", ["help", "dir=", "prop="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(usage)
            sys.exit(2)
        elif opt in ("-d", "--dir"):
            targetDir = arg
        elif opt in ("-p", "--prop"):
            propFile = arg
    print("Target Dir: " + targetDir)
    targetDir = os.path.abspath(targetDir)
    print("Absolute path: " + targetDir)
    if not os.path.exists(targetDir):
        print("This path doesn't exist")
        sys.exit(2)
    print("Setting up externals property...")
    process = Popen(["svn", "pset", "svn:externals", targetDir, "-F", propFile],
                    stdout=PIPE, stderr=PIPE)
    stdout, stderr = process.communicate()
    if stderr:
        # communicate() returns bytes; decode before concatenating with str
        # (the original `"Error: " + stderr` raised TypeError on Python 3).
        print("Error: " + stderr.decode(errors="replace"))
        return
    print("Done")
if __name__ == "__main__":
main(sys.argv[1:])
| NguyenThanhDung/Study | Python/LockExternalsRevision.py | LockExternalsRevision.py | py | 1,206 | python | en | code | 0 | github-code | 36 |
42979785156 | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import render
from django.template import RequestContext
from .models import Project
# Create your views here.
def index(request):
    '''Show all projects, newest first, paginated 6 per page.'''
    posts_list = Project.objects.all().order_by('-id')
    paginator = Paginator(posts_list, 6)
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric (or missing) page parameter: fall back to the first page.
        posts = paginator.page(1)
    except EmptyPage:
        # Out-of-range page number: clamp to the last page.
        posts = paginator.page(paginator.num_pages)
    # Removed the dead `vars` dict the original built but never used.
    context = {
        'posts': posts,
        'paginator': paginator,
    }
    return render(request, 'project/index.tpl', context)
| gulla-k/pwd | project/views.py | views.py | py | 749 | python | en | code | 0 | github-code | 36 |
37630155256 | #!/usr/bin/env python
from net import *
import csv
import cv2
from cv_bridge import CvBridge, CvBridgeError
import os
import numpy as np
from PIL import Image
import tensorflow as tf
from skimage import color
import time
import rospy
from cone_detection.msg import Label
#Init ros.
rospy.init_node('local_network_test')
#Net parameters.
image_width = rospy.get_param('/cone/width_pixel')
image_height = rospy.get_param('/cone/height_pixel')
path_to_candidate = rospy.get_param('/candidate_path')
path_to_model = rospy.get_param('/model_path')
datasets = rospy.get_param('/neural_net/datasets')
datasets_validation = rospy.get_param('/neural_net/datasets_validation')
#Init and saver variable.
keep_prob = tf.placeholder(tf.float32)
input_placeholder = tf.placeholder(tf.float32, [None, image_height, image_width, 3])
output_placeholder = tf.placeholder(tf.float32, [None, 2])
input_placeholder_flat = tf.contrib.layers.flatten(input_placeholder)
y_true = tf.argmax(output_placeholder, dimension=1)
output_layer = fully_connected(input_placeholder_flat, 0.01, keep_prob)
y_pred = tf.argmax(tf.nn.softmax(output_layer), dimension=1)
def deleteFolderContent(path):
    """Delete every entry found directly inside the directory `path`."""
    full_paths = (os.path.join(path, name) for name in os.listdir(path))
    for full_path in full_paths:
        os.remove(full_path)
class NeuralNet:
    """Restores the trained classifier and labels the first 1000 candidate
    images of the validation dataset, reporting labeling time and accuracy
    against the hand-made labels in labeling.csv."""
    def __init__(self):
        #Init tf session.
        self.session = tf.Session()
        self.session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        # NOTE(review): the checkpoint name ends in " .cpkt" (leading space,
        # and ".cpkt" rather than the conventional ".ckpt") -- confirm this
        # matches the name the model was actually saved under.
        saver.restore(self.session, path_to_model + getModelName(datasets) + " .cpkt")
        #Init cone list.
        image_list = []
        # Start timer.
        start_time = time.time()
        # Labeling: classify candidate images 0..999 of the validation set.
        for i in range(0,1000):
            path = path_to_candidate + datasets_validation + "/" + str(i) + ".jpg"
            try:
                img = Image.open(path)
                arr = np.array(img.getdata(),np.uint8)
                arr = arr.reshape(image_height, image_width, 3)
                image_list.append(self.labeling(arr, i))
                cv2.imwrite(path_to_candidate + "candidates/" + str(i) + ".jpg", arr)
            except:
                # Bare except: silently skips missing files, but also hides any
                # other error (e.g. a wrong-sized image failing the reshape).
                continue
        # Stop timer.
        end_time = time.time()
        time_difference = end_time - start_time
        print("Labeling time usage: " + str(time_difference) + " s")
        # Getting labels: read the ground-truth (image index, label) pairs.
        labeled_list = []
        reader = csv.reader(open(path_to_candidate + datasets_validation + "/" + "labeling.csv"))
        for row in reader:
            image = int(row[0])
            label = int(row[1])
            labeled_list.append([image, label])
        # Accuracy by comparing lists: count predictions matching ground truth.
        correct = 0.0;
        for element in image_list:
            index = element[0]
            for labeled_element in labeled_list:
                if(index == labeled_element[0] and element[1] == labeled_element[1]):
                    correct += 1.0
                    break
        # NOTE(review): the denominator is len(labeled_list) - 1; presumably
        # this discounts a CSV header row -- confirm labeling.csv has one.
        accuracy = correct / (len(labeled_list) - 1)
        print("Labeling accuracy: " + str(accuracy))
    def labeling(self,msg,index):
        """Classify one RGB image array; return [index, 1] if it is a cone,
        else [index, 0]. Cone images are also written to the 'cones/' folder."""
        #Get image: convert RGB to LAB and scale, batch dimension of 1.
        image = np.zeros((1,image_height, image_width,3))
        image[0][:][:][:] = color.rgb2lab(msg) / 255.0
        # Labeling: run the prediction op (class 0 == cone).
        label = y_pred.eval(session=self.session,feed_dict={input_placeholder: image, keep_prob: 1.0})
        if(label == [0]):
            cv2.imwrite(path_to_candidate + "cones/" + str(index) + ".jpg", msg)
            return [index, 1]
        else:
            return [index, 0]
#------------------------------------------------------------------------
if __name__ == '__main__':
#Delete files in candidates and cones order.
deleteFolderContent(path_to_candidate + "candidates/")
deleteFolderContent(path_to_candidate + "cones/")
#Init neural net.
neural_net = NeuralNet() | ProjectARCConeDetection/cone_detection | neural_net/local_network_test.py | local_network_test.py | py | 3,822 | python | en | code | 1 | github-code | 36 |
73921921384 | import pandas as pd
import sys, os, MySQLdb
import pandas as pd
import numpy as np
from collections import defaultdict
import click
db = MySQLdb.connect(host="localhost", user=os.environ["DATAVIVA2_DB_USER"],
passwd=os.environ["DATAVIVA2_DB_PW"],
db=os.environ["DATAVIVA2_DB_NAME"])
db.autocommit(1)
cursor = db.cursor()
cursor.execute("select id_ibge, id from attrs_bra where id_ibge is not null and length(id) = 8;")
bra_lookup = {str(r[0])[:-1]:r[1] for r in cursor.fetchall()}
cursor.execute("select substr(id, 2), id from attrs_cnae where length(id) = 6;")
cnae_lookup = {str(r[0]):r[1] for r in cursor.fetchall()}
cursor.execute("select id, id from attrs_cbo where length(id) = 4;")
cbo_lookup = {r[0]:r[1] for r in cursor.fetchall()}
cbo_lookup["-1"] = "xxxx" # data uses -1 for missing occupation
missing = {
"bra_id": defaultdict(int),
"cnae_id": defaultdict(int),
"cbo_id": defaultdict(int)
}
def map_gender(x):
    """Map a RAIS gender code (0 = male, 1 = female) to the id string; "3" for anything else."""
    if x == 0:
        return "1"
    if x == 1:
        return "2"
    return "3"  # unknown / unreported
def map_color(color):
    """Map a RAIS race/color code to the id string.

    Accepted codes: 1 indigenous, 2 white, 4 black, 6 asian, 8 multiracial,
    9 or -1 unknown (both map to "9"). Any other code raises KeyError.
    """
    translation = {
        1: 1,   # indigenous
        2: 2,   # white
        4: 3,   # black
        6: 4,   # asian
        8: 5,   # multiracial
        9: 9,   # unknown
        -1: 9,  # missing -> unknown
    }
    return str(translation[int(color)])
def map_age(age):
    """Bucket an integer age into its decade index, clamped to the range 1..6."""
    # Floor division on ints is exactly int(np.floor(age / 10)).
    decade = int(age) // 10
    if decade == 0:
        decade = 1
    elif decade > 6:
        decade = 6
    return str(decade)
def map_literacy(lit):
    """Map a RAIS schooling code to a coarse literacy id string.

    1-2 -> illiterate ("1"), 3-6 -> basic ("2"), 7-8 -> highschool ("3"),
    9 -> college ("4"), -1 -> unknown ("9"). Other codes raise KeyError.
    """
    ILLITERATE, BASIC, HIGHSCHOOL, COLLEGE, UNKNOWN = 1, 2, 3, 4, 9
    translation = {
        1: ILLITERATE, 2: ILLITERATE,
        3: BASIC, 4: BASIC, 5: BASIC, 6: BASIC,
        7: HIGHSCHOOL, 8: HIGHSCHOOL,
        9: COLLEGE,
        -1: UNKNOWN,
    }
    return str(translation[int(lit)])
def floatvert(x):
    """Parse a decimal string that uses a comma separator (e.g. "3,14") to float.

    Returns NaN when the value is not parseable as a number.
    """
    x = x.replace(',', '.')
    try:
        return float(x)
    except ValueError:  # narrowed from a bare except: float() only raises ValueError here
        return np.nan
def bra_replace(raw):
    """Look up the DataViva bra_id for a raw IBGE code; None if unknown.

    Unknown codes are tallied in the module-level missing["bra_id"] counter.
    """
    key = str(raw).strip()
    try:
        return bra_lookup[key]
    except KeyError:  # narrowed from a bare except: only a missing key is expected
        missing["bra_id"][raw] += 1
        return None
def cnae_replace(raw):
    """Look up the DataViva cnae_id for a raw CNAE code; None if unknown.

    Unknown codes are tallied in the module-level missing["cnae_id"] counter.
    """
    key = str(raw).strip()
    try:
        return cnae_lookup[key]
    except KeyError:  # narrowed from a bare except: only a missing key is expected
        missing["cnae_id"][raw] += 1
        return None
def cbo_replace(raw):
    """Look up the DataViva cbo_id from the first 4 digits of a raw CBO code;
    None if unknown.

    Unknown codes are tallied in the module-level missing["cbo_id"] counter.
    """
    key = str(raw).strip()[:4]
    try:
        return cbo_lookup[key]
    except KeyError:  # narrowed from a bare except: only a missing key is expected
        missing["cbo_id"][raw] += 1
        return None
cols = ["cbo_id", "cnae_id", "literacy", "age", "est_id", "simple", "bra_id", "num_emp", "color", "wage_dec", "wage", "gender", "est_size", "year"]
coerce_cols = {"bra_id": bra_replace, "cnae_id":cnae_replace, "cbo_id":cbo_replace, \
"wage":floatvert, "emp_id":str, "est_id": str}
@click.command()
@click.argument('file_path', type=click.Path(exists=True))
def main(file_path):
    """Convert a semicolon-separated RAIS CSV to HDF5 (<file_path>.h5),
    adding the 4-digit demographic id column `d_id` built from the
    gender, age-bucket, color and literacy codes of each row."""
    output_file = file_path + ".h5"
    df = pd.read_csv(file_path, header=0, sep=";", names=cols, converters=coerce_cols)
    # d_id concatenates the four single-digit demographic codes per row.
    df["d_id"] = df.apply(lambda x:'%s%s%s%s' % (
                            map_gender(x['gender']), map_age(x['age']),
                            map_color(x['color']), map_literacy(x['literacy'])
                            ), axis=1)
    df.to_hdf(output_file, 'table')
main()
| vamsijkrishna/dataviva-scripts | scripts/rais_new/helpers/csv2hdf.py | csv2hdf.py | py | 3,228 | python | en | code | 0 | github-code | 36 |
32531618030 | """
Author: Michael Thompson (mjt106@case.edu)
Date: 9/20/2020
Brief: This file navigates commands input from the client physically accessing the device
"""
from src import client_command_list
import inspect
class ClientCommander:
    """Reads commands typed by the local client and dispatches each one to the
    matching public method of ClientCommandList."""
    def __init__(self):
        self._client_command_list = client_command_list.ClientCommandList()
        self._command_list = {}  # command name -> handler callable
        self._construct_command_list()
    @staticmethod
    def _get_command():
        # Return the raw input split on spaces: [command, arg1, arg2, ...].
        return input("Enter command (type list for all commands, <command> help for more)\n").split(" ")
    def _parse_command(self, command):
        # Dispatch `command` (a token list) to its handler; report unknown names.
        try:
            self._command_list[command[0]](command)
        except KeyError:
            print("Command \"" + command[0] + "\" not found")
    def _construct_command_list(self):
        # Register every public member (no leading underscore) of the command
        # list object as a dispatchable command, keyed by its attribute name.
        for element in inspect.getmembers(self._client_command_list):
            if element[0][0] != "_":
                self._command_list[element[0]] = element[1]
    def run(self):
        """
        Runs the core configuration and commands loop
        """
        # NOTE(review): the first command is read and dispatched before the
        # loop condition is checked, so an initial "exit" is still dispatched
        # once before shutdown.
        command = ClientCommander._get_command()
        self._parse_command(command)
        print()
        while command[0] != "exit":
            command = ClientCommander._get_command()
            self._parse_command(command)
            print()
        print("Shutting down")
| Rembed/Rembed-EmbeddedBackend | src/client_commander.py | client_commander.py | py | 1,333 | python | en | code | 0 | github-code | 36 |
35919708000 | import difflib
import redis
from pymongo import MongoClient
client = MongoClient('mongodb+srv://Alex:goit123@utcluster.zrkwr.mongodb.net/myFirstDatabase?retryWrites=true&w=majority')
def add():
    """Prompt for a new contact and insert it into the ContactBook collection.

    Aborts without inserting when a record with the same name already exists.
    NOTE(review): `db` is not defined in the visible module code (only
    `client` is) -- confirm it is bound elsewhere.
    """
    name = input('Enter name: ')
    if db.ContactBook.find_one({'name': name}):
        print(f"The record with name '{name}' is already exist. Try another name or update the one")
        return  # bug fix: the original fell through and inserted a duplicate anyway
    phone = input('Enter phone: ')
    email = input('Enter email: ')
    db.ContactBook.insert_one({'name': name, 'email': email, 'phone': phone})
    print('New record successfully added')
def showall():
    """Print every contact stored in the ContactBook collection."""
    # NOTE(review): `db` is not defined in the visible module code (only
    # `client` is) -- confirm it is bound elsewhere, e.g. db = client.<dbname>.
    for rec in db.ContactBook.find():
        print(f'name = {rec["name"]}, phone = {rec["phone"]}, email = {rec["email"]}')
def delete():
    """Remove the contact with the entered name, if it exists."""
    name = input('Enter name: ')
    if not db.ContactBook.find_one({'name': name}):
        print("There is no such record in DB")
        return
    db.ContactBook.delete_one({'name': name})
    print(f'Record with name "{name}" has been successfully deleted')
def show():
    """Look up a single contact by name and print it."""
    name = input('Enter name: ')
    record = db.ContactBook.find_one({'name': name})
    if record is None:
        print("There is no such record in DB")
    else:
        print(f'name = {record["name"]}, phone = {record["phone"]}, email = {record["email"]}')
def update():
    """Overwrite the phone/email of an existing contact."""
    name = input('Enter name: ')
    if not db.ContactBook.find_one({'name': name}):
        print("There is no such record in DB. Try another command")
        return
    print("The record exists in DB. Enter a new data:")
    phone = input('Enter phone: ')
    email = input('Enter email: ')
    db.ContactBook.update_one({'name': name}, {'$set': {'name': name, 'email': email, 'phone': phone}})
    print(f'Record "{name}" has been successfully updated')
def find():
    """Search contacts whose phone or email matches the entered substring.

    Bug fix: ``db.ContactBook.find`` always returns a cursor, never None,
    so the original "no such record" branch was unreachable and an empty
    result printed nothing at all.  Track whether anything matched instead.
    """
    data = input('Enter data: ')
    query = {"$or": [{"phone": {"$regex": data}}, {"email": {"$regex": data}}]}
    found = False
    for rec in db.ContactBook.find(query, {'_id': 0}):
        found = True
        print(f" Name = {rec['name']}, phone = {rec['phone']}, email = {rec['email']}")
    if not found:
        print("There is no such record in DB. Try another command")
def command_assistant():
    """Read a command from the user, auto-correcting typos.

    Known commands pass through unchanged.  For an unknown command the
    closest match is suggested via difflib (and the typo->suggestion mapping
    is cached in Redis, so a repeated typo hits the cache first).  Loops
    until the user accepts a suggestion or types a valid command; an answer
    other than Y/N simply re-prompts for a command.
    """
    commands = ['add', 'show', 'delete', 'show_all', 'exit', 'update', 'find'] # list of commands
    r = redis.StrictRedis(host='localhost', port=6379, db=0)
    while True:
        command = str(input('Enter command:\n>>> ')).lower().strip()
        if not command in commands: # prediction logic
            if r.get(command): # checking cache
                print(f"(Cache)Perhaps you mean {(r.get(command)).decode('utf-8')}")
                ans = str(input("Answer (Y/N): ")).lower()
                if ans == "n":
                    print("Command input error, try again")
                    continue
                elif ans == "y":
                    variant = r.get(command).decode('utf-8')
                    break
            else:
                # difflib returns a list; str()[2:-2] strips the "['" / "']".
                variant = str(difflib.get_close_matches(command, commands, cutoff=0.1, n=1))[2:-2] # prediction realisation
                print(f"Perhaps you mean {variant}")
                answer = str(input("Answer (Y/N): ")).lower()
                if answer == "n":
                    print("Command input error, try again")
                    continue
                elif answer == "y":
                    r.set(command, variant)
                    break
        else:
            variant = command
            break
    return variant
if __name__ == '__main__':
    # Entry point: open the Mongo connection, greet, then loop over commands.
    with client:
        db = client.myfirst_mongoDB
        print(f'{" "*20}*** Welcome to Personal assistant Contact book DB edition!***')
        print("Commands:\n - add;\n - show;\n - show_all;\n - delete;\n - update;\n - find;\n - exit\n")
        while True:
            try:
                answer = command_assistant()
            except (ConnectionRefusedError, redis.exceptions.ConnectionError, ConnectionError) as Error:
                # Redis unavailable: degrade gracefully to plain input
                # without typo prediction.
                print("Error! Connection problems to Redis. App is working without command prediction")
                answer = str(input('Enter command:\n>>> ')).lower().strip()
            # Dispatch the (possibly auto-corrected) command.
            if answer == 'add':
                add()
                continue
            elif answer == 'show_all':
                showall()
                continue
            elif answer == 'delete':
                delete()
                continue
            elif answer == 'show':
                show()
                continue
            elif answer == 'update':
                update()
                continue
            elif answer == 'find':
                find()
                continue
            elif answer == 'exit':
                break
            else:
                print("Command input error. Try correct command again")
                continue
        print("Good bye!")
| AlexUtchenko/goit-python | WEB10/PA_Mongo_Redis_Nodic.py | PA_Mongo_Redis_Nodic.py | py | 4,975 | python | en | code | 0 | github-code | 36 |
34566807465 | import pytorch_lightning as pl
from transformers import AdamW
class DST_Seq2Seq(pl.LightningModule):
    """LightningModule wrapper around a seq2seq model for dialogue state tracking."""

    def __init__(self, args, tokenizer, model):
        super().__init__()
        self.tokenizer = tokenizer
        self.model = model
        self.lr = args["lr"]

    def _forward_loss(self, batch):
        # Teacher-forced forward pass; the wrapped model returns the loss
        # when labels are supplied.
        return self.model(
            input_ids=batch['encoder_input'],
            attention_mask=batch['attention_mask'],
            labels=batch['decoder_output']
        ).loss

    def training_step(self, batch, batch_idx):
        self.model.train()
        loss = self._forward_loss(batch)
        return {'loss': loss, 'log': {'train_loss': loss}}

    def validation_step(self, batch, batch_idx):
        self.model.eval()
        loss = self._forward_loss(batch)
        return {'eval_loss': loss, 'log': {'eval_loss': loss}}

    def validation_epoch_end(self, outputs):
        # Average the per-step losses and expose the scalar under all three
        # keys Lightning looks at.
        mean_loss = sum(step['eval_loss'] for step in outputs) / len(outputs)
        scalar = mean_loss.item()
        return {'progress_bar': {'eval_loss': scalar}, 'log': {'eval_loss': scalar}, 'eval_loss': scalar}

    def configure_optimizers(self):
        return AdamW(self.parameters(), lr=self.lr, correct_bias=True)
| minson123-github/ADL21-Final-Project | T5DST/model.py | model.py | py | 1,211 | python | en | code | 0 | github-code | 36 |
33227873422 | import urllib.request
import sys
import time
from os import path
from os import popen
import argparse
def arguments():
    """Parse the command-line arguments: a required URL and an optional
    destination path/name."""
    parser = argparse.ArgumentParser()
    parser.add_argument('URL', help="URL of the file",
                        default=None, type=str)
    # NOTE: the backslash continuation is *inside* the string literal, so the
    # help text deliberately contains the embedded spaces of the next line.
    parser.add_argument('des', help="The name of the file\
                        to be saved with.", default=None, nargs='?')
    args = parser.parse_args()
    return args
def get_terminal_length():
    """Return the width (number of columns) of the terminal.

    Uses shutil.get_terminal_size(), which works even when stdout is not a
    tty (falls back to 80 columns) — unlike the previous ``stty size`` pipe,
    which raised ValueError on empty output and leaked the popen file object.
    """
    import shutil  # local import keeps this fix self-contained
    return shutil.get_terminal_size().columns
def get_name(URL):
    """Best-effort file name: the last non-empty '/'-separated component
    of *URL* (falls back to the first component — possibly '' — when every
    component is empty)."""
    name = ''
    for part in reversed(URL.split('/')):
        name = part
        if part != '':
            break
    return name
def format_size(size):
    """Scale *size* (bytes) into the largest unit it strictly exceeds.

    Returns a (value, unit) pair; sizes of exactly 1024 / 1024**2 / 1024**3
    stay in the smaller unit because the comparisons are strict.
    """
    for threshold, unit in (
        (1024 ** 3, "GB's"),
        (1024 ** 2, "MB's"),
        (1024, "kb's"),
    ):
        if size > threshold:
            return (size / threshold, unit)
    return (size, 'bytes')
def download(url, des=None):
    """Download *url* to *des*, printing a single-line progress bar.

    If *des* is a directory the file name is derived from the URL; if *des*
    is None the name is derived from the URL as well.  Returns True on
    success, False on any error.

    Bug fix: the ``des is None`` fallback referenced the undefined global
    ``URL`` (the parameter is ``url``), raising NameError whenever no
    destination was supplied.
    """
    try:
        # Check if the des is passed
        if des is not None:
            if path.isdir(des):
                des = path.join(des, get_name(url))
        else:
            des = get_name(url)  # bug fix: was get_name(URL)
        # Download files with a progressbar showing the percentage
        try:
            u = urllib.request.urlopen(url)
        except Exception as e:
            print("ERROR: {}".format(e))
            return False
        # NOTE(review): f is only closed on the success path; an exception
        # mid-download leaks the handle until GC (kept to avoid reshaping
        # the control flow here).
        f = open(des, 'wb')
        meta = u.info()
        file_size = None
        try:
            file_size = int(meta["Content-Length"])
            formatted_file_size, dw_unit = format_size(file_size)
            print("Size: {} {}".format(round(formatted_file_size), dw_unit))
            print("Saving as: {}".format(des))
        except TypeError:
            # No Content-Length header: proceed without a total/ETA.
            pass
        file_size_dl = 0
        block_sz = 8192
        beg_time = time.time()
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            # Initialize all the variables that cannot be calculated
            # to ''
            speed = ''
            time_left = ''
            time_unit = ''
            percent = ''
            if file_size is not None:
                # Calculate speed (kB/s since the start of the download)
                speed = (file_size_dl / 1024) / (time.time() - beg_time)
                # Calculate time left
                time_left = round(((file_size - file_size_dl) / 1024) / speed)
                time_unit = 's'
                # Convert to min or hours as req
                if time_left > 3600:
                    time_left = round(time_left / 3600)
                    time_unit = 'h'
                elif time_left > 60:
                    time_left = round(time_left / 60)
                    time_unit = 'm'
                # Calculate percentage
                percent = file_size_dl * 100 / file_size
            # file_size to show
            file_size_to_disp, dw_unit = format_size(file_size_dl)
            # Basename
            basename = path.basename(des)
            # Calculate amount of space req in between (the magic numbers are
            # the fixed widths of the status fields below)
            length = get_terminal_length()
            stuff_len = len(basename) + 13 + 17 + 7 + 26 + 3
            space = 0
            if stuff_len < length:
                space = length - stuff_len
            elif stuff_len > length:
                basename = basename[:(length - stuff_len) - 2] + '..'
            if file_size is not None:
                status = r"%s %s %0.2f %s |%d kbps| ETA: %s %s |%-20s| |%3.2f%%|" % (basename, space * " ", file_size_to_disp, dw_unit, speed, time_left, time_unit, "-" * int(percent / 5), percent)
            else:
                status = r"%s %s %0.2f %s" %(basename, space * " ", file_size_to_disp, dw_unit)
            sys.stdout.write('\r')
            sys.stdout.write(status)
            sys.stdout.flush()
        f.close()
        print()
        return True
    except Exception as e:
        # Best-effort CLI: report and signal failure instead of crashing.
        print("ERROR: {}".format(e))
        return False
if __name__ == "__main__":
    # CLI entry point: parse URL/destination and start the download.
    args = arguments()
    download(args.URL, args.des)
| TrendingTechnology/QuickWall | QuickWall/download.py | download.py | py | 4,652 | python | en | code | null | github-code | 36 |
3592670664 | from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from typing import List
from db.database import get_db
from security.auth import oauth2_scheme, get_current_user
from . import schemas, crud
router = APIRouter()
@router.post("/news/add")
async def add_new(title: str, desc: str, db: Session = Depends(get_db), token: str = Depends(oauth2_scheme)):
    """Create a news item; restricted to admin users.

    NOTE(review): 403 Forbidden would be the conventional status for a
    permission failure, and "permision" is misspelled — both kept to
    preserve the existing API contract.
    """
    user = get_current_user(db, token)
    if not user.is_admin:
        raise HTTPException(status_code=400, detail="No permision")
    return crud.add_new(db, title, desc)
@router.delete("/news/{new_id}/delete")
async def delete_new(new_id: int, db: Session = Depends(get_db), token: str = Depends(oauth2_scheme)):
    """Delete the news item with id *new_id*; restricted to admin users."""
    user = get_current_user(db, token)
    if not user.is_admin:
        raise HTTPException(status_code=400, detail="No permision")
    return crud.delete_new(db, new_id)
@router.get("/news", response_model=List[schemas.News])
async def read_news(db: Session = Depends(get_db)):
    """Return every news item (public; no authentication required)."""
    return crud.get_news(db)
| ostrekodowanie/Synapsis | backend/api/news/routes.py | routes.py | py | 1,024 | python | en | code | 0 | github-code | 36 |
20926366393 | from .env_reader import env, csv
from .base import BASE_DIR
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
# Comma-separated list of hosts Django may serve, read from the environment.
ALLOWED_HOSTS = env('ALLOWED_HOSTS', cast=csv())
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
} | akkbaeva/sixbox_pr | src/sixbox_pr/settings/local.py | local.py | py | 426 | python | en | code | 0 | github-code | 36 |
9571125723 | #!/usr/bin/env python
import os, subprocess, sys, pwd, grp, stat
import socket, requests, json, yaml, time, logging, re, argparse
import paramiko
from pprint import pprint
_api = 'https://foobar.ru/api/'
user, group = pwd.getpwuid(os.getuid()).pw_name, grp.getgrgid(pwd.getpwuid(os.getuid()).pw_gid).gr_name
def get_hosts(hostname):
    """Return the fqdn of every host that shares an inventory group with *hostname*.

    Queries the API once for the groups of *hostname*, then once per group
    for its members; duplicates and *hostname* itself are removed.  Returns
    an empty list when the group lookup fails.
    NOTE(review): ``remove(hostname)`` assumes the host appears in at least
    one of its own groups; otherwise this raises ValueError — confirm.
    """
    all_hosts = []
    try:
        host_groups = [group['name'] for group in json.loads(requests.get(_api + \
                'foo/{}?format=json'.format(hostname)).text)]
    except Exception as e:
        host_groups = []
        logging.warning('{}: {}'.format(e.__class__.__name__, e))
    if host_groups:
        for group in host_groups:
            all_hosts.extend([host['fqdn'] for host in json.loads(requests.get(_api + \
                    'bar/{}?format=json'.format(group)).text)])
        all_hosts = list(set(all_hosts))
        all_hosts.remove(hostname)
    return all_hosts
def get_config(*args):
    """Load the sync config, preferring a fresh copy from GitHub.

    With args == (True,) only the newest local ``config-<ts>.yml`` is read;
    otherwise the remote config is fetched, old local copies are removed and
    a new timestamped copy is written, falling back to the newest local copy
    on failure.  Returns {} when nothing could be loaded.

    NOTE(review): written for Python 2 — under Python 3 ``filter`` returns a
    lazy, always-truthy, single-use iterator, so the ``if config_files:``
    checks and the second iteration would misbehave.
    """
    local = False
    if args and type(args) is tuple: local = args[0]
    config = {}
    config_files = filter(lambda x: re.match('config-[0-9]+.yml', x), os.listdir('./'))
    if local:
        if config_files:
            timestamp = max([re.search('config-([0-9]+).yml', x).group(1) for x in config_files])
            with open('config-{}.yml'.format(timestamp), 'r') as config_file:
                try:
                    config = yaml.load(config_file)
                except Exception as e:
                    logging.warning('{}: {}'.format(e.__class__.__name__, e))
    else:
        try:
            config = yaml.load((requests.get('https://raw.githubusercontent.com/' \
                'asmartishin/python_scripts/master/file_sync/config.yml').text))
            list(map(os.remove, config_files))
            with open('config-{}.yml'.format(int(time.time())), 'w') as config_file:
                config_file.write(yaml.dump(config ,default_flow_style=False))
        except Exception as e:
            logging.warning('{}: {}'.format(e.__class__.__name__, e))
            if config_files:
                timestamp = max([re.search('config-([0-9]+).yml', x).group(1) for x in config_files])
                with open('config-{}.yml'.format(timestamp), 'r') as config_file:
                    try:
                        config = yaml.load(config_file)
                    except Exception as e:
                        logging.warning('{}: {}'.format(e.__class__.__name__, e))
    return config
# Here directory permission changes to the ones, that the user starting the script has,
# cause I assume that we start it under admin user, don't know if it is a good idea.
def get_local_files(config):
    """Collect every file path under the configured directories.

    Missing directories are created (via sudo) and chowned to the current
    user/group before walking them.
    """
    local_files = []
    for directory in config['directories']:
        if not os.path.isdir(directory):
            subprocess.call('sudo mkdir -p {}'.format(directory), shell=True)
        if user != pwd.getpwuid(os.stat(directory).st_uid).pw_name or \
                group != grp.getgrgid(os.stat(directory).st_gid).gr_name:
            subprocess.call('sudo chown -R {}:{} {}'.format(user, group, directory), shell=True)
        for dirpath, dirnames, filenames in os.walk(directory):
            local_files += [os.path.join(dirpath, filename) for filename in filenames]
    return local_files
def get_host_files(hostname, config):
    """Collect every file under the configured directories on a remote host.

    Connects over SSH as the current user, creating and chowning missing
    directories (via sudo) — the remote counterpart of get_local_files.
    """
    remote_files = []
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname, username=user)
    sftp = ssh.open_sftp()
    for directory in config['directories']:
        try:
            sftp.stat(directory)
        except IOError as e:
            if e.errno == 2:
                # ENOENT: create the directory remotely.
                ssh.exec_command(('sudo mkdir -p {}').format(directory))
        # Parse owner/group out of `ls -ld` output.
        huser, hgroup = re.search('([A-Za-z]+)\ ([A-Za-z_ ]+)\ ' ,ssh.exec_command(('ls -ld {}').\
                format(directory))[1].read().rstrip()).group(1, 2)
        if user != huser or group != hgroup:
            ssh.exec_command('sudo chown -R {}:{} {}'.format(user, group, directory))
        remote_files.extend(ssh.exec_command(('find {} -type f | xargs readlink -f').\
                format(directory))[1].read().splitlines())
    sftp.close()
    ssh.close()
    return remote_files
def push_files(local_files_tuple, remote_files):
    """Copy local files that are missing on each remote host.

    A missing local file is pushed into <dir>/<local-hostname>/ on the
    remote side, skipping files whose parent directory is itself a per-host
    mirror directory.
    NOTE(review): the bare ``print`` at the end is a Python 2 statement; on
    Python 3 it evaluates the function object and prints nothing.
    """
    print('Push: ')
    hostname = local_files_tuple[0]
    local_files = local_files_tuple[1]
    for rhost in remote_files:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(rhost, username=user)
        sftp = ssh.open_sftp()
        for lfile in local_files:
            if lfile not in remote_files[rhost]:
                if lfile.split('/')[-2] not in remote_files.keys():
                    pdir, pfile = os.path.split(lfile)
                    rdir = '{}/{}'.format(pdir, hostname)
                    rpath = '{}/{}/{}'.format(pdir, hostname, pfile)
                    ssh.exec_command('mkdir -p {}'.format(rdir))
                    print('{} --> {}:{}'.format(lfile, rhost, rpath))
                    sftp.put(lfile, rpath)
        sftp.close()
        ssh.close()
    print
def pull_files(local_files_tuple, remote_files):
    """Copy remote files that are missing locally into <dir>/<remote-host>/.

    NOTE(review): ``remote_files.keys()`` followed by ``.append`` only works
    on Python 2 where keys() returns a list; under Python 3 this raises
    AttributeError (would need ``list(remote_files.keys())``).  The bare
    ``print`` at the end is likewise a Python 2 statement.
    """
    print('Pull: ')
    hostname = local_files_tuple[0]
    local_files = local_files_tuple[1]
    all_hosts = remote_files.keys()
    all_hosts.append(hostname)
    for rhost in remote_files:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(rhost, username=user)
        sftp = ssh.open_sftp()
        for rfile in remote_files[rhost]:
            if rfile not in local_files:
                if rfile.split('/')[-2] not in all_hosts:
                    pdir, pfile = os.path.split(rfile)
                    ldir = '{}/{}'.format(pdir, rhost)
                    lpath = '{}/{}/{}'.format(pdir, rhost, pfile)
                    subprocess.call('mkdir -p {}'.format(ldir), shell=True)
                    print('{} <-- {}:{}'.format(lpath, rhost, rfile))
                    sftp.get(rfile, lpath)
        sftp.close()
        ssh.close()
    print
def parse_arguments():
    """Build the CLI parser and return the parsed options."""
    arg_parser = argparse.ArgumentParser(description='Script for syncing files on servers')
    arg_parser.add_argument('-l', '--local', action='store_true', default=False,
                            help='Use local copy of config file')
    return arg_parser.parse_args()
if __name__ == "__main__":
    remote_files = {}
    args = parse_arguments()
    hostname = socket.getfqdn()
    remote_hosts = get_hosts(hostname)
    config = get_config(args.local)
    if not config:
        raise RuntimeError('Could not load config. Exiting.')
    local_files_tuple = (hostname, get_local_files(config))
    # Inventory each reachable peer; unreachable hosts are logged and skipped.
    for host in remote_hosts:
        try:
            remote_files[host] = get_host_files(host, config)
        except Exception as e:
            logging.warning('{}: {}'.format(e.__class__.__name__, e))
    # Two-way sync: push local-only files out, then pull remote-only files in.
    push_files(local_files_tuple, remote_files)
    pull_files(local_files_tuple, remote_files)
| ttymonkey/python | file_sync/sync.py | sync.py | py | 7,025 | python | en | code | 0 | github-code | 36 |
26164662406 | import lpips
import numpy
import torch
import json
from skimage.metrics import structural_similarity as ssim
class ImageMetrics:
    """Static helpers computing image-similarity metrics (L2, PSNR, SSIM,
    LPIPS) for either numpy arrays or torch tensors, dispatching on exact
    type.

    NOTE(review): the ``range`` parameter is forwarded but ignored by the
    SSIM backend, and ``psnr_metric`` defaults to peak=1. while the other
    metrics default to 255. — confirm the intended value ranges.
    """
    @staticmethod
    def __l2__metric__tensor(first_image: torch.Tensor, second_image: torch.Tensor, range=255.):
        # Tensor path: convert to numpy and reuse the numpy backend.
        return ImageMetrics.__l2_metric__numpy(first_image.numpy(), second_image.numpy(), range)
    @staticmethod
    def __l2_metric__numpy(first_image: numpy.ndarray, second_image: numpy.ndarray, range=255.):
        return lpips.l2(first_image, second_image, range)
    @staticmethod
    def l2_metric(first_image, second_image, range=255.):
        """Mean squared error between two images of the same type."""
        if type(first_image) != type(second_image):
            raise Exception('Images are not of the same type')
        if type(first_image) == numpy.ndarray:
            return ImageMetrics.__l2_metric__numpy(first_image, second_image, range)
        if type(first_image) == torch.Tensor:
            return ImageMetrics.__l2__metric__tensor(first_image, second_image, range)
        raise Exception('Unsupported image type')
    @staticmethod
    def __psnr__metric__tensor(first_image: torch.Tensor, second_image: torch.Tensor, peak=255.):
        return ImageMetrics.__psnr_metric__numpy(first_image.numpy(), second_image.numpy(), peak)
    @staticmethod
    def __psnr_metric__numpy(first_image: numpy.ndarray, second_image: numpy.ndarray, peak=255.):
        return lpips.psnr(first_image, second_image, peak)
    @staticmethod
    def psnr_metric(first_image, second_image, peak=1.):
        """Peak signal-to-noise ratio between two images of the same type."""
        if type(first_image) != type(second_image):
            raise Exception('Images are not of the same type')
        if type(first_image) == numpy.ndarray:
            return ImageMetrics.__psnr_metric__numpy(first_image, second_image, peak)
        if type(first_image) == torch.Tensor:
            return ImageMetrics.__psnr__metric__tensor(first_image, second_image, peak)
        raise Exception('Unsupported image type')
    @staticmethod
    def __ssim__metric__tensor(first_image: torch.Tensor, second_image: torch.Tensor, range=255.):
        return ImageMetrics.__ssim_metric__numpy(first_image.numpy(), second_image.numpy(), range)
    @staticmethod
    def __ssim_metric__numpy(first_image: numpy.ndarray, second_image: numpy.ndarray, range=255.):
        # NOTE(review): ``range`` is accepted but not passed to skimage's ssim.
        return ssim(first_image, second_image, channel_axis=2)
    @staticmethod
    def ssim_metric(first_image, second_image, range=255.):
        """Structural similarity (channel-last images) of two same-type images."""
        if type(first_image) != type(second_image):
            raise Exception('Images are not of the same type')
        if type(first_image) == numpy.ndarray:
            return ImageMetrics.__ssim_metric__numpy(first_image, second_image, range)
        if type(first_image) == torch.Tensor:
            return ImageMetrics.__ssim__metric__tensor(first_image, second_image, range)
        raise Exception('Unsupported image type')
    @staticmethod
    def __lpips__metric__tensor(model: lpips.LPIPS, first_image: torch.Tensor, second_image: torch.Tensor):
        return model(first_image, second_image).detach().numpy().flatten()[0]
    @staticmethod
    def lpips_metric(first_image, second_image, model):
        """Learned perceptual similarity; tensor inputs only."""
        if type(first_image) != type(second_image):
            raise Exception('Images are not of the same type')
        if type(first_image) == torch.Tensor:
            return ImageMetrics.__lpips__metric__tensor(model, first_image, second_image)
        raise Exception('Unsupported image type')
    @staticmethod
    def metric_export(first_image, second_image, interpolation, lpips_model):
        """Compute all four metrics for one image pair and return them as a
        JSON-ready dict (values stringified).  Expects channel-last numpy
        images; they are permuted to CHW tensors for LPIPS."""
        dictionary = {
            "name": interpolation,
            "psnr": str(ImageMetrics.psnr_metric(first_image, second_image, peak=255.)),
            "l2": str(ImageMetrics.l2_metric(first_image, second_image)),
            "ssim": str(ImageMetrics.ssim_metric(first_image, second_image)),
            "lpips": str(ImageMetrics.lpips_metric(torch.tensor(first_image).permute(2, 0, 1),
                                                   torch.tensor(second_image).permute(2, 0, 1), lpips_model))
        }
        return dictionary
@staticmethod
def metric_export_all(metrics, file_name):
json_object = json.dumps(metrics, indent=4)
with open(f'{file_name}_metrics.json', "w") as outfile:
outfile.write(json_object) | dinogrgic1/real-time-video-upscale-master-thesis | models/ImageMetrics.py | ImageMetrics.py | py | 4,275 | python | en | code | 0 | github-code | 36 |
39453776358 | from functools import reduce
class Solution:
    def multiply(self, num1: str, num2: str) -> str:
        """Multiply two non-negative integers given as decimal strings.

        Replaces the original list-of-partial-products + functools.reduce
        approach with the standard O(len(num1)*len(num2)) digit-convolution
        algorithm: accumulate per-position products, then propagate carries
        once.  Also handles empty input gracefully (returns '0').
        """
        if num1 == '0' or num2 == '0':
            return '0'
        n, m = len(num1), len(num2)
        digits = [0] * (n + m)  # little-endian digit accumulator
        for i, a in enumerate(reversed(num1)):
            for j, b in enumerate(reversed(num2)):
                digits[i + j] += int(a) * int(b)
        carry = 0
        for k in range(n + m):
            carry, digits[k] = divmod(digits[k] + carry, 10)
        result = ''.join(map(str, reversed(digits))).lstrip('0')
        return result if result else '0'

    def strSum(self, num1, num2):
        """Add two numbers represented as most-significant-first digit lists.

        Returns the sum in the same representation (with an extra leading
        digit when the addition carries out).
        """
        a, b = num1[::-1], num2[::-1]
        total = []
        carry = 0
        for index in range(max(len(a), len(b))):
            da = a[index] if index < len(a) else 0
            db = b[index] if index < len(b) else 0
            carry, digit = divmod(da + db + carry, 10)
            total.append(digit)
        if carry != 0:
            total.append(carry)
        return total[::-1]
if __name__ == "__main__":
    # Quick manual check: 7 * 871 — expect 6097.
    s = Solution()
    print(s.multiply('7','871'))
# print(s.strSum([8,2],[2,3])) | haodongxi/leetCode | 43.py | 43.py | py | 1,429 | python | en | code | 0 | github-code | 36 |
19102436100 | # _*_ coding: utf-8 _*_
from pycse import regress
import numpy as np
# Fit a 4th-order polynomial to concentration-vs-time data and report the
# 95% confidence intervals on the coefficients — first with pycse.regress.
time=np.array([0.0,50.0,100.0,150.0,200.0,250.0,300.0])
Ca=np.array([50.0,38.0,30.6,25.6,22.2,19.5,17.4])*1e-3
T=np.column_stack([time**0,time,time**2,time**3,time**4])
alpha=0.05
p,pint,se=regress(T,Ca,alpha)
print(pint)
# new one
# Same fit done by hand: numpy.linalg.lstsq plus Student-t intervals.
import numpy as np
from scipy.stats.distributions import t
time=np.array([0.0,50.0,100.0,150.0,200.0,250.0,300.0])
Ca=np.array([50.0,38.0,30.6,25.6,22.2,19.5,17.4])*1e-3
T=np.column_stack([time**0,time,time**2,time**3,time**4])
p,res,rank,s=np.linalg.lstsq(T,Ca)
# the parameter are now in p
# compute the confidence intervals
n=len(Ca)
k=len(p)
sigma2=np.sum((Ca-np.dot(T,p))**2)/(n-k) # RMSE
C=sigma2*np.linalg.inv(np.dot(T.T,T)) # covariance matrix
se=np.sqrt(np.diag(C)) # standard error
alhpa=0.05 #100*(1-alpha) confidence level
# NOTE(review): 'alhpa' above is a typo; the t.ppf call below still reads the
# earlier 'alpha' variable (same value 0.05), so the result is unaffected.
sT=t.ppf(1.0-alpha/2.0,n-k) # student T multiplier
CI=sT*se
for beta,c1 in zip(p,CI):
    print('{2:1.2e} {0:1.4e} {1:1.4e}'.format(beta-c1,beta+c1,beta))
SS_tot=np.sum((Ca-np.mean(Ca))**2)
SS_err=np.sum((np.dot(T,p)-Ca)**2)
Rsq=1-SS_err/SS_tot
print('R^2 = {0}'.format(Rsq))
# plot fit
import matplotlib.pyplot as plt
plt.plot(time,Ca,'bo',label='Raw data')
plt.plot(time,np.dot(T,p),'r-',label='fit')
plt.xlabel('Time')
plt.ylabel('Ca (mol/L)')
plt.legend(loc='best')
plt.savefig('58.jpg',dpi=300)
plt.show()
print('plot done')
print('I will fight for myself')
print('I will fight for myself') | ruanyangry/pycse-data_analysis-code | PYSCE-code/58.py | 58.py | py | 1,483 | python | en | code | 0 | github-code | 36 |
17041944520 | import numpy as np
import typing
import random
import string
import os
import pickle
import inspect
import time
# time stamp + line numbers
def write_log(file, timestamp, function_name, input_ids=[], output_ids=[], frame=None, args=None):
    """Append one structured log record (a stringified dict) to *file*.

    When *frame* is given its file/line info is recorded; otherwise the
    outermost stack frame is used.
    """
    frame_info = inspect.getframeinfo(frame) if frame else inspect.stack()[-1]
    if frame_info:
        fileline = '{},{}'.format(frame_info.filename, frame_info.lineno)
        code_context = frame_info.code_context
    else:
        fileline = ''
        code_context = ''
    record = {
        'time': timestamp,
        'filename': fileline,
        'context': code_context,
        'function_name': function_name,
        'input_ids': input_ids,
        'output_ids': output_ids,
        'args': str(args),
    }
    file.write(str(record) + '\n')
def write_child_log(file, time, parent_ids, child_ids):
    """Append a 'relation' record linking parent array ids to child array ids.

    Bug fix: the original joined *child_ids* into *parent_ids* (copy-paste
    slip), clobbering the parent column and logging the raw list for the
    child column.
    """
    if isinstance(parent_ids, list):
        parent_ids = ','.join(parent_ids)
    if isinstance(child_ids, list):
        child_ids = ','.join(child_ids)
    log = '{};relation;{};{}\n'.format(time, parent_ids, child_ids)
    file.write(log)
def write_new_log(file, time, id):
    """Append a 'new' record announcing a freshly created array id."""
    file.write('{};new;{}\n'.format(time, id))
def rand_string(N):
    """Return a cryptographically random alphanumeric string of length N."""
    alphabet = string.ascii_letters + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(N))
class LoggedNDArray(np.ndarray):
    """ndarray subclass that logs creation, indexing and ufunc calls.

    Each array gets a process-unique id; every operation appends a record
    to a shared log file, and non-logged operands are snapshotted to .npy
    (or pickled) files named after their id so the dataflow can be replayed.

    NOTE(review): each instance opens its own handle to the log file in
    __array_finalize__ and never closes it — confirm this leak is acceptable
    for the intended (short-lived, instrumentation) use.
    """
    # Shared log destination and directory for operand snapshots.
    file_name = '/tmp/logs/log.txt'
    directory = '/tmp/logs'
    # Monotonically increasing id handed out by get_id().
    next_id = 1
    def __new__(cls, input_array):
        obj = np.asarray(input_array).view(cls)
        return obj
    def __array_finalize__(self, obj):
        # Called on every view/copy: inherit the logging flag from the parent
        # array and record either a parent->child relation or a brand-new id.
        # if obj is None:
        self.file = open(self.file_name, 'a+')
        if isinstance(obj, LoggedNDArray):
            id_ = str(obj.get_id())
            self.write_log = getattr(obj, 'write_log', True)
            if self.write_log:
                write_child_log(self.file, time.time(), id_, str(self.get_id()))
        else:
            self.write_log = True
            write_new_log(self.file, time.time(), str(self.get_id()))
    def __getitem__(self, key) -> typing.Any:
        # Log the read access, then defer to ndarray indexing.
        if self.write_log:
            write_log(self.file, str(time.time()), self.__getitem__.__name__, input_ids=self.get_id(),
                      args={'key': key})
        return super().__getitem__(key)
    def __setitem__(self, key, value) -> None:
        if self.write_log:
            write_log(self.file, str(time.time()), self.__setitem__.__name__, input_ids=self.get_id(),
                      args={'key': key})
            # NOTE(review): self.funct is never assigned anywhere in this
            # class, so this line raises AttributeError on the first logged
            # write — confirm whether it should be removed or funct set.
            self.file.write(str(self.funct) + " ; " + self.__setitem__.__name__ + " ; " + str(key) + '\n')
        return super().__setitem__(key, value)
    def get_id(self, index=None):
        """Return (shape, id) for this array, lazily allocating the id.

        When *index* is given the id is suffixed with it, producing a
        distinct identifier for a snapshot of this array.
        """
        if not hasattr(self, 'id'):
            self.id = LoggedNDArray.next_id
            LoggedNDArray.next_id += 1
        if index != None:
            id_ = str(self.id) + '_' + index
        else:
            id_ = self.id
        id_ = (self.shape, id_)
        return id_
    def set_write_log(self, value):
        # Toggle logging for this array (propagated to derived arrays).
        self.write_log = value
    def take(self, indices, axis=None, out=None, mode='raise'):
        """Logged wrapper around ndarray.take."""
        if self.write_log:
            if out != None:
                out = out.view(np.ndarray)
            output = super().take(indices, axis, out, mode)
            output = output.view(LoggedNDArray)
            output.set_write_log(self.write_log)
            args = {}
            args['indices'] = str(indices)
            args['axis'] = str(axis)
            args['mode'] = str(mode)
            if self.write_log:
                write_child_log(self.file, time.time(), str(self.get_id()), str(output.get_id()))
                write_log(self.file, str(time.time()), self.take.__name__, input_ids=self.get_id(),
                          output_ids=output.get_id(), args=args)
            return output
        else:
            return super().take(indices, axis, out, mode)
        # self.file.write(str(self.funct) + " ; " + self.take.__name__ + " ; " + str(kwargs) + '\n')
    # def __getattr__(self, name):
    #     if self.write_log:
    #         write_log(self.file, str(time.time()), self.__getattr__.__name__, input_ids=str(self.get_id()), args = {'name': name})
    #     print(type(super()))
    #     return super().__getattr__(name)
    def __array_ufunc__(self, ufunc, method, *inputs, out=None, where=True, **kwargs):
        """Intercept every ufunc call: unwrap logged operands, snapshot raw
        ndarrays/objects to disk under fresh ids, run the ufunc on plain
        ndarrays, re-wrap the results and log the whole call.

        NOTE(review): several spots below look wrong and are flagged inline —
        ``isinstance(..., np.array)`` (np.array is a function, so isinstance
        raises TypeError), text-mode ``open(..., 'w')`` handed to np.save /
        pickle.dump (both need binary mode), and ``kwargs.update`` returning
        None so the logged ``args`` are lost.
        """
        args = []
        input_ids = []
        # input_ids.append(str(self.get_id()))
        logged_args = {}
        new_nd_arrays = []
        for input_ in inputs:
            if isinstance(input_, LoggedNDArray):
                # Logged operand: pass through as a plain view, keep its id.
                args.append(input_.view(np.ndarray))
                input_ids.append(input_.get_id())
            elif isinstance(input_, np.ndarray):
                # Raw ndarray operand: snapshot it under a fresh id.
                args.append(input_)
                id_file = str(id(input_)) + '_' + rand_string(10)
                id_ = (input_.shape, id_file)
                new_nd_arrays.append((self.file, time.time(), id_))
                array_path = os.path.join(self.directory, id_file + '.npy')
                # NOTE(review): np.save writes binary; 'w' should be 'wb'.
                with open(array_path, 'w') as file:
                    np.save(file, input_)
                input_ids.append(id_)
            else:
                args.append(input_)
                input_ids.append(input_)
        # deal with ufunc methods
        if method == 'reduceat' or method == 'at':
            if isinstance(inputs[1], LoggedNDArray):
                logged_args['indices'] = inputs[1].get_id(rand_string(10))
                array_path = os.path.join(self.directory, logged_args['indices'][1] + '.npy')
                logged_args['indices'] = str(logged_args['indices'])
                input_ids[1] = logged_args['indices']
                with open(array_path, 'w') as file:
                    np.save(file, args[1])
            # NOTE(review): np.array is a function — isinstance against it
            # raises TypeError; this was presumably meant to be np.ndarray.
            elif isinstance(inputs[1], np.array):
                logged_args['indices'] = input_ids[1]
            # if indices is a tuple
            elif isinstance(inputs[1], tuple):
                indices = []
                args[1] = []
                for index in inputs[1]:
                    if isinstance(index, LoggedNDArray):
                        id_ = index.get_id(rand_string(10))
                        indices.append(str(id_))
                        array_path = os.path.join(self.directory, id_[1] + '.npy')
                        arr = index.view(np.ndarray)
                        args[1].append(arr)
                        with open(array_path, 'w') as file:
                            np.save(file, arr)
                    # NOTE(review): same np.array/np.ndarray mix-up as above.
                    elif isinstance(index, np.array):
                        id_file = str(id(index)) + '_' + rand_string(10)
                        id_ = str((index.shape, id_file))
                        indices.append(id_)
                        array_path = os.path.join(self.directory, id_file + '.npy')
                        args[1].append(index)
                        with open(array_path, 'w') as file:
                            np.save(file, index)
                    else:
                        id_file = str(id(index)) + '_' + rand_string(10)
                        indices.append(str(('object', id_file)))
                        obj_path = os.path.join(self.directory, id_file + '.pickle')
                        with open(obj_path, 'w') as file:
                            np.save(file, index)
                args[1] = tuple(args[1])
                logged_args['indices'] = str(indices)
            else:
                id_file = str(id(inputs[1])) + '_' + rand_string(10)
                logged_args['indices'] = str(('object', id_file))
                obj_path = os.path.join(self.directory, id_file + '.pickle')
                # NOTE(review): pickle.dump needs a binary-mode file ('wb').
                with open(obj_path, 'w') as file:
                    pickle.dump(inputs[1], file)
        # deal with out argument
        if isinstance(out, LoggedNDArray):
            outputs = out.view(np.ndarray)
        elif isinstance(out, list):
            outputs = []
            for out_ in out:
                if isinstance(out_, LoggedNDArray):
                    outputs.append(out_.view(np.ndarray))
        else:
            outputs = out
        if not isinstance(outputs, list):
            kwargs['out'] = outputs
        else:
            if outputs != None:
                kwargs['out'] = tuple(outputs)
        # deal with where argument
        if isinstance(where, LoggedNDArray):
            w = where.view(np.ndarray)
            id_ = where.get_id(rand_string(10))
            array_path = os.path.join(self.directory, id_[1] + '.npy')
            with open(array_path, 'w') as file:
                np.save(file, w)
            logged_args['where'] = str(id_)
        elif isinstance(where, np.ndarray):
            w = where
            id_ = str(id(where)) + '_' + rand_string(10)
            logged_args['where'] = str((where.shape, id_))
            array_path = os.path.join(self.directory, str(id_) + '.npy')
            with open(array_path, 'w') as file:
                np.save(file, w)
        elif where is not True:
            w = where
            id_file = str(id(where)) + '_' + rand_string(10)
            logged_args['where'] = str(('object', id_file))
            obj_path = os.path.join(self.directory, id_file + '.pickle')
            with open(obj_path, 'w') as file:
                pickle.dump(where, file)
        else:
            w = True
        if w is not True:
            kwargs['where'] = w
        # Run the real ufunc on the unwrapped operands.
        results = super().__array_ufunc__(ufunc, method,
                                          *args, **kwargs)
        if results is NotImplemented:
            return NotImplemented
        if ufunc.nout == 1:
            results = (results,)
        results_ = []
        output_ids = []
        if outputs == None:
            # Fresh results: wrap ndarrays back into LoggedNDArray and id them.
            for result in results:
                if isinstance(result, LoggedNDArray):
                    results_.append(result)
                    output_ids.append(result.get_id())
                elif isinstance(result, np.ndarray):
                    result_ = result.view(LoggedNDArray)
                    results_.append(result_)
                    output_ids.append(result_.get_id())
                elif result is None:
                    pass
                else:
                    results_.append(result)
                    output_ids.append(result)
        else:
            # Caller supplied out= buffers: hand those back instead.
            if not isinstance(outputs, tuple):
                outputs = (outputs,)
            for result, output in zip(results, outputs):
                if output == None:
                    if isinstance(result, np.ndarray):
                        results_.append(result.view(LoggedNDArray))
                    else:
                        results_.append(result)
                else:
                    results_.append(output)
                output_ids.append(None)
        results = tuple(results_)
        # write array without output, where, and methods
        name = ufunc.__name__ + ',' + method
        # these are already saved by their ids in logged_args or output_id
        if 'out' in kwargs:
            del kwargs['out']
        if 'where' in kwargs:
            del kwargs['where']
        # NOTE(review): dict.update returns None, so args is always None here
        # and logged_args never reach the log — likely meant kwargs.update(...)
        # followed by args = kwargs.
        args = kwargs.update(logged_args)
        if self.write_log:
            write_log(self.file, str(time.time()), name, input_ids=input_ids, output_ids=output_ids, args=args)
        if method == 'at':
            return
        return results[0] if len(results) == 1 else results
| j2zhao/DSClean | ds_clean/logged_array.py | logged_array.py | py | 11,555 | python | en | code | 0 | github-code | 36 |
18374339178 | import numpy as np
from src.utils import *
def ica(X, iterations, limit = 1e-5):
    """FastICA: estimate independent sources from the mixed signals X.

    X is centered and whitened, then one unmixing vector per row is found
    by fixed-point iteration with deflation against the components already
    extracted; iteration stops early once |<w, w_new>| is within *limit*
    of 1.  Returns the estimated sources S = W @ X.
    """
    X = whiten(center(X))
    n_components = X.shape[0]
    W = np.zeros((n_components, n_components), dtype=X.dtype)
    for i in range(n_components):
        w = np.random.rand(n_components)
        for _ in range(iterations):
            w_next = update_w(w, X)
            if i >= 1:
                # Deflate: remove projections onto already-found components.
                w_next -= np.dot(np.dot(w_next, W[:i].T), W[:i])
            converged = np.abs(np.abs((w * w_next).sum()) - 1) < limit
            w = w_next
            if converged:
                break
        W[i, :] = w
    return np.dot(W, X)
| sashrika15/Unsupervised_Learning_Algorithms | component_analysis/ICA/src/ica.py | ica.py | py | 519 | python | en | code | 7 | github-code | 36 |
10411828796 | import text_file as tf
import text_dictionary as td
import text_corpus as tc
import text_sentence_extraction as se
def task_9(texts_dir, output_dir):
    """Build the normalized dictionary for the corpus and write it to 'task_9'."""
    corpus_text = tc.get_corpus_text(texts_dir)
    dictionary = td.create_normalized_dictionary(corpus_text)
    tf.write_file(td.dictionary_to_string(dictionary), output_dir, "task_9")
def task_10(texts_dir, output_dir):
    """Compare the normalized vs stemmed dictionaries and write the report to 'task_10'."""
    text = tc.get_corpus_text(texts_dir)
    dictionary_normalized = td.create_normalized_dictionary(text)
    dictionary_stemmed = td.create_stemmed_dictionary(text)
    parts = ["Normalized:\n" + td.dictionary_to_string(dictionary_normalized),
             "\n\nStemmed:\n" + td.dictionary_to_string(dictionary_stemmed),
             "\n\nDIFFERENCE"]
    difference = td.compare_dictionaries(dictionary_normalized, dictionary_stemmed)
    # One section per comparison bucket, each value on its own line.
    for header, key in (("\n\nOnly normalized:\n", 'normalized'),
                        ("\n\nOnly stemmed:\n", 'stemmed'),
                        ("\n\nCommon:\n", 'common')):
        parts.append(header)
        parts.extend(value + "\n" for value in difference[key])
    tf.write_file("".join(parts), output_dir, "task_10")
def task_11(texts_dir, output_dir):
    """Extract TF-IDF key words (normalized and stemmed) and write them to 'task_11'."""
    normalized_keywords = tc.get_key_words(texts_dir, "normalized")
    stemmed_keywords = tc.get_key_words(texts_dir, "stemmed")
    report = 'Normalized key words:\n{0}\n\nStemmed key words:\n{1}'.format(
        td.dictionary_to_string(normalized_keywords),
        td.dictionary_to_string(stemmed_keywords))
    tf.write_file(report, output_dir, "task_11")
def task_12(texts_dir, output_dir):
    """Summarize the corpus via sentence extraction (10% compression); write 'task_12'."""
    compression_ratio = 0.1
    tf_idf_result, sentence_result, text_result = se.sentence_extraction(
        texts_dir, compression_ratio)
    report = "Normalized words:\n{0}\n\nSentences:\n{1}\n\nFinal text:\n{2}".format(
        str(tf_idf_result), sentence_result, text_result)
    tf.write_file(report, output_dir, "task_12")
| EkatherinaS/Data-Analysis-Technologies-in-Internet | Lab1/lab1.py | lab1.py | py | 2,200 | python | en | code | 0 | github-code | 36 |
8756905825 | # -*- coding: utf-8 -*-
from odoo import fields, models, api
class AccountJournal(models.Model):
    # Link each journal to the Point-of-Sale payment modes that settle
    # through it; AccountPayment.create() below reads this to pick a default.
    _inherit = 'account.journal'
    of_pos_payment_mode_ids = fields.Many2many(
        comodel_name='of.account.payment.mode', string=u"Mode de paiement associé au journal pour le point de vente")
class AccountMoveLine(models.Model):
    _inherit = 'account.move.line'
    @api.model
    def create(self, values):
        """Remap the move line's income account through its taxes' account mapping.

        Only active when the caller sets the ``check_account_tax`` context key.
        """
        # Only act when the 'check_account_tax' context flag is present
        if self._context.get('check_account_tax'):
            # Check whether account_id can be redefined by the line's taxes
            if values.get('account_id') and values.get('tax_ids'):
                income_account = values.get('account_id')
                account = self.env['account.account'].browse(income_account)
                tax_ids = values.get('tax_ids')
                # tax_ids comes as an ORM command triple, e.g. [(6, 0, [ids])]
                for tax_id in tax_ids[0][2]:
                    # Fetch the tax and look up its mapped account
                    tax = self.env['account.tax'].browse(tax_id)
                    # If the tax remaps the account, store the new one in values.
                    # NOTE(review): with several taxes the last tax in the list
                    # wins -- confirm that is intended.
                    income_account = tax.map_account(account).id
                    values['account_id'] = income_account
        return super(AccountMoveLine, self).create(values)
class AccountPayment(models.Model):
    _inherit = 'account.payment'
    @api.model
    def create(self, values):
        """Default the payment mode from the journal's POS payment modes.

        Applies only when no mode was provided, the journal is a POS journal
        (``journal_user``), and a mode matching the current user's company exists.
        """
        if not values.get('of_payment_mode_id') and values.get('journal_id'):
            journal = self.env['account.journal'].browse(values.get('journal_id'))
            if journal.journal_user:
                # Keep only the payment modes of the current user's company
                corresponding_payment_mode_id = journal.of_pos_payment_mode_ids.filtered(
                    lambda pm: pm.company_id == self.env.user.company_id)
                if corresponding_payment_mode_id and corresponding_payment_mode_id[0]:
                    values.update({'of_payment_mode_id': corresponding_payment_mode_id[0].id})
        return super(AccountPayment, self).create(values)
| odof/openfire | of_point_of_sale/models/account.py | account.py | py | 2,047 | python | fr | code | 3 | github-code | 36 |
37000566143 | from itertools import product
from messenger import *
def _write_matrix(builder, matrix):
# Write as column major
for col, row in product(range(4), range(4)):
# Float here since ShuffleLog stores matrices as float
builder.add_float(matrix[row][col])
class ShuffleLogAPI:
    """Messenger bridge between the tag tracker and ShuffleLog.

    Publishes detection/test data and answers environment queries.  Field
    order in each message is a wire contract with the ShuffleLog side --
    do not reorder the builder calls.
    """
    _MSG_QUERY_ENVIRONMENT = "TagTracker:QueryEnvironment"
    _MSG_ENVIRONMENT = "TagTracker:Environment"
    def __init__(self, conn_params, tag_infos, camera_infos):
        # conn_params: dict with host/port/name/mute_errors for MessengerClient
        host = conn_params['host']
        port = conn_params['port']
        name = conn_params['name']
        mute_errors = conn_params['mute_errors']
        self.msg = MessengerClient(host, port, name, mute_errors=mute_errors)
        # Reply to environment queries as they arrive via read_messages().
        self.msg.add_handler(ShuffleLogAPI._MSG_QUERY_ENVIRONMENT, lambda t, r: self._on_query_environment(t, r))
        self.tag_infos = tag_infos
        self.camera_infos = camera_infos
    def read(self):
        # Pump incoming messages; invokes registered handlers.
        self.msg.read_messages()
    def shutdown(self):
        # Close the messenger connection.
        self.msg.disconnect()
    # This is temporary
    def publish_detection_data(self, detections):
        """Send one 'TagTracker:TestData' message: count, then per detection
        the pose matrix, the camera's robot-space matrix, and the tag id."""
        builder = self.msg.prepare('TagTracker:TestData')
        builder.add_int(len(detections))
        for detect in detections:
            _write_matrix(builder, detect['pose'])
            _write_matrix(builder, detect['camera'].robot_position)
            builder.add_int(detect['tag_id'])
        builder.send()
    def publish_test_matrices(self, matrices):
        """Send a 'TagTracker:TestMtx' message: count followed by each matrix."""
        builder = self.msg.prepare('TagTracker:TestMtx')
        builder.add_int(len(matrices))
        for matrix in matrices:
            _write_matrix(builder, matrix)
        builder.send()
    def _on_query_environment(self, type, reader):
        """Handler: reply with tag sizes/ids/transforms and camera names/ports/poses."""
        print('[debug] sending environment data to ShuffleLog')
        builder = self.msg.prepare(ShuffleLogAPI._MSG_ENVIRONMENT)
        builder.add_int(len(self.tag_infos))
        for tag in self.tag_infos:
            builder.add_double(tag['size'])
            builder.add_int(tag['id'])
            _write_matrix(builder, tag['transform'])
        builder.add_int(len(self.camera_infos))
        for camera in self.camera_infos:
            builder.add_string(camera['name'])
            builder.add_int(camera['port'])
            _write_matrix(builder, camera['robot_pose'])
        builder.send()
| recordrobotics/Jetson2023 | borrowed/TagTracker-master/src/shufflelog_api.py | shufflelog_api.py | py | 2,316 | python | en | code | 0 | github-code | 36 |
37084706650 | import asyncio
import aiohttp
from aiohttp import ClientSession
from utils import page_status
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from fake_useragent import UserAgent
from selenium.webdriver.common.by import By
class PriseParser:
    """Selenium-based price scraper for several Ukrainian grocery sites.

    NOTE(review): a single Chrome driver is created in __init__ but
    get_page() closes AND quits it in its `finally` block, so only the
    first page load can succeed -- every later call hits a dead driver.
    Also, time.sleep() blocks the asyncio event loop, so the "parallel"
    tasks below actually run one after another.
    """
    # CSS selectors for the price element on each shop's product page.
    ATB_regular_divclass = '[class="product-price__top"]'
    EKO_regular_divclass = '[class="jsx-2be52a4b5bdfcc8a Price__value_title"]'
    VARUS_special_divclass = '[class="sf-price__special"]'
    SILPO_regular_divclass = '[class="current-integer"]'
    def __init__(self):
        # Headless Chrome with a random user agent and automation flag hidden.
        self.options = webdriver.ChromeOptions()
        self.useragent = UserAgent()
        self.options.add_argument(f'user-agent={self.useragent.random}')
        self.options.add_argument('--disable-blink-features=AutomationControlled')
        self.options.add_argument('--headless')
        # NOTE(review): hard-coded absolute chromedriver path -- not portable.
        self.serv = Service('/home/andrey/Python Projects/OOP-and-other/selenium_python/chrome_driver/chromedriver')
        self.driver=webdriver.Chrome(options=self.options,service=self.serv)
    async def get_page(self,url):
        """Load *url*, wait 5 s, then tear the driver down (see class note)."""
        try:
            print('Открываем страницу...')
            page=self.driver.get(url)
            time.sleep(5)
            print('Страница закрылась!')
            # print('Getting item\'s price....')
            # price=self.driver.find_element(By.CSS_SELECTOR,div_class)
            # print(price.text)
        except Exception as ex:
            print(ex)
        finally:
            self.driver.close()
            self.driver.quit()
    async def all_at_the_same_time(self):
        """Fetch all four product pages as asyncio tasks and time the run."""
        start=time.time()
        urls = [
            'https://www.atbmarket.com/product/kartopla-1-gat',
            'https://eko.zakaz.ua/uk/products/ovochi-kartoplia--ekomarket00000000667970/',
            'https://varus.ua/kartoplya-1-gatunok-vag',
            'https://shop.silpo.ua/product/kartoplia-bila-531296'
        ]
        task_atb=asyncio.create_task(self.get_page(urls[0]))
        task_eko = asyncio.create_task(self.get_page(urls[1]))
        task_varus = asyncio.create_task(self.get_page(urls[2]))
        task_silpo = asyncio.create_task(self.get_page(urls[3]))
        await task_atb
        await task_eko
        await task_varus
        await task_silpo
        end=time.time()
        print(f'Выполнение кода заняло: {end-start:.4f} c')
res=PriseParser()
asyncio.run(res.all_at_the_same_time()) | Sautenko-Andrey/OOP-and-other | selenium_python/chrome_driver/simple_try.py | simple_try.py | py | 2,459 | python | en | code | 0 | github-code | 36 |
3826160034 | """empty message
Revision ID: 14c462e99a28
Revises: 630e94f464d4
Create Date: 2021-09-16 16:48:21.728550
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '14c462e99a28'
down_revision = '630e94f464d4'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add the nullable text column ``projects.link``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('projects', sa.Column('link', sa.Text(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the ``projects.link`` column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('projects', 'link')
    # ### end Alembic commands ###
| knedgen/flaskwebsiteproject | migrations/versions/14c462e99a28_.py | 14c462e99a28_.py | py | 649 | python | en | code | 0 | github-code | 36 |
24327828185 | import os, socket
from time import time
import numpy as np
import tensorflow as tf
tf_float_prec = tf.float64
from pdb import set_trace as st
#from keras import backend as K
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM as LSTM_LAYER
#from keras.models import Sequential, load_model
from keras import optimizers, regularizers, initializers, losses
from dovebirdia.deeplearning.networks.base import AbstractNetwork, FeedForwardNetwork
from dovebirdia.utilities.base import dictToAttributes, saveAttrDict, saveDict
from dovebirdia.datasets.domain_randomization import DomainRandomizationDataset
from dovebirdia.datasets.outliers import generate_outliers
# Pick a matplotlib backend by host: 'pengy' has a display, any other
# machine (e.g. headless cluster nodes) must use the non-interactive
# 'agg' backend, which has to be selected before importing pyplot.
machine = socket.gethostname()
if machine == 'pengy':
    import matplotlib.pyplot as plt
else:
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
class LSTM(FeedForwardNetwork):
    """
    LSTM Class

    Keras/TF1 stacked-LSTM regression network trained on domain-randomized
    trial data.  Hyperparameters arrive via the params dict and are assumed
    to be exposed as self._* attributes by the base class (seq_len,
    hidden_dims, epochs, train_ground, ...) -- TODO confirm against
    FeedForwardNetwork.
    """
    def __init__(self, params=None):
        assert isinstance(params,dict)
        super().__init__(params=params)
    ##################
    # Public Methods #
    ##################
    def evaluate(self,x=None,y=None,labels=None,
                 eval_ops=None,
                 attributes=None,
                 save_results=None):
        """Window x/y into LSTM sequences, then defer to the base evaluate.

        NOTE(review): `labels` and `eval_ops` are accepted but never used and
        are not forwarded to the base class.
        """
        x, y = self._generateDataset(x,y)
        return super().evaluate(x=x,y=y,attributes=attributes,save_results=save_results)
    ###################
    # Private Methods #
    ###################
    def _fitDomainRandomization(self, dr_params=None, save_model=False):
        """Train on freshly generated domain-randomized data every epoch.

        Returns the history dict (per-epoch train/val loss and MSE, plus
        runtime in minutes).
        """
        # instantiate the dataset object from its class entry in dr_params
        #self._dr_dataset = DomainRandomizationDataset(**dr_params)
        self._dr_dataset = dr_params.pop('class')(**dr_params)
        # dictionaries to hold training and validation data
        train_feed_dict = dict()
        val_feed_dict = dict()
        start_time = time()
        with tf.Session() as sess:
            # initialize variables
            sess.run(tf.global_variables_initializer())
            for epoch in range(1, self._epochs+1):
                # fresh randomized train/val datasets each epoch
                train_data = self._dr_dataset.generateDataset()
                val_data = self._dr_dataset.generateDataset()
                # train and val loss lists
                train_loss_list = list()
                val_loss_list = list()
                train_mse_list = list()
                val_mse_list = list()
                # loop over trials
                for x_train, y_train, x_val, y_val in zip(train_data['x_test'],train_data['y_test'],
                                                          val_data['x_test'],val_data['y_test']):
                    # generate minibatches
                    x_train_mb, y_train_mb = self._generateMinibatches(x_train,y_train)
                    # Window into rank-3 LSTM tensors; when _train_ground is
                    # False the network is trained to reproduce its own input.
                    x_train_mb, y_train_mb = self._generateDataset(x_train_mb, y_train_mb) if self._train_ground else self._generateDataset(x_train_mb, x_train_mb)
                    x_val, y_val = self._generateDataset(np.expand_dims(x_val,axis=0), np.expand_dims(y_val,axis=0)) if self._train_ground else \
                                   self._generateDataset(np.expand_dims(x_val,axis=0), np.expand_dims(x_val,axis=0))
                    for x_mb, y_mb in zip(x_train_mb,y_train_mb):
                        train_feed_dict.update({self._X:x_mb,self._y:y_mb})
                        sess.run(self._optimizer_op, feed_dict=train_feed_dict)
                        train_loss, train_mse = sess.run([self._loss_op,self._mse_op],feed_dict=train_feed_dict)
                        train_loss_list.append(train_loss)
                        train_mse_list.append(train_mse)
                    for x_v, y_v in zip(x_val,y_val):
                        val_feed_dict.update({self._X:x_v,self._y:y_v})
                        val_loss, val_mse = sess.run([self._loss_op,self._mse_op],feed_dict=val_feed_dict)
                        val_loss_list.append(val_loss)
                        val_mse_list.append(val_mse)
                # NOTE(review): train_loss/val_loss here are the scalars from
                # the LAST minibatch only; the collected *_list values are
                # never used.  np.asarray(train_loss_list).mean() etc. was
                # probably intended -- confirm before relying on the history.
                self._history['train_loss'].append(np.asarray(train_loss).mean())
                self._history['val_loss'].append(np.asarray(val_loss).mean())
                self._history['train_mse'].append(np.asarray(train_mse).mean())
                self._history['val_mse'].append(np.asarray(val_mse).mean())
                print('Epoch {epoch}, Training Loss/MSE {train_loss:0.4}/{train_mse:0.4}, Val Loss/MSE {val_loss:0.4}/{val_mse:0.4}'.format(epoch=epoch,
                                                                                                                                            train_loss=self._history['train_loss'][-1],
                                                                                                                                            train_mse=self._history['train_mse'][-1],
                                                                                                                                            val_loss=self._history['val_loss'][-1],
                                                                                                                                            val_mse=self._history['val_mse'][-1]))
            self._history['runtime'] = (time() - start_time) / 60.0
            if save_model:
                self._saveModel(sess,'trained_model.ckpt')
        return self._history
    def _buildNetwork(self):
        """Assemble the stacked LSTM layers plus a linear Dense output head."""
        self._setPlaceholders()
        # weight regularizer
        # NOTE(review): `weight_regularizer_scale` is undefined in this scope,
        # so this call always raises NameError and the bare except silently
        # disables weight regularization -- confirm the intended scale
        # attribute (e.g. self._weight_regularizer_scale).
        try:
            self._weight_regularizer = self._weight_regularizer(weight_regularizer_scale)
        except:
            self._weight_regularizer = None
        self._y_hat = self._X
        for layer in range(len(self._hidden_dims)):
            # Only the first layer declares its input shape; only the last
            # layer collapses the sequence (return_sequences=False).
            input_timesteps = (self._seq_len) if layer == 0 else None
            input_dim = self._input_dim if layer == 0 else None
            return_seq = self._return_seq if layer < len(self._hidden_dims)-1 else False
            print('Input timesteps: {input_timesteps}'.format(input_timesteps = input_timesteps))
            print('Input Dim: {input_dim}'.format(input_dim = input_dim))
            print('Return Seq: {return_seq}'.format(return_seq=return_seq))
            print('units: {units}'.format(units = self._hidden_dims[layer]))
            if layer == 0 and self._stateful:
                # stateful first layer requires a fully specified batch_input_shape
                self._y_hat = LSTM_LAYER(
                    units = self._hidden_dims[layer],
                    activation = self._activation,
                    batch_input_shape = (self._seq_len, input_timesteps, input_dim),
                    bias_initializer = initializers.Constant(value=self._bias_initializer),
                    kernel_regularizer = self._weight_regularizer,
                    recurrent_regularizer = self._recurrent_regularizer,
                    kernel_constraint = self._weight_constraint,
                    return_sequences = return_seq,
                    stateful = self._stateful,
                    dropout=self._input_dropout_rate)(self._y_hat)
            else:
                # non-stateful first layer / deeper layers use input_shape
                self._y_hat = LSTM_LAYER(
                    units = self._hidden_dims[layer],
                    activation = self._activation,
                    input_shape = (input_timesteps, input_dim),
                    bias_initializer = initializers.Constant(value=self._bias_initializer),
                    kernel_regularizer = self._weight_regularizer,
                    recurrent_regularizer = self._recurrent_regularizer,
                    kernel_constraint = self._weight_constraint,
                    return_sequences = return_seq,
                    stateful = self._stateful,
                    dropout=self._dropout_rate)(self._y_hat)
        self._y_hat = Dense(units=self._output_dim)(self._y_hat)
    def _setPlaceholders(self):
        """Create the windowed-input and next-sample target placeholders."""
        # input and output placeholders
        self._X = tf.placeholder(dtype=tf.float32, shape=(None,self._seq_len,self._input_dim), name='X')
        self._y = tf.placeholder(dtype=tf.float32, shape=(None,self._input_dim), name='y')
    def _generateDataset( self, x, y):
        """Slide a seq_len window over each trial.

        For every trial, inputs become overlapping windows of seq_len samples
        and the target is the sample immediately after each window.  Returns
        (list of (n_windows, seq_len, dim) arrays,
         list of (n_windows, dim) arrays), one pair per trial.
        """
        x_wins = list()
        y_wins = list()
        #for trial_idx in range(x.shape[0]):
        for x_trial,y_trial in zip(x,y):
            x_wins_trial, y_wins_trial = list(), list()
            for sample_idx in range(x_trial.shape[0]-self._seq_len):
                x_wins_trial.append(x_trial[sample_idx:sample_idx+self._seq_len,:])
                y_wins_trial.append(y_trial[sample_idx+self._seq_len,:])
            x_wins.append(np.asarray(x_wins_trial))
            y_wins.append(np.asarray(y_wins_trial))
        return x_wins, y_wins
    def _setLoss(self):
        """Loss = MSE plus any collected regularization losses, cast to float64."""
        self._mse_op = tf.cast(self._loss(self._y,self._y_hat),tf_float_prec)
        self._loss_op = self._mse_op + tf.cast(tf.losses.get_regularization_loss(), tf_float_prec)
| mattweiss/public | deeplearning/networks/lstm_tf.py | lstm_tf.py | py | 11,298 | python | en | code | 0 | github-code | 36 |
3801917693 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from collections import namedtuple
import inspect
ControllableAttributeConfig = namedtuple("ControllableAttributeConfig", "driven_attribute ignored_attributes facemodel_param_name facemodel_param_value facemodel_param_value_other")
class ControllabilityMetricConfigs:
    """Catalogue of ControllableAttributeConfig constants for the
    controllability metric -- one config per driven face attribute."""
    @staticmethod
    def all_configs():
        """Return (name, config) pairs for every config constant on this class."""
        # getmembers with "not isroutine" keeps data attributes only; the
        # filter then drops dunders such as __module__ and __dict__.
        all_attributes = inspect.getmembers(ControllabilityMetricConfigs, lambda a: not inspect.isroutine(a))
        configs = [x for x in all_attributes if not (x[0].startswith('__') and x[0].endswith('__'))]
        return configs
    # Hair colours: drive one colour, ignore the competing colour attributes.
    black_hair_config = ControllableAttributeConfig(
        driven_attribute = "Black_Hair",
        ignored_attributes = ["Blond_Hair", "Brown_Hair", "Gray_Hair"],
        facemodel_param_name = "head_hair_color",
        facemodel_param_value = (0, 1, 0),
        facemodel_param_value_other = (0, 0.1, 0.1)
    )
    blond_hair_config = ControllableAttributeConfig(
        driven_attribute = "Blond_Hair",
        ignored_attributes = ["Black_Hair", "Brown_Hair", "Gray_Hair"],
        facemodel_param_name = "head_hair_color",
        facemodel_param_value = (0, 0.1, 0.1),
        facemodel_param_value_other = (0, 1, 0)
    )
    brown_hair_config = ControllableAttributeConfig(
        driven_attribute = "Brown_Hair",
        ignored_attributes = ["Blond_Hair", "Black_Hair", "Gray_Hair"],
        facemodel_param_name = "head_hair_color",
        facemodel_param_value = (0, 0.6, 0.5),
        facemodel_param_value_other = (0, 0.1, 0.1)
    )
    gray_hair_config = ControllableAttributeConfig(
        driven_attribute = "Gray_Hair",
        ignored_attributes = ["Blond_Hair", "Brown_Hair", "Black_Hair"],
        facemodel_param_name = "head_hair_color",
        facemodel_param_value = (0.7, 0.7, 0),
        facemodel_param_value_other = (0.0, 0.7, 0)
    )
    # Expressions: driven via named blendshape weights.
    mouth_open_config = ControllableAttributeConfig(
        driven_attribute = "Mouth_Slightly_Open",
        ignored_attributes = ["Narrow_Eyes", "Smiling"],
        facemodel_param_name = "blendshape_values",
        facemodel_param_value = {"jaw_opening": 0.2},
        facemodel_param_value_other = {"jaw_opening": -0.05}
    )
    smile_config = ControllableAttributeConfig(
        driven_attribute = "Smiling",
        ignored_attributes = ["Narrow_Eyes", "Mouth_Slightly_Open"],
        facemodel_param_name = "blendshape_values",
        facemodel_param_value = {"mouthSmileLeft": 1.0, "mouthSmileRight": 1.0},
        facemodel_param_value_other = {"mouthFrownLeft": 1.0, "mouthFrownRight": 1.0}
    )
    squint_config = ControllableAttributeConfig(
        driven_attribute = "Narrow_Eyes",
        ignored_attributes = ["Smiling", "Mouth_Slightly_Open"],
        facemodel_param_name = "blendshape_values",
        facemodel_param_value = {"EyeBLinkLeft": 0.7, "EyeBLinkRight": 0.7},
        facemodel_param_value_other = {"EyeWideLeft": 1.0, "EyeWideRight": 1.0}
    )
    # Facial hair: driven via a point in the beard-style embedding space.
    mustache_config = ControllableAttributeConfig(
        driven_attribute = "Mustache",
        ignored_attributes = ["No_Beard", "Goatee", "Sideburns"],
        facemodel_param_name = "beard_style_embedding",
        # "beard_Wavy_f"
        facemodel_param_value = [
            0.8493434358437133,
            3.087059026013613,
            0.46986106722598997,
            -1.3821969829871341,
            -0.33103870587106415,
            -0.03649891754263812,
            0.049692808518749985,
            0.10727920600451613,
            -0.32365312847867017
        ],
        # "beard_none"
        facemodel_param_value_other = [
            -1.1549744366277825,
            -0.15234213575276162,
            -0.3302730721199086,
            -0.47053537289207514,
            -0.158377484760156,
            0.3357074575072504,
            -0.44934623275285585,
            0.013085621430078971,
            -0.0021044358910661896
        ]
    )
| microsoft/ConfigNet | confignet/metrics/controllability_metric_configs.py | controllability_metric_configs.py | py | 4,045 | python | en | code | 104 | github-code | 36 |
27596382692 | import matplotlib.pyplot as plot
import gradientDescent as gd
import loadData as data
import numpy as np
def plotConvergence(J_history):
    """Plot the cost J(theta) per gradient-descent iteration on a new figure."""
    iterations = range(len(J_history))
    plot.figure()
    plot.plot(iterations, J_history, 'bo')
    plot.title(r'Convergence of J($\theta$)')
    plot.xlabel('Number of iterations')
    plot.ylabel(r'J($\theta$)')
def main():
    """Run gradient descent (alpha=0.01, 1500 iterations) and show convergence."""
    initial_theta = np.zeros((2,1))
    theta, J_history = gd.gradientDescent(data.X, data.y, initial_theta, 0.01, 1500)
    plotConvergence(J_history)
    plot.show()
# Run main() only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| mirfanmcs/Machine-Learning | Supervised Learning/Linear Regression/Linear Regression with One Variable/Python/plotConvergence.py | plotConvergence.py | py | 578 | python | en | code | 0 | github-code | 36 |
22636173430 | # scapy 패키지 import
from scapy.all import *
# Read a pcap file and classify the file uploads found in it.
def pcap_read(filiepath):
    """Scan *filiepath* for HTTP uploads (payloads containing "filename=").

    Returns a dict with:
      total_pks -- number of packets in the capture
      detect    -- {filename: leading bytes of an "MZ" (PE) payload}
      suspect   -- {filename: ''} for uploads whose payload could not be
                   parsed or does not look like a PE executable
    """
    packets = rdpcap(filiepath)
    data = dict()
    detect = dict()
    suspect = dict()
    data['total_pks'] = len(packets)
    for packet in packets:
        if not packet.haslayer(Raw):
            continue
        raw = packet[Raw].load
        if b"filename=" not in raw:
            continue
        pos = raw.split(b"filename=")[1].split(b"&")[0]
        filename = pos.split(b"\r")[0]
        # Classify each upload independently; a parse failure only affects
        # this one.  (The old code's try/finally unconditionally reclassified
        # the *last* file seen and crashed with KeyError/TypeError when no
        # upload was present at all.)
        try:
            payload = pos.split(b" GMT\r\n\r")[1]
            if b"MZ" in payload:
                # Keep the bytes up to the first 0x90 as a magic-number sample.
                detect[filename] = payload.split(b'\x90')[0]
            else:
                suspect[filename] = ''
        except Exception as err:
            print(err)
            suspect[filename] = ''
    print(f"detect: {detect}")
    print(f"suspect: {suspect}")
    data['detect'] = detect
    data['suspect'] = suspect
    return data
if __name__ == '__main__':
filiepath = 'exercise.pcap'
print(pcap_read(filiepath))
| Junghayeon/security_capstone_design | malware.py | malware.py | py | 1,317 | python | en | code | 0 | github-code | 36 |
38715938852 | #!/usr/bin/env python3
def knot_round(lengths, size=256):
    """Run one round of the AoC 2017 day-10 knot hash.

    For each entry in *lengths*, reverses a circular span of that many
    elements starting at the current position, then advances the position
    by length + skip (skip increments after every reversal).  Returns the
    final list of ``size`` integers.
    """
    marks = list(range(size))
    pos = 0
    for skip, length in enumerate(lengths):
        # Indices of the circular span, read its values, write them back
        # reversed -- this handles wrap-around without special cases.
        span = [(pos + i) % size for i in range(length)]
        values = [marks[i] for i in span]
        for idx, val in zip(span, reversed(values)):
            marks[idx] = val
        pos = (pos + length + skip) % size
    return marks


def main():
    """Read comma-separated lengths from input.txt and print the puzzle answer."""
    with open('input.txt', 'r') as f:
        sizes = [int(a) for a in f.readline().strip().split(',')]
    message = knot_round(sizes)
    print(message[0] * message[1])


if __name__ == '__main__':
    main()
| lvaughn/advent | 2017/10/hash.py | hash.py | py | 735 | python | en | code | 1 | github-code | 36 |
36121022223 | import os
from pathlib import Path
from forte.data.base_pack import PackType
from forte.evaluation.base import Evaluator
from forte.data.extractor.utils import bio_tagging
from ft.onto.base_ontology import Sentence, Token, EntityMention
def _post_edit(element):
if element[0] is None:
return "O"
return "%s-%s" % (element[1], element[0].ner_type)
def _get_tag(data, pack):
    """Build per-token BIO label strings for one sentence batch from *pack*."""
    tokens = [pack.get_entry(tid) for tid in data["Token"]["tid"]]
    mentions = [pack.get_entry(tid) for tid in data["EntityMention"]["tid"]]
    return [_post_edit(pair) for pair in bio_tagging(tokens, mentions)]
def _write_tokens_to_file(
    pred_pack, pred_request, refer_pack, refer_request, output_filename
):
    """Write CoNLL-style evaluation rows (one token per line) to *output_filename*.

    Each line holds: index, word, POS, chunk, gold tag, predicted tag;
    sentences are separated by blank lines, as conll03eval expects.
    """
    # Context manager guarantees the file is closed even when iteration
    # raises (the original leaked the handle on error).
    with open(output_filename, "w+") as opened_file:
        for pred_data, refer_data in zip(
            pred_pack.get_data(**pred_request), refer_pack.get_data(**refer_request)
        ):
            pred_tag = _get_tag(pred_data, pred_pack)
            refer_tag = _get_tag(refer_data, refer_pack)
            words = refer_data["Token"]["text"]
            pos = refer_data["Token"]["pos"]
            chunk = refer_data["Token"]["chunk"]
            for i, (word, position, chun, tgt, pred) in enumerate(
                zip(words, pos, chunk, refer_tag, pred_tag), 1
            ):
                opened_file.write(
                    "%d %s %s %s %s %s\n" % (i, word, position, chun, tgt, pred)
                )
            opened_file.write("\n")
class CoNLLNEREvaluator(Evaluator):
    """Evaluator for Conll NER task.

    Dumps predictions/references in CoNLL format, scores them with the
    bundled perl script conll03eval.v2, and parses its summary line.
    NOTE(review): requires perl on PATH and shells out via os.system with
    unquoted f-string paths -- paths with spaces will break the command.
    """
    def __init__(self):
        super().__init__()
        # self.test_component = CoNLLNERPredictor().name
        # Scratch files for the dumped tokens and the perl script's output.
        self.output_file = "tmp_eval.txt"
        self.score_file = "tmp_eval.score"
        self.scores = {}
    def consume_next(self, pred_pack: PackType, ref_pack: PackType):
        """Score *pred_pack* against *ref_pack* and cache the metrics."""
        # Field requests for the predicted pack (no gold 'ner' field needed).
        pred_getdata_args = {
            "context_type": Sentence,
            "request": {
                Token: {"fields": ["chunk", "pos"]},
                EntityMention: {
                    "fields": ["ner_type"],
                },
                Sentence: [],  # span by default
            },
        }
        # Reference pack additionally carries the gold 'ner' field.
        refer_getdata_args = {
            "context_type": Sentence,
            "request": {
                Token: {"fields": ["chunk", "pos", "ner"]},
                EntityMention: {
                    "fields": ["ner_type"],
                },
                Sentence: [],  # span by default
            },
        }
        _write_tokens_to_file(
            pred_pack=pred_pack,
            pred_request=pred_getdata_args,
            refer_pack=ref_pack,
            refer_request=refer_getdata_args,
            output_filename=self.output_file,
        )
        # The perl scorer ships two directories above this module.
        eval_script = (
            Path(os.path.abspath(__file__)).parents[2]
            / "forte/utils/eval_scripts/conll03eval.v2"
        )
        os.system(
            f"perl {eval_script} < {self.output_file} > " f"{self.score_file}"
        )
        # Parse the second line of the scorer output, e.g.
        # "accuracy:  98.0%; precision:  90.0%; recall:  89.0%; FB1:  89.5"
        with open(self.score_file, "r") as fin:
            fin.readline()
            line = fin.readline()
            fields = line.split(";")
            acc = float(fields[0].split(":")[1].strip()[:-1])
            precision = float(fields[1].split(":")[1].strip()[:-1])
            recall = float(fields[2].split(":")[1].strip()[:-1])
            f_1 = float(fields[3].split(":")[1].strip())
        self.scores = {
            "accuracy": acc,
            "precision": precision,
            "recall": recall,
            "f1": f_1,
        }
    def get_result(self):
        """Return the metrics dict from the most recent consume_next call."""
        return self.scores
| asyml/forte | examples/tagging/evaluator.py | evaluator.py | py | 3,552 | python | en | code | 230 | github-code | 36 |
17684698502 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import biplist
import os.path
# Settings consumed by dmgbuild; `defines` is injected by dmgbuild at runtime
# (-D key=value on the command line), with fallbacks after the comma.
application = defines.get('app', '../dist/AYAB-Launcher.app')
appname = os.path.basename(application)
format = defines.get('format', 'UDBZ')
size = defines.get('size', None)
# DMG contents: the app bundle plus a shortcut to /Applications.
files = [ application ]
symlinks = { 'Applications': '/Applications' }
icon_locations = {
    appname: (140, 90),
    'Applications': (500, 90)
}
# Window appearance.
background = 'builtin-arrow'
#background = None
show_status_bar = False
show_tab_view = False
show_toolbar = False
show_pathbar = False
show_sidebar = False
sidebar_width = 180
window_rect = ((100, 100), (640, 280))
default_view = 'icon-view'
# Icon-view options.
show_icon_preview = False
arrange_by = None
grid_offset = (0, 0)
grid_spacing = 100
scroll_position = (0, 0)
label_pos = 'bottom' # or 'right'
text_size = 16
icon_size = 128
| Adrienne200/ayab-desktop | mac-build/dmg_settings.py | dmg_settings.py | py | 845 | python | en | code | null | github-code | 36 |
27438339817 | #!/usr/bin/env python
#coding:utf-8
from math import *
from string import *
from fractions import *
from itertools import *
def sieve(N):
    """Return the set of all primes strictly below N (sieve of Eratosthenes).

    Uses a boolean table instead of per-element set.discard calls, and an
    integer `i*i < N` bound instead of the float ceil(sqrt(N)), which is
    both faster and exact for large N.
    """
    if N < 2:
        return set()
    is_prime = [True] * N
    is_prime[0] = is_prime[1] = False
    i = 2
    while i * i < N:
        if is_prime[i]:
            # Multiples below i*i were already crossed off by smaller primes.
            for j in range(i * i, N, i):
                is_prime[j] = False
        i += 1
    return {n for n in range(2, N) if is_prime[n]}
def main():
    """Project Euler 46: print odd composites that are not prime + 2*square."""
    primes = sieve(10000)
    # Every number reachable as (prime + twice a square), per the conjecture.
    reachable = {p + 2 * k * k for k in range(1, 100) for p in primes}
    for candidate in range(1, 10000, 2):
        if candidate not in reachable and candidate not in primes:
            print(candidate)
if __name__ == "__main__":
    main()
| tomoki/project-euler | 46/main.py | main.py | py | 627 | python | en | code | 0 | github-code | 36 |
1021808135 | import cv2
import numpy as np
import os
import pyrealsense2 as rs
# Directory where captured image pairs will be stored.
# NOTE(review): placeholder path; os.path.join below will fail to save
# until it points at an existing directory.
save_dir = "path/to/save/directory"
# Initialize the RealSense camera: 640x480 colour (BGR8) and depth (Z16) at 30 fps.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
profile = pipeline.start(config)
# Counter of saved RGB/depth image pairs (used in the filenames).
saved_pairs = 0
try:
    while True:
        # Grab the next synchronized frame set from the camera.
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        depth_frame = frames.get_depth_frame()
        # Convert the frames to numpy arrays for OpenCV.
        color_image = np.asanyarray(color_frame.get_data())
        depth_image = np.asanyarray(depth_frame.get_data())
        # Show both streams.
        cv2.imshow("Color Image", color_image)
        cv2.imshow("Depth Image", depth_image)
        # Poll the keyboard (1 ms).
        key = cv2.waitKey(1)
        # Save the current pair when Space is pressed.
        if key == ord(" "):
            filename_color = os.path.join(save_dir, f"{saved_pairs}_RGB.jpg")
            filename_depth = os.path.join(save_dir, f"{saved_pairs}_D.jpg")
            cv2.imwrite(filename_color, color_image)
            cv2.imwrite(filename_depth, depth_image)
            saved_pairs += 1
        # Exit when ESC is pressed.
        # NOTE(review): 'key == key == 27' chains to (key == key) and
        # (key == 27), i.e. it is just key == 27 -- works, but confusing.
        if key == key == 27:
            break
finally:
    # Stop the camera and close the display windows.
    pipeline.stop()
    cv2.destroyAllWindows()
| Roman212Koval/Dual-channel_CNN | make_dataset.py | make_dataset.py | py | 1,838 | python | uk | code | 1 | github-code | 36 |
21172342682 | from __future__ import absolute_import
import json
from keshihua.celery import app
from datetime import datetime
from demo.pachong import new_job
from demo.graph import avg_salary,lan_fre,bar_job,job_cate,jieba_count
# Data-refresh Celery task: re-scrape job data and regenerate chart files.
@app.task
def start_get_data():
    """Scrape fresh job postings, rebuild every chart/word-cloud data file,
    record the update timestamp to UpdateTime.json, then purge the celery
    queue so duplicate refresh requests are dropped."""
    print('正在获取并更新数据...')
    count=new_job()
    print('处理专业薪资图中...')
    avg_salary()
    print('处理语言使用情况图中...')
    lan_fre()
    print('处理技术块状图中...')
    bar_job()
    print('处理岗位饼图..')
    job_cate()
    jc = jieba_count()
    nowtime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Persist update metadata (timestamp, row count, word-cloud count).
    with open('demo/data/UpdateTime.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps({
            'code': 200,
            '数据获取时间': nowtime,
            '数据量':count,
            '词云数据量':jc
        }, ensure_ascii=False, indent=4))
    print('获取完毕数据已更新!')
    print('更新时间:' + nowtime)
    # Drop any queued duplicate tasks once this refresh finishes.
    with app.connection() as conn:
        conn.default_channel.queue_purge(queue='celery')
| junhqin/SH-Internship-KSH | CeleryTask/task.py | task.py | py | 1,085 | python | en | code | 1 | github-code | 36 |
37215946531 | from django.core.management.base import BaseCommand
from academ.models import Apartment, Building, ImageGallery
import json
import os
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
def check_apartment(building):
    """
    Check whether any apartments exist in the database for *building*.

    Returns 1 if at least one apartment is found, otherwise 0.
    @param building: the building to check
    @return: 0 or 1
    """
    # NOTE(review): .first() returns None instead of raising, so the
    # Apartment.DoesNotExist branch below is unreachable dead code.
    try:
        apartments = Apartment.objects.filter(building=building).first()
        if apartments:
            print(Apartment.objects.filter(building=building).first())
            return 1
        else:
            print('No apartments found...')
    except Apartment.DoesNotExist:
        print('No apartments found...')
    return 0
def last_floor_apartments_generator(apartment_data, building):
    """Create one 14th-floor apartment from *apartment_data* unless an
    apartment with that number already exists.

    NOTE(review): the existence check filters by number only, not by
    building -- confirm apartment numbers are globally unique.
    """
    apartment_instance = Apartment.objects.filter(number=apartment_data['number'],)
    if apartment_instance:
        print(f"Apartment with number: '{apartment_data['number']}' already exists!")
        pass
    else:
        print(f"Generating apartment with number: '{apartment_data['number']}'...")
        apartment = Apartment(building=building,
                              floor=14,
                              area=apartment_data['area'],
                              number=apartment_data['number'],
                              section=apartment_data['section'],
                              type=apartment_data['apartment_type'])
        apartment.save()
def apartment_generator(params, building):
    """
    Generate apartments from a parameter set and save them to the database.

    Floors run from 4 (or 2 for section 3) up to building.floors - 1; the
    apartment number advances by params['delta'] on each floor after the first.
    @param params: parameter set (number, delta, area, section, apartment_type)
    @param building: the building the apartments belong to
    """
    number = params['number']
    if params['section'] != 3:
        start = 4
    else:
        start = 2
    for i in range(start, building.floors):
        if i != start:
            number += params['delta']
        # Skip numbers that already exist (filtered by number only).
        apartment_instance = Apartment.objects.filter(number=number)
        if apartment_instance:
            print(f"Apartment with number: '{number}' already exists!")
            pass
        else:
            print(f"Generating apartment with number: '{number}'...")
            apartment = Apartment(building=building, floor=i, area=params['area'],
                                  number=number, section=params['section'], type=params['apartment_type'])
            apartment.save()
def gallery_generator(building):
    """Attach a default gallery image to *building* if it has none yet.

    NOTE(review): reads MEDIA_ROOT/pictures/building.jpg from disk -- the
    command fails if that fixture file is missing; the handle is also
    never closed explicitly.
    """
    gallery_instance = ImageGallery.objects.filter(building=building).first()
    if gallery_instance:
        print(ImageGallery.objects.filter(building=building).first(), ' already exist!')
        return
    else:
        filepath = f'{settings.MEDIA_ROOT}/pictures/building.jpg'
        uploaded_image = SimpleUploadedFile(content=open(filepath, 'rb').read(),
                                            content_type="image/jpg",
                                            name='image.jpg')
        instance = ImageGallery(building=building, image=uploaded_image)
        instance.save()
class Command(BaseCommand):
    """manage.py command: populate the 'Academ' building with apartments
    (from the bundled JSON parameter files) and a default gallery image."""
    def handle(self, *args, **options):
        address = 'Academ'
        # Get or create the target building (14 floors, 3 sections).
        try:
            building = Building.objects.get(address=address)
        except Building.DoesNotExist:
            building = Building(address=address, floors=14, sections=3)
            building.save()
        # if check_apartment(building):
        #     return
        # Parameter files live next to this command module.
        # NOTE(review): both files are opened without being closed.
        filepath = os.path.abspath(os.path.dirname(__file__))
        json_data = open(f'{filepath}/apartment_data.json')
        json_data_last_floor = open(f'{filepath}/last_floor_data.json')
        params_list = json.load(json_data)
        params_list_last_floor = json.load(json_data_last_floor)
        print('\nGenerating apartments up to 14th floor...\n')
        for element in params_list:
            apartment_generator(element, building)
        print('\nGenerating 14th floor apartments...\n')
        for element_last_floor in params_list_last_floor:
            last_floor_apartments_generator(element_last_floor, building)
        print('\nGenerating gallery instances...\n')
        gallery_generator(building)
| pepegaFace/freedom | freedom/academ/management/commands/content_generator.py | content_generator.py | py | 4,319 | python | en | code | 1 | github-code | 36 |
21167613561 | from application import app
from flask import render_template
from application.models import *
from flask_restplus import Api, Resource, fields
from flask.ext.restplus.reqparse import RequestParser
from flask.ext.restplus.inputs import date
api = Api(app, version='1.0', title='ElesVotam API')
ns = api.namespace('elesvotam', description='ElesVotam operations')
votacao_parser = RequestParser()
votacao_parser.add_argument('votacaoid', type=int)
votacao_fields = {'votacaoid': fields.Integer(),
'sessao_id': fields.Integer(),
'tipo': fields.String(),
'materia': fields.String(),
'ementa': fields.String(),
'resultado': fields.String(),
'presentes': fields.String(),
'sim': fields.Integer(),
'nao': fields.Integer(),
'abstencao': fields.Integer(),
'branco': fields.Integer(),
'notas_rodape': fields.String(),
}
votacao_model = api.model('Votacao', votacao_fields)
@ns.route('/votacao')
class ElesVotamVotacaosApi(Resource):
    """REST resource returning one voting (Votacao) record by its id."""

    @api.doc(parser=votacao_parser)
    @api.marshal_with(votacao_model)
    def get(self):
        # 'votacaoid' query parameter selects the record; .one() raises if
        # it is missing or duplicated.
        args = votacao_parser.parse_args()
        votacaoid = args['votacaoid']
        votacao = db.session.query(Votacao).filter(Votacao.votacaoid == votacaoid).one()
        return votacao
sessao_parser = RequestParser()
sessao_parser.add_argument('sessaoid', type=int)
sessao_parser.add_argument('data', type=date)
sessao_fields = {'id': fields.Integer(),
'nome': fields.String(),
'data': fields.Date(),
'votacoes': fields.Nested(votacao_model)
}
sessao_model = api.model('sessao', sessao_fields)
@ns.route('/sessao')
class ElesVotamSessaosApi(Resource):
    """REST resource returning sessions: one by id, or all sessions on a date."""

    @api.doc(parser=sessao_parser)
    @api.marshal_with(sessao_model)
    def get(self):
        # Either 'sessaoid' or 'data' selects the session(s); in both cases the
        # related votings are attached before marshalling.
        args = sessao_parser.parse_args()
        sessaoid = args['sessaoid']
        sessao_date = args['data']
        if not sessao_date:
            # Single session lookup by primary key.
            sessao = db.session.query(Sessao).filter(Sessao.id == sessaoid).one()
            votacoes = db.session.query(Votacao).filter(Votacao.sessao_id == sessao.id).all()
            sessao.votacoes = votacoes
        else:
            sessao_date = sessao_date.strftime('%Y-%m-%d')
            sessao = db.session.query(Sessao).filter(Sessao.data == sessao_date).all()
            # Attach each session's votings.
            for i,s in enumerate(sessao):
                votacoes = db.session.query(Votacao).filter(Votacao.sessao_id == s.id).all()
                sessao[i].votacoes = votacoes
        return sessao
partido_fields = {'id': fields.Integer(),
'nome': fields.String(),
}
partido_model = api.model('partido', partido_fields)
@ns.route('/partidos')
class ElesVotamPartidosApi(Resource):
    """REST resource listing every political party."""

    @api.marshal_with(partido_model)
    def get(self):
        # All Partido rows, marshalled to id/nome pairs.
        partidos = db.session.query(Partido).all()
        return partidos
partido_parser = RequestParser()
partido_parser.add_argument('nome', type=str)
vereador_fields = {'id': fields.Integer(),
'nome': fields.String(),
'idparlamentar': fields.String()
}
vereador_model = api.model('vereador', vereador_fields)
@ns.route('/partidoVereadores')
class ElesVotamPartidoVereadoresApi(Resource):
    """REST resource listing the councillors (vereadores) of one party."""

    @api.doc(parser=partido_parser)
    @api.marshal_with(vereador_model)
    def get(self):
        # Look the party up by exact name; .one() raises if absent/ambiguous.
        args = partido_parser.parse_args()
        partido_nome = args['nome']
        partido = db.session.query(Partido).filter(Partido.nome == partido_nome).one()
        vereadores = db.session.query(Vereador).filter(Vereador.partido_id == partido.id).all()
        return vereadores
votacao_votos_parser = RequestParser()
votacao_votos_parser.add_argument('votacao_id', type=int)
voto_fields = {'id': fields.Integer(),
'vereador': fields.Nested(vereador_model),
'valor': fields.String()
}
voto_model = api.model('voto', voto_fields)
@ns.route('/votacaoVotos')
class ElesVotamVotacaoVotosApi(Resource):
    """REST resource listing every vote cast in one voting session."""

    @api.doc(parser=votacao_votos_parser)
    @api.marshal_with(voto_model)
    def get(self):
        # Resolve the Votacao first (by external votacaoid), then fetch its
        # votes via the internal primary key.
        args = votacao_votos_parser.parse_args()
        votacao_id = args['votacao_id']
        votacao = db.session.query(Votacao).filter(Votacao.votacaoid == votacao_id).one()
        votos = db.session.query(Voto).filter(Voto.votacao_id == votacao.id).all()
        return votos
@app.route('/')
@app.route('/index/')
def index():
    """Render the landing page."""
    return render_template('info/index.html', title='Flask-Bootstrap')
@app.route('/hello/<username>/')
def hello_username(username):
    """Render a greeting page personalised with ``username``."""
    return render_template('info/hello.html', title="Flask-Bootstrap, Hi %s"
                           % (username), username=username)
| okfn-brasil/elesvotam | application/manager.py | manager.py | py | 4,853 | python | pt | code | 0 | github-code | 36 |
74050386664 | import parlai.core.build_data as build_data
import os
import subprocess
import shutil
import csv
import time
from parlai.core.build_data import DownloadableFile
from parlai.utils.io import PathManager
RESOURCES = [
DownloadableFile(
'https://github.com/deepmind/narrativeqa/archive/master.zip',
'narrative_qa.zip',
'd9fc92d5f53409f845ba44780e6689676d879c739589861b4805064513d1476b',
)
]
def get_rows_for_set(reader, req_set):
    """Return the rows whose 'set' column (whitespace-stripped) equals ``req_set``."""
    chosen = []
    for row in reader:
        if row['set'].strip() == req_set:
            chosen.append(row)
    return chosen
def read_csv_to_dict_list(filepath):
    """Open ``filepath`` and return (csv.DictReader, open file handle).

    The caller owns the returned handle and is responsible for closing it.
    """
    handle = open(filepath, 'r')
    reader = csv.DictReader(handle, delimiter=',')
    return reader, handle
def write_dict_list_to_csv(dict_list, filepath):
    """Write a list of same-keyed dicts to ``filepath`` as CSV with a header row."""
    fieldnames = list(dict_list[0].keys())
    with PathManager.open(filepath, 'w') as out:
        writer = csv.DictWriter(out, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(dict_list)
def divide_csv_into_sets(csv_filepath, sets=('train', 'valid', 'test')):
    """Split ``csv_filepath`` into one CSV per split name in ``sets``.

    Each output file <base>_<split>.csv is written next to the input and
    contains only the rows whose 'set' column equals that split name.
    """
    reader, handle = read_csv_to_dict_list(csv_filepath)
    stem = os.path.basename(csv_filepath).split('.')[0]
    parent = os.path.dirname(csv_filepath)
    for split in sets:
        out_path = os.path.join(parent, stem + '_' + split + '.csv')
        # Rewind and rescan the file for every split.
        handle.seek(0)
        write_dict_list_to_csv(get_rows_for_set(reader, split), out_path)
    handle.close()
def make_folders(base_path, sets=('train', 'valid', 'test')):
    """Ensure one sub-directory per split name exists under ``base_path``.

    Uses os.makedirs(..., exist_ok=True) to avoid the check-then-create race
    of the previous exists()/mkdir() pair and to tolerate re-runs.
    """
    for split in sets:
        os.makedirs(os.path.join(base_path, split), exist_ok=True)
def move_files(base_path, sets=('train', 'valid', 'test')):
    """Move every '<name>_<split>.csv' in ``base_path`` into its '<split>/'
    sub-folder, renaming the file back to '<name>.csv'."""
    for entry in os.listdir(base_path):
        for split in sets:
            suffix = '_' + split + '.csv'
            if entry.endswith(suffix):
                final_name = entry[:-len(suffix)] + '.csv'
                entry = os.path.join(base_path, entry)
                shutil.move(entry, os.path.join(base_path, split, final_name))
def try_downloading(directory, row):
    """Attempt to download one story described by ``row`` into ``directory``.

    Returns True if the story file already exists with a plausible size
    (> 19000 bytes); False otherwise, meaning a download was attempted and
    the caller should re-check / retry.

    Gutenberg downloads are throttled with a 2s sleep.  If the fetched file
    turns out to be gzip-compressed it is renamed to *.content.gz and
    unpacked in place.
    """
    document_id, kind, story_url = row['document_id'], row['kind'], row['story_url']
    story_path = os.path.join(directory, document_id + '.content')
    actual_story_size = 0
    if os.path.exists(story_path):
        with PathManager.open(story_path, 'rb') as f:
            actual_story_size = len(f.read())
    # 19000 bytes is the heuristic threshold for a "complete" story file.
    if actual_story_size <= 19000:
        if kind == 'gutenberg':
            time.sleep(2)
        build_data.download(story_url, directory, document_id + '.content')
    else:
        return True
    # `file -b` reports the content type of the freshly downloaded file.
    file_type = subprocess.check_output(['file', '-b', story_path])
    file_type = file_type.decode('utf-8')
    if 'gzip compressed' in file_type:
        gz_path = os.path.join(directory, document_id + '.content.gz')
        shutil.move(story_path, gz_path)
        build_data.untar(gz_path)
    return False
def download_stories(path):
    """Download every story listed in <path>/documents.csv into <path>/tmp,
    retrying each unfinished download up to five times."""
    documents_csv = os.path.join(path, 'documents.csv')
    tmp_dir = os.path.join(path, 'tmp')
    build_data.make_dir(tmp_dir)
    with PathManager.open(documents_csv, 'r') as f:
        for row in csv.DictReader(f, delimiter=','):
            print("Downloading %s (%s)" % (row['wiki_title'], row['document_id']))
            finished = try_downloading(tmp_dir, row)
            attempt = 0
            while not finished and attempt < 5:
                if attempt != 0:
                    print("Retrying (%d retries left)" % (5 - attempt - 1))
                finished = try_downloading(tmp_dir, row)
                attempt += 1
def build(opt):
    """Download and arrange the NarrativeQA dataset under <datapath>/NarrativeQA.

    Steps: fetch the dataset archive, download the raw stories, split the
    summary/question/document CSVs into train/valid/test files, and move each
    split's files into its own folder.  Idempotent via build_data.built().
    """
    dpath = os.path.join(opt['datapath'], 'NarrativeQA')
    version = None
    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')
        if build_data.built(dpath):
            # an older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # Download the data.
        for downloadable_file in RESOURCES:
            downloadable_file.download_file(dpath)
        print('downloading stories now')
        base_path = os.path.join(dpath, 'narrativeqa-master')
        download_stories(base_path)
        # move from tmp to stories
        tmp_stories_path = os.path.join(base_path, 'tmp')
        new_stories_path = os.path.join(base_path, 'stories')
        shutil.move(tmp_stories_path, new_stories_path)
        # divide into train, valid and test for summaries
        summaries_csv_path = os.path.join(
            base_path, 'third_party', 'wikipedia', 'summaries.csv'
        )
        new_path = os.path.join(base_path, 'summaries.csv')
        shutil.move(summaries_csv_path, new_path)
        divide_csv_into_sets(new_path)
        # divide into sets for questions
        questions_path = os.path.join(base_path, 'qaps.csv')
        divide_csv_into_sets(questions_path)
        # divide into sets for documents
        documents_path = os.path.join(base_path, 'documents.csv')
        divide_csv_into_sets(documents_path)
        # move specific set's files into their set's folder
        make_folders(base_path)
        move_files(base_path)
        # move narrativeqa-master to narrative_qa
        new_path = os.path.join(dpath, 'narrative_qa')
        shutil.move(base_path, new_path)
        # mark the data as built
        build_data.mark_done(dpath, version_string=version)
| facebookresearch/ParlAI | parlai/tasks/narrative_qa/build.py | build.py | py | 5,467 | python | en | code | 10,365 | github-code | 36 |
39610379992 | # Realizar un programa que permita ingresar el número de mes de un año (1,…,12), en base al
# valor ingresado presenta el número de días que tiene ese mes.
def main():
    """Ask the user for a month number (1..12) and print that month's day count."""
    titulo_1 = "cantidad de días según el mes"
    titulo_2 = titulo_1.upper()
    print(titulo_2)
    mes = int(input("Ingrese el número del mes: "))
    # Map each month group to its day count (February fixed at 28; leap
    # years are out of scope for this exercise).  Collapsing the duplicated
    # print into a single statement removes the copy/paste repetition.
    if mes in [1, 3, 5, 7, 8, 10, 12]:
        dias = 31
    elif mes == 2:
        dias = 28
    elif mes in [4, 6, 9, 11]:
        dias = 30
    else:
        print("El valor digitado no corresponde a ningun mes.")
        return
    print("El mes", mes, "tiene", dias, "dias.")
main()
| jeanchuqui/fp-utpl-18-evaluaciones | eval-parcial-primer-bimestre/Ejercicio6.py | Ejercicio6.py | py | 706 | python | es | code | 0 | github-code | 36 |
13617657820 | import unittest
from datetime import datetime
from pathlib import Path
from we1s_chomp import db, model
class TestModel(unittest.TestCase):
    """Round-trip tests for the we1s_chomp manifest model classes.

    Each test creates a model object, saves it to disk with
    db.save_manifest_file, reloads it with db.load_manifest_file and checks
    the reloaded attributes match exactly.
    """

    def setUp(self):
        # All manifest files are written under this fixture directory.
        self.dirpath = Path("test/data")
    def test_source(self):
        """Source survives a save/load round trip unchanged."""
        # Create source.
        source = model.Source(
            name="we1s",
            webpage="http://we1s.ucsb.edu",
            tags=["hello"],
            country="US",
            language="en-US",
            copyright="(C) 2017-2019 UCSB and the WE1S Project",
        )
        self.assertIsInstance(source, model.Source)
        self.assertEqual(source.name, "we1s")
        # Save to disk.
        db.save_manifest_file(source, self.dirpath)
        self.assertTrue((self.dirpath / f"{source.name}.json").exists())
        # Load from disk.
        source2 = db.load_manifest_file("we1s", self.dirpath)
        self.assertDictEqual(vars(source), vars(source2))
    def test_query(self):
        """Query derives its name from source/term/dates and round-trips."""
        # Create source.
        query = model.Query(
            source_name="we1s",
            query_str="humanities",
            start_date=datetime(year=2000, month=1, day=1),
            end_date=datetime(year=2019, month=12, day=31),
        )
        self.assertIsInstance(query, model.Query)
        self.assertEqual(query.name, "we1s_humanities_2000-01-01_2019-12-31")
        # Save to disk.
        db.save_manifest_file(query, self.dirpath)
        self.assertTrue((self.dirpath / f"{query.name}.json").exists())
        # Load from disk.
        query2 = db.load_manifest_file(
            "we1s_humanities_2000-01-01_2019-12-31", self.dirpath
        )
        self.assertDictEqual(vars(query), vars(query2))
    def test_response(self):
        """Response keeps its explicit name and round-trips."""
        # Create response.
        response = model.Response(
            name="chomp-response_we1s_humanities_2000-01-01_2019-12-31_0",
            url="http://we1s.ucsb.edu",
            content="12345 Hello!",
            api_data_provider="wordpress",
            source_name="we1s",
            query_name="we1s_humanities_2000-01-01_2019-12-31",
        )
        self.assertIsInstance(response, model.Response)
        self.assertEqual(
            response.name, "chomp-response_we1s_humanities_2000-01-01_2019-12-31_0"
        )
        # Save to disk.
        db.save_manifest_file(response, self.dirpath)
        self.assertTrue((self.dirpath / f"{response.name}.json").exists())
        # Load from disk.
        response2 = db.load_manifest_file(
            "chomp-response_we1s_humanities_2000-01-01_2019-12-31_0", self.dirpath
        )
        self.assertDictEqual(vars(response), vars(response2))
    def test_article(self):
        """Article keeps its explicit name and round-trips."""
        # Create article.
        article = model.Article(
            name="chomp_we1s_humanities_2000-01-01_2019-12-31_0",
            url="http://we1s.ucsb.edu",
            title="WhatEvery1Says Article",
            pub="WhatEvery1Says",
            pub_date=datetime(year=2019, month=12, day=31),
            content_html="<h1>Hello!</h1>",
            copyright="(C) 2017-2019 UCSB and the WE1S Project",
            api_data_provider="wordpress",
        )
        self.assertIsInstance(article, model.Article)
        self.assertEqual(article.name, "chomp_we1s_humanities_2000-01-01_2019-12-31_0")
        # Save to disk.
        db.save_manifest_file(article, self.dirpath)
        self.assertTrue((self.dirpath / f"{article.name}.json").exists())
        # Load from disk.
        article2 = db.load_manifest_file(
            "chomp_we1s_humanities_2000-01-01_2019-12-31_0", self.dirpath
        )
        self.assertDictEqual(vars(article), vars(article2))
73037188583 | # example URL(postgres): postgresql://username:password@host:port/database_name (postgresql://postgres:postgres@localhost:5432/mydatabase)
import pandas as pd
import json
import sqlalchemy as sql
from sqlalchemy import create_engine
from sqlalchemy_utils import create_database, database_exists
class DatabaseHandler:
    """Convenience wrapper around SQLAlchemy for creating databases,
    inserting tabular data (DataFrame / JSON / CSV) and running ad-hoc SQL.
    """

    def __init__(self):
        """Initialize the handler with no active engine."""
        self.engine = None
        print("Initialized class")

    def create_db(self, url):
        """Create a new database at ``url`` if it does not already exist.

        Args:
            url (str): SQLAlchemy database URL.
        """
        self.engine = create_engine(url)
        if not database_exists(self.engine.url):
            create_database(self.engine.url)
            print("Database created")
        else:
            print("Database already exists")

    def connect_db(self, url):
        """Connect to an existing database at ``url``.

        Args:
            url (str): SQLAlchemy database URL.
        """
        self.engine = create_engine(url)
        if database_exists(self.engine.url):
            # Engine.connect() takes no URL argument (passing one raised a
            # TypeError / bound to an unrelated keyword on older versions).
            # Open and release one connection to verify reachability.
            self.engine.connect().close()
            print("Database connected")
        else:
            print("Database doesn't exist")

    def insert_data(self, data, tablename):
        """Insert a DataFrame, JSON file or CSV file into ``tablename``.

        Args:
            data: a pandas DataFrame, or a path to a .json / .csv file.
            tablename (str): destination table (replaced if it exists).

        Raises:
            ValueError: if the file format is not supported.
        """
        if isinstance(data, pd.DataFrame):
            df = data
        elif data.endswith(".json"):
            with open(data, 'r') as f:
                file = json.load(f)
            df = pd.DataFrame(file)
        elif data.endswith(".csv"):
            df = pd.read_csv(data)
        else:
            raise ValueError("Unsupported file format")
        df.to_sql(tablename, con=self.engine, if_exists='replace', index=False)
        print("Data entered correctly")

    def executor(self, query):
        """Execute a SQL query on the connected database.

        Args:
            query (str): the SQL query to execute.

        Returns:
            list: result rows, or None if an error occurred.
        """
        try:
            # Engine.execute() was removed in SQLAlchemy 2.0; use an explicit
            # connection and text() so the call works on 1.4+ and 2.x alike.
            with self.engine.connect() as connection:
                result = connection.execute(sql.text(query))
                return [row for row in result]
        except Exception as e:
            print(f"Error: '{e}'")

    def close_connection(self):
        """Dispose of the engine's connection pool."""
        self.engine.dispose()
        print("Database connection closed")
# Usage example: connect to a local Postgres instance and load a cleaned CSV.
# Requires the 'team_project2' database and clean_data/gpclean.csv to exist.
if __name__ == '__main__':
    test=DatabaseHandler()
    test.connect_db("postgresql://postgres:postgres@localhost:5432/team_project2")
    df = pd.read_csv("clean_data/gpclean.csv")
    test.insert_data(df, "gpclean3")
    #print(test.executor("SELECT * FROM public.revclean LIMIT 3"))
| Christian125px/team_project2 | src/database_handler.py | database_handler.py | py | 3,270 | python | en | code | 0 | github-code | 36 |
42939386801 | import os.path as osp
import numpy as np
import torch
import torch.utils.data as Data
from PIL import Image
__all__ = ['CUB_200_2011']
class CUB_200_2011(Data.Dataset):
    """CUB-200-2011 birds dataset split for zero-shot learning.

    The 200 classes are partitioned into a source ('train') and target
    ('val') split by <root>/train_classes.txt and val_classes.txt; only the
    images of the selected split's classes are exposed, along with that
    split's normalized per-class attribute vectors.
    """

    def __init__(self, root_dir, phase='train', transform=None):
        # phase selects which class split (and attribute matrix) this
        # dataset instance serves.
        super(CUB_200_2011, self).__init__()
        assert phase in ('train', 'val')
        self.root_dir = root_dir
        self.file = osp.join(root_dir, phase + '_' + 'classes.txt')
        # First token of each line is the (string) class index of interest.
        with open(self.file) as file:
            self.interst_classes = [line.strip().split(' ')[0] for line in file.readlines()]
        # Bidirectional mapping between dataset class ids and dense labels.
        self.classIdx2Idx = {class_idx: idx for idx, class_idx in enumerate(self.interst_classes)}
        self.Idx2classIdx = {idx: class_idx for idx, class_idx in enumerate(self.interst_classes)}
        self.transform = transform
        self.images_path = osp.join(root_dir, 'images')
        id_class_file = osp.join(root_dir, 'image_class_labels.txt')
        id_images_file = osp.join(root_dir, 'images.txt')
        attributes_file = osp.join(root_dir, 'attributes', 'class_attribute_labels_continuous.txt')
        id_images = self._read_file(id_images_file)
        # self.set holds (relative image path, dense label) pairs.
        self.set = list()
        with open(id_class_file) as file:
            for line in file.readlines():
                infos = line.strip().split(' ')
                if infos[1] in self.interst_classes:
                    self.set.append((id_images[infos[0]], self.classIdx2Idx[infos[1]]))
        # Normalize attributes
        source_norm_attris, target_norm_attris = self._normalize_attris(attributes_file, True)
        if phase == 'train':
            self.attributes = torch.FloatTensor(source_norm_attris)
        else:
            self.attributes = torch.FloatTensor(target_norm_attris)
    def _read_file(self, read_file):
        """Parse a two-column id/value file into a dict (first token -> second)."""
        dct = dict()
        with open(read_file) as file:
            for line in file.readlines():
                infos = line.strip().split(' ')
                dct[infos[0]] = infos[1]
        return dct
    def _normalize_attris(self, attributes_file, mean_correction=True):
        """Load and normalize the class-attribute matrix.

        Scales percentages into [0, 1], replaces negative (missing) entries
        with the source-split column mean (or 0.5), and, if mean_correction,
        recenters each column around 0.5 using the source-split mean.
        Returns (source_rows, target_rows).
        """
        source_file = osp.join(self.root_dir, 'train_classes.txt')
        target_file = osp.join(self.root_dir, 'val_classes.txt')
        # Class files are 1-indexed; convert to 0-based row indices.
        source_idx = [(int(line.strip().split(' ')[0]) - 1) for line in open(source_file)]
        target_idx = [(int(line.strip().split(' ')[0]) - 1) for line in open(target_file)]
        codes = np.loadtxt(attributes_file).astype(float)
        if codes.max() > 1:
            codes /= 100.
        code_mean = codes[source_idx, :].mean(axis=0)
        for s in range(codes.shape[1]):
            codes[codes[:, s] < 0, s] = code_mean[s] if mean_correction else 0.5
        # Mean correction
        if mean_correction:
            for s in range(codes.shape[1]):
                codes[:, s] = codes[:, s] - code_mean[s] + 0.5
        return codes[source_idx], codes[target_idx]
    @property
    def get_class_attributes(self):
        # Normalized attribute matrix of this instance's split.
        return self.attributes
    def __len__(self):
        return len(self.set)
    def __getitem__(self, index):
        """Return (image, dense label) for ``index``; applies ``transform`` if set."""
        image_file = osp.join(self.images_path, self.set[index][0])
        image_label = int(self.set[index][1])
        image = Image.open(image_file).convert('RGB')
        if self.transform:
            image = self.transform(image)
        return image, image_label
| SmallHedgehog/ZeroShotLearning | dataset/CUB_200_2011.py | CUB_200_2011.py | py | 3,311 | python | en | code | 2 | github-code | 36 |
def calculaFatorial(n):
    """Return n! computed iteratively; returns 1 for n <= 0."""
    resultado = 1
    for fator in range(2, n + 1):
        resultado *= fator
    return resultado
def informaNumero():
    """Repeatedly read integers and print their factorial; a negative stops."""
    valor = 0
    while valor >= 0:
        valor = int(input("informe um numero inteiro: "))
        if valor < 0:
            print("Digite um valor positivo.\n"
                  "Tchau.")
            break
        print(calculaFatorial(valor))
informaNumero()
| wpaulow/coursera-python-1 | funcao-printFatorial.py | funcao-printFatorial.py | py | 399 | python | pt | code | 0 | github-code | 36 |
43735554270 | # Chapter V problem VI by Vincenzo Scotto Di Uccio
def main():
    """Prompt for fat and carbohydrate grams, validate them, report calories."""
    fat_grams = input("Enter the total amount of fat grams: ")
    carb_grams = input("Enter the total amount of carbohydrate grams: ")
    fat_grams = input_valid(fat_grams)
    carb_grams = input_valid(carb_grams)
    calculate(fat_grams, carb_grams)
def calculate(fat, carb):
    """Print calories contributed by fat (9 cal/g) and carbohydrates (4 cal/g)."""
    print("Calories from fat is: ", fat * 9)
    print("Calories from carbohydrates: ", carb * 4)
def input_valid(a_str):
    """Keep prompting until ``a_str`` contains only digits and dots, then
    return it as a float (a dot was ever seen) or an int otherwise."""
    is_float = False
    while True:
        has_error = False
        for ch in a_str:
            if '0' <= ch <= '9':
                continue
            if ch == '.':
                is_float = True
            else:
                has_error = True
                break
        if not has_error:
            break
        a_str = input("Error, invalid data entered, please try again. Enter the fats than the carbohydrates :")
    return float(a_str) if is_float else int(a_str)
main()
| vincenzo-scotto001/Python | chp5 problems/sco.chp5prob6.py | sco.chp5prob6.py | py | 1,203 | python | en | code | 0 | github-code | 36 |
5780964210 | from pygame import Surface, image, mixer
import os
from engine.common.validated import ValidatedDict
from engine.common.constants import LogConstants
from engine.common.logger import LogManager
class AssetManager:
    '''
    Loads image assets and plays sound effects from the engine asset tree.
    '''
    # Root directory that all asset lookups are relative to.
    asset_prefix = "./engine/assets"
    def __init__(self, config: ValidatedDict, logger: LogManager) -> None:
        self.logger = logger
        self.config = config
    def loadImage(self, asset_name: str) -> Surface:
        '''
        Load an image asset as a pygame Surface.
        Given:
            - asset_name: name of the asset, including extension.
        Returns: the loaded Surface, or None (implicitly) if the file
        is missing -- the failure is only logged.
        '''
        asset_path = f"{self.asset_prefix}/images/{asset_name}"
        if os.path.exists(asset_path):
            self.logger.writeLogEntry(f'Loading asset: {asset_name}', status=LogConstants.STATUS_OK_BLUE, tool="ASSET_MGR")
            return image.load(asset_path)
        else:
            self.logger.writeLogEntry(f'Couldn\'t find {asset_name}!', status=LogConstants.STATUS_FAIL, tool="ASSET_MGR")
    def playSfx(self, asset_name: str) -> Surface:
        '''
        Load and immediately play a sound effect.
        Given:
            - asset_name: name of the asset, including extension.
        Returns: Nothing.
        Raises: Exception if the config has no 'sound' section.
        '''
        asset_path = f"{self.asset_prefix}/sfx/{asset_name}"
        sound_settings = self.config.get_dict('sound')
        if sound_settings == None:
            raise Exception("Sound settings in JSON are missing!")
        if os.path.exists(asset_path):
            self.logger.writeLogEntry(f'Loading asset: {asset_name}', status=LogConstants.STATUS_OK_BLUE, tool="ASSET_MGR")
            sound = mixer.Sound(asset_path)
            # Volume is the configured sfx_volume minus a fixed 0.4 attenuation.
            # NOTE(review): this can go below 0 for small sfx_volume -- confirm intended.
            sound.set_volume(sound_settings.get('sfx_volume', 1.0)-0.4)
            sound.play()
        else:
            self.logger.writeLogEntry(f'Couldn\'t find {asset_name}!', status=LogConstants.STATUS_FAIL, tool="ASSET_MGR")
19139046842 | import sys
import pathlib
# base.pyのあるディレクトリの絶対パスを取得
current_dir = pathlib.Path(__file__).resolve().parent
# モジュールのあるパスを追加
sys.path.append(str(current_dir) + "/../")
import pandas as pd
import numpy as np
from mylib.address import Address
from mylib.mypandas import MyPandas as mp
import pandas as pd
import time
def main(input_csv, output_csv):
    """Fill missing 'prefecture' values in ``input_csv`` and write ``output_csv``.

    For every row whose 'prefecture' is missing, the prefecture is derived
    from the free-text 'place' column via Address.address_to_prefecture_and_city.
    """
    print("### start main ###")
    df = pd.read_csv(input_csv)
    rows = df.to_dict(orient="records")
    for index, row in enumerate(rows):
        # NOTE(review): 'is np.nan' is an identity check; pandas may produce
        # a NaN that is not the np.nan singleton -- consider pd.isna(). TODO confirm.
        if row["prefecture"] is np.nan:
            print(row)
            prefecture, city = Address.address_to_prefecture_and_city(row["place"])
            rows[index]["prefecture"] = prefecture
            print("## NEW ##", row)
    mp.to_csv(rows, output_csv)
    print("### end main ###")
# Usage: python estate_csv_setting.py <input_csv> <output_csv>
# Prints the elapsed wall-clock time in seconds when done.
if __name__ == "__main__":
    start = time.time()
    if len(sys.argv) == 3:
        input_csv = sys.argv[1]
        output_csv = sys.argv[2]
        main(input_csv, output_csv)
    else:
        print("Not Filename")
        exit()
    end = time.time()
    print(end - start, "s")
| sunajpdev/estates_appsheet | tool/estate_csv_setting.py | estate_csv_setting.py | py | 1,145 | python | en | code | 0 | github-code | 36 |
1123611190 | #!/usr/bin/python3
if __name__ == "__main__":
    """Print the addition of all arguments."""
    import sys

    # Sum the integer command-line arguments directly instead of indexing
    # into sys.argv with a manual range loop.
    print(sum(int(arg) for arg in sys.argv[1:]))
| Fran6ixneymar/alx-higher_level_programming | 0x02-python-import_modules/3-infinite_add.py | 3-infinite_add.py | py | 247 | python | en | code | 0 | github-code | 36 |
# ---------- PROBLEM ----------
# Create a random list filled with the characters H and T
# for heads and tails. Output the number of Hs and Ts
# Example Output
# Heads : 46
# Tails : 54

import random

# Build 100 simulated coin flips in one comprehension instead of
# growing the list with string concatenation.
flipList = [random.choice('HT') for _ in range(100)]

# Output results
print("Heads : ", flipList.count('H'))
print("Tails : ", flipList.count('T'))
37683812851 | from django.conf.urls import url, include
from .views import TopicDetailView, QuestionDetailView, TopicListView
# URL routes for the questions app: topic list at the root, topic detail
# under its pk, and numbered question pages nested below each topic.
urlpatterns = [
    url(r'^$', TopicListView.as_view(), name='topic-list'),
    url(r'^(?P<pk>\d+)/', include([
        url('^$', TopicDetailView.as_view(), name='topic-detail'),
        url(r'^question-(?P<number>\d+)/$', QuestionDetailView.as_view(), name='question-detail')
    ]))
]
| unixander/TestsApp | apps/questions/urls.py | urls.py | py | 401 | python | en | code | 0 | github-code | 36 |
14416867511 |
import sys
sys.path.append("..\\..\\public")
import dunhe_public
import auto
# Command-line usage (launched from a .bat script), e.g.:
#   python <this script> risk_data_manage
# The first argument names the config that AutoStartExe loads and runs.
if __name__ == '__main__':
    if len(sys.argv) > 1:
        logger = dunhe_public.SetLog("auto_start")
        auto_instance = auto.AutoStartExe(logger)
        auto_instance.load_config(sys.argv[1])
        auto_instance.run_config()
| matthew59gs/Projects | python/other/autoit/main.py | main.py | py | 368 | python | ja | code | 0 | github-code | 36 |
13989773677 | # -*- coding: utf-8 -*-
import time
import numpy
from ntplib import NTPClient
from .base import Utils, MultiTasks
from .task import IntervalTask
from .future import ThreadPool
from hagworm.extend.error import NTPCalibrateError
from hagworm.extend.interface import TaskInterface
class _Interface(TaskInterface):
    """NTP client interface definition.

    Concrete clients must provide start/stop lifecycle control, offset
    calibration, and the calibrated offset/timestamp properties.
    """
    def start(self):
        raise NotImplementedError()
    def stop(self):
        raise NotImplementedError()
    def is_running(self):
        raise NotImplementedError()
    def calibrate_offset(self):
        raise NotImplementedError()
    @property
    def offset(self):
        raise NotImplementedError()
    @property
    def timestamp(self):
        raise NotImplementedError()
class AsyncNTPClient(_Interface):
    """Asynchronous NTP client.

    Periodically samples one NTP server in a worker thread and keeps the
    median clock offset, which can be added to time.time() for a
    calibrated timestamp.
    """
    @classmethod
    async def create(cls, host):
        """Build a client, run an initial calibration, start the sync task."""
        client = cls(host)
        await client.calibrate_offset()
        client.start()
        return client
    def __init__(self, host, *, version=2, port=r'ntp', timeout=5, interval=3600, sampling=5):
        # interval: seconds between automatic re-calibrations;
        # sampling: NTP requests per calibration round (median is kept).
        self._settings = {
            r'host': host,
            r'version': version,
            r'port': port,
            r'timeout': timeout,
        }
        self._client = NTPClient()
        self._offset = 0
        # Single worker thread so blocking NTP requests do not stall the event loop.
        self._thread_pool = ThreadPool(1)
        self._sync_task = IntervalTask(self.calibrate_offset, interval)
        self._sampling = sampling
    def start(self):
        """Start the periodic calibration task."""
        return self._sync_task.start()
    def stop(self):
        """Stop the periodic calibration task."""
        return self._sync_task.stop()
    def is_running(self):
        """Return whether the periodic calibration task is active."""
        return self._sync_task.is_running()
    async def calibrate_offset(self):
        """Run one calibration round in the worker thread."""
        return await self._thread_pool.run(self._calibrate_offset)
    def _calibrate_offset(self):
        samples = []
        host_name = self._settings[r'host']
        # Sample several times and keep the median to reduce jitter.
        for _ in range(self._sampling):
            try:
                resp = self._client.request(**self._settings)
                samples.append(resp.offset)
            except Exception as err:
                Utils.log.error(f'NTP server {host_name} request error: {err}')
        if samples:
            self._offset = float(numpy.median(samples))
            Utils.log.debug(f'NTP server {host_name} offset median {self._offset} samples: {samples}')
        else:
            # No sample succeeded: refuse to keep a stale/unknown offset silently.
            raise NTPCalibrateError(f'NTP server {host_name} not available, timestamp uncalibrated')
    @property
    def offset(self):
        # Current clock offset (seconds) relative to the NTP server.
        return self._offset
    @property
    def timestamp(self):
        # NTP-calibrated wall-clock time.
        return time.time() + self._offset
class AsyncNTPClientPool(_Interface):
    """Pool of asynchronous NTP clients.

    Aggregates several servers and uses the median of their offsets for
    high availability against any single misbehaving server.
    """
    @classmethod
    async def create(cls, hosts):
        """Build a pool over ``hosts``, calibrate all clients and start them."""
        client_pool = cls()
        for host in hosts:
            client_pool.append(host)
        await client_pool.calibrate_offset()
        client_pool.start()
        return client_pool
    def __init__(self):
        self._clients = []
        self._running = False
    def append(self, host, *, version=2, port='ntp', timeout=5, interval=3600, sampling=5):
        """Add one NTP server; started immediately if the pool is running."""
        client = AsyncNTPClient(host, version=version, port=port, timeout=timeout, interval=interval, sampling=sampling)
        self._clients.append(client)
        if self._running:
            client.start()
    def start(self):
        """Start every client's periodic calibration task."""
        for client in self._clients:
            client.start()
        self._running = True
    def stop(self):
        """Stop every client's periodic calibration task."""
        for client in self._clients:
            client.stop()
        self._running = False
    def is_running(self):
        return self._running
    async def calibrate_offset(self):
        """Calibrate all clients concurrently."""
        tasks = MultiTasks()
        for client in self._clients:
            tasks.append(client.calibrate_offset())
        await tasks
    @property
    def offset(self):
        # Median offset across all clients.
        samples = []
        for client in self._clients:
            samples.append(client.offset)
        return float(numpy.median(samples))
    @property
    def timestamp(self):
        # NTP-calibrated wall-clock time using the pooled offset.
        return time.time() + self.offset
| wsb310/hagworm | hagworm/extend/asyncio/ntp.py | ntp.py | py | 4,131 | python | en | code | 13 | github-code | 36 |
1846177461 | from liberouterapi import app, config
from liberouterapi.dbConnector import dbConnector
from liberouterapi.modules.module import Module
# Load the NEMEA configuration file only when the main liberouter config
# does not already provide a [nemea] section.
if "nemea" not in config.config.sections():
    config.load(path = __path__[0] + '/config.ini')
conf_path = config['nemea']
# MongoDB connection used for NEMEA events and the dashboard.
nemea_conn = dbConnector("nemea",
        provider = "mongodb",
        config = {
            'database' : config['nemea']['database']
        })
# Collection that stores the NEMEA events.
nemea = nemea_conn.db[config['nemea']['collection']]
# Register the Flask blueprint all /nemea routes attach to.
nemea_bp = Module('nemea', __name__, url_prefix='/nemea', no_version=True)
from .events import *
from .Query import query
from .Stats import aggregate, top, count
from .Reporters import *
from .Status import *
from .supervisor.controllers import *
# Create index for DetectTime
nemea_bp.add_url_rule('/indexes', view_func=indexes, methods=['GET'])
# Get last N events
nemea_bp.add_url_rule('/events/<int:items>', view_func=get_last_events, methods=['GET'])
# Create a query based on GET params
nemea_bp.add_url_rule('/events/query', view_func=query, methods=['GET'])
# Aggregate stats about recent events specified by time range
nemea_bp.add_url_rule('/events/aggregate', view_func=aggregate, methods=['GET'])
# Get TOP events from each category
nemea_bp.add_url_rule('/events/top', view_func=top, methods=['GET'])
# Count events in given time window
nemea_bp.add_url_rule('/events/count', view_func=count, methods=['GET'])
# Get an event by its ID
nemea_bp.add_url_rule('/events/id/<string:id>', view_func=get_by_id, methods=['GET'])
# Whois lookup (unused)
nemea_bp.add_url_rule('/whois/<string:ip>', view_func=whois, methods=['GET'])
nemea_bp.add_url_rule('/reporters/config', view_func=get_nr_config, methods=['GET'])
nemea_bp.add_url_rule('/reporters/config', view_func=edit_nr_config, methods=['PUT'])
nemea_bp.add_url_rule('/status', view_func=nemea_main, methods=['GET'])
nemea_bp.add_url_rule('/status/stats', view_func=nemea_events, methods=['GET'])
# Supervisor API
nemea_bp.add_url_rule('/modules/<string:module_name>',
view_func=nemea_module.api_get_nemea_module_by_name,
methods=['GET'])
nemea_bp.add_url_rule('/modules/<string:module_name>',
view_func=nemea_module.api_update_nemea_module_by_name,
methods=['PUT'])
nemea_bp.add_url_rule('/modules/<string:module_name>',
view_func=nemea_module.api_delete_nemea_module_by_name,
methods=['DELETE'])
nemea_bp.add_url_rule('/modules',
view_func=nemea_module.api_create_new_nemea_module,
methods=['POST'])
nemea_bp.add_url_rule('/modules',
view_func=nemea_module.api_get_all_nemea_modules,
methods=['GET'])
nemea_bp.add_url_rule('/modules/<string:module_name>/instances',
view_func=nemea_module.api_get_instances_by_nemea_module_name,
methods=['GET'])
nemea_bp.add_url_rule('/instances/<string:instance_name>',
view_func=instance.api_get_instance_by_name,
methods=['GET'])
nemea_bp.add_url_rule('/instances/<string:instance_name>',
view_func=instance.api_update_instance_by_name,
methods=['PUT'])
nemea_bp.add_url_rule('/instances/<string:instance_name>',
view_func=instance.api_delete_instance_by_name,
methods=['DELETE'])
nemea_bp.add_url_rule('/instances/<string:instance_name>/stats',
view_func=instance.api_get_instance_stats_by_name,
methods=['GET'])
nemea_bp.add_url_rule('/instances',
view_func=instance.api_get_all_instances,
methods=['GET'])
nemea_bp.add_url_rule('/instances',
view_func=instance.api_create_new_instance,
methods=['POST'])
nemea_bp.add_url_rule('/instances/<string:instance_name>/<string:action>',
view_func=instance.api_control_instance,
methods=['POST'])
nemea_bp.add_url_rule('/instances/stats',
view_func=instance.api_get_all_instances_stats,
methods=['GET'])
nemea_bp.add_url_rule('/sysrepo/modules',
view_func=sysrepo.api_get_all_sysrepo_modules,
methods=['GET'])
nemea_bp.add_url_rule('/sysrepo/modules/<string:sysrepo_module_name>',
view_func=sysrepo.api_get_sysrepo_module_by_name,
methods=['GET'])
nemea_bp.add_url_rule('/sysrepo/load/<string:sysrepo_module_name>',
view_func=sysrepo.api_sysrepo_load_json,
methods=['POST'])
| zidekmat/nemea-gui | backend/__init__.py | __init__.py | py | 4,924 | python | en | code | 0 | github-code | 36 |
5743766982 | from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
class Vader:
    """Thin wrapper around VADER for binary sentiment classification."""

    def __init__(self):
        # Build the analyzer once and reuse it across calls.
        self.analyzer = SentimentIntensityAnalyzer()

    def AnalyzeSentence(self, sentence):
        """Classify ``sentence`` as positive (1) or negative (0).

        The compound score in [-1, 1] is thresholded at 0: non-negative
        scores count as positive.
        """
        vs = self.analyzer.polarity_scores(sentence)
        return 1 if vs['compound'] >= 0 else 0

    def getVaderScore(self, testData):
        """Return classification accuracy over ``testData``.

        Args:
            testData: iterable of (sentence, label) pairs, label in {0, 1}.

        Returns:
            float: fraction of correctly classified items (0.0 for empty
            input instead of raising ZeroDivisionError).
        """
        if not testData:
            return 0.0
        # The previous version kept a separate totalCount that was computed
        # but then ignored in favour of len(testData); use one count.
        successCount = sum(
            1 for sentence, label in testData
            if self.AnalyzeSentence(sentence) == label
        )
        return successCount / len(testData)
19102323120 | # _*_ coding: utf-8 _*_
# Hand-rolled two-sample t-test: compare the exam averages of two classes.
import numpy as np
from scipy.stats.distributions import t

# Class A: sample size, mean grade, standard deviation of grades.
n1, x1, s1 = 30, 78.0, 10.0
# Class B: sample size, mean grade, standard deviation of grades.
n2, x2, s2 = 25, 85.0, 15.0

# Standard error of the difference between the two sample means.
# NOTE(review): this is the unpooled (Welch) SE, yet DF below is the
# pooled n1+n2-2 — kept as-is to preserve the original output.
SE = np.sqrt(s1 ** 2 / n1 + s2 ** 2 / n2)
DF = (n1 - 1) + (n2 - 1)
print('SE=', SE, 'DF=', DF)

# Observed t statistic for H0: the two means are equal.
tscore = np.abs((x1 - x2) / SE)
print(tscore)

# Critical t value at the 95% confidence level.
c1 = 0.95
alpha = 1 - c1
t95 = t.ppf(1.0 - alpha / 2.0, DF)
print(t95)

# Critical t value at the 94% confidence level (names reused on purpose).
c1 = 0.94
alpha = 1 - c1
t95 = t.ppf(1.0 - alpha / 2.0, DF)
print(t95)

# Two-sided probability mass inside [-tscore, tscore].
f = t.cdf(tscore, DF) - t.cdf(-tscore, DF)
print(f)
| ruanyangry/pycse-data_analysis-code | PYSCE-code/40.py | 40.py | py | 851 | python | en | code | 0 | github-code | 36 |
7285626466 | # -*- coding: utf-8 -*-
from datetime import datetime
from dateutil.parser import parse
import os
import json
from pyramid.view import view_config
from pyramid.security import remember
from pyramid.httpexceptions import HTTPFound
from pyramid.security import (
Allow,
Everyone,
)
from pyramid.view import (
view_config,
forbidden_view_config,
)
from pyramid.security import (
remember,
forget,
)
from .security import authenticate
from .models import CasaCloud, Machines, LocalPorts
@view_config(context='.models.CasaCloud',
             route_name='home',
             renderer='templates/index.html',
             permission="can_use",
             )
def view_home(request):
    """Render the home page and process machine create/delete form posts.

    GET: list the authenticated user's containers.
    POST with ``del_machine_port``: remove that machine.
    POST otherwise: create a new machine from the form fields
    (cpu_cores, memory, expiry_date, image), guarded by a 10-minute
    global creation lock file and a per-user machine quota.
    """
    is_success = True
    error_message = ""
    login = request.authenticated_userid
    settings = request.registry.settings
    local_ports = LocalPorts(settings)
    machines = Machines(local_ports, settings)
    max_cpu_cores = int(settings["docker_container_max_cores"])
    max_memory = int(settings["docker_container_max_memory"])
    min_days_to_use = int(settings["min_days_to_use"])
    docker_container_create_lock_file = settings["docker_container_create_lock_file"]
    docker_container_max_num_containers = int(settings["docker_container_max_num_containers"])
    # SECURITY: eval() on a config value executes arbitrary code. The value is
    # operator-controlled, but ast.literal_eval would be safer -- flagged only,
    # not changed, to avoid breaking existing deployments.
    images_settings = eval(settings["docker_image_names"])
    title = settings["website_title"]
    images = list(images_settings)  # image names offered in the create form

    # A creation attempt younger than 10 minutes locks out further creations.
    is_lock_create_machine = False
    if os.path.isfile(docker_container_create_lock_file):
        with open(docker_container_create_lock_file, "r") as lock_fh:
            lock_data = json.load(lock_fh)
        lock_time = parse(lock_data["lock_time"])
        diff_time = datetime.now() - lock_time
        # BUG FIX: timedelta.seconds wraps every 24h (0..86399), so a day-old
        # stale lock could look "fresh"; total_seconds() is real elapsed time.
        if diff_time.total_seconds() < 10 * 60:
            is_lock_create_machine = True

    names, values = machines.search_machines(login)
    can_add_machine = len(values) < docker_container_max_num_containers

    if request.POST:
        if "del_machine_port" in request.POST:
            del_machine_port = request.POST["del_machine_port"]
            machines.remove_machine(login, int(del_machine_port))
        else:
            if is_success and is_lock_create_machine:
                is_success = False
                error_message = "The container creation is locked. Please wait for a moment to retry."
            if is_success and not can_add_machine:
                is_success = False
                error_message = "You already have max number of %d machines." % docker_container_max_num_containers
            if is_success:
                # Take the global creation lock before the (slow) docker call.
                lock_data = {"lock_time": datetime.now().isoformat()}
                with open(docker_container_create_lock_file, "w+") as lock_fh:
                    json.dump(lock_data, lock_fh)
                cpu_cores = request.POST["cpu_cores"]
                memory = request.POST["memory"]
                expiry_date = parse(request.POST["expiry_date"])
                image = request.POST["image"]
                # BUG FIX: the old check `(now - expiry).days < min_days_to_use`
                # is negative for any future expiry date, so even a 1-day
                # rental passed. Require the expiry to lie at least
                # min_days_to_use calendar days in the future.
                remaining_days = (expiry_date.date() - datetime.now().date()).days
                if remaining_days >= min_days_to_use:
                    additional_options = settings.get("docker_container_start_opts", "")
                    machines.create_machine(login, cpu_cores, memory, expiry_date, image, additional_options)
                else:
                    is_success = False
                    error_message = "You should use at least %d days" % min_days_to_use
                # Release the lock whether or not creation succeeded.
                if os.path.isfile(docker_container_create_lock_file):
                    os.remove(docker_container_create_lock_file)

    # Re-query so a just-created/deleted machine shows up in the rendered list.
    names, values = machines.search_machines(login)
    render_machines = [dict(zip(names, row_values)) for row_values in values]
    return {
        "render_machines": render_machines,
        "cpu_core_options": range(1, max_cpu_cores + 1),
        "memory_options": range(1, max_memory + 1),
        "is_success": is_success,
        "error_message": error_message,
        "images": images,
        "title": title,
    }
@view_config(route_name='login',
             renderer='templates/login.html',
             )
@forbidden_view_config(renderer='templates/login.html')
def view_login(request):
    """Show the login form and authenticate submitted credentials.

    On success, redirect back to ``came_from`` with a remember-cookie;
    on failure (or a plain GET), re-render the form with any message.
    """
    login_url = request.resource_url(request.context, 'login')
    # Never use the login form itself as the post-login destination.
    referrer = '/' if request.url == login_url else request.url
    came_from = request.params.get('came_from', referrer)
    message = ''
    login = ''
    password = ''
    if "login" in request.params and "password" in request.params:
        login = request.params['login']
        password = request.params['password']
        if authenticate(request, login, password):
            # Issue the remember-cookie headers and bounce back.
            return HTTPFound(location=came_from,
                             headers=remember(request, login))
        message = 'Failed to login...'
    return dict(
        message=message,
        url=request.registry.settings["website_base_url"] + '/login',
        came_from=came_from,
        login=login,
        password=password,
    )
@view_config(context='.models.CasaCloud', name='logout')
def logout(request):
    """Drop the auth session and redirect back to the site root."""
    return HTTPFound(location=request.resource_url(request.context),
                     headers=forget(request))
| cati-neuroimaging/casa_cloud | casa_cloud/views.py | views.py | py | 5,714 | python | en | code | 1 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.