id | text | dataset_id
|---|---|---|
8135968 | class Pilha :
# constructor
def __init__(self, lista):
self.lista = lista
# insertion method
def push(self, elem):
self.lista.append(elem)
# removal method
def pop(self):
return self.lista.pop()  # return the removed element
# listing method
def listar(self):
a = 0
for item in self.lista:
a += 1
print(a,' ', item) | StarcoderdataPython |
11342008 | from __future__ import annotations
import typing
import jinja2
import weakref
import traceback
import re
import os
import sys
import inspect
import json
import queue
import importlib
import gevent
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pyhtmlgui.pyhtmlgui import PyHtmlGui
from pyhtmlgui.view.pyhtmlview import PyHtmlView
from .lib import WeakFunctionReferences
class PyHtmlGuiInstance:
def __init__(self, parent: PyHtmlGui):
self._parent = parent
self._websocket_connections = []
self._children = weakref.WeakSet()
self._template_env = jinja2.Environment(loader=parent.template_loader, autoescape=jinja2.select_autoescape())
self._template_cache = {}
self._call_number = 0
self._function_references = WeakFunctionReferences()
self._template_env.globals['_create_py_function_reference'] = self._create_function_reference
self._view = self._parent.view_class(parent.app_instance, self)
@property
def connections_count(self) -> int:
return len(self._websocket_connections)
def update(self) -> None:
self.call_javascript("pyhtmlgui.update_element", ["pyHtmlGuiBody", self._view.render()], skip_results=True)
def set_visible(self, visible: bool) -> None:
"""
Set component and children's visibility; components that are not visible get their events detached
"""
if visible is False:
for child in self._children:
if child.is_visible is True:
child.set_visible(False)
if visible is True:
self.update()
def call_javascript(self, js_function_name: str, args: list = None, skip_results: bool = False):
"""
Call javascript function in frontend.
:param js_function_name: Name of javascript function
:param args: Arguments for js function
:param skip_results: Don't receive results; gives a slight performance improvement since we don't wait for the result
"""
self._call_number += 1
to_delete = self._call_number - 100
# clean old function references; this is needed so results don't pile up if you never collect them
for websocket_connection in self._websocket_connections:
if to_delete in websocket_connection.javascript_call_result_objects:
del websocket_connection.javascript_call_result_objects[to_delete]
if to_delete in websocket_connection.javascript_call_result_queues:
del websocket_connection.javascript_call_result_queues[to_delete]
if args is None:
args = []
javascript_call_object = {'call': self._call_number, 'name': js_function_name, 'args': args}
if skip_results is True:
javascript_call_result = None
javascript_call_object["skip_results"] = True
else:
javascript_call_result = JavascriptCallResult(self._call_number)
data = json.dumps(javascript_call_object, default=lambda o: None)
for websocket_connection in self._websocket_connections:
if skip_results is False:
websocket_connection.javascript_call_result_objects[self._call_number] = javascript_call_result
javascript_call_result.add_call(websocket_connection)
websocket_connection.ws.send(data)
return javascript_call_result
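# Illustrative usage (added comments; "someJsFunction" is a placeholder, not part of the original module):
#   fire-and-forget, as update() does above:
#       self.call_javascript("pyhtmlgui.update_element", ["pyHtmlGuiBody", html], skip_results=True)
#   blocking until every connected frontend has answered (see JavascriptCallResult.__call__ below):
#       results = self.call_javascript("someJsFunction", [arg1, arg2])()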
def websocket_loop(self, websocket_connection) -> None:
self._websocket_connections.append(websocket_connection)
while True:
msg = websocket_connection.ws.receive()
if msg is not None:
message = json.loads(msg)
gevent.spawn(self._websocket_process_message, message, websocket_connection).run()
else:
break
self._websocket_connections.remove(websocket_connection)
if len(self._websocket_connections) == 0:
self.set_visible(False)
def _websocket_process_message(self, message, websocket_connection):
if 'call' in message:
function_name = "Function not found"
args = " "
try:
if message['name'] == "call_python_function_with_args":
functioncall_id = message['args'][0]
args = message['args'][1]
function = self._function_references.get(functioncall_id)
# noinspection PyUnresolvedReferences
function_name = "%s.%s" % (function.__self__.__class__.__name__, function.__name__)
return_val = function(*args)
elif message['name'] == "call_python_function":
functioncall_id = message['args'][0]
function = self._function_references.get(functioncall_id)
# noinspection PyUnresolvedReferences
function_name = "%s.%s" % (function.__self__.__class__.__name__, function.__name__)
return_val = function()
elif message['name'] == "python_bridge":
function_name = "%s.python_bridge" % self.__class__.__name__
return_val = None
if hasattr(self._view, "on_electron_message"):
self._view.on_electron_message(message)
elif message['name'] == "frontend_ready":
function_name = "%s.frontend_ready" % self.__class__.__name__
self.update()
return_val = None
if hasattr(self._view, "on_frontend_ready"):
self._view.on_frontend_ready(len(self._websocket_connections))
else:
return_val = None
print("unknown python function", message['name'])
except Exception:
tb = traceback.format_exc()
msg = " Exception in: %s(%s)\n" % (function_name, ("%s" % args)[1:-1])
msg += " %s" % tb.replace("\n", "\n ").strip()
self.call_javascript("pyhtmlgui.debug_msg", [msg])
print(msg)
return_val = None
if not ("skip_results" in message and message["skip_results"] is True):
data = json.dumps({'return': message['call'], 'value': return_val}, default=lambda o: None)
websocket_connection.ws.send(data)
elif 'return' in message:
call_id = message['return']
del message['return'] # remove internal id from result before passing to next level
if call_id in websocket_connection.javascript_call_result_objects:
js_call_result = websocket_connection.javascript_call_result_objects[call_id]
js_call_result.result_received(websocket_connection, message)
else:
print('Invalid message received: ', message)
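# Message shapes handled above (added comments; field values are illustrative):
#   frontend -> python call:   {"call": 7, "name": "call_python_function_with_args", "args": ["<function_ref_id>", [1, 2]]}
#   python -> frontend reply:  {"return": 7, "value": <return_val>}
#   frontend result of an earlier call_javascript(): {"return": <call_number>, "value": ..., "error": "<optional>"}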
def _create_function_reference(self, function: typing.Union[typing.Callable, jinja2.runtime.Undefined]) -> str:
if type(function) == jinja2.runtime.Undefined:
# noinspection PyProtectedMember
raise Exception("Undefined python method in script: '%s'" % function._undefined_name)
return self._function_references.add(function)
def get_template(self, item: PyHtmlView, force_reload: bool = False):
"""
Return the template associated with the given view item
:param item: The view Object
:param force_reload: Force reloading of template
:return: the compiled jinja2 template for the item's class
"""
if force_reload is False:
try:
return self._template_cache[item.__class__.__name__]
except KeyError:
pass
if item.__class__.TEMPLATE_FILE is not None: # load from file
file_to_monitor = self._template_env.get_template(item.TEMPLATE_FILE).filename
string_to_render = open(file_to_monitor, "r").read()
else: # load from class
if self._parent.auto_reload is False:
string_to_render = item.TEMPLATE_STR
file_to_monitor = None
else:
module_name = item.__module__
if module_name is None or module_name == str.__class__.__module__:
module_fullname = item.__class__.__name__ # Avoid reporting __builtin__
else:
module_fullname = module_name + '.' + item.__class__.__name__
try:
file_to_monitor = os.path.abspath(inspect.getfile(item.__class__))
except Exception: # in case it's in __main__; this may be a bug in inspect
file_to_monitor = os.path.abspath(sys.argv[0])
if module_name == "__main__":
name = os.path.splitext(os.path.basename(file_to_monitor))[0]
module = __import__(name)
importlib.reload(module) # reload should work for simple objects defined in __main__, but not for more complex ones
for comp in module_fullname.split(".")[1:]:
module = getattr(module, comp)
else:
loader = importlib.machinery.SourceFileLoader(module_name, file_to_monitor)
# noinspection PyUnresolvedReferences
spec = importlib.util.spec_from_loader(loader.name, loader)
# noinspection PyUnresolvedReferences
module = importlib.util.module_from_spec(spec)
loader.exec_module(module)
module = getattr(module, module_fullname.split(".")[-1])
string_to_render = module.TEMPLATE_STR
if self._parent.auto_reload is True:
self._parent.add_file_to_monitor(file_to_monitor, item.__class__.__name__)
# replace onclick="pyview.my_function(arg1,arg2)"
# with onclick="pyhtmlgui.call({{_create_py_function_reference(pyview.my_function)}}, arg1, arg2)
# this is a convenience function so the user does not have to type the annoying stuff and functions look cleaner
string_to_render = self._prepare_template(string_to_render)
try:
self._template_cache[item.__class__.__name__] = self._template_env.from_string(string_to_render)
except Exception as e:
msg = "Failed to load Template "
if item.TEMPLATE_FILE is not None:
msg += "from File '%s': " % item.TEMPLATE_FILE
else:
msg += "from Class '%s': " % item.__class__.__name__
msg += " %s" % e
raise Exception(msg)
return self._template_cache[item.__class__.__name__]
@staticmethod
def _prepare_template(template: str) -> str:
"""
Replace onclick="pyview.my_function(arg1,arg2)"
with onclick="pyhtmlgui.call({{_create_py_function_reference(pyview.my_function)}}, arg1, arg2)
"""
parts = re.split('({{|}}|{%|%})', template)
index = 0
while index < len(parts):
if parts[index] == "{{" or parts[index] == "{%" :
parts[index] = "%s%s%s" % (parts[index] , parts[index+1], parts[index+2])
del parts[index+ 1]
del parts[index+ 1]
index += 1
new_parts = []
for i, part in enumerate(parts):
if part.startswith("{{") or part.startswith("{%") or part.find("pyview.") == -1:
new_parts.append(part)
else:
# noinspection RegExpSingleCharAlternation
subparts = re.split(r'(>| |\(|=|\"|\'|\n|\r|\t|;)(pyview.[a-zA-Z0-9_.]+\()', part)
for x, subpart in enumerate(subparts):
if subpart.startswith("pyview."):
subpart = subpart.replace("(", ")}}, ", 1)
subparts[x] = "pyhtmlgui.call({{_create_py_function_reference(%s" % subpart
for sp in subparts:
new_parts.append(sp)
return "".join(new_parts).replace(r'\pyview.', 'pyview.')
def clear_template_cache(self, classnames: str = None) -> None:
if classnames is None:
self._template_cache = {}
else:
for classname in classnames:
try:
del self._template_cache[classname]
except Exception:
pass
def _add_child(self, child: PyHtmlView) -> None:
self._children.add(child)
def _remove_child(self, child: PyHtmlView) -> None:
self._children.remove(child)
class JavascriptCallResult:
def __init__(self, call_id):
self.call_id = call_id
self.websocket_connections = weakref.WeakSet()
self._results_missing = 0
self._callback = None
def add_call(self, websocket_connection):
websocket_connection.javascript_call_result_queues[self.call_id] = queue.Queue()
self.websocket_connections.add(websocket_connection)
self._results_missing += 1
def result_received(self, websocket_connection, result):
websocket_connection.javascript_call_result_queues[self.call_id].put(result)
self._results_missing -= 1
if self._results_missing == 0 and self._callback is not None:
self._callback(self._collect_results())
def _collect_results(self):
results = []
for websocket_connection in self.websocket_connections:
results.append(websocket_connection.javascript_call_result_queues[self.call_id].get())
del websocket_connection.javascript_call_result_queues[self.call_id]
del websocket_connection.javascript_call_result_objects[self.call_id]
errors = [result["error"] for result in results if "error" in result]
if len(errors) > 0:
msg = "%s of %s connected frontends returned an error\n" % (len(errors), len(results))
msg += "\n".join(errors)
raise Exception(msg)
return [result["value"] for result in results]
def __call__(self, callback: typing.Union[typing.Callable, None] = None):
if callback is None:
return self._collect_results()
else:
if self._results_missing == 0:
callback(self._collect_results())
else:
self._callback = callback
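# Added note: calling the result object with no argument blocks until every connected frontend
# has answered and returns the list of values; passing a callback returns immediately and the
# callback fires once all results have arrived, e.g. (illustrative):
#   self.call_javascript("someJsFunction", [])(lambda values: print(values))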
| StarcoderdataPython |
4894472 | # -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
from azure.cli.core.profiles import register_resource_type
from ...profiles import CUSTOM_MGMT_STORAGE_ORS
register_resource_type('latest', CUSTOM_MGMT_STORAGE_ORS, '2019-06-01')
| StarcoderdataPython |
1678638 | <reponame>Acuf5928/check-
from fbs import SETTINGS
from fbs_runtime import FbsError
from subprocess import run, DEVNULL, check_call, check_output, PIPE, \
CalledProcessError
import re
def preset_gpg_passphrase():
# Ensure gpg-agent is running:
run(
['gpg-agent', '--daemon', '--use-standard-socket', '-q'],
stdout=DEVNULL, stderr=DEVNULL
)
gpg_key = SETTINGS['gpg_key']
try:
keygrip = _get_keygrip(gpg_key)
except GpgDoesNotSupportKeygrip:
# Old GPG versions don't support keygrips; They use the fingerprint
# instead:
keygrip = gpg_key
check_call([
SETTINGS['gpg_preset_passphrase'], '--preset', '--passphrase',
SETTINGS['gpg_pass'], keygrip
], stdout=DEVNULL)
def _get_keygrip(pubkey_id):
try:
output = check_output(
['gpg2', '--with-keygrip', '-K', pubkey_id],
universal_newlines=True, stderr=PIPE
)
except CalledProcessError as e:
if 'invalid option "--with-keygrip"' in e.stderr:
raise GpgDoesNotSupportKeygrip() from None
elif 'No secret key' in e.stderr:
raise FbsError(
"GPG could not read your key for code signing. Perhaps you "
"don't want\nto run this command here, but after:\n"
" fbs runvm {ubuntu|fedora|arch}"
)
raise
pure_signing_subkey = _find_keygrip(output, 'S')
if pure_signing_subkey:
return pure_signing_subkey
any_signing_key = _find_keygrip(output, '[^]]*S[^]]*')
if any_signing_key:
return any_signing_key
raise RuntimeError('Keygrip not found. Output was:\n' + output)
def _find_keygrip(gpg2_output, type_re):
lines = gpg2_output.split('\n')
for i, line in enumerate(lines):
if re.match(r'.*\[%s\]$' % type_re, line):
for keygrip_line in lines[i + 1:]:
m = re.match(r' +Keygrip = ([A-Z0-9]{40})', keygrip_line)
if m:
return m.group(1)
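# Added illustration: _find_keygrip parses "gpg2 --with-keygrip -K" output shaped roughly like
# the following (key data and keygrips are made-up placeholders):
#   sec  rsa4096 2020-01-01 [SC]
#        Keygrip = 0123456789ABCDEF0123456789ABCDEF01234567
#   ssb  rsa4096 2020-01-01 [S]
#        Keygrip = 89ABCDEF0123456789ABCDEF0123456789ABCDEF
# _get_keygrip first looks for a pure signing subkey ([S]) and falls back to any key whose
# flags include S.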
class GpgDoesNotSupportKeygrip(RuntimeError):
pass | StarcoderdataPython |
9635287 | <gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
def norm_histogram(hist):
"""
takes a histogram of counts and creates a histogram of probabilities
:param hist: list
:return: list
"""
j = 0;
hist_sum = 0;
hist_new = [];
while j < len(hist):
hist_sum = hist_sum + hist[j];
j += 1;
for i in hist:
p = i / hist_sum
hist_new.append(p);
return(hist_new)
pass
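# Added example (comment only): each count is divided by the total, so
#   norm_histogram([2, 3, 5]) -> [0.2, 0.3, 0.5]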
def compute_j(histo, width):
"""
takes histogram of counts, uses norm_histogram to convert to probabilties, it then calculates compute_j for one bin width
:param histo: list
:param width: float
:return: float
"""
sq_sum = 0;
m = 0;
k = 0;
hist_p = norm_histogram(histo)
hist_sq =[0] * len(hist_p)
while k < len(hist_p):
hist_sq[k] = hist_p[k] ** 2;
sq_sum += (hist_sq[k]);
m += histo[k];
k += 1;
j = 2/((m - 1) * width) - (((m + 1)/((m-1)*width))*(sq_sum))
return(j);
pass
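# Added note: compute_j evaluates the cross-validation risk estimate commonly used for
# histogram bin-width selection, J(h) = 2/((m-1)*h) - (m+1)/((m-1)*h) * sum(p_k**2),
# where m is the total number of samples, h the bin width, and p_k the normalized bin
# probabilities; a lower J indicates a better bin width.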
def sweep_n(data, minimum, maximum, min_bins, max_bins):
"""
find the optimal bin
calculate compute_j for a full sweep [min_bins to max_bins]
please make sure max_bins is included in your sweep
:param data: list
:param minimum: int
:param maximum: int
:param min_bins: int
:param max_bins: int
:return: list
"""
optimal_hist = []
b = min_bins
while b <= max_bins:
j = compute_j(plt.hist(data, b, (minimum,maximum))[0], (maximum-minimum)/b);
optimal_hist.append(j)
b += 1;
return(optimal_hist)
pass
def find_min(l):
"""
generic function that takes a list of numbers and returns smallest number in that list its index.
return optimal value and the index of the optimal value as a tuple.
:param l: list
:return: tuple
"""
list_min = min(l);
min_index = l.index(list_min);
min_tuple = (list_min,min_index);
return(min_tuple)
pass
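# Added example (comment only): find_min([0.4, 0.1, 0.7]) -> (0.1, 1)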
if __name__ == '__main__':
data = np.loadtxt('input.txt') # reads data from input.txt
lo = min(data)
hi = max(data)
bin_l = 1
bin_h = 100
js = sweep_n(data, lo, hi, bin_l, bin_h)
"""
the values bin_l and bin_h represent the lower and higher bound of the range of bins.
They will change when we test your code and you should be mindful of that.
"""
print(find_min(js))
| StarcoderdataPython |
1754792 | # Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from ..types import XMLBase
class Direction(XMLBase):
_NAME = 'direction'
_TYPE = 'sdf'
def __init__(self, default=[0, 0, -1]):
super(Direction, self).__init__()
assert self._is_numeric_vector(default) or \
self._is_scalar(default), \
'Direction must be either an array or a scalar'
if isinstance(default, collections.Iterable):
default = list(default)
assert len(default) == 3, \
'Direction must have 3 components'
assert self._is_numeric_vector(default), \
'Direction must be a numerical vector'
else:
default = float(default)
self._default = default
self._value = default
def _set_value(self, value):
if isinstance(value, collections.Iterable):
assert len(value) == 3, \
'Direction must have 3 components'
assert self._is_numeric_vector(value), \
'Direction must be a numerical vector'
self._value = list(value)
else:
self._value = float(value)
def get_formatted_value_as_str(self):
if isinstance(self._value, collections.Iterable):
output_str = ' '.join(['{}'] * len(self._value))
return output_str.format(*[format(x, 'n') for x in self._value])
else:
return '{}'.format(self._value)
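# Added illustrative usage (comments only):
#   d = Direction()                    # defaults to [0, 0, -1]
#   d._set_value([1, 0, 0.5])
#   d.get_formatted_value_as_str()     # -> "1 0 0.5"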
| StarcoderdataPython |
3490916 | <reponame>omar115/dvizio-test
from os import name
import task
import traceback
import pandas as pd
import numpy as np
# driver code
def main():
radius, zipcode = task.user_input()
driver = task.init_driver()
print("---Process Started---")
task.login(driver)
df = task.scrape_data(driver, radius, zipcode)
# replace whitespace-only cells with NaN
df = df.replace(r'^\s*$', np.nan, regex=True)
#remove null
df = df.apply(lambda x: pd.Series(x.dropna().values))
#save CSV
df.to_csv(r'C:\Users\Ferntech\final_output.csv', index=False, header=True, mode='a')
driver.quit()
print("---Process Finished---")
main() | StarcoderdataPython |
1607314 | <gh_stars>10-100
import Tkinter as tk
from common.styles import BUTTON_X_MARGIN, LG_BODY_COLOR, LG_BODY_FONT
from common.tools import create_buttons, is_within
class ReversedCheckbutton(tk.Frame):
def __init__(self, parent, text, variable, command):
tk.Frame.__init__(self, parent)
self.checkbutton = tk.Checkbutton(self, variable=variable, command=command)
self.label = tk.Label(self, text=text)
self.label.grid(row=0, column=0)
self.checkbutton.grid(row=0, column=1)
self.widgets = [self.checkbutton, self.label]
class AddStepsWidget(tk.Frame):
def __init__(self, parent, func, text="Add Steps:"):
tk.Frame.__init__(self, parent, bg=parent.cget("bg"))
if func is None:
def do_nothing(*args, **kwargs):
pass
func = do_nothing
self.func=func
self.l = tk.Label(self, text=text, bg=self.cget("bg"))
vcmd = (self.register(self.is_okay),'%P')
self.v = tk.StringVar()
self.v.set('20')
self.s = tk.Spinbox(self, width=5, from_=20, to=9980, increment=20,
textvariable=self.v, validate="all",
validatecommand=vcmd)
self.s.bind("<Command-a>", self.spinbox_select_all)
self.b = tk.Button(self, text="Go!", command=self.go, bg=self.cget("bg"), padx=BUTTON_X_MARGIN)
self.widgets = [self.l, self.s, self.b]
for i, each in enumerate(self.widgets):
each.grid(row=0, column=i)
def is_okay(self, value):
if value == "": return True
if (len(value) > 4): return False
try:
if is_within(int(value), 0, 9999): return True
except:
pass
return False
def spinbox_select_all(self, event):
self.s.selection("range", 0, tk.END)
def go(self):
v = self.v.get()
if v == "":
return
v = int(v)
self.func(v)
class ButtonsFrame(tk.Frame):
def __init__(self, parent, session, new_pop_command, mutate_command, cross_command, add_command):
tk.Frame.__init__(self, parent)
self.new_pop = new_pop_command
self.mutate = mutate_command
self.cross = cross_command
self.session = session
session.bind("general_settings", self.update_checkbutton)
buttons_row = 0
buttons = create_buttons(self,
{"new_pop":["New Population", buttons_row, 0],
"mutate":["Mutate", buttons_row, 1],
"cross":["Crossover", buttons_row, 2]
})
self.new_pop_button, self.mutate_button, self.cross_button = buttons
self.add_steps_widget = AddStepsWidget(self, add_command)
self.add_steps_widget.grid(row=buttons_row, column=3, padx=30)
self.show_movement_intvar = tk.IntVar()
self.show_movement_checkbutton = ReversedCheckbutton(self,
text = "Show Movement", variable=self.show_movement_intvar,
command=self.show_movement
)
self.show_movement_checkbutton.grid(row=buttons_row, column=4)
self.widgets = buttons.values() + [
self.add_steps_widget,
self.show_movement_checkbutton
]
def grid_(self):
self.grid()
def update_checkbutton(self):
movement = self.session.general_settings["show_movement"]
if movement != self.show_movement_intvar.get():
self.show_movement_intvar.set(movement)
def show_movement(self):
new_settings = self.session.general_settings
new_settings["show_movement"] = self.show_movement_intvar.get()
self.session.set("general_settings", new_settings)
class MutateFrame(tk.Frame):
def __init__(self, parent, session, mutate_func, advanced_frame):
tk.Frame.__init__(self, parent)
self.parent = parent
self.session = session
self.mutate_func = mutate_func
self.advanced_frame = advanced_frame
self.label = tk.Label(self, text="Choose a parent", fg=LG_BODY_COLOR, font=LG_BODY_FONT)
self.label.grid(row=0, column=1)
self.buttons = create_buttons(self, {
"ok":["Mutate!", 0, 1], #u"\u2713"
"cancel": ["<Back", 0, 0], # \u21a9
"advanced": ["Advanced", 0, 2]
})
#self.buttons["ok"].config(state=tk.DISABLED)
#self.buttons["ok"].grid(sticky="w")
self.buttons["ok"].grid_remove()
self.buttons["cancel"].grid(sticky="w")
self.buttons["advanced"].grid(sticky="e")
for i, weight in enumerate([1,1,1]):
self.columnconfigure(i, weight=weight, minsize=50)
def grid_(self):
self.buttons["ok"].grid_remove()
self.label.grid()
self.grid()
def chosen(self, graph):
self.reveal_button()
self.current_chosen = graph
def reveal_button(self):
self.label.grid_remove()
self.buttons["ok"].grid()
#self.buttons["ok"].config(state=tk.NORMAL)
self.update()
def ok(self):
self.session.set("advanced_mutate", self.advanced_frame.get())
self.mutate_func(self.current_chosen.sim)
self.cancel()
def cancel(self):
self.advanced_frame.grid_remove()
self.parent.back_to_home_topframe()
def advanced(self):
self.advanced_frame.grid()
class CrossFrame(tk.Frame):
def __init__(self, parent, cross_func):
tk.Frame.__init__(self, parent)
self.parent = parent
self.cross_func = cross_func
self.label = tk.Label(self, text="Choose parents", fg=LG_BODY_COLOR, font=LG_BODY_FONT)
self.label.grid(row=0, column=1)
self.buttons = create_buttons(self, {
"ok":["Crossover!", 0, 1], #u"\u2713"
"cancel": ["<Back", 0, 0], # \u21a9
})
#self.buttons["ok"].config(state=tk.DISABLED)
#self.buttons["ok"].grid(sticky="w")
self.buttons["ok"].grid_remove()
self.buttons["cancel"].grid(sticky="w")
for i, weight in enumerate([1,1,1]):
self.columnconfigure(i, weight=weight, minsize=50)
def grid_(self):
self.unreveal_button()
self.grid()
def chosen(self, graphs):
if len(graphs)>=2:
self.reveal_button()
else:
self.unreveal_button()
self.current_chosen = graphs
def reveal_button(self):
self.label.grid_remove()
self.buttons["ok"].grid()
#self.buttons["ok"].config(state=tk.NORMAL)
self.update()
def unreveal_button(self):
self.buttons["ok"].grid_remove()
self.label.grid()
self.update()
def ok(self):
self.cross_func([_.sim for _ in self.current_chosen])
self.cancel()
def cancel(self):
self.parent.back_to_home_topframe()
class InsertLibFrame(tk.Frame):
def __init__(self, parent, insert_func):
tk.Frame.__init__(self, parent)
self.parent = parent
self.insert_func = insert_func
self.label = tk.Label(self, text="Choose locations to replace", fg=LG_BODY_COLOR, font=LG_BODY_FONT)
self.label.grid(row=0, column=1)
self.buttons = create_buttons(self, {
"ok":["Insert!", 0, 1], #u"\u2713"
"cancel": ["<Back", 0, 0], # \u21a9
})
self.all_intvar = tk.IntVar()
self.all_checkbutton = ReversedCheckbutton(self, "All", self.all_intvar, self.check_all)
#self.buttons["ok"].config(state=tk.DISABLED)
#self.buttons["ok"].grid(sticky="w")
self.all_checkbutton.grid(row=0, column=2, sticky="e")
self.buttons["ok"].grid_remove()
self.buttons["cancel"].grid(sticky="w")
for i, weight in enumerate([1,1,1]):
self.columnconfigure(i, weight=weight)
def grid_(self):
self.unreveal_button()
self.grid()
def chosen(self, graphs):
if len(graphs) >= 1:
self.reveal_button()
else:
self.unreveal_button()
self.current_chosen = graphs
def reveal_button(self):
self.label.grid_remove()
self.buttons["ok"].grid()
#self.buttons["ok"].config(state=tk.NORMAL)
self.update()
def unreveal_button(self):
self.buttons["ok"].grid_remove()
self.label.grid()
self.update()
def ok(self):
self.insert_func(self.chosen_gene, [_.sim for _ in self.current_chosen])
self.parent.expand_range_settings(self.chosen_gene)
self.chosen_gene = None
self.cancel()
def cancel(self):
self.all_intvar.set(0)
self.parent.back_to_home_topframe()
def check_all(self):
if self.all_intvar.get() == 1:
self.parent.frames.sims.choose_all()
self.reveal_button()
else:
self.parent.frames.sims.clear_all_selection()
self.unreveal_button()
class EvolvingFrame(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
self.display_text = tk.StringVar()
self.label = tk.Label(self, textvariable=self.display_text, fg=LG_BODY_COLOR, font=LG_BODY_FONT)
self.label.grid(row=0, column=1)
self.buttons = create_buttons(self, {
"cancel": ["<Back", 0, 0], # \u21a9
})
#self.buttons["ok"].grid_remove()
self.buttons["cancel"].grid(sticky="w")
self.buttons["cancel"].grid_remove()
for i, weight in enumerate([1,1,1]):
self.columnconfigure(i, weight=weight, minsize=50)
def grid_(self):
self.buttons["cancel"].grid_remove()
self.display_text.set("")
self.grid()
def done(self):
self.buttons["cancel"].grid()
def cancel(self):
self.parent.back_to_home_topframe()
| StarcoderdataPython |
9603196 | from django.apps import AppConfig
class SourceConfig(AppConfig):
name = 'source'
verbose_name = "水印服务"
main_menu_index = 2
| StarcoderdataPython |
301932 | <reponame>pmaigutyak/mp-config
# Generated by Django 2.0.6 on 2018-08-10 16:16
from django.db import migrations, models
import site_config.models
class Migration(migrations.Migration):
dependencies = [
('site_config', '0003_auto_20171023_1207'),
]
operations = [
migrations.AddField(
model_name='configfield',
name='value_html',
field=site_config.models.HTMLField(blank=True, null=True, verbose_name='HTML'),
),
migrations.AlterField(
model_name='configfield',
name='name',
field=models.CharField(max_length=255, unique=True, verbose_name='Name'),
),
migrations.AlterField(
model_name='configfield',
name='splitter',
field=models.CharField(blank=True, choices=[('newline', 'New line'), (',', 'Comma'), ('.', 'Dot'), (';', 'Semicolon'), (' ', 'Tab')], help_text='Доступно тільки для типів: text, input', max_length=10, null=True, verbose_name='Splitter'),
),
migrations.AlterField(
model_name='configfield',
name='type',
field=models.CharField(choices=[('input', 'Input'), ('text', 'Text'), ('html', 'HTML'), ('int', 'Integer'), ('float', 'Float'), ('bool', 'True / False'), ('url', 'Url'), ('email', 'Email'), ('file', 'File'), ('image', 'Image'), ('json', 'JSON')], max_length=50, verbose_name='Type'),
),
migrations.AlterField(
model_name='configfield',
name='value_file',
field=models.FileField(blank=True, null=True, upload_to='site_config', verbose_name='File'),
),
migrations.AlterField(
model_name='configfield',
name='value_image',
field=models.ImageField(blank=True, null=True, upload_to='site_config', verbose_name='Image'),
),
migrations.AlterUniqueTogether(
name='configfield',
unique_together=set(),
),
migrations.RemoveField(
model_name='configfield',
name='site',
),
]
| StarcoderdataPython |
3447494 | <reponame>rrlyman/PythonMachineLearingExamples
"""
Example employing Lasagne for digit recognition using the MNIST dataset.
This example is deliberately structured as a long flat file, focusing on how
to use Lasagne, instead of focusing on writing maximally modular and reusable
code. It is used as the foundation for the introductory Lasagne tutorial:
http://lasagne.readthedocs.org/en/latest/user/tutorial.html
More in-depth examples and reproductions of paper results are maintained in
a separate repository: https://github.com/Lasagne/Recipes
https://github.com/Lasagne/Lasagne/blob/master/examples/mnist.py
Make sure that CUDNN and CUDA are in the paths when running on LiClipse
@author: <NAME>
"""
from __future__ import print_function
import sys
import time
import numpy as np
import theano
# must comment out downsampling in pool.py because pip install did not include downsample
import theano.tensor as T
import lasagne
import ocr_utils
# ##################### Build the neural network model #######################
# This script supports three types of models. For each one, we define a
# function that takes a Theano variable representing the input and returns
# the output layer of a neural network model built in Lasagne.
def build_mlp(input_var=None, nRow=28, nCol=28):
# This creates an MLP of two hidden layers of 800 units each, followed by
# a softmax output layer of 10 units. It applies 20% dropout to the input
# data and 50% dropout to the hidden layers.
# Input layer, specifying the expected input shape of the network
# (unspecified batchsize, 1 channel, 28 rows and 28 columns) and
# linking it to the given Theano variable `input_var`, if any:
l_in = lasagne.layers.InputLayer(shape=(None, 1, nRow, nCol),
input_var=input_var)
# Apply 20% dropout to the input data:
l_in_drop = lasagne.layers.DropoutLayer(l_in, p=0.2)
# Add a fully-connected layer of 800 units, using the linear rectifier, and
# initializing weights with Glorot's scheme (which is the default anyway):
l_hid1 = lasagne.layers.DenseLayer(
l_in_drop, num_units=800,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
# We'll now add dropout of 50%:
l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=0.5)
# Another 800-unit layer:
l_hid2 = lasagne.layers.DenseLayer(
l_hid1_drop, num_units=800,
nonlinearity=lasagne.nonlinearities.rectify)
# 50% dropout again:
l_hid2_drop = lasagne.layers.DropoutLayer(l_hid2, p=0.5)
# Finally, we'll add the fully-connected output layer, of 10 softmax units:
l_out = lasagne.layers.DenseLayer(
l_hid2_drop, num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
# Each layer is linked to its incoming layer(s), so we only need to pass
# the output layer to give access to a network in Lasagne:
return l_out
def build_custom_mlp(input_var=None, depth=2, width=800, drop_input=.2,
drop_hidden=.5):
# By default, this creates the same network as `build_mlp`, but it can be
# customized with respect to the number and size of hidden layers. This
# mostly showcases how creating a network in Python code can be a lot more
# flexible than a configuration file. Note that to make the code easier,
# all the layers are just called `network` -- there is no need to give them
# different names if all we return is the last one we created anyway; we
# just used different names above for clarity.
# Input layer and dropout (with shortcut `dropout` for `DropoutLayer`):
network = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
input_var=input_var)
if drop_input:
network = lasagne.layers.dropout(network, p=drop_input)
# Hidden layers and dropout:
nonlin = lasagne.nonlinearities.rectify
for _ in range(depth):
network = lasagne.layers.DenseLayer(
network, width, nonlinearity=nonlin)
if drop_hidden:
network = lasagne.layers.dropout(network, p=drop_hidden)
# Output layer:
softmax = lasagne.nonlinearities.softmax
network = lasagne.layers.DenseLayer(network, 10, nonlinearity=softmax)
return network
def build_cnn(input_var=None):
# As a third model, we'll create a CNN of two convolution + pooling stages
# and a fully-connected hidden layer in front of the output layer.
# Input layer, as usual:
network = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
input_var=input_var)
# This time we do not apply input dropout, as it tends to work less well
# for convolutional layers.
# Convolutional layer with 32 kernels of size 5x5. Strided and padded
# convolutions are supported as well; see the docstring.
network = lasagne.layers.Conv2DLayer(
network, num_filters=32, filter_size=(5, 5),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
# Expert note: Lasagne provides alternative convolutional layers that
# override Theano's choice of which implementation to use; for details
# please see http://lasagne.readthedocs.org/en/latest/user/tutorial.html.
# Max-pooling layer of factor 2 in both dimensions:
network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
# Another convolution with 32 5x5 kernels, and another 2x2 pooling:
network = lasagne.layers.Conv2DLayer(
network, num_filters=32, filter_size=(5, 5),
nonlinearity=lasagne.nonlinearities.rectify)
network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
# A fully-connected layer of 256 units with 50% dropout on its inputs:
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=.5),
num_units=256,
nonlinearity=lasagne.nonlinearities.rectify)
# And, finally, the 10-unit output layer with 50% dropout on its inputs:
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=.5),
num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
return network
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
ln = len(inputs) - len(inputs) % batchsize
assert ln % batchsize == 0
if shuffle:
indices = np.arange(ln)
np.random.shuffle(indices)
for start_idx in range(0, ln , batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
# ############################## Main program ################################
# Everything else will be handled in our main program now. We could pull out
# more functions to better separate the code, but it wouldn't make it any
# easier to read.
def main(model='mlp', num_epochs=50):
print("Loading data...")
input_filters_dict = {'font': ('HANDPRINT',), 'm_label': range(48,57)}
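# Added note: range(48, 57) covers ASCII codes 48-56, i.e. the characters '0'-'8';
# range(48, 58) would be needed to also include '9'.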
output_feature_list = ['m_label','image']
ds = ocr_utils.read_data(input_filters_dict = input_filters_dict,
output_feature_list=output_feature_list,
engine_type='theano',
test_size = .1,
evaluation_size = .1,
dtype='float32')
nRows = ds.train.num_rows
nCols = ds.train.num_columns
X_train = ds.train.features[1]
X_val = ds.evaluation.features[1]
X_test = ds.test.features[1]
y_train = np.array(ds.train.features[0]-48,dtype=np.int32)
y_test = np.array(ds.test.features[0]-48,dtype=np.int32)
y_val = np.array(ds.evaluation.features[0]-48,dtype=np.int32)
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
# Create neural network model (depending on first command line parameter)
print("Building model and compiling functions...")
if model == 'mlp':
network = build_mlp(input_var,nRows, nCols)
elif model.startswith('custom_mlp:'):
depth, width, drop_in, drop_hid = model.split(':', 1)[1].split(',')
network = build_custom_mlp(input_var, int(depth), int(width),
float(drop_in), float(drop_hid))
elif model == 'cnn':
network = build_cnn(input_var)
else:
print("Unrecognized model type %r." % model,flush=True)
return
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# We could add some weight decay as well here, see lasagne.regularization.
# Create update expressions for training, i.e., how to modify the
# parameters at each training step. Here, we'll use Stochastic Gradient
# Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(
loss, params, learning_rate=0.01, momentum=0.9)
# Create a loss expression for validation/testing. The crucial difference
# here is that we do a deterministic forward pass through the network,
# disabling dropout layers.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
# As a bonus, also create an expression for the classification accuracy:
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype='float32')
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
# Finally, launch the training loop.
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train, y_train, 500, shuffle=True):
inputs, targets = batch
train_err += train_fn(inputs, targets)
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val, y_val, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time),flush=True)
print(" training loss:\t\t{:.6f}".format(train_err / train_batches),flush=True)
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches),flush=True)
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# After training, we compute and print the test error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:",flush=True)
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches),flush=True)
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100),flush=True)
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
if __name__ == '__main__':
if ('--help' in sys.argv) or ('-h' in sys.argv):
print("Trains a neural network on MNIST using Lasagne.")
print("Usage: %s [MODEL [EPOCHS]]" % sys.argv[0])
print()
print("MODEL: 'mlp' for a simple Multi-Layer Perceptron (MLP),")
print(" 'custom_mlp:DEPTH,WIDTH,DROP_IN,DROP_HID' for an MLP")
print(" with DEPTH hidden layers of WIDTH units, DROP_IN")
print(" input dropout and DROP_HID hidden dropout,")
print(" 'cnn' for a simple Convolutional Neural Network (CNN).")
print("EPOCHS: number of training epochs to perform (default: 500)")
else:
kwargs = {}
if len(sys.argv) > 1:
kwargs['model'] = sys.argv[1]
if len(sys.argv) > 2:
kwargs['num_epochs'] = int(sys.argv[2])
main(**kwargs)
print ('\n########################### No Errors ####################################')
| StarcoderdataPython |
9774162 | """Test main ARgorithmToolkit classes
"""
import ARgorithmToolkit
def test_base():
"""Test Stateset add_state
"""
a = ARgorithmToolkit.StateSet()
a.states.append('test state')
assert len(a.states) == 1
def test_state():
"""Test State exception handling
"""
try:
ARgorithmToolkit.State(state_def="ErrorTest")
assert False , 'No error raised'
except ARgorithmToolkit.ARgorithmError:
pass
def test_variable():
"""Test Variable
"""
algo = ARgorithmToolkit.StateSet()
s = ARgorithmToolkit.Variable("s",algo,10)
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'variable_declare'
s.value += 1
assert s.value == 11
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'variable_highlight'
def test_comment_state():
"""Test comment state
"""
algo = ARgorithmToolkit.StateSet()
algo.add_comment("Hello world")
last_state = algo.states[-1]
assert last_state.content['state_type'] == 'comment'
assert last_state.content['comments'] == 'Hello world'
assert last_state.content['state_def'] is None
| StarcoderdataPython |
5036810 | # See LICENSE for licensing information.
#
# Copyright (c) 2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
from amaranth import Instance
from amaranth import tracer
from cache_signal import cache_signal
class sram_instance:
"""
This class instantiates Instance class of Amaranth library and holds OpenRAM
SRAM modules instances.
"""
def __init__(self, module_name, row_size, num_arrays, c, m):
# Find the declared name of this instance
array_name = tracer.get_var_name()
# Get "{short_name}_array"
short_name = array_name[:-6]
self.num_arrays = num_arrays
real_row_size = row_size // num_arrays
# Append signals to these lists
self.write_csb = []
self.write_addr = []
self.write_din = []
self.read_csb = []
self.read_addr = []
self.read_dout = []
for i in range(num_arrays):
# Write enable
self.write_csb.append(cache_signal(reset_less=True, reset=1, name="{0}_write_csb{1}".format(short_name, i)))
# Write address
self.write_addr.append(cache_signal(self.set_size, reset_less=True, name="{0}_write_addr{1}".format(short_name, i)))
# Write data
self.write_din.append(cache_signal(real_row_size, reset_less=True, name="{0}_write_din{1}".format(short_name, i)))
# Read enable
self.read_csb.append(cache_signal(reset_less=True, name="{0}_read_csb{1}".format(short_name, i)))
# Read address
self.read_addr.append(cache_signal(self.set_size, reset_less=True, name="{0}_read_addr{1}".format(short_name, i)))
# Read data
self.read_dout.append(cache_signal(real_row_size, name="{0}_read_dout{1}".format(short_name, i)))
# Add this instance to the design module
m.submodules += Instance(module_name,
("i", "clk0", c.clk),
("i", "csb0", self.write_csb[i]),
("i", "addr0", self.write_addr[i]),
("i", "din0", self.write_din[i]),
("i", "clk1", c.clk),
("i", "csb1", self.read_csb[i]),
("i", "addr1", self.read_addr[i]),
("o", "dout1", self.read_dout[i]),
)
# Keep the design module for later use
self.m = m
def input(self, way=0):
""" Return the input signal. """
return self.write_din[way]
def output(self, way=0):
""" Return the output signal. """
return self.read_dout[way]
def read(self, address):
""" Send a new read request to SRAM. """
# Read the same address from all arrays
for i in range(self.num_arrays):
self.m.d.comb += self.read_csb[i].eq(0)
self.m.d.comb += self.read_addr[i].eq(address)
def write(self, address, data, way=None):
""" Send a new write request to SRAM. """
# TODO: Use wmask feature of OpenRAM
# If no way is given, set all input data
if way is None:
for i in range(self.num_arrays):
self.m.d.comb += self.write_csb[i].eq(0)
self.m.d.comb += self.write_addr[i].eq(address)
self.m.d.comb += self.write_din[i].eq(self.read_dout[i])
self.m.d.comb += self.write_din[i].eq(data)
# If way is a signal, wrap it with case statements
elif isinstance(way, cache_signal):
with self.m.Switch(way):
for i in range(1 << way.width):
with self.m.Case(i):
if self.num_arrays > 1:
self.m.d.comb += self.write_csb[i].eq(0)
self.m.d.comb += self.write_addr[i].eq(address)
self.m.d.comb += self.write_din[i].eq(self.read_dout[i])
self.m.d.comb += self.write_din[i].eq(data)
else:
self.m.d.comb += self.write_csb[0].eq(0)
self.m.d.comb += self.write_addr[0].eq(address)
self.m.d.comb += self.write_din[0].eq(self.read_dout[0])
self.m.d.comb += self.write_din[0].way(i).eq(data)
# If way is a constant, calculate the way part of the signal
else:
if self.num_arrays > 1:
self.m.d.comb += self.write_csb[way].eq(0)
self.m.d.comb += self.write_addr[way].eq(address)
self.m.d.comb += self.write_din[way].eq(self.read_dout[way])
self.m.d.comb += self.write_din[way].eq(data)
else:
self.m.d.comb += self.write_csb[0].eq(0)
self.m.d.comb += self.write_addr[0].eq(address)
self.m.d.comb += self.write_din[0].eq(self.read_dout[0])
self.m.d.comb += self.write_din[0].way(way).eq(data)
def write_input(self, way, offset, data, wmask=None):
""" Add input data to write request to SRAM. """
# NOTE: These switch statements are written manually (not only with
# word_select) because word_select fails to generate correct case
# statements if offset calculation is a bit complex.
# If way is a signal, use case statements
if isinstance(way, cache_signal):
with self.m.Switch(way):
for way_idx in range(sram_instance.num_ways):
with self.m.Case(way_idx):
# Offset is used to find the word
with self.m.Switch(offset):
for word_idx in range(sram_instance.words_per_line):
with self.m.Case(word_idx):
# Write the word over the write mask
for mask_idx in range(sram_instance.num_masks):
with self.m.If(wmask[mask_idx]):
self.m.d.comb += self.write_din[way_idx].mask(mask_idx, word_idx).eq(data.mask(mask_idx))
if not sram_instance.num_masks:
self.m.d.comb += self.write_din[way_idx].word(word_idx).eq(data)
# If way is a constant, use it directly
else:
# Offset is used to find the word
with self.m.Switch(offset):
for word_idx in range(sram_instance.words_per_line):
with self.m.Case(word_idx):
# Write the word over the write mask
for mask_idx in range(sram_instance.num_masks):
with self.m.If(wmask[mask_idx]):
self.m.d.comb += self.write_din[way].mask(mask_idx, word_idx).eq(data.mask(mask_idx))
if not sram_instance.num_masks:
self.m.d.comb += self.write_din[way].word(word_idx).eq(data) | StarcoderdataPython |
9658826 | <filename>core/arxiv/submission/services/compiler/__init__.py
"""Integration with the compiler service API."""
from .compiler import Compiler, get_task_id, split_task_id, CompilationFailed
| StarcoderdataPython |
196651 | <reponame>locationlabs/ansible-action-plugins
from ansible.runner.return_data import ReturnData
class ActionModule(object):
def __init__(self, runner):
self.runner = runner
self.runner.run_once = True
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
return ReturnData(conn=conn,
comm_ok=True,
result=dict(failed=False, changed=False, msg="YOLO"))
| StarcoderdataPython |
11264786 | <gh_stars>10-100
from ray.rllib.env.remote_base_env import RemoteBaseEnv
from ray.rllib.utils.deprecation import deprecation_warning
deprecation_warning(
old="rllib.env.remote_base_env.RemoteVectorEnv",
new="ray.rllib.env.remote_base_env.RemoteBaseEnv",
error=False)
RemoteVectorEnv = RemoteBaseEnv
| StarcoderdataPython |
6434640 | <reponame>nijatrajab/fileshare<filename>fileshare/fileup/urls.py
from django.urls import path
from . import views
app_name = 'fileup'
urlpatterns = [
# path('', views.FileListView.as_view(), name='list'),
path('file/<user_id>/', views.user_file, name='filelist'),
path('upload/', views.FileUploadView.as_view(), name='upload'),
path('detail/<int:pk>/', views.FileDetailView.as_view(), name='detail'),
path('delete/<int:pk>/', views.FileDeleteView.as_view(), name='delete'),
path('delete-multi/', views.BulkDeleteView.as_view(), name='delete-multi'),
path('update/<int:pk>/', views.FileUpdateView.as_view(), name='update'),
path('shared/', views.FileSharedListView.as_view(), name='shared'),
path('share/<id>/', views.share_file, name='share'),
path('revoke/<id>/', views.revoke_access, name='revoke'),
# path('revoke/', views.ra, name='revoke'),
path('adminpage/', views.AdminPage.as_view(), name='adminpage'),
]
| StarcoderdataPython |
4828034 | <gh_stars>0
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql.types import DoubleType
from pyspark.sql.types import StructField
from pyspark.sql.types import StructType
from pyspark.sql.types import TimestampType
from pyspark.sql.functions import col
from pyspark.sql import DataFrame
from pyspark.sql.functions import window
from pyspark.sql.functions import expr
from pyspark.sql.functions import hour
from pyspark.sql.functions import date_trunc
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("beamformedFiltering") \
.getOrCreate()
# Create DataFrame representing the stream of CSVs
# We will define the schema based on the metadata
# The last 3 entries consist of the time in seconds from the start of the observation, the timestamp, and the timestamp with seconds and smaller time units dropped.
variableNames = ["V"+str(i) for i in range(0,960)]
beamformedFieldTypes = [StructField(v, DoubleType(), False) for v in variableNames]
beamformedFieldTypes.append(StructField("secondAfterMeasurement", DoubleType(), False))
beamformedFieldTypes.append(StructField("beamformedTimestamp", TimestampType(), False))
#beamformedFieldTypes.append(StructField("hourlyBeamformedTimestamp", TimestampType(), False))
beamformedSchema = StructType(beamformedFieldTypes)
beamformedDF = spark \
.readStream \
.option("sep", ",") \
.option("header", "true") \
.schema(beamformedSchema) \
.csv("/opt/spark-data/beamformed") \
.withWatermark("beamformedTimestamp", "5 seconds")
def foreach_write(df, epoch_id):
dataDF = df.select(variableNames).toPandas()
bfTimestamp = df.select("beamformedTimestamp").toPandas()
bfSecondsAfterMeasurement = df.select("secondAfterMeasurement").toPandas()
writeColumns = variableNames + ["beamformedTimestamp"]
median = dataDF.median()
scaledDF = dataDF.divide(median)
scaledDF["secondAfterMeasurement"] = bfSecondsAfterMeasurement
scaledDF["beamformedTimestamp"] = bfTimestamp
scaledDF = scaledDF.sort_values("secondAfterMeasurement")
scaledDF.to_csv("/opt/spark-results/median_scaled_data/scaled_data" + str(epoch_id) + ".csv", header=True, index=False, columns=writeColumns)
median.to_frame().T.to_csv("/opt/spark-results/medians/median" + str(epoch_id) + ".csv", header=True, index=False)
query = beamformedDF.writeStream.foreachBatch(foreach_write).start()
query.awaitTermination()
| StarcoderdataPython |
9772063 | from django.core.wsgi import get_wsgi_application
import wsgiserver
def start_server( address="0.0.0.0", port=8080):
"""
server = wsgiserver.WSGIServer(
(address, port), # Use '127.0.0.1' to only bind to the localhost
get_wsgi_application()
)
"""
server = wsgiserver.WSGIServer(get_wsgi_application(), host=address, port=port)
try:
server.start()
except KeyboardInterrupt:
print('Stopping server')
server.stop() | StarcoderdataPython |
9645300 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# title : templates.py
# description : Contains templates for different export types.
# author : <NAME>
# date : 2017.08.17
# version : 0.1
# ==================================================================
CSS = ""
HTML = u"<!DOCTYPE html><html lang='en'><head><meta charset='UTF-8'><meta name='viewport' content='width=device-width, initial-scale=1.0'><link href='https://fonts.googleapis.com/css?family=Montserrat:200,300,700' rel='stylesheet'><link rel='stylesheet' type='text/css' href='https://cdn.rawgit.com/synesenom/whiteprint/master/wp.min.css'><style>{}</style><title>{}</title></head><body><input type='checkbox' id='menu'><label for='menu' id='open'>☰</label><aside><div class='logo'>{}</div><nav><div>{}</div></nav></aside><main>{}<label for='menu' id='exit'></label></main></body></html>"
def html(name, menu, content, style=""):
return HTML.encode('utf-8').format(CSS+style, name, name, menu, content) | StarcoderdataPython |
35407 | from ..fuzzable import Fuzzable
class Simple(Fuzzable):
"""Simple bytes value with manually specified fuzz values only.
:type name: str, optional
:param name: Name, for referencing later. Names should always be provided, but if not, a default name will be given,
defaults to None
:type default_value: Raw, optional
:param default_value: Raw static data
:type fuzz_values: list, optional
:param fuzz_values: List of fuzz values, defaults to None. If empty, Simple is equivalent to Static.
:type fuzzable: bool, optional
:param fuzzable: Enable/disable fuzzing of this primitive, defaults to true
"""
def __init__(self, name=None, default_value=None, fuzz_values=None, *args, **kwargs):
super(Simple, self).__init__(name=name, default_value=default_value, fuzz_values=fuzz_values, *args, **kwargs)
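# Added illustrative usage (comment only; argument values are assumptions, not taken from the original docs):
#   Simple(name="opcode", default_value=b"\x01", fuzz_values=[b"\x00", b"\xff", b"\x7f"])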
| StarcoderdataPython |
1687334 | from dbArtistsBase import dbArtistsBase
from dbBase import dbBase
from artistMB import artistMB
from discogsUtils import musicbrainzUtils
import urllib
from urllib.parse import quote
from webUtils import getHTML
from fsUtils import isFile, setFile
from multiArtist import multiartist
from ioUtils import getFile
from time import sleep
from hashlib import md5
##################################################################################################################
# Base Class
##################################################################################################################
class dbArtistsMusicBrainz(dbArtistsBase):
def __init__(self, debug=False):
self.db = "MusicBrainz"
self.disc = dbBase(self.db.lower())
self.artist = artistMB(self.disc)
self.dutils = musicbrainzUtils()
self.debug = debug
self.baseURL = "https://musicbrainz.org/"
self.searchURL = "https://musicbrainz.org/search?"
super().__init__(self.db, self.disc, self.artist, self.dutils, debug=debug)
##################################################################################################################
# Artist URL
##################################################################################################################
def getArtistURL(self, artistRef, page=1):
baseURL = self.baseURL
url = urllib.parse.urljoin(baseURL, artistRef)
if isinstance(page, int) and page > 1:
pageURL = "?page={0}".format(page)
url = "{0}{1}".format(url, pageURL)
return url
##################################################################################################################
# Search Functions
##################################################################################################################
def parseSearchArtist(self, artist, data, force=False):
if data is None:
return None
## Parse data
bsdata = getHTML(data)
artistDB = {}
tables = bsdata.findAll("table")
for table in tables:
ths = table.findAll("th")
headers = [x.text for x in ths]
trs = table.findAll("tr")
for tr in trs[1:]:
tds = tr.findAll("td")
name = tds[0].find('a').text
href = tds[0].find('a').attrs['href']
if artistDB.get(href) is None:
artistDB[href] = {"N": 0, "Name": name}
artistDB[href]["N"] += 1
if self.debug:
print("Found {0} artists".format(len(artistDB)))
iArtist = 0
iDown = 0
for href, hrefData in artistDB.items():
if iDown > 20:
break
iArtist += 1
discID = self.dutils.getArtistID(href)
uuid = href.split('/')[-1]
m = md5()
for val in uuid.split("-"):
m.update(val.encode('utf-8'))
hashval = m.hexdigest()
discID = str(int(hashval, 16))
url = self.getArtistURL(href)
savename = self.getArtistSavename(discID)
print(iArtist,'/',len(artistDB),'\t:',discID,'\t',url)
if isFile(savename):
if force is False:
continue
iDown += 1
self.downloadArtistURL(url, savename, force=force)
def getSearchArtistURL(self, artist):
baseURL = self.baseURL
extra = "search?query={0}&type=artist&limit=100&method=indexed".format(quote(artist))
url = urllib.parse.urljoin(baseURL, extra)
return url
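    # For example, getSearchArtistURL("Nirvana") builds
    # "https://musicbrainz.org/search?query=Nirvana&type=artist&limit=100&method=indexed"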
def searchForArtist(self, artist, force=False):
print("\n\n===================== Searching For {0} =====================".format(artist))
url = self.getSearchArtistURL(artist)
if url is None:
raise ValueError("URL is None!")
## Download data
data, response = self.downloadURL(url)
if response != 200:
print("Error downloading {0}".format(url))
return False
self.parseSearchArtist(artist, data, force)
##################################################################################################################
# Extra Data
##################################################################################################################
def artistIgnoreList(self):
ignores = ["Downloads", "Various Artists"]
ignores += ["Glee", "Disney", "Sesame Street", "Nashville Cast"]
ignores += ["Various Artists", "Vários intérpretes", "Various Interprets"]
ignores += ["original score", "Downloads", "Glee Cast", "Sound Ideas", "Rain Sounds"]
ignores += ["101 Strings", "TBS RADIO 954kHz", "<NAME> ASOT Radio", "Piano Tribute Players"]
ignores += ["Yoga Music", "GTA San Andreas"]
return ignores
def assertDBModValExtraData(self, modVal, maxPages=None, test=True):
mulArts = multiartist()
print("assertDBModValExtraData(",modVal,")")
artistDBDir = self.disc.getArtistsDBDir()
dbname = setFile(artistDBDir, "{0}-DB.p".format(modVal))
dbdata = getFile(dbname)
nerrs = 0
ignores = self.artistIgnoreList()
for artistID,artistData in dbdata.items():
pages = artistData.pages
if pages.more is True:
npages = pages.pages
if maxPages is not None:
npages = min([npages, maxPages])
artistRef = artistData.url.url
print(artistID,'\t',artistData.artist.name)
if artistData.artist.name in ignores:
print("\tNot downloading artist in ignore list: {0}".format(artistData.artist.name))
continue
for p in range(2, npages+1):
url = self.getArtistURL(artistRef, p)
savename = self.getArtistSavename(artistID, p)
print(artistID,'\t',url,'\t',savename)
print("\t---> {0} / {1}".format(p, npages))
if test is True:
print("\t\tWill download: {0}".format(url))
print("\t\tJust testing... Will not download anything.")
continue
if not isFile(savename):
self.downloadArtistURL(url=url, savename=savename, force=True)
sleep(3) | StarcoderdataPython |
4829994 | <gh_stars>0
from coup import app
from coup.socketio_handlers import run_socketio
from coup.utils.argument_parsing import parse_args
if __name__ == "__main__":
parsed_args = parse_args()
keep_client_order = parsed_args["keep_client_order"]
run_socketio(app, '0.0.0.0', keep_client_order, parsed_args) | StarcoderdataPython |
5075741 | <gh_stars>100-1000
class FrameBuffer:
pass
| StarcoderdataPython |
1832033 | from indra.java_vm import autoclass
from indra.biopax import biopax_api
from indra.pysb_assembler import PysbAssembler
def test_hyphenated_agent_names():
"""This query should contain reactions with agent names RAF1-BRAF,
which need to be canonicalized to Python-compatible names before
model assembly."""
bp = biopax_api.process_pc_neighborhood(['BRAF'])
bp.get_phosphorylation()
pa = PysbAssembler()
pa.add_statements(bp.statements)
pa.make_model()
def test_paxtools_autoclass():
autoclass('org.biopax.paxtools.impl.level3.ProteinImpl')
def test_biopaxpattern_autoclass():
autoclass('org.biopax.paxtools.pattern.PatternBox')
def test_cpath_autoclass():
autoclass('cpath.client.CPathClient')
| StarcoderdataPython |
1784700 | <reponame>Rmond/OperMge
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-19 17:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hd_ansible', '0003_software'),
]
operations = [
migrations.AddField(
model_name='sidinfo',
name='port',
field=models.CharField(default='', max_length=6),
preserve_default=False,
),
migrations.AddField(
model_name='sidinfo',
name='socketpath',
field=models.CharField(default='', max_length=64),
preserve_default=False,
),
]
| StarcoderdataPython |
6700238 | from datetime import date
anoNascimento = int(input('Enter the year you were born: '))
atual = date.today().year
idade = atual - anoNascimento
print('==='*25)
if idade >= 18:
    print('you can enlist')
elif idade < 18:
    print('no need to enlist')
print('==='*25)
| StarcoderdataPython |
1763165 | <reponame>Nael-Nathanael/holocn-graduation<gh_stars>10-100
"""
E2E test for whether API messages from Backend and DOM messages displayed
on Frontend matches.
Checks native messages, JP messages, flag emojis, and username.
Exercises the following UI elements:
- Full site translate botan on NavBar
- Loading and displaying cards on the entire frontpage
"""
# Python Standard libs
import argparse
from pprint import pprint
import re
from typing import Dict
# 3rd party libs
from deepdiff import DeepDiff
import requests
from selenium import webdriver
NUM_ERRORS = 0
REGEX_N = re.compile(r"((\\n)|\s){1,5}")
MSG_CARD_HEAD = '//div[@id="root"]/main/section/div/div[5]/div/div/div['
MSG_CARD_TAIL = (
']/div/div/div/div[@class="message-card-text active-message"]/div')
FLAG_TAIL = ']/div/div/div[2]/span/img'
TRANSLATE_BOTAN = '//*[@id="root"]/header[1]/div[2]/button'
# TODO: Replace TEST_DIMENSIONS with "region" once aloe site uses region in db
REGION = "country"
TEST_DIMENSIONS = [REGION, "orig_msg", "tl_msg", "username"]
def test_messages(args):
driver = webdriver.Chrome(args.chromedriver_path)
driver.implicitly_wait(5)
driver.get(f"http://{args.website_url}")
api = requests.get(f"http://{args.backend_url}/api/messages")
api_json = api.json()
# Determines whether fields in the FE and BE are the same.
# Skips UI testing early if there's possible issues here.
api_keys = list(api_json["messages"][0].keys())
# messageID not used on the frontend
api_keys.pop(api_keys.index("messageID"))
api_dom_keys_diff = DeepDiff(
api_keys,
TEST_DIMENSIONS,
ignore_order=True)
assert not api_dom_keys_diff, (
f"Diff between backend and frontend keys {api_dom_keys_diff}")
driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
DOM_dict = get_message_cards_data(driver, api_json)
for message in api_json["messages"]:
message["tl_msg"] = unescape(message["tl_msg"])
message["orig_msg"] = unescape(message["orig_msg"])
global NUM_ERRORS
for message_index, dom_message in DOM_dict.items():
for dimension in TEST_DIMENSIONS:
diff = DeepDiff(
api_json["messages"][message_index][dimension],
dom_message[dimension])
if diff:
print(f"Diff detected in front end index {message_index + 1}")
pprint(diff)
NUM_ERRORS += 1
def get_native_messages(
driver, api_json, front_end_index, message_index) -> str:
native_msg_xpath = "{}{}{}".format(
MSG_CARD_HEAD,
front_end_index,
MSG_CARD_TAIL)
driver.execute_script(
"arguments[0].scrollIntoView();",
driver.find_element_by_xpath(native_msg_xpath))
while True:
if driver.find_element_by_xpath(native_msg_xpath).is_displayed():
return driver.find_element_by_xpath(native_msg_xpath).text
else:
raise ValueError(
"Some operation has gone wrong, front_end_index "
f"{front_end_index} for native msg does not exist on page")
def get_jp_messages(
driver, api_json, front_end_index, message_index) -> str:
jp_msg_xpath = "{}{}{}".format(
MSG_CARD_HEAD,
front_end_index,
MSG_CARD_TAIL)
if api_json["messages"][message_index]["tl_msg"]:
driver.execute_script(
"arguments[0].scrollIntoView();",
driver.find_element_by_xpath(jp_msg_xpath))
while True:
if driver.find_element_by_xpath(jp_msg_xpath).is_displayed():
return driver.find_element_by_xpath(jp_msg_xpath).text
return ""
def get_message_flags(
driver, api_json, front_end_index, message_index) -> str:
flag_xpath = "{}{}{}".format(
MSG_CARD_HEAD,
front_end_index,
FLAG_TAIL
)
if api_json["messages"][message_index][REGION]:
driver.execute_script(
"arguments[0].scrollIntoView();",
driver.find_element_by_xpath(flag_xpath))
while True:
if driver.find_element_by_xpath(flag_xpath).is_displayed():
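                # The alt text holds regional-indicator emoji; shifting each code point
                # down by 0x1F1A5 maps U+1F1E6..U+1F1FF back to ASCII 'A'..'Z', i.e. the
                # two-letter country code.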
return ("".join(
map(lambda x: chr(
ord(x) - 0x1F1A5), driver.find_element_by_xpath(
flag_xpath).get_attribute("alt"))))
return ""
def get_message_username(
driver, api_json, message_index) -> str:
return driver.find_elements_by_class_name(
"message-card-footer")[message_index].text
def get_message_cards_data(driver, api_json) -> Dict:
dom_dict = {}
for message in api_json["messages"]:
message_index = api_json["messages"].index(message)
front_end_index = message_index + 1
current_dict = {
REGION: get_message_flags(
driver, api_json, front_end_index, message_index),
"orig_msg": unescape(
get_native_messages(
driver, api_json, front_end_index, message_index)),
"tl_msg": "", # Populate after page completely Japanese
"username": get_message_username(
driver, api_json, message_index)
}
dom_dict[message_index] = current_dict
# TRANSLATE BOTAN GO!
driver.find_element_by_xpath(TRANSLATE_BOTAN).click()
# Not part of the original loop to save time since all cards are
# translated using the site-wide translate botan
for message in api_json["messages"]:
message_index = api_json["messages"].index(message)
front_end_index = message_index + 1
dom_dict[message_index]['tl_msg'] = unescape(
get_jp_messages(
driver, api_json, front_end_index, message_index))
return dom_dict
def unescape(in_str):
"""Unicode-unescape string with only some characters escaped."""
# ideographic space skip, it doesn't work for some reason :(
in_str = in_str.replace('\\u3000', " ")
in_str = REGEX_N.sub(" ", in_str)
in_str = in_str.encode('unicode-escape') # bytes with all chars escaped (the original escapes have the backslash escaped)
in_str = in_str.replace(b'\\\\u', b'\\u') # unescape the \
in_str = in_str.decode('unicode-escape') # unescape unicode
in_str = in_str.rstrip()
return in_str
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Arg parser for chromedriver path and website '
'url.')
parser.add_argument(
'--chromedriver_path', '-c', dest='chromedriver_path',
default='/chromedriver.exe',
help='the path to your chromedriver')
parser.add_argument(
'--website_url', '-w', dest='website_url',
default='manotomo.tk',
help='the website that this test will use, do not include '
'a slash at the end, for example manotomo.tk is '
'good, but manotomo.tk/ or '
'manotomo.tk/api/messages is bad.'
)
parser.add_argument(
'--backend_url', '-b', dest='backend_url',
default='manotomo.tk',
help='URL for the backend. Used for local testing.'
)
args = parser.parse_args()
test_messages(args)
print(NUM_ERRORS)
| StarcoderdataPython |
9614723 | <filename>python-pipelines/setup.py
import setuptools
setuptools.setup(
author="pabs",
author_email="<EMAIL>",
name="pipeline-xlang",
url="https://github.com/prodriguezdefino/dataflow-xlang-pipelines",
packages=setuptools.find_packages()) | StarcoderdataPython |
1700875 | <gh_stars>1000+
from .engine import Event, EventEngine, EVENT_TIMER
| StarcoderdataPython |
1681915 | <filename>tests/gsl/pdf_dirichlet.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME>
# orthologue
# (c) 1998-2019 all rights reserved
#
"""
Exercise the dirichlet pdf
"""
def test():
# math
from math import pi, sqrt, exp
# access the package
import gsl
# build a random number generator
rng = gsl.rng()
# the order of the distribution
K = 10
# the weight vectors
α = gsl.vector(shape=K).fill(K**(-1/2))
    # build a dirichlet distribution
dirichlet = gsl.pdf.dirichlet(alpha=α, rng=rng)
# make a vector
v = gsl.vector(shape=K)
# fill it with random numbers
v.random(pdf=dirichlet)
# make a matrix
m = gsl.matrix(shape=(50, K))
# fill it with random numbers
m.random(pdf=dirichlet)
return dirichlet
# main
if __name__ == "__main__":
test()
# end of file
| StarcoderdataPython |
247114 | # EXAMPLE: READING THE CURRENT ENCODER SHAFT POSITION.
# Once the exact shaft position is known, working with the
# module is much like working with a potentiometer.
from time import sleep
# Import the library for the I2C-flash encoder module.
from pyiArduinoI2Cencoder import *
# Instantiate the object, specifying the module's address on the I2C bus.
enc = pyiArduinoI2Cencoder(0x09)
# Configure counting over 2 full revolutions.
enc.setPosSettings(2)
print("Rotate the shaft!")
while True:
    p = enc.getPosition()
    # Print the current encoder shaft position.
print(p)
sleep(.5)
| StarcoderdataPython |
1693307 | <reponame>gregaw/mapel
import numpy as np
from mapel.main.embedding.kamada_kawai.energy_functions import get_energy_dx, get_energy_dy, get_energy_dx_dx, \
get_energy_dx_dy, get_energy_dy_dx, get_energy_dy_dy
def optimize_bb(func, grad_func, args, x0, max_iter, init_step_size, stop_energy_val=None,
max_iter_without_improvement=8000):
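    # Barzilai-Borwein gradient descent: after the first iteration the step size is
    # ||s||^2 / |s . y| with s = x_k - x_{k-1} and y = grad_k - grad_{k-1}, a cheap
    # quasi-Newton-style scaling computed per coordinate column below.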
if isinstance(init_step_size, float):
init_step_size = [init_step_size, init_step_size]
init_step_size = np.asarray(init_step_size)
is_2d = len(x0.shape) == 2
prev_x = x0.copy()
x = x0.copy()
prev_grad = grad_func(prev_x, *args)
min_energy = 1e15
min_energy_snap = x0.copy()
min_energy_iter = 0
for i in range(max_iter):
current_energy = func(x, *args)
if current_energy < min_energy:
min_energy = current_energy
min_energy_snap = x.copy()
min_energy_iter = i
elif i - min_energy_iter > max_iter_without_improvement:
return min_energy_snap
print(f'Energy: {current_energy}: {min_energy}, grad norm: {np.linalg.norm(prev_grad)} {i}')
if stop_energy_val is not None and current_energy < stop_energy_val:
return min_energy_snap
s = x - prev_x
g = grad_func(x, *args)
y = g - prev_grad
if i > 0:
denominator = abs(np.tensordot(s, y, [0, 0]))
if is_2d:
denominator = denominator.diagonal()
step_size = np.linalg.norm(s, axis=0) ** 2 / denominator
else:
step_size = init_step_size
prev_grad = g
prev_x = x
x = x - step_size * g
return min_energy_snap
def _get_delta_energy(positions, k, l, x, y):
return np.sqrt(get_energy_dx(x, y, k, l, positions) ** 2 + get_energy_dy(x, y, k, l, positions) ** 2)
def _get_pos_k_l_x_y_for_i(positions, k, l, i):
my_k = np.delete(k[i, :], i)
my_l = np.delete(l[i, :], i)
my_positions = np.delete(positions, i, axis=0)
my_x = positions[i, 0]
my_y = positions[i, 1]
return my_positions, my_k, my_l, my_x, my_y
def _optimize_newton(positions, k, l, i, eps=1e-10):
positions, k, l, x, y = _get_pos_k_l_x_y_for_i(positions, k, l, i)
delta = _get_delta_energy(positions, k, l, x, y)
i = 0
while delta > eps:
a1 = get_energy_dx_dx(x, y, k, l, positions)
b1 = get_energy_dx_dy(x, y, k, l, positions)
c1 = -get_energy_dx(x, y, k, l, positions)
a2 = get_energy_dy_dx(x, y, k, l, positions)
b2 = get_energy_dy_dy(x, y, k, l, positions)
c2 = -get_energy_dy(x, y, k, l, positions)
dx, dy = np.linalg.solve([[a1, b1], [a2, b2]], [c1, c2])
x += dx
y += dy
if i > 1e4:
return (x, y), False
delta = _get_delta_energy(positions, k, l, x, y)
i += 1
return (x, y), True
def adam(
fun,
jac,
x0,
args=(),
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
eps=1e-8,
startiter=0,
maxiter=1000,
callback=None,
**kwargs
):
"""``scipy.optimize.minimize`` compatible implementation of ADAM -
[http://arxiv.org/pdf/1412.6980.pdf].
Adapted from ``autograd/misc/optimizers.py``.
"""
x = x0
m = np.zeros_like(x)
v = np.zeros_like(x)
best_energy = fun(x, *args), x
for i in range(startiter, startiter + maxiter):
energy = fun(x, *args)
if energy < best_energy[0]:
best_energy = energy, np.copy(x)
g = jac(x, *args)
if callback and callback(x):
break
m = (1 - beta1) * g + beta1 * m # first moment estimate.
v = (1 - beta2) * (g ** 2) + beta2 * v # second moment estimate.
mhat = m / (1 - beta1 ** (i + 1)) # bias correction.
vhat = v / (1 - beta2 ** (i + 1))
x = x - learning_rate * mhat / (np.sqrt(vhat) + eps)
if i == (startiter + maxiter) // 2:
learning_rate /= 2
return best_energy[1]
| StarcoderdataPython |
4969480 | # --------------------------------------------------------
# Model from official source: https://github.com/ShoufaChen/CycleMLP
# --------------------------------------------------------
import math
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.features import FeatureInfo
from timm.models.layers import DropPath, trunc_normal_
from timm.models.layers.helpers import to_2tuple
from timm.models.registry import register_model
from torch import Tensor
from torch.nn import init
from torch.nn.modules.utils import _pair
from torchvision.ops.deform_conv import deform_conv2d as deform_conv2d_tv
from .helpers import timmextension_build_model_with_cfg
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'input_size': (3, 224, 224),
'pool_size': None,
'crop_pct': .96,
'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'classifier': 'head',
**kwargs
}
default_cfgs = {
'cyclemlp_b1':
_cfg(
url=
'https://github.com/ShoufaChen/CycleMLP/releases/download/v0.1/CycleMLP_B1.pth', # noqa
crop_pct=0.9),
'cyclemlp_b2':
_cfg(
url=
'https://github.com/ShoufaChen/CycleMLP/releases/download/v0.1/CycleMLP_B2.pth', # noqa
crop_pct=0.9),
'cyclemlp_b3':
_cfg(
url=
'https://github.com/ShoufaChen/CycleMLP/releases/download/v0.1/CycleMLP_B3.pth', # noqa
crop_pct=0.9),
'cyclemlp_b4':
_cfg(
url=
'https://github.com/ShoufaChen/CycleMLP/releases/download/v0.1/CycleMLP_B4.pth', # noqa
crop_pct=0.875),
'cyclemlp_b5':
_cfg(
url=
'https://github.com/ShoufaChen/CycleMLP/releases/download/v0.1/CycleMLP_B5.pth', # noqa
crop_pct=0.875),
}
class Mlp(nn.Module):
def __init__(self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class CycleFC(nn.Module):
""""""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = True,
):
super(CycleFC, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
if stride != 1:
raise ValueError('stride must be 1')
if padding != 0:
raise ValueError('padding must be 0')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.weight = nn.Parameter(
torch.empty(out_channels, in_channels // groups, 1,
1)) # kernel size == 1
if bias:
self.bias = nn.Parameter(torch.empty(out_channels))
else:
self.register_parameter('bias', None)
self.register_buffer('offset', self.gen_offset())
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def gen_offset(self):
"""offset (
Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width,
out_height, out_width]): offsets to be applied for each position in the
convolution kernel.
"""
offset = torch.empty(1, self.in_channels * 2, 1, 1)
start_idx = (self.kernel_size[0] * self.kernel_size[1]) // 2
assert self.kernel_size[0] == 1 or self.kernel_size[
1] == 1, self.kernel_size
for i in range(self.in_channels):
if self.kernel_size[0] == 1:
offset[0, 2 * i + 0, 0, 0] = 0
offset[0, 2 * i + 1, 0,
0] = (i + start_idx) % self.kernel_size[1] - (
self.kernel_size[1] // 2)
else:
offset[0, 2 * i + 0, 0,
0] = (i + start_idx) % self.kernel_size[0] - (
self.kernel_size[0] // 2)
offset[0, 2 * i + 1, 0, 0] = 0
return offset
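    # Illustration: with kernel_size=(1, 3), start_idx is 1, so the per-channel offsets
    # along the width cycle through 0, 1, -1, 0, ... while the offset along the height
    # stays 0 - each channel samples a different horizontal neighbour.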
def forward(self, input: Tensor) -> Tensor:
"""
Args:
input (Tensor[batch_size, in_channels, in_height, in_width]):
input tensor
"""
B, C, H, W = input.size()
return deform_conv2d_tv(input,
self.offset.expand(B, -1, H, W),
self.weight,
self.bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation)
def extra_repr(self) -> str:
s = self.__class__.__name__ + '('
s += '{in_channels}'
s += ', {out_channels}'
s += ', kernel_size={kernel_size}'
s += ', stride={stride}'
s += ', padding={padding}' if self.padding != (0, 0) else ''
s += ', dilation={dilation}' if self.dilation != (1, 1) else ''
s += ', groups={groups}' if self.groups != 1 else ''
s += ', bias=False' if self.bias is None else ''
s += ')'
return s.format(**self.__dict__)
class CycleMLP(nn.Module):
def __init__(self,
dim,
qkv_bias=False,
qk_scale=None,
attn_drop=0.,
proj_drop=0.):
super().__init__()
self.mlp_c = nn.Linear(dim, dim, bias=qkv_bias)
self.sfc_h = CycleFC(dim, dim, (1, 3), 1, 0)
self.sfc_w = CycleFC(dim, dim, (3, 1), 1, 0)
self.reweight = Mlp(dim, dim // 4, dim * 3)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, H, W, C = x.shape
h = self.sfc_h(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
w = self.sfc_w(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
c = self.mlp_c(x)
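        # Fuse the two spatially shifted CycleFC branches and the channel MLP branch
        # using per-channel weights predicted from their globally pooled sum.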
a = (h + w + c).permute(0, 3, 1, 2).flatten(2).mean(2)
a = self.reweight(a).reshape(B, C, 3).permute(
2, 0, 1).softmax(dim=0).unsqueeze(2).unsqueeze(2)
x = h * a[0] + w * a[1] + c * a[2]
x = self.proj(x)
x = self.proj_drop(x)
return x
class CycleBlock(nn.Module):
def __init__(self,
dim,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
skip_lam=1.0,
mlp_fn=CycleMLP):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = mlp_fn(dim,
qkv_bias=qkv_bias,
qk_scale=None,
attn_drop=attn_drop)
# NOTE: drop path for stochastic depth,
# we shall see if this is better than dropout here
self.drop_path = DropPath(
drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer)
self.skip_lam = skip_lam
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x))) / self.skip_lam
x = x + self.drop_path(self.mlp(self.norm2(x))) / self.skip_lam
return x
class PatchEmbedOverlapping(nn.Module):
"""2D Image to Patch Embedding with overlapping."""
def __init__(self,
patch_size=16,
stride=16,
padding=0,
in_chans=3,
embed_dim=768,
norm_layer=None,
groups=1):
super().__init__()
patch_size = to_2tuple(patch_size)
stride = to_2tuple(stride)
padding = to_2tuple(padding)
self.patch_size = patch_size
# remove image_size in model init to support dynamic image size
self.proj = nn.Conv2d(in_chans,
embed_dim,
kernel_size=patch_size,
stride=stride,
padding=padding,
groups=groups)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
x = self.proj(x)
return x
class Downsample(nn.Module):
"""Downsample transition stage."""
def __init__(self, in_embed_dim, out_embed_dim, patch_size):
super().__init__()
assert patch_size == 2, patch_size
self.proj = nn.Conv2d(in_embed_dim,
out_embed_dim,
kernel_size=(3, 3),
stride=(2, 2),
padding=1)
def forward(self, x):
x = x.permute(0, 3, 1, 2)
x = self.proj(x) # B, C, H, W
x = x.permute(0, 2, 3, 1)
return x
def basic_blocks(dim,
index,
layers,
mlp_ratio=3.,
qkv_bias=False,
qk_scale=None,
attn_drop=0.,
drop_path_rate=0.,
skip_lam=1.0,
mlp_fn=CycleMLP,
**kwargs):
blocks = []
for block_idx in range(layers[index]):
block_dpr = drop_path_rate * (
block_idx + # noqa
sum(layers[:index])) / (sum(layers) - 1)
blocks.append(
CycleBlock(dim,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
drop_path=block_dpr,
skip_lam=skip_lam,
mlp_fn=mlp_fn))
blocks = nn.Sequential(*blocks)
return blocks
class CycleNet(nn.Module):
"""CycleMLP Network."""
def __init__(self,
layers,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dims=None,
transitions=None,
segment_dim=None,
mlp_ratios=None,
skip_lam=1.0,
qkv_bias=False,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=nn.LayerNorm,
mlp_fn=CycleMLP,
features_only=False,
out_indices=(0, 1, 2, 3)):
super().__init__()
if not features_only:
self.num_classes = num_classes
self.features_only = features_only
self.feature_info_list = [
{
'num_chs': embed_dims[0],
'reduction': 4,
'module': None
},
{
'num_chs': embed_dims[1],
'reduction': 8,
'module': None
},
{
'num_chs': embed_dims[2],
'reduction': 16,
'module': None
},
{
'num_chs': embed_dims[3],
'reduction': 32,
'module': None
},
]
self.feature_info = FeatureInfo(self.feature_info_list, out_indices)
self.out_indices_mapper = {
0: 0,
1: 2,
2: 4,
3: 6,
}
self.out_indices = [self.out_indices_mapper[o] for o in out_indices]
self.patch_embed = PatchEmbedOverlapping(patch_size=7,
stride=4,
padding=2,
in_chans=3,
embed_dim=embed_dims[0])
network = []
for i in range(len(layers)):
stage = basic_blocks(embed_dims[i],
i,
layers,
mlp_ratio=mlp_ratios[i],
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop_rate,
drop_path_rate=drop_path_rate,
norm_layer=norm_layer,
skip_lam=skip_lam,
mlp_fn=mlp_fn)
network.append(stage)
if i >= len(layers) - 1:
break
if transitions[i] or embed_dims[i] != embed_dims[i + 1]:
patch_size = 2 if transitions[i] else 1
network.append(
Downsample(embed_dims[i], embed_dims[i + 1], patch_size))
self.network = nn.ModuleList(network)
self.norm = norm_layer(embed_dims[-1])
self.head = nn.Linear(
embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
self.apply(self.cls_init_weights)
def cls_init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, CycleFC):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def set_features_only(self, out_indices):
self.features_only = True
self.out_indices = [self.out_indices_mapper[o] for o in out_indices]
self.feature_info = FeatureInfo(self.feature_info_list, out_indices)
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(
self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_embeddings(self, x):
x = self.patch_embed(x)
# B,C,H,W-> B,H,W,C
x = x.permute(0, 2, 3, 1)
return x
def forward_tokens(self, x):
outs = []
for idx, block in enumerate(self.network):
x = block(x)
if self.features_only and idx in self.out_indices:
outs.append(x.permute(0, 3, 1, 2).contiguous())
if self.features_only:
return outs
B, H, W, C = x.shape
x = x.reshape(B, -1, C)
return x
def forward_features(self, x):
x = self.forward_embeddings(x)
# B, H, W, C -> B, N, C
x = self.forward_tokens(x)
if self.features_only:
return x
x = self.norm(x)
x = x.mean(1)
return x
def forward(self, x):
x = self.forward_features(x)
if self.features_only:
return x
cls_out = self.head(x)
return cls_out
def _filter_fn(state_dict):
state_dict = state_dict['model']
return state_dict
def _cyclemlp(arch, pretrained, **kwargs):
model = timmextension_build_model_with_cfg(CycleNet,
arch,
pretrained=pretrained,
default_cfg=default_cfgs[arch],
pretrained_filter_fn=_filter_fn,
**kwargs)
return model
@register_model
def cyclemlp_b1(pretrained=False, **kwargs):
transitions = [True, True, True, True]
layers = [2, 2, 4, 2]
mlp_ratios = [4, 4, 4, 4]
embed_dims = [64, 128, 320, 512]
model = _cyclemlp('cyclemlp_b1',
pretrained,
layers=layers,
embed_dims=embed_dims,
patch_size=7,
transitions=transitions,
mlp_ratios=mlp_ratios,
mlp_fn=CycleMLP,
**kwargs)
return model
@register_model
def cyclemlp_b2(pretrained=False, **kwargs):
transitions = [True, True, True, True]
layers = [2, 3, 10, 3]
mlp_ratios = [4, 4, 4, 4]
embed_dims = [64, 128, 320, 512]
model = _cyclemlp('cyclemlp_b2',
pretrained,
layers=layers,
embed_dims=embed_dims,
patch_size=7,
transitions=transitions,
mlp_ratios=mlp_ratios,
mlp_fn=CycleMLP,
**kwargs)
return model
@register_model
def cyclemlp_b3(pretrained=False, **kwargs):
transitions = [True, True, True, True]
layers = [3, 4, 18, 3]
mlp_ratios = [8, 8, 4, 4]
embed_dims = [64, 128, 320, 512]
model = _cyclemlp('cyclemlp_b3',
pretrained,
layers=layers,
embed_dims=embed_dims,
patch_size=7,
transitions=transitions,
mlp_ratios=mlp_ratios,
mlp_fn=CycleMLP,
**kwargs)
return model
@register_model
def cyclemlp_b4(pretrained=False, **kwargs):
transitions = [True, True, True, True]
layers = [3, 8, 27, 3]
mlp_ratios = [8, 8, 4, 4]
embed_dims = [64, 128, 320, 512]
model = _cyclemlp('cyclemlp_b4',
pretrained,
layers=layers,
embed_dims=embed_dims,
patch_size=7,
transitions=transitions,
mlp_ratios=mlp_ratios,
mlp_fn=CycleMLP,
**kwargs)
return model
@register_model
def cyclemlp_b5(pretrained=False, **kwargs):
transitions = [True, True, True, True]
layers = [3, 4, 24, 3]
mlp_ratios = [4, 4, 4, 4]
embed_dims = [96, 192, 384, 768]
model = _cyclemlp('cyclemlp_b5',
pretrained,
layers=layers,
embed_dims=embed_dims,
patch_size=7,
transitions=transitions,
mlp_ratios=mlp_ratios,
mlp_fn=CycleMLP,
**kwargs)
return model
| StarcoderdataPython |
5178603 | from invoke import task
@task
def deploy(ctx):
ctx.run(
"gcloud functions deploy process_crash_reports --runtime python37 --project decoded-cove-239422 --trigger-http --allow-unauthenticated"
)
| StarcoderdataPython |
11345446 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 9 13:01:20 2021
@author: <NAME>
"""
class SpiceHandler:
import os
import subprocess
import time
import re
from Components import Component, Component2Pin, Capacitor, Resistor, BJT
measVRMS = ".meas %s+VRMS RMS V"
measVMAX = ".meas %s+VMAX MAX V"
measIRMS = ".meas %s+IRMS RMS I"
measIMAX = ".meas %s+IMAX MAX I"
measPAVG = ".meas %s+PAVG AVG V"
measPMAX = ".meas %s+PMAX MAX V"
regexMeasName = ".+(?=:)"
regexMeasValue = "(?<==).+?(?= )" #(?<==).+[^ ](?= )
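    # Sketch of how these are used (values illustrative): measVRMS % "R5" yields
    # ".meas R5+VRMS RMS V"; the Component helpers append the node expression, and
    # regexMeasName / regexMeasValue later pull the name and value out of LTspice
    # log lines such as "r5+vrms: RMS(v(n001,n002))=0.651136 FROM 0 TO 0.01".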
def __init__(self, exeLocation, exeName, fileName):
if(fileName[-4:] == ".asc" ):
self.fileName = fileName
self.exeLocation = exeLocation
self.exeName = exeName
        else:
            raise ValueError("Error in file extension: expected a .asc file, got " + fileName)
self.listOfComponents = []
self.__createNetFile()
self.__createTxtFile()
        self.__clearUnnecessaryFiles()
# create a initial call for asc file to auto create .net file.
# Wait a little bit, then, close LTSpice
def __createNetFile(self):
self.subprocess.Popen(self.exeLocation + self.exeName + " -Run " + self.fileName)
self.time.sleep(2)
self.os.system("taskkill /im " + self.exeName + " /F" )
# Create txt file using .net file
def __createTxtFile(self):
netFileName = self.fileName[:-3] + "net"
with open(netFileName, 'rb') as file: # Read in the file as binary data
self.spiceData = file.read().decode("ascii")
file.close()
txtFileName = self.fileName[:-3] + "txt"
with open(txtFileName, 'wb') as file: # Write a file as binary data
file.write(self.spiceData.encode("ascii"))
file.close()
print(txtFileName + " created")
# Delete by-product files
    def __clearUnnecessaryFiles(self):
extensions = ["log", "op.raw", "raw"]
for e in extensions:
toDelete = self.fileName[:-3] + e
if self.os.path.exists(toDelete):
self.time.sleep(1)
self.os.remove(toDelete)
else:
print("File does not exist")
def getComponentList(self):
spiceLines = self.spiceData.splitlines()
spiceComponentLines = []
for l in spiceLines:
if(self.re.match("^[a-jA-Jl-zL-Z]", l)):
spiceComponentLines.append(l)
print("Number of all components: " + str(len(spiceComponentLines)))
#individual list of components
listResistors = []
listCapacitors = []
listBJTs = []
#seperate components into their own lists
for l in spiceComponentLines:
atoms = l.upper().split()
if(atoms[0].startswith("R")):
res = self.Resistor(atoms[0], atoms[1], atoms[2], atoms[3])
listResistors.append(res)
if(atoms[0].startswith("C")):
cap = self.Capacitor(atoms[0], atoms[1], atoms[2], atoms[3])
listCapacitors.append(cap)
if(atoms[0].startswith("Q")):
bjt = self.BJT(atoms[0], atoms[1], atoms[2], atoms[3], atoms[5])
listBJTs.append(bjt)
print("Number of resistors: " + str(len(listResistors)))
print("Number of capacitors: " + str(len(listCapacitors)))
print("Number of BJTs: " + str(len(listBJTs)))
self.listOfComponents.append(listResistors)
self.listOfComponents.append(listCapacitors)
self.listOfComponents.append(listBJTs)
return self.listOfComponents
def generateMeasCommand(self):
resistors = self.listOfComponents[0]
capacitors = self.listOfComponents[1]
bjts = self.listOfComponents[2]
measCMD = ""
for r in resistors:
measCMD += r.createMeasCommandVol(self.measVRMS)
measCMD += r.createMeasCommandVol(self.measVMAX)
measCMD += r.createMeasCommandCur(self.measIRMS)
measCMD += r.createMeasCommandCur(self.measIMAX)
measCMD += r.createMeasCommandPow(self.measPAVG)
measCMD += r.createMeasCommandPow(self.measPMAX)
for c in capacitors:
measCMD += c.createMeasCommandVol(self.measVRMS)
measCMD += c.createMeasCommandVol(self.measVMAX)
measCMD += c.createMeasCommandCur(self.measIRMS)
measCMD += c.createMeasCommandCur(self.measIMAX)
for b in bjts:
measCMD += b.createMeasCommandVol(self.measVRMS)
measCMD += b.createMeasCommandVol(self.measVMAX)
measCMD += b.createMeasCommandCur(self.measIRMS, "Q")
measCMD += b.createMeasCommandCur(self.measIMAX, "Q")
return measCMD
def createNewSimFile(self, measCommand):
splitData = self.spiceData.split(".backanno")
endOfFile = ".backanno \n.end"
endFile = splitData[0] + measCommand + endOfFile
self.measFileName = self.fileName[:-4] + "_meas.txt"
with open(self.measFileName, 'wb') as file: # Write a file as binary data
file.write(endFile.encode('ascii'))
file.close()
return self.measFileName
def runSimulation(self, name):
self.subprocess.call(self.exeLocation + self.exeName + " -b " + name)
def getSimulationResults(self):
name = self.measFileName[:-3] + "log"
with open(name, 'rb') as file: # Read in the file as binary data
measurements = file.read().decode('ascii')
file.close()
measurementLines = measurements.splitlines()[4:-19] #should check if all applies
for l in measurementLines:
measName = self.re.search(self.regexMeasName, l).group() #r5+vrms
mn = measName[:-5].upper().split("+") #R5
name = mn[0] #R5
measType = measName[-4:].upper() #VRMS
valueText = self.re.search(self.regexMeasValue, l).group() #0.651136
value = float(valueText)
for li in self.listOfComponents:
for co in li:
if(co.name == name):
if (co.numberOfNodes == 2):
co.addMeasurement("P1P2", measType, value)
elif(co.numberOfNodes == 3):
co.addMeasurement(measType[0]+mn[1], measType, value)
return self.listOfComponents
| StarcoderdataPython |
6614761 | <filename>strategies/bayes.py
import os
import math
import dbh_util as util
from sklearn.naive_bayes import GaussianNB
def predict(classifier, test, args, sargs_str, threshold=None):
preds = classifier.predict(test[0])
if threshold is not None:
preds = [1 if x >= threshold else 0 for x in preds]
return preds
def learn(train, dev, test, args, sargs_str):
return util.sklearn_wrapper(train, dev, test, GaussianNB()) | StarcoderdataPython |
5056519 | <filename>libs/coda/io/codec.py
'''Base classes for CODA codecs.'''
from abc import ABCMeta, abstractmethod
class EncodingError(Exception):
'''Base class for exceptions encountered during encoding or decoding.'''
def __init__(self, msg):
self.__msg = msg
def msg(self):
return self.__msg
class Encoder(metaclass=ABCMeta):
'''Interface class for encoders.'''
@abstractmethod
def addExtern(self, obj, index=None):
raise NotImplementedError()
return self
@abstractmethod
def fileBegin(self):
raise NotImplementedError()
return self
@abstractmethod
def fileEnd(self):
raise NotImplementedError()
return self
@abstractmethod
def writeSubtypeHeader(self, name, sid):
raise NotImplementedError()
return self
@abstractmethod
def writeFieldHeader(self, name, fid):
raise NotImplementedError()
return self
@abstractmethod
def writeBoolean(self, value):
raise NotImplementedError()
return self
@abstractmethod
def writeInteger(self, value):
raise NotImplementedError()
return self
@abstractmethod
def writeFixed16(self, value):
raise NotImplementedError()
return self
@abstractmethod
def writeFixed32(self, value):
raise NotImplementedError()
return self
@abstractmethod
def writeFixed64(self, value):
raise NotImplementedError()
return self
@abstractmethod
def writeFloat(self, value):
raise NotImplementedError()
return self
@abstractmethod
def writeDouble(self, value):
raise NotImplementedError()
return self
@abstractmethod
def writeString(self, value):
raise NotImplementedError()
return self
@abstractmethod
def writeBytes(self, value):
raise NotImplementedError()
return self
@abstractmethod
def writeBeginList(self, elementKind, length, fixed=False):
raise NotImplementedError()
return self
@abstractmethod
def writeEndList(self):
raise NotImplementedError()
return self
@abstractmethod
def writeBeginSet(self, elementKind, length, fixed=False):
raise NotImplementedError()
return self
@abstractmethod
def writeEndSet(self):
raise NotImplementedError()
return self
@abstractmethod
def writeBeginMap(self, keyKind, valueKind, length):
raise NotImplementedError()
return self
@abstractmethod
def writeEndMap(self):
raise NotImplementedError()
return self
@abstractmethod
def writeStruct(self, value):
raise NotImplementedError()
return self
class Decoder(metaclass=ABCMeta):
@abstractmethod
def decode(self, cls):
raise NotImplementedError()
pass
class AbstractEncoder(Encoder):
'''Abstract base class for encoders.'''
def __init__(self):
self.__nextSharedId = 1
self.__nextExternId = -1
self.__objectRefs = {}
self.__idsInUse = set()
def getNextSharedId(self):
'''Return the next unused shared object identifier.'''
value = self.__nextSharedId
assert value not in self.__idsInUse
self.__nextSharedId += 1
return value
def getNextExternId(self):
        '''Return the next unused extern object identifier.'''
while self.__nextSharedId in self.__idsInUse:
self.__nextSharedId += 1
value = self.__nextSharedId
self.__nextSharedId += 1
return value
def addExtern(self, obj, index=None):
'''Add an object to the externs table.'''
if index is None:
index = self.getNextExternId()
elif index in self.__idsInUse:
raise EncodingError('Extern index {0} is already in use', index)
self.__idsInUse.add(index)
self.__objectRefs[id(obj)] = index
def addShared(self, obj):
'''Add an object to the shared object table. If the object has already
been added, it returns the table index of the existing entry;
Otherwise, the object is added to the table and None is returned.'''
sid = id(obj)
if sid in self.__objectRefs:
return self.__objectRefs[sid]
else:
index = self.getNextSharedId()
self.__idsInUse.add(index)
self.__objectRefs[sid] = index
#print("Object {0}: {1}".format(index, obj.descriptor().getFullName()))
return None
def getShared(self, obj):
'''Return the index of an object in the shared object table, or None if
the object is not in the table.'''
return self.__objectRefs.get(id(obj))
class AbstractDecoder(Decoder):
'''Abstract base class for decoders.'''
def __init__(self, typeRegistry=None):
self.__nextSharedId = 1
self.__nextExternId = -1
self.__objectRefs = {}
if typeRegistry:
self.__typeRegistry = typeRegistry
else:
from coda.runtime import typeregistry
self.__typeRegistry = typeregistry.TypeRegistry.INSTANCE
def getNextExternId(self):
        '''Return the next unused extern object identifier.'''
        while self.__nextExternId in self.__objectRefs:
            self.__nextExternId -= 1
        value = self.__nextExternId
        self.__nextExternId -= 1
return value
def getNextSharedId(self):
'''Return the next unused shared object identifier.'''
value = self.__nextSharedId
self.__nextSharedId += 1
return value
def addExtern(self, obj, index=None):
'''Add an object to the externs table.'''
if index is None:
index = self.getNextExternId()
elif index in self.__objectRefs:
raise EncodingError('Extern index {0} is already in use', index)
self.__objectRefs[id(obj)] = index
return self
def addShared(self, obj):
'''Add an object to the shared object table..'''
index = self.getNextSharedId()
self.__objectRefs[index] = obj
return index
def getShared(self, index):
'''Return the shared object with the given id.'''
return self.__objectRefs[index]
@staticmethod
def isSubtype(self, st, base):
while st:
if st is base:
return True
st = st.getBaseType()
return False
| StarcoderdataPython |
228051 | import numpy as np
import mytorch.simple_core
from mytorch.simple_core import Variable
A = mytorch.simple_core.Square()
B = mytorch.simple_core.Exp()
C = mytorch.simple_core.Square()
# y = (e^{x^2})^2
# Computation graph connection: x -> A -> a -> B -> b -> C -> y
x = Variable(np.array(0.5))
a = A(x)
b = B(a)
y = C(b)
assert y.creator == C
assert y.creator.input == b
assert y.creator.input.creator == B
assert y.creator.input.creator.input == a
assert y.creator.input.creator.input.creator == A
assert y.creator.input.creator.input.creator.input == x
y.grad = np.array(1.0)
y.backward()
print(x.grad)
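# Analytically y = exp(2*x**2), so dy/dx = 4*x*exp(2*x**2); at x = 0.5 the printed
# gradient should be approximately 3.297.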
| StarcoderdataPython |
6699663 | #!/usr/bin/env python3
import platform
import subprocess
print ("WARNING! This is a work in progress script. It has been tested to work for Fedora and debian-based systems. \
There are individual operating system dependent scripts being called from this one. \
You can find them in setup directory. The script for windows is still not complete. \
You can use them as a starting point or as a reference. If you run into any errors while running this script, \
please comment with your issue on https://github.com/WikiEducationFoundation/WikiEduDashboard/issues/1709 \
Please upload your logs for installation with your issue reports. \
The logs can be found in the setup directory. If you can help improve this script, \
We would love your contributions.")
print ("Please install ruby-2.7.1 before running this script.")
def deb_setup():
print ("Your system is found to be debian-based.")
subprocess.run("sudo chmod 775 setup/deb-setup.sh && setup/deb-setup.sh",
shell=True, check=True)
def dnf_setup():
print("Your system is found to be Fedora")
subprocess.run("sudo chmod 775 setup/dnf-setup.sh && setup/dnf-setup.sh",
shell=True, check=True)
def win_setup():
print ("Your system is found to be Windows")
subprocess.run("runas /user:Administrator \"setup\win-setup.bat\"",
shell=True, check=True)
def osx_setup():
print ("Your system is found to be OSX")
subprocess.run("sudo chmod 775 setup/macOS-setup.sh && setup/macOS-setup.sh",
shell=True, check=True)
if platform.platform().lower().find('ubuntu') != -1 \
or platform.platform().lower().find('debian') != -1 \
or platform.platform().lower().find('elementary') != -1:
deb_setup()
elif platform.platform().lower().find('fedora') != -1:
dnf_setup()
elif platform.platform().lower().find('darwin') != -1 \
or platform.platform().lower().find('mac') != -1:
osx_setup()
elif platform.platform().lower().find('windows') != -1:
win_setup()
else:
print ("Sorry! Your operating is not supported by this script. Please refer \
https://github.com/WikiEducationFoundation/WikiEduDashboard/blob/master/ \
docs/setup.md for manual setup instructions.")
| StarcoderdataPython |
6700255 | <filename>middleware/auth.py
from flask import request, redirect
from core.middleware import Middleware
from redis_model import Session
class AuthMiddleware(Middleware):
def pre_check(self, *args, **kwargs):
access_token = request.headers.get("Authorization")
session = Session(access_token)
session.load()
username = session.username
return username is not None
def default(self):
return redirect("/")
| StarcoderdataPython |
6495400 | <filename>app.py
import dash
import dash_bootstrap_components as dbc
#external_stylesheets=[dbc.themes.SUPERHERO]
external_stylesheets=[dbc.themes.GRID]
app = dash.Dash(__name__,external_stylesheets = external_stylesheets, suppress_callback_exceptions=True)
| StarcoderdataPython |
5189299 | <reponame>hiway/python-zentropi
# coding=utf-8
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -mzentropi` python will execute
``__main__.py`` as a script. That means there won't be any
``zentropi.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``zentropi.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import os
import click
from cookiecutter.main import cookiecutter
DEFAULT_CHOICE_NOTE = """
Not what you want? Press Ctrl+C to cancel. See available choices by:
$ zentropi {} --help
"""
@click.group()
def main():
pass
@main.command()
@click.option('--name', default='zentropi_shell')
@click.option('--endpoint', default='wss://local.zentropi.com/')
@click.option('--auth', default='<KEY>')
@click.option('--join', default='zentropia')
def shell(name, endpoint, join, auth):
from .shell import ZentropiShell
shell_agent = ZentropiShell(name)
shell_agent.connect(endpoint, auth=auth)
if join:
shell_agent.join(space=join)
shell_agent.run()
@main.group()
def create():
pass
@create.command()
@click.argument('path', type=click.Path(exists=False), default='.')
@click.option('--template', default='package')
def agent(path, template):
"""
Create a new agent.
- Default path '.' will create a new directory inside your
current working directory.
- Default template 'package' will create a new agent that can
be installed as a package with pip. You can pick either:
- package: default, pip-install and pypi ready package.
- module: one module with multiple files.
- file: a single python file
- tutorial: a zentropi tutorial
"""
    if template == 'package':
        click.echo('Chosen options')
        click.echo('\t--template={}'.format(template))
        click.echo(DEFAULT_CHOICE_NOTE.format('create agent'))
if any([patrn in template for patrn in ['gh:', 'github', 'bitbucket']]):
template_path = template
elif '/' in template: # points to a path on the filesystem
template_path = os.path.abspath(template)
else:
template_dir = os.path.join(os.path.dirname(__file__), 'templates/')
template_path = os.path.join(template_dir, template)
cookiecutter(template_path, output_dir=path)
@create.command()
def project():
pass
| StarcoderdataPython |
258379 | class Logger:
# Logger channel.
channel: str
def __init__(self, channel: str):
self.channel = channel
def log(self, message):
print(f'\033[92m[{self.channel}] {message}\033[0m')
def info(self, message):
print(f'\033[94m[{self.channel}] {message}\033[0m')
def warning(self, message):
print(f'\033[93m[{self.channel}] {message}\033[0m')
def error(self, message):
print(f'\033[91m[{self.channel}] {message}\033[0m')
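# Example usage: Logger('auth').warning('token expired')  # prints the message in yellow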
| StarcoderdataPython |
3587610 | import random
import tensorflow
import pandas
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
import numpy
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
from mnist_format_images import imageprepare
'''read file'''
digit_0 = 'trainingSet/0'
digit_1 = 'trainingSet/1'
digit_2 = 'trainingSet/2'
digit_3 = 'trainingSet/3'
digit_4 = 'trainingSet/4'
digit_5 = 'trainingSet/5'
digit_6 = 'trainingSet/6'
digit_7 = 'trainingSet/7'
digit_8 = 'trainingSet/8'
digit_9 = 'trainingSet/9'
SIZE_IMG = 28
learning_rate = 0.01
'''Setting up the model which will help with tensorflow models'''
MODEL_NAME = 'digits-{}-{}.model'.format(learning_rate, '6conv-basic')
def convert_img(handwritten_data, name):
data_img_lst = []
# tqdm is only used for interactive loading
# loading the training data
df = pandas.DataFrame()
for img in tqdm(os.listdir(handwritten_data)):
path = os.path.join(handwritten_data, img)
img = imageprepare(path)
# final step-forming the training data list with numpy array of the images
data_img_lst.append([img, numpy.array(int(name))])
return data_img_lst
def cnn_model():
'''Pre-Processing'''
# Convert images into mnist data format
data_0 = convert_img(digit_0, "0")
data_1 = convert_img(digit_1, "1")
data_2 = convert_img(digit_2, "2")
data_3 = convert_img(digit_3, "3")
data_4 = convert_img(digit_4, "4")
data_5 = convert_img(digit_5, "5")
data_6 = convert_img(digit_6, "6")
data_7 = convert_img(digit_7, "7")
data_8 = convert_img(digit_8, "8")
data_9 = convert_img(digit_9, "9")
# appending data
data = data_0
data.extend(data_1)
data.extend(data_2)
data.extend(data_3)
data.extend(data_4)
data.extend(data_5)
data.extend(data_6)
data.extend(data_7)
data.extend(data_8)
data.extend(data_9)
# shuffle the data
random.shuffle(data)
# convert to dataframe
data_df = pandas.DataFrame(data)
# using dataframe try to find any null values
print("Printing any nan values in the dataset: ", data_df.isnull().sum())
# plot data
_ = data_df[1].value_counts().plot(kind='bar')
plt.show()
# split data
train = data[:-41000]
test = data[-41000:]
# Reshape data
# X-Features & Y-Labels
X = numpy.array([i[0] for i in train]).reshape(-1, 28, 28, 1)
Y = numpy.array([i[1] for i in train])
test_x = numpy.array([i[0] for i in test]).reshape(-1, 28, 28, 1)
test_y = numpy.array([i[1] for i in test])
input_shape = (28, 28, 1)
'''Deep Learning Model'''
cnn_dlm = tensorflow.keras.models.Sequential()
cnn_dlm.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
cnn_dlm.add(Conv2D(32, kernel_size=(3, 3), input_shape=input_shape))
    cnn_dlm.add(BatchNormalization())
cnn_dlm.add(MaxPooling2D(pool_size=(2, 2)))
cnn_dlm.add(Dropout(0.2))
cnn_dlm.add(Flatten())
cnn_dlm.add(Dense(128, activation='relu'))
    cnn_dlm.add(BatchNormalization())
cnn_dlm.add(Dropout(0.2))
    cnn_dlm.add(Dense(10, activation='softmax'))  # softmax pairs with sparse_categorical_crossentropy
# compile
cnn_dlm.compile(
optimizer='rmsprop',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
print(cnn_dlm.summary())
cnn_dlm.fit(x=X, y=Y, epochs=20)
print(cnn_dlm.evaluate(test_x, test_y))
return cnn_dlm
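# A minimal usage sketch (hypothetical; assumes the trainingSet/<digit> folders above exist):
#   model = cnn_model()
#   model.save(MODEL_NAME)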
| StarcoderdataPython |
5103157 | <gh_stars>0
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.views.decorators import csrf
def search_post(request):
ctx = {}
if request.POST:
ctx['rlt'] = request.POST['q']
return render(request, "post.html", ctx)
| StarcoderdataPython |
5017832 | <reponame>obestwalter/pybuilder_pytest_plugin
# pybuilder_header_plugin
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, 'src/main/python')
from pybuilder.core import Author, init, use_plugin
from pybuilder_header_plugin import check_source_file_headers
use_plugin('python.core')
use_plugin('python.distutils')
use_plugin('python.flake8')
use_plugin('python.install_dependencies')
use_plugin('pypi:pybuilder_release_plugin')
url = 'https://github.com/aelgru/pybuilder_header_plugin'
description = 'Please visit {0} for more information!'.format(url)
name = 'pybuilder_header_plugin'
authors = [Author('<NAME>', '<EMAIL>')]
license = 'Apache License, Version 2.0'
summary = 'PyBuilder Header PlugIn'
version = '0.0.2'
default_task = ['analyze', 'publish', 'check_source_file_headers']
@init
def set_properties(project):
project.depends_on('committer')
project.depends_on('wheel')
project.set_property('flake8_verbose_output', True)
project.set_property('flake8_break_build', True)
project.set_property('pybuilder_header_plugin_break_build', True)
project.set_property('pybuilder_header_plugin_expected_header', """# pybuilder_header_plugin
# Copyright 2014 Michael Gruber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""")
project.get_property('distutils_commands').append('bdist_wheel')
project.set_property('distutils_classifiers', [
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities'])
| StarcoderdataPython |
3435363 | <filename>src/dsmil/train.py
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torchvision.transforms.functional as VF
from torchvision import transforms
import sys, argparse, os, copy, itertools, glob, datetime
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
from sklearn.metrics import roc_curve, roc_auc_score, precision_recall_fscore_support
from sklearn.datasets import load_svmlight_file
from collections import OrderedDict
import src.dsmil.dsmil as mil
def get_bag_feats(csv_file_df, args):
if args.dataset == 'TCGA-lung-default':
feats_csv_path = 'datasets/tcga-dataset/tcga_lung_data_feats/' + csv_file_df.iloc[0].split('/')[1] + '.csv'
else:
feats_csv_path = os.path.join(args.dataset_path, csv_file_df.iloc[0])
df = pd.read_csv(feats_csv_path, header=None)
feats = shuffle(df).reset_index(drop=True)
feats = feats.to_numpy()
label = np.zeros(args.num_classes)
if args.num_classes==1:
label[0] = csv_file_df.iloc[1]
else:
if int(csv_file_df.iloc[1])<=(len(label)-1):
label[int(csv_file_df.iloc[1])] = 1
return label, feats
def train(train_df, milnet, criterion, optimizer, args):
csvs = shuffle(train_df).reset_index(drop=True)
total_loss = 0
bc = 0
Tensor = torch.cuda.FloatTensor
for i in range(len(train_df)):
optimizer.zero_grad()
label, feats = get_bag_feats(train_df.iloc[i], args)
bag_label = Variable(Tensor([label]))
bag_feats = Variable(Tensor([feats]))
bag_feats = bag_feats.view(-1, args.feats_size)
ins_prediction, bag_prediction, _, _ = milnet(bag_feats)
max_prediction, _ = torch.max(ins_prediction, 0)
bag_loss = criterion(bag_prediction.view(1, -1), bag_label.view(1, -1))
max_loss = criterion(max_prediction.view(1, -1), bag_label.view(1, -1))
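        # DSMIL objective: average the bag-stream loss with the max-pooled instance-stream loss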
loss = 0.5*bag_loss + 0.5*max_loss
loss.backward()
optimizer.step()
total_loss = total_loss + loss.item()
sys.stdout.write('\r Training bag [%d/%d] bag loss: %.4f' % (i, len(train_df), loss.item()))
return total_loss / len(train_df)
def test(test_df, milnet, criterion, optimizer, args):
csvs = shuffle(test_df).reset_index(drop=True)
total_loss = 0
test_labels = []
test_predictions = []
Tensor = torch.cuda.FloatTensor
with torch.no_grad():
for i in range(len(test_df)):
label, feats = get_bag_feats(test_df.iloc[i], args)
bag_label = Variable(Tensor([label]))
bag_feats = Variable(Tensor([feats]))
bag_feats = bag_feats.view(-1, args.feats_size)
ins_prediction, bag_prediction, _, _ = milnet(bag_feats)
max_prediction, _ = torch.max(ins_prediction, 0)
bag_loss = criterion(bag_prediction.view(1, -1), bag_label.view(1, -1))
max_loss = criterion(max_prediction.view(1, -1), bag_label.view(1, -1))
loss = 0.5*bag_loss + 0.5*max_loss
total_loss = total_loss + loss.item()
sys.stdout.write('\r Testing bag [%d/%d] bag loss: %.4f' % (i, len(test_df), loss.item()))
test_labels.extend([label])
test_predictions.extend([(0.0*torch.sigmoid(max_prediction)+1.0*torch.sigmoid(bag_prediction)).squeeze().cpu().numpy()])
test_labels = np.array(test_labels)
test_predictions = np.array(test_predictions)
auc_value, _, thresholds_optimal = multi_label_roc(test_labels, test_predictions, args.num_classes, pos_label=1)
if args.num_classes==1:
class_prediction_bag = test_predictions
class_prediction_bag[class_prediction_bag>=thresholds_optimal[0]] = 1
class_prediction_bag[class_prediction_bag<thresholds_optimal[0]] = 0
test_predictions = class_prediction_bag
test_labels = np.squeeze(test_labels)
else:
for i in range(args.num_classes):
class_prediction_bag = test_predictions[:, i]
class_prediction_bag[class_prediction_bag>=thresholds_optimal[i]] = 1
class_prediction_bag[class_prediction_bag<thresholds_optimal[i]] = 0
test_predictions[:, i] = class_prediction_bag
bag_score = 0
for i in range(0, len(test_df)):
bag_score = np.array_equal(test_labels[i], test_predictions[i]) + bag_score
avg_score = bag_score / len(test_df)
return total_loss / len(test_df), avg_score, auc_value, thresholds_optimal
def multi_label_roc(labels, predictions, num_classes, pos_label=1):
fprs = []
tprs = []
thresholds = []
thresholds_optimal = []
aucs = []
if len(predictions.shape)==1:
predictions = predictions[:, None]
for c in range(0, num_classes):
label = labels[:, c]
prediction = predictions[:, c]
fpr, tpr, threshold = roc_curve(label, prediction, pos_label=1)
fpr_optimal, tpr_optimal, threshold_optimal = optimal_thresh(fpr, tpr, threshold)
c_auc = roc_auc_score(label, prediction)
aucs.append(c_auc)
thresholds.append(threshold)
thresholds_optimal.append(threshold_optimal)
return aucs, thresholds, thresholds_optimal
def optimal_thresh(fpr, tpr, thresholds, p=0):
loss = (fpr - tpr) - p * tpr / (fpr + tpr + 1)
idx = np.argmin(loss, axis=0)
return fpr[idx], tpr[idx], thresholds[idx]
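# Minimal sketch (added example): picking an operating threshold with optimal_thresh()
# on toy scores; the labels and scores below are made-up values.
def _example_optimal_threshold():
    toy_labels = np.array([0, 0, 1, 1])
    toy_scores = np.array([0.1, 0.4, 0.35, 0.8])
    fpr, tpr, thresholds = roc_curve(toy_labels, toy_scores, pos_label=1)
    return optimal_thresh(fpr, tpr, thresholds)  # (fpr, tpr, threshold) at the chosen point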
def main():
parser = argparse.ArgumentParser(description='Train DSMIL on 20x patch features learned by SimCLR')
parser.add_argument('--num_classes', default=2, type=int, help='Number of output classes [2]')
parser.add_argument('--feats_size', default=512, type=int, help='Dimension of the feature size [512]')
parser.add_argument('--lr', default=0.0002, type=float, help='Initial learning rate [0.0002]')
parser.add_argument('--num_epochs', default=200, type=int, help='Number of total training epochs [40|200]')
parser.add_argument('--gpu_index', type=int, nargs='+', default=(0,), help='GPU ID(s) [0]')
parser.add_argument('--weight_decay', default=5e-3, type=float, help='Weight decay [5e-3]')
parser.add_argument('--dataset', default='TCGA-lung-default', type=str, help='Dataset folder name')
parser.add_argument('--split', default=0.2, type=float, help='Training/Validation split [0.2]')
parser.add_argument('--dataset_path', default='.', type=str, help='Base location of the dataset')
args = parser.parse_args()
gpu_ids = tuple(args.gpu_index)
os.environ['CUDA_VISIBLE_DEVICES']=','.join(str(x) for x in gpu_ids)
i_classifier = mil.FCLayer(in_size=args.feats_size, out_size=args.num_classes).cuda()
b_classifier = mil.BClassifier(input_size=args.feats_size, output_class=args.num_classes).cuda()
milnet = mil.MILNet(i_classifier, b_classifier).cuda()
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(milnet.parameters(), lr=args.lr, betas=(0.5, 0.9), weight_decay=args.weight_decay)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.num_epochs, 0.000005)
if args.dataset == 'TCGA-lung-default':
bags_csv = 'datasets/tcga-dataset/TCGA.csv'
else:
        bags_csv = os.path.join(args.dataset_path, 'embedings', args.dataset+'.csv')
bags_path = pd.read_csv(bags_csv, header=None)
train_path = bags_path.iloc[:270, :]
test_path = bags_path.iloc[270:, :]
best_score = 0
save_path = os.path.join('weights', datetime.date.today().strftime("%m%d%Y"))
os.makedirs(save_path, exist_ok=True)
run = len(glob.glob(os.path.join(save_path, '*.pth')))
for epoch in range(1, args.num_epochs + 1):
train_path = shuffle(train_path).reset_index(drop=True)
test_path = shuffle(test_path).reset_index(drop=True)
train_loss_bag = train(train_path, milnet, criterion, optimizer, args) # iterate all bags
test_loss_bag, avg_score, aucs, thresholds_optimal = test(test_path, milnet, criterion, optimizer, args)
current_score = (sum(aucs) + avg_score + 1 - test_loss_bag)/4
if args.dataset=='TCGA-lung':
print('\r Epoch [%d/%d] train loss: %.4f test loss: %.4f, average score: %.4f, auc_LUAD: %.4f, auc_LUSC: %.4f' %
(epoch, args.num_epochs, train_loss_bag, test_loss_bag, avg_score, aucs[0], aucs[1]))
else:
with open('logging.txt', 'a+') as f:
print('\r Epoch [%d/%d] train loss: %.4f test loss: %.4f, current score: %.4f, average score: %.4f, AUC: ' %
(epoch, args.num_epochs, train_loss_bag, test_loss_bag, current_score, avg_score) + '|'.join('class-{}>>{}'.format(*k) for k in enumerate(aucs)), file=f)
print('\r Epoch [%d/%d] train loss: %.4f test loss: %.4f, current score: %.4f, average score: %.4f, AUC: ' %
(epoch, args.num_epochs, train_loss_bag, test_loss_bag, current_score, avg_score) + '|'.join('class-{}>>{}'.format(*k) for k in enumerate(aucs)))
scheduler.step()
if current_score >= best_score:
best_score = current_score
save_name = os.path.join(save_path, 'AUC_' + str(sum(aucs)) + 'ACC_' + str(avg_score) + '.pth')
torch.save(milnet.state_dict(), save_name)
if args.dataset=='TCGA-lung':
print('Best model saved at: ' + save_name + ' Best thresholds: LUAD %.4f, LUSC %.4f' % (thresholds_optimal[0], thresholds_optimal[1]))
else:
with open('logging.txt', 'a+') as f:
print('Best thresholds ===>>> '+ '|'.join('class-{}>>{}'.format(*k) for k in enumerate(thresholds_optimal)), file=f)
print('Best model saved at: ' + save_name)
print('Best thresholds ===>>> '+ '|'.join('class-{}>>{}'.format(*k) for k in enumerate(thresholds_optimal)))
if __name__ == '__main__':
main() | StarcoderdataPython |
393829 | from torch.utils.data import Dataset, DataLoader
import numpy as np
def normalization(data):
minVals = data.min(0)
maxVals = data.max(0)
ranges = maxVals - minVals
normData = (data - minVals) / ranges
return normData
def z_score(data):
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
return data
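# Quick illustrative check (added example): z_score() centres each column to zero mean and
# unit variance; the 2x2 array below is a made-up toy input whose columns both become [-1., 1.].
def _example_z_score():
    toy = np.array([[1.0, 10.0], [3.0, 30.0]])
    return z_score(toy.copy())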
class TrainSets(Dataset):
def __init__(self):
train_data = np.loadtxt('./demo data_Raman liver_train.txt', delimiter='\t')
self.x = train_data[:, 1:]
self.y = train_data[:, 0]
self.len = len(train_data)
self.x = z_score(self.x)
        # self.x = normalization(self.x)
def __getitem__(self, item):
return self.x[item], self.y[item]
def __len__(self):
return self.len
class ValidateSets(Dataset):
def __init__(self):
        validate_data = np.loadtxt('./demo data_Raman liver_validate.txt', delimiter='\t')
self.x = validate_data[:, 1:]
self.y = validate_data[:, 0]
self.len = len(validate_data)
self.x = z_score(self.x)
        # self.x = normalization(self.x)
def __getitem__(self, item):
return self.x[item], self.y[item]
def __len__(self):
return self.len
class TestdataSets(Dataset):
def __init__(self):
test_data = np.loadtxt('./demo data_Raman liver_test.txt', delimiter='\t')
self.x = test_data[:, 1:]
self.y = test_data[:, 0]
self.len = len(test_data)
        self.x = z_score(self.x)
def __getitem__(self, item):
return self.x[item], self.y[item]
def __len__(self):
return self.len
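# Usage sketch (added example): wrapping one of the datasets above in a DataLoader.
# batch_size=16 and shuffle=True are arbitrary settings; this helper is never called and
# only works if the demo data files are present on disk.
def _example_dataloader():
    loader = DataLoader(TrainSets(), batch_size=16, shuffle=True)
    batch_x, batch_y = next(iter(loader))
    return batch_x.shape, batch_y.shape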
| StarcoderdataPython |
6445677 | <filename>dev/archery/archery/crossbow/tests/test_reports.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import textwrap
from archery.crossbow.core import yaml
from archery.crossbow.reports import (ChatReport, CommentReport, EmailReport,
Report)
def test_crossbow_comment_formatter(load_fixture):
msg = load_fixture('crossbow-success-message.md')
job = load_fixture('crossbow-job.yaml', decoder=yaml.load)
report = CommentReport(job, crossbow_repo='ursa-labs/crossbow')
expected = msg.format(
repo='ursa-labs/crossbow',
branch='ursabot-1',
revision='f766a1d615dd1b7ee706d05102e579195951a61c',
status='has been succeeded.'
)
assert report.show() == textwrap.dedent(expected).strip()
def test_crossbow_chat_report(load_fixture):
expected_msg = load_fixture('chat-report.txt')
job = load_fixture('crossbow-job.yaml', decoder=yaml.load)
report = Report(job)
assert report.tasks_by_state is not None
report_chat = ChatReport(report=report, extra_message_success=None,
extra_message_failure=None)
assert report_chat.render("text") == textwrap.dedent(expected_msg)
def test_crossbow_chat_report_extra_message_failure(load_fixture):
expected_msg = load_fixture('chat-report-extra-message-failure.txt')
job = load_fixture('crossbow-job.yaml', decoder=yaml.load)
report = Report(job)
assert report.tasks_by_state is not None
report_chat = ChatReport(report=report,
extra_message_success="Should not be present",
extra_message_failure="Failure present")
assert report_chat.render("text") == textwrap.dedent(expected_msg)
def test_crossbow_chat_report_extra_message_success(load_fixture):
expected_msg = load_fixture('chat-report-extra-message-success.txt')
job = load_fixture('crossbow-job-no-failure.yaml', decoder=yaml.load)
report = Report(job)
assert report.tasks_by_state is not None
report_chat = ChatReport(report=report,
extra_message_success="Success present",
extra_message_failure="Should not be present")
assert report_chat.render("text") == textwrap.dedent(expected_msg)
def test_crossbow_email_report(load_fixture):
expected_msg = load_fixture('email-report.txt')
job = load_fixture('crossbow-job.yaml', decoder=yaml.load)
report = Report(job)
assert report.tasks_by_state is not None
email_report = EmailReport(report=report, sender_name="<NAME>",
sender_email="<EMAIL>",
recipient_email="<EMAIL>")
assert email_report.render("text") == textwrap.dedent(expected_msg)
def test_crossbow_export_report(load_fixture):
job = load_fixture('crossbow-job.yaml', decoder=yaml.load)
report = Report(job)
assert len(list(report.rows)) == 4
expected_first_row = [
'docker-cpp-cmake32',
'success',
['https://github.com/apache/crossbow/runs/1'],
'https://github.com/apache/crossbow/tree/'
'ursabot-1-circle-docker-cpp-cmake32',
'circle',
{'commands': ['docker-compose build cpp-cmake32',
'docker-compose run cpp-cmake32']},
'docker-tests/circle.linux.yml',
'f766a1d615dd1b7ee706d05102e579195951a61c'
]
assert next(report.rows) == expected_first_row
| StarcoderdataPython |
11393760 | <reponame>mike-lischke/mysql-shell-plugins
# Copyright (c) 2020, 2022, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0,
# as published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms, as
# designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an additional
# permission to link the program and your derivative works with the
# separately licensed software that they have included with MySQL.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# To use this script you need to set these environment variables:
#
# MYSQLSH=<path to mysqlsh binary>
# MYSQLSH_USER_CONFIG_HOME=<mysqlsh home>
# MYSQLSH_PLUGIN_SOURCE_DIR=<source code path to the plugins>
#
# If not configured, they will be set as follows:
# MYSQLSH to the mysqlsh in PATH
# MYSQLSH_USER_CONFIG_HOME to /tmp/dot_mysqlsh
# MYSQLSH_PLUGIN_SOURCE_DIR to ../../
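#
# Example invocation (illustrative values only; adjust the paths to your checkout):
#   MYSQLSH=/usr/local/bin/mysqlsh \
#   MYSQLSH_USER_CONFIG_HOME=/tmp/dot_mysqlsh \
#   MYSQLSH_PLUGIN_SOURCE_DIR=../../ \
#   python run_tests.py --verbose 1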
import shutil
import os
import tempfile
import subprocess
from pathlib import Path
import argparse
from contextlib import contextmanager
import signal
import sqlite3
import zipfile
def signal_handler(sig, frame):
print(f'1) Ctrl+C! captured: {sig}')
if os.name == 'nt':
signal.signal(signal.SIGINT, signal_handler)
@contextmanager
def pushd(new_path):
current = os.getcwd()
os.chdir(new_path.as_posix())
yield
os.chdir(current)
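# Usage sketch (added example, not part of the original script): pushd() temporarily changes
# the working directory and restores it when the with-block exits. The path used is arbitrary.
def _example_pushd():
    before = os.getcwd()
    with pushd(Path(tempfile.gettempdir())):
        inside = os.getcwd()
    return before, inside, os.getcwd()  # the last value equals `before` again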
def create_symlink(target: Path, link_name: Path, is_dir):
if os.name == 'nt':
p = subprocess.run(f'mklink /J "{link_name}" "{target}"', shell=True)
print(p.stdout)
p.check_returncode()
else:
os.symlink(target, link_name)
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d', '--debug',
required=False,
choices=['TESTS', 'BACKEND'],
default=os.environ.get('ATTACH_DEBUGGER', None),
help='Attach debugger to TESTS and/or BACKEND')
arg_parser.add_argument('-p', '--portable',
required=False,
type=Path,
help='The path to the portable code')
arg_parser.add_argument('-s', '--shell',
required=False,
type=Path,
default=os.environ.get('MYSQLSH', shutil.which(
'mysqlsh.exe') if os.name == 'nt' else shutil.which('mysqlsh')),
help='Path to MySQL Shell binary')
arg_parser.add_argument('-v', '--verbose',
required=False,
help='Enable verbose mode')
arg_parser.add_argument('-u', '--userhome',
required=False,
type=Path,
default=os.environ.get(
'MYSQLSH_USER_CONFIG_HOME', None),
help='Path to the user config home')
arg_parser.add_argument('-k', '--only',
required=False,
type=str,
default=None,
help='Run only the tests that apply to the pattern')
try:
args = arg_parser.parse_args()
except argparse.ArgumentError as e:
print(str(e))
print(args)
# check if we're running in the backend directory
assert Path(os.path.join(os.getcwd(), 'run_tests.py')).exists(
), "Please run this script inside the backend directory."
assert args.shell is not None, "Could not find the MySQL Shell binary. Please specify it using the --shell parameter or the MYSQLSH environment variable."
class MyPaths:
def __init__(self, debug_mode, portable_path, shell_path, userhome_path: Path):
class MyPathsBase():
pass
self.source = MyPathsBase()
self.runtime = MyPathsBase()
self.runtime.plugins = MyPathsBase()
self.runtime.plugin_data = MyPathsBase()
self.shell = shell_path
if debug_mode:
self.runtime.root = Path(os.path.join(
tempfile.gettempdir(), "backend_debug"))
shutil.rmtree(self.runtime.root, ignore_errors=True)
elif userhome_path is None:
self.runtime.root = Path(os.path.join(
tempfile.TemporaryDirectory().name, 'dot_mysqlsh'))
else:
self.runtime.root = Path(userhome_path)
self.runtime.plugins.root = Path(
os.path.join(self.runtime.root, 'plugins'))
self.runtime.plugins.gui_plugin = Path(
os.path.join(self.runtime.plugins.root, 'gui_plugin'))
self.runtime.plugins.test_plugin = Path(
os.path.join(self.runtime.plugins.root, 'test_plugin'))
self.runtime.plugins.mrs_plugin = Path(os.path.join(
self.runtime.plugins.root, 'mrs_plugin'))
self.runtime.plugin_data.root = Path(
os.path.join(self.runtime.root, 'plugin_data'))
self.runtime.plugin_data.gui_plugin = Path(
os.path.join(self.runtime.plugin_data.root, 'gui_plugin'))
self.source.root = Path(os.path.abspath(
os.path.join(Path().cwd(), '..')))
self.source.plugin = Path(os.path.join(
self.source.root, 'mrs_plugin'))
        self.source.pytest_config = Path(os.path.join(
            self.source.plugin, "pytest-coverage.ini" if debug_mode is None else "pytest.ini"))
if portable_path is None:
self.source.code = self.source.plugin
else:
self.source.code = portable_path
def verify(self):
assert self.runtime.root.is_dir()
assert self.runtime.plugins.root.is_dir()
assert self.runtime.plugins.gui_plugin.is_dir()
        assert self.source.plugin.is_dir()
assert self.source.pytest_config.is_file()
if args.portable is not None and zipfile.is_zipfile(args.portable):
# Unzip the portable zip
unzip_path = os.path.dirname(args.portable)
with zipfile.ZipFile(args.portable, 'r') as zip_ref:
zip_ref.extractall(unzip_path)
# Update the portable argument with the final path
args.portable = os.path.join(unzip_path, "gui_plugin")
paths = MyPaths(args.debug, args.portable, args.shell, args.userhome)
# remove the shell home dir
if args.debug is not None and paths.runtime.root.exists():
shutil.rmtree(paths.runtime.root, ignore_errors=True)
# create the shell home dir
if not paths.runtime.root.is_dir():
paths.runtime.root.mkdir(parents=True)
# create mysqlsh/plugins
if not paths.runtime.plugins.root.is_dir():
paths.runtime.plugins.root.mkdir(parents=True)
# remove link to .mysqlsh/plugins/mrs_plugin
if paths.runtime.plugins.mrs_plugin.exists():
paths.runtime.plugins.mrs_plugin.unlink()
# Create source code symlink into the runtime plugin dir (.mysqlsh/plugins/mrs_plugin)
create_symlink(paths.source.code,
paths.runtime.plugins.mrs_plugin, is_dir=True)
LOGS = ""
PATTERN = ""
# Enables verbose execution
if args.verbose is not None or args.debug is not None:
LOGS = "-sv"
if args.only is not None:
PATTERN = f"-k {args.only}"
with pushd(paths.source.plugin):
env = os.environ.copy()
env['MYSQLSH_USER_CONFIG_HOME'] = paths.runtime.root.as_posix()
env['MYSQLSH_TERM_COLOR_MODE'] = 'nocolor'
env['COV_CORE_DATAFILE'] = '.coverage.eager'
if args.debug is not None:
env['ATTACH_DEBUGGER'] = args.debug
command = f"{paths.shell} --pym pytest --cov={paths.source.code} --cov-append -v -c {paths.source.pytest_config} {LOGS} {paths.source.plugin} {PATTERN}"
print(command)
shell = subprocess.run(command, shell=True, env=env)
if not shell.returncode == 0:
print('----------------------------------------')
print('MYSQLSH log')
print('----------------------------------------')
with open(os.path.join(paths.runtime.root / "mysqlsh.log")) as f:
for line in f.readlines():
print(line.strip())
exit(shell.returncode)
| StarcoderdataPython |
4909049 | """
importguests command
Parse a guest list csv file and store the guests in the guest list
Created by lmarvaud on 03/11/2019
"""
import csv
import itertools
import logging
import operator
from argparse import RawDescriptionHelpFormatter
from django.conf import settings
from django.core.management import BaseCommand, CommandParser
from django.utils.dateparse import parse_date
from django.utils.translation import ugettext_lazy as _
from ...join_and import join_and
from ...models import Family, Guest, Accompany, Event
MANY_LIST = ['children', 'girls', 'boys', 'colleges']
EMAIL_KEY = "Email"
PHONE_KEY = "Phone"
HOST_KEY = "Host"
GENDER_KEY = "Gender"
SURNAME_KEY = "Surname"
ACCOMPANY_KEY = "Accompany surname"
def strip(listed):
"""Strip a list of string"""
return map(operator.methodcaller("strip"), listed)
def multi_split(string, *seps):
"""split a list of separators"""
split = [string]
for sep in seps:
split = itertools.chain.from_iterable(map(operator.methodcaller('split', sep), split))
return split
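# Illustrative sketch (added example): how strip() and multi_split() are combined below to
# break a "Surname" cell into individual guest names. The sample string is made up.
def _example_split_surnames():
    sample = "Marie, Jean et Paul & Anne"
    return list(strip(multi_split(sample, ',', ' et ', '&')))  # ['Marie', 'Jean', 'Paul', 'Anne']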
def _create_guests(line):
"""Create the guests list from the csv line"""
emails = list(strip(line[EMAIL_KEY].split(',')))
phones = list(strip(line[PHONE_KEY].split(',')))
gender = list(strip(line[GENDER_KEY].split(',')))
names = list(strip(multi_split(line[SURNAME_KEY], ',', ' et ', '&')))
for i, name in enumerate(names):
        if i >= len(gender):
            logging.warning("missing gender for %s: SKIPPED", name)
else:
yield Guest(name=name,
email=emails[i] if len(emails) > i else None,
phone=phones[i] if len(phones) > i else "",
female=gender[i].upper() == 'F')
def _create_accompagnies(line):
"""Create the accompagnies list from the csv line"""
if line[ACCOMPANY_KEY]:
names = list(strip(multi_split(line[ACCOMPANY_KEY], ',', ' ' + str(_('and')) + ' ', '&')))
for name in names:
yield Accompany(name=name,
number=1 if all(str(_(many)) not in name for many in MANY_LIST) else 2)
class Command(BaseCommand):
"""
csv format is like::
"Email","Phone","Host","Gender","Surname","Accompany surname"
"<EMAIL>","0123456789","Pierre","F","Marie","Jean"
+ *First line* is ignored (title)
+ Each line represent a Family
+ Rows are : "Email","Phone","Host","Gender","Surname","Accompany surname"
+ *Email*, *Phone*, *Gender* and *Surname* will be split by coma : ',', 'and' and '&' to
retrieve the guest list. Phone is optional but gender and surname must have the same number of
value (or more) ::
"<EMAIL>,<EMAIL>","0123456789","Pierre","F,M","Marie,Jean"
+ *Host* must be empty or one of the settings.INVITE_HOSTS key. Empty will host will join all
hosts (Pierre and Jeanne) ::
INVITE_HOSTS = {
"Pierre": "<EMAIL>",
"Jeanne": "<EMAIL>"
}
+ *Gender* can be M or F ::
"","", "", "", "M", ""
"","", "", "", "F", ""
+ Lines without "email" are ignored ::
"","ignored", "", "", "", ""
"""
help = _("Import guests from a csv file")
def create_parser(self, prog_name, subcommand):
parser = super(Command, self).create_parser(prog_name=prog_name, subcommand=subcommand)
parser.epilog = self.__doc__
parser.formatter_class = RawDescriptionHelpFormatter
return parser
def add_arguments(self, parser: CommandParser):
invitation = parser.add_argument_group(_("Event"),
_("Create an link imported guests to an event"))
invitation.add_argument("--date", dest="event_date", type=parse_date,
help=_("date of the event"))
invitation.add_argument("--name", dest="event_name", type=str,
help=_("name of the event"))
parser.add_argument("csv", help=_("path to the csv file to parse"))
def handle(self, *args, **options):
"""Process to the parsing of the csv"""
event = self.create_event(**options)
with open(options["csv"], 'r') as csv_file:
csv_reader = csv.DictReader(csv_file, [
EMAIL_KEY, PHONE_KEY, HOST_KEY, GENDER_KEY, SURNAME_KEY, ACCOMPANY_KEY
])
next(csv_reader) # skip 1st line
midday = True
afternoon = True
evening = True
for line in csv_reader:
if line[SURNAME_KEY] == "Sous-total":
if midday:
midday = False
else:
afternoon = False
if line[EMAIL_KEY]:
if line[HOST_KEY] and line[HOST_KEY] not in settings.INVITE_HOSTS:
logging.warning("%s source not referenced in the setting INVITE_HOSTS",
line[HOST_KEY])
host = line[HOST_KEY] if line[HOST_KEY] in settings.INVITE_HOSTS else \
join_and(list(settings.INVITE_HOSTS.keys()))
guests = _create_guests(line)
accompagies = _create_accompagnies(line)
family = Family.objects.create(invited_midday=midday,
invited_afternoon=afternoon,
invited_evening=evening,
host=host)
family.guests.add(*guests, bulk=False)
family.accompanies.add(*accompagies, bulk=False)
if event:
family.invitations.add(event)
@staticmethod
def create_event(event_date, event_name, **unused_options):
"""
Potentially create an invitation with the name and date from the option if one of those is
specified
:param options: the options
:return: The invitation or None
"""
invitation = None
if event_date or event_name:
params = {}
if event_date:
params["date"] = event_date
if event_name:
params["name"] = event_name
invitation = Event.objects.create(**params)
return invitation
| StarcoderdataPython |
4927000 | <filename>tests/fixtures/data.py
# -*- coding: utf8 -*-
from skosprovider.providers import (
DictionaryProvider
)
from skosprovider.skos import (
ConceptScheme,
Label
)
larch = {
'id': 1,
'uri': 'http://python.com/trees/larch',
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'The Larch'},
{'type': 'prefLabel', 'language': 'nl', 'label': 'De Lariks'},
{'type': 'sortLabel', 'language': 'nl', 'label': 'c'}
],
'notes': [
{'type': 'definition', 'language': 'en', 'note': 'A type of tree.'}
],
'sources': [
{'citation': 'Monthy Python. Episode Three: How to recognise different types of trees from quite a long way away.'}
],
'matches': {
'close': ['http://id.python.org/different/types/of/trees/nr/1/the/larch']
}
}
chestnut = {
'id': 2,
'uri': 'http://python.com/trees/chestnut',
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'The Chestnut'},
{'type': 'altLabel', 'language': 'nl', 'label': 'De Paardekastanje'},
{'type': 'sortLabel', 'language': 'nl', 'label': 'a'}
],
'notes': [
{
'type': 'definition', 'language': 'en',
'note': 'A different type of tree.'
}
],
'matches': {
'related': ['http://id.python.org/different/types/of/trees/nr/17/the/other/chestnut']
}
}
species = {
'id': 3,
'uri': 'http://python.com/trees/species',
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'Trees by species'},
{'type': 'altLabel', 'language': 'en', 'label': 'Trees by their species'},
{'type': 'prefLabel', 'language': 'nl', 'label': 'Bomen per soort'},
        {'type': 'sortLabel', 'language': 'nl', 'label': 'b'}
],
'notes': [
{
'type': 'scopeNote', 'language': 'en',
'note': 'A division of trees.'
}
],
'type': 'collection',
'members': ['1', '2']
}
trees = DictionaryProvider(
{'id': 'TREES', 'default_language': 'nl'},
[larch, chestnut, species],
concept_scheme=ConceptScheme(
uri='http://python.com/trees',
labels=[
Label('Different types of trees', 'prefLabel', 'en'),
Label('Verschillende soorten bomen', 'prefLabel', 'nl')
]
)
)
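# Usage sketch (added example): querying the fixture provider defined above through the
# standard skosprovider provider interface (get_by_id / find).
def _example_query_trees():
    larch_concept = trees.get_by_id(1)            # a skosprovider Concept
    found = trees.find({'label': 'The Larch'})    # list of dicts with 'id', 'uri', 'label', ...
    return larch_concept, found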
| StarcoderdataPython |
9726948 | <reponame>TheCarvalho/atividades-wikipython
'''
45. Develop a program to check a student's grade on an exam with 10 questions. The program must ask the
student for the answer to each question and, at the end, compare them against the exam answer key, computing the
total number of correct answers and the grade (assign 1 point per correct answer). After each student uses the
system, the program must ask whether another student is going to use it. After all students have answered, report:
a. Highest and Lowest number of correct answers;
b. Total number of students who used the system;
c. The class grade average.
Exam answer key:
01 - A
02 - B
03 - C
04 - D
05 - E
06 - E
07 - D
08 - C
09 - B
10 - A
After finishing this, you could extend the program by allowing the teacher to enter the exam answer key before
the students use the program.
'''
from time import sleep
from os import system
# gabarito_certo = ['A', 'B', 'C', 'D', 'E', 'E', 'D', 'C', 'B', 'A']
gabarito_certo = list()
gabarito_aluno = list()
nota_aluno = list()
contagem_aluno = 1
system('cls')
print('Insira as resposta do gabarito da prova.')
for i in range(10):
resposta = ' '
while resposta not in 'ABCDE':
resposta = str(input(f'Questão {i+1}: ')).upper().strip()[0]
gabarito_certo.append(resposta)
system('cls')
print('As respostas são: ', end='')
print(*gabarito_certo)
input()
while True:
nota = 0
gabarito_aluno.clear()
op = ' '
for contagem in range(10):
resposta = ' '
while resposta not in 'ABCDE':
system('cls')
resposta = str(
input(f'>>> PROVA <<<\n\nQuestão {contagem + 1}: ')).upper().strip()[0]
if resposta not in 'ABCDE':
system('cls')
print('Insira uma resposta válida! (a,b,c,d,e)')
sleep(2)
gabarito_aluno.append(resposta)
system('cls')
for contagem in range(10):
if gabarito_aluno[contagem] == gabarito_certo[contagem]:
nota += 1
nota_aluno.append(nota)
contagem_aluno += 1
input(f'Sua nota é {nota} pontos')
while op not in 'sn':
system('cls')
op = str(input('Alguém ainda vai fazer a prova? (s/n) ')
).lower().strip()[0]
if op == 's':
system('cls')
print(f'Iniciando a prova do aluno {contagem_aluno}')
sleep(2)
continue
else:
system('cls')
break
print(f'''
O maior acerto foi {max(nota_aluno)}
O menor acerto foi {min(nota_aluno)}
A média das notas da turma foi {sum(nota_aluno)/len(nota_aluno):.1f}
''')
| StarcoderdataPython |
388316 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import combinations
from tensorflow.python.keras.layers import advanced_activations
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import serialization
from tensorflow.python.ops import nn_ops as nn
from tensorflow.python.platform import test
def _ref_softmax(values):
m = np.max(values)
e = np.exp(values - m)
return e / np.sum(e)
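# Quick numeric illustration (added example, not part of the TensorFlow test suite):
# _ref_softmax turns arbitrary scores into probabilities that sum to 1.
def _example_ref_softmax():
    probs = _ref_softmax(np.array([1.0, 2.0, 3.0]))
    return probs, probs.sum()  # sum is 1.0 up to floating point error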
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class KerasActivationsTest(test.TestCase, parameterized.TestCase):
def test_serialization(self):
all_activations = [
'softmax', 'relu', 'elu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear',
'softplus', 'softsign', 'selu', 'gelu'
]
for name in all_activations:
fn = activations.get(name)
ref_fn = getattr(activations, name)
assert fn == ref_fn
config = activations.serialize(fn)
fn = activations.deserialize(config)
assert fn == ref_fn
def test_serialization_v2(self):
activation_map = {nn.softmax_v2: 'softmax'}
for fn_v2_key in activation_map:
fn_v2 = activations.get(fn_v2_key)
config = activations.serialize(fn_v2)
fn = activations.deserialize(config)
assert fn.__name__ == activation_map[fn_v2_key]
def test_serialization_with_layers(self):
activation = advanced_activations.LeakyReLU(alpha=0.1)
layer = core.Dense(3, activation=activation)
config = serialization.serialize(layer)
# with custom objects
deserialized_layer = serialization.deserialize(
config, custom_objects={'LeakyReLU': activation})
self.assertEqual(deserialized_layer.__class__.__name__,
layer.__class__.__name__)
self.assertEqual(deserialized_layer.activation.__class__.__name__,
activation.__class__.__name__)
# without custom objects
deserialized_layer = serialization.deserialize(config)
self.assertEqual(deserialized_layer.__class__.__name__,
layer.__class__.__name__)
self.assertEqual(deserialized_layer.activation.__class__.__name__,
activation.__class__.__name__)
def test_softmax(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softmax(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = _ref_softmax(test_values[0])
self.assertAllClose(result[0], expected, rtol=1e-05)
x = backend.placeholder(ndim=1)
with self.assertRaises(ValueError):
activations.softmax(x)
def test_softmax_2d_axis0(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softmax(x, axis=0)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = np.zeros((2, 5))
for i in range(5):
expected[:, i] = _ref_softmax(test_values[:, i])
self.assertAllClose(result, expected, rtol=1e-05)
def test_temporal_softmax(self):
x = backend.placeholder(shape=(2, 2, 3))
f = backend.function([x], [activations.softmax(x)])
test_values = np.random.random((2, 2, 3)) * 10
result = f([test_values])[0]
expected = _ref_softmax(test_values[0, 0])
self.assertAllClose(result[0, 0], expected, rtol=1e-05)
def test_selu(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.selu(x)])
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
positive_values = np.array([[1, 2]], dtype=backend.floatx())
result = f([positive_values])[0]
self.assertAllClose(result, positive_values * scale, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=backend.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) * scale * alpha
self.assertAllClose(result, true_result)
def test_softplus(self):
def softplus(x):
return np.log(np.ones_like(x) + np.exp(x))
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softplus(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = softplus(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_softsign(self):
def softsign(x):
return np.divide(x, np.ones_like(x) + np.absolute(x))
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softsign(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = softsign(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_sigmoid(self):
def ref_sigmoid(x):
if x >= 0:
return 1 / (1 + np.exp(-x))
else:
z = np.exp(x)
return z / (1 + z)
sigmoid = np.vectorize(ref_sigmoid)
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.sigmoid(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = sigmoid(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_hard_sigmoid(self):
def ref_hard_sigmoid(x):
x = (x * 0.2) + 0.5
z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
return z
hard_sigmoid = np.vectorize(ref_hard_sigmoid)
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.hard_sigmoid(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = hard_sigmoid(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_relu(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.relu(x)])
positive_values = np.random.random((2, 5))
result = f([positive_values])[0]
self.assertAllClose(result, positive_values, rtol=1e-05)
negative_values = np.random.uniform(-1, 0, (2, 5))
result = f([negative_values])[0]
expected = np.zeros((2, 5))
self.assertAllClose(result, expected, rtol=1e-05)
def test_gelu(self):
def gelu(x, approximate=False):
if approximate:
return 0.5 * x * (1.0 + np.tanh(
np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))))
else:
from scipy.stats import norm # pylint: disable=g-import-not-at-top
return x * norm.cdf(x)
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.gelu(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = gelu(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
f = backend.function([x], [activations.gelu(x, True)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = gelu(test_values, True)
self.assertAllClose(result, expected, rtol=1e-05)
def test_elu(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.elu(x, 0.5)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
self.assertAllClose(result, test_values, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=backend.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) / 2
self.assertAllClose(result, true_result)
def test_tanh(self):
test_values = np.random.random((2, 5))
x = backend.placeholder(ndim=2)
exp = activations.tanh(x)
f = backend.function([x], [exp])
result = f([test_values])[0]
expected = np.tanh(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_exponential(self):
test_values = np.random.random((2, 5))
x = backend.placeholder(ndim=2)
exp = activations.exponential(x)
f = backend.function([x], [exp])
result = f([test_values])[0]
expected = np.exp(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_linear(self):
x = np.random.random((10, 5))
self.assertAllClose(x, activations.linear(x))
def test_invalid_usage(self):
with self.assertRaises(ValueError):
activations.get('unknown')
# The following should be possible but should raise a warning:
activations.get(advanced_activations.LeakyReLU())
if __name__ == '__main__':
test.main()
| StarcoderdataPython |
9791334 | <gh_stars>10-100
'''
Race_Statistics_Functions.py
This module contains the race stats function generate_race_stats().
It is called in Race_Detection_and_Statistics.py to compute race stats.
'''
from collections import OrderedDict
import numpy as np
import pandas as pd
from .Race_Msg_Outcome import get_msg_outcome
######################
## Main Functions ##
######################
def generate_race_stats(date, sym, msgs, top, depth, race_recs, ticktable, price_factor, \
race_param):
'''
This function generates single level race stats for a given symdate after race detection.
This function loops over each row in race record (output of race detection code) and calls
find_single_lvl_races() on each row to generate stats for each race.
Param: Please refer to Section 10.5 of the Code and Data Appendix.
msgs: df of msgs
top: top of book df, generated by order book construction
depth: output of order book construction
race_recs: output of race detection code
ticktable: df, tick size info for different prices
price_factor: constant, unit conversion
sym, date: symbol-date
race_param: dict of race parameters
Return:
stats: df with all race stats for the symbol-date, one row per race
'''
##### Initialization
stats = {}
strict_fail = race_param['strict_fail']
##### Get ME (outbound msgs) dataframes, set index to be UniqueOrderID-EventNum pairs and set them as global variables
# We define those global variables to efficiently find the outbounds associated with the race inbounds
me_cols = ['UniqueOrderID', 'EventNum', 'MessageType', 'ExecType', 'UnifiedMessageType', 'LeavesQty', 'ExecutedPrice', 'ExecutedQty']
me = msgs.loc[msgs['EventNum'].notnull() & ((msgs['MessageType'] == 'Execution_Report') | (msgs['MessageType'] == 'Cancel_Reject')), me_cols].copy()
me = me.reset_index()
me = me.set_index(['UniqueOrderID', 'EventNum'])
me = me.sort_index(level=0)
me_qr_bid_cols = ['UniqueOrderID', 'BidEventNum', 'MessageType', 'ExecType', 'UnifiedMessageType', 'LeavesQty', 'ExecutedPrice', 'ExecutedQty']
me_qr_bid = msgs.loc[msgs['BidEventNum'].notnull() & ((msgs['MessageType'] == 'Execution_Report') | (msgs['MessageType'] == 'Cancel_Reject')), me_qr_bid_cols].copy()
me_qr_bid = me_qr_bid.reset_index()
me_qr_bid = me_qr_bid.set_index(['UniqueOrderID', 'BidEventNum'])
me_qr_bid = me_qr_bid.sort_index(level=0)
me_qr_ask_cols = ['UniqueOrderID', 'AskEventNum', 'MessageType', 'ExecType', 'UnifiedMessageType', 'LeavesQty', 'ExecutedPrice', 'ExecutedQty']
me_qr_ask = msgs.loc[msgs['AskEventNum'].notnull() & ((msgs['MessageType'] == 'Execution_Report') | (msgs['MessageType'] == 'Cancel_Reject')), me_qr_ask_cols].copy()
me_qr_ask = me_qr_ask.reset_index()
me_qr_ask = me_qr_ask.set_index(['UniqueOrderID', 'AskEventNum'])
me_qr_ask = me_qr_ask.sort_index(level=0)
##### Loop over races to populate statistics
for ix, race in race_recs.iterrows():
stats[ix] = stats_for_one_race(race, msgs, me, me_qr_ask, me_qr_bid, top, depth,\
ticktable, price_factor, \
strict_fail)
##### construct pd.DataFrame
stats = pd.DataFrame.from_dict(stats, orient='index')
stats['Date'] = date
stats['Symbol'] = sym
return stats
def stats_for_one_race(race, msgs, me, me_qr_ask, me_qr_bid, top, depth,\
ticktable, price_factor, strict_fail):
'''
This function generates stats for a singlelvl race
Param: Please refer to Section 10.3 of the Code and Data Appendix.
race: pd.Series, the record for one specific race from race_recs
msgs: df of msgs
me, me_qr_ask, me_qr_bid: dfs of outbound msgs for non-quotes and quotes
top: top of book df, generated by order book construction
depth: depth generated by order book construction
ticktable: df, tick size info for different prices
price_factor: constant for price unit conversion
strict_fail: strict fail definition. If True, only expired IOCs are considered as fails
Return:
out: pd.Series with all relevant statistics of the race
'''
###########################################################################
### Specify constant
seconds_to_microseconds = 1e6
### Initialize dictionary for output
out = OrderedDict()
### Extract basic information about the race
S, P_Signed, ix_st, time_st, race_horizon = race[['Side','P_Signed','Race_Start_Idx','MessageTimestamp','Race_Horizon']]
### Get race messages
race_msgs = msgs.loc[race['Race_Msgs_Idx']]
race_msgs['Idx'] = race['Race_Msgs_Idx']
proc_time = race_msgs['ProcessingTime'].values
### Create an unsigned version of price level
Sign = 1 * (S == 'Ask') - 1 * (S == 'Bid')
P = Sign * P_Signed
### Create pointer to top-of-book as-of the first message in the race
top_1 = top.loc[ix_st]
out['MidPt'] = top_1['MidPt'] / price_factor
###########################################################################
##### General variables
### Race IDs
out['SingleLvlRaceID'] = race['SingleLvlRaceID']
out['Race_Start_Idx'] = ix_st
out['Race_Msgs_Idx'] = race['Race_Msgs_Idx']
# The Info Horizon length.
# For 'Info_Horizon' method this is the same as Race_Horizon
    # For 'Fixed_Horizon' method, this is not the same as Race_Horizon
out['Race_Horizon'] = race_horizon.total_seconds() * seconds_to_microseconds
# processing time of the first message
out['Processing_Time'] = pd.Timedelta(proc_time[0]).total_seconds() * seconds_to_microseconds
### Top of book info
out['BestBid'] = top_1['BestBid'] / price_factor
out['BestBidQty'] = top_1['BestBidQty']
out['BestAsk'] = top_1['BestAsk'] / price_factor
out['BestAskQty'] = top_1['BestAskQty']
### RaceID, Side, Price, and Depth at start of race
out['Side'] = S
out['RacePrice'] = P / price_factor
out['TickSize'] = ticktable.loc[ticktable['p_int64'] <= P, 'tick'].iloc[-1].item()
### Near Boundary
# Flag races within 10 ticks of the boundary between tick levels
tick_gr = (1./price_factor) * P + 5 * out['TickSize']
tick_less = max((1./price_factor) * P - 5 * out['TickSize'], 0)
out['NearTickBoundary'] = (ticktable.loc[ticktable['p'] <= tick_gr, 'tick'].iloc[-1].item() > out['TickSize']) \
or (ticktable.loc[ticktable['p'] <= tick_less, 'tick'].iloc[-1].item() < out['TickSize'])
### Depth
out['Depth_Disp'] = GetDepth(S, P, ix_st, 'Displayed', depth)
out['Depth_Total'] = GetDepth(S, P, ix_st, 'Total', depth)
### Number of race relevant No Response Msgs
out['M_RaceRlvtNoResponse'] = race_msgs['%sRaceRlvtNoResponse' % S].sum()
### Write RaceRlvtOutcome with the msg outcome accounting for takes that are price dependent
# Note that Outcome Classification is done twice: here and previously in race detection.
# This is because we don't save the result of the outcome calculation.
race_msgs['%sRaceMsgOutcome' % S] = get_msg_outcome(S, P_Signed, race_msgs, strict_fail)
###########################################################################
##### Race timings
### Time M1 and Time MLast
out['Time_M1'] = race_msgs['MessageTimestamp'].iloc[0]
out['Time_MLast'] = race_msgs['MessageTimestamp'].iloc[-1]
### Time from Success to Fail
Time_S1 = race_msgs.loc[(race_msgs['%sRaceMsgOutcome' % S] == 'Success'), 'MessageTimestamp'].iloc[0]
Time_F1 = race_msgs.loc[(race_msgs['%sRaceMsgOutcome' % S] == 'Fail'), 'MessageTimestamp'].iloc[0]
out['Time_S1_F1'] = (Time_F1 - Time_S1).total_seconds() * seconds_to_microseconds
out['Time_S1_F1_Max_0'] = max(out['Time_S1_F1'], 0.)
out['Time_M1_S1'] = (Time_S1 - out['Time_M1']).total_seconds() * seconds_to_microseconds
out['Time_M1_F1'] = (Time_F1 - out['Time_M1']).total_seconds() * seconds_to_microseconds
###########################################################################
##### Shares traded or canceled in race and number of trades
# Note: max(0, x) prevents missing values to be introduced
# (i.e. if there is no quantity cancelled, we record 0, not missing.)
### Quantity traded and # trades in the race.
# Use the TradeNum_Vol() to get qty traded for trade in the race.
# Only include depth at the race Price/Side.
# (See the code block for the function for additional detail)
at_P = True
out['Qty_Traded'], out['Num_Trades'] = race_msgs.apply(TradeNum_Vol, args=(S, P, at_P, me, me_qr_ask, me_qr_bid), axis=1).sum()
out['Qty_Traded'] = max(0, out['Qty_Traded'])
out['Num_Trades'] = max(0, out['Num_Trades'])
out['Value_Traded'] = out['Qty_Traded'] * out['RacePrice']
### Quantity canceled
# Note that all cancels that have any cancelled quantity are labelled success.
out['Qty_Cancelled'] = max(0,race_msgs.loc[(race_msgs['%sRaceRlvtType' % S] == 'Cancel Attempt') & (race_msgs['%sRaceMsgOutcome' % S] =='Success'), '%sRaceRlvtQty' %S].sum())
out['Value_Cancelled'] = out['Qty_Cancelled'] * out['RacePrice']
### Active Qty: Qty_Traded at P + Qty_Cancelled at P
out['Qty_Active'] = out['Qty_Traded'] + out['Qty_Cancelled']
### Quantity of shares remaining = total depth - qty canceled and qty traded
out['Qty_Remaining'] = max(0, out['Depth_Total'] - out['Qty_Cancelled'] - out['Qty_Traded'])
### Quantity of displayed depth that is remaining (if there was hidden depth, set it to 0)
out['Qty_Remaining_Disp'] = max(0, out['Depth_Disp'] - out['Qty_Cancelled'] - out['Qty_Traded'])
### % of depth remaining after the race
# We assert for zero division. In theory, Depth_Disp should not be zero given there is a race.
# In practice, Depth_Disp might be zero (rarely) because our order book construction is not perfect.
out['Qty_Remaining_Disp_Pct'] = np.float64(out['Qty_Remaining_Disp'])/np.float64(out['Depth_Disp']) if out['Depth_Disp'] != 0. else np.nan
###########################################################################
##### Number of users/messages/firms (N/M/F) in Race Relevant Msgs With T us of the race starting msg
### Preparation
# Slice relevant messages that are close in time and are cancels/takes at P or takes worse than P (deeper in the book).
# time_st and race_horizon come from the race rec data
time_st = time_st.to_datetime64()
time_end = (time_st + race_horizon).to_datetime64()
t_close = np.timedelta64(10, 'ms')
is_close_in_time = (msgs['MessageTimestamp'].to_numpy() >= (time_st - t_close)) \
& (msgs['MessageTimestamp'].to_numpy() <= (time_st + t_close))
RaceRlvt = msgs['%sRaceRlvt' % S].to_numpy()
RaceRlvtType = msgs['%sRaceRlvtType' % S].to_numpy()
RaceRlvtPriceLvlSigned = msgs['%sRaceRlvtPriceLvlSigned' % S].to_numpy()
is_canc_eq_p = is_close_in_time & RaceRlvt & (RaceRlvtType == 'Cancel Attempt') & (RaceRlvtPriceLvlSigned == P_Signed)
is_take_eq_p = is_close_in_time & RaceRlvt & (RaceRlvtType == 'Take Attempt') & (RaceRlvtPriceLvlSigned == P_Signed)
is_take_deeper_p = is_close_in_time & RaceRlvt & (RaceRlvtType == 'Take Attempt') & (RaceRlvtPriceLvlSigned > P_Signed)
rel_msgs = msgs.loc[is_canc_eq_p | is_take_eq_p | is_take_deeper_p,
['MessageTimestamp', 'UserID', '%sRaceRlvtType' % S,
'%sRaceRlvtPriceLvlSigned' % S, 'UnifiedMessageType',
'Event', 'BidRaceRlvtNoResponse', 'AskRaceRlvtNoResponse',
'%sRaceRlvtOutcomeGroup' % S, '%sRaceRlvtBestExecPriceLvlSigned' % S,'TIF', 'FirmID']]
# Generate outcome
rel_msgs['Outcome'] = get_msg_outcome(S, P_Signed, rel_msgs, strict_fail)
### Number of users/messages/firms (N/M/F) in Race Relevant Msgs
# within T from the start of race for T = 50us, 100us, 200us, 500us, 1ms
# N_Within_1000us, M_Canc_Within_1000us and M_Prior_1000us are all used for the filters.
# Note that this section counts all race relevant msgs (no matter whether they are in this race)
# All vars in this section are named by *_Within_Ts.
# The vars in the next section named N/M_Fail/Success Canc/Take, etc. are counting race msgs (only msgs of this race)
is_take = rel_msgs['%sRaceRlvtType' % S].to_numpy() == 'Take Attempt' # including takes at P and deeper than P
is_canc = (rel_msgs['%sRaceRlvtType' % S].to_numpy() == 'Cancel Attempt') \
& (rel_msgs['%sRaceRlvtPriceLvlSigned' % S].to_numpy() == P_Signed)
is_fail = rel_msgs['Outcome'].to_numpy() == 'Fail'
is_success = rel_msgs['Outcome'].to_numpy() == 'Success'
is_IOC = (rel_msgs['UnifiedMessageType'].to_numpy() == 'Gateway New Order (IOC)') & (rel_msgs['TIF'].to_numpy() == 'IOC')
is_Lim = rel_msgs['UnifiedMessageType'].to_numpy() == 'Gateway New Order (Limit)'
T_list = ['50', '100', '200', '500', '1000', '2000', '3000']
for T in T_list:
in_T = (rel_msgs['MessageTimestamp'].to_numpy() >= time_st) \
& (rel_msgs['MessageTimestamp'].to_numpy() <= (time_st + np.timedelta64(int(T), 'us')))
## Number of Messages (M)
out['M_Within_%sus' % T] = in_T.sum()
out['M_Canc_Within_%sus' % T] = (in_T & is_canc).sum()
out['M_Take_Within_%sus' % T] = (in_T & is_take).sum()
out['M_Fail_Within_%sus' % T] = (in_T & is_fail).sum()
out['M_Success_Within_%sus' % T] = (in_T & is_success).sum()
out['M_Take_IOC_Within_%sus' % T] = (is_take & is_IOC & in_T).sum()
out['M_Take_Lim_Within_%sus' % T] = (is_take & is_Lim & in_T).sum()
## Number of Users (N)
out['N_Within_%sus' % T] = rel_msgs.loc[in_T, 'UserID'].nunique()
### The number of firms within T
# For some messages we don't know their FirmID (firm unknown)
# Each of those unknown firms are all treated as distinct firms
out['F_Within_%sus' % T] = rel_msgs.loc[in_T, 'FirmID'].nunique() + rel_msgs.loc[in_T, 'FirmID'].isnull().sum()
###########################################################################
##### Number of users/messages/firms (N/M/F) in Race
is_take = race_msgs['%sRaceRlvtType' % S].to_numpy() == 'Take Attempt'
is_canc = race_msgs['%sRaceRlvtType' % S].to_numpy() == 'Cancel Attempt'
is_success = race_msgs['%sRaceMsgOutcome' % S].to_numpy() == 'Success'
is_fail = race_msgs['%sRaceMsgOutcome' % S].to_numpy() == 'Fail'
is_IOC = (race_msgs['UnifiedMessageType'].to_numpy() == 'Gateway New Order (IOC)') & (race_msgs['TIF'].to_numpy() == 'IOC')
is_Lim = race_msgs['UnifiedMessageType'].to_numpy() == 'Gateway New Order (Limit)'
is_QuoteRelated = race_msgs['QuoteRelated'].to_numpy()
is_at_P = race_msgs['%sRaceRlvtPriceLvlSigned' % S].to_numpy() == P_Signed
### Firms
out['F_All'] = race_msgs['FirmID'].nunique() + race_msgs['FirmID'].isnull().sum()
### Users
out['N_All'] = race_msgs['UserID'].nunique()
### Msgs
out['M_All'] = race_msgs.shape[0]
out['M_Canc'] = is_canc.sum()
out['M_Take'] = is_take.sum()
out['M_Take_IOC'] = (is_take & is_IOC).sum()
out['M_Take_Lim'] = (is_take & is_Lim).sum()
### Successful race_msgs
out['M_Success_All'] = is_success.sum()
out['M_Success_Canc'] = (is_success & is_canc).sum()
out['M_Success_Take'] = (is_success & is_take).sum()
### Failed race_msgs - Note that for all Cancels, the fail is a cancel reject or c/r reject.
out['M_Fail_All'] = (is_fail).sum()
out['M_Fail_Canc'] = (is_fail & is_canc).sum()
out['M_Fail_Take'] = (is_fail & is_take).sum()
out['M_Fail_Take_IOC'] = (is_fail & is_take & is_IOC).sum()
out['M_Fail_Take_at_P'] = (is_fail & is_take & is_at_P).sum()
### Quote-Related Msgs: Flag if any race messages are quote related
out['M_QR'] = is_QuoteRelated.sum()
### Capture fails that are specifically expired IOCs (rather than just trading at the wrong price)
# By restricting the fail to the outcome group (rather than the outcome), we know that it failed regardless of
# price. I.e. It failed because it expired, not because it traded at the wrong price.
out['M_IOC_Expired'] = race_msgs.index[(race_msgs['%sRaceRlvtOutcomeGroup' % S] == 'Fail') & \
(race_msgs['%sRaceRlvtType' % S] == 'Take Attempt') & \
(race_msgs['UnifiedMessageType'] == 'Gateway New Order (IOC)') & \
(race_msgs['TIF'] == 'IOC')].nunique()
###########################################################################
##### Msgs Detailed Info
### S1 F1: message type, firm type, firm number and user number
out['S1_Type'] = race_msgs.loc[is_success, '%sRaceRlvtType' % S].iloc[0]
out['S1_FirmID'] = race_msgs.loc[is_success, 'FirmID'].iloc[0]
out['S1_UserID'] = race_msgs.loc[is_success, 'UserID'].iloc[0]
out['F1_Type'] = race_msgs.loc[is_fail, '%sRaceRlvtType' % S].iloc[0]
out['F1_FirmID'] = race_msgs.loc[is_fail, 'FirmID'].iloc[0]
out['F1_UserID'] = race_msgs.loc[is_fail, 'UserID'].iloc[0]
###########################################################################
##### Profits
# Calculate price T into the future
T_list = ['1ms', '10ms', '100ms', '1s', '10s', '30s', '60s', '100s']
times = top.loc[(top.index >= ix_st) & (top['MessageTimestamp'] <= (time_end + np.timedelta64(100, 's')))]
for t in T_list:
# Missing midpoint will have np.nan raw_midpt_f, hence na for all profit measures
in_t_T = times['MessageTimestamp'].to_numpy() <= time_st + np.timedelta64(pd.Timedelta(t))
last_indix_in_T = int(in_t_T.nonzero()[0][-1]) # get the index of the last "True" in in_t_T
raw_best_bid_f = times['BestBid'].iat[last_indix_in_T]
raw_midpt_f = times['MidPt'].iat[last_indix_in_T]
raw_best_ask_f = times['BestAsk'].iat[last_indix_in_T]
# Order book info
out['BestBid_f_%s' % t] = (1. / price_factor) * raw_best_bid_f
out['BestAsk_f_%s' % t] = (1. / price_factor) * raw_best_ask_f
out['MidPt_f_%s' % t] = (1. / price_factor) * raw_midpt_f
# Share profits
share_profits = (1. / price_factor) * (raw_midpt_f - float(P))
out['Race_Profits_PerShare_%s' % t] = Sign * share_profits
out['Race_Profits_PerShare_bps_%s' % t] = (Sign * share_profits * 10000.) / out['MidPt']
out['Race_Profits_PerShare_Tx_%s' % t] = (1. / out['TickSize']) * Sign * share_profits
# Race profits
out['Race_Profits_DispDepth_%s' % t] = out['Depth_Disp'] * out['Race_Profits_PerShare_%s' % t]
out['Race_Profits_TotalDepth_%s' % t] = out['Depth_Total'] * out['Race_Profits_PerShare_%s' % t]
out['Race_Profits_ActiveQty_%s' % t] = out['Qty_Active'] * out['Race_Profits_PerShare_%s' % t]
###########################################################################
##### Loss Avoidance and Qty Traded Price Impact
out['Eff_Spread_Paid_Race'] = (1./price_factor)*Sign*(float(P)-top_1['MidPt']) * out['Qty_Traded']
out['Eff_Spread_PerShare_Race'] = (1./price_factor)*Sign*(float(P)-top_1['MidPt'])
# Price Impact and Loss Avoidance
T_list = ['1ms', '10ms', '100ms', '1s', '10s', '30s', '60s', '100s']
times = top.loc[(top.index >= ix_st) & (top['MessageTimestamp'] <= (time_end + np.timedelta64(100, 's')))]
for T in T_list:
raw_midpt_f_T = times.loc[times['MessageTimestamp'] <= (time_st + np.timedelta64(pd.Timedelta(T))), 'MidPt'].iloc[-1]
out['LossAvoidance_%s' % T] = (1. / price_factor)*Sign*(raw_midpt_f_T - float(P)) * out['Qty_Cancelled']
out['PriceImpact_Paid_Race_%s' % T] = (1. / price_factor)*Sign*(raw_midpt_f_T - top_1['MidPt']) * out['Qty_Traded']
out['PriceImpact_PerShare_Race_%s' % T] = (1. / price_factor)*Sign*(raw_midpt_f_T - top_1['MidPt'])
###########################################################################
##### Pre-Race Stable BBO dummies
T_list = ['10us','50us','100us', '500us', '1ms']
for T in T_list:
## Preparation
# Get the top-of-book data within T prior to the race
# Note that ix_st is the index of the race starting msg
# We want to select all msgs that are within T of the race start to get the prices T before the race
# Get the msgs that are at least T before the race (whatever before race start - T)
msgs_before_T_prerace = (msgs['MessageTimestamp'] < time_st - np.timedelta64(pd.Timedelta(T)))
# Get the last msg that is at least T before the race
ix_T_prerace = msgs_before_T_prerace[msgs_before_T_prerace==True].last_valid_index()
if ix_T_prerace is None:
ix_T_prerace = 0
within_T_prerace = (msgs.index >= ix_T_prerace) & (msgs.index <= ix_st)
top_within_T = top[within_T_prerace]
# Get the race side BBO prices from race start - T to race start
race_bbo_prc_signed_within_T = top_within_T['Best%sSigned' % S]
# Whether the BBO Quote is stable in price
out['Stable_Prc_RaceBBO_since_%s_PreRace' % T] = np.all(race_bbo_prc_signed_within_T \
== race_bbo_prc_signed_within_T.iloc[-1])
# Whether the BBO Quote has improved
if race_bbo_prc_signed_within_T.iloc[0] < race_bbo_prc_signed_within_T.iloc[-1]:
out['RaceBBO_Improved_since_%s_PreRace' % T] = 1
elif race_bbo_prc_signed_within_T.iloc[0] > race_bbo_prc_signed_within_T.iloc[-1]:
out['RaceBBO_Improved_since_%s_PreRace' % T] = -1
else:
out['RaceBBO_Improved_since_%s_PreRace' % T] = 0
return (pd.Series(out))
######################
## Helper Functions ##
######################
def GetDepth(S, P, ix, total, depth):
'''
Given a side, price lvl and msg index, this function returns the depth at that price-side at the last depth change before
the msg. If total == 'Total', we use hidden depth. Otherwise, displayed depth.
Param:
S: side
P: price
ix: idx of the msg
total: ='Total' to include hidden depth
depth: depth from order book construction
Return:
Sh: depth at that price-side at the last depth change
before the msg (# shares)
'''
# If hidden depth is desired, use _h as suffix
hidden = ''
if total == 'Total':
hidden = '_h'
Sh = 0
if S == 'Ask':
if P in depth['ask%s' % hidden].keys():
depth_ask = depth['ask%s' % hidden][P]
if min(depth_ask.index) <= ix:
Sh = depth_ask.loc[depth_ask.index <= ix].iloc[-1]
else:
if P in depth['bid%s' % hidden].keys():
depth_bid = depth['bid%s' % hidden][P]
if min(depth_bid.index) <= ix:
Sh = depth_bid.loc[depth_bid.index <= ix].iloc[-1]
return (Sh)
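# Toy illustration (added sketch, not from the original pipeline): the `depth` argument is a
# dict keyed by 'ask'/'bid' (plus '_h' variants for total depth), mapping each price to a
# pd.Series of depth indexed by message index. All numbers below are made-up example values.
def _example_get_depth():
    toy_depth = {
        'ask': {10050: pd.Series([300, 200], index=[5, 12])},
        'ask_h': {10050: pd.Series([350, 250], index=[5, 12])},
        'bid': {}, 'bid_h': {},
    }
    disp = GetDepth('Ask', 10050, 12, 'Displayed', toy_depth)  # -> 200
    total = GetDepth('Ask', 10050, 12, 'Total', toy_depth)     # -> 250
    return disp, total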
def TradeNum_Vol(msg, *args):
'''
This function takes an inbound message and returns the sum of the executed volume and
number of execution messages (trades) from all aggressive outbound fills related
to that inbound. If at_P is true, this only includes quantity traded at the
price/side in the function call (race P/S). If at_P is false, this provides all
quantity traded because of that inbound message regardless of whether or not
it is at the race P/S.
Param:
msg: The inbound message of interest
args should include the three parameters (in order)
S: side
P: price level of the race
at_P: If true, only include quantity traded at the price/side.
If false, provides all quantity traded because of that inbound message
regardless of whether or not it is at the race P/S.
me, me_qr_ask, me_qr_bid: outbound dfs
Global:
Uses the three me (df) global variables generated by get_me_dfs()
Return:
        pd.Series([qty, trade]): A pandas series with two values, the sum of the executed volume and
number of execution messages (trades) from all aggressive outbound fills related
to that inbound.
'''
S, P, at_P, me, me_qr_ask, me_qr_bid = args[0], args[1], args[2], args[3], args[4], args[5]
qty, trade = np.nan, np.nan
# If the function call does not require the output
# to be calculated for a specific price/side, then
# use the side on which the message is race relevant
    # to get the associated information (e.g. type) in
# the next step. If the message is not ask race relevant
# then it is either bid race relevant or
# it is not race relevant (and will not matter later)
if not at_P:
if msg['AskRaceRlvt']:
S = 'Ask'
else:
S = 'Bid'
# If the message is a take attempt, then it may trade so continue the logic.
# otherwise, it will not end in a trade and can exit the function
if (msg['%sRaceRlvtType' % S] == 'Take Attempt'):
# If the message is a success, then it took depth (traded). Market orders
# that fail also take depth. In these two cases, continue to calculate volume
# and number of trades. Otherwise, exit the function
if (msg['%sRaceRlvtOutcomeGroup' % S] == 'Race Price Dependent'):
if not msg['QuoteRelated']:
# If the UniqueOrderID and EventNum are in the outbound (me) data, then pull
# the relevant trades and calculate the output. Otherwise, exit the function.
# (This should always be the case)
if (msg['UniqueOrderID'], msg['EventNum']) in me.index:
ev = me.loc[(msg['UniqueOrderID'], msg['EventNum'])]
# Calculate the number of trades and quantity traded at P or at all P depending on the function call
if at_P:
qty = ev.loc[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}) & (ev['ExecutedPrice'] == P), 'ExecutedQty'].sum()
trade = len(ev[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}) & (ev['ExecutedPrice'] == P) & (ev['ExecutedQty'] > 0)])
else:
qty = ev.loc[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}), 'ExecutedQty'].sum()
trade = len(ev[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}) & (ev['ExecutedQty'] > 0)])
else:
qty = np.nan
else:
# If the message is quote-related.
if S == 'Bid':
# If the UniqueOrderID and EventNum are in the outbound (me) data, then pull
# the relevant trades and calculate the output. Otherwise, exit the function.
# (This should always be the case)
if (msg['UniqueOrderID'], msg['AskEventNum']) in me_qr_ask.index:
ev = me_qr_ask.loc[(msg['UniqueOrderID'], msg['AskEventNum'])]
# Calculate the number of trades and quantity traded at P or at all P depending on the function call
if at_P:
qty = ev.loc[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}) & (ev['ExecutedPrice'] == P), 'ExecutedQty'].sum()
trade = len(ev[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}) & (ev['ExecutedPrice'] == P) & (ev['ExecutedQty'] > 0)])
else:
qty = ev.loc[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}), 'ExecutedQty'].sum()
trade = len(ev[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}) & (ev['ExecutedQty'] > 0)])
else:
qty = np.nan
elif S == 'Ask':
# If the UniqueOrderID and EventNum are in the outbound (me) data, then pull
# the relevant trades and calculate the output. Otherwise, exit the function.
                # (This should always be the case)
if (msg['UniqueOrderID'], msg['BidEventNum']) in me_qr_bid.index:
ev = me_qr_bid.loc[(msg['UniqueOrderID'], msg['BidEventNum'])]
# Calculate the number of trades and quantity traded at P or at all P depending on the function call
if at_P:
qty = ev.loc[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}) & (ev['ExecutedPrice'] == P), 'ExecutedQty'].sum()
trade = len(ev[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}) & (ev['ExecutedPrice'] == P) & (ev['ExecutedQty'] > 0)])
else:
qty = ev.loc[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}), 'ExecutedQty'].sum()
trade = len(ev[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}) & (ev['ExecutedQty'] > 0)])
else:
qty = np.nan
return pd.Series([qty, trade])
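# Illustrative note (not part of the original code): TradeNum_Vol is written to be applied
# row-wise over the inbound messages, e.g. with pandas apply. The names below (race_msgs,
# S, P, me, me_qr_ask, me_qr_bid) are assumed to already exist in the caller:
#     qty_trades = race_msgs.apply(TradeNum_Vol, axis=1,
#                                  args=(S, P, True, me, me_qr_ask, me_qr_bid))
# which yields one (executed qty, trade count) pair per inbound message at the race P/S.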
| StarcoderdataPython |
4858453 | <gh_stars>0
import re
from Lexical.Token import Token, Definition
from Enum.TokenType import TokenType
class Lexer:
def __init__(self):
        self.token_definitions = []
self.term_pattern = re.compile('\r\n|\r|\n')
def add_definition(self, type, pattern, ignored = False):
definition = Definition(type, pattern, ignored)
self.token_definitions.append(definition)
def tokenize(self, source):
cur_index = 0
cur_line = 1
cur_column = 0
while cur_index < len(source):
match_token = None
for definition in self.token_definitions:
matches = definition.pattern.search(source, cur_index)
success = matches is not None and (matches.regs is not None and len(matches.regs) >= 1)
if not success: continue
match = matches.regs[0]
index = match[0]
length = match[1] - index
if index - cur_index != 0: continue
value = source[index:index+length]
terminator = self.term_pattern.search(value)
cur_index += length
                cur_line += 0 if terminator is None or terminator.regs is None or len(terminator.regs) < 1 else 1
                cur_column = cur_column + length if terminator is None else len(value) - (terminator.regs[0][0] + terminator.regs[0][1])
match_token = Token(definition.type, matches.groups(), [cur_index, cur_line, cur_column])
break
            if match_token is None:
                # Nothing matched at the current position; fail fast rather than loop forever without advancing.
                raise ValueError('Unexpected character at index %d (line %d)' % (cur_index, cur_line))
            if definition.ignored is False:
                yield match_token
yield Token(TokenType.EOF, None, [cur_index, cur_line, cur_column])
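# Illustrative usage sketch (not part of the original module). It assumes that the
# Definition objects expose the supplied pattern as a compiled regex via `.pattern`
# (which the tokenize loop above relies on) and that TokenType defines members such
# as NUMBER and WHITESPACE besides EOF; check Lexical.Token and Enum.TokenType for
# the real API before using this.
#
#     lexer = Lexer()
#     lexer.add_definition(TokenType.NUMBER, re.compile(r'\d+'))
#     lexer.add_definition(TokenType.WHITESPACE, re.compile(r'\s+'), ignored=True)
#     for token in lexer.tokenize('12 34'):
#         print(token)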
| StarcoderdataPython |
6434432 | <reponame>willem-vanheemstrasystems/django-frontend<gh_stars>0
from django.conf.urls import url
from . import views
urlpatterns = [
# ex: /client/
    url(r'^$', views.index, name='index'),
]
| StarcoderdataPython |
3589463 | <reponame>mblackgeo/lambdarado_py
from lambdarado import start
def get_app():
from .app_creator import app
return app
start(get_app)
| StarcoderdataPython |
3387668 | <reponame>vscg/spotify-client<gh_stars>0
import copy
import logging
import random
from base64 import b64encode
from datetime import datetime, timedelta
from typing import List, Union
from urllib.parse import urlencode
import requests
from .config import Config
from .exceptions import ClientException, ImproperlyConfigured, SpotifyException
logger = logging.getLogger(__name__)
class SpotifyClient(object):
"""
Wrapper around the Spotify API
:ivar client_id: Spotify client ID used for authenticating with API
:ivar secret_key: Spotify secret key used for authenticating with API
:ivar identifier: Identifier to include in log messages for identifying requests
"""
BATCH_SIZE = 100
MAX_SEARCH_SIZE = 50
DEFAULT_RECENTLY_PLAYED_TRACKS_LIMIT = 30
SEARCH_TYPE_OPTIONS = ['album', 'artist', 'playlist', 'track', 'show', 'episode']
API_URL = 'https://api.spotify.com/v1'
AUTH_URL = 'https://accounts.spotify.com/api/token'
USER_AUTH_URL = 'https://accounts.spotify.com/authorize'
REDACT_VALUE = '**********'
REDACT_DATA_KEYS = ['Authorization', 'code', 'refresh_token', 'access_token']
def __init__(self, client_id: str = None, secret_key: str = None, identifier: str = 'SpotifyClient'):
self.client_id = client_id or Config.CLIENT_ID
self.secret_key = secret_key or Config.SECRET_KEY
if not all([self.client_id, self.secret_key]):
raise ImproperlyConfigured(
'Missing either client_id or secret_key variable. '
'Please set these variables either in the Config class or as instance arguments'
)
self.fingerprint = identifier
self.auth_token = None
self.auth_token_last_refreshed = None
self.seen_songs = []
def _sanitize_log_data(self, data: dict) -> dict:
"""
Redact sensitive data (auth headers, access tokens, etc.) from logging data and
replace with a sanitized value.
:param data: (dict) Request data to log that may contain sensitive information
:return: (dict)
"""
for name in data:
if name in self.REDACT_DATA_KEYS:
data[name] = self.REDACT_VALUE
return data
def _log(self, level: int, msg: str, extra: dict = None, exc_info: bool = False) -> None:
"""
Log a message to the logger at a given level with optional extra info or traceback info.
NOTE: Any data passed as `extra` should be a copy of the real data used in the code. This
is because we do transformations on the data passed to sanitize sensitive values, so if we
operate on the "real data" we could inadvertently update the actual data being used in the
code.
:param level: (int) Logging level to log at. Should be a constant from the `logging` library
:param msg: (str) Log message to write to write
:param extra: (dict) Optional payload of extra logging information
:param exc_info: (bool) Include traceback information with log message
"""
if extra is None:
extra = {}
extra.update({'fingerprint': self.fingerprint})
# Redact sensitive information from logging data extra
for key, data in extra.items():
if isinstance(data, dict):
extra[key] = self._sanitize_log_data(data)
logger.log(level, msg, extra=extra, exc_info=exc_info)
def _make_spotify_request(
self,
method: str,
url: str,
params: dict = None,
data: Union[dict, bytes] = None,
json: dict = None,
headers: dict = None
) -> dict:
"""
Make a request to the Spotify API and return the JSON response
:param method: (str) HTTP method to use when sending request
:param url: (str) URL to send request to
:param params: (dict) GET query params to add to URL
:param data: (dict or bytes) POST data to send in request
:param json: (dict) JSON data to send in request
:param headers: (dict) Headers to include in request
:return (dict) Response content
:raises: `SpotifyException` if request was unsuccessful
:raises: `ClientException` if unexpected error encountered
"""
if not headers:
# Retrieve the header we need to make an auth request
auth_token = self._get_auth_access_token()
headers = {'Authorization': 'Bearer {}'.format(auth_token)}
logging_params = copy.deepcopy(params)
logging_data = copy.deepcopy(data)
logging_json = copy.deepcopy(json)
logging_headers = copy.deepcopy(headers)
self._log(
logging.INFO,
'Making {method} request to Spotify URL: {url}'.format(
method=method,
url=url,
),
extra={
'request_method': method,
'params': logging_params,
'data': logging_data,
'json': logging_json,
'headers': logging_headers
}
)
try:
response = requests.request(
method,
url,
params=params,
data=data,
json=json,
headers=headers
)
response.raise_for_status()
if response.text:
response = response.json()
self._log(logging.INFO, 'Successful request made to {}.'.format(url))
self._log(
logging.DEBUG,
'Successful request made to {}.'.format(url),
extra={'response_data': copy.deepcopy(response)}
)
return response
except requests.exceptions.HTTPError as exc:
response = exc.response
self._log(
logging.ERROR,
'Received HTTPError requesting {}'.format(url),
extra={
'request_method': method,
'data': logging_data,
'json': logging_json,
'params': logging_params,
'headers': logging_headers,
'response_code': response.status_code,
'response_reason': response.reason,
'response_data': response.text,
},
exc_info=True
)
raise SpotifyException('Received HTTPError requesting {}'.format(url)) from exc
except requests.exceptions.ConnectionError as exc:
self._log(
logging.ERROR,
'Received ConnectionError requesting {}'.format(url),
extra={
'request_method': method,
'data': logging_data,
'json': logging_json,
'params': logging_params,
'headers': logging_headers,
},
exc_info=True
)
raise SpotifyException('Received ConnectionError requesting {}'.format(url)) from exc
except Exception:
self._log(logging.ERROR, 'Received unhandled exception requesting {}'.format(url), exc_info=True)
raise ClientException('Received unhandled exception requesting {}'.format(url))
def _get_auth_access_token(self) -> str:
"""
Return the access token we need to make requests to Spotify.
:return: (str) Key needed to authenticate with Spotify API
:raises: `SpotifyException` if access token not retrieved
"""
if not self.auth_token_last_refreshed or self.auth_token_last_refreshed < datetime.now() - timedelta(hours=1):
access_token = self._make_auth_access_token_request()
if access_token:
self.auth_token = access_token
self.auth_token_last_refreshed = datetime.now()
else:
self._log(logging.ERROR, 'Unable to retrieve access token from Spotify')
raise SpotifyException('Unable to retrieve Spotify access token')
return self.auth_token
def _make_authorization_header(self) -> dict:
"""
Build the Basic Authorization header used for Spotify API authentication
:return: (str) Base 64 encoded string that contains the client ID and client secret key for application
"""
auth_val = f'{self.client_id}:{self.secret_key}'
auth_val = bytes(auth_val, encoding='utf-8')
auth_header = b64encode(auth_val)
return {'Authorization': 'Basic {}'.format(auth_header.decode('utf8'))}
def _make_auth_access_token_request(self) -> str:
"""
Get an access token from Spotify for authentication
:return: (str) Token used for authentication with Spotify
"""
headers = self._make_authorization_header()
data = {'grant_type': 'client_credentials'}
resp = self._make_spotify_request(
'POST',
self.AUTH_URL,
data=data,
headers=headers
)
return resp.get('access_token')
def get_code_from_spotify_uri(self, code: str) -> str:
"""
Get the Spotify code (alphanumeric value) from the Spotify song URI. Used in requests to Spotify
for a track, as Spotify only cares about the alphanumeric value.
Ex. Given 'spotify:track:19p0PEnGr6XtRqCYEI8Ucc', return '19p0PEnGr6XtRqCYEI8Ucc'
:param code: (str) Full Spotify URI for a song
:return: (str) Spotify code for the song
"""
return code.split(':')[2]
def batch_tracks(self, tracks: list, batch_size: int = None) -> List[list]:
"""
Some Spotify endpoints have a limit on the number of tracks to send in one request. This method will
take a list of tracks and create a list of batches for including in Spotify requests.
:param tracks: (list) List of tracks to batch
:param batch_size: (int) Optional size of batches to return
:return: (list[list]) Original list of tracks, batched into lists of `batch_size`
"""
batch_size = batch_size or self.BATCH_SIZE
return [tracks[idx:idx + batch_size] for idx in range(0, len(tracks), batch_size)]
def get_playlists_for_category(self, category: str, num_playlists: int) -> List[dict]:
"""
Get a number of playlists from Spotify for a given category
:param category: (str) Category ID of a genre in Spotify
:param num_playlists: (int) Number of playlists to return
:return: (list[dict]) Playlist mappings for the given category
- name (str): Name of the playlist
- uri (str): Spotify ID for the playlist
- user (str): Spotify ID for the playlist owner
"""
url = '{api_url}/browse/categories/{category_id}/playlists'.format(
api_url=self.API_URL,
category_id=category
)
params = {
'country': 'US',
'limit': num_playlists
}
response = self._make_spotify_request('GET', url, params=params)
retrieved_playlists = []
for playlist in response['playlists']['items']:
payload = {
'name': playlist['name'].encode('ascii', 'ignore'),
'uri': playlist['id'],
'user': playlist['owner']['id']
}
retrieved_playlists.append(payload)
# Shuffle playlists to ensure freshness
random.shuffle(retrieved_playlists)
return retrieved_playlists
def get_songs_from_playlist(self, playlist: dict, num_songs: int, allow_explicit: bool = False) -> List[dict]:
"""
Get a number of songs randomly from the given playlist.
List of songs is shuffled and the number of desired tracks are returned.
:param playlist: (dict) Mapping of values needed to retrieve playlist tracks
:param num_songs: (int) Number of songs to return from this playlist
:param allow_explicit: (bool) Flag to indicate whether or not to return explicit songs (default False)
:return: (list[dict]) Song mappings from the given playlist
- name (str): Name of the song
- artist (str): Name of the artist
- code (str): Spotify ID of the song
"""
url = '{api_url}/users/{user_id}/playlists/{playlist_id}'.format(
api_url=self.API_URL,
user_id=playlist['user'],
playlist_id=playlist['uri']
)
params = {'fields': 'tracks(items(track(id,uri,name,artists,explicit)))'}
response = self._make_spotify_request('GET', url, params=params)
processed_tracks = 0
retrieved_tracks = []
tracks = response['tracks']['items']
# Shuffle tracks to ensure freshness
random.shuffle(tracks)
# Process number of tracks requested, but if playlist does not have enough to return the full
# amount we return what we get
for track in tracks:
if not track['track']:
# Sometimes Spotify doesn't return anything for a track. Unsure why, but if the track is None
# we should just skip it and keep going
continue
uri = track['track']['uri']
is_explicit = track['track']['explicit']
# Skip song if song has already been seen
if uri in self.seen_songs:
continue
# Skip song if we shouldn't include explicit songs and song is explicit
if not allow_explicit and is_explicit:
continue
payload = {
'name': track['track']['name'],
'artist': track['track']['artists'][0]['name'],
'code': uri
}
retrieved_tracks.append(payload)
self.seen_songs.append(uri)
processed_tracks += 1
if processed_tracks >= num_songs:
break
return retrieved_tracks
def get_audio_features_for_tracks(self, tracks: List[dict]) -> List[dict]:
"""
Get audio features (attributes we use for determining song emotion) for a number of tracks. Will update the
tracks in place, each track in the list is a dictionary of values needed to create a Song object. This method
returns the list of tracks updated with the tracks emotion attribute values.
:param tracks: (list[dict]) Song mappings
:return: (list[dict]) Song mappings + (energy, valence, danceability)
"""
# Need to batch tracks as Spotify limits the number of tracks sent in one request
batched_tracks = self.batch_tracks(tracks)
for batch in batched_tracks:
url = '{api_url}/audio-features'.format(api_url=self.API_URL)
# Construct query params list from track ids in batch
# Strip spotify:track: from the uri (Spotify just wants the id)
track_ids = [self.get_code_from_spotify_uri(track['code']) for track in batch]
params = {'ids': ','.join([track_id for track_id in track_ids])}
response = self._make_spotify_request('GET', url, params=params)
# Response is returned in the order requested (req:[1,2,3] -> res:[1,2,3])
# If an object is not found, a null value is returned in the appropriate position
for track, track_data in zip(batch, response['audio_features']):
if track_data:
valence = track_data.get('valence')
energy = track_data.get('energy')
danceability = track_data.get('danceability')
# Skip tracks that are missing any of the attributes we're looking for
if not any([valence, energy, danceability]):
continue
track.update({
'valence': valence,
'energy': energy,
'danceability': danceability
})
return tracks
def build_spotify_oauth_confirm_link(self, state: str, scopes: List[str], redirect_url: str) -> str:
"""
First step in the Spotify user authorization flow. This builds the request to authorize the application with
Spotify. Note that this function simply builds the URL for the user to visit, the actual behavior for the
authorization need to be made client-side.
:param state: (str) State to pass in request. Used for validating redirect URI against request
:param scopes: (list(str)) Spotify OAuth scopes to grant in authentication request
:param redirect_url: (str) URL to redirect to after OAuth confirmation
:return: (str) URL for Spotify OAuth confirmation
"""
params = {
'client_id': self.client_id,
'response_type': 'code',
'scope': ' '.join(scopes),
'redirect_uri': redirect_url,
'state': state
}
return '{url}?{params}'.format(url=self.USER_AUTH_URL, params=urlencode(params))
def get_access_and_refresh_tokens(self, code: str, redirect_url: str) -> dict:
"""
Make a request to the Spotify authorization endpoint to obtain the access and refresh tokens for a user after
they have granted our application permission to Spotify on their behalf.
:param code: (str) Authorization code returned from initial request for Spotify OAuth
:param redirect_url: (str) URL to redirect to after OAuth confirmation
:return: (dict)
- access_token (str)
- refresh_token (str)
"""
data = {
'grant_type': 'authorization_code', # Constant; From Spotify documentation
'code': code,
'redirect_uri': redirect_url,
}
headers = self._make_authorization_header()
response = self._make_spotify_request('POST', self.AUTH_URL, data=data, headers=headers)
return {
'access_token': response['access_token'],
'refresh_token': response['refresh_token']
}
def refresh_access_token(self, refresh_token: str) -> str:
"""
Refresh application on behalf of user given a refresh token. On a successful response, will return an
access token for the user good for the timeout period for Spotify authentication (One hour.)
:param refresh_token: (str) Refresh token for user from Spotify
:return: (str) New access token for user
"""
data = {
'grant_type': 'refresh_token', # Constant; From Spotify documentation
'refresh_token': refresh_token
}
headers = self._make_authorization_header()
response = self._make_spotify_request('POST', self.AUTH_URL, headers=headers, data=data)
return response['access_token']
def get_user_profile(self, access_token: str) -> dict:
"""
Get data on the user from Spotify API /me endpoint
:param access_token: (str) OAuth token from Spotify for the user
:return: (dict) Payload for the given user
"""
url = '{api_url}/me'.format(api_url=self.API_URL)
headers = {'Authorization': 'Bearer {}'.format(access_token)}
return self._make_spotify_request('GET', url, headers=headers)
def get_attributes_for_track(self, uri: str) -> dict:
"""
Fetch song metadata for a singular track
:param uri: (str) URI of song to search for on Spotify
:return: (dict)
- name (str)
- artist (str)
- code (str)
"""
song_id = self.get_code_from_spotify_uri(uri)
url = '{api_url}/tracks/{id}'.format(
api_url=self.API_URL,
id=song_id
)
track = self._make_spotify_request('GET', url)
return {
'name': track['name'],
'artist': track['artists'][0]['name'],
'code': uri
}
def get_user_playlists(self, auth_code: str, spotify_user_id: str) -> dict:
"""
Get all playlists for the given Spotify user.
:param auth_code: (str) Access token for user from Spotify
:param spotify_user_id: (str) Spotify username for the given user
:return: (dict) Spotify response for all users playlists
"""
url = '{api_url}/users/{user_id}/playlists'.format(
api_url=self.API_URL,
user_id=spotify_user_id
)
headers = {
'Authorization': 'Bearer {}'.format(auth_code),
'Content-Type': 'application/json'
}
return self._make_spotify_request('GET', url, headers=headers)
def create_playlist(self, auth_code: str, spotify_user_id: str, playlist_name: str) -> str:
"""
Create a playlist for the given Spotify user. Note that this creates an empty playlist,
a subsequent API call should be made to populate the playlist with songs.
:param auth_code: (str) Access token for user from Spotify
:param spotify_user_id: (str) Spotify username for the given user
:param playlist_name: (str) Name of the playlist to be created
:return: (str) Spotify playlist ID for the created playlist
"""
url = '{api_url}/users/{user_id}/playlists'.format(
api_url=self.API_URL,
user_id=spotify_user_id
)
headers = {'Authorization': 'Bearer {}'.format(auth_code)}
data = {
'name': playlist_name,
'public': True
}
resp = self._make_spotify_request('POST', url, headers=headers, json=data)
return resp['id']
def add_songs_to_playlist(self, auth_code: str, playlist_id: str, songs: list) -> None:
"""
Add songs to a specified playlist
:param auth_code: (str) Access token for user from Spotify
:param playlist_id: (str) Spotify playlist ID to add songs to
:param songs: (list) Collection of Spotify track URIs to add to playlist
"""
url = '{api_url}/playlists/{playlist_id}/tracks'.format(
api_url=self.API_URL,
playlist_id=playlist_id
)
headers = {'Authorization': 'Bearer {}'.format(auth_code)}
data = {'uris': songs}
self._make_spotify_request('POST', url, headers=headers, json=data)
def delete_songs_from_playlist(self, auth_code: str, playlist_id: str, songs: list) -> None:
"""
Remove songs from a specified playlist
:param auth_code: (str) Access token for user from Spotify
:param playlist_id: (str) Spotify playlist ID to remove songs from
:param songs: (list) Collection of Spotify track URIs to remove from playlist
"""
url = '{api_url}/playlists/{playlist_id}/tracks'.format(
api_url=self.API_URL,
playlist_id=playlist_id
)
headers = {'Authorization': 'Bearer {}'.format(auth_code)}
data = {'uris': songs}
self._make_spotify_request('DELETE', url, headers=headers, json=data)
def get_user_top_artists(self, auth_code: str, max_top_artists: int) -> List[str]:
"""
Retrieve the top artists from Spotify for a user.
:param auth_code: (str) Access token for user from Spotify
:param max_top_artists: (int) Max number of top artists to retrieve
:return: (list(str)) List of top artists for the user from Spotify
"""
url = '{api_url}/me/top/artists'.format(api_url=self.API_URL)
headers = {'Authorization': 'Bearer {}'.format(auth_code)}
params = {'limit': max_top_artists}
resp = self._make_spotify_request('GET', url, headers=headers, params=params)
# Parse the response for the artist name values
artists = []
for item in resp['items']:
artists.append(item['name'])
return artists
def upload_image_to_playlist(self, auth_code: str, playlist_id: str, image_filepath: str) -> None:
"""
Upload a custom image for a playlist. Requires ugc-image-upload and
playlist-modify-public/playlist-modify-private scopes from Spotify
:param auth_code: (str) Access token for user who owns the playlist
:param playlist_id: (str) Playlist ID from Spotify
:param image_filepath: (str) Path to the image file to upload
"""
url = '{api_url}/playlists/{playlist_id}/images'.format(api_url=self.API_URL, playlist_id=playlist_id)
headers = {
'Authorization': 'Bearer {}'.format(auth_code),
'Content-Type': 'image/jpeg'
}
try:
with open(image_filepath, 'rb') as image_file:
image_data = b64encode(image_file.read())
except FileNotFoundError:
raise ClientException('File {} does not exist'.format(image_filepath))
self._make_spotify_request('PUT', url, data=image_data, headers=headers)
def search(self, query: str, search_types: Union[str, List], limit: int = None) -> dict:
"""
Query the API for resources that match a given query.
:param query: (str) Query to send to the endpoint
:param search_types: (str | list) Single or multiple item types to search across. Must be one of
[album, artist, playlist, track, show, episode]
:param limit: (int) Maximum number of resources to return. Default to max of 50 resources
:return: (dict) Response from API.
See https://developer.spotify.com/documentation/web-api/reference/search/search/#fields-reference for full
details.
"""
url = '{api_url}/search'.format(api_url=self.API_URL)
limit = limit or self.MAX_SEARCH_SIZE
if limit > self.MAX_SEARCH_SIZE:
raise ClientException(f'Invalid limit. Must be less than {self.MAX_SEARCH_SIZE}')
if not isinstance(search_types, list):
search_types = [search_types]
# Validate that the search_types parameters are valid
for _type in search_types:
if _type not in self.SEARCH_TYPE_OPTIONS:
raise ClientException(f'{_type} is not a valid search type. Options are {self.SEARCH_TYPE_OPTIONS}')
params = {
'q': query,
'type': ','.join(search_types),
'limit': limit
}
return self._make_spotify_request('GET', url, params=params)
def get_all_songs_from_user_playlist(self, auth_code: str, playlist_id: str) -> List[str]:
"""
Get all the song URIs from the playlist for the given user
:param auth_code: (str) Access token for user who owns the playlist
:param playlist_id: (str) Playlist ID from Spotify
:return: List of Spotify URIs for songs in playlist
"""
song_uris = []
url = f'{self.API_URL}/playlists/{playlist_id}/tracks'
headers = {'Authorization': f'Bearer {auth_code}'}
params = {'fields': 'items(track(uri)),next'}
while url:
response = self._make_spotify_request('GET', url, headers=headers, params=params)
tracks = response['items']
for track in tracks:
if track['track'] and track['track']['uri'] not in song_uris:
song_uris.append(track['track']['uri'])
url = response['next']
return song_uris
def get_recently_played_tracks_for_user(self, auth_code: str, limit: int = None) -> dict:
"""
Retrieve the top `limit` tracks that a user has listened to on Spotify
- Requires the `user-read-recently-played` Spotify OAuth scope
:param auth_code: (str) Access token for user from Spotify
:param limit: (int) Number of tracks to return (default to self.DEFAULT_RECENTLY_PLAYED_TRACKS_LIMIT)
:return: (dict) Response of play history objects from Spotify API
"""
headers = {'Authorization': f'Bearer {auth_code}'}
params = {'limit': limit or self.DEFAULT_RECENTLY_PLAYED_TRACKS_LIMIT}
return self._make_spotify_request(
'GET',
f'{self.API_URL}/me/player/recently-played',
headers=headers,
params=params
)
def getAllAlbum(self, auth_code: str, limit: int, offset: int) -> dict:
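        """
        Get the albums saved in the current user's library.
        :param auth_code: (str) Access token for user from Spotify
        :param limit: (int) Maximum number of albums to return (at most 50)
        :param offset: (int) Index of the first album to return, for paging
        :return: (dict) Spotify API response containing the saved album objects
        """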
url = '{api_url}/me/albums'.format(api_url=self.API_URL)
if limit > 50:
            raise ClientException('Invalid limit. Must be 50 or fewer')
headers = {'Authorization': 'Bearer {}'.format(auth_code)}
params = {
'limit': limit,
'offset': offset
#,'market': 'US'
}
return self._make_spotify_request('GET', url, headers=headers, params=params)
def addAlbum(self, ids: str, auth_code: str) -> dict:
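        """
        Save one or more albums to the current user's library.
        :param ids: (str) Comma-separated list of Spotify album IDs
        :param auth_code: (str) Access token for user from Spotify
        :return: (dict) Spotify API response
        """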
url = '{api_url}/me/albums'.format(api_url=self.API_URL)
headers = {'Authorization': 'Bearer {}'.format(auth_code)}
params = {
'ids': ids
}
return self._make_spotify_request('PUT', url, params=params, headers=headers)
def removeAlbum(self, ids: str, auth_code: str) -> dict:
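        """
        Remove one or more albums from the current user's library.
        :param ids: (str) Comma-separated list of Spotify album IDs
        :param auth_code: (str) Access token for user from Spotify
        :return: (dict) Spotify API response
        """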
url = '{api_url}/me/albums'.format(api_url=self.API_URL)
headers = {'Authorization': 'Bearer {}'.format(auth_code)}
params = {
'ids': ids
}
return self._make_spotify_request('DELETE', url, params=params, headers=headers)
def getAlbum(self, query: str) -> dict:
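        """
        Get catalog information for one or more albums by their Spotify IDs.
        :param query: (str) Comma-separated list of Spotify album IDs
        :return: (dict) Spotify API response containing the album objects
        """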
url = '{api_url}/albums'.format(api_url=self.API_URL)
        params = {
            'ids': query,
            'type': 'album',
            'limit': self.MAX_SEARCH_SIZE,
'market': 'US'
}
return self._make_spotify_request('GET', url, params=params)
    def getPlaylist(self, playlist_id: str, limit: int, offset: int, fields: str) -> dict:
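        """
        Get tracks from a playlist.
        :param playlist_id: (str) Spotify playlist ID
        :param limit: (int) Maximum number of tracks to return (at most 100)
        :param offset: (int) Index of the first track to return, for paging
        :param fields: (str) Spotify fields filter restricting the attributes returned
        :return: (dict) Spotify API response containing the playlist tracks
        """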
url = '{api_url}/playlists/{pid}/tracks'.format(api_url=self.API_URL, pid=playlist_id)
if limit > 100:
            raise ClientException('Invalid limit. Must be 100 or fewer')
params = {
'fields': fields,
'limit': limit,
'offset': offset
#,'market': 'US'
}
return self._make_spotify_request('GET', url, params=params)
    def addToPlaylist(self, playlist_id: str, uris: str, auth_code: str) -> dict:
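        """
        Add tracks to a playlist.
        :param playlist_id: (str) Spotify playlist ID
        :param uris: (str) Comma-separated list of Spotify track URIs to add
        :param auth_code: (str) Access token for user from Spotify
        :return: (dict) Spotify API response containing the new playlist snapshot ID
        """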
url = '{api_url}/playlists/{pid}/tracks'.format(api_url=self.API_URL, pid=playlist_id)
headers = {'Authorization': 'Bearer {}'.format(auth_code)}
params = {
'uris': uris,
}
return self._make_spotify_request('POST', url, params=params, headers=headers)
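# Illustrative usage sketch (not part of the original module). The client_id and
# secret_key values are placeholders; real Spotify application credentials must be
# supplied here or via the Config class before anything like this will work:
#
#     client = SpotifyClient(client_id='<client-id>', secret_key='<secret-key>')
#     results = client.search('concrete and gold', 'album', limit=5)
#     playlists = client.get_playlists_for_category('rock', num_playlists=3)
#     songs = client.get_songs_from_playlist(playlists[0], num_songs=10)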
| StarcoderdataPython |
11371036 | # Copyright (C) 2022 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import json
import os
from . import utils
from .profiles.base import Profile
class Asset:
def __init__(self, profile: Profile):
self.input = None
self.output = None
self.prefix = ""
self.profile = profile
self.real_sizes = {}
self.real_size_out = "metadata.json"
self.registered_images = set()
def set_input_directory(self, path: str):
self.input = os.path.abspath(path)
def set_output_directory(self, path: str):
self.output = os.path.abspath(path)
def get_profile(self):
return self.profile
def set_prefix(self, prefix: str):
if len(prefix) == 0:
self.prefix = ""
else:
prefix = prefix.replace("\\", "/")
if prefix[-1] != "/":
self.prefix = prefix + "/"
else:
self.prefix = prefix
def get_prefix(self):
return self.prefix
def get_input_path(self, path: str = None):
        if path is None:
return self.input
else:
return utils.concat_path(self.input, path)
def get_output_path(self, path: str = None):
        if self.output is None:
            raise Exception("Output path is not defined")
        if path is None:
return self.output
else:
return utils.concat_path(self.output, path)
def to_relative_output(self, abspath: str):
        if self.output is None:
raise Exception("Output path is not defined")
abspath = os.path.normpath(abspath)
if abspath[: len(self.output)] != self.output:
raise Exception("Absolute path is outside output directory")
return abspath[len(self.output) + 1 :]
def register_image(self, fullimage: str):
if fullimage in self.registered_images:
print(f"Image {fullimage} already registered.")
return False
else:
            self.registered_images.add(fullimage)
            return True
def add_real_size(self, image: str, ow: int, oh: int, w: int, h: int):
path = self.prefix + image
self.real_sizes[path] = [ow, oh, w, h]
self.register_image(path)
def dump_real_size(self, f):
        if f is not None:
json.dump(self.real_sizes, f, indent="\t", ensure_ascii=False)
else:
return json.dumps(self.real_sizes, indent="\t", ensure_ascii=False)
def get_realsize_output(self):
return self.real_size_out
def set_realsize_output(self, out: str):
self.real_size_out = out
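# Illustrative usage sketch (not part of the original module). It assumes a concrete
# Profile implementation exists (only the abstract base in .profiles.base is known
# here); SomeProfile and the paths below are made-up placeholders:
#
#     asset = Asset(SomeProfile())
#     asset.set_input_directory('assets/src')
#     asset.set_output_directory('assets/out')
#     asset.set_prefix('ui/images')
#     asset.add_real_size('icon.png', 64, 64, 32, 32)
#     with open(asset.get_output_path(asset.get_realsize_output()), 'w', encoding='utf-8') as f:
#         asset.dump_real_size(f)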
| StarcoderdataPython |
3569693 | <filename>spax/linalg/lobpcg/ortho.py
import typing as tp
from functools import partial
import jax
import jax.numpy as jnp
from spax.linalg.lobpcg import utils
from spax.linalg.utils import as_array_fun, standardize_signs
from spax.types import ArrayFun, ArrayOrFun
class OrthoState(tp.NamedTuple):
iterations: int
theta: jnp.ndarray
X: jnp.ndarray
P: jnp.ndarray
R: jnp.ndarray
err: jnp.ndarray
converged: jnp.ndarray
num_converged: int
def lobpcg(
A: ArrayOrFun,
X0: jnp.ndarray,
B: tp.Optional[ArrayOrFun] = None,
# iK: tp.Optional[ArrayOrFun] = None,
largest: bool = False,
k: tp.Optional[int] = None,
tol: tp.Optional[float] = None,
maxiters: int = 1000,
tau_ortho: tp.Optional[float] = None,
tau_replace: tp.Optional[float] = None,
tau_drop: tp.Optional[float] = None,
# tau_skip: tp.Optional[float] = None,
) -> tp.Tuple[jnp.ndarray, jnp.ndarray, OrthoState]:
"""
Find some of the eigenpairs for the generalized eigenvalue problem (A, B).
Args:
A: `[m, m]` hermitian matrix, or function representing pre-multiplication by an
`[m, m]` hermitian matrix.
X0: `[m, n]`, `k <= n < m`. Initial guess of eigenvectors.
B: same type as A. If not given, identity is used.
        iK: Optional inverse preconditioner. If not given, identity is used.
largest: if True, return the largest `k` eigenvalues, otherwise the smallest.
k: number of eigenpairs to return. Uses `n` if not provided.
tol: tolerance for convergence.
maxiters: maximum number of iterations.
tau_*: solver parameters.
Returns:
w: [k] smallest/largest eigenvalues of generalized eigenvalue problem `(A, B)`.
        v: [m, k] eigenvectors associated with `w`. `v[:, i]` matches `w[i]`.
        state: final OrthoState of the solver, including the number of iterations used.
"""
# Perform argument checks and fix default / computed arguments
if B is not None:
raise NotImplementedError("Implementations with non-None B have issues")
# if iK is not None:
# raise NotImplementedError("Inplementations with non-None iK have issues")
ohm = jax.random.normal(jax.random.PRNGKey(0), shape=X0.shape, dtype=X0.dtype)
A = as_array_fun(A)
A_norm = utils.approx_matrix_norm2(A, ohm)
if B is None:
B = utils.identity
B_norm = jnp.ones((), dtype=X0.dtype)
else:
B = as_array_fun(B)
B_norm = utils.approx_matrix_norm2(B, ohm)
# if iK is None:
# iK = utils.identity
# else:
# iK = as_array_fun(iK)
if tol is None:
dtype = X0.dtype
if dtype == jnp.float32:
feps = 1.2e-7
elif dtype == jnp.float64:
feps = 2.23e-16
else:
raise KeyError(dtype)
tol = feps ** 0.5
k = k or X0.shape[1]
return _lobpcg(
A,
X0,
B,
# iK,
largest,
k,
tol,
maxiters,
A_norm,
B_norm,
tau_ortho=tau_ortho or tol,
tau_replace=tau_replace or tol,
tau_drop=tau_drop or tol,
# tau_skip=tau_skip or tol,
)
def _lobpcg(
A: ArrayFun,
X0: jnp.ndarray,
B: ArrayFun,
# iK: ArrayFun,
largest: bool,
k: int,
tol: float,
maxiters: int,
A_norm: float,
B_norm: float,
tau_ortho: float,
tau_replace: float,
tau_drop: float,
# tau_skip: float,
) -> tp.Tuple[jnp.ndarray, jnp.ndarray, OrthoState]:
m, nx = X0.shape
dtype = X0.dtype
compute_residual = partial(utils.compute_residual, A=A, B=B)
compute_residual_error = partial(
utils.compute_residual_error, A_norm=A_norm, B_norm=B_norm
)
ortho_drop = partial(
utils.ortho_drop,
B,
tau_replace=tau_replace,
tau_drop=tau_drop,
largest=largest,
tau_ortho=tau_ortho,
)
theta, C = utils.rayleigh_ritz(X0, A, B, largest=largest)
X = X0 @ C
del X0
P = jnp.zeros((m, 0), dtype=dtype)
R = compute_residual(theta, X)
err = compute_residual_error(R, theta, X)
converged = err < tol
num_converged = jnp.count_nonzero(converged)
state = OrthoState(
iterations=0,
theta=theta,
X=X,
P=P,
R=R,
err=err,
converged=converged,
num_converged=num_converged,
)
del theta
def cond(s: OrthoState):
return jnp.logical_and(s.iterations < maxiters, s.num_converged < k)
def body(s: OrthoState):
XP = jnp.concatenate((s.X, s.P), axis=1)
W = ortho_drop(U=s.R, V=XP)
S = jnp.concatenate((XP, W), axis=1)
theta_x, theta_p, cx, cp = utils.rayleigh_ritz_modified_ortho(
S=S, A=A, nx=nx, nc=s.num_converged, largest=largest
)
del theta_p
X = S @ cx
P = S @ cp
R = compute_residual(theta_x, X)
err = compute_residual_error(R, theta_x, X)
converged = err < tol
num_converged = jnp.count_nonzero(converged)
return OrthoState(
iterations=s.iterations + 1,
theta=theta_x,
X=X,
P=P,
R=R,
err=err,
converged=converged,
num_converged=num_converged,
)
# main loop
while cond(state):
state = body(state)
# # first run through has P=[]. Size will change for subsequent runs.
# state = body(state)
# state = jax.lax.while_loop(cond, body, state)
# clean up return values
def if_converged(state):
indices = jnp.argsort(jnp.logical_not(state.converged))[:k]
theta = state.theta[indices]
vectors = state.X[:, indices]
vectors = standardize_signs(vectors)
return theta, vectors, state
def otherwise(state):
theta = jnp.full((k,), jnp.nan, dtype=dtype)
vectors = jnp.full((m, k), jnp.nan, dtype=dtype)
return theta, vectors, state
pred = state.num_converged >= k
return jax.lax.cond(pred, if_converged, otherwise, state)
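# Illustrative usage sketch (not part of the original module): a rough example of calling
# `lobpcg` above on a small random symmetric matrix. The sizes, seed, and k below are
# arbitrary choices for demonstration only.
#
#     from jax import random
#
#     key_a, key_x = random.split(random.PRNGKey(0))
#     M = random.normal(key_a, (50, 50))
#     A = (M + M.T) / 2                     # symmetric test matrix
#     X0 = random.normal(key_x, (50, 4))    # 4 search directions, ask for k=2 pairs
#     w, v, state = lobpcg(A, X0, k=2, largest=False)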
# use_ortho = False
# while nc < k and iters < maxiters:
# iters += 1
# if use_ortho:
# W = utils.ortho_drop(
# B, R, XP, tau_ortho, tau_replace, tau_drop, largest=largest
# )
# else:
# W = R
# S = jnp.concatenate((XP, W), axis=1)
# theta, C, next_use_ortho = utils.rayleigh_ritz_modified(
# S, A, B, k, nx, nc, use_ortho, tau_skip, largest=largest
# )
# if use_ortho != next_use_ortho:
# W = utils.ortho_drop(
# B, R, XP, tau_ortho, tau_replace, tau_drop, largest=largest
# )
# # The line below isn't in the pseudocode, but otherwise W isn't used.
# S = jnp.concatenate((XP, W), axis=1)
# theta, C, use_ortho = utils.rayleigh_ritz_modified(
# S, A, B, k, nx, nc, use_ortho, tau_skip, largest=largest
# )
# XP = S @ C
# X = X[:, :nx]
# R = compute_residual(theta, X)
# rerr = compute_residual_error(R, theta, X)
# nc = jnp.count_nonzero(rerr < tol)
| StarcoderdataPython |
8139756 | <filename>packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline_schema/compiled_stage.py
from logging import getLogger
from traceback import format_exc
from typing import List
from watchmen_auth import PrincipalService
from watchmen_data_kernel.storage_bridge import now, parse_prerequisite_defined_as, parse_prerequisite_in_memory, \
PipelineVariables, spent_ms
from watchmen_model.admin import Pipeline, PipelineStage
from watchmen_model.pipeline_kernel import MonitorLogStage, MonitorLogStatus, PipelineMonitorLog
from watchmen_pipeline_kernel.pipeline_schema_interface import CreateQueuePipeline, TopicStorages
from watchmen_utilities import ArrayHelper
from .compiled_unit import compile_units, CompiledUnit
logger = getLogger(__name__)
class CompiledStage:
def __init__(self, pipeline: Pipeline, stage: PipelineStage, principal_service: PrincipalService):
self.pipeline = pipeline
self.stage = stage
self.prerequisiteDefinedAs = parse_prerequisite_defined_as(stage, principal_service)
self.prerequisiteTest = parse_prerequisite_in_memory(stage, principal_service)
self.units = compile_units(pipeline, stage, principal_service)
def run(
self, variables: PipelineVariables,
new_pipeline: CreateQueuePipeline, monitor_log: PipelineMonitorLog,
storages: TopicStorages, principal_service: PrincipalService) -> bool:
stage_monitor_log = MonitorLogStage(
stageId=self.stage.stageId, name=self.stage.name,
status=MonitorLogStatus.DONE, startTime=now(), spentInMills=0, error=None,
prerequisite=True,
prerequisiteDefinedAs=self.prerequisiteDefinedAs(),
units=[]
)
monitor_log.stages.append(stage_monitor_log)
try:
prerequisite = self.prerequisiteTest(variables, principal_service)
if not prerequisite:
stage_monitor_log.prerequisite = False
stage_monitor_log.status = MonitorLogStatus.DONE
all_run = True
else:
stage_monitor_log.prerequisite = True
def run(should_run: bool, unit: CompiledUnit) -> bool:
return self.run_unit(
should_run=should_run, unit=unit, variables=variables, new_pipeline=new_pipeline,
stage_monitor_log=stage_monitor_log,
storages=storages, principal_service=principal_service)
all_run = ArrayHelper(self.units).reduce(lambda should_run, x: run(should_run, x), True)
if all_run:
monitor_log.status = MonitorLogStatus.DONE
else:
monitor_log.status = MonitorLogStatus.ERROR
except Exception as e:
logger.error(e, exc_info=True, stack_info=True)
stage_monitor_log.status = MonitorLogStatus.ERROR
stage_monitor_log.error = format_exc()
all_run = False
stage_monitor_log.spentInMills = spent_ms(stage_monitor_log.startTime)
return all_run
# noinspection PyMethodMayBeStatic
def run_unit(
self, should_run: bool,
unit: CompiledUnit, variables: PipelineVariables,
new_pipeline: CreateQueuePipeline, stage_monitor_log: MonitorLogStage,
storages: TopicStorages, principal_service: PrincipalService
) -> bool:
if not should_run:
return False
else:
return unit.run(
variables=variables, new_pipeline=new_pipeline, stage_monitor_log=stage_monitor_log,
storages=storages, principal_service=principal_service)
def compile_stages(pipeline: Pipeline, principal_service: PrincipalService) -> List[CompiledStage]:
stages = pipeline.stages
if stages is None or len(stages) == 0:
return []
else:
return ArrayHelper(stages).map(lambda x: CompiledStage(pipeline, x, principal_service)).to_list()
| StarcoderdataPython |
4822955 | import paramiko
class server:
def __init__(self, host, port, username, password=<PASSWORD>):
self.ssh = paramiko.SSHClient()
# Since we're just going to be executing some simple commands,
# there's no reason to enforce host key checks.
self.ssh.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
# Now connect to the server!
self.ssh.connect(host, port=port, username=username,
password=password, look_for_keys=False, timeout=30,
key_filename="/home/gitdeploy/.ssh/id_dsa")
self.stderr = ""
self.stdout = ""
def _exec(self, command, log=False):
stdin, stdout, stderr = self.ssh.exec_command(command)
self._log_errors(stderr)
lines = "".join(stdout.readlines())
if(log):
self.stdout = self.stdout + lines
return lines
def _exec_dir(self, command, dir, log=False):
if(not self.has_dir(dir)):
raise IOError(dir + ' does not exist!')
return self._exec("cd " + dir + "; " + command, log)
def _log_errors(self, stderr):
lines = stderr.readlines()
if(len(lines) > 0):
# paramiko leaves the trailing \ns on the lines, so no
# need to remove them ourselves.
self.stderr = self.stderr + ("".join(lines))
def mkdir(self, dir):
self._check_dir(dir)
self._exec('mkdir "' + dir + '"')
def mv_dir(self, old, new):
# Checking if the directories exist will handle input validation for us.
if(not self.has_dir(old)):
raise IOError(old + " does not exist")
if(self.has_dir(new)):
raise IOError(new + " exists")
self._exec('mv "' + old + '" "' + new + '"')
def init_git(self, dir):
self._exec_dir("git init", dir, True)
def get_remotes(self, dir):
return filter(None, self._exec_dir("git remote", dir).split("\n"))
def get_remote_pull(self, dir, remote):
if(not remote.isalnum() or len(remote) == 0):
raise ValueError("Remote name must be alphanumeric")
return self._exec_dir("git remote show " + remote, dir).split("\n")[1].split(': ')[1]
def add_remote(self, dir, name, path):
if(path.replace(';', '').replace(' ', '').replace('#', '') != path):
raise ValueError("Invalid path")
if(not name.isalnum()):
raise ValueError("Name must be alphanumeric")
self._exec_dir("git remote add " + name + " " + path, dir, True)
def pull(self, dir, remote, branch):
if(not remote.isalnum() or not branch.isalnum() or len(remote) == 0 or len(branch) == 0):
raise ValueError("Remote and branch names must be alphanumeric")
return self._exec_dir("git pull " + remote + " " + branch, dir, True)
def has_dir(self, dir):
self._check_dir(dir)
exists = self._exec('[ -d ' + dir + ' ] && echo "Directory exists" || echo "Directory does not exist"')
return len(exists) > 0 and exists.find("Directory exists") >= 0
def _check_dir(self, dir):
sanDir = dir.replace('/', '').replace(' ', '').replace('-', '').replace('_', '').replace('.', '')
if(not sanDir.isalnum() or not len(sanDir) > 0 or dir[0] != '/'):
raise ValueError("Directory can only contain alphanumeric characters, spaces, /, _, and -; must start with a leading slash; and must not be /. Given was " + dir)
def has_git(self):
which = self._exec("which git");
return len(which) > 0 and which[0] == "/"
def close(self):
self.ssh.close()
def get_os(self):
oses = [
'Mint', 'Ubuntu', 'Fedora', 'SUSE', 'Debian', 'Arch', 'CentOS', 'Mageia', 'Puppy',
'PCLinuxOS', 'Lubuntu', 'FreeBSD', 'Sabayon', 'Chakra', 'Ultimate', 'Bodhi',
'Mandriva', 'Slackware', 'PC-BSD', 'Gentoo', 'Funduntu', 'Zorin', 'Pinguy', 'Scientific',
'ArchBang', 'Tiny Core', 'Kubuntu', 'Vector', 'GhostBSD', 'CrunchBang', 'Xubuntu',
'Red Hat', 'Pear', 'KNOPPIX', 'Dreamlinux', 'BackTrack', 'ClearOS', 'MEPIS', 'Tails',
'Salix', 'Unity', 'Frugalware', 'FreeNAS', 'Ubuntu Studio', 'Peppermint', 'Dream Studio',
'ZevenOS', 'Commodore', 'BackBox', 'Solaris'
]
distro_info = self._exec("cat /etc/*-release")
for os in oses:
if(distro_info.lower().find(os.lower()) >= 0):
return os
return "Unknown" | StarcoderdataPython |
6705532 | <filename>recipe/run_test.py
import os
import sys
# Force OpenBLAS to use a single thread to prevent image-rotation
# test failures on macOS. See https://github.com/sunpy/sunpy/issues/4290
# for more information.
os.environ['OMP_NUM_THREADS'] = '1'
import sunpy
sys.exit(sunpy.self_test()) | StarcoderdataPython |
8012506 | <gh_stars>1-10
from .data_set import DataSet
from .admin import AdminAPI
from .collector import CollectorAPI
__import__('pkg_resources').declare_namespace(__name__)
| StarcoderdataPython |
4804868 | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
def model(X, hidden_weights, hidden_bias, ow):
    hidden_layer = tf.nn.sigmoid(tf.matmul(X, hidden_weights) + hidden_bias)
return tf.matmul(hidden_layer, ow)
train_samples = 200
test_samples = 60
ds_X = np.linspace(-1, 1, train_samples + test_samples).transpose()
ds_Y = 0.4 * pow(ds_X, 2) + 2 * ds_X + np.random.randn(*ds_X.shape) * 0.22 + 0.8
plt.title('Original data')
plt.scatter(ds_X, ds_Y)
plt.show()
X = tf.placeholder("float")
Y = tf.placeholder("float")
# hidden layer
hw = tf.Variable(tf.random_normal([1, 10], stddev=0.1))
# output connection
ow = tf.Variable(tf.random_normal([10, 1], stddev=0.1))
# create bias
b = tf.Variable(tf.random_normal([10], stddev=0.1))
model_y = model(X, hw, b, ow)
# cost function
cost = tf.pow(model_y - Y, 2) / 2
# optimizer
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)
# launch graph in session
with tf.Session() as sess:
tf.global_variables_initializer().run()
for i in range(1, 100):
# randomize samples
ds_X, ds_Y = shuffle(ds_X.transpose(), ds_Y)
train_X, train_Y = ds_X[0:train_samples], ds_Y[0:train_samples]
for x, y in zip(train_X, train_Y):
sess.run(train_op, feed_dict={X: [[x]], Y: y})
test_X, test_Y = ds_X[train_samples: train_samples + test_samples], \
            ds_Y[train_samples: train_samples + test_samples]
cost1 = 0.
for x, y in zip(test_X, test_Y):
cost1 += sess.run(cost, feed_dict={X: [[x]], Y: y}) / test_samples
if i % 10 == 0:
print("Average cost for epoch " + str(i) + ":" + str(cost1))
| StarcoderdataPython |
5104220 | <reponame>Monstrofil/locale_exporter_wows
#!/usr/bin/python3
# coding=utf-8
import argparse
import shutil
from pathlib import Path
import py7zr
import os
import tempfile
import requests
from lxml import etree
from subprocess import Popen
__author__ = "<NAME>"
class LocalizationHelper(object):
LOCALE_TO_REGION = {
'ru': 'ru',
'uk': 'ru',
'be': 'ru',
'en': 'eu',
'pl': 'eu',
'de': 'eu',
}
def __init__(self, locale_list):
self.__locale_list = locale_list
def _extract_mo_locale_files(self, archive_path, locale_name):
with py7zr.SevenZipFile(archive_path, mode='r') as z:
versions = [
path for path in z.getnames()
if path.startswith('bin/') and path.count('/') == 1
] # => ['3912232', '4046169']
highest_version = max(versions)
locale_path = highest_version + '/res/texts/{locale_name}/LC_MESSAGES/global.mo'.format(
locale_name=locale_name
)
temp_dir = tempfile.gettempdir()
z.extract(targets=[
os.path.basename(locale_path),
locale_path
], path=temp_dir)
return os.path.join(temp_dir, locale_path)
def _download_locale_file(self, locale_file_url):
filename = os.path.basename(locale_file_url)
temp_path = os.path.join(tempfile.gettempdir(), filename)
if os.path.exists(temp_path):
return temp_path
with open(temp_path, 'wb') as f:
f.write(requests.get(locale_file_url).content)
return temp_path
def retrive(self):
locale_file_url = self._get_locale_archive_link()
path = self._download_locale_file(locale_file_url)
for locale in self.__locale_list:
self.__retrive(locale, path)
def __retrive(self, locale_name, locales_file):
mo_file = self._extract_mo_locale_files(locales_file, locale_name)
self.__retrive_locale_file(mo_file, export_to='wows.0_0_locale_{locale_name}.wgpkg.po'.format(locale_name=locale_name))
def __obtain_link_wgpkg(self, locale_name):
"""
I really don't know what is the difference between these links, so I just download one ;)
<http name="Cedexis">
http://dl-wows-cdx.wargaming.net/ru/patches/wows_0.6.7.0.261848_ru/wows.ru_0.6.7.0.261848_locale_be.wgpkg
</http>
<http name="G-Core">
http://dl-wows-gc.wargaming.net/ru/patches/wows_0.6.7.0.261848_ru/wows.ru_0.6.7.0.261848_locale_be.wgpkg
</http>
<web_seeds>
<url threads="10">
http://dl-wows-gc.wargaming.net/ru/patches/wows_0.6.7.0.261848_ru/wows.ru_0.6.7.0.261848_locale_be.wgpkg
</url>
</web_seeds>
:type locale_name: str
:rtype: str
"""
# https://dl-wows-gc.wargaming.net/ww/patches/wows_0.10.5.0.4053181_ww/wows.ww_0.10.5.0.4053181_locale.wgpkg
url = "http://update.worldofwarships.{}".format(self.LOCALE_TO_REGION[locale_name])
data = dict(
target='locale',
locale_ver='unknown',
lang=locale_name
)
xml = requests.get(url, data).content
link = etree.fromstring(xml).xpath('content/file/web_seeds/url/text()')[0]
return link
def _get_locale_archive_link(self):
url = 'https://wgus-eu.wargaming.net/api/v1/patches_chain/'
data = dict(
protocol_version='1.10',
client_type='high',
lang='RU',
metadata_version='20210119113723',
metadata_protocol_version='6.10',
chain_id='f11',
client_current_version='0',
locale_current_version='0',
sdcontent_current_version='0',
game_id='WOWS.WW.PRODUCTION',
)
xml = requests.get(url, data).content
locale_file = etree.fromstring(xml).xpath("patches_chain/patch[part = 'locale']/files[1]/file/name/text()")[0]
locale_file_url = 'https://dl-wows-gc.wargaming.net/ww/patches/' + locale_file
return locale_file_url
def __retrive_locale_file(self, mo_file, export_to):
Popen(['msgunfmt', mo_file, '-o', export_to]).communicate()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--locale', nargs='+', required=True,
help='list of locale names')
namespace = parser.parse_args()
locale_helper = LocalizationHelper(namespace.locale)
locale_helper.retrive()
| StarcoderdataPython |
3554345 | <filename>venv/lib/python3.8/site-packages/stem/util/system.py
# Copyright 2011-2019, <NAME> and The Tor Project
# See LICENSE for licensing information
"""
Helper functions for working with the underlying system. These are mostly os
dependent, only working on linux, osx, and bsd. In almost all cases they're
best-effort, providing **None** if the lookup fails.
.. versionchanged:: 1.3.0
Dropped the get_* prefix from several function names. The old names still
work, but are deprecated aliases.
.. versionchanged:: 1.5.0
Added the **SYSTEM_CALL_TIME** global, which tracks total time spent making
system commands.
**Module Overview:**
::
is_windows - checks if we're running on windows
is_mac - checks if we're running on a mac
is_gentoo - checks if we're running on gentoo
is_slackware - checks if we're running on slackware
is_bsd - checks if we're running on the bsd family of operating systems
is_available - determines if a command is available on this system
is_running - determines if a given process is running
size_of - provides the memory usage of an object
call - runs the given system command and provides back the results
name_by_pid - gets the name for a process by the given pid
pid_by_name - gets the pid for a process by the given name
pid_by_port - gets the pid for a process listening to a given port
pid_by_open_file - gets the pid for the process with an open file
pids_by_user - provides processes owned by a user
cwd - provides the current working directory for a given process
user - provides the user a process is running under
start_time - provides the unix timestamp when the process started
tail - provides lines from the end of a file
bsd_jail_id - provides the BSD jail id a given process is running within
bsd_jail_path - provides the path of the given BSD jail
is_tarfile - checks if the given path is a tarball
expand_path - expands relative paths and ~ entries
files_with_suffix - provides files with the given suffix
get_process_name - provides our process' name
set_process_name - changes our process' name
.. data:: Status (enum)
State of a subprocess.
.. versionadded:: 1.6.0
==================== ===========
Status Description
==================== ===========
PENDING not yet started
RUNNING currently being performed
DONE completed successfully
FAILED failed with an exception
==================== ===========
"""
import collections
import ctypes
import ctypes.util
import itertools
import mimetypes
import multiprocessing
import os
import platform
import re
import subprocess
import sys
import tarfile
import threading
import time
import stem.prereq
import stem.util
import stem.util.enum
import stem.util.proc
import stem.util.str_tools
from stem import UNDEFINED
from stem.util import log
State = stem.util.enum.UppercaseEnum(
'PENDING',
'RUNNING',
'DONE',
'FAILED',
)
SIZE_RECURSES = {
tuple: iter,
list: iter,
collections.deque: iter,
dict: lambda d: itertools.chain.from_iterable(d.items()),
set: iter,
frozenset: iter,
}
# Mapping of commands to if they're available or not.
CMD_AVAILABLE_CACHE = {}
# An incomplete listing of commands provided by the shell. Expand this as
# needed. Some noteworthy things about shell commands...
#
# * They're not in the path so is_available() will fail.
# * subprocess.Popen() without the 'shell = True' argument will fail with...
# OSError: [Errno 2] No such file or directory
SHELL_COMMANDS = ['ulimit']
IS_RUNNING_PS_LINUX = 'ps -A co command'
IS_RUNNING_PS_BSD = 'ps -ao ucomm='
GET_NAME_BY_PID_PS = 'ps -p %s -o comm'
GET_PID_BY_NAME_PGREP = 'pgrep -x %s'
GET_PID_BY_NAME_PIDOF = 'pidof %s'
GET_PID_BY_NAME_PS_LINUX = 'ps -o pid -C %s'
GET_PID_BY_NAME_PS_BSD = 'ps axc'
GET_PID_BY_NAME_LSOF = 'lsof -tc %s'
GET_PID_BY_PORT_NETSTAT = 'netstat -npltu'
GET_PID_BY_PORT_SOCKSTAT = 'sockstat -4l -P tcp -p %s'
GET_PID_BY_PORT_LSOF = 'lsof -wnP -iTCP -sTCP:LISTEN'
GET_PID_BY_FILE_LSOF = 'lsof -tw %s'
GET_PIDS_BY_USER_LINUX = 'ps -o pid -u %s'
GET_PIDS_BY_USER_BSD = 'ps -o pid -U %s'
GET_CWD_PWDX = 'pwdx %s'
GET_CWD_LSOF = 'lsof -a -p %s -d cwd -Fn'
GET_BSD_JAIL_ID_PS = 'ps -p %s -o jid'
GET_BSD_JAIL_PATH = 'jls -j %s'
BLOCK_SIZE = 1024
# flag for setting the process name, found in '/usr/include/linux/prctl.h'
PR_SET_NAME = 15
argc_t = ctypes.POINTER(ctypes.c_char_p)
# The following can fail with pypy...
# AttributeError: No symbol Py_GetArgcArgv found in library <None>
try:
Py_GetArgcArgv = ctypes.pythonapi.Py_GetArgcArgv
Py_GetArgcArgv.restype = None
Py_GetArgcArgv.argtypes = [
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(argc_t),
]
except:
Py_GetArgcArgv = None
# This is both a cache for get_process_name() and tracks what we've changed our
# process name to.
_PROCESS_NAME = None
# Length of our original process name.
#
# The original author our process renaming is based on did a memset for 256,
# while Jake did it for the original process name length (capped at 1608). I'm
# not sure of the reasons for either of these limits, but setting it to
# anything higher than our original name length should be pointless, so opting
# for Jake's limit.
_MAX_NAME_LENGTH = -1
# Tracks total time spent shelling out to other commands like 'ps' and
# 'netstat', so we can account for it as part of our cpu time along with
# os.times().
SYSTEM_CALL_TIME = 0.0
SYSTEM_CALL_TIME_LOCK = threading.RLock()
class CallError(OSError):
"""
Error response when making a system call. This is an **OSError** subclass
with additional information about the process. Depending on the nature of the
error not all of these attributes will be available.
:var str msg: exception string
:var str command: command that was ran
:var int exit_status: exit code of the process
:var float runtime: time the command took to run
:var str stdout: stdout of the process
:var str stderr: stderr of the process
"""
def __init__(self, msg, command, exit_status, runtime, stdout, stderr):
self.msg = msg
self.command = command
self.exit_status = exit_status
self.runtime = runtime
self.stdout = stdout
self.stderr = stderr
def __str__(self):
return self.msg
class CallTimeoutError(CallError):
"""
Error response when making a system call that has timed out.
.. versionadded:: 1.6.0
:var float timeout: time we waited
"""
def __init__(self, msg, command, exit_status, runtime, stdout, stderr, timeout):
super(CallTimeoutError, self).__init__(msg, command, exit_status, runtime, stdout, stderr)
self.timeout = timeout
class DaemonTask(object):
"""
Invokes the given function in a subprocess, returning the value.
.. versionadded:: 1.6.0
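For instance, a minimal usage sketch (the 'fibonacci' helper here is
hypothetical, standing in for any module-level callable)...
::
  def fibonacci(n):
    return n if n < 2 else fibonacci(n - 1) + fibonacci(n - 2)
  task = DaemonTask(fibonacci, (35,), start = True)
  ...  # do other work while the subprocess runs
  print(task.join())  # blocks until the subprocess finishes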
:var function runner: function to be invoked by the subprocess
:var tuple args: arguments to provide to the subprocess
:var int priority: subprocess nice priority
:var stem.util.system.State status: state of the subprocess
:var float runtime: seconds subprocess took to complete
:var object result: return value of subprocess if successful
:var exception error: exception raised by subprocess if it failed
"""
def __init__(self, runner, args = None, priority = 15, start = False):
self.runner = runner
self.args = args
self.priority = priority
self.status = State.PENDING
self.runtime = None
self.result = None
self.error = None
self._process = None
self._pipe = None
if start:
self.run()
def run(self):
"""
Invokes the task if it hasn't already been started. If it has this is a
no-op.
"""
if self.status == State.PENDING:
self._pipe, child_pipe = multiprocessing.Pipe()
self._process = multiprocessing.Process(target = DaemonTask._run_wrapper, args = (child_pipe, self.priority, self.runner, self.args))
self._process.start()
self.status = State.RUNNING
def join(self):
"""
Provides the result of the daemon task. If still running this blocks until
the task is completed.
:returns: response of the function we ran
:raises: exception raised by the function if it failed with one
"""
if self.status == State.PENDING:
self.run()
if self.status == State.RUNNING:
self._process.join()
response = self._pipe.recv()
self.status = response[0]
self.runtime = response[1]
if self.status == State.DONE:
self.result = response[2]
elif self.status == State.FAILED:
self.error = response[2]
if self.status == State.DONE:
return self.result
elif self.status == State.FAILED:
raise self.error
else:
raise RuntimeError('BUG: unexpected status from daemon task, %s' % self.status)
@staticmethod
def _run_wrapper(conn, priority, runner, args):
start_time = time.time()
os.nice(priority)
try:
result = runner(*args) if args else runner()
conn.send((State.DONE, time.time() - start_time, result))
except Exception as exc:
conn.send((State.FAILED, time.time() - start_time, exc))
finally:
conn.close()
def is_windows():
"""
Checks if we are running on Windows.
:returns: **bool** to indicate if we're on Windows
"""
return platform.system() == 'Windows'
def is_mac():
"""
Checks if we are running on Mac OSX.
:returns: **bool** to indicate if we're on a Mac
"""
return platform.system() == 'Darwin'
def is_gentoo():
"""
Checks if we're running on Gentoo.
:returns: **bool** to indicate if we're on Gentoo
"""
return os.path.exists('/etc/gentoo-release')
def is_slackware():
"""
Checks if we are running on a Slackware system.
:returns: **bool** to indicate if we're on a Slackware system
"""
return os.path.exists('/etc/slackware-version')
def is_bsd():
"""
Checks if we are within the BSD family of operating systems. This currently
recognizes Macs, FreeBSD, and OpenBSD but may be expanded later.
:returns: **bool** to indicate if we're on a BSD OS
"""
return platform.system() in ('Darwin', 'FreeBSD', 'OpenBSD', 'NetBSD')
def is_available(command, cached=True):
"""
Checks the current PATH to see if a command is available or not. If more
than one command is present (for instance "ls -a | grep foo") then this
just checks the first.
Note that shell commands (like cd and ulimit) aren't in the PATH so this
lookup will simply assume that they're available. This only happens for
recognized shell commands (those in SHELL_COMMANDS).
:param str command: command to search for
:param bool cached: makes use of available cached results if **True**
:returns: **True** if an executable we can use by that name exists in the
PATH, **False** otherwise
"""
if ' ' in command:
command = command.split(' ')[0]
if command in SHELL_COMMANDS:
return True # we can't actually look it up, so hope the shell really provides it...
elif cached and command in CMD_AVAILABLE_CACHE:
return CMD_AVAILABLE_CACHE[command]
elif 'PATH' not in os.environ:
return False # lacking a path will cause find_executable() to internally fail
cmd_exists = False
for path in os.environ['PATH'].split(os.pathsep):
cmd_path = os.path.join(path, command)
if is_windows():
cmd_path += '.exe'
if os.path.exists(cmd_path) and os.access(cmd_path, os.X_OK):
cmd_exists = True
break
CMD_AVAILABLE_CACHE[command] = cmd_exists
return cmd_exists
def is_running(command):
"""
Checks if a process with a given name or pid is running.
.. versionchanged:: 1.6.0
Added support for list and pid arguments.
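For instance, hypothetical checks...
::
  is_running('tor')            # single process name
  is_running(['tor', 'vim'])   # any of several process names
  is_running(12345)            # pid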
:param str,list,int command: process name if a str, multiple process names if
a list, or pid if an int to be checked
:returns: **True** if the process is running, **False** if it's not among ps
results, and **None** if ps can't be queried
"""
if isinstance(command, int):
try:
os.kill(command, 0)
return True
except OSError:
return False
# Linux and the BSD families have different variants of ps. Guess based on
# the is_bsd() check which to try first, then fall back to the other.
#
# Linux
# -A - Select all processes.
# -co command - Shows just the base command.
#
# Mac / BSD
# -a - Display information about other users' processes as well as
# our own.
# -o ucomm= - Shows just the ucomm attribute ("name to be used for
# accounting")
if is_available('ps'):
if is_bsd():
primary_resolver = IS_RUNNING_PS_BSD
secondary_resolver = IS_RUNNING_PS_LINUX
else:
primary_resolver = IS_RUNNING_PS_LINUX
secondary_resolver = IS_RUNNING_PS_BSD
command_listing = call(primary_resolver, None)
if not command_listing:
command_listing = call(secondary_resolver, None)
if command_listing:
command_listing = [c.strip() for c in command_listing]
if stem.util._is_str(command):
command = [command]
for cmd in command:
if cmd in command_listing:
return True
return False
return None
def size_of(obj, exclude = None):
"""
Provides the `approximate memory usage of an object
<https://code.activestate.com/recipes/577504/>`_. This can recurse tuples,
lists, deques, dicts, and sets. To teach this function to inspect additional
object types expand SIZE_RECURSES...
::
stem.util.system.SIZE_RECURSES[SomeClass] = SomeClass.get_elements
.. versionadded:: 1.6.0
:param object obj: object to provide the size of
:param set exclude: object ids to exclude from size estimation
:returns: **int** with the size of the object in bytes
:raises: **NotImplementedError** if using PyPy
"""
if stem.prereq.is_pypy():
raise NotImplementedError('PyPy does not implement sys.getsizeof()')
if exclude is None:
exclude = set()
elif id(obj) in exclude:
return 0
try:
size = sys.getsizeof(obj)
except TypeError:
size = sys.getsizeof(0) # estimate if object lacks a __sizeof__
exclude.add(id(obj))
if type(obj) in SIZE_RECURSES:
for entry in SIZE_RECURSES[type(obj)](obj):
size += size_of(entry, exclude)
return size
def name_by_pid(pid):
"""
Attempts to determine the name a given process is running under (not
including arguments). This uses...
::
1. Information from /proc
2. ps -p <pid> -o command
:param int pid: process id of the process to be queried
:returns: **str** with the process name, **None** if it can't be determined
"""
process_name = None
if stem.util.proc.is_available():
try:
process_name = stem.util.proc.stats(pid, stem.util.proc.Stat.COMMAND)[0]
except IOError:
pass
# attempts to resolve using ps, failing if:
# - system's ps variant doesn't handle these flags (none known at the moment)
#
# example output:
# atagar@morrigan:~$ ps -p 5767 -o comm
# COMMAND
# vim
if not process_name:
try:
results = call(GET_NAME_BY_PID_PS % pid)
except OSError:
results = None
if results and len(results) == 2 and results[0] == 'COMMAND':
process_name = results[1].strip()
return process_name
def pid_by_name(process_name, multiple = False):
"""
Attempts to determine the process id for a running process, using...
::
1. pgrep -x <name>
2. pidof <name>
3. ps -o pid -C <name> (linux)
ps axc | egrep " <name>$" (bsd)
4. lsof -tc <name>
5. tasklist | str <name>.exe
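For instance, a hypothetical lookup (pids shown are made up)...
::
  pid_by_name('vim')                   # 3283 (None if zero or multiple matches)
  pid_by_name('vim', multiple = True)  # [3283, 3392]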
:param str process_name: process name for which to fetch the pid
:param bool multiple: provides a list of all pids if **True**, otherwise
results with multiple processes are discarded
:returns:
Response depends upon the 'multiple' argument as follows...
* if **False** then this provides an **int** with the process id or **None** if it can't be determined
* if **True** then this provides a **list** of all **int** process ids, and an empty list if it can't be determined
"""
# attempts to resolve using pgrep, failing if:
# - we're running on bsd (command unavailable)
#
# example output:
# atagar@morrigan:~$ pgrep -x vim
# 3283
# 3392
if is_available('pgrep'):
results = call(GET_PID_BY_NAME_PGREP % process_name, None)
if results:
try:
pids = list(map(int, results))
if multiple:
return pids
elif len(pids) == 1:
return pids[0]
except ValueError:
pass
# attempts to resolve using pidof, failing if:
# - we're running on bsd (command unavailable)
#
# example output:
# atagar@morrigan:~$ pidof vim
# 3392 3283
if is_available('pidof'):
results = call(GET_PID_BY_NAME_PIDOF % process_name, None)
if results and len(results) == 1:
try:
pids = list(map(int, results[0].split()))
if multiple:
return pids
elif len(pids) == 1:
return pids[0]
except ValueError:
pass
# attempts to resolve using ps, failing if:
# - system's ps variant doesn't handle these flags (none known at the moment)
#
# example output:
# atagar@morrigan:~/Desktop/stem$ ps -o pid -C vim
# PID
# 3283
# 3392
#
# atagar$ ps axc
# PID TT STAT TIME COMMAND
# 1 ?? Ss 9:00.22 launchd
# 10 ?? Ss 0:09.97 kextd
# 11 ?? Ss 5:47.36 DirectoryService
# 12 ?? Ss 3:01.44 notifyd
if is_available('ps'):
if not is_bsd():
# linux variant of ps
results = call(GET_PID_BY_NAME_PS_LINUX % process_name, None)
if results:
try:
pids = list(map(int, results[1:]))
if multiple:
return pids
elif len(pids) == 1:
return pids[0]
except ValueError:
pass
if is_bsd():
# bsd variant of ps
results = call(GET_PID_BY_NAME_PS_BSD, None)
if results:
# filters results to those with our process name
results = [r.split()[0] for r in results if r.endswith(' %s' % process_name)]
try:
pids = list(map(int, results))
if multiple:
return pids
elif len(pids) == 1:
return pids[0]
except ValueError:
pass
# resolves using lsof which works on both Linux and BSD, only failing if:
# - lsof is unavailable (not included by default on OpenBSD)
# - the process being run as a different user due to permissions
# - the process doesn't have any open files to be reported by lsof?
#
# flags:
# t - only show pids
# c - restrict results to that command
#
# example output:
# atagar@morrigan:~$ lsof -t -c vim
# 2470
# 2561
if is_available('lsof'):
results = call(GET_PID_BY_NAME_LSOF % process_name, None)
if results:
try:
pids = list(map(int, results))
if multiple:
return pids
elif len(pids) == 1:
return pids[0]
except ValueError:
pass
if is_available('tasklist') and is_windows():
if not process_name.endswith('.exe'):
process_name = process_name + '.exe'
process_ids = []
results = stem.util.system.call('tasklist', None)
if results:
tasklist_regex = re.compile('^\\s*%s\\s+(?P<pid>[0-9]*)' % process_name)
for line in results:
match = tasklist_regex.search(line)
if match:
process_ids.append(int(match.group('pid')))
if multiple:
return process_ids
elif len(process_ids) > 0:
return process_ids[0]
log.debug("failed to resolve a pid for '%s'" % process_name)
return [] if multiple else None
def pid_by_port(port):
"""
Attempts to determine the process id for a process with the given port,
using...
::
1. netstat -npltu | grep 127.0.0.1:<port>
2. sockstat -4l -P tcp -p <port>
3. lsof -wnP -iTCP -sTCP:LISTEN | grep ":<port>"
Most queries limit results to listening TCP connections. This function likely
won't work on Mac OSX.
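For instance, a hypothetical lookup of a tor controller's pid...
::
  pid_by_port(9051)  # 1641, or None if it can't be determined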
:param int port: port where the process we're looking for is listening
:returns: **int** with the process id, **None** if it can't be determined
"""
# attempts to resolve using netstat, failing if:
# - netstat doesn't accept these flags (Linux only)
# - the process being run as a different user due to permissions
#
# flags:
# n - numeric (disables hostname lookups)
# p - program (include pids)
# l - listening (include listening sockets)
# tu - show tcp and udp sockets, and nothing else
#
# example output:
# atagar@morrigan:~$ netstat -npltu
# Active Internet connections (only servers)
# Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
# tcp 0 0 127.0.0.1:631 0.0.0.0:* LISTEN -
# tcp 0 0 127.0.0.1:9051 0.0.0.0:* LISTEN 1641/tor
# tcp6 0 0 ::1:631 :::* LISTEN -
# udp 0 0 0.0.0.0:5353 0.0.0.0:* -
# udp6 0 0 fe80::7ae4:ff:fdf8:f53e:61e4::18 :::* -
if is_available('netstat'):
results = call(GET_PID_BY_PORT_NETSTAT, None)
if results:
# filters to results with our port
results = [r for r in results if '127.0.0.1:%s' % port in r]
if len(results) == 1 and len(results[0].split()) == 7:
results = results[0].split()[6] # process field (ex. "7184/tor")
pid = results[:results.find('/')]
if pid.isdigit():
return int(pid)
# attempts to resolve using sockstat, failing if:
# - sockstat doesn't accept the -4 flag (BSD only)
# - sockstat isn't available (encountered with OSX 10.5.8)
# - there are multiple instances using the same port on different addresses
#
# flags:
# 4 - only show IPv4 sockets
# l - listening sockets
# P tcp - only show tcp connections
# p - only includes results if the local or foreign port match this
#
# example output:
# # sockstat -4 | grep tor
# _tor tor 4397 7 tcp4 192.168.127.12:9050 *:*
# _tor tor 4397 8 udp4 192.168.127.12:53 *:*
# _tor tor 4397 12 tcp4 192.168.127.12:54011 172.16.31.10:9001
# _tor tor 4397 15 tcp4 192.168.127.12:59374 172.16.17.32:9001
# _tor tor 4397 20 tcp4 192.168.127.12:51946 172.16.17.32:443
if is_available('sockstat'):
results = call(GET_PID_BY_PORT_SOCKSTAT % port, None)
if results:
# filters to results where this is the local port
results = [r for r in results if (len(r.split()) == 7 and (':%s' % port) in r.split()[5])]
if len(results) == 1:
pid = results[0].split()[2]
if pid.isdigit():
return int(pid)
# resolves using lsof which works on both Linux and BSD, only failing if:
# - lsof is unavailable (not included by default on OpenBSD)
# - lsof doesn't provide the port ip/port, nor accept the -i and -s args
# (encountered with OSX 10.5.8)
# - the process being run as a different user due to permissions
# - there are multiple instances using the same port on different addresses
#
# flags:
# w - disables warning messages
# n - numeric addresses (disables hostname lookups)
# P - numeric ports (disables replacement of ports with their protocol)
# iTCP - only show tcp connections
# sTCP:LISTEN - listening sockets
#
# example output:
# atagar@morrigan:~$ lsof -wnP -iTCP -sTCP:LISTEN
# COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
# tor 1745 atagar 6u IPv4 14229 0t0 TCP 127.0.0.1:9051 (LISTEN)
if is_available('lsof'):
results = call(GET_PID_BY_PORT_LSOF, None)
if results:
# filters to results with our port
results = [r for r in results if (len(r.split()) == 10 and (':%s' % port) in r.split()[8])]
if len(results) == 1:
pid = results[0].split()[1]
if pid.isdigit():
return int(pid)
return None # all queries failed
def pid_by_open_file(path):
"""
Attempts to determine the process id for a process with the given open file,
using...
::
lsof -w <path>
:param str path: location of the socket file to query against
:returns: **int** with the process id, **None** if it can't be determined
"""
# resolves using lsof which works on both Linux and BSD, only failing if:
# - lsof is unavailable (not included by default on OpenBSD)
# - the file can't be read due to permissions
#
# flags:
# t - only show pids
# w - disables warning messages
#
# example output:
# atagar@morrigan:~$ lsof -tw /tmp/foo
# 4762
if is_available('lsof'):
results = call(GET_PID_BY_FILE_LSOF % path, [])
if len(results) == 1:
pid = results[0].strip()
if pid.isdigit():
return int(pid)
return None # all queries failed
def pids_by_user(user):
"""
Provides processes owned by a given user.
.. versionadded:: 1.5.0
:param str user: user to look up processes for
:returns: **list** with the process ids, **None** if it can't be determined
"""
# example output:
# atagar@odin:~$ ps -o pid -u avahi
# PID
# 914
# 915
if is_available('ps'):
if is_bsd():
results = call(GET_PIDS_BY_USER_BSD % user, None)
else:
results = call(GET_PIDS_BY_USER_LINUX % user, None)
if results:
try:
return list(map(int, results[1:]))
except ValueError:
pass
return None
def cwd(pid):
"""
Provides the working directory of the given process.
:param int pid: process id of the process to be queried
:returns: **str** with the absolute path for the process' present working
directory, **None** if it can't be determined
"""
# try fetching via the proc contents if it's available
if stem.util.proc.is_available():
try:
return stem.util.proc.cwd(pid)
except IOError:
pass
# Fall back to a pwdx query. This isn't available on BSD.
logging_prefix = 'cwd(%s):' % pid
if is_available('pwdx'):
# pwdx results are of the form:
# 3799: /home/atagar
# 5839: No such process
results = call(GET_CWD_PWDX % pid, None)
if not results:
log.debug("%s pwdx didn't return any results" % logging_prefix)
elif results[0].endswith('No such process'):
log.debug('%s pwdx processes reported for this pid' % logging_prefix)
elif len(results) != 1 or results[0].count(' ') != 1 or not results[0].startswith('%s: ' % pid):
log.debug('%s we got unexpected output from pwdx: %s' % (logging_prefix, results))
else:
return results[0].split(' ', 1)[1].strip()
# Use lsof as the final fallback. This is available on both Linux and is the
# only lookup method here that works for BSD...
# https://trac.torproject.org/projects/tor/ticket/4236
#
# flags:
# a - presents the intersection of the following arguments
# p - limits results to this pid
# d cwd - limits results to just the cwd rather than all open files
# Fn - short listing in a single column, with just the pid and cwd
#
# example output:
# ~$ lsof -a -p 75717 -d cwd -Fn
# p75717
# n/Users/atagar/tor/src/or
if is_available('lsof'):
results = call(GET_CWD_LSOF % pid, [])
if len(results) >= 2 and results[-1].startswith('n/'):
lsof_result = results[-1][1:].strip()
# If we lack read permissions for the cwd then it returns...
# p2683
# n/proc/2683/cwd (readlink: Permission denied)
if ' ' not in lsof_result:
return lsof_result
else:
log.debug('%s we got unexpected output from lsof: %s' % (logging_prefix, results))
return None # all queries failed
def user(pid):
"""
Provides the user a process is running under.
:param int pid: process id of the process to be queried
:returns: **str** with the username a process is running under, **None** if
it can't be determined
"""
if not isinstance(pid, int) or pid < 0:
return None
if stem.util.proc.is_available():
try:
import pwd # only available on unix platforms
uid = stem.util.proc.uid(pid)
if uid and uid.isdigit():
return pwd.getpwuid(int(uid)).pw_name
except:
pass
if is_available('ps'):
results = call('ps -o user %s' % pid, [])
if len(results) >= 2:
return results[1].strip()
return None
def start_time(pid):
"""
Provides the unix timestamp when the given process started.
:param int pid: process id of the process to be queried
:returns: **float** for the unix timestamp when the process began, **None**
if it can't be determined
"""
if not isinstance(pid, int) or pid < 0:
return None
if stem.util.proc.is_available():
try:
return float(stem.util.proc.stats(pid, stem.util.proc.Stat.START_TIME)[0])
except IOError:
pass
try:
ps_results = call('ps -p %s -o etime' % pid, [])
if len(ps_results) >= 2:
etime = ps_results[1].strip()
return time.time() - stem.util.str_tools.parse_short_time_label(etime)
except:
pass
return None
def tail(target, lines = None):
"""
Provides lines of a file starting with the end. For instance,
'tail -n 50 /tmp/my_log' could be done with...
::
reversed(list(tail('/tmp/my_log', 50)))
:param str,file target: path or file object to read from
:param int lines: number of lines to read
:returns: **generator** that reads lines, starting with the end
:raises: **IOError** if unable to read the file
"""
if isinstance(target, str):
with open(target, 'rb') as target_file:
for line in tail(target_file, lines):
yield line
return
# based on snippet from...
# https://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
target.seek(0, 2) # go to the end of the file
block_end_byte = target.tell()
block_number = -1
content = b''
while (lines is None or lines > 0) and block_end_byte > 0:
if (block_end_byte - BLOCK_SIZE > 0):
# read the last block we haven't yet read
target.seek(block_number * BLOCK_SIZE, 2)
content, completed_lines = (target.read(BLOCK_SIZE) + content).split(b'\n', 1)
else:
# reached the start of the file, just read what's left
target.seek(0, 0)
completed_lines = target.read(block_end_byte) + content
for line in reversed(completed_lines.splitlines()):
if lines is None or lines > 0:
if lines is not None:
lines -= 1
yield stem.util.str_tools._to_unicode(line)
block_end_byte -= BLOCK_SIZE
block_number -= 1
def bsd_jail_id(pid):
"""
Gets the jail id for a process. These seem to only exist for FreeBSD (this
style for jails does not exist on Linux, OSX, or OpenBSD).
:param int pid: process id of the jail id to be queried
:returns: **int** for the jail id, zero if this can't be determined
"""
# Output when called from a FreeBSD jail or when Tor isn't jailed:
# JID
# 0
#
# Otherwise it's something like:
# JID
# 1
ps_output = call(GET_BSD_JAIL_ID_PS % pid, [])
if len(ps_output) == 2 and len(ps_output[1].split()) == 1:
jid = ps_output[1].strip()
if jid.isdigit():
return int(jid)
os_name = platform.system()
if os_name == 'FreeBSD':
log.warn('Unable to get the jail id for process %s.' % pid)
else:
log.debug('bsd_jail_id(%s): jail ids do not exist on %s' % (pid, os_name))
return 0
def bsd_jail_path(jid):
"""
Provides the path of the given FreeBSD jail.
:param int jid: jail id to be queried
:returns: **str** of the path prefix, **None** if this can't be determined
"""
if jid != 0:
# Output should be something like:
# JID IP Address Hostname Path
# 1 10.0.0.2 tor-jail /usr/jails/tor-jail
jls_output = call(GET_BSD_JAIL_PATH % jid, [])
if len(jls_output) == 2 and len(jls_output[1].split()) == 4:
return jls_output[1].split()[3]
return None
def is_tarfile(path):
"""
Returns if the path belongs to a tarfile or not.
.. versionadded:: 1.2.0
:param str path: path to be checked
:returns: **True** if the path belongs to a tarball, **False** otherwise
"""
# Checking if it's a tar file may fail due to permissions so falling back
# to the mime type...
#
# IOError: [Errno 13] Permission denied: '/vmlinuz.old'
#
# With python 3 insufficient permissions raises an AttributeError instead...
#
# http://bugs.python.org/issue17059
try:
return tarfile.is_tarfile(path)
except (IOError, AttributeError):
return mimetypes.guess_type(path)[0] == 'application/x-tar'
def expand_path(path, cwd = None):
"""
Provides an absolute path, expanding tildes with the user's home and
appending a current working directory if the path was relative.
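For instance, assuming our home directory is '/home/atagar'...
::
  expand_path('~/Desktop')    # '/home/atagar/Desktop'
  expand_path('foo', '/tmp')  # '/tmp/foo'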
:param str path: path to be expanded
:param str cwd: current working directory to expand relative paths with, our
process' if this is **None**
:returns: **str** of the path expanded to be an absolute path, never with an
ending slash
"""
if is_windows():
relative_path = path.replace('/', '\\').rstrip('\\')
else:
relative_path = path.rstrip('/')
if not relative_path or os.path.isabs(relative_path):
# empty or already absolute - nothing to do
pass
elif relative_path.startswith('~'):
# prefixed with a ~ or ~user entry
relative_path = os.path.expanduser(relative_path)
else:
# relative path, expand with the cwd
if not cwd:
cwd = os.getcwd()
# we'll be dealing with both "my/path/" and "./my/path" entries, so
# cropping the later
if relative_path.startswith('./') or relative_path.startswith('.\\'):
relative_path = relative_path[2:]
elif relative_path == '.':
relative_path = ''
if relative_path == '':
relative_path = cwd
else:
relative_path = os.path.join(cwd, relative_path)
return relative_path
def files_with_suffix(base_path, suffix):
"""
Iterates over files in a given directory, providing filenames with a certain
suffix.
.. versionadded:: 1.2.0
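For instance, a hypothetical listing of log files...
::
  for log_path in files_with_suffix('/var/log', '.log'):
    print(log_path)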
:param str base_path: directory to be iterated over
:param str suffix: filename suffix to look for
:returns: iterator that yields the absolute path for files with the given suffix
"""
if os.path.isfile(base_path):
if base_path.endswith(suffix):
yield base_path
else:
for root, _, files in os.walk(base_path):
for filename in files:
if filename.endswith(suffix):
yield os.path.join(root, filename)
def call(command, default = UNDEFINED, ignore_exit_status = False, timeout = None, cwd = None, env = None):
"""
call(command, default = UNDEFINED, ignore_exit_status = False)
Issues a command in a subprocess, blocking until completion and returning the
results. This is not actually run in a shell so pipes and other shell syntax
are not permitted.
.. versionchanged:: 1.5.0
Providing additional information upon failure by raising a CallError. This
is a subclass of OSError, providing backward compatibility.
.. versionchanged:: 1.5.0
Added env argument.
.. versionchanged:: 1.6.0
Added timeout and cwd arguments.
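For instance, a hypothetical invocation...
::
  try:
    for line in call('ps -p %i -o args' % os.getpid(), timeout = 5):
      print(line)
  except OSError as exc:
    print('query failed: %s' % exc)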
:param str,list command: command to be issued
:param object default: response if the query fails
:param bool ignore_exit_status: if **True** then a non-zero exit status is
not treated as a failure
:param float timeout: maximum seconds to wait, blocks indefinitely if
**None**
:param dict env: environment variables
:returns: **list** with the lines of output from the command
:raises:
* **CallError** if this fails and no default was provided
* **CallTimeoutError** if the timeout is reached without a default
"""
# TODO: in stem 2.x return a struct with stdout, stderr, and runtime instead
global SYSTEM_CALL_TIME
if isinstance(command, str):
command_list = command.split(' ')
else:
command_list = list(map(str, command))
exit_status, runtime, stdout, stderr = None, None, None, None
start_time = time.time()
try:
is_shell_command = command_list[0] in SHELL_COMMANDS
process = subprocess.Popen(command_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = is_shell_command, cwd = cwd, env = env)
if timeout:
while process.poll() is None:
if time.time() - start_time > timeout:
raise CallTimeoutError("Process didn't finish after %0.1f seconds" % timeout, ' '.join(command_list), None, timeout, '', '', timeout)
time.sleep(0.001)
stdout, stderr = process.communicate()
stdout, stderr = stdout.strip(), stderr.strip()
runtime = time.time() - start_time
log.debug('System call: %s (runtime: %0.2f)' % (command, runtime))
if log.is_tracing():
trace_prefix = 'Received from system (%s)' % command
if stdout and stderr:
log.trace(trace_prefix + ', stdout:\n%s\nstderr:\n%s' % (stdout, stderr))
elif stdout:
log.trace(trace_prefix + ', stdout:\n%s' % stdout)
elif stderr:
log.trace(trace_prefix + ', stderr:\n%s' % stderr)
exit_status = process.poll()
if not ignore_exit_status and exit_status != 0:
raise OSError('%s returned exit status %i' % (command, exit_status))
if stdout:
return stdout.decode('utf-8', 'replace').splitlines()
else:
return []
except CallTimeoutError:
log.debug('System call (timeout): %s (after %0.4fs)' % (command, timeout))
if default != UNDEFINED:
return default
else:
raise
except OSError as exc:
log.debug('System call (failed): %s (error: %s)' % (command, exc))
if default != UNDEFINED:
return default
else:
raise CallError(str(exc), ' '.join(command_list), exit_status, runtime, stdout, stderr)
finally:
with SYSTEM_CALL_TIME_LOCK:
SYSTEM_CALL_TIME += time.time() - start_time
def get_process_name():
"""
Provides the present name of our process.
:returns: **str** with the present name of our process
"""
global _PROCESS_NAME, _MAX_NAME_LENGTH
if _PROCESS_NAME is None:
# Example output...
#
# COMMAND
# python run_tests.py --unit
ps_output = call('ps -p %i -o args' % os.getpid(), [])
if len(ps_output) == 2 and ps_output[0] in ('COMMAND', 'ARGS'):
_PROCESS_NAME = ps_output[1]
else:
# Falling back on using ctypes to get our argv. Unfortunately the simple
# method for getting this...
#
# ' '.join(['python'] + sys.argv)
#
# ... doesn't do the trick since this will miss interpreter arguments.
#
# python -W ignore::DeprecationWarning my_script.py
args, argc = [], argc_t()
for i in range(100):
# The ending index can be either None or raise a ValueError when
# accessed...
#
# ValueError: NULL pointer access
try:
if argc[i] is None:
break
except ValueError:
break
args.append(str(argc[i]))
_PROCESS_NAME = ' '.join(args)
_MAX_NAME_LENGTH = len(_PROCESS_NAME)
return _PROCESS_NAME
def set_process_name(process_name):
"""
Renames our current process from "python <args>" to a custom name. This is
best-effort, not necessarily working on all platforms.
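For instance, a hypothetical rename (note the length restriction mentioned
in _set_argv)...
::
  set_process_name('my-tool')
  get_process_name()  # 'my-tool'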
:param str process_name: new name for our process
:raises: **IOError** if the process cannot be renamed
"""
# This is mostly based on...
#
# http://www.rhinocerus.net/forum/lang-python/569677-setting-program-name-like-0-perl.html#post2272369
#
# ... and an adaptation by Jake...
#
# https://github.com/ioerror/chameleon
#
# A cleaner implementation is available at...
#
# https://github.com/cream/libs/blob/b38970e2a6f6d2620724c828808235be0445b799/cream/util/procname.py
#
# but I'm not quite clear on their implementation, and it only does targeted
# argument replacement (ie, replace argv[0], argv[1], etc but with a string
# the same size).
_set_argv(process_name)
if platform.system() == 'Linux':
_set_prctl_name(process_name)
elif platform.system() in ('Darwin', 'FreeBSD', 'OpenBSD'):
_set_proc_title(process_name)
def _set_argv(process_name):
"""
Overwrites our argv in a similar fashion to how it's done in C with:
strcpy(argv[0], 'new_name');
"""
if Py_GetArgcArgv is None:
return
global _PROCESS_NAME
# both gets the current process name and initializes _MAX_NAME_LENGTH
current_name = get_process_name()
argv, argc = ctypes.c_int(0), argc_t()
Py_GetArgcArgv(argv, ctypes.pointer(argc))
if len(process_name) > _MAX_NAME_LENGTH:
raise IOError("Can't rename process to something longer than our initial name (this would overwrite memory used for the env)")
# space we need to clear
zero_size = max(len(current_name), len(process_name))
ctypes.memset(argc.contents, 0, zero_size + 1) # null terminate the string's end
process_name_encoded = process_name.encode('utf8')
ctypes.memmove(argc.contents, process_name_encoded, len(process_name))
_PROCESS_NAME = process_name
def _set_prctl_name(process_name):
"""
Sets the prctl name, which is used by top and killall. This appears to be
Linux specific and has a max of 15 characters.
This is from...
http://stackoverflow.com/questions/564695/is-there-a-way-to-change-effective-process-name-in-python/923034#923034
"""
libc = ctypes.CDLL(ctypes.util.find_library('c'))
name_buffer = ctypes.create_string_buffer(len(process_name) + 1)
name_buffer.value = stem.util.str_tools._to_bytes(process_name)
libc.prctl(PR_SET_NAME, ctypes.byref(name_buffer), 0, 0, 0)
def _set_proc_title(process_name):
"""
BSD specific calls (should be compatible with both FreeBSD and OpenBSD):
http://fxr.watson.org/fxr/source/gen/setproctitle.c?v=FREEBSD-LIBC
http://www.rootr.net/man/man/setproctitle/3
"""
libc = ctypes.CDLL(ctypes.util.find_library('c'))
name_buffer = ctypes.create_string_buffer(len(process_name) + 1)
name_buffer.value = process_name.encode()
try:
libc.setproctitle(ctypes.byref(name_buffer))
except AttributeError:
# Possible issue (seen on OSX):
# AttributeError: dlsym(0x7fff6a41d1e0, setproctitle): symbol not found
pass
# TODO: drop with stem 2.x
# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old
# names for backward compatibility.
get_name_by_pid = name_by_pid
get_pid_by_name = pid_by_name
get_pid_by_port = pid_by_port
get_pid_by_open_file = pid_by_open_file
get_cwd = cwd
get_user = user
get_start_time = start_time
get_bsd_jail_id = bsd_jail_id
get_bsd_jail_path = bsd_jail_path
| StarcoderdataPython |
1946168 | <filename>examples/simple.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 <NAME>
# Made available under the MIT license.
import time
import freshroastsr700
# Create a roaster object.
roaster = freshroastsr700.freshroastsr700()
# Connect to the roaster.
roaster.connect()
# Set variables.
roaster.heat_setting = 3
roaster.fan_speed = 9
roaster.time_remaining = 20
# Begin roasting.
roaster.roast()
# This ensures the example script does not end before the roast.
time.sleep(30)
# Disconnect from the roaster.
roaster.disconnect()
| StarcoderdataPython |
4908208 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 24 15:09:49 2019
@author: epyir
"""
import torch
import torch.nn as nn
class Loss(nn.Module):
def __init__(self):
super(Loss, self).__init__()
def forward(self, input, target):
pass
class baseline(nn.Module):
"""
This baseline simply adds (or concatenates) the outputs of ROI pooling.
roi_num = number of proposals
class_num = number of actions
"""
def __init__(self, roi_num=1000, class_num=4, is_cat=False, is_feature=False):
super(baseline, self).__init__()
self.roi_num = roi_num # number of proposals
self.class_num = class_num # number of actions
self.is_cat = is_cat
self.is_feature = is_feature
self.conv1 = nn.Conv2d(256, 16, 1)
self.conv2 = nn.Conv2d(256, 16, 1)
if is_cat:
# cat
self.fc1 = nn.Linear(roi_num, 256)
else:
# else add
self.fc1 = nn.Linear(16*7*7, 256)
self.fc2 = nn.Linear(16*7*7, 256)
self.relu = nn.PReLU(256)
# self.bn1 = nn.BatchNorm1d(512)
self.fc3 = nn.Linear(512, 64)
self.relu2 = nn.PReLU(64)
self.fc4 = nn.Linear(64, self.class_num)
self.Pool = nn.AvgPool2d((184,320))
def forward(self, x, y):
"""
x (Tensor) is the feature from ROI pooling/align. e.g. x.size = (1000, 256, 7, 7)
y (Tensor) is the feature from backbone.
"""
x = torch.div(x, x.norm())
y = torch.div(y, y.norm())
if not self.is_cat:
#x = x.mean(dim=0).unsqueeze(0) # addition over channels, x.shape = (1,256,7,7)
x = torch.sum(x, dim=0).unsqueeze(0)
else:
x = x.reshape(-1) # under construction
x = self.conv1(x)
x = x.view(x.size(0), -1) # Size (1,256*7*7)
y = self.conv2(y)
y = torch.nn.functional.interpolate(y, size=(7,7), mode="bilinear") #Size(1, 256, 7, 7)
y = y.view(y.size(0), -1) # Size(1, 256*7*7)
x = self.fc1(x) # Size (1,256)
# x = self.bn1(x)
# x = self.bn1(self.fc1(x))
x = self.relu(x) # Size (1, 256)
y = self.fc2(y)
y = self.relu(y)
# concat 2 features
x = torch.cat((x, y), 1) # Size(1,512)
x = self.fc3(x) # Size (1, 64)
x = self.relu2(x)
x = self.fc4(x) # Size (1, 4)
return x
def main():
"""
Just a test.
"""
x = torch.ones(1000, 256, 7, 7)   # stand-in for ROI-pooled features
y = torch.ones(1, 256, 184, 320)  # stand-in for a backbone feature map (any spatial size works; it is interpolated to 7x7)
net = baseline()
out = net(x, y)
print(out)
print(out.shape)
if __name__ == "__main__":
main()
| StarcoderdataPython |
3369964 | """Run unit tests.
Use this to run tests and understand how tasks.py works.
Setup::
mkdir -p test-data/input
mkdir -p test-data/output
mysql -u root -p
CREATE DATABASE testdb;
CREATE USER 'testusr'@'localhost' IDENTIFIED BY 'testpwd';
GRANT ALL PRIVILEGES ON testdb.* TO 'testusr'@'%';
Run tests::
pytest test_combine.py -s
Notes:
* this will create sample csv, xls and xlsx files
* test_combine_() tests the main combine function
"""
from d6tstack.combine_csv import *
from d6tstack.sniffer import CSVSniffer
import d6tstack.utils
import math
import pandas as pd
# import pyarrow as pa
# import pyarrow.parquet as pq
import ntpath
import shutil
import dask.dataframe as dd
import sqlalchemy
import pytest
cfg_fname_base_in = 'test-data/input/test-data-'
cfg_fname_base_out_dir = 'test-data/output'
cfg_fname_base_out = cfg_fname_base_out_dir+'/test-data-'
cnxn_string = 'sqlite:///test-data/db/{}.db'
#************************************************************
# fixtures
#************************************************************
class DebugLogger(object):
def __init__(self, event):
pass
def send_log(self, msg, status):
pass
def send(self, data):
pass
logger = DebugLogger('combiner')
# sample data
def create_files_df_clean():
# create sample data
df1=pd.DataFrame({'date':pd.date_range('1/1/2011', periods=10), 'sales': 100, 'cost':-80, 'profit':20})
df2=pd.DataFrame({'date':pd.date_range('2/1/2011', periods=10), 'sales': 200, 'cost':-90, 'profit':200-90})
df3=pd.DataFrame({'date':pd.date_range('3/1/2011', periods=10), 'sales': 300, 'cost':-100, 'profit':300-100})
# cfg_col = [ 'date', 'sales','cost','profit']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
return df1, df2, df3
def create_files_df_clean_combine():
df1,df2,df3 = create_files_df_clean()
df_all = pd.concat([df1,df2,df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_clean_combine_with_filename(fname_list):
df1, df2, df3 = create_files_df_clean()
df1['filename'] = os.path.basename(fname_list[0])
df2['filename'] = os.path.basename(fname_list[1])
df3['filename'] = os.path.basename(fname_list[2])
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_colmismatch_combine(cfg_col_common,allstr=True):
df1, df2, df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
if allstr:
df_all = df_all[df_all.columns].astype(str)
return df_all
def check_df_colmismatch_combine(dfg,is_common=False, convert_date=True):
dfg = dfg.drop(['filepath','filename'],1).sort_values('date').reset_index(drop=True)
if convert_date:
dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
dfchk = create_files_df_colmismatch_combine(is_common,False).reset_index(drop=True)[dfg.columns]
assert dfg.equals(dfchk)
return True
def create_files_df_colmismatch_combine2(cfg_col_common):
df1, df2, df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
# csv standard
@pytest.fixture(scope="module")
def create_files_csv():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-csv-clean-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch():
df1,df2,df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch2():
df1,df2,df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch2-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colreorder():
df1,df2,df3 = create_files_df_clean()
cfg_col = [ 'date', 'sales','cost','profit']
cfg_col2 = [ 'date', 'sales','profit','cost']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
# save files
cfg_fname = cfg_fname_base_in+'input-csv-reorder-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan',index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb',index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_noheader():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-noheader-csv-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False, header=False)
df2.to_csv(cfg_fname % 'feb',index=False, header=False)
df3.to_csv(cfg_fname % 'mar',index=False, header=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_col_renamed():
df1, df2, df3 = create_files_df_clean()
df3 = df3.rename(columns={'sales':'revenue'})
cfg_col = ['date', 'sales', 'profit', 'cost']
cfg_col2 = ['date', 'revenue', 'profit', 'cost']
cfg_fname = cfg_fname_base_in + 'input-csv-renamed-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan', index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb', index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar', index=False)
return [cfg_fname % 'jan', cfg_fname % 'feb', cfg_fname % 'mar']
def create_files_csv_dirty(cfg_sep=",", cfg_header=True):
df1,df2,df3 = create_files_df_clean()
df1.to_csv(cfg_fname_base_in+'debug.csv',index=False, sep=cfg_sep, header=cfg_header)
return cfg_fname_base_in+'debug.csv'
# excel single-tab
def create_files_xls_single_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
df1.to_excel(cfg_fname % 'jan',index=False)
df2.to_excel(cfg_fname % 'feb',index=False)
df3.to_excel(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xlsx')
def write_file_xls(dfg, fname, startrow=0,startcol=0):
writer = pd.ExcelWriter(fname)
dfg.to_excel(writer, 'Sheet1', index=False,startrow=startrow,startcol=startcol)
dfg.to_excel(writer, 'Sheet2', index=False,startrow=startrow,startcol=startcol)
writer.save()
# excel multi-tab
def create_files_xls_multiple_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
write_file_xls(df1,cfg_fname % 'jan')
write_file_xls(df2,cfg_fname % 'feb')
write_file_xls(df3,cfg_fname % 'mar')
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xlsx')
#************************************************************
# tests - helpers
#************************************************************
def test_file_extensions_get():
fname_list = ['a.csv','b.csv']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.csv','.csv']
fname_list = ['a.xls','b.xls']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.xls','.xls']
def test_file_extensions_all_equal():
ext_list = ['.csv']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.xls']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.csv','.xls']
assert not file_extensions_all_equal(ext_list)
def test_file_extensions_valid():
ext_list = ['.csv']*2
assert file_extensions_valid(ext_list)
ext_list = ['.xls']*2
assert file_extensions_valid(ext_list)
ext_list = ['.exe','.xls']
assert not file_extensions_valid(ext_list)
#************************************************************
#************************************************************
# scan header
#************************************************************
#************************************************************
def test_csv_sniff(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
with pytest.raises(ValueError) as e:
c = CombinerCSV([])
# clean
combiner = CombinerCSV(fname_list=create_files_csv)
combiner.sniff_columns()
assert combiner.is_all_equal()
assert combiner.is_column_present().all().all()
assert combiner.sniff_results['columns_all'] == ['date', 'sales', 'cost', 'profit']
assert combiner.sniff_results['columns_common'] == combiner.sniff_results['columns_all']
assert combiner.sniff_results['columns_unique'] == []
# extra column
combiner = CombinerCSV(fname_list=create_files_csv_colmismatch)
combiner.sniff_columns()
assert not combiner.is_all_equal()
assert not combiner.is_column_present().all().all()
assert combiner.is_column_present().all().values.tolist()==[True, True, True, True, False]
assert combiner.sniff_results['columns_all'] == ['date', 'sales', 'cost', 'profit', 'profit2']
assert combiner.sniff_results['columns_common'] == ['date', 'sales', 'cost', 'profit']
assert combiner.is_column_present_common().columns.tolist() == ['date', 'sales', 'cost', 'profit']
assert combiner.sniff_results['columns_unique'] == ['profit2']
assert combiner.is_column_present_unique().columns.tolist() == ['profit2']
# mixed order
combiner = CombinerCSV(fname_list=create_files_csv_colreorder)
combiner.sniff_columns()
assert not combiner.is_all_equal()
assert combiner.sniff_results['df_columns_order']['profit'].values.tolist() == [3, 3, 2]
def test_csv_selectrename(create_files_csv, create_files_csv_colmismatch):
# rename
df = CombinerCSV(fname_list=create_files_csv).preview_rename()
assert df.empty
df = CombinerCSV(fname_list=create_files_csv, columns_rename={'notthere':'nan'}).preview_rename()
assert df.empty
df = CombinerCSV(fname_list=create_files_csv, columns_rename={'cost':'cost2'}).preview_rename()
assert df.columns.tolist()==['cost']
assert df['cost'].unique().tolist()==['cost2']
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_rename={'profit2':'profit3'}).preview_rename()
assert df.columns.tolist()==['profit2']
assert df['profit2'].unique().tolist()==[np.nan, 'profit3']
# select
l = CombinerCSV(fname_list=create_files_csv).preview_select()
assert l == ['date', 'sales', 'cost', 'profit']
l2 = CombinerCSV(fname_list=create_files_csv, columns_select_common=True).preview_select()
assert l2==l
l = CombinerCSV(fname_list=create_files_csv, columns_select=['date', 'sales', 'cost']).preview_select()
assert l == ['date', 'sales', 'cost']
l = CombinerCSV(fname_list=create_files_csv_colmismatch).preview_select()
assert l == ['date', 'sales', 'cost', 'profit', 'profit2']
l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).preview_select()
assert l == ['date', 'sales', 'cost', 'profit']
# rename+select
l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit2'], columns_rename={'profit2':'profit3'}).preview_select()
assert l==['date', 'profit3']
l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit3'], columns_rename={'profit2':'profit3'}).preview_select()
assert l==['date', 'profit3']
def test_to_pandas(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
df = CombinerCSV(fname_list=create_files_csv).to_pandas()
assert df.shape == (30, 6)
df = CombinerCSV(fname_list=create_files_csv_colmismatch).to_pandas()
assert df.shape == (30, 6+1)
assert df['profit2'].isnull().unique().tolist() == [True, False]
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).to_pandas()
assert df.shape == (30, 6)
assert 'profit2' not in df.columns
# rename+select
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit2'], columns_rename={'profit2':'profit3'}, add_filename=False).to_pandas()
assert df.shape == (30, 2)
assert 'profit3' in df.columns and not 'profit2' in df.columns
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit3'], columns_rename={'profit2':'profit3'}, add_filename=False).to_pandas()
assert df.shape == (30, 2)
assert 'profit3' in df.columns and not 'profit2' in df.columns
def test_combinepreview(create_files_csv_colmismatch):
df = CombinerCSV(fname_list=create_files_csv_colmismatch).combine_preview()
assert df.shape == (9, 6+1)
assert df.dtypes.tolist() == [np.dtype('O'), np.dtype('int64'), np.dtype('int64'), np.dtype('int64'), np.dtype('float64'), np.dtype('O'), np.dtype('O')]
def apply(dfg):
dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
return dfg
df = CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).combine_preview()
assert df.shape == (9, 6+1)
assert df.dtypes.tolist() == [np.dtype('<M8[ns]'), np.dtype('int64'), np.dtype('int64'), np.dtype('int64'), np.dtype('float64'), np.dtype('O'), np.dtype('O')]
def test_tocsv(create_files_csv_colmismatch):
fname = 'test-data/output/combined.csv'
fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_combine(filename=fname)
assert fname == fnameout
df = pd.read_csv(fname)
dfchk = df.copy()
assert df.shape == (30, 4+1+2)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
assert check_df_colmismatch_combine(df)
fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).to_csv_combine(filename=fname)
df = pd.read_csv(fname)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'filepath', 'filename']
assert check_df_colmismatch_combine(df,is_common=True)
def helper(fdir):
fnamesout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_align(output_dir=fdir)
for fname in fnamesout:
df = pd.read_csv(fname)
assert df.shape == (10, 4+1+2)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
helper('test-data/output')
helper('test-data/output/')
df = dd.read_csv('test-data/output/d6tstack-test-data-input-csv-colmismatch-*.csv')
df = df.compute()
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
assert df.reset_index(drop=True).equals(dfchk)
assert check_df_colmismatch_combine(df)
# check creates directory
try:
shutil.rmtree('test-data/output-tmp')
except:
pass
_ = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_align(output_dir='test-data/output-tmp')
try:
shutil.rmtree('test-data/output-tmp')
except:
pass
def test_topq(create_files_csv_colmismatch):
fname = 'test-data/output/combined.pq'
fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_parquet_combine(filename=fname)
assert fname == fnameout
df = pd.read_parquet(fname, engine='fastparquet')
assert df.shape == (30, 4+1+2)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
df2 = pd.read_parquet(fname, engine='pyarrow')
assert df2.equals(df)
assert check_df_colmismatch_combine(df)
df = dd.read_parquet(fname)
df = df.compute()
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
df2 = pd.read_parquet(fname, engine='fastparquet')
assert df2.equals(df)
df3 = pd.read_parquet(fname, engine='pyarrow')
assert df3.equals(df)
assert check_df_colmismatch_combine(df)
def helper(fdir):
fnamesout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_parquet_align(output_dir=fdir)
for fname in fnamesout:
df = pd.read_parquet(fname, engine='fastparquet')
assert df.shape == (10, 4+1+2)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
helper('test-data/output')
df = dd.read_parquet('test-data/output/d6tstack-test-data-input-csv-colmismatch-*.pq')
df = df.compute()
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
assert check_df_colmismatch_combine(df)
# todo: write tests such that compare to concat df not always repeat same code to test shape and columns
def test_tosql(create_files_csv_colmismatch):
tblname = 'testd6tstack'
def apply(dfg):
dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
return dfg
def helper(uri):
sql_engine = sqlalchemy.create_engine(uri)
CombinerCSV(fname_list=create_files_csv_colmismatch).to_sql_combine(uri, tblname, 'replace')
df = pd.read_sql_table(tblname, sql_engine)
assert check_df_colmismatch_combine(df)
# with date convert
CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).to_sql_combine(uri, tblname, 'replace')
df = pd.read_sql_table(tblname, sql_engine)
assert check_df_colmismatch_combine(df, convert_date=False)
uri = 'postgresql+psycopg2://psqlusr:psqlpwdpsqlpwd@localhost/psqltest'
helper(uri)
uri = 'mysql+pymysql://testusr:testpwd@localhost/testdb'
helper(uri)
uri = 'postgresql+psycopg2://psqlusr:psqlpwdpsqlpwd@localhost/psqltest'
sql_engine = sqlalchemy.create_engine(uri)
CombinerCSV(fname_list=create_files_csv_colmismatch).to_psql_combine(uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert df.shape == (30, 4+1+2)
assert check_df_colmismatch_combine(df)
CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).to_psql_combine(uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert check_df_colmismatch_combine(df, convert_date=False)
uri = 'mysql+mysqlconnector://testusr:testpwd@localhost/testdb'
sql_engine = sqlalchemy.create_engine(uri)
CombinerCSV(fname_list=create_files_csv_colmismatch).to_mysql_combine(uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert df.shape == (30, 4+1+2)
assert check_df_colmismatch_combine(df)
# todo: mysql import makes NaNs 0s
CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).to_mysql_combine(uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert check_df_colmismatch_combine(df, convert_date=False)
def test_tosql_util(create_files_csv_colmismatch):
tblname = 'testd6tstack'
uri = 'postgresql+psycopg2://psqlusr:psqlpwdpsqlpwd@localhost/psqltest'
sql_engine = sqlalchemy.create_engine(uri)
dfc = CombinerCSV(fname_list=create_files_csv_colmismatch).to_pandas()
# psql
d6tstack.utils.pd_to_psql(dfc, uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert df.equals(dfc)
uri = 'mysql+mysqlconnector://testusr:testpwd@localhost/testdb'
sql_engine = sqlalchemy.create_engine(uri)
d6tstack.utils.pd_to_mysql(dfc, uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert df.equals(dfc)
| StarcoderdataPython |
8165357 | import torch
import unittest
from cnns.nnlib.pytorch_layers.pytorch_utils import flip
from cnns.nnlib.pytorch_layers.pytorch_utils import preserve_energy2D
from cnns.nnlib.pytorch_layers.pytorch_utils import complex_mul
from cnns.nnlib.pytorch_layers.conv2D_fft import Conv2dfft
from cnns.nnlib.utils.arguments import Arguments
from cnns.nnlib.utils.general_utils import StrideType
from cnns.nnlib.utils.general_utils import ConvType
from cnns.nnlib.utils.general_utils import ConvExecType
import numpy as np
from torch import tensor
import socket
import time
if torch.cuda.is_available():
from complex_mul_cuda import \
complex_mul_stride_no_permute as complex_mul_stride_no_permute_cuda
from complex_mul_cuda import \
complex_mul_shared_log as complex_mul_shared_log_cuda
class TestPytorchUtils(unittest.TestCase):
def test_cuda_stride_no_permute_multiply_big2(self):
N, C, H, W, I = 32, 128, 8, 4, 2
F = 256
        if not torch.cuda.is_available():
            print("CUDA is not available")
            return
        device = torch.device("cuda")
dtype = torch.float
x = torch.randn(N, C, H, W, I, device=device, dtype=dtype)
y = torch.randn(F, C, H, W, I, device=device, dtype=dtype)
out = torch.zeros(N, F, H, W, I, device=device, dtype=dtype)
start = time.time()
complex_mul_stride_no_permute_cuda(x, y, out, 1024)
cuda_mul_time = time.time() - start
print("\ncuda mul time: ", cuda_mul_time)
x = x.unsqueeze(dim=1)
start = time.time()
expect = complex_mul(x, y)
expect = expect.sum(dim=2)
pytorch_mul_time = time.time() - start
print("pytorch mul time: ", pytorch_mul_time)
print(f"pytorch is faster: {cuda_mul_time/pytorch_mul_time} X times")
print("out.size(): ", out.size())
np.testing.assert_allclose(
actual=out.cpu().numpy(), desired=expect.cpu().numpy(), rtol=1e-4,
err_msg="actual out different from desired expected")
def test_cuda_stride_no_permute_multiply_big2_repeat(self):
        if not torch.cuda.is_available():
            print("CUDA is not available")
            return
N, C, H, W, I = 32, 128, 8, 4, 2
F = 256
repeat = 1000
device = torch.device("cuda")
dtype = torch.float
x = torch.randn(N, C, H, W, I, device=device, dtype=dtype)
y = torch.randn(F, C, H, W, I, device=device, dtype=dtype)
start = time.time()
for _ in range(repeat):
out = torch.zeros(N, F, H, W, I, device=device, dtype=dtype)
complex_mul_stride_no_permute_cuda(x, y, out, 1024)
cuda_mul_time = time.time() - start
print("\ncuda mul time: ", cuda_mul_time)
x = x.unsqueeze(dim=1)
start = time.time()
for _ in range(repeat):
expect = complex_mul(x, y)
expect = expect.sum(dim=2)
pytorch_mul_time = time.time() - start
print("pytorch mul time: ", pytorch_mul_time)
print(f"pytorch is faster: {cuda_mul_time/pytorch_mul_time} X times")
print("out.size(): ", out.size())
np.testing.assert_allclose(
actual=out.cpu().numpy(), desired=expect.cpu().numpy(), rtol=1e-4,
err_msg="actual out different from desired expected")
def test_cuda_shared_log_multiply_big2(self):
        if not torch.cuda.is_available():
            print("No cuda device is available!")
            return
device = torch.device("cuda")
dtype = torch.float
repeat = 1
N = 32
I = 2
# cases: F, C, H, W
cases = [(64, 3, 32, 32),
(128, 64, 8, 5),
(64, 3, 119, 60),
(64, 64, 59, 30),
(128, 64, 57, 29),
(128, 128, 31, 16),
(256, 128, 31, 16),
(256, 256, 16, 9),
(512, 256, 16, 9),
(512, 512, 8, 5)]
for case in cases:
print("\ncase: ", case)
F, C, H, W = case
x = torch.randn(N, C, H, W, I, device=device, dtype=dtype)
y = torch.randn(F, C, H, W, I, device=device, dtype=dtype)
out = torch.zeros(N, F, H, W, I, device=device, dtype=dtype)
# warm-up
z = x*x
del z
w = y*y
del w
start = time.time()
for _ in range(repeat):
out = torch.irfft(input=x,
signal_ndim=2,
onesided=True)
pytorch_fft_time = time.time() - start
print("pytorch fft time: ", pytorch_fft_time)
start = time.time()
for _ in range(repeat):
complex_mul_stride_no_permute_cuda(x, y, out, 1024)
print("cuda stride no permute mul time: ", time.time() - start)
start = time.time()
for _ in range(repeat):
# Move the channels to the last but one dimension.
# We want for xfft: N, H, W, C, I.
x_clone = x.permute(0, 2, 3, 1, 4).contiguous()
# We want for yfft: F, H, W, C, I.
y_clone = y.permute(0, 2, 3, 1, 4).contiguous()
complex_mul_shared_log_cuda(x_clone, y_clone, out)
print("cuda shared log mul time: ", time.time() - start)
x = x.unsqueeze(dim=1)
start = time.time()
for _ in range(repeat):
expect = complex_mul(x, y)
expect = expect.sum(dim=2)
print("pytorch mul time: ", time.time() - start)
# print("out.size(): ", out.size())
# np.testing.assert_allclose(
# actual=out.cpu().numpy(), desired=expect.cpu().numpy(), rtol=2,
# err_msg="actual out different from desired expected")
def test_cuda_stride_no_permute_multiply_big2_repeat_sync(self):
        if not torch.cuda.is_available():
            print("CUDA is not available")
            return
N = 32
I = 2
# C, H, W, F, HH, WW = 3, 119, 60, 2, 64, 32, 32
# C, H, W, F, HH, WW = 3, 128, 65, 64, 32, 32
# C, H, W, F, HH, WW = 32, 512, 8, 5, 2, 512, 4, 4
C, H, W, F, HH, WW = 512, 8, 5, 512, 2, 2
repeat = 100
device = torch.device("cuda")
dtype = torch.float
x = torch.randn(N, C, H, W, I, device=device, dtype=dtype)
y = torch.randn(F, C, H, W, I, device=device, dtype=dtype)
print("\ntesting\n")
start = time.time()
for _ in range(repeat):
torch.cuda.synchronize()
pytorch_sync_time = time.time() - start
print("pytorch sync time: ", pytorch_sync_time)
x_conv = torch.randn(N, C, HH, WW, device=device, dtype=dtype,
requires_grad=True)
y_conv = torch.randn(F, C, HH, WW, device=device, dtype=dtype,
requires_grad=True)
start = time.time()
for _ in range(repeat):
convStandard = torch.nn.functional.conv2d(
input=x_conv, weight=y_conv, stride=1, padding=1)
torch.cuda.synchronize()
convStandardTime = time.time() - start
print("convStandard time: ", convStandardTime)
conv = Conv2dfft(weight_value=y_conv, stride=1, bias=False, padding=1,
args=Arguments(stride_type=StrideType.STANDARD,
conv_exec_type=ConvExecType.CUDA,
preserve_energy=100))
start = time.time()
for _ in range(repeat):
convFFT = conv.forward(input=x_conv)
torch.cuda.synchronize()
convFFTtime = time.time() - start
print("convFFT time: ", convFFTtime)
speedup = convFFTtime / convStandardTime
print(f"Pytorch forward pass speedup is: {speedup} X")
# warm-up
out = torch.irfft(input=x,
signal_ndim=2,
onesided=True)
torch.cuda.synchronize()
start = time.time()
for _ in range(repeat):
out = torch.irfft(input=x,
signal_ndim=2,
onesided=True)
torch.cuda.synchronize()
pytorch_fft_time = time.time() - start
print("pytorch fft time: ", pytorch_fft_time)
# warm-up
out = torch.zeros(N, F, H, W, I, device=device, dtype=dtype)
complex_mul_stride_no_permute_cuda(x, y, out, H*W)
torch.cuda.synchronize()
start = time.time()
for _ in range(repeat):
out = torch.zeros(N, F, H, W, I, device=device, dtype=dtype)
complex_mul_stride_no_permute_cuda(x, y, out, 1024)
torch.cuda.synchronize()
cuda_mul_time = time.time() - start
print("cuda stride no permute mul time: ", cuda_mul_time)
# warm-up
out = torch.zeros(N, F, H, W, I, device=device, dtype=dtype)
# Move the channels to the last but one dimension.
# We want for xfft: N, H, W, C, I.
x_clone = x.permute(0, 2, 3, 1, 4).contiguous()
# We want for yfft: F, H, W, C, I.
y_clone = y.permute(0, 2, 3, 1, 4).contiguous()
complex_mul_shared_log_cuda(x_clone, y_clone, out)
torch.cuda.synchronize()
start = time.time()
# Move the channels to the last but one dimension.
# We want for xfft: N, H, W, C, I.
x_clone = x.permute(0, 2, 3, 1, 4).contiguous()
# We want for yfft: F, H, W, C, I.
y_clone = y.permute(0, 2, 3, 1, 4).contiguous()
for _ in range(repeat):
out = torch.zeros(N, F, H, W, I, device=device, dtype=dtype)
complex_mul_shared_log_cuda(x_clone, y_clone, out)
torch.cuda.synchronize()
cuda_mul_shared_log_time = time.time() - start
print("cuda stride shared log mul time: ", cuda_mul_shared_log_time)
x = x.unsqueeze(dim=1)
# warm-up
expect = complex_mul(x, y)
expect = expect.sum(dim=2)
start = time.time()
for _ in range(repeat):
expect = complex_mul(x, y)
expect = expect.sum(dim=2)
torch.cuda.synchronize()
pytorch_mul_time = time.time() - start
print("broadcast mul time: ", pytorch_mul_time)
print(f"pytorch is faster: {cuda_mul_time/pytorch_mul_time} X times")
print(f"cuda is faster: {pytorch_mul_time / cuda_mul_time} X times")
print(f"fft is faster than cuda multiply: {cuda_mul_time / pytorch_fft_time} X times")
# print("out.size(): ", out.size())
#
# np.testing.assert_allclose(
# actual=out.cpu().numpy(), desired=expect.cpu().numpy(), rtol=1e-4,
# err_msg="actual out different from desired expected")
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
1802600 | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 18 20:25:42 2020
"""
from PIL import Image
import os
import random
def main():
#imgarray = [f for f in os.listdir('../imagesv2') if os.path.isfile(os.path.join('../imagesv2', f))]
SAVE_DIR='path'
IMG_DIR='path'
BG_DIR='path'
NUMBEROFIMG=5000
f = open("path",'a+')
imgarray = [f for f in os.listdir(IMG_DIR) if os.path.isfile(os.path.join(IMG_DIR, f))]
    # Filter out every filename containing "ss" with a comprehension; deleting items
    # from the list while iterating over it (as before) skips elements, which is why
    # the old code had to loop several times. The remaining count should be 3410.
    imgarray = [name for name in imgarray if "ss" not in name]
bgarray = [f for f in os.listdir(BG_DIR) if os.path.isfile(os.path.join(BG_DIR, f))]
SPACING = 10
    # The program goes row by row: it advances X after each paste and checks whether the next image still fits before wrapping to the next row.
# #paste first mob
# NEXT_Y += mob_y+SPACING
# #loop
# bgmod.paste(mob,(CURR_X,CURR_Y),mob)
# CURR_X +=mob_x+SPACING
# #next row
# CURR_Y= NEXT_Y
# NEXT_Y += mob_y+SPACING
# #loop
# bgmod.paste(mob,(CURR_X,CURR_Y),mob)
# CURR_X +=mob_x+SPACING
#go row by row and paste the images in the photo
for i in range(0,NUMBEROFIMG+1):
mobname = random.choice(imgarray)
bgname = random.choice(bgarray)
mob = Image.open(IMG_DIR+mobname)
mob = mob.convert("RGBA")
bg = Image.open(BG_DIR+bgname)
bg = bg.convert("RGBA")
bgmod = bg.copy()
BG_X_MAX = bg.size[0]
BG_Y_MAX = bg.size[1]
mob_x = mob.size[0]
mob_y = mob.size[1]
        # leave 10 pixels of spacing between pasted images
#start to paste the pictures at (10,10)
CURR_X = 10
CURR_Y = 10
NEXT_Y =10
filename='bgmod'+str(i)+'.png'
#LOOP IS FOR THE SAME BACKGROUND PICTURE
while(CURR_Y+mob_y<BG_Y_MAX):
#THIS CHECKS THE Y AXIS
NEXT_Y += mob_y+SPACING
#THIS IS FOR THE ROWS
while(CURR_X+mob_x<BG_X_MAX):
if(CURR_Y+mob_y>BG_Y_MAX):
CURR_X +=mob_x+SPACING
mobname = random.choice(imgarray)
mob = Image.open(IMG_DIR+mobname)
mob = mob.convert("RGBA")
mob_x = mob.size[0]
mob_y = mob.size[1]
continue
bgmod.paste(mob,(CURR_X,CURR_Y),mob)
printstring=(SAVE_DIR[3:]+filename+","+getCoord(mob,mobname,CURR_X,CURR_Y)+getType(mobname))
f.write(printstring+'\n')
CURR_X +=mob_x+SPACING
mobname = random.choice(imgarray)
mob = Image.open(IMG_DIR+mobname)
mob = mob.convert("RGBA")
mob_x = mob.size[0]
mob_y = mob.size[1]
CURR_X=10
CURR_Y= NEXT_Y
bgmod.save(SAVE_DIR+filename)
#once this img is done, have to start all over again
f.close()
mob.close()
bg.close()
bgmod.close()
def getCoord(image,mobname,x,y):
if "mob" in mobname:
xmin=x
ymin=y
xmax=x+image.size[0]
ymax=y+image.size[1]
return str(xmin)+","+str(ymin)+","+str(xmax)+","+str(ymax)+","
elif "player" in mobname:
xmin=x+27
ymin=y+20
xmax=x+68
ymax=y+87
return str(xmin)+","+str(ymin)+","+str(xmax)+","+str(ymax)+","
def getType(imagename):
if "mob" in imagename:
return "mob"
elif "player" in imagename:
return "player"
if __name__=='__main__':
main()
| StarcoderdataPython |
11342671 | # Modified from SUTD and https://github.com/bentrevett/pytorch-sentiment-analysis
# LeNet Implementation, run on FashionMNIST
# https://ieeexplore.ieee.org/abstract/document/726791
# PDF: http://www.cs.virginia.edu/~vicente/deeplearning/readings/lecun1998.pdf
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import FashionMNIST
import torch.optim as optim
import utils.train # Local utilities! (:
# MODEL ========================================================================
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv_feature_net = nn.Sequential( # Convolution
nn.Conv2d(1, 32, 5),
nn.ReLU(),
            nn.MaxPool2d(2), # Kernel size 2 (stride defaults to the kernel size)
nn.Conv2d(32, 64, 5),
nn.ReLU(),
nn.MaxPool2d(2)
)
self.classifier = nn.Sequential( # Linear layers
nn.Linear(64*4*4, 120),
nn.ReLU(),
nn.Linear(120, 84),
nn.ReLU(),
nn.Linear(84, 10) # No softmax here! nn.CrossEntropy does it for us
)
def forward(self, x):
features = self.conv_feature_net(x)
# This flattens the output of the previous layer into a vector.
features = features.view(features.size(0), -1)
return self.classifier(features)
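
# A quick shape sanity check (added sketch, not part of the original script):
# for a 1x28x28 FashionMNIST input, conv(5) -> pool(2) -> conv(5) -> pool(2)
# yields 64 feature maps of size 4x4, which is why the classifier expects 64*4*4 inputs.
# The helper name below is new; run it manually if you want to verify the sizes.
def _check_lenet_shapes():
    model = LeNet()
    x = torch.randn(2, 1, 28, 28)                 # batch of 2 grayscale 28x28 images
    features = model.conv_feature_net(x)
    assert features.shape == (2, 64, 4, 4)        # 64 channels of 4x4 after the conv stack
    logits = model(x)
    assert logits.shape == (2, 10)                # 10 FashionMNIST classes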
# TRAINING UTILITIES ===========================================================
if __name__ == "__main__":
    # ToTensor() converts PIL images to tensors, so we don't have to do it ourselves.
transform = transforms.Compose([transforms.ToTensor()])
# Load the training, and validation datasets.
trainset = FashionMNIST(root = './data', train = True, transform = transform, download = True)
valset = FashionMNIST(root = './data', train = False, transform = transform, download = True)
batchSize = 100
loss_fn = nn.CrossEntropyLoss()
learningRate = 5e-2
cnn_model = LeNet()
optimizer = optim.Adam(cnn_model.parameters(), lr = 3e-3)
utils.train.train_model(cnn_model, loss_fn,
batchSize, trainset, valset,
optimizer,
num_epochs=5)
| StarcoderdataPython |
8122571 | import pandas as pd
from collections import Counter
from wordcloud import WordCloud
import emoji
import altair as alt
from urlextract import URLExtract
import plotly.graph_objects as go
urlextractor = URLExtract()
def fetch_messages(df, user):
"""
Returns messages of selected user
"""
if user.lower() != 'overall':
df = df[df.id == user]
df = df[df.message != '<Media omitted>']
df = df[df.message != 'This message was deleted']
return df
def fetch_stats(df, user):
"""
    Returns stats on number of messages, words, media files, and links shared
"""
if user.lower() != 'overall':
df = df[df.id == user]
# 1. fetch number of messages
num_messages = df.message.shape[0]
# 2. count number of words, and
# 3. number of urls
words = []
urls = []
for message in df.message:
        words.extend(message.split())  # split into words; extending with the raw string would count characters
urls.extend(urlextractor.find_urls(message))
num_words = len(words)
num_links = len(urls)
# 4. number of media files shared
num_medias = df[df.message == '<Media omitted>'].shape[0]
return num_messages, num_words, num_medias, num_links
def fetch_active_users(df):
"""
Return dataframe on most active users
"""
new_df = (df['id'].value_counts()
.reset_index()
.rename(columns={'index': 'User', 'id':'Messages'})
.sort_values(by='Messages', ascending=False)
)
active_users_percent = ( (df.id.value_counts()/int(df.shape[0]) * 100)
.apply(lambda x: f'{x:.2f}')
.reset_index()
.rename(columns={'index': 'User', 'id':'Messages(%)'})
.sort_values(by='Messages(%)', ascending=False)
)
return new_df, active_users_percent
def get_wordcloud(df, user):
"""
Generates word cloud
"""
if user.lower() != 'overall':
df = df[df.id == user]
df = df[df.message != '<Media omitted>']
df = df[df.message != 'This message was deleted']
wc = WordCloud(width=700, height=300, min_font_size=12, background_color='white')
wc = wc.generate(df['message'].str.cat(sep=' '))
return wc
def most_common_emojis(df, user, n=10):
"""
    Tokenize each message, keep only emoji tokens, and return a bar chart
    of the "n" most common emojis
"""
if user.lower() != 'overall':
df = df[df.id == user]
df = df[df.message != '<Media omitted>']
df = df[df.message != 'This message was deleted']
# tokenize
tokens = [word for msg in df.message for word in msg.split()]
# filter emojis
emojis = [word for word in tokens if word in emoji.UNICODE_EMOJI['en']]
common_emojis = Counter(emojis).most_common(n)
del emojis
# create a dataframe and build a barchart
def to_barchart(table):
df = pd.DataFrame(table)
df = df.rename(columns={0: 'Phrases', 1:'Count'})
chart = _get_barchart(df, 'Count','Phrases', 'Phrases', 'Count')
del df
return chart
return to_barchart(common_emojis)
def _get_barchart(df, x, y, color, label):
"""
helper function to build a barchart
"""
bar_chart = alt.Chart(df).mark_bar(
cornerRadiusTopLeft=3,
cornerRadiusTopRight=3,
).encode(
x=alt.X(x, axis=alt.Axis(title=None)),
y=alt.Y(y, axis=alt.Axis(title=None)),
color=alt.Color(color, legend=None),
)
text = bar_chart.mark_text(
align='center',
dx=9,
color='white'
).encode(
text=label
)
return bar_chart + text
def timeline_stats(df, user):
"""
Return timespan of messages, first message date, and last message date
"""
if user.lower() != 'overall':
df = df[df.id == user]
df = df[df.message != '<Media omitted>']
df = df[df.message != 'This message was deleted']
if user.lower() == 'overall':
total_days = df.Elapsed.max()
else:
total_days = df.groupby(df.Elapsed)['message'].count().shape[0]
first_date = df.iloc[0, [7, 5, 4]].to_dict()
first_date = f"{first_date['Day']}-{first_date['Month']}-{first_date['Year']}"
last_date = df.iloc[-1, [7, 5, 4]].to_dict()
last_date = f"{last_date['Day']}-{last_date['Month']}-{last_date['Year']}"
return total_days, first_date, last_date
def get_timelines(df, user):
"""
Build line chart to showcase yearly timeline,
and chart charts to show most active months, day of week, and hour of day
"""
if user.lower() != 'overall':
df = df[df.id == user]
df = df[df.message != '<Media omitted>']
df = df[df.message != 'This message was deleted']
# timelines
yearly_timeline = df.resample('M')['message'].count().reset_index()
yearly_text = df.resample('M')['message'].apply(pd.Series.mode).tolist()
df_emojis = df['message'].apply(lambda lst:[x if x in emoji.UNICODE_EMOJI['en'] else -1 for x in lst][0])
df_emojis = df[df_emojis!=-1]
levels = ['datetime']
for idx in range(df_emojis.index.nlevels-1):
levels.append(idx)
df_emojis = df_emojis['message'].resample('M').apply(pd.Series.mode).reset_index(level=levels)
daily_timeline = df.resample('D')['message'].count().reset_index()
daily_timeline = daily_timeline[daily_timeline['message'] != 0]
daily_text = df.resample('D')['message'].apply(pd.Series.mode).tolist()
# most active days, and hours
hourly_timeline = df.groupby([df.index.hour])['message'].count().reset_index()
hourly_text = df.groupby([df.index.hour])['message'].apply(pd.Series.mode).reset_index()['message'].tolist()
weekly_timeline = df.groupby([df.index.day_name()])['message'].count().reset_index()
weekly_text = df.groupby([df.index.day_name()])['message'].apply(pd.Series.mode).reset_index()['message'].tolist()
# yearly timeline displaying total messages in each month-year
monthly_fig = go.Figure()
monthly_fig.add_trace(go.Scatter(
x=yearly_timeline['datetime'],
y=yearly_timeline['message'],
hovertemplate =
'<b>%{y:.2s} messages</b>: '+
'<br><i>%{text}</i>',
text = yearly_text,
name='Timeline of emoticons'
))
monthly_fig.add_trace(go.Bar(
x=yearly_timeline['datetime'],
y=yearly_timeline['message'],
hovertemplate="%{y:.2s}",
name='Number of Messages',
marker=dict(color=yearly_timeline['message'], colorbar=None),
))
emoji_title = 'Timeline of messages (month-wise)'
if len(df_emojis) >= len(yearly_timeline):
emoji_title = 'Evolution of Emoticons over time'
monthly_fig.update_layout(
title=emoji_title,
yaxis= go.layout.YAxis(title="Total Messages"),
showlegend=False,
xaxis = go.layout.XAxis(title='Months', tickangle=45),
xaxis_tickformat = '%B<br>%Y',
autosize=False,
height=500,
)
monthly_fig.update_xaxes(
rangeslider_visible=True,
)
monthly_fig.add_trace(go.Scatter(
x=df_emojis['datetime'],
y=yearly_timeline.set_index('datetime').loc[df_emojis.datetime, 'message'],
text=df_emojis['message'].tolist(),
mode="markers+text",
name='',
))
# daily timeline of messages, displays hover text
daily_fig = go.Figure([
go.Scatter(
x=daily_timeline['datetime'],
y=daily_timeline['message'],
hovertemplate =
'<b>%{y:.2s} messages</b>:' +
'<br><i>%{text}</i>',
text = daily_text,
name='',
)])
daily_fig.update_layout(
title='Timeline of messages (day-wise)',
yaxis= go.layout.YAxis(title="Total Messages"),
showlegend=False,
xaxis = go.layout.XAxis(title='Days', tickangle=45),
xaxis_tickformat = '%d %B (%a)<br>%Y',
autosize=False,
height=500,
)
daily_fig.update_xaxes(
rangeslider_visible=True,
)
# most active days
weekly_fig = go.Figure([go.Bar(
x=weekly_timeline['datetime'],
y=weekly_timeline['message'],
hovertemplate =
'<b>%{y:.2s} messages</b>: '+
'<br><i>%{text}</i>',
text = weekly_text,
name='',
marker=dict(color=weekly_timeline['message'], colorbar=None),
)])
weekly_fig.update_layout(
title='Most Active Days',
yaxis= go.layout.YAxis(title="Total Messages"),
showlegend=False,
xaxis = go.layout.XAxis(title='Day of the Week', tickangle=45)
)
weekly_fig.update_traces(texttemplate='%{y:.2s}', textposition='outside')
# most active hours
hourly_fig = go.Figure([go.Bar(
x=hourly_timeline['datetime'],
y=hourly_timeline['message'],
hovertemplate =
'<b>%{y:.2s} messages</b>: '+
'<br><i>%{text}</i>',
text = hourly_text,
name='',
marker=dict(color=hourly_timeline['message'], colorbar=None),
)])
hourly_fig.update_layout(
title='Most Active Hours',
yaxis= go.layout.YAxis(title="Total Messages"),
showlegend=False,
xaxis = go.layout.XAxis(title='Hours', tickangle=45, tickvals=list(range(24)))
)
hourly_fig.update_traces(texttemplate='%{y:.2s}', textposition='outside')
return monthly_fig, daily_fig, weekly_fig, hourly_fig
def get_activity_map(df, user):
"""
Plot activity map for each day and hour
"""
if user.lower() != 'overall':
df = df[df.id == user]
df['period'] = df['hour'].astype(str) + '-' + ((df['hour'] + 1) % 24).astype(str)
df = df.groupby(['DayName', 'period'])['message'].count().reset_index()
df = df.fillna(1)
# pivot = df.pivot_table(index='DayName', columns='period', values='message', aggfunc='count').fillna(0)
fig = alt.Chart(df).mark_rect().encode(
alt.X('period:O', axis=alt.Axis(title='hours')),
alt.Y('DayName:O', axis=alt.Axis(title='days')),
alt.Color('message:Q', scale=alt.Scale(scheme='goldorange'))
)
return fig
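
# Example usage (an added sketch; the chat parser that produces `df` lives elsewhere
# in the project and is assumed here). `df` is expected to be a DataFrame indexed by
# message datetime with at least the columns 'id', 'message', 'hour', 'DayName',
# 'Day', 'Month', 'Year' and 'Elapsed':
#   num_msgs, num_words, num_media, num_links = fetch_stats(df, 'overall')
#   active_df, active_pct = fetch_active_users(df)
#   wc = get_wordcloud(df, 'overall')
#   monthly, daily, weekly, hourly = get_timelines(df, 'overall')
#   heatmap = get_activity_map(df, 'overall')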
| StarcoderdataPython |
1984797 | <reponame>CTSRD-CHERI/cheritest
#-
# Copyright (c) 2012 <NAME>
# Copyright (c) 2019 <NAME>
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase, attr, HexInt
#
# Test that csc raises an exception if the address at which the capability
# is to be stored is not aligned on a 32-byte boundary.
#
@attr('capabilities')
class test_cp2_x_csc_vaddr(BaseBERITestCase):
EXPECTED_EXCEPTIONS = 1
def test_cp2_x_csc_align_value_pre(self):
assert self.MIPS.a0 == HexInt(0x1234), "Incorrect initial value"
def test_cp2_x_csc_align_value_post(self):
assert self.MIPS.a1 == HexInt(0x1234), "CSC wrote to an unaligned address"
def test_cp2_x_csc_trap_kind(self):
"""Test CP0 cause register was set correctly when address was unaligned"""
self.assertCompressedTrapInfo(self.MIPS.s1, mips_cause=self.MIPS.Cause.AdES, trap_count=1, msg="CP0 status was not set to AdES when the address was unaligned")
def test_cp2_x_csc_align_vaddr(self):
'''Test CP0 badvaddr register was set correctly when address was unaligned'''
assert self.MIPS.a4 == self.MIPS.a6, "CP0 badvaddr was not set to cap1 when the address was unaligned"
| StarcoderdataPython |
6592682 | from guillotina import app_settings
from guillotina import configure
from guillotina.profile import profilable
from guillotina_rediscache import cache
from guillotina_rediscache import serialize
from guillotina_rediscache.interfaces import IRedisChannelUtility
import aioredis
import asyncio
import logging
import pickle
logger = logging.getLogger('guillotina_rediscache')
@configure.utility(provides=IRedisChannelUtility)
class RedisChannelUtility:
def __init__(self, settings=None, loop=None):
self._loop = loop
self._settings = {}
self._ignored_tids = []
self._pool = None
self._redis = None
@profilable
async def initialize(self, app=None):
settings = app_settings['redis']
while True:
try:
self._pool = await cache.get_redis_pool(self._loop)
self._redis = aioredis.Redis(self._pool)
res = await self._redis.subscribe(settings['updates_channel'])
ch = res[0]
while (await ch.wait_message()):
try:
msg = serialize.loads(await ch.get())
await self.invalidate(msg)
except (TypeError, pickle.UnpicklingError):
pass
except (asyncio.CancelledError, RuntimeError):
# task cancelled, let it die
return
except Exception:
logger.warn(
'Error subscribing to redis changes. Waiting before trying again',
exc_info=True)
await asyncio.sleep(5)
async def finalize(self, app):
settings = app_settings['redis']
if self._redis is not None:
try:
await self._redis.unsubscribe(settings['updates_channel'])
await cache.close_redis_pool()
except (asyncio.CancelledError, RuntimeError):
# task cancelled, let it die
return
@profilable
async def invalidate(self, data):
assert isinstance(data, dict)
assert 'tid' in data
assert 'keys' in data
if data['tid'] in self._ignored_tids:
# on the same thread, ignore this sucker...
self._ignored_tids.remove(data['tid'])
return
mem_cache = cache.get_memory_cache()
for key in data['keys']:
if key in mem_cache:
del mem_cache[key]
for cache_key, ob in data.get('push', {}).items():
mem_cache[cache_key] = ob
def ignore_tid(self, tid):
# so we don't invalidate twice...
self._ignored_tids.append(tid)
| StarcoderdataPython |
122053 | #! /usr/bin/python
import logging
import os.path
import argparse
from twisted.internet import reactor
from twisted.internet.protocol import Protocol
from flock.roster import Roster
from flock.controller_factory import ControllerFactory
from flock.controller.rfxcom.protocol import RfxcomProtocol
from flock.controller.rfxcom.transport import RfxcomTransport
from flock.controller.enocean.protocol import EnoceanProtocol
from flock.controller.enocean.transport import EnoceanTransport
from flock.frontend.amp import Frontend
from flock.frontend.msgpack.server import FlockMsgServer
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='configuration file')
parser.add_argument('-v', '--verbose', help='verbose level', nargs='?', default=0)
parser.add_argument('-p', '--port', help="tcp frontend server port", default=7109)
args = parser.parse_args()
if args.verbose == '1':
logging.getLogger().setLevel(logging.INFO)
elif args.verbose == '2':
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.CRITICAL)
Roster.instantiate(args.config)
factory = ControllerFactory(reactor)
frontend = Frontend(args.port, reactor)
msgpack_frontend = FlockMsgServer()
reactor.run()
| StarcoderdataPython |
9612496 | from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# Importing the Kratos Library
import KratosMultiphysics
# Import applications and dependencies
import KratosMultiphysics.ParticleMechanicsApplication as KratosParticle
# Importing the base class
from KratosMultiphysics.ParticleMechanicsApplication.mpm_solver import MPMSolver
def CreateSolver(model, custom_settings):
return MPMImplicitDynamicSolver(model, custom_settings)
class MPMImplicitDynamicSolver(MPMSolver):
def __init__(self, model, custom_settings):
# Set defaults and validate custom settings in the base class.
# Construct the base solver.
super(MPMImplicitDynamicSolver, self).__init__(model, custom_settings)
KratosMultiphysics.Logger.PrintInfo("::[MPMImplicitDynamicSolver]:: ", "Construction is finished.")
@classmethod
def GetDefaultSettings(cls):
this_defaults = KratosMultiphysics.Parameters("""{
"scheme_type" : "bossak",
"damp_factor_m" : -0.3,
"newmark_beta" : 0.25
}""")
this_defaults.AddMissingParameters(super(MPMImplicitDynamicSolver, cls).GetDefaultSettings())
return this_defaults
def AddVariables(self):
super(MPMImplicitDynamicSolver, self).AddVariables()
self._AddDynamicVariables(self.grid_model_part)
KratosMultiphysics.Logger.PrintInfo("::[MPMImplicitDynamicSolver]:: ", "Variables are all added.")
### Protected functions ###
def _CreateSolutionScheme(self):
grid_model_part = self.GetGridModelPart()
domain_size = self._GetDomainSize()
block_size = domain_size
if (self.settings["pressure_dofs"].GetBool()):
block_size += 1
# Setting the time integration schemes
scheme_type = self.settings["scheme_type"].GetString()
if(scheme_type == "newmark"):
damp_factor_m = 0.0
newmark_beta = self.settings["newmark_beta"].GetDouble()
elif(scheme_type == "bossak"):
damp_factor_m = self.settings["damp_factor_m"].GetDouble()
newmark_beta = self.settings["newmark_beta"].GetDouble()
else:
err_msg = "The requested scheme type \"" + scheme_type + "\" is not available!\n"
err_msg += "Available options are: \"newmark\", \"bossak\""
raise Exception(err_msg)
is_dynamic = self._IsDynamic()
return KratosParticle.MPMResidualBasedBossakScheme( grid_model_part,
domain_size,
block_size,
damp_factor_m,
newmark_beta,
is_dynamic)
def _IsDynamic(self):
return True | StarcoderdataPython |
3478034 | import numpy as np
class Cache(object):
def __init__(self, dim):
self.dim = dim
self.init()
def add(self, i, j, delta_cc, subset):
self.cc_list[i, j] = delta_cc
self.subsets[i, j] = subset
def remove(self, idx):
self.cc_list = np.delete(self.cc_list, idx, 0)
self.cc_list = np.delete(self.cc_list, idx, 1)
self.subsets = np.delete(self.subsets, idx, 0)
self.subsets = np.delete(self.subsets, idx, 1)
def argmax_cc(self):
return np.unravel_index(np.argmax(self.cc_list), self.cc_list.shape)
def init(self):
self.cc_list = np.zeros((self.dim, self.dim))
self.subsets = np.zeros((self.dim, self.dim), dtype=object)
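
if __name__ == '__main__':
    # Minimal usage sketch (added example, not part of the original module).
    cache = Cache(dim=3)
    cache.add(0, 1, delta_cc=0.7, subset={0, 1})
    cache.add(1, 2, delta_cc=0.3, subset={1, 2})
    print(cache.argmax_cc())    # indices of the largest delta_cc, here (0, 1)
    cache.remove(0)             # drop row/column 0 from both matrices
    print(cache.cc_list.shape)  # (2, 2)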
| StarcoderdataPython |
129623 | import ast
import sqlite3
import sys
import traceback
import unittest
from io import StringIO
from time import sleep
from unittest import mock
from littleutils import SimpleNamespace, only
import sorcery as spells
from sorcery import unpack_keys, unpack_attrs, print_args, magic_kwargs, maybe, args_with_source, spell
from sorcery.spells import PYPY
class MyListWrapper(object):
def __init__(self, lst):
self.list = lst
def _make_new_wrapper(self, method_name, *args, **kwargs):
method = getattr(self.list, method_name)
new_list = method(*args, **kwargs)
return type(self)(new_list)
append, extend, clear, __repr__, __str__, __eq__, __hash__, \
__contains__, __len__, remove, insert, pop, index, count, \
sort, __iter__, reverse, __iadd__ = spells.delegate_to_attr('list')
copy, __add__, __radd__, __mul__, __rmul__ = spells.call_with_name(_make_new_wrapper)
class Foo(object):
@magic_kwargs
def bar(self, **kwargs):
return set(kwargs.items()) | {self}
@magic_kwargs
def magic_only_kwarg(n, *, y):
return n, y
class TestStuff(unittest.TestCase):
def test_unpack_keys_basic(self):
obj = SimpleNamespace(thing=SimpleNamespace())
d = dict(foo=1, bar=3, spam=7, baz=8, x=9)
out = {}
foo, obj.thing.spam, obj.bar, out['baz'] = unpack_keys(d)
self.assertEqual(foo, d['foo'])
self.assertEqual(obj.bar, d['bar'])
self.assertEqual(obj.thing.spam, d['spam'])
self.assertEqual(out, {'baz': d['baz']})
def test_unpack_keys_for_loop(self):
results = []
for x, y in unpack_keys([
dict(x=1, y=2),
dict(x=3, z=4),
dict(a=5, y=6),
dict(b=7, c=8),
], default=999):
results.append((x, y))
self.assertEqual(results, [
(1, 2),
(3, 999),
(999, 6),
(999, 999),
])
def test_unpack_keys_list_comprehension(self):
self.assertEqual(
[(y, x) for x, y in unpack_keys([
dict(x=1, y=2),
dict(x=3, y=4),
])],
[
(2, 1),
(4, 3),
])
def test_unpack_keys_bigger_expression(self):
x, y = map(int, unpack_keys(dict(x='1', y='2')))
self.assertEqual(x, 1)
self.assertEqual(y, 2)
def test_unpack_keys_skip_single_assigned_name(self):
x, y = [int(v) for v in unpack_keys(dict(x='1', y='2'))]
self.assertEqual(x, 1)
self.assertEqual(y, 2)
def test_unpack_keys_extras(self):
env = dict(DATABASE_USERNAME='me',
DATABASE_PASSWORD='<PASSWORD>')
username, password = unpack_keys(env, prefix='DATABASE_', swapcase=True)
self.assertEqual(username, 'me')
self.assertEqual(password, '<PASSWORD>')
def test_unpack_attrs(self):
obj = SimpleNamespace(aa='bv', bb='cc', cc='aa')
cc, bb, aa = unpack_attrs(obj)
self.assertEqual(aa, obj.aa)
self.assertEqual(bb, obj.bb)
self.assertEqual(cc, obj.cc)
d, e = unpack_attrs(obj, default=9)
assert d == e == 9
def test_print_args(self):
out = StringIO()
x = 3
y = 4
print_args(x + y,
x * y,
x -
y, file=out)
self.assertEqual('''\
x + y =
7
x * y =
12
x -
y =
-1
''', out.getvalue())
def test_dict_of(self):
a = 1
obj = SimpleNamespace(b=2)
self.assertEqual(spells.dict_of(
a, obj.b,
c=3, d=4
), dict(
a=a, b=obj.b,
c=3, d=4))
def test_no_starargs_in_dict_of(self):
args = [1, 2]
with self.assertRaises(TypeError):
spells.dict_of(*args)
def test_delegation(self):
lst = MyListWrapper([1, 2, 3])
lst.append(4)
lst.extend([1, 2])
lst = (lst + [5]).copy()
self.assertEqual(type(lst), MyListWrapper)
self.assertEqual(lst, [1, 2, 3, 4, 1, 2, 5])
def test_magic_kwargs(self):
foo = Foo()
x = 1
y = 2
w = 10
self.assertEqual(foo.bar(x, y, z=3),
{('x', x), ('y', y), ('z', 3), foo})
self.assertEqual(magic_only_kwarg(x, y), (x, y))
@magic_kwargs
def spam(n, **kwargs):
return n, kwargs
self.assertEqual(spam(x, y, z=5),
(x, dict(y=y, z=5)))
@magic_kwargs
def spam(n, m, **kwargs):
return n, m, kwargs
self.assertEqual(spam(x, w, y, z=5),
(x, w, dict(y=y, z=5)))
with self.assertRaises(TypeError):
@magic_kwargs
def _(a=1):
print(a)
with self.assertRaises(TypeError):
@magic_kwargs
def _(*a):
print(a)
def test_maybe(self):
if PYPY:
with self.assertRaises(NotImplementedError):
maybe(None)
return
n = None
assert maybe(n) is None
self.assertIsNone(maybe(n))
assert maybe(n).a.b.c()[4]().asd.asd()() is None
assert maybe(n)()()() is None
assert maybe(0) == 0
assert maybe({'a': 3})['a'] == 3
assert maybe({'a': {'b': 3}})['a']['b'] == 3
assert maybe({'a': {'b': 3}})['a']['b'] + 2 == 5
assert maybe({'a': {'b': None}})['a']['b'] is None
def test_select_from(self):
conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('CREATE TABLE points (x INT, y INT)')
c.execute("INSERT INTO points VALUES (5, 3), (8, 1)")
conn.commit()
assert [(3, 5), (1, 8)] == [(y, x) for y, x in spells.select_from('points')]
y = 1
x = spells.select_from('points', where=[y])
assert (x, y) == (8, 1)
def test_multiple_attr_calls(self):
x = 3
y = 5
self.assertEqual([
spells.dict_of(x),
spells.dict_of(y),
], [dict(x=x), dict(y=y)])
self.assertEqual([spells.dict_of(x), spells.dict_of(y)],
[dict(x=x), dict(y=y)])
def test_no_assignment(self):
with self.assertRaises(TypeError):
unpack_keys(dict(x=1, y=2))
def test_spell_repr(self):
self.assertRegex(repr(spells.dict_of),
r'Spell\(<function dict_of at 0x.+>\)')
def test_assigned_names(self):
x, y = ['_' + s for s in spells.assigned_names()]
self.assertEqual(x, '_x')
self.assertEqual(y, '_y')
# noinspection PyTrailingSemicolon
def test_semicolons(self):
# @formatter:off
tester(1); tester(2); tester(3)
tester(9
); tester(
8); tester(
99
); tester(33); tester([4,
5, 6, [
7]])
# @formatter:on
def test_args_with_source(self):
self.assertEqual(args_with_source(1 + 2, 3 * 4),
[("1 + 2", 3), ("3 * 4", 12)])
self.assertEqual(
args_with_source(
self.assertEqual(args_with_source(1 + 2), [("1 + 2", 3)])),
[(
'self.assertEqual(args_with_source(1 + 2), [("1 + 2", 3)])',
None,
)],
)
def test_switch(self):
result = spells.switch(2, lambda: {
1: 10,
2: 20,
1 / 0: 1 / 0
})
self.assertEqual(result, 20)
result = spells.switch(2, lambda: {
1: 10,
{{5, 2, 1 / 0}}: 20,
3: 1 / 0
})
self.assertEqual(result, 20)
with self.assertRaises(KeyError):
spells.switch(2, lambda: {
1: 10,
3: 30,
})
result = spells.switch(2, lambda: {
1: 10,
3: 30,
}, default=-1)
self.assertEqual(result, -1)
with self.assertRaises(TypeError):
spells.switch(2, {
1: 10,
2: 20,
})
with self.assertRaises(TypeError):
spells.switch(2, lambda: [{
1: 10,
2: 20,
}])
def test_timeit_in_function(self):
with self.assertRaises(ValueError):
spells.timeit()
def test_decorator(self):
@empty_decorator
@decorator_with_args(tester('123'), x=int())
@tester(list(tuple([1, 2])), returns=empty_decorator)
@tester(
list(
tuple(
[3, 4])),
returns=empty_decorator)
@empty_decorator
@decorator_with_args(
str(),
x=int())
@tester(list(tuple([5, 6])), returns=empty_decorator)
@tester(list(tuple([7, 8])), returns=empty_decorator)
@empty_decorator
@decorator_with_args(tester('sdf'), x=tester('123234'))
def foo():
pass
def test_list_comprehension(self):
str([tester(int(x)) for x in tester([1]) for _ in tester([2]) for __ in [3]])
str([[[tester(int(x)) for x in tester([1])] for _ in tester([2])] for __ in [3]])
return str([(1, [
(2, [
tester(int(x)) for x in tester([1])])
for _ in tester([2])])
for __ in [3]])
def test_lambda(self):
self.assertEqual((lambda x: (tester(x), tester(x)))(tester(3)), (3, 3))
(lambda: (lambda: tester(1))())()
self.assertEqual((lambda: [tester(x) for x in tester([1, 2])])(), [1, 2])
def test_indirect_call(self):
dict(x=tester)['x'](tester)(3)
def test_compound_statements(self):
with self.assertRaises(TypeError):
try:
for _ in tester([2]):
while tester(0):
pass
else:
tester(4)
else:
tester(5)
raise ValueError
except tester(ValueError):
tester(9)
raise TypeError
finally:
tester(10)
# PyCharm getting confused somehow?
# noinspection PyUnreachableCode
str()
with self.assertRaises(tester(Exception)):
if tester(0):
pass
elif tester(0):
pass
elif tester(1 / 0):
pass
def test_generator(self):
def gen():
for x in [1, 2]:
yield tester(x)
gen2 = (tester(x) for x in tester([1, 2]))
assert list(gen()) == list(gen2) == [1, 2]
@spell
def tester(frame_info, arg, returns=None):
result = eval(
compile(ast.Expression(only(frame_info.call.args)), '<>', 'eval'),
frame_info.frame.f_globals,
frame_info.frame.f_locals,
)
assert result == arg, (result, arg)
if returns is None:
return arg
return returns
assert tester([1, 2, 3]) == [1, 2, 3]
def empty_decorator(f):
return f
def decorator_with_args(*_, **__):
return empty_decorator
class TestTimeit(unittest.TestCase):
def patch(self, *args, **kwargs):
patcher = mock.patch(*args, **kwargs)
patcher.start()
self.addCleanup(patcher.stop)
def setUp(self):
self.patch('sorcery.spells._raise', lambda e: e)
self.patch('sys.stdout', StringIO())
def assert_usual_output(self):
self.assertRegex(
sys.stdout.getvalue(),
r"""
Number of trials: 1
Method 1: 1\.\d{3}
Method 2: 1\.\d{3}
Method 1: 1\.\d{3}
Method 2: 1\.\d{3}
Best times:
-----------
Method 1: 1\.\d{3}
Method 2: 1\.\d{3}
""".strip())
def test_no_result(self):
if spells.timeit(repeat=2):
sleep(1)
else:
sleep(1.1)
self.assert_usual_output()
# noinspection PyUnusedLocal
def test_matching_result(self):
if spells.timeit(repeat=2):
sleep(1)
result = 3
else:
sleep(1.1)
result = 3
self.assert_usual_output()
# noinspection PyUnusedLocal
def test_not_matching_result(self):
with self.assertRaises(AssertionError):
if spells.timeit():
result = 3
else:
result = 4
def test_exception(self):
try:
if spells.timeit():
print(1 / 0)
else:
pass
except ZeroDivisionError:
traceback.print_exc(file=sys.stdout)
stdout = sys.stdout.getvalue()
self.assertIn('<timeit-src>', stdout)
self.assertIn('1 / 0', stdout)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
9666812 | <filename>AnalysisCurlDemo/interface/i451c4p2.py
# coding: utf-8
from util import fileutil
from module import dataprocess
from module import dataplot
from module import polyprocess
import _thread
def getcurveresult(filledDateValArr):
step = 5
((dateArr, dataArr), (mergedDateArr, mergedDataArr)) = \
dataprocess.mergedata(filledDateValArr, step)
# polyfit
degree = 15
pFunc = polyprocess.getpolycurve(mergedDataArr, degree, step)
# all real roots
realRootList = polyprocess.getpolyderealroots(pFunc, 0, len(dataArr))
# increase and decrease section
(incSecList, decSecList) = polyprocess.getincanddecsec(pFunc, realRootList,
0, len(dataArr) - 1)
print(incSecList)
print(decSecList)
# fit the date
(incsecDateArr, decsecDateArr) = polyprocess.getincanddecfitdate(
incSecList, decSecList, dateArr)
print(incsecDateArr)
print(decsecDateArr)
# line fit
(incPolylineArr, decPolylineArr) = polyprocess.getpolylineinincdecsec(
pFunc, incSecList, decSecList)
print([p.coefficients[0] for p in incPolylineArr])
print([p.coefficients[0] for p in decPolylineArr])
print('\n')
# plot
dataplot.plot2linecurveandpolylinesec(dataArr, mergedDataArr, step,
incSecList, decSecList,
incPolylineArr, decPolylineArr,
pFunc)
def start():
filePath = 'data/raw/451_c4_p2#mv.csv'
dateValDictArr = fileutil.readcsv(filePath, 'gbk', rowstart=399245, rowend=-1,
datecol=2, valcol=1, datescope=(1, -1))
filledDateValArr = dataprocess.fillDateAndVal(dateValDictArr)
getcurveresult(filledDateValArr)
getcurveresult(filledDateValArr[10208:10508])
getcurveresult(filledDateValArr[10376:-1])
dataplot.showplot()
| StarcoderdataPython |
300948 | <filename>demos/samplemax_solution.py
import random
import sys
from mpyc.seclists import seclist
from mpyc.runtime import mpc
"""
Run it with one party:
python3 samplemax.py
Run it at once with 3 parties, of which 1 corrupted:
python3 samplemax.py -M3 -T1
Run it in separate shells with 3 parties, of which 1 corrupted:
python3 samplemax.py -M3 -T1 -I0
python3 samplemax.py -M3 -T1 -I1
python3 samplemax.py -M3 -T1 -I2
"""
# Create a SecInt() placeholder with no value yet
# This is in Essence a class that inherits from SecureInteger. secint instantiation accepts a value -> secint(value)
# The value is mapped onto a Galois Field, defined in secint.field
secint = mpc.SecInt()
# Connect all parties with each other
mpc.run(mpc.start())
n = len(mpc.parties) # number of parties
i = mpc.pid # my index
m = 10 # input array length
B = 100*m # sum of inputs per party
k = max(n, B) # bound for encoding argmax
#
# fill inputs with random numbers that sum to B
# B/isum clculates the factor by which the input needs to be changed such that inputs sum to B
#
inputs = [random.randrange(1, B) for j in range(0, m)]
isum = sum(inputs) # normalize inputs
for j in range(0, m):
inputs[j] *= B/isum
inputs[j] = int(inputs[j])
inputs[0] = B - sum(inputs[1:]) # adjust sum for possible rounding errors
print(f'Party {i} with inputs {inputs}, sum = {sum(inputs)}.')
#
# find maximal input element
#
#A secure list contains secret-shared numbers. Apart from hiding the contents of the
#list, however, it is also possible to hide which items are accessed and which items
#are updated. In principle, only the length of a secure list remains public.
# Subclasses list type so functionality is almost the same for non secure types as with normal list
slist = seclist([], secint)
smaxlist = seclist([], secint)
for j in range(0,m):
# j Go through all 10 values in list
for ii in range(0,n):
# ii Go through all parties
# secint creates a SecInt Object which contains a field entity which handles GF
# - l = runtime.options.bit_length (32)
# - n = 2
# - f = 0
# SecInt.fields = Results in finfields.GF((9223372036854775907, 2, 9223372036854775906), 0)
# SecInt:
# name = f'SecInt{l}' if p is None else f'SecInt{l}({p})'
# sectype = type(name, (SecureInteger,), {'__slots__': ()})
# sectype.__doc__ = 'Class of secret-shared integers.'
# sectype.field = _pfield(l, 0, p, n)
# sectype.bit_length = l
# globals()[name] = sectype # NB: exploit (almost) unique name dynamic SecureInteger type
# return sectype
# Note
# sectype = type(name, (SecureInteger,), {'__slots__': ()})
# sectype is a class object deriving from SecureInteger which takes a value in its __init__, this is where inputs[j] goes
# value = self.field(value) it uses the GF Field above which is a finfields.GF and maps that value to the field
## INPUT
# input calls _distribute, x is secint, sender ii is int
# self._distribute([SecInt32:SecureInteger(value[j])], [sender])
# when the output of mpc.input is not of securetype securelist will first create one with self.sectype(value) before appending
"""
mpc.input:
Input x to the computation.
Value x is a secure object, or a list of secure objects.
The senders are the parties that provide an input.
The default is to let every party be a sender.
"""
#Build execution graph for jth input and all parties, append them to secure list
# Input: Create shares that are sent (senders!) to each other
#in_shares = thresha.random_split(x, t, m), take secure value x, t is the thershold (probably degree of polynomial, m is number of senders
slist.append(mpc.input(secint(inputs[j]), ii))
# maximum value over s list which contains secure jth elements over all parties
smaxlist.append(mpc.max(slist))
# Global maximum
# Compute the maximum over the per-index maxima: smaxlist holds m values (one per input index), each already the maximum across all n parties
# max max is max([list with m values]), thus the global maximum value
#mpc.output is responsible for gathering all shares and recombining them to the correct output, this can be computed by all receivers
# mpc run starts execution of all coroutines that are inside the loop, until all futures recevied actual values
maxmax = mpc.run(mpc.output(mpc.max(smaxlist)))
print(f'Maximal input element is {maxmax}.')
#
# find maximal input element and corresponding party
#
# This finds the index of the maximum value for each of the lists given by a party
smax = secint(0)
sargmax = secint(0)
for j in range(0,m): #Nr of values per list of each party
for ii in range(0,n): #Nr of parties, go through each party
v = mpc.input(secint(inputs[j]), ii)
smaxtmp = mpc.if_else(v > smax, v, smax)
sargmax = mpc.if_else(v > smax, secint(ii), sargmax)
smax = smaxtmp
argmax = mpc.run(mpc.output(sargmax))
maxmax = mpc.run(mpc.output(smax))
print(f'Maximal input element is {maxmax}, from party {argmax}.')
mpc.run(mpc.shutdown())
| StarcoderdataPython |
5026738 | #! /usr/bin/python3
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Code Modified from Rethink Robotics:
import os
import sys
import argparse
import rospy
import cv2
import cv_bridge
from sensor_msgs.msg import Image
from can_sort.srv import DisplayImage, DisplayImageResponse
class Display_Image:
""" A class for sending images of the current object to be recyled to Baxter's Display
"""
def __init__(self):
""" Initialize environment
"""
self.display_srv = rospy.Service(
'display_image', DisplayImage, self.send_image_srv)
rospy.logdebug(f"Service initialized")
def send_image_srv(self, req):
"""
Send the image located at the specified path to the head
display on Baxter.
:param path: path to the image file to load and send
Args:
req: Service request, an integer determinig which image to display
Returns:
DisplayImageResponse() (srv): returns a service respond that displays an image on Baxter's Display
"""
        # WE WANT TO PASS A RELATIVE PATH INSTEAD OF USING THE ABSOLUTE PATH
if req.img_num == 1:
path = "/images/head_images/recycle.jpg"
elif req.img_num == 2:
path = "/images/head_images/dr_pepper.png"
elif req.img_num == 3:
path = "/images/head_images/juice.png"
elif req.img_num == 4:
path = "/images/head_images/happy_bin.png"
elif req.img_num == 5:
path = "/images/head_images/mystery_man.png"
img = cv2.imread(path)
msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding="bgr8")
pub = rospy.Publisher('/robot/xdisplay', Image,
latch=True, queue_size=1)
pub.publish(msg)
rospy.sleep(1)
return DisplayImageResponse()
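
# Example client call (an added sketch, not part of the original node; assumes a
# running ROS master and the display node above):
#   rospy.wait_for_service('display_image')
#   display = rospy.ServiceProxy('display_image', DisplayImage)
#   display(1)   # show the "recycle" image on Baxter's head display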
# Main Loop
def main():
rospy.init_node('disp_img', anonymous=True)
display = Display_Image()
rospy.spin()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
| StarcoderdataPython |
6704196 | <gh_stars>0
'''
Vlad loves riding public transport and collecting lucky tickets. Vlad lives in a small town where tickets have two-digit numbers in the format XX. A ticket is called lucky if its number consists of identical digits.
Input format
A natural two-digit number - the number of the ticket Vlad got today
Output format
Print "Ура, повезло!" ("Hooray, lucky!") if Vlad got a lucky ticket, and "Повезет в следующий раз" ("Better luck next time") otherwise
Sample Input 1:
33
Sample Output 1:
Ура, повезло!
Sample Input 2:
48
Sample Output 2:
Повезет в следующий раз
'''
happy_num = int(input())
if happy_num // 10 == happy_num % 10:
print('Ура, повезло!')
else:
print('Повезет в следующий раз')
| StarcoderdataPython |
5061149 | <gh_stars>1-10
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 damian <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
Python notepad
"""
from __future__ import print_function
import bisect
from itertools import *
from operator import *
values = [3,6,2,1,7,6,3,5]
print('==========================================================================================')
print("test bisect")
l = []
for v in values:
bisect.bisect(l, v)
bisect.insort(l, v)
print(l)
print('==========================================================================================')
print("test LifoQueue")
import queue
q = queue.LifoQueue()
for i in range(5):
q.put(i)
while not q.empty():
print(q.get(), end=' ')
print('\n')
print('==========================================================================================')
print("test map")
for i in map(lambda x: x*2, range(0, 5)):
print(i)
print('==========================================================================================')
print("test list comperhension")
for i in [x*2 for x in range(0, 5)]:
print(i)
print('==========================================================================================')
print("test zip count repeat")
for i in zip(count(), repeat('damian', 5)):
print('{}: {}'.format(*i))
print('==========================================================================================')
print("test cycle")
for i in zip(range(0,5), cycle(['damian', 'ziobro'])):
print('{}: {}'.format(*i))
print('==========================================================================================')
print("test cycle")
print(list(accumulate('abcde')))
print('==========================================================================================')
print("arg getter example")
class SampleClass(object):
"""docstring for """
def __init__(self, identity):
self._id = identity
def __repr__(self):
return 'SampleClass({})'.format(self._id)
@property
def id(self):
return self._id
@id.setter
def id(self, identity):
self._id = identity
l = [SampleClass(i) for i in range(5)]
# getting ids
getter = attrgetter('id')
ids = [getter(obj) for obj in l]
print("ids: {}".format(ids))
print('==========================================================================================')
print("test decimal and context")
import decimal
dec_num = decimal.Decimal("0.12345")
for i in range(1, 5):
decimal.getcontext().prec = i
    decimal.getcontext().rounding = decimal.ROUND_HALF_EVEN
print("{:8}: {}".format(dec_num, dec_num * 1))
| StarcoderdataPython |
9787936 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Project name: JD-Script / jd_zjd
Author: Curtin
Function: WeChat mini-program "Earn JD Beans" - auto-assist for the "split 1 billion JD beans" event; assists account 1 by default, you need multiple accounts to play.
Date: 2021/6/25 9:16 PM
TG chat https://t.me/topstyle996
TG channel https://t.me/TopStyle2021
updateTime: 2021.7.24 14:22
'''
print("赚京豆-瓜分10亿京豆自动助力--活动已结束\nTG交流 https://t.me/topstyle996\nTG频道 https://t.me/TopStyle2021")
exit(0)
#####
#ck: cookies are read first from the [JDCookies.txt] file, then from the ENV variable JD_COOKIE='ck1&ck2', and only last from the cookies= value in this script
cookies=''
#Accounts to assist: fill in pt_pin or nickname values, e.g. zlzh = ['aaaa','xxxx','yyyy']; ENV is supported: export zlzh=['CurtinLV','xxxx','yyyy']
zlzh = ['Curtinlv', '买买买', '东哥']
#####
import os, re
try:
import requests
except Exception as e:
print(e, "\n缺少requests 模块,请执行命令安装:python3 -m pip install requests")
exit(3)
from urllib.parse import unquote
import json
import time
requests.packages.urllib3.disable_warnings()
pwd = os.path.dirname(os.path.abspath(__file__)) + os.sep
t = time.time()
aNum = 0
beanCount = 0
class getJDCookie(object):
# 适配各种平台环境ck
def getckfile(self):
if os.path.exists('/ql/config/env.sh'):
print("当前环境青龙面板新版")
return '/ql/config/env.sh'
elif os.path.exists('/ql/config/cookie.sh'):
print("当前环境青龙面板旧版")
return '/ql/config/env.sh'
elif os.path.exists('/jd/config/config.sh'):
print("当前环境V4")
return '/jd/config/config.sh'
elif os.path.exists(pwd + 'JDCookies.txt'):
return pwd + 'JDCookies.txt'
else:
return pwd + 'JDCookies.txt'
# 获取cookie
def getCookie(self):
global cookies
ckfile = self.getckfile()
try:
if os.path.exists(ckfile):
with open(ckfile, "r", encoding="utf-8") as f:
cks = f.read()
f.close()
if 'pt_key=' in cks and 'pt_pin=' in cks:
r = re.compile(r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I)
cks = r.findall(cks)
if len(cks) > 0:
cookies = ''
for i in cks:
cookies += i
else:
with open(pwd + 'JDCookies.txt', "w", encoding="utf-8") as f:
cks = "#多账号换行,以下示例:(通过正则获取此文件的ck,理论上可以自定义名字标记ck,也可以随意摆放ck)\n账号1【Curtinlv】cookie1;\n账号2【TopStyle】cookie2;"
f.write(cks)
f.close()
pass
except Exception as e:
print(f"【getCookie Error】{e}")
    # Check that the cookie format is valid
def getUserInfo(self, ck, pinName, userNum):
url = 'https://me-api.jd.com/user_new/info/GetJDUserInfoUnion?orgFlag=JD_PinGou_New&callSource=mainorder&channel=4&isHomewhite=0&sceneval=2&sceneval=2&callback=GetJDUserInfoUnion'
headers = {
'Cookie': ck,
'Accept': '*/*',
'Connection': 'close',
'Referer': 'https://home.m.jd.com/myJd/home.action',
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'me-api.jd.com',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Mobile/15E148 Safari/604.1',
'Accept-Language': 'zh-cn'
}
try:
resp = requests.get(url=url, verify=False, headers=headers, timeout=60).text
r = re.compile(r'GetJDUserInfoUnion.*?\((.*?)\)')
result = r.findall(resp)
userInfo = json.loads(result[0])
nickname = userInfo['data']['userInfo']['baseInfo']['nickname']
return ck, nickname
except Exception:
context = f"账号{userNum}【{pinName}】Cookie 已失效!请重新获取。"
print(context)
return ck, False
def iscookie(self):
"""
:return: cookiesList,userNameList,pinNameList
"""
cookiesList = []
userNameList = []
pinNameList = []
if 'pt_key=' in cookies and 'pt_pin=' in cookies:
r = re.compile(r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I)
result = r.findall(cookies)
if len(result) >= 1:
print("您已配置{}个账号".format(len(result)))
u = 1
for i in result:
r = re.compile(r"pt_pin=(.*?);")
pinName = r.findall(i)
pinName = unquote(pinName[0])
                # Get the account nickname
ck, nickname = self.getUserInfo(i, pinName, u)
if nickname != False:
cookiesList.append(ck)
userNameList.append(nickname)
pinNameList.append(pinName)
else:
u += 1
continue
u += 1
if len(cookiesList) > 0 and len(userNameList) > 0:
return cookiesList, userNameList, pinNameList
else:
print("没有可用Cookie,已退出")
exit(3)
else:
print("cookie 格式错误!...本次操作已退出")
exit(4)
else:
print("cookie 格式错误!...本次操作已退出")
exit(4)
# Prefer system ENV parameters when available - suitable for AC, cloud services and similar environments
# JD_COOKIE=cookie (separate multiple accounts with &)
if "JD_COOKIE" in os.environ:
if len(os.environ["JD_COOKIE"]) > 10:
cookies = os.environ["JD_COOKIE"]
print("已获取并使用Env环境 Cookie")
if "zlzh" in os.environ:
if len(os.environ["zlzh"]) > 1:
zlzh = os.environ["zlzh"]
zlzh = zlzh.replace('[', '').replace(']', '').replace('\'', '').replace(' ', '').split(',')
print("已获取并使用Env环境 zlzh:", zlzh)
getCk = getJDCookie()
getCk.getCookie()
# Start an assist round
def starAssist(sid, headers):
global aNum
try:
timestamp = int(round(t * 1000))
url = 'https://api.m.jd.com/api?functionId=vvipclub_distributeBean_startAssist&body={%22activityIdEncrypted%22:%22' + sid + '%22,%22channel%22:%22FISSION_BEAN%22}&appid=swat_miniprogram&client=tjj_m&screen=1920*1080&osVersion=5.0.0&networkType=wifi&sdkName=orderDetail&sdkVersion=1.0.0&clientVersion=3.1.3&area=11&fromType=wxapp×tamp=' + str(timestamp)
requests.get(url=url, headers=headers, verify=False, timeout=30).json()
aNum = 0
except Exception as e:
if aNum < 5:
aNum += 1
return starAssist(sid, headers)
else:
print("starAssist Error", e)
exit(1)
#Get the assist code (share id, encrypted pin and assist record id)
def getShareCode(headers):
global assistStartRecordId, encPin, sid, aNum
try:
url = f'https://api.m.jd.com/api?functionId=distributeBeanActivityInfo&fromType=wxapp×tamp={int(round(t * 1000))}'
body = 'body=%7B%22paramData%22%3A%7B%22channel%22%3A%22FISSION_BEAN%22%7D%7D&appid=swat_miniprogram&client=tjj_m&screen=1920*1080&osVersion=5.0.0&networkType=wifi&sdkName=orderDetail&sdkVersion=1.0.0&clientVersion=3.1.3&area=11'
responses = requests.post(url, headers=headers, data=body, verify=False, timeout=30).json()
if responses['success']:
data = responses['data']
sid = data['id']
encPin = data['encPin']
try:
assistStartRecordId = data['assistStartRecordId']
except:
starAssist(sid, header)
return getShareCode(headers)
aNum = 0
return assistStartRecordId, encPin, sid
except Exception as e:
if aNum < 5:
aNum += 1
return getShareCode(headers)
else:
print("getShareCode Error", e)
exit(2)
#Build the request headers
def setHeaders(cookie):
headers = {
'Cookie': cookie,
'content-type': 'application/x-www-form-urlencoded',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip,compress,br,deflate',
'Referer': 'https://servicewechat.com/wxa5bf5ee667d91626/148/page-frame.html',
'Host': 'api.m.jd.com',
'User-Agent': 'Mozilla/5.0 (iPhone CPU iPhone OS 13_7 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.7(0x1800072d) NetType/WIFI Language/zh_CN'
}
return headers
def assist(ck, sid, eid, aid, user, name, a):
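    # ck/user: cookie and nickname of the helping account; eid/aid: encPin and assistStartRecordId of the account being helped; name: target account name; a: helper's index.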
global beanCount
timestamp = int(round(t * 1000))
headers = {
'Cookie': ck + 'wxclient=gxhwx;ie_ai=1;',
'Accept': '*/*',
'Connection': 'keep-alive',
'Referer': 'https://servicewechat.com/wxa5bf5ee667d91626/148/page-frame.html',
'Content-Type': 'application/x-www-form-urlencoded',
'Host': 'api.m.jd.com',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.1(0x1800012a) NetType/WIFI Language/zh_CN',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-cn'
}
    url = 'https://api.m.jd.com/api?functionId=vvipclub_distributeBean_assist&body=%7B%22activityIdEncrypted%22:%22' + sid + '%5Cn%22,%22assistStartRecordId%22:%22' + str(aid) + '%22,%22assistedPinEncrypted%22:%22' + eid + '%5Cn%22,%22channel%22:%22FISSION_BEAN%22%7D&appid=swat_miniprogram&client=tjj_m&screen=1920*1080&osVersion=5.0.0&networkType=wifi&sdkName=orderDetail&sdkVersion=1.0.0&clientVersion=3.1.3&area=1_72_4137_0&fromType=wxapp&timestamp=' + str(timestamp)
resp = requests.get(url, headers=headers, verify=False, timeout=30).json()
if resp['success']:
print(f"用户{a}【{user}】助力【{name}】成功~")
if resp['data']['assistedNum'] == 4:
beanCount += 80
print(f"{name}, 恭喜获得8毛京豆,以到账为准。")
print("## 开启下一轮助力")
starAssist(sid, header)
getShareCode(header)
else:
print(f"用户{a}【{userNameList[a-1]}】没有助力次数了。")
# Start mutual assistance between accounts
def start():
global header,cookiesList, userNameList, pinNameList
print("微信小程序-赚京豆-瓜分助力")
cookiesList, userNameList, pinNameList = getCk.iscookie()
for ckname in zlzh:
try:
ckNum = userNameList.index(ckname)
except Exception as e:
try:
ckNum = pinNameList.index(ckname)
except:
print("请检查助力账号名称是否正确?提示:助力名字可填pt_pin的值、也可以填用户名。")
exit(9)
print(f"### 开始助力账号【{userNameList[int(ckNum)]}】###")
header = setHeaders(cookiesList[int(ckNum)])
getShareCode(header)
starAssist(sid, header)
getShareCode(header)
a = 1
for i, name in zip(cookiesList, userNameList):
if a == ckNum+1:
a += 1
else:
assist(i, sid, encPin, assistStartRecordId, name, userNameList[int(ckNum)], a)
a += 1
if beanCount > 0:
print(f'\n### 本次累计获得{beanCount}京豆')
if __name__ == '__main__':
start() | StarcoderdataPython |
6466924 | # Copyright (C) 2019 <NAME>.L.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import enum
import itertools
import re
import textwrap
from dateutils import DateUtils
from phpreport import PHPReport
from phpreport import TaskFilter
class PeriodOfWork():
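    """Tasks logged during a contiguous range of days, scoped by a TaskFilter."""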
def __init__(self, start_date, num_days, task_filter=TaskFilter(), tasks=None):
self.start_date = start_date
self.num_days = num_days
self.users = set()
self.tasks = tasks
self.task_filter = task_filter.create_same_filter_with_different_dates(
self.start_date,
DateUtils.from_date_offset(self.start_date, num_days - 1))
def get_users(self):
return self.users
def set_tasks(self, tasks):
self.tasks = []
for task in tasks:
self.add_task(task)
def add_task(self, task):
self.tasks.append(task)
self.users.add(task.user)
def add_task_if_starts_in_period(self, task):
ending_date = DateUtils.from_date_offset(self.start_date, self.num_days)
if task.date < self.start_date:
return False
if task.date >= ending_date:
return False
self.add_task(task)
return True
def filter_tasks(self, date=None, day_offset=None,
user=None, only_onsite=False):
if date is None and day_offset is not None:
date = DateUtils.from_date_offset(self.start_date, day_offset)
def filter_task(task):
if user is not None and task.user != user:
return False
if date is not None and not DateUtils.same_date(task.date, date):
return False
if only_onsite and not task.onsite:
return False
return True
return list(filter(filter_task, self.tasks))
def get_all_dates(self):
return [DateUtils.from_date_offset(self.start_date, offset)
for offset in range(0, self.num_days)]
def time_worked(self, date=None, day_offset=None,
user=None, only_onsite=False):
return sum([task.length() for task in
self.filter_tasks(date, day_offset, user, only_onsite)],
datetime.timedelta())
@staticmethod
def fetch_tasks_for_all(periods):
filters = [period.task_filter for period in periods]
tasks = PHPReport.get_tasks_for_task_filters(filters)
for pair in zip(periods, tasks):
pair[0].set_tasks(pair[1])
class WeekOfWork(PeriodOfWork):
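    """A PeriodOfWork spanning the seven days of a single week of the year."""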
def __init__(self, year, week, task_filter=TaskFilter(), tasks=None):
self.week = week
self.year = year
date = DateUtils.from_week_number(year, week)
super(WeekOfWork, self).__init__(date, 7, task_filter, tasks)
def __str__(self):
return "Week %i of %i" % (self.week, self.year)
def short_string(self):
return "Week {}/{} ".format(self.week, self.year)
def wiki_string(self):
return "Week%i-%i" % (self.week, self.year)
@classmethod
def create_array_of_weeks_between_dates(cls, start, end, task_filter):
week_dates = DateUtils.get_weeks_in_date_range(start, end)
weeks = [cls(*DateUtils.year_and_week_number(week_date),
task_filter=task_filter, tasks=[]) for week_date in week_dates]
return weeks
@classmethod
def create_from_string(cls, string, task_filter):
dates = DateUtils.date_range_from_string(string)
weeks = cls.create_array_of_weeks_between_dates(dates[0], dates[1],
task_filter)
cls.fetch_tasks_for_all(weeks)
return weeks
@classmethod
def create_for_entire_project(cls, task_filter):
assert task_filter.project
tasks = PHPReport.get_tasks_for_task_filters([task_filter])
tasks = [item for sublist in tasks for item in sublist]
if not tasks:
return []
first_date = last_date = tasks[0].date
for task in tasks[1:]:
if task.date < first_date:
first_date = task.date
if task.date > last_date:
last_date = task.date
weeks = cls.create_array_of_weeks_between_dates(first_date, last_date,
task_filter)
def add_task_to_weeks(task):
for week in weeks:
if week.add_task_if_starts_in_period(task):
return
raise Exception("Could not assign task to week.")
for task in tasks:
add_task_to_weeks(task)
return weeks
class AggregateReport():
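    """Summarizes total (and onsite) hours worked across several time periods in one table."""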
def __init__(self, time_periods, formatter, header, wiki_string):
self.header = header
self.wiki_string = wiki_string
self.time_periods = time_periods
self.formatter = formatter
self.parent = None
@staticmethod
def generate_report_for_period(period, table_contents):
amount = period.time_worked()
amount_onsite = period.time_worked(only_onsite=True)
hours_worked_string = DateUtils.format_delta_as_hours(amount)
if amount_onsite:
hours_worked_string += " (%s onsite)" % \
DateUtils.format_delta_as_hours(amount_onsite)
table_contents.append([period.short_string(), hours_worked_string])
return (amount, amount_onsite)
def generate_report(self):
self.formatter.generate_header(self.header)
table_contents = []
total = datetime.timedelta()
total_onsite = datetime.timedelta()
for period in self.time_periods:
(time, time_onsite) = AggregateReport.generate_report_for_period(period, table_contents)
total += time
total_onsite += time_onsite
self.formatter.generate_table(table_contents, has_headers=False)
self.formatter.generate_header(
"Total hours worked: %s" % DateUtils.format_delta_as_hours(total))
self.formatter.generate_header(
"Total onsite hours worked: %s" % DateUtils.format_delta_as_hours(total_onsite))
return self.formatter.flatten()
class DetailedReport():
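    """Per-user, per-day breakdown of hours and task descriptions for one time period."""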
def __init__(self, time_period, parent, formatter, include_story=True):
if parent:
header = "{0} for {1}".format(time_period, parent.header)
wiki_string = "{0}-{1}".format(parent.wiki_string, time_period.wiki_string())
else:
header = "{0} for {1}".format(time_period, str(time_period.task_filter))
wiki_string = time_period.wiki_string()
self.header = header
self.wiki_string = wiki_string
self.time_period = time_period
self.formatter = formatter
self.parent = parent
self.include_story = include_story
self.pieces = []
@staticmethod
def format_date(date):
return date.strftime("%d %b")
def time_worked(self, user=None, total=False):
if total:
return [DateUtils.format_delta_as_hours(self.time_period.time_worked(user=user))]
all_dates = self.time_period.get_all_dates()
return [DateUtils.format_delta_as_hours(self.time_period.time_worked(date=x, user=user)) for x in all_dates]
def generate_hours(self):
table = []
table.append([""] + list(map(DetailedReport.format_date, self.time_period.get_all_dates())) + ["Total"])
for user in sorted(self.time_period.get_users()):
table.append([user.login] +
self.time_worked(user=user) +
self.time_worked(user=user, total=True))
table.append(["everyone"] +
self.time_worked() +
self.time_worked(total=True))
self.formatter.generate_table(table)
onsite_time = self.time_period.time_worked(only_onsite=True)
if onsite_time > datetime.timedelta(0):
self.formatter.generate_large_text("Onsite hours worked: %s" % DateUtils.format_delta_as_hours(onsite_time))
def get_stories_for_day_and_user(self, user, date):
tasks_for_day = self.time_period.filter_tasks(date=date, user=user)
def get_story(task):
story = ""
if self.include_story:
story += self.formatter.format_story(task.story)
if story:
story += " "
return story
all_stories = [get_story(task) + task.text for task in tasks_for_day]
# Many times people add a lot of duplicate descriptions. Just output one of each.
all_stories = set(all_stories)
# Strip out duplicated whitespace
return re.compile(r'\s+').sub(' ', " ".join(all_stories)).strip()
def generate_stories_for_user(self, user):
self.formatter.generate_section_header("Stories for %s" % user.login)
all_dates = self.time_period.get_all_dates()
contents = [(date.strftime("%A"), self.get_stories_for_day_and_user(user, date)) for date in all_dates]
self.formatter.generate_aligned_list(contents)
def generate_report(self):
self.pieces = []
self.formatter.generate_header(self.header)
self.generate_hours()
for user in sorted(self.time_period.users):
self.generate_stories_for_user(user)
return self.formatter.flatten()
class TextFormatter():
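    """Renders report pieces as plain text with fixed-width, aligned columns."""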
def __init__(self):
self.pieces = []
def generate_table_row(self, columns, lengths, header=False):
format_string = ""
for length in lengths:
format_string += "%%-%i.%is " % (length, length)
self.pieces.append(format_string % tuple(columns))
self.pieces.append("\n")
@staticmethod
def generate_column_length_list(table):
lengths = [list(map(len, x)) for x in table] # Generate a table of lengths.
# Turn the table of lengths into a row of max lengths for each column.
return list(map(max, list(zip(*lengths))))
def generate_table(self, table, has_headers=True):
if not table:
return
lengths = TextFormatter.generate_column_length_list(table)
self.generate_table_row(table[0], lengths, header=has_headers)
for row in table[1:]:
self.generate_table_row(row, lengths)
def generate_aligned_list(self, contents):
first_column_size = max([len(content[0]) for content in contents])
format_string = "%%%i.%is: %%s\n" % (first_column_size, first_column_size)
indent = (first_column_size + 2) * ' ' # Enough to account for the day name offset.
width = 80 - len(indent)
for content in contents:
second_column = textwrap.fill(content[1],
break_long_words=False, # Don't break URLs.
width=width,
initial_indent=indent,
subsequent_indent=indent).strip()
self.pieces.append(format_string % (content[0], second_column))
def generate_header(self, header):
self.pieces.append("\n%s\n" % header)
def generate_section_header(self, header):
self.pieces.append("\n%s\n" % header)
def generate_large_text(self, text):
self.pieces.append("%s\n" % text)
@classmethod
def format_story(cls, story):
if story:
return "[{}]".format(story)
return ""
def flatten(self):
return "".join(self.pieces)
class TwikiFormatter(TextFormatter):
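    """Renders report pieces using TWiki markup (---++ headers, | cell | tables)."""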
def generate_table_row(self, columns, lengths=None, header=False, highlight_first=True):
first = "| *%s* "
if not highlight_first:
first = "| %s"
if header:
format_string = first + (len(columns) - 2) * " | *%s*" + " | *%s* |"
else:
format_string = first + (len(columns) - 2) * " | %s" + " | %s |"
self.pieces.append(format_string % tuple(columns))
self.pieces.append("\n")
def generate_table(self, table, has_headers=True):
if len(table) < 10 or has_headers:
return super(TwikiFormatter, self).generate_table(table, has_headers)
def chunks_of_n(list_to_chunk, num_chunks):
for i in range(0, len(list_to_chunk), num_chunks):
yield list_to_chunk[i:i + num_chunks]
def transpose_table(table):
return list(map(list, itertools.zip_longest(*table, fillvalue=['', ''])))
table = transpose_table(chunks_of_n(table, 10))
for row in table:
row = sum(row, [])
self.generate_table_row(row, highlight_first=False)
def generate_header(self, header):
self.pieces.append("\n---++%s\n" % header)
def generate_section_header(self, header):
self.pieces.append("\n---++++%s\n" % header)
def generate_aligned_list(self, contents):
for content in contents:
self.pieces.append(" * *%s* - %s\n" % (content[0], content[1]))
class MarkdownFormatter(TextFormatter):
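    """Renders report pieces as Markdown; tables are omitted and lists use bold labels."""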
def generate_table(self, table, has_headers=True):
return ""
def generate_header(self, header):
self.pieces.append("\n# %s\n" % header)
def generate_section_header(self, header):
self.pieces.append("\n## %s\n" % header)
def generate_aligned_list(self, contents):
self.pieces.append("\n")
for content in contents:
self.pieces.append(" * **%s** %s\n" % (content[0], content[1]))
def format_story(self, story):
if story:
return "*{}*".format(story)
return ""
class ReportCreator():
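    """Builds an optional aggregate parent report plus per-period detailed reports from command-line arguments."""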
class Mode(enum.Enum):
PROJECT, AGGREGATE, DETAIL = range(3)
def __init__(self, args):
self.args = args
self.task_filter = args.to_task_filter()
if not args.time:
self.time_periods = WeekOfWork.create_for_entire_project(self.task_filter)
self.mode = ReportCreator.Mode.PROJECT
elif args.time:
self.time_periods = WeekOfWork.create_from_string(args.time, self.task_filter)
if len(self.time_periods) > 1:
self.mode = ReportCreator.Mode.AGGREGATE
else:
self.mode = ReportCreator.Mode.DETAIL
self.parent_report = None
self.reports = []
def formatter(self):
if self.args.formatter == "twiki":
return TwikiFormatter()
if self.args.formatter == "markdown":
return MarkdownFormatter()
return TextFormatter()
def create_parent_report(self):
if self.mode == ReportCreator.Mode.PROJECT:
project = self.task_filter.project
return AggregateReport(self.time_periods, self.formatter(),
project.description,
re.sub(r'[^a-zA-Z0-9]', '', project.description) + "Report")
if self.mode == ReportCreator.Mode.AGGREGATE:
return AggregateReport(self.time_periods, self.formatter(),
"%s to %s for %s" %
(self.time_periods[0],
self.time_periods[-1],
self.time_periods[0].task_filter),
"%sTo%s" %
(self.time_periods[0].wiki_string(),
self.time_periods[-1].wiki_string()))
return None
def create_reports(self):
self.parent_report = self.create_parent_report()
for period in self.time_periods:
self.reports.append(DetailedReport(time_period=period,
parent=self.parent_report,
formatter=self.formatter(),
include_story=self.args.story))
| StarcoderdataPython |
1982098 | import numpy as np
from bioscrape.lineage import LineageModel
from bioscrape.lineage import LineageVolumeSplitter
from bioscrape.lineage import py_SimulateInteractingCellLineage
from bioscrape.lineage import py_SimulateCellLineage
from bioscrape.lineage import py_SimulateSingleCell
from bioscrape.lineage import LineageCSimInterface
from bioscrape.lineage import py_PropagateCells
from bioscrape.lineage import py_SingleCellLineage, py_PropagateInteractingCells
import time as pytime
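# Rate constants: k1 and d drive S production/degradation; kgrow/Kgrow and kdeath/Kdeath are Hill parameters for the growth and death events; g is the linear volume growth rate.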
k1 = 1.1111
kgrow = 33.33
Kgrow = 20.2020
kdeath = 6.06
Kdeath = 5.5050
g = .020202
d = 80.808
species = ["S", "X"]
rxn1 = [[], ["S"], "massaction", {"k":k1}]
rxn2 = [["S"], [], "massaction", {"k":d}]
rxns = [rxn1, rxn2]
x0 = {"S": 0, "X":100}
#Instantiate Model
print("Instantiating Model")
M = LineageModel(species = species, reactions = rxns, initial_condition_dict = x0)
vsplit = LineageVolumeSplitter(M)
#M.create_division_event("division", {}, "massaction", {"k":.1, "species":"S"}, vsplit)
M.create_division_rule("deltaV", {"threshold":1.0}, vsplit)
#M.create_death_event("death", {}, "hillpositive", {"k":kdeath, "s1":"S", "n":2, "K":Kdeath})
M.create_volume_event("linear volume", {"growth_rate":g},
"hillnegative", {"k":kgrow, "s1":"S", "n":2, "K":Kgrow})
M.py_initialize()
global_sync_period = .5
N = 1
sum_list = []
#lineage =
lineage = None
cell_state_samples = None
single_cell_states = None
result = None
lineage_list = None
cell_state_sample_list = None
for i in [1]:
maxtime = i*10
dt = 0.01
timepoints = np.arange(0, maxtime+dt, dt)
print("Beginning Simulation", i, "for", maxtime)
#interface = LineageCSimInterface(M)
    s = pytime.perf_counter()
#lineage = py_SimulateCellLineage(timepoints, Model = M, initial_cell_states = N)
#cell_state_samples, sample_times = py_PropagateCells(timepoints, Model = M, initial_cell_states = N, sample_times = 10)
#single_cell_states = py_SingleCellLineage(timepoints, Model = M)
#lineage_list, global_results, simulator = py_SimulateInteractingCellLineage(timepoints, global_sync_period, model_list = [M],initial_cell_states = [N], global_species = ["S"], global_volume = 100, average_dist_threshold = 10.0)
cell_state_sample_list, global_species_results, sample_times, simulator = py_PropagateInteractingCells(timepoints, global_sync_period, sample_times = 5, model_list = [M],initial_cell_states = [N], global_species = ["S"], global_volume = 0, average_dist_threshold = 10.0)
#result = py_SimulateSingleCell(timepoints[10:], Model = M)
    e = pytime.perf_counter()
print("Simulation", i, "complete in", e-s, "s")
if i > 0:
if lineage_list is not None:
lineage = lineage_list[0]
if lineage is not None:
print("Building Tree")
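            # Group schnitzes into generations: sch_tree[0] holds root cells, sch_tree[j+1] holds children of cells in sch_tree[j].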
sch_tree = [[]]
sch_tree_length = 1
for i in range(lineage.py_size()):
sch = lineage.py_get_schnitz(i)
if sch.py_get_parent() == None:
sch_tree[0].append(sch)
else:
for j in range(len(sch_tree)):
parent = sch.py_get_parent()
if parent in sch_tree[j]:
if len(sch_tree)<= j+1:
sch_tree.append([])
sch_tree_length += 1
sch_tree[j+1].append(sch)
counts = [len(sch_list) for sch_list in sch_tree]
print("counts", counts)
sum_list.append((maxtime, sum(counts), e-s))
if cell_state_sample_list is not None:
cell_state_samples = [s[0] for s in cell_state_sample_list]
if cell_state_samples is not None:
sum_list.append((maxtime, sum([len(l) for l in cell_state_samples]), e-s))
#raise ValueError()
import pylab as plt
print("plotting")
if len(sum_list) > 1:
plt.figure()
plt.subplot(121)
plt.plot([e[0] for e in sum_list], [e[1] for e in sum_list])
plt.xlabel("max simulation time")
plt.ylabel("Total Cells Simulated")
plt.subplot(122)
plt.plot([e[1] for e in sum_list], [e[2] for e in sum_list])
    plt.xlabel("Total Cells Simulated (Lineage)\nOR\nFinal Cells Returned (Propagate)")
plt.ylabel("CPU runtime (s)")
if result is not None:
single_cell_states = result
if single_cell_states is not None:
plt.figure()
plt.subplot(131)
plt.title("volume")
plt.plot(single_cell_states["time"], single_cell_states["volume"])
import pandas as pd
#with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
# print("**volume**\n", single_cell_states["volume"])
# print("**time**\n", single_cell_states["time"])
plt.subplot(132)
plt.title("X")
plt.plot(single_cell_states["time"], single_cell_states["X"])
plt.subplot(133)
plt.title("S")
plt.plot(single_cell_states["time"], single_cell_states["S"])
if cell_state_samples is not None:
plt.figure()
print("len cell_state_samples", len(cell_state_samples), [len(s) for s in cell_state_samples])
ax1, ax2, ax3 = plt.subplot(131), plt.subplot(132), plt.subplot(133)
for i in range(len(sample_times)-1, -1, -1):
volume = [cs.py_get_volume() for cs in cell_state_samples[i]]
S = [cs.py_get_state()[0] for cs in cell_state_samples[i]]
X = [cs.py_get_state()[1] for cs in cell_state_samples[i]]
time = sample_times[i]
plt.sca(ax1)
plt.title("volume histogram")
plt.hist(volume, log = True, label = "Sample Time = "+str(i)+" Cells="+str(len(volume)))
if i == 0: plt.legend()
plt.sca(ax2)
plt.title("X histogram")
plt.hist(X, log = True, label = "Sample Time = "+str(i)+" Cells="+str(len(X)))
if i == 0: plt.legend()
plt.sca(ax3)
plt.title("S histogram")
plt.hist(S, log = True, label = "Sample Time = "+str(i)+" Cells="+str(len(S)))
if i == 0: plt.legend()
if lineage is not None:
color_list = []
for i in range(sch_tree_length):
color_list.append((i/sch_tree_length, 0, 1.-i/sch_tree_length))
import pylab as plt
plt.figure(figsize = (10, 10))
plt.subplot(411)
plt.title(r"$\emptyset \leftrightarrow S$ $P(Grow) = k \frac{1}{S^2+400}$")
plt.plot(range(len(counts)), counts)
plt.ylabel("Cell Count (total ="+str(sum(counts))+")")
plt.xlabel("Generation")
print("sch_tree_length", sch_tree_length)
plt.subplot(412)
plt.ylabel("S per Cell")
for i in range(sch_tree_length):
for sch in sch_tree[i]:
df = sch.py_get_dataframe(Model = M)
plt.plot(df["time"], df["S"], color = color_list[i])
plt.subplot(413)
plt.ylabel("X per cell")
totalX = np.zeros(len(timepoints))
for i in range(sch_tree_length):
for sch in sch_tree[i]:
df = sch.py_get_dataframe(Model = M)
start_ind = np.where(timepoints >= df["time"][0])
start_ind = start_ind[0][0]
end_ind = np.where(timepoints >= df["time"][len(df["time"])-1])[0][0]
plt.plot(df["time"], df["X"], color = color_list[i])
plt.plot(df["time"][len(df["time"])-1], df["X"][len(df["time"])-1], "x", color = color_list[i])
plt.plot(df["time"][0], df["X"][0], "o", color = color_list[i])
#totalX[start_ind:end_ind+1] += df["X"][:len(df["X"])]
#plt.plot(timepoints, totalX, "--", color = "black", label = "total X")
plt.subplot(414)
plt.ylabel("Volume (of each cell)")
for i in range(sch_tree_length):
for sch in sch_tree[i]:
df = sch.py_get_dataframe(Model = M)
plt.plot(df["time"], df["volume"], color = color_list[i])
plt.show()
| StarcoderdataPython |
4856311 | from __future__ import print_function
from typing import List
import logging
import sys
import requests
import time
import swagger_client as cris_client
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(message)s")
SUBSCRIPTION_KEY = "<your subscription key>"
HOST_NAME = "<your region>.cris.ai"
PORT = 443
NAME = "Simple transcription"
DESCRIPTION = "Simple transcription description"
LOCALE = "en-US"
RECORDINGS_BLOB_URI = "<Your SAS Uri to the recording>"
ADAPTED_ACOUSTIC_ID = None # guid of a custom acoustic model
ADAPTED_LANGUAGE_ID = None # guid of a custom language model
def transcribe():
logging.info("Starting transcription client...")
# configure API key authorization: subscription_key
configuration = cris_client.Configuration()
configuration.api_key['Ocp-Apim-Subscription-Key'] = SUBSCRIPTION_KEY
# create the client object and authenticate
client = cris_client.ApiClient(configuration)
# create an instance of the transcription api class
transcription_api = cris_client.CustomSpeechTranscriptionsApi(api_client=client)
# get all transcriptions for the subscription
transcriptions: List[cris_client.Transcription] = transcription_api.get_transcriptions()
logging.info("Deleting all existing completed transcriptions.")
# delete all pre-existing completed transcriptions
# if transcriptions are still running or not started, they will not be deleted
for transcription in transcriptions:
transcription_api.delete_transcription(transcription.id)
logging.info("Creating transcriptions.")
# transcription definition using custom models
transcription_definition = cris_client.TranscriptionDefinition(
name=NAME, description=DESCRIPTION, locale=LOCALE, recordings_url=RECORDINGS_BLOB_URI,
models=[cris_client.ModelIdentity(ADAPTED_ACOUSTIC_ID), cris_client.ModelIdentity(ADAPTED_LANGUAGE_ID)]
)
# comment out the previous statement and uncomment the following to use base models for transcription
# transcription_definition = cris_client.TranscriptionDefinition(
# name=NAME, description=DESCRIPTION, locale=LOCALE, recordings_url=RECORDINGS_BLOB_URI
# )
data, status, headers = transcription_api.create_transcription_with_http_info(transcription_definition)
# extract transcription location from the headers
transcription_location: str = headers["location"]
# get the transcription Id from the location URI
created_transcriptions = list()
created_transcriptions.append(transcription_location.split('/')[-1])
logging.info("Checking status.")
completed, running, not_started = 0, 0, 0
while completed < 1:
# get all transcriptions for the user
transcriptions: List[cris_client.Transcription] = transcription_api.get_transcriptions()
# for each transcription in the list we check the status
for transcription in transcriptions:
if transcription.status == "Failed" or transcription.status == "Succeeded":
# we check to see if it was one of the transcriptions we created from this client
if transcription.id not in created_transcriptions:
continue
completed += 1
if transcription.status == "Succeeded":
results_uri = transcription.results_urls["channel_0"]
results = requests.get(results_uri)
logging.info("Transcription succeeded. Results: ")
logging.info(results.content.decode("utf-8"))
elif transcription.status == "Running":
running += 1
elif transcription.status == "NotStarted":
not_started += 1
logging.info(f"Transcriptions status: {completed} completed, {running} running, {not_started} not started yet")
# wait for 5 seconds
time.sleep(5)
input("Press any key...")
def main():
transcribe()
if __name__ == "__main__":
main()
| StarcoderdataPython |
20125 | <filename>migrations/versions/66d4be40bced_add_attribute_to_handle_multiline_.py
"""Add attribute to handle multiline information
Revision ID: 66d4be40bced
Revises: <PASSWORD>
Create Date: 2018-05-16 12:13:32.023450
"""
import sqlalchemy as sa
from alembic import op
from limonero.migration_utils import is_sqlite
# revision identifiers, used by Alembic.
revision = '66d4be40bced'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
if is_sqlite():
with op.batch_alter_table('data_source') as batch_op:
batch_op.add_column(sa.Column('is_multiline', sa.Boolean(), nullable=False, server_default='0'))
else:
op.add_column('data_source',
sa.Column('is_multiline', sa.Boolean(), nullable=False,
default=0))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
if is_sqlite():
with op.batch_alter_table('data_source') as batch_op:
batch_op.drop_column('is_multiline')
else:
op.drop_column('data_source', 'is_multiline')
# ### end Alembic commands ###
| StarcoderdataPython |
11376136 | from moto.ec2 import models as ec2_models
from moto.ec2.exceptions import InvalidPermissionNotFoundError
from localstack import config
from localstack.services.infra import start_moto_server
def patch_ec2():
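    """Patch moto's EC2 backends so revoke_security_group_egress tolerates rules moto cannot find (e.g. CidrIpv6-only rules, which moto does not support yet)."""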
def patch_revoke_security_group_egress(backend):
revoke_security_group_egress_orig = backend.revoke_security_group_egress
def revoke_security_group_egress(*args, **kwargs):
try:
return revoke_security_group_egress_orig(*args, **kwargs)
except InvalidPermissionNotFoundError:
# this can happen, as CidrIpv6 is not yet supported by moto
if args[4] == []:
return '_ignore_'
return revoke_security_group_egress
for region, backend in ec2_models.ec2_backends.items():
backend.revoke_security_group_egress = patch_revoke_security_group_egress(backend)
def start_ec2(port=None, asynchronous=False, update_listener=None):
patch_ec2()
port = port or config.PORT_EC2
return start_moto_server('ec2', port, name='EC2', asynchronous=asynchronous, update_listener=update_listener)
| StarcoderdataPython |
3486545 | """
Given two integers A and B, print every number from A to B inclusive,
in ascending order if A < B, or in descending order otherwise.
Input format
Two integers are entered.
Output format
Print the answer to the problem.
"""
a, b = int(input()), int(input())
if a < b:
for i in range(a, b + 1):
print(i)
else:
for i in range(a, b - 1, -1):
print(i)
| StarcoderdataPython |
3278410 | <reponame>qGentry/Lematus
import torch
import torch.nn as nn
class EncoderNetwork(nn.Module):
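    """Bidirectional GRU encoder over padded token sequences; returns per-step states plus a projected final hidden state."""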
def __init__(self,
enc_hid_dim: int,
emb_dim: int,
dec_hid_dim: int,
emb_count: int,
dropout: float,
num_layers: int,
device: str = 'cpu'):
self.device = device
super().__init__()
self.embeddings = nn.Embedding(
num_embeddings=emb_count,
embedding_dim=emb_dim,
padding_idx=0,
)
self.rnn = nn.GRU(
input_size=emb_dim,
hidden_size=enc_hid_dim,
dropout=dropout,
bidirectional=True,
batch_first=True,
num_layers=num_layers,
)
self.dropout = nn.Dropout(dropout)
self.fc = nn.Linear(2 * num_layers * enc_hid_dim, dec_hid_dim)
def get_initial_state(self, inp):
shape = self.rnn.get_expected_hidden_size(inp, None)
return torch.zeros(shape).to(self.device)
def forward(self, x):
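        # x: (batch, seq_len) LongTensor of token ids, padded with index 0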
x = x.to(self.device)
lens = (x != 0).sum(dim=1)
x = self.embeddings(x)
packed = torch.nn.utils.rnn.pack_padded_sequence(
x,
lengths=lens,
batch_first=True,
enforce_sorted=False
)
states, last_hidden = self.rnn(packed, self.get_initial_state(x))
states, lens = torch.nn.utils.rnn.pad_packed_sequence(states, batch_first=True)
last_hidden = torch.cat([*last_hidden], dim=1)
last_hidden = self.fc(last_hidden)
last_hidden = torch.tanh(last_hidden)
last_hidden = self.dropout(last_hidden)
return (states, lens), last_hidden
| StarcoderdataPython |
5103443 | import unittest
from test.support import import_module
# Skip tests if _ctypes module was not built.
import_module('_ctypes')
import ctypes.test
def load_tests(*args):
skipped, testcases = ctypes.test.get_tests(ctypes.test, "test_*.py", verbosity=0)
suites = [unittest.makeSuite(t) for t in testcases]
return unittest.TestSuite(suites)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
8130584 | <reponame>jared-hardy/django-permafrost
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.auth.backends import ModelBackend, AllowAllUsersModelBackend, RemoteUserBackend, AllowAllUsersRemoteUserBackend
from django.contrib.sites.models import Site
class GroupSiteModelBackendMixin():
def _get_group_permissions(self, user_obj):
'''
Adds the SiteID for filtering Groups
'''
current_site = Site.objects.get_current()
user_groups_field = get_user_model()._meta.get_field('groups')
user_groups_query = 'group__%s' % user_groups_field.related_query_name()
return Permission.objects.filter(**{user_groups_query: user_obj}).filter(group__permafrost_role__site=current_site) # TODO: Should it return Groups that do not have a Permafrost Role also?
class PermafrostModelBackend(GroupSiteModelBackendMixin, ModelBackend):
'''
Permafrost ModelBackend that takes into account SiteID when filtering on
Group permissions via Permafrost Roles.
'''
pass
class PermafrostAllowAllUsersModelBackend(GroupSiteModelBackendMixin, AllowAllUsersModelBackend):
'''
Permafrost AllowAllUsersModelBackend that takes into account SiteID when filtering on
Group permissions via Permafrost Roles.
'''
pass
class PermafrostRemoteUserBackend(GroupSiteModelBackendMixin, RemoteUserBackend):
'''
Permafrost RemoteUserBackend that takes into account SiteID when filtering on
Group permissions via Permafrost Roles.
'''
pass
class PermafrostAllowAllUsersRemoteUserBackend(GroupSiteModelBackendMixin, AllowAllUsersRemoteUserBackend):
'''
Permafrost AllowAllUsersRemoteUserBackend that takes into account SiteID when filtering on
Group permissions via Permafrost Roles.
'''
pass
| StarcoderdataPython |
1873662 | <reponame>wbhuber/local_swift_branch
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import time
from unittest import main
from uuid import uuid4
from swiftclient import client
from swift.common import direct_client
from swift.obj.diskfile import get_data_dir
from swift.common.exceptions import ClientException
from test.probe.common import kill_server, ReplProbeTest, start_server
from swift.common.utils import readconf
from swift.common.manager import Manager
class TestEmptyDevice(ReplProbeTest):
def _get_objects_dir(self, onode):
device = onode['device']
node_id = (onode['port'] - 6000) / 10
obj_server_conf = readconf(self.configs['object-server'][node_id])
devices = obj_server_conf['app:object-server']['devices']
obj_dir = '%s/%s' % (devices, device)
return obj_dir
def test_main(self):
# Create container
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy':
self.policy.name})
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
cnode = cnodes[0]
obj = 'object-%s' % uuid4()
opart, onodes = self.object_ring.get_nodes(
self.account, container, obj)
onode = onodes[0]
# Kill one container/obj primary server
kill_server(onode['port'], self.port2server, self.pids)
# Delete the default data directory for objects on the primary server
obj_dir = '%s/%s' % (self._get_objects_dir(onode),
get_data_dir(self.policy))
shutil.rmtree(obj_dir, True)
self.assertFalse(os.path.exists(obj_dir))
# Create container/obj (goes to two primary servers and one handoff)
client.put_object(self.url, self.token, container, obj, 'VERIFY')
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != 'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Kill other two container/obj primary servers
# to ensure GET handoff works
for node in onodes[1:]:
kill_server(node['port'], self.port2server, self.pids)
# Indirectly through proxy assert we can get container/obj
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != 'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Restart those other two container/obj primary servers
for node in onodes[1:]:
start_server(node['port'], self.port2server, self.pids)
self.assertFalse(os.path.exists(obj_dir))
# We've indirectly verified the handoff node has the object, but
# let's directly verify it.
# Directly to handoff server assert we can get container/obj
another_onode = self.object_ring.get_more_nodes(opart).next()
odata = direct_client.direct_get_object(
another_onode, opart, self.account, container, obj,
headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
if odata != 'VERIFY':
raise Exception('Direct object GET did not return VERIFY, instead '
'it returned: %s' % repr(odata))
# Assert container listing (via proxy and directly) has container/obj
objs = [o['name'] for o in
client.get_container(self.url, self.token, container)[1]]
if obj not in objs:
raise Exception('Container listing did not know about object')
timeout = time.time() + 5
found_objs_on_cnode = []
while time.time() < timeout:
for cnode in [c for c in cnodes if cnodes not in
found_objs_on_cnode]:
objs = [o['name'] for o in
direct_client.direct_get_container(
cnode, cpart, self.account, container)[1]]
if obj in objs:
found_objs_on_cnode.append(cnode)
if len(found_objs_on_cnode) >= len(cnodes):
break
time.sleep(0.3)
if len(found_objs_on_cnode) < len(cnodes):
missing = ['%s:%s' % (cnode['ip'], cnode['port']) for cnode in
cnodes if cnode not in found_objs_on_cnode]
raise Exception('Container servers %r did not know about object' %
missing)
# Bring the first container/obj primary server back up
start_server(onode['port'], self.port2server, self.pids)
# Assert that it doesn't have container/obj yet
self.assertFalse(os.path.exists(obj_dir))
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
self.assertEquals(err.http_status, 404)
self.assertFalse(os.path.exists(obj_dir))
else:
self.fail("Expected ClientException but didn't get it")
try:
port_num = onode['replication_port']
except KeyError:
port_num = onode['port']
try:
another_port_num = another_onode['replication_port']
except KeyError:
another_port_num = another_onode['port']
# Run object replication for first container/obj primary server
num = (port_num - 6000) / 10
Manager(['object-replicator']).once(number=num)
# Run object replication for handoff node
another_num = (another_port_num - 6000) / 10
Manager(['object-replicator']).once(number=another_num)
# Assert the first container/obj primary server now has container/obj
odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
if odata != 'VERIFY':
raise Exception('Direct object GET did not return VERIFY, instead '
'it returned: %s' % repr(odata))
# Assert the handoff server no longer has container/obj
try:
direct_client.direct_get_object(
another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
self.assertEquals(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
if __name__ == '__main__':
main()
| StarcoderdataPython |
66212 | <gh_stars>0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UserSubscribeWindow.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(764, 567)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.pushButton_subscribe = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_subscribe.setGeometry(QtCore.QRect(490, 380, 112, 32))
self.pushButton_subscribe.setFocusPolicy(QtCore.Qt.StrongFocus)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icon/subscribe.png"), QtGui.QIcon.Disabled, QtGui.QIcon.Off)
self.pushButton_subscribe.setIcon(icon)
self.pushButton_subscribe.setObjectName("pushButton_subscribe")
self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.gridLayoutWidget.setGeometry(QtCore.QRect(20, 20, 401, 511))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.tableWidget_subscribe = QtWidgets.QTableWidget(self.gridLayoutWidget)
self.tableWidget_subscribe.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tableWidget_subscribe.setObjectName("tableWidget_subscribe")
self.tableWidget_subscribe.setColumnCount(8)
self.tableWidget_subscribe.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_subscribe.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_subscribe.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_subscribe.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_subscribe.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_subscribe.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_subscribe.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_subscribe.setHorizontalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_subscribe.setHorizontalHeaderItem(7, item)
self.gridLayout.addWidget(self.tableWidget_subscribe, 1, 0, 1, 1)
self.tableWidget_newspaper = QtWidgets.QTableWidget(self.gridLayoutWidget)
self.tableWidget_newspaper.setInputMethodHints(QtCore.Qt.ImhNone)
self.tableWidget_newspaper.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tableWidget_newspaper.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableWidget_newspaper.setObjectName("tableWidget_newspaper")
self.tableWidget_newspaper.setColumnCount(4)
self.tableWidget_newspaper.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_newspaper.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_newspaper.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_newspaper.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_newspaper.setHorizontalHeaderItem(3, item)
self.gridLayout.addWidget(self.tableWidget_newspaper, 3, 0, 1, 1)
self.label = QtWidgets.QLabel(self.gridLayoutWidget)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.label_2 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QtCore.QRect(500, 70, 241, 171))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.comboBox_newsid = QtWidgets.QComboBox(self.layoutWidget)
self.comboBox_newsid.setObjectName("comboBox_newsid")
self.comboBox_newsid.addItem("")
self.verticalLayout.addWidget(self.comboBox_newsid)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_3 = QtWidgets.QLabel(self.layoutWidget)
self.label_3.setObjectName("label_3")
self.horizontalLayout_2.addWidget(self.label_3)
self.spinBox_count = QtWidgets.QSpinBox(self.layoutWidget)
self.spinBox_count.setObjectName("spinBox_count")
self.horizontalLayout_2.addWidget(self.spinBox_count)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.comboBox_addr = QtWidgets.QComboBox(self.layoutWidget)
self.comboBox_addr.setFocusPolicy(QtCore.Qt.StrongFocus)
self.comboBox_addr.setObjectName("comboBox_addr")
self.comboBox_addr.addItem("")
self.verticalLayout.addWidget(self.comboBox_addr)
self.pushButton_fresh = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_fresh.setGeometry(QtCore.QRect(630, 380, 112, 32))
self.pushButton_fresh.setFocusPolicy(QtCore.Qt.WheelFocus)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/icon/fresh.png"), QtGui.QIcon.Disabled, QtGui.QIcon.Off)
self.pushButton_fresh.setIcon(icon1)
self.pushButton_fresh.setObjectName("pushButton_fresh")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 764, 22))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.tableWidget_subscribe, self.tableWidget_newspaper)
MainWindow.setTabOrder(self.tableWidget_newspaper, self.comboBox_newsid)
MainWindow.setTabOrder(self.comboBox_newsid, self.spinBox_count)
MainWindow.setTabOrder(self.spinBox_count, self.comboBox_addr)
MainWindow.setTabOrder(self.comboBox_addr, self.pushButton_subscribe)
MainWindow.setTabOrder(self.pushButton_subscribe, self.pushButton_fresh)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton_subscribe.setText(_translate("MainWindow", "订阅"))
self.pushButton_subscribe.setShortcut(_translate("MainWindow", "Return"))
item = self.tableWidget_subscribe.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "name"))
item = self.tableWidget_subscribe.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "newspaper"))
item = self.tableWidget_subscribe.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "price"))
item = self.tableWidget_subscribe.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "pay"))
item = self.tableWidget_subscribe.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "unpay"))
item = self.tableWidget_subscribe.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "phone"))
item = self.tableWidget_subscribe.horizontalHeaderItem(6)
item.setText(_translate("MainWindow", "addr"))
item = self.tableWidget_subscribe.horizontalHeaderItem(7)
item.setText(_translate("MainWindow", "subscribe_time"))
item = self.tableWidget_newspaper.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "newsid"))
item = self.tableWidget_newspaper.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "newsname"))
item = self.tableWidget_newspaper.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "price"))
item = self.tableWidget_newspaper.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "style"))
self.label.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\">订阅列表</p></body></html>"))
self.label_2.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\">报刊列表</p></body></html>"))
self.comboBox_newsid.setItemText(0, _translate("MainWindow", "--请选择需要订阅的报刊--"))
self.label_3.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\">选择每期订阅数量</p></body></html>"))
self.comboBox_addr.setItemText(0, _translate("MainWindow", "--请选择订阅地址--"))
self.pushButton_fresh.setText(_translate("MainWindow", "刷新"))
self.pushButton_fresh.setShortcut(_translate("MainWindow", "Ctrl+R"))
import resource_rc
| StarcoderdataPython |
1891367 | <gh_stars>0
#
# Plasma
# Copyright (c) 2020 Homedeck, LLC.
#
from torch import cat, clamp, tensor, Tensor
from ..conversion import rgb_to_yuv, yuv_to_rgb
def contrast (input: Tensor, weight: Tensor) -> Tensor:
"""
Apply contrast adjustment to an image.
Parameters:
input (Tensor): Input RGB image with shape (N,3,H,W) in range [-1., 1.].
weight (Tensor | float): Scalar weight with shape (N,1) in range [-1., 1.].
Returns:
Tensor: Filtered image with shape (N,3,H,W) in range [-1., 1.].
"""
    _, channels, height, width = input.shape
    result = input.flatten(start_dim=1) * (weight + 1.)
    result = result.view(-1, channels, height, width).clamp(min=-1., max=1.)
return result
def saturation (input: Tensor, weight: Tensor) -> Tensor:
"""
Apply saturation adjustment to an image.
Parameters:
input (Tensor): RGB image with shape (N,3,H,W) in range [-1., 1.].
weight (Tensor | float): Scalar weight with shape (N,1) in range [-1., 1.].
Returns:
Tensor: Filtered image with shape (N,3,H,W) in range [-1., 1.].
"""
_, _, height, width = input.shape
yuv = rgb_to_yuv(input)
y, u, v = yuv.split(1, dim=1)
u = u.flatten(start_dim=1) * (weight + 1.)
v = v.flatten(start_dim=1) * (weight + 1.)
u = u.view(-1, 1, height, width)
v = v.view(-1, 1, height, width)
y = y.expand_as(u)
yuv = cat([y, u, v], dim=1)
result = yuv_to_rgb(yuv)
return result
def temperature (input: Tensor, weight: Tensor) -> Tensor:
"""
Apply temperature adjustment to an image.
Parameters:
input (Tensor): Input RGB image with shape (N,3,H,W) in range [-1., 1.].
weight (Tensor | float): Scalar weight with shape (N,1) in range [-1., 1.].
Returns:
Tensor: Filtered image with shape (N,3,H,W) in range [-1., 1.].
"""
_, _, height, width = input.shape
yuv = rgb_to_yuv(input)
y, u, v = yuv.split(1, dim=1)
u = u.flatten(start_dim=1) - 0.1 * weight
v = v.flatten(start_dim=1) + 0.1 * weight
u = u.view(-1, 1, height, width)
v = v.view(-1, 1, height, width)
y = y.expand_as(u)
yuv = cat([y, u, v], dim=1)
result = yuv_to_rgb(yuv)
return result
def tint (input: Tensor, weight: Tensor) -> Tensor:
"""
Apply tint adjustment to an image.
Parameters:
input (Tensor): Input RGB image with shape (N,3,H,W) in range [-1., 1.].
weight (Tensor | float): Scalar weight with shape (N,1) in range [-1., 1.].
Returns:
Tensor: Filtered image with shape (N,3,H,W) in range [-1., 1.].
"""
_, _, height, width = input.shape
yuv = rgb_to_yuv(input)
y, u, v = yuv.split(1, dim=1)
u = u.flatten(start_dim=1) + 0.1 * weight
v = v.flatten(start_dim=1) + 0.1 * weight
u = u.view(-1, 1, height, width)
v = v.view(-1, 1, height, width)
y = y.expand_as(u)
yuv = cat([y, u, v], dim=1)
result = yuv_to_rgb(yuv)
return result | StarcoderdataPython |