id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3530334 | __all__ = ["crc"]
from . import crc
| StarcoderdataPython |
11231545 | import re
import sys
import toml
from collections import namedtuple
# TODO handle [[patch.unused]]
# A fully parsed Cargo.lock: package -> set of dependencies, package -> sha256.
Lock = namedtuple('Lock', ['packages', 'checksums'])
# A single crate: name, version and (possibly None) source.
Package = namedtuple('Package', ['name', 'version', 'source'])
# Where a crate comes from: type ('registry'/'git') plus the value after '+'.
Source = namedtuple('Source', ['type', 'value'])
# One [[package]] table from Cargo.lock.
Entry = namedtuple('Entry', ['package', 'dependencies', 'checksum'])
# One decoded [metadata] key/value pair.
Meta = namedtuple('Meta', ['type', 'value'])
Checksum = namedtuple('Checksum', ['package', 'sha256'])
# Matches 'name [version [(source)]]' strings used in dependency lists.
package_re = re.compile(r'(?P<name>[^ ]+)( (?P<version>[^ ]+)( \((?P<raw_source>[^ ]+)\))?)?')
# Matches 'type+value' source strings, e.g. 'registry+https://...'.
source_re = re.compile(r'(?P<type>[^+]+)\+(?P<value>.+)')
# Matches git sources: URL, optional rev/branch/tag query and the pinned 40-hex commit.
git_re = re.compile(r'(?P<url>[^?]+)(\?(?P<param_key>rev|branch|tag)=(?P<param_value>[^#]+))?#(?P<actual_rev>[0-9a-f]{40})')
# Keys permitted in a [[package]] table; anything else aborts parsing.
allowed_entry_keys = frozenset(('name', 'version', 'source', 'dependencies', 'checksum'))
def parse_entry(d):
    """Decode one ``[[package]]`` table dict into an Entry namedtuple."""
    keys = frozenset(d)
    if not keys.issubset(allowed_entry_keys):
        # Dump the offending keys before the assertion aborts.
        print(keys, file=sys.stderr)
    assert keys.issubset(allowed_entry_keys)
    source = parse_source(d['source']) if 'source' in keys else None
    package = Package(d['name'], d['version'], source)
    # Dependencies are optional; each entry is a 'name version (source)' string.
    dependencies = {parse_package(raw) for raw in d.get('dependencies', ())}
    return Entry(package, dependencies, d.get('checksum'))
def parse_package(s):
    """Parse a 'name [version [(source)]]' package string into a Package."""
    match = package_re.fullmatch(s)
    if match is None:
        # Report the unparsable string before the assertion aborts.
        print('bad', s, file=sys.stderr)
    assert match is not None
    raw = match['raw_source']
    source = parse_source(raw) if raw is not None else None
    return Package(match['name'], match['version'], source)
def parse_source(s):
    """Split a 'type+value' source string into a Source namedtuple."""
    match = source_re.fullmatch(s)
    assert match is not None
    return Source(type=match['type'], value=match['value'])
def parse_meta(k, v):
    """Decode one [metadata] entry; only 'checksum <package>' keys are supported."""
    kind, rest = k.split(' ', 1)
    assert kind == 'checksum'
    return Meta('checksum', Checksum(parse_package(rest), v))
def parse_lock(d):
    """Build a Lock from a toml-decoded Cargo.lock dict.

    Checksums come either from per-package 'checksum' fields or from the
    legacy top-level [metadata] table.
    """
    packages = {}
    checksums = {}
    for raw_entry in d['package']:
        entry = parse_entry(raw_entry)
        packages[entry.package] = entry.dependencies
        if entry.checksum is not None:
            checksums[entry.package] = entry.checksum
    for key, value in d.get('metadata', {}).items():
        meta = parse_meta(key, value)
        assert meta.type == 'checksum'
        checksums[meta.value.package] = meta.value.sha256
    return Lock(packages, checksums)
def emit_package(package):
    """Yield Nix attribute-set lines that fetch one package's source.

    Registry crates become ``fetchCratesIOCrate`` calls; git crates become
    ``fetchGitCrate`` calls. Packages without a source (path deps) emit nothing.
    NOTE(review): reads the module-global ``lock`` (bound at the bottom of the
    script) for checksums instead of taking it as a parameter — confirm before
    reusing this function outside this script.
    """
    if package.source is not None:
        if package.source.type == 'registry':
            # Only the official crates.io index is supported.
            assert package.source.value == 'https://github.com/rust-lang/crates.io-index'
            yield ' "{}-{}" = fetchCratesIOCrate {{'.format(package.name, package.version)
            yield ' name = "{}";'.format(package.name)
            yield ' version = "{}";'.format(package.version)
            yield ' sha256 = "{}";'.format(lock.checksums[package])
            yield ' };'
        elif package.source.type == 'git':
            m = git_re.fullmatch(package.source.value)
            assert m is not None
            # The attribute name carries the commit hash so several revisions
            # of the same crate version can coexist.
            yield ' "{}-{}#{}" = fetchGitCrate {{'.format(package.name, package.version, m['actual_rev']) # HACK
            yield ' name = "{}";'.format(package.name)
            yield ' version = "{}";'.format(package.version)
            yield ' url = "{}";'.format(m['url'])
            yield ' rev = "{}";'.format(m['actual_rev'])
            yield ' param ='
            if m['param_key'] is not None:
                assert m['param_value'] is not None
                yield ' {{ key = "{}"; value = "{}"; }}'.format(m['param_key'], m['param_value'])
            else:
                yield ' null'
            yield ' ;'
            yield ' };'
        else:
            # Unknown source type: fail loudly rather than emit broken Nix.
            assert False
def gen_nix(lock):
    """Yield the lines of a complete Nix expression for the parsed lock file.

    The result has a 'source' set (fetch expressions) and a 'graph' set
    (dependency edges keyed by 'name-version').
    """
    yield '{ fetchCratesIOCrate, fetchGitCrate }:'
    yield ''
    yield '{'
    yield ' source = {'
    for package in lock.packages:
        yield from emit_package(package)
    yield ' };'
    yield ''
    yield ' graph = {'
    for package, deps in lock.packages.items():
        yield f' "{package.name}-{package.version}" = {{'
        for dep in deps:
            yield f' "{dep.name}-{dep.version}" = null;'
        yield ' };'
    yield ' };'
    yield '}'
# with open('Cargo.lock') as f:
#     raw_lock = toml.load(f)
# Read a Cargo.lock from stdin and print the generated Nix expression.
raw_lock = toml.load(sys.stdin)
lock = parse_lock(raw_lock)
for line in gen_nix(lock):
    print(line)
| StarcoderdataPython |
1627778 | import prefect
from prefect import task, Flow
@task
def hello_task():
    """Prefect task that logs a greeting via the flow-context logger."""
    log = prefect.context.get("logger")
    log.info("Hello world!")
# Build a one-task flow and run it immediately when the module executes.
with Flow("hello-flow") as flow:
    hello_task()
flow.run()
| StarcoderdataPython |
11248551 | <filename>melodic/lib/python2.7/dist-packages/rqt_pose_view/pose_view_widget.py
# Copyright (c) 2011, <NAME>, TU Darmstadt
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the TU Darmstadt nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
import os
import rospkg
from python_qt_binding import loadUi
from python_qt_binding.QtCore import Qt, QTimer, qWarning, Slot
from python_qt_binding.QtWidgets import QAction, QMenu, QWidget
import rospy
from rostopic import get_topic_class
from rqt_py_common.topic_helpers import find_slots_by_type_bfs
from tf.transformations import quaternion_matrix, quaternion_about_axis
from geometry_msgs.msg import Quaternion, Pose, Point
from OpenGL.GL import glBegin, glColor3f, glEnd, glLineWidth, glMultMatrixf, glTranslatef, \
glVertex3f, GL_LINES, GL_QUADS
from .gl_widget import GLWidget
# main class inherits from the ui window class
class PoseViewWidget(QWidget):
    """rqt widget that renders a 3D box whose pose follows a dropped-in topic.

    A topic publishing a Pose (or separate Quaternion/Point fields) can be
    dragged onto the GL view; incoming messages then drive the box's
    orientation (and, when available, its position).
    """

    def __init__(self, plugin):
        super(PoseViewWidget, self).__init__()
        rp = rospkg.RosPack()
        ui_file = os.path.join(rp.get_path('rqt_pose_view'), 'resource', 'PoseViewWidget.ui')
        loadUi(ui_file, self)
        self._plugin = plugin
        # Fixed offset so the coordinate axes remain visible before any message arrives.
        self._position = (2.0, 2.0, 2.0)
        # Identity rotation (zero angle about the x axis).
        self._orientation = quaternion_about_axis(0.0, (1.0, 0.0, 0.0))
        self._topic_name = None
        self._subscriber = None
        # create GL view
        self._gl_view = GLWidget()
        self._gl_view.setAcceptDrops(True)
        # backup and replace original paint method
        self._gl_view.paintGL_original = self._gl_view.paintGL
        self._gl_view.paintGL = self._gl_view_paintGL
        # backup and replace original mouse release method
        self._gl_view.mouseReleaseEvent_original = self._gl_view.mouseReleaseEvent
        self._gl_view.mouseReleaseEvent = self._gl_view_mouseReleaseEvent
        # add GL view to widget layout
        self.layout().addWidget(self._gl_view)
        # init and start update timer with 40ms (25fps)
        self._update_timer = QTimer(self)
        self._update_timer.timeout.connect(self.update_timeout)
        self._update_timer.start(40)

    def save_settings(self, plugin_settings, instance_settings):
        """Persist the current camera matrix in the plugin instance settings."""
        view_matrix_string = repr(self._gl_view.get_view_matrix())
        instance_settings.set_value('view_matrix', view_matrix_string)

    def restore_settings(self, plugin_settings, instance_settings):
        """Restore the camera matrix written by save_settings, else use the default view.

        NOTE(review): eval() executes whatever string is stored in the
        settings; acceptable only because settings are trusted local state.
        """
        view_matrix_string = instance_settings.value('view_matrix')
        try:
            view_matrix = eval(view_matrix_string)
        except Exception:
            view_matrix = None
        if view_matrix is not None:
            self._gl_view.set_view_matrix(view_matrix)
        else:
            self._set_default_view()

    def _set_default_view(self):
        """Reset the camera to a canted overview of the scene."""
        self._gl_view.makeCurrent()
        self._gl_view.reset_view()
        self._gl_view.rotate((0, 0, 1), 45)
        self._gl_view.rotate((1, 0, 0), -65)
        self._gl_view.translate((0, -3, -15))

    def update_timeout(self):
        """Timer slot: trigger a repaint of the GL view."""
        self._gl_view.makeCurrent()
        self._gl_view.updateGL()

    def _gl_view_paintGL(self):
        """Replacement paintGL: original painting plus grid, axes and pose box."""
        self._gl_view.paintGL_original()
        self._paintGLGrid()
        self._paintGLCoorsystem()
        self._paintGLBox()

    def _paintGLBox(self):
        """Draw the colored cube translated by _position and rotated by _orientation."""
        # FIXME: add user configurable setting to allow use of translation as well
        self._position = (2.0, 2.0, 2.0)  # Set fixed translation for now
        glTranslatef(*self._position)     # Translate Box
        matrix = quaternion_matrix(self._orientation)  # convert quaternion to translation matrix
        # tf uses row-major while gl expects column-major
        matrix = matrix.transpose()
        glMultMatrixf(matrix)             # Rotate Box
        glBegin(GL_QUADS)                 # Start Drawing The Box
        glColor3f(0.0, 1.0, 0.0)
        glVertex3f(1.0, 1.0, -1.0)        # Top Right Of The Quad (Top)
        glVertex3f(-1.0, 1.0, -1.0)       # Top Left Of The Quad (Top)
        glVertex3f(-1.0, 1.0, 1.0)        # Bottom Left Of The Quad (Top)
        glVertex3f(1.0, 1.0, 1.0)         # Bottom Right Of The Quad (Top)
        glColor3f(0.5, 1.0, 0.5)
        glVertex3f(1.0, -1.0, 1.0)        # Top Right Of The Quad (Bottom)
        glVertex3f(-1.0, -1.0, 1.0)       # Top Left Of The Quad (Bottom)
        glVertex3f(-1.0, -1.0, -1.0)      # Bottom Left Of The Quad (Bottom)
        glVertex3f(1.0, -1.0, -1.0)       # Bottom Right Of The Quad (Bottom)
        glColor3f(0.0, 0.0, 1.0)
        glVertex3f(1.0, 1.0, 1.0)         # Top Right Of The Quad (Front)
        glVertex3f(-1.0, 1.0, 1.0)        # Top Left Of The Quad (Front)
        glVertex3f(-1.0, -1.0, 1.0)       # Bottom Left Of The Quad (Front)
        glVertex3f(1.0, -1.0, 1.0)        # Bottom Right Of The Quad (Front)
        glColor3f(0.5, 0.5, 1.0)
        glVertex3f(1.0, -1.0, -1.0)       # Bottom Left Of The Quad (Back)
        glVertex3f(-1.0, -1.0, -1.0)      # Bottom Right Of The Quad (Back)
        glVertex3f(-1.0, 1.0, -1.0)       # Top Right Of The Quad (Back)
        glVertex3f(1.0, 1.0, -1.0)        # Top Left Of The Quad (Back)
        glColor3f(1.0, 0.5, 0.5)
        glVertex3f(-1.0, 1.0, 1.0)        # Top Right Of The Quad (Left)
        glVertex3f(-1.0, 1.0, -1.0)       # Top Left Of The Quad (Left)
        glVertex3f(-1.0, -1.0, -1.0)      # Bottom Left Of The Quad (Left)
        glVertex3f(-1.0, -1.0, 1.0)       # Bottom Right Of The Quad (Left)
        glColor3f(1.0, 0.0, 0.0)
        glVertex3f(1.0, 1.0, -1.0)        # Top Right Of The Quad (Right)
        glVertex3f(1.0, 1.0, 1.0)         # Top Left Of The Quad (Right)
        glVertex3f(1.0, -1.0, 1.0)        # Bottom Left Of The Quad (Right)
        glVertex3f(1.0, -1.0, -1.0)       # Bottom Right Of The Quad (Right)
        glEnd()                           # Done Drawing The Quad

    def _paintGLGrid(self):
        """Draw a white grid in the z=0 plane around the origin."""
        resolution_millimeters = 1
        gridded_area_size = 100
        glLineWidth(1.0)
        glBegin(GL_LINES)
        glColor3f(1.0, 1.0, 1.0)
        # axes through the origin
        glVertex3f(gridded_area_size, 0, 0)
        glVertex3f(-gridded_area_size, 0, 0)
        glVertex3f(0, gridded_area_size, 0)
        glVertex3f(0, -gridded_area_size, 0)
        num_of_lines = int(gridded_area_size / resolution_millimeters)
        # each iteration draws the +i and -i grid lines in both directions
        for i in range(num_of_lines):
            glVertex3f(resolution_millimeters * i, -gridded_area_size, 0)
            glVertex3f(resolution_millimeters * i, gridded_area_size, 0)
            glVertex3f(gridded_area_size, resolution_millimeters * i, 0)
            glVertex3f(-gridded_area_size, resolution_millimeters * i, 0)
            glVertex3f(resolution_millimeters * (-i), -gridded_area_size, 0)
            glVertex3f(resolution_millimeters * (-i), gridded_area_size, 0)
            glVertex3f(gridded_area_size, resolution_millimeters * (-i), 0)
            glVertex3f(-gridded_area_size, resolution_millimeters * (-i), 0)
        glEnd()

    def _paintGLCoorsystem(self):
        """Draw the coordinate axes: x red, y green, z blue."""
        glLineWidth(10.0)
        glBegin(GL_LINES)
        glColor3f(1.0, 0.0, 0.0)
        glVertex3f(0.0, 0.0, 0.0)
        glVertex3f(1.0, 0.0, 0.0)
        glColor3f(0.0, 1.0, 0.0)
        glVertex3f(0.0, 0.0, 0.0)
        glVertex3f(0.0, 1.0, 0.0)
        glColor3f(0.0, 0.0, 1.0)
        glVertex3f(0.0, 0.0, 0.0)
        glVertex3f(0.0, 0.0, 1.0)
        glEnd()

    def _gl_view_mouseReleaseEvent(self, event):
        """Replacement mouse handler: right-click opens a 'Reset view' menu."""
        if event.button() == Qt.RightButton:
            menu = QMenu(self._gl_view)
            action = QAction(self._gl_view.tr("Reset view"), self._gl_view)
            menu.addAction(action)
            action.triggered.connect(self._set_default_view)
            menu.exec_(self._gl_view.mapToGlobal(event.pos()))

    @Slot('QDragEnterEvent*')
    def dragEnterEvent(self, event):
        """Accept the drag only if it carries a topic with usable pose data."""
        if event.mimeData().hasText():
            topic_name = str(event.mimeData().text())
            if len(topic_name) == 0:
                qWarning('PoseViewWidget.dragEnterEvent(): event.mimeData() text is empty')
                return
        else:
            # Fall back to the topic name stored in the dragged tree item.
            if not hasattr(event.source(), 'selectedItems') or len(event.source().selectedItems()) == 0:
                qWarning('PoseViewWidget.dragEnterEvent(): event.source() has no attribute selectedItems or length of selectedItems is 0')
                return
            item = event.source().selectedItems()[0]
            topic_name = item.data(0, Qt.UserRole)
            if topic_name is None:
                qWarning('PoseViewWidget.dragEnterEvent(): selectedItem has no UserRole data with a topic name')
                return
        # check for valid topic
        msg_class, self._topic_name, _ = get_topic_class(topic_name)
        if msg_class is None:
            qWarning('PoseViewWidget.dragEnterEvent(): No message class was found for topic "%s".' % topic_name)
            return
        # check for valid message class
        quaternion_slot_path, point_slot_path = self._get_slot_paths(msg_class)
        if quaternion_slot_path is None and point_slot_path is None:
            qWarning('PoseViewWidget.dragEnterEvent(): No Pose, Quaternion or Point data was found outside of arrays in "%s" on topic "%s".'
                     % (msg_class._type, topic_name))
            return
        event.acceptProposedAction()

    @Slot('QDropEvent*')
    def dropEvent(self, event):
        """Subscribe to the dropped topic, replacing any previous subscription."""
        if event.mimeData().hasText():
            topic_name = str(event.mimeData().text())
        else:
            dropped_item = event.source().selectedItems()[0]
            topic_name = str(dropped_item.data(0, Qt.UserRole))
        self._unregister_topic()
        self._subscribe_topic(topic_name)

    def _unregister_topic(self):
        """Unregister the active subscriber, if any."""
        # NOTE(review): self._subscriber is not reset to None afterwards; this
        # looks harmless because _subscribe_topic overwrites it — confirm.
        if self._subscriber:
            self._subscriber.unregister()

    @staticmethod
    def _make_path_list_from_path_string(path):
        """Split a '/'-separated slot path into a list; '' becomes []."""
        path = path.split('/')
        if path == ['']:
            return []
        return path

    @staticmethod
    def _get_slot_paths(msg_class):
        """Return (quaternion_slot_path, point_slot_path) lists for msg_class, or Nones."""
        # find first Pose in msg_class
        pose_slot_paths = find_slots_by_type_bfs(msg_class, Pose)
        for path in pose_slot_paths:
            # make sure the path does not contain an array, because we don't want to deal with empty arrays...
            if '[' not in path:
                # NOTE(review): uses pose_slot_paths[0], not the loop's `path`;
                # only equivalent when the first array-free path is also the
                # first path overall — confirm this is intended.
                path = PoseViewWidget._make_path_list_from_path_string(pose_slot_paths[0])
                return path + ['orientation'], path + ['position']
        # if no Pose is found, find first Quaternion and Point
        quaternion_slot_paths = find_slots_by_type_bfs(msg_class, Quaternion)
        for path in quaternion_slot_paths:
            if '[' not in path:
                quaternion_slot_path = PoseViewWidget._make_path_list_from_path_string(path)
                break
        else:
            quaternion_slot_path = None
        point_slot_paths = find_slots_by_type_bfs(msg_class, Point)
        for path in point_slot_paths:
            if '[' not in path:
                point_slot_path = PoseViewWidget._make_path_list_from_path_string(path)
                break
        else:
            point_slot_path = None
        return quaternion_slot_path, point_slot_path

    def _subscribe_topic(self, topic_name):
        """Subscribe to topic_name, routing messages to message_callback."""
        msg_class, self._topic_name, _ = get_topic_class(topic_name)
        quaternion_slot_path, point_slot_path = self._get_slot_paths(msg_class)
        self._subscriber = rospy.Subscriber(
            self._topic_name,
            msg_class,
            self.message_callback,
            callback_args=(quaternion_slot_path, point_slot_path)
        )

    def message_callback(self, message, slot_paths):
        """Extract orientation and position from an incoming message via the slot paths."""
        quaternion_slot_path = slot_paths[0]
        point_slot_path = slot_paths[1]
        if quaternion_slot_path is None:
            # no orientation available: fall back to identity rotation
            self._orientation = quaternion_about_axis(0.0, (1.0, 0.0, 0.0))
        else:
            orientation = message
            for slot_name in quaternion_slot_path:
                orientation = getattr(orientation, slot_name)
            self._orientation = (orientation.x, orientation.y, orientation.z, orientation.w)
        if point_slot_path is None:
            # if no point is given, set it to a fixed offset so the axes can be seen
            self._position = (2.0, 2.0, 2.0)
        else:
            position = message
            for slot_name in point_slot_path:
                position = getattr(position, slot_name)
            self._position = (position.x, position.y, position.z)

    def shutdown_plugin(self):
        """Plugin teardown: drop the active subscription."""
        self._unregister_topic()
| StarcoderdataPython |
11367907 | <gh_stars>0
from flask import Blueprint, g, request
from flask import current_app as app
import kubecortex_backend.helpers.prometheus_helper as prometheus_helper
import json
import os
# Blueprint registered by the app factory; all routes below attach to it.
main = Blueprint('main', __name__)
# Prometheus endpoint injected via environment; fails fast (KeyError) if unset.
prometheus_host = os.environ['PROMETHEUS_HOST']
@main.route('/pods')
def pods():
    """Return the pod list as JSON, optionally sorted, filtered and
    namespace-blacklisted via query parameters.

    Query args: sort_by (default 'az'), filter_key/filter_value (pods whose
    filter_key equals filter_value are dropped), namespace_blacklist.
    """
    try:
        pod_list = prometheus_helper.get_pod_list(prometheus_host)
        sort_by = request.args.get('sort_by', default='az')
        filter_key = request.args.get('filter_key')
        filter_value = request.args.get('filter_value')
        namespace_blacklist = request.args.get('namespace_blacklist')
        result = sorted(pod_list, key=lambda pod: pod[sort_by])
        if filter_key is not None and filter_value is not None:
            # keep only pods that do NOT match the filter value
            result = [pod for pod in result if pod[filter_key] != filter_value]
        if namespace_blacklist:
            result = [pod for pod in result if not pod['namespace'] in namespace_blacklist]
        return json.dumps(result)
    except Exception as ex:
        print(ex)
        return str(ex), 500
#@main.route('/metrics')
#def metrics():
# pod_name = request.args.get('podname')
# rs = client.query('SELECT cpu_usage_nanocores / 1000000 FROM kubernetes_pod_container WHERE pod_name = \'{0}\' AND time > now() - 1h GROUP BY pod_name'.format(pod_name))
# cpu_points = list(rs.get_points())
# rs = client.query('SELECT memory_usage_bytes FROM kubernetes_pod_container WHERE pod_name = \'{0}\' AND time > now() - 1h GROUP BY pod_name'.format(pod_name))
# memory_points = list(rs.get_points())
# return json.dumps({
# "cpu": cpu_points,
# "memory": memory_points
# })
@main.after_request
def apply_cors_header(response):
    """Allow cross-origin requests from any domain on every response."""
    headers = response.headers
    headers["Access-Control-Allow-Origin"] = "*"
    return response
| StarcoderdataPython |
351960 | <filename>app/schema/answers/percentage_answer.py
from app.schema.answer import Answer
from app.schema.widgets.percentage_widget import PercentageWidget
from app.validation.percentage_type_check import PercentageTypeCheck
class PercentageAnswer(Answer):
    """An Answer rendered with a percentage widget and percentage validation."""

    def __init__(self, answer_id=None):
        super().__init__(answer_id)
        # Validate the input as a percentage and render with the matching widget.
        self.type_checkers.append(PercentageTypeCheck())
        self.widget = PercentageWidget(self.id)

    @staticmethod
    def _cast_user_input(user_input):
        """Interpret the raw user input as an integer percentage."""
        return int(user_input)
| StarcoderdataPython |
4920574 | # @author: snxq
import datetime
import os
import os.path
import shutil
import sys
import time
"""
照片存档
参数:源文件目录,存档目录
"""
def existsOrCreate(path):
    """Create *path* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` so a directory created between the old
    existence check and ``makedirs`` can no longer raise FileExistsError.
    """
    os.makedirs(path, exist_ok=True)
def archive(src_path, save_path):
    """Copy every file under *src_path* into ``save_path/<YYYYMMDD>/``,
    bucketed by each file's last-modified date (local time).

    Files already present at the destination are skipped, so repeated
    runs are idempotent.
    """
    count = 0
    for dirpath, _dirnames, filenames in os.walk(src_path):
        for filename in filenames:
            count += 1
            src_file = os.path.join(dirpath, filename)
            last_modified = os.path.getmtime(src_file)
            last_modified_datetime = datetime.datetime.fromtimestamp(last_modified)
            dst_filepath = os.path.join(
                save_path, last_modified_datetime.strftime('%Y%m%d')
            )
            dst_file = os.path.join(dst_filepath, filename)
            # exist_ok avoids the check-then-create race of the old helper
            # and removes the dependency on existsOrCreate().
            os.makedirs(dst_filepath, exist_ok=True)
            if not os.path.exists(dst_file):
                shutil.copyfile(src_file, dst_file)
                print(f'{count}\t(unknown) copy completed.')
            else:
                print(f'{count}\t(unknown) already exists.')
if __name__ == "__main__":
    # CLI: archive.py <source_dir> <destination_dir>; defaults are used when
    # either argument is missing.
    try:
        src_path = sys.argv[1]
        save_path = sys.argv[2]
    except IndexError:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # only the missing-argument case should trigger the defaults.
        # expanduser() is required: a literal '~/...' path would create a
        # directory actually named '~'.
        src_path = os.path.expanduser('~/100D3400')
        save_path = os.path.expanduser('~/Archiving')
    archive(src_path, save_path)
| StarcoderdataPython |
6685252 | # -*- coding: utf-8 -*-
import os
import json
from zipfile import ZipFile
from gluon.contrib.markdown import markdown2
import libConCoct.concoct
@auth.requires_login()
def view():
    """
    Shows all tasks if no argument is given or details of a specific task.
    /task/view/[task id] -> view detailed information about a specific task
    """
    if request.args:
        # check if argument is valid integer number
        try:
            task_to_be_shown = int(request.args[0])
        except ValueError:
            raise HTTP(404, T('Invalid argument given.'))
        # check whether argument is a valid task id
        row = db(db.Tasks.id == task_to_be_shown)
        if not row:
            raise HTTP(404, T('Invalid task id given.'))
        task_data_path = row.select().first()['DataPath']
        task_description_path = os.path.join(task_data_path, 'description.md')
        # render the task's Markdown description to embeddable HTML
        with open(task_description_path, 'r') as task_description:
            description = XML(markdown2.markdown(task_description.read()))
        # navigation and action buttons for the detail page
        # NOTE(review): args=(task_to_be_shown) is NOT a tuple (missing comma);
        # web2py's URL() appears to accept a scalar here — confirm.
        back_button = A(T('Back'), _href=URL(f='list'), _class='btn btn-primary', _id='back_button')
        submit_entry_button = A(T('Submit entry'), _href=URL(c='entry',f='add', args=(task_to_be_shown)),
                                _class='btn btn-primary', _id='submit_entry_button')
        open_empty_file_button = A(T('Open empty file'), _href=URL(c='default', f='codeeditor', args=(task_to_be_shown,)),
                                   _class='btn btn-primary', _id='open_new_button')
        open_empty_project_button = A(T('Open empty CodeBlocks project'), _href=URL(c='task', f='download_project', args=(task_to_be_shown,)),
                                      _class='btn btn-primary', _id='open_project_button-{}'.format(task_to_be_shown))
        # summary badges: entry count links to the entry list, build counts are static
        statistics = []
        statistics.append(DIV(A(T('Submitted entries: '), SPAN('{}'.format(count_entries(task_to_be_shown)), _class='badge'),
                                _id='submitted_entries_badge', _href=URL(c='entry',f='list', args=(task_to_be_shown))), _class='btn btn-primary'))
        statistics.append(DIV(T('Executed builds: '), SPAN('{}'.format(count_executed_builds(task_to_be_shown)), _class='badge'), _class='btn btn-primary'))
        statistics.append(DIV(T('Successful builds: '), SPAN('{}'.format(count_successful_builds(task_to_be_shown)), _class='badge'), _class='btn btn-primary'))
        statistics = DIV(*statistics)
        return dict(description=description, back_button=back_button, statistics=statistics,
                    submit_entry_button=submit_entry_button, open_empty_file_button=open_empty_file_button,
                    open_empty_project_button=open_empty_project_button, task_name=row.select().first()['Name'])
    else:
        raise HTTP(404, T('No task number given.'))
@auth.requires_login()
def list():
    """
    Lists all tasks with their description embedded and buttons to show the
    details, open the source file, create a CodeBlocks project or to upload a
    source file as solution.
    /task/list -> lists all tasks
    """
    task_links_list = []
    script_parts_list = ''
    for task in db(db.Tasks.id > 0).select():
        # build panel header for each task including the button group displayed
        # on the right side
        current_description_id = 'description-{}'.format(task.id)
        current_title_id = 'tasktitle-{}'.format(task.id)
        current_title_text = H3(T('Task: {}').format(task.Name), _class='panel-title pull-left')
        view_current_task_button = A(T('View task'), _href=URL(c='task', f='view', args=(task.id,)),
                                     _class='btn btn-primary', _id='view_button-{}'.format(task.id))
        upload_entry_for_task_button = A(T('Submit entry'), _href=URL(c='entry', f='add', args=(task.id,)),
                                         _class='btn btn-primary', _id='submit_button-{}'.format(task.id))
        open_empty_file_button = A(T('Open empty file'), _href=URL(c='default', f='codeeditor', args=(task.id,)),
                                   _class='btn btn-primary', _id='open_button-{}'.format(task.id))
        open_empty_project_button = A(T('Open empty CodeBlocks project'), _href=URL(c='task', f='download_project', args=(task.id,)),
                                      _class='btn btn-primary', _id='open_project_button-{}'.format(task.id))
        button_group = DIV(view_current_task_button, open_empty_project_button, open_empty_file_button, upload_entry_for_task_button, _class='btn-group pull-right')
        task_link = DIV(DIV(current_title_text), DIV(button_group), _id=current_title_id, _class='panel-heading clearfix')
        task_description_path = os.path.join(task.DataPath, 'description.md')
        # build panel body containing task description
        # (the name task_description is rebound from file handle to DIV;
        # harmless since the file has been fully read at that point)
        with open(task_description_path, 'r') as task_description:
            task_description = DIV(XML(markdown2.markdown(task_description.read())), _id=current_description_id, _class='panel-body')
        task_links_list.append(DIV(task_link, task_description, _class='panel panel-default'))
        # deactivate task descriptions by default and toggle them by clicking
        script_parts_list += '$("#{descID}").hide();'.format(descID=current_description_id)
        script_parts_list += '$("#{titleID}").click(function(){{ $("#{descID}").slideToggle(); }});'.format(titleID=current_title_id, descID=current_description_id)
    task_table = DIV(task_links_list, _id='task_table')
    # inline JS: per-task toggle handlers are appended via {moreScript}
    script = SCRIPT("""
    function onclickTask(id) {{
        alert(id);
        //$("#upload_Task").empty();
        //ajax('', ['Teacher'], ':eval');
    }};
    {moreScript}
    """.format(moreScript=script_parts_list))
    return dict(task_table=task_table, script=script)
def count_entries(task_id):
    """Number of submitted entries recorded for the given task."""
    return db(db.Entries.Task == task_id).count()
def count_executed_builds(task_id):
    """Number of build runs recorded for the given task."""
    return db(db.Builds.Task == task_id).count()
def count_successful_builds(task_id):
    """Count distinct build reports for *task_id* in which no CUnit test failed.

    NOTE(review): a build with a report that lacks a cunit/tests section keeps
    build_successful == True and is counted as successful; only a missing
    report marks the build failed — confirm this is the intended semantics.
    """
    builds = db(db.Builds.Task == task_id).select(db.Builds.Report, distinct=True)
    count_successful = 0
    for build in builds:
        build_successful = True
        if build['Report']:
            report = json.loads(build['Report'])
            if 'cunit' in report and 'tests' in report['cunit']:
                for suite in report['cunit']['tests']:
                    # each suite maps test names to pass/fail booleans
                    suite = report['cunit']['tests'][suite]
                    for test in suite:
                        if not suite[test]:
                            build_successful = False
        else:
            build_successful = False
        if build_successful:
            count_successful += 1
    return count_successful
def validate_task_id(task_id):
    """
    Validates a given task number. The given id can be directly taken from the
    arguments of the request (e.g. request.args[0]). This function checks
    whether the task id is really a integer and if it is inside the database.
    Furthermore it checks if the current user is authorized to handle this task.
    In case of errors, the response is an 404 error with a error message.
    :param task_id: task id to be validated
    :returns: task from database as Row object
    """
    # check if argument is valid integer number
    try:
        # BUG FIX: previously parsed request.args[0] directly, silently
        # ignoring the task_id parameter the caller passed in.
        task_as_int = int(task_id)
    except ValueError:
        raise HTTP(404, T('Invalid argument given.'))
    # validate task number against database
    task_from_db = db(db.Tasks.id == task_as_int).select().first()
    if not task_from_db:
        raise HTTP(404, T('Invalid task id given.'))
    # TODO Check authorization of user for this task.
    return task_from_db
@auth.requires_login()
def download_project():
    """Build and stream an empty CodeBlocks project ZIP for a task.

    /task/download_project/[task id] -> returns the ZIP as an attachment.
    """
    if request.args:
        task_from_db = validate_task_id(request.args[0])
        data_path = task_from_db['DataPath']
        # TODO Refactor the project creation to separate module.
        t = libConCoct.concoct.Task(data_path)
        with open(os.path.join(data_path, 'config.json'), 'r') as config_file:
            task_config = json.load(config_file)
        # seed the project with the first student-facing source file
        solution_file = os.path.join(data_path, 'src', task_config['files_student'][0])
        s = libConCoct.concoct.Solution(t, (solution_file, ))
        p = t.get_main_project(s)
        # name the archive '<task>_<YYYY-MM-DD>.zip' and stage it under private/
        current_date = datetime.datetime.now().strftime('%Y-%m-%d')
        zip_file_name = '{}_{}.zip'.format(task_from_db['Name'], current_date)
        project_zip_file = p.create_cb_project(file_name=os.path.join(request.folder, 'private', zip_file_name))
        return response.stream(project_zip_file, chunk_size=2**14, attachment=True, filename=zip_file_name)
    else:
        raise HTTP(404, T('No task number given.'))
@auth.requires_membership('teacher')
def add():
    """
    Adds a new task to the system. The task consists of a single ZIP file
    containing a single directory with the tasks data.
    Task data:
    - description.md -> task description and information about functions that should be implemented
    - config.json -> task configuration, e.g. information what libraries to link against
    - src/main.c -> main program to run the functions that should be implemented
    - src/tests.c -> unit tests to check if task was sucessfully completed
    - src/solution.c -> code file that should be implemented
    - src/solution.h -> header with prototypes of the functions that should be implemented
    /task/add -> add a new task by uploading a ZIP file with all necessary files
    """
    form = SQLFORM(db.Tasks)
    # validate and process the form
    if form.process().accepted:
        # unpack the uploaded archive; returns ('', '') on failure
        name, data_path = store_task_archive(response)
        if data_path:
            response.flash = T('Task submitted!')
            # store task directory path in database
            #if form.vars.id:
            new_task_entry = db(db.Tasks.id == form.vars.id).select().first()
            new_task_entry.update_record(DataPath=data_path)
            new_task_entry.update_record(Name=name)
        else:
            response.flash = T('Task could not be submitted!')
    return locals()
def store_task_archive(response):
    """
    Stores a task inside a given ZIP file on disk.
    If the task archive contains more than one directory or if some of the
    necessary files are missing, no files will be written to disk.
    Returns an tuple with empty strings if task archive was not valid or
    another error occured. Otherwise the name and path of the new task is
    returned.
    """
    task_name = ''
    new_task_directory_path = ''
    with ZipFile(request.vars.SubmittedTask.file) as task_archive:
        # find common prefix of all paths in ZIP file
        task_name = os.path.commonprefix([x.filename for x in task_archive.infolist()]).replace('/', '')
        # a task archive can only contain a single task directory!!!
        if not task_name:
            return ('', '')
        # check if task directory already exists (task name must be unique!!!!)
        tasks_store_path = os.path.join(request.folder, 'private/tasks/')
        new_task_directory_path = os.path.join(tasks_store_path, task_name)
        if os.path.exists(new_task_directory_path):
            # when task is already on the server, do not unzip again
            # TODO: Handle replacing of a task with a newer version with the same name.
            return ('', '')
        os.mkdir(new_task_directory_path)
        # only the two metadata files plus everything under src/ is extracted
        task_archive.extract(task_name + '/description.md', path=tasks_store_path)
        task_archive.extract(task_name + '/config.json', path=tasks_store_path)
        for filename in task_archive.namelist():
            #source_store_path = os.path.join(tasks_store_path, 'src')
            if filename.startswith(task_name + '/src/'):
                task_archive.extract(filename, path=tasks_store_path)
        # create directory to store submissions
        os.mkdir(os.path.join(new_task_directory_path, 'submissions'))
    return task_name, new_task_directory_path
| StarcoderdataPython |
3553434 | <filename>src/labels/github.py
import logging
from typing import Any, Dict, List, Optional, Tuple
import attr
import requests
from labels.exceptions import GitHubException
@attr.s(auto_attribs=True, frozen=True)
class Repository:
    """Represents a GitHub repository."""

    # Account or organization that owns the repository.
    owner: str
    # Repository name without the owner prefix.
    name: str
def not_read_only(attr: attr.Attribute, value: Any) -> bool:
    """Attrs filter that keeps only fields without a leading underscore.

    Note: the first parameter deliberately shadows the ``attr`` module
    inside this function body.
    """
    return attr.name[:1] != "_"
@attr.s(auto_attribs=True, frozen=True)
class Label:
    """Represents a GitHub issue label."""

    color: str
    name: str
    description: str = ""
    # Read-only attributes
    # (leading underscore marks them read-only; ``not_read_only`` filters
    # them out of the params_* views that are sent back to the API)
    _default: bool = False
    _id: int = 0
    _node_id: str = ""
    _url: str = ""

    @property
    def params_dict(self) -> Dict[str, Any]:
        """Return label parameters as a dict."""
        return attr.asdict(self, recurse=True, filter=not_read_only)

    @property
    def params_tuple(self) -> Tuple[Any, ...]:
        """Return label parameters as a tuple."""
        return attr.astuple(self, recurse=True, filter=not_read_only)
class Client:
    """Minimal GitHub REST v3 client for issue-label operations."""

    # Root of the GitHub API, e.g. 'https://api.github.com'.
    base_url: str
    # Authenticated requests session shared by all calls.
    session: requests.Session
def __init__(
self, auth: requests.auth.AuthBase, base_url: str = "https://api.github.com"
) -> None:
self.base_url = base_url
self.session = requests.Session()
self.session.auth = auth
    def list_labels(self, repo: Repository) -> List[Label]:
        """Return the list of Labels from the repository.

        GitHub API docs:
        https://developer.github.com/v3/issues/labels/#list-all-labels-for-this-repository
        """
        logger = logging.getLogger("labels")
        logger.debug(f"Requesting labels for {repo.owner}/{repo.name}")
        headers = {"Accept": "application/vnd.github.symmetra-preview+json"}
        response = self.session.get(
            f"{self.base_url}/repos/{repo.owner}/{repo.name}/labels", headers=headers
        )
        if response.status_code != 200:
            raise GitHubException(
                f"Error retrieving labels: "
                f"{response.status_code} - "
                f"{response.reason}"
            )
        repo_labels: List[Dict] = response.json()
        # Follow the pagination 'next' links exposed by requests until exhausted.
        next_page: Optional[Dict] = response.links.get("next", None)
        while next_page is not None:
            logger.debug("Requesting next page of labels")
            response = self.session.get(next_page["url"], headers=headers)
            if response.status_code != 200:
                raise GitHubException(
                    f"Error retrieving next page of labels: "
                    f"{response.status_code} - "
                    f"{response.reason}"
                )
            repo_labels.extend(response.json())
            next_page = response.links.get("next", None)
        return [Label(**label) for label in repo_labels]
def get_label(self, repo: Repository, *, name: str) -> Label:
"""Return a single Label from the repository.
GitHub API docs:
https://developer.github.com/v3/issues/labels/#get-a-single-label
"""
logger = logging.getLogger("labels")
logger.debug(f"Requesting label '{name}' for {repo.owner}/{repo.name}")
response = self.session.get(
f"{self.base_url}/repos/{repo.owner}/{repo.name}/labels/{name}",
headers={"Accept": "application/vnd.github.symmetra-preview+json"},
)
if response.status_code != 200:
raise GitHubException(
f"Error retrieving label {name}: "
f"{response.status_code} - "
f"{response.reason}"
)
return Label(**response.json())
def create_label(self, repo: Repository, *, label: Label) -> Label:
"""Create a new Label for the repository.
GitHub API docs:
https://developer.github.com/v3/issues/labels/#create-a-label
"""
logger = logging.getLogger("labels")
logger.debug(f"Creating label '{label.name}' for {repo.owner}/{repo.name}")
response = self.session.post(
f"{self.base_url}/repos/{repo.owner}/{repo.name}/labels",
headers={"Accept": "application/vnd.github.symmetra-preview+json"},
json=label.params_dict,
)
if response.status_code != 201:
raise GitHubException(
f"Error creating label {label.name}: "
f"{response.status_code} - "
f"{response.reason}"
)
return Label(**response.json())
def edit_label(self, repo: Repository, *, name: str, label: Label) -> Label:
"""Update a GitHub issue label.
GitHub API docs:
https://developer.github.com/v3/issues/labels/#update-a-label
"""
logger = logging.getLogger("labels")
logger.debug(f"Editing label '{name}' for {repo.owner}/{repo.name}")
response = self.session.patch(
f"{self.base_url}/repos/{repo.owner}/{repo.name}/labels/{name}",
headers={"Accept": "application/vnd.github.symmetra-preview+json"},
json=label.params_dict,
)
if response.status_code != 200:
raise GitHubException(
f"Error editing label {name}: "
f"{response.status_code} - "
f"{response.reason}"
)
return Label(**response.json())
def delete_label(self, repo: Repository, *, name: str) -> None:
"""Delete a GitHub issue label.
GitHub API docs:
https://developer.github.com/v3/issues/labels/#delete-a-label
"""
logger = logging.getLogger("labels")
logger.debug(f"Deleting label '{name}' for {repo.owner}/{repo.name}")
response = self.session.delete(
f"{self.base_url}/repos/{repo.owner}/{repo.name}/labels/{name}"
)
if response.status_code != 204:
raise GitHubException(
f"Error deleting label {name}: "
f"{response.status_code} - "
f"{response.reason}"
)
| StarcoderdataPython |
5173589 | # -*- coding: utf-8 -*-
def sum_pows(a: int, b: int, c: int, d: int) -> int:
    """Return a**b + c**d using arbitrary-precision integers.

    >>> sum_pows(9, 29, 7, 27)
    4710194409608608369201743232
    """
    return pow(a, b) + pow(c, d)
if __name__ == '__main__':
    # Read the four integers, one per line, from stdin.
    a, b, c, d = (int(input()) for _ in range(4))
    print(sum_pows(a, b, c, d))
| StarcoderdataPython |
import os

from dotenv import load_dotenv
from flask import Flask, render_template, request, abort
from twilio.jwt.access_token import AccessToken
from twilio.jwt.access_token.grants import VideoGrant

# Pull Twilio credentials from a local .env file / the environment.
load_dotenv()
twilio_account_sid = os.environ.get('TWILIO_ACCOUNT_SID')
twilio_api_key_sid = os.environ.get('TWILIO_API_KEY_SID')
twilio_api_key_secret = os.environ.get('TWILIO_API_KEY_SECRET')

# BUG FIX: the original imported these names from "django"
# (`from django import django, render_template, request, abort`) and
# created the app with `django(__name__)`. render_template, request,
# abort and the @app.route decorator used below are Flask APIs.
app = Flask(__name__)
@app.route('/')
def index():
    # Serve the video-chat landing page.
    return render_template('index.html')
@app.route('/login', methods=['POST'])
def login():
    """Issue a Twilio Video access token for the posted username.

    Expects a JSON body {"username": ...}; responds 401 when missing.
    """
    # BUG FIX: the original line read `username =` (a syntax error), with
    # the intended code commented out below it; restored here.
    username = request.get_json(force=True).get('username')
    if not username:
        abort(401)
    token = AccessToken(twilio_account_sid, twilio_api_key_sid,
                        twilio_api_key_secret, identity=username)
    token.add_grant(VideoGrant(room='My Room'))
    return {'token': token.to_jwt().decode()}
if __name__ == '__main__':
    # Development server, reachable from other hosts on the network.
    app.run(host='0.0.0.0')
3293867 | <reponame>stefanmerb/dash_webapp<filename>simple_webapp.py
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
# NOTE(review): dcc appears unused in this file.
# Bootstrap-themed Dash app; `server` exposes the underlying WSGI app
# for deployment.
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server
# Bootstrap-grid layout: three labelled numeric inputs (Höhe, Breite,
# Länge), a "Berechnen" button and an output field. Empty H1 rows act
# as vertical spacers.
app.layout = html.Div(
    [
        dbc.Row(dbc.Col(html.H1(""))),
        dbc.Row(dbc.Col(html.H1("Schnellrechner"), style={"color" : "blue"})),
        dbc.Row(dbc.Col(html.H1(""))),
        dbc.Row(
            [
                dbc.Col(html.Div("Höhe"),
                        #style={"border" : "blue solid", "align-items" : "center"},
                        width = {"size" : 1, "offset" : 1}),
                dbc.Col(dbc.Input(id="input1", placeholder="...", type="number",
                                  style={"width" : "10%"})),
            ]
        ),
        dbc.Row(dbc.Col(html.H1(""))),
        dbc.Row(
            [
                dbc.Col(html.Div("Breite"),
                        width = {"size" : 1, "offset" : 1}),
                dbc.Col(dbc.Input(id="input2", placeholder="...", type="number",
                                  style={"width" : "10%"})),
            ]
        ),
        dbc.Row(dbc.Col(html.H1(""))),
        dbc.Row(
            [
                dbc.Col(html.Div("Länge"),
                        width={"size": 1, "offset": 1}),
                dbc.Col(dbc.Input(id="input3", placeholder="...", type="number",
                                  style={"width": "10%"})),
            ]
        ),
        dbc.Row(dbc.Col(html.H1(""))),
        dbc.Row([
            dbc.Col(html.Div(""),
                    #style={"border": "blue solid"},
                    width={"size": 1, "offset": 1}),
            # Button clicks drive the callback registered below.
            dbc.Button("Berechnen", id = "button1", color="primary", className="mr-1", n_clicks=0,
                       style={"width" : "10%"}),
        ]
        ),
        dbc.Row(dbc.Col(html.H1(""))),
        dbc.Row(
            [
                dbc.Col(html.Div("Kostenwert"),
                        width={"size": 1, "offset": 1}),
                dbc.Col(html.P(id="output",
                               style={"width": "10%"})),
            ]
        ),
    ]
)
# Number of "Berechnen" clicks already handled by the callback below.
# BUG FIX: the original preceded this with `global counter` at module
# scope, which is a no-op (the `global` statement only has meaning
# inside a function body) — removed.
counter = 0
@app.callback(Output("output", "children"),
              [
                  Input("input1", "value"),
                  Input("input2", "value"),
                  Input("input3", "value"),
                  Input("button1", "n_clicks")
              ])
def output_text(val1, val2, val3, n_click):
    """Return (Höhe / Breite) + 2.3 * Länge, recomputed only when the
    "Berechnen" button produced a new, not-yet-handled click.

    The module-level `counter` tracks how many clicks were already
    processed so that callbacks triggered by mere input edits (same
    n_clicks value) return "" instead of recomputing.
    """
    global counter
    if n_click > 0 and n_click > counter:
        erg = (val1 / val2) + 2.3 * val3
        # Removed: unused local `n_click_new = n_click + 1` (dead code).
        counter += 1
        return erg
    else:
        return ""
if __name__ == "__main__":
    # Start the built-in development server.
    app.run_server()
192303 | from http import HTTPStatus
import pytest
import requests
from rotkehlchen.constants.assets import A_ETH, A_EUR, A_KRW, A_USD
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.api import (
api_url_for,
assert_error_response,
assert_proper_response,
assert_proper_response_with_result,
)
@pytest.mark.parametrize('start_with_logged_in_user', [False])
def test_querying_exchange_rates(rotkehlchen_api_server):
    """Make sure that querying exchange rates works also without logging in"""
    # Test with empty list of currencies
    data = {'currencies': []}
    response = requests.get(
        api_url_for(rotkehlchen_api_server, 'exchangeratesresource'), json=data,
    )
    assert_error_response(
        response=response,
        contained_in_msg='Empty list of currencies provided',
        status_code=HTTPStatus.BAD_REQUEST,
    )
    def assert_okay(response):
        """Helper function for DRY checking below assertions"""
        assert_proper_response(response)
        json_data = response.json()
        assert json_data['message'] == ''
        result = json_data['result']
        # Expect exactly the four requested currencies, each with a
        # strictly positive rate.
        assert len(result) == 4
        assert FVal(result['EUR']) > 0
        assert FVal(result['USD']) > 0
        assert FVal(result['KRW']) > 0
        assert FVal(result['ETH']) > 0
    # Test with some currencies, both JSON body and query parameters
    data = {'currencies': ['EUR', 'USD', 'KRW', 'ETH']}
    response = requests.get(
        api_url_for(rotkehlchen_api_server, 'exchangeratesresource'), json=data,
    )
    assert_okay(response)
    # This serves as a test that a list of parameters works with query args too
    response = requests.get(
        api_url_for(rotkehlchen_api_server, 'exchangeratesresource') + '?currencies=' +
        ','.join(data['currencies']),
    )
    result = assert_proper_response_with_result(response)
    expected_currencies = [A_EUR, A_USD, A_KRW, A_ETH]
    assert len(result) == len(expected_currencies)
    for currency in expected_currencies:
        assert FVal(result[currency.identifier]) > 0
@pytest.mark.parametrize('start_with_logged_in_user', [False])
def test_querying_exchange_rates_errors(rotkehlchen_api_server):
    """Make sure that querying exchange rates with wrong input is handled"""
    # Test with invalid type for currency
    data = {'currencies': [4234324.21], 'async_query': False}
    response = requests.get(
        api_url_for(rotkehlchen_api_server, 'exchangeratesresource'), json=data,
    )
    assert_error_response(
        response=response,
        contained_in_msg='Tried to initialize an asset out of a non-string identifier',
        status_code=HTTPStatus.BAD_REQUEST,
    )
    # Test with unknown assets
    data = {'currencies': ['DDSAS', 'EUR'], 'async_query': False}
    response = requests.get(
        api_url_for(rotkehlchen_api_server, 'exchangeratesresource'), json=data,
    )
    # NOTE(review): an unknown asset apparently does not produce an error
    # response here — the endpoint still answers properly.
    assert_proper_response_with_result(response)
| StarcoderdataPython |
1935145 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-12-20 17:34
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    """Add Channel.thumbnail_encoding_json (Postgres JSONB, default {})."""
    dependencies = [
        ('contentcuration', '0001_squashed_0094_auto_20180910_2342'),
    ]
    operations = [
        migrations.AddField(
            model_name='channel',
            name='thumbnail_encoding_json',
            # default=dict gives each row its own fresh empty dict.
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
        ),
    ]
| StarcoderdataPython |
1838555 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ARQUIVO.py
#
# Copyright 2021
# Autor: <NAME>
#
############################
# Código fonte em Python 3
############################
def reposta(n):
    """Return the integer obtained by reversing the decimal digits of n.

    Leading zeros produced by the reversal are dropped by int()
    (e.g. 120 -> 21). Intended for non-negative integers.
    """
    # Idiomatic digit reversal: a string slice instead of the original
    # manual reverse-index loop that built the string char by char.
    return int(str(n)[::-1])
def main():
    # Read an integer, reverse its digits and print both values.
    n = int(input())
    arm = reposta(n)
    print("N =", n, "->", "M =", arm)
if __name__ == "__main__":
    main()
9760354 | <gh_stars>1-10
# Device List
# Maps a device alias to [lantz driver class path, constructor args, kwargs].
devices = {
    'pmd':[
        'lantz.drivers.thorlabs.pm100d.PM100D',
        ['USB0::0x1313::0x8078::P0019269::INSTR'],
        {}
    ]
}
# Experiment List
# Maps a spyrelet name to [spyrelet class path, device-alias mapping, kwargs].
spyrelets = {
    'align':[
        'spyre.spyrelets.single_step_align_cwicker_spyrelet.ALIGNMENT',
        {'pmd':'pmd'},
        {}
    ],
}
| StarcoderdataPython |
4935952 | <reponame>clean-code-craft-tcq-1/add-variety-python-ccharan94
import unittest
import typewise_alert
class TypewiseTest(unittest.TestCase):
    """Unit tests for typewise_alert breach classification and alert routing."""
    def test_infers_breach_as_per_limits(self):
        #Check Breaches
        # infer_breach(value, lower_limit, upper_limit) classifies the value.
        self.assertTrue(typewise_alert.infer_breach(10, 20, 60) == 'TOO_LOW')
        self.assertTrue(typewise_alert.infer_breach(70, 20, 60) == 'TOO_HIGH')
        self.assertTrue(typewise_alert.infer_breach(50, 20, 60) == 'NORMAL')
    #Check Alerts
    def test_check_and_alert(self):
        # check_and_alert(target, cooling_type, temperature) formats either
        # an email-style message or a controller message ("<header>, <breach>").
        self.assertTrue(typewise_alert.check_and_alert('TO_EMAIL', 'PASSIVE_COOLING', 60) == 'Temperature is too high')
        self.assertTrue(typewise_alert.check_and_alert('TO_CONTROLLER', 'PASSIVE_COOLING', 60) == '65261, TOO_HIGH')
        self.assertTrue(typewise_alert.check_and_alert('TO_EMAIL', 'PASSIVE_COOLING', -5) == 'Temperature is too low')
        self.assertTrue(typewise_alert.check_and_alert('TO_CONTROLLER', 'PASSIVE_COOLING', -5) == '65261, TOO_LOW')
        self.assertTrue(typewise_alert.check_and_alert('TO_EMAIL', 'PASSIVE_COOLING', 20) == 'Temperature is normal')
        self.assertTrue(typewise_alert.check_and_alert('TO_CONTROLLER', 'PASSIVE_COOLING', 20) == '65261, NORMAL')
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6660638 | <reponame>Ureimu/weather-robot
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class HardwareInfo(object):
    """Hardware description of a cluster node (auto-generated JDCloud model)."""
    def __init__(self, nodeName=None, nodeType=None, nodeStatus=None, innerIp=None, outerIp=None, firewall=None, nodeCoreNum=None, nodeMemoryNum=None, nodeSystemInfo=None, nodeDiskType=None, nodeDiskVolume=None, serverId=None, msg=None, instanceType=None, instanceInfo=None):
        """
        :param nodeName: (Optional) Node name
        :param nodeType: (Optional) Node type
        :param nodeStatus: (Optional) Node status
        :param innerIp: (Optional) Internal (private) IP address
        :param outerIp: (Optional) External (public) IP address
        :param firewall: (Optional) Firewall
        :param nodeCoreNum: (Optional) Number of CPU cores on the node
        :param nodeMemoryNum: (Optional) Amount of memory on the node
        :param nodeSystemInfo: (Optional) Operating-system information
        :param nodeDiskType: (Optional) Disk type of the node
        :param nodeDiskVolume: (Optional) Disk capacity of the node
        :param serverId: (Optional) Node instance ID
        :param msg: (Optional) Message
        :param instanceType: (Optional) Node hardware configuration
        :param instanceInfo: (Optional) Node hardware type
        """
        self.nodeName = nodeName
        self.nodeType = nodeType
        self.nodeStatus = nodeStatus
        self.innerIp = innerIp
        self.outerIp = outerIp
        self.firewall = firewall
        self.nodeCoreNum = nodeCoreNum
        self.nodeMemoryNum = nodeMemoryNum
        self.nodeSystemInfo = nodeSystemInfo
        self.nodeDiskType = nodeDiskType
        self.nodeDiskVolume = nodeDiskVolume
        self.serverId = serverId
        self.msg = msg
        self.instanceType = instanceType
        self.instanceInfo = instanceInfo
| StarcoderdataPython |
58343 | import math
import unittest
from py_range_parse import parse_range
class ParseTest(unittest.TestCase):
    """Tests for py_range_parse.parse_range interval syntax.

    "[a..b]" is inclusive, "]a..b[" exclusive; "inf"/"∞" denote infinity.
    """
    def test_parse_equal_values(self):
        parsed_range = parse_range("[-inf..-inf]")
        self.assertIn(-math.inf, parsed_range)
    def test_parse_spaces(self):
        # Whitespace around the bounds and the ".." separator is ignored.
        parsed_range = parse_range("[ -8.3 .. +18.3 ]")
        self.assertIn(-8.3, parsed_range)
        self.assertIn(18.3, parsed_range)
    def test_parse_all_values(self):
        parsed_range = parse_range("[-inf..∞]")
        self.assertIn(-math.inf, parsed_range)
        self.assertIn(math.inf, parsed_range)
    def test_parse_range_negative(self):
        parsed_range = parse_range("[-5..-2]")
        self.assertEqual(parsed_range.start, -5)
        self.assertEqual(parsed_range.end, -2)
    def test_parse_range_negative_inverted(self):
        # Inverted bounds are normalised: start/end are sorted ascending.
        parsed_range = parse_range("[5..-2]")
        self.assertEqual(parsed_range.start, -2)
        self.assertEqual(parsed_range.end, 5)
    def test_float_range_contains(self):
        # Float ranges contain every real value within the bounds.
        parsed_range = parse_range("[1.0..4.3]")
        self.assertIn(1, parsed_range)
        self.assertIn(1.0, parsed_range)
        self.assertIn(2, parsed_range)
        self.assertIn(2.0, parsed_range)
        self.assertIn(2.1, parsed_range)
        self.assertIn(4, parsed_range)
        self.assertIn(4.3, parsed_range)
    def test_int_range_contains(self):
        # Int ranges contain only whole numbers (2.1 excluded below).
        parsed_range = parse_range("[1..4]")
        self.assertIn(1, parsed_range)
        self.assertIn(1.0, parsed_range)
        self.assertIn(2, parsed_range)
        self.assertIn(2.0, parsed_range)
        self.assertNotIn(2.1, parsed_range)
        self.assertIn(4, parsed_range)
        self.assertIn(4.0, parsed_range)
    def test_int_range_exclude(self):
        parsed_range = parse_range("]1..4[")
        assert parsed_range is not None
    def test_int_range_inf(self):
        # Exclusive infinite bound: infinity itself is not a member.
        parsed_range = parse_range("]-inf..4[")
        self.assertNotIn(-math.inf, parsed_range)
        self.assertIn(-10000000, parsed_range)
    def test_int_range_inf_inverted(self):
        # NOTE(review): inverted "]inf..4[" behaves as "everything above 4".
        parsed_range = parse_range("]inf..4[")
        self.assertNotIn(-math.inf, parsed_range)
        self.assertNotIn(3, parsed_range)
        self.assertNotIn(4, parsed_range)
        self.assertIn(4.000000001, parsed_range)
        self.assertIn(10000000, parsed_range)
    def test_int_inclusion_inverted(self):
        # "]2..1]" normalises to the half-open interval containing only 1.
        parsed_range = parse_range("]2..1]")
        self.assertNotIn(0, parsed_range)
        self.assertIn(1, parsed_range)
        self.assertNotIn(2, parsed_range)
        self.assertNotIn(3, parsed_range)
| StarcoderdataPython |
6643467 | <reponame>domWinter/opencv_nn
import torch
from PIL import Image
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.autograd import Variable
import matplotlib.pyplot as plt
from vis_utils import visualize_grid
class ClassificationCNN(nn.Module):
    """Two conv blocks (conv -> BN -> ReLU -> maxpool) plus a two-layer
    classifier head.

    With the default 3x500x500 input, the feature map after both 2x2
    pools is 32 x 125 x 125 = 500000 values, matching the hard-coded
    in-features of the first fully connected layer.

    NOTE(review): num_filters aside, the kernel_size, stride_conv,
    weight_scale, pool and stride_pool parameters are accepted but the
    layer definitions below hard-code their values; they are kept for
    interface compatibility.
    """
    def __init__(self, input_dim=(3, 500, 500), num_filters=16, kernel_size=5,
                 stride_conv=1, weight_scale=0.001, pool=2, stride_pool=2, hidden_dim=200,
                 num_classes=4, dropout=0.4):
        super(ClassificationCNN, self).__init__()
        channels, height, width = input_dim
        self.dropout = dropout  # dropout probability for the head
        self.layer1 = nn.Sequential(
            nn.Conv2d(input_dim[0], num_filters, kernel_size=5, padding=2),
            nn.BatchNorm2d(num_filters),
            nn.ReLU(),
            nn.MaxPool2d(2, 2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2, 2))
        self.fc = nn.Linear(500000, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        """Return raw class scores of shape (batch, num_classes)."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)  # flatten per sample
        # BUG FIX: dropout was applied with training=True unconditionally,
        # so it stayed active during evaluation. Gate it on self.training
        # so model.eval() disables it.
        out = F.dropout(out, self.dropout, self.training)
        out = F.relu(self.fc(out))
        out = self.fc2(out)
        return out

    @property
    def is_cuda(self):
        """True if the model parameters live on a CUDA device."""
        return next(self.parameters()).is_cuda

    def save(self, path):
        """Pickle the whole module (architecture + weights) to path."""
        print('Saving model... %s' % path)
        torch.save(self, path)
def image_loader(image_name):
    """Load an image file as a (1, C, H, W) float CUDA tensor, resized so
    its smaller edge is 500 px, with requires_grad enabled."""
    imsize = 500
    # BUG FIX: transforms.Scale was deprecated and later removed from
    # torchvision; Resize is the drop-in replacement.
    loader = transforms.Compose([transforms.Resize(imsize), transforms.ToTensor()])
    image = Image.open(image_name)
    image = loader(image).float()
    image = Variable(image, requires_grad=True)
    image = image.unsqueeze(0)  # add the batch dimension the model expects
    return image.cuda()  # assumes a CUDA-capable GPU is available
def vistensor(tensor, ch=0, allkernels=False, nrow=8, padding=1):
    '''
    vistensor: visualize a 4-D weight tensor (n, c, w, h) as an image grid
    @ch: visualization channel
    @allkernels: visualization all tensores

    NOTE(review): this function references `np` (numpy) and `utils`
    (presumably torchvision.utils), neither of which is imported in this
    file — calling it will raise NameError until those imports are added.
    '''
    n,c,w,h = tensor.shape
    if allkernels: tensor = tensor.view(n*c,-1,w,h )
    elif c != 3: tensor = tensor[:,ch,:,:].unsqueeze(dim=1)
    # Cap the grid at 64 rows.
    rows = np.min( (tensor.shape[0]//nrow + 1, 64 ) )
    grid = utils.make_grid(tensor, nrow=nrow, normalize=True, padding=padding)
    plt.figure( figsize=(nrow,rows) )
    plt.imshow(grid.numpy().transpose((1, 2, 0)))
def main():
    # Load a fully pickled model (architecture + weights) from disk.
    model = torch.load('/home/dwinter/Dokumente/opencv_nn/models/classification_cnn.model')
    classes = ["Dominik", "Maren", "Nathaniel", "Alex"]
    for i in range(0,150):
        # NOTE(review): folder/picture_pre are hard-coded to class index 3
        # ("Alex"), so only that one class's images are evaluated.
        folder = classes[3]
        picture_pre = "alex"
        path = "/home/dwinter/Dokumente/opencv_nn/test_data/"+ folder + "/" + picture_pre + "" + str(i) + ".png"
        print(path)
        # NOTE(review): `input` shadows the builtin of the same name.
        input = image_loader(path)
        output = model(input)
        _, pred = torch.max(output, 1)
        # Map the argmax class index back to a person name.
        if pred.data.cpu().numpy()[0] == 1:
            print("Domi")
        elif pred.data.cpu().numpy()[0] == 2:
            print("Maren")
        elif pred.data.cpu().numpy()[0] == 3:
            print("Nath")
        elif pred.data.cpu().numpy()[0] == 0:
            print("Alex")
    # first (next) parameter should be convolutional
    '''
    conv_params = next(model.parameters()).detach().cpu().numpy()
    grid = visualize_grid(conv_params.transpose(0, 2, 3, 1))
    plt.imshow(grid.astype('uint8'))
    plt.axis('off')
    plt.gcf().set_size_inches(50,50)
    plt.show()
    '''
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1819464 | import os,sys, subprocess
from time import sleep, time
# ANSI colour escape codes used for all console output.
RED, GREEN, BLUE, YELLOW, WHITE, END= '\033[1;31m', '\033[1;32m', '\033[1;34m', '\033[1;33m', '\033[1;37m', '\033[0m'
spaces = " " * 76 # Only for styling
# NOTE(review): threshold/mac_dict/time_dict belong to a rate-limit
# heuristic that is currently commented out in check_for_spoof_attack.
threshold = 12
mac_dict = {}
time_dict = {}
#Checking for root privilleges
try:
    if os.getuid() != 0:
        print("\n{}[-]{}ERROR: ARPSHIELD requires root privillege to run{}".format(YELLOW,RED,GREEN))
        os._exit(1)
except AttributeError:
    # os.getuid() does not exist on Windows; narrowed from a bare
    # `except:` so unrelated errors are no longer silently swallowed.
    print("\n{}[-]{}ERROR: ARPSHIELD Cannot run on Windows.{}".format(YELLOW,RED,GREEN))
    sleep(1.5)
    os._exit(1)
# Check if the dependency requirements are met.
try:
    from scapy.all import sniff
    import netifaces
except ImportError:
    # Narrowed from a bare `except:` — only a missing package should
    # trigger this message.
    print("{}[-]{}Dependency requirements not met".format(YELLOW,RED))
    print("{}[-]{}Exiting Now...".format(YELLOW,RED))
    sleep(0.7)
    sys.exit()
# Logo and credits
sys.stdout.write(GREEN + spaces + """
▄▄▄ ▄▄▄ ▄▄▄ ▄▄▄ ▄▄▄▄▄ ▄ ▄▄▄▄ ▄▄▄ ▄▄▄
█ . █ █ . \ █ . █ █ ▄▄█ █ █ █ █ █ ▄▄ █ █ █ ▄ █
█ █ █ / █ ▄█ █▄▄ █ █ █ █ █ ▄▄ █ █▄ █ █ █
█▄█▄█ █▄\▄\ █▄█ █▄▄▄█ █▄█▄█ █ █▄▄▄ █▄▄▄█ █▄▄▄█
""" + END + BLUE +
'\n' + '{}Protects you against ARP Spoof Attacks!{}'.format(RED,BLUE).center(98) +
'\n' + 'Made With <3 by: {0}<NAME> ({1}Ash-Shaun{2})'.format(YELLOW, RED, YELLOW).center(104) +
'\n' + 'Version: {}1.0{} \n'.format(YELLOW, END).center(98))
# start main import statements
from scapy.all import *
import netifaces
# Selection on Interface (USER)
# List the host's network interfaces and ask which one to monitor.
available_interface = netifaces.interfaces()
print("")
interface = input(r"{}[-]{}Please select the interface you wish to secure({}):".format(YELLOW,WHITE,available_interface))
if interface not in available_interface:
    print("{}[-]{}Interface {} not Available".format(YELLOW,RED,interface))
    exit()
def give_notification(txt, intf):
    """Pop a desktop warning (via notify-send) for a detected ARP event.

    `txt` selects the message variant ("self", "network" or "netcut");
    any other value is ignored. `intf` is interpolated into the detail line.
    """
    detail = "ARP Spoofing Attack Detected from {}.".format(intf)
    titles = {
        "self": "ARP Spoofing Detected",
        "network": "ARP Spoofing Detected",
        "netcut": "ARP Attack Detected",
    }
    bodies = {
        "self": "You are being attacked!. \n",
        "network": "Your network is being attacked!. \n",
        "netcut": "Your network is being attacked!. \n",
    }
    if txt in titles:
        subprocess.call(["notify-send", "-i", "dialog-warning",
                         titles[txt], bodies[txt] + detail])
# Get IP and MAC of Gateway
def getGateway(txt):
    """Resolve addresses via scapy probes.

    txt == "ip": gateway IP (TTL-0 ICMP probe);
    txt == "ip_of_selected_interface": our own IP;
    any other value is treated as an IP whose MAC is resolved via ARP.
    """
    if txt == "ip":
        try:
            # TTL 0 forces the first hop (the gateway) to answer.
            getGateway = sr1(IP(dst="1.1.1.1", ttl=0) / ICMP() / "XXXXXXXXXXX", verbose=False, timeout=2)
            print("{}Gateway IP: {}".format(YELLOW,BLUE) , getGateway.src)
            return getGateway.src
        except:
            # NOTE(review): bare except plus a hard-coded fallback IP —
            # this silently returns a likely-wrong gateway on any failure.
            print("shit happened")
            return ("192.168.38.2")
    elif txt =="ip_of_selected_interface":
        getGateway = sr1(IP(dst="1.1.1.1", ttl=0) / ICMP() / "XXXXXXXXXXX", verbose=False, timeout=2)
        print("{}Your IP: {}".format(YELLOW,BLUE) , getGateway[IP].dst)
        return getGateway[IP].dst
    else:
        try:
            # Broadcast ARP who-has for the given IP; return the first reply's MAC.
            query = Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=txt)
            ans, _ = srp(query, timeout=2, verbose=0)
            for _, rcv in ans:
                return(rcv[Ether].src)
                break  # NOTE(review): unreachable after return
        except:
            print("Initialization Failed!")
# Resolve our own and the gateway's addresses once at startup; the
# detection callbacks below compare sniffed packets against these globals.
mac_of_selected_interface = get_if_hwaddr(interface)
gatewayip = getGateway("ip")
gatewaymac = getGateway(gatewayip)
ip_of_selected_interface = getGateway("ip_of_selected_interface")
def check_for_spoof(source, dest, s_mac, gatewaymac, gatewayip):
    """Inspect an ARP reply for spoofing aimed at this host or the gateway."""
    print(source,"is at", s_mac)
    # check for packets to router
    # A reply claiming our IP must carry our own MAC; otherwise someone
    # is impersonating this host.
    if source == ip_of_selected_interface:
        if s_mac == mac_of_selected_interface:
            pass
        else:
            print("ARP attack on you.")
            give_notification("self",s_mac)
    # Gateway IP paired with a foreign MAC (or a foreign IP claiming the
    # gateway's MAC) indicates gateway spoofing.
    if (source == gatewayip and s_mac != gatewaymac) or (source != gatewayip and s_mac == gatewaymac):
        print ("ARP attack on Gateway")
        give_notification("network", s_mac)
def check_for_spoof_attack(source, dest, s_mac, gatewaymac, gatewayip, d_mac):
    """Inspect an ARP request: a request claiming the gateway's IP with a
    different MAC is treated as a (netcut-style) attack on the network.

    A large commented-out rate-threshold heuristic (dead code using
    mac_dict/time_dict/threshold) was removed from the body.
    """
    if source == gatewayip and s_mac != gatewaymac:
        print("ARP Attack Detected.")
        give_notification("netcut", s_mac)
def process_packets(packet):
    """Dispatch a sniffed ARP packet: op 2 (is-at / reply) goes to the
    reply checker; anything else is handled as a request."""
    if packet.op == 2:
        check_for_spoof(packet.psrc, packet.pdst, packet.hwsrc,
                        gatewaymac, gatewayip)
    else:
        check_for_spoof_attack(packet.psrc, packet.pdst, packet.hwsrc,
                               gatewaymac, gatewayip, packet.hwdst)
print("{}Your MAC: {}".format(YELLOW,BLUE), mac_of_selected_interface)
print("{}[-]{}Menu:{}".format(YELLOW,GREEN,WHITE),"\n", "\t1. Start ARP SHIELD \n\t2. Exit")
choice = input("{}[-]{}Enter your choice :".format(YELLOW,WHITE))
if choice == '1':
    os.system("clear")
    print("{}[-]{}ARPSHIELD Started. Any output will be redirected to log file.".format(YELLOW,BLUE))
    # Blocking sniff loop; every ARP packet goes through process_packets.
    sniff(filter="arp",prn=process_packets, store=0 )
elif choice =='2':
    # BUG FIX: the original called .format() on print()'s return value
    # (`print("...").format(...)`), raising AttributeError on None
    # instead of printing the exit message.
    print("{}[-]{}Exiting ARPSHIELD.".format(YELLOW,RED))
    sleep(1.5)
    print("{}Bye!".format(BLUE))
    sleep(1.6)
    exit(101)
else:
    print("{}[-]{}Invalid Choice".format(YELLOW,RED))
    exit(101)
| StarcoderdataPython |
6547967 | from dataclasses import dataclass
from bindings.gmd.code_with_authority_type import CodeWithAuthorityType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class PixelInCell(CodeWithAuthorityType):
    """gml:pixelInCell is a specification of the way an image grid is
    associated with the image data attributes.
    The required codeSpace attribute shall reference a source of
    information specifying the values and meanings of all the allowed
    string values for this property.
    """
    class Meta:
        # XML binding metadata: element name and namespace used by the
        # data-binding layer when (de)serialising this type.
        name = "pixelInCell"
        namespace = "http://www.opengis.net/gml"
| StarcoderdataPython |
223840 | # -*- coding: utf-8 -*-
from dataclasses import dataclass
from typing import Optional
@dataclass
class ExternalIDs:
    """External-service identifiers for a media entity, with helpers that
    build the corresponding profile/title URLs.

    Each helper returns None when the matching id is falsy; the
    freebase/tvdb/tvrage helpers are deliberately unimplemented.
    """
    facebook_id: Optional[str]
    freebase_id: Optional[str]
    freebase_mid: Optional[str]
    imdb_id: Optional[str]
    instagram_id: Optional[str]
    tvdb_id: Optional[int]
    tvrage_id: Optional[str]
    twitter_id: Optional[str]

    @property
    def facebook(self) -> Optional[str]:
        """Facebook profile URL, or None when facebook_id is unset."""
        if self.facebook_id:
            return f"https://facebook.com/{self.facebook_id}"
        return None

    def freebase(self, media_type: str) -> Optional[str]:
        raise NotImplementedError()

    @property
    def instagram(self) -> Optional[str]:
        """Instagram profile URL, or None when instagram_id is unset."""
        if self.instagram_id:
            return f"https://instagram.com/{self.instagram_id}"
        return None

    def imdb(self, media_type: str) -> Optional[str]:
        """IMDb URL for the given media type, or None when unavailable."""
        if not self.imdb_id:
            return None
        if media_type == "person":
            return f"https://www.imdb.com/name/{self.imdb_id}"
        # Movies and TV shows share the /title/ URL scheme; the two
        # previously duplicated branches are merged here.
        if media_type in ("movie", "tv"):
            return f"https://www.imdb.com/title/{self.imdb_id}"
        return None

    def tvdb(self, media_type: str) -> Optional[str]:
        raise NotImplementedError()

    def tvrage(self, media_type: str) -> Optional[str]:
        raise NotImplementedError()

    @property
    def twitter(self) -> Optional[str]:
        """Twitter profile URL, or None when twitter_id is unset."""
        if self.twitter_id:
            return f"https://twitter.com/{self.twitter_id}"
        return None
| StarcoderdataPython |
6560261 | """Define constants for tests."""
# Intentionally malformed (non-numeric) ZIP code for failure-path tests.
TEST_BAD_ZIP = "abcde"
# Well-formed 5-digit ZIP code fixture.
TEST_ZIP = "00123"
| StarcoderdataPython |
11317395 | <gh_stars>0
from pluto.control.modes.processes import process_factory as pf
class LiveSimulationProcessFactory(pf.ProcessFactory):
    """ProcessFactory variant for live-simulation control mode.

    NOTE(review): _create_process is an unimplemented stub (returns None).
    """
    def __init__(self, process_factory):
        # Underlying factory presumably used to spawn the actual processes.
        self._process_factory = process_factory
    def _create_process(self, framework_url, session_id, root_dir):
        # TODO: implement; currently a no-op returning None.
        pass
113175 | <reponame>eengineergz/Lambda
import random
def guessing_game():
    """Interactive loop: the user guesses a secret number in [0, 100]."""
    print("Guess the number!")
    # randrange(101) yields an integer in 0..100 inclusive.
    secret_number = random.randrange(101)
    while True:
        guess = input("Input your guess: ")
        try:
            guess = int(guess)
        except ValueError:
            # Non-integer input: prompt again instead of crashing.
            print("Please enter an integer.")
            continue
        print(f"You guessed: {guess}")
        if guess == secret_number:
            print("You win!")
            break
        elif guess < secret_number:
            print("Too small!")
        else:
            print("Too big!")
if __name__ == '__main__':
    guessing_game()
3464552 | <filename>code/Evaluation.py
'''
<NAME> (2018UCS0078), CSE Department, IIT JMU
contact: <EMAIL>
This code contains a set of functions used for evaluating the
Music Recommender System with the help of mean average precision at tau(=500).
This is part of the Music Recommender System project
(Dataset as Subset of Million Song Dataset) from My ISI Delhi
2020 ( Remote ) Summer Internship.
Copyright 2020, <NAME>
NOTE: Proper credits must be given to the author of this repository
while using this code in any of the concerned research/ project work.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
import numpy as np
import random
# Find AP_at_tau for given list of predictions
def aptau(actual, predicted, tau=500):
    """Average precision at cutoff tau (AP@tau) for one recommendation list.

    `actual` holds the relevant items; `predicted` is ranked, best first.
    An empty `actual` scores 1.0 by convention. Assumes `predicted`
    contains no duplicate recommendations.
    """
    ranked = predicted[:tau]
    if not actual:
        return 1.0
    hits = 0
    precision_sum = 0.0
    for rank, item in enumerate(ranked, start=1):
        if item in actual:
            hits += 1
            # Precision at this rank, accumulated only on hits; top
            # positions therefore weigh more.
            precision_sum += hits / rank
    return precision_sum / min(len(actual), tau)
# Randomly select given percentage of elements
# from a given list
def create_test_sample(list_a, percentage):
    """Deterministic (seed 0) random sample of `percentage` of list_a."""
    print("Length of user_test_and_training:%d" % len(list_a))
    sample_size = int(len(list_a) * percentage)
    random.seed(0)  # fixed seed so repeated runs pick the same elements
    picked_positions = random.sample(range(len(list_a)), sample_size)
    sampled = [list_a[pos] for pos in picked_positions]
    print("Length of user sample:%d" % len(sampled))
    return sampled
# Create mAP_at_tau vs sampling rate list
def sample_list_map(model, test_users, test_util_dict, tau=500):
    """mAP@tau over user samples of increasing size (25%, 50%, 75%, 100%)."""
    sample_list = [0.25, 0.5, 0.75, 1]
    list_samples = []
    for perc in sample_list:
        list_sample = create_test_sample(test_users, perc)
        list_samples.append(list_sample)
    map_sample_list = []
    for user_list in list_samples:
        AP_at_tau = 0
        for i,user in enumerate(user_list):
            predicted = model.recommend(user, return_type="list")
            actual = test_util_dict["u2s"][user]
            AP_at_tau += aptau(actual, predicted, tau)
            # Progress report (running mean) every 10k users.
            if i%10000 == 0:
                print(i+1, AP_at_tau/(i+1)*100, "%")
        # NOTE(review): `i` is undefined here if user_list is empty.
        mAP_at_tau = AP_at_tau/(i+1)*100
        print(i+1, mAP_at_tau, "%")
        map_sample_list.append(mAP_at_tau)
    return sample_list, map_sample_list
# Find mAP_at_tau for given test_list and Sampling Rate
def maptau(model, test_users, test_util_dict, sampling_rate=0.3, tau=500):
    """Mean average precision @ tau over a deterministic user sample."""
    # Prepare the test Sample with given Sampling Rate
    test_sample = create_test_sample(test_users, sampling_rate)
    # Calcuate mAP_at_tau for test Sample
    AP_at_tau = 0
    for i,user in enumerate(test_sample):
        predicted = model.recommend(user, return_type="list")
        actual = test_util_dict["u2s"][user]
        AP_at_tau += aptau(actual, predicted, tau)
        # Progress report (running mean) every 10k users.
        if i%10000 == 0:
            print(i+1, AP_at_tau/(i+1)*100, "%")
    # NOTE(review): `i` is undefined here if the sample is empty.
    mAP_at_tau = AP_at_tau/(i+1)*100
    print(i+1, mAP_at_tau, "%")
    return mAP_at_tau
# Utility function to generate test sample for User_User_CF Models
def gen_newtest(newtrain_data, newtrain_util_dict, common_test_sample, test_util_dict):
    """Restrict the common test sample to users present in newtrain_data
    and keep only their songs that appear in the new training set.

    NOTE(review): this function builds newtest_sample/newtest_u2s but has
    no return statement — confirm whether returning them was intended.
    """
    newtest_sample = list(set(common_test_sample).intersection(newtrain_data["user_id"].unique()))
    ## since we are using set in above, so order might get changed
    newtest_u2s = {}
    for user in newtest_sample:
        songs = test_util_dict["u2s"][user]
        for song in songs:
            # NOTE(review): the nested key path
            # newtrain_util_dict["newtrain_data"]["songs"] looks unusual —
            # confirm it is a collection of training-set songs.
            if song in newtrain_util_dict["newtrain_data"]["songs"]:
                if user in newtest_u2s:
                    newtest_u2s[user] += [song]
                else:
                    newtest_u2s[user] = [song]
8175965 | <reponame>tunealog/python-web-scraping
# Python Web Scraping
# Title : BeautifulSoup4
# Date : 2020-08-15
# Creator : tunealog
import requests
from bs4 import BeautifulSoup
# Naver Webtoon episode-list page for one title (titleId=675554).
url = "https://comic.naver.com/webtoon/list.nhn?titleId=675554"
res = requests.get(url)
res.raise_for_status()  # abort on HTTP errors
soup = BeautifulSoup(res.text, "lxml")
# Titles and Links
cartoons = soup.find_all("td", attrs={"class": "title"})
# title = cartoons[0].a.get_text()
# link = cartoons[0].a["href"]
# print(title)
# print("https://comic.naver.com" + link)
# Episode links are relative; prepend the site origin.
for cartoon in cartoons:
    title = cartoon.a.get_text()
    link = "https://comic.naver.com" + cartoon.a["href"]
    print(title, link)
# Rates
total_rates = 0
cartoons = soup.find_all("div", attrs={"class": "rating_type"})
for cartoon in cartoons:
    rate = cartoon.find("strong").get_text()
    print(rate)
    total_rates += float(rate)
print("Total : ", total_rates)
# NOTE(review): raises ZeroDivisionError if no rating elements were found.
print("Avg : ", total_rates / len(cartoons))
1921994 | #!/usr/bin/env python
import argparse
import re
# Matches primer names like "scheme_7_LEFT" / "nCoV_12_R", capturing the
# amplicon number and the LEFT/RIGHT side marker.
AMPLICON_PAT = re.compile(r'.*_(?P<num>\d+).*_(?P<name>L(?:EFT)?|R(?:IGHT)?)')


def write_amplicon_info_file(bed_file, amplicon_info_file):
    """Write an iVar amplicon-info TSV from a primer BED file.

    Primers are grouped by the amplicon number embedded in their names;
    each output line lists one amplicon's primer names, tab-separated and
    ordered by start position. Amplicons are written in numeric order.
    Closes amplicon_info_file when done.

    Raises ValueError if a primer name does not match AMPLICON_PAT.
    """
    amplicon_sets = {}
    for line in bed_file:
        fields = line.strip().split('\t')
        start = int(fields[1])  # BED column 2: start coordinate
        name = fields[3]        # BED column 4: primer name
        re_match = AMPLICON_PAT.match(name)
        if re_match is None:
            raise ValueError(
                '{} does not match expected amplicon name format'.format(name)
            )
        amplicon_id = int(re_match.group('num'))
        # setdefault replaces the original get/append/reassign dance.
        amplicon_sets.setdefault(amplicon_id, []).append((name, start))
    # write amplicons sorted by number with primers sorted by start position
    for amplicon_id in sorted(amplicon_sets):
        primers = sorted(amplicon_sets[amplicon_id], key=lambda primer: primer[1])
        amplicon_info_file.write(
            '\t'.join(name for name, _ in primers) + '\n'
        )
    amplicon_info_file.close()
if __name__ == '__main__':
    # CLI entry point: primer BED file in, amplicon-info TSV out.
    parser = argparse.ArgumentParser(
        description='Write an amplicon info file for iVar '
        'from a BED file describing primer positions'
    )
    parser.add_argument(
        'bed_file', type=argparse.FileType(), help='Primer BED file'
    )
    parser.add_argument(
        'amplicon_info_file', type=argparse.FileType('w'),
        help='Output file: amplicon info file in TSV format'
    )
    args = parser.parse_args()
    write_amplicon_info_file(args.bed_file, args.amplicon_info_file)
| StarcoderdataPython |
class Pessoa:
    """A person whose string representation is their (fixed default) name."""

    def __init__(self):
        self.nome = 'Uadson'

    def __str__(self):
        nome_atual = self.nome
        return nome_atual
class Idade:
    """Holds a starting age of 37."""

    def __init__(self):
        self.idade = 37
class Evolucao(Idade):
    """Tracks a person's (name, age) history across yearly updates.

    ``evolui`` starts as ``[name, age]`` and grows by one ``name, age`` pair
    on every call to :meth:`atualiza`.
    """

    def __init__(self):
        self.nome = Pessoa()
        super().__init__()
        self.evolui = [self.nome.nome, self.idade]

    def atualiza(self):
        """Advance the age by one year, record the new pair and return the history."""
        self.idade += 1
        self.evolui.extend((self.nome.nome, self.idade))
        return self.evolui
if __name__ == '__main__':
    # Demo: print the history, then advance the age twice, printing the
    # growing [name, age, name, age, ...] list after each update.
    pessoa = Evolucao()
    print(pessoa.evolui)
    pessoa.atualiza()
    print(pessoa.evolui)
    pessoa.atualiza()
    print(pessoa.evolui)
6706654 | <filename>rake_tutorial.py
from __future__ import absolute_import
from __future__ import print_function
import six
__author__ = 'a_medelyan'
import rake
import operator
import io
# EXAMPLE ONE - SIMPLE
stoppath = "SmartStoplist.txt"
# 1. initialize RAKE by providing a path to a stopwords file
# NOTE(review): the three numeric arguments presumably are RAKE's
# (min chars per word, max words per phrase, min keyword frequency) —
# confirm against the rake module's signature.
rake_object = rake.Rake(stoppath, 5, 3, 4)
# 2. run on RAKE on a given text
sample_file = io.open("data/docs/fao_test/w2167e.txt", 'r',encoding="iso-8859-1")
text = sample_file.read()
keywords = rake_object.run(text)
# 3. print results
print("Keywords:", keywords)
print("----------")
# EXAMPLE TWO - BEHIND THE SCENES (from https://github.com/aneesha/RAKE/rake.py)
# 1. initialize RAKE by providing a path to a stopwords file
rake_object = rake.Rake(stoppath)
text = "Compatibility of systems of linear constraints over the set of natural numbers. Criteria of compatibility " \
    "of a system of linear Diophantine equations, strict inequations, and nonstrict inequations are considered. " \
    "Upper bounds for components of a minimal set of solutions and algorithms of construction of minimal generating"\
    " sets of solutions for all types of systems are given. These criteria and the corresponding algorithms " \
    "for constructing a minimal supporting set of solutions can be used in solving all the considered types of " \
    "systems and systems of mixed types."
# 1. Split text into sentences
sentenceList = rake.split_sentences(text)
for sentence in sentenceList:
    print("Sentence:", sentence)
# generate candidate keywords
stopwords = rake.load_stop_words(stoppath)
stopwordpattern = rake.build_stop_word_regex(stoppath)
phraseList = rake.generate_candidate_keywords(sentenceList, stopwordpattern, stopwords)
print("Phrases:", phraseList)
# calculate individual word scores
wordscores = rake.calculate_word_scores(phraseList)
# generate candidate keyword scores
keywordcandidates = rake.generate_candidate_keyword_scores(phraseList, wordscores)
for candidate in keywordcandidates.keys():
    print("Candidate: ", candidate, ", score: ", keywordcandidates.get(candidate))
# sort candidates by score to determine top-scoring keywords
# (six.iteritems keeps this snippet Python 2/3 compatible)
sortedKeywords = sorted(six.iteritems(keywordcandidates), key=operator.itemgetter(1), reverse=True)
totalKeywords = len(sortedKeywords)
# for example, you could just take the top third as the final keywords
for keyword in sortedKeywords[0:int(totalKeywords / 3)]:
    print("Keyword: ", keyword[0], ", score: ", keyword[1])
print(rake_object.run(text))
| StarcoderdataPython |
9685013 | <gh_stars>1-10
import setuptools
def long_desc():
    """Return the package's long description read from README.md.

    The file is decoded as UTF-8 explicitly so building the package does
    not depend on the platform's default locale encoding (the original
    relied on the implicit default, which breaks on non-UTF-8 locales).
    """
    with open('README.md', 'r', encoding='utf-8') as desc:
        return desc.read()
# Package metadata for PyPI distribution of pyislam.
# The long description is pulled from README.md (see long_desc above).
setuptools.setup(
    name = 'pyislam',
    version = '0.1.1',
    author = '<NAME>',
    author_email = '<EMAIL>',
    description = 'An Islamic Python Package',
    long_description = long_desc(),
    long_description_content_type = 'text/markdown',
    url = 'https://github.com/moha369/pyislam',
    packages = setuptools.find_packages(),
    install_requires = ['requests'],
    license = 'MIT',
    keywords = 'islam quran sunnah',
    classifiers = [
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent'
    ],
    python_requires = '>=3.6'
)
| StarcoderdataPython |
3366515 | <gh_stars>1-10
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from decimal import Decimal
import discord
from discord.ext import commands
import user_db
import config
# JSON-RPC endpoint URL for the coin daemon; used below to construct
# AuthServiceProxy clients (no connection is made on this line itself).
rpc_connection = 'http://{0}:{1}@{2}:{3}'.format(config.rpc_user, config.rpc_password, config.ip, config.rpc_port)
def str_isfloat(value):
    """Return True if *value* can be parsed as a float, else False.

    The parameter was renamed from ``str``, which shadowed the builtin;
    the only call site in this module passes it positionally.
    """
    try:
        float(value)
        return True
    except ValueError:
        return False
class Withdraw(commands.Cog):
    """Discord cog providing the ``//withdraw`` command for the CPUchain tipbot.

    Bug fixes vs. the original:
    * every error branch now ``return``s instead of falling through to the
      next check (an unregistered user previously still reached the
      withdrawal logic after the welcome message),
    * a failed ``sendfrom`` no longer falls through to ``len(txid)`` with
      ``txid`` unbound (NameError),
    * the bare ``except:`` is narrowed to ``except Exception:``.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def withdraw(self, ctx, address=None, amount=None):
        """Send ``amount`` CPU from the author's tipbot account to on-chain ``address``."""
        client = AuthServiceProxy(rpc_connection)
        user_id = str(ctx.author.id)
        if not user_db.check_user(user_id):
            # Unknown user: greet them and stop — do not attempt a withdrawal.
            embed = discord.Embed(
                title="**For first-use-user**",
                color=0x0043ff)
            embed.set_author(
                name=ctx.author.display_name,
                icon_url=ctx.author.avatar_url_as(format='png', size=256))
            embed.add_field(
                name="First of all, please type `//help`",
                value="Welcome to world of CPUchain tipbot !")
            embed.set_thumbnail(url=self.bot.user.avatar_url_as(format='png', size=1024))
            embed.set_footer(text="CPUchain tipbot {0} [Owner: {1}]".format(config.VERSION, self.bot.get_user(config.OWNER_ID)),
                             icon_url=self.bot.user.avatar_url_as(format='png', size=256))
            await ctx.channel.send(embed=embed)
            return
        if address is None or amount is None:
            # Missing arguments: point the user at the help command.
            embed = discord.Embed(color=0xffd800)
            embed.set_author(
                name=ctx.author.display_name,
                icon_url=ctx.author.avatar_url_as(format='png', size=256))
            embed.add_field(
                name="Please check `//help` ",
                value=" :mag: ")
            embed.set_footer(text="CPUchain tipbot {0} [Owner: {1}]".format(config.VERSION, self.bot.get_user(config.OWNER_ID)),
                             icon_url=self.bot.user.avatar_url_as(format='png', size=256))
            await ctx.channel.send(embed=embed)
            return
        if not str_isfloat(amount) or Decimal(amount) < Decimal('0.5'):
            # Reject non-numeric amounts and amounts below the 0.5 CPU minimum.
            embed = discord.Embed(color=0xff0000)
            embed.set_author(
                name=ctx.author.display_name,
                icon_url=ctx.author.avatar_url_as(format='png', size=256))
            embed.add_field(
                name="invalid amount. (amount must be at least 0.5 CPU)",
                value="`{0}`".format(str(amount)))
            embed.set_footer(text="CPUchain tipbot {0} [Owner: {1}]".format(config.VERSION, self.bot.get_user(config.OWNER_ID)),
                             icon_url=self.bot.user.avatar_url_as(format='png', size=256))
            await ctx.channel.send(embed=embed)
            return
        # float() first normalizes inputs like "001.100" before the Decimal
        # conversion; the service fee is deducted from the amount sent.
        sendamount = Decimal(str(float(amount))) - Decimal(str(config.FEE))
        account = str(ctx.author.id)
        validate = client.validateaddress(address)
        if not validate['isvalid']:
            embed = discord.Embed(color=0xff0000)
            embed.set_author(
                name=ctx.author.display_name,
                icon_url=ctx.author.avatar_url_as(format='png', size=256))
            embed.add_field(
                name="invalid address.",
                value="`{0}`".format(str(address)))
            embed.set_footer(text="CPUchain tipbot {0} [Owner: {1}]".format(config.VERSION, self.bot.get_user(config.OWNER_ID)),
                             icon_url=self.bot.user.avatar_url_as(format='png', size=256))
            await ctx.channel.send(embed=embed)
            return
        if Decimal(amount) > client.getbalance(account, config.CONFIRM):
            embed = discord.Embed(color=0xff0000)
            embed.set_author(
                name=ctx.author.display_name,
                icon_url=ctx.author.avatar_url_as(format='png', size=256))
            embed.add_field(
                name="You don't have enough balances.",
                value="Your balances : ```{0} CPU```".format(client.getbalance(account, config.CONFIRM)))
            embed.set_footer(text="CPUchain tipbot {0} [Owner: {1}]".format(config.VERSION, self.bot.get_user(config.OWNER_ID)),
                             icon_url=self.bot.user.avatar_url_as(format='png', size=256))
            await ctx.channel.send(embed=embed)
            return
        try:
            txid = client.sendfrom(account, address, float(sendamount))
        except Exception:
            # The daemon rejected the send (e.g. too much decimal precision).
            embed = discord.Embed(color=0xff0000)
            embed.set_author(
                name=ctx.author.display_name,
                icon_url=ctx.author.avatar_url_as(format='png', size=256))
            embed.add_field(
                name="invalid amount.\n(You can not specify the einth decimal place or smaller than that.)",
                value="`{0}`".format(amount))
            embed.set_footer(text="CPUchain tipbot {0} [Owner: {1}]".format(config.VERSION, self.bot.get_user(config.OWNER_ID)),
                             icon_url=self.bot.user.avatar_url_as(format='png', size=256))
            await ctx.channel.send(embed=embed)
            return
        if len(txid) == 64:
            tx = client.gettransaction(txid)
            txfee = tx['fee']
            # Collect the service fee into the bot wallet and move the on-chain
            # tx fee back to the user's account (txfee is presumably negative
            # in gettransaction output, hence the sign flip — verify).
            client.move(account, "tipcpu_wallet", Decimal(str(config.FEE)))
            client.move("tipcpu_wallet", account, -txfee)
            embed = discord.Embed(
                title="**Block explorer**",
                url='https://explorer.cpuchain.org/tx/{0}'.format(txid), color=0x0043ff)
            embed.set_author(
                name=ctx.author.display_name,
                icon_url=ctx.author.avatar_url_as(format='png', size=256))
            embed.add_field(
                name="Withdrawal complete `{0} CPU`\nwithdraw fee is `{1} CPU`\nPlease check the transaction at the above link.".format(sendamount, str(config.FEE)),
                value="Your balances : `{0} CPU`".format(client.getbalance(account, config.CONFIRM)))
            embed.set_footer(text="CPUchain tipbot {0} [Owner: {1}]".format(config.VERSION, self.bot.get_user(config.OWNER_ID)),
                             icon_url=self.bot.user.avatar_url_as(format='png', size=256))
            await ctx.channel.send(embed=embed)
def setup(bot):
    # Register the Withdraw cog on the given bot instance.
    bot.add_cog(Withdraw(bot))
| StarcoderdataPython |
1919292 | from typing import Optional, Tuple
from flexmeasures.data import db
class GenericAssetType(db.Model):
    """An asset type defines what type an asset belongs to.
    Examples of asset types: WeatherStation, Market, CP, EVSE, WindTurbine, SolarPanel, Building.
    """

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), default="")
    # Optional free-form description; explicitly not unique.
    description = db.Column(db.String(80), nullable=True, unique=False)
class GenericAsset(db.Model):
    """An asset is something that has economic value.
    Examples of tangible assets: a house, a ship, a weather station.
    Examples of intangible assets: a market, a country, a copyright.
    """

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), default="")
    # Geographic position; both columns are nullable (location unknown).
    latitude = db.Column(db.Float, nullable=True)
    longitude = db.Column(db.Float, nullable=True)
    generic_asset_type_id = db.Column(
        db.Integer, db.ForeignKey("generic_asset_type.id"), nullable=False
    )
    generic_asset_type = db.relationship(
        "GenericAssetType",
        foreign_keys=[generic_asset_type_id],
        backref=db.backref("generic_assets", lazy=True),
    )
    account_id = db.Column(
        db.Integer, db.ForeignKey("account.id", ondelete="CASCADE"), nullable=True
    )  # if null, asset is public
    # Owning account; deleting the account cascades to its assets.
    owner = db.relationship(
        "Account",
        backref=db.backref(
            "generic_assets",
            foreign_keys=[account_id],
            lazy=True,
            cascade="all, delete-orphan",
            passive_deletes=True,
        ),
    )

    @property
    def location(self) -> Optional[Tuple[float, float]]:
        """Return (latitude, longitude) if both are set, else None."""
        if None not in (self.latitude, self.longitude):
            return self.latitude, self.longitude
        return None
def create_generic_asset(generic_asset_type: str, **kwargs) -> GenericAsset:
    """Create a GenericAsset and assigns it an id.
    :param generic_asset_type: "asset", "market" or "weather_sensor"
    :param kwargs: should have values for keys "name", and:
    - "asset_type_name" or "asset_type" when generic_asset_type is "asset"
    - "market_type_name" or "market_type" when generic_asset_type is "market"
    - "weather_sensor_type_name" or "weather_sensor_type" when generic_asset_type is "weather_sensor"
    - alternatively, "sensor_type" is also fine
    :returns: the created GenericAsset
    """
    # Resolve the type name from "<kind>_type_name", "<kind>_type" or "sensor_type".
    asset_type_name = kwargs.pop(f"{generic_asset_type}_type_name", None)
    if asset_type_name is None:
        if f"{generic_asset_type}_type" in kwargs:
            asset_type_name = kwargs.pop(f"{generic_asset_type}_type").name
        else:
            asset_type_name = kwargs.pop("sensor_type").name
    # NOTE(review): the parameter is rebound here from a str to the ORM row.
    generic_asset_type = GenericAssetType.query.filter_by(
        name=asset_type_name
    ).one_or_none()
    if generic_asset_type is None:
        raise ValueError(f"Cannot find GenericAssetType {asset_type_name} in database.")
    new_generic_asset = GenericAsset(
        name=kwargs["name"], generic_asset_type_id=generic_asset_type.id
    )
    # Optional attributes are only set when explicitly supplied.
    for arg in ("latitude", "longitude", "account_id"):
        if arg in kwargs:
            setattr(new_generic_asset, arg, kwargs[arg])
    db.session.add(new_generic_asset)
    db.session.flush()  # generates the pkey for new_generic_asset
    return new_generic_asset
| StarcoderdataPython |
# A tuple works exactly like a list; the only difference is that its
# contents cannot be changed after it has been created (it is immutable).
t1 = (1, 2, 3, 'a', 'samu')
11395974 | <gh_stars>0
if __name__ == '__main__':
    # Read N students (name on one line, grade on the next) from stdin and
    # print, alphabetically, the names of students with the SECOND lowest grade.
    dic = {}       # grade -> list of names with that grade
    s = list()     # distinct grades seen (list used as a set)
    for _ in range(int(input())):
        name = input()
        score = float(input())
        if score in dic:
            dic[score].append(name)
        else:
            dic[score] = [name]
        if score not in s:
            s.append(score)
    # Drop the lowest grade, then take the minimum of what remains.
    # NOTE(review): assumes at least two distinct grades exist — min() on an
    # empty list raises ValueError otherwise.
    m = min(s)
    s.remove(m)
    m1 = min(s)
    dic[m1].sort()
    for i in dic[m1]:
        print(i)
| StarcoderdataPython |
6516571 | <gh_stars>0
#!python
def is_sorted(items):
    """Return True when *items* are in non-decreasing order.

    Running time: O(n) — a single pass over adjacent pairs.
    Memory usage: O(1) — no auxiliary data structures.
    """
    # Every adjacent pair must satisfy items[k] <= items[k + 1];
    # all() over an empty generator is True (empty/singleton lists are sorted).
    return all(items[k] <= items[k + 1] for k in range(len(items) - 1))
def bubble_sort(items):
    """Sort *items* in place by repeatedly swapping out-of-order neighbours.

    Running time: O(n^2) worst case; O(n) best case thanks to the early
    exit when a full pass performs no swaps (the original always ran all
    n passes). Memory usage: O(1).
    """
    n = len(items)
    for pass_num in range(n):
        swapped = False
        # After each pass the largest remaining element has bubbled to the
        # end, so the inner scan can shrink by one each time.
        for j in range(n - 1 - pass_num):
            if items[j] > items[j + 1]:
                # Tuple assignment replaces the original temp-variable swap.
                items[j], items[j + 1] = items[j + 1], items[j]
                swapped = True
        if not swapped:
            # No swaps means the list is already sorted — stop early.
            return
def selection_sort(items):
    """Sort *items* in place by repeatedly selecting the minimum of the
    unsorted suffix and swapping it into position.

    Running time: O(n^2). Memory usage: O(1).

    Bug fixes vs. the original: the inner scan must include the LAST
    element (the range previously stopped at len(items) - 1), and the
    comparison must be against the current minimum candidate, not the
    fixed items[i] (the original could pick a non-minimal element).
    """
    for i in range(len(items)):
        # Find the index of the minimum item in the unsorted suffix.
        smallest_index = i
        for j in range(i + 1, len(items)):
            if items[j] < items[smallest_index]:
                smallest_index = j
        # Swap it with the first unsorted item.
        items[i], items[smallest_index] = items[smallest_index], items[i]
def insertion_sort(items):
    """Sort *items* in place by inserting each element into the sorted prefix.

    Running time: O(n^2) worst case (O(n) when nearly sorted).
    Memory usage: O(1).

    Bug fix vs. the original: the body referred to an undefined name
    ``array`` instead of the ``items`` parameter, so every call raised
    NameError.
    """
    # Start at index 1: the one-element prefix items[:1] is trivially sorted.
    for index in range(1, len(items)):
        current_value = items[index]
        position = index
        # Shift every larger element of the sorted prefix one slot to the
        # right, scanning right-to-left until the insertion point is found.
        while position > 0 and items[position - 1] > current_value:
            items[position] = items[position - 1]
            position -= 1
        # Insert the saved value at its correct position.
        items[position] = current_value
| StarcoderdataPython |
1788850 | <gh_stars>0
import numpy as np
from VariableUnittest import VariableUnitTest
from gwlfe.BMPs.Stream import UrbLoadRed
class TestUrbLoadRed(VariableUnitTest):
    """Regression test: the vectorized UrbLoadRed_f must match the
    reference UrbLoadRed implementation on the shared fixture ``self.z``."""

    def test_UrbLoadRed(self):
        z = self.z
        # UrbLoadRed.UrbLoadRed_1(z.NYrs, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
        #                         z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.Qretention,
        #                         z.PctAreaInfil, z.Nqual, z.Storm, z.UrbBMPRed)
        # UrbLoadRed.UrbLoadRed_f(z.NYrs, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
        #                         z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.Qretention,
        #                         z.PctAreaInfil, z.Nqual, z.Storm, z.UrbBMPRed)
        # UrbLoadRed.UrbLoadRed_3(z.NYrs, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
        #                         z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.Qretention,
        #                         z.PctAreaInfil, z.Nqual, z.Storm, z.UrbBMPRed)
        # Element-wise agreement to 7 decimal places between the two variants.
        np.testing.assert_array_almost_equal(
            UrbLoadRed.UrbLoadRed_f(z.NYrs, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
                                    z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.Qretention,
                                    z.PctAreaInfil, z.Nqual, z.Storm, z.UrbBMPRed),
            UrbLoadRed.UrbLoadRed(z.NYrs, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
                                  z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.Qretention,
                                  z.PctAreaInfil, z.Nqual, z.Storm, z.UrbBMPRed), decimal=7)
| StarcoderdataPython |
6496429 | <gh_stars>1-10
from lenstronomy.Data.psf import PSF
import lenstronomy.Util.util as util
import lenstronomy.Util.image_util as image_util
import lenstronomy.Util.kernel_util as kernel_util
import lenstronomy.Util.mask as mask_util
import numpy as np
import copy
import scipy.ndimage.interpolation as interp
class PsfFitting(object):
    """
    Class to iteratively find a better PSF.
    The method makes use of a model and subtracts all the non-point-source components of the model from the data.
    If the model is sufficient, then the data will be a (better) representation of the actual PSF. The method cuts out
    those point sources and combines them to update the estimate of the PSF. This is done in an iterative procedure as
    the model components of the extended features are PSF-dependent (hopefully not too much).
    Various options can be chosen. There is no guarantee that the method works for specific data and models.
    'stacking_method': 'median', 'mean'; the different estimates of the PSF are stacked and combined together. The choices are:
        'mean': mean of pixel values as the estimator (not robust to outliers)
        'median': median of pixel values as the estimator (outlier rejection robust but needs >2 point sources in the data
    'block_center_neighbour': angle, radius of neighbouring point sources around their centers the estimates is ignored.
        Default is zero, meaning a not optimal subtraction of the neighbouring point sources might contaminate the estimate.
    'keep_error_map': bool, if True, does not replace the error term associated with the PSF estimate.
        If false, re-estimates the variance between the PSF estimates.
    The procedure only requires and changes the 'point_source_kernel' in the PSF() class and the 'psf_error_map'.
    Any previously set subgrid kernels or pixel_kernels are removed and constructed from the 'point_source_kernel'.
    """
    # NOTE(review): mutable default argument below; safe here only because the
    # dict is read via .get() and never mutated.
    def __init__(self, image_model_class, kwargs_psf_iter={}):
        self._image_model_class = image_model_class
        self._stacking_method = kwargs_psf_iter.get('stacking_method', 'median')
        self._block_center_neighbour = kwargs_psf_iter.get('block_center_neighbour', 0)
        self._keep_psf_error_map = kwargs_psf_iter.get('keep_error_map', True)
        self._psf_symmetry = kwargs_psf_iter.get('psf_symmetry', 1)

    def update_psf(self, kwargs_psf, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, factor=1):
        """Perform one PSF update step.

        Cuts out each point source from the (model-subtracted) data, stacks
        the cutouts into a new kernel estimate and blends it with the old one.

        :param kwargs_psf: PSF keyword arguments (must contain 'kernel_point_source')
        :param kwargs_lens: lens model keyword argument list
        :param kwargs_source: source light keyword argument list
        :param kwargs_lens_light: lens light keyword argument list
        :param kwargs_ps: point source keyword argument list
        :param factor: blend weight of the new estimate (1 = new only, 0 = keep old)
        :return: (updated kwargs_psf, log-likelihood after the update, error map)
        """
        psf_class = PSF(kwargs_psf)
        self._image_model_class.update_psf(psf_class)
        kernel_old = psf_class.kernel_point_source
        kernel_size = len(kernel_old)
        #kwargs_numerics_psf['psf_error_map'] = False
        kwargs_psf_copy = copy.deepcopy(kwargs_psf)
        kwargs_psf_new = {'psf_type': 'PIXEL', 'kernel_point_source': kwargs_psf_copy['kernel_point_source']}
        # Temporarily down-weight the stored error map; it is scaled back up
        # (*10) after the new kernel has been computed below.
        if 'psf_error_map' in kwargs_psf_copy:
            kwargs_psf_new['psf_error_map'] = kwargs_psf_copy['psf_error_map'] / 10
        self._image_model_class.update_psf(PSF(kwargs_psf_new))
        image_single_point_source_list = self.image_single_point_source(self._image_model_class, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
        ra_image, dec_image, amp = self._image_model_class.PointSource.point_source_list(kwargs_ps, kwargs_lens)
        x_, y_ = self._image_model_class.Data.map_coord2pix(ra_image, dec_image)
        point_source_list = self.cutout_psf(ra_image, dec_image, x_, y_, image_single_point_source_list, kernel_size, kernel_old)
        kernel_new, error_map = self.combine_psf(point_source_list, kernel_old,
                                                 sigma_bkg=self._image_model_class.Data.background_rms, factor=factor,
                                                 stacking_option=self._stacking_method, symmetry=self._psf_symmetry)
        kernel_new = kernel_util.cut_psf(kernel_new, psf_size=kernel_size)
        kwargs_psf_new['kernel_point_source'] = kernel_new
        if 'psf_error_map' in kwargs_psf_new:
            kwargs_psf_new['psf_error_map'] *= 10
        self._image_model_class.update_psf(PSF(kwargs_psf_new))
        logL_after = self._image_model_class.likelihood_data_given_model(kwargs_lens, kwargs_source,
                                                                         kwargs_lens_light, kwargs_ps)
        return kwargs_psf_new, logL_after, error_map

    def update_iterative(self, kwargs_psf, kwargs_lens, kwargs_source, kwargs_lens_light,
                         kwargs_ps, factor=1, num_iter=10, verbose=True, no_break=False):
        """Run up to *num_iter* PSF update steps, keeping the best result.

        :param kwargs_psf: initial PSF keyword arguments
        :param kwargs_lens: lens model keyword argument list
        :param kwargs_source: source light keyword argument list
        :param kwargs_lens_light: lens light keyword argument list
        :param kwargs_ps: point source keyword argument list
        :param factor: blend weight passed through to update_psf
        :param num_iter: maximum number of update iterations
        :param verbose: if True, print progress information
        :param no_break: if True, keep iterating even when the likelihood worsens
        :return: kwargs_psf of the best (highest-likelihood) iteration
        """
        self._image_model_class.PointSource.set_save_cache(True)
        # Preserve the very first kernel so it can be stored alongside the result.
        if not 'kernel_point_source_init' in kwargs_psf:
            kernel_point_source_init = copy.deepcopy(kwargs_psf['kernel_point_source'])
        else:
            kernel_point_source_init = kwargs_psf['kernel_point_source_init']
        kwargs_psf_new = copy.deepcopy(kwargs_psf)
        kwargs_psf_final = copy.deepcopy(kwargs_psf)
        if 'psf_error_map' in kwargs_psf:
            error_map_final = kwargs_psf['psf_error_map']
        else:
            error_map_final = np.zeros_like(kernel_point_source_init)
        error_map_init = copy.deepcopy(error_map_final)
        psf_class = PSF(kwargs_psf)
        self._image_model_class.update_psf(psf_class)
        logL_before = self._image_model_class.likelihood_data_given_model(kwargs_lens, kwargs_source,
                                                                          kwargs_lens_light, kwargs_ps)
        logL_best = copy.deepcopy(logL_before)
        i_best = 0
        for i in range(num_iter):
            kwargs_psf_new, logL_after, error_map = self.update_psf(kwargs_psf_new, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, factor=factor)
            if logL_after > logL_best:
                # Improvement: remember this iteration as the best so far.
                kwargs_psf_final = copy.deepcopy(kwargs_psf_new)
                error_map_final = copy.deepcopy(error_map)
                logL_best = logL_after
                i_best = i + 1
            else:
                if not no_break:
                    if verbose:
                        print("iterative PSF reconstruction makes reconstruction worse in step %s - aborted" % i)
                    break
        if verbose is True:
            print("iteration of step %s gave best reconstruction." % i_best)
            print("log likelihood before: %s and log likelihood after: %s" % (logL_before, logL_best))
        if self._keep_psf_error_map is True:
            kwargs_psf_final['psf_error_map'] = error_map_init
        else:
            kwargs_psf_final['psf_error_map'] = error_map_final
        kwargs_psf_final['kernel_point_source_init'] = kernel_point_source_init
        return kwargs_psf_final

    def image_single_point_source(self, image_model_class, kwargs_lens, kwargs_source, kwargs_lens_light,
                                  kwargs_ps):
        """
        return model without including the point source contributions as a list (for each point source individually)
        :param image_model_class: ImageModel class instance
        :param kwargs_lens: lens model kwargs list
        :param kwargs_source: source model kwargs list
        :param kwargs_lens_light: lens light model kwargs list
        :param kwargs_ps: point source model kwargs list
        :return: list of images with point source isolated
        """
        # reconstructed model with given psf
        model, error_map, cov_param, param = image_model_class.image_linear_solve(kwargs_lens, kwargs_source,
                                                                                  kwargs_lens_light, kwargs_ps)
        #model = image_model_class.image(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
        data = image_model_class.Data.data
        mask = image_model_class.ImageNumerics.mask
        point_source_list = image_model_class.point_sources_list(kwargs_ps, kwargs_lens)
        n = len(point_source_list)
        model_single_source_list = []
        for i in range(n):
            # residual (data - full model) plus this one point source back in
            model_single_source = (data - model + point_source_list[i]) * mask
            model_single_source_list.append(model_single_source)
        return model_single_source_list

    def cutout_psf(self, ra_image, dec_image, x, y, image_list, kernelsize, kernel_init):
        """Cut out a PSF estimate around every point source position.

        :param ra_image: ra coordinates of the point sources
        :param dec_image: dec coordinates of the point sources
        :param x: pixel x-positions of the point sources
        :param y: pixel y-positions of the point sources
        :param image_list: list of images (i.e. data - all models subtracted, except a single point source)
        :param kernelsize: width in pixel of the kernel
        :param kernel_init: initial kernel used to fill masked pixels
        :return: list of de-shifted kernel estimates, one per point source
        """
        mask = self._image_model_class.ImageNumerics.mask
        ra_grid, dec_grid = self._image_model_class.Data.coordinates
        ra_grid = util.image2array(ra_grid)
        dec_grid = util.image2array(dec_grid)
        radius = self._block_center_neighbour
        kernel_list = []
        for l in range(len(x)):
            # Exclude the neighbourhood of all OTHER point sources from this estimate.
            mask_point_source = self.mask_point_source(ra_image, dec_image, ra_grid, dec_grid, radius, i=l)
            mask_i = mask * mask_point_source
            kernel_deshifted = self.cutout_psf_single(x[l], y[l], image_list[l], mask_i, kernelsize, kernel_init)
            kernel_list.append(kernel_deshifted)
        return kernel_list

    def cutout_psf_single(self, x, y, image, mask, kernelsize, kernel_init):
        """
        :param x: x-coordinate of point soure
        :param y: y-coordinate of point source
        :param image: image (i.e. data - all models subtracted, except a single point source)
        :param mask: mask of pixels in the image not to be considered in the PSF estimate (being replaced by kernel_init)
        :param kernelsize: width in pixel of the kernel
        :param kernel_init: initial guess of kernel (pixels that are masked are replaced by those values)
        :return: estimate of the PSF based on the image and position of the point source
        """
        # cutout the star
        x_int = int(round(x))
        y_int = int(round(y))
        star_cutout = kernel_util.cutout_source(x_int, y_int, image, kernelsize + 2, shift=False)
        # cutout the mask
        mask_cutout = kernel_util.cutout_source(x_int, y_int, mask, kernelsize + 2, shift=False)
        # enlarge the initial PSF kernel to the new cutout size
        kernel_enlarged = np.zeros((kernelsize+2, kernelsize+2))
        kernel_enlarged[1:-1, 1:-1] = kernel_init
        # shift the initial kernel to the shift of the star
        shift_x = x_int - x
        shift_y = y_int - y
        kernel_shifted = interp.shift(kernel_enlarged, [-shift_y, -shift_x], order=1)
        # compute normalization of masked and unmasked region of the shifted kernel
        # norm_masked = np.sum(kernel_shifted[mask_i == 0])
        norm_unmasked = np.sum(kernel_shifted[mask_cutout == 1])
        # normalize star within the unmasked region to the norm of the initial kernel of the same region
        star_cutout /= np.sum(star_cutout[mask_cutout == 1]) * norm_unmasked
        # replace mask with shifted initial kernel (+2 size)
        star_cutout[mask_cutout == 0] = kernel_shifted[mask_cutout == 0]
        star_cutout[star_cutout < 0] = 0
        # de-shift kernel
        kernel_deshifted = kernel_util.de_shift_kernel(star_cutout, shift_x, shift_y)
        # re-size kernel
        kernel_deshifted = image_util.cut_edges(kernel_deshifted, kernelsize)
        # re-normalize kernel again
        kernel_deshifted = kernel_util.kernel_norm(kernel_deshifted)
        return kernel_deshifted

    @staticmethod
    def combine_psf(kernel_list_new, kernel_old, sigma_bkg, factor=1, stacking_option='median', symmetry=1):
        """
        updates psf estimate based on old kernel and several new estimates
        :param kernel_list_new: list of new PSF kernels estimated from the point sources in the image
        :param kernel_old: old PSF kernel
        :param sigma_bkg: estimated background noise in the image
        :param factor: weight of updated estimate based on new and old estimate, factor=1 means new estimate,
        factor=0 means old estimate
        :param stacking_option: option of stacking, mean or median
        :param symmetry: imposed symmetry of PSF estimate
        :return: updated PSF estimate and error_map associated with it
        """
        n = int(len(kernel_list_new) * symmetry)
        angle = 360. / symmetry
        kernelsize = len(kernel_old)
        kernel_list = np.zeros((n, kernelsize, kernelsize))
        i = 0
        # Each new estimate contributes `symmetry` rotated (and re-normalized) copies.
        for kernel_new in kernel_list_new:
            for k in range(symmetry):
                kernel_rotated = image_util.rotateImage(kernel_new, angle * k)
                kernel_norm = kernel_util.kernel_norm(kernel_rotated)
                kernel_list[i, :, :] = kernel_norm
                i += 1
        # The old kernel enters the stack once per symmetry rotation as well.
        kernel_old_rotated = np.zeros((symmetry, kernelsize, kernelsize))
        for i in range(symmetry):
            kernel_old_rotated[i, :, :] = kernel_old
        kernel_list_new = np.append(kernel_list, kernel_old_rotated, axis=0)
        if stacking_option == 'median':
            kernel_new = np.median(kernel_list_new, axis=0)
        elif stacking_option == 'mean':
            kernel_new = np.mean(kernel_list_new, axis=0)
        else:
            raise ValueError(" stack_option must be 'median' or 'mean', %s is not supported." % stacking_option)
        kernel_new[kernel_new < 0] = 0
        kernel_new = kernel_util.kernel_norm(kernel_new)
        # Blend new and old kernel with the requested weight.
        kernel_return = factor * kernel_new + (1.-factor)* kernel_old
        # Floor the kernel at the background noise level before dividing,
        # so the relative variance does not blow up in near-zero pixels.
        kernel_bkg = copy.deepcopy(kernel_return)
        kernel_bkg[kernel_bkg < sigma_bkg] = sigma_bkg
        error_map = np.var(kernel_list_new, axis=0) / kernel_bkg**2 / 2.
        return kernel_return, error_map

    @staticmethod
    def mask_point_source(x_pos, y_pos, x_grid, y_grid, radius, i=0):
        """
        :param x_pos: x-position of list of point sources
        :param y_pos: y-position of list of point sources
        :param x_grid: x-coordinates of grid
        :param y_grid: y-coordinates of grid
        :param i: index of point source not to mask out
        :param radius: radius to mask out other point sources
        :return: a mask of the size of the image with cutouts around the position
        """
        mask = np.ones_like(x_grid)
        for k in range(len(x_pos)):
            if k != i:
                # Zero out a sphere around every point source except the i-th.
                mask_point = 1 - mask_util.mask_sphere(x_grid, y_grid, x_pos[k], y_pos[k], radius)
                mask *= mask_point
        return util.array2image(mask)
| StarcoderdataPython |
9722147 | import sys
from .finalproject2 import neuralnet
| StarcoderdataPython |
5058723 | <filename>merchant/migrations/0001_initial.py
# Generated by Django 3.2 on 2021-04-25 10:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 3.2) for the merchant app:
    # creates Merchant, MerchantInvoice, ProductType and Order tables.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Merchant',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=22)),
            ],
        ),
        migrations.CreateModel(
            name='MerchantInvoice',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('merchantId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='merchant.merchant')),
            ],
        ),
        migrations.CreateModel(
            name='ProductType',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('typeName', models.CharField(choices=[('Fragile', 'Fragile'), ('Liquid', 'Liquid')], max_length=11)),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deliveryLocation', models.IntegerField(choices=[(0, 'Inside of Dhaka'), (1, 'Division of Dhaka'), (2, 'Outside of Dhaka')])),
                ('weight', models.FloatField()),
                ('charge', models.FloatField()),
                ('invoiceId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='merchant.merchantinvoice')),
                ('type', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='merchant.producttype')),
            ],
        ),
    ]
| StarcoderdataPython |
9661528 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: DeepSeaVectorDrawScene
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class RadialGradient(object):
    """FlatBuffers read-only accessor for a RadialGradient table.

    Generated by the FlatBuffers compiler — only comments are hand-edited.
    Each accessor probes the table's vtable (``Offset(n)``); a zero offset
    means the field is absent and the documented default is returned.
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Read the root uoffset, then position the accessor on the table.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = RadialGradient()
        x.Init(buf, n + offset)
        return x
    @classmethod
    def GetRootAsRadialGradient(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    # RadialGradient
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
    # RadialGradient
    def Center(self):
        # Inline struct field (vtable slot 4); None when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = o + self._tab.Pos
            from DeepSeaScene.Vector2f import Vector2f
            obj = Vector2f()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
    # RadialGradient
    def Radius(self):
        # float32 scalar (slot 6); defaults to 0.0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 0.0
    # RadialGradient
    def Focus(self):
        # Inline struct field (slot 8); None when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            x = o + self._tab.Pos
            from DeepSeaScene.Vector2f import Vector2f
            obj = Vector2f()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
    # RadialGradient
    def FocusRadius(self):
        # float32 scalar (slot 10); defaults to 0.0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 0.0
    # RadialGradient
    def Edge(self):
        # uint8 scalar (slot 12); defaults to 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
        return 0
    # RadialGradient
    def CoordinateSpace(self):
        # uint8 scalar (slot 14); defaults to 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
        return 0
    # RadialGradient
    def Transform(self):
        # Inline struct field (slot 16); None when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            x = o + self._tab.Pos
            from DeepSeaScene.Matrix33f import Matrix33f
            obj = Matrix33f()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
    # RadialGradient
    def Stops(self, j):
        # Vector of table offsets (slot 18): follow the j-th indirection.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from DeepSeaVectorDrawScene.GradientStop import GradientStop
            obj = GradientStop()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
    # RadialGradient
    def StopsLength(self):
        # Number of elements in the Stops vector; 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
    # RadialGradient
    def StopsIsNone(self):
        # Distinguishes "absent vector" from "present but empty".
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        return o == 0
# Generated builder helpers: Start/Add*/End mirror the 8 table slots above.
# The module-prefixed RadialGradient* wrappers are deprecated aliases kept
# for backwards compatibility with older generated code.
def Start(builder): builder.StartObject(8)
def RadialGradientStart(builder):
    """This method is deprecated. Please switch to Start."""
    return Start(builder)
def AddCenter(builder, center): builder.PrependStructSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(center), 0)
def RadialGradientAddCenter(builder, center):
    """This method is deprecated. Please switch to AddCenter."""
    return AddCenter(builder, center)
def AddRadius(builder, radius): builder.PrependFloat32Slot(1, radius, 0.0)
def RadialGradientAddRadius(builder, radius):
    """This method is deprecated. Please switch to AddRadius."""
    return AddRadius(builder, radius)
def AddFocus(builder, focus): builder.PrependStructSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(focus), 0)
def RadialGradientAddFocus(builder, focus):
    """This method is deprecated. Please switch to AddFocus."""
    return AddFocus(builder, focus)
def AddFocusRadius(builder, focusRadius): builder.PrependFloat32Slot(3, focusRadius, 0.0)
def RadialGradientAddFocusRadius(builder, focusRadius):
    """This method is deprecated. Please switch to AddFocusRadius."""
    return AddFocusRadius(builder, focusRadius)
def AddEdge(builder, edge): builder.PrependUint8Slot(4, edge, 0)
def RadialGradientAddEdge(builder, edge):
    """This method is deprecated. Please switch to AddEdge."""
    return AddEdge(builder, edge)
def AddCoordinateSpace(builder, coordinateSpace): builder.PrependUint8Slot(5, coordinateSpace, 0)
def RadialGradientAddCoordinateSpace(builder, coordinateSpace):
    """This method is deprecated. Please switch to AddCoordinateSpace."""
    return AddCoordinateSpace(builder, coordinateSpace)
def AddTransform(builder, transform): builder.PrependStructSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(transform), 0)
def RadialGradientAddTransform(builder, transform):
    """This method is deprecated. Please switch to AddTransform."""
    return AddTransform(builder, transform)
def AddStops(builder, stops): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(stops), 0)
def RadialGradientAddStops(builder, stops):
    """This method is deprecated. Please switch to AddStops."""
    return AddStops(builder, stops)
def StartStopsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def RadialGradientStartStopsVector(builder, numElems):
    """This method is deprecated. Please switch to Start."""
    return StartStopsVector(builder, numElems)
def End(builder): return builder.EndObject()
def RadialGradientEnd(builder):
    """This method is deprecated. Please switch to End."""
    return End(builder)
3333249 | from typing import List
class Solution:
    def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:
        """Insert ``newInterval`` into a sorted, disjoint interval list.

        Single pass: intervals that end before the new one starts are kept
        unchanged; any overlapping interval is folded into the new one; and
        the moment an interval starts after the new one ends, the remaining
        tail can be appended untouched and we are done.
        """
        result: List[List[int]] = []
        for position, current in enumerate(intervals):
            cur_start, cur_end = current
            new_start, new_end = newInterval
            if cur_end < new_start:
                # Entirely before the new interval — keep as-is.
                result.append(current)
            elif new_end < cur_start:
                # Entirely after: everything from here on is untouched.
                return result + [newInterval] + intervals[position:]
            else:
                # Overlap: widen the new interval to swallow this one.
                newInterval = [min(cur_start, new_start), max(cur_end, new_end)]
        # Never found an interval past the new one: it goes at the end.
        result.append(newInterval)
        return result
9661127 | #!/usr/bin/env python3
'''
Created on 04.02.2020
@author: JM
'''
# NOTE(review): this guard is a no-op — everything below runs at module
# level regardless, so the script also executes on import. Confirm intent.
if __name__ == '__main__':
    pass
import time
import PyTrinamic
from PyTrinamic.connections.ConnectionManager import ConnectionManager
from PyTrinamic.modules.TMCC160.TMCC_160 import TMCC_160
PyTrinamic.showInfo()
# CAN interface selection; the PCAN variant is kept for reference.
#connectionManager = ConnectionManager("--interface pcan_tmcl")
connectionManager = ConnectionManager("--interface kvaser_tmcl")
myInterface = connectionManager.connect()
module = TMCC_160(myInterface)
"""
Define all motor configurations for the TMCC160.
The configuration is based on our standard BLDC motor (QBL4208-61-04-013-1024-AT).
If you use a different motor be sure you have the right configuration setup otherwise the script may not work.
"""
" motor/module settings "
module.setMotorPoles(8)
module.setMaxTorque(2000)
module.showMotorConfiguration()
" hall configuration "
module.setHallInvert(0)
module.showHallConfiguration()
" encoder settings "
module.setOpenLoopTorque(1000)
module.setEncoderResolution(4096)
module.setEncoderDirection(0)
module.setEncoderInitMode(module.ENUMs.ENCODER_INIT_MODE_1)
module.showEncoderConfiguration()
" motion settings "
module.setMaxVelocity(4000)
module.setAcceleration(2000)
module.setRampEnabled(1)
module.setTargetReachedVelocity(500)
module.setTargetReachedDistance(5)
module.showMotionConfiguration()
" current PID values "
module.setTorquePParameter(500)
module.setTorqueIParameter(500)
" velocity PID values "
module.setVelocityPParameter(1000)
module.setVelocityIParameter(1000)
" position PID values "
module.setPositionPParameter(300)
module.showPIConfiguration()
# NOTE(review): the label below says "hall sensor signals" but the code
# selects the ENCODER-based FOC mode — confirm which one is intended.
" set commutation mode to FOC based on hall sensor signals "
module.setCommutationMode(module.ENUMs.COMM_MODE_FOC_ENCODER)
" set position counter to zero"
module.setActualPosition(0)
# Demo motion: two absolute targets and back to zero, polling the
# target-reached flag at 10 Hz and pausing 3 s between moves.
print("\nMotor move to first target position")
module.moveToPosition(40960)
while not module.positionReached():
    print("target position: " + str(module.targetPosition()) + " actual position: " + str(module.actualPosition()))
    time.sleep(0.1)
print("Motor reached first target position")
time.sleep(3)
print("\nMotor move to second target position")
module.moveToPosition(81920)
while not module.positionReached():
    print("target position: " + str(module.targetPosition()) + " actual position: " + str(module.actualPosition()))
    time.sleep(0.1)
print("Motor reached second target position")
time.sleep(3)
print("\nReturn to zero position")
module.moveToPosition(0)
while not module.positionReached():
    print("target position: " + str(module.targetPosition()) + " actual position: " + str(module.actualPosition()))
    time.sleep(0.1)
print("Motor reached zero position")
print("\nReady.")
myInterface.close()
| StarcoderdataPython |
202006 | import pandas as pd
from trectools import TrecQrel
class BaseLabelTransfer:
    """Base class for label-transfer filters.

    Subclasses must set ``self.ids_to_transfer`` (a set of document-id
    strings) and ``self.name`` in their ``__init__``.
    """

    def keep_doc(self, doc_id) -> bool:
        """Return True if the judgment for ``doc_id`` should be kept."""
        return doc_id in self.ids_to_transfer
class CW12UrlLabelTransfer(BaseLabelTransfer):
    """Keeps only judgments whose document URL matched a ClueWeb12 URL."""

    def __init__(self, input_files):
        frame = load_input_files_to_dataframe(input_files)
        with_cw12_match = frame[frame['cw12Matches'] > 0]
        self.ids_to_transfer = df_to_ids_to_transfer(with_cw12_match)
        self.name = 'cw12-url'
class CW12WaybackUrlLabelTransfer(BaseLabelTransfer):
    """Keeps judgments matched via the Wayback Machine or ClueWeb12 URLs."""

    def __init__(self, input_files):
        frame = load_input_files_to_dataframe(input_files)
        any_match = (frame['matches'] > 0) | (frame['non200Matches'] > 0) | (frame['cw12Matches'] > 0)
        self.ids_to_transfer = df_to_ids_to_transfer(frame[any_match])
        self.name = 'wayback-cw12-url'
def df_to_ids_to_transfer(df):
    """Return the 'document' column of ``df`` as a set of id strings."""
    return set(map(str, df['document']))
def load_input_files_to_dataframe(input_files):
    """Read JSON-lines inputs and concatenate them into one DataFrame."""
    frames = [pd.read_json(source, lines=True) for source in input_files]
    return pd.concat(frames)
def qrels_as_str(input_file, labels_to_keep=None):
    """Render the (optionally filtered) qrels as a space-separated table."""
    qrels = parse_qrels(input_file, labels_to_keep)
    columns = qrels.qrels_data[["query", "q0", "docid", "rel"]]
    return columns.to_csv(sep=" ", header=False, index=False)
def parse_qrels(input_file, labels_to_keep=None):
    """Load a TREC qrels file, optionally keeping only transferable docs.

    Args:
        input_file: path to the qrels file.
        labels_to_keep: object exposing ``keep_doc(doc_id)``, or None to
            keep every judgment.

    Returns:
        A ``TrecQrel`` whose ``qrels_data`` contains only rows whose docid
        passes ``labels_to_keep.keep_doc``.
    """
    ret = TrecQrel(input_file)
    if labels_to_keep is None:
        return ret
    # The filtered data no longer mirrors the on-disk file.
    ret.filename = None
    # Filter with a boolean mask directly instead of materialising a
    # temporary 'tmp_delete_me' column and dropping it afterwards.
    keep_mask = ret.qrels_data['docid'].apply(labels_to_keep.keep_doc)
    ret.qrels_data = ret.qrels_data[keep_mask]
    return ret
if __name__ == '__main__':
    # Topic/judgment exports for TREC Web 2009-2012 (cluster paths).
    files = [
        '/mnt/ceph/storage/data-in-progress/kibi9872/sigir2021/data-05-10-2020/web-2009.jsonl',
        '/mnt/ceph/storage/data-in-progress/kibi9872/sigir2021/data-05-10-2020/web-2010.jsonl',
        '/mnt/ceph/storage/data-in-progress/kibi9872/sigir2021/data-05-10-2020/web-2011.jsonl',
        '/mnt/ceph/storage/data-in-progress/kibi9872/sigir2021/data-05-10-2020/web-2012.jsonl'
    ]
    label_transfers = [
        CW12UrlLabelTransfer(files),
        CW12WaybackUrlLabelTransfer(files)
    ]
    base_dir = 'data/'
    # For every qrels file, write one filtered copy per transfer strategy,
    # suffixing the output file with the strategy name.
    for trec_run_file in ['qrels-web-2009', 'qrels-web-2010', 'qrels-web-2011', 'qrels-web-2012']:
        for label_transfer in label_transfers:
            run_file = base_dir + trec_run_file
            print('Process ' + str(run_file))
            tmp = qrels_as_str(run_file, label_transfer)
            with open(run_file + '-' + label_transfer.name, 'w+') as f:
                f.write(tmp)
| StarcoderdataPython |
210389 | # Wrapper module for _elementtree
from _elementtree import *
| StarcoderdataPython |
6495204 | __author__ = 'ferrard'
# ---------------------------------------------------------------
# Imports
# ---------------------------------------------------------------
import random
# ---------------------------------------------------------------
# Constants
# ---------------------------------------------------------------
UNREVEALED = '-----'  # placeholder shown for a door that has not been revealed
# ---------------------------------------------------------------
# Interface
# ---------------------------------------------------------------
def play_monty_hall(doors, real_player=False):
    """Play one round of the Monty Hall game on the console.

    :param doors: number of doors (one hides the car, the rest goats)
    :param real_player: if True, read choices from stdin; otherwise the
        player picks and switches at random (and choices are printed)
    :return: tuple ``(won, switched)`` of booleans
    """
    # Hide the car behind a random door; every other door hides a goat.
    prizes = ['goat'] * doors
    car_door = random.randint(0, doors - 1)
    prizes[car_door] = 'car'
    # Present the unrevealed doors and obtain the player's first pick.
    print(f"You have {doors} options:")
    for door in range(doors):
        print(f"\t{door}: {UNREVEALED}")
    print()
    print("Where is the car?\n")
    if real_player:
        picked_door = int(input())
        while picked_door not in list(range(doors)):
            picked_door = int(input("Hey, give me number from {} to {}!".format(0, doors - 1)))
    else:
        picked_door = random.randint(0, doors - 1)
        print(picked_door)
    # Pick the one door that stays closed next to the player's choice:
    # the car door if the player missed, otherwise a random other door.
    if car_door == picked_door:
        candidates = list(range(doors))
        candidates.remove(picked_door)
        stays_closed = random.choice(candidates)
    else:
        stays_closed = car_door
    # Reveal every door except the pick and the one kept closed.
    print(f"So you think it's at {picked_door}! All right... Let's reveal som doors!")
    for door in range(doors):
        shown = UNREVEALED if door in [picked_door, stays_closed] else prizes[door]
        print(f"\t{door}: {shown}", end="")
        print(" (your choice)" if door == picked_door else "")
    # Offer the switch.
    print("Do you want to switch? Yes, or no?\n")
    if real_player:
        answer = input().lower()
        while answer not in ["yes", "no"]:
            answer = input("I said \"yes\" or \"no\"!").lower()
    else:
        answer = random.choice(["yes", "no"])
        print(answer)
    switched = answer == "yes"
    print()
    if switched:
        picked_door = stays_closed
    # Show the final board and the outcome.
    print(f"So you say {picked_door}. Let's see the results!!")
    for door in range(doors):
        print(f"\t{door}: {prizes[door]}", end="")
        print(" (your choice)" if door == picked_door else "")
    if picked_door == car_door:
        print("CONGRATS!!! You have a car!!!")
    elif switched:
        print("Bad luck, though I still think a switch was a good choice!!!")
    else:
        print("Sorry, perhaps switch next time? ;-)")
    return picked_door == car_door, switched
# ---------------------------------------------------------------
# Main
# ---------------------------------------------------------------
def main(argv):
    """Run repeated Monty Hall simulations and print win statistics.

    Args:
        argv: command-line arguments ``[iterations, doors]``.
    """
    iterations = int(argv[0])
    doors = int(argv[1])
    # Maps "did the player switch?" -> [wins, games played].
    switched2stats = {
        True: [0, 0],
        False: [0, 0],
    }
    for i in range(iterations):
        print('-'*100)
        print("Playing Monty hall ({})".format(i))
        print()
        won, switched = play_monty_hall(doors)
        switched2stats[switched][1] += 1
        if won:
            switched2stats[switched][0] += 1
    print()
    print("="*100)
    print("Let's sum it up!")
    for label, (wins, games) in (("switched", switched2stats[True]), ("stayed", switched2stats[False])):
        # Guard against ZeroDivisionError: in a short run the random player
        # may never have picked one of the two strategies.
        percentage = int(100 * wins / games) if games else 0
        # "%%" is a printf escape, not a str.format one — it printed a
        # literal "%%" before; use a plain "%".
        print("\tWhen {}: {}/{} ({} %)".format(label, wins, games, percentage))
# Script entry point: forwards CLI args (iterations, doors) to main().
if __name__ == '__main__':
    import sys
    main(sys.argv[1:])
# main([1000, 3]) | StarcoderdataPython |
1986819 | import asyncio
import http.client
import os
import re
import unicodedata
from io import BytesIO
from math import floor
from pathlib import Path
from typing import Literal, Optional
from urllib import parse
import aiohttp
import dateutil.parser
import discord
import httpx
import pyppeteer
import pyppeteer.errors
import unidecode
import youtube_dl
from discord.ext import commands
from jishaku.codeblocks import codeblock_converter
from .. import PATH
from ..bot import Menel
from ..resources import filesizes
from ..resources.languages import LANGUAGES
from ..utils import embeds, imgur, markdown
from ..utils.checks import has_attachments
from ..utils.context import Context
from ..utils.converters import URL, LanguageConverter
from ..utils.errors import SendError
from ..utils.misc import get_image_url_from_message_or_reply
from ..utils.text_tools import escape, escape_str, limit_length, plural
# Sentinel language code telling Google Translate to auto-detect the source.
AUTO = "auto"
class YouTubeDownloader:
    """Thin wrapper around youtube_dl with Discord progress reporting."""

    def __init__(self, *, only_audio: bool = False):
        # Most recent payload from the youtube_dl progress hook.
        self.status = {}
        self.OPTIONS = {
            "format": "best",
            # Random hex filename under temp/ avoids collisions between
            # concurrent downloads; extension filled in by youtube_dl.
            "outtmpl": str(PATH / "temp" / (os.urandom(16).hex() + ".%(ext)s")),
            "merge_output_format": "mp4",
            "default_search": "auto",
            "progress_hooks": [self._hook],
            "max_downloads": 1,
            "ignore_config": True,
            "no_playlist": True,
            "no_mark_watched": True,
            "geo_bypass": True,
            "no_color": True,
            "abort_on_error": True,
            "abort_on_unavailable_fragment": True,
            "no_overwrites": True,
            "no_continue": True,
            "quiet": True,
        }
        if only_audio:
            self.OPTIONS.update(format="bestaudio/best", extract_audio=True)
        self.ydl = youtube_dl.YoutubeDL(self.OPTIONS)

    async def download(self, video: str) -> None:
        """Download ``video`` on a worker thread, resetting progress first."""
        self.status.clear()
        await asyncio.to_thread(self.ydl.extract_info, video)

    async def extract_info(self, video: str) -> dict:
        """Fetch metadata for ``video`` without downloading it."""
        return await asyncio.to_thread(self.ydl.extract_info, video, download=False)

    def _hook(self, info: dict) -> None:
        # youtube_dl progress hook: remember the latest status dict.
        self.status = info

    async def progress_message(self, m: Context):
        """Post and live-update a textual progress bar, then delete it."""
        msg = await m.send("Downloading…")
        # Wait up to ~10 s for the first progress callback to arrive.
        for _ in range(20):
            if self.status:
                break
            await asyncio.sleep(0.5)
        # Redraw a 20-cell bar every 1.5 s while downloading.
        while self.status and self.status["status"] == "downloading":
            ratio = self.status["downloaded_bytes"] / self.status["total_bytes"]
            progress = ("\N{FULL BLOCK}" * floor(ratio * 20)).ljust(20, "\N{LIGHT SHADE}")
            await msg.edit(
                content=f"{progress} {ratio:.1%} "
                f"{self.status['_speed_str'].strip()} Pozostało {self.status['_eta_str'].strip()}"
            )
            await asyncio.sleep(1.5)
        await msg.delete()
class Utilities(commands.Cog):
    """Miscellaneous utility commands (translation, lookups, downloads)."""

    # Translates via the undocumented translate.googleapis.com endpoint.
    @commands.command(aliases=["trans", "tr"])
    @commands.cooldown(2, 5, commands.BucketType.user)
    async def translate(
        self,
        ctx: Context,
        lang1: LanguageConverter = "en",
        lang2: Optional[LanguageConverter] = None,
        *,
        text: str = None,
    ):
        """
        Translates text with Google Translate
        `lang1`: target language, or source language when `lang2` is given
        `lang2`: target language when `lang1` is given
        `text`: the text to translate
        """
        # One language argument = auto-detect source; two = explicit pair.
        if lang2 is not None:
            src = lang1
            dest = lang2
        else:
            src = AUTO
            dest = lang1
        # Fall back to the replied-to message's content when no text given.
        if text is None and (ref := ctx.message.reference):
            msg = ref.resolved or await ctx.bot.fetch_message(ref.channel_id, ref.message_id)
            text = msg.content
        if text is None:
            raise SendError("Podaj tekst do przetłumaczenia lub odpowiedz na wiadomość")
        async with ctx.typing():
            r = await ctx.bot.client.get(
                "https://translate.googleapis.com/translate_a/single",
                params={
                    "sl": src,  # source language
                    "tl": dest,  # target language
                    "q": text,  # query text
                    "client": "gtx",  # Google Translate Extension client id
                    "dj": 1,  # presumably selects the JSON-object response shape (code reads json["sentences"]) — TODO confirm
                    "dt": "t",  # request the translation data section
                },
            )
            json = r.json()
            if "sentences" not in json:
                await ctx.error("Tłumacz Google nie zwrócił tłumaczenia")
                return
            # Replace the AUTO sentinel with the detected source language.
            if src == AUTO:
                src = json.get("src", AUTO)
            embed = embeds.with_author(ctx.author)
            embed.title = LANGUAGES.get(src, src).title() + " ➜ " + LANGUAGES.get(dest, dest).title()
            embed.description = limit_length(
                escape(" ".join(s["trans"] for s in json["sentences"])), max_length=4096, max_lines=32
            )
            await ctx.send(embed=embed)
@commands.command(aliases=["urban-dictionary", "urban", "ud"])
async def urbandictionary(self, ctx: Context, *, query: str):
"""Wyszukuje podaną frazę w słowniku Urban Dictionary"""
async with ctx.typing():
r = await ctx.client.head(
"https://www.urbandictionary.com/define.php", params={"term": query}, allow_redirects=False
)
if r.status_code == 302:
url = httpx.URL(r.headers["Location"])
query = url.params["term"]
elif r.status_code != 200:
await ctx.error("Nie znalazłem tej frazy w Urban Dictionary.")
return
r = await ctx.client.get("https://api.urbandictionary.com/v0/define", params={"term": query})
json = r.json()
if "error" in json:
await ctx.error(f'Urban Dictionary zwróciło błąd:\n{json["error"]}')
return
data = json["list"][0]
def remove_brackets(text: str) -> str:
return re.sub(r"\[(?P<word>.*?)]", r"\g<word>", text, re.DOTALL)
embed = discord.Embed(
title=limit_length(data["word"], max_length=256),
url=data["permalink"],
description=escape(limit_length(remove_brackets(data["definition"]), max_length=2048, max_lines=16)),
color=discord.Color.green(),
)
if data["example"]:
embed.add_field(
name="Example",
value=limit_length(escape(remove_brackets(data["example"])), max_length=1024, max_lines=16),
inline=False,
)
embed.set_footer(text=f"Author: {data['author']}\n👍 {data['thumbs_up']} 👎 {data['thumbs_down']}")
embed.timestamp = dateutil.parser.parse(data["written_on"])
await ctx.send(embed=embed)
    @commands.command(aliases=["m", "calculate", "calculator", "calc", "kalkulator"])
    async def math(self, ctx: Context, *, expression: str):
        """Evaluates an expression with the math.js API"""
        async with ctx.channel.typing():
            # Easter egg: "2+2" (ignoring whitespace) deliberately answers 5.
            if re.sub(r"\s+", "", expression) == "2+2":
                await asyncio.sleep(0.5)
                await ctx.send("5")
                return
            r = await ctx.client.post("https://api.mathjs.org/v4/", json={"expr": expression})
            json = r.json()
            if json["error"]:
                await ctx.error(escape(limit_length(json["error"], max_length=1024, max_lines=4)))
                return
            await ctx.send(escape(limit_length(json["result"], max_length=2048, max_lines=16)))
    # NOTE: the name shadows the builtin `eval` but is required as the
    # command name; execution happens remotely in the Piston sandbox.
    @commands.command()
    @commands.cooldown(2, 5, commands.BucketType.user)
    async def eval(self, ctx: Context, *, code: codeblock_converter):
        """Safely executes the given code in the chosen language"""
        # codeblock_converter yields (language, code) from a ```lang``` block.
        language, code = code
        if not language:
            await ctx.error("Umieść kod w bloku:\n\\`\\`\\`język\nkod\n\\`\\`\\`")
            return
        if not code.strip():
            await ctx.error("Podaj kod do wykonania.")
            return
        async with ctx.channel.typing():
            async with aiohttp.request(
                "POST", "https://emkc.org/api/v1/piston/execute", json={"language": language, "source": code}
            ) as r:
                json = await r.json()
            if r.status != 200:
                await ctx.error(json.get("message", "Nieznany błąd."))
                return
            # Render stdout/stderr as code blocks, skipping empty streams.
            output = [
                markdown.codeblock(limit_length(json[out], max_length=512, max_lines=16))
                for out in ("stdout", "stderr")
                if json[out].strip()
            ]
            embed = discord.Embed(
                description=("\n".join(output) if output else "Twój kod nic nie wypisał.")
                + f'\n{json["language"]} {json["version"]}\n'
                f"Powered by [Piston](https://github.com/engineer-man/piston)",
                # Red embed when anything was written to stderr.
                color=discord.Color.green() if not json["stderr"].strip() else discord.Color.red(),
            )
            await ctx.send(embed=embed)
    @commands.command(aliases=["charinfo", "utf", "utf8", "utf-8", "u"])
    async def unicode(self, ctx: Context, *, chars: str):
        """Shows the Unicode names of the given characters"""
        output = []
        # Only the first 16 characters are described.
        for c in chars[:16]:
            if c == "\u0020":  # space
                output.append("")
                continue
            info = f"{escape_str(c)} \N{EM DASH} U+{ord(c):0>4X}"
            try:
                info += f" \N{EM DASH} {unicodedata.name(c)}"
            except ValueError:
                # unicodedata.name raises for unnamed code points (controls).
                pass
            output.append(info)
        if len(chars) > 16:
            output.append("...")
        await ctx.send(markdown.codeblock("\n".join(output)))
    @commands.command()
    async def unidecode(self, ctx: Context, *, text: str):
        """Transliterates Unicode characters to ASCII using [unidecode](https://github.com/avian2/unidecode)"""
        await ctx.send(escape(limit_length(unidecode.unidecode(text), max_length=1024, max_lines=16), markdown=False))
    @commands.command(aliases=["mc", "skin"])
    @commands.cooldown(3, 10, commands.BucketType.user)
    async def minecraft(self, ctx: Context, *, player: str):
        """Sends the skin of a Minecraft Java Edition account"""
        async with ctx.channel.typing():
            # Resolve the player name to a UUID via the Mojang API.
            r = await ctx.client.get(f"https://api.mojang.com/users/profiles/minecraft/{parse.quote(player)}")
            if r.status_code == 204:
                # Mojang returns 204 No Content for unknown names.
                await ctx.error("Nie znalazłem gracza o tym nicku.")
                return
            json = r.json()
            uuid = json["id"]
            # Name history from Mojang; avatar/head/body renders from Crafatar.
            requests = [
                (f"https://api.mojang.com/user/profiles/{uuid}/names", None),
                (f"https://crafatar.com/avatars/{uuid}", {"size": "256", "overlay": None}),
                (f"https://crafatar.com/renders/head/{uuid}", {"scale": "6", "overlay": None}),
                (f"https://crafatar.com/renders/body/{uuid}", {"scale": "10", "overlay": None}),
            ]
            responses = await asyncio.gather(*(ctx.client.get(url, params=params) for (url, params) in requests))
            name_history = responses[0].json()
            avatar, head, body = (r.read() for r in responses[1:])
            name_history = ", ".join(escape(name["name"]) for name in name_history)
            # Ship the images as attachments referenced by attachment:// URLs.
            avatar = discord.File(BytesIO(avatar), "avatar.png")
            head = discord.File(BytesIO(head), "head.png")
            body = discord.File(BytesIO(body), "body.png")
            embed = discord.Embed(
                description=f"Historia nazw: {name_history}\nUUID: `{uuid}`", color=discord.Color.green()
            )
            embed.set_author(name=json["name"], icon_url="attachment://head.png")
            embed.set_thumbnail(url="attachment://avatar.png")
            embed.set_image(url="attachment://body.png")
            await ctx.send(embed=embed, files=[avatar, head, body])
@commands.command(aliases=["webshot"])
@commands.cooldown(2, 20, commands.BucketType.user)
@commands.max_concurrency(3, wait=True)
async def webimg(self, ctx: Context, fullpage: Optional[Literal["fullpage", "full"]], *, url: URL):
"""<NAME>ysyła zrzut ekranu strony internetowej"""
async with ctx.typing():
try:
browser = await pyppeteer.launch(
ignoreHTTPSErrors=True, headless=True, args=["--no-sandbox", "--disable-dev-shm-usage"]
)
except http.client.BadStatusLine:
await ctx.error("Nie udało się otworzyć przeglądarki. Spróbuj ponownie.")
return
page = await browser.newPage()
await page.setViewport(
{"width": 2048, "height": 1024, "deviceScaleFactor": 1 if fullpage is not None else 2}
)
try:
await page.goto(url, timeout=30000)
except TimeoutError:
await ctx.error("Minął czas na wczytanie strony.")
except (pyppeteer.errors.PageError, pyppeteer.errors.NetworkError):
await ctx.error("Nie udało się wczytać strony. Sprawdź czy podany adres jest poprawny.")
else:
await asyncio.sleep(2)
try:
screenshot: bytes = await page.screenshot(type="png", fullPage=fullpage is not None, encoding="binary") # type: ignore
except pyppeteer.errors.NetworkError as e:
await ctx.error(str(e))
else:
embed = embeds.with_author(ctx.author)
image = await imgur.upload_image(screenshot)
embed.description = f"Zdjęcie strony: {image}"
if ctx.channel.nsfw:
embed.set_image(url=image)
else:
embed.set_footer(text="Podgląd dostępny jest wyłącznie na kanałach NSFW")
await ctx.send(embed=embed)
finally:
await browser.close()
    @commands.command(aliases=["sauce", "souce", "sn"])
    @commands.is_nsfw()
    @commands.cooldown(3, 20, commands.BucketType.user)
    @commands.cooldown(6, 30)  # API rate limit
    async def saucenao(self, ctx: Context, *, art_url: URL = None):
        """Finds the source of an image using the saucenao.com API"""
        # Accept an explicit URL, an attachment, or a replied-to message.
        url = art_url or await get_image_url_from_message_or_reply(ctx)
        if url is None:
            raise SendError("Podaj URL obrazka, załącz plik lub odpowiedz na wiadomość z załącznikiem")
        async with ctx.typing():
            r = await ctx.client.get(
                "https://saucenao.com/search.php",
                params={"url": url, "output_type": 2, "numres": 8, "api_key": os.environ["SAUCENAO_KEY"]},
            )
            json = r.json()
            header = json["header"]
            # status 0 = success in the SauceNAO response envelope.
            if header["status"] != 0:
                raise SendError(f'{header["status"]}: {header["message"]}')
            minimum_similarity: float = header["minimum_similarity"]
            texts = []
            for result in json["results"]:
                header = result["header"]
                data = result["data"]
                similarity = float(header["similarity"])
                # Drop low-confidence matches and results without links.
                if similarity < minimum_similarity:
                    continue
                if "ext_urls" not in data:
                    continue
                text = [f'**{similarity / 100:.0%}** {escape(header["index_name"])}']
                text.extend(data["ext_urls"])
                if "source" in data:
                    text.append(f'Source: {data["source"]}')
                texts.append("\n".join(text))
            if not texts:
                raise SendError("Nie znaleziono źródła podanego obrazka")
            await ctx.send(
                embed=embeds.with_author(ctx.author, description="\n\n".join(texts)).set_footer(
                    text="Powered by saucenao.com"
                )
            )
    @commands.command("unshorten-url", aliases=["unshorten", "unshort"])
    async def unshorten_url(self, ctx: Context, *, url: URL):
        """Shows the redirect chain of a shortened link"""
        urls = []
        # 'shortened' is set when the chain is aborted (too many hops or a
        # redirect loop); the final URL is then shown as "…".
        shortened = False
        async with ctx.typing():
            while True:
                r = await ctx.client.head(url, allow_redirects=False)
                urls.append(str(r.url))
                if "Location" not in r.headers:
                    break
                url = r.headers["Location"]
                if len(urls) >= 16 or url in urls:
                    shortened = True
                    break
        if len(urls) <= 1:
            await ctx.error("Ten link nie jest skrócony")
            return
        # Show intermediate hops as inline code; the destination in full.
        if not shortened:
            *urls, last = urls
        else:
            last = None
        text = [markdown.code(limit_length(url, max_length=64)) for url in urls]
        text.append(limit_length(last, max_length=512) if not shortened else "…")
        await ctx.embed("\n".join(text))
    @commands.command(aliases=["rtfm"])
    @commands.cooldown(3, 10, commands.BucketType.user)
    @commands.cooldown(3, 5)  # API rate limit
    async def docs(self, ctx: Context, *, query: str):
        """Searches the discord.py documentation (master branch)"""
        # idevision's rtfm endpoint performs the Sphinx-index search.
        r = await ctx.client.get(
            "https://idevision.net/api/public/rtfm",
            params={
                "show-labels": True,
                "label-labels": False,
                "location": "https://discordpy.readthedocs.io/en/master/",
                "query": query,
            },
        )
        json = r.json()
        nodes = json["nodes"]
        if not nodes:
            await ctx.error("Nie znaleziono żadnych pasujących wyników")
            return
        # One markdown link per matched documentation entry.
        text = [f"[{markdown.code(name)}]({url})" for name, url in nodes.items()]
        embed = embeds.with_author(
            ctx.author,
            title=plural(len(nodes), "wynik", "wyniki", "wyników"),
            description="\n".join(text),
            color=discord.Color.green(),
        )
        embed.set_footer(text=f"Czas wyszukiwania: {float(json['query_time']) * 1000:.0f} ms")
        await ctx.send(embed=embed)
@commands.command("youtube-dl", aliases=["youtubedl", "yt-dl", "ytdl", "download", "dl"])
@commands.cooldown(2, 20, commands.BucketType.user)
@commands.max_concurrency(2)
async def youtube_dl(self, ctx: Context, audio: Optional[Literal["audio"]], *, video: str):
"""
Pobiera film ze strony
`audio`: pobiera jedynie dźwięk filmu
`video`: link do strony z filmem
"""
await ctx.channel.trigger_typing()
downloader = YouTubeDownloader(only_audio=audio is not None)
progress_message = None
try:
info = await downloader.extract_info(video)
if "_type" in info and info["_type"] == "playlist":
info = info["entries"][0]
duration = info.get("duration")
filesize = info.get("filesize")
if not duration and not filesize:
await ctx.error("Nieznana długość i rozmiar filmu")
return
if duration and duration > 60 * 30:
await ctx.error("Maksymalna długość filmu to 30 minut")
return
if filesize and filesize > 200 * filesizes.MiB:
await ctx.error("Maksymalny rozmiar filmu to 100 MiB")
return
progress_message = asyncio.create_task(downloader.progress_message(ctx))
await downloader.download(info["webpage_url"])
except youtube_dl.utils.YoutubeDLError as e:
if progress_message:
progress_message.cancel()
await ctx.error(escape(limit_length("\n".join(e.args), max_length=1024, max_lines=16)))
return
path = Path(downloader.status["filename"])
try:
async with ctx.channel.typing():
with open(path, "rb") as f:
video = await imgur.upload_video(f.read())
finally:
path.unlink(missing_ok=True)
await ctx.send(video)
    @commands.command("imgur")
    @has_attachments(allowed_types=("image/",))
    @commands.cooldown(2, 10, commands.BucketType.user)
    async def _imgur(self, ctx: Context):
        """Uploads the attached images to Imgur"""
        async with ctx.typing():
            # Upload each attachment and reply with non-embedding links.
            images = [await imgur.upload_image(await a.read()) for a in ctx.message.attachments]
            await ctx.send("\n".join(f"<{image}>" for image in images))
def setup(bot: Menel) -> None:
    """discord.py extension entry point: registers the Utilities cog."""
    bot.add_cog(Utilities())
| StarcoderdataPython |
9719415 | # The MIT License (MIT)
# Copyright (c) 2018 Massachusetts Institute of Technology
#
# Author: <NAME>
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
from skimage.filters import threshold_li
def wrap(x, to_2pi = False):
    '''
    Wrap an angle (float or array) into a canonical interval
    @param x: The float or array, in radian
    @param to_2pi: If True, wrap to [0, 2pi) instead of [-pi, pi)
    @return The wrapped value(s) in radian
    '''
    full_turn = 2.*np.pi
    if to_2pi:
        return np.mod(x, full_turn)
    # Shift by pi, wrap to a full turn, then shift back.
    return np.mod(x + np.pi, full_turn) - np.pi
def compute_los_vector(rad_incidence_angle, rad_los_azimuth):
    '''
    Build the line-of-sight unit vector (ground toward satellite) in
    Cartesian coordinates from spherical coordinates.

    @param rad_incidence_angle: Incidence angle of the satellite in radian
    @param rad_los_azimuth: Azimuth of the satellite direction in radian
    @return Array of shape (3,) + rad_incidence_angle.shape holding the
            line-of-sight vector components
    '''
    sin_incidence = np.sin(rad_incidence_angle)
    component_x = sin_incidence * np.sin(rad_los_azimuth)
    component_y = sin_incidence * np.cos(rad_los_azimuth)
    component_z = np.cos(rad_incidence_angle)
    return np.stack([component_x, component_y, component_z])
def crop_array_from_center(array, crop_shape):
    '''
    Crop the central region of an array

    @param array: The array
    @param crop_shape: The size of the region to keep along each leading axis
    @return The cropped array (a view of the input)
    '''
    slices = []
    for i in range(len(crop_shape)):
        start = array.shape[i]//2 - crop_shape[i]//2
        end = start + crop_shape[i]
        slices.append(slice(start, end))
    # Indexing with a *list* of slices was deprecated in NumPy 1.15 and is an
    # error in recent releases; a tuple performs standard basic indexing.
    return array[tuple(slices)]
def mask_deformation(deformation, threshold_function = threshold_li):
    '''
    Mask a deformation stack using a threshold function

    @param deformation: Deformation stack to mask (components on the first axis)
    @param threshold_function: Function that returns a threshold value for an image
    @return Copy of the deformation with pixels below threshold in every
            component set to NaN
    '''
    # np.bool was a deprecated alias for the builtin bool and was removed in
    # NumPy 1.24; use the builtin instead.
    mask = np.zeros_like(deformation, dtype=bool)
    for i in range(deformation.shape[0]):
        thresh = threshold_function(np.abs(deformation[i,:,:]))
        mask[i, np.abs(deformation[i,:,:]) < thresh] = True
    # A pixel is masked only if it is below threshold in all components.
    mask = np.all(mask, axis=0)
    deformation_masked = deformation.copy()
    deformation_masked[:,mask] = np.nan
    return deformation_masked
def calc_bounding_box(image, threshold_function = threshold_li):
    '''
    Calculate the bounding box around the bright content of an image

    @param image: Input image
    @param threshold_function: Function used to pick the cutoff value
    @return Extents of a bounding box around the content
            (x_min, x_max, y_min, y_max)
    '''
    cutoff = threshold_function(image)
    binary = np.where(image < cutoff, 0, 1)
    return retrieve_bounds(binary)
def retrieve_bounds(thresh_image):
    """
    Retrieve the bounds of a thresholded image.

    @param thresh_image: Image filled with ones (valid) and zeros (invalid)
    @return: Extents of a rectangle around valid data
             (x_start, x_end, y_start, y_end); end indices are exclusive
    """
    col_has_data = np.any(thresh_image, axis=0)
    row_has_data = np.any(thresh_image, axis=1)
    x_start = np.argmax(col_has_data)
    x_end = len(col_has_data) - np.argmax(col_has_data[::-1])
    y_start = np.argmax(row_has_data)
    y_end = len(row_has_data) - np.argmax(row_has_data[::-1])
    return x_start, x_end, y_start, y_end
def crop_nans(image):
    """
    Shrink an image to the rectangle of non-NaN data.

    @param image: Input image
    @return: Image cropped around valid (non-NaN) data
    """
    valid = ~np.isnan(image)
    x_lo, x_hi, y_lo, y_hi = retrieve_bounds(valid)
    return image[y_lo:y_hi, x_lo:x_hi]
def determine_deformation_bounding_box(deformations, largest_box=True, **kwargs):
    '''
    Calculate the extent of the deformation in image coordinates

    @param deformations: Input deformations, one 2-d image per component
                         (components indexed along the first axis)
    @param largest_box: If True, return a box that encompasses the selected
                        values of every component; otherwise return the
                        intersection of the per-component boxes
    @param kwargs: Any additional keyword arguments passed to calc_bounding_box
    @return Extents of the deformations (x_min, x_max, y_min, y_max)
    '''
    # Generalized from a hard-coded 3 components to any number of leading
    # components; behavior is unchanged for the original 3-component inputs.
    bounds = np.stack([calc_bounding_box(np.abs(deformations[i, :, :]), **kwargs)
                       for i in range(deformations.shape[0])])
    if largest_box:
        return np.min(bounds[:, 0]), np.max(bounds[:, 1]), np.min(bounds[:, 2]), np.max(bounds[:, 3])
    else:
        return np.max(bounds[:, 0]), np.min(bounds[:, 1]), np.max(bounds[:, 2]), np.min(bounds[:, 3])
def determine_x_y_bounds(deformations, x_array, y_array, offset=5000, **kwargs):
    '''
    Determine the x and y coordinates of the extent of the deformation

    @param deformations: Input deformations
    @param x_array: x coordinates
    @param y_array: y coordinates
    @param offset: Size to extend the extents of the box
    @param kwargs: Any additional keyword arguments passed to
                   determine_deformation_bounding_box
    @return Extents of the deformation plus the offset
            (x_min, x_max, y_min, y_max)
    '''
    box = determine_deformation_bounding_box(deformations, **kwargs)
    x_start, x_end = x_array[box[2:], box[:2]]
    y_start, y_end = y_array[box[2:], box[:2]]
    # y grids often decrease with row index, so order the pair explicitly.
    if y_start > y_end:
        y_start, y_end = y_end, y_start
    return x_start - offset, x_end + offset, y_start - offset, y_end + offset
def generate_interferogram_from_deformation(track_angle,
                                            min_ground_range_1,
                                            height_1,
                                            is_right_looking,
                                            wavelength,
                                            k,
                                            deformation,
                                            xx, yy,
                                            projected_topography=None,
                                            min_ground_range_2 = None,
                                            height_2 = None):
    '''
    Generate an interferogram from deformations
    @param track_angle: Satellite track angle (radian)
    @param min_ground_range_1: Minimum ground range to deformations for first pass
    @param height_1: Height of satellite for first pass
    @param is_right_looking: The satellite is looking to the right
    @param wavelength: Wavelength of the signal
    @param k: number of passes (1 or 2); NOTE: not used below —
        change_in_range_to_phase is called with its default k
    @param deformation: map of deformation (3 components on the first axis)
    @param xx: x coordinates of deformation
    @param yy: y coordinates of deformation
    @param projected_topography: Elevation data
    @param min_ground_range_2: Minimum ground range to deformations for second pass
    @param height_2: Height of satellite for second pass
    @return Inteferogram due to the deformations
    '''
    rad_track_angle = track_angle
    # Signed distance of each pixel from the satellite ground track.
    cross_track_distance = xx * np.cos(rad_track_angle) - yy * np.sin(rad_track_angle)
    if is_right_looking:
        phi = 2 * np.pi - track_angle
        cross_track_distance *= -1.
    else:
        phi = np.pi - track_angle
    # Horizontal deformation projected onto the cross-track (range) direction.
    cross_track_deformation = deformation[0,:,:].astype(np.float64) * np.cos(phi) + deformation[1,:,:].astype(np.float64) * np.sin(phi)
    # Second pass defaults to the first pass geometry.
    if height_2 is None:
        height_2 = height_1
    if min_ground_range_2 is None:
        min_ground_range_2 = min_ground_range_1
    if projected_topography is not None:
        corrected_height_1 = height_1 - projected_topography
        corrected_height_2 = height_2 - projected_topography
    else:
        corrected_height_1 = height_1
        corrected_height_2 = height_2
    # NOTE(review): when projected_topography is None and the caller passes
    # height_2 as an ndarray, this in-place subtraction mutates the caller's
    # array — confirm that is intended.
    corrected_height_2 -= deformation[2,:,:].astype(np.float64)
    # Shift so the nearest pixel sits at zero cross-track distance.
    cross_track_distance -= cross_track_distance.min()
    ground_range_1 = cross_track_distance + min_ground_range_1
    ground_range_2 = cross_track_distance + min_ground_range_2 + cross_track_deformation
    slant_range_1 = np.sqrt(corrected_height_1**2 + ground_range_1**2)
    slant_range_2 = np.sqrt(corrected_height_2**2 + ground_range_2**2)
    # Phase follows from the change in slant range between the two passes.
    phase = change_in_range_to_phase(slant_range_2 - slant_range_1, wavelength)
    return phase
def old_generate_interferogram_from_deformation(track_angle,
                                                min_ground_range,
                                                height,
                                                is_right_looking,
                                                wavelength,
                                                k,
                                                deformation,
                                                xx, yy,
                                                projected_topography=None):
    '''
    Generate an interferogram from deformations (superseded by
    generate_interferogram_from_deformation above)
    @param track_angle: Satellite track angle (radian)
    @param min_ground_range: Minimum ground range to deformations
    @param height: Height of satellite
    @param is_right_looking: The satellite is looking to the right
    @param wavelength: Wavelength of the signal
    @param k: number of passes (1 or 2)
    @param deformation: map of deformation (3 components on the first axis)
    @param xx: x coordinates of deformation
    @param yy: y coordinates of deformation
    @param projected_topography: Elevation data
    @return Inteferogram due to the deformations
    '''
    rad_track_angle = track_angle
    # Signed distance of each pixel from the satellite ground track.
    cross_track_distance = xx * np.cos(rad_track_angle) - yy * np.sin(rad_track_angle)
    if is_right_looking:
        phi = 2 * np.pi - track_angle
        cross_track_distance *= -1.
    else:
        phi = np.pi - track_angle
    if projected_topography is not None:
        heights = height - projected_topography
    else:
        heights = height
    cross_track_distance -= cross_track_distance.min()
    ground_range = cross_track_distance + min_ground_range
    rad_look_angle = np.arctan2(ground_range, heights)
    # Spherical-to-Cartesian unit look vectors (ground toward satellite).
    theta = np.pi - rad_look_angle
    x = np.sin(theta) * np.cos(phi)
    y = np.sin(theta) * np.sin(phi)
    z = np.cos(theta)
    look_vectors = np.stack([x, y, z])
    # Project the 3-component deformation onto the line of sight and
    # convert directly to phase.
    los_deformation = np.sum(look_vectors * deformation, axis=0)
    phase = 2. * np.pi * k * los_deformation / wavelength
    return phase
def change_in_range_to_phase(los_deformation, wavelength, k=2):
    '''
    Compute interferometric phase from a change in range

    @param los_deformation: Change in distance along the line of sight
    @param wavelength: Wavelength of the radar
    @param k: Number of passes
    @return Phase due to the change in range (negative for motion away
            from the satellite)
    '''
    return -2. * np.pi * k * los_deformation / wavelength
def phase_to_change_in_range(phase, wavelength, k=2):
    '''
    Compute change in range from interferometric phase

    @param phase: Input phase
    @param wavelength: Wavelength of the radar
    @param k: Number of passes
    @return Change in range along the line of sight
    '''
    cycles_per_range = 2 * np.pi * k
    return -phase * wavelength / cycles_per_range
| StarcoderdataPython |
12840686 | <reponame>jmzhao/CS704-asn3<filename>pdr/__init__.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
@author: jmzhao
"""
from pdr import *
import test | StarcoderdataPython |
3440789 | <gh_stars>0
def peakfinder(data, time, xbin):
    """Find significant local maxima in sufficiently smoothed data.

    A sample is a peak when it exceeds both immediate neighbours and both
    neighbours xbin samples away; choose xbin by eyeballing the data so that
    only statistically significant peaks pass.

    Fixes over the original version: indices near the edges no longer wrap
    around via negative indexing or run past the end of the array, and the
    returned indices correspond one-to-one with the returned peak values
    (duplicate values elsewhere in the data are no longer mis-reported).

    @param data: sequence of (smoothed) sample values
    @param time: sequence of sample times (only its length is used)
    @param xbin: half-width, in samples, of the significance window
    @return (peaks, indx): peak values and their indices in data
    """
    indx = []
    # Stay inside both the original loop bound and the window bounds.
    stop = min(len(time) - 1, len(data) - xbin)
    for i in range(xbin, stop):
        if (data[i] > data[i + 1] and data[i] > data[i - 1]
                and data[i] > data[i + xbin] and data[i] > data[i - xbin]):
            indx.append(i)
    peaks = [data[i] for i in indx]
    return peaks, indx
'''
This simple peakfinder algorithm works on sufficiently smoothed data, although
parameters can be altered to make it work for different data sets.
The rather cumbersome "if" condition finds all local maxima, and then filters
for maxima within the set bin, which can be chosen by eyeballing data to figure out
which peaks are statistically significant and estimating the necessary width.
Returns: peaks: peak values, indx: peak indices
''' | StarcoderdataPython |
1797615 | from fastapi import HTTPException
class RequestValidationError(HTTPException):
    """HTTP 422 error carrying a FastAPI-style validation detail payload."""

    def __init__(self, loc, msg, typ):
        detail = [{'loc': loc, 'msg': msg, 'type': typ}]
        super().__init__(422, detail)
5166624 | # static analysis: ignore
from .test_node_visitor import skip_before
from .test_name_check_visitor import TestNameCheckVisitorBase
class TestPatma(TestNameCheckVisitorBase):
    """Checks the checker's type narrowing for PEP 634 structural pattern matching.

    Each triple-quoted snippet below is analyzed by ``self.assert_passes``;
    ``assert_is_value`` pins the value inferred for a name at that point, and
    ``# E: <code>`` comments mark lines where an error is expected.  All tests
    are gated on Python 3.10+, where ``match`` statements were introduced.
    """

    # Narrowing on the literal singletons True/False/None.
    @skip_before((3, 10))
    def test_singletons(self):
        self.assert_passes(
            """
            from typing import Literal
            def capybara(x: Literal[True, False, None]):
                match x:
                    case True:
                        assert_is_value(x, KnownValue(True))
                    case _:
                        assert_is_value(x, KnownValue(False) | KnownValue(None))
            """
        )

    # Value patterns, impossible patterns, and guard-based narrowing.
    @skip_before((3, 10))
    def test_value(self):
        self.assert_passes(
            """
            from typing import Literal
            from pyanalyze.tests import assert_never
            def capybara(x: int):
                match x:
                    case None: # E: impossible_pattern
                        assert_never(x)
                    case "x": # E: impossible_pattern
                        assert_is_value(x, NO_RETURN_VALUE)
                        assert_never(x)
                    case 3:
                        assert_is_value(x, KnownValue(3))
                    case _ if x == 4:
                        assert_is_value(x, KnownValue(4))
                    case _:
                        assert_is_value(x, TypedValue(int))
            """
        )

    # Sequence patterns, including star captures.
    @skip_before((3, 10))
    def test_sequence(self):
        self.assert_passes(
            """
            import collections.abc
            from typing import Tuple
            def capybara(seq: Tuple[int, ...], obj: object):
                match seq:
                    case [1, 2, 3]:
                        assert_is_value(
                            seq,
                            SequenceIncompleteValue(
                                tuple,
                                [TypedValue(int), TypedValue(int), TypedValue(int)]
                            )
                        )
                    case [1, *x]:
                        assert_is_value(x, GenericValue(list, [TypedValue(int)]))
                match obj:
                    case [*x]:
                        assert_is_value(
                            obj,
                            TypedValue(collections.abc.Sequence),
                            skip_annotated=True
                        )
                        assert_is_value(
                            x,
                            GenericValue(list, [AnyValue(AnySource.generic_argument)])
                        )
                assert_is_value(seq[0], TypedValue(int))
                match seq[0]:
                    case [1, 2, 3]: # E: impossible_pattern
                        pass
            """
        )

    # Or-patterns, including captures shared across the alternatives.
    @skip_before((3, 10))
    def test_or(self):
        self.assert_passes(
            """
            import collections.abc
            from typing import Tuple
            def capybara(obj: object):
                match obj:
                    case 1 | 2:
                        assert_is_value(obj, KnownValue(1) | KnownValue(2))
                    case (3 as x) | (4 as x):
                        assert_is_value(x, KnownValue(3) | KnownValue(4))
            """
        )

    # Mapping patterns, including the **rest capture.
    @skip_before((3, 10))
    def test_mapping(self):
        self.assert_passes(
            """
            import collections.abc
            from typing import Tuple
            def capybara(obj: object):
                match {1: 2, 3: 4, 5: 6}:
                    case {1: x}:
                        assert_is_value(x, KnownValue(2))
                    case {3: 4, **x}:
                        assert_is_value(x, DictIncompleteValue(
                            dict, [
                                KVPair(KnownValue(1), KnownValue(2)),
                                KVPair(KnownValue(5), KnownValue(6)),
                            ]
                        ))
            """
        )

    # Class patterns: positional sub-patterns require __match_args__.
    @skip_before((3, 10))
    def test_class_pattern(self):
        self.assert_passes(
            """
            import collections.abc
            from typing import Tuple
            class NotMatchable:
                x: str
            class MatchArgs:
                __match_args__ = ("x", "y")
                x: str
                y: int
            def capybara(obj: object):
                match obj:
                    case int(1, 2): # E: bad_match
                        pass
                    case int(2):
                        assert_is_value(obj, KnownValue(2))
                    case int("x"): # E: impossible_pattern
                        pass
                    case int():
                        assert_is_value(obj, TypedValue(int))
                    case NotMatchable(x="x"):
                        pass
                    case NotMatchable("x"): # E: bad_match
                        pass
                    case NotMatchable():
                        pass
                    case MatchArgs("x", 1 as y):
                        assert_is_value(y, KnownValue(1))
                    case MatchArgs(x) if x == "x":
                        assert_is_value(x, KnownValue("x"))
                    case MatchArgs(x):
                        assert_is_value(x, TypedValue(str))
                    case MatchArgs("x", x="x"): # E: bad_match
                        pass
                    case MatchArgs(1, 2, 3): # E: bad_match
                        pass
            """
        )

    # After matching one bool literal, the fall-through narrows to the other.
    @skip_before((3, 10))
    def test_bool_narrowing(self):
        self.assert_passes(
            """
            class X:
                true = True
            def capybara(b: bool):
                match b:
                    # Make sure we hit the MatchValue case, not MatchSingleton
                    case X.true:
                        assert_is_value(b, KnownValue(True))
                    case _ as b2:
                        assert_is_value(b, KnownValue(False))
                        assert_is_value(b2, KnownValue(False))
            """
        )
        self.assert_passes(
            """
            def capybara(b: bool):
                match b:
                    case True:
                        assert_is_value(b, KnownValue(True))
                    case _ as b2:
                        assert_is_value(b, KnownValue(False))
                        assert_is_value(b2, KnownValue(False))
            """
        )

    # Enum members are eliminated one by one as cases match.
    @skip_before((3, 10))
    def test_enum_narrowing(self):
        self.assert_passes(
            """
            from enum import Enum
            class Planet(Enum):
                mercury = 1
                venus = 2
                earth = 3
            def capybara(p: Planet):
                match p:
                    case Planet.mercury:
                        assert_is_value(p, KnownValue(Planet.mercury))
                    case Planet.venus:
                        assert_is_value(p, KnownValue(Planet.venus))
                    case _ as p2:
                        assert_is_value(p2, KnownValue(Planet.earth))
                        assert_is_value(p, KnownValue(Planet.earth))
            """
        )
| StarcoderdataPython |
6418976 | <gh_stars>0
from typing import Any, Union, Dict, Mapping
from pathlib import Path
from ruamel.yaml import YAML
import pickle
import h5py
import numpy as np
from numbers import Number
# Accepts both plain strings and pathlib.Path objects for file arguments.
PathLike = Union[str, Path]
# Module-wide parser; typ='safe' avoids constructing arbitrary Python objects.
yaml = YAML(typ='safe')
def read_yaml(fname: Union[str, Path]) -> Any:
    """Parse the given file as YAML and return the resulting object.

    Parameters
    ----------
    fname : str or Path
        The file to read.

    Returns
    -------
    Any
        Whatever the module-level YAML parser produces.
    """
    with open(fname, 'r') as stream:
        return yaml.load(stream)
def write_yaml(fname: Union[str, Path], obj: object, mkdir: bool = True) -> None:
    """Serialize *obj* to *fname* in YAML format.

    Parameters
    ----------
    fname : Union[str, Path]
        The destination file name.
    obj : object
        The object to write.
    mkdir : bool
        If True, create missing parent directories first.
    """
    fpath = Path(fname) if isinstance(fname, str) else fname
    if mkdir:
        fpath.parent.mkdir(parents=True, exist_ok=True)
    with open(fpath, 'w') as stream:
        yaml.dump(obj, stream)
def get_full_name(name: str, prefix: str = '', suffix: str = ''):
    """Join an optional prefix and suffix onto a base name with underscores.

    Parameters
    ----------
    name: str
        the base name.
    prefix: str
        optional prefix (default='')
    suffix: str
        optional suffix (default='')

    Returns
    -------
    full_name: str
        the full name, e.g. ``prefix_name_suffix``
    """
    parts = []
    if prefix:
        parts.append(prefix)
    parts.append(name)
    if suffix:
        parts.append(suffix)
    return '_'.join(parts)
def read_pickle(fname: Union[str, Path]) -> Any:
    """Deserialize and return the object stored in *fname* via pickle.

    Parameters
    ----------
    fname : str or Path
        The file to read.

    Returns
    -------
    Any
        The unpickled object.
    """
    with open(fname, 'rb') as stream:
        return pickle.load(stream)
def write_pickle(fname: Union[str, Path], obj: object, mkdir: bool = True) -> None:
    """Serialize *obj* to *fname* in pickle format.

    Parameters
    ----------
    fname : Union[str, Path]
        The destination file name.
    obj : object
        The object to write.
    mkdir : bool
        If True, create missing parent directories first.
    """
    fpath = Path(fname) if isinstance(fname, str) else fname
    if mkdir:
        fpath.parent.mkdir(parents=True, exist_ok=True)
    with open(fpath, 'wb') as stream:
        pickle.dump(obj, stream)
def read_hdf5(fpath: PathLike) -> Dict[str, Any]:
    """Load an HDF5 file into a nested dict of numpy arrays.

    Groups become nested dicts; datasets become numpy arrays.  Raises
    ValueError for any other node type.
    """
    def _group_to_dict(group: h5py.Group) -> Dict[str, Any]:
        out = {}
        for key, node in group.items():
            if isinstance(node, h5py.Dataset):
                out[key] = np.array(node)
            elif isinstance(node, h5py.Group):
                out[key] = _group_to_dict(node)
            else:
                raise ValueError(f'Does not support type {type(node)}')
        return out

    with h5py.File(fpath, 'r') as handle:
        return _group_to_dict(handle)
def write_hdf5(data_dict: Mapping[str, Any], fpath: PathLike) -> None:
    """Write a (possibly nested) mapping of numpy arrays/numbers to an HDF5 file.

    Nested dicts become HDF5 groups; arrays and plain numbers become
    datasets.  Any other value type raises ValueError.
    """
    def _save_as_hdf5_helper(obj: Mapping[str, Union[Mapping, np.ndarray]], root: h5py.File):
        # Recursively mirror the dict structure into the open HDF5 handle.
        for k, v in obj.items():
            if isinstance(v, np.ndarray):
                root.create_dataset(name=k, data=v)
            elif isinstance(v, dict):
                # NOTE(review): only plain dicts recurse here; other Mapping
                # implementations fall through to ValueError despite the type
                # hint — confirm whether that is intended.
                grp = root.create_group(name=k)
                _save_as_hdf5_helper(v, grp)
            elif isinstance(v, Number):
                root.create_dataset(name=k, data=v)
            else:
                raise ValueError(f'Does not support type {type(v)}')
    with h5py.File(fpath, 'w') as root:
        _save_as_hdf5_helper(data_dict, root)
class HDF5Error(Exception):
    """Module-specific exception for HDF5 I/O failures (not raised in the visible code)."""
    pass
| StarcoderdataPython |
4811519 | <reponame>SuviVappula/hauki
# Generated by Django 3.1.2 on 2020-11-03 08:07
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.2 on 2020-11-03. Prefer creating a new
    # migration over editing this one once it has been applied anywhere.

    dependencies = [
        ("hours", "0003_add_is_public_to_resource"),
    ]
    operations = [
        # Loosen the four translated `name` columns on Resource (and its
        # django-simple-history shadow model HistoricalResource) to allow
        # blank/null values, and make ResourceOrigin.resource a cascading
        # foreign key exposed as `resource.origins`.
        migrations.AlterField(
            model_name="historicalresource",
            name="name",
            field=models.CharField(
                blank=True, max_length=255, null=True, verbose_name="Name"
            ),
        ),
        migrations.AlterField(
            model_name="historicalresource",
            name="name_en",
            field=models.CharField(
                blank=True, max_length=255, null=True, verbose_name="Name"
            ),
        ),
        migrations.AlterField(
            model_name="historicalresource",
            name="name_fi",
            field=models.CharField(
                blank=True, max_length=255, null=True, verbose_name="Name"
            ),
        ),
        migrations.AlterField(
            model_name="historicalresource",
            name="name_sv",
            field=models.CharField(
                blank=True, max_length=255, null=True, verbose_name="Name"
            ),
        ),
        migrations.AlterField(
            model_name="resource",
            name="name",
            field=models.CharField(
                blank=True, max_length=255, null=True, verbose_name="Name"
            ),
        ),
        migrations.AlterField(
            model_name="resource",
            name="name_en",
            field=models.CharField(
                blank=True, max_length=255, null=True, verbose_name="Name"
            ),
        ),
        migrations.AlterField(
            model_name="resource",
            name="name_fi",
            field=models.CharField(
                blank=True, max_length=255, null=True, verbose_name="Name"
            ),
        ),
        migrations.AlterField(
            model_name="resource",
            name="name_sv",
            field=models.CharField(
                blank=True, max_length=255, null=True, verbose_name="Name"
            ),
        ),
        migrations.AlterField(
            model_name="resourceorigin",
            name="resource",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="origins",
                to="hours.resource",
            ),
        ),
    ]
| StarcoderdataPython |
1652768 | import numpy as np
# import torch
from PIL import Image
import matplotlib.pyplot as plt
from functools import reduce
A = np.identity(4)
A
P = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
P
Q = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
Q
B = np.arange(16).reshape((4, 4))
B
B
np.dot(B, P)
np.dot(P, np.dot(B, P))
def blockMatrix(blocks):
    """Create a block 0-1 matrix.

    param blocks: list of non-negative integers giving the block sizes.
    A block size of 0 contributes a single all-zero row/column on the
    main diagonal.
    """
    sizes = np.array(blocks).astype('int64')
    # Width each block occupies: a 0-sized block still takes one row/column.
    widths = [b if b > 0 else 1 for b in sizes]
    total = int(np.sum(widths))
    out = np.zeros((total, total))
    offset = 0
    for b, w in zip(sizes, widths):
        if b > 0:
            out[offset:offset + b, offset:offset + b] = np.ones((b, b))
        offset += w
    return out
def permutationMatrix(ls):
    """Return a permutation matrix of size len(ls)^2.

    param ls: a reordering of range(len(ls)), defining the permutation on
    the ROWS: np.dot(P, A) rearranges the rows of A according to ls.
    To permute the columns of A use Q = np.transpose(P) and np.dot(A, Q).
    """
    n = len(ls)
    P = np.zeros((n, n))
    for row, col in enumerate(ls):
        P[row, col] = 1
    return P
def shuffleCopyMatrix(lins, louts, msize):
    """Return a matrix encoding switch-and-copy operations on rows.

    param msize: the size of the (square) matrix.
    param lins: row indices to be replaced.
    param louts: rows that replace the ones listed in lins.
    lins and louts must have the same length (otherwise the identity is
    returned unchanged) and contain indices within range(msize).
    The operations are applied to the identity matrix and the result is
    returned.
    """
    result = np.identity(msize)
    if len(lins) != len(louts):
        return result
    reference = np.identity(msize)
    for target, source in zip(lins, louts):
        result[target] = reference[source]
    return result
def scoreMatrix(n):
    """Build the upper-triangular score matrix.

    The assumption is that the true arrangement maximizes interaction close
    to the main diagonal; entries decay as exp(-distance) above the
    diagonal and are zero below it.

    param n: size of an n-by-n 2-d array.
    returns the score matrix used to weight any given n^2 matrix.
    """
    decay = np.exp(-np.arange(n))
    S = np.zeros((n, n))
    for row in range(n):
        S[row][row:] = decay[:n - row]
    return S
def score(A, S):
    """Return the score of matrix A: the sum of its elementwise product with score matrix S."""
    return np.sum(A * S)
def reindexMatrix(iss, jss, A):
    """Apply a partial index substitution to both rows and columns of A.

    iss and jss are equal-length lists of indices: index iss[i] is replaced
    with jss[i]; indices not listed are left unchanged.
    @return A new array B with B[i, j] = A[t[i], t[j]], where t is the
            remapped index list.
    """
    n = len(A)
    B = np.zeros_like(A)
    mapping = list(range(n))
    for i in range(len(iss)):
        mapping[iss[i]] = jss[i]
    # (leftover debug print of the mapping removed)
    for i in range(n):
        for j in range(n):
            B[i, j] = A[mapping[i], mapping[j]]
    return B
reindexMatrix([1, 5], [5, 1], np.arange(36).reshape((6, 6)))
X = permutationMatrix([0, 4, 3, 2, 5, 1])
Y = np.transpose(X)
S = shuffleCopyMatrix([1, 3], [0, 2], 4)
S
T = np.transpose(S)
T
R = shuffleCopyMatrix([0, 1, 2, 3], [2, 0, 3, 1], 4)
R
blockMatrix([2, 3])
blockMatrix([2, 0, 0, 3])
blocks = [1, 3, 0, 3]
np.random.shuffle(B)
Z = blockMatrix([10, 20, 0, 0, 10, 20, 30]).astype("int64")
Z
ZZ = 255.0 * Z
im = Image.fromarray(ZZ)
im.show()
plt.ion()
# plt.ioff()
plt.imshow(ZZ)
plt.imshow(im)
plt.matshow(ZZ)
plt.close()
# ls = [10,20,0,0,10,20,30]
l1 = [i for i in range(25)]
l2 = [i + 25 for i in range(27)]
l3 = [i + 25 + 27 for i in range(20)]
l4 = [i + 25 + 27 + 20 for i in range(20)]
l3b = l3.copy()
l3b.reverse()
l3b
ZZ.shape
# rows
PP1 = permutationMatrix(l3 + l1 + l2 + l4)
PP2 = permutationMatrix(l1 + l2 + l3b + l4)
PP3 = permutationMatrix(l1 + l3b + l2 + l4)
# columns
QQ1 = np.transpose(PP1) # then: np.dot(A,QQ).
QQ2 = np.transpose(PP2) # then: np.dot(A,QQ).
QQ3 = np.transpose(PP3) # then: np.dot(A,QQ).
ZZZ1 = np.dot(np.dot(PP1, ZZ), QQ1)
ZZZ2 = np.dot(np.dot(PP2, ZZ), QQ2)
ZZZ3 = np.dot(np.dot(PP3, ZZ), QQ3)
# plt.imshow(ZZZ)
# plt.imshow(ZZ)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True)
ax1.imshow(ZZ)
ax2.imshow(ZZZ)
fig, axs = plt.subplots(nrows=2, ncols=2)
fig.suptitle("original pattern and permutations")
axs[0, 0].imshow(ZZ)
axs[0, 0].set_title("original")
axs[0, 1].imshow(ZZZ1)
axs[0, 1].set_title("[52:73] moved to the start")
axs[1, 0].imshow(ZZZ2)
axs[1, 0].set_title("[52:73] reversed")
axs[1, 1].imshow(ZZZ3)
axs[1, 1].set_title("[52:73] moved to [25:52] and reversed")
plt.close()
sm = scoreMatrix(len(ZZ))
sm
score(ZZ, sm)
score(ZZZ1, sm)
score(ZZZ2, sm)
score(ZZZ3, sm)
def scorePair(iss, jss, refmat, scoremat):
    """Score the interaction of two index segments as if they were adjacent.

    Builds a matrix whose leading block is refmat restricted to the
    concatenated indices iss + jss, then scores it against scoremat.
    """
    combined = iss + jss
    size = len(combined)
    A = np.zeros_like(refmat)
    for row in range(size):
        for col in range(size):
            A[row, col] = refmat[combined[row], combined[col]]
    return score(A, scoremat)
[scorePair(l2, l4, ZZ, sm)]
np.argmax([1, 9, 0, 3])
# reassembl
cs = [l2, l4, l1, l3]
cs
while len(cs) > 1:
xs = cs.pop()
l = np.argmax([scorePair(xs, y, ZZ, sm) for y in cs])
sl = scorePair(xs, cs[l], ZZ, sm)
r = np.argmax([scorePair(y, xs, ZZ, sm) for y in cs])
sr = scorePair(cs[r], xs, ZZ, sm)
if sl > sr:
cs[l] = xs + cs[l]
else:
cs[r] = cs[r] + xs
print(l, sl, r, sr)
test = cs[0]
test == l1 + l2 + l3 + l4
def scorePair2(iss, jss, refmat):
    """Score only the cross-interaction between two index segments.

    Interaction inside each segment is ignored because it would not be
    affected by rearranging the segments; each cross term is weighted by
    exp(-|position difference|) within the concatenated ordering.
    """
    total = 0
    for pos_i, idx_i in enumerate(iss):
        for pos_j, idx_j in enumerate(jss):
            weight = np.exp(-np.abs(pos_i - pos_j))
            total += refmat[idx_i, idx_j] * weight
    return total
# reassembly 2
cs = [l2, l4, l1, l3]
cs
while len(cs) > 1:
xs = cs.pop()
l = np.argmax([scorePair2(xs, y, ZZ) for y in cs])
sl = scorePair2(xs, cs[l], ZZ)
r = np.argmax([scorePair2(y, xs, ZZ) for y in cs])
sr = scorePair2(cs[r], xs, ZZ)
if sl > sr:
cs[l] = xs + cs[l]
else:
cs[r] = cs[r] + xs
print(l, sl, r, sr)
test == l1 + l2 + l3 + l4
myblocks = [10, 15, 17, 19, 17, 15, 10]
mymatrix = blockMatrix(myblocks)
dmatrix = scoreMatrix(len(mymatrix))
dmatrix += np.transpose(dmatrix)
dmatrix -= np.identity(len(dmatrix))
plt.matshow(mymatrix)
plt.matshow(np.log10(dmatrix))
fig, axs = plt.subplots(nrows=1, ncols=2)
fig.suptitle("ideal distribution of 1s and 0s")
axs[0].imshow(dmatrix)
axs[0].set_title("original")
axs[1].imshow(np.log(dmatrix))
axs[1].set_title("log scale")
myblocks
# mysegments = [8, 19, 20, 21, 22, 13]
mysegments = [15, 29, 20, 21, 18]
np.cumsum(myblocks)
np.cumsum(mysegments)
# and the corrsponding indices are:
def articulate(l):
    """l is a list of positive integers.

    returns the implied articulation: a list of 1-d index arrays such that
    the first holds 0 .. l[0]-1, the second holds l[0] .. l[0]+l[1]-1, etc.
    """
    segments = []
    offsets = np.cumsum([0] + l)
    for pos, length in enumerate(l):
        segment = np.arange(length).astype('uint64') + offsets[pos]
        segments.append(segment)
    return segments
# the blocks, explicitly indexed
articulate(myblocks)
temp = articulate(myblocks)
reduce(lambda x, y: x + list(y), temp, [])
# the original segments
mysegments
articulate(mysegments)
np.nancumsum(myblocks)
np.cumsum(mysegments)
# shuffle the order of the segments:
newOrder = np.random.permutation(len(mysegments))
newOrder
temp = articulate(mysegments)
temp
reindexlist = [temp[newOrder[i]] for i in newOrder]
reindexlist
reindexlist
# we shuffled the order, now lets reverse a few of the segments:
for i in [1, 4]:
reindexlist[i] = np.flip(reindexlist[i])
reindexlist
reindexing = reduce(lambda x, y: x + list(y), reindexlist, [])
reindexing
# now lets see the original matrix and the matrix after the transformation:
newmatrix = np.zeros_like(mymatrix)
for i in range(len(mymatrix)):
for j in range(len(mymatrix)):
newmatrix[i, j] = mymatrix[reindexing[i], reindexing[j]]
fig, axs = plt.subplots(nrows=1, ncols=2)
fig.suptitle("original block matrix anb its transformation")
axs[0].imshow(mymatrix)
# axs[0].set_title('')
axs[1].imshow(newmatrix)
# axs[1].set_title('')
# what we have to work with is newmatrix, as well as a list of the segments
# in their shuffled order, not the orignal of course.
newsegments = [mysegments[newOrder[i]] for i in range(len(newOrder))]
newsegments
# so we need to reshuffle the segments
newsegments
# so that eventually they will be order like that (after re-indexing)
mysegments
# and some of the new segments we'll have to reverse as well
# can we do that?
def scorePair3(iss, jss, refmat, lreverse=False, rreverse=False):
    """iss, jss must be lists of segments of the index range of refmat,
    our reference matrix.
    Returns the interaction score of iss and jss as if the matrix were
    reindexed so that the two segments are adjacent to each other;
    lreverse/rreverse score the left/right segment as if reversed.
    """
    s = 0
    temp = 0
    for i in range(len(iss)):
        for j in range(len(jss)):
            x = i
            y = j
            if lreverse:
                # NOTE(review): iss[-1] is an index *value* while i is a
                # position within the segment; mixing them only reverses
                # correctly when iss == [0..len(iss)-1]. A later comment in
                # this file ("this is the problem?") suggests this reversal
                # logic is suspect — verify before relying on reversed scores.
                x = iss[-1] - i
            if rreverse:
                y = jss[-1] - j
            # temp = np.exp(-np.abs(i-j))
            temp = np.exp(-np.abs(x - y))
            # we only care about interaction between the 2 segments and not
            # inside each one of them which wouldn't be affected by
            # rearrangement.
            s += refmat[iss[i], jss[j]] * temp
    return s
cs = articulate(newsegments)
cs = [list(x) for x in cs]
cs
xyz = np.zeros_like(newmatrix)
l = cs[0] + cs[1]
for i in l:
for j in l:
xyz[i, j] = newmatrix[i, j]
plt.imshow(xyz)
xyz = np.zeros_like(newmatrix)
l = cs[5]
for i in l:
for j in l:
xyz[i - l[0], j - l[0]] = newmatrix[i, j]
plt.imshow(xyz)
xyz = np.zeros_like(newmatrix)
l = cs[0] + cs[3]
# l = cs[0] + np.flip(cs[3])
for i in range(len(l)):
for j in range(len(l)):
xyz[i, j] = newmatrix[l[i], l[j]]
print(scorePair2(cs[0], cs[3], mymatrix))
print(scorePair2(cs[0], cs[3], newmatrix))
print(scorePair2(np.flip(cs[0]), cs[3], newmatrix)) # this is the problem?
plt.imshow(xyz)
plt.imshow(mymatrix)
for i in cs:
for j in cs:
# print(scorePair2(i,j, newmatrix))
print(scorePair2(i, j, mymatrix))
reconstructionMatrix = np.zeros_like(newmatrix)
while len(cs) > 1:
xs = cs.pop()
print(xs)
xsrev = xs.copy()
xsrev.reverse()
newmatrixrev = reindexMatrix(xs, xsrev, newmatrix)
l = np.argmax([scorePair2(xs, y, newmatrix) for y in cs])
sl = scorePair2(xs, cs[l], newmatrix)
lrev = np.argmax(
# [scorePair2(xsrev, y, newmatrix) for y in cs]
[scorePair2(xs, y, newmatrixrev) for y in cs]
)
# slrev = scorePair2(xsrev, cs[l], newmatrix)
slrev = scorePair2(xs, cs[lrev], newmatrixrev)
r = np.argmax([scorePair2(y, xs, newmatrix) for y in cs])
sr = scorePair2(cs[r], xs, newmatrix)
rrev = np.argmax(
# [scorePair2(y, xsrev, newmatrix) for y in cs]
[scorePair2(y, xs, newmatrixrev) for y in cs]
)
# srrev = scorePair2(cs[r], xsrev, newmatrix)
srrev = scorePair2(cs[rrev], xs, newmatrixrev)
iascores = [sl, slrev, sr, srrev]
candidates = [cs[l], cs[lrev], cs[r], cs[rrev]]
maxscore = np.max(iascores)
if maxscore == sl:
cs[l] = xs + cs[l]
elif maxscore == sr:
cs[r] = cs[r] + xs
elif maxscore == lrev:
cs[lrev] = xsrev + cs[lrev]
else:
cs[rrev] = cs[rrev] + xsrev
# reconstruction of the matrix
reconstructionMatrix = np.zeros_like(newmatrix)
myindices = cs[0]
myindices
n = len(newmatrix)
for i in range(n):
for j in range(n):
reconstructionMatrix[i, j] = newmatrix[myindices[i], myindices[j]]
# reconstructionMatrix[myindices[i],myindices[j]] = newmatrix[i, j]
plt.imshow(newmatrix)
plt.imshow(reconstructionMatrix)
#### new try
reconstructionMatrix = np.zeros_like(newmatrix)
reconstructionMatrix = newmatrix.copy()
while len(cs) > 1:
xs = cs.pop()
print(xs)
xsrev = xs.copy()
xsrev.reverse()
reconstructionMatrixrev = reindexMatrix(xs, xsrev, reconstructionMatrix)
l = np.argmax([scorePair3(xs, y, reconstructionMatrix) for y in cs])
sl = scorePair3(xs, cs[l], reconstructionMatrix)
lrev = np.argmax(
# [scorePair2(xsrev, y, reconstructionMatrix) for y in cs]
# [scorePair2(xs, y, reconstructionMatrixrev) for y in cs]
[scorePair3(xs, y, reconstructionMatrix, lreverse=True) for y in cs]
)
# slrev = scorePair2(xsrev, cs[l], reconstructionMatrix)
slrev = scorePair3(xs, cs[lrev], reconstructionMatrix, lreverse=True)
r = np.argmax([scorePair3(y, xs, reconstructionMatrix) for y in cs])
sr = scorePair3(cs[r], xs, reconstructionMatrix)
rrev = np.argmax(
# [scorePair2(y, xsrev, reconstructionMatrix) for y in cs]
[scorePair3(y, xs, reconstructionMatrix, rreverse=True) for y in cs]
)
# srrev = scorePair2(cs[r], xsrev, reconstructionMatrix)
srrev = scorePair3(cs[rrev], xs, reconstructionMatrix, rreverse=True)
iascores = [sl, slrev, sr, srrev]
candidates = [cs[l], cs[lrev], cs[r], cs[rrev]]
maxscore = np.max(iascores)
if maxscore == sl:
cs[l] = xs + cs[l]
elif maxscore == sr:
cs[r] = cs[r] + xs
elif maxscore == lrev:
reconstructionMatrix = reindexMatrix(xs, xsrev, reconstructionMatrix)
# cs[lrev] = xsrev + cs[lrev]
cs[lrev] = xs + cs[lrev]
else:
reconstructionMatrix = reindexMatrix(xs, xsrev, reconstructionMatrix)
# cs[rrev] = cs[rrev] + xsrev
cs[rrev] = cs[rrev] + xs
n = len(newmatrix)
temp = np.zeros_like(newmatrix)
for i in range(n):
for j in range(n):
temp[i, j] = reconstructionMatrix[myindices[i], myindices[j]]
# reconstructionMatrix[myindices[i],myindices[j]] = newmatrix[i, j]
######
plt.imshow(newmatrix)
cs = articulate(newsegments)
cs = [list(x) for x in cs]
cs
mysegments
newsegments
def improve(xss, yss, A):
    # Stub for a planned local-improvement pass over two orderings given the
    # similarity matrix A; not implemented yet.
    pass
| StarcoderdataPython |
1748202 | from distutils.core import setup
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setup(
  name = 'et2adem',  # distribution name on PyPI, must match the package folder
  long_description = long_description,
  long_description_content_type = "text/markdown",
  packages = ['et2adem'],  # importable package(s) shipped by this distribution
  package_dir={'et2adem': 'et2adem'},
  version = '0.5',  # NOTE(review): download_url below points at tag v_02 — confirm it matches this version
  license='MIT',
  description = 'A more fun version of tqdm',
  author = '<NAME>',  # placeholder left by an anonymization step
  author_email = '<EMAIL>',  # placeholder left by an anonymization step
  url = 'https://github.com/mabdelhack/et2adem',
  download_url = 'https://github.com/mabdelhack/et2adem/archive/refs/tags/v_02.tar.gz',
  keywords = ['tqdm', 'fun'],  # PyPI search keywords
  package_data={'et2adem': ['data/*.wav']},  # bundled sound files used at runtime
  install_requires=[
          'tqdm',
          'playsound==1.2.2',
      ],
  classifiers=[
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'Topic :: Software Development :: Build Tools',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
  ],
)
6524968 | <filename>deep_rl/component/memory_lineworld.py<gh_stars>0
import numpy as np
import gym
from gym import spaces
import matplotlib.pyplot as plt
import random
from PIL import Image, ImageDraw, ImageFont
class MemoryLineWorld(gym.Env):
    """Memory line-world task.

    The agent starts in the middle of a 1-D chain of cells.  A colour cue
    (blue or green) is observable only while standing on cell 0; the rightmost
    cell is the "choose" cell, where the agent must press the button matching
    the remembered colour.

    Actions: 0 - left, 1 - right, 2 - click blue, 3 - click green.
    Observation: one-hot location (``size`` entries) plus two cue bits.
    """

    def __init__(self, size=5, p=0, horizon=100):
        """
        Args:
            size: number of cells in the chain.
            p: probability that the chosen action is replaced by a random one.
            horizon: maximum number of steps per episode.
        """
        self.updates = 0  # steps taken in the current episode
        self.size = size  # size of the chain
        self.loc = (self.size - 1) // 2  # Start at the middle
        self.action_space = spaces.Discrete(4)  # 0 - left, 1 - right, 2 - click blue, 3 - click green
        # One-hot location plus two cue bits, hence size + 2 entries.
        self.observation_space = spaces.Box(low=np.zeros(size + 2), high=np.ones(size + 2), dtype=np.uint8)
        self.p = p  # stochasticity
        self.horizon = horizon
        # Colour the agent has to remember for this episode.
        self.color = random.choice(['blue', 'green'])
        self.color2val = {'blue': 0, 'green': 1}
        # Reward values
        self.step_reward = -1
        self.correct_reward = 10
        self.wrong_reward = -10

    def reset(self):
        """Start a new episode: recenter the agent and redraw the cue colour."""
        self.updates = 0
        self.loc = (self.size - 1) // 2  # Start at the middle
        self.color = random.choice(['blue', 'green'])
        return self._loc2state(self.loc)

    def _loc2state(self, loc):
        """One-hot encode *loc*, appending the colour cue bit when on cell 0."""
        state = np.zeros(self.observation_space.shape[0])
        state[loc] = 1
        # NOTE(review): tests self.loc rather than the loc argument — identical
        # at current call sites, which always pass self.loc.
        if self.loc == 0:
            state[self.size + self.color2val[self.color]] = 1
        return state

    def step(self, action):
        # Making sure a valid action is chosen
        assert self.action_space.contains(action)
        self.updates += 1
        # With probability p the chosen action is replaced by a random one.
        if np.random.rand() <= self.p:
            action = np.random.randint(low=0, high=4)
        done = False
        # Stepping along the chain
        if action == 0:
            self.loc = self.loc - 1
        elif action == 1:
            self.loc = self.loc + 1
        # Clip at the walls
        if self.loc < 0:
            self.loc = 0
        elif self.loc >= self.size:
            self.loc = self.size - 1
        # Checking for termination
        R = self.step_reward
        if self.updates >= self.horizon:
            done = True
        # BUG FIX: both click actions (2 = blue, 3 = green) resolve the episode
        # at the choose cell; the original `action > 2` ignored clicks on blue.
        if (self.loc == self.size - 1) and (action >= 2):
            done = True
            if action - 2 == self.color2val[self.color]:
                R = self.correct_reward
            else:
                R = self.wrong_reward
        return self._loc2state(self.loc), R, done, {}

    def render(self, mode='human', printR=False, cs=100):
        '''Render the environment as a horizontal strip of cells.

        Red dot: agent.  Black circle: choose cell.  Coloured dot on cell 0:
        the cue colour.  Returns a PIL image for mode "rgb_array".
        '''
        self.cs = cs  # cell size in pixels
        h = self.size * cs
        w = cs
        img = Image.new('RGBA', (h, w), "white")
        draw = ImageDraw.Draw(img)
        # Grid lines between cells
        for i in range(1, self.size):
            draw.line([cs * i, 0, cs * i, h], fill="gray", width=10)
        for i in range(0, self.size):
            if i == self.size - 1:
                draw.ellipse([i * cs, 0, i * cs + cs, cs], fill="black")
            if i == 0:
                draw.ellipse([0, 0, cs / 2, cs / 2], fill=self.color)
            if i == self.loc:
                draw.ellipse([i * cs + cs / 3, cs / 3, i * cs + cs * 2 / 3, 2 * cs / 3], fill="red")
        plt.title('red: agent | black: choose state')
        if mode == "human":
            plt.imshow(img)
            plt.show()
        elif mode == "rgb_array":
            return img
| StarcoderdataPython |
3478243 | from typing import Any, Dict, List, Optional, Tuple
import emojis
# Internal rank id -> display name for alliance (fleet) membership ranks.
ALLIANCE_MEMBERSHIP: Dict[str, str] = {
    'Candidate': 'Candidate',
    'Ensign': 'Ensign',
    'Lieutenant': 'Lieutenant',
    'Major': 'Major',
    'Commander': 'Commander',
    'ViceAdmiral': 'Vice Admiral',
    'FleetAdmiral': 'Fleet Admiral'
}
# Ranks ordered highest-first, used for ordering/sorting by rank.
ALLIANCE_MEMBERSHIP_LOOKUP: List[str] = [
    'FleetAdmiral',
    'ViceAdmiral',
    'Commander',
    'Major',
    'Lieutenant',
    'Ensign',
    'Candidate',
    None  # Must always be the last element, for Tourney Data
]
# Collection skill id -> display name.
COLLECTION_PERK_LOOKUP: Dict[str, str] = {
    'BloodThirstSkill': 'Vampirism',
    'EmpSkill': 'EMP Discharge',
    'FreezeAttackSkill': 'Cryo Field',
    'InstantKillSkill': 'Headshot',
    'MedicalSkill': 'Combat Medic',
    'ResurrectSkill': 'Resurrection',
    'SharpShooterSkill': 'Sharpshooter'
}
# Currency/resource id -> emoji (or plain-text label where no emoji exists).
CURRENCY_EMOJI_LOOKUP: Dict[str, str] = {
    'android': 'droids',
    'capacity': 'rounds',
    'equipment': 'items',
    'gas': emojis.pss_gas_big,
    'item101': emojis.pss_gold,
    'item102': emojis.pss_carbon,
    'item103': emojis.pss_iron,
    'item104': emojis.pss_titanium,
    'item105': emojis.pss_silicon,
    'mineral': emojis.pss_min_big,
    'starbux': emojis.pss_bux,
    'supply': emojis.pss_supply_big,
}
CURRENCY_EMOJI_LOOKUP_REVERSE: Dict[str, str] = {value: key for key, value in CURRENCY_EMOJI_LOOKUP.items()}
DELETE_ON_CHANGE_ORDER: List[Optional[bool]] = [True, None, False]
# Division letter ('-' = no division) <-> numeric design id, plus rank cutoffs.
DIVISION_CHAR_TO_DESIGN_ID: Dict[str, str] = {
    '-': '0',
    'A': '1',
    'B': '2',
    'C': '3',
    'D': '4'
}
# Division letter -> inclusive (first, last) leaderboard rank covered.
DIVISION_CUTOFF_LOOKUP: Dict[str, Tuple[int, int]] = {
    '-': (-1, -1),
    'A': (1, 8),
    'B': (9, 20),
    'C': (21, 50),
    'D': (51, 100)
}
DIVISION_DESIGN_ID_TO_CHAR: Dict[str, str] = dict([(value, key) for key, value in DIVISION_CHAR_TO_DESIGN_ID.items()])
# Division design id -> how many "top targets" to show at most.
DIVISION_MAX_COUNT_TARGETS_TOP: Dict[str, int] = {
    '1': 20,
    '2': 14,
    '3': 5,
    '4': 3,
}
# Damage categories in display order.
DMG_TYPES: List[str] = [
    'SystemDamage',
    'CharacterDamage',
    'ShieldDamage',
    'HullDamage',
    'DirectSystemDamage'
]
# Equipment slot bitmask -> slot name (values are powers of two, OR-able).
EQUIPMENT_MASK_LOOKUP: Dict[int, str] = {
    1: 'head',
    2: 'body',
    4: 'leg',
    8: 'weapon',
    16: 'accessory',
    32: 'pet'
}
# Canonical display order of equipment slots.
EQUIPMENT_SLOTS_ORDER_LOOKUP: List[str] = [
    'EquipmentHead',
    'EquipmentAccessory',
    'EquipmentBody',
    'EquipmentWeapon',
    'EquipmentLeg',
    'EquipmentPet',
    'Module'
]
# User-typed slot alias -> canonical slot id.
EQUIPMENT_SLOTS_LOOKUP: Dict[str, str] = {
    'head': 'EquipmentHead',
    'hat': 'EquipmentHead',
    'helm': 'EquipmentHead',
    'helmet': 'EquipmentHead',
    'body': 'EquipmentBody',
    'chest': 'EquipmentBody',
    'shirt': 'EquipmentBody',
    'armor': 'EquipmentBody',
    'leg': 'EquipmentLeg',
    'pant': 'EquipmentLeg',
    'pants': 'EquipmentLeg',
    'weapon': 'EquipmentWeapon',
    'hand': 'EquipmentWeapon',
    'gun': 'EquipmentWeapon',
    'accessory': 'EquipmentAccessory',
    'shoulder': 'EquipmentAccessory',
    'pet': 'EquipmentPet',
    'module': 'Module',
    'mod': 'Module'
}
# Gas cost per level (index = level) for legendary crew.
# NOTE(review): 942000 breaks the otherwise constant +32500 step (expected
# 942500) — verify against game data before relying on it.
GAS_COSTS_LEGENDARY_LOOKUP: List[int] = [
    0, 130000, 162500, 195000, 227500,
    260000, 292500, 325000, 357500, 390000,
    422500, 455000, 487500, 520000, 552500,
    585000, 617500, 650000, 682500, 715000,
    747500, 780000, 812500, 845000, 877500,
    910000, 942000, 975000, 1007500, 1040000,
    1072500, 1105000, 1137500, 1170000, 1202500,
    1235000, 1267500, 1300000, 1332500, 1365000]
# Gas cost per level (index = level) for non-legendary crew.
GAS_COSTS_LOOKUP: List[int] = [
    0, 0, 17, 33, 65,
    130, 325, 650, 1300, 3200,
    6500, 9700, 13000, 19500, 26000,
    35700, 43800, 52000, 61700, 71500,
    84500, 104000, 117000, 130000, 156000,
    175000, 201000, 227000, 253000, 279000,
    312000, 351000, 383000, 422000, 468000,
    507000, 552000, 604000, 650000, 715000]
# requires-approval flag (None = unknown) -> fleet type label.
FLEET_TYPE_LOOKUP: Dict[bool, str] = {
    None: None,
    True: 'Private',
    False: 'Public'
}
GRID_TYPE_MASK_LOOKUP: Dict[int, str] = {
    1: 'A',
    2: 'B'
}
# In-app purchase bitmask -> (pack name, bux amount).
IAP_OPTIONS_MASK_LOOKUP: Dict[int, Tuple[str, int]] = {
    1: ('Clip', 500),
    2: ('Roll', 1200),
    4: ('Stash', 2500),
    8: ('Case', 6500),
    16: ('Vault', 14000)
}
ITEM_SUB_TYPES_TO_GET_PARENTS_FOR: List[str] = [
    'Module'
]
MODULE_TYPE_TO_STAT_LOOKUP: Dict[str, str] = {
    'Turret': 'Attack'
}
# Progression curve name -> exponent applied to the level fraction.
PROGRESSION_TYPES: Dict[str, float] = {
    'Linear': 1.0,
    'EaseIn': 2.0,
    'EaseOut': 0.5
}
# Rarity -> sort weight (lower = rarer, sorts first when ascending).
RARITY_ORDER_LOOKUP: Dict[str, int] = {
    'Common': 70,
    'Elite': 60,
    'Unique': 50,
    'Epic': 40,
    'Hero': 30,
    'Special': 20,
    'Legendary': 10
}
# Rarity -> emoji string (repeated star emoji up to 'Hero').
RARITY_EMOJIS_LOOKUP: Dict[str, str] = {
    'Common': emojis.pss_rarity,
    'Elite': emojis.pss_rarity * 2,
    'Unique': emojis.pss_rarity * 3,
    'Epic': emojis.pss_rarity * 4,
    'Hero': emojis.pss_rarity * 5,
    'Special': emojis.pss_rarity_special,
    'Legendary': emojis.pss_rarity_legendary
}
# Locale -> (promo requirement id -> display label).
PROMO_REQUIREMENT_TYPE_LOOKUP: Dict[str, Dict[str, str]] = {
    'en': {
        'shipLevel': 'ship lvl',
        'purchaseRewardPoints': f'{emojis.pss_dove} purchased'
    }
}
# Special ability id -> display name.
SPECIAL_ABILITIES_LOOKUP: Dict[str, str] = {
    'AddReload': 'Rush Command',
    'DamageToCurrentEnemy': 'Critical Strike',
    'DamageToRoom': 'Ultra Dismantle',
    'DamageToSameRoomCharacters': 'Poison Gas',
    'DeductReload': 'System Hack',
    'FireWalk': 'Fire Walk',
    'Freeze': 'Freeze',
    'HealRoomHp': 'Urgent Repair',
    'HealSameRoomCharacters': 'Healing Rain',
    'HealSelfHp': 'First Aid',
    'Invulnerability': 'Phase Shift',
    'ProtectRoom': 'Stasis Shield',
    'SetFire': 'Arson'
}
# Canonical stat name -> emoji.
STAT_EMOJI_LOOKUP: Dict[str, str] = {
    'Ability': emojis.pss_stat_ability,
    'Attack': emojis.pss_stat_attack,
    'Engine': emojis.pss_stat_engine,
    'FireResistance': emojis.pss_stat_fireresistance,
    'Hp': emojis.pss_stat_hp,
    'Pilot': emojis.pss_stat_pilot,
    'Repair': emojis.pss_stat_repair,
    'Science': emojis.pss_stat_research,
    'Stamina': emojis.pss_stat_stamina,
    'Weapon': emojis.pss_stat_weapon,
    'Xp': emojis.pss_stat_xp,
}
# User-typed stat alias -> canonical stat name.
STAT_TYPES_LOOKUP: Dict[str, str] = {
    'hp': 'HP',
    'health': 'HP',
    'attack': 'Attack',
    'atk': 'Attack',
    'att': 'Attack',
    'damage': 'Attack',
    'dmg': 'Attack',
    'repair': 'Repair',
    'rep': 'Repair',
    'ability': 'Ability',
    'abl': 'Ability',
    'pilot': 'Pilot',
    'plt': 'Pilot',
    'science': 'Science',
    'sci': 'Science',
    'stamina': 'Stamina',
    'stam': 'Stamina',
    'stm': 'Stamina',
    'sta': 'Stamina',
    'engine': 'Engine',
    'eng': 'Engine',
    'weapon': 'Weapon',
    'wpn': 'Weapon',
    'fire': 'FireResistance',
    'fireresistance': 'FireResistance',
    'fireres': 'FireResistance',
    'fire res': 'FireResistance',
    'fire resistance': 'FireResistance',
    'fr': 'FireResistance',
}
# Unit suffix appended to enhancement / training bonus values per stat.
STAT_UNITS_ENHANCEMENT_MODIFIER_LOOKUP: Dict[str, str] = {
    'Ability': '%',
}
STAT_UNITS_TRAINING_MODIFIER_LOOKUP: Dict[str, str] = {
    'Ability': '%',
    'Attack': '%',
    'Engine': '%',
    'FireResistance': '',
    'Hp': '%',
    'Pilot': '%',
    'Repair': '%',
    'Science': '%',
    'Stamina': '',
    'Weapon': '%',
    'Xp': ''
}
# Stats shown in the left / right column of a two-column stat display.
STATS_LEFT: List[str] = [
    'Hp',
    'Attack',
    'Repair',
    'Ability',
    'Stamina'
]
STATS_RIGHT: List[str] = [
    'Pilot',
    'Science',
    'Engine',
    'Weapon',
    'FireResistance'
]
# Training rank -> (room name, room short code).
TRAINING_RANK_ROOM_LOOKUP: Dict[int, Tuple[str, str]] = {
    1: ('Gym', 'GYM'),
    2: ('Academy', 'ACA')
    # 100: Consumable
}
# User activity status id -> display label.
USER_STATUS: Dict[str, str] = {
    'Attacking': 'Attacking',
    'Defending': 'Defending / Immunity',
    'Offline': 'Offline'
}
# User type id -> display label.
USER_TYPE: Dict[str, str] = {
    'Administrator': 'Administrator',
    'Banned': 'Banned',
    'Mission': 'NPC',
    'UserTypeAlliance': 'Starbase',
    'UserTypeCommunityManager': 'Community Manager',
}
# XP cost per level (index = level) for legendary crew.
# NOTE(review): 24660 breaks the monotone progression between 21490 and 23650
# (the step pattern suggests 22560) — verify against game data.
XP_COSTS_LEGENDARY_LOOKUP: List[int] = [
    0, 0, 810, 1350, 1890,
    2430, 3060, 3690, 4320, 4950,
    5580, 6360, 7090, 7840, 8610,
    9400, 10210, 11040, 11890, 12760,
    13650, 14560, 15490, 16440, 17410,
    18400, 19410, 20440, 21490, 24660,
    23650, 24760, 25890, 27040, 28210,
    29400, 30610, 31840, 33090, 34360]
# XP cost per level (index = level) for non-legendary crew.
XP_COSTS_LOOKUP: List[int] = [
    0, 90, 270, 450, 630,
    810, 1020, 1230, 1440, 1650,
    1860, 2130, 2400, 2670, 2940,
    3210, 3540, 3870, 4200, 4530,
    4860, 5220, 5580, 5940, 6300,
    6660, 7050, 7440, 7830, 8220,
    8610, 9030, 9450, 9870, 10290,
    10710, 11160, 11610, 12060, 12510]
# ---------- Helper functions ----------
def get_lookup_value_or_default(lookup: Any, key: Any, default: Any = None) -> Any:
    """Return ``lookup[key]`` if *key* is present, else *default*.

    Behaves like ``dict.get`` for the lookup tables in this module, but keeps
    the positional *default* signature used at existing call sites.
    """
    # Membership test directly on the mapping; `key in lookup.keys()` built the
    # same test with an extra view object.
    if key in lookup:
        return lookup[key]
    return default
def select_next_element(lookup: List[Any], current_element: Any) -> Any:
    """Return the element following *current_element* in *lookup*, wrapping
    around to the first element after the last one.

    Returns None for a None lookup, and echoes *current_element* back for an
    empty lookup.  Raises ValueError if the element is not in the list.
    """
    if lookup is None:
        return None
    if not lookup:
        # Nothing to advance through.
        return current_element
    successor = (lookup.index(current_element) + 1) % len(lookup)
    return lookup[successor]
1964701 | <gh_stars>1-10
import base64
import copy
import json
import logging
import os
import tempfile
from unittest import mock
import pandas as pd
import pytest
import requests
from fortigaterepr.devicedata import get_helper, clean_columns_helper
from .example_data import INTERFACE_DETAILS_RESULT
# TODO: Module scoped fixture for dataframe to pass to get_helper() ??
class Test_TEMPLATE:
    """Class for testing TEMPLATE functionality
    """
    # Skeleton left as a copy/paste starting point for new test classes;
    # the bodies are intentionally empty.
    def test_basic_data(self):
        pass
    def test_get_method(self):
        pass
class Test_clean_columns_helper:
    """Class for testing clean_columns_helper() method
    """
    # (column name, fill value) pairs mirroring the interface-detail columns
    # that clean_columns_helper is expected to normalize.
    intf_test_tuple = [
        ("vdom", "N/A"),
        ("status", "N/A"),
        ("mac_address", "N/A"),
        ("alias", "N/A"),
        ("zone", "N/A"),
        ("ipv4_addresses", "None"),
        ("link", "N/A"),
        ("speed", "N/A"),
        ("media", "N/A"),
        ("Description", "None"),
        ("duplex", "N/A"),
    ]
    def test_empty_list(self):
        """test empty list passed to helper function returns the original dataframe
        """
        test_list = []
        df = pd.DataFrame(INTERFACE_DETAILS_RESULT)
        result = clean_columns_helper(df, test_list)
        assert isinstance(result, pd.DataFrame)
        assert result.equals(df)
    def test_list_tuples(self):
        """test passing proper list of tuples cleans the data properly
        """
        df = pd.DataFrame(INTERFACE_DETAILS_RESULT)
        result = clean_columns_helper(df, self.intf_test_tuple)
        assert isinstance(result, pd.DataFrame)
        # TODO: need to assert one or more of the values were set properly
    def test_list_tuples_missing_key(self):
        """test passing proper list of tuples but where one of the columns does not exist in the dataframe
        verify that it returns DataFrame with the columns that were specified cleaned as expected
        """
        test_list = [("vdom", "N/A"), ("status", "N/A"), ("Non-Existing-Column", "N/A")]
        df = pd.DataFrame(INTERFACE_DETAILS_RESULT)
        result = clean_columns_helper(df, test_list)
        assert isinstance(result, pd.DataFrame)
        # TODO: need to assert one or more of the values were set properly
class Test_get_helper:
    """Class for testing get_helper() functionality
    """
    def test_get_helper_with_list(self):
        """tests get_helper() with proper list passed as exclude_columns parameter
        """
        df = pd.DataFrame(INTERFACE_DETAILS_RESULT)
        exclude_list = ["media", "is_system_interface"]
        # first make sure columns are present in the dataframe:
        for col in exclude_list:
            assert col in df.columns
        result = get_helper(df, exclude_list)
        for col in exclude_list:
            assert col not in result.columns
        assert isinstance(result, pd.DataFrame)
    def test_get_helper_with_none(self):
        """tests get_helper() with None passed as exclude_columns parameter
        """
        df = pd.DataFrame(INTERFACE_DETAILS_RESULT)
        exclude_list = None
        # None excludes nothing, so the result should equal the input frame:
        result = get_helper(df, exclude_list)
        assert result.equals(df)
    def test_get_helper_with_non_list(self):
        """test to validate get_helper() when passed invalid type for exclude_columns returns the original dataframe
        """
        df = pd.DataFrame(INTERFACE_DETAILS_RESULT)
        exclude_list = 500
        # invalid type should be ignored and the original frame returned:
        result = get_helper(df, exclude_list)
        assert result.equals(df)
| StarcoderdataPython |
3575667 | <reponame>gezp/ros_ign_gazebo_py
import threading
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Transform
from ros_ign_interfaces.srv import ControlWorld,SpawnEntity,DeleteEntity,SetEntityPose
class IgnGazeboInterface(Node):
    """ROS 2 node wrapping the Ignition Gazebo world services
    (control / create / remove / set_pose) behind simple Python methods.

    All public methods fire a service request asynchronously; pass
    ``is_wait=True`` to block until the response arrives and get the service's
    ``success`` flag back (otherwise they optimistically return True).
    """

    def __init__(self, world_name="default", nodename="IgnGazeboInterface"):
        super().__init__(nodename)
        # One client per bridged world service; block until each is available.
        self.control_cli = self._make_client(ControlWorld, '/ign/%s/control' % world_name)
        self.create_cli = self._make_client(SpawnEntity, '/ign/%s/create' % world_name)
        self.remove_cli = self._make_client(DeleteEntity, '/ign/%s/remove' % world_name)
        self.move_cli = self._make_client(SetEntityPose, '/ign/%s/set_pose' % world_name)
        self.get_logger().info("IgnGazeboInterface initialised successfuly")

    def _make_client(self, srv_type, srv_name):
        """Create a service client and wait (1 s polls) until it is available."""
        cli = self.create_client(srv_type, srv_name)
        while not cli.wait_for_service(timeout_sec=1.0):
            self.get_logger().info('service %s not available, waiting again...' % srv_name)
        return cli

    def _call(self, client, request, is_wait):
        """Send *request* on *client*; optionally wait for the response.

        Returns the service response's ``success`` flag when waiting, True
        otherwise.  NOTE(review): the wait is a busy loop that relies on an
        external executor spinning this node so the future can complete —
        confirm, or switch to rclpy.spin_until_future_complete.
        """
        srv_call = client.call_async(request)
        if is_wait:
            while rclpy.ok():
                if srv_call.done():
                    break
            return srv_call.result().success
        return True

    def resume(self, is_wait=False):
        """Un-pause the simulation."""
        req = ControlWorld.Request()
        req.world_control.pause = False
        return self._call(self.control_cli, req, is_wait)

    def pause(self, step=0, is_wait=False):
        """Pause the simulation, optionally running *step* iterations first."""
        req = ControlWorld.Request()
        req.world_control.pause = True
        req.world_control.multi_step = step
        return self._call(self.control_cli, req, is_wait)

    def create_model(self, name, pose, model_path, is_wait=False):
        """Spawn the SDF model at *model_path* as entity *name* at *pose*."""
        req = SpawnEntity.Request()
        req.entity_factory.name = name
        req.entity_factory.pose = pose
        req.entity_factory.sdf_filename = model_path
        return self._call(self.create_cli, req, is_wait)

    def remove_model(self, name, is_wait=False):
        """Delete the model entity called *name* from the world."""
        req = DeleteEntity.Request()
        req.entity.name = name
        req.entity.type = req.entity.MODEL
        return self._call(self.remove_cli, req, is_wait)

    def set_model_pose(self, name, pose, is_wait=False):
        """Move (teleport) the entity called *name* to *pose*."""
        req = SetEntityPose.Request()
        req.entity.name = name
        req.pose = pose
        return self._call(self.move_cli, req, is_wait)
| StarcoderdataPython |
3334227 | <filename>rdmo_re3data/__init__.py
# Package metadata for the rdmo-re3data plugin.
__title__ = 'rdmo-re3data'
__version__ = '1.0'
__author__ = 'RDMO Arbeitsgemeinschaft'
__email__ = '<EMAIL>'  # placeholder left by an anonymization step
__license__ = 'Apache-2.0'
VERSION = __version__  # legacy alias kept for backwards compatibility
# Re-export everything from .providers at the package level.
from .providers import *
| StarcoderdataPython |
1875591 | #
# @lc app=leetcode id=16 lang=python
#
# [16] 3Sum Closest
#
# https://leetcode.com/problems/3sum-closest/description/
#
# algorithms
# Medium (38.77%)
# Total Accepted: 286K
# Total Submissions: 704.1K
# Testcase Example: '[-1,2,1,-4]\n1'
#
# Given an array nums of n integers and an integer target, find three integers
# in nums such that the sum is closest to target. Return the sum of the three
# integers. You may assume that each input would have exactly one solution.
#
# Example:
#
#
# Given array nums = [-1, 2, 1, -4], and target = 1.
#
# The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
#
#
#
class Solution(object):
    def threeSumClosest(self, nums, target):
        """Return the sum of three elements of *nums* closest to *target*.

        Sorts the array, then for each anchor element runs a two-pointer scan
        over the remainder, tracking the sum with the smallest absolute
        distance to the target.  O(n^2) time, O(1) extra space.

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        n = len(nums)
        if n < 3:
            # Preserved from the original; the problem guarantees n >= 3,
            # so this guard should never trigger.
            return []
        nums.sort()
        # BUG FIX: track the best sum directly and compare absolute distances.
        # The original seeded its distance bound with the magic number 9999,
        # so any input whose sums were all further than 9999 from the target
        # wrongly returned the target itself.
        closest = nums[0] + nums[1] + nums[2]
        for i in range(n - 2):
            if i > 0 and nums[i] == nums[i - 1]:
                continue  # skip duplicate anchors
            lo, hi = i + 1, n - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if total == target:
                    return total  # cannot get any closer
                if abs(total - target) < abs(closest - target):
                    closest = total
                if total < target:
                    lo += 1
                else:
                    hi -= 1
        return closest
| StarcoderdataPython |
1720362 | import os
import subprocess
import sys
import tempfile
import MeCab
__neologd_repo_name = 'mecab-ipadic-neologd'
__neologd_repo_url = 'https://github.com/neologd/mecab-ipadic-neologd.git'
def download_neologd(dic_path):
    # Clone the mecab-ipadic-neologd repository into a throwaway directory and
    # run its installer, targeting *dic_path* as the dictionary install prefix.
    # NOTE(review): subprocess.call() return codes are ignored, so a failed
    # `git clone` or installer run fails silently — consider check_call.
    dic_path = os.path.abspath(dic_path)
    with tempfile.TemporaryDirectory() as temp_dir:
        # Shallow clone; only the latest revision is needed to install.
        subprocess.call(['git', 'clone', '--depth', '1', __neologd_repo_url],
                        stdout=sys.stdout, cwd=temp_dir)
        neologd_dir_path = os.path.join(temp_dir, __neologd_repo_name)
        # -y: accept prompts, -u: update, -p: install prefix.
        subprocess.call(['./bin/install-mecab-ipadic-neologd', '-y', '-u',
                         '-p', dic_path],
                        stdout=sys.stdout, cwd=neologd_dir_path)
def get_tagger(dic_path):
    """Build a MeCab tagger, optionally pointed at a custom dictionary dir."""
    if dic_path is not None:
        return MeCab.Tagger('-d {}'.format(dic_path))
    return MeCab.Tagger()
def tokenize(text, tagger):
    """Split *text* into surface forms using a MeCab-style *tagger*.

    Parses the tagger's line-oriented output (surface form before the first
    tab on each line) and stops at the 'EOS' sentinel line.
    """
    surfaces = []
    for row in tagger.parse(text).split('\n'):
        if row == 'EOS':
            break
        surfaces.append(row.split('\t')[0])
    return surfaces
| StarcoderdataPython |
def rand_phase(p):
    """Add a uniform random phase to *p* (assumed in (-pi, pi]) and wrap the
    result back into (-pi, pi]."""
    import numpy as np
    # Shift into (0, 2*pi] and add U(0, 2*pi) noise, giving values in (0, 4*pi).
    shifted = p + np.pi + np.random.uniform(0, 2*np.pi, p.size)
    # Wrap anything above one full turn back into (0, 2*pi].
    shifted[shifted > 2*np.pi] -= 2*np.pi
    # Shift back to the (-pi, pi] convention.
    return shifted - np.pi
def signal_rand_phase(S):
    """Return a signal with the same power spectrum as *S* (assumed real)
    but with randomized phases."""
    import numpy as np
    F = np.fft.rfft(S)
    # Randomize the phases while keeping the magnitudes.
    pf = rand_phase(np.angle(F))
    Ff = np.abs(F)*np.exp(1.0j*pf)
    # BUG FIX: pass n=S.size so odd-length inputs round-trip to their original
    # length; irfft's default n = 2*(len(Ff)-1) silently returns the wrong
    # length (and hence the wrong power spectrum) for odd-length S.
    return np.fft.irfft(Ff, n=S.size)
| StarcoderdataPython |
160356 | """
This is uproot-browser. There is no user accessible API; only a terminal
interface is provided currently.
"""
from __future__ import annotations
# Deliberately empty: the package exposes no public Python API
# (see the module docstring above).
__all__ = ()
| StarcoderdataPython |
3531900 | <filename>app/helpers.py
# helpers.py
#
# Copyright(c) <NAME> <<EMAIL>>
# Licensed under MIT
# Version 2.0.0
import string
import random
def rand_uid(length):
    """Return a random alphanumeric string of *length* characters.

    Uses random.SystemRandom (OS entropy), so the result is suitable for
    unpredictable identifiers.
    """
    str_pool = string.ascii_lowercase + string.ascii_uppercase + string.digits
    # Create the SystemRandom generator once; the original instantiated a new
    # one for every character drawn.
    rng = random.SystemRandom()
    return ''.join(rng.choice(str_pool) for _ in range(length))
def strip_uid(text_with_uid, identifier_prefix=":uid-"):
    """Return the text before the last *identifier_prefix*; the input
    unchanged if the prefix is absent."""
    cut = text_with_uid.rfind(identifier_prefix)
    if cut == -1:
        # BUG FIX: rfind() returns -1 when the prefix is missing, which the
        # original used as a slice bound and silently dropped the last char.
        return text_with_uid
    return text_with_uid[0:cut]
def extract_uid(text_with_uid, identifier_prefix=":uid-"):
    """Return the text after the last *identifier_prefix*; empty string if
    the prefix is absent."""
    cut = text_with_uid.rfind(identifier_prefix)
    if cut == -1:
        # BUG FIX: rfind() == -1 previously produced a nonsense slice start
        # (len(prefix) - 1) and returned an arbitrary tail of the input.
        return ''
    return text_with_uid[cut + len(identifier_prefix):]
| StarcoderdataPython |
12845280 | <filename>galaxy/main/urls.py
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from django.conf.urls import url
from django.conf import settings
from django.views.decorators.cache import never_cache
from django.contrib.staticfiles.views import serve as serve_staticfiles
from django.views.static import serve as serve_static
from galaxy.main import views
urlpatterns = [
    # Non-secure URLs
    url(r'^$', views.home, name='home'),
    url(r'^explore$', views.explore, name='explore'),
    url(r'^intro$', views.intro, name='intro'),
    url(r'^accounts/landing[/]?$', views.accounts_landing,
        name='accounts-landing'),
    url(r'^list$', views.list_category, name='list-category'),
    url(r'^detail$', views.detail_category, name='detail-category'),
    url(r'^roleadd$', views.role_add_view, name='role-add-category'),
    url(r'^imports$', views.import_status_view, name='import-status'),
    url(r'^stars$', views.stars_list_view, name='stars-list'),
    # Logged in/secured URLs
    url(r'^accounts/connect/$', views.accounts_connect),
    url(r'^accounts/connect/success/$', views.accounts_connect_success,
        name='accounts-connect-success'),
    url(r'^accounts/profile/$', views.accounts_profile,
        name='accounts-profile'),
    url(r'^authors/$', views.NamespaceListView.as_view(),
        name='namespace-list'),
    # Catch-all namespace / role routes; must stay last so they do not
    # shadow the literal paths above.
    url(r'^([\w\-._+]+)/$', views.RoleListView.as_view(), name='role-list'),
    url(r'^([\w\-._+]+)/([\w\-._+]+)/$',
        views.RoleDetailView.as_view(), name='role-detail'),
]
# Static files: served uncached by the staticfiles app in DEBUG, straight
# from STATIC_ROOT otherwise.  NOTE(review): serving static files through
# Django in production is a stopgap (the original marked this "# FIX").
if settings.DEBUG:
    urlpatterns += [
        url(r'^static/(?P<path>.*)$',
            never_cache(serve_staticfiles))
    ]
else:
    urlpatterns += [
        url(r'^static/(?P<path>.*)$', serve_static,
            kwargs={'document_root': settings.STATIC_ROOT})
    ]
| StarcoderdataPython |
6459601 | <filename>fiftyone/utils/data/converters.py
"""
Dataset format conversion utilities.
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import inspect
import logging
import eta.core.utils as etau
import fiftyone as fo
import fiftyone.types as fot
logger = logging.getLogger(__name__)
def convert_dataset(
    input_dir=None,
    input_type=None,
    dataset_importer=None,
    output_dir=None,
    output_type=None,
    dataset_exporter=None,
):
    """Converts a dataset stored on disk to another format on disk.

    The input dataset may be specified by providing either an ``input_dir`` and
    a corresponding ``input_type`` or by providing a ``dataset_importer``.

    The output dataset may be specified by providing either an ``output_dir``
    and a corresponding ``output_type`` or by providing a ``dataset_exporter``.

    Args:
        input_dir (None): the input dataset directory
        input_type (None): the :class:`fiftyone.types.dataset_types.Dataset`
            type of the dataset in ``input_dir``
        dataset_importer (None): a
            :class:`fiftyone.utils.data.importers.DatasetImporter` to use to
            import the input dataset
        output_dir (None): the directory to which to write the output dataset
        output_type (None): the :class:`fiftyone.types.dataset_types.Dataset`
            type to write to ``output_dir``
        dataset_exporter (None): a
            :class:`fiftyone.utils.data.exporters.DatasetExporter` to use to
            export the dataset
    """
    if input_type is None and dataset_importer is None:
        raise ValueError(
            "Either `input_type` or `dataset_importer` must be provided"
        )

    if output_type is None and dataset_exporter is None:
        raise ValueError(
            "Either `output_type` or `dataset_exporter` must be provided"
        )

    # Label field used (if necessary) when converting labeled datasets
    label_field = "label"

    # Import dataset
    if dataset_importer is not None:
        # Import via ``dataset_importer``
        logger.info("Loading dataset from '%s'", dataset_importer.dataset_dir)
        logger.info(
            "Using DatasetImporter '%s'", etau.get_class_name(dataset_importer)
        )
        dataset = fo.Dataset.from_importer(
            dataset_importer, label_field=label_field
        )
        logger.info("Import complete")
    else:
        # Import via ``input_type``
        if inspect.isclass(input_type):
            input_type = input_type()

        # If the input dataset contains TFRecords, they must be unpacked into a
        # temporary directory during conversion; recurse with an explicit
        # importer and let the nested call handle its own cleanup
        if isinstance(
            input_type,
            (fot.TFImageClassificationDataset, fot.TFObjectDetectionDataset),
        ):
            with etau.TempDir() as images_dir:
                dataset_importer_cls = input_type.get_dataset_importer_cls()
                dataset_importer = dataset_importer_cls(input_dir, images_dir)
                convert_dataset(
                    dataset_importer=dataset_importer,
                    output_dir=output_dir,
                    output_type=output_type,
                    dataset_exporter=dataset_exporter,
                )
                return

        logger.info("Loading dataset from '%s'", input_dir)
        logger.info("Input format '%s'", etau.get_class_name(input_type))
        dataset = fo.Dataset.from_dir(
            input_dir, input_type, label_field=label_field
        )
        logger.info("Import complete")

    try:
        # Export dataset
        if dataset_exporter is not None:
            # Export via ``dataset_exporter``
            logger.info(
                "Exporting dataset to '%s'", dataset_exporter.export_dir
            )
            logger.info(
                "Using DatasetExporter '%s'",
                etau.get_class_name(dataset_exporter),
            )
            dataset.export(
                dataset_exporter=dataset_exporter, label_field=label_field
            )
            logger.info("Export complete")
        else:
            # Export via ``output_type``
            if inspect.isclass(output_type):
                output_type = output_type()

            logger.info("Exporting dataset to '%s'", output_dir)
            logger.info("Export format '%s'", etau.get_class_name(output_type))
            dataset.export(
                export_dir=output_dir,
                dataset_type=output_type,
                label_field=label_field,
            )
            logger.info("Export complete")
    finally:
        # Always delete the temporary conversion dataset, even when the
        # export raises; the original leaked it on failure
        dataset.delete()
| StarcoderdataPython |
128934 | <filename>tests/unit/decider/test_daemon.py
import pytest
import floto.decider
@pytest.fixture
def history(init_response):
    # `init_response` is supplied by a fixture outside this module (conftest);
    # presumably a canned SWF decision-task poll response — verify there.
    return floto.History(domain='d', task_list='tl', response=init_response)
@pytest.fixture
def daemon(history):
    # Daemon under test with a pre-built history attached, so decision
    # methods can be exercised without polling SWF.
    daemon = floto.decider.Daemon(domain='d', task_list='tl_daemon', swf='swf')
    daemon.history = history
    return daemon
@pytest.fixture
def decider_spec():
    # Minimal decider spec with one activity task; used as the payload of a
    # startChildWorkflowExecution signal in the tests below.
    task1 = floto.specs.task.ActivityTask(domain='d',name='at', version='v1')
    spec = floto.specs.DeciderSpec(domain='d', task_list='tl', activity_tasks=[task1])
    return spec
@pytest.fixture
def json_decider_spec(decider_spec):
    # JSON form of the spec, as it would arrive inside a signal's input.
    return decider_spec.to_json()
class TestDaemon():
    def test_init(self):
        # Default task list when none is given.
        assert floto.decider.Daemon(domain='d').task_list == 'floto_daemon'
    def test_init_with_swf(self):
        # An explicit swf client is stored as-is.
        d = floto.decider.Daemon(domain='d', swf='my_swf')
        assert d.swf == 'my_swf'
    def test_init_with_args(self):
        d = floto.decider.Daemon(domain='d', task_list='tl')
        assert d.task_list == 'tl'
        assert d.domain == 'd'
    def test_get_decisions_child_workflow(self, mocker, daemon):
        # One decision per child-workflow event, delegated per event.
        mocker.patch('floto.decider.Daemon.get_decision_child_workflow', return_value='d')
        decisions = daemon.get_decisions_child_workflows(['cwf1'])
        daemon.get_decision_child_workflow.assert_called_once_with('cwf1')
        assert decisions == ['d']
    def test_get_decision_child_workflow(self, daemon, json_decider_spec, mocker):
        # A start-child-workflow signal should have its embedded decider spec
        # parsed with the decision's task list and the daemon's domain.
        signal_event = {'eventType':'WorkflowExecutionSignaled',
                'workflowExecutionSignaledEventAttributes':{
                    'signalName':'startChildWorkflowExecution',
                    'input':{'decider_spec':json_decider_spec}}}
        decision = daemon.get_decision_start_child_workflow_execution()
        mocker.patch('floto.decider.Daemon.get_decision_start_child_workflow_execution',
                return_value=decision)
        mocker.patch('floto.decider.Daemon.start_child_decider')
        mocker.patch('floto.decider.Daemon.get_decider_spec')
        decision = daemon.get_decision_child_workflow(signal_event)
        daemon.get_decider_spec.assert_called_once_with(json_decider_spec,
                decision.task_list['name'],
                daemon.domain)
    def test_get_decider_spec(self, json_decider_spec, daemon):
        # Round-trip: JSON spec back into a DeciderSpec with overridden
        # task list / domain.
        spec = daemon.get_decider_spec(json_decider_spec, 'tl', 'd')
        assert isinstance(spec, floto.specs.DeciderSpec)
        assert spec.task_list == 'tl'
        assert spec.domain == 'd'
        assert spec.activity_tasks[0].name == 'at'
9642479 | from scrapli.driver.core import NXOSDriver
def test_nxos_driver_init_telnet():
    # The telnet transport is expected to expose NX-OS's 'login:' username
    # prompt after driver construction.
    conn = NXOSDriver(host="myhost", transport="telnet")
    assert conn.transport.username_prompt == "login:"
| StarcoderdataPython |
5108960 | <gh_stars>0
"""
You are given an array of non-negative integers, and are initially positioned at the first index of the array.
Each element in the array represents your maximum jump length from that position.
Determine if you are able to reach the last index.
Example 1:
Input: [2, 3, 1, 1, 4], Output: true
Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
Example 2:
Input: [3, 2, 1, 0, 4], Output: false
Explanation: You will always arrive at index 3 no matter what. Its maximum
jump length is 0, which makes it impossible to reach the last index.
"""
"""
We iterate through the list and keep a running track of the maximum index we can reach from that position (current
index + current value). If at some point we reach an index higher than the maximum, the last index is not reachable.
We can terminate the iteration early if we observe the max index reaching the last index.
"""
def can_jump(nums):
    """Greedily decide whether the last index of *nums* is reachable.

    Tracks the furthest index reachable so far; if the scan ever stands on an
    index beyond that frontier, the end cannot be reached.  O(n) time.
    """
    if len(nums) <= 1:
        return True
    last = len(nums) - 1
    frontier = 0
    for pos, jump in enumerate(nums):
        if pos > frontier:
            return False
        candidate = pos + jump
        if candidate > frontier:
            frontier = candidate
        if frontier >= last:
            return True
# Smoke tests executed at import time.
assert can_jump([2, 0, 0]) is True
assert can_jump([2, 1, 2, 0, 4]) is True
assert can_jump([2, 3, 1, 1, 4]) is True
assert can_jump([3, 2, 1, 0, 4]) is False
| StarcoderdataPython |
8040070 | <filename>apiai_assistant/widgets/image.py<gh_stars>1-10
from . import GoogleAssistantWidget
class Button(GoogleAssistantWidget):
    """A Google Assistant button widget with a title and an optional link."""

    def __init__(self, title, weblink=None):
        self.title = title
        self.weblink = weblink
        super(Button, self).__init__()

    def render(self):
        """Serialize the button into its Google Assistant payload."""
        payload = {
            'title': self.title,
            'openUrlAction': {
                'url': self.weblink,
            }
        }
        return payload
class Image(GoogleAssistantWidget):
    """A Google Assistant image widget with an optional accessibility text."""

    def __init__(self, url, alt=None):
        self.url = url
        self.alt = alt
        super(Image, self).__init__()

    def render(self):
        """Serialize the image into its Google Assistant payload."""
        rendered = {'url': self.url, 'accessibilityText': self.alt}
        return rendered
| StarcoderdataPython |
6680220 | <filename>src/graphic/gquery.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import copy
from .graph import GraphEntity
from .query.query_utils import Q, Field, Expression
__all__ = ['GQuery']
class Context:
    """Alias -> graph-entity registry used to resolve field lookups.

    A :class:`GQuery` keeps one ``Context`` so that lookup expressions such
    as ``a__id`` or ``a.id`` can be resolved to the entity registered under
    alias ``a``.
    """

    # Separators accepted between an entity alias and a field name in a
    # lookup expression, tried in order (e.g. 'a__id' then 'a.id').
    FIELD_LOOKUP_SPLIT_BYS = ('__', '.', )

    # NOTE: was the bare string ('_name_2_ent'); a one-element tuple is the
    # conventional, less error-prone spelling and declares the same slot.
    __slots__ = ('_name_2_ent',)

    def __init__(self):
        self._name_2_ent = {}

    def __contains__(self, key):
        return key in self._name_2_ent

    def clone(self):
        """Return a copy whose name map is independent of this one."""
        cloned = type(self)()
        cloned._name_2_ent = copy.copy(self._name_2_ent)
        return cloned

    def add(self, *entities):
        """Register entities (and, for edges, their nodes) by alias.

        Raises:
            TypeError: If any argument is not a ``GraphEntity``.
        """
        if not all(isinstance(e, GraphEntity) for e in entities):
            raise TypeError('can only accept GraphEntity')
        for entity in entities:
            self._add(entity)
        return self

    def _add(self, entity):
        # Paths are not supported yet; edges also register both endpoints.
        if entity.is_path():
            raise NotImplementedError('not support path yet')
        self.add_ref(entity.alias, entity)
        if entity.is_edge():
            for node in entity.nodes:
                self.add_ref(node.alias, node)

    def add_ref(self, name, val):
        """Bind *name* to *val*.

        Raises:
            KeyError: If *name* is already bound.
        """
        if name in self._name_2_ent:
            raise KeyError(
                '{} has already to use to ref to {}'.format(
                    name,
                    self._name_2_ent.get(name).__repr__()
                )
            )
        self._name_2_ent[name] = val

    def get(self, name):
        """Return the entity bound to *name*, or ``None`` if unknown.

        (The previous docstring claimed a ``KeyError`` was raised, but
        ``dict.get`` never raises — it returns ``None``.)
        """
        return self._name_2_ent.get(name)

    def lookup_field(self, exp) -> Field:
        """Resolve a lookup expression into a :class:`Field`.

        Args:
            exp: all support expressions: a__id, a.id, id

        Raises:
            ValueError: If *exp* does not split into exactly two parts.
        """
        target_ent_alias = GraphEntity.DEFAULT_ALIAS
        field_name = exp
        for split_by in self.FIELD_LOOKUP_SPLIT_BYS:
            if split_by in exp:
                try:
                    target_ent_alias, field_name = exp.split(split_by)
                except ValueError:
                    # Narrowed from a bare ``except``: unpacking the split
                    # result can only fail with ValueError here (exp is a
                    # str, proven by the ``in`` test above).
                    raise ValueError(exp)
                else:
                    break
        target_ent = self.get(target_ent_alias)
        return Field(target_ent, field_name)
class GQuery:
    """Builder for a graph query over one or more graph entities.

    Collects the entities to match, a ``Q`` filter tree, selected fields,
    an ordering and a limit.  Entity properties present at construction
    time are turned into equality filters automatically.
    """
    # TODO(chuter): support path
    _LIMIT = 20  # default row limit when .limit() is never called

    __slots__ = ('_where', '_entities', '_context',
                 '_select', '_limit', '_order_by')

    def __init__(self, *entities):
        self._where = Q()
        self._entities = entities[:]
        # The context maps aliases to entities for field-lookup resolution.
        self._context = Context().add(*entities)
        self._select = set()
        self._limit = self._LIMIT
        self._order_by = None
        # Seed the filter tree with any properties already set on the
        # entities (e.g. Node(name='x') becomes a name == 'x' filter).
        for entity in entities:
            self._add_filter_by_entity_properties(entity)

    def _add_filter_by_entity_properties(self, entity):
        """Add an equality filter for every property carried by *entity*."""
        for name, val in entity:
            if entity.alias != entity.DEFAULT_ALIAS:
                # Qualify the property with the entity alias, e.g. 'a__name'.
                left_exp = Expression.LOOKUP_SPLIT_BY.join([
                    entity.alias,
                    name
                ])
            else:
                left_exp = name
            kwargs = dict(((left_exp, val),))
            self.filter(**kwargs)
        if entity.is_edge():
            # Edges also contribute the properties of their endpoint nodes.
            for node in entity.nodes:
                self._add_filter_by_entity_properties(node)

    @property
    def context(self):
        """A clone of the alias context (safe to mutate by the caller)."""
        return self._context.clone()

    @property
    def queryfor(self):
        """The entities this query matches against."""
        return self._entities

    @property
    def where(self):
        """A clone of the accumulated ``Q`` filter tree."""
        return self._where._clone()

    @property
    def returns(self):
        """Frozen set of the selected fields."""
        return frozenset(self._select)

    def filter(self, *qargs, **kwargs):
        """AND additional conditions into the filter tree.

        examples:
            query.filter(name__eq="chuter", age__gte=32)
            query.filter(Q(name__eq="chuter") & Q(age__gte=32))

            the top two are equal

            q = Q(name__eq="chuter", age__gte=32)
            q |= Q(comp__eq="BOSS")
            query.filter(q)
        """
        self._where &= Q(*qargs, **kwargs)
        return self

    def select(self, *exps):
        """Add fields to the result set; strings are resolved via context."""
        for exp_item in exps:
            if isinstance(exp_item, str):
                exp_item = self.context.lookup_field(exp_item)
            self._select.add(exp_item)
        return self

    def order_by(self, by=None):
        """Getter when called without argument; setter (chainable) otherwise."""
        if by is None:
            return self._order_by
        self._order_by = by
        return self

    def limit(self, to=None):
        """Getter when called without argument; setter (chainable) otherwise."""
        if to is None:
            return self._limit
        self._limit = to
        return self
| StarcoderdataPython |
6463199 | <reponame>shiksha360-site/site
# Ported from https://github.com/Fates-List/FatesList/blob/main/modules/core/system.py
from fastapi.responses import HTMLResponse
from starlette.middleware.base import BaseHTTPMiddleware
from loguru import logger
from http import HTTPStatus
import uuid
import datetime
from lynxfall.utils.fastapi import api_versioner
import time
import os
class KalanRequestHandler(BaseHTTPMiddleware):
    """Request Handler for Fates List ported to Kalam Academy/Infilearn.

    Starlette middleware that versions /api paths, forces CORS headers,
    stamps timing headers, logs requests and routes all failures through
    a shared exception handler.
    """

    def __init__(self, app, *, exc_handler, api_ver):
        super().__init__(app)
        self.exc_handler = exc_handler
        self.api_ver = api_ver
        # Remember the working directory so dispatch() can restore it.
        self.cwd = os.getcwd()
        app.add_exception_handler(Exception, exc_handler)
        # Methods that should be allowed by CORS
        self.cors_allowed = "GET, POST, PUT, PATCH, DELETE, HEAD, OPTIONS"
        # Default response used when the dispatcher yields nothing.
        self.default_res = HTMLResponse(
            "Something happened!",
            status_code=500
        )

    @staticmethod
    def _log_req(path, request, response):
        """Logs HTTP requests to console (and file)"""
        code = response.status_code
        phrase = HTTPStatus(response.status_code).phrase
        query_str_raw = request.scope["query_string"]
        if query_str_raw:
            query_str = f'?{query_str_raw.decode("utf-8")}'
        else:
            query_str = ""
        # Only log when running under gunicorn (app-level flag).
        if request.app.state.gunicorn:
            logger.info(
                f"{request.method} {path}{query_str} | {code} {phrase}"
            )

    async def dispatch(self, request, call_next):
        """Run _dispatch, if that fails, log error and do exc handler"""
        # Ensure we are always in cwd
        if os.getcwd() != self.cwd and not request.query_params.get("internal_http_call", False):
            os.chdir(self.cwd)
        request.state.error_id = str(uuid.uuid4())
        request.state.curr_time = str(datetime.datetime.now())
        path = request.scope["path"]
        if not request.app.state.ipc_up:
            # This middleware does not apply
            return await call_next(request)
        try:
            res = await self._dispatcher(path, request, call_next)
        except BaseException as exc:  # pylint: disable=broad-except
            # logger.exception("Site Error Occurred")
            res = await self.exc_handler(request, exc, log=True)
        self._log_req(path, request, res)
        # Fall back to the canned 500 response if the dispatcher gave
        # back a falsy result.
        return res if res else self.default_res

    async def _dispatcher(self, path, request, call_next):
        """Actual middleware"""
        logger.trace(request.headers.get("X-Forwarded-For"))
        # These are checks path should not start with
        is_api = path.startswith("/api")
        request.scope["path"] = path
        if is_api:
            # Handle /api as /api/vX excluding docs + pinned requests
            request.scope, api_ver = api_versioner(request, self.api_ver)
        start_time = time.time()
        # Process request with retry
        try:
            response = await call_next(request)
        except BaseException as exc:  # pylint: disable=broad-except
            # logger.exception("Site Error Occurred")
            response = await self.exc_handler(request, exc)
        process_time = time.time() - start_time
        response.headers["X-Process-Time"] = str(process_time)
        if is_api:
            response.headers["X-API-Version"] = api_ver
        # Force-set CORS headers with the proper origin.
        origin = request.headers.get('Origin')
        # Make commonly repeated headers shorter
        acac = "Access-Control-Allow-Credentials"
        acao = "Access-Control-Allow-Origin"
        acam = "Access-Control-Allow-Methods"
        response.headers[acao] = origin if origin else "*"
        # Credentials are only allowed on API calls with an explicit origin.
        if is_api and origin:
            response.headers[acac] = "true"
        else:
            response.headers[acac] = "false"
        response.headers[acam] = self.cors_allowed
        if response.status_code == 405:
            # Turn a 405 on an API preflight into a proper 204 + Allow.
            if request.method == "OPTIONS" and is_api:
                response.status_code = 204
                response.headers["Allow"] = self.cors_allowed
        return response
3255371 | <filename>yggdrasil/examples/tests/__init__.py
import os
import six
import uuid
import unittest
import tempfile
import shutil
import itertools
import flaky
from yggdrasil.components import ComponentMeta, import_component
from yggdrasil import runner, tools, platform
from yggdrasil.examples import (
get_example_yaml, get_example_source, get_example_languages,
ext_map, display_example)
from yggdrasil.tests import YggTestBase, check_enabled_languages, assert_raises
from yggdrasil.tests import timeout as timeout_dec
# Reverse lookup: file extension -> language name.
_ext2lang = {v: k for k, v in ext_map.items()}
# Registry of generated example test classes, keyed by example name
# (populated by ExampleMeta below).
_test_registry = {}
# Comm type used when an iteration does not override it.
_default_comm = tools.get_default_comm()
def test_get_example_yaml():
    r"""Test get_example_yaml."""
    for bad_name, bad_lang in (('invalid', 'invalid'), ('hello', 'invalid')):
        assert_raises(KeyError, get_example_yaml, bad_name, bad_lang)
    for lang in ('r', 'R'):
        get_example_yaml('hello', lang)
def test_get_example_source():
    r"""Test get_example_source."""
    for bad_name, bad_lang in (('invalid', 'invalid'), ('hello', 'invalid')):
        assert_raises(KeyError, get_example_source, bad_name, bad_lang)
    for lang in ('r', 'R'):
        get_example_source('hello', lang)
def test_get_example_languages():
    r"""Test get_example_languages."""
    assert_raises(KeyError, get_example_languages, 'invalid')
    get_example_languages('ascii_io')
    for language in ('python', 'all', 'all_nomatlab'):
        get_example_languages('ascii_io', language=language)
def test_display_example():
    r"""Test display_example."""
    # Smoke test only: verifies display does not raise for a known example.
    display_example('hello', 'r')
def iter_pattern_match(a, b):
    r"""Determine if two sets of iteration parameters match, allowing
    for wild cards.

    Args:
        a (tuple): Iteration parameters.
        b (tuple or list): Iteration parameters. A list is treated as a
            set of alternative patterns; *a* matches if it matches any
            element.

    Returns:
        bool: True if the parameters match, False otherwise.

    """
    assert(not isinstance(a, list))
    if isinstance(b, list):
        # any() on a generator short-circuits at the first match instead
        # of evaluating every alternative.
        return any(iter_pattern_match(a, ib) for ib in b)
    # Element-wise comparison: entries match when equal, when either side
    # is the '*' wildcard, when a tuple on either side lists allowed
    # values, or when a set on either side lists *excluded* values.
    # (zip truncates, so extra entries on either side are ignored.)
    # NOTE: removed a dead ``matches = []`` local that was never used.
    for ia, ib in zip(a, b):
        if not ((ia == ib) or (ia == '*') or (ib == '*')
                or (isinstance(ia, tuple) and (ib in ia))
                or (isinstance(ib, tuple) and (ia in ib))
                or (isinstance(ia, set) and (ib not in ia))
                or (isinstance(ib, set) and (ia not in ib))):
            return False
    return True
def make_iter_test(is_flaky=False, **kwargs):
    """Create a test method that runs one example iteration.

    ``kwargs`` are forwarded to ``run_iteration``; flaky iterations get a
    short settle-time sleep and up to three retries via ``flaky``.
    """
    def _iteration_test(self):
        if is_flaky:
            self.sleep(1.0)
        self.run_iteration(**kwargs)
    if is_flaky:
        return flaky.flaky(max_runs=3)(_iteration_test)
    return _iteration_test
def example_decorator(name, x, iter_over, timeout):
    r"""Consolidate decorator based on iteration values.

    Builds a stack of ``unittest.skipIf`` decorators (example tests
    disabled, language/comm not installed) plus an optional timeout.

    Args:
        name (str): Example name.
        x (list): Iteration parameters.
        iter_over (list): Iteration dimensions.
        timeout (float): Test timeout.

    Returns:
        function: Decorator.

    """
    def deco(func):
        add_timeout_dec = True
        # Skip everything unless example tests are explicitly enabled.
        flag = (not tools.check_environ_bool('YGG_ENABLE_EXAMPLE_TESTS'))
        if not flag:
            add_timeout_dec = False
        deco_list = [unittest.skipIf(flag, "Example tests not enabled.")]
        for i, k in enumerate(iter_over):
            v = x[i]
            flag = None  # None = no installability check for this dimension
            msg = None
            if k == 'comm':
                flag = tools.is_comm_installed(v)
            elif k == 'language':
                flag = True
                for vv in get_example_languages(name, language=v):
                    if not tools.is_lang_installed(vv):
                        flag = False
                        break
                    else:
                        try:
                            check_enabled_languages(vv)
                        except unittest.SkipTest as e:
                            msg = str(e)
                            flag = False
                            break
            if flag is not None:
                if not flag:
                    # Don't add timeout if the test is going to be skipped
                    add_timeout_dec = False
                    if msg is None:
                        msg = "%s %s not installed." % (k.title(), v)
                deco_list.append(unittest.skipIf(not flag, msg))
        if add_timeout_dec:
            # Timeout wraps everything else, so it runs outermost.
            deco_list.insert(0, timeout_dec(timeout=timeout))
        for v in deco_list:
            func = v(func)
        return func
    return deco
class ExampleMeta(ComponentMeta):
    r"""Metaclass that generates one test method per iteration tuple.

    For classes that define ``example_name``, every combination of the
    ``iter_list_*`` values (minus ``iter_skip`` matches) becomes a
    ``test_<dim1>_<dim2>...`` method built via ``make_iter_test`` and
    wrapped by ``example_decorator``.
    """

    def __new__(cls, name, bases, dct):
        if dct.get('example_name', None) is not None:
            # Default the language iteration list to the example's languages.
            dct.setdefault('iter_list_language',
                           get_example_languages(dct['example_name']))
        timeout = dct.get('timeout', 600)
        iter_lists = []
        iter_keys = []
        test_name_fmt = 'test'
        iter_skip = dct.get('iter_skip', [])
        iter_flaky = dct.get('iter_flaky', [])
        iter_over = dct.get('iter_over', ['language'])
        # Allow shorthand dimension names.
        iter_aliases = {'lang': 'language',
                        'type': 'datatype',
                        'types': 'datatype'}
        iter_over = [iter_aliases.get(x, x) for x in iter_over]
        for x in iter_over:
            test_name_fmt += '_%s'
            # Look up the per-dimension value list on the class, then bases.
            x_iter_list = dct.get('iter_list_%s' % x, None)
            for ibase in bases:
                if x_iter_list is not None:
                    break
                x_iter_list = getattr(ibase, 'iter_list_%s' % x, None)
            if x_iter_list is not None:
                iter_lists.append(x_iter_list)
                iter_keys.append(x)
            elif dct.get('example_name', None) is not None:  # pragma: debug
                raise ValueError("Unsupported iter dimension: %s" % x)
        if dct.get('example_name', None) is not None:
            for x in itertools.product(*iter_lists):
                if iter_pattern_match(x, iter_skip):
                    continue
                itest_name = (test_name_fmt % x)
                # Don't clobber a hand-written test of the same name.
                if itest_name not in dct:
                    itest_func = make_iter_test(
                        is_flaky=iter_pattern_match(x, iter_flaky),
                        **{k: v for k, v in
                           zip(iter_keys, x)})
                    itest_func.__name__ = itest_name
                    if timeout is not None:
                        itest_func = example_decorator(
                            dct['example_name'], x, iter_over, timeout)(itest_func)
                    dct[itest_name] = itest_func
        out = super(ExampleMeta, cls).__new__(cls, name, bases, dct)
        if out.example_name is not None:
            # Record the generated class in the module-level registry.
            global _test_registry
            _test_registry[out.example_name] = out
        # else:
        #     out = unittest.skipIf(True, "Test uninitialized.")(out)
        return out
@six.add_metaclass(ExampleMeta)
class ExampleTstBase(YggTestBase, tools.YggClass):
    r"""Base class for running examples.

    Subclasses set ``example_name`` (and optionally the ``iter_*``
    attributes); ExampleMeta then generates one test method per
    iteration tuple.
    """

    example_name = None        # name of the example to run
    expects_error = False      # True if the run should set error_flag
    # NOTE: class-level dict, shared by subclasses unless overridden.
    env = {}                   # extra environment variables for the run
    iter_over = ['language']   # dimensions iterated over by the metaclass
    iter_skip = []             # iteration tuples to skip (supports '*')
    iter_flaky = []            # iteration tuples marked flaky (retried)
    iter_list_language = None  # filled in by ExampleMeta from the example
    iter_list_comm = tools.get_supported_comm(dont_include_value=True)
    iter_list_datatype = tools.get_supported_type()

    def __init__(self, *args, **kwargs):
        tools.YggClass.__init__(self, self.example_name)
        # Parameters of the currently-active iteration (set per test).
        self.iter_param = {}
        # Unique id so concurrent runs get distinct namespaces.
        self.uuid = str(uuid.uuid4())
        self.runner = None
        # self.debug_flag = True
        super(ExampleTstBase, self).__init__(*args, **kwargs)

    @property
    def language(self):
        r"""str: Language of the current test."""
        return self.iter_param.get('language', None)

    @property
    def comm(self):
        r"""str: Comm used by the current test."""
        return self.iter_param.get('comm', _default_comm)

    @property
    def description_prefix(self):
        r"""Prefix message with test name."""
        return self.name

    @property
    def namespace(self):
        r"""str: Namespace for the example."""
        return "%s_%s" % (self.name, self.uuid)

    @property
    def tempdir(self):
        r"""str: Temporary directory."""
        return tempfile.gettempdir()

    @property
    def languages_tested(self):
        r"""list: Languages covered by the example."""
        return get_example_languages(self.name, language=self.language)

    @property
    def yaml(self):
        r"""str: The full path to the yaml file for this example."""
        return get_example_yaml(self.name, self.language)

    @property
    def yamldir(self):
        r"""str: Full path to the directory containing the yaml file."""
        if self.yaml is None:  # pragma: no cover
            return None
        if isinstance(self.yaml, list):
            out = os.path.dirname(self.yaml[0])
        else:
            out = os.path.dirname(self.yaml)
        return out

    # @property
    # def yaml_contents(self):
    #     r"""dict: Contents of yaml file."""
    #     if self.yaml is None:  # pragma: no cover
    #         return None
    #     return tools.parse_yaml(self.yaml)

    @property
    def input_files(self):  # pragma: debug
        r"""list: Input files for the run."""
        return None

    @property
    def expected_output_files(self):  # pragma: debug
        r"""list: Examples of expected output for the run."""
        return self.input_files

    @property
    def output_files(self):
        r"""list: Output files for the run."""
        return None

    @property
    def results(self):
        r"""list: Results that should be found in the output files."""
        if self.expected_output_files is None:  # pragma: debug
            return None
        out = []
        for fname in self.expected_output_files:
            assert(os.path.isfile(fname))
            out.append(self.read_file(fname))
        return out

    def check_results(self):
        r"""This should be overridden with checks for the result."""
        if self.output_files is None:
            return
        res_list = self.results
        out_list = self.output_files
        assert(res_list is not None)
        assert(out_list is not None)
        self.assert_equal(len(res_list), len(out_list))
        for res, fout in zip(res_list, out_list):
            self.check_file_exists(fout)
            if isinstance(res, tuple):
                # Tuple results are (checker_callable, *extra_args).
                res[0](fout, *res[1:])
            else:
                self.check_file_size(fout, res)
                self.check_file_contents(fout, res)

    def run_example(self):
        r"""This runs an example in the correct language."""
        assert(self.yaml is not None)
        assert(self.name is not None)
        # Check that language is installed
        for x in self.languages_tested:
            if not tools.is_lang_installed(x):
                raise unittest.SkipTest("%s not installed." % x)
            check_enabled_languages(x)
        # Copy platform specific makefile
        if self.language == 'make':
            makefile = os.path.join(self.yamldir, 'src', 'Makefile')
            if platform._is_win:  # pragma: windows
                makedrv = import_component('model', 'make')
                assert(makedrv.get_tool('compiler').toolname == 'nmake')
                make_ext = '_windows'
            else:
                make_ext = '_linux'
            shutil.copy(makefile + make_ext, makefile)
        # Check that comm is installed
        if self.comm in ['ipc', 'IPCComm']:
            from yggdrasil.communication.IPCComm import (
                ipcrm_queues, ipc_queues)
            qlist = ipc_queues()
            if qlist:  # pragma: debug
                # Stale queues from a previous run would break this one.
                print('Existing queues:', qlist)
                ipcrm_queues()
        # Run
        os.environ.update(self.env)
        self.runner = runner.get_runner(self.yaml, namespace=self.namespace,
                                        production_run=True)
        self.runner.run()
        self.runner.printStatus()
        if self.expects_error:
            assert(self.runner.error_flag)
        else:
            assert(not self.runner.error_flag)
        try:
            self.check_results()
        except BaseException:  # pragma: debug
            self.runner.printStatus()
            raise
        finally:
            self.example_cleanup()
            # Remove copied makefile
            if self.language == 'make':
                makefile = os.path.join(self.yamldir, 'src', 'Makefile')
                if os.path.isfile(makefile):
                    os.remove(makefile)
            self.runner = None

    def example_cleanup(self):
        r"""Cleanup files created during the test."""
        if (self.yaml is not None) and (self.output_files is not None):
            timer_class = tools.YggClass()
            for fout in self.output_files:
                if os.path.isfile(fout):
                    tools.remove_path(fout, timer_class=timer_class, timeout=5)

    def setup_iteration(self, **kwargs):
        r"""Perform setup associated with an iteration."""
        for k, v in kwargs.items():
            # A setup_iteration_<dim> hook may transform the value.
            k_setup = getattr(self, 'setup_iteration_%s' % k, None)
            if k_setup is not None:
                v = k_setup(v)
            self.iter_param[k] = v

    def teardown_iteration(self, **kwargs):
        r"""Perform teardown associated with an iteration."""
        for k, v in kwargs.items():
            k_teardown = getattr(self, 'teardown_iteration_%s' % k, None)
            if k_teardown is not None:
                k_teardown(v)
            del self.iter_param[k]
        assert(not self.iter_param)
        self.iter_param = {}

    def setup_iteration_language(self, language=None):
        r"""Perform setup associated with a language iteration."""
        if language is not None:
            for x in get_example_languages(self.example_name,
                                           language=language):
                check_enabled_languages(x)
        return language

    def setup_iteration_comm(self, comm=None):
        r"""Perform setup associated with a comm iteration."""
        assert(comm is not None)
        # if not tools.is_comm_installed(comm):
        #     raise unittest.SkipTest("%s library not installed."
        #                             % comm)
        self.set_default_comm(default_comm=comm)
        return comm

    def teardown_iteration_comm(self, comm=None):
        r"""Perform teardown associated with a comm iteration."""
        self.reset_default_comm()

    def run_iteration(self, **kwargs):
        r"""Run a test for the specified parameters."""
        if not tools.check_environ_bool('YGG_ENABLE_EXAMPLE_TESTS'):
            raise unittest.SkipTest("Example tests not enabled.")
        self.setup_iteration(**kwargs)
        try:
            getattr(self, kwargs.get('method', 'run_example'))()
        finally:
            self.teardown_iteration(**kwargs)
| StarcoderdataPython |
3590974 | <reponame>thomasfrederikhoeck/ml_tooling
import numpy as np
import pandas as pd
from ml_tooling.utils import DataType
def target_correlation(
    features: pd.DataFrame, target: DataType, method: str = "pearson"
) -> pd.Series:
    """
    Calculate target_correlation between features and target and returns a sorted pd.Series

    Parameters
    ----------
    features: pd.DataFrame
        Features to calculate target_correlation for

    target: np.ndarray or pd.Series
        Target variable

    method: str
        Which correlation to use. One of 'pearson', 'spearman', 'kendall'

    Returns
    -------
    pd.Series
        Series of feature importance sorted by absolute value (ascending)
    """
    if isinstance(target, np.ndarray):
        # corrwith needs an index-aligned Series; a fresh RangeIndex lines
        # up with a default-indexed features frame.
        target = pd.Series(target)

    corr = features.corrwith(target, method=method)
    # Sort by absolute correlation strength.  Use .iloc for the positional
    # reindex: plain ``corr[order]`` on a label-indexed Series relies on
    # positional fallback indexing, which is deprecated/removed in modern
    # pandas.
    order = np.argsort(corr.abs().to_numpy())
    return corr.iloc[order]
| StarcoderdataPython |
5154188 | import os
import torch
import torch.utils.data as data
from PIL import Image
import numpy as np
import pandas as pd
import ast
import utils
def get_loader(transform,
               mode='train',
               batch_size=1,
               start_word="<start>",
               end_word="<end>",
               num_workers=0
               ):
    """Returns the data loader.

    Args:
        transform: Image transform.
        mode: One of 'train' or 'test'.
        batch_size: Batch size (if in testing mode, must have batch_size=1).
        start_word: Special word denoting sentence start.
        end_word: Special word denoting sentence end.
        num_workers: Number of subprocesses to use for data loading

    Raises:
        ValueError: If ``mode`` is not 'train' or 'test'.
    """
    # Based on mode (train, test), obtain spectrogram_folder and annotations_file.
    if mode == 'train':
        spectrogram_folder = 'train_set/spectrogram'
        annotations_file = 'train_set/annotations.csv'
    elif mode == 'test':
        assert batch_size == 1, "Please change batch_size to 1 if testing your model."
        spectrogram_folder = 'test_set'
        annotations_file = None  # TODO remove this but take note its fed into Dataset
    else:
        # Previously an unknown mode fell through and crashed later with a
        # NameError on spectrogram_folder; fail fast with a clear message.
        raise ValueError("mode must be 'train' or 'test', got %r" % (mode,))

    # Spectrogram-Dance dataset.
    dataset = TikTokDataset(transform=transform,
                            mode=mode,
                            batch_size=batch_size,
                            start_word=start_word,
                            end_word=end_word,
                            annotations_file=annotations_file,
                            spectrogram_folder=spectrogram_folder)

    if mode == 'train':
        # NOTE(review): batch_size is not forwarded here, so training
        # batches default to 1 regardless of the argument — confirm this
        # is intended.
        data_loader = data.DataLoader(dataset=dataset,
                                      num_workers=num_workers,
                                      shuffle=True)
    else:
        data_loader = data.DataLoader(dataset=dataset,
                                      batch_size=dataset.batch_size,
                                      shuffle=True,
                                      num_workers=num_workers)
    return data_loader
class TikTokDataset(data.Dataset):  # Map-style dataset
    """Spectrogram/pose dataset.

    In 'train' mode, items pair a transformed spectrogram image with the
    pose coordinates from the annotations CSV; in any other mode, items
    pair the original spectrogram array with its transformed version.
    """

    def __init__(self, transform, mode, batch_size, start_word,
                 end_word, annotations_file, spectrogram_folder):
        # NOTE(review): start_word/end_word are accepted but never stored
        # or used here — confirm whether they are still needed.
        self.transform = transform
        self.mode = mode
        self.batch_size = batch_size
        self.spectrogram_folder = spectrogram_folder
        if self.mode == 'train':
            self.annotations = pd.read_csv(
                annotations_file)
            self.num_vids = len(self.annotations.index)
        else:
            # Non-train mode: index every spectrogram file in the folder.
            spectrograms = os.listdir(self.spectrogram_folder)
            self.paths = [self.spectrogram_folder + "/" + item for item in spectrograms]

    def __getitem__(self, idx):
        # obtain image and caption if in training mode
        if self.mode == 'train':
            # remove .mp4 in index (column 0 holds the video filename)
            spectrogram_filename = os.path.basename(
                self.annotations.iloc[idx, 0])
            spectrogram_basename = os.path.splitext(spectrogram_filename)[0]
            # construct spectrogram path
            spectrogram_path = os.path.join(self.spectrogram_folder,
                                            spectrogram_basename + ".png")
            # All remaining columns hold the pose annotation data.
            pdseries = self.annotations.iloc[idx, 1:]
            # Convert spectrogram to tensor and pre-process using transform
            spectrogram = Image.open(spectrogram_path).convert('RGB')
            spectrogram = self.transform(spectrogram)
            # If I later on decide to put coordinates for each joint under
            # individual headers, I can process them here.
            pose_coordinates = utils.concatenate_arrays(
                pdseries)
            # return pre-processed image and caption tensors
            return spectrogram, pose_coordinates
        # obtain spectrogram if in test mode
        else:
            path = self.paths[idx]
            # Convert image to tensor and pre-process using transform
            PIL_image = Image.open(path).convert('RGB')
            orig_spectrogram = np.array(PIL_image)
            spectrogram = self.transform(PIL_image)
            # return original image and pre-processed image tensor
            return orig_spectrogram, spectrogram

    def __len__(self):
        if self.mode == 'train':
            return len(self.annotations)
        else:
            return len(self.paths)
| StarcoderdataPython |
1971794 | <reponame>WilsonWangTHU/neural_graph_evolution
# -----------------------------------------------------------------------------
# @brief:
# In this function, we define the base agent.
# The base agent should be responsible for building the policy network,
# fetch the io placeholders / tensors, and set up the variable list
# @author:
# code originally from kvfran, modified by <NAME>
# -----------------------------------------------------------------------------
import tensorflow as tf
import init_path
import multiprocessing
from network import policy_network
from network import gated_graph_policy_network
from network import nervenet_policy
from network import nervenetplus_policy
from network import treenet_policy
from util import logger
from graph_util import graph_data_util
from graph_util import nervenetplus_util
from util import utils
import numpy as np
class base_agent(multiprocessing.Process):
def __init__(self,
             args,
             observation_size,
             action_size,
             task_q,
             result_q,
             name_scope='trpo_agent',
             is_rollout_agent=False):
    """Initialize the agent process.

    Args:
        args: Parsed configuration namespace (flags such as
            use_gnn_as_policy, nervenetplus are read from it).
        observation_size: Size of the observation vector.
        action_size: Size of the action vector.
        task_q: Multiprocessing queue the agent reads tasks from.
        result_q: Multiprocessing queue the agent writes results to.
        name_scope: TF variable name scope prefix for this agent.
        is_rollout_agent: True when this agent only performs rollouts.
    """
    # the multiprocessing initialization
    multiprocessing.Process.__init__(self)
    self.task_q = task_q
    self.result_q = result_q
    self.gnn_placeholder_list = None
    self.obs_placeholder = None
    # the configurations for the agent
    self.args = args
    # the network parameters
    self.name_scope = name_scope
    self.observation_size = observation_size
    self.action_size = action_size
    self.is_rollout_agent = is_rollout_agent
    # the variables and networks to be used, init them before use them
    self.policy_network = None
    self.step_policy_network = None
    self.policy_var_list = None
    self.tf_var_list = None
    self.iteration = None
    # the gnn parameters (index caches etc.) only apply to GNN policies
    if self.args.use_gnn_as_policy:
        self.gnn_parameter_initialization()
    self.base_path = init_path.get_base_dir()
def build_session(self):
    """Create the TF session, pinning it to GPU or CPU per self.args."""
    device_count = {'GPU': 1 if self.args.use_gpu else 0}
    session_config = tf.ConfigProto(device_count=device_count)
    session_config.gpu_options.allow_growth = True  # don't take full gpu memory
    self.session = tf.Session(config=session_config)
def fetch_policy_info(self):
    """Cache the policy network's placeholders, outputs and variables.

    Must be called after the policy network(s) have been built; copies
    the network's input placeholders, action-distribution tensors and
    variable lists onto the agent.
    """
    assert self.policy_network is not None, \
        logger.error('Init the policy network before using it')

    # input placeholders to the policy networks
    if self.args.use_gnn_as_policy:
        # index placeholders
        self.receive_idx_placeholder, self.send_idx_placeholder, \
            self.node_type_idx_placeholder, \
            self.inverse_node_type_idx_placeholder, \
            self.output_type_idx_placeholder, \
            self.inverse_output_type_idx_placeholder, \
            self.batch_size_int_placeholder = \
            self.policy_network.get_gnn_idx_placeholder()

        # the graph_obs placeholders and graph_parameters_placeholders
        self.graph_obs_placeholder = \
            self.policy_network.get_input_obs_placeholder()
        self.graph_parameters_placeholder = \
            self.policy_network.get_input_parameters_placeholder()

        self.gnn_placeholder_list = [
            self.receive_idx_placeholder,
            self.send_idx_placeholder,
            self.node_type_idx_placeholder,
            self.inverse_node_type_idx_placeholder,
            self.output_type_idx_placeholder,
            self.inverse_output_type_idx_placeholder,
            self.batch_size_int_placeholder,
            self.graph_obs_placeholder,
            self.graph_parameters_placeholder
        ]

        if self.args.nervenetplus:
            # The nervenetplus variant keeps a separate single-step
            # network; mirror its placeholders under step_* names.
            self.step_receive_idx_placeholder, \
                self.step_send_idx_placeholder, \
                self.step_node_type_idx_placeholder, \
                self.step_inverse_node_type_idx_placeholder, \
                self.step_output_type_idx_placeholder, \
                self.step_inverse_output_type_idx_placeholder, \
                self.step_batch_size_int_placeholder = \
                self.step_policy_network.get_gnn_idx_placeholder()

            # the graph_obs placeholders and graph_parameters_placeholders
            self.step_graph_obs_placeholder = \
                self.step_policy_network.get_input_obs_placeholder()
            self.step_graph_parameters_placeholder = \
                self.step_policy_network.get_input_parameters_placeholder()
    else:
        self.obs_placeholder = self.policy_network.get_input_placeholder()
        self.raw_obs_placeholder = None

    # output from the policy networks means for each action
    self.action_dist_mu = self.policy_network.get_action_dist_mu()

    # log std parameters of actions (all the same)
    self.action_dist_logstd_param = \
        self.policy_network.get_action_dist_logstd_param()
    self.action_dist_logstd = self.action_dist_logstd_param

    if self.args.nervenetplus:
        self.step_action_dist_mu = \
            self.step_policy_network.get_action_dist_mu()

        # log std parameters of actions (all the same)
        self.step_action_dist_logstd_param = \
            self.step_policy_network.get_action_dist_logstd_param()
        self.step_action_dist_logstd = self.step_action_dist_logstd_param

    # "policy_var_list": to be passed to the rollout agents
    # "all_policy_var_list": to be saved into the checkpoint
    self.policy_var_list, self.all_policy_var_list = \
        self.policy_network.get_var_list()

    self.iteration = self.policy_network.get_iteration_var()
    self.iteration_add_op = self.iteration.assign_add(1)
def build_policy_network(self, adj_matrix=None, node_attr=None):
    """Build the policy network selected by the configuration flags.

    Selection order: nervenetplus (treenet or plain) -> nervenet ->
    gated graph network -> plain MLP policy.  Ends by caching the
    resulting placeholders/outputs via fetch_policy_info().

    Args:
        adj_matrix: Graph adjacency matrix for GNN policies.
        node_attr: Per-node attributes for GNN policies.
    """
    if self.args.nervenetplus:
        # nervenetplus only makes sense on top of the nervenet GNN policy.
        assert self.args.use_gnn_as_policy and self.args.use_nervenet

    if self.args.use_gnn_as_policy:
        if self.args.use_nervenet:
            if self.args.nervenetplus:
                if self.args.tree_net:
                    self.policy_network = treenet_policy.nervenet(
                        session=self.session,
                        name_scope=self.name_scope + '_policy',
                        input_size=self.observation_size,
                        output_size=self.action_size,
                        adj_matrix=adj_matrix,
                        node_attr=node_attr,
                        args=self.args,
                        is_rollout_agent=self.is_rollout_agent
                    )
                    # NOTE(review): the step network is built from
                    # self.adj_matrix / self.node_attr rather than the
                    # adj_matrix / node_attr parameters — confirm those
                    # attributes are set before this call.
                    self.step_policy_network = treenet_policy.nervenet(
                        session=self.session,
                        name_scope=self.name_scope + 'step_policy',
                        input_size=self.observation_size,
                        output_size=self.action_size,
                        adj_matrix=self.adj_matrix,
                        node_attr=self.node_attr,
                        args=self.args,
                        is_rollout_agent=True
                    )
                    self.node_info = self.policy_network.get_node_info()
                    self.step_policy_var_list, _ = \
                        self.step_policy_network.get_var_list()
                    # Helper that copies trained weights into the step net.
                    self.set_step_policy = \
                        utils.SetPolicyWeights(self.session,
                                               self.step_policy_var_list)
                else:
                    self.policy_network = nervenetplus_policy.nervenet(
                        session=self.session,
                        name_scope=self.name_scope + '_policy',
                        input_size=self.observation_size,
                        output_size=self.action_size,
                        adj_matrix=adj_matrix,
                        node_attr=node_attr,
                        args=self.args,
                        is_rollout_agent=self.is_rollout_agent
                    )
                    self.step_policy_network = nervenetplus_policy.nervenet(
                        session=self.session,
                        name_scope=self.name_scope + 'step_policy',
                        input_size=self.observation_size,
                        output_size=self.action_size,
                        adj_matrix=self.adj_matrix,
                        node_attr=self.node_attr,
                        args=self.args,
                        is_rollout_agent=True
                    )
                    self.node_info = self.policy_network.get_node_info()
                    self.step_policy_var_list, _ = \
                        self.step_policy_network.get_var_list()
                    self.set_step_policy = \
                        utils.SetPolicyWeights(self.session,
                                               self.step_policy_var_list)
            else:
                self.policy_network = nervenet_policy.nervenet(
                    session=self.session,
                    name_scope=self.name_scope + '_policy',
                    input_size=self.observation_size,
                    output_size=self.action_size,
                    adj_matrix=adj_matrix,
                    node_attr=node_attr,
                    args=self.args
                )
        else:
            self.policy_network = gated_graph_policy_network.GGNN(
                session=self.session,
                name_scope=self.name_scope + '_policy',
                input_size=self.observation_size,
                output_size=self.action_size,
                ob_placeholder=None,
                trainable=True,
                build_network_now=True,
                is_baseline=False,
                placeholder_list=None,
                args=self.args
            )
            self.raw_obs_placeholder = None
            self.node_info = self.policy_network.get_node_info()
    else:
        self.policy_network = policy_network.policy_network(
            session=self.session,
            name_scope=self.name_scope + '_policy',
            input_size=self.observation_size,
            output_size=self.action_size,
            ob_placeholder=None,
            trainable=True,
            build_network_now=True,
            define_std=True,
            is_baseline=False,
            args=self.args
        )

    # if use the nervenetplus model
    # if self.args.nervenetplus:
    #     # build the action model
    #     with tf.variable_scope('', reuse=True):
    #         self.step_policy_network = nervenetplus_policy.nervenet(
    #             session=self.session,
    #             name_scope=self.name_scope + '_policy',
    #             input_size=self.observation_size,
    #             output_size=self.action_size,
    #             adj_matrix=self.adj_matrix,
    #             node_attr=self.node_attr,
    #             args=self.args,
    #             is_rollout_agent=True
    #         )
    #         self.node_info = self.policy_network.get_node_info()

    self.fetch_policy_info()
def gnn_parameter_initialization(self):
    '''
    @brief:
        Reset the cached GNN graph-index parameters; see the
        gated_graph_network_policy file for details on what each of
        these variables means.
    '''
    for index_attr in ('receive_idx', 'send_idx',
                       'node_type_idx', 'inverse_node_type_idx',
                       'output_type_idx', 'inverse_output_type_idx'):
        setattr(self, index_attr, None)
    self.last_batch_size = -1
    self.nervenetplus_batch_pos = None
def prepared_policy_network_feeddict(self, obs_n, rollout_data=None,
                                     step_model=False):
    '''
        @brief: prepare the feed dict for the policy network part

        @param obs_n: batch of raw observations
        @param rollout_data: per-episode rollout info; required when
            nervenetplus is enabled and obs_n holds more than one step
        @param step_model: if True, feed the single-step (rollout) copy
            of the network instead of the training copy
        @return: (feed_dict, nervenetplus_batch_pos)
    '''
    nervenetplus_batch_pos = None
    if self.args.use_gnn_as_policy:
        if not self.args.nervenetplus or obs_n.shape[0] == 1:
            # Single pass: observations and index structures are built
            # together; cached idx members are reused when possible.
            graph_obs, graph_parameters, \
                self.receive_idx, self.send_idx, \
                self.node_type_idx, self.inverse_node_type_idx, \
                self.output_type_idx, self.inverse_output_type_idx, \
                self.last_batch_size = \
                graph_data_util.construct_graph_input_feeddict(
                    self.node_info,
                    obs_n,
                    self.receive_idx,
                    self.send_idx,
                    self.node_type_idx,
                    self.inverse_node_type_idx,
                    self.output_type_idx,
                    self.inverse_output_type_idx,
                    self.last_batch_size,
                    request_data=['ob', 'idx']
                )
        else:
            assert rollout_data is not None
            # preprocess the episodic information: first fetch only the
            # observations (-1 sentinels skip the idx computation) ...
            graph_obs, graph_parameters, _, _, _, _, _, _, _ = \
                graph_data_util.construct_graph_input_feeddict(
                    self.node_info, obs_n,
                    -1, -1, -1, -1, -1, -1, -1,
                    request_data=['ob']
                )
            # ... then split the rollout into propagation-step chunks ...
            nervenetplus_batch_pos, total_size = \
                nervenetplus_util.nervenetplus_step_assign(
                    rollout_data, self.args.gnn_num_prop_steps
                )
            # ... and rebuild the idx structures for the chunked batch
            # size (only 'idx' is requested, so the obs array is dummy).
            _, _, self.receive_idx, self.send_idx, \
                self.node_type_idx, self.inverse_node_type_idx, \
                self.output_type_idx, self.inverse_output_type_idx, \
                self.last_batch_size = \
                graph_data_util.construct_graph_input_feeddict(
                    self.node_info,
                    np.empty(
                        [int(total_size / self.args.gnn_num_prop_steps)]
                    ),
                    self.receive_idx,
                    self.send_idx,
                    self.node_type_idx,
                    self.inverse_node_type_idx,
                    self.output_type_idx,
                    self.inverse_output_type_idx,
                    self.last_batch_size,
                    request_data=['idx']
                )
        if step_model:
            # Feed the step (rollout) copy of the network.
            feed_dict = {
                self.step_batch_size_int_placeholder:
                    int(self.last_batch_size),
                self.step_receive_idx_placeholder:
                    self.receive_idx,
                self.step_inverse_node_type_idx_placeholder:
                    self.inverse_node_type_idx,
                self.step_inverse_output_type_idx_placeholder:
                    self.inverse_output_type_idx
            }
            # append the input obs and parameters
            for i_node_type in self.node_info['node_type_dict']:
                feed_dict[self.step_graph_obs_placeholder[i_node_type]] = \
                    graph_obs[i_node_type]
                feed_dict[self.step_graph_parameters_placeholder[i_node_type]] = \
                    graph_parameters[i_node_type]
            # append the send idx
            for i_edge in self.node_info['edge_type_list']:
                feed_dict[self.step_send_idx_placeholder[i_edge]] = \
                    self.send_idx[i_edge]
            # append the node type idx
            for i_node_type in self.node_info['node_type_dict']:
                feed_dict[self.step_node_type_idx_placeholder[i_node_type]] \
                    = self.node_type_idx[i_node_type]
            # append the output type idx
            for i_output_type in self.node_info['output_type_dict']:
                feed_dict[self.step_output_type_idx_placeholder[i_output_type]] \
                    = self.output_type_idx[i_output_type]
            # if the raw_obs is needed for the baseline
            if self.raw_obs_placeholder is not None:
                feed_dict[self.raw_obs_placeholder] = obs_n
        else:
            # Feed the training copy of the network (same layout as above
            # but with the non-step placeholders).
            feed_dict = {
                self.batch_size_int_placeholder:
                    int(self.last_batch_size),
                self.receive_idx_placeholder:
                    self.receive_idx,
                self.inverse_node_type_idx_placeholder:
                    self.inverse_node_type_idx,
                self.inverse_output_type_idx_placeholder:
                    self.inverse_output_type_idx
            }
            # append the input obs and parameters
            for i_node_type in self.node_info['node_type_dict']:
                feed_dict[self.graph_obs_placeholder[i_node_type]] = \
                    graph_obs[i_node_type]
                feed_dict[self.graph_parameters_placeholder[i_node_type]] = \
                    graph_parameters[i_node_type]
            # append the send idx
            for i_edge in self.node_info['edge_type_list']:
                feed_dict[self.send_idx_placeholder[i_edge]] = \
                    self.send_idx[i_edge]
            # append the node type idx
            for i_node_type in self.node_info['node_type_dict']:
                feed_dict[self.node_type_idx_placeholder[i_node_type]] \
                    = self.node_type_idx[i_node_type]
            # append the output type idx
            for i_output_type in self.node_info['output_type_dict']:
                feed_dict[self.output_type_idx_placeholder[i_output_type]] \
                    = self.output_type_idx[i_output_type]
            # if the raw_obs is needed for the baseline
            if self.raw_obs_placeholder is not None:
                feed_dict[self.raw_obs_placeholder] = obs_n
    else:
        # it is the most easy case, nice and easy
        feed_dict = {self.obs_placeholder: obs_n}
    self.nervenetplus_batch_pos = nervenetplus_batch_pos
    return feed_dict, nervenetplus_batch_pos
def build_update_op_preprocess(self):
    '''
        @brief: The preprocess that is shared by trpo, ppo and vpg updates

        Creates the rollout-data placeholders and the log-probability,
        importance ratio, KL and entropy tensors used by all three
        policy-gradient update rules.
    '''
    # the input placeholders for the input
    self.action_placeholder = tf.placeholder(
        tf.float32, [None, self.action_size],
        name='action_sampled_in_rollout'
    )
    self.advantage_placeholder = tf.placeholder(
        tf.float32, [None], name='advantage_value'
    )
    self.oldaction_dist_mu_placeholder = tf.placeholder(
        tf.float32, [None, self.action_size], name='old_act_dist_mu'
    )
    self.oldaction_dist_logstd_placeholder = tf.placeholder(
        tf.float32, [None, self.action_size], name='old_act_dist_logstd'
    )
    self.batch_size_float_placeholder = tf.placeholder(
        tf.float32, [], name='batch_size_float'
    )
    # the adaptive kl penalty
    if self.args.use_kl_penalty:
        self.kl_lambda_placeholder = tf.placeholder(tf.float32, [],
                                                    name='kl_lambda')
    # what are the probabilities of taking self.action, given new and old
    # distributions
    self.log_p_n = utils.gauss_log_prob(
        self.action_dist_mu,
        self.action_dist_logstd,
        self.action_placeholder
    )
    self.log_oldp_n = utils.gauss_log_prob(
        self.oldaction_dist_mu_placeholder,
        self.oldaction_dist_logstd_placeholder,
        self.action_placeholder
    )
    # importance ratio pi_new(a) / pi_old(a), computed in log space
    self.ratio = tf.exp(self.log_p_n - self.log_oldp_n)
    # the kl divergence between the old and new action
    self.kl = utils.gauss_KL(
        self.oldaction_dist_mu_placeholder,
        self.oldaction_dist_logstd_placeholder,
        self.action_dist_mu,
        self.action_dist_logstd
    )
    # normalise the summed KL/entropy by the batch size to get a mean
    self.kl = self.kl / self.batch_size_float_placeholder
    # the entropy
    self.ent = utils.gauss_ent(
        self.action_dist_mu, self.action_dist_logstd
    )
    self.ent = self.ent / self.batch_size_float_placeholder
def build_update_op_postprocess(self):
    '''
        @brief: The postprocess that is shared by trpo, ppo and vpg updates
    '''
    # Abstract hook: concrete agent subclasses must override.
    raise NotImplementedError


def run(self):
    '''
        @brief:
            This is the standard function to be called by the
            "multiprocessing.Process"
    '''
    # Abstract hook: concrete agent subclasses must override.
    raise NotImplementedError


def build_models(self):
    '''
        @brief:
            This is the function where the rollout agents and trpo agent
            build their networks, set up the placeholders, and gather the
            variable list.
    '''
    # Abstract hook: concrete agent subclasses must override.
    raise NotImplementedError


def prepared_network_feeddict(self, data_dict):
    '''
        @brief:
            For the general policy network and graph network, we have
            different format, as we batch the network in the policy network
        @return:
            The feed_dict structure
    '''
    # Abstract hook: concrete agent subclasses must override.
    raise NotImplementedError
def get_sess(self):
    # Expose the shared TensorFlow session owned by this agent.
    return self.session


def get_iteration_count(self):
    # Evaluate and return the persistent `iteration` counter variable.
    return self.session.run(self.iteration)
def get_experiment_name(self):
    '''
        @brief:
            this is the unique id of the experiments. it might be useful if
            we are running several tasks on the server
    '''
    # Combine task name and launch timestamp into a single identifier.
    return '_'.join([self.args.task, self.args.time_id])
| StarcoderdataPython |
1732217 | <filename>speech/livespeech_recognise.py
#!/usr/bin/python
# encoding: utf-8
from __future__ import print_function
import os
import socket
from pocketsphinx import LiveSpeech, get_model_path
from __playwave import playwave
from __baidu_speech_recognise import get_baidu_asr
sys_model_path = get_model_path()  # bundled pocketsphinx acoustic models
voice_path = os.path.join(os.getcwd(), 'voice')  # feedback wav prompts
usr_model_path = os.path.join(os.getcwd(), 'model')  # custom LM + dictionary

# Offline command recognizer built on pocketsphinx; iterating it yields
# recognized phrases from the microphone.
speech = LiveSpeech(
    verbose=False,
    sampling_rate=44100,
    buffer_size=4096,
    no_search=False,
    full_utt=False,
    hmm=os.path.join(sys_model_path, 'en-us'),
    lm=os.path.join(usr_model_path, '4767.lm'),
    dic=os.path.join(usr_model_path, '4767.dic')
)
def turn_on_the_light():
    # TODO: drive the actual light hardware; currently only logs.
    print("do: turn on the light")


def turn_off_the_light():
    # TODO: drive the actual light hardware; currently only logs.
    print("do: turn off the light")


def null_func():
    # No-op used when recognition failed ("null" result).
    pass


# Maps a recognition result string to the action to execute.
switch_funcs = {"light on": turn_on_the_light, "light off": turn_off_the_light, "null": null_func}
def is_net_ok(testserver):
    """Return True if a TCP connection to ``testserver`` (host, port) succeeds.

    Uses a 3 second timeout.  The probe socket is always closed — the
    original implementation leaked it on the failure and exception paths.
    """
    s = socket.socket()
    s.settimeout(3)
    try:
        # connect_ex returns 0 on success, an errno otherwise.
        return s.connect_ex(testserver) == 0
    except Exception:
        # e.g. DNS resolution failure — treat as "network down".
        return False
    finally:
        s.close()
def do_baidu_speech_recognise():
    """Run one online recognition via Baidu ASR.

    Returns "light on" / "light off" on a recognized command, or "null"
    (after playing an apology prompt) otherwise.
    """
    result_str = get_baidu_asr()
    if result_str == "turn on the light":
        playwave(os.path.join(voice_path, 'beep_lo.wav'))  # audible ack
        print("turn on the light")
        return "light on"
    elif result_str == "turn off the light":
        playwave(os.path.join(voice_path, 'beep_lo.wav'))  # audible ack
        print("turn off the light")
        return "light off"
    else:
        playwave(os.path.join(voice_path, 'sorry.wav'))
        return "null"
def do_sphinx_speech_recognise():
    """Run one offline recognition via pocketsphinx.

    Blocks until the next phrase is decoded, then returns "light on" /
    "light off" / "null".  Every branch returns, so only the first phrase
    of each call is consumed.
    """
    for phrase in speech:
        if str(phrase) == "TURN ON THE LIGHT":
            playwave(os.path.join(voice_path, 'beep_lo.wav'))  # audible ack
            print("turn on the light")
            return "light on"
        elif str(phrase) == "TURN OFF THE LIGHT":
            playwave(os.path.join(voice_path, 'beep_lo.wav'))  # audible ack
            print("turn off the light")
            return "light off"
        else:
            playwave(os.path.join(voice_path, 'sorry.wav'))
            return "null"
if __name__ == "__main__":
    # Main loop: wait for the wake phrase, then run one command recognition.
    while True:
        # Block until the wake word "HI BABY" is decoded.
        for phrase in speech:
            if str(phrase) == "HI BABY":
                print("recognise right")
                playwave(os.path.join(voice_path, 'beep_hi.wav'))
                break
            else:
                continue
        net_status = is_net_ok(('www.baidu.com',443))
        # NOTE(review): the next line unconditionally overrides the probe
        # result, forcing the offline path — looks like debug leftover;
        # removing it would re-enable online recognition.
        net_status = False
        if net_status:
            # online speech recognise based on baidu AI
            recognise_result = do_baidu_speech_recognise()
        else:
            # offline speech recognise based on PocketSphinx
            recognise_result = do_sphinx_speech_recognise()
        func = switch_funcs[recognise_result]
        func()
| StarcoderdataPython |
5101155 | <filename>Python - Desafios e Execercios resolvidos/Des002.py
# Prompt for the user's name and greet them (user-facing strings are Portuguese).
nome = input('Qual o seu nome? ')
print(f'Olá {nome}, seja bem vindo!')
| StarcoderdataPython |
8145994 | <filename>crypto_tracking/trackingAPI/tasks.py
# tracking the cryptocurrency using celery
# crawling cryptocurrency data using beautifulsoup4
# celery is used for async/periodic tasks
from celery import shared_task
from celery.schedules import crontab
from celery.decorators import periodic_task
# beautifulsoup used for scraping data
from bs4 import BeautifulSoup
# requests used for make a request (used get request in this example)
import requests
import time
from .models import Cryptocurrency
@shared_task
def start_crawling_cryptocurrency():
    """Scrape the top-10 coins from coinranking.com and store them.

    Uses get_or_create, so re-running it does not duplicate rows with
    identical field values.
    """
    requested_data = requests.get('https://coinranking.com/')
    bs = BeautifulSoup(requested_data.text, 'html.parser')
    # by this code we will get the first 10 rows of the table
    rows = bs.find('tbody', class_='table__body')
    rows = rows.find_all('tr', class_='table__row')[0:10]
    # looping for all rows
    for row in rows:
        # get the cryptocurrency data
        cryptocurrency = row.find('span', class_='profile__name').get_text().strip().replace('\n', '')
        # get price and market cap values
        values = row.find_all('div', class_='valuta')
        # values[0] is the value of the price
        # values[1] is the value of the market cap
        price = values[0].get_text().strip().replace('\n', '')
        market_cap = values[1].get_text().strip().replace('\n', '')
        # get change data
        change = row.find('div', class_='change').get_text().strip().replace('\n', '')
        # create a new object in the database
        obj, created = Cryptocurrency.objects.get_or_create(
            crypto_name=cryptocurrency,
            price=price,
            market_cap=market_cap,
            change=change
        )
@periodic_task(run_every=(crontab()), name="run_every_1_minute", ignore_result=True)
def update_data():
    """Periodic task: re-scrape the top-10 coins and refresh stored rows."""
    requested_data = requests.get('https://coinranking.com/')
    bs = BeautifulSoup(requested_data.text, 'html.parser')
    # by this code we will get the first 10 rows of the table
    rows = bs.find('tbody', class_='table__body')
    rows = rows.find_all('tr', class_='table__row')[0:10]
    # looping for all rows
    for row in rows:
        # get the cryptocurrency data
        cryptocurrency = row.find('span', class_='profile__name').get_text().strip().replace('\n', '')
        # get price and market cap values
        values = row.find_all('div', class_='valuta')
        # values[0] is the value of the price
        # values[1] is the value of the market cap
        price = values[0].get_text().strip().replace('\n', '')
        market_cap = values[1].get_text().strip().replace('\n', '')
        # get change data
        change = row.find('div', class_='change').get_text().strip().replace('\n', '')
        # update object in the database
        data = {
            'crypto_name': cryptocurrency,
            'price':price,
            'market_cap':market_cap,
            'change':change
        }
        Cryptocurrency.objects.filter(crypto_name=cryptocurrency).update(**data)
    # Seed the table on first run when it is still empty.
    # NOTE(review): print("TEST") looks like debug leftover, and the direct
    # call runs the celery task synchronously — confirm both are intended.
    if not Cryptocurrency.objects.all():
        print("TEST")
        start_crawling_cryptocurrency()
| StarcoderdataPython |
8069453 | <filename>src/ai_harness/harnessutils.py
import yaml
import logging
import logging.config
from ai_harness import xml2object
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
def load_yaml(file: str):
    """Parse *file* as YAML and return the result, or None when it is absent."""
    try:
        stream = open(file, 'r')
    except FileNotFoundError:
        print('File Not Found: ' + file)
        return None
    with stream:
        return yaml.load(stream=stream, Loader=Loader)
# Configure logging from logging.yaml when present; otherwise keep the
# library defaults.
conf = load_yaml('logging.yaml')
if conf is not None:
    try:
        logging.config.dictConfig(conf)
    # NOTE(review): bare except silently swallows invalid configs —
    # consider narrowing to ValueError and logging the failure.
    except:
        pass
    finally:
        pass
def getLogger(name: str) -> logging.Logger:
    """Return the logger registered under *name* (thin logging wrapper)."""
    return logging.getLogger(name)
def getRootLogger() -> logging.Logger:
    """Return the logger *named* 'root'.

    NOTE(review): this is the named logger ``'root'``, not
    ``logging.getLogger()`` (the actual root logger) — confirm intent.
    """
    return logging.getLogger('root')


# Module-wide default logger used by the helpers below.
log = getRootLogger()
def load_xml(xml_file):
    """Parse *xml_file* with xml2object; log and return None on any failure."""
    try:
        return xml2object.parse(xml_file)
    except Exception as e:
        log.error(e)
        return None
| StarcoderdataPython |
5122002 | <reponame>CSIRT-MU/CRUSOE
class Perspectives:
    """Thin wrapper around the ``/perspectives`` endpoint of the API client."""

    def __init__(self, client):
        # HTTP client exposing .get(resource).
        self.client = client

    def all(self):
        """Return every perspective known to the server."""
        return self.client.get("/perspectives")

    def name_to_id(self, name):
        """
        Obtain all perspectives and then seek for ID of perspective with the given name.
        ValueError is thrown when there is no perspective with the given name.
        :param name:
        :return: respective ID
        """
        for entry in self.all():
            if entry['name'] == name:
                return entry['id']
        raise ValueError("Perspective with name '{}' doesn't exist.".format(name))
| StarcoderdataPython |
4983060 | <reponame>IsThisLoss/manga-notify
# flake8: noqa: E402
import logging
# Configure root logging before the application imports below run
# (this ordering is why the file carries a flake8 E402 suppression).
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO,
)
import os
import datetime
from . import bot
from . import background
from . import settings
import sqlite3
import telegram.ext
def init(cfg: settings.Settings):
    """Create and seed the SQLite database on first run.

    Does nothing when the database file already exists; logs an error and
    returns when the init script is missing.
    """
    if os.path.exists(cfg.db_string):
        return
    if not os.path.exists(cfg.db_init):
        logging.error(f'Cannot find {cfg.db_init} file')
        return
    # Read the schema first so the connection is only open while executing.
    with open(cfg.db_init) as f:
        script = f.read()
    conn = sqlite3.connect(cfg.db_string)
    try:
        conn.executescript(script)
    finally:
        # The original leaked this connection (it was never closed).
        conn.close()
def main():
    """Wire up the Telegram bot, the periodic job, and start long-polling."""
    cfg = settings.get_config()
    init(cfg)
    updater = telegram.ext.Updater(token=cfg.tg_token)
    dispatcher = bot.make_dispatcher(updater)
    # Recurring background job (feed parsing) on the bot's job queue.
    dispatcher.job_queue.run_repeating(
        background.job,
        datetime.timedelta(minutes=cfg.parsing_interval)
    )
    dispatcher.job_queue.start()
    updater.start_polling()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
272371 | from pathlib import Path
import os, sys, shutil
import subprocess
import pandas as pd
import string
# Require exactly one argument: the directory containing the videos.
if len(sys.argv) != 2:
    print("Usage: ./extract_gps.py <video dir>")
    # Exit non-zero so shells/scripts can detect the usage error
    # (the original bare sys.exit() exited with status 0).
    sys.exit(1)
def convert_latlong(in_str):
    """Convert an exiftool DMS string (e.g. ``37 deg 52' 3.0"``) to decimal degrees."""
    parts = in_str.split(' ')
    degrees = float(parts[0])
    minutes = float(parts[2][:-1])   # strip trailing '
    seconds = float(parts[3][:-1])   # strip trailing "
    return degrees + minutes / 60.0 + seconds / 3600.0
def on_bancroft(latitude, longitude):
    """True iff (latitude, longitude) falls inside the Bancroft Way bounding box.

    Longitudes are positive degrees *West*, so the numeric comparison is
    reversed relative to the geographic direction.
    """
    # Southwest corner of bancroft -- Intersection of Bancroft and oxford st.
    lat_south, long_west = 37.86792981681717, 122.26526052183016
    # Northeast corner of bancroft -- Intersection of Bancroft and Piedmont Ave
    lat_north, long_east = 37.86956443944309, 122.25276142821582
    # Bounding box calculation
    inside_lat = lat_south < latitude < lat_north
    inside_long = long_east < longitude < long_west
    return inside_lat and inside_long
# Recreate a fresh <video dir>/out directory for the run.
vid_dir = sys.argv[1]
vid_path = Path(vid_dir)
out_path = vid_path/Path("out")
if os.path.exists(out_path):
    shutil.rmtree(out_path)
os.makedirs(out_path)
# For each video, extract the embedded per-sample GPS track with exiftool,
# parse it, and report whether the clip passes along Bancroft Way.
for filename in os.listdir(vid_path):
    if not filename.endswith(".MP4") and not filename.endswith(".mp4"):
        continue
    # outfile = open(out_path/Path(filename[:-4]+"out.txt"), 'w')
    out_process = subprocess.run(args = ["./exiftool.exe", "-a", "\"-gps*\"", "-ee", str(vid_path) + "/" + filename], universal_newlines = True, stdout = subprocess.PIPE)
    output = out_process.stdout
    # Keep only the per-sample section of the exiftool report.
    output_lines = output[output.index("Sample Time"):].split('\n')
    #gps_df = pd.dataframe({'Lat': [], 'Long': [], 'Speed': })
    lats = []
    longs = []
    speeds = []
    stimes = []
    sdurations = []
    datetimes = []
    vid_on_bancroft = False
    banc_ratio = 0.0
    for line in output_lines:
        if len(line) == 0:
            continue
        # Lines look like "Tag Name : value"; drop the leading space of value.
        split_line = line.split(':')
        split_line[1] = split_line[1][1:]
        if line.startswith('Sample Time'):
            # Value is either "N.NN s" or a H:MM:SS timestamp.
            if len(split_line) == 2:
                stimes.append(float(split_line[1][:-2]))
            else:
                stimes.append(float(split_line[3]))
        if line.startswith('Sample Duration'):
            sdurations.append(split_line[1])
        if line.startswith('GPS Latitude'):
            lats.append(split_line[1])
        if line.startswith('GPS Longitude'):
            longs.append(split_line[1])
            # Can check the most recent latitude and longitude to see if the vid is on bancroft
            # Perform the check here since longitude measurement always comes after latitude measurement
            if on_bancroft(convert_latlong(lats[-1]), convert_latlong(longs[-1])):
                # print(convert_latlong(lats[-1]))
                # print(convert_latlong(longs[-1]))
                vid_on_bancroft = True
                banc_ratio += 1.0
        if line.startswith('GPS Speed'):
            speeds.append(split_line[1])
        if line.startswith('GPS Date/Time'):
            datetimes.append(line[line.index(': ')+2:])
    gps_df = pd.DataFrame( {'lat': pd.Series(lats),
                            'long': pd.Series(longs),
                            'speed': pd.Series(speeds),
                            'datetime': pd.Series(datetimes),
                            'sample_time': pd.Series(stimes),
                            'sample_dur': pd.Series(sdurations)
                            } ).set_index('sample_time')
    # Since this is in the Berkeley area, N and W are implied for the latitude and longitude, respectively
    gps_df['converted_lat'] = gps_df['lat'].apply(convert_latlong)
    gps_df['converted_long'] = gps_df['long'].apply(convert_latlong)
    #print(gps_df[['converted_lat', 'converted_long', 'speed', 'datetime']].head())
    print(filename + " on Bancroft Way: " + str(vid_on_bancroft), end="\t")
    # NOTE(review): 59 is presumably the expected GPS sample count per clip —
    # confirm; clips with a different count will skew this ratio.
    print(filename + " Bancroft Ratio: " + str(banc_ratio/59))
    #print(gps_df.head())
    #print(output_lines[:10])
    # outfile.close()
| StarcoderdataPython |
1879282 | <gh_stars>0
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth import authenticate, get_user_model, login, logout
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework import mixins
from rest_framework import generics
from .serializers import EmployeeSerializer
from .models import Employee
from .forms import UserLoginForm, UserRegisterForm
def login_view(request):
    """Render the login form; on valid POST, authenticate and redirect.

    Honors the ``?next=`` query parameter for the post-login destination.
    """
    next = request.GET.get('next')
    form = UserLoginForm(request.POST or None)
    if form.is_valid():
        username = form.cleaned_data.get('username')
        password = form.cleaned_data.get('password')
        # NOTE(review): authenticate() returns None on bad credentials;
        # presumably UserLoginForm already verified them — confirm,
        # otherwise login() below would fail.
        user = authenticate(username=username, password=password)
        login(request, user)
        if next:
            return redirect(next)
        return redirect('/')
    context = {
        'form': form,
    }
    return render(request, "login.html", context)
def register_view(request):
    """Render the signup form; on valid POST, create the user and log them in."""
    next = request.GET.get('next')
    form = UserRegisterForm(request.POST or None)
    if form.is_valid():
        # Defer the save so the password can be hashed via set_password.
        user = form.save(commit=False)
        password = form.cleaned_data.get('password')
        user.set_password(password)
        user.save()
        new_user = authenticate(username=user.username, password=password)
        login(request, new_user)
        if next:
            return redirect(next)
        return redirect('/')
    context = {
        'form': form,
    }
    return render(request, "signup.html", context)
def logout_view(request):
    """End the current session and redirect to the home page."""
    logout(request)
    return redirect('/')
# Add employee
# post
class EmployeeCreateView(generics.CreateAPIView):
    # Authenticated POST creates a new Employee record.
    permission_classes = (IsAuthenticated, )
    serializer_class = EmployeeSerializer
    queryset = Employee.objects.all()
# View all employee
class EmployeeListView(generics.ListAPIView):
    # Unauthenticated GET lists all Employee records.
    serializer_class = EmployeeSerializer
    queryset = Employee.objects.all()
# View, Update, Delete employee instance
# get, put, patch, delete
class EmployeeUpdateView(generics.RetrieveUpdateDestroyAPIView):
    # Authenticated detail endpoint for a single Employee (by pk).
    permission_classes = (IsAuthenticated, )
    serializer_class = EmployeeSerializer
    queryset = Employee.objects.all()
| StarcoderdataPython |
3211576 | <filename>src/main/Client/Client.py
# coding : utf-8
from ssl import SSLSocket
from time import sleep
from util import *
import yaml
import requests
from PySide2.QtWidgets import QApplication, QMessageBox
from PySide2.QtUiTools import QUiLoader
from PySide2.QtCore import QFile
# Login UI window class
class ClientWindow:
    """SDP client window: loads the login/browser UIs, authenticates against
    the auth server and then validates/accesses the application server over
    SSL.  User-facing strings are intentionally left in Chinese."""

    def __init__(self):
        # Initialize client state.
        self.user_accout = None
        self.user_password = <PASSWORD>
        print('===========SDP客户端===========')
        # Read the configuration file.
        try:
            f = open('config.yaml', 'r')
            self.global_config = yaml.load(f.read(), Loader=yaml.FullLoader)
            self.is_debug_mode = self.global_config['isDebugMode']
            print('==========读取配置文件=========')
            # Re-open only to echo the raw config to the console.
            f = open('config.yaml', 'r')
            print(f.read() + '\n===============================')
            f.close()
        except Exception as e:
            self.log(con='配置读取错误!错误信息:', type=ERROR)
            exit(1)
        # Gather some information about the local user.
        # self.local_public_ip = requests.get('https://checkip.amazonaws.com').text.strip()
        self.local_public_ip = '127.0.0.1'
        self.log(con='本地公网IP地址:'+self.local_public_ip)
        # Load the login window.
        qFile = QFile("ui/LoginWindow.ui")
        qFile.open(QFile.ReadOnly)
        qFile.close()
        self.ui = QUiLoader().load(qFile)
        self.ui.button_login.clicked.connect(self.login)
        self.ui.show()
        # Load the browser window (shown after a successful login).
        qFile = QFile("ui/Brower.ui")
        qFile.open(QFile.ReadOnly)
        qFile.close()
        self.ui2 = QUiLoader().load(qFile)

    def log(self, add='NO ADDR', con:str = '', type = CONTENT):
        """Print a colored, timestamped log line; ERROR also pops a dialog.

        Returns -1 for ERROR, 0 otherwise, so callers can propagate errors.
        """
        # Truncate long log entries (disabled).
        # if len(con) >= 40:
        #     con = con[:40] + '... ...'
        if type == CONNECTION:
            print('\033[32m[%s]新的连接:\033[0m%s' % (gFTime(), str(add)))
        elif type == DISCONNECT:
            print('\033[33m[%s]连接断开:\033[0m%s' % (gFTime(), str(add)))
        elif type == ERROR:
            print('\033[31m[%s]错误信息:\033[0m%s' % (gFTime(), con))
            QMessageBox.about(self.ui, 'ERROR', '[%s]\n错误信息:%s' % (gFTime(), con))
            return -1
        elif type == SEND:
            print('\033[35m[%s]发送信息:\033[0m%s' % (gFTime(), con))
        elif type == RECEIVE:
            print('\033[35m[%s]接受%s的信息:\033[0m%s' % (gFTime(), str(add), con))
        else:
            print('\033[36m[%s]服务记录:\033[0m%s' % (gFTime(), con))
        return 0

    def login(self):
        """Slot for the login button: read credentials and start sign-in."""
        if len(self.ui.text_one.toPlainText()) > 0 and len(self.ui.text_two.toPlainText()) > 0:
            self.user_accout = self.ui.text_one.toPlainText()
            self.user_password = self.ui.text_two.toPlainText()
            sign_result = self.sign_in()
            if sign_result != -1:
                QMessageBox.about(self.ui, 'login', '登陆成功')
                self.ui.setHidden(True)
                self.ui2.show()
                # Validate the returned credential against the app server.
                self.valid_application(sign_result['serverIP'], sign_result['content'])
        else:
            QMessageBox.about(self.ui, 'ERROR', '请输入账号和密码!')

    def sign_in(self):
        """Authenticate against the auth server; return its reply dict or -1."""
        debug(self.is_debug_mode)
        # Connect to the authorization server.
        try:
            authServer = ssl_client(
                self.global_config['AuthServer']['ip'], self.global_config['AuthServer']['port'])
            self.log(con='权限服务器连接成功', type=CONNECTION,
                     add=self.global_config['AuthServer']['ip'])
            self.log(add=self.global_config['AuthServer']['ip'], con=authServer.recv(
                1024).decode('utf-8'), type=RECEIVE)
        except Exception as e:
            return self.log(con='连接权限服务器失败,请稍后后重试...', type=ERROR)
        # Exchange messages with the server.
        while True:
            try:
                # Send the user login message.
                authServer.send(pack_mess(uIP=self.local_public_ip, uID=self.user_accout, sIP='',
                                          sID=self.global_config['AppServer']['id'], cre='', mess_type='log', mess=f'{self.user_accout}:{self.user_password}'))
                # Read the server reply.
                date = authServer.recv(1024)
                # Check whether the peer disconnected.
                if not date:
                    self.log(add=self.global_config['AuthServer']['ip'], type=DISCONNECT)
                    break
                # Decode the message.
                date_str = date.decode('utf-8').strip()
                # Print the message.
                self.log(add=self.global_config['AuthServer']
                         ['ip'], con=date_str, type=RECEIVE)
                # Parse the message.
                server_result = json.loads(date_str)
                # If the login succeeded:
                if server_result['content'] != 'Failure':
                    authServer.close()
                    # Close the connection and return the credential.
                    return server_result
                return self.log(con='登陆失败', type=ERROR)
            except Exception as e:
                return self.log(con='会话出错', type=ERROR)
        authServer.close()
        return self.log(con='登陆失败', type=ERROR)

    def access_application(self, appserver_ip: str, ssl_appServer: SSLSocket, credential: str):
        """Finally access the application once the credential is validated."""
        debug(self.is_debug_mode)
        # Exchange messages with the server.
        try:
            # Send the application-access message.
            ssl_appServer.send(pack_mess(uIP=self.local_public_ip, uID=self.user_accout, sIP=appserver_ip,
                                         sID=self.global_config['AppServer']['id'], cre=credential, mess_type='con', mess=''))
            data = ssl_appServer.recv(1024)
            # Decode the message.
            date_str = data.decode('utf-8').strip()
            # Print the message.
            self.log(add=appserver_ip, con=date_str, type=RECEIVE)
            # Parse the message.
            accesss_result = json.loads(date_str)
            # If the knock/validation succeeded, show the content in the UI.
            if accesss_result['content'] != 'invalid':
                self.log(con="成功访问应用服务器!")
                self.log(con=accesss_result)
                self.ui2.brower.append(accesss_result['content'])
                return
            # Close the connection and return the result.
            ssl_appServer.close()
            return accesss_result
        except Exception as e:
            self.log(type=ERROR, con=e)
            ssl_appServer.close()
            return 'invalid'

    def valid_application(self, appserver_ip: str, credential: str):
        """Knock on the application server to validate the authorization ticket."""
        debug(self.is_debug_mode)
        # Connect to the application server, retrying every five seconds.
        while True:
            try:
                ssl_appServer = ssl_client(
                    appserver_ip, self.global_config['AppServer']['port'])
                self.log(con='应用服务器连接成功')
                self.log(add=appserver_ip, con=ssl_appServer.recv(
                    1024).decode('utf-8'), type=RECEIVE)
                break
            except Exception as e:
                self.log(con='连接应用服务器失败,五秒后重试...', type=ERROR)
                sleep(5)
                continue
        # Exchange messages with the server.
        try:
            # Send the application validation message.
            ssl_appServer.send(pack_mess(uIP=self.local_public_ip, uID=self.user_accout, sIP=appserver_ip,
                                         sID=self.global_config['AppServer']['id'], cre='', mess_type='cre', mess=f'{credential}'))
            # Read the server reply.
            date = ssl_appServer.recv(1024)
            # Check whether the peer disconnected.
            if not date:
                self.log(add=self.global_config['AppServer']['ip'], type=DISCONNECT)
                return 'invalid'
            # Decode the message.
            date_str = date.decode('utf-8').strip()
            # Print the message.
            self.log(add=appserver_ip, con=date_str, type=RECEIVE)
            # Parse the message.
            validation_result = json.loads(date_str)
            # If the validation/knock succeeded, go on to access the app.
            if validation_result['content'] != 'invalid':
                self.log(con="成功登陆应用服务器!")
                self.log(con=validation_result)
                self.access_application(appserver_ip, ssl_appServer, credential)
            # Close the connection and return the result.
            ssl_appServer.close()
            return validation_result
        except Exception as e:
            self.log(type=ERROR, con=e)
            ssl_appServer.close()
            return 'invalid'
def main():
    """Start the Qt application and show the SDP client window."""
    app = QApplication([])
    MainWindowObj = ClientWindow()
    app.exec_()
    return 0


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1621500 | """Signal handlers of Zinnia"""
import inspect
from functools import wraps
from django.db.models import F
from django.dispatch import Signal
from django.contrib import comments
from django.db.models.signals import post_save
from django.db.models.signals import pre_delete
from django.contrib.comments.signals import comment_was_posted
from django.contrib.comments.signals import comment_was_flagged
from zinnia import settings
from zinnia.models.entry import Entry
# The (possibly swapped-in) comment model configured for this project.
comment_model = comments.get_model()

# dispatch_uid constants: unique ids that let each handler be connected
# and disconnected exactly once.
ENTRY_PS_PING_DIRECTORIES = 'zinnia.entry.post_save.ping_directories'
ENTRY_PS_PING_EXTERNAL_URLS = 'zinnia.entry.post_save.ping_external_urls'
COMMENT_PS_COUNT_DISCUSSIONS = 'zinnia.comment.post_save.count_discussions'
COMMENT_PD_COUNT_DISCUSSIONS = 'zinnia.comment.pre_delete.count_discussions'
COMMENT_WF_COUNT_DISCUSSIONS = 'zinnia.comment.was_flagged.count_discussions'
COMMENT_WP_COUNT_COMMENTS = 'zinnia.comment.was_posted.count_comments'
PINGBACK_WP_COUNT_PINGBACKS = 'zinnia.pingback.was_flagged.count_pingbacks'
TRACKBACK_WP_COUNT_TRACKBACKS = 'zinnia.trackback.was_flagged.count_trackbacks'

# Custom signals emitted when a pingback / trackback has been registered.
pingback_was_posted = Signal(providing_args=['pingback', 'entry'])
trackback_was_posted = Signal(providing_args=['trackback', 'entry'])
def disable_for_loaddata(signal_handler):
    """
    Decorator for disabling signals sent by 'post_save'
    on loaddata command.
    http://code.djangoproject.com/ticket/8399
    """
    @wraps(signal_handler)
    def wrapper(*args, **kwargs):
        # Skip the handler entirely when invoked from `manage.py loaddata`.
        if any(inspect.getmodulename(frame[1]) == 'loaddata'
               for frame in inspect.stack()):
            return
        signal_handler(*args, **kwargs)
    return wrapper
@disable_for_loaddata
def ping_directories_handler(sender, **kwargs):
    """
    Ping directories when an entry is saved.
    """
    entry = kwargs['instance']
    # Only visible entries are announced, and only when enabled in settings.
    if not (entry.is_visible and settings.SAVE_PING_DIRECTORIES):
        return
    from zinnia.ping import DirectoryPinger
    for directory in settings.PING_DIRECTORIES:
        DirectoryPinger(directory, [entry])
@disable_for_loaddata
def ping_external_urls_handler(sender, **kwargs):
    """
    Ping externals URLS when an entry is saved.
    """
    entry = kwargs['instance']
    # Only visible entries are announced, and only when enabled in settings.
    if not (entry.is_visible and settings.SAVE_PING_EXTERNAL_URLS):
        return
    from zinnia.ping import ExternalUrlsPinger
    ExternalUrlsPinger(entry)
def count_discussions_handler(sender, **kwargs):
    """
    Update the count of each type of discussion on an entry.

    Connected to ``post_save``, ``pre_delete`` and ``comment_was_flagged``,
    so the payload carries either an ``instance`` or a ``comment`` keyword.
    """
    if kwargs.get('instance') and kwargs.get('created'):
        # The signal is emitted by the comment creation,
        # so we do nothing, comment_was_posted is used instead.
        return
    # `comment` is present for comment_was_flagged, `instance` for the
    # model signals.  Replaces the fragile `cond and a or b` idiom with an
    # equivalent explicit form.
    comment = kwargs.get('comment') or kwargs['instance']
    entry = comment.content_object
    if isinstance(entry, Entry):
        entry.comment_count = entry.comments.count()
        entry.pingback_count = entry.pingbacks.count()
        entry.trackback_count = entry.trackbacks.count()
        entry.save(force_update=True)
def count_comments_handler(sender, **kwargs):
    """
    Update Entry.comment_count when a comment was posted.
    """
    target = kwargs['comment'].content_object
    if not isinstance(target, Entry):
        return
    # F() keeps the increment atomic at the database level.
    target.comment_count = F('comment_count') + 1
    target.save(force_update=True)
def count_pingbacks_handler(sender, **kwargs):
    """
    Update Entry.pingback_count when a pingback was posted.
    """
    target = kwargs['entry']
    # F() keeps the increment atomic at the database level.
    target.pingback_count = F('pingback_count') + 1
    target.save(force_update=True)
def count_trackbacks_handler(sender, **kwargs):
    """
    Update Entry.trackback_count when a trackback was posted.
    """
    target = kwargs['entry']
    # F() keeps the increment atomic at the database level.
    target.trackback_count = F('trackback_count') + 1
    target.save(force_update=True)
def connect_entry_signals():
    """
    Connect all the signals on Entry model.
    """
    # dispatch_uids make registration idempotent (no double connections).
    post_save.connect(
        ping_directories_handler, sender=Entry,
        dispatch_uid=ENTRY_PS_PING_DIRECTORIES)
    post_save.connect(
        ping_external_urls_handler, sender=Entry,
        dispatch_uid=ENTRY_PS_PING_EXTERNAL_URLS)
def disconnect_entry_signals():
    """
    Disconnect all the signals on Entry model.
    """
    # Disconnection is keyed on the same dispatch_uids used when connecting.
    post_save.disconnect(
        sender=Entry,
        dispatch_uid=ENTRY_PS_PING_DIRECTORIES)
    post_save.disconnect(
        sender=Entry,
        dispatch_uid=ENTRY_PS_PING_EXTERNAL_URLS)
def connect_discussion_signals():
    """
    Connect all the signals on the Comment model to
    maintains a valid discussion count on each entries
    when an action is done with the comments.
    """
    # dispatch_uids make registration idempotent (no double connections).
    post_save.connect(
        count_discussions_handler, sender=comment_model,
        dispatch_uid=COMMENT_PS_COUNT_DISCUSSIONS)
    pre_delete.connect(
        count_discussions_handler, sender=comment_model,
        dispatch_uid=COMMENT_PD_COUNT_DISCUSSIONS)
    comment_was_flagged.connect(
        count_discussions_handler, sender=comment_model,
        dispatch_uid=COMMENT_WF_COUNT_DISCUSSIONS)
    comment_was_posted.connect(
        count_comments_handler, sender=comment_model,
        dispatch_uid=COMMENT_WP_COUNT_COMMENTS)
    pingback_was_posted.connect(
        count_pingbacks_handler, sender=comment_model,
        dispatch_uid=PINGBACK_WP_COUNT_PINGBACKS)
    trackback_was_posted.connect(
        count_trackbacks_handler, sender=comment_model,
        dispatch_uid=TRACKBACK_WP_COUNT_TRACKBACKS)
def disconnect_discussion_signals():
    """
    Disconnect all the signals on Comment model
    provided by Zinnia.
    """
    # Disconnection is keyed on the same dispatch_uids used when connecting.
    post_save.disconnect(
        sender=comment_model,
        dispatch_uid=COMMENT_PS_COUNT_DISCUSSIONS)
    pre_delete.disconnect(
        sender=comment_model,
        dispatch_uid=COMMENT_PD_COUNT_DISCUSSIONS)
    comment_was_flagged.disconnect(
        sender=comment_model,
        dispatch_uid=COMMENT_WF_COUNT_DISCUSSIONS)
    comment_was_posted.disconnect(
        sender=comment_model,
        dispatch_uid=COMMENT_WP_COUNT_COMMENTS)
    pingback_was_posted.disconnect(
        sender=comment_model,
        dispatch_uid=PINGBACK_WP_COUNT_PINGBACKS)
    trackback_was_posted.disconnect(
        sender=comment_model,
        dispatch_uid=TRACKBACK_WP_COUNT_TRACKBACKS)
| StarcoderdataPython |
6636827 | <gh_stars>0
# Generated by Django 3.2 on 2021-06-20 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: replaces Area.name with a unique `en` field.

    dependencies = [
        ('area', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='area',
            name='name',
        ),
        migrations.AddField(
            model_name='area',
            name='en',
            # default=1 only backfills existing rows during this migration
            # (preserve_default=False drops it from the model afterwards).
            field=models.CharField(default=1, max_length=250, unique=True, verbose_name='name'),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
11378595 | import json
import requests
from ._volumes import _Volumes
from ._infos import _Infos
from ._liquidations import _Liquidations
from ._indicators import _Indicators
class Cryptometer():
    """Thin client for the cryptometer.io REST API.

    Endpoint groups are exposed as sub-objects (``indicators``, ``infos``,
    ``liquidations``, ``volumes``) that share this instance's API key and
    request helpers via their ``parent`` reference.
    """

    def __init__(self, api_key):
        self._api_key = api_key
        self._api_url = "https://api.cryptometer.io"
        self.indicators = _Indicators(parent=self)
        self.infos = _Infos(parent=self)
        self.liquidations = _Liquidations(parent=self)
        self.volumes = _Volumes(parent=self)

    def _response(self, **args):
        '''
        Validate an API payload and return its "data" part.

        Raises Exception for any API error except the benign "no data"
        messages.  Returns None (implicitly) when the call was not
        successful but no raising error was reported.
        '''
        no_data_errors = [  # errors that don't raise an Exception
            "No Data",
            "No Liquidation"
        ]
        success = args["success"] == "true"
        error = args["error"]
        data = args.get("data", [])
        if error == "false":
            error = None
        elif error not in no_data_errors:
            raise Exception(error)
        else:
            # A benign "no data" error still counts as a successful call.
            success = True
        if success:
            return data

    def _casefold(self, exchange=None, market_pair=None, pair=None, coin=None, timeframe=None, exchange_type=None, source=None, period=None, long_period=None, short_period=None, signal_period=None, filter=None):
        '''
        Normalize user-supplied arguments into the exact query-parameter
        names and casing the API expects.

        ``filter`` intentionally shadows the builtin so the public keyword
        matches the API parameter name.
        '''
        args = {}
        if exchange is not None:
            args["e"] = exchange.lower()
        if market_pair is not None:
            args["market_pair"] = market_pair.replace("-", "").upper()
        if pair is not None:
            args["pair"] = pair.replace("-", "").upper()
        if coin is not None:
            args["symbol"] = coin.upper()
        if timeframe is not None:
            args["timeframe"] = timeframe.lower()
        if exchange_type is not None:
            args["exchange_type"] = exchange_type.lower()
        if source is not None:
            args["source"] = source.lower()
        if period is not None:
            args["period"] = str(period)
        if long_period is not None:
            args["long_period"] = str(long_period)
        if short_period is not None:
            args["short_period"] = str(short_period)
        if signal_period is not None:
            args["signal_period"] = str(signal_period)
        if filter is not None:
            args["filter"] = str(filter)
        return args

    def _send_request(self, endpoint: str, arguments: dict = None):
        '''
        Build the request URL, perform the GET, validate the payload and
        return the (topology-fixed) data.

        ``arguments`` defaults to None (previously a mutable ``{}`` default,
        which is an anti-pattern even though the dict was never mutated).
        '''
        params = ["api_key=" + self._api_key]
        for key, value in (arguments or {}).items():
            params.append(key + "=" + value)  # assemble the query-string pairs
        url = self._api_url + endpoint + "?" + "&".join(params)
        raw = requests.get(url)  # perform the API request
        data = self._response(**json.loads(raw.content.decode()))
        return self._fix_data(data)

    def _fix_data(self, r):
        '''
        Flatten the common single-wrapper response shape: a non-empty list
        whose only element is itself a list or dict is unwrapped.
        '''
        if isinstance(r, list) and r:
            if isinstance(r[0], (list, dict)) and len(r) == 1:
                return r[0]
        return r
6585537 | <reponame>lynnli92/leetcode-group-solution<filename>AlgorithmProblems/0245. Shortest Word Distance III/main0245.py
from typing import List
class Solution0245:
    """LeetCode 245: shortest word distance where the two words may be equal."""

    def shortestWordDistance(self, wordsDict: List[str], word1: str, word2: str) -> int:
        """Return the minimum index distance between occurrences of the words."""
        same_word = word1 == word2
        best = len(wordsDict)
        last_match = -1
        for index, word in enumerate(wordsDict):
            if word != word1 and word != word2:
                continue
            # When the words differ, only mixed pairs count; when equal,
            # any two consecutive occurrences count.
            if last_match >= 0 and (same_word or wordsDict[last_match] != word):
                best = min(best, index - last_match)
            last_match = index
        return best
# Exercise the solver on the two sample scenarios from the problem
# statement: distinct words ("makes"/"coding" -> 1) and identical
# words ("makes"/"makes" -> 3).
for sample_word1, sample_word2 in (("makes", "coding"), ("makes", "makes")):
    words = ["practice", "makes", "perfect", "coding", "makes"]
    solver = Solution0245()
    answer = solver.shortestWordDistance(words, sample_word1, sample_word2)
    print(answer)
8040620 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Optional, Tuple, Union
import hypothesis.strategies as st
import torch
import torch.nn as nn
from hypothesis import given, settings
from opacus.layers import DPGRU, DPLSTM, DPRNN
from opacus.utils.packed_sequences import _gen_packed_data
from torch.nn.utils.rnn import PackedSequence
from .common import DPModules_test
def rnn_train_fn(
model: nn.Module,
x: Union[torch.Tensor, PackedSequence],
state_init: Optional[Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]] = None,
):
model.train()
criterion = nn.MSELoss()
logits, _ = model(x, state_init)
if isinstance(logits, PackedSequence):
y = torch.zeros_like(logits[0])
loss = criterion(logits[0], y)
else:
y = torch.zeros_like(logits)
loss = criterion(logits, y)
loss.backward()
class DPLSTM_test(DPModules_test):
    """Property-based parity tests: DP-compatible RNN layers vs torch.nn originals."""

    @given(
        mode=st.one_of(st.just("rnn"), st.just("gru"), st.just("lstm")),
        batch_size=st.integers(1, 5),
        seq_len=st.integers(1, 6),
        emb_size=st.integers(5, 10),
        hidden_size=st.integers(3, 7),
        num_layers=st.integers(1, 3),
        bidirectional=st.booleans(),
        bias=st.booleans(),
        batch_first=st.booleans(),
        zero_init=st.booleans(),
        packed_input_flag=st.integers(0, 2),
    )
    @settings(deadline=20000)
    def test_rnn(
        self,
        mode: str,
        batch_size: int,
        seq_len: int,
        emb_size: int,
        hidden_size: int,
        num_layers: int,
        bidirectional: bool,
        bias: bool,
        batch_first: bool,
        zero_init: bool,
        packed_input_flag: int,
    ):
        """Check that the DP variant matches the stock layer's outputs and grads.

        The DP module is initialized with the stock module's state_dict, so
        any divergence in forward outputs or gradients indicates a bug in
        the DP re-implementation.
        """
        # Only LSTM produces a cell state ("cn") in addition to "hn".
        use_cn = False
        if mode == "rnn":
            original_rnn_class = nn.RNN
            dp_rnn_class = DPRNN
        elif mode == "gru":
            original_rnn_class = nn.GRU
            dp_rnn_class = DPGRU
        elif mode == "lstm":
            original_rnn_class = nn.LSTM
            dp_rnn_class = DPLSTM
            use_cn = True
        else:
            raise ValueError("Invalid RNN mode")
        rnn = original_rnn_class(
            emb_size,
            hidden_size,
            num_layers=num_layers,
            batch_first=batch_first,
            bidirectional=bidirectional,
            bias=bias,
        )
        dp_rnn = dp_rnn_class(
            emb_size,
            hidden_size,
            num_layers=num_layers,
            batch_first=batch_first,
            bidirectional=bidirectional,
            bias=bias,
        )
        # Share weights so both modules are numerically comparable.
        dp_rnn.load_state_dict(rnn.state_dict())
        if packed_input_flag == 0:
            # no packed sequence input
            x = (
                torch.randn([batch_size, seq_len, emb_size])
                if batch_first
                else torch.randn([seq_len, batch_size, emb_size])
            )
        elif packed_input_flag == 1:
            # packed sequence input in sorted order
            x = _gen_packed_data(
                batch_size, seq_len, emb_size, batch_first, sorted_=True
            )
        elif packed_input_flag == 2:
            # packed sequence input in unsorted order
            x = _gen_packed_data(
                batch_size, seq_len, emb_size, batch_first, sorted_=False
            )
        else:
            raise ValueError("Invalid packed input flag")
        if zero_init:
            # No explicit initial state: both layers fall back to zeros.
            self.compare_forward_outputs(
                rnn,
                dp_rnn,
                x,
                output_names=("out", "hn", "cn") if use_cn else ("out", "hn"),
                atol=1e-5,
                rtol=1e-3,
            )
            self.compare_gradients(
                rnn,
                dp_rnn,
                rnn_train_fn,
                x,
                atol=1e-5,
                rtol=1e-3,
            )
        else:
            # Random initial hidden (and, for LSTM, cell) state.
            num_directions = 2 if bidirectional else 1
            h0 = torch.randn([num_layers * num_directions, batch_size, hidden_size])
            c0 = torch.randn([num_layers * num_directions, batch_size, hidden_size])
            self.compare_forward_outputs(
                rnn,
                dp_rnn,
                x,
                (h0, c0) if use_cn else h0,
                output_names=("out", "hn", "cn") if use_cn else ("out", "hn"),
                atol=1e-5,
                rtol=1e-3,
            )
            self.compare_gradients(
                rnn,
                dp_rnn,
                rnn_train_fn,
                x,
                (h0, c0) if use_cn else h0,
                atol=1e-5,
                rtol=1e-3,
            )
| StarcoderdataPython |
9733106 | <filename>backend/app/crud/crud_blog.py
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
from fastapi.encoders import jsonable_encoder
from sqlalchemy.orm import Session
from sqlalchemy import desc
from app.core.security import get_password_hash, verify_password
from app.crud.base import CRUDBase
from app.models.blog import Blog
from app.schemas.blog import BlogCreate, BlogUpdate, BlogSimple
# Default page size for blog listings.
LIMIT_N = 20


class CRUDBlog(CRUDBase[Blog, BlogCreate, BlogUpdate]):
    """CRUD operations for Blog with pagination-aware listing queries."""

    def create_by_user(self, db: Session, *, obj_in: BlogCreate, user_id: int) -> Blog:
        """Create a blog owned by ``user_id``, stamped with the current UTC time."""
        db_obj = Blog(
            user_id=user_id,
            add_time=datetime.utcnow(),
            **jsonable_encoder(obj_in),
        )
        db.add(db_obj)
        db.commit()
        db.refresh(db_obj)
        return db_obj

    def get_blogs_by_user_id_with_state(
        self,
        db: Session,
        *,
        user_id: int,
        before: Optional[int] = None,  # avoids duplicates during pull-down pagination; unset means unrestricted
        skip: Optional[int] = None,  # does not conflict with ``before``; useful when jumping pages
        limit: int = LIMIT_N,
        state: int = 2,
        # { 0: public, -1: draft, -2: recycle bin, 1: private (self only), 2: public + private }
    ) -> List[BlogSimple]:
        """List one user's blogs filtered by visibility state, newest first."""
        # When ``before`` is None this is the Python boolean True, which
        # SQLAlchemy accepts in filter() as a no-op condition.
        filter_before = before is None or Blog.id < before
        if state == 2:
            filter_state = Blog.state >= 0
        else:
            filter_state = Blog.state == state
        return (db
            .query(Blog)
            .filter(Blog.user_id == user_id)
            .filter(filter_before)
            .filter(filter_state)
            .order_by(desc(Blog.id))
            .offset(skip)
            .limit(limit)
            .all()
        )

    def get_blogs_by_user_ids(
        self,
        db: Session,
        *,
        user_ids: List[int],
        before: Optional[int] = None,
        skip: Optional[int] = None,
        limit: int = LIMIT_N,
    ) -> List[BlogSimple]:
        """List the public blogs of several users (e.g. a follow feed), newest first."""
        filter_cond = before is None or Blog.id < before
        return (db
            .query(Blog)
            .filter(Blog.user_id.in_(user_ids))
            .filter(filter_cond)
            .filter(Blog.state == 0)
            .order_by(desc(Blog.id))
            .offset(skip)
            .limit(limit)
            .all()
        )

    def get_blogs_by_blog_ids(
        self,
        db: Session,
        *,
        blog_ids: List[int],
        before: Optional[int] = None,
        skip: Optional[int] = None,
        limit: int = LIMIT_N,
    ) -> List[BlogSimple]:
        """example: find all my recent comments as well as their blogs"""
        filter_cond = before is None or Blog.id < before
        return (db
            .query(Blog)
            .filter(Blog.id.in_(blog_ids))
            .filter(filter_cond)
            #.order_by(desc(Blog.id))
            .offset(skip)
            .limit(limit)
            .all()
        )
blog = CRUDBlog(Blog)
| StarcoderdataPython |
1811314 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Testing authentication functions."""
import logging
import hvac
import pytest
from broccolini.authentication_functions import VaultFunctions
logging.basicConfig(level=logging.DEBUG, format=" %(asctime)s - %(levelname)s - %(message)s")
class TestVaultFunctions:
    """Test Vault Functions.

    NOTE(review): several token string literals below (e.g. "<PASSWORD>_TOKEN",
    "VA<PASSWORD>") look mangled by credential redaction/anonymization —
    confirm the intended environment-variable key names before relying on
    these tests.
    """

    @staticmethod
    @pytest.mark.skip(reason="needs mock")
    def test_unseal_vault():
        """Test unseal vault."""
        result = VaultFunctions().unseal_vault(
            vault_url="VAULT_URL",
            vault_token="<PASSWORD>_TOKEN",
            vault_unseal_token="VAULT_<PASSWORD>TOKEN",
        )
        expected = True
        expected_type = bool
        assert isinstance(result, expected_type)
        assert result == expected

    @staticmethod
    @pytest.mark.dependency(name="test_login_to_vault")
    def test_login_to_vault():
        """Test login to vault.

        input : vault settings
        output: vault client connection
        """
        result = VaultFunctions.get_vault_credentials(vault_url="VAULT_URL", vault_token="<PASSWORD>TOKEN")
        # The hvac client's repr includes its memory address; match the prefix.
        expected = "hvac.v1.Client object at 0"
        expected_type = hvac.v1.Client
        assert isinstance(result, expected_type)
        assert expected in str(result)

    @staticmethod
    @pytest.mark.dependency(depends=["test_login_to_vault"])
    def test_login_to_vault_exception_bad_vault_url():
        """Test vault login exception.

        input : deliberately bad vault settings
        output: none
        """
        message = "Missing environment variables"
        with pytest.raises(ValueError, match=message):
            VaultFunctions.get_vault_credentials(vault_url="VAULT_URL_BAD", vault_token="VAULT_TOKEN_BAD")

    @staticmethod
    @pytest.mark.dependency(depends=["test_login_to_vault"])
    def test_query_vault_data(return_data_dict):
        """Test query to vault.

        input : successful client
        input: secret_path: dict path to secret
        output: output of secret : str
        result = client.read(path) # gets all variables at this path
        """
        result = VaultFunctions().query_vault_data(
            vault_url="VAULT_URL",
            vault_token="VA<PASSWORD>",
            secret_path=return_data_dict["secret_path"],
        )
        # KV v2 nests the payload under data.data.
        result_key = result["data"]["data"]["secret_test_key"]
        expected_type = dict
        expected = "secret_value_from_conftest"
        assert isinstance(result, expected_type)
        assert expected in result_key

    @staticmethod
    @pytest.mark.dependency(depends=["test_login_to_vault"])
    def test_add_to_vault(return_data_dict):
        """Test add vault data."""
        result = VaultFunctions().add_to_vault(
            vault_url="VAULT_URL",
            vault_token="<PASSWORD>",
            secret_path=return_data_dict["secret_path"],
            secret=return_data_dict["secret"],
        )
        expected_type = tuple
        # The created_time timestamp should fall in the 202x decade.
        expected_1 = "202"
        creation_time = result[1]["data"]["created_time"]
        assert expected_1 in creation_time
        assert isinstance(result, expected_type)

    @staticmethod
    @pytest.mark.dependency(depends=["test_login_to_vault"])
    def test_initialized_vault():
        """Test that vault is initialized."""
        result = VaultFunctions().initialized_vault(
            vault_url="VAULT_URL",
            vault_token="<PASSWORD>",
        )
        expected = True
        expected_type = bool
        assert isinstance(result, expected_type)
        assert result == expected
| StarcoderdataPython |
12851055 | import pandas as pd
import numpy as np
from os import path
from path_service import LOG_DIR, DATA_DIR
from sklearn.metrics import log_loss
import re
# Names of the eight probability columns, and the first seven of them.
prob_columns = [f"prob{i}" for i in range(8)]
prob_columns_without_end = [f"prob{i}" for i in range(7)]


def row_check(df: pd.DataFrame):
    """Renormalize each row so the eight class probabilities sum to 1.0.

    Every row is divided by its own sum, rounded to five decimals, and the
    last column (``prob7``) is recomputed as the complement of the first
    seven so the rounded values still add up to exactly 1.0.
    """
    normalized = df.loc[:, prob_columns].apply(lambda row: row / np.sum(row), axis=1, result_type='expand')
    df.loc[:, prob_columns] = normalized
    df = df.round(5)
    first_seven_total = np.sum(df.loc[:, prob_columns_without_end], axis=1)
    df.loc[:, 'prob7'] = 1.0 - first_seven_total
    return df
def get_prob_res(file_name: str):
    """Parse a probability log from LOG_DIR into a normalized DataFrame.

    Each line is expected to contain eight whitespace-separated
    "probN value" token pairs (N in 0..7) — TODO confirm against the
    producer of these logs.  The result gains a 1-based ``file_id``
    column and is passed through ``row_check`` so rows sum to 1.0.
    """
    df: pd.DataFrame = pd.DataFrame([])
    with open(path.join(LOG_DIR, file_name), 'r') as prob_file:
        prob_lines = prob_file.readlines()
        probs = {}
        for i in range(8):
            probs[i] = []
        for line in prob_lines:
            words = re.split(r"\s", line)
            for i in range(8):
                pos = i * 2
                # The token at ``pos`` looks like "probN"; its last character
                # is the class index for the value that follows it.
                prob_index = int(words[pos][-1])
                probs[prob_index].append(float(words[pos + 1]))
        # NOTE(review): np.int / np.float are removed in NumPy >= 1.24;
        # plain int / float would be equivalent and future-proof.
        df.loc[:, "file_id"] = pd.Series(list(range(1, len(probs[0]) + 1)), dtype=np.int)
        for i in range(8):
            df.loc[:, f"prob{i}"] = pd.Series(probs[i], dtype=np.float)
    return row_check(df)
def get_single_res(file_name: str, true_mode: bool = True):
    """Load ground-truth labels and expand them into one-hot probability columns.

    Args:
        file_name: label file; each line's first whitespace-separated token
            ends with the class label digit (0-7).
        true_mode: when True read from DATA_DIR (ground-truth files),
            otherwise from LOG_DIR.

    Returns:
        DataFrame with a 1-based ``file_id`` column and one-hot
        ``prob0``..``prob7`` columns.
    """
    df: pd.DataFrame = pd.DataFrame([])
    with open(path.join(LOG_DIR if not true_mode else DATA_DIR, file_name), 'r') as prob_file:
        prob_lines = prob_file.readlines()
    probs = {i: [] for i in range(8)}
    for line in prob_lines:
        # Last character of the first token is the class label.
        label = int(str.strip(re.split(r"\s", line)[0])[-1])
        for i in range(8):
            probs[i].append(1.0 if i == label else 0.0)
    # Use builtin int/float dtypes: the np.int/np.float aliases were
    # deprecated and removed in NumPy >= 1.24 (identical behavior before).
    df.loc[:, "file_id"] = pd.Series(list(range(1, len(probs[0]) + 1)), dtype=int)
    for i in range(8):
        df.loc[:, f"prob{i}"] = pd.Series(probs[i], dtype=float)
    return df
def get_probs(df: pd.DataFrame) -> pd.DataFrame:
    """Return only the eight probability columns, in prob0..prob7 order."""
    return df.loc[:, [f"prob{i}" for i in range(8)]]
def check_valid_log_loss():
    """Print the log-loss of the validation predictions against the labels."""
    valid_prob_df = get_prob_res('valid_prob.log')
    labels = get_single_res('security.valid', True)
    print("prob mode: ", log_loss(get_probs(labels), get_probs(valid_prob_df)))
def check_train_log_loss():
    """Print the log-loss of the training predictions against the labels."""
    valid_prob_df = get_prob_res('train_prob.log')
    labels = get_single_res('new_train', True)
    print("prob mode: ", log_loss(get_probs(labels), get_probs(valid_prob_df)))
def save_train_res(df: pd.DataFrame):
    """Write the submission CSV (5-decimal floats, no index) into DATA_DIR."""
    df.to_csv(path.join(DATA_DIR, "test_submit.csv"), sep=",", index=False, float_format='%.5f')
if __name__ == "__main__":
    # Sanity-check losses on validation and training predictions, then
    # build the test submission and re-validate it from disk.
    check_valid_log_loss()
    check_train_log_loss()
    test_prob_df = get_prob_res("test_prob.log")
    save_train_res(test_prob_df)
    df = pd.read_csv(path.join(DATA_DIR, "test_submit.csv"), sep=",")
    for index, row in df.iterrows():
        # Every submitted row must sum to 1.0 within float tolerance.
        if np.abs(np.sum(row[list(map(lambda x: f"prob{x}", range(8)))]) - 1.0) > 1e-6:
            raise Exception(f"sum prob not equal 1.0 in {index}")
| StarcoderdataPython |
5008262 | <gh_stars>1-10
from wrapper import Bittrex
__version__ = "0.0.1"
| StarcoderdataPython |
4933713 | <reponame>marvinquiet/RefConstruction_supervisedCelltyping
'''
Configuration generation for running performance saturation
'''
import os, sys, argparse
import random
from pipelines import method_utils, dataloading_utils
from preprocess.process_train_test_data import *
if __name__ == "__main__":
    data_dir = "~/gpu/data"
    ## parse arguments
    parser = argparse.ArgumentParser(description="Test for saturation pipeline.")
    parser.add_argument('data_source', help="Load which dataset")
    parser.add_argument('--train', help="Specify which as train", required=True)
    parser.add_argument('--test', help="Specify which as test", required=True)
    parser.add_argument('--sample_seed', help="Downsample seed in combined individual effect",
            default=0, type=int)
    parser.add_argument('--downsample', help="Whether do downsample or not",
            dest='downsample', action='store_true')
    parser.add_argument('--downsample_size', help="Downsample size for testing saturation",
            default=3000, type=int)
    args = parser.parse_args()
    # Fixed experimental configuration: MLP classifier with F-test feature
    # selection of 1000 features on the training data.
    args.method = "MLP"
    args.select_on = "train"
    args.select_method = "F-test"
    args.n_features = 1000
    if args.downsample:
        pipeline_dir = "pipelines/result_saturation_downsample_collections"
    else:
        pipeline_dir = "pipelines/result_saturation_collections"
    result_collections_dir = pipeline_dir+os.sep+"result_"+args.data_source+'_'+args.train.split('_')[0]+'s'
    result_prefix = result_collections_dir+os.sep+str(args.sample_seed) ## to distinguish PBMC tasks
    os.makedirs(result_prefix, exist_ok=True)
    ## create file directory
    if args.select_on is None and args.select_method is None:
        result_dir = result_prefix+os.sep+"no_feature"
    else:
        result_dir = result_prefix+os.sep+args.select_method+'_'+\
                str(args.n_features)+'_on_'+args.select_on
    os.makedirs(result_dir, exist_ok=True)
    # Try cached data first, then each loader in turn until one matches.
    load_ind, train_adata, test_adata = load_adata(result_collections_dir)
    if not load_ind:
        train_adata, test_adata = dataloading_utils.load_PBMC_adata(
            data_dir, result_collections_dir, args=args)
        if train_adata is None or test_adata is None:
            train_adata, test_adata = dataloading_utils.load_Pancreas_adata(
                data_dir, result_collections_dir, args=args)
        if train_adata is None or test_adata is None:
            train_adata, test_adata = dataloading_utils.load_Mousebrain_adata(
                data_dir, result_collections_dir, args=args)
        if train_adata is None or test_adata is None:
            sys.exit("Please check your data source to make sure it matches an input.")
    ## whether to purify reference dataset
    purify_method = ""
    if "purify_dist" in args.data_source:
        purify_method = "distance"
    elif "purify_SVM" in args.data_source:
        purify_method = "SVM"
    train_adata, test_adata = dataloading_utils.process_loaded_data(
        train_adata, test_adata, result_collections_dir, args=args, purify_method=purify_method)
    print("Train anndata: \n", train_adata)
    print("Test anndata: \n", test_adata)
    if args.downsample: ## add shuffled cells
        train_cells = train_adata.obs_names.tolist()
        random.seed(args.sample_seed)
        random.shuffle(train_cells) ## shuffle original cell list
        original_train_adata = train_adata.copy()[train_cells]
        # NOTE(review): when the cell count is an exact multiple of
        # downsample_size, the final loop iteration repeats the full-size
        # run (sampled_number clamps to shape[0] twice) — confirm intended.
        for i in range(original_train_adata.shape[0]//args.downsample_size+1):
            sampled_number = (i+1)*args.downsample_size if (i+1)*args.downsample_size < original_train_adata.shape[0] else original_train_adata.shape[0]
            train_adata = original_train_adata[:sampled_number]
            sampled_result_dir = result_dir+os.sep+str(sampled_number)
            os.makedirs(sampled_result_dir, exist_ok=True)
            method_utils.run_pipeline(args, train_adata, test_adata, data_dir, sampled_result_dir)
    else: ## add shuffled individuals
        if args.data_source == "mousebrain_crossdataset_inds": ## a combined version
            pFC_samples = [x for x in train_adata.obs["Sample"].tolist() if x != 'nan']
            allen_samples = [x for x in train_adata.obs["external_donor_name_label"].tolist() if x != 'nan']
            train_adata.obs["ind"] = pFC_samples + allen_samples
        original_train_adata = train_adata.copy()
        # NOTE(review): list(set(...)) ordering depends on string hash
        # randomization, so random.seed alone does not make the individual
        # order reproducible across processes — consider sorted(set(...)).
        train_inds = list(set(original_train_adata.obs['ind']))
        random.seed(args.sample_seed)
        random.shuffle(train_inds)
        for i in range(len(train_inds)):
            # Grow the reference one individual at a time.
            train_idx = original_train_adata.obs['ind'].isin(train_inds[:i+1])
            train_adata = original_train_adata[train_idx]
            ind_result_dir = result_dir+os.sep+str(i+1)
            os.makedirs(ind_result_dir, exist_ok=True)
            method_utils.run_pipeline(args, train_adata, test_adata, data_dir, ind_result_dir)
| StarcoderdataPython |
5156661 | <filename>zvt/__init__.py
# -*- coding: utf-8 -*-
import enum
import json
import logging
import os
from logging.handlers import RotatingFileHandler
import pandas as pd
from pkg_resources import get_distribution, DistributionNotFound
from zvt.settings import DATA_SAMPLE_ZIP_PATH, ZVT_TEST_HOME, ZVT_HOME, ZVT_TEST_DATA_PATH, ZVT_TEST_ZIP_DATA_PATH
try:
dist_name = __name__
__version__ = get_distribution(dist_name).version
except DistributionNotFound:
__version__ = 'unknown'
finally:
del get_distribution, DistributionNotFound
# common class
class IntervalLevel(enum.Enum):
    """Bar/candle interval levels, ordered by duration via the comparison dunders."""
    LEVEL_TICK = 'tick'
    LEVEL_1MIN = '1m'
    LEVEL_5MIN = '5m'
    LEVEL_15MIN = '15m'
    LEVEL_30MIN = '30m'
    LEVEL_1HOUR = '1h'
    LEVEL_4HOUR = '4h'
    LEVEL_1DAY = '1d'
    LEVEL_1WEEK = '1wk'
    LEVEL_1MON = '1mon'

    def to_pd_freq(self):
        """Map this level to a pandas frequency alias (daily and above map to '1D')."""
        if self == IntervalLevel.LEVEL_1MIN:
            return '1min'
        if self == IntervalLevel.LEVEL_5MIN:
            return '5min'
        if self == IntervalLevel.LEVEL_15MIN:
            return '15min'
        if self == IntervalLevel.LEVEL_30MIN:
            return '30min'
        if self == IntervalLevel.LEVEL_1HOUR:
            return '1H'
        if self == IntervalLevel.LEVEL_4HOUR:
            return '4H'
        if self >= IntervalLevel.LEVEL_1DAY:
            return '1D'

    def floor_timestamp(self, pd_timestamp):
        """Floor ``pd_timestamp`` to this interval's boundary.

        NOTE(review): LEVEL_1WEEK / LEVEL_1MON are not handled and fall
        through returning None — confirm whether callers rely on that.
        """
        if self == IntervalLevel.LEVEL_1MIN:
            return pd_timestamp.floor('1min')
        if self == IntervalLevel.LEVEL_5MIN:
            return pd_timestamp.floor('5min')
        if self == IntervalLevel.LEVEL_15MIN:
            return pd_timestamp.floor('15min')
        if self == IntervalLevel.LEVEL_30MIN:
            return pd_timestamp.floor('30min')
        if self == IntervalLevel.LEVEL_1HOUR:
            return pd_timestamp.floor('1h')
        if self == IntervalLevel.LEVEL_4HOUR:
            return pd_timestamp.floor('4h')
        if self == IntervalLevel.LEVEL_1DAY:
            return pd_timestamp.floor('1d')

    def to_minute(self):
        """Return this interval's duration in whole minutes."""
        return int(self.to_second() / 60)

    def to_second(self):
        """Return this interval's duration in whole seconds."""
        return int(self.to_ms() / 1000)

    def to_ms(self):
        """Return this interval's (approximate) duration in milliseconds."""
        # we treat tick intervals as 5s, you could change it
        if self == IntervalLevel.LEVEL_TICK:
            return 5 * 1000
        if self == IntervalLevel.LEVEL_1MIN:
            return 60 * 1000
        if self == IntervalLevel.LEVEL_5MIN:
            return 5 * 60 * 1000
        if self == IntervalLevel.LEVEL_15MIN:
            return 15 * 60 * 1000
        if self == IntervalLevel.LEVEL_30MIN:
            return 30 * 60 * 1000
        if self == IntervalLevel.LEVEL_1HOUR:
            return 60 * 60 * 1000
        if self == IntervalLevel.LEVEL_4HOUR:
            return 4 * 60 * 60 * 1000
        if self == IntervalLevel.LEVEL_1DAY:
            return 24 * 60 * 60 * 1000
        if self == IntervalLevel.LEVEL_1WEEK:
            return 7 * 24 * 60 * 60 * 1000
        if self == IntervalLevel.LEVEL_1MON:
            # Fixed: was 31 * 7 * 24h (~217 days) — a stray factor of 7
            # copied from the week case.  A month is approximated as 31 days.
            return 31 * 24 * 60 * 60 * 1000

    def __ge__(self, other):
        if self.__class__ is other.__class__:
            return self.to_ms() >= other.to_ms()
        return NotImplemented

    def __gt__(self, other):
        if self.__class__ is other.__class__:
            return self.to_ms() > other.to_ms()
        return NotImplemented

    def __le__(self, other):
        if self.__class__ is other.__class__:
            return self.to_ms() <= other.to_ms()
        return NotImplemented

    def __lt__(self, other):
        if self.__class__ is other.__class__:
            return self.to_ms() < other.to_ms()
        return NotImplemented
class AdjustType(enum.Enum):
    """Price adjustment modes for quote data."""
    # Pinyin names are used because the English terms (e.g. "split-adjusted")
    # are less direct for this domain.
    # no adjustment (raw prices)
    bfq = 'bfq'
    # forward adjusted (qianfuquan)
    qfq = 'qfq'
    # backward adjusted (houfuquan)
    hfq = 'hfq'
def init_log(file_name='zvt.log', log_dir=None, simple_formatter=True):
    """Reset the root logger to a rotating-file handler plus a console handler.

    Also applies the project's default pandas display/assignment options.
    """
    if not log_dir:
        log_dir = zvt_env['log_path']

    root = logging.getLogger()
    root.handlers = []  # drop any previously installed handlers
    root.setLevel(logging.INFO)

    log_path = os.path.join(log_dir, file_name)
    if simple_formatter:
        fmt = "%(asctime)s  %(levelname)s  %(threadName)s  %(message)s"
    else:
        fmt = "%(asctime)s  %(levelname)s  %(threadName)s  %(name)s:%(filename)s:%(lineno)s  %(funcName)s  %(message)s"
    formatter = logging.Formatter(fmt)

    file_handler = RotatingFileHandler(log_path, maxBytes=524288000, backupCount=10)
    console_handler = logging.StreamHandler()
    for handler in (file_handler, console_handler):
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        root.addHandler(handler)

    pd.set_option('expand_frame_repr', False)
    pd.set_option('mode.chained_assignment', 'raise')
# Global runtime configuration, populated by init_env().
zvt_env = {}


def init_env(zvt_home: str) -> None:
    """Create the zvt directory layout under ``zvt_home``, load config.json
    into the global ``zvt_env`` dict, and initialize logging.

    :param zvt_home: home path for zvt
    """
    data_path = os.path.join(zvt_home, 'data')
    tmp_path = os.path.join(zvt_home, 'tmp')
    if not os.path.exists(data_path):
        os.makedirs(data_path)

    if not os.path.exists(tmp_path):
        os.makedirs(tmp_path)

    zvt_env['zvt_home'] = zvt_home
    zvt_env['data_path'] = data_path
    zvt_env['tmp_path'] = tmp_path

    # path for storing ui results
    zvt_env['ui_path'] = os.path.join(zvt_home, 'ui')
    if not os.path.exists(zvt_env['ui_path']):
        os.makedirs(zvt_env['ui_path'])

    # path for storing logs
    zvt_env['log_path'] = os.path.join(zvt_home, 'logs')
    if not os.path.exists(zvt_env['log_path']):
        os.makedirs(zvt_env['log_path'])

    # create default config.json if not exist
    config_path = os.path.join(zvt_home, 'config.json')
    if not os.path.exists(config_path):
        from shutil import copyfile
        copyfile(os.path.abspath(os.path.join(os.path.dirname(__file__), 'samples', 'config.json')), config_path)

    # merge every key from config.json into the global environment
    with open(config_path) as f:
        config_json = json.load(f)
        for k in config_json:
            zvt_env[k] = config_json[k]

    init_log()

    import pprint
    pprint.pprint(zvt_env)
# Module import side effect: initialize the environment.  Under TESTING_ZVT
# the test home is used and the bundled sample data is (re-)extracted when
# it differs from the shipped archive.
if os.getenv('TESTING_ZVT'):
    init_env(zvt_home=ZVT_TEST_HOME)

    # init the sample data if needed
    same = False
    if os.path.exists(ZVT_TEST_ZIP_DATA_PATH):
        import filecmp
        same = filecmp.cmp(ZVT_TEST_ZIP_DATA_PATH, DATA_SAMPLE_ZIP_PATH)

    if not same:
        from shutil import copyfile
        from zvt.utils.zip_utils import unzip
        copyfile(DATA_SAMPLE_ZIP_PATH, ZVT_TEST_ZIP_DATA_PATH)
        unzip(ZVT_TEST_ZIP_DATA_PATH, ZVT_TEST_DATA_PATH)

else:
    init_env(zvt_home=ZVT_HOME)

# Import the recorders so they register themselves with the domain.
import zvt.recorders as zvt_recorders

__all__ = ['zvt_env', 'init_log', 'init_env', 'IntervalLevel', '__version__', 'AdjustType']
| StarcoderdataPython |
11316060 | <filename>chapter_05/15_kinetic_energy.py
# kinetic energy
def main():
    """Prompt for mass and velocity, then print the kinetic energy in joules."""
    mass = float(input("Please enter the object's mass in kg: "))
    velocity = float(input("Please enter the object's velocity "
                           "in meters per second: "))
    kin_energy = kinetic_energy(mass, velocity)
    print("The total kinetic energy is", format(kin_energy, ",.2f"), "joule.")
def kinetic_energy(m, v):
    """Return the kinetic energy (joules) of mass ``m`` (kg) moving at ``v`` (m/s)."""
    return (1 / 2) * m * v ** 2
main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.